text stringlengths 8 6.05M |
|---|
import sys
import os
import math
# from bank_reserves.random_walk import RandomWalker
import pandas as pd
import random, pickle, time
from datetime import datetime
# import json
# Fix the global RNG seed so every simulation run is reproducible.
random.seed(42)
class Consumer():
    """Agent that consumes mMTC/URLLC network services.

    The usage split, demand scale and willingness-to-pay are drawn from the
    module-seeded ``random`` generator at construction time.
    """

    def __init__(self, unique_id):
        self.if_update = False  # gates all the *_later update methods
        self.unique_id = unique_id
        self.step_count = 0
        # Random split of demand between mMTC and URLLC (ratios sum to 1).
        self.mMTC_usage_ratio = round(random.uniform(0, 1), 1)
        self.URLLC_usage_ratio = 1 - self.mMTC_usage_ratio
        # Overall demand scale, apportioned by the usage ratios.
        self.scale = random.randint(0, 10)
        self.URLLC_scale = self.URLLC_usage_ratio * self.scale
        self.mMTC_scale = self.mMTC_usage_ratio * self.scale
        # Unit prices per service type.
        self.mMTC_cost = 1
        self.URLLC_cost = 1.4
        self.cost = self.mMTC_scale * self.mMTC_cost + self.URLLC_scale * self.URLLC_cost
        # Willingness to pay = cost plus symmetric random noise; the copy is
        # what later updates overwrite (see update_willingness_to_pay).
        self.willingness_to_pay = self.cost + round(random.uniform(-2.2, 2.2), 2)
        self.willingness_to_pay_copy = self.willingness_to_pay

    def mMTC_scale_later(self, step):
        """Grow mMTC demand linearly with the step, once updates are enabled."""
        if self.if_update:
            self.mMTC_scale += 0.20 * step

    def URLLC_scale_later(self, step):
        """Grow URLLC demand linearly with the step, once updates are enabled."""
        if self.if_update:
            self.URLLC_scale += 0.12 * step

    def cost_later(self, step):
        """Recompute total cost from the (possibly grown) service scales."""
        if self.if_update:
            self.cost = self.mMTC_scale * self.mMTC_cost + self.URLLC_scale * self.URLLC_cost

    def update_willingness_to_pay(self, step):
        """Re-draw willingness to pay, decaying slightly with the step."""
        if self.if_update:
            self.willingness_to_pay_copy = self.cost - 0.05 * step + round(random.uniform(-0.8, 4), 2)
class SystemIntegrator():
    """Aggregates consumer purchases and records per-step revenue/volume totals."""

    def __init__(self, unique_id, consumer_list):
        self.unique_id = unique_id
        self.consumers = consumer_list
        self.hardware_investment = 0
        self.total_revenue = 0
        self.total_mMTC_services = 0
        self.total_URLLC_services = 0
        self.in_inv_list = []  # one [revenue, mMTC, URLLC, active_count] row per step

    def add_investment(self, step):
        """Tally revenue and service volume from willing consumers, then log a row."""
        self.total_revenue = 0
        self.total_mMTC_services = 0
        self.total_URLLC_services = 0
        self.number_of_active_consumers = 0
        for cust in self.consumers:
            # Only positive transactions count: the customer must be active
            # and value the service at least at cost.
            if cust.willingness_to_pay_copy >= cust.cost and cust.if_update == True:
                self.number_of_active_consumers += 1
                self.total_revenue += cust.cost
                self.total_mMTC_services += cust.mMTC_scale
                self.total_URLLC_services += cust.URLLC_scale
                print("Step: {}, SI_ID: {}, CustID: {}, revenue: {}, mmtc: {}, urllc: {}, mmtc_scale: {}, urllc_scale: {}, cost: {}".format(step, self.unique_id, cust.unique_id, self.total_revenue, self.total_mMTC_services, self.total_URLLC_services, cust.mMTC_scale, cust.URLLC_scale, cust.cost))
        self.in_inv_list.append([self.total_revenue, self.total_mMTC_services,
                                 self.total_URLLC_services,
                                 self.number_of_active_consumers])

    def create_si_df(self):
        """Dump the accumulated per-step rows to si_in_inv<id>.csv."""
        self.in_inv_df = pd.DataFrame(
            data=self.in_inv_list,
            columns=["total_returns", "total_mMTC_services_SI",
                     "total_URLLC_services_SI", "number_of_active_consumers"])
        self.in_inv_df.to_csv("si_in_inv" + str(self.unique_id) + ".csv")
#No more attributes, pay everything to the ISPs: Add from all ISPs and pay it forward to their respective ISPs
class ISP():
    """Internet service provider.

    Shares the city's investment cost according to its market share and
    collects a 70% revenue cut from its system integrators.
    """

    def __init__(self, unique_id, system_integrator_list, market_share,
                 investment_URLLC, investment_mMTC, URLLC_spectrum_capacity,
                 mMTC_spectrum_capacity, URLLC_infrastructure_cost,
                 mMTC_infrastructure_cost):
        self.unique_id = unique_id
        self.system_integrator = system_integrator_list
        self.market_share = market_share
        self.URLLC_spectrum_capacity = URLLC_spectrum_capacity
        self.mMTC_spectrum_capacity = mMTC_spectrum_capacity
        self.URLLC_infrastructure_cost = URLLC_infrastructure_cost
        self.mMTC_infrastructure_cost = mMTC_infrastructure_cost
        # Per service type: this ISP's market-share slice of half the city's
        # investment, plus its own infrastructure cost.
        self.URLLC_total_cost = (self.market_share * ((investment_URLLC) * 0.5)) + self.URLLC_infrastructure_cost
        self.mMTC_total_cost = (self.market_share * ((investment_mMTC) * 0.5)) + self.mMTC_infrastructure_cost
        self.total_cost = self.URLLC_total_cost + self.mMTC_total_cost
        self.total_returns = 0
        self.total_URLLC_services_ISP = 0
        self.total_mMTC_services_ISP = 0
        self.total_returns_list = []  # one [returns, mMTC, URLLC] row per step

    def update_returns(self):
        """Re-aggregate returns (70% revenue share) and volumes from all SIs."""
        self.total_returns = 0
        self.total_URLLC_services_ISP = 0
        self.total_mMTC_services_ISP = 0
        for si in self.system_integrator:
            self.total_returns += (si.total_revenue * 0.70)
            self.total_mMTC_services_ISP += si.total_mMTC_services
            self.total_URLLC_services_ISP += si.total_URLLC_services
        self.total_returns_list.append([self.total_returns,
                                        self.total_mMTC_services_ISP,
                                        self.total_URLLC_services_ISP])

    def create_isp_df(self):
        """Dump the accumulated per-step rows to isp_total_returns<id>.csv."""
        self.total_returns_df = pd.DataFrame(
            data=self.total_returns_list,
            columns=["total_returns", "total_mMTC_services_ISP",
                     "total_URLLC_services_ISP"])
        self.total_returns_df.to_csv("isp_total_returns" + str(self.unique_id) + ".csv")
# Scenario inputs: city-wide investment budgets per service type
# (arbitrary units; consumed by City.__init__ below).
investment_URLLC=80
investment_mMTC=20
class City():
    """Top-level agent: funds half of each investment budget and takes a 12%
    cut of every ISP's returns."""

    def __init__(self, isp_list):
        # The city funds half of each module-level investment budget.
        self.city_investment_URLLC = investment_URLLC * 0.5
        self.city_investment_mMTC = investment_mMTC * 0.5
        self.total_investment = self.city_investment_mMTC + self.city_investment_URLLC
        self.initial_investment = self.total_investment * 0.5
        self.ISPs = isp_list
        self.total_returns = 0
        self.total_URLLC_services_City = 0
        self.total_mMTC_services_City = 0
        self.total_returns_list = []  # one [returns, mMTC, URLLC] row per step

    def update_returns(self):
        """Re-aggregate the 12% cut of ISP returns and total service volumes."""
        self.total_returns = 0
        self.total_URLLC_services_City = 0
        self.total_mMTC_services_City = 0
        for isp in self.ISPs:
            self.total_returns += (isp.total_returns * 0.12)
            self.total_mMTC_services_City += isp.total_mMTC_services_ISP
            self.total_URLLC_services_City += isp.total_URLLC_services_ISP
        self.total_returns_list.append([self.total_returns,
                                        self.total_mMTC_services_City,
                                        self.total_URLLC_services_City])

    def create_city_df(self):
        """Dump the accumulated per-step rows to city_total_returns.csv."""
        self.total_returns_df = pd.DataFrame(
            data=self.total_returns_list,
            columns=["total_returns", "total_mMTC_services_City",
                     "total_URLLC_services_City"])
        self.total_returns_df.to_csv("city_total_returns.csv")
# class TSP():
# def __init__(self, unique_id, model, price=50):
# # initialize the parent class with required parameters
# super().__init__(unique_id, model)
# # for tracking total value of loans outstanding
# """percent of deposits the bank must keep in reserves - this is a
# UserSettableParameter in server.py"""
# self.price = price
# class Regulator():
# def __init__(self, unique_id, model, initial_investment, people, price=50, budget=1000):
# # initialize the parent class with required parameters
# super().__init__(unique_id, model)
# # for tracking total value of loans outstanding
# # self.bank_loans = 0
# self.data_collected = 0
# self.budget=budget
# self.initial_investment = 200
# self.price=price
# self.step_count=0
# self.cost_eff=0
# self.people=people
# self.number_of_locations = self.initial_investment/self.price
# self.location_list = []
# self.cost_eff_list =[]
# """percent of deposits the bank must keep in reserves - this is a
# UserSettableParameter in server.py"""
# self.total_awareness = 0
# # for tracking total value of deposits
# def cost_effectiveness(self, price) :
# citizen_awareness=0
# for i in self.people:
# citizen_awareness+=i.awareness
# self.cost_eff = price*self.number_of_locations/(citizen_awareness)
# self.cost_eff_list.append(self.cost_eff)
# self.cost_eff_df=pd.DataFrame(self.cost_eff_list)
# self.cost_eff_df.to_csv("costeff.csv")
# def number_of_locations_later(self, cost_eff_threshold):
# if self.cost_eff > cost_eff_threshold:
# if (self.number_of_locations+1)*self.price< self.budget:
# self.number_of_locations +=1
# self.location_list.append(self.number_of_locations)
# self.num_loc_df=pd.DataFrame(self.location_list)
# self.num_loc_df.to_csv("num_loc.csv")
# # self.num_loc_df=self.num_loc_df.append(location_list, ignore_index=True)
# def data_collected_later(self):
# self.data_collected=self.number_of_locations*self.step_count
# # subclass of RandomWalker, which is subclass to Mesa
# def step(self):
# self.step_count+=1
# for i in self.people:
# i.step1(self.data_collected, self.step_count)
# self.cost_effectiveness( self.price)
# self.number_of_locations_later( 1)
# self.data_collected_later()
# # print("self.awareness")
# class Person(RandomWalker):
# def __init__(self, unique_id, pos, model, moore, tsp, awareness_threshold):
# # init parent class with required parameters
# super().__init__(unique_id, pos, model, moore=moore)
# # the amount of awareness
# self.awareness_threshold=awareness_threshold
# self.awareness = self.random.randint(1, self.awareness_threshold + 1)
# self.awareness_copy = self.awareness
# self.tsp=tsp
# # self.regulator=regulator
# """start everyone off with a random amount in their wallet from 1 to a
# user settable rich threshold amount"""
# def step(self):
# self.random_move()
# # step is called for each agent in model.BankReservesModel.schedule.step()
# def step1(self,data_collected, step_count):
# # move to a cell in my Moore neighborhood
# # self.random_move()
# self.awareness_later(data_collected, step_count)
# # print(self.awareness)
# # trade
# def awareness_later(self,data_collected,step_count):
# if self.awareness<1.35*self.awareness_copy:
# self.awareness=math.sqrt(4*(0.01)*data_collected) + self.awareness_copy
# else:
# self.awareness=-2*math.sqrt(0.002*data_collected) + self.awareness_copy
# # y=-2\left(0.005x\right)^{\frac{1}{2}}+2
# def pollution_later(self,data_collected,step_count):
# self.pollution=math.sqrt(4*(0.02)*data_collected + self.awareness_copy) |
import discord
from discord.ext import commands
class UnPoof(commands.Cog):
    """Cog exposing an `unpoof` command that unbans a user by display name."""

    def __init__(self, client):
        self.client = client  # the bot/client this cog is attached to

    @commands.command()
    async def unpoof(self, ctx, *, Name):
        """Unban the first banned user whose name matches `Name` (case-insensitive)."""
        # NOTE(review): ctx.guild.bans() returns a list of BanEntry in
        # discord.py 1.x but an *async iterator* in 2.x — confirm the library
        # version this code targets.
        bans = await ctx.guild.bans()
        for i in bans:
            if i.user.name.lower() == Name.lower():
                await ctx.guild.unban(i.user, reason = None)
                await ctx.send(f'👌 {i.user.name} unbanned!')
def setup(client):
    # Extension entry point called by bot.load_extension().
    # NOTE(review): discord.py 2.x requires `async def setup` — confirm version.
    client.add_cog(UnPoof(client))
#!/usr/bin/env python
import os
import yaml
import click
@click.command()
@click.option(
    "--jjbfile",
    type=click.File('r'),
    required=True,
    help="Extract dsl from this JJB job")
@click.option(
    "--outdir",
    type=click.Path(exists=True),
    required=True,
    help="Dir to write extracted groovy into"
)
def extract_dsl(jjbfile, outdir):
    """Extract inline Jenkins Job DSL from a JJB YAML file into .groovy files.

    Each `job`/`job-template` entry with a `dsl` key is written to
    <outdir>/<jjbfile-basename>-<job-name>.groovy.
    """
    jjb = yaml.safe_load(jjbfile)
    for item in jjb:
        # BUG FIX: dict.keys() is a view in Python 3 and is not subscriptable,
        # so `item.keys()[0]` raised TypeError. Each JJB item is a one-key
        # mapping; next(iter(item)) retrieves that key.
        key = next(iter(item))
        if key in ("job", "job-template") and 'dsl' in item[key]:
            # JJB escapes literal braces by doubling them; undo for raw groovy.
            dsl = item[key]['dsl'].replace('{{', '{').replace('}}', '}')
            outfile = "{outdir}/{base}-{item}.groovy".format(
                outdir=outdir,
                base=os.path.basename(jjbfile.name).split('.')[0],
                item=item[key]['name'])
            with open(outfile, "w") as outf:
                outf.write(dsl)

if __name__ == "__main__":
    extract_dsl()
|
# r = requests.get("http://www.pythonhow.com/real-estate/rock-springs-wy/LCWYROCKSPRINGS/")
# r = requests.get("http://www.pyclass.com/real-estate/rock-springs-wy/LCWYROCKSPRINGS/",
# headers={'User-agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:61.0) Gecko/20100101 Firefox/61.0'})
import requests
from bs4 import BeautifulSoup

# Fetch the real-estate listing page. A browser User-Agent header is sent
# because the site rejects the default python-requests agent (see the
# commented-out attempts above).
r = requests.get("http://www.pyclass.com/real-estate/rock-springs-wy/LCWYROCKSPRINGS/", headers={'User-agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:61.0) Gecko/20100101 Firefox/61.0'})
c= r.content
soup= BeautifulSoup(c, "html.parser")
# Each property listing lives in a <div class="propertyRow"> container.
alll= soup.find_all("div", {"class":"propertyRow"})
print(len(alll))  # number of listings found on the page
import json
import os
import logging
import logging
class JsonConfigReader(object):
    """Read a JSON config file and resolve '<a.b>'-style default references.

    A string value of the form '<defaults.dataBase>' is replaced by the value
    found at that dotted address in the enclosing config dictionary.
    """

    def __init__(self, config_path, options=None):
        """Class constructor.
        Args:
            config_path (str): configuration folder without trailing '\' or file absolute path with *.json extension.
            options (?dict): Options that contain information about environment and config file pattern.
                options['env'] (dict): Environment description. Example: '{'name': 'qa', 'branch': 'hotfix'}' or {'name': stg} etc.
                options['file_pattern'] (str): File pattern option. Should match Environment keys. Example: '{env[name]}_{env[branch]}_config' or '{evn[name]}Config' etc.
        Returns:
            Void
        """
        logging.basicConfig(level=logging.INFO)
        self.logger = logging.getLogger(__name__)
        self.config_path = config_path
        self.options = options
        self.config_file_path = self.get_config_file_path()
        config = self.read_json_file(self.config_file_path)
        # Robustness: read_json_file returns None on IOError; the original
        # then crashed inside _parse_config on None.keys().
        self.config = self._parse_config(config) if config is not None else None

    def read_json_file(self, file_path):
        """Read a JSON file and return its content as a dictionary.
        Returns:
            dict: Config file content, or None if the file could not be read.
        """
        config = None
        try:
            with open(file_path, 'r') as f:
                config = json.load(f)
        except IOError as e:
            self.logger.error('Cannot read config file "{}"'.format(file_path))
            self.logger.error(e.strerror)
        return config

    def get_value_by_property_address(self, property_address, dictionary):
        """Return the value at a dotted address inside a nested dict.
        Args:
            property_address (str): Address of the property in the config. Example: 'dataBase.connection'
            dictionary (dict): Configuration
        Returns:
            Could be bool, dict, str, number, list etc.
        """
        # Renamed from `list` to avoid shadowing the builtin.
        parts = property_address.split('.')
        value = dictionary
        for address in parts:
            if type(value) is not dict:
                return value
            value = value[address]
        return value

    def get_config_file_path(self):
        """Return the full config file path.

        With no options, config_path is assumed to already be the file path;
        otherwise the file name is built from the pattern and environment.
        Returns:
            str: full config file path
        """
        if self.options is None or type(self.options) is not dict:
            return self.config_path
        file_pattern = self.options['file_pattern']
        file_name = file_pattern.format(env=self.options['env'])
        file_name = file_name + '.json'
        return os.path.join(self.config_path, file_name)

    def is_default_value(self, string_value):
        """Tell whether a string is a default-reference variable.
        Args:
            string_value (str): Specially formatted string. Example: '<defaults.dataBase>'
        Returns:
            bool: True if it is a default variable, False otherwise
        """
        return type(string_value) is str and '<' in string_value and '>' in string_value

    def get(self):
        """Return the parsed config content.
        Returns:
            dict: Configuration file content
        """
        return self.config

    def _parse_config(self, config, original_config=None):
        """Recursively replace default-reference values with their targets.
        Args:
            config (dict): Config (sub)dictionary to process in place.
            original_config (dict, optional): Dictionary that references are
                resolved against (the parent level for nested dicts).
        Returns:
            dict: Configuration content with references resolved.
        """
        keys_list = list(config.keys())
        for key in keys_list:
            value = config[key]
            if self.is_default_value(value):
                # BUG FIX: the original used `config if None else original_config`,
                # which always evaluated to original_config — None on the first
                # call — so top-level references were resolved against None and
                # silently became None.
                if original_config is None:
                    original_config = config
                address = self._get_default_address(value)
                config[key] = self.get_value_by_property_address(address, original_config)
                continue
            if type(value) is dict:
                self._parse_config(value, config)
                continue
            if type(value) is list:
                for element in value:
                    # Robustness: only dict elements can be parsed; the
                    # original crashed on lists of scalars.
                    if type(element) is dict:
                        self._parse_config(element, config)
        return config

    def _get_default_address(self, address):
        """Strip the angle brackets from a default-reference string.
        Args:
            address (str): default-reference string.
        Returns:
            str: Regular property address string.
        Example:
            >> _get_default_address('<defaults.dataBase>')
            >> 'defaults.dataBase'
        """
        return address.replace('>', '').replace('<', '')
|
import unittest
from katas.beta.simple_beads_count import count_red_beads
class CountRedBeadsTestCase(unittest.TestCase):
    """Unit tests for count_red_beads (katas.beta.simple_beads_count)."""

    def test_equal_1(self):
        blue_beads = 1
        self.assertEqual(count_red_beads(blue_beads), 0)

    def test_equal_2(self):
        blue_beads = 3
        self.assertEqual(count_red_beads(blue_beads), 4)

    def test_equal_3(self):
        blue_beads = 5
        self.assertEqual(count_red_beads(blue_beads), 8)
|
import numpy as np
from solver import tsv2arr
from math import floor
from settings import *
def create_sliding_window(data: np.ndarray, window_size: int):
    """
    Converts data to sliding window data
    :param data: Timeseries data to convert to sliding window data. Columns of data are x1,..,xn,t.
    :param window_size: Length of sliding window.
    :return: Sliding window data. Each row contains all the data in one sliding window.
    """
    # One window starts at every row that still has window_size rows after it.
    n_windows = data.shape[0] - window_size + 1
    # Index matrix: row i holds [i, i+1, ..., i+window_size-1].
    offsets = np.arange(window_size)[np.newaxis, :]
    starts = np.arange(n_windows)[:, np.newaxis]
    return data[starts + offsets]
def approximate_derivatives(data: np.ndarray):
    """
    Approximate derivative using forward difference.
    :param data: Timeseries data with timestamps in the last column.
    :return: Numpy array with derivatives.
    """
    values = data[:, :-1]                    # state columns x1..xn
    times = data[:, -1]                      # timestamp column
    dt = np.diff(times).reshape((-1, 1))     # per-step time increments
    return np.diff(values, axis=0) / dt
def create_training_data(data: np.ndarray, window_size: int, step: int):
    """
    Create training data of sliding windows with target values as derivatives.
    :param data: Timeseries data with timestamps in the last column.
    :param window_size: Length of sliding window.
    :param step: For sparsing the data. Step 1 uses all the data, step 2 uses every other datapoint and so on
    :return: Tuple with train and target data
    """
    thinned = data[::step, ...]
    # Windows exclude the last row; each window's target is the derivative at
    # the timestamp right after it.
    windows = create_sliding_window(thinned[:-1, ...], window_size)
    targets = approximate_derivatives(thinned[window_size - 1:, ...])
    return windows, targets
def get_data(file, n, sparse, window_size):
    """
    Returns data from the file as a numpy array.
    :param file: The file to read data from
    :param n: Number of rows to include (max) from file
        (NOTE(review): `n` is never used in the body — confirm intent)
    :param sparse: If sparse=x, only use every x-th row
    :param window_size: The size of the sliding window for data
    :return: The data from file
    """
    # Generate some training- and test-data
    # The last column of the raw data is assumed to be an integer run index
    # — TODO confirm against tsv2arr's output format.
    raw_data = tsv2arr(file)
    in_train=[]
    target_train = []
    # All runs except the last become training data.
    for i in range(0,int(raw_data[-1,-1])):
        run = np.array(list(filter(lambda x:x[-1]==i,raw_data)))
        # Strip the run-index column before windowing.
        run = create_training_data(run[:,:-1], window_size,sparse)
        in_train.append(run[0])
        target_train.append(run[1])
    in_train = np.array(in_train)
    # DATA_NUM_VARIABLES comes from `settings` (star import at file top).
    in_train = np.reshape(in_train,(-1,window_size,DATA_NUM_VARIABLES))
    target_train = np.array(target_train)
    target_train = np.reshape(target_train,(-1,DATA_NUM_VARIABLES-1))
    # The last run is held out as the test set.
    last_run = np.array(list(filter(lambda x:x[-1]==raw_data[-1,-1], raw_data)))
    in_test,target_test = create_training_data(last_run[:,:-1],window_size,sparse)
    return in_train, target_train, in_test, target_test
def split_data(x_data, y_data, split):
    """
    Splits the training (x_data) and testing (y_data) data into four parts.
    The first part is the x-data for training, second is x-data for prediction (backprop to nn),
    third part is y-data for training and last part is y-data for prediction
    :param x_data: General input data
    :param y_data: General output data
    :param split: The split percentage (how large should the prediction data be in percentage)
    :return: x-data for training, x-data for prediction, y-data for training, y-data for prediction
    """
    cut = floor(len(x_data) * (1 - split))
    return x_data[:cut], x_data[cut:], y_data[:cut], y_data[cut:]
|
# decision.py contains the class structure and state behaviour of the mars
# rover in the Unity simulator.
import numpy as np
from rover_commands_2 import Driving, Steering
from rover_commands_3 import Navigate
from navigable_pixels import NavPixels, NavPixelLimits, NavPixelsInSegment
class Decision():
    """Decision-step wrapper for the Unity mars-rover simulator."""

    # NOTE(review): this method takes no `self`, so it only works when called
    # on the class itself (Decision.decision_step(rover)) — confirm callers.
    def decision_step(Rover):
        """Run one navigate-then-steer cycle and return the updated Rover state."""
        a = Navigate(Rover)
        Rover = a.drive_and_steer_responsively()
        b = Steering(Rover)
        b.steer_mean_biased_left()
        # c = NavPixelsInSegment(Rover)
        # print("FRONT PIXELS", c.front)
        # print("LEFT PIXELS", c.left)
        # print("RIGHT PIXELS", c.right)
        return Rover
|
from apel.common import valid_from, valid_until, parse_timestamp, \
iso2seconds
from unittest import TestCase
import datetime
class DateTimeUtilsTest(TestCase):
    '''
    Test case for date/time functions from apel.common
    '''

    def test_valid_from(self):
        """valid_from is one day before the given time."""
        now = datetime.datetime.now()
        result = now - datetime.timedelta(days=1)
        self.assertEqual(result, valid_from(now))

    def test_valid_until(self):
        """valid_until is 28 days after the given time."""
        now = datetime.datetime.now()
        result = now + datetime.timedelta(days=28)
        self.assertEqual(result, valid_until(now))

    def test_parse_timestamp(self):
        '''
        Checks that the different time formats that we might have to parse
        are handled correctly. Note that we convert into datetime objects
        with no timezone information for internal use.
        '''
        valid_dates = ['2010-01-01 10:01:02', '2010-01-01T10:01:02Z', '2010-01-01T11:01:02+01:00']
        dts = [parse_timestamp(date) for date in valid_dates]
        for dt in dts:
            self.assertEqual(dt.year, 2010)
            self.assertEqual(dt.month, 1)
            self.assertEqual(dt.day, 1)
            self.assertEqual(dt.hour, 10)
            self.assertEqual(dt.minute, 1)
            self.assertEqual(dt.second, 2)
            self.assertEqual(dt.tzinfo, None)

    def test_iso2seconds(self):
        # BUG FIX: assertTrue(x, y) treats y as a *message* and passes for any
        # truthy x, so the original assertions never compared anything.
        # Expected values assume 365-day years and 30-day months — TODO
        # confirm against iso2seconds' conventions before merging.
        txt1, txt2, txt3 = 'P1Y', 'P1M', 'P1D'
        self.assertEqual(iso2seconds(txt1), 3600 * 24 * 365)
        self.assertEqual(iso2seconds(txt2), 3600 * 24 * 30)
        self.assertEqual(iso2seconds(txt3), 3600 * 24)
from urllib.parse import urljoin
import requests
from ex02_bearer import bearer_token_for_namespace
import stacksmith
def get_apps(namespace, token):
    """Return the JSON list of applications for *namespace* from the Stacksmith API.

    Args:
        namespace: Stacksmith namespace name.
        token: Bearer token string sent in the `authorization` header.
    Raises:
        AssertionError: if the API does not respond with HTTP 200.
    """
    endpoint = urljoin(stacksmith.url, 'ns/{ns}/apps'.format(ns=namespace))
    response = requests.get(
        endpoint, headers={'authorization': token})
    # BUG FIX: the original message read "Failed to applications for
    # namepsace" — missing verb and typo.
    assert response.status_code == 200, (
        'Failed to fetch applications for namespace "{ns}": {error}'.format(
            ns=namespace, error=response.json()['error']))
    return response.json()
def main():
    """
    Use the Stacksmith API to fetch applications for a namespace.
    """
    namespace = stacksmith.namespace
    print('Fetching applications for namespace "{ns}"'.format(ns=namespace))
    bearer_token = bearer_token_for_namespace(namespace)
    apps = get_apps(namespace, bearer_token)
    print('Found {n} applications for namespace "{ns}"'
          .format(n=len(apps), ns=namespace))
    # Each app dict carries 'name' and a 'links.self' URL, per the response
    # shape consumed here.
    for app in apps:
        print('{name}: {link}'
              .format(name=app['name'], link=app['links']['self']))

if __name__ == "__main__":
    main()
|
import os
from Helper import Utils
'''Global path variables are set in here.'''
'''Be aware that a change of this file might affect several other files.'''
# Data directories (relative to this module's location — two levels up).
global_path_to_original_train_data = '../../Resources/TrainingDataSets/train1'
global_path_to_original_test_data = '../../Resources/TrainingDataSets/test1'
global_path_to_train_data = '../../Resources/TrainingDataSets/train2'
global_path_to_test_data = '../../Resources/TrainingDataSets/test2'
global_path_to_original_random_images = '../../Resources/TrainingDataSets/randomimage1'
global_path_to_random_images = '../../Resources/TrainingDataSets/randomimage2'
# Result directories
global_path_to_other_results = '../../Resources/OtherResults'
global_path_to_cifar10data = '../../Resources/TensorFlowFiles/cifar10_data'
global_path_to_cifar10eval = '../../Resources/TensorFlowFiles/cifar10_eval'
global_path_to_cifar10train = '../../Resources/TensorFlowFiles/cifar10_train/cifar10_train'
global_path_to_cifar10train100k = '../../Resources/TensorFlowFiles/cifar10_train/Standard100K/'
global_path_to_cifar10eval_single_directory = '../../Resources/TensorFlowFiles/cifar10eval_single_directory/'
global_path_to_cifar10batches = '../../Resources/TensorFlowFiles/cifar10_data/cifar-10-batches-bin/'
global_path_to_cifar10predictSingleImageBatch = '../../Resources/TensorFlowFiles/cifar10_data/cifar10_predict_single_image_batch/'
# TODO(Sören Schleibaum): Has to be removed.
global_output_test = '../../Resources/TensorFlowFiles/cifar10_data/test'
# Warn if directories are not there
# TODO
# Create missing directories (side effect at import time — every importer of
# this module triggers directory creation via Helper.Utils).
Utils.create_directory(global_path_to_other_results)
Utils.create_directory(global_path_to_cifar10batches)
Utils.create_directory(global_path_to_train_data)
Utils.create_directory(global_path_to_test_data)
# Uncomment to check if the directories exist
# print(os.path.exists(global_path_to_original_train_data))
# print(os.path.exists(global_path_to_original_test_data))
# print(os.path.exists(global_path_to_train_data))
# print(os.path.exists(global_path_to_test_data))
# print(os.path.exists(global_path_to_cifar10data))
# print(os.path.exists(global_path_to_cifar10eval))
# print(os.path.exists(global_path_to_cifar10train))
|
import os
import sys
import cv2
import numpy as np
input_file = 'C:/Users/dell/Desktop/niit/niit/3rd semester/letter.data'
img_resize_factor = 12   # upscale factor for on-screen display
start = 6                # pixel values start at column 6 of each TSV record
end = -1                 # last column excluded
height, width = 16, 8    # glyph bitmap dimensions in this dataset
# Show each character bitmap in a window; press Esc (key code 27) to stop.
with open(input_file, 'r') as f:
    for line in f.readlines():
        data = np.array([255 * float(x) for x in line.split('\t')[start:end]])
        img = np.reshape(data, (height, width))
        img_scaled = cv2.resize(img, None, fx=img_resize_factor, fy=img_resize_factor)
        cv2.imshow('Image', img_scaled)
        c = cv2.waitKey()
        if c == 27:
            break
import numpy as np
import neurolab as nl

# Train a small feed-forward net to classify handwritten characters from the
# OCR letter dataset.
input_file = 'C:/Users/dell/Desktop/niit/niit/3rd semester/letter.data'
num_datapoints = 50
orig_labels = 'omandig'              # the character classes to learn
num_orig_labels = len(orig_labels)
num_train = int(0.7 * num_datapoints)   # 70/30 train/test split
num_test = num_datapoints - num_train
start = 6    # pixel values start at column 6 of each TSV record
end = -1
data = []
labels = []
with open(input_file, 'r') as f:
    for line in f.readlines():
        list_vals = line.split('\t')
        # Column 1 is the character label; skip classes we are not learning.
        if list_vals[1] not in orig_labels:
            continue
        label = np.zeros((num_orig_labels, 1))
        label[orig_labels.index(list_vals[1])] = 1    # one-hot encode
        labels.append(label)
        cur_char = np.array([float(x) for x in list_vals[start:end]])
        data.append(cur_char)
        if len(data) >= num_datapoints:
            break
data = np.asfarray(data)
labels = np.array(labels).reshape(num_datapoints, num_orig_labels)
num_dims = len(data[0])
# Two hidden layers (128, 16); inputs are assumed in [0, 1].
nn = nl.net.newff([[0, 1] for _ in range(len(data[0]))],
                  [128, 16, num_orig_labels])
nn.trainf = nl.train.train_gd
error_progress = nn.train(data[:num_train, :], labels[:num_train, :],
                          epochs=10000, show=100, goal=0.01)
print('\nTesting on unknown data:')
predicted_test = nn.sim(data[num_train:, :])
for i in range(num_test):
    # BUG FIX: the original printed labels[i] — a TRAINING label — as the
    # "Original" for test sample i. The i-th test sample's true label is
    # labels[num_train + i].
    print('\nOriginal:', orig_labels[np.argmax(labels[num_train + i])])
    print('Predicted:', orig_labels[np.argmax(predicted_test[i])])
|
# Quicksort (pivot is the first element).
def sort(A):
    """Return a sorted copy of A using quicksort with the head as pivot."""
    if len(A) < 2:
        return A
    pivot, rest = A[0], A[1:]
    smaller = [x for x in rest if x < pivot]
    larger = [x for x in rest if not (x < pivot)]
    return sort(smaller) + [pivot] + sort(larger)
def divide(p, A):
    """Partition A around pivot p, preserving order.

    Returns a tuple (elements < p, elements >= p).
    """
    below = [a for a in A if a < p]
    at_or_above = [a for a in A if not (a < p)]
    return (below, at_or_above)
import requests
import re
import time
from bs4 import BeautifulSoup
import sys
from operator import itemgetter
# CLI arguments: router admin credentials, iteration count (negative = run
# forever), and the sampling period in seconds between traffic snapshots.
username_=sys.argv[1]
password_=sys.argv[2]
timetorun=int(sys.argv[3])
period = int(sys.argv[4])
def get_router_data(param):
    """Log in to the router at 192.168.1.1, snapshot its status pages, and
    return the parsed wireless-client table.

    param selects which snapshot this is: 1 = first sample (also saves the
    DHCP and stats pages), 2 = second sample. The returned value is a list of
    per-client rows, each a list of the <font> cell texts from the wireless
    status table.

    NOTE(review): the HTML selectors (bgcolor "#b7b7b7", /wlstatbl.htm etc.)
    are specific to one router firmware — confirm against the target device.
    """
    login_data = {
        'username': username_,
        'password': password_,
        'submit.htm?login.htm': 'Send'
    }
    with requests.Session() as s:
        try:
            url = "http://192.168.1.1/login.cgi"
            r = s.post(url, data=login_data)
            wifi_client = s.get("http://192.168.1.1/wlstatbl.htm")
            dhcp_table = s.get("http://192.168.1.1/dhcptbl.htm")
            stats_table = s.get("http://192.168.1.1/stats.htm")
        except requests.exceptions.RequestException as e:  # This is the correct syntax
            print(e)
    # Persist the snapshots so the parsing helpers below can re-read them.
    if param == 1:
        f = open("wifi_tab.html", "w+")
        f.write(wifi_client.text)
        f.close()
    if param == 2:
        f = open("wifi_tab2.html", "w+")
        f.write(wifi_client.text)
        f.close()
    f = open("dhcp_tab.html", "w+")
    f.write(dhcp_table.text)
    f.close()
    f = open("stats_table.html", "w+")
    f.write(stats_table.text)
    f.close()
    if param == 1:
        soup = BeautifulSoup(open("wifi_tab.html"), features="html5lib")
    if param == 2:
        soup = BeautifulSoup(open("wifi_tab2.html"), features="html5lib")
    # Client rows in the wireless table use this background colour.
    my_table = soup.findAll("tr", {'bgcolor': "#b7b7b7"})
    lst = []
    for each in my_table:
        lst.append(each.findAll("font"))
    # Flatten each row's <font> cells to their text content.
    final_list = []
    for each in lst:
        tmp = []
        for every in each:
            tmp.append(every.text)
        final_list.append(tmp)
    return final_list
def read_dhcp():
    """Parse the saved DHCP table HTML into per-device triples.

    Reads "dhcp_tab.html" (written by get_router_data). Each table row has
    five cells; only the first three are kept — presumably hostname, IP and
    MAC, confirm against the router firmware's table layout.
    """
    soup = BeautifulSoup(open("dhcp_tab.html"), features="html5lib")
    cells = [td.text for td in soup.findAll("td", {'bgcolor': "#b7b7b7"})]
    # Keep columns 0-2 of every 5-column row.
    kept = [c for i, c in enumerate(cells) if i % 5 < 3]
    # BUG FIX: the original accumulate-then-append loop only flushed a group
    # when the NEXT group started, so the final device triple was silently
    # dropped. Chunking by slices includes it.
    return [kept[i:i + 3] for i in range(0, len(kept), 3)]
def router_speed():
    """Extract link-rate figures from the saved stats page.

    Returns the regex captures from stats_table.html; the caller uses
    index 0 as upstream and index 1 as downstream — presumably the first two
    <td> rows of the stats table (confirm against the firmware's HTML).
    """
    f = open("stats_table.html")
    text_inside = f.read()
    # Capture the token following whitespace after each "<td>" occurrence.
    ls = re.findall("[<][t][d][>].*[\s](.*)", text_inside)
    return ls
def processlist(prev_lst, current_lst):
    """Turn two absolute byte-counter snapshots into per-interval rates.

    Columns 1 and 2 of each row are replaced in place with
    (current - previous) * 1.2; prev_lst is mutated and returned.
    """
    for row_idx, row in enumerate(prev_lst):
        for col_idx in range(len(prev_lst[0])):
            if col_idx in (1, 2):
                row[col_idx] = (-int(row[col_idx]) + int(current_lst[row_idx][col_idx])) * 1.2
    return prev_lst
def _report_once():
    """One measurement cycle: sample the router twice `period` seconds apart,
    convert counters to rates, map MACs to DHCP hostnames, and print a report
    sorted by send rate."""
    prev_lst = get_router_data(1)
    time.sleep(period)
    current_lst = get_router_data(2)
    processed_list = processlist(prev_lst, current_lst)
    dhcp_list = read_dhcp()
    # Replace each row's MAC (column 0) with the DHCP hostname when one with
    # a non-trivial name is known.
    for i, each in enumerate(processed_list):
        mac_key = processed_list[i][0]
        for j, every in enumerate(dhcp_list):
            if dhcp_list[j][2] == mac_key and len(dhcp_list[j][0]) > 2:
                processed_list[i][0] = dhcp_list[j][0]
                break
    speeds = router_speed()
    processed_list = sorted(processed_list, key=itemgetter(1), reverse=True)
    print("Upstream "+speeds[0]+" Downstream "+speeds[1])
    for each in processed_list:
        print(str(each[0])+" Sending "+ str(each[1])+" Receiving " + str(each[2]))
    print("\n\n")

# The two original branches were byte-identical loop bodies; they now share
# _report_once(). Negative timetorun means "run forever".
if timetorun < 0:
    while True:
        _report_once()
else:
    for _ in range(0, timetorun):
        _report_once()
    print("\n\n")
'''******************************************
ATHON
Programa de Introdução a Linguagem Python
Disiplina: Lógica de Programação
Professor: Francisco Tesifom Munhoz
Data: Primeiro Semestre 2021
*********************************************
Atividade: Lista 2 (Ex 10)
Autor: Yuri Pellini
Data: 19 de Maio de 2021
Comentários:
******************************************'''
# Input: monthly gross salary in BRL
Sal=float(input("Coloque seu salário: R$"))
# Processing: contribution amount under each possible INSS bracket rate
G1=float(Sal*0.08)   # 8% bracket
G2=float(Sal*0.09)   # 9% bracket
G3=float(Sal*0.11)   # 11% bracket
# Output: select the bracket by salary ceiling. Flattened from the original
# nested if/else pyramid into an if/elif chain — identical behavior.
if(Sal<=1174.86):
    print("Você paga uma taxa INSS de 8% e o valor que deve-se pagar é de R$",G1)
elif(Sal<=1958.10):
    print("Você paga uma taxa INSS de 9% e o valor que deve-se pagar é de R$",G2)
elif(Sal<=3916.20):
    print("Você paga uma taxa INSS de 11% e o valor que deve-se pagar é de R$",G3)
else:
    # Above the last ceiling the contribution is a fixed amount.
    print("Você paga uma taxa INSS fixa no valor de R$430.78")
import pandas as pd
import pickle
import re
from pymystem3 import Mystem
def predict_department(text: str) -> int:
    """
    Predicts target department id for a task
    Input: text.
    A string containing task description
    Output: department_id.
    Integer value.
    One of [ 9, 8, 5, 18, 17, 14, 19, 15, 13, 21, 11, 10]
    """
    preprocessed_text = preprocess(text)
    # NOTE(review): the pickled model is re-read from disk on every call, and
    # the file handle from open() is never closed — consider loading once at
    # module level inside a `with` block.
    model = pickle.load(open('models/dep_classification.sav', 'rb'))
    # The model expects an iterable of documents, hence the single-item list.
    text_for_prediction = [preprocessed_text]
    predicted_department = model.predict(text_for_prediction)[0]
    return predicted_department
def preprocess(text):
    """Normalize raw task text: keep word tokens, drop stopwords, lemmatize."""
    return lemmatize(remove_stopwords(words_only(text)))
def words_only(text):
    """Return the Cyrillic word runs of *text* joined by single spaces.

    The pattern also admits the symbols :=!()_%/| inside runs. Non-string
    input yields "" instead of raising.
    """
    # Raw string avoids invalid-escape-sequence warnings; `re` caches the
    # compiled pattern between calls.
    regex = re.compile(r"[А-Яа-я:=!\)\()\_\%/|]+")
    try:
        return " ".join(regex.findall(text))
    except TypeError:  # narrowed from bare `except`: findall on a non-string
        return ""
def remove_stopwords(text):
    """Drop stopwords (data/stopwords.txt, one per line) from *text*.

    Returns " " when *text* cannot be split (e.g. None), matching the
    original fallback behavior.
    """
    with open('data/stopwords.txt') as filename:
        # set: O(1) membership tests instead of O(n) list scans per word.
        stopwords = set(filename.read().split('\n'))
    try:
        return " ".join([word for word in text.split() if word not in stopwords])
    except AttributeError:  # narrowed from bare `except`: non-string input
        return " "
def lemmatize(text):
    """Lemmatize Russian *text* with Mystem; best-effort, returns " " on failure."""
    # NOTE(review): a new Mystem instance is spawned on every call, which is
    # expensive (it launches an external process) -- consider a module-level
    # singleton.
    m = Mystem()
    try:
        # Mystem returns a token/lemma list; join it back into one string.
        return "".join(m.lemmatize(text)).strip()
    except:
        # Best-effort fallback: any failure yields a single-space placeholder.
        return " "
#!/usr/bin/env python
# Copyright (c) 2015 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies actions with multiple outputs will correctly rebuild.
"""
# NOTE: Python 2 script (print statements; depends on the TestGyp harness).
import TestGyp
import os
import sys
if sys.platform == 'win32':
  # Skipped entirely on Windows; exit 0 so the harness counts it as passed.
  print "This test is currently disabled: https://crbug.com/483696."
  sys.exit(0)
test = TestGyp.TestGyp()
test.run_gyp('multiple-outputs.gyp', chdir='src')
chdir = 'relocate/src'
test.relocate('src', chdir)
def build_and_check():
  # Build + check that both outputs exist.
  test.build('multiple-outputs.gyp', chdir=chdir)
  test.built_file_must_exist('out1.txt', chdir=chdir)
  test.built_file_must_exist('out2.txt', chdir=chdir)
# Plain build.
build_and_check()
# Remove either + rebuild. Both should exist (again).
os.remove(test.built_file_path('out1.txt', chdir=chdir))
build_and_check();
# Remove the other + rebuild. Both should exist (again).
os.remove(test.built_file_path('out2.txt', chdir=chdir))
build_and_check();
test.pass_test()
|
# More files
# Python 2 exercise script: copy one file to another, confirming with the
# user first (print statements and raw_input are py2-only).
from sys import argv
from os.path import exists
# Usage: script.py <source> <destination>
script,from_file,to_file=argv
print "Copying files from %s to %s." %(from_file,to_file)
# Read the whole source file into memory (fine for small files only).
in_file=open(from_file)
in_data=in_file.read()
print "The input file is %d bytes long " %len(in_data)
print "Does the output file exists? %r " %exists(to_file)
print "Ready,hit ENTER to continue,CTRL-C to abort."
raw_input()
# Write the copy, then close both handles.
out_file=open(to_file,'w')
out_file.write(in_data)
print "All Done !!"
out_file.close()
in_file.close()
|
import unittest
from katas.kyu_7.a_rule_of_divisibility_by_13 import thirt
class ThirtTestCase(unittest.TestCase):
    """Unit tests for thirt() (Codewars kata: 'A Rule of Divisibility by 13')."""
    def test_equals(self):
        self.assertEqual(thirt(1234567), 87)
    def test_equals_2(self):
        self.assertEqual(thirt(321), 48)
    def test_equals_3(self):
        self.assertEqual(thirt(8529), 79)
    def test_equals_4(self):
        self.assertEqual(thirt(85299258), 31)
    def test_equals_5(self):
        self.assertEqual(thirt(5634), 57)
    def test_equals_6(self):
        self.assertEqual(thirt(1111111111), 71)
    def test_equals_7(self):
        self.assertEqual(thirt(987654321), 30)
|
'''
66. Plus One
Given a non-negative integer represented as a non-empty array of digits, plus
one to the integer.
You may assume the integer do not contain any leading zero, except the number 0
itself.
The digits are stored such that the most significant digit is at the head of the
list.
'''
class Solution(object):
    def plusOne(self, digits):
        """Add one to the number represented by *digits* (most significant
        digit first), mutating the list in place and returning it.

        :type digits: List[int]
        :rtype: List[int]

        Rewritten: the original carry loop had a duplicated return and an
        unreachable-looking break/return structure; this walks right-to-left,
        stopping at the first digit that does not overflow.
        """
        if not digits:
            # Empty input: treat as 0, so the result is [1].
            return [1]
        for i in range(len(digits) - 1, -1, -1):
            if digits[i] < 9:
                # No carry past this digit -- done.
                digits[i] += 1
                return digits
            # 9 + 1 rolls over to 0 and the carry propagates left.
            digits[i] = 0
        # Carry ran off the front (all digits were 9): prepend the 1.
        digits.insert(0, 1)
        return digits
if __name__ == '__main__':
    # Smoke test: 999 + 1 -> [1, 0, 0, 0].
    digits = [9, 9, 9]
    cs = Solution()
    # Fixed: use function-call print so the script also runs on Python 3
    # (the original `print x` statement is a py3 syntax error).
    print(cs.plusOne(digits))
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
from scrapy.loader import ItemLoader
from scrapy.loader.processors import MapCompose, TakeFirst, Join
from w3lib.html import remove_tags
import re
# News / information article scraped from a portal.
class NewsItem(scrapy.Item):
    url = scrapy.Field()          # article link
    author = scrapy.Field()       # author
    editor = scrapy.Field()       # editor
    content = scrapy.Field()      # body text
    title = scrapy.Field()        # headline
    thumbnail = scrapy.Field()    # thumbnail image
    pdate = scrapy.Field()        # publication date/time
    tag = scrapy.Field()          # tags
    channel = scrapy.Field()      # section / channel
    description = scrapy.Field()  # abstract / summary
    origin = scrapy.Field()       # original news source
    website = scrapy.Field()      # hosting website
    category = scrapy.Field()     # category: feature topic / news
    has_pic = scrapy.Field()      # whether the article has images
    has_video = scrapy.Field()    # whether the article has video
    reserved_1 = scrapy.Field()   # reserved field 1
    reserved_2 = scrapy.Field()   # reserved field 2
    reserved_3 = scrapy.Field()   # reserved field 3
    reserved_4 = scrapy.Field()   # reserved field 4
    reserved_5 = scrapy.Field()   # reserved field 5
    reserved_6 = scrapy.Field()   # reserved field 6
# Images to download (field names follow scrapy's ImagesPipeline convention).
class ImagesItem(scrapy.Item):
    image_urls = scrapy.Field()  # source URLs for the pipeline to fetch
    title = scrapy.Field()
    images = scrapy.Field()      # download results filled in by the pipeline
# Joke post scraped from jandan.net.
class JandanDuanItem(scrapy.Item):
    author = scrapy.Field()
    url = scrapy.Field()
    content = scrapy.Field()
    oo = scrapy.Field()     # "oo" vote count (presumably up-votes -- verify)
    xx = scrapy.Field()     # "xx" vote count (presumably down-votes -- verify)
    pdate = scrapy.Field()  # publication date/time
    uuid = scrapy.Field()
# Comments on jandan.net posts (schema not yet defined).
class JandanCommentItem(scrapy.Item):
    pass
# Proxy-server record scraped from a proxy-list site.
class ProxyItem(scrapy.Item):
    ip = scrapy.Field()
    port = scrapy.Field()
    http = scrapy.Field()         # protocol flag (HTTP/HTTPS -- verify source site)
    method = scrapy.Field()
    location = scrapy.Field()
    speed = scrapy.Field()
    url = scrapy.Field()
    anonymous = scrapy.Field()    # anonymity level
    verify_time = scrapy.Field()  # when the proxy was last verified
# Music album/track record.
class MusicItem(scrapy.Item):
    music_name = scrapy.Field()
    music_alias = scrapy.Field()
    music_singer = scrapy.Field()
    music_time = scrapy.Field()
    music_rating = scrapy.Field()
    music_votes = scrapy.Field()
    music_tags = scrapy.Field()
    music_url = scrapy.Field()
# Music review.
class MusicReviewItem(scrapy.Item):
    review_title = scrapy.Field()
    review_content = scrapy.Field()
    review_author = scrapy.Field()
    review_music = scrapy.Field()   # the reviewed music item
    review_time = scrapy.Field()
    review_url = scrapy.Field()
# Video / film record.
class VideoItem(scrapy.Item):
    video_name = scrapy.Field()
    video_alias = scrapy.Field()
    video_actor = scrapy.Field()
    video_year = scrapy.Field()
    video_time = scrapy.Field()
    video_rating = scrapy.Field()
    video_votes = scrapy.Field()
    video_tags = scrapy.Field()
    video_url = scrapy.Field()
    video_director = scrapy.Field()
    video_type = scrapy.Field()
    video_bigtype = scrapy.Field()   # top-level genre/category
    video_area = scrapy.Field()      # region of production
    video_language = scrapy.Field()
    video_length = scrapy.Field()
    video_writer = scrapy.Field()
    video_desc = scrapy.Field()
    video_episodes = scrapy.Field()
# Film review.
class VideoReviewItem(scrapy.Item):
    review_title = scrapy.Field()
    review_content = scrapy.Field()
    review_author = scrapy.Field()
    review_video = scrapy.Field()   # the reviewed video item
    review_time = scrapy.Field()
    review_url = scrapy.Field()
def ends_filter(value):
    """Clean one scraped text fragment from Lagou.com.

    Strips map-link lines, the site's "published on Lagou" suffix, and
    slash separators; otherwise just trims surrounding whitespace.
    """
    if '查看地图' in value:
        # Drop every line containing the map link, re-join the rest.
        kept = [piece.strip() for piece in value.split('\n') if '查看地图' not in piece]
        return ''.join(kept).strip()
    if '发布于拉勾网' in value:
        return value.replace('发布于拉勾网', '').strip()
    if '/' in value:
        return value.replace('/', '').strip()
    return value.strip()
class LagouItemLoader(ItemLoader):
    """Loader for Lagou job postings: single values by default; titles have
    HTML tags removed and fragments joined."""
    #default_input_processer =
    #default_item_class = scrapy.Item
    default_output_processor = TakeFirst()
    title_in = MapCompose(remove_tags)
    title_out = Join()
class LagouItem(scrapy.Item):
    """One job posting from Lagou.com; most text fields are cleaned with
    ends_filter at load time."""
    title = scrapy.Field(
        input_processor=MapCompose(remove_tags),
        output_processor=Join(),
    )
    url = scrapy.Field()
    salary = scrapy.Field(input_processor=MapCompose(ends_filter))
    job_city = scrapy.Field(input_processor=MapCompose(ends_filter))
    work_years = scrapy.Field(input_processor=MapCompose(ends_filter))
    degree_need = scrapy.Field(input_processor=MapCompose(ends_filter))
    job_type = scrapy.Field()
    tags = scrapy.Field(output_processor=Join(','))
    publish_time = scrapy.Field(input_processor=MapCompose(ends_filter))
    job_advantage = scrapy.Field(input_processor=Join('\n'))
    job_desc = scrapy.Field(input_processor=Join('\n'))
    work_addr = scrapy.Field(input_processor=MapCompose(remove_tags, ends_filter))
    company_name = scrapy.Field()
    company_url = scrapy.Field()
    crawl_time = scrapy.Field()
def title_filter(value):
    """Keep only the part of a page title before the first underscore."""
    return value.partition('_')[0]
def content_filter(value):
    """Strip all tab, carriage-return and newline characters from *value*."""
    return value.translate(str.maketrans('', '', '\t\r\n'))
class NewsLoader(ItemLoader):
    """Loader for DemoItem: every field collapses to its first value."""
    default_output_processor = TakeFirst()
class DemoItem(scrapy.Item):
    """Minimal news item: url, tag-stripped title and whitespace-cleaned body."""
    url = scrapy.Field()
    title = scrapy.Field(
        input_processor=MapCompose(remove_tags, title_filter),
    )
    content = scrapy.Field(
        input_processor=MapCompose(remove_tags, content_filter),
        output_processor=Join(),
    )
class QiushiItem(scrapy.Item):
    """Placeholder for qiushibaike.com posts (schema not yet defined)."""
    pass
import torch
import numpy as np
import matplotlib.pyplot as pp
import copy
import pickle
import gzip
import hashlib
import os.path
import sklearn.datasets
from sklearn.datasets import load_boston
def run_exp(meta_seed, nhid, n_train_seeds):
    """One regression run on California housing: train a small MLP and track
    the pairwise dot-products of per-example output gradients.

    meta_seed     -- RNG seed for initialisation / minibatch sampling
    nhid          -- hidden width of the 3-hidden-layer MLP
    n_train_seeds -- number of training examples actually used

    Returns a dict with final gradient dots/cosines and the MSE curves.
    """
    torch.manual_seed(meta_seed)
    np.random.seed(meta_seed)
    gamma = 0.9  # NOTE(review): unused in this function
    ##nhid = 32
    act = torch.nn.LeakyReLU()
    #act = torch.nn.Tanh()
    # 8 input features (California housing), scalar regression output.
    model = torch.nn.Sequential(torch.nn.Linear(8, nhid), act,
                                torch.nn.Linear(nhid, nhid), act,
                                torch.nn.Linear(nhid, nhid), act,
                                torch.nn.Linear(nhid, 1))
    def init_weights(m):
        # Uniform +/-sqrt(6/(fan_in+fan_out)) init (Xavier-like), zero biases.
        if isinstance(m, torch.nn.Linear):
            k = np.sqrt(6 / (np.sum(m.weight.shape)))
            m.weight.data.uniform_(-k, k)
            m.bias.data.fill_(0)
    model.apply(init_weights)
    opt = torch.optim.Adam(model.parameters(), 1e-3)#, weight_decay=1e-5)
    #X_, Y_ = load_boston(return_X_y=True)
    # First positional argument is data_home (download/cache dir) -- verify.
    X_, Y_ = sklearn.datasets.fetch_california_housing('~/data/',return_X_y=True)
    ntest = int(0.05 * X_.shape[0])
    ntrain = X_.shape[0] - ntest
    # Shuffle with a FIXED seed so the train/test split is identical across
    # meta_seeds, then restore the per-run seed for everything else.
    shuffleidx = np.arange(len(X_))
    np.random.seed(1283)
    np.random.shuffle(shuffleidx)
    np.random.seed(meta_seed)
    X_ = X_[shuffleidx]
    Y_ = Y_[shuffleidx]
    X_train = X_[:ntrain]
    Y_train = Y_[:ntrain]
    X_test = X_[ntrain:]
    Y_test = Y_[ntrain:]
    # Min-max normalise inputs and targets using training-set extrema only.
    xta, xtb = X_train.min(), X_train.max()
    yta, ytb = Y_train.min(), Y_train.max()
    X_train = (X_train-xta)/(xtb-xta)
    X_test = (X_test-xta)/(xtb-xta)
    Y_train = (Y_train-yta)/(ytb-yta)
    Y_test = (Y_test-yta)/(ytb-yta)
    train_x = torch.tensor(X_train[:n_train_seeds]).float()
    train_y = torch.tensor(Y_train[:n_train_seeds]).float()
    test_x = torch.tensor(X_test).float()
    test_y = torch.tensor(Y_test).float()
    train_perf = []
    test_perf = []
    all_dots = []
    xent = torch.nn.CrossEntropyLoss()  # NOTE(review): unused
    for i in range(1000):
        if not i % 5:
            # Every 5 steps: record train/test MSE and the mean pairwise
            # dot-product of per-example gradients on the first 200 examples.
            train_perf.append(np.mean((model(train_x).data.numpy() - train_y.data.numpy())**2))
            test_perf.append(np.mean((model(test_x).data.numpy() - test_y.data.numpy())**2))
            s = train_x[:200]
            all_grads = []
            # NOTE(review): these inner loops reuse `i`, shadowing the outer
            # step counter (harmless since `for` reassigns it, but fragile).
            for i in range(len(s)):
                Qsa = model(s[i][None, :])
                grads = torch.autograd.grad(Qsa, model.parameters())
                fg = np.concatenate([i.reshape((-1,)).data.numpy() for i in grads])
                all_grads.append(fg)
            dots = []
            cosd = []
            for i in range(len(s)):
                for j in range(i+1, len(s)):
                    dots.append(all_grads[i].dot(all_grads[j]))
            all_dots.append(np.float32(dots).mean())
        # One Adam step on a random minibatch of 32.
        mbidx = np.random.randint(0, len(train_x), 32)
        x = train_x[mbidx]
        y = train_y[mbidx]
        pred = model(x)
        loss = ((pred-y)**2).mean()
        loss.backward()
        opt.step()
        opt.zero_grad()
    # Final pass: per-example gradients, dot products AND cosine similarities.
    s = train_x[:200]
    all_grads = []
    for i in range(len(s)):
        Qsa = model(s[i][None, :])
        grads = torch.autograd.grad(Qsa.max(), model.parameters())
        fg = np.concatenate([i.reshape((-1,)).data.numpy() for i in grads])
        all_grads.append(fg)
    dots = []
    cosd = []
    for i in range(len(s)):
        for j in range(i+1, len(s)):
            dots.append(all_grads[i].dot(all_grads[j]))
            cosd.append(all_grads[i].dot(all_grads[j]) / (np.sqrt((all_grads[i]**2).sum()) *
                                                          np.sqrt((all_grads[j]**2).sum())))
    dots = np.float32(dots)
    print(train_perf[-5:], test_perf[-5:], all_dots[-5:])
    return {'dots': dots,
            'cosd': cosd,
            'all_dots': all_dots,
            'train_perf': train_perf,
            'test_perf': test_perf,
            }
def main():
    """Sweep hidden width x training-set size x seed, skipping configs whose
    result file already exists, and persist each run as a gzipped pickle."""
    hidden_sizes = [16, 32, 64, 128]
    train_sizes = [20, 100, 500, 1000, 5000, 10000]
    seeds = [0, 1, 2, 3]
    for width in hidden_sizes:
        for train_size in train_sizes:
            for seed in seeds:
                cfg = {'nhid': width,
                       'n_train_seeds': train_size,
                       'meta_seed': seed,
                       'what': 'calif-2'}
                print(cfg)
                # Deterministic file name derived from the sorted config.
                digest = hashlib.sha1(bytes(str(sorted(cfg.items())), 'utf8')).hexdigest()
                path = f'results/{digest}.pkl.gz'
                if os.path.exists(path):
                    continue
                # Claim the config immediately so concurrent sweeps skip it.
                open(path, 'w').write('touch')
                results = run_exp(seed, width, train_size)
                with gzip.open(path, 'wb') as f:
                    pickle.dump((cfg, results), f)
if __name__ == '__main__':
    main()
|
# utils/models.py
# -*- coding: UTF-8 -*-
from __future__ import unicode_literals
from django.db import models
# import urlparse
from django.utils.six.moves.urllib.parse import urlparse, urlunparse
from django.utils.translation import ugettext_lazy as _
from django.utils.timezone import now as timezone_now
from django.utils.safestring import mark_safe
# from django.contrib.sites.models import Site
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import fields
from django.conf import settings
from django.core.exceptions import FieldError
from django.template.defaultfilters import escape
class UrlMixin(models.Model):
    """
    A replacement for get_absolute_url()
    Models extending this mixin should have either get_url or get_url_path implemented.
    """
    class Meta:
        abstract = True
    def get_url(self):
        # If get_url_path is still this mixin's default (tagged dont_recurse),
        # neither method was overridden -- bail out instead of recursing.
        if hasattr(self.get_url_path, "dont_recurse"):
            raise NotImplementedError
        try:
            path = self.get_url_path()
        except NotImplementedError:
            raise
        website_url = getattr(
            settings, "DEFAULT_WEBSITE_URL",
            "http://127.0.0.1:8000"
        )
        return website_url + path
    get_url.dont_recurse = True
    def get_url_path(self):
        # Mirror image of get_url(): derive the path from the full URL.
        if hasattr(self.get_url, "dont_recurse"):
            raise NotImplementedError
        try:
            url = self.get_url()
        except NotImplementedError:
            raise
        # bits = urlparse.urlparse(url)
        # return urlparse.urlunparse(("", "") + bits[2:])
        # Strip scheme and netloc, keeping path/params/query/fragment.
        bits = urlparse(url)
        return urlunparse(("", "") + bits[2:])
    get_url_path.dont_recurse = True
    def get_absolute_url(self):
        """Django convention entry point; delegates to get_url_path()."""
        return self.get_url_path()
class CreationModificationDateMixin(models.Model):
    """
    Abstract base class with a creation and modification date and time
    """
    created = models.DateTimeField(_("creation date and time"), editable=False, )
    modified = models.DateTimeField(_("modification date and time"), null=True, editable=False, )
    def save(self, *args, **kwargs):
        # First save (no primary key yet) stamps `created`; subsequent
        # saves stamp `modified`.
        if not self.pk:
            self.created = timezone_now()
        else:
            # To ensure that we have a creation data always, we add this one
            if not self.created:
                self.created = timezone_now()
            self.modified = timezone_now()
        super(CreationModificationDateMixin, self).save(*args, **kwargs)
    save.alters_data = True  # tell Django templates this method has side effects
    class Meta:
        abstract = True
class MetaTagsMixin(models.Model):
    """
    Abstract base class for meta tags in the <head> section.

    The four getters render identical markup, so they share one private
    helper instead of four copy-pasted bodies (output is unchanged).
    """
    meta_keywords = models.CharField(_("Keywords"), max_length=255, blank=True, help_text=_("Separate keywords by comma."), )
    meta_description = models.CharField(_("Description"), max_length=255, blank=True, )
    meta_author = models.CharField(_("Author"), max_length=255, blank=True, )
    meta_copyright = models.CharField(_("Copyright"), max_length=255, blank=True, )
    class Meta:
        abstract = True
    def _meta_tag(self, name, content):
        """Render one <meta> tag as safe HTML, or "" when *content* is blank."""
        tag = ""
        if content:
            tag = '<meta name="%s" content="%s" />\n' % (name, escape(content))
        return mark_safe(tag)
    def get_meta_keywords(self):
        """<meta name="keywords"> tag, or "" when no keywords are set."""
        return self._meta_tag("keywords", self.meta_keywords)
    def get_meta_description(self):
        """<meta name="description"> tag, or "" when no description is set."""
        return self._meta_tag("description", self.meta_description)
    def get_meta_author(self):
        """<meta name="author"> tag, or "" when no author is set."""
        return self._meta_tag("author", self.meta_author)
    def get_meta_copyright(self):
        """<meta name="copyright"> tag, or "" when no copyright is set."""
        return self._meta_tag("copyright", self.meta_copyright)
    def get_meta_tags(self):
        """All four meta tags concatenated as one safe HTML string."""
        return mark_safe("".join((
            self.get_meta_keywords(),
            self.get_meta_description(),
            self.get_meta_author(),
            self.get_meta_copyright(),
        )))
def object_relation_mixin_factory( prefix=None, prefix_verbose=None, add_related_name=False, limit_content_type_choices_to=None,
        limit_object_choices_to=None, is_required=False, ):
    """
    returns a mixin class for generic foreign keys using "Content type - object Id" with dynamic field names.
    This function is just a class generator Parameters:
    prefix : a prefix, which is added in front of the fields
    prefix_verbose : a verbose name of the prefix, used to generate a title for the field column of the content object in the Admin.
    add_related_name : a boolean value indicating, that a related name for the generated content type foreign key should be added.
    This value should be true, if you use more than one ObjectRelationMixin in your model.
    The model fields are created like this:
    <<prefix>>_content_type : Field name for the "content type"
    <<prefix>>_object_id : Field name for the "object Id"
    <<prefix>>_content_object : Field name for the "content object"
    """
    # Fixed: the two limit_* parameters used mutable dict literals as
    # defaults (shared across all calls) -- use None sentinels instead.
    if limit_content_type_choices_to is None:
        limit_content_type_choices_to = {}
    if limit_object_choices_to is None:
        limit_object_choices_to = {}
    # Field-name prefix, e.g. prefix "owner" -> "owner_content_type".
    p = ""
    if prefix:
        p = "%s_" % prefix
    content_type_field = "%scontent_type" % p
    object_id_field = "%sobject_id" % p
    content_object_field = "%scontent_object" % p
    class TheClass(models.Model):
        class Meta:
            abstract = True
    if add_related_name:
        if not prefix:
            raise FieldError("if add_related_name is set to True, prefix must be given" )
        related_name = prefix
    else:
        related_name = None
    optional = not is_required
    ct_verbose_name = (_("%s's type (model)") % prefix_verbose
        if prefix_verbose else _("Related object's type (model)")
    )
    content_type = models.ForeignKey( ContentType, verbose_name=ct_verbose_name, related_name=prefix, blank=optional,
        null=optional, help_text=_("Please select the type (model) for the relation, you want to build."),
        limit_choices_to=limit_content_type_choices_to,
    )
    fk_verbose_name = (prefix_verbose or _("Related object"))
    object_id = models.CharField( fk_verbose_name, blank=optional, null=False, max_length=255, default="", # for south migrations
        help_text=_("Please enter the ID of the related object."),
    )
    object_id.limit_choices_to = limit_object_choices_to
    # can be retrieved by
    # MyModel._meta.get_field("object_id").limit_choices_to
    content_object = fields.GenericForeignKey( ct_field=content_type_field, fk_field=object_id_field, )
    # Attach the three generated fields under their prefixed names.
    TheClass.add_to_class(content_type_field, content_type)
    TheClass.add_to_class(object_id_field, object_id)
    TheClass.add_to_class(content_object_field, content_object)
    return TheClass
|
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
from .models import Notification
@receiver(post_save, sender=Notification)
def create_notification(sender, instance, created, **kwargs):
    """post_save hook for Notification; currently only a debug trace."""
    print("hey")
#
# def my_handler(sender, instance, created, **kwargs):
#
# notify.send(instance, verb='was saved')
#
# post_save.connect(my_handler, sender=Notification)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-06 20:20
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Make Choice.question nullable and expose the reverse accessor 'opcoes'."""
    dependencies = [
        ('polls', '0003_choice'),
    ]
    operations = [
        migrations.AlterField(
            model_name='choice',
            name='question',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='opcoes', to='polls.Question'),
        ),
    ]
|
from panda3d.core import GeomVertexFormat, Vec3, LTexCoord, NodePath, RenderState, GeomEnums, GeomVertexData, \
GeomVertexWriter, InternalName
from .Geometry import Geometry
from .PolygonView import PolygonView
class MeshVertex:
    """Plain container for one vertex: position, normal and UV coordinate."""
    def __init__(self, pos, normal, texcoord):
        self.pos = pos
        self.normal = normal
        self.texcoord = texcoord
# Dynamic mesh with an arbitrary number of polygons.
class Mesh:
    def __init__(self):
        """Create an empty mesh with a dynamic V3N3T2 vertex buffer."""
        self.views = []
        self.vertexBuffer = GeomVertexData('mesh-vdata', GeomVertexFormat.getV3n3t2(), GeomEnums.UHDynamic)
        self.np = NodePath("mesh")
        # Writers are only live between begin() and end().
        self.vwriter = None
        self.twriter = None
        self.nwriter = None
    def addView(self, primitiveType, drawMask, state = None):
        """Attach a PolygonView that renders this mesh's polygons."""
        self.views.append(PolygonView(self, primitiveType, drawMask, renderState=state))
    def begin(self, numVerts):
        """Start a rebuild: size the buffer for *numVerts* rows, clear the
        views and open the vertex/texcoord/normal writers."""
        self.vertexBuffer.uncleanSetNumRows(numVerts)
        for view in self.views:
            view.clear()
        self.vwriter = GeomVertexWriter(self.vertexBuffer, InternalName.getVertex())
        self.twriter = GeomVertexWriter(self.vertexBuffer, InternalName.getTexcoord())
        self.nwriter = GeomVertexWriter(self.vertexBuffer, InternalName.getNormal())
    def end(self):
        """Finish a rebuild; drop the writers."""
        self.vwriter = None
        self.nwriter = None
        self.twriter = None
    def addFace(self, meshVertices, state = RenderState.makeEmpty()):
        """Write one polygon's MeshVertex list and refresh all views.

        Fixed: removed an unused local (`row = self.vwriter.getWriteRow()`).
        NOTE(review): *state* is unused here, and its default is evaluated
        once at definition time -- confirm before relying on it.
        """
        for vert in meshVertices:
            self.vwriter.setData3f(vert.pos)
            self.nwriter.setData3f(vert.normal)
            self.twriter.setData2f(vert.texcoord)
        for view in self.views:
            view.generateIndices()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 12 12:08:25 2019
@author: xabuka
"""
# best Identify langauge code
from sklearn.datasets import load_files
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier, RidgeClassifier
from sklearn import metrics
from sklearn.naive_bayes import BernoulliNB, ComplementNB, MultinomialNB
from sklearn.model_selection import train_test_split
#data_set = load_files('../corpora/Balanced_Shami/train/dialects', encoding = 'utf-8',decode_error='ignore')
#data_test = load_files('../corpora/Balanced_Shami/test', encoding = 'utf-8',decode_error='ignore')
#X_train = data_set.data
#y_train = data_set.target
#X_test = data_test.data
#y_test = data_test.target
# Load the PalSenti corpus (one sub-folder per class) and split 80/20.
data_set = load_files('../PalSenti/', encoding = 'utf-8',decode_error='ignore')
X_train, X_test, y_train, y_test = train_test_split(data_set.data, data_set.target, test_size=0.2, random_state=42)
print('data loaded')
# order of labels in `target_names` can be different from `categories`
target_names = data_set.target_names
def size_mb(docs):
    """Total size of the documents in megabytes (UTF-8 encoded)."""
    total_bytes = sum(len(doc.encode('utf-8')) for doc in docs)
    return total_bytes / 1e6
# Report corpus sizes.  Fixed: the original referenced undefined lowercase
# names `x_train`/`x_test` (NameError) -- the split variables are
# X_train/X_test.
data_train_size_mb = size_mb(X_train)
data_test_size_mb = size_mb(X_test)
print("%d documents - %0.3fMB (training set)" % (
    len(X_train), data_train_size_mb))
print("%d documents - %0.3fMB (test set)" % (
    len(X_test), data_test_size_mb))
print("%d categories" % len(target_names))
print()
#word_vect = TfidfVectorizer(sublinear_tf=True, max_df=0.5,analyzer = 'word', ngram_range=(1,1))
# NOTE(review): char_vect is built but never used below.
char_vect = TfidfVectorizer(max_features = 50000, sublinear_tf=True,norm ='l1', max_df=0.75,analyzer = 'char_wb', ngram_range=(2,5))
# Combine word unigrams with character (2-5)-grams into one feature space.
union = FeatureUnion([("w_v", TfidfVectorizer(sublinear_tf=True, max_df=0.5,analyzer = 'word', ngram_range=(1,1)
                                              )),
                      ("c_wb", TfidfVectorizer(sublinear_tf=True, max_df=0.5,analyzer = 'char_wb', ngram_range=(2,5)
                                               )),
                      #        ("c_v", TfidfVectorizer(sublinear_tf=True, max_df=0.5,analyzer = 'char', ngram_range=(2,5)
                      #        ))
                      ])
#union.fit_transform(data_train.data)
# Fit the vectorizers on the training texts; transform both splits.
X_features = union.fit_transform(X_train) #union.fit_transform(data_train.data)
#Y_train = union.transform
X_test = union.transform(X_test)#union.transform(data_test.data)
print("Combined space has", X_features.shape[1], "features")
# this is for lev only
#svm = SGDClassifier(alpha=0.001, max_iter=50,penalty="l2")
# this is for high level lev, msa, eg, na
# NOTE(review): the variable is named `svm` but holds a naive Bayes model.
svm = MultinomialNB(alpha=0.0001)
svm.fit(X_features, y_train)
#pipeline = Pipeline([("features", union), ("svm", svm)])
# Evaluate on the held-out split.
pred = svm.predict(X_test)
score = metrics.accuracy_score(y_test, pred)
print("accuracy: %0.3f" % score)
print("classification report:")
print(metrics.classification_report(y_test, pred,target_names=target_names))
print("confusion matrix:")
print(metrics.confusion_matrix(y_test, pred))
import enemy,pygame
class Skeleton(enemy.Enemy):
    # Class-level caches shared by all skeletons; presumably populated with
    # loaded pygame surfaces/sounds before any instance is created -- verify.
    frame={}
    sound={}
    def __init__(self,position=(0,0),index=0):
        """Spawn a skeleton at *position* (pixel coordinates)."""
        super(Skeleton, self).__init__(position)
        # Bounding rect from the current animation frame; hit box is a
        # smaller 84x144 region centred on the sprite.
        self.rect = Skeleton.frame[self.frame_on][self.frame_index].get_rect(topleft=position)
        self.hit_box = pygame.Rect(position[0], position[1], 28 * 3, 48 * 3)
        self.hit_box.center = self.rect.center
        self.reload=0
        self.run_speed=8
        self.test_mode=False  # when True, draw() overlays the hit box
        self.hp=250
    def draw(self, screen, camera=(0, 0)):
        """Draw the health bar and (if on-screen) the current frame."""
        self.draw_live_bar(screen, camera)
        # Cheap visibility cull against a 1300x750-ish screen with margins.
        if self.rect.centerx - camera[0] > -100 and self.rect.centerx - camera[0] < 1400 and self.rect.centery - camera[
            1] > -50 and self.rect.centery - camera[1] < 800:
            screen.blit(pygame.transform.flip(Skeleton.frame[self.frame_on][int(self.frame_index)], self.side_left, False),(self.rect.x - camera[0], self.rect.y - camera[1]))
            if self.test_mode:
                # Debug overlay: translucent red rectangle over the hit box.
                rect = pygame.Surface(self.hit_box.size).convert_alpha()
                rect.fill((200, 0, 0, 100))
                screen.blit(rect, (self.hit_box.x - camera[0], self.hit_box.y - camera[1]))
    def animation(self):
        """Advance the animation; returns True when the cycle wraps around."""
        # 13 frames per second at a 40 Hz tick.
        self.frame_index += 13*(1/40)
        if len(Skeleton.frame[self.frame_on]) <= self.frame_index:
            self.frame_index = 0
            return True
        return False
import pickle
# Load the pre-scraped review corpus once at import time.
reviews = pickle.load(open("data/gameReviewDict.p", "rb"))
from clusterGames import *
def clusterTestGame(fText):
    """Cluster a synthetic test game from its review text *fText* using the
    shared review corpus and getGameCluster() from clusterGames."""
    gameName = "Test Game Title.txt"
    return getGameCluster(reviews, gameName, fText)
|
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
from dajaxice.core import dajaxice_autodiscover, dajaxice_config
dajaxice_autodiscover()
# NOTE(review): patterns() with string view names was removed in Django 1.10;
# this module targets an old Django release.
urlpatterns = patterns('',
    # Welcome Page:
    url(r'^$', 'kitty.views.home', name='home'),
    # creating a kitty
    url(r'^create/', 'kitty.views.create', name='create'),
    # display item Modal
    url(r'^(?P<id>\w{5})/itemModal/(?P<itemID>\d+)/', 'kitty.views.itemModal'),
    # display user Modal
    url(r'^(?P<id>\w{5})/userModal/(?P<userID>\d+)/', 'kitty.views.userModal'),
    # display kitty with ID
    url(r'^(?P<id>\w{5})/', 'kitty.views.show'),
    # for setting language
    url(r'^i18n/', include('django.conf.urls.i18n')),
    # Dajax
    url(dajaxice_config.dajaxice_url, include('dajaxice.urls')),
    # url(r'^kitty/', include('kitty.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    # url(r'^admin/', include(admin.site.urls)),
)
# API URLS
urlpatterns += patterns('kitty.api',
    url(r'^api/incItem/(?P<user_item_id>\d+)', 'incItem'),
    url(r'^api/decItem/(?P<user_item_id>\d+)', 'decItem'),
    url(r'^api/userItems/(?P<user_id>\d+)', 'userItems'),
    url(r'^api/users/(?P<id>\w{5})/', 'users'),
    url(r'^api/kitty/(?P<id>\w{5})/', 'kitty'),
)
# import numpy as np
# import matplotlib.pyplot as plt
# data_set = np.loadtxt(
# fname="amp.csv",
# dtype="int",
# delimiter=",",
# )
# # 散布図を描画 → scatterを使用する
# # 1行ずつ取り出して描画
# #plt.scatter(x座標の値, y座標の値)
# for data in data_set:
# plt.scatter(data)
# plt.title("correlation")
# plt.xlabel("Average Temperature of SAITAMA")
# plt.ylabel("Average Temperature of IWATE")
# plt.grid()
# plt.show()
import pandas as pd
import matplotlib.pyplot as plt
# df = pd.read_csv('amp.csv', names=['num1', 'num2'])
# plt.plot(range(0, 3999), df['num2'], marker="o", markersize=1)
# plt.xlabel('Number of Frequency Points')
# plt.show()
# 畳み込みのxとyを表示した時
# df = pd.read_csv('out_y.csv', names=['num1', 'num2'])
# plt.plot(range(0, 3999), df['num2'],
# label="y[n]:output", marker="o", markersize=1)
# df = pd.read_csv('test.csv', names=['num1', 'num2'])
# plt.plot(range(0, 4000), df['num2'],
# label="x[n]:input", marker="o", markersize=1)
# plt.legend()
# plt.show()
# Amplitude spectrum of h (first 2000 frequency points).
df = pd.read_csv('amp_h_cut.csv', names=['num1', 'num2'])
plt.plot(range(0, 2000), df['num2'], marker="o", markersize=1)
plt.xlabel('Frequency')
plt.legend()  # NOTE(review): no labelled artists here, so this legend is empty
plt.show()
# スペクトル全部
# df = pd.read_csv('amp_h.csv', names=['num1', 'num2'])
# plt.plot(range(0, 3999), df['num2'], label="h", marker="o", markersize=1)
# df = pd.read_csv('amp.csv', names=['num1', 'num2'])
# plt.plot(range(0, 3999), df['num2'], label="x[n]", marker="o", markersize=1)
# df = pd.read_csv('amp_y.csv', names=['num1', 'num2'])
# plt.plot(range(0, 3999), df['num2'], label="y[n]", marker="o", markersize=1)
# plt.xlabel('Number of Frequency Points')
# plt.legend()
# plt.show()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add Term.timeframe (pre/WWII/post-WWII era choices), relax Term.word,
    and make (timeframe, word) pairs unique."""
    dependencies = [
        ('query', '0015_auto_20160203_1037'),
    ]
    operations = [
        migrations.AddField(
            model_name='term',
            name='timeframe',
            field=models.CharField(default='pre', max_length=4, choices=[(b'pre', b'pre-WWII: 1900-1940'), (b'WWII', b'WWII: 1940-1945'), (b'post', b'post-WWII: 1945-1990')]),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='term',
            name='word',
            field=models.CharField(max_length=200),
            preserve_default=True,
        ),
        migrations.AlterUniqueTogether(
            name='term',
            unique_together=set([('timeframe', 'word')]),
        ),
    ]
|
import pytest
@pytest.fixture(scope="session")
def init_data2():
    """Session-scoped fixture: one-time test-data initialisation (trace only)."""
    print("---初始化数据init_data2---")
|
# -*- encoding: utf-8 -*-
import datetime
def formata_data(data):
    """Convert a date string from 'dd/mm/YYYY' to 'YYYYmmdd'."""
    parsed = datetime.datetime.strptime(data, '%d/%m/%Y')
    return parsed.strftime("%Y%m%d")
def formata_valor(valor):
    """Format a monetary value with two decimals and no decimal point,
    e.g. 12.5 -> '1250' (common fixed-width bank-file layout).

    Fixed: dropped the redundant str() wrapper -- the % operator already
    yields a string.
    """
    return ("%.2f" % valor).replace(".", "")
|
class Node:
    """A single BST node: a value plus left/right child links."""
    def __init__(self, value):
        self.value = value
        self.left = None   # subtree holding values < value
        self.right = None  # subtree holding values > value
class BinarySearchTree:
    """Binary search tree supporting search, insert and remove.

    Duplicates are ignored on insert.  remove() returns True when a value
    was deleted and False when it was absent.

    Fixed: the original two-child deletion dropped the in-order
    successor's right subtree (``parent.left = None``) and skipped
    unlinking the successor entirely when it was the deleted node's direct
    right child, corrupting the tree.
    """
    def __init__(self):
        self.root = None
    def search(self, value):
        """Return True if *value* is present in the tree."""
        node = self.root
        while node is not None:
            if value == node.value:
                return True
            node = node.right if value > node.value else node.left
        return False
    def insert(self, value):
        """Insert *value*; duplicates are ignored (returns None either way)."""
        new_node = Node(value)
        if self.root is None:
            self.root = new_node
            return
        node = self.root
        while True:
            if value == node.value:
                return None  # duplicate -> no-op
            if value > node.value:
                if node.right is None:
                    node.right = new_node
                    return
                node = node.right
            else:
                if node.left is None:
                    node.left = new_node
                    return
                node = node.left
    def remove(self, data):
        """Delete *data* from the tree; True on success, False if absent."""
        # Locate the node to delete and its parent (parent stays None when
        # the root itself matches).
        parent = None
        current = self.root
        while current is not None and current.value != data:
            parent = current
            current = current.right if data > current.value else current.left
        if current is None:
            return False  # not found (also covers the empty tree)
        if current.left is not None and current.right is not None:
            # Two children: copy in the in-order successor (minimum of the
            # right subtree), then unlink that successor node.  The
            # successor never has a left child, so splicing in its right
            # subtree preserves ordering.
            succ_parent = current
            succ = current.right
            while succ.left is not None:
                succ_parent = succ
                succ = succ.left
            current.value = succ.value
            if succ_parent is current:
                succ_parent.right = succ.right
            else:
                succ_parent.left = succ.right
            return True
        # Zero or one child: splice the (possibly None) child into place.
        child = current.left if current.left is not None else current.right
        if parent is None:
            self.root = child  # deleting the root itself
        elif parent.left is current:
            parent.left = child
        else:
            parent.right = child
        return True
|
# -*- coding: utf-8 -*-
from django.contrib.auth.views import REDIRECT_FIELD_NAME
from django.shortcuts import redirect, resolve_url
def login(request):
    """CMS login view.

    Active staff users hitting the page with GET are sent straight to the
    dashboard; everyone else goes through Django's stock login view with
    the admin authentication form.
    """
    if request.method == 'GET' and request.user.is_active and request.user.is_staff:
        return redirect(resolve_url('npcms:dashboard'))
    # Imported lazily so Django's view doesn't shadow this wrapper's name.
    from django.contrib.auth.views import login
    from django.contrib.admin.forms import AdminAuthenticationForm
    # Honour ?next=... from POST first, then GET, defaulting to the dashboard.
    redirect_to = request.POST.get(REDIRECT_FIELD_NAME,
                                   request.GET.get(REDIRECT_FIELD_NAME, resolve_url('npcms:dashboard')))
    return login(request, template_name='npcms/auth/login.html', authentication_form=AdminAuthenticationForm,
                 extra_context={REDIRECT_FIELD_NAME: redirect_to})
def logout(request):
    """End the current session and bounce the user back to the login view."""
    # Imported locally so the module-level name ``logout`` stays this view.
    from django.contrib.auth.views import logout_then_login
    # logout_then_login clears the session and issues the redirect for us.
    return logout_then_login(request, login_url='npcms:login')
# Filter dict1.txt into dict.txt:
#   * drop lines that are entirely uppercase (acronyms etc.),
#   * lowercase the rest,
#   * drop words containing k/w/y/-,
#   * strip accents from vowels.
# Rewritten to use context managers (the original relied on GC to close
# the output file) and a single-pass str.translate instead of repeated
# list(...)/index(...)/join(...) surgery per accented character.

INVALID_CHARS = {'k', 'w', 'y', '-'}
# Map each accented vowel to its unaccented counterpart in one pass.
ACCENT_MAP = str.maketrans('áãàâéèêíìîóõòôúùûü', 'aaaaeeeiiioooouuuu')

with open("dict1.txt") as source, open("dict.txt", 'w') as dest:
    for line in source:
        # Skip all-uppercase lines (also skips lines with no letters).
        if line == line.upper():
            continue
        word = line.lower()
        # Skip words containing a disallowed character.
        if any(ch in INVALID_CHARS for ch in word):
            continue
        # Write the de-accented word (trailing newline is preserved).
        dest.write(word.translate(ACCENT_MAP))
def funny(s, t):
    """Return True when a consistent character mapping s -> t exists.

    Each character of ``s`` must always map to the same character of
    ``t``. The mapping is NOT required to be injective, matching the
    original behavior: funny("ab", "aa") is True. Strings of unequal
    length never match; two empty strings do.
    """
    if len(s) != len(t):
        return False
    mapping = {}
    for src, dst in zip(s, t):
        # Membership test directly on the dict (not `.keys()`).
        if src in mapping:
            if mapping[src] != dst:
                return False
        else:
            mapping[src] = dst
    return True
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-23 14:04
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the mygoals app: Event (a timestamped, valued
    entry) and Goal (a named target), with Event.goal cascading on
    delete. Auto-generated by Django 1.10.4 -- prefer a follow-up
    migration over hand-editing this one.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Event',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('datetime', models.DateTimeField(auto_now_add=True)),
                ('value', models.DecimalField(decimal_places=2, default=1, max_digits=20)),
                ('notes', models.TextField(blank=True)),
                ('url', models.URLField(blank=True)),
            ],
        ),
        migrations.CreateModel(
            name='Goal',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('target', models.IntegerField()),
                ('begin_value', models.IntegerField(default=0)),
            ],
        ),
        # The FK is added after both CreateModel operations, as generated.
        migrations.AddField(
            model_name='event',
            name='goal',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mygoals.Goal'),
        ),
    ]
|
from script.base_api.service_idea.pay import *
|
# Emit a C array literal containing the XOR-obfuscated bytes of `s`,
# 16 values per row. Ported from Python 2 `print` statements to the
# print() function so the script also runs under Python 3 (same output).
import sys

s = "4nt1_D3bugg3rs_4r3nt_So_B4d_Aft3r_All_R1ght?"
xor_bytes = [0xd1, 0xee, 0xd9]  # repeating 3-byte XOR key

print("int xor_bytes[%d] = {" % len(s))
for i, c in enumerate(s):
    # XOR each character with the key byte for its position.
    b = xor_bytes[i % len(xor_bytes)] ^ ord(c)
    sys.stdout.write("0x{:02x}".format(b) + ", ")
    if (i + 1) % 16 == 0:
        print("")
print("")
print("}")
|
import pickle
import torch
import argparse
import os
import subprocess
import matplotlib.pyplot as plt

from drivingenvs.vehicles.ackermann import AckermannSteeredVehicle
from drivingenvs.envs.driving_env_with_vehicles import DrivingEnvWithVehicles

"""
Given a path to experiment output, make a video of the policy acting in the environment.
"""

parser = argparse.ArgumentParser(description='Parse videomaker params')
parser.add_argument('--video_name', type=str, required=True, help='name of the output video (actual video will have \'.mp4\' appended to it).')
parser.add_argument('--video_fp', type=str, required=False, default = '', help='location to output the video to (defaults to current dir).')
parser.add_argument('--experiment_fp', type=str, required=True, help='location where the experiment results are (the base dir).')
parser.add_argument('--env_fp', type=str, required=False, help='location of env file (defaults to the one stored in the experiment dir).')
parser.add_argument('--itr', type=int, required=False, help='the iteration of the policy to test. Defaults to the best one if no argument provided.')
parser.add_argument('--framerate', type=int, required=False, default=10, help='framerate of the output video')
# NOTE(review): argparse's type=bool treats ANY non-empty string as True
# (e.g. "--deterministic False" still yields True). Left as-is because the
# flag is only referenced in the commented-out policy line below.
parser.add_argument('--deterministic', type=bool, required=False, default=True, help='determines whether the policy takes deterministic actions')

args = parser.parse_args()
print(args)

# BUGFIX: use "is not None" so that "--itr 0" selects itr_0 instead of
# silently falling back to the _best policy (0 is falsy).
env_fp = os.path.join(args.env_fp, 'env.cpt') if args.env_fp else os.path.join(args.experiment_fp, 'env.cpt')
policy_fp = os.path.join(args.experiment_fp, 'itr_{}/policy.cpt'.format(args.itr)) if args.itr is not None else os.path.join(args.experiment_fp, '_best/policy.cpt')
output_fp = os.path.join(args.video_fp, '{}.mp4'.format(args.video_name))
tmp_fp = os.path.join(args.video_fp, 'tmp')

# Deserialize the pickled env and the torch-saved policy; close the env
# file promptly instead of leaking the handle.
with open(env_fp, 'rb') as env_file:
    env = pickle.load(env_file)
policy = torch.load(policy_fp)
print(env)
print(policy)

# Portable replacement for shelling out to mkdir; no error if it exists.
os.makedirs(tmp_fp, exist_ok=True)

o = env.reset(reset_kwargs = {'lane':2})
frame = 0
while True:
    print('Frame {}'.format(frame), end='\r')
    env.render(render_kwargs = {'window':50})
    # Dump each rendered frame as a zero-padded png for ffmpeg.
    plt.savefig(os.path.join(tmp_fp, 'frame{0:05d}.png'.format(frame)))
    plt.close()
    # TODO: drive with the trained policy instead of a fixed action:
    # o, r, t, i = env.step(policy.action(o, deterministic=args.deterministic))
    o, r, t, i = env.step(torch.tensor([1.0, 0.1]))
    frame += 1
    if t:
        break

# Net effect of the two chdirs: end up in the directory that CONTAINS tmp/,
# so the relative 'tmp/frame%05d.png' pattern below resolves correctly.
os.chdir(tmp_fp)
os.chdir('../')
subprocess.call(['ffmpeg', '-framerate', str(args.framerate), '-i', 'tmp/frame%05d.png', '-pix_fmt', 'yuv420p', output_fp])
subprocess.call(['rm', '-r', 'tmp'])
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# type: ignore
import base64
import os
from pathlib import Path
from typing import List, Set
import nox
from nox.sessions import Session
# Python versions the nox sessions run against. CI can override the
# matrix via NOX_PYTHON_VERSIONS (comma-separated, e.g. "3.8,3.9").
DEFAULT_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9"]
PYTHON_VERSIONS = os.environ.get(
    "NOX_PYTHON_VERSIONS", ",".join(DEFAULT_PYTHON_VERSIONS)
).split(",")
def setup_env(session: Session, name: str) -> None:
    """Install this package's extra for *name* (``.[name]``), adding the
    MuJoCo library path to the install env when running under CircleCI.
    """
    env = {}
    if name in ["metaworld"]:
        key = "CIRCLECI_MJKEY"
        if key in os.environ:
            # job is running in CI
            # NOTE(review): "$LD_LIBRARY_PATH" is placed literally in the
            # env dict; it is presumably expanded by a shell downstream --
            # confirm it is not passed verbatim to the process.
            env[
                "LD_LIBRARY_PATH"
            ] = "$LD_LIBRARY_PATH:/home/circleci/.mujoco/mujoco200/bin"
    session.install(f".[{name}]", env=env)
def setup_mtenv(session: Session) -> None:
    """Install mtenv's dev dependencies.

    On CircleCI (detected via the CIRCLECI_MJKEY env var) the MuJoCo
    license key is first decoded from base64 and written to the expected
    location.
    """
    key = "CIRCLECI_MJKEY"
    if key in os.environ:
        # job is running in CI
        mjkey = base64.b64decode(os.environ[key]).decode("utf-8")
        mjkey_path = "/home/circleci/.mujoco/mjkey.txt"
        # even if the mjkey exists, we can safely overwrite it.
        # Single write (the original looped over the string writing one
        # character at a time -- equivalent output, needless work).
        with open(mjkey_path, "w") as f:
            f.write(mjkey)
    session.install("--upgrade", "setuptools", "pip")
    session.install(".[dev]")
def get_core_paths(root: str) -> List[str]:
    """Collect the files/directories that form the core package.

    Everything directly under *root* is included, except that the
    ``envs`` entry is expanded to just its immediate files (its
    subdirectories are excluded).
    """
    collected: List[str] = []
    for entry in Path(root).iterdir():
        if entry.stem != "envs":
            collected.append(str(entry))
            continue
        # Expand the envs directory to its direct files only.
        collected.extend(str(child) for child in entry.iterdir() if child.is_file())
    return collected
class EnvSetup:
    """Metadata describing one env package's ``setup.py``.

    Stores the package name, the setup.py location (as a string), its
    containing directory, and the set of python versions it supports.
    """

    def __init__(
        self, name: str, setup_path: Path, supported_python_versions: Set[str]
    ) -> None:
        self.name = name
        # Keep both the setup.py file and its parent directory as strings.
        self.setup_path = str(setup_path)
        self.path = str(setup_path.parent)
        self.supported_python_versions = supported_python_versions
def parse_setup_file(session: Session, setup_path: Path) -> EnvSetup:
    """Run a setup.py to extract its name and supported python versions."""
    # `python setup.py --name --classifiers` prints the name first,
    # followed by the trove classifiers, one per line.
    output = session.run(
        "python", str(setup_path), "--name", "--classifiers", silent=True
    ).splitlines()
    prefix = "Programming Language :: Python :: "
    versions = {
        line.replace(prefix, "") for line in output[1:] if prefix in line
    }
    return EnvSetup(
        name=output[0], setup_path=setup_path, supported_python_versions=versions
    )
def get_all_envsetups(session: Session) -> List[EnvSetup]:
    """Parse every env setup.py found under mtenv/envs."""
    setups = []
    for setup_path in Path("mtenv/envs").glob("**/setup.py"):
        setups.append(parse_setup_file(session=session, setup_path=setup_path))
    return setups
def get_all_env_setup_paths_as_nox_params():
    """Expose each env's setup.py as a nox param (id = env dir name)."""
    params = []
    for setup_path in Path("mtenv/envs").glob("**/setup.py"):
        params.append(nox.param(setup_path, id=setup_path.parent.stem))
    return params
def get_supported_envsetups(session: Session) -> List[EnvSetup]:
    """Get the list of EnvSetups that can run in a given session."""
    supported = []
    for candidate in get_all_envsetups(session=session):
        # Keep only envs whose classifiers list this session's python.
        if session.python in candidate.supported_python_versions:
            supported.append(candidate)
    return supported
def get_supported_env_paths(session: Session) -> List[str]:
    """Get the list of env_paths that can run in a given session."""
    paths = []
    for env_setup in get_supported_envsetups(session=session):
        paths.append(env_setup.path)
    return paths
@nox.session(python=PYTHON_VERSIONS)
def lint(session: Session) -> None:
    """Run black (check mode) and flake8 over the core package, the
    tests, and every env supported by this session's python."""
    setup_mtenv(session=session)
    for _path in (
        get_core_paths(root="mtenv")
        + get_core_paths(root="tests")
        + get_supported_env_paths(session=session)
    ):
        session.run("black", "--check", _path)
        session.run("flake8", _path)
@nox.session(python=PYTHON_VERSIONS)
def mypy(session: Session) -> None:
    """Strictly type-check the core package, then check each supported
    env (non-strict) after installing its extras."""
    setup_mtenv(session=session)
    for _path in get_core_paths(root="mtenv"):
        session.run("mypy", "--strict", _path)
    for envsetup in get_supported_envsetups(session=session):
        setup_env(session=session, name=envsetup.name)
        session.run("mypy", envsetup.path)
@nox.session(python=PYTHON_VERSIONS)
def test_wrappers(session) -> None:
    """Run the wrapper test suite."""
    setup_mtenv(session=session)
    session.run("pytest", "tests/wrappers")
@nox.session(python=PYTHON_VERSIONS)
def test_examples(session) -> None:
    """Run the examples test suite."""
    setup_mtenv(session=session)
    session.run("pytest", "tests/examples")
@nox.session(python=PYTHON_VERSIONS)
@nox.parametrize("env_setup_path", get_all_env_setup_paths_as_nox_params())
def test_envs(session, env_setup_path) -> None:
    """Run the env test suite for one env (parametrized over the
    setup.py files found under mtenv/envs), skipping envs that do not
    support this session's python."""
    setup_mtenv(session=session)
    envsetup = parse_setup_file(session=session, setup_path=env_setup_path)
    if session.python not in envsetup.supported_python_versions:
        print(f"Python {session.python} is not supported by {envsetup.name}")
        return
    setup_env(session=session, name=envsetup.name)
    env = {"NOX_MTENV_ENV_PATH": envsetup.path}
    # xvfb wrapper lets envs that render run without a real display.
    command_for_headless_rendering = [
        "xvfb-run",
        "-a",
        "-s",
        "-screen 0 1024x768x24 -ac +extension GLX +render -noreset",
    ]
    commands = []
    key = "CIRCLECI_MJKEY"
    if key in os.environ and envsetup.name in ["metaworld"]:
        env["LD_LIBRARY_PATH"] = "$LD_LIBRARY_PATH:/home/circleci/.mujoco/mujoco200/bin"
    if envsetup.name.startswith("MT-HiPBMDP"):
        env["PYTHONPATH"] = "mtenv/envs/hipbmdp/local_dm_control_suite"
    # Only these envs need headless rendering support.
    if envsetup.name in ["hipbmdp", "mpte"]:
        commands = commands + command_for_headless_rendering
    commands = commands + ["pytest", "tests/envs"]
    session.run(*commands, env=env)
|
import envi.qt as envi_qt
import envi.bits as e_bits
import envi.qt.memory as e_mem_qt
import envi.qt.memcanvas as e_mem_canvas
import vstruct.qt as vs_qt
import vqt.menubuilder as vqt_menu
import vivisect.base as viv_base
import vivisect.renderers as viv_rend
import vivisect.qt.views as viv_q_views
import vivisect.qt.funcgraph as viv_q_funcgraph
import vivisect.qt.funcviews as viv_q_funcviews
from PyQt4 import QtCore, QtGui, QtWebKit
from envi.threads import firethread
from vivisect.const import *
# FIXME HACK where do these really live?
qt_horizontal = 1  # matches Qt.Horizontal (0x1)
qt_vertical = 2    # matches Qt.Vertical (0x2)
def cmpoffset(x,y):
    # Python 2 comparator: order (offset, ...) tuples by their first element.
    return cmp(x[0], y[0])
class VQVivMemoryCanvas(e_mem_canvas.VQMemoryCanvas):
    """Vivisect-aware memory canvas widget (PyQt4/QtWebKit era).

    Adds single-key hotkeys for common workspace actions (make code /
    function / string / pointer / unicode, rename, comment, structures),
    extends scrolling so that more of the current memory map is rendered
    on demand, and builds the right-click context menu.

    NOTE: Python 2 era code (this module uses ``cmp`` and ``xrange``).
    """

    def __init__(self, *args, **kwargs):
        e_mem_canvas.VQMemoryCanvas.__init__(self, *args, **kwargs)
        self.vw = self.mem  # the canvas "memory" object is a viv workspace
        self._last_sname = None  # last structure name applied; reused by ctrl+S

        # One-key shortcuts; each acts on the current cursor VA.
        self.vqAddHotKey('c', self._hotkey_c)
        self.vqAddHotKey('f', self._hotkey_f)
        self.vqAddHotKey('s', self._hotkey_s)
        self.vqAddHotKey('p', self._hotkey_p)
        self.vqAddHotKey('u', self._hotkey_u)
        self.vqAddHotKey('n', self._hotkey_n)
        self.vqAddHotKey(';', self._hotkey_semi)
        self.vqAddHotKey('S', self._hotkey_S)
        self.vqAddHotKey('ctrl+S', self._hotkey_cS)
        self.vqAddHotKey('U', self._hotkey_U)

    def wheelEvent(self, event):
        # When the scrollbar reaches either end, append/prepend up to 128
        # more bytes of the current memory map so scrolling feels endless.
        frame = self.page().mainFrame()
        sbcur = frame.scrollBarValue(qt_vertical)
        sbmin = frame.scrollBarMinimum(qt_vertical)
        sbmax = frame.scrollBarMaximum(qt_vertical)
        if sbcur == sbmax:
            lastva, lastsize = self._canv_rendvas[-1]
            mapva, mapsize, mperm, mfname = self.vw.getMemoryMap(lastva)
            sizeremain = (mapva + mapsize) - (lastva + lastsize)
            if sizeremain:
                self.renderMemoryAppend(min(sizeremain, 128))
        elif sbcur == sbmin:
            firstva, firstsize = self._canv_rendvas[0]
            mapva, mapsize, mperm, mfname = self.vw.getMemoryMap(firstva)
            #presize = 1
            #prevloc = self.vw.getPrevLocation(firstva)
            #if prevloc:
            #presize = prevloc[1]
            sizeremain = firstva - mapva
            if sizeremain:
                self.renderMemoryPrepend(min(sizeremain, 128))
        return e_mem_canvas.VQMemoryCanvas.wheelEvent(self, event)

    # --- "make"/edit hotkeys: each delegates to the workspace at the cursor VA ---

    def _hotkey_c(self, canv, key):
        if self._canv_curva:
            self.vw.makeCode(self._canv_curva)

    def _hotkey_f(self, canv, key):
        if self._canv_curva:
            self.vw.makeFunction(self._canv_curva)

    def _hotkey_s(self, canv, key):
        if self._canv_curva:
            self.vw.makeString(self._canv_curva)

    def _hotkey_p(self, canv, key):
        if self._canv_curva:
            self.vw.makePointer(self._canv_curva)

    def _hotkey_u(self, canv, key):
        if self._canv_curva:
            self.vw.makeUnicode(self._canv_curva)

    def _hotkey_U(self, canv, key):
        if self._canv_curva:
            self.vw.delLocation(self._canv_curva)

    def _hotkey_n(self, canv, key):
        if self._canv_curva:
            self._menuRename(self._canv_curva)

    def _hotkey_semi(self, canv, key):
        if self._canv_curva:
            self._menuComment(self._canv_curva)

    def _hotkey_S(self, canv, key):
        if self._canv_curva:
            self._menuMakeStruct(self._canv_curva)

    def _hotkey_cS(self, canv, key):
        # Re-apply the most recently used structure without prompting.
        if self._canv_curva:
            if self._last_sname != None:
                self.vw.makeStructure(self._canv_curva, self._last_sname)

    def _clearColorMap(self):
        frame = self.page().mainFrame()
        style = frame.findFirstElement('#cmapstyle')
        style.setInnerXml('');

    def _applyColorMap(self, cmap):
        # Build one CSS rule per VA and install them into #cmapstyle.
        frame = self.page().mainFrame()
        style = frame.findFirstElement('#cmapstyle')
        rows = []
        for va,color in cmap.items():
            rows.append('.va_0x%.8x { color: #000000; background-color: %s }' % (va, color))
        style.setInnerXml('\n'.join(rows))

    def _navExpression(self, expr):
        if self._canv_navcallback:
            self._canv_navcallback(expr)

    def _menuRename(self, va):
        name, ok = QtGui.QInputDialog.getText(self, 'Enter...', 'Name')
        if ok:
            self.vw.makeName(va, str(name))

    def _menuComment(self, va):
        comment, ok = QtGui.QInputDialog.getText(self, 'Enter...', 'Comment')
        if ok:
            self.vw.setComment(va, str(comment))

    def _menuBookmark(self, va):
        bname, ok = QtGui.QInputDialog.getText(self, 'Enter...', 'Bookmark Name')
        if ok:
            self.vw.setVaSetRow('Bookmarks', (va, str(bname)))

    def _menuEditFunctionArgument(self, fva, idx, atype, aname):
        newname, ok = QtGui.QInputDialog.getText(self, 'Enter...', 'Argument Name')
        if ok:
            self.vw.setFunctionArg(fva, idx, atype, aname=str(newname), doprec=False)

    def _menuEditFunctionLocal(self, fva, offset, atype, aname):
        newname, ok = QtGui.QInputDialog.getText(self, 'Enter...', 'Local Name')
        if ok:
            self.vw.setFunctionLocal(fva, offset, atype, str(newname))

    def _menuMakeStruct(self, va):
        sname = vs_qt.selectStructure(self.vw.vsbuilder)
        if sname != None:
            self.vw.makeStructure(va, sname)
            self._last_sname = sname  # remembered for the ctrl+S hotkey

    def _menuFunctionGraph(self, fva):
        #docwid = self.parentWidget().vwqgui.vqBuildDockWidget('VQVivFuncgraphView', floating=True)
        vwqgui = self.parentWidget().vwqgui
        fgraph = viv_q_funcgraph.VQVivFuncgraphView(self.vw, vwqgui)
        #fgwid = docwid.widget()
        fgraph.renderFunctionGraph(fva)
        fgraph.show()
        vwqgui.vqDockWidget(fgraph, floating=True)

    @firethread
    def _menuFuncEmuShow(self, fva, va):
        # Runs on a background thread (@firethread): emulate the function
        # up to va, then dump register/operand "magic" state to the canvas.
        self.vw.vprint('Running emulator to: 0x%.8x' % (va,))
        emu = self.vw.getEmulator()
        emu.runFunction(fva, stopva=va)
        regs = emu.getRegisters()
        rnames = regs.keys()
        rnames.sort()
        self.vw.vprint("Showing Register/Magic State At: 0x%.8x" % va)
        op = self.vw.parseOpcode(va)
        self.vw.canvas.addVaText("0x%.8x: " % va, va)
        op.render(self.vw.canvas)
        self.vw.canvas.addText("\n")
        for i in xrange(len(op.opers)):
            o = op.opers[i]
            o.render(self.vw.canvas, op, i)
            self.vw.canvas.addText(" = ")
            oval = o.getOperValue(op, emu)
            mag = emu.getMagic(oval)
            base = "0x%.8x (%d)" % (oval,oval)
            if mag != None:
                if mag.va > oval:
                    base += " %s - %d" % (repr(mag), mag.va - oval)
                else:
                    base += " %s + %d" % (repr(mag), oval - mag.va)
            self.vw.vprint(base)

    def _locMenuRepr(self, va):
        # Short "0xADDR: <location repr>" label used for xref menu entries.
        vw = self.mem
        p = vw.arch.pointerString(va)
        locstr = "Undefined"
        loc = vw.getLocation(va)
        if loc != None:
            locstr = vw.reprLocation(loc)
        return '%s: %s' % (p, locstr[:32])

    def _menuFuncCallGraph(self, fva):
        vivgui = self.vw.getVivGui()
        callview = viv_q_funcviews.FuncCallsView( self.vw )
        callview.functionSelected( fva )
        callview.show()
        vivgui.vqDockWidget( callview, floating=True )
        #blockview = viv_q_funcviews.FunctionBlocksView( self.vw )
        #blockview.functionSelected( fva )
        #blockview.show()
        #vivgui.vqDockWidget( blockview, floating=True )

    def _menuFuncCodeBlocks(self, fva):
        vivgui = self.vw.getVivGui()
        #blockview = viv_q_funcviews.FunctionBlocksView( self.vw, parent=vivgui )
        blockview = viv_q_funcviews.FunctionBlocksView( self.vw )
        blockview.functionSelected( fva )
        blockview.show()
        vivgui.vqDockWidget( blockview, floating=True )
        #self.testtest.functionSelected( fva )
        #self.testtest.show()

    def _woot(self, *args, **kwargs):
        # Placeholder handler for menu entries that are not implemented yet.
        self.vw.vprint('Still working on it....')

    def contextMenuEvent(self, event):
        # Build the right-click menu for the VA under the cursor: naming,
        # xrefs, per-function actions, "make" options for undefined bytes,
        # operand display hints for opcodes, color maps and bookmarks.
        vw = self.mem # Our mem object is a viv workspace!
        menu = vqt_menu.VQMenu('context', parent=self)
        menu.splitchar = '/'
        va = self._canv_curva
        if va != None:
            menu.addField('Rename (n)', self._menuRename, (self._canv_curva,))
            menu.addField('Comment (;)', self._menuComment, (self._canv_curva,))
            # Check for and add xrefs right click option
            for x,tova,xrtype,xrflag in vw.getXrefsTo(va):
                locstr = self._locMenuRepr(x)
                menu.addField('Xrefs To/%s' % locstr, self._navExpression, args=('0x%.8x' % x,))
            for fromva,x,xrtype,xrflag in vw.getXrefsFrom(va):
                locstr = self._locMenuRepr(x)
                menu.addField('Xrefs From/%s' % locstr, self._navExpression, ('0x%.8x' % x,))
            fva = vw.getFunction(va)
            if fva != None:
                fname = vw.getName(fva)
                menu.addField('Function/%s' % fname, self._navExpression, args=(fname,))
                for i,(atype,aname) in enumerate(vw.getFunctionArgs(fva)):
                    menu.addField('Function/Arguments/%s_%d' % (aname,i), self._menuEditFunctionArgument, (fva, i, atype, aname))
                locals = vw.getFunctionLocals(fva)  # NOTE: shadows the builtin name
                locals.sort(cmp=cmpoffset)
                for aoffset, atype, aname in locals:
                    menu.addField('Function/Locals/%s_%d' % (aname,aoffset), self._menuEditFunctionLocal, (fva, aoffset, atype, aname))
                #menu.addField('Function/Edit...', self._woot, (fva,))
                menu.addField('Function/Emulation/Emulate To Here', self._woot, (fva,va))
                menu.addField('Function/Emulation/Show Emu State', self._menuFuncEmuShow, (fva,va))
                menu.addField('Function/Highlight', self._woot, (fva,))
                menu.addField('Function/Show Callers', self._woot, (fva,))
                menu.addField('Function/Code Blocks', self._menuFuncCodeBlocks, (fva, ))
                menu.addField('Function/Call Graph', self._menuFuncCallGraph, (fva, ))
            # FIXME function code flow to here with highlight
            loc = vw.getLocation(va)
            if loc == None:
                menu.addField('Make/Code (c)', vw.makeCode, (self._canv_curva,))
                menu.addField('Make/Function (f)', vw.makeFunction, (self._canv_curva,))
                menu.addField('Make/String (s)', vw.makeString, (self._canv_curva,))
                menu.addField('Make/Pointer (p)', vw.makePointer, (self._canv_curva,))
                menu.addField('Make/Unicode (u)', vw.makeUnicode, (self._canv_curva,))
                menu.addField('Make/Structure (S)', self._menuMakeStruct, (self._canv_curva,))
            else:
                if loc[L_LTYPE] == LOC_OP:
                    op = vw.parseOpcode(va)
                    for idx,oper in enumerate(op.opers):
                        # Give the option to switch ('hint') that you want
                        # the immediate operand displayed differently...
                        if oper.isImmed():
                            val = oper.getOperValue(op)
                            hval = e_bits.hex(val)
                            cval = val
                            r = []
                            while cval:
                                r.append(chr(cval & 0xff))
                                cval = cval >> 8
                            cstr = repr(''.join(r))
                            menu.addField('Immediate/Decimal (%d)' % val, vw.setSymHint, args=(va, idx, str(val)))
                            menu.addField('Immediate/Hex (%s)' % hval, vw.setSymHint, args=(va, idx, hval))
                            menu.addField('Immediate/Chars (%s)' % cstr, vw.setSymHint, args=(va, idx, cstr))
                            names = vw.vsconsts.revLookup(val)
                            if names != None:
                                for name in names:
                                    menu.addField('Immediate/%s' % name, vw.setSymHint, args=(va, idx, name))
                menu.addField('Undefine ()', vw.delLocation, (self._canv_curva,))
        menu.addField('Color Maps/Clear All...', self._clearColorMap)
        names = vw.getColorMaps()
        names.sort()
        for name in names:
            map = vw.getColorMap(name)
            menu.addField('Color Maps/%s' % name, self._applyColorMap, (map,))
        menu.addField('Bookmark (B)', self._menuBookmark, args=(self._canv_curva, ))
        menu.exec_(event.globalPos())
class VQVivMemoryView(e_mem_qt.VQMemoryWindow, viv_base.VivEventCore):
    """Memory window bound to a viv workspace.

    Routes GUI navigation signals (docked windows follow global nav,
    floating ones consume it internally), re-renders the canvas on
    workspace events (locations, names, comments, ...), and adds the
    'x' hotkey to pop up an xrefs view.
    """

    __canvas_class__ = VQVivMemoryCanvas

    def __init__(self, vw, vwqgui):
        self.vw = vw
        self.vwqgui = vwqgui
        e_mem_qt.VQMemoryWindow.__init__(self, vw, syms=vw, parent=vwqgui)
        viv_base.VivEventCore.__init__(self, vw)
        vwqgui.addEventCore(self)
        vwqgui.vivNavSignal.connect( self.vivMemNavSlot )
        vwqgui.vivMemColorSignal.connect( self.mem_canvas._applyColorMap )
        self.mem_canvas.setNavCallback(self._navGotoExpr)
        self.mem_canvas._canv_rend_middle = True
        self.mem_canvas.vqAddHotKey('x', self._hotkey_x)

    def _hotkey_x(self, canv, key):
        # Show all xrefs to the cursor VA in a floating dock widget.
        if self.mem_canvas._canv_curva:
            xrefs = self.vw.getXrefsTo(self.mem_canvas._canv_curva)
            if len(xrefs) == 0:
                self.vw.vprint('No xrefs found!')
                return
            title = 'Xrefs To: 0x%.8x' % self.mem_canvas._canv_curva
            view = viv_q_views.VQXrefView(self.vw, self.vwqgui, xrefs=xrefs, title=title)
            self.vwqgui.vqDockWidget(view, floating=True)

    def closeEvent(self, event):
        # FIXME this doesn't actually do anything...
        self.parentWidget().delEventCore(self)
        return e_mem_qt.VQMemoryWindow.closeEvent(self, event)

    def _navGotoExpr(self, expr):
        parent = self.parentWidget()
        if parent.isFloating():
            # If we are floating, consume nav events internally
            self.vqMemNavSlot(expr)
            return
        self.vwqgui.vivNavSignal.emit(expr)

    def vivMemNavSlot(self, expr, sizeexpr=None):
        # Docked + visible windows follow global nav; floating ones ignore it.
        parent = self.parentWidget()
        if parent.isFloating():
            return
        if parent.vqIsVisible():
            self.vqMemNavSlot(expr, sizeexpr=sizeexpr)

    def loadDefaultRenderers(self):
        import envi.memcanvas.renderers as e_render
        # FIXME check endianness
        self.mem_canvas.addRenderer("bytes", e_render.ByteRend())
        self.mem_canvas.addRenderer("u_int_16", e_render.ShortRend())
        self.mem_canvas.addRenderer("u_int_32", e_render.LongRend())
        self.mem_canvas.addRenderer("u_int_64", e_render.QuadRend())
        vivrend = viv_rend.WorkspaceRenderer(self.vw)
        self.mem_canvas.addRenderer('Viv', vivrend)
        self.mem_canvas.setRenderer('Viv')

    def _updateFunction(self, fva):
        # Re-render every code block belonging to the function.
        for cbva, cbsize, cbfva in self.vw.getFunctionBlocks(fva):
            self.mem_canvas.renderMemoryUpdate(cbva, cbsize)

    # --- workspace event handlers: refresh the affected canvas region ---

    def VWE_SYMHINT(self, vw, event, einfo):
        va, idx, hint = einfo
        self.mem_canvas.renderMemoryUpdate(va, 1)

    def VWE_ADDLOCATION(self, vw, event, einfo):
        va,size,ltype,tinfo = einfo
        self.mem_canvas.renderMemoryUpdate(va, size)

    def VWE_DELLOCATION(self, vw, event, einfo):
        va,size,ltype,tinfo = einfo
        self.mem_canvas.renderMemoryUpdate(va, size)

    def VWE_ADDFUNCTION(self, vw, event, einfo):
        va,meta = einfo
        self.mem_canvas.renderMemoryUpdate(va, 1)

    def VWE_SETFUNCMETA(self, vw, event, einfo):
        fva, key, val = einfo
        self._updateFunction(fva)

    def VWE_SETFUNCARGS(self, vw, event, einfo):
        fva, fargs = einfo
        self._updateFunction(fva)

    def VWE_COMMENT(self, vw, event, einfo):
        va,cmnt = einfo
        self.mem_canvas.renderMemoryUpdate(va, 1)

    def VWE_SETNAME(self, vw, event, einfo):
        va,name = einfo
        self.mem_canvas.renderMemoryUpdate(va, 1)
        # Also refresh everything that references the renamed VA.
        for fromva,tova,rtype,rflag in self.vw.getXrefsTo(va):
            self.mem_canvas.renderMemoryUpdate(fromva, 1)
|
"""Module to create README.md file with basic scaffold."""
import argparse
import os
import shutil
import markdown_generator as mg
from readme_generator.scaffold_options import (dbms, frameworks, languages,
serving_options, test_options,)
from write_me.dep_info import parse
from write_me.django_setings_info import get_settings_info
from write_me.django_uri_info import get_url_docstrings
from write_me.get_license import get_license_type
from write_me.list_files import get_all_py_files
from write_me.project_data import get_project_url
from write_me.pyramid_ini import get_dev_info
from write_me.travis_badge import get_travis_badge
from write_me.stp_info import parse_setup_py
from write_me.tsting_info import get_docstrings
# Gather project metadata up front by introspecting the repository.
settings_dict = get_settings_info()
url_dict = get_url_docstrings()
setup_dict = parse_setup_py()
dependencies = parse()
license = get_license_type()  # NOTE: shadows the `license` site builtin
test_dict = get_docstrings()
get_all_py = get_all_py_files()
user_data = get_project_url()
pyramid_info = get_dev_info()
badge = get_travis_badge()
# Detect the testing framework from the project's own dependencies.
# NOTE(review): `parse` is called both with and without an argument here
# and above -- confirm both call signatures are intended.
testing_lst = parse(get_all_py)
testing_mod = ''
for i in testing_lst:
    if i == "pytest" or i == "nose":
        testing_mod = i
if not testing_mod:
    testing_mod = "unittest"
# CLI flags selecting verbosity and the web framework scaffold.
parser = argparse.ArgumentParser()  # pragma: no cover
parser.add_argument('-v', '--verbose',
                    help='create verbose readme',
                    action='store_true')
parser.add_argument('-d', '--django',
                    help='Django readme scaffolding',
                    action='store_true')
parser.add_argument('-p', '--pyramid',
                    help='Pyramid readme scaffolding',
                    action='store_true')
parser.add_argument('-f', '--flask',
                    help='Flask readme scaffolding',
                    action='store_true')
args = parser.parse_args()
def overwrite(answer=None):
    """Check if user wants to overwrite existing README.md.

    Returns the filename to write: 'README.md' (after backing the old
    file up to README.md.old) when the user agrees, 'README.md.new'
    otherwise.

    :param answer: optional pre-supplied answer (any case); prompts
        interactively when None or unrecognized.
    """
    prompt_txt = """
    Do you want to overwrite your present README file?
    Don't worry, if you overwrite your present README
    it will be backed up to README.md.old
    Yes or no?
    """
    poss_answers = ['n', 'no', 'y', 'yes']
    if answer:
        # BUGFIX: normalize pre-supplied answers too -- previously only
        # interactive input was lowercased, so overwrite('No') re-prompted.
        answer = answer.lower()
    while answer not in poss_answers:
        answer = input(prompt_txt).lower()
    if answer in ('yes', 'y'):
        if os.path.isfile('README.md'):
            shutil.move('README.md', 'README.md.old')
        return 'README.md'
    return 'README.md.new'
def main():
    """Create README.

    Builds README.md (or README.md.new) section by section from the
    module-level metadata dicts and CLI flags, then returns a string of
    follow-up TODOs for the user.
    """
    if os.path.isfile('README.md'):
        readme = overwrite()
    else:
        readme = 'README.md'
    with open(readme, 'w') as f:
        w = mg.Writer(f)
        w.write_heading(user_data['project_name'], 1)
        w.write_hrule()
        # Description and Key Features
        if badge:
            w.writeline(badge)
        w.writeline()
        w.writeline('Version: ' + mg.emphasis(setup_dict['version']))
        w.writeline()
        w.writeline(setup_dict['description'])
        key_features = mg.List()
        key_features.append('Feature #1')
        key_features.append('Feature #2')
        key_features.append('Feature #3')
        w.write(key_features)
        # AUTHORS
        w.write_heading('Authors', 3)
        w.write_hrule()
        authors = mg.List()
        # NOTE(review): every author is linked to the same profile URL --
        # confirm this is intentional for multi-author projects.
        for i in range(len(setup_dict['author'])):
            authors.append(mg.link(user_data['project_user_profile_url'], setup_dict['author'][i]))
        w.write(authors)
        # DEPENDENCIES
        w.write_heading('Dependencies', 3)
        w.write_hrule()
        deps = mg.List()
        for dep in dependencies:
            deps.append(dep)
        w.write(deps)
        if args.verbose:
            # DOCS
            w.write_heading('Documentation', 3)
            w.write_hrule()
            w.writeline('Additional documentation can be found at: {}'.format('http://write-me.readthedocs.io/en/stable/'))
        w.write_heading('Getting Started', 3)
        w.write_hrule()
        # NOTE(review): 'Getting Started' heading is written twice in a
        # row here -- looks unintentional; confirm before removing.
        w.write_heading('Getting Started', 3)
        # GETTING STARTED: Installation requirements
        w.write_heading(mg.emphasis('Prerequisites'), 5)
        prereqs = mg.List()
        prereqs.append(mg.link('https://www.python.org/downloads/', 'python (3.6+)'))
        prereqs.append(mg.link('https://pip.pypa.io/en/stable/', 'pip'))
        prereqs.append(mg.link('https://git-scm.com/', 'git'))
        w.write(prereqs)
        # GETTING STARTED: Cloning/VE Instructions
        w.write_heading(mg.emphasis('Installation'), 5)
        w.writeline('First, clone the project repo from Github. Then, change directories into the cloned repository. To accomplish this, execute these commands:')
        w.writeline()
        w.writeline('`$ git clone {}.git`'.format(user_data['url']))
        w.writeline()
        w.writeline('`$ cd {}`'.format(user_data['project_name']))
        w.writeline()
        w.writeline('Now now that you have cloned your repo and changed directories into the project, create a virtual environment named "ENV", and install the project requirements into your VE.')
        w.writeline()
        w.writeline('`$ python3 -m venv ENV`')
        w.writeline()
        w.writeline('`$ source ENV/bin/activate`')
        w.writeline()
        w.writeline('`$ pip install -r requirements.txt`')
        if args.django:
            # GETTING STARTED: Serving the App (Django)
            w.write_heading(mg.emphasis('Serving Locally'), 5)
            w.writeline(serving_options['django']['instructions'])
            w.writeline(serving_options['django']['serve_command'])
            w.writeline(serving_options['django']['hosting'])
        elif args.pyramid:
            # GETTING STARTED: Serving the App (Pyramid)
            w.write_heading(mg.emphasis('Serving Locally'), 5)
            w.writeline(serving_options['pyramid']['instructions'])
            w.writeline(serving_options['pyramid']['serve_command'])
            w.writeline(serving_options['pyramid']['hosting'])
        elif args.flask:
            # GETTING STARTED: Serving the App (Flask)
            w.write_heading(mg.emphasis('Serving Locally'), 5)
            w.writeline(serving_options['flask']['instructions'])
            w.writeline(serving_options['flask']['serve_command'])
            w.writeline(serving_options['flask']['hosting'])
        # TESTS: Running & Files
        w.write_heading('Test Suite', 3)
        w.write_hrule()
        if len(test_dict.keys()) > 0:
            w.write_heading(mg.emphasis('Running Tests'), 5)
            w.writeline('This application uses {} as a testing suite. To run tests, run:'.format(mg.link(test_options[testing_mod][0], testing_mod)))
            w.writeline()
            w.writeline('`{}`'.format(test_options[testing_mod][1]))
            w.writeline()
            w.writeline('To view test coverage, run:')
            w.writeline()
            w.writeline('`{}`'.format(test_options[testing_mod][2]))
            w.write_heading(mg.emphasis('Test Files'), 5)
            w.writeline('The testing files for this project are:')
            w.writeline()
            test_table = mg.Table()
            test_table.add_column('File Name', mg.Alignment.CENTER)
            test_table.add_column('Description', mg.Alignment.CENTER)
            for key, val in test_dict.items():
                test_table.append('`{}`'.format(key), val)
            w.write(test_table)
            # URLS - table
            if args.django or args.pyramid or args.flask:
                w.write_heading('URLs', 3)
                w.write_hrule()
                w.writeline('The URLS for this project can be found in the following modules:')
                w.writeline()
                urls_table = mg.Table()
                urls_table.add_column('URL module', mg.Alignment.CENTER)
                urls_table.add_column('Description', mg.Alignment.CENTER)
                for key, val in url_dict.items():
                    urls_table.append(key, val)
                w.write(urls_table)
        else:
            w.writeline('This repository contains no tests.')
        # APPLICATIONS (Django) -v
        if args.django and args.verbose:
            w.write_heading('Django Apps', 3)
            w.write_hrule()
            models_list = mg.List()
            # Only list the project's own apps, not django.contrib ones.
            for model in settings_dict['INSTALLED_APPS']:
                if "django.contrib" not in model:
                    models_list.append(model)
            w.write(models_list)
        # TOOLS
        w.write_heading('Development Tools', 3)
        w.write_hrule()
        tools_list = mg.List()
        tools_list.append('{} - programming language'.format(mg.emphasis('python')))
        if os.path.isfile('requirements.txt'):
            with open('requirements.txt', 'r') as f:
                reqs = []
                for line in f:
                    line = line.strip()
                    reqs.append(line)
                # Strip pinned versions: "pkg==1.2" -> "pkg".
                reqs = [i.split('==')[0] for i in reqs]
                for package in reqs:
                    if package.lower() in frameworks:
                        tools_list.append('{} - web framework'.format(mg.emphasis(package.lower())))
                    elif package.lower() in dbms:
                        tools_list.append('{} - DB management system'.format(mg.emphasis(package.lower())))
                    elif package.lower() in languages:
                        tools_list.append('{} - programming language'.format(mg.emphasis(package.lower())))
        w.write(tools_list)
        if args.verbose:
            # CONTRIBUTIONS
            w.write_heading('Contributions', 3)
            w.write_hrule()
            w.writeline('If you wish to contribute to this project, please contact {}.'.format(setup_dict['author_email']))
        # LICENSE
        w.write_heading('License', 3)
        w.write_hrule()
        w.writeline(license)
        # ACKNOWLEDGEMENTS
        w.write_heading('Acknowledgements', 3)
        w.write_hrule()
        shoutouts = mg.List()
        shoutouts.append('Coffee')
        w.write(shoutouts)
        w.writeline(mg.emphasis('This README was generated using ' + mg.link('https://github.com/chelseadole/write-me', 'writeme.')))
    return """
    README generated.
    User TODOs:
    * Add application highlights to bullet-point "Features" section
    * Add contributor Github URL links to "Authors" section
    * Link additional documentation to "Documentation" section
    * Populate "Acknowledgements" section
    Please review your new README.
    """
if __name__ == "__main__":
main()
|
from django import forms
from .models import *
class QualificationForm(forms.ModelForm):
    """ModelForm for creating/editing a Qualification record."""

    field = forms.CharField(
        label='Field of Study',
        widget=forms.TextInput(attrs={"placeholder": "Field of Study"}),
    )
    institute = forms.CharField(
        label='Name of Institution',
        widget=forms.TextInput(attrs={"placeholder": "Name of College/University"}),
    )

    class Meta:
        model = Qualification
        fields = ('category', 'level', 'field', 'institute', 'date_started', 'date_completed',)
class CategoryForm(forms.ModelForm):
    """ModelForm for a single Category value."""

    category = forms.CharField(
        label='Category',
        widget=forms.TextInput(attrs={"placeholder": "Category of Study"}),
    )

    class Meta:
        model = Category
        fields = ('category',)
class MaindocumentForm(forms.ModelForm):
    """ModelForm for a Maindocument (type, description, active flag)."""

    docttype = forms.CharField(
        label='Document Type',
        widget=forms.TextInput(attrs={"placeholder": "Document Type"}),
    )
    description = forms.CharField(
        label='Description',
        widget=forms.Textarea(attrs={"placeholder": "Description"}),
    )

    class Meta:
        model = Maindocument
        fields = ('docttype', 'description', 'is_active',)
class UploadMainDocumentForm(forms.ModelForm):
    """Upload form exposing only the file field of a Staffdocument."""

    class Meta:
        model = Staffdocument
        fields = ('documentupload',)
class CategoryLevelForm(forms.ModelForm):
    """ModelForm for a single Categorylevel value."""

    level = forms.CharField(
        label='Level',
        widget=forms.TextInput(attrs={"placeholder": "Level of Study"}),
    )

    class Meta:
        model = Categorylevel
        fields = ('level',)
class UploadDocumentForm(forms.ModelForm):
    """Upload form for a Documentupload: the file plus its display title."""

    doctitle = forms.CharField(
        label='Document Title',
        widget=forms.TextInput(attrs={"placeholder": "Title of Document"}),
    )

    class Meta:
        model = Documentupload
        fields = ('documentupload', 'doctitle')
"""
This module lets you practice using Create MOVEMENT and SENSORS,
in particular the DISTANCE and ANGLE sensors.
Authors: David Mutchler, Valerie Galluzzi, Mark Hays, Amanda Stouder,
their colleagues and Muqing Zheng & Jindong Chen. September 2015.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
from safest import safest_create as create
import time
def main():
    """
    Safely runs YOUR robot code (run_MY_robot) by way of start_robot.
    """
    com_port = 5  # Use YOUR laptop's COM number
    ignore_sensor_garbage = False  # Leave this False
    start_robot(com_port, ignore_sensor_garbage)
def run_MY_robot(robot):
    """
    Tests the go_by_time function.
    After ALL tests have completed, asks the given robot to shutdown().
    Preconditions:
      :type robot: create.Create
    """
    # Test 1:
    go_by_time(robot, 100, 50)  # Fast robot that goes 1 meter.
    time.sleep(2)  # Pause a bit before doing the next test.
    # ------------------------------------------------------------------
    # DONE: 2. Add a second test, with any reasonable arguments you like.
    # ------------------------------------------------------------------
    # Test 2:  (comment fix: this second test was mislabeled "Test 1")
    go_by_time(robot, 120, 60)  # Fast robot that goes 1.2 meter.
    time.sleep(2)  # Pause a bit before doing the next test.
def go_by_time(robot, distance_in_CM, speed_in_CM_per_second):
    """
    Drives the robot FORWARD in a straight line FOR THE GIVEN DISTANCE
    at the given speed, using the GO_BY_TIME algorithm:
      1. Compute the number of seconds needed: distance / speed.
      2. Start the robot moving at the given speed.
      3. Sleep for the computed number of seconds.
      4. Stop the robot.
    Then prints the distance traveled during the motion, as reported by
    the robot's own distance sensor.
    Preconditions:
      :type robot: create.Create
      :type distance_in_CM: float
      :type speed_in_CM_per_second: int
      and the latter two are both positive.
    """
    # Named 'seconds' deliberately -- 'time' is the module we sleep with.
    seconds = distance_in_CM / speed_in_CM_per_second
    robot.go(speed_in_CM_per_second)
    time.sleep(seconds)
    robot.stop()
    print(robot.getSensor(create.Sensors.distance))
# ----------------------------------------------------------------------
# TODO: 4. ** OPTIONAL **
# Get out a yardstick and MEASURE how far the robot ACTUALLY went.
# Compare that to:
# -- How far you TOLD the robot to move.
# -- How far the robot REPORTED that it moved.
# Nothing to turn in for this TODO, but do it if you have time.
# ----------------------------------------------------------------------
def measure_MY_robot(robot):
    # Drives 91.44 cm (exactly one yard) at 50 cm/s, so the actual distance
    # traveled can be checked against a yardstick.
    go1_by_time(robot, 91.44, 50)
def go1_by_time(robot, distance_in_CM, speed_in_CM_per_second):
    """
    Drives the robot forward for the given distance at the given speed
    and prints the sensor-reported distance.

    This was a line-for-line duplicate of go_by_time; it now delegates to
    it so the go-by-time algorithm lives in exactly one place.
    Preconditions: same as go_by_time (robot is a create.Create, the two
    numbers are positive).
    """
    go_by_time(robot, distance_in_CM, speed_in_CM_per_second)
def start_robot(port, run_despite_sensor_garbage):
    """
    Constructs a robot and calls run_MY_robot, sending it the robot.
    ** Put all YOUR code in the run_MY_robot function. **
    The code in this function ensures that the robot
    call its shutdown method even if the code breaks.
    """
    # DO NOT MODIFY THIS FUNCTION.
    try:
        robot = create.Create(port, run_despite_sensor_garbage)
        run_MY_robot(robot)
        measure_MY_robot(robot)
        if robot:
            robot.shutdown()
    except create.RobotError:
        pass  # Already handled, nothing more needs to be done.
    except:
        # NOTE(review): if create.Create() itself raises something other than
        # RobotError, `robot` is unbound here and this handler would raise a
        # NameError of its own -- confirm whether that case can occur.
        if robot and isinstance(robot, create.Create):
            robot.try_shutdown()
        raise
# ----------------------------------------------------------------------
# Calls main to start the ball rolling.
# ----------------------------------------------------------------------
# NOTE(review): runs at import time (no __name__ guard), per the course
# template's convention.
main()
|
#!/usr/bin/env python2
if __name__ == '__main__':
    # Delegate to gdrive.main with the raw argv and use its return value
    # as the process exit status.
    import sys
    from gdrive import main
    sys.exit(main(sys.argv))
|
import numpy as np
def normalize(a):
    """Return *a* as a numpy array scaled to unit Euclidean (L2) norm.

    NOTE(review): a zero vector divides by zero here (numpy warns and
    yields nan/inf) -- confirm callers never pass one.
    """
    vec = np.array(a)
    return vec / np.linalg.norm(vec)
def add_vectors(a, b):
    """Element-wise sum of two equal-length vectors, returned as a tuple.

    Generalized from the original hard-coded 2-D version to any length;
    for length-2 inputs the result is identical to the old behavior.
    """
    return tuple(x + y for x, y in zip(a, b))
|
import numpy as np
#1. Training data
x = np.array(range(1,101))
y = np.array(range(1,101))
#print(x)
# x_train, x_val, x_test = np.split(x, [60,80])
# y_train, y_val, y_test = np.split(y, [60,80])
from sklearn.model_selection import train_test_split
# 60/20/20 split: carve off 40% for test, then halve that into val/test.
x_train, x_test, y_train, y_test = train_test_split( x, y, random_state = 66, test_size = 0.4)
x_val, x_test, y_val, y_test = train_test_split(x_test, y_test, random_state = 66, test_size = 0.5)
print(x_train)
print(x_val)
print(x_test)
# #Columns take precedence; rows are ignored
# #2. Model construction
# from keras.models import Sequential
# from keras.layers import Dense
# model = Sequential()
# model.add(Dense(5, input_shape = (1, ), activation ='relu'))
# model.add(Dense(4))
# model.add(Dense(3))
# model.add(Dense(1))
# #model.summary()
# # #model.summary() # when Dense layers are 5 and 3: params = input weight 5, bias 1, x 3
# #3. Training
# model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
# #model.fit(x, y, epochs=100, batch_size=11)
# model.fit(x_train, y_train, epochs=100, batch_size=1)
# # validation_data= (x_val, y_val))
# #4. Evaluation / prediction
# # loss, acc = model.evaluate(x_test, y_test, batch_size=3)
# # print('acc:', acc)
# y_predict = model.predict(x_test)
# print(y_predict)
# #Compute RMSE
# from sklearn.metrics import mean_squared_error
# def RMSE(y_test, y_predict):
#     return np.sqrt(mean_squared_error(y_test, y_predict))
# print("RMSE: ", RMSE(y_test, y_predict))
# #Compute R2
# from sklearn.metrics import r2_score
# r2_y_predict = r2_score(y_test, y_predict)
# print("R2: ", r2_y_predict)
|
class MatrixException(Exception):
    """Base exception for matrix errors; carries an optional message text."""

    def __init__(self, text=None):
        self.text = text

    def __str__(self):
        # Default message when no text was supplied.
        return 'Raised' if self.text is None else self.text
class RMatrixException(MatrixException):
    """Base class for exceptions raised by RMatrix (real-matrix) operations."""
    pass
class RMatrixArithmeticError(RMatrixException):
    """Raised when a real-matrix arithmetic operation fails; holds a reason string."""

    def __init__(self, reason=None):
        self.reason = reason

    def __str__(self):
        # Fall back to 'unspecified' when no reason was supplied.
        return 'reason:' + (self.reason if self.reason is not None else 'unspecified')
# Copyright 2010 Alon Zakai ('kripken'). All rights reserved.
# This file is part of Syntensity/the Intensity Engine, an open source project. See COPYING.txt for licensing.
import os
# Offer to interactively set up the settings.json
def run(config_filename, template_filename):
print
print '======================================'
print 'Intensity Engine Server - Setup Wizard'
print '======================================'
print
import json, re
if config_filename == template_filename:
print 'Using default values from your existing settings.json. Delete it to use the engine defaults in this wizard.\n'
# Load
f = open(template_filename)
template = json.loads(f.read())
f.close()
# Ask questions
def set_activity(value, default):
if value == '':
value = default
if value == '':
activity_id = ''
map_asset_id = ''
else:
activity_id = value
map_asset_id = '' # Will be discovered by the server on startup
# Save them both
section = 'Activity'
if not section in template:
template[section] = {}
template[section]['force_activity_id'] = activity_id
template[section]['force_map_asset_id'] = map_asset_id
QUESTIONS = [
(
'Enter the server IP address/hostname (or "localhost" to make it only accessible on the same machine)',
('Network', 'address'),
'localhost',
None,
),
(
'Enter the port to listen on',
('Network', 'port'),
'28787',
None,
),
(
'Enter the admin port to listen on (for controlling the server externally, like requisitioning)',
('Network', 'admin_port'),
'28789',
None,
),
# (
# 'Enter the master server to interact with',
# ('Network', 'master_server'),
# 'www.syntensity.com:8888',
# ),
(
'Enter the activity to run (paste the activity ID, or the URL of the activity - something like "http://www.syntensity.com:8888/tracker/activity/view/.../")',
('Activity', 'force_activity_id'),
'',
set_activity,
),
]
for question in QUESTIONS:
text = question[0]
section = question[1][0]
option = question[1][1]
default = question[2]
post = question[3] if len(question) >= 4 else None
try:
default = template[section][option]
except:
pass
value = raw_input('%s [%s]: ' % (text, default))
if post is None:
if value == '': value = default
if not section in template:
template[section] = {}
template[section][option] = value
else:
post(value, default)
print '\nSetup wizard complete. Writing to: %s' % config_filename
# Write
config_file = open(config_filename, 'w')
config_.write(json.dumps(template, sort_keys=True, indent=4))
config_file.flush()
os.fsync(config_file.fileno())
config_file.close()
def ask(config_filename, template_filename, args):
    # Decides whether to offer/run the setup wizard. With '--wizard' in args it
    # runs unconditionally; otherwise it (currently) never auto-shows.
    show = False
    force = False
    if False:#not os.path.exists(config_filename): # XXX For now, do not show by default - until tested on Windows
        show = True
    else:
        template_filename = config_filename # Start from existing config
    if '--wizard' in args:
        show = True
        force = True
    ran = False
    if show:
        try:
            # Empty input (just Enter) counts as "yes".
            if force or raw_input('\nRun setup wizard? [Y/n] ').lower() in ['y', '']:
                run(config_filename, template_filename)
                ran = True
        except EOFError:
            # stdin closed (e.g. non-interactive run): silently skip the wizard.
            pass
    print
    print '======================================================================='
    if not ran:
        print 'Setup wizard not run'
        print 'You can run the setup wizard later, by running the server with --wizard'
    print '======================================================================='
    print
|
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 1 12:31:14 2019
Update: trial2:
>using classes
trial 3:
>Using factorize for non-numeric data
trial 4:
>Attempting to use duplicate dataset where factorization is not vital
>13/3/19: Made DecideLabel and is_number independent of val (using local variables now)
@author: 510571
"""
import tkinter as tk
import matplotlib.pyplot as plot
import pandas
import numpy as np
import seaborn as sns
import itertools
from mpl_toolkits.mplot3d import Axes3D
import copy
class visuals():
def __init__(self,dataset): #constructor of class
self.dataset = dataset
self.scrollwin = 'NONE'
self.dup_dataset = copy.copy(self.dataset)
self.ticks = [' ' for self.i in range(len(self.dataset.columns.values))]
self.tickmark = [0 for self.i in range(len(self.dataset.columns.values))]
self.alpha_data_process()
    def alpha_data_process(self): #label encode categorical data and save this data to be used as names of axes, etc.
        """Factorize every non-numeric column in place, remembering the
        original category labels in self.ticks and flagging the column
        in self.tickmark."""
        for i in range(len(self.dataset.columns.values)):
            # Anything that is not int64/float64 is treated as categorical.
            if( (self.dataset.iloc[:,i].dtype != np.int64) and (self.dataset.iloc[:,i].dtype != np.float64)):
                self.ticks[i] = np.unique(self.dataset.iloc[:,i])
                self.tickmark[i] = 1
                self.tempcol = pandas.factorize(self.dataset.iloc[:,i])
                self.dataset.iloc[:,i] = self.tempcol[0]
        '''
        for i in range(len(self.dataset.columns.values)):
            if( (self.dataset.iloc[:,i].dtype != np.number) or (self.dataset.iloc[:,i].dtype != np.floating)):
                self.ticks[i] = np.unique(self.dataset.iloc[:,i])
                self.tickmark[i] = 1
                self.tempcol = pandas.factorize(self.dataset.iloc[:,i])
                self.tempcol[0]
                self.dataset.iloc[:,i]
        '''
    def myfunction(self,event): #vital function in scrollwindow()
        """<Configure> callback: resize the canvas scroll region to fit its contents."""
        self.canvas.configure(scrollregion=self.canvas.bbox("all"),width=self.widh,height=self.heig)
    def scrollwindow(self,widh,heig): #put scrollbars in any window of choice
        """Attach vertical/horizontal scrollbars to self.scrollwin and expose
        self.frame as the scrollable container widgets should be added to.

        :param widh: canvas width in pixels.
        :param heig: canvas height in pixels.
        """
        self.widh = widh
        self.heig = heig
        self.myframe=tk.Frame(self.scrollwin,width=100,height=100,bd=1)
        self.myframe.place(x=10,y=10)
        self.canvas=tk.Canvas(self.myframe)
        self.frame=tk.Frame(self.canvas)
        self.myscrollbar=tk.Scrollbar(self.myframe,orient="vertical",command=self.canvas.yview)
        self.myscrollbar2=tk.Scrollbar(self.myframe,orient="horizontal",command=self.canvas.xview)
        self.canvas.configure(yscrollcommand=self.myscrollbar.set)
        self.canvas.configure(xscrollcommand=self.myscrollbar2.set)
        self.myscrollbar.pack(side="right",fill="y")
        self.myscrollbar2.pack(side="bottom",fill="x")
        self.canvas.pack(side="left")
        self.canvas.create_window((0,0),window=self.frame,anchor='nw')
        self.frame.bind("<Configure>",self.myfunction)
    def selectallboxes(self,col1): #To select all columns at once in OpenFileSelective
        """Create one pre-selected Checkbutton per dataset column, storing each
        column's IntVar in the caller-supplied dict *col1*."""
        lft = {}
        for val in range(self.shape[1]):
            col1[val] = tk.IntVar()
            # DecideLabel is defined elsewhere in this class (out of view here).
            lft[val] = tk.Checkbutton(self.frame, text = self.DecideLabel(self.colname[val],val), variable = col1[val])
            lft[val].select()
            lft[val].grid(row=val+2, column=0, sticky = 'w')
    def Plotgraph(self): #Function to plot 2D graph along X and Y axis
        """Open a scrollable window with X/Y radio-button column pickers for a
        2-D scatter plot; the Plot button triggers clickplotgraph()."""
        self.plotwindow = tk.Toplevel()
        self.plotwindow.geometry("500x500")
        self.plotwindow.title('Plot Graph')
        #global colname
        self.colname = self.dataset.columns.values
        self.col1 = {}
        self.shape = self.dataset.shape
        self.scrollwin = self.plotwindow
        self.scrollwindow(450,450)
        self.b1 = tk.Label(self.frame, text = 'Y axis')
        self.b2 = tk.Label(self.frame, text = 'X axis')
        self.b1.grid(row=0, column =1, ipadx = 0, ipady =0, sticky = 'w')
        self.b2.grid(row=0, column =0, ipadx = 0, ipady =0, sticky = 'w')
        self.v1 = tk.IntVar()
        self.v2 = tk.IntVar()
        self.v1.set(0)
        self.v2.set(0)
        for self.i in range(self.shape[1]):
            self.col1[self.i] = [self.colname[self.i],self.i]
        # NOTE(review): enumerate() iterates the keys of the self.col1 dict
        # built above while rebinding self.col1 itself each pass -- confirm
        # before refactoring this pattern.
        for self.val, self.col1 in enumerate(self.col1):
            self.lft = tk.Radiobutton(self.frame,
                          text= self.DecideLabel(self.colname[self.val],self.val),
                          variable=self.v1,
                          value=self.val)
            self.lft.grid(row = self.val+1 ,column = 1,ipadx = 0, ipady =0, sticky = 'w')
            self.rgt = tk.Radiobutton(self.frame,
                          text= self.DecideLabel(self.colname[self.val],self.val),
                          variable=self.v2,
                          value=self.val)
            self.rgt.grid(row = self.val+1 ,column = 0,ipadx = 0, ipady =0, sticky = 'w')
        self.clickbutton = tk.Button(self.frame, text = 'Plot', command = self.clickplotgraph)
        self.clickbutton.grid(row=0, column=60, ipadx = 0, ipady =0)
    def clickplotgraph(self): #Function executed after button pressed in Plotgraph() function
        """Scatter-plot the selected X/Y columns using the un-encoded copy."""
        #print('using dup_dataset')
        self.x = self.dup_dataset.iloc[:,self.v2.get()]
        self.y = self.dup_dataset.iloc[:,self.v1.get()]
        plot.figure(figsize = (10,10))
        '''
        if (self.tickmark[self.v2.get()] == 1):
            plot.xticks(np.unique(self.x), self.ticks[self.v2.get()])
        if (self.tickmark[self.v1.get()] == 1):
            plot.yticks(np.unique(self.y), self.ticks[self.v1.get()])
        '''
        plot.xlabel(self.colname[self.v2.get()])
        plot.ylabel(self.colname[self.v1.get()])
        plot.scatter(self.x,self.y)
        plot.title('Graph')
        plot.show()
    def Histogram(self): #Function to plot histogram for a column
        """Open a window with a bin-count slider and a radio-button column
        picker; the Plot button triggers clickhistogram()."""
        self.histwindow = tk.Toplevel()
        self.histwindow.geometry("500x500")
        self.histwindow.title('Histogram')
        self.colname = self.dataset.columns.values
        self.col1 = {}
        self.shape = self.dataset.shape
        self.scrollwin = self.histwindow
        self.scrollwindow(450,450)
        self.w1 = tk.Scale(self.frame, from_=1,tickinterval=0, to=100, orient= 'horizontal', length = 200, label = 'Select Bins')
        self.w1.set(10)
        self.w1.grid(row = 0, column = 1, sticky = 'w',ipadx = 10)
        self.b1 = tk.Label(self.frame, text = 'Select attribute: ')
        self.b1.grid(row=0, column=0, ipadx = 10)
        self.v = tk.IntVar()
        self.v.set(0)
        for self.i in range(self.shape[1]):
            self.col1[self.i] = [self.colname[self.i],self.i]
        # NOTE(review): enumerate() over the self.col1 dict rebinds self.col1.
        for self.val, self.col1 in enumerate(self.col1):
            self.lft = tk.Radiobutton(self.frame,
                          text= self.DecideLabel(self.colname[self.val],self.val),
                          variable=self.v,
                          value=self.val)
            self.lft.grid(row = self.val+1 ,column = 0, sticky = 'w',ipadx = 0, ipady =0)
        self.clickbutton = tk.Button(self.frame, text = 'Plot', command = self.clickhistogram)
        self.clickbutton.grid(row=0, column=2, ipadx = 10)
    def clickhistogram(self): #Function executed after button pressed in Histogram() function
        """Draw a histogram of the selected column with the slider's bin count."""
        self.x = self.dup_dataset.iloc[:,self.v.get()]
        plot.figure(figsize = (10,10))
        '''
        if (self.tickmark[self.v.get()] == 1):
            plot.xticks(np.unique(self.x), self.ticks[self.v.get()])
        '''
        plot.xlabel(self.colname[self.v.get()])
        plot.hist(self.x, bins = self.w1.get())
        plot.title('Histogram')
        plot.show()
    def CorrDiagram(self): #Function to show Correaltion diagrams between columns
        """Render a seaborn heatmap of the dataset's correlation matrix."""
        plot.figure(figsize = (10,10))
        plot.title('Correlation Matrix')
        self.corr = self.dataset.corr()
        self.colname = self.dataset.columns.values
        corrlabel = [' ' for i in range(len(self.colname))]
        #ycorr = [' ' for i in range(len(self.corr.columns.values))]
        for self.val in range(len(self.colname)):
            corrlabel[self.val] = self.DecideLabel(self.colname[self.val],self.val)
        sns.heatmap(self.corr,
                    xticklabels=corrlabel,
                    yticklabels=corrlabel)
    def PieChart(self): #Function to plot a pie chart for a column
        """Open a window with a radio-button column picker; the Plot button
        triggers clickpiechart()."""
        self.piewindow = tk.Toplevel()
        self.piewindow.geometry("500x500")
        self.piewindow.title('Pie Chart')
        self.colname = self.dataset.columns.values
        self.col1 = {}
        self.shape = self.dataset.shape
        self.scrollwin = self.piewindow
        self.scrollwindow(450,450)
        self.b1 = tk.Label(self.frame, text = 'Select column to plot')
        self.b1.grid(row=0, column=0)
        self.v = tk.IntVar()
        self.v.set(0)
        for self.i in range(self.shape[1]):
            self.col1[self.i] = [self.colname[self.i],self.i]
        # NOTE(review): enumerate() over the self.col1 dict rebinds self.col1.
        for self.val, self.col1 in enumerate(self.col1):
            self.lft = tk.Radiobutton(self.frame,
                          text= self.DecideLabel(self.colname[self.val],self.val),
                          variable=self.v,
                          value=self.val)
            self.lft.grid(row = self.val+1 ,column = 0, sticky = 'w',ipadx = 0, ipady =0)
        self.clickbutton = tk.Button(self.frame, text = 'Plot', command = self.clickpiechart)
        self.clickbutton.grid(row=0, column=1)
def clickpiechart(self): #Function executed after button pressed in PieChart() function
plot.figure(figsize = (10,10))
'''
if (self.tickmark[self.v.get()] == 1):
self.labeldata = self.ticks[self.v.get()]
#plot.xticks(np.unique(self.x), self.ticks[self.v2.get()])
else:
'''
self.labeldata = self.dup_dataset.iloc[:,self.v.get()].unique()
plot.xlabel(self.colname[self.v.get()])
plot.title('Pie Chart')
plot.show()
plot.pie(self.dataset.groupby([self.dataset.iloc[:,self.v.get()]]).size(), labels = self.labeldata, autopct='%1.0f%%')
    def BarGraph(self): #Function to plot a bargraph between X an Y axis
        """Open a window with a bar-width slider and X/Y radio-button pickers;
        the Plot button triggers clickbargraph()."""
        self.barwindow = tk.Toplevel()
        self.barwindow.geometry("500x500")
        self.barwindow.title('Bar Graph')
        self.colname = self.dataset.columns.values
        self.col1 = {}
        self.shape = self.dataset.shape
        self.scrollwin = self.barwindow
        self.scrollwindow(450,450)
        self.w1 = tk.Scale(self.frame, from_=0.01, to=1, orient= 'horizontal', length = 200, label = 'Select Width', resolution = 0.01)
        self.w1.set(0.8)
        self.w1.grid(row = 0, column = 3, sticky = 'w',ipadx = 5)
        self.b1 = tk.Label(self.frame, text = 'Y axis')
        self.b2 = tk.Label(self.frame, text = 'X axis')
        self.b1.grid(row=0, column =1, ipadx = 0, ipady =0, sticky = 'w')
        self.b2.grid(row=0, column =0, ipadx = 0, ipady =0, sticky = 'w')
        self.v1 = tk.IntVar()
        self.v2 = tk.IntVar()
        self.v1.set(0)
        self.v2.set(0)
        for self.i in range(self.shape[1]):
            self.col1[self.i] = [self.colname[self.i],self.i]
        # NOTE(review): enumerate() over the self.col1 dict rebinds self.col1.
        for self.val, self.col1 in enumerate(self.col1):
            self.lft = tk.Radiobutton(self.frame,
                          text= self.DecideLabel(self.colname[self.val],self.val),
                          variable=self.v1,
                          value=self.val)
            self.lft.grid(row = self.val+1 ,column = 1,ipadx = 0, ipady =0, sticky = 'w')
            self.rgt = tk.Radiobutton(self.frame,
                          text= self.DecideLabel(self.colname[self.val],self.val),
                          variable=self.v2,
                          value=self.val)
            self.rgt.grid(row = self.val+1 ,column = 0,ipadx = 0, ipady =0, sticky = 'w')
        self.clickbutton = tk.Button(self.frame, text = 'Plot',command = self.clickbargraph)
        self.clickbutton.grid(row=0, column=2, ipadx = 0)
    def clickbargraph(self): #Function executed after button pressed in BarGraph() function
        """Bar-plot the selected X/Y columns with the slider's bar width."""
        self.x = self.dup_dataset.iloc[:,self.v2.get()]
        self.y = self.dup_dataset.iloc[:,self.v1.get()]
        plot.figure(figsize = (10,10))
        '''
        if (self.tickmark[self.v2.get()] == 1):
            plot.xticks(np.unique(self.x), self.ticks[self.v2.get()])
        if (self.tickmark[self.v1.get()] == 1):
            plot.yticks(np.unique(self.y), self.ticks[self.v1.get()])
        '''
        plot.xlabel(self.colname[self.v2.get()])
        plot.ylabel(self.colname[self.v1.get()])
        plot.bar(self.x,self.y,width = self.w1.get())
        plot.title('Bar Graph')
        plot.show()
    def Graph3D(self): #Function to plot a 3D graph with X, Y and Z axis
        """Open a window with X/Y/Z radio-button pickers for a 3-D scatter;
        the Plot button triggers clickgraph3d()."""
        self.G3Dwindow = tk.Toplevel()
        self.G3Dwindow.geometry("500x500")
        self.G3Dwindow.title('3D Scatter Graph')
        self.colname = self.dataset.columns.values
        self.col1 = {}
        self.shape = self.dataset.shape
        self.scrollwin = self.G3Dwindow
        self.scrollwindow(450,450)
        self.b1 = tk.Label(self.frame, text = 'Y axis')
        self.b2 = tk.Label(self.frame, text = 'X axis')
        self.b3 = tk.Label(self.frame, text = 'Z axis')
        self.b1.grid(row=0, column =1, ipadx = 0, ipady =0, sticky = 'w')
        self.b2.grid(row=0, column =0, ipadx = 0, ipady =0, sticky = 'w')
        self.b3.grid(row=0, column =2, ipadx = 0, ipady =0, sticky = 'w')
        self.v1 = tk.IntVar()
        self.v2 = tk.IntVar()
        self.v3 = tk.IntVar()
        self.v1.set(0)
        self.v2.set(0)
        self.v3.set(0)
        for self.i in range(self.shape[1]):
            self.col1[self.i] = [self.colname[self.i],self.i]
        # NOTE(review): enumerate() over the self.col1 dict rebinds self.col1.
        for self.val, self.col1 in enumerate(self.col1):
            self.lft = tk.Radiobutton(self.frame,
                          text= self.DecideLabel(self.colname[self.val],self.val),
                          variable=self.v1,
                          value=self.val)
            self.lft.grid(row = self.val+1 ,column = 1,ipadx = 0, ipady =0, sticky = 'w')
            self.rgt = tk.Radiobutton(self.frame,
                          text= self.DecideLabel(self.colname[self.val],self.val),
                          variable=self.v2,
                          value=self.val)
            self.rgt.grid(row = self.val+1 ,column = 0,ipadx = 0, ipady =0, sticky = 'w')
            self.zax = tk.Radiobutton(self.frame,
                          text= self.DecideLabel(self.colname[self.val],self.val),
                          variable=self.v3,
                          value=self.val)
            self.zax.grid(row = self.val+1 ,column = 2,ipadx = 0, ipady =0, sticky = 'w')
        self.clickbutton = tk.Button(self.frame, text = 'Plot', command = self.clickgraph3d)
        self.clickbutton.grid(row=0, column=3, ipadx = 0)
    def clickgraph3d(self): #Function executed after button pressed in Graph3D() function
        """3-D scatter of the selected X/Y/Z columns (encoded values), with
        original category labels restored on factorized axes."""
        self.x = self.dataset.iloc[:,self.v2.get()]
        self.y = self.dataset.iloc[:,self.v1.get()]
        self.z = self.dataset.iloc[:,self.v3.get()]
        self.fig = plot.figure(figsize=(10, 10))
        self.ax = self.fig.add_subplot(111, projection='3d')
        if (self.tickmark[self.v2.get()] == 1):
            self.ax.set_xticks(np.unique(self.x))
            self.ax.set_xticklabels(self.ticks[self.v2.get()])
        if (self.tickmark[self.v1.get()] == 1):
            self.ax.set_yticks(np.unique(self.y))
            self.ax.set_yticklabels(self.ticks[self.v1.get()])
        if (self.tickmark[self.v3.get()] == 1):
            self.ax.set_zticks(np.unique(self.z))
            self.ax.set_zticklabels(self.ticks[self.v3.get()])
        self.ax.scatter(self.x, self.y, self.z, s=50, alpha=0.6, edgecolors='w')
        self.ax.set_xlabel(self.colname[self.v2.get()])
        self.ax.set_ylabel(self.colname[self.v1.get()])
        self.ax.set_zlabel(self.colname[self.v3.get()])
    def BubbleChart(self): #Function to plot a Bubble chart
        """Open a window with X/Y/size pickers plus a magnification slider;
        the Plot button triggers clickbubblechart()."""
        self.bubblewindow = tk.Toplevel()
        self.bubblewindow.geometry("700x500")
        self.bubblewindow.title('Bubble Chart')
        self.colname = self.dataset.columns.values
        self.scrollwin = self.bubblewindow
        self.scrollwindow(650,450)
        self.col1 = {}
        self.shape = self.dataset.shape
        self.w1 = tk.Scale(self.frame, from_=1, to=500, orient= 'horizontal', length = 200, label = 'Bubble Magnification')
        self.w1.set(25)
        self.w1.grid(row = 0, column = 4, sticky = 'w',ipadx = 10)
        self.b1 = tk.Label(self.frame, text = 'Y axis')
        self.b2 = tk.Label(self.frame, text = 'X axis')
        self.b3 = tk.Label(self.frame, text = 'Bubble size attribute')
        self.b1.grid(row=0, column =1, ipadx = 0, ipady =0, sticky = 'w')
        self.b2.grid(row=0, column =0, ipadx = 0, ipady =0, sticky = 'w')
        self.b3.grid(row=0, column =2, ipadx = 0, ipady =0, sticky = 'w')
        self.v1 = tk.IntVar()
        self.v2 = tk.IntVar()
        self.v3 = tk.IntVar()
        self.v1.set(0)
        self.v2.set(0)
        self.v3.set(0)
        for self.i in range(self.shape[1]):
            self.col1[self.i] = [self.colname[self.i],self.i]
        # NOTE(review): enumerate() over the self.col1 dict rebinds self.col1.
        for self.val, self.col1 in enumerate(self.col1):
            self.lft = tk.Radiobutton(self.frame,
                          text= self.DecideLabel(self.colname[self.val],self.val),
                          variable=self.v1,
                          value=self.val)
            self.lft.grid(row = self.val+1 ,column = 1,ipadx = 0, ipady =0, sticky = 'w')
            self.rgt = tk.Radiobutton(self.frame,
                          text= self.DecideLabel(self.colname[self.val],self.val),
                          variable=self.v2,
                          value=self.val)
            self.rgt.grid(row = self.val+1 ,column = 0,ipadx = 0, ipady =0, sticky = 'w')
            self.zax = tk.Radiobutton(self.frame,
                          text= self.DecideLabel(self.colname[self.val],self.val),
                          variable=self.v3,
                          value=self.val)
            self.zax.grid(row = self.val+1 ,column = 2,ipadx = 0, ipady =0, sticky = 'w')
        self.clickbutton = tk.Button(self.frame, text = 'Plot', command = self.clickbubblechart)
        self.clickbutton.grid(row=0, column=3, ipadx = 0)
    def clickbubblechart(self): #Function executed after button pressed in BubbleChart() function
        """Scatter X vs Y with marker size = Z column * slider magnification."""
        self.x = self.dataset.iloc[:,self.v2.get()]
        self.y = self.dataset.iloc[:,self.v1.get()]
        self.z = self.dataset.iloc[:,self.v3.get()]
        self.fig = plot.figure(figsize=(10, 10))
        self.ax = self.fig.add_subplot(111)
        if (self.tickmark[self.v2.get()] == 1):
            self.ax.set_xticks(np.unique(self.x))
            self.ax.set_xticklabels(self.ticks[self.v2.get()])
        if (self.tickmark[self.v1.get()] == 1):
            self.ax.set_yticks(np.unique(self.y))
            self.ax.set_yticklabels(self.ticks[self.v1.get()])
        plot.scatter(self.x, self.y, s=self.z*self.w1.get(),
                     alpha=0.4, edgecolors='w')
        self.ax.set_title('Bubble Chart')
        self.ax.set_xlabel(self.colname[self.v2.get()])
        self.ax.set_ylabel(self.colname[self.v1.get()])
    def HueGraph(self): #Function to plot a hue graph between X and Y axis and a Hue column
        """Open a window with X/Y/hue radio-button pickers; the Plot button
        triggers clickhuegraph()."""
        self.huewindow = tk.Toplevel()
        self.huewindow.geometry("700x500")
        self.huewindow.title('Hue Graph')
        self.colname = self.dataset.columns.values
        self.col1 = {}
        self.shape = self.dataset.shape
        self.scrollwin = self.huewindow
        self.scrollwindow(650,450)
        self.b1 = tk.Label(self.frame, text = 'Y axis')
        self.b2 = tk.Label(self.frame, text = 'X axis')
        self.b3 = tk.Label(self.frame, text = 'Hue attribute')
        self.b1.grid(row=0, column =1, ipadx = 0, ipady =0, sticky = 'w')
        self.b2.grid(row=0, column =0, ipadx = 0, ipady =0, sticky = 'w')
        self.b3.grid(row=0, column =2, ipadx = 0, ipady =0, sticky = 'w')
        self.v1 = tk.IntVar()
        self.v2 = tk.IntVar()
        self.v3 = tk.IntVar()
        self.v1.set(0)
        self.v2.set(0)
        self.v3.set(0)
        for self.i in range(self.shape[1]):
            self.col1[self.i] = [self.colname[self.i],self.i]
        # NOTE(review): enumerate() over the self.col1 dict rebinds self.col1.
        for self.val, self.col1 in enumerate(self.col1):
            self.lft = tk.Radiobutton(self.frame,
                          text= self.DecideLabel(self.colname[self.val],self.val),
                          variable=self.v1,
                          value=self.val)
            self.lft.grid(row = self.val+1 ,column = 1,ipadx = 0, ipady =0, sticky = 'w')
            self.rgt = tk.Radiobutton(self.frame,
                          text= self.DecideLabel(self.colname[self.val],self.val),
                          variable=self.v2,
                          value=self.val)
            self.rgt.grid(row = self.val+1 ,column = 0,ipadx = 0, ipady =0, sticky = 'w')
            self.zax = tk.Radiobutton(self.frame,
                          text= self.DecideLabel(self.colname[self.val],self.val),
                          variable=self.v3,
                          value=self.val)
            self.zax.grid(row = self.val+1 ,column = 2,ipadx = 0, ipady =0, sticky = 'w')
        self.clickbutton = tk.Button(self.frame, text = 'Plot', command = self.clickhuegraph)
        self.clickbutton.grid(row=0, column=3, ipadx = 0)
    def clickhuegraph(self): #Function executed after button pressed in HueGraph() function
        """Scatter X vs Y colored by the hue column (coolwarm colormap)."""
        self.x = self.dataset.iloc[:,self.v2.get()]
        self.y = self.dataset.iloc[:,self.v1.get()]
        self.z = self.dataset.iloc[:,self.v3.get()]
        self.fig = plot.figure(figsize=(10, 10))
        self.ax = self.fig.add_subplot(111)
        if (self.tickmark[self.v2.get()] == 1):
            self.ax.set_xticks(np.unique(self.x))
            self.ax.set_xticklabels(self.ticks[self.v2.get()])
        if (self.tickmark[self.v1.get()] == 1):
            self.ax.set_yticks(np.unique(self.y))
            self.ax.set_yticklabels(self.ticks[self.v1.get()])
        print(self.z)
        plot.scatter(self.x, self.y, c=self.z, cmap=plot.cm.coolwarm,edgecolors='black',s =50)
        self.ax.set_title('Hue Graph')
        self.ax.set_xlabel(self.colname[self.v2.get()])
        self.ax.set_ylabel(self.colname[self.v1.get()])
        #self.ax.legend( labels = self.ticks[self.v3.get()]) #!!!INCORRECT-NEED TO FIX
        plot.show()
    def KernelPlot(self): #Function to plot a kernel density plot between X and Y axis
        """Open a window with X/Y radio-button pickers for a 2-D KDE plot;
        the Plot button triggers clickkernalplot()."""
        self.kernelwindow = tk.Toplevel()
        self.kernelwindow.geometry("500x500")
        self.kernelwindow.title('Kernel Density Plot')
        self.colname = self.dataset.columns.values
        self.col1 = {}
        self.shape = self.dataset.shape
        self.scrollwin = self.kernelwindow
        self.scrollwindow(450,450)
        self.b1 = tk.Label(self.frame, text = 'Y axis')
        self.b2 = tk.Label(self.frame, text = 'X axis')
        self.b1.grid(row=0, column =1, ipadx = 0, ipady =0, sticky = 'w')
        self.b2.grid(row=0, column =0, ipadx = 0, ipady =0, sticky = 'w')
        self.v1 = tk.IntVar()
        self.v2 = tk.IntVar()
        self.v1.set(0)
        self.v2.set(0)
        for self.i in range(self.shape[1]):
            self.col1[self.i] = [self.colname[self.i],self.i]
        # NOTE(review): enumerate() over the self.col1 dict rebinds self.col1.
        for self.val, self.col1 in enumerate(self.col1):
            self.lft = tk.Radiobutton(self.frame,
                          text= self.DecideLabel(self.colname[self.val],self.val),
                          variable=self.v1,
                          value=self.val)
            self.lft.grid(row = self.val+1 ,column = 1,ipadx = 0, ipady =0, sticky = 'w')
            self.rgt = tk.Radiobutton(self.frame,
                          text= self.DecideLabel(self.colname[self.val],self.val),
                          variable=self.v2,
                          value=self.val)
            self.rgt.grid(row = self.val+1 ,column = 0,ipadx = 0, ipady =0, sticky = 'w')
        self.clickbutton = tk.Button(self.frame, text = 'Plot', command = self.clickkernalplot)
        self.clickbutton.grid(row=0, column=2, ipadx = 0)
    def clickkernalplot(self): #Function executed after button pressed in KernelPlot() function
        """Draw a shaded 2-D kernel-density estimate of the selected X/Y columns."""
        self.x = self.dataset.iloc[:,self.v2.get()]
        self.y = self.dataset.iloc[:,self.v1.get()]
        self.fig = plot.figure(figsize=(10, 10))
        self.ax = self.fig.add_subplot(111)
        if (self.tickmark[self.v2.get()] == 1):
            self.ax.set_xticks(np.unique(self.x))
            self.ax.set_xticklabels(self.ticks[self.v2.get()])
        if (self.tickmark[self.v1.get()] == 1):
            self.ax.set_yticks(np.unique(self.y))
            self.ax.set_yticklabels(self.ticks[self.v1.get()])
        #self.ax.set_yticklabels(np.unique(self.y),self.ticks[self.v1.get()])
        self.ax = sns.kdeplot(self.x, self.y,
                              cmap="YlOrBr", shade=True, shade_lowest=False)
        self.ax.set_title('Kernel Density Plot')
        self.ax.set_xlabel(self.colname[self.v2.get()])
        self.ax.set_ylabel(self.colname[self.v1.get()])
        plot.show()
        #for i in len(np.unique(z)):
        #    ax.legend(i)
def KernelPlot3D(self): #Function to plot 3D kernel blot btween X and Y axis and a Label
    """Build the selection window for a per-label kernel density plot.

    Presents three radio-button columns (Y axis, X axis, and a label
    column used to split the data) plus a 'Plot' button wired to
    clickkernalplot3d().
    """
    self.kernel3dwindow = tk.Toplevel()
    self.kernel3dwindow.geometry("700x500")
    self.kernel3dwindow.title('Kernel Density Plot')
    self.colname = self.dataset.columns.values
    self.col1 = {}
    self.shape = self.dataset.shape
    # scrollwindow() builds the scrollable self.frame inside self.scrollwin.
    self.scrollwin = self.kernel3dwindow
    self.scrollwindow(650,450)
    self.b1 = tk.Label(self.frame, text = 'Y axis')
    self.b2 = tk.Label(self.frame, text = 'X axis')
    self.b3 = tk.Label(self.frame, text = 'Label')
    self.b1.grid(row=0, column =0, ipadx = 0, ipady =0, sticky = 'w')
    self.b2.grid(row=0, column =1, ipadx = 0, ipady =0, sticky = 'w')
    self.b3.grid(row=0, column =2, ipadx = 0, ipady =0, sticky = 'w')
    # v1/v2/v3 hold the selected column index for Y, X and the label.
    self.v1 = tk.IntVar()
    self.v2 = tk.IntVar()
    self.v3 = tk.IntVar()
    self.v1.set(0)
    self.v2.set(0)
    self.v3.set(0)
    for self.i in range(self.shape[1]):
        self.col1[self.i] = [self.colname[self.i],self.i]
    # NOTE(review): enumerate over the dict yields (index, key); this
    # rebinds self.col1 to the current key and stores loop variables on
    # self — presumably unintentional, confirm before refactoring.
    for self.val, self.col1 in enumerate(self.col1):
        self.lft = tk.Radiobutton(self.frame,
                    text= self.DecideLabel(self.colname[self.val],self.val),
                    variable=self.v1,
                    value=self.val)
        self.lft.grid(row = self.val+1 ,column = 0,ipadx = 0, ipady =0, sticky = 'w')
        self.rgt = tk.Radiobutton(self.frame,
                    text= self.DecideLabel(self.colname[self.val],self.val),
                    variable=self.v2,
                    value=self.val)
        self.rgt.grid(row = self.val+1 ,column = 1,ipadx = 0, ipady =0, sticky = 'w')
        self.zax = tk.Radiobutton(self.frame,
                    text= self.DecideLabel(self.colname[self.val],self.val),
                    variable=self.v3,
                    value=self.val)
        self.zax.grid(row = self.val+1 ,column = 2,ipadx = 0, ipady =0, sticky = 'w')
    self.clickbutton = tk.Button(self.frame, text = 'Plot', command = self.clickkernalplot3d)
    self.clickbutton.grid(row=0, column=3, ipadx = 0)
def clickkernalplot3d(self): #!!! Make more robust.. Function executed after button pressed in KernelPlot3D() function
    """Draw overlapping 2-D KDE plots on shared axes, one per unique
    value of the selected label column."""
    self.x = self.dataset.iloc[:,self.v2.get()]   # X-axis column
    self.y = self.dataset.iloc[:,self.v1.get()]   # Y-axis column
    self.z = self.dataset.iloc[:,self.v3.get()]   # label column used to split rows
    #print(z)
    self.label = np.unique(self.z)
    self.fig = plot.figure(figsize=(10, 10))
    self.ax = self.fig.add_subplot(111)
    #labeltemp = pandas.factorize(label)
    #z = pandas.factorize(z)
    self.dup_x = self.x
    self.dup_y = self.y
    self.shapeleb = self.label.shape
    self.w, self.h = self.shape[0], self.shapeleb[0]
    # Pre-fill per-label buckets with the sentinel string 'x'; real values
    # overwrite it below and leftover sentinels are stripped afterwards.
    self.tempx = [['x' for self.dup_x in range(self.w)] for self.dup_y in range(self.h)]
    self.tempy = [['x' for self.dup_x in range(self.w)] for self.dup_y in range(self.h)]
    # Pair each x (and y) value with its label for the bucketing pass.
    self.newarrayx = np.column_stack((self.x,self.z))
    self.newarrayy = np.column_stack((self.y,self.z))
    for self.i in range(self.shapeleb[0]):
        for self.k in range(self.shape[0]):
            if(self.label[self.i] == self.newarrayx[self.k][1]):
                self.tempx[self.i][self.k] = (self.newarrayx[self.k][0])
    for self.i in range(self.shapeleb[0]):
        for self.k in range(self.shape[0]):
            if(self.label[self.i] == self.newarrayy[self.k][1]):
                self.tempy[self.i][self.k] = (self.newarrayy[self.k][0])
    # Strip the unused 'x' sentinel slots so each bucket holds only data.
    for self.i in range(self.shapeleb[0]):
        while 'x' in self.tempy[self.i]:
            self.tempy[self.i].remove('x')
    for self.i in range(self.shapeleb[0]):
        while 'x' in self.tempx[self.i]:
            self.tempx[self.i].remove('x')
    #palette = itertools.cycle(sns.color_palette())
    # One colormap per label, cycling if there are more labels than maps.
    self.palette = itertools.cycle(['Blues','Reds','Greens','Greys','winter','Oranges','rainbow'])
    print(self.tempx, self.tempy)
    for self.i in range(self.shapeleb[0]):
        try:
            self.ax = sns.kdeplot(self.tempx[self.i], self.tempy[self.i],
                        cmap=next(self.palette), shade=True, shade_lowest=False)
        except Exception:
            # NOTE(review): silently skips any label whose kdeplot call
            # raises — confirm this best-effort behavior is intended.
            pass
    self.ax.set_title('Kernel Density Plot')
    self.ax.set_xlabel(self.colname[self.v2.get()])
    self.ax.set_ylabel(self.colname[self.v1.get()])
    plot.show()
    #for i in len(np.unique(z)):
    #    ax.legend(i)
def ViolinPlot(self): #Function to display a Violin plot between X and Y axis
    """Build the selection window for a violin plot (X and Y columns),
    with a 'Plot' button wired to clickviolinplot()."""
    self.violinwindow = tk.Toplevel()
    self.violinwindow.geometry("500x500")
    self.violinwindow.title('Violin Plot')
    self.colname = self.dataset.columns.values
    self.col1 = {}
    self.shape = self.dataset.shape
    self.scrollwin = self.violinwindow
    self.scrollwindow(450,450)
    self.b1 = tk.Label(self.frame, text = 'Y axis')
    self.b2 = tk.Label(self.frame, text = 'X axis')
    self.b1.grid(row=0, column =1, ipadx = 0, ipady =0, sticky = 'w')
    self.b2.grid(row=0, column =0, ipadx = 0, ipady =0, sticky = 'w')
    # v1/v2 hold the selected column index for Y and X.
    self.v1 = tk.IntVar()
    self.v2 = tk.IntVar()
    self.v1.set(0)
    self.v2.set(0)
    for self.i in range(self.shape[1]):
        self.col1[self.i] = [self.colname[self.i],self.i]
    # NOTE(review): enumerate over the dict yields (index, key) and
    # rebinds self.col1; loop variables live on self.
    for self.val, self.col1 in enumerate(self.col1):
        self.lft = tk.Radiobutton(self.frame,
                    text= self.DecideLabel(self.colname[self.val],self.val),
                    variable=self.v1,
                    value=self.val)
        self.lft.grid(row = self.val+1 ,column = 1,ipadx = 0, ipady =0, sticky = 'w')
        self.rgt = tk.Radiobutton(self.frame,
                    text= self.DecideLabel(self.colname[self.val],self.val),
                    variable=self.v2,
                    value=self.val)
        self.rgt.grid(row = self.val+1 ,column = 0,ipadx = 0, ipady =0, sticky = 'w')
    self.clickbutton = tk.Button(self.frame, text = 'Plot', command = self.clickviolinplot)
    self.clickbutton.grid(row=0, column=2, ipadx = 0)
def clickviolinplot(self): #Function executed after button pressed in ViolinPlot() function
    """Draw a violin plot of the selected X/Y columns.

    Reads from self.dup_dataset rather than self.dataset — presumably
    the un-encoded copy of the data; TODO confirm.
    """
    self.x = self.dup_dataset.iloc[:,self.v2.get()]
    self.y = self.dup_dataset.iloc[:,self.v1.get()]
    plot.figure(figsize = (10,10))
    '''
    if (self.tickmark[self.v2.get()] == 1):
        plot.xticks(np.unique(self.x), self.ticks[self.v2.get()])
        #print('x activated')
    if (self.tickmark[self.v1.get()] == 1):
        plot.yticks(np.unique(self.y), self.ticks[self.v1.get()])
        #print('y activated')
    '''
    plot.xlabel(self.colname[self.v2.get()])
    plot.ylabel(self.colname[self.v1.get()])
    sns.violinplot(x=self.x, y=self.y, data = self.dup_dataset)
    #plot.bar(x,y,width = w1.get())
    plot.title('Violin Plot')
    plot.show()
def PairGraph3D(self): #Function to plot a pairwise graph
    """Build the selection window for a pairwise plot: one column of
    radio buttons choosing the hue column, wired to clickpairgraph()."""
    self.pairG3Dwindow = tk.Toplevel()
    self.pairG3Dwindow.geometry("500x500")
    self.pairG3Dwindow.title('Pairwise Graph')
    self.colname = self.dup_dataset.columns.values
    self.col1 = {}
    self.shape = self.dataset.shape
    self.scrollwin = self.pairG3Dwindow
    self.scrollwindow(450,450)
    self.b3 = tk.Label(self.frame, text = 'Hue column')
    self.b3.grid(row=0, column =0, ipadx = 0, ipady =0, sticky = 'w')
    # Only v3 (hue) is read by clickpairgraph; v1/v2 are initialised but
    # not used by this window.
    self.v1 = tk.IntVar()
    self.v2 = tk.IntVar()
    self.v3 = tk.IntVar()
    self.v1.set(0)
    self.v2.set(0)
    self.v3.set(0)
    for self.i in range(self.shape[1]):
        self.col1[self.i] = [self.colname[self.i],self.i]
    for self.val, self.col1 in enumerate(self.col1):
        self.zax = tk.Radiobutton(self.frame,
                    text= self.DecideLabel(self.colname[self.val],self.val),
                    variable=self.v3,
                    value=self.val)
        self.zax.grid(row = self.val+1 ,column = 0,ipadx = 0, ipady =0, sticky = 'w')
    self.clickbutton = tk.Button(self.frame, text = 'Plot', command = self.clickpairgraph)
    self.clickbutton.grid(row=0, column=1, ipadx = 0)
def clickpairgraph(self): #Function executed after button pressed in PairGraph3D() function
    """Draw a seaborn pairplot of the whole dataset coloured by the
    selected hue column.

    NOTE(review): `size` is the pre-0.9 seaborn parameter (renamed to
    `height` in later releases) — confirm the pinned seaborn version.
    """
    #if (self.tickmark[self.v3.get()] == 1):
        #plot.yticks(np.unique(self.y), self.ticks[self.v3.get()])
    sns.pairplot(self.dataset, hue = self.colname[self.v3.get()] ,size=1.8, aspect=1.8,
                palette='rainbow',
                plot_kws=dict(edgecolor="black", linewidth=0.5))
def viewencodedvalues(self): #Display Label encoded values in dataset if present
    """Open a window listing, per label-encoded column, the mapping from
    original string label to its numeric encoded value."""
    #print('INSIDE VIEWENCODEDVALUES FUNCTION')
    self.encodedvalueswindow = tk.Toplevel()
    self.encodedvalueswindow.geometry("700x500")
    self.encodedvalueswindow.title('Encoded Values')
    self.colname = self.dataset.columns.values
    self.scrollwin = self.encodedvalueswindow
    self.scrollwindow(650,450)
    for self.val in range(len(self.colname)):
        # tickmark[val] == 1 marks columns that were label-encoded;
        # self.ticks[val] holds the original string labels in order.
        if(self.tickmark[self.val] == 1):
            for self.k in range(len(np.unique(self.dataset.iloc[:,self.val]))):
                print(self.ticks[self.val][self.k] , '=>' , np.unique(self.dataset.iloc[:,self.val])[self.k])
                tk.Label(self.frame, text = self.DecideLabel(self.colname[self.val],self.val)+ ' |', font=("Courier", 15) ).grid(row = 0, column = self.val, sticky = 'w',ipadx = 10)
                tk.Label(self.frame, text = str(self.ticks[self.val][self.k]) + '=>' + str(np.unique(self.dataset.iloc[:,self.val])[self.k]), font=("Courier", 15) ).grid( row=self.k+1, column = self.val, sticky = 'w', ipadx = 10)
def is_number(self, testn):
    """Return float(testn) when *testn* parses as a number.

    When the conversion raises ValueError, *testn* is returned
    unchanged, so callers (DecideLabel) can distinguish numeric from
    textual column headings by the returned type.
    """
    try:
        converted = float(testn)
    except ValueError:
        # Not numeric: hand the original value back untouched.
        return testn
    return converted
def DecideLabel(self, name, i):
    """Return a display label for column *i*.

    If *name* (the column heading) is non-numeric text, use it as-is.
    Otherwise (numeric heading, i.e. no proper column name) fall back
    to the generic label "column N" (1-based).
    """
    # is_number() returns the original string for text and a float for
    # numbers, so the returned type encodes the decision.  The original
    # if/elif pair was redundant (the branches were exact complements).
    if isinstance(self.is_number(name), str):
        return name
    return "column " + str(i + 1)
class Solution(object):
    def lengthOfLongestSubstring(self, s):
        """Return the length of the longest substring of *s* with no
        repeating characters.

        Sliding-window scan: ``window_start`` marks the left edge of the
        current duplicate-free window and ``last_seen`` maps each
        character to the index of its most recent occurrence.
        O(len(s)) time, O(min(len(s), alphabet)) space.

        :type s: str
        :rtype: int
        """
        # Renamed the loop variable that previously shadowed the builtin
        # ``next`` and replaced the index loop with enumerate().
        window_start, result = 0, 0
        last_seen = {}
        for idx, ch in enumerate(s):
            if ch in last_seen:
                # Advance past the previous occurrence of ch, but never
                # move the window backwards.
                window_start = max(window_start, last_seen[ch] + 1)
            last_seen[ch] = idx
            result = max(result, idx - window_start + 1)
        return result
|
#!/usr/bin/env python
# test has been developed by Robert Harakaly and changed for SAM by Victor Galaktionov
# get information about a LFC file or directory in the name server (lfc_statgx)
# meta: proxy=true
# meta: preconfig=../../LFC-config
import os, lfc, sys
from testClass import _test, _ntest, _testRunner, SAM_Run, LFC_VO
class test_root_dir(_test):
    """Check that lfc_statg succeeds on the name-server root directory."""
    def info(self):
        return "Test root dir: "
    def test(self):
        # Stat "/" by path; the guid argument is left empty.
        statg=lfc.lfc_filestatg()
        ret=lfc.lfc_statg("/","", statg)
        return (statg,ret)
    def ret(self):
        # No reference value needed; compare() only checks return codes.
        return None
    def compare(self, testVal, retVal):
        # NOTE(review): ret() returns None yet retVal is unpacked as a
        # 2-tuple here — relies on the _testRunner contract; confirm.
        (ret, retRetVal) = retVal
        (test, testRetVal) = testVal
        return retRetVal == testRetVal
class test_existing_guid(_test):
    """Check lfc_statg lookup-by-guid for a file created in prepare().

    NOTE: Python 2 code (long literals such as 24553L, octal 0664).
    """
    def info(self):
        return "Test existing guid: "
    def prepare(self):
        # Create a fresh test entry under the VO directory, removing any
        # leftover entry from a previous run first.
        self.guid = self.get_guid()
        self.name = LFC_VO + "/python_filestatg_guid_test"
        if not lfc.lfc_stat(self.name,None): # new
            self.clean() # new
        ret = lfc.lfc_creatg(self.name,self.guid,0664)
    def clean(self):
        lfc.lfc_unlink(self.name)
    def test(self):
        # Look the file up by guid only (empty path argument).
        statg=lfc.lfc_filestatg()
        ret=lfc.lfc_statg("",self.guid, statg)
        return (statg,ret)
    def ret(self):
        # Reference metadata; only the fields checked in compare() below
        # matter (nlink, filesize, fileclass, status, guid) — the rest
        # are placeholders.
        retval=lfc.lfc_filestatg()
        retval.fileid=24553L
        retval.filemode=33204
        retval.nlink=1
        retval.uid=137
        retval.gid=101
        retval.filesize=0L
        retval.atime=1184059742
        retval.mtime=1171381061
        retval.ctime=1171381061
        retval.fileclass=0
        retval.status='-'
        retval.guid=self.guid
        retval.csumtype=" "
        retval.csumvalue=" "
        return retval
    def compare(self, testVal, retVal):
        # Both arguments are (statg, return-code) pairs; field-by-field
        # comparison only happens when the return codes already match.
        (ret, retRetVal) = retVal
        (test, testRetVal) = testVal
        retval = True
        if (retRetVal == testRetVal):
            retval = retval & ( test.nlink == ret.nlink )
            retval = retval & ( test.filesize == ret.filesize )
            retval = retval & ( test.fileclass == ret.fileclass )
            retval = retval & ( test.status == ret.status )
            retval = retval & ( test.guid == ret.guid )
            # retval = retval & ( test.csumtype == ret.csumtype )
            # retval = retval & ( test.csumvalue == ret.csumvalue )
        else:
            retval = False
        return retval
class test_nonexisting_guid(_test):
    """Expect lfc_statg to fail (return -1) for a guid that does not exist."""
    def __init__(self):
        # Expected lfc_statg return code for a missing guid.
        self.retVal = -1
    def info(self):
        return "Test nonexisting guid: "
    def compare(self, testVal, retVal):
        # Only the integer return codes matter for a nonexisting guid;
        # the statg payloads are ignored.
        (test, testRetVal) = testVal
        (ret, retRetVal) = retVal
        return retRetVal == testRetVal
class test_nonexistent_file(_test):
    """Expect lfc_stat to fail (return -1) for a path that does not exist.

    NOTE: Python 2 code (long literals such as 0L).
    """
    def __init__(self):
        # Expected return code for the failed stat call.
        self.retVal = -1
    def info(self):
        return "Check for nonexistent file: "
    def test(self):
        stat=lfc.lfc_filestat()
        # statPtr=lfc.lfc_filestatPtr(stat)
        # ret=lfc.lfc_stat("/nonexisting",statPtr)
        ret=lfc.lfc_stat("/nonexisting",stat)
        return stat,ret
    def ret(self):
        # Placeholder metadata; compare() below only checks return codes.
        retval=lfc.lfc_filestatg()
        retval.fileid=0L
        retval.filemode=0
        retval.nlink=-1
        retval.uid=0
        retval.gid=0
        retval.filesize=0L
        retval.atime=0
        retval.mtime=0
        retval.ctime=0
        retval.fileclass=0
        retval.status=' '
        retval.guid=""
        retval.csumtype=""
        retval.csumvalue=""
        return retval
    def compare(self, testVal, retVal):
        # Only the return codes are compared; the stat payloads are ignored.
        (ret, retRetVal) = retVal
        (test, testRetVal) = testVal
        retval = True
        if (retRetVal == testRetVal):
            retval = True # retval & ( test.nlink == ret.nlink )
        else:
            retval = False
        return retval
class lfc_statg_test(_testRunner):
    """Test-runner bundling the lfc_statg checks executed via SAM_Run."""
    def __init__(self):
        self.name = "lfc_statg_test"
        # NOTE(review): test_nonexisting_guid is defined above but not
        # included in this list — confirm whether that is intentional.
        self.tests=[test_root_dir,test_existing_guid,test_nonexistent_file]
#************* Interface for SAM and Python tests ***************
SAM_Run(lfc_statg_test)
|
import base64
import json
import time
import urllib.parse
import hashlib
import hmac
import tempfile
import threading
from django.core.files import File
from django.core.files.storage import Storage
import requests.exceptions
from django.utils.functional import cached_property
# Extra HTTP headers sent with every request to the B2 API.
extra_headers = {
    'User-Agent': 'Backathon+Python/3 <github.com/brownan/backathon>'
}
# Timeout used in HTTP calls (seconds)
TIMEOUT = 5
class B2Storage(Storage):
    """Django storage backend for Backblaze B2

    B2's object store doesn't fit perfectly into Django's storage abstraction
    for our use case. B2 has three classes of transactions: class A are free,
    class B cost a bit of money, and class C cost an order of magnitude more
    per request. So this class and the calling code should access B2 in a
    pattern that minimises unnecessary requests.

    This turns out to be tricky. A naïve implementation may implement file
    metadata functions (Storage.size(), Storage.exists(), etc) as calls to
    b2_get_file_info and downloads as a call to b2_download_file_by_name,
    both class B transactions. But notice that B2 gives you quite a lot of
    information along with a b2_list_file_names call, which can return file
    metadata for up to 1000 files in bulk and therefore save on
    transaction costs despite being a class C transaction. Unfortunately,
    Django's Storage class doesn't have an equivalent call; the listdir()
    call is expected to return names, not a data structure of information on
    each file.

    To support workflows that loop over results of listdir() and get metadata
    on each one, we implement a metadata cache. Calls to get metadata on
    files that have recently been iterated over will not incur another call
    to a b2 API.
    """
    def __init__(self,
                 account_id,
                 application_key,
                 bucket_name,
                 ):
        """
        :param account_id: B2 account identifier
        :param application_key: B2 application key paired with account_id
        :param bucket_name: name of the target bucket (its ID is resolved
            lazily by the ``bucket_id`` property)
        """
        self.account_id = account_id
        self.application_key = application_key
        self.bucket_name = bucket_name
        # Thread local variables hold the requests Session object, as well as
        # various authorization tokens acquired from B2
        self._local = threading.local()
    @cached_property
    def bucket_id(self):
        """The bucket ID

        Some B2 API calls require the bucket ID, some require the name. Since
        the name is part of our config, we have to query for the ID.
        Cached after the first successful lookup (cached_property).
        Raises IOError when no bucket with the configured name exists.
        """
        data = self._call_api("b2_list_buckets", {'accountId': self.account_id})
        for bucketinfo in data['buckets']:
            if bucketinfo['bucketName'] == self.bucket_name:
                return bucketinfo['bucketId']
        raise IOError("No such bucket name {}".format(self.bucket_name))
    @property
    def _session(self):
        """Per-thread requests.Session with the common extra headers set."""
        # Initialize a new requests.Session for this thread if one doesn't
        # exist
        try:
            return self._local.session
        except AttributeError:
            session = requests.Session()
            session.headers.update(extra_headers)
            self._local.session = session
            return session
    @property
    def _metadata_cache(self):
        """Per-thread dict mapping file name -> B2 metadata dict."""
        # Maps filenames to metadata dicts as returned by b2_get_file_info
        # and several other calls. This is used to cache metadata between
        # calls to Storage.listdir() and other Storage.* methods that get
        # metadata, so the file doesn't have to be downloaded if calling code
        # just wants to loop over file names and get some metadata about
        # each one.
        try:
            return self._local.metadata_cache
        except AttributeError:
            self._local.metadata_cache = {}
            return self._local.metadata_cache
    def _post_with_backoff_retry(self, *args, **kwargs):
        """Calls self._session.post with the given arguments

        Implements automatic retries and backoffs as per the B2 documentation:
        exponential backoff (1s doubling up to 64s) for connection failures
        and 503s, Retry-After-driven waits for 429s.
        """
        if "timeout" not in kwargs:
            kwargs['timeout'] = TIMEOUT
        delay = 1
        max_delay = 64
        while True:
            try:
                response = self._session.post(*args, **kwargs)
            except (requests.exceptions.ConnectionError,
                    requests.exceptions.Timeout):
                # No response from server at all
                if max_delay < delay:
                    # Give up
                    raise
                time.sleep(delay)
                delay *= 2
            else:
                if response.status_code == 503:
                    # Service unavailable; backoff applies, but once the
                    # delay budget is exhausted the 503 response is
                    # returned to the caller rather than raised.
                    if max_delay < delay:
                        # Give up
                        return response
                    time.sleep(delay)
                    delay *= 2
                elif response.status_code == 429:
                    # Too many requests: honor Retry-After (default 1s)
                    # and reset the exponential backoff.
                    time.sleep(int(response.headers.get('Retry-After', 1)))
                    delay = 1
                else:
                    # Success. Or at least, not a response that we want to retry
                    return response
    def _authorize_account(self):
        """Calls b2_authorize_account to get a session authorization token

        If successful, sets the authorization_token, api_url and
        download_url on the thread-local storage.
        If unsuccessful, raises an IOError with a description of the error
        """
        # b2_authorize_account uses HTTP Basic auth built from the
        # account id and application key.
        response = self._post_with_backoff_retry(
            "https://api.backblazeb2.com/b2api/v1/b2_authorize_account",
            headers={
                'Authorization': 'Basic {}'.format(
                    base64.b64encode("{}:{}".format(
                        self.account_id,
                        self.application_key
                    ).encode("ASCII")).decode("ASCII")
                ),
            },
            json={},
        )
        try:
            data = response.json()
        except ValueError:
            # Invalid or no JSON returned from response
            response.raise_for_status()
            raise IOError("Invalid json response from B2")
        if response.status_code != 200:
            raise IOError("{}: {}".format(response.status_code,
                                          data['message']))
        self._local.authorization_token = data['authorizationToken']
        self._local.api_url = data['apiUrl']
        self._local.download_url = data['downloadUrl']
def _call_api(self, api_name, data):
"""Calls the given API with the given data
If the account hasn't been authorized yet, calls b2_authorize_account
first to obtain the authorization token
If successful, returns the response json object
If unsuccessful, raises an IOError with a description of the error
"""
api_url = getattr(self._local, 'api_url', None)
authorization_token = getattr(self._local, 'authorization_token', None)
if api_url is None or authorization_token is None:
self._authorize_account()
response = self._post_with_backoff_retry(
"{}/b2api/v1/{}".format(self._local.api_url, api_name),
headers = {
'Authorization': self._local.authorization_token,
},
json=data,
)
try:
data = response.json()
except ValueError:
response.raise_for_status()
raise IOError("Invalid json response from B2 on call to {}".format(api_name))
if response.status_code == 401 and data['code'] == "expired_auth_token":
# Auth token has expired. Retry after getting a new one.
self._local.api_url = None
self._local.authorization_token = None
return self._call_api(api_name, data)
if response.status_code != 200:
raise IOError(data['message'])
return data
    def _get_upload_url(self):
        """Sets self.upload_url and self.upload_token or raises IOError

        Upload URL/token are per-thread and can expire; callers refresh
        them by calling this again on upload failures.
        """
        data = self._call_api("b2_get_upload_url",
                              data={'bucketId': self.bucket_id})
        self._local.upload_url = data['uploadUrl']
        self._local.upload_token = data['authorizationToken']
def _upload_file(self, name, content):
"""Calls b2_upload_file to upload the given data to the given name
If necessary, calls b2_get_upload_url to get a new upload url
:param content: a django File object of some sort
:type content: File
:returns: the result from the upload call, a dictionary of object
metadata
"""
if (getattr(self._local, "upload_url", None) is None or
getattr(self._local, "upload_token", None) is None
):
self._get_upload_url()
response = None
response_data = None
filename = urllib.parse.quote(name, encoding="utf-8")
content_type = getattr(content, 'content_type', 'b2/x-auto')
digest = hashlib.sha1()
content.seek(0)
for chunk in content.chunks():
digest.update(chunk)
# Don't use the backoff handler when uploading. For most problems
# we just call _get_upload_url() and try again immediately
for _ in range(5):
response = None
response_data = None
content.seek(0)
try:
response = self._session.post(
self._local.upload_url,
headers = {
'Authorization': self._local.upload_token,
'X-Bz-File-Name': filename,
'Content-Type': content_type,
'Content-Length': content.size,
'X-Bz-Content-Sha1': digest.hexdigest(),
},
timeout=TIMEOUT,
data=content,
)
except (requests.exceptions.ConnectionError,
requests.exceptions.Timeout):
self._get_upload_url()
continue
try:
response_data = response.json()
except ValueError():
raise IOError("Invalid json returned from B2 API")
if response.status_code == 401 and response_data['code'] == "expired_auth_token":
self._get_upload_url()
continue
if response.status_code == 408:
# Request timeout
self._get_upload_url()
continue
if 500 <= response.status_code <= 599:
# Any server errors
self._get_upload_url()
continue
# Any other errors indicate a permanent problem with the request
if response.status_code != 200:
raise IOError(response_data['message'])
return response_data
# The loop exited, so all 5 tries failed. See if we can raise an
# appropriate error from the last try
if response_data is not None:
raise IOError(response_data['message'])
if response is not None:
response.raise_for_status()
# This path could be hit if the last failure was due to a connection
# error or timeout
raise IOError("Upload failed, unknown reason")
    def _download_file(self, name):
        """Downloads a file by name

        Returns (metadata dict, file handle)
        file handle is open for reading in binary mode.
        Raises IOError if there was a problem downloading the file
        """
        # Spill to disk only for large downloads.
        f = tempfile.SpooledTemporaryFile(
            suffix=".b2tmp"
        )
        if (getattr(self._local, "download_url", None) is None or
                getattr(self._local, "authorization_token", None) is None
        ):
            self._authorize_account()
        digest = hashlib.sha1()
        response = self._session.get(
            "{}/file/{}/{}".format(
                self._local.download_url,
                self.bucket_name,
                name
            ),
            timeout=TIMEOUT,
            headers={
                'Authorization': self._local.authorization_token,
            },
            stream=True,
        )
        with response:
            if response.status_code != 200:
                try:
                    resp_json = response.json()
                except ValueError:
                    response.raise_for_status()
                    raise IOError("Non-200 status code returned for download "
                                  "request")
                raise IOError(resp_json['message'])
            # Stream the body into the temp file, hashing as we go so the
            # content can be verified against the server-provided SHA1.
            for chunk in response.iter_content(chunk_size=B2File.DEFAULT_CHUNK_SIZE):
                digest.update(chunk)
                f.write(chunk)
        # Constant-time comparison of the hex digests.
        if not hmac.compare_digest(
                digest.hexdigest(),
                response.headers['X-Bz-Content-Sha1'],
        ):
            f.close()
            raise IOError("Corrupt download: Sha1 doesn't match")
        # Rebuild the metadata dict from the response headers so it has
        # the same shape as b2_get_file_info results.
        data = {
            'fileId': response.headers['X-Bz-File-Id'],
            'fileName': response.headers['X-Bz-File-Name'],
            'contentSha1': response.headers['X-Bz-Content-Sha1'],
            'contentLength': response.headers['Content-Length'],
            'contentType': response.headers['Content-Type'],
            'uploadTimestamp': response.headers['X-Bz-Upload-Timestamp'],
            'fileInfo': {},
        }
        for h in response.headers:
            # Custom file info headers carry an "X-Bz-Info-" prefix.
            if h.startswith("X-Bz-Info-"):
                data['fileInfo'][h[10:]] = response.headers[h]
        f.seek(0)
        return data, f
    def _save(self, name, content):
        """Saves a file under the given name

        :returns: the name the object was stored under, as reported by B2
        """
        metadata = self._upload_file(name, content)
        return metadata['fileName']
    def _open(self, name, mode="rb"):
        """Opens a file for reading. Opening a file for writing is not
        currently supported

        Returns a lazy B2File; the object body is only downloaded when
        its contents (or uncached metadata) are first accessed.
        """
        if "w" in mode:
            raise NotImplementedError("Opening files for writing is not "
                                      "supported")
        if "b" not in mode:
            raise NotImplementedError("Automatic encoding is not supported. "
                                      "Open file in binary mode")
        # Reuse metadata primed by a previous listdir() when available.
        return B2File(name, self, data=self._metadata_cache.get(name, None))
    def _get_files_by_prefix(self, prefix):
        """Helper method for listdir(). See listdir() docstring

        Generator yielding file names under *prefix*, batching
        b2_list_file_names calls 1000 entries at a time and priming the
        per-thread metadata cache as a side effect.
        """
        if prefix and not prefix.endswith("/"):
            prefix = prefix + "/"
        start_filename = None
        while True:
            data = self._call_api(
                "b2_list_file_names",
                {
                    'bucketId': self.bucket_id,
                    # 1000 is the maximum number of items that can be
                    # returned in a single class C transaction. The API can
                    # return more but it will charge multiple transactions.
                    # This may be worth it if we discover it gives us
                    # performance gains.
                    'maxFileCount': 1000,
                    'prefix': prefix,
                    'startFileName': start_filename,
                }
            )
            # Only 'upload' actions are live files; hidden/unfinished
            # entries are skipped.
            self._metadata_cache.update(
                (d['fileName'], d) for d in data['files']
                if d['action'] == 'upload'
            )
            yield from (d['fileName'] for d in data['files'] if d['action'] == 'upload')
            start_filename = data['nextFileName']
            if start_filename is None:
                break
        # NOTE(review): the cache is dropped once the listing is fully
        # consumed, so cached metadata lookups must happen during iteration.
        self._metadata_cache.clear()
    ############################
    # Public Storage API methods
    ############################
    def get_available_name(self, name, max_length=None):
        """Overwrite existing files with the same name"""
        return name
    def get_valid_name(self, name):
        """No special filename adjustments are made

        Instead we let the B2 api return an error on invalid filenames.
        Because of our policy of overwriting existing files, the choice was
        made to not adjust names automatically.
        """
        return name
    def delete(self, name):
        """Deletes the given file

        In B2 this calls the b2_hide_file API. If you want to recover the space
        taken by this file, make sure you have your bucket lifecycle policy
        set to delete hidden files"""
        self._call_api(
            "b2_hide_file",
            {
                'bucketId': self.bucket_id,
                'fileName': name,
            }
        )
    def exists(self, name):
        """exists() call is not currently implemented

        There's not an efficient way to check this. We could download the
        file, incurring a class B transaction and the bandwidth costs,
        or we could list all files in the bucket incurring a class C
        transaction. Callers are advised to listdir() and use the
        resulting list to check what they need.
        """
        raise NotImplementedError("Not currently implemented")
    def listdir(self, path):
        """List files with a given path prefix in the bucket

        This method returns an iterator over filenames with the given prefix.
        Note that this means it's effectively a recursive directory listing
        as subdirectory contents are also listed.

        Because B2 is an object store, the returned list of directories is
        always empty. Only files are returned.

        :returns: ([], filename_iterator)

        For very large buckets, this method efficiently batches calls to the
        API to return up to 1000 files per call. No more than 1000 entries
        are held in memory at a time.

        This method also caches the metadata for each file. Callers that
        iterate over entries from this call may use the metadata methods such
        as B2Storage.size(), B2Storage.get_modified_time(), or access
        metadata properties on B2File objects returned from B2Storage.open()
        without incurring additional B2 service API calls.

        Any other usage pattern may require additional calls to the B2 API.
        """
        return [], self._get_files_by_prefix(path)
    def size(self, name):
        # Served from the metadata cache when primed by listdir();
        # otherwise triggers a download via B2File.load().
        return self._open(name).size
    def url(self, name):
        # TODO
        raise NotImplementedError("Not currently implemented")
    def get_accessed_time(self, name):
        raise NotImplementedError("Access time is not implemented for B2")
    def get_created_time(self, name):
        # NOTE(review): B2 metadata only carries the upload timestamp, so
        # creation time is reported as the modified time — confirm intended.
        return self._open(name).modified_time
    def get_modified_time(self, name):
        return self._open(name).modified_time
class B2File(File):
    """An object in B2

    Such objects hold metadata, and the contents are downloaded on demand
    when requested.
    """
    def __init__(self, name, storage, data=None):
        """Initiate a new B2File, representing an object that exists in B2

        data is a dictionary as returned by b2_get_file_info. It
        should have (at least) these keys:

        * fileId
        * fileName
        * contentSha1
        * contentLength
        * contentType
        * fileInfo
        * uploadTimestamp

        When data is None, metadata (and contents) are fetched lazily via
        load() on first access.

        :type name: str
        :type storage: B2Storage
        :type data: dict
        """
        self.name = name
        self.storage = storage
        self.mode = "rb"        # downloads are always binary
        self.data = data        # metadata dict, or None until load()
        self._file = None       # file handle, populated by load()
    @property
    def size(self):
        if self.data is None:
            self.load()
        return self.data['contentLength']
    @property
    def sha1(self):
        if self.data is None:
            self.load()
        return self.data['contentSha1']
    @property
    def content_type(self):
        if self.data is None:
            self.load()
        return self.data['contentType']
    @property
    def modified_time(self):
        if self.data is None:
            self.load()
        return self.data['uploadTimestamp']
    @property
    def file(self):
        if self._file is None:
            self.load()
        return self._file
    def load(self):
        # Downloads the object body and refreshes metadata in one call.
        self.data, self._file = self.storage._download_file(self.name)
|
import time
import json
import random
from flask import Flask, jsonify
from flask_socketio import SocketIO, emit, join_room, leave_room
import AnomalyDetector
# Flask + Socket.IO wiring; CORS is wide open (development setting).
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app, cors_allowed_origins='*')
# Shared anomaly detector used by the 'socketboi' handler below.
detector = AnomalyDetector.AnomalyDetector()
@socketio.on('echo')
def echo(data):
    # Debug endpoint: log the payload and echo it back to the sender.
    print(data)
    emit('echo', data)
@socketio.on('subscribe')
def client_connected(data):
    '''
    Broadcast the subscribe event to all clients. Expected payload:
    {
        "id": "gwijoaas09-wg5sdfs4rege-w4h54h-w4hw5wegeg",
        "name":"bob",
        "group": "police_officer",
        "manager": false,
        "timestamp": "UNIX",
        "room": "w7yh9gg-h4-ge4gaw4aw-wafwfawe",
        "event_type": "subscribe"
    }
    '''
    notify_room(data)
@socketio.on('socketboi')
def sensor_update(data):
    '''
    Sensor reading; *data* is a JSON string (note the json.loads below):
    {
        "id":"gwijoaas09-wg5sdfs4rege-w4h54h-w4hw5wegeg",
        "accelerometer": [0.0,0.0,0.0]
    }
    '''
    print("\n\n\n\n\n\n\n\n\n\n\n-------------------------------")
    print(data)
    print("-------------------------------")
    # Run anomaly detection on the reading; falsy result means "normal".
    anomaly = detector.analyze(json.loads(data))
    notify_room(data)
    if anomaly:
        emit('anomaly', json.dumps(anomaly), broadcast=True)
@socketio.on('unsubscribe')
def on_leave(data):
    '''
    Broadcast the unsubscribe event to all clients. Expected payload:
    {
        "id": "gwijoaas09-wg5sdfs4rege-w4h54h-w4hw5wegeg",
        "name":"bob",
        "group": "police_officer",
        "manager": false,
        "timestamp": "UNIX",
        "room": "w7yh9gg-h4-ge4gaw4aw-wafwfawe",
        "event_type": "subscribe"
    }
    '''
    # BUG FIX: previously forwarded the undefined name ``point``, which
    # raised NameError on every 'unsubscribe' event.
    notify_room(data)
def notify_room(event_json):
    # Fan the event out to every connected client (not just one room).
    emit('update', event_json, broadcast=True)
if __name__ == '__main__':
    import os
    # The PORT environment variable overrides the default dev port.
    port = int(os.environ.get('PORT', 5000))
    # NOTE(review): debug=True — disable before any production deployment.
    socketio.run(app, host="0.0.0.0", port=port, debug=True)
|
import requests
import json
import os
import gitlab
import sys
from namespace_definer import namespace
from support import parse_file_variable, parse_file_variable, mask_vars
from authorization import gitlab_private_token, gitlab_url
def show_vars(namespace):
    """Print the CI variables of the single project or group held in
    *namespace* (a one-entry dict keyed by name)."""
    # The namespace dict has one entry; pull its value out once instead
    # of re-evaluating the lookup in every branch.
    target = namespace[next(iter(namespace))]
    if isinstance(target, gitlab.v4.objects.Project):
        # if single project passed
        print(f"\n===== Project {target.name} had this variables ====\n")
        for variable in target.variables.list():
            print(f"{variable.key}={variable.value}")
    elif isinstance(target, gitlab.v4.objects.Group):
        # if group passed
        print(f"\n===== Group {target.name} had this variables ====\n")
        for variable in target.variables.list():
            print(f"{variable.key}={variable.value}")
    else:
        print(
            f"There is no {target} name of group or project")
def show_vars_for_all(namespace):
    # function will show all variables in every project of a group
    # using requests because gitlab lib do it tooo long
    if not isinstance(namespace[next(iter(namespace))], gitlab.v4.objects.Group):
        # NOTE(review): returns a message string instead of printing —
        # callers must print the return value themselves; confirm intended.
        return f"{namespace[next(iter(namespace))].name} is not a group name"
    # list of all projects as GroupObject, this object does not have
    # .variables method so we need it only to find projects in group
    # TODO imporve that
    group_objects = [x for x in namespace[next(
        iter(namespace))].projects.list()]
    projects = []
    # Hit the REST API directly with the private token for speed.
    header = {"PRIVATE-TOKEN": gitlab_private_token}
    for i in group_objects:
        r = requests.get(
            url=f"{gitlab_url}/api/v4/projects/{i.id}/variables", headers=header)
        print(
            f"\n=============== VARIABLES FOR PROJECT {i.name} ================== \n")
        for var in r.json():
            print(f"{var['key']}={var['value']}")
def add_variable(data, masked=False, namespace=namespace):
    """Create each variable dict in *data* on the namespace target,
    optionally masking the new variables; exits on empty input."""
    if not data:
        print("no data to manipulate")
        sys.exit(1)
    for var in data:
        try:
            new_var = namespace[next(iter(namespace))].variables.create(var)
            if masked:
                mask_vars(new_var)
            print(
                f"===== Variable {var} added to {namespace[next(iter(namespace))].name} =====")
        except gitlab.exceptions.GitlabCreateError as e:
            # Report the failure and keep going with the remaining vars.
            print(f"for variable {var} error ocured: \n {e.response_body}")
            continue
def change_variable(data, namespace=namespace):
    """Update the value of each existing variable in *data* on the
    namespace target; exits on empty input."""
    if not data:
        print("no data to manipulate")
        sys.exit(1)
    for var in data:
        # NOTE(review): a missing key raises GitlabGetError here and
        # aborts the loop — unlike add/delete there is no try/except.
        variable = namespace[next(iter(namespace))].variables.get(var['key'])
        variable.value = var['value']
        variable.save()
        print(
            f" ======= Project {namespace[next(iter(namespace))].name}, new value for {var['key']} is {var['value']} ===============")
def delete_variable(data, namespace=namespace, fromfile=False):
    """Delete variables from the project/group held in *namespace*.

    When *fromfile* is true, *data* is an iterable of dicts each carrying a
    'key' entry; otherwise *data* itself is treated as a single variable key.
    Exits with status 1 when *data* is empty; missing variables are reported
    but not fatal.
    """
    if not data:
        print("no data to manipulate")
        sys.exit(1)
    if fromfile:
        for var in data:
            try:
                namespace[next(iter(namespace))].variables.delete(var['key'])
                print(
                    f" ===== variable {var['key']} removed from {namespace[next(iter(namespace))].name} =====")
            except gitlab.exceptions.GitlabDeleteError:
                print(f" ===== no such variable {var} =====")
                continue
    else:
        try:
            namespace[next(iter(namespace))].variables.delete(str(data))
            print(
                f"===== variable {data} removed from {namespace[next(iter(namespace))].name} =====")
        except gitlab.exceptions.GitlabDeleteError:
            # BUG FIX: the original interpolated an undefined name `arg`
            # here, raising NameError instead of reporting the variable.
            print(f" ===== no such variable {data} =====")
|
from django.core.paginator import Paginator
posts = ['1','2','3','4','5']
# this will show two posts at a time on the page
p = Paginator(posts, 2)
print(p.num_pages) #returns 3 pages (5 items / 2 per page, rounded up)
#using page_range will make an iterable of the page numbers
for page in p.page_range:
    print("page " + str(page)) # returns 1,2,3. these are the page numbers
p1 = p.page(1)
print(p1) #returns Page 1 of 3
print(p1.number) #returns 1
print(p1.object_list) #returns list ['1', '2'], showing objects on a page
# Checking to see if there is a prior page
print(p1.has_previous())
# Checking to see if there is a next page
print(p1.has_next())
print(p1.next_page_number())
|
from django.forms import ModelForm
from .models import Produit,Facture
from django import forms
class ProduitForm(ModelForm):
    """ModelForm exposing every field of the Produit model."""

    class Meta:
        model = Produit
        fields = '__all__'
class FactureForm(ModelForm):
    """ModelForm exposing every field of the Facture model."""

    class Meta:
        model = Facture
        fields = '__all__'
|
import DFS
# Load the raw graph data once at import time.  Context managers replace the
# manual open/close pairs so the handles are released even if readlines fails.
with open("connections.txt", "r") as connection_file:
    connections_data = connection_file.readlines()  # adjacency lines
with open("locations.txt", "r") as location_file:
    location_data = location_file.readlines()       # coordinate lines
#path = DFS.DFS("G4b", "G2b", sorted_adj_list, Get_Connections)
#path = DFS.DFS("B1", "C1", sorted_adj_list, Get_Connections)
#path = DFS.DFS("C1", "B1", sorted_adj_list, Get_Connections)
def main():
    """Build the adjacency/coordinate structures and run a DFS C1 -> B1."""
    Adj_List = DFS.Construct_data_array(connections_data)
    Cordinate_List = DFS.Construct_data_array(location_data)
    sorted_adj_list = DFS.Sort_Adjency_List(Adj_List)
    path = DFS.DFS("C1", "B1", sorted_adj_list, DFS.Get_Connections)
    # DFS.DFS returns False when no route exists, otherwise a path stack.
    # (Fixed: `== False` comparison replaced with the identity test; the
    # dead triple-quoted debug blocks were removed.)
    if path is False:
        print("Path not found")
    else:
        print("\n")
        DFS.PrintPathStack(path, Cordinate_List)


if __name__ == "__main__":
    main()
from itertools import chain
from datetime import datetime
import re
from datatypes import TransactionType, TransactionDirection, ParsedBankAccountTransaction, ParsedCreditCardTransaction
from datatypes import Account, Bank, Card, UnknownSubject, UnknownWallet
from common.parsing import extract_literals, extract_keywords
from common.utils import get_nested_item
import datatypes
# Dotted paths inside a raw transaction dict whose string values are mined
# for search keywords (fed to extract_literals/extract_keywords below).
KEYWORD_FIELDS = [
    'name',
    'humanConceptName',
    'concept.name',
    'extendedName',
    'humanExtendedConceptName',
    'cardTransactionDetail.concept.name',
    'cardTransactionDetail.concept.shop.name',
    'wireTransactionDetail.sender.person.name'
]
def get_type(transaction_code, transation_direction):
    """Map a bank code plus a direction to a TransactionType (or None).

    Known subcategory codes (scheme.subCategory.id):
      '0017' PAGO CON TARJETA, '0114' INGRESO POR NOMINA O PENSION,
      '0022' DISPOSIC. DE EFECTIVO CAJERO/OFICINA, '0054' OTROS,
      '0058' PAGO DE ADEUDO DIRECTO SEPA, '0060' RECIBO TARJETA CRÉDITO,
      '0149' TRANSFERENCIA RECIBIDA, '0064' TRANSFERENCIA REALIZADA.
    When more detail is needed (the OTROS case) the code comes from
    transaction['concept']['id'], e.g. '00200' cash withdrawal, '00400'
    BBVA Wallet purchase.

    Returns None for unknown codes and for code/direction combinations
    that have no defined type.
    """
    code_table = (
        (('0017', '00400', '0005'),
         {TransactionDirection.CHARGE: TransactionType.PURCHASE,
          TransactionDirection.INCOME: TransactionType.PURCHASE_RETURN}),
        (('0149', '0064'),
         {TransactionDirection.CHARGE: TransactionType.ISSUED_TRANSFER,
          TransactionDirection.INCOME: TransactionType.RECEIVED_TRANSFER}),
        (('0114',),  # paycheck: only meaningful as an income
         {TransactionDirection.INCOME: TransactionType.RECEIVED_TRANSFER}),
        (('0022', '00200', '0007'),  # withdrawal: only meaningful as a charge
         {TransactionDirection.CHARGE: TransactionType.ATM_WITHDRAWAL}),
        (('0058',),
         {TransactionDirection.CHARGE: TransactionType.DOMICILED_RECEIPT,
          TransactionDirection.INCOME: TransactionType.RETURN_DEPOSIT}),
        (('0060', '0070'),
         {TransactionDirection.CHARGE: TransactionType.CREDIT_CARD_INVOICE,
          TransactionDirection.INCOME: TransactionType.CREDIT_CARD_INVOICE_PAYMENT}),
    )
    for codes, by_direction in code_table:
        if transaction_code in codes:
            return by_direction.get(transation_direction)
    return None
def get_source(details, transaction_type):
    """Return the party/account a transaction's money comes from.

    Falls back to UnknownSubject when the issuing party is missing from
    *details*; returns None for types with no defined source.
    """
    def safe_issuer(subject):
        return UnknownSubject() if subject is None else datatypes.Issuer(subject)

    # Charges against our own account: money leaves the account itself.
    # (Fixed: the original tested MORTAGE_RECEIPT and BANK_COMISSION twice.)
    account_sourced = (
        TransactionType.ATM_WITHDRAWAL,
        TransactionType.ISSUED_TRANSFER,
        TransactionType.CREDIT_CARD_INVOICE,
        TransactionType.CREDIT_CARD_INVOICE_PAYMENT,
        TransactionType.DOMICILED_RECEIPT,
        TransactionType.MORTAGE_RECEIPT,
        TransactionType.BANK_COMISSION,
        TransactionType.PURCHASE,
    )
    if transaction_type in account_sourced:
        return details['account']
    if transaction_type is TransactionType.BANK_COMISSION_RETURN:
        return details['bank']
    if transaction_type is TransactionType.RETURN_DEPOSIT:
        return safe_issuer(details['creditor_name'])
    if transaction_type is TransactionType.RECEIVED_TRANSFER:
        return safe_issuer(details['issuer_name'])
    if transaction_type is TransactionType.PURCHASE_RETURN:
        return safe_issuer(details['shop_name'])
def get_destination(details, transaction_type):
    """Return where a transaction's money ends up.

    Falls back to UnknownSubject/UnknownWallet when the recipient cannot be
    determined; returns None for types with no defined destination.
    """
    def safe_recipient(subject):
        return UnknownSubject() if subject is None else datatypes.Recipient(subject)

    # Money arriving at our own account.
    if transaction_type in (TransactionType.RECEIVED_TRANSFER,
                            TransactionType.BANK_COMISSION_RETURN,
                            TransactionType.RETURN_DEPOSIT,
                            TransactionType.PURCHASE_RETURN):
        return details['account']
    if transaction_type is TransactionType.ATM_WITHDRAWAL:
        return UnknownWallet()
    # Payments that go back to the bank itself.
    if transaction_type in (TransactionType.CREDIT_CARD_INVOICE,
                            TransactionType.MORTAGE_RECEIPT,
                            TransactionType.BANK_COMISSION,
                            TransactionType.CREDIT_CARD_INVOICE_PAYMENT):
        return details['bank']
    if transaction_type is TransactionType.ISSUED_TRANSFER:
        return safe_recipient(details['beneficiary'])
    if transaction_type is TransactionType.DOMICILED_RECEIPT:
        return safe_recipient(details['creditor_name'])
    if transaction_type is TransactionType.PURCHASE:
        return safe_recipient(details['shop_name'])
def get_account_transaction_details(transaction, transaction_type):
    """Extract type-specific detail fields from a raw account transaction.

    Returns a dict whose keys depend on *transaction_type* (shop_name,
    card_number, beneficiary, concept, ...).  Each field is resolved from
    the first dotted path that yields a non-None (formatted) value.
    """
    details = {}
    def set_detail(fieldname, xpath_string_or_list, fmt=None, default=None):
        # Try each dotted path in order; the first non-None formatted value
        # wins.  If none match, store *default*.
        nonlocal details
        xpath_list = xpath_string_or_list if isinstance(xpath_string_or_list, list) else [xpath_string_or_list]
        for xpath in xpath_list:
            value = get_nested_item(transaction, xpath)
            formatted_value = None if value is None else (value if fmt is None else fmt(value))
            if formatted_value is not None:
                details[fieldname] = formatted_value
                return
        details[fieldname] = default
    def title(str):
        # Title-case formatter for human-readable names.
        return str.title()
    def capture(regex, group):
        # Build a formatter returning the first regex group of the value.
        # NOTE(review): the `group` parameter is unused — groups()[0] is
        # always taken; confirm whether other groups were ever intended.
        def wrap(value):
            match = re.search(regex, value)
            return match.groups()[0] if match else None
        return wrap
    if transaction_type is TransactionType.PURCHASE:
        set_detail('shop_name', ['comments.[0].text', 'cardTransactionDetail.shop.name', 'humanConceptName'], fmt=title)
        set_detail('card_number', 'origin.panCode')
        set_detail('activity', 'cardTransactionDetail.shop.businessActivity.name')
    if transaction_type is TransactionType.ATM_WITHDRAWAL:
        # Card number is embedded as digits inside detailSourceKey.
        set_detail('card_number', 'origin.detailSourceKey', fmt=capture(r'(\d+)', 0))
        set_detail('atm_name', ['cardTransactionDetail.shop.name', 'extendedName'])
    if transaction_type is TransactionType.ISSUED_TRANSFER:
        set_detail('beneficiary', 'wireTransactionDetail.sender.person.name', fmt=title)
        set_detail('concept', 'humanExtendedConceptName')
    if transaction_type is TransactionType.RECEIVED_TRANSFER:
        set_detail('origin_account_number', 'wireTransactionDetail.sender.account.formats.ccc')
        set_detail('issuer_name', 'wireTransactionDetail.sender.person.name')
        set_detail('concept', 'humanExtendedConceptName')
    if transaction_type is TransactionType.DOMICILED_RECEIPT:
        set_detail('creditor_name', ['billTransactionDetail.creditor.name'])
        set_detail('concept', ['billTransactionDetail.extendedBillConceptName', 'extendedName'], fmt=title)
    if transaction_type is TransactionType.RETURN_DEPOSIT:
        set_detail('return_reason', 'billTransactionDetail.extendedIntentionName', fmt=title)
    return details
def get_card_transaction_details(transaction, transaction_type):
    """Extract type-specific detail fields from a raw card transaction.

    Currently only purchases carry extra detail (the shop name).
    """
    details = {}

    def set_detail(fieldname, paths, fmt=None, default=None):
        # First dotted path yielding a non-None (formatted) value wins;
        # otherwise *default* is stored.
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            raw = get_nested_item(transaction, path)
            if raw is None:
                continue
            value = raw if fmt is None else fmt(raw)
            if value is not None:
                details[fieldname] = value
                return
        details[fieldname] = default

    def title(text):
        return text.title()

    if transaction_type is TransactionType.PURCHASE:
        set_detail('shop_name', 'shop.name', fmt=title)
    return details
def decode_date(date):
    """Parse a 'YYYY-MM-DDTHH:MM:SS...' string into a naive datetime.

    Anything after the seconds field (milliseconds, UTC offset) is ignored.
    Raises AttributeError if *date* does not match the expected prefix.
    """
    fields = re.search(r'^(\d+)-(\d+)-(\d+)T(\d+):(\d+):(\d+)', date).groups()
    return datetime(*(int(part) for part in fields))
def get_card(account_config, card_number):
    """Resolve *card_number* against the account's configured cards.

    Returns None when no number is given, a configured Card when known,
    or a placeholder 'Unknown card' for numbers no longer registered.
    """
    if card_number is None:
        return None
    known_cards = account_config.cards
    if card_number in known_cards:
        return Card.from_config(known_cards[card_number])
    # Possibly an old card no longer registered
    return Card('Unknown card', card_number)
def get_comment(details, transaction_type):
    """Pick the human-readable comment for a transaction, or None."""
    # Transfers and domiciled receipts carry their text in 'concept';
    # returned deposits explain themselves via 'return_reason'.
    if transaction_type in [TransactionType.ISSUED_TRANSFER,
                            TransactionType.RECEIVED_TRANSFER,
                            TransactionType.DOMICILED_RECEIPT]:
        return details['concept']
    if transaction_type is TransactionType.RETURN_DEPOSIT:
        return details['return_reason']
def parse_account_transaction(bank_config, account_config, transaction):
    """Turn a raw bank-account transaction dict into a ParsedBankAccountTransaction.

    Derives the type from the subcategory code (falling back to the concept
    id for the generic '0054' OTROS bucket) and the sign of the amount,
    then resolves source/destination, keywords and comment.
    """
    amount = transaction['amount']['amount']
    transaction_code = get_nested_item(transaction, 'scheme.subCategory.id')
    if transaction_code == '0054':  # "Otros ..." — too generic, use concept id
        transaction_code = get_nested_item(transaction, 'concept.id')
    # Negative amounts are charges, positive ones income.
    transation_direction = TransactionDirection.CHARGE if amount < 0 else TransactionDirection.INCOME
    transaction_type = get_type(transaction_code, transation_direction)
    details = get_account_transaction_details(transaction, transaction_type)
    # account/bank are injected so get_source/get_destination can use them.
    details['account'] = Account.from_config(account_config)
    details['bank'] = Bank.from_config(bank_config)
    card_number = details.pop('card_number', None)
    used_card = get_card(account_config, card_number)
    # Keywords come both from well-known raw fields and from any string
    # detail values extracted above.
    keywords = extract_keywords(
        chain(
            extract_literals(transaction, KEYWORD_FIELDS),
            filter(lambda value: isinstance(value, str), details.values())
        )
    )
    comment = get_comment(details, transaction_type)
    source = get_source(details, transaction_type)
    destination = get_destination(details, transaction_type)
    # bank was only needed for source/destination resolution.
    del details['bank']
    return ParsedBankAccountTransaction(
        transaction_id=transaction['id'],
        type=transaction_type,
        currency=transaction['amount']['currency']['code'],
        amount=amount,
        balance=transaction['balance']['availableBalance']['amount'],
        value_date=decode_date(transaction['valueDate']),
        transaction_date=decode_date(transaction['transactionDate']),
        source=source,
        destination=destination,
        account=details.pop('account'),
        card=used_card,
        details=details,
        keywords=keywords,
        comment=comment if comment is not None else '',
    )
def parse_credit_card_transaction(bank_config, account_config, card_config, transaction):
    """Turn a raw credit-card transaction dict into a ParsedCreditCardTransaction.

    Returns the parsed transaction, or (in one gated branch, see below)
    None to signal the transaction should be skipped.
    """
    amount = transaction['amount']['amount']
    transaction_code = get_nested_item(transaction, 'concept.id')
    if transaction_code == '0000':
        # code 0000 seems like an error, as it's really a regular purcharse,
        # so we fake the code
        transaction_code = '0005'
    transation_direction = TransactionDirection.CHARGE if amount < 0 else TransactionDirection.INCOME
    transaction_type = get_type(transaction_code, transation_direction)
    # Transactions have a _PT or _TT termination, that changes once over time and makes id unusable
    # this is an attempt to fix this and still being able to use the id as an unique id
    transaction_id = re.sub(r'_[PT]T$', '', transaction['id'])
    details = get_card_transaction_details(transaction, transaction_type)
    details['account'] = Account.from_config(account_config)
    details['bank'] = Bank.from_config(bank_config)
    # As we are processing a concrete card, and transaction doesn't have this
    # information, we set it to be able to process all transactions equally
    card_used = Card.from_config(card_config)
    keywords = extract_keywords(
        chain(
            extract_literals(transaction, KEYWORD_FIELDS),
            filter(lambda value: isinstance(value, str), details.values())
        )
    )
    comment = get_comment(details, transaction_type)
    source = get_source(details, transaction_type)
    destination = get_destination(details, transaction_type)
    # account/bank were only needed for source/destination resolution.
    del details['bank']
    del details['account']
    # 'D' marks a debit operation; status id '7' marks a consolidated one.
    is_debit_operation = transaction.get('operationTypeIndicator') == 'D'
    is_consolidated = transaction.get('status', {}).get('id') == '7'
    # NOTE(review): notify_not_added is hard-coded False, so neither
    # notification branch below ever runs, and — because it also guards the
    # outer `not is_consolidated` check — non-consolidated transactions are
    # currently NEVER skipped.  Confirm whether that gating is intentional.
    notify_not_added = False
    status_flags = datatypes.StatusFlags()
    if is_debit_operation:
        if notify_not_added:
            from common.notifications import get_notifier
            import bank
            banking_configuration = bank.load_config(bank.env()['main_config_file'])
            notifier = get_notifier(banking_configuration.notifications)
            notifier('Debit transaction found, not adding {bank.name} card transaction: {date} {amount}, {source}->{destination}'.format(
                bank=bank_config, amount=amount, date=transaction['valueDate'], source=str(source), destination=str(destination))
            )
        # Debit operations are kept but flagged invalid.
        status_flags.invalid = True
    if not is_consolidated and notify_not_added:
        if notify_not_added:
            from common.notifications import get_notifier
            import bank
            banking_configuration = bank.load_config(bank.env()['main_config_file'])
            notifier = get_notifier(banking_configuration.notifications)
            notifier('Non consolidated transaction found, not adding {bank.name} card transaction: {date} {amount}, {source}->{destination}'.format(
                bank=bank_config, amount=amount, date=transaction['valueDate'], source=str(source), destination=str(destination))
            )
        return None
    return ParsedCreditCardTransaction(
        transaction_id=transaction_id,
        type=transaction_type,
        currency=transaction['amount']['currency']['code'],
        amount=amount,
        value_date=decode_date(transaction['valueDate']),
        transaction_date=decode_date(transaction['transactionDate']),
        source=source,
        destination=destination,
        card=card_used,
        details=details,
        keywords=keywords,
        comment=comment if comment is not None else '',
        status_flags=status_flags
    )
|
from pymongo import MongoClient
class DbWorker:
    """Tiny MongoDB-backed store for application documents, keyed by 'id'."""

    def __init__(self):
        # Default MongoClient(): localhost:27017.
        client = MongoClient()
        self.db = client.test_database
        self.applications = self.db.applications

    def add(self, app):
        """Insert *app*, or overwrite the existing document with the same id.

        Uses replace_one(upsert=True): the legacy Collection.update/save
        methods used here before are deprecated and removed in PyMongo 4.
        """
        self.applications.replace_one({'id': app.get('id')}, app, upsert=True)

    def get_item(self, id):
        """Return the stored document with the given id, or None."""
        return self.applications.find_one({'id': id})

    def list(self):
        """Return a cursor over all stored application documents."""
        return self.applications.find()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-07-26 13:33
from __future__ import unicode_literals
import django.db.models.deletion
import storages.backends.s3boto3
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the Document model (S3-backed file) and link it from Election.

    Auto-generated by Django 1.10.7 — edit with care.
    """
    dependencies = [("elections", "0030_merge_20170424_1402")]
    operations = [
        migrations.CreateModel(
            name="Document",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("source_url", models.URLField(max_length=1000)),
                (
                    "uploaded_file",
                    # Files are stored on S3 via django-storages.
                    models.FileField(
                        storage=storages.backends.s3boto3.S3Boto3Storage(),
                        upload_to="",
                    ),
                ),
            ],
        ),
        migrations.AddField(
            model_name="election",
            name="notice",
            # SET_NULL keeps elections alive if their notice is deleted.
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                to="elections.Document",
            ),
        ),
    ]
|
import xlrd, openpyxl
# 1. Read xlsx file
# NOTE(review): `self` is referenced at module level — this snippet was
# presumably lifted out of a class method and will NameError if run as-is;
# confirm the original context.
self.input_data = []
INPUT_FILE = 'sam.xlsx'
input_xls = xlrd.open_workbook(INPUT_FILE)
sheet = input_xls.sheet_by_index(0)
# Collect every row of the first sheet as a list of cell values.
for row_index in range(0, sheet.nrows):
    row = [sheet.cell(row_index, col_index).value for col_index in range(sheet.ncols)]
    self.input_data.append(row)
# 2. Write xlsx file
def create_result_file(self):
    # Open the existing result workbook with openpyxl and reset the row
    # cursor used by insert_row below.
    # NOTE(review): written as a method (`self`) but no enclosing class is
    # visible in this fragment — confirm original context.
    self.xfile = openpyxl.load_workbook(self.result_file_name)
    self.sheet = self.xfile.active
    self.row_index = 1
def insert_row(self, result_row):
    """Write *result_row* into the next sheet row (1-based columns), then save."""
    self.row_index += 1
    for column, cell_value in enumerate(result_row, start=1):
        self.sheet.cell(row=self.row_index, column=column).value = cell_value
    self.xfile.save(self.result_file_name)
import jwt
from django.contrib.auth import get_user_model
from rest_framework import serializers
from residents.models import Lot,Community,Area,Street,Resident,ResidentLotThroughModel,Profile
from rest_framework_jwt.compat import Serializer
from ivms.models import IPCamera,Boomgate
from security_guards.models import Security,ReasonSetting,PassNumber,DeviceNumber,BoomgateLog,Post_Log
from django.contrib.auth.hashers import make_password, check_password
from rest_framework_jwt.compat import PasswordField
from rest_framework_jwt.settings import api_settings
from api.utils import jwt_payload_handler, jwt_encode_handler, jwt_decode_handler,jwt_get_username_from_payload_handler
from django.utils.translation import ugettext as _
from .resident import UserSerializer,StreetSerializer
from django.utils import timezone
from datetime import datetime, timedelta
class LotOnlySerializer(serializers.ModelSerializer):
    """Compact Lot representation plus an occupancy flag."""

    has_resident = serializers.SerializerMethodField()

    def get_has_resident(self, obj):
        # A lot counts as occupied when at least one resident is linked.
        return obj.resident_set.count() > 0

    class Meta:
        model = Lot
        fields = (
            'id',
            'is_lock',
            'has_resident',
            'name',
        )
class PostLogSerializer(serializers.ModelSerializer):
    """Patrol post-log entry, annotated with the guard's display name."""

    security_guard_name = serializers.SerializerMethodField()
    timestamp = serializers.DateTimeField(required=False)

    def get_security_guard_name(self, obj):
        guard = obj.security_guard
        return guard.first_name + ' ' + guard.last_name

    class Meta:
        model = Post_Log
        fields = (
            'id',
            'area',
            'security_guard',
            'longitude',
            'qr_uuid',
            'latitude',
            'timestamp',
            'security_guard_name',
        )

    def get_current_user(self):
        # The request object is injected into the serializer context by the view.
        request = self.context.get("request")
        return getattr(request, "user", None) if request else None

    def validate_longitude(self, value):
        # Throttle: reject a new log when this guard already posted one
        # within the last 30 minutes.
        user = self.get_current_user()
        latest = Post_Log.objects.filter(
            security_guard_id=user.id).order_by('-timestamp').first()
        if latest and latest.timestamp > timezone.now() - timedelta(minutes=30):
            raise serializers.ValidationError("Please try again later.")
        return value
class StreetLotSerializer(serializers.ModelSerializer):
    """Street together with the compact list of its lots."""

    lot_set = LotOnlySerializer(many=True, read_only=True)

    class Meta:
        model = Street
        fields = (
            'id',
            'name',
            'lot_set',
        )
class BoomgateLogSerializer(serializers.ModelSerializer):
    """Minimal boom-gate log payload: just the event type and reason."""

    class Meta:
        model = BoomgateLog
        fields = ('type', 'reason')
class IPCamSerializer(serializers.ModelSerializer):
    """IP camera exposed by its stream URL and type."""

    class Meta:
        model = IPCamera
        fields = ('url', 'type')
class BoomgateSerializer(serializers.ModelSerializer):
    """Boom gate exposed by its control URL and type."""

    class Meta:
        model = Boomgate
        fields = ('url', 'type')
class ReasonSerializer(serializers.ModelSerializer):
    """Configured reason option (id + text)."""

    class Meta:
        model = ReasonSetting
        fields = ('id', 'reason')
class SecuritySerializer(serializers.ModelSerializer):
    """Security guard with flattened community/area names."""
    community_name = serializers.CharField(source='community.name')
    area_name = serializers.CharField(source='area.name')
    class Meta:
        model = Security
        fields=('id','username','first_name','last_name','status','community_name','area_name','area')
        # NOTE(review): DRF does not support '__all__' inside
        # read_only_fields (only Meta.fields accepts it) — confirm this
        # actually makes the serializer read-only before relying on it.
        read_only_fields = ('__all__',)
class PassNumberSerializer(serializers.ModelSerializer):
    """Visitor pass number record."""
    class Meta:
        model = PassNumber
        fields=('id','pass_no',)
        # NOTE(review): '__all__' is not a supported value for
        # read_only_fields in DRF — verify intended behavior.
        read_only_fields = ('__all__',)
class DeviceNumberSerializer(serializers.ModelSerializer):
    """Registered device with its phone number."""
    class Meta:
        model = DeviceNumber
        fields=('id','device_no','phone_number')
        # NOTE(review): '__all__' is not a supported value for
        # read_only_fields in DRF — verify intended behavior.
        read_only_fields = ('__all__',)
class ResidentUserSerializer(serializers.ModelSerializer):
    """Resident exposed only through its related user record."""

    user = UserSerializer()

    class Meta:
        model = Resident
        fields = ('user',)
class ResidentLotThroughModelSerializer(serializers.ModelSerializer):
    """Resident<->Lot through-model row with its ordering position."""

    resident = ResidentUserSerializer()

    class Meta:
        # Distinct schema name to avoid clashing with the resident-side
        # serializer of the same model.
        ref_name = 'SecurityLotTroughModel'
        model = ResidentLotThroughModel
        fields = ('resident', 'order')
class GetPrimarySerializer(serializers.ModelSerializer):
    """Lot with its street and the primary (order == 0) resident rows."""

    street = StreetSerializer()
    resident = serializers.SerializerMethodField()

    def get_resident(self, obj):
        # order=0 marks the primary resident(s) of a lot.
        primary = ResidentLotThroughModel.objects.filter(order=0, lot=obj)
        return ResidentLotThroughModelSerializer(instance=primary, many=True).data

    class Meta:
        model = Lot
        fields = (
            'street',
            'id',
            'name',
            'resident',
        )
        # NOTE(review): 'ordering' is not a serializer Meta option in DRF —
        # presumably intended for the model/queryset; confirm.
        ordering = ['-id']
class SecurityWebTokenSerializer(Serializer):
    """
    Serializer class used to validate a username and password.
    'username' is identified by the custom UserModel.USERNAME_FIELD.
    Returns a JSON Web Token that can be used to authenticate later calls.
    """
    def __init__(self, *args, **kwargs):
        """
        Dynamically add the USERNAME_FIELD to self.fields.
        """
        super(SecurityWebTokenSerializer, self).__init__(*args, **kwargs)
        self.fields['username'] = serializers.CharField()
        self.fields['password'] = PasswordField(write_only=True)
    def validate(self, attrs):
        credentials = {
            'username': attrs.get('username'),
            'password': attrs.get('password')
        }
        if all(credentials.values()):
            try:
                user = Security.objects.get(username=credentials['username'])
            except Security.DoesNotExist:
                user = None
            if user:
                # Passwords are hashed with a per-user salt APPENDED to the
                # plaintext before hashing, so the same concatenation must
                # be reproduced here before check_password.
                password = credentials['password'] + user.salt
                if check_password(password ,user.password):
                    # status 'I' marks a disabled account (see also
                    # SVerificationBaseSerializer._check_user).
                    if user.status == 'I':
                        msg = 'User account is disabled.'
                        raise serializers.ValidationError(msg)
                    payload = jwt_payload_handler(user)
                    return {
                        'token': jwt_encode_handler(payload),
                        'user': user
                    }
                else:
                    msg = 'Unable to log in with provided credentials.'
                    raise serializers.ValidationError(msg)
            else:
                # Same message as a bad password: do not leak which part failed.
                msg = 'Unable to log in with provided credentials.'
                raise serializers.ValidationError(msg)
        else:
            msg = 'Must include "username" and "password".'
            raise serializers.ValidationError(msg)
class SVerificationBaseSerializer(Serializer):
    """
    Abstract serializer used for verifying and refreshing JWTs.
    """
    token = serializers.CharField()

    def validate(self, attrs):
        # Subclasses must implement the actual token validation.
        msg = 'Please define a validate method.'
        raise NotImplementedError(msg)

    def _check_payload(self, token):
        """Decode *token*, translating JWT failures into validation errors."""
        try:
            payload = jwt_decode_handler(token)
        except jwt.ExpiredSignature:
            msg = _('Signature has expired.')
            raise serializers.ValidationError(msg)
        except jwt.DecodeError:
            msg = _('Error decoding signature.')
            raise serializers.ValidationError(msg)
        return payload

    def _check_user(self, payload):
        """Resolve and vet the Security user referenced by *payload*."""
        username = jwt_get_username_from_payload_handler(payload)
        if not username:
            msg = _('Invalid payload.')
            raise serializers.ValidationError(msg)
        # Make sure user exists
        try:
            user = Security.objects.get(username=username)
        except Security.DoesNotExist:
            msg = _("User doesn't exist.")
            raise serializers.ValidationError(msg)
        # FIX: the original read `if not user.status != "I":` — a double
        # negation equal to `status == "I"`.  Written plainly here, matching
        # SecurityWebTokenSerializer.validate ('I' = disabled account).
        if user.status == "I":
            msg = _('User account is disabled.')
            raise serializers.ValidationError(msg)
        return user
class SVerifyJSONWebTokenSerializer(SVerificationBaseSerializer):
    """
    Check the veracity of an access token.
    """

    def validate(self, attrs):
        # Decode the token first, then confirm its user is still valid.
        token = attrs['token']
        payload = self._check_payload(token=token)
        return {
            'token': token,
            'user': self._check_user(payload=payload),
        }
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import Bool
from ackermann_msgs.msg import AckermannDriveStamped
import sys, select, termios, tty
# Usage text for the teleop node.
# NOTE(review): `banner` is never printed anywhere in this file — confirm
# whether it is still needed.
banner = """
Reading from the keyboard and Publishing to AckermannDriveStamped!
---------------------------
Moving around:
w
a s d
anything else : stop
CTRL-C to quit
"""
# key -> (speed multiplier, steering multiplier) applied in the main loop.
keyBindings = {
  'w':(1,0),
  'd':(1,-1),
  'a':(1,1),
  's':(-1,0)
}
def getKey():
    # Put the terminal into raw mode so a single keypress is readable
    # without waiting for Enter.
    tty.setraw(sys.stdin.fileno())
    select.select([sys.stdin], [], [], 0)
    key = sys.stdin.read(1)
    # Restore the cooked settings captured at startup in the __main__ block.
    termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
    return key
# Base magnitudes scaled by the per-key multipliers in keyBindings.
speed = 0.5
turn = 0.25

def vels(speed, turn):
    """Format the current speed/turn values for terminal display."""
    template = "currently:\tspeed %s\tturn %s "
    return template % (speed, turn)
if __name__ == "__main__":
    # Remember the terminal settings so getKey() can restore them after
    # every raw-mode read, and so the finally block can restore them on exit.
    settings = termios.tcgetattr(sys.stdin)
    pub = rospy.Publisher('/vesc/low_level/ackermann_cmd_mux/input/teleop', AckermannDriveStamped, queue_size=1)
    pub_red = rospy.Publisher('/TFF/RED_Emergency_Stop', Bool, queue_size=1)
    pub_yellow = rospy.Publisher('/TFF/YELLOW_Normal_Stop', Bool, queue_size=1)
    pub_green = rospy.Publisher('/TFF/GREEN_Start_DDPG', Bool, queue_size=1)
    rospy.init_node('keyop')
    x = 0
    th = 0
    status = 0
    print("Keyboard Input ...")
    # Publish one initial drive message so subscribers see a known state.
    msg = AckermannDriveStamped()
    msg.header.stamp = rospy.Time.now()
    msg.header.frame_id = "base_link"
    msg.drive.speed = 1
    msg.drive.acceleration = 1
    msg.drive.jerk = 1
    msg.drive.steering_angle = 1
    msg.drive.steering_angle_velocity = 1
    pub.publish(msg)
    try:
        while(1):
            key = getKey()
            if key in keyBindings.keys():
                x = keyBindings[key][0]
                th = keyBindings[key][1]
            else:
                x = 0
                th = 0
            if (key == '\x03'):  # Ctrl-C: leave the loop; stop happens in finally
                break
            if key == 'w' or key == 'd' or key == 'a' or key == 's':
                # Motion keys: publish speed/steering scaled by the bindings.
                msg = AckermannDriveStamped()
                msg.header.stamp = rospy.Time.now()
                msg.header.frame_id = "base_link"
                msg.drive.speed = x*speed
                msg.drive.acceleration = 1
                msg.drive.jerk = 1
                msg.drive.steering_angle = th*turn
                msg.drive.steering_angle_velocity = 1
                pub.publish(msg)
            else:
                # Signal keys toggle the traffic-light style Bool topics.
                if key == 'r':
                    msg_red = Bool()
                    msg_red.data = False
                    pub_red.publish(msg_red)
                elif key == 'y':
                    msg_yellow = Bool()
                    msg_yellow.data = False
                    pub_yellow.publish(msg_yellow)
                elif key == 'g':
                    msg_green = Bool()
                    msg_green.data = True
                    pub_green.publish(msg_green)
    except:
        # BUG FIX: this used Python-2-only `print 'error'` statement syntax
        # while the rest of the file uses print(); the parenthesised form
        # behaves identically on both Python 2 and 3.
        # NOTE(review): the bare except deliberately swallows everything so
        # the finally block always stops the car and restores the tty.
        print('error')
    finally:
        # Always command a full stop and restore the terminal on the way out.
        msg = AckermannDriveStamped()
        msg.header.stamp = rospy.Time.now()
        msg.header.frame_id = "base_link"
        msg.drive.speed = 0
        msg.drive.acceleration = 1
        msg.drive.jerk = 1
        msg.drive.steering_angle = 0
        msg.drive.steering_angle_velocity = 1
        pub.publish(msg)
        termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
|
from django.db import models
class Todo(models.Model):
    """A single to-do item with automatic creation/update timestamps."""

    title = models.CharField(verbose_name='内容', max_length=200)
    finished = models.BooleanField(verbose_name='完了済み', null=True)
    created_at = models.DateTimeField(verbose_name='作成日時', auto_now_add=True)
    updated_at = models.DateTimeField(verbose_name='更新日時', auto_now=True)

    class Meta:
        verbose_name = 'ToDo'
        verbose_name_plural = 'ToDos'

    def __str__(self):
        # Admin and shell listings display the item's title.
        return self.title
|
#!/usr/bin/env python3
"""
Example usage of the ODrive python library to monitor and control ODrive devices
"""
from __future__ import print_function
import odrive
from odrive.enums import *
from odrive.utils import dump_errors
import time
import math
import sys
import fibre
from odrive_manager import OdriveManager
def idle_wait():
    # Poll the (global) odrv0 until its axis state machine returns to IDLE,
    # then print any errors accumulated during the preceding operation.
    while odrv0.axis0.current_state != AXIS_STATE_IDLE:
        time.sleep(0.1)
    print(dump_errors(odrv0))
# Find a connected ODrive (this will block until you connect one)
print("finding an odrive...")
#odrv0 = OdriveManager(path='/dev/ttyACM0', serial_number='336B31643536').find_odrive()
odrv0 = odrive.find_any()
# Configure the axis for closed-loop current control.
odrv0.axis0.requested_state = AXIS_STATE_IDLE
odrv0.axis0.controller.config.control_mode = CTRL_MODE_CURRENT_CONTROL
odrv0.axis0.requested_state = AXIS_STATE_CLOSED_LOOP_CONTROL
# Find an ODrive that is connected on the serial port /dev/ttyUSB0
#odrv0 = odrive.find_any("serial:/dev/ttyUSB0")
# odrv0.axis0.motor.DC_calib_phB = 0.0
# odrv0.axis0.motor.DC_calib_phC = 0.0
print(dump_errors(odrv0,True))
odrv0.axis0.motor.config.current_control_bandwidth = 20
odrv0.axis0.encoder.config.bandwidth = 100
print(dump_errors(odrv0,True))
odrv0.axis0.requested_state = 11 # AXIS_STATE_BRUSHED_CURRENT_CONTROL
#odrv0.axis0.requested_state = 12 # AXIS_STATE_BRUSHED_CURRENT_CONTROL
# Current-loop PI gains and motor electrical parameters.
odrv0.axis0.motor.current_control.p_gain = 10.0
odrv0.axis0.motor.current_control.i_gain = 0.1
odrv0.axis0.motor.config.phase_resistance = 2.5
odrv0.axis0.motor.config.phase_inductance = 0.001
print(odrv0.axis0)
print()
print("motor.current_control:")
print(odrv0.axis0.motor.current_control)
print()
print("motor.config:")
print(odrv0.axis0.motor.config)
odrv0.axis0.motor.current_control.final_v_beta = 1.0 # Voltage Ramp Rate
# State for the homing routine further below.
max_vel = 0
reverse = False
reverse_start_time = 0
reverse_duration_allowed_sec = 4
ctrl = odrv0.axis0.motor.current_control
start_time = time.time()
reverse_position = 0
print(odrv0.axis0.encoder.pos_estimate)
print(odrv0.axis0.encoder.pos_estimate)
# odrv0.axis0.controller.current_setpoint = 3.0
# time.sleep(2)
# odrv0.axis0.controller.current_setpoint = 1
# time.sleep(2)
# odrv0.axis0.controller.current_setpoint = 0
# sys.exit()
# Ramp the current setpoint slowly up to 3 A to avoid a torque jump.
odrv0.axis0.controller.current_setpoint = 0
while odrv0.axis0.controller.current_setpoint < 3.0:
    odrv0.axis0.controller.current_setpoint += 0.01
    time.sleep(0.01)
# Grow the current setpoint 20% per second while reporting telemetry;
# Ctrl-C zeroes the current and puts the axis back to IDLE (state 1).
try:
    while True:
        print("current setpoint {:0.2f}, voltage: {:0.2f}, PH B: {:0.2f}, PH C: {:0.2f}, Iq_measured {:0.2f}, bus voltage {:0.2f}, vel estimate {:0.2f}, max_vel {:0.2f}.".format(odrv0.axis0.controller.current_setpoint, ctrl.final_v_alpha, odrv0.axis0.motor.current_meas_phB, odrv0.axis0.motor.current_meas_phC, ctrl.Iq_measured, odrv0.vbus_voltage, odrv0.axis0.encoder.vel_estimate, max_vel))
        odrv0.axis0.controller.current_setpoint *=1.2
        if odrv0.axis0.error:
            # Dump the final telemetry and abort on any axis error.
            print(dump_errors(odrv0,True))
            print("Gate Driver: {}".format(odrv0.axis0.motor.gate_driver))
            print("current setpoint {:0.2f}, voltage: {:0.2f}, PH B: {:0.2f}, PH C: {:0.2f}, Iq_measured {:0.2f}, bus voltage {:0.2f}, vel estimate {:0.2f}, max_vel {:0.2f}.".format(odrv0.axis0.controller.current_setpoint, ctrl.final_v_alpha, odrv0.axis0.motor.current_meas_phB, odrv0.axis0.motor.current_meas_phC, ctrl.Iq_measured, odrv0.vbus_voltage, odrv0.axis0.encoder.vel_estimate, max_vel))
            sys.exit()
        time.sleep(1)
except KeyboardInterrupt:
    # Safe shutdown: zero current, request IDLE (commands repeated,
    # presumably to make sure they take — confirm if intentional).
    odrv0.axis0.controller.current_setpoint = 0.0
    odrv0.axis0.controller.current_setpoint = 0.0
    odrv0.axis0.requested_state = 1
    odrv0.axis0.requested_state = 1
    time.sleep(1)
    sys.exit()
# time.sleep(100)
# This is a current-based hard block homing routine.
# Drive until the axis visibly moved (max_vel > 100) and then stalls
# (vel < 10), which is taken as contact with the mechanical end stop.
while True:
    print("current setpoint {}, voltage: {}, Iq_measured {}, bus voltage {}, vel estimate {}, max_vel {}.".format(odrv0.axis0.controller.current_setpoint, ctrl.final_v_alpha, ctrl.Iq_measured, odrv0.vbus_voltage, odrv0.axis0.encoder.vel_estimate, max_vel))
    # if reverse:
    #     if odrv0.axis0.encoder.pos_estimate - reverse_position > 400 or time.time() - reverse_start_time > reverse_duration_allowed_sec:
    #         reverse = False
    #         odrv0.axis0.controller.current_setpoint *= -1
    #         max_vel = 0
    #         start_time = time.time()
    #         print(odrv0.axis0.encoder.pos_estimate)
    #         print(odrv0.axis0.encoder.pos_estimate)
    # else:
    if abs(odrv0.axis0.encoder.vel_estimate) > max_vel:
        max_vel = abs(odrv0.axis0.encoder.vel_estimate)
    if max_vel > 100 and abs(odrv0.axis0.encoder.vel_estimate) < 10:
        print("Contact")
        time.sleep(10)
        break
    # Not moving fast enough after 2 s: increase the drive current 10%.
    if max_vel < 400 and time.time()-start_time > 2.0:
        odrv0.axis0.controller.current_setpoint *= 1.1
        #reverse = True
        reverse_start_time = time.time()
        reverse_position = odrv0.axis0.encoder.pos_estimate
    #if max_vel >
    time.sleep(0.1)
print(dump_errors(odrv0,True))
print(odrv0.axis0.encoder.pos_estimate)
print(odrv0.axis0.encoder.pos_estimate)
print(odrv0.axis0.encoder.pos_estimate)
# With the end stop found, locate the encoder index pulse.
odrv0.axis0.requested_state = AXIS_STATE_ENCODER_INDEX_SEARCH
idle_wait()
print("Index Search complete")
print(dump_errors(odrv0,True))
print(odrv0.axis0.encoder.pos_estimate)
print(odrv0.axis0.encoder.pos_estimate)
print("Bus voltage is " + str(odrv0.vbus_voltage) + "V")
timer = time.time()
time.sleep(2)
# Position-control exercise: step the setpoint across
# [base_position - offset, base_position + offset] in `steps` increments.
if True:
    # Re-tune encoder filter and velocity loop for position control.
    odrv0.axis0.encoder.config.bandwidth = 100
    odrv0.axis0.controller.config.vel_ramp_rate = 30
    odrv0.axis0.controller.config.vel_gain = -0.03
    odrv0.axis0.controller.config.vel_integrator_gain = 0
    odrv0.axis0.controller.vel_integrator_current = 0
    # Drop to idle before switching control mode, then re-enter closed loop.
    odrv0.axis0.requested_state = AXIS_STATE_IDLE
    odrv0.axis0.controller.config.control_mode = CTRL_MODE_POSITION_CONTROL
    odrv0.axis0.requested_state = AXIS_STATE_CLOSED_LOOP_CONTROL
    odrv0.axis0.controller.config.pos_gain = 2
    odrv0.axis0.controller.vel_ramp_enable = True
    odrv0.axis0.controller.config.vel_ramp_rate = 1
    odrv0.axis0.motor.config.current_lim = 3.0
    odrv0.axis0.controller.config.vel_limit_tolerance = 2.5
    odrv0.axis0.controller.config.vel_limit = 200
    # Trapezoidal trajectory limits used by move_to_pos() below.
    odrv0.axis0.trap_traj.config.vel_limit = 200
    odrv0.axis0.trap_traj.config.accel_limit = 50
    odrv0.axis0.trap_traj.config.decel_limit = 50
    odrv0.axis0.trap_traj.config.A_per_css = 0
    #base_position = 1130/2
    base_position = 0
    offset = 400
    odrv0.axis0.controller.pos_setpoint = base_position
    # NOTE(review): `min` shadows the builtin min() for the rest of the script.
    min = base_position - offset
    count = 0
    steps = 6
    #sys.exit()
    # Every 0.5 s move to the next of `steps` evenly spaced stops; wrap
    # around after the last stop. Exit (with a dump) on any axis error.
    while True:
        print("For setpoint {}, estimate is {}, current: {}, bus_voltage {}".format(odrv0.axis0.controller.pos_setpoint, odrv0.axis0.encoder.pos_estimate, ctrl.Iq_measured, odrv0.vbus_voltage))
        #odrv0.axis0.encoder.pos_estimate
        #print(dump_errors(odrv0))
        time.sleep(0.1)
        if time.time() - timer > 0.5:
            timer = time.time()
            count += 1
            #offset *= -1
            #odrv0.axis0.controller.move_to_pos(base_position + offset)
            if count > steps:
                count = 0
            odrv0.axis0.controller.move_to_pos(min + (2*offset)*count/steps)
            if count == 0:
                # Linger an extra second at the start-of-sweep position.
                timer += 1.0
                #time.sleep(1)
        if odrv0.axis0.error:
            print(dump_errors(odrv0,True))
            print("Gate Driver: {}".format(odrv0.axis0.motor.gate_driver))
            print("For setpoint {}, estimate is {}, current: {}, bus_voltage {}".format(odrv0.axis0.controller.pos_setpoint, odrv0.axis0.encoder.pos_estimate, ctrl.Iq_measured, odrv0.vbus_voltage))
            sys.exit()
|
"""merge 10-fold result"""
import codecs
lines = {}
for i in xrange(10):
with codecs.open('test_mapped_' + str(i) + '.csv', 'r', 'utf8') as reader:
line_num = 0
for line in reader:
lines[line_num * 10 + i] = line
line_num += 1
with codecs.open('test_mapped.csv', 'w', 'utf8') as writer:
for key in sorted(lines.keys()):
writer.write(lines[key])
|
# Scratch snippet: print the attribute/method names of an int instance.
a = 'hello'
b = 1
print(dir(b))
|
from django.urls import path
from . import views
# Namespace for {% url 'register:...' %} reversing.
app_name = 'register'

# Routes for the register app.
urlpatterns = [
    path('index', views.index, name='index'),
    path('submit', views.submitDetails, name='submit'),
]
|
import Queue
import time
from rabbit_consumer import RabbitConsumerProc, RabbitConsumerThread
from abstract_bot import AbstractBot
from vibebot.ConsumerCallback import ConsumerCallback
class EventBot(AbstractBot):
    """Bot that fans RabbitMQ exchange messages out to callbacks.

    One consumer (thread or process, per ``self.rabbit_klass``) is created
    for each (exchange, consumer slot); consumers that report internal
    errors are joined and replaced. Python 2 code (``Queue`` module).
    """
    def __build_consumers__(self, exchange_callbacks):
        """Create one rabbit consumer per requested slot.

        Returns {consumer_id: consumer}. Exits the process when no
        callbacks are configured.
        """
        self.logger.info("Event bot created v0.0.11")
        if len(exchange_callbacks) == 0:
            self.logger.error("No callbacks declared, exiting EventBot")
            exit()
        consumers = {}
        consumer_id_ctr = 0
        self.consumers_callbacks = {}
        # Consumers push their id here when they hit an internal error;
        # __start_consumers__ polls this queue and restarts them.
        self.internal_error_queue = Queue.Queue()
        self.rabbit_user = self.config.get('rabbit', 'RABBIT_USER')
        self.rabbit_pw = self.config.get('rabbit', 'RABBIT_PW')
        self.rabbit_host = self.config.get('rabbit', 'RABBIT_HOST')
        self.rabbit_port = int(self.config.get('rabbit', 'RABBIT_PORT'))
        self.stopping = False
        for exchange_callback in exchange_callbacks:
            for consumerCount in range(0, exchange_callback.consumer_count):
                self.logger.info("Creating exchange callback for exchange: %s with consumer id: %s", exchange_callback.exchange, consumer_id_ctr)
                consumer_callback = ConsumerCallback(consumer_id_ctr, exchange_callback.exchange, exchange_callback.callback_func)
                thread = self.rabbit_klass(self.bot_id, exchange_callback.exchange, self.callback_wrapper(consumer_callback), self.rabbit_user, self.rabbit_pw,
                         self.rabbit_host, self.rabbit_port, consumer_id_ctr, self.internal_error_queue, self.statsd)
                consumers[consumer_id_ctr] = thread
                self.consumers_callbacks[consumer_id_ctr] = consumer_callback
                consumer_id_ctr += 1
        self.logger.info("Event bot created!")
        return consumers
    def __start_consumers__(self):
        """Start every consumer, then supervise: poll the error queue every
        2 s and replace any consumer that reported a failure."""
        for thread in self.consumers.values():
            thread.start()
        while not self.stopping:
            time.sleep(2)
            while not self.internal_error_queue.empty():
                consumer_id = self.internal_error_queue.get()
                self.logger.warn("Internal error detected restarting consumer:" + str(consumer_id))
                thread = self.consumers[consumer_id]
                callback = self.consumers_callbacks[consumer_id]
                # Exception thrown so this threadess should be dead - call join
                thread.join()
                new_thread = self.rabbit_klass(self.bot_id, callback.exchange, self.callback_wrapper(callback), self.rabbit_user,
                             self.rabbit_pw, self.rabbit_host, self.rabbit_port, consumer_id, self.internal_error_queue, self.statsd)
                self.consumers[consumer_id] = new_thread
                self.statsd.incr(callback.exchange + "." + 'callback.restart')
                new_thread.start()
    def __stop_consumers__(self):
        """Signal the supervision loop to exit and stop every consumer."""
        self.stopping = True
        for thread in self.consumers.values():
            # thread.stop() # gonna need this for threads
            thread.total_stop()
            # if thread.is_alive():
            #     self.logger("threadess '{}' not finished after 10secs, forcibly terminating", thread.consumer_id)
            #     thread.terminate()
    def callback_wrapper(self, consumer_callback):
        """Wrap a ConsumerCallback so the rabbit layer receives a plain
        one-argument function of the message payload."""
        def _inner(json):
            ret = consumer_callback.timed_callback_execution(json)
            return ret
        return _inner
import subprocess, argparse

# Small driver script: restore NuGet packages for a Visual Studio solution.
parser = argparse.ArgumentParser(description='A script for running NuGet.')
parser.add_argument('config', help='Path to NuGet config file.', type=str)
parser.add_argument('slndir', help='Path to VS Solution Dir.', type=str)
args = parser.parse_args()
print("Installing NuGet packages...")
# Pass the command as an argument list instead of a concatenated string:
# paths containing spaces are then forwarded intact, and no shell is involved.
subprocess.run(["./nuget.exe", "install",
                "-ConfigFile", args.config,
                "-SolutionDirectory", args.slndir])
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Test Twitter developer keys.
Usage: python validate_twitter_keys.py
Input data files: ../conf/developer.key
"""
import json
from tweepy import OAuthHandler
from tweepy import Stream
from tweepy.streaming import StreamListener
from multiprocessing import Queue
class StdOutListener(StreamListener):
    """ A listener handles tweets that are received from the stream.
    This is a basic listener that just prints whether the key worked.
    """
    def __init__(self, name):
        StreamListener.__init__(self)
        # Label used in the success/failure message.
        self.name = name
    def on_data(self, data):
        # One payload is enough to judge the credentials; returning False
        # tells tweepy to disconnect the stream either way.
        json_data = json.loads(data)
        if 'id' in json_data:
            print('key {0} is working!'.format(self.name))
        else:
            print('key {0} is NOT working!'.format(self.name))
        return False
def start_streaming(crawler_conf):
    """Open a filtered stream with one credential set.

    `crawler_conf` holds app_name, consumer_key/secret and
    access_token/secret. Returns once the listener has handled a single
    message (on_data returns False) or tweepy raises.
    """
    listener = StdOutListener(name=crawler_conf['app_name'])
    auth = OAuthHandler(crawler_conf['consumer_key'], crawler_conf['consumer_secret'])
    auth.set_access_token(crawler_conf['access_token'], crawler_conf['access_secret'])
    stream = Stream(auth, listener)
    stream.filter(track=["youtube", "youtu be"])
if __name__ == '__main__':
    # Load the credential sets ({"key0": {...}, "key1": {...}, ...}) and
    # validate each one in turn; the listener stops each stream after its
    # first message, so the sequential loop terminates.
    # Removed: an unused process list and multiprocessing Queue left over
    # from an abandoned parallel design.
    with open('../conf/developer.key', 'r') as fin:
        key_dict = json.load(fin)
    for i in range(len(key_dict)):
        start_streaming(key_dict['key{0}'.format(i)])
|
import pygame as pg
import os
from sprites import *
import animation as ani
from settings import Settings
class Game:
    """Skeleton pygame application.

    Exercise template: the `...` placeholders are intentionally left for
    the student to fill in (comments translated from French).
    """
    def __init__(self):
        pg.init()
        self.screen = ... # Create a pygame screen of size Settings.WIDTH x Settings.HEIGHT
        ... # Give the window the title Settings.TITLE
        self.clock = ... # A Clock object
        self.running = ... # The game should be running
        self.bgcolor = ... # Black
        self.LoadData()
    def LoadData(self):
        # Load the sprite sheet from <module dir>/data/.
        self.dir = os.path.dirname(__file__)
        data_dir = os.path.join(self.dir, 'data')
        self.spritesheet = ani.Spritesheet(os.path.join(data_dir, Settings.SPRITESHEET))
    def Launch(self):
        """Build the sprite groups, both players and the enemies, then run."""
        self.all_sprites = pg.sprite.Group() # A group of sprites
        self.ennemies = ... # Another one (for the enemies)
        player1_stand = [ani.Frame((614, 1063, 120, 191))]
        player2_stand = [ani.Frame((581, 1265, 121, 191))]
        player1_animation = ani.Animation(self.spritesheet, player1_stand)
        player2_animation = ani.Animation(self.spritesheet, player2_stand)
        player1_animator = ani.Animator(player1_animation)
        player2_animator = ani.Animator(player2_animation)
        self.player1 = Player1(player1_animator)
        self.player2 = Player2(player2_animator)
        self.all_sprites.add(self.player1)
        self.all_sprites.add(self.player2)
        for i in range(10):
            ennemi_stand = [ani.Frame((568, 1671, 122, 139))]
            ennemi_animation = ani.Animation(self.spritesheet, ennemi_stand)
            ennemi_animator = ani.Animator(ennemi_animation)
            ennemi = Ennemi(ennemi_animator, 50, 10, 10, self.player1)
            self.all_sprites.add(ennemi)
            # NOTE(review): self.ennemies is still Ellipsis and is being
            # *called* here; once it is a Group this should presumably be
            # self.ennemies.add(ennemi) -- confirm.
            self.ennemies(ennemi)
        self.Run()
    def Run(self):
        # Fixed-FPS main loop: events -> update -> draw.
        while self.running:
            self.clock.tick(Settings.FPS)
            self.Events()
            self.Update()
            self.Draw()
    def Events(self):
        # Only the window-close event is handled for now.
        for ev in pg.event.get():
            if ev.type == pg.QUIT:
                self.running = False
    def Update(self):
        self.all_sprites.update()
    def Draw(self):
        self.screen.fill(self.bgcolor)
        self.all_sprites.draw(self.screen)
        #######
        pg.display.flip()
# Instantiate and start the game; shut pygame down when Launch() returns.
g = Game()
g.Launch()
pg.quit()
|
import json
import os
import re
import base64
from urllib import parse
from common.common_util import replace_string
def get_cases(path):
    """Parse a HAR capture at *path* into a list of test-step dicts.

    Each step records the HTTP method, URI, numeric path params, query
    string, request body, owning server, resolved API method name and the
    flattened response assertions (see parse_res()).
    """
    with open(path, 'r', encoding='utf-8') as file:
        case_content_json = json.loads(file.read())
        entries = case_content_json.get("log").get("entries")
    step_lists = []
    for entry in entries:
        request_data = entry.get("request")
        # print(request_data)
        # HAR stores the response body base64-encoded; fall back to {}
        # on anything undecodable.
        try:
            response_data = json.loads(base64.b64decode(entry.get("response", {}).get("content", {}).get("text", "{}")).decode())
        except Exception:
            response_data = {}
        url_obj = parse.urlsplit(request_data.get("url"))
        request_method = request_data.get("method")
        post_data = request_data.get("postData")
        uri = url_obj.path
        # First path segment is the server name; the rest names the method.
        server_name, method_name = uri[1:].replace("-", "_").split("/", 1)
        step_dict = {
            "method": request_method,
            "uri": uri,
            "path_params": get_path_params(uri),
            "params": url_obj.query,
            "body": get_body(post_data),
            "server_name": server_name,
            "method_name": get_method_name(method_name.replace("/", "_") + "_" + request_method.lower()),
            "assert_data": parse_res(response_data, "$")
        }
        step_lists.append(step_dict)
    return step_lists
def get_body(body):
    """Extract the request payload from a HAR postData mapping.

    Returns the parsed JSON when the text is valid JSON, the raw text
    otherwise; a falsy *body* is passed straight through.
    """
    if not body:
        return body
    raw_text = body.get("text")
    try:
        return json.loads(raw_text)
    except Exception:
        return raw_text
def get_method_name(url: str):
    """Resolve a URL-derived name against the known API method names.

    Numeric path segments (ids) are turned into a wildcard pattern and
    matched against the module-level `current_method` list; the first hit
    wins. Names without digits (or with no match) are returned unchanged.
    """
    if not re.match(r".*\d+", url):
        return url
    pattern = re.sub(r"_\d+", "_.+?", url)
    candidates = [name for name in current_method if re.match(pattern, name)]
    return candidates[0] if candidates else url
def get_path_params(url: str):
    """Collect the numeric path segments of *url* into a trailing-comma
    argument string, e.g. "/user/123/item/45" -> "123, 45, "."""
    numeric_segments = [seg for seg in re.findall(r"/(\d+)?", url) if seg]
    return "".join(f"{seg}, " for seg in numeric_segments)
def parse_res(res, path):
    """Flatten a JSON-like response into assertion descriptors.

    Walks dicts recursively (lists are sampled at index 0 only) and emits
    one {"variable_name", "variable_value", "variable_path"} dict per
    scalar leaf, with *path* as the jsonpath prefix. Non-dict input
    yields an empty list.
    """
    collected = []
    if isinstance(res, dict):
        for key, value in res.items():
            child_path = path + "." + str(key)
            if isinstance(value, dict):
                if value:
                    collected.extend(parse_res(value, child_path))
            elif isinstance(value, list):
                if value:
                    collected.extend(parse_res(value[0], child_path + "[0]"))
            else:
                collected.append({
                    "variable_name": key,
                    "variable_value": value,
                    "variable_path": child_path,
                })
    return collected
def get_current_method():
    """List the names exported by script.base_api whose suffix is an HTTP verb."""
    from script import base_api
    http_verbs = ["get", "post", "patch", "head", "delete", "put"]
    return [name for name in dir(base_api) if name.split("_")[-1] in http_verbs]
def create_file(step_list, is_assert=False):
    """Render *step_list* into a runnable pytest module at ./temp.py.

    One import line per distinct server, one step block per request and,
    when *is_assert* is set, one jsonpath assertion per recorded response
    field. Templates and replace_string() do the substitution.
    """
    current_path = os.path.join(os.getcwd(), "temp.py")
    # Deduplicate the servers referenced by the steps for the import block.
    server_names = list(set([x.get("server_name") for x in step_list]))
    import_datas = []
    for server_name in server_names:
        import_datas.append(f"from script.base_api.{server_name} import *")
    import_datas = replace_string(import_template, {"import_datas": "\r\n".join(import_datas)})
    step_str_list = []
    for step in step_list:
        assert_str_list = []
        if is_assert:
            assert_datas = step.get("assert_data")
            for assert_data in assert_datas:
                assert_str_list.append(replace_string(assert_template, assert_data))
        # Rendered assertions (possibly empty) replace the raw assert data.
        step["assert_data"] = "".join(assert_str_list)
        step_str_list.append(replace_string(step_template, step))
    case_data = replace_string(case_template, {"steps": "".join(step_str_list)})
    with open(current_path, "w", encoding="utf-8") as file:
        file.write(import_datas)
        file.write(case_data)
case_template = """
@allure.feature("待修改")
@allure.testcase("待修改")
class AutoCreate:
@allure.story("待修改")
def test_auto_create(self):
${steps}
"""
step_template = """
params = "${params}"
body = ${body}
res = ${method_name}(${path_params}params, body=body)
${assert_data}
"""
assert_template = """
${variable_name} = jsonpath.jsonpath(res, "${variable_path}")[0] if jsonpath.jsonpath(res, "${variable_path}") else None
pytest.assume(${variable_name} == "${variable_value}")
"""
import_template = """
import pytest
import allure
import jsonpath
${import_datas}
"""
if __name__ == '__main__':
file_path = "/Users/huangqiang/Desktop/order.har"
current_method = get_current_method()
create_file(get_cases(file_path), is_assert=False)
|
########################################################################################################################
# LDAP Authentication Settings
########################################################################################################################
import ldap
import os
from django_auth_ldap.config import LDAPSearch, GroupOfNamesType
LDAP_SERVER = 'ipa.hqvfx.auth'
AUTH_LDAP_SERVER_URI = 'ldap://' + LDAP_SERVER
# Service account used for the initial bind; the password comes from the
# environment so it never lands in source control.
AUTH_LDAP_BIND_DN = 'uid=admin,cn=users,cn=accounts,dc=hqvfx,dc=auth'
AUTH_LDAP_BIND_PASSWORD = os.environ.get('MY_PASS')
# Direct DN template: no search is needed to locate the user entry.
AUTH_LDAP_USER_DN_TEMPLATE = 'uid=%(user)s,cn=users,cn=accounts,dc=hqvfx,dc=auth'
# LDAP attribute -> Django User field mapping.
AUTH_LDAP_USER_ATTR_MAP = {
    'first_name': 'givenName',
    'last_name': 'sn',
    'email': 'mail'
}
AUTH_LDAP_GROUP_BASE = "cn=groups,cn=accounts,dc=hqvfx,dc=auth"
AUTH_LDAP_GROUP_FILTER = "(objectClass=groupOfNames)"
AUTH_LDAP_GROUP_SEARCH = LDAPSearch(AUTH_LDAP_GROUP_BASE,
                                    ldap.SCOPE_SUBTREE, AUTH_LDAP_GROUP_FILTER)
AUTH_LDAP_GROUP_TYPE = GroupOfNamesType(name_attr="cn")
# NOTE(review): all three flags (including is_superuser) are granted by the
# same catch-all 'ipausers' group, so every LDAP user becomes a superuser —
# confirm this is intended.
AUTH_LDAP_USER_FLAGS_BY_GROUP = {
    'is_staff': 'cn=ipausers,' + AUTH_LDAP_GROUP_BASE,
    'is_support': 'cn=ipausers,' + AUTH_LDAP_GROUP_BASE,
    'is_superuser': 'cn=ipausers,' + AUTH_LDAP_GROUP_BASE,
}
# Try LDAP first, then fall back to Django's local user table.
AUTHENTICATION_BACKENDS = (
    'django_auth_ldap.backend.LDAPBackend',
    'django.contrib.auth.backends.ModelBackend',
)
|
# -*- encoding: utf-8 -*-
# Livejournal toolkit, includes some simple caching.
import re
import urllib2
from urllib import urlencode, addinfourl
import httplib
import logging
from StringIO import StringIO
import xml.etree.ElementTree as ElementTree
from django.core.cache import cache
from django.conf import settings
# LiveJournal does not support Last-Modified/If-Modified-Since validators.
# It also does not provide any timeouts with Cache-Control/Expires.
# But some pages support Etag/If-None-Match so I implement it and only it.
# By the way, FOAF (the heaviest page) does not support Etag on 01 Feb 2009
class CachedPage:
    """Snapshot of a fetched page (URL, response headers, body) that can be
    stored in the Django cache and replayed as a file-like response.
    (Python 2: urllib's addinfourl / StringIO.)"""
    def __init__(self, url, headers, data):
        self.url = url
        self.headers = headers
        self.data = data
    def mkaddinfourl(self):
        # Rebuild a urllib2-style response object from the cached body.
        return addinfourl(StringIO(self.data), self.headers, self.url)
def cached_urlopen(req, *args, **kwargs):
    """GET *req* through the Django cache, honouring ETag/If-None-Match.

    Pages with an ETag are cached for 15 min and revalidated with
    If-None-Match; pages without one are cached unconditionally for 3 min.
    Accepts a URL string or a urllib2.Request (GET only).
    Python 2 code: urllib2, basestring, old `except X, e` syntax.
    """
    PREFIX = 'webcache:'
    if isinstance(req, basestring):
        url = req
        req = urllib2.Request(url)
    else:
        assert req.get_method() == 'GET'
        url = req.get_full_url()
    # FIXME: replace quick hack with something better
    req.add_header('User-Agent', settings.USER_AGENT)
    cacheaddr = PREFIX + url
    page = cache.get(cacheaddr)
    # Fetch when there is no cached copy, or revalidate when one carries
    # an ETag; only an ETag-less cached page is served without a request.
    if not page or page.headers.get('ETag'):
        if page and page.headers.get('ETag'):
            req.add_header('If-None-Match', page.headers.get('ETag'))
        try:
            # FIXME: this should be rather done with urllib2 opener, but
            # I don't want to dive into openers right now, see
            # <http://diveintopython.org/http_web_services/etags.html>
            # if you want to.
            fd = urllib2.urlopen(req, *args, **kwargs)
            # we're here if page was either modified or not cached
            timeout = 15*60 if fd.headers.get('ETag') else 3*60
            page = CachedPage(url, dict(fd.headers.items()), fd.read())
            cache.set(cacheaddr, page, timeout)
            return page.mkaddinfourl()
        except urllib2.HTTPError, e:
            if e.code == httplib.NOT_MODIFIED:
                # Should cache be refreshed? I don't know. Let it be so.
                cache.set(cacheaddr, page, 15*60)
                return page.mkaddinfourl()
            else:
                raise
    else:
        return page.mkaddinfourl()
def get_fdata(user):
    """Fetch and parse LJ fdata for *user*.

    Returns dict{'friends': set(...), 'fans': set(...)}."""
    fd = cached_urlopen('http://www.livejournal.com/misc/fdata.bml?' + urlencode({'user': user}))
    return parse_fdata(fd, user)
def parse_fdata(fd, user=None):
    """Parse LiveJournal fdata lines from iterable *fd*.

    Lines starting with '> ' name friends, '< ' name fans, '#' lines are
    comments and blank lines are ignored; anything else is logged.
    Returns {'friends': set, 'fans': set}. *user* only labels warnings.
    """
    friends = set()
    fans = set()
    for raw_line in fd:
        entry = raw_line.rstrip()
        if not entry or entry.startswith('#'):
            continue
        if entry.startswith('> '):
            friends.add(entry[2:])
        elif entry.startswith('< '):
            fans.add(entry[2:])
        else:
            logging.warning('LJ: unknown line in <%s>\'s fdata: <%s>' % (user, entry))
    return {'friends': friends, 'fans': fans}
def baseurl(user):
    """Journal root URL for *user* (underscores become hyphens in the host)."""
    return 'http://' + dnsize(user) + '.livejournal.com'
def baseurl_comm(comm):
    """Root URL of the community journal *comm*."""
    return 'http://community.livejournal.com/{0}'.format(comm)
def dnsize(user):
    """Convert an LJ username to its DNS-friendly form ('_' -> '-')."""
    return '-'.join(user.split('_'))
class LJUser:
    """Lightweight record for a LiveJournal user pulled from FOAF data."""
    def __init__(self, nick, member_name, tagLine, image=None):
        # Parameter names match the FOAF tag names — parse_foaf() builds
        # instances with LJUser(**kwargs) — so they must not be renamed.
        self.login = nick
        self.name = member_name
        self.journal_name = tagLine
        self.avatar = image
    @property
    def url(self):
        # Journal root, e.g. http://some-user.livejournal.com
        return baseurl(self.login)
def get_foaf(user, include_myself=False):
    """
    Returns list of friends of `user' as a dict.
    Keys are usernames, values are `LJUser' instances.
    """
    # Guard against URL injection: a login is alphanumeric plus underscores.
    assert user.replace('_', '').isalnum()
    url = baseurl(user) + '/data/foaf'
    fd = cached_urlopen(url)
    return parse_foaf(fd, include_myself)
def parse_foaf(fd, include_myself=False):
    """
    Parses LJ Friend-Of-A-Friend file.
    You should not rely on its information: sometimes it's empty when
    user has ~100 friends, sometimes it's limited to ~970 items when
    user has ~1500..2000 friends.
    Returns dict{nick -> LJUser}.
    """
    # Fully qualified XML namespaces used in the FOAF document.
    ns = {'foaf': '{http://xmlns.com/foaf/0.1/}',
          'rdf': '{http://www.w3.org/1999/02/22-rdf-syntax-ns#}'}
    retval = {}
    tree = ElementTree.parse(fd)
    for el in tree.findall('/%(foaf)sPerson/%(foaf)sknows/%(foaf)sPerson' % ns):
        kwargs = {}
        # Tag names double as LJUser keyword-argument names.
        for tag in ('nick', 'member_name', 'tagLine', 'image'):
            kwargs[tag] = el.findtext('%s%s' % (ns['foaf'], tag))
        retval[kwargs['nick']] = LJUser(**kwargs)
    if include_myself:
        # The owner is the top-level Person; FOAF gives no tagLine for them.
        el = tree.find('/%(foaf)sPerson' % ns)
        kwargs = {'nick': el.findtext('%(foaf)snick' % ns),
                  'member_name': el.findtext('%(foaf)sname' % ns),
                  'tagLine': u'N/A (собственный блог)'}
        image = el.find('%(foaf)simg' % ns)
        if image is not None:
            # XXX: maybe, Python 2.5 bug, '{rdf}:resource' does not work
            # but it is expected to work as far as I see
            kwargs['image'] = image.get('%(rdf)sresource' % ns)
        retval[kwargs['nick']] = LJUser(**kwargs)
    return retval
class OPMLOutline:
    """One OPML <outline> entry: display text, feed URL, page URL, LJ login."""
    def __init__(self, text, xmlURL, htmlURL, login):
        self.text, self.xmlURL = text, xmlURL
        self.htmlURL, self.login = htmlURL, login
def get_opml(user):
    """
    Returns dict{'communities': dict{ljuser -> `OPMLOutline'},
                 'users': dict{ljuser -> `OPMLOutline'}}
    Only entries with URLs are returned.
    Livejournal does not export friends-groups due to privacy reasons.
    """
    fd = cached_urlopen('http://www.livejournal.com/tools/opml.bml?' + urlencode({'user': user}))
    return parse_opml(fd, user)
def parse_opml(fd, user=None):
    """Parse a LiveJournal OPML export into communities and users.

    Returns {'communities': {login: OPMLOutline}, 'users': {...}}.
    Python 2 code: the `ur''` raw-unicode literals below are a syntax
    error on Python 3.
    """
    retval = {'communities': {}, 'users': {}}
    # LiveJournal can produce OPML file that is not UTF-8/strict cutting
    # feed description at the middle of character. This hack fixes it.
    content = fd.read().decode('utf-8', 'replace')
    content, errors = re.subn(ur'(\btext="[^"]*\ufffd) ', ur'\1" ', content)
    if errors:
        logging.warning("LJ: user <%s> has bad opml file, %i errors fixed."
                        % (user, errors))
    content = content.encode('utf-8')
    tree = ElementTree.XML(content)
    for el in tree.findall('.//outline'):
        xmlURL = el.get('xmlURL', None)
        text = el.get('text', None)
        if xmlURL is not None and text is not None:
            # Classify the feed URL: community form, users.* form, or a
            # personal subdomain. group(1) is the journal URL, group(2)
            # the login (hyphens mapped back to underscores).
            regexp_list = (r'(http://community.livejournal.com/([_0-9a-zA-Z]+))/data',
                           r'(http://users.livejournal.com/([_0-9a-zA-Z]+))/data',
                           r'(http://([-0-9a-zA-Z]+).livejournal.com)/data')
            for i, regexp in enumerate(regexp_list):
                m = re.match(regexp, xmlURL)
                if m:
                    sort = 'communities' if i == 0 else 'users'
                    htmlURL, login = m.group(1), m.group(2).replace('-', '_')
                    retval[sort][login] = OPMLOutline(text, xmlURL, htmlURL, login)
    return retval
def is_valid_login(user):
    """A login is valid when, underscores aside, it is purely alphanumeric."""
    return ''.join(user.split('_')).isalnum()
def is_valid_password(password):
    """Truthy when *password* survives str() unchanged.

    Python 2 semantics: str() of a non-ASCII unicode raises
    UnicodeEncodeError -> False. Note the empty password returns ''
    (falsy) rather than False.
    """
    # password is 7-bit, there are more validation rules at
    # http://www.livejournal.com/support/faqbrowse.bml?faqid=71
    try:
        p = str(password)
    except UnicodeEncodeError:
        return False
    return p and p == password
# vim:set tabstop=4 softtabstop=4 shiftwidth=4:
# vim:set expandtab:
|
import random
class Creat(object):
    """Random arithmetic-expression generator.

    Builds nested expressions such as "(3 + 5) * 2 ÷ 7" by recursive
    construction, then filters out any that evaluate negative or divide
    by zero. (Comments translated from Chinese.)
    """
    def __init__(self, max_num: int, formula_num: int):
        self.max_num = max_num  # upper bound for each random operand
        self.formula_num = formula_num  # maximum number of formulas
        self.level = random.randint(2, 4)  # total recursion depth
        self.start_level = 0  # recursion level reached so far
        self.first_level = random.randint(1, self.level - 1)  # depth of the first sub-expression
        self.second_level = self.level - self.first_level  # depth of the second sub-expression
        self.operator = {
            1: '+',
            2: '-',
            3: '*',
            4: '÷',
        }
    def creator(self) -> str:
        # Assemble "left op right" from two recursive sub-expressions.
        math_op = '{}{}{}{}{}'.format(
            self.__creat_math_op(self.first_level),  # random sub-expression
            ' ',
            self.operator[random.randint(1, 4)],  # randomly chosen operator
            ' ',
            self.__creat_math_op(self.second_level),  # 'xx +|-|*|/ xx'
        )
        return math_op
    def __creat_math_op(self, level_choice: int) -> str:
        # Note: the base case returns an int, not a str; str.format()
        # converts it when the parent expression is assembled.
        random_num = random.randint(0, 1)
        self.start_level += 1
        if self.start_level == level_choice:
            self.start_level = 0
            return random.randint(0, self.max_num)
        math_op = '{}{}{}{}{}{}{}'.format(
            self.__brackets(random_num, 0),  # '(' (maybe)
            random.randint(0, self.max_num),  # random operand
            ' ',
            self.operator[random.randint(1, 4)],  # randomly chosen operator
            ' ',
            self.__creat_math_op(level_choice),  # 'xx +|-|*|/ xx'
            self.__brackets(random_num, 1),  # ')' (maybe)
        )
        self.start_level = 0
        return math_op
    def num_choice(self):  # choose natural number / proper / improper fraction (unimplemented)
        pass
    def __brackets(self, random_num: int, choice: int) -> str:  # decide whether to emit a bracket
        if random_num:
            if choice:
                return ')'
            else:
                return '('
        return ''
    @property
    def math_op(self) -> str:  # convenience property: one fresh formula
        return self.creator()
    def creat_more(self):  # generator yielding up to formula_num - 1 validated formulas
        op_num = 1
        while op_num < self.formula_num:
            math_op = self.creator()
            if self.__check_math_op(math_op):
                yield math_op
                op_num += 1
            self.__init_variable()
    def __init_variable(self):  # re-randomise recursion depths for the next formula
        self.start_level = 0
        self.level = random.randint(2, 4)
        self.first_level = random.randint(1, self.level - 1)
        self.second_level = self.level - self.first_level
    def __check_math_op(self, math_op: str) -> bool:  # reject division by zero and negative results
        # eval() is acceptable only because math_op is generated locally,
        # never taken from user input.
        new_op = math_op.replace('÷', '/')
        try:
            if eval(new_op) < 0:
                return False
            else:
                return True
        except ZeroDivisionError:
            return False
    def __repr__(self):
        return f'Creat(max_num={self.max_num}, formula_num={self.formula_num})'
if __name__ == '__main__':
    # Emit up to 9 random formulas with operands in [0, 50], then one more
    # (unvalidated) via the math_op property.
    t = Creat(50, 10)
    for i in t.creat_more():
        print(i)
    print(t.math_op)
|
# Python Text RPG
# Underwhelmed Ape
import cmd # help use command line
import textwrap # wrap text around the console for overflow
import sys
import os
import random # generate pseudo-random numbers
import math
from collections import OrderedDict
from functools import partial
from title_screens import title_screen, help_menu, about_menu, title_screen_selections
from game_setup import setup_game
from game_visuals import narrate
from player import Player
import world
screen_width = 100
player = Player()
## MAP ##
# CREATING THE WORLD MAP
# |FWal|	|FWal|.pot|shop|
# FWal|fire|FWal|fire|DA-1|DA-2|Grin
# Stat|Home|Perk|FWal|AWal|KTA |Hag
# 	|Wall|	 |    |    |Vict|
#  00 | 10 | 20 | 30 | 40 | 50 |
#  01 | 11 | 21 | 31 | 41 | 51 | 61
#  02 | 12 | 22 | 32 | 42 | 52 | 62
#  03 | 13 | 23 | 33 | 43 | 53 |
# make shops with no move functionality. Player is transported back out after transaction
# AWal is alley wall, will change output according to player's location
# Hag is npc. Talking suggests that there is hidden magic to those who know how to find it
home = world.StartTile(1,2,player)
# Indexed as world_map[y][x]; None entries are impassable gaps that
# tile_at() reports as "no tile".
world_map = [
    [None, world.FireplaceWall(1,0), None, world.FireplaceWall(3,0), world.PopupPotions(4,0,player), None],
    [world.FireplaceWall(0,1), world.Fireplace(1,1,player), world.FireplaceWall(2,1), world.DiagonAlleyTop(3,1,player), world.DiagonAlleyBottom(4,1, player)],
    [world.MinistryStatue(0,2), home, world.MinistryPerkins(2,2), world.KnockturnAlley(3,2,player), None],
    [None, world.MinistryWall(1,3), None, world.SecretRoom(3,3,player), None]
]
def tile_at(world_map, x, y):
    """Return the tile at grid position (x, y), or None when off the map.

    Negative indices are rejected explicitly so they never wrap around to
    the other side of the grid.
    """
    if x < 0 or y < 0:
        return None
    row = world_map[y:y + 1]
    cell = row[0][x:x + 1] if row else []
    return cell[0] if cell else None
###### GAME FUNCTIONALITY ######
def play():
    """Run the game: title screen, setup, then the action loop until the
    player wins or dies."""
    title_screen()
    setup_game(player)
    while player.victory == False:
        if player.is_alive:
            room = tile_at(world_map, player.x, player.y)
            choose_action(room, player)
        else:
            # Death: clear the terminal, narrate, and quit.
            os.system('clear')
            narrate('You died, bad luck', 0.1)
            sys.exit()
###### GAME INTERACTIVITY ######
def player_move():
    """Prompt for a compass direction and move the player when allowed.

    Fixes an inconsistency in the original: only the north/south branches
    lower-cased the input, so 'West', 'EAST', etc. were silently ignored.
    Input is now normalised once and all four directions share one table
    instead of four copy-pasted branches.
    """
    ask = "Where would you like to go?\n"
    dest = input(ask).lower()
    print(f'current room is: {tile_at(world_map, player.x, player.y)}')
    # alias tuple -> (dx, dy, bound move method)
    directions = {
        ('up', 'north', 'n'): (0, -1, player.move_north),
        ('down', 'south', 's', 'd'): (0, 1, player.move_south),
        ('left', 'west', 'w'): (-1, 0, player.move_west),
        ('right', 'east', 'e'): (1, 0, player.move_east),
    }
    for aliases, (dx, dy, move) in directions.items():
        if dest in aliases:
            new_room = tile_at(world_map, player.x + dx, player.y + dy)
            if isinstance(new_room, world.MapTile):
                print('room is a MapTile')
                print(f'new room is: {new_room}')
                move()
            elif isinstance(new_room, world.BlockedTile):
                # Blocked: report and bail out without the final position print.
                print('room is a ClosedMapTile')
                print(new_room)
                return
            break
    print(f'new current room is: {tile_at(world_map, player.x, player.y)}')
def player_examine(room):
    """Report whether the puzzle at the player's location is already solved."""
    # NOTE(review): every other caller treats `room` as a world tile object,
    # but this indexes it like a dict keyed by player.location with a
    # 'SOLVED' flag — confirm which shape is intended.
    if room[player.location]['SOLVED'] == True:
        print("You have already completed this job")
    else:
        print("trigger puzzle here")
def action_adder(action_dict, visible_hotkey, hotkeys, action, name):
    """Register *action* under the lower- and upper-case variant of every
    hotkey, then echo the menu entry ("<visible_hotkey> -> <name>")."""
    for key in hotkeys:
        for variant in (key.lower(), key.upper()):
            action_dict[variant] = action
    print(f'{visible_hotkey} -> {name}')
def get_available_actions(room, player):
    '''adds the actions a player can make to a dict

    Global actions (quit/stats/inventory/move) come first, followed by the
    room-specific actions; printing the menu is a side effect of
    action_adder(). Returns an OrderedDict {hotkey: callable}.
    '''
    actions = OrderedDict()
    action_adder(actions, '(Q)uit', ['q', 'quit'], sys.exit, "Exit Game")
    action_adder(actions, '(S)tats', ['s', 'stats'], player.player_stats, "Show your stats")
    action_adder(actions, '(I)nventory', ['i', 'inventory'], player.print_inventory, "Show your inventory")
    action_adder(actions, '(M)ove', ['m', 'move'], player_move, "Move")
    for action in room.actions:
        # Bind the action's argument now (partial) when one is declared.
        action_adder(
            actions,
            action['visible_hotkey'],
            action['hotkeys'],
            action['action'] if action['args'] is None else partial(action['action'], action['args']),
            action['name']
        )
    return actions
def choose_action(room, player):
    ''' Prompt player to give action command

    Re-prints the menu and re-prompts until the input matches a registered
    hotkey, then invokes the matching callable once.
    '''
    print('\n' + '===============================================================')
    print('What would like to do?')
    action = None
    while not action:
        available_actions = get_available_actions(room, player)
        action_input = input('\n> ')
        action = available_actions.get(action_input)
        if action:
            action()
        else:
            print('Unknown action, please enter another.\n')
# Start the game as soon as the module is executed.
play()
|
# Copyright 2014 Google.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""X265 codec definitions.
This gives the definitions required to run the x265 implementation of
the HEVC codec.
"""
import encoder
import ffmpeg
class X265Codec(ffmpeg.FfmpegCodec):
  """Codec definition for the standalone x265 HEVC encoder.

  Encoding shells out to the x265 binary; decoding reuses the ffmpeg
  family's machinery with a reference-decoder command line (see
  DecodeCommandLine).
  """
  def __init__(self, name='x265'):
    # The x265 encoder uses default parameter delimiters, unlike
    # the ffmpeg family; we inherit the decoding process.
    super(X265Codec, self).__init__(name,
                                    formatter=encoder.OptionFormatter())
    self.extension = 'hevc'
    self.codecname = 'hevc'
    self.option_set = encoder.OptionSet()
  def StartEncoder(self, context):
    # Starting point for optimisation: an empty option value set.
    return encoder.Encoder(context, encoder.OptionValueSet(self.option_set,
                                                           ''))
  def ConfigurationFixups(self, config):
    # No fixups so far.
    return config
  def EncodeCommandLine(self, parameters, bitrate, videofile, encodedfile):
    """Build the x265 command line for one single-threaded encode run."""
    commandline = ('%(x265)s '
                   '--bitrate %(bitrate)d --fps %(framerate)d '
                   '--threads 1 '
                   '--input-res %(width)dx%(height)d '
                   '%(parameters)s '
                   '-o %(outputfile)s %(inputfile)s') % {
                     'x265': encoder.Tool('x265'),
                     'bitrate': bitrate,
                     'framerate': videofile.framerate,
                     'width': videofile.width,
                     'height': videofile.height,
                     'outputfile': encodedfile,
                     'inputfile': videofile.filename,
                     'parameters': parameters.ToString()}
    return commandline
  def DecodeCommandLine(self, videofile, encodedfile, yuvfile):
    # Because of a bug in the ffmpeg decoder, we're using the HM
    # reference decoder (TAppDecoder). ffmpeg sometimes produces a
    # decoded YUV file slightly shorter than the expected size.
    commandline = '%s -b %s -o %s' % (encoder.Tool('TAppDecoderStatic'),
                                      encodedfile,
                                      yuvfile)
    return commandline
|
"""
剑指 Offer 64. 求1+2+…+n
求 1+2+...+n ,要求不能使用乘除法、for、while、if、else、switch、case等关键字及条件判断语句(A?B:C)。
"""
# 其实就是简单的高斯求和,但是这边做出了诸多限制,所以才让这个题这么难。我的首选肯定是递归,但是递归的结束必须用条件语句,这边直接用逻辑运算的短路效应来搞定应该也可以。
def sumNums(n):
    # Gauss sum 1+2+...+n without if/while/* etc. (the puzzle constraint):
    # `and` short-circuits when n >= 1 becomes False, so the recursion
    # bottoms out and `or n` returns the remaining n (0 for valid input).
    return n>=1 and n+sumNums(n-1) or n
def sumNums2(n):
    """Plain recursive 1 + 2 + ... + n (reference version that is free to
    use the `if` the puzzle forbids)."""
    return 1 if n == 1 else sumNums2(n - 1) + n
if __name__ == '__main__':
    # 1 + 2 + ... + 100 == 5050
    res = sumNums(100)
    print(res)
###################################################################################################
# date_dilemma() #
# ----------------------------------------------------------------------------------------------- #
# The program takes in a date that is not standardized and converts it to a standardized          #
# format. There are set rules, which can be found on the DailyProgrammer subreddit: #
# https://www.reddit.com/r/dailyprogrammer/comments/3wshp7/20151214_challenge_245_easy_date_dilemma/#
# ----------------------------------------------------------------------------------------------- #
# #
# Author: Sejson #
# Date: December 30 2015 #
###################################################################################################
class date:
    """
    date
    ----
    Description:
        Holds all the parts of a date: year, month, day. All three fields
        default to 0, meaning "not yet determined"; standardize() fills
        them in as it parses the input text.
    ------------
    """
    def __init__(self, year=0, month=0, day=0):
        # Instance attributes (rather than shared class attributes, as
        # before) so each parsed date carries independent state.
        self.year = year
        self.month = month
        self.day = day
def date_dilemma():
    """
    date_dilemma()
    --------------
    Description:
        Take in a date written in an arbitrary form and converts it to a
        more standardized form.
        The standardized form is YYYY-MM-DD
    ------------
    """
    # Allows the program to be run multiple times.
    running = True
    while (running):
        # Takes in the user's odd date format
        unstandardized_text = input("Enter your date:\n")
        # Obtain a standard date and print it in the YYYY-MM-DD format
        standard_date = standardize(unstandardized_text)
        print(str(standard_date.year) + '-' + str(standard_date.month) + '-' + str(standard_date.day))
        # Determines if the user wants to run the program again. It assumes yes.
        again = input("Would you like to do another? Y or N\n")
        if ('n' in again) or ('N' in again):
            running = False
            print("Goodbye....")
def standardize(date_text):
    """
    standardize()
    -------------
    Description:
        Takes a date in an arbitrary format and converts it to the
        standardized YYYY-MM-DD parts.
    ------------
    Arguments:
        date_text - The arbitrary date text.
    ----------
    Returns:
        standard_date - An object of date type that has all the parts of a date.
    --------

    BUGFIXES:
      * the two-digit-year test was `year >= 0 or year <= 16`, which is
        always true, so the 1900s branch was unreachable; it is now the
        intended range test 0 <= year <= 16.
      * the 1900s year was computed as `2000 - year` (e.g. '90' -> 1910);
        per the stated rule it is now `1900 + year` (e.g. '90' -> 1990).
    """
    standard_date = date()
    current_text = ""
    # Scan one character PAST the end of the text: the IndexError raised
    # on that extra iteration flushes the final number into standard_date.
    for c in range(len(date_text) + 1):
        # A character is "part of a number" iff int() accepts it; any
        # delimiter (or the end of the string) lands in the except branch,
        # where the accumulated digits are assigned to a date field.
        try:
            int(date_text[c])
            current_text += date_text[c]
        except (ValueError, IndexError):
            # Ignore empty runs (double spaces and the like).
            if current_text != "":
                if len(current_text) == 4:
                    # All values that are 4 digits long must be years.
                    standard_date.year = int(current_text)
                elif c <= 2 or standard_date.year > 0 and standard_date.month == 0:
                    # A number ending within the first three characters, or
                    # the first number after a leading year, is the month.
                    standard_date.month = int(current_text)
                elif standard_date.year > 0 or standard_date.month > 0 and standard_date.day == 0:
                    # With a year or month already seen, the next slot is day.
                    standard_date.day = int(current_text)
                else:
                    # Remaining slot is a two-digit year: 0-16 maps to the
                    # 2000s, anything greater to the 1900s.
                    year = int(current_text)
                    if 0 <= year <= 16:
                        standard_date.year = 2000 + year
                    else:
                        standard_date.year = 1900 + year
                # Reset the accumulator once the number has been consumed.
                current_text = ""
    return standard_date
# Sets date_dilemma as the main function
if __name__ == "__main__":
    # Run the interactive converter only when executed as a script.
    date_dilemma()
|
def append(alist, iterable):
    """Benchmark variant: push items onto *alist* one list.append at a time."""
    for element in iterable:
        alist.append(element)
def extend(alist, iterable):
    # Benchmark variant: splice all items in with a single list.extend call.
    alist.extend(iterable)
import timeit
# BUGFIX: the statements previously passed to timeit were the *strings*
# 'lambda: append(...)', so each run only timed the construction of a
# lambda object -- append()/extend() were never actually called.
# Passing real callables makes timeit execute the functions under test
# (note: the fixed benchmark does real work and runs far longer).
print(min(timeit.repeat(lambda: append([], "abcdefghijklmnopqrstuvwxyz"), repeat=20, number=1000000)))
print(min(timeit.repeat(lambda: append([], "abcdefghijklmnopqrstuvwxyz"), repeat=20, number=1000000)))
print(max(timeit.repeat(lambda: append([], "abcdefghijklmnopqrstuvwxyz"), repeat=20, number=1000000)))
print(max(timeit.repeat(lambda: append([], "abcdefghijklmnopqrstuvwxyz"), repeat=20, number=1000000)))
print(min(timeit.repeat(lambda: append([], "a"), repeat=20, number=1000000)))
print(min(timeit.repeat(lambda: extend([], ["a"]), repeat=20, number=1000000)))
print(min(timeit.repeat(lambda: extend([], "a"), repeat=20, number=1000000)))
print(max(timeit.repeat(lambda: append([], "a"), repeat=20, number=1000000)))
print(max(timeit.repeat(lambda: extend([], ["a"]), repeat=20, number=1000000)))
print(max(timeit.repeat(lambda: extend([], "a"), repeat=20, number=1000000)))
# NOTE: the timings below were produced by the original, buggy
# lambda-construction benchmark and are NOT comparable to the fixed output.
'''
0.0933285720020649
0.09314376100155641
0.09545929099840578
0.09714219799934654
0.09300531600092654
0.09291150900025968
0.09264149699811242
0.09703555200030678
0.10708495500148274
0.121265007997863
'''
|
import psutil


def compiling(lang, name, filename):
    """Compile source file `name` into executable `filename`, blocking
    until the compiler exits.

    lang selects the toolchain: 'c++' (g++ -O2), 'c' (gcc) or
    'golang' (go build). Unknown values do nothing.

    BUGFIXES:
      * commands are argument lists instead of shell strings built by
        concatenation, so file names containing spaces or shell
        metacharacters can no longer break (or inject into) the command;
      * for Go, the -o flag now precedes the source argument as
        `go build` requires.
    """
    if lang == 'c++':
        psutil.Popen(['g++', name, '-o', filename, '-O2']).wait()
    elif lang == 'c':
        psutil.Popen(['gcc', name, '-o', filename]).wait()
    elif lang == 'golang':
        psutil.Popen(['go', 'build', '-o', filename, name]).wait()
import os
from urllib.parse import urlparse
from traitlets import Float, Unicode, Int, List, Instance, Bool, Dict, HasTraits
from ipywidgets import Widget, register, widget_serialization
from ipywidgets.widgets.trait_types import Color, InstanceDict
from ipywidgets.widgets import widget
from ._version import EXTENSION_VERSION
# NB '.txt' considered annotation as it is used in the public genomes. But not as per the doc.
# Maps each igv.js track type to the file extensions it is inferred from;
# consumed by Track.__new__ when no explicit 'type' kwarg is given.
TRACK_FILE_TYPES = {
    'annotation': [
        '.txt', '.bed', '.gff', '.gff3', '.gtf', '.genePred', '.genePredExt',
        '.peaks', '.narrowPeak', '.broadPeak', '.bigBed', '.bedpe'
    ],
    'wig': ['.wig', '.bigWig', '.bedGraph'],
    'alignment': ['.bam'],
    'variant': ['.vcf'],
    'seg': ['.seg'],
    'spliceJunctions': ['.bed'],
    'gwas': ['.gwas', '.bed'],
    'interaction': ['.bedpe'],
}
class FieldColors(HasTraits):
    """Color-by-field rule: a feature field name plus a value->Color palette."""
    field = Unicode()
    palette = Dict(key_trait=Unicode, value_trait=Instance(Color))
class SortOption(HasTraits):
    """Sort request for an alignment track (locus + criterion + direction)."""
    chr = Unicode() # chromosome name
    position = Int() # genomic position
    option = Unicode() # 'BASE', 'STRAND', 'INSERT_SIZE', 'MATE_CHR', 'MQ', 'TAG'
    tag = Unicode () # doc not clear; presumably the tag used when option == 'TAG'
    direction = Unicode("ASC") # 'ASC' for ascending, 'DESC' for descending
class SortOrder(HasTraits):
    """Sort order over a genomic interval (used by SegTrack.sort)."""
    chr = Unicode() # chromosome name
    direction = Unicode("ASC") # 'ASC' for ascending, 'DESC' for descending
    start = Int()
    end = Int()
@register
class Track(Widget):
    """
    A class reflecting the common fields of a track as per igv documentation.
    https://github.com/igvteam/igv.js/wiki/Tracks-2.0
    If a Track type is not inferable a generic Track will be instantiated.
    """
    _view_name = Unicode('TrackView').tag(sync=True)
    _model_name = Unicode('TrackModel').tag(sync=True)
    _view_module = Unicode('jupyter-igv').tag(sync=True)
    _model_module = Unicode('jupyter-igv').tag(sync=True)
    _view_module_version = Unicode(EXTENSION_VERSION).tag(sync=True)
    _model_module_version = Unicode(EXTENSION_VERSION).tag(sync=True)

    def __new__(cls, **kwargs):
        """Dispatch Track(...) to the concrete subclass for its type.

        The type is taken from the 'type' kwarg when present, otherwise
        inferred from the url's file extension (looking through a
        trailing '.gz'). Unknown/unmatched types yield a generic Track.
        """
        if cls is Track:
            # we must infer the type to instantiate the right Track type
            trackType = kwargs.get('type', None)
            if trackType is None:
                # then type is inferred from the file extension
                url = kwargs.get('url')
                path = urlparse(url).path
                filename, filetype = os.path.splitext(path)
                if filetype == '.gz':  # some files might be compressed
                    filetype = os.path.splitext(filename)[1]
                for kind, extensions in TRACK_FILE_TYPES.items():
                    if filetype in extensions:
                        trackType = kind
                        break
            # BUGFIX: 'gwas' previously referenced the misspelled name
            # `GwassTrack` (a NameError at runtime), and the interaction
            # branch compared against the misspelled literal 'interation',
            # so InteractionTrack could never be instantiated.
            subclass_by_type = {
                'annotation': AnnotationTrack,
                'alignment': AlignmentTrack,
                'variant': VariantTrack,
                'wig': WigTrack,
                'seg': SegTrack,
                'spliceJunctions': SpliceJunctionsTrack,
                'gwas': GwasTrack,
                'interaction': InteractionTrack,
            }
            return super(Track, cls).__new__(subclass_by_type.get(trackType, cls))
        else:
            return super(Track, cls).__new__(cls)

    # These fields are common to all Track types
    sourceType = Unicode(default_value='file').tag(sync=True)
    format = Unicode().tag(sync=True)  # missing documentation
    name = Unicode().tag(sync=True)
    url = Unicode().tag(sync=True)
    indexURL = Unicode().tag(sync=True)
    indexed = Bool(default_value=False).tag(sync=True)
    order = Int().tag(sync=True)
    color = Color().tag(sync=True)  # BUGFIX: .tag(sync=True) was applied twice
    height = Int(default_value=50).tag(sync=True)
    autoHeight = Bool(default_value=False).tag(sync=True)
    minHeight = Int(default_value=50).tag(sync=True)
    maxHeight = Int(default_value=500).tag(sync=True)
    # visibilityWindow = # missing documentation
    removable = Bool(default_value=True).tag(sync=True)
    headers = Dict().tag(sync=True)
    oauthToken = Unicode(allow_none=True).tag(sync=True)
@register
class AnnotationTrack(Track):
    """
    AnnotationTrack as described at:
    https://github.com/igvteam/igv.js/wiki/Annotation-Track
    """
    type = Unicode('annotation', read_only=True).tag(sync=True)
    displayMode = Unicode(default_value = 'COLLAPSED').tag(sync=True)
    expandedRowHeight = Int (default_value = 30).tag(sync=True)
    squishedRowHeight = Int (default_value = 15).tag(sync=True)
    nameField = Unicode(default_value = 'Name').tag(sync=True)
    maxRows = Int (default_value = 500).tag(sync=True)
    searchable = Bool(default_value=False).tag(sync=True)
    # NOTE(review): 'chromosone' looks like a typo for 'chromosome', but the
    # value is synced to the frontend -- confirm against igv.js before changing.
    filterTypes = List(Unicode, default_value=['chromosone', 'gene']).tag(sync=True, **widget_serialization)
    color = Color("rgb(0,0,150)").tag(sync=True)
    # NOTE(review): altColor defaults to the same value as color.
    altColor = Color("rgb(0,0,150)").tag(sync=True)
    colorBy = Instance(FieldColors, allow_none=True).tag(sync=True, **widget_serialization)
    roi = List(InstanceDict(Track)).tag(sync=True, **widget_serialization) # regions of interest
@register
class AlignmentTrack(Track):
    """
    AlignmentTrack as described at:
    https://github.com/igvteam/igv.js/wiki/Alignment-Track
    """
    type = Unicode('alignment', read_only=True).tag(sync=True)
    viewAsPairs = Bool(default_value=False).tag(sync=True)
    pairsSupported = Bool(default_value=True).tag(sync=True)
    # Rendering colors for coverage and read features.
    coverageColor = Color(default_value="rgb(150, 150, 150)").tag(sync=True, **widget_serialization)
    color = Color(default_value="rgb(170, 170, 170)").tag(sync=True, **widget_serialization)
    deletionColor = Color(default_value="black").tag(sync=True, **widget_serialization)
    skippedColor = Color(default_value="rgb(150, 170, 170)").tag(sync=True, **widget_serialization)
    insertionColor = Color(default_value="rgb(138, 94, 161)").tag(sync=True, **widget_serialization)
    negStrandColor = Color(default_value="rgba(150, 150, 230, 0.75)").tag(sync=True, **widget_serialization)
    posStrandColor = Color(default_value="rgba(230, 150, 150, 0.75)").tag(sync=True, **widget_serialization) # default: rgba(230, 150, 150, 0.75)
    # pairConnectorColor = Instance(Color, default_value="alignmentColor") # default: doc not clear
    colorBy = Unicode("none").tag(sync=True) # "none", "strand", "firstOfPairStrand", or "tag"
    colorByTag = Unicode().tag(sync=True) # TODO - doc not clear
    bamColorTag = Unicode("YC").tag(sync=True) # TODO - doc not clear
    samplingWindowSize = Int(100).tag(sync=True)
    samplingDepth = Int(100).tag(sync=True)
    alignmentRowHeight = Int(14).tag(sync=True)
    readgroup = Unicode("RG").tag(sync=True)
    sortOption = Instance(SortOption, allow_none=True).tag(sync=True, **widget_serialization)
    showSoftClips = Bool(False).tag(sync=True)
    showMismatches = Bool(True).tag(sync=True)
    # Paired-end and mate-pair coloring options.
    pairOrientation = Unicode(allow_none=True).tag(sync=True, **widget_serialization) # ff, fr, or rf
    minFragmentLength = Int(allow_none=True).tag(sync=True, **widget_serialization)
    maxFragmentLength = Int(allow_none=True).tag(sync=True, **widget_serialization)
    roi = List(InstanceDict(Track)).tag(sync=True, **widget_serialization) # regions of interest
@register
class VariantTrack(Track):
    """
    VariantTrack as described at:
    https://github.com/igvteam/igv.js/wiki/Variant-Track
    """
    type = Unicode('variant', read_only=True).tag(sync=True)
    displayMode = Unicode('EXPANDED').tag(sync=True)
    # Genotype call colors.
    noCallColor = Color("rgb(250, 250, 250)").tag(sync=True)
    homvarColor = Color("rgb(17,248,254)").tag(sync=True)
    hetvarColor = Color("rgb(34,12,253)").tag(sync=True)
    homrefColor = Color("rgb(200, 200, 200)").tag(sync=True)
    squishedCallHeight = Int(1).tag(sync=True)
    expandedCallHeight = Int(10).tag(sync=True)
    roi = List(InstanceDict(Track)).tag(sync=True, **widget_serialization) # regions of interest
class Guideline(HasTraits):
    """Horizontal guide line drawn on a WigTrack at height y."""
    # NOTE(review): .tag(sync=True) has no effect on a plain HasTraits
    # (sync is a Widget mechanism) -- confirm whether this was intended.
    color = Color().tag(sync=True)
    dotted = Bool().tag(sync=True)
    y = Int().tag(sync=True)
@register
class WigTrack(Track):
    """
    WigTrack as described at:
    https://github.com/igvteam/igv.js/wiki/Wig-Track
    """
    type = Unicode('wig', read_only=True).tag(sync=True)
    autoscale = Bool(True).tag(sync=True)
    autoscaleGroup = Unicode(allow_none=True).tag(sync=True, **widget_serialization)
    # Fixed y-axis bounds used when autoscale is off.
    min = Int(0).tag(sync=True)
    max = Int(allow_none=True).tag(sync=True, **widget_serialization)
    color = Color(default_value="rgb(150, 150, 150)").tag(sync=True)
    altColor = Color(allow_none=True).tag(sync=True, **widget_serialization)
    guideLines = List(trait=Instance(Guideline), allow_none=True).tag(sync=True, **widget_serialization)
    roi = List(InstanceDict(Track)).tag(sync=True, **widget_serialization) # regions of interest
@register
class SegTrack(Track):
    """
    SegTrack Track as described at:
    https://github.com/igvteam/igv.js/wiki/Seg-Track
    """
    type = Unicode('seg', read_only=True).tag(sync=True)
    isLog = Bool(allow_none=True).tag(sync=True, **widget_serialization)
    displayMode = Unicode("EXPANDED").tag(sync=True) # "EXPANDED", "SQUISHED", or "FILL"
    sort = InstanceDict(SortOrder).tag(sync=True, **widget_serialization)
    roi = List(InstanceDict(Track)).tag(sync=True, **widget_serialization) # regions of interest
@register
class SpliceJunctionsTrack(Track):
    """
    SpliceJunctionsTrack as described at:
    https://github.com/igvteam/igv.js/wiki/SpliceJunctions
    """
    type = Unicode('spliceJunctions', read_only=True).tag(sync=True)
    # Display Options
    colorBy = Unicode('numUniqueReads').tag(sync=True) # "numUniqueReads", "numReads", "isAnnotatedJunction", "strand", "motif"
    colorByNumReadsThreshold = Int(5).tag(sync=True)
    thicknessBasedOn = Unicode('numUniqueReads').tag(sync=True) # "numUniqueReads", "numReads", "isAnnotatedJunction"
    bounceHeightBasedOn = Unicode('random').tag(sync=True) # "random", "distance", "thickness"
    labelUniqueReadCount = Bool(True).tag(sync=True)
    labelMultiMappedReadCount = Bool(True).tag(sync=True)
    labelTotalReadCount = Bool(False).tag(sync=True)
    labelMotif = Bool(False).tag(sync=True)
    labelAnnotatedJunction = Unicode(allow_none=True).tag(sync=True, **widget_serialization)
    # Filtering Options
    minUniquelyMappedReads = Int(0).tag(sync=True)
    minTotalReads = Int(0).tag(sync=True)
    maxFractionMultiMappedReads = Int(1).tag(sync=True)
    minSplicedAlignmentOverhang = Int(0).tag(sync=True)
    hideStrand = Unicode(allow_none=True).tag(sync=True, **widget_serialization) # None, "+" or "-"
    hideAnnotatedJunctions = Bool(False).tag(sync=True)
    hideUnannotatedJunctions = Bool(False).tag(sync=True)
    hideMotifs = List(Unicode).tag(sync=True, **widget_serialization)
    roi = List(InstanceDict(Track)).tag(sync=True, **widget_serialization) # regions of interest
@register
class GwasTrack (Track):
    """
    GwasTrack as described at:
    https://github.com/igvteam/igv.js/wiki/GWAS
    """
    type = Unicode('gwas', read_only=True).tag(sync=True)
    # Y-axis bounds for the -log10(p) / posterior-probability scatter.
    min = Int(0).tag(sync=True)
    max = Int(25).tag(sync=True)
    # format = Unicode().tag(sync=True) # 'bed' or 'gwas' - format is already in Track -> validation only
    posteriorProbability = Bool(False).tag(sync=True)
    dotSize = Int(3).tag(sync=True)
    columns = Dict(key_trait=Unicode, value_trait=Int, allow_none=True).tag(sync=True, **widget_serialization)
    roi = List(InstanceDict(Track)).tag(sync=True, **widget_serialization) # regions of interest
@register
class InteractionTrack (Track):
    """
    InteractionTrack as described at:
    https://github.com/igvteam/igv.js/wiki/Interaction
    """
    type = Unicode('interaction', read_only=True).tag(sync=True)
    arcOrientation = Bool(True).tag(sync=True)
    thickness = Int(2).tag(sync=True)
    roi = List(InstanceDict(Track)).tag(sync=True, **widget_serialization) # regions of interest
class Exon(HasTraits):
    """One exon of a TrackFeature: genomic span plus coding-region bounds."""
    start = Int()
    end = Int()
    cdStart = Int()
    cdEnd = Int()
    utr = Bool()
class TrackFeature(HasTraits):
    """A single annotation feature (BED-like record) with its exon list."""
    chr = Unicode()
    start = Int()
    end = Int()
    name = Unicode()
    score = Float()
    strand = Unicode()
    cdStart = Int()
    cdEnd = Int()
    # NOTE(review): Color here is the trait *class*, so Instance(Color)
    # expects a trait instance rather than a color string -- confirm.
    color = Instance(Color)
    exons = List(trait=Instance(Exon))
@register
class ReferenceGenome(Widget):
    """
    A class reflecting a reference genome as per IGV documentation.
    https://github.com/igvteam/igv.js/wiki/Reference-Genome
    """
    _view_name = Unicode('ReferenceGenomeView').tag(sync=True)
    _model_name = Unicode('ReferenceGenomeModel').tag(sync=True)
    _view_module = Unicode('jupyter-igv').tag(sync=True)
    _model_module = Unicode('jupyter-igv').tag(sync=True)
    _view_module_version = Unicode(EXTENSION_VERSION).tag(sync=True)
    _model_module_version = Unicode(EXTENSION_VERSION).tag(sync=True)
    id = Unicode(allow_none=True).tag(sync=True)
    name = Unicode(allow_none=True).tag(sync=True)
    fastaURL = Unicode().tag(sync=True)
    indexURL = Unicode(allow_none=True).tag(sync=True)
    cytobandURL = Unicode(allow_none=True).tag(sync=True)
    aliasURL = Unicode(allow_none=True).tag(sync=True)
    indexed = Bool(default_value=False).tag(sync=True)
    tracks = List(InstanceDict(Track)).tag(sync=True, **widget_serialization)
    chromosomeOrder = Unicode(allow_none=True).tag(sync=True)
    headers = Dict().tag(sync=True)
    wholeGenomeView = Bool(default_value=True).tag(sync=True)
@register
class SearchService(Widget):
    """Locus search-service configuration (endpoint + response field names)."""
    # NOTE(review): unlike the other widgets in this module, none of these
    # traits is tagged sync=True -- confirm whether they reach the frontend.
    url = Unicode()
    resultsField = Unicode()
    coords = Int(default_value=1)
    chromosomeField = Unicode(default_value='chromosome')
    startField = Unicode(default_value='start')
    endField = Unicode(default_value='end', allow_none=True)
|
import math
import copy
import matplotlib.pyplot as plt
import numpy as np
import Solution as sl
import Problem
import ParetoUtil as pu
import Metrics as met
import MRML as mm
class NSPSOMMBase():
    """Non-dominated-sorting PSO (with mating selection from MRML) base class.

    Evolves a population of sl.solution particles against a multi-objective
    `problem`, recording several quality indicators per generation.
    """
    def __init__(self, problem, popSize: int):
        # problem: supplies createGene() and variable bounds.
        # popSize: number of particles kept per generation.
        super().__init__()
        self.problem = problem
        self.popSize = popSize
    def Evolve(self, genertionCount: int, initPopulation: np.array, FitnessInd, GDInd, IGDInd,EIInd, HVInd):
        """Run the PSO main loop for `genertionCount` generations.

        initPopulation: array of sl.solution particles (deep-copied here).
        FitnessInd/GDInd/IGDInd/EIInd/HVInd: indicator objects exposing
        .compute(locations), evaluated each generation.
        Returns [final population, result] where result[g] holds
        (g, fitness, GD, IGD, EI, HV, Delta) for generation g.
        """
        # 1, 2  -- initialize velocities (random sign) and personal bests.
        PSOList = copy.deepcopy(initPopulation)
        for i in range(PSOList.shape[0]):
            PSOList[i].velocity = self.problem.createGene()
            if(np.random.rand() < .5):
                PSOList[i].velocity *= -1
            PSOList[i].Pbest = np.copy(PSOList[i].gene)
        nonDomPSOList = None
        #Vmax = self.problem.variableUpperBound
        # Metrics buffer, pre-filled with 10 as a sentinel value.
        result = np.full([genertionCount,7],10,dtype=float)
        for g in range(genertionCount):
            # 3 Add non-dominated front of population to nonDomPSOList
            PSOListFronts = pu.fastNonDominatedSord(PSOList)
            nonDomPSOList = PSOListFronts[0]
            #4,5
            nonDomPSOList = pu.sortSolutionsByCrowdingDist(nonDomPSOList)
            #6
            # Next population holds each parent plus its offspring particle.
            nextPopList = np.empty(PSOList.shape[0] * 2,dtype=sl.solution)
            #w was gradually decreased from 1.0 to 0.4.
            w = 1 - (g/(genertionCount - 1)) * .6
            i = 0
            for curent_F in PSOListFronts:
                pairList = mm.MattingSelection2(curent_F)
                for pair in pairList:
                    R1, R2 = np.random.rand(), np.random.rand()
                    C1,C2 = 2,2
                    # a) Select randomly a global best Pg for the i-th particle from a specified top part 5%
                    randGbestIndex = np.random.randint(0,max(1,int(nonDomPSOList.shape[0]/100*5)+1), size=(1))[0]
                    randGbest = nonDomPSOList[randGbestIndex]
                    # flag for single pair (odd front size): mate it with itself.
                    isTwinPair = True
                    if pair[1] is None:
                        pair = (pair[0],pair[0])
                        isTwinPair = False
                    newSl0 = sl.solution(self.problem)
                    newSl1 = sl.solution(self.problem)
                    # b) calculate new velocity (standard PSO update: inertia +
                    # cognitive pull toward Pbest + social pull toward Gbest).
                    newSl0.velocity = (w * pair[0].velocity) +\
                        (C1 * R1 * (pair[0].Pbest - pair[0].gene)) +\
                        (C2 * R2 * (randGbest.gene - pair[0].gene))
                    newSl1.velocity = (w * pair[1].velocity) +\
                        (C1 * R1 * (pair[1].Pbest - pair[1].gene)) +\
                        (C2 * R2 * (randGbest.gene - pair[1].gene))
                    self.ApplyVelocityConstrains(newSl0.velocity)
                    self.ApplyVelocityConstrains(newSl1.velocity)
                    # calculate new location
                    newSl0.gene = pair[0].gene + newSl0.velocity
                    newSl1.gene = pair[1].gene + newSl1.velocity
                    newSl0.ApplyVarialbleConstrains()
                    newSl1.ApplyVarialbleConstrains()
                    # update pbest: keep the parent's best if it dominates the child.
                    newSl0.Pbest = newSl0.gene
                    newSl1.Pbest = newSl1.gene
                    if pair[0].Dominate(newSl0):
                        newSl0.Pbest = pair[0].gene
                    if pair[1].Dominate(newSl1):
                        newSl1.Pbest = pair[1].gene
                    nextPopList[2*i] = pair[0]
                    nextPopList[2*i + 1] = newSl0
                    i +=1
                    if isTwinPair:
                        nextPopList[2*i] = pair[1]
                        nextPopList[2*i + 1] = newSl1
                        i +=1
            #7
            # modified dont copy repeated to nextPopListRest
            F = pu.fastNonDominatedSord(nextPopList)
            nonDomPSOList = F[0]
            nextPopListRest = None
            for el in F[1:]:
                for iel in el:
                    nextPopListRest = sl.solution.AddPopulations(nextPopListRest,iel)
            #8
            PSOList = None
            # 9  -- shuffle the first front and take at most popSize particles.
            np.random.shuffle(nonDomPSOList)
            PSOList = nonDomPSOList[: min(nonDomPSOList.shape[0] , self.popSize)]
            #10  -- top up from successive fronts of the remainder until full.
            while (PSOList.shape[0] < self.popSize): # and nextPopListRest is not None:
                #a
                nextNonDomList = pu.fastNonDominatedSord(nextPopListRest)[0]
                #b
                PSOList = sl.solution.AddPopulations(PSOList,nextNonDomList[: min(nextNonDomList.shape[0],(self.popSize - PSOList.shape[0]))])
                if (PSOList.shape[0] == self.popSize):
                    break;
                #c
                nextPopListRestCopy = np.copy(nextPopListRest)
                nextPopListRest = None
                #d  -- drop the front just consumed and keep the rest.
                Fd = pu.fastNonDominatedSord(nextPopListRestCopy)[1:]
                nextPopListRest = None
                for el in Fd:
                    for iel in el:
                        nextPopListRest = sl.solution.AddPopulations(nextPopListRest,iel)
            #11  -- evaluate all quality indicators for this generation.
            locations = pu.GetPopulationLocations(PSOList)
            result[g] = np.array([g,FitnessInd.compute(locations), GDInd.compute(locations), IGDInd.compute(locations), EIInd.compute(locations), HVInd.compute(locations),met.Delta(locations,self.problem)])
            print("NSPSOMMBase generation" + str(g))
        # get the final Pareto front
        return [PSOList,result]
    def ApplyVelocityConstrains(self,velocity):
        """Clip a velocity vector in place to the problem's per-variable bounds."""
        # x0 Constrains
        velocity[0] = np.clip(velocity[0], -self.problem.x0UpperBound, self.problem.x0UpperBound)
        # xi Constrains
        velocity[1:] = np.clip(velocity[1:], -self.problem.variableUpperBound, self.problem.variableUpperBound)
|
import asyncio
import time


async def client(address):
    """Connect to (host, port) *address* and ping the server once per
    second forever, printing each raw response.
    """
    reader, writer = await asyncio.open_connection(*address)
    while True:
        writer.write(b'Hello from client')
        await writer.drain()
        resp = await reader.read(100000)
        print(b"got: " + resp)
        # BUGFIX: time.sleep() blocks the whole event loop thread; the
        # non-blocking asyncio sleep lets other tasks run while waiting.
        await asyncio.sleep(1)


asyncio.run(client(('localhost', 25000)))
# -*- coding: utf-8 -*-
"""
Created on Thu May 23 08:13:00 2019
@author: Odi
"""
# Example of using a graph (networkx) as a plant-gift recommender.
import networkx as nx
G = nx.DiGraph()
# Add one node per plant. Each trailing comment tags the node with its
# attribute values in the order: Ocasion, TiempoDeConocerse, Cuidado, Edad, Color.
G.add_node("Inicio") # Ocasion, TiempoDeConocerse, Cuidado, Edad, Color
G.add_node("Rosa") # amor, mas, normal, menosDe20, rojo
G.add_node("Rosa blanca") # religioso, mas, normal, masDe20, blanco
G.add_node("Tulipan") # amor, menosDe6, normal, menosDe20, todosColores
G.add_node("Lirio") # religioso, menosDeUnAno, normal, masDe20, todosColores
G.add_node("Girasol") # amistad, menosDe6, normal, menosDe10, amarillo
G.add_node("Cactus") # amistad, menosDe6, facil, menosDe20, verde
G.add_node("Suculenta") # amistad, menosDe6, facil, menosde20, verde
G.add_node("Agapanto") # formal, menosDe5, dificil, masDe20, morado
G.add_node("Venus flytrap") # amistosa, menosDe5 dificil, menosDe20, exotico
G.add_node("Drosera") # amistosa, menosDe5, dificil, menosdDe20, exotico
G.add_node("Nepenthes") # amistosa, menosDe5, dificil, menosDe20, exotico
G.add_node("Helecho") # otra, menosDeUnAno, facil, masDe20, verde
G.add_node("Orquidea") # amorosa, mas, normal, masDe20, exotico
# Variables controlling the edge weights: every attribute starts at 5 and
# is lowered to 0 when the user's answer matches it, so edges through
# matching attributes become cheaper for the shortest-path search below.
amor = 5
amistad = 5
formal = 5
religiosa = 5
otra = 5
menosDe6M = 5
menosDe1A = 5
menosDe5A = 5
mas = 5
facil = 5
normal = 5
dificil = 5
menosDe10 = 5
menosDe20 = 5
masDe20 = 5
rojo = 5
blanco = 5
verde = 5
azul = 5
amarillo = 5
morado = 5
exotico = 5
# Interactive questionnaire: each answer lowers the weight of the matching
# attribute variables by 5, making the matching plants cheaper to reach.
print("Bienvenido a el programa recomendador de plantas como regalos! ")
print("Responda las siguientes preguntas por favor... ")
print("Responda 0 si desea saltar la pregunta: ")
print("¿Para que tipo de ocasion es el regalo?\n\t1. Amorosa\n\t2. Amistosa\n\t3. Formal\n\t4. Religiosa\n\t5. Otra")
res = input("")
val = int(res)
if val == 0:
    print("Saltando pregunta...")
elif val == 1:
    amor = amor - 5
elif val == 2:
    amistad = amistad - 5
elif val == 3:
    formal = formal - 5
elif val == 4:
    religiosa = religiosa - 5
elif val == 5:
    # BUGFIX: option 5 ("Otra") was offered in the menu but never handled.
    otra = otra - 5
print("¿Cuanto tiempo llevan de conocerse el recipiente y usted?\n\t1. Menos de 6 meses\n\t2. Menos de un año\n\t3. Menos de 5 años\n\t4. Mas")
res = input("")
val = int(res)
if val == 0:
    print("Saltando pregunta...")
elif val == 1:
    menosDe6M = menosDe6M - 5
elif val == 2:
    menosDe1A = menosDe1A - 5
elif val == 3:
    menosDe5A = menosDe5A - 5
elif val == 4:
    mas = mas - 5
print("¿Cuanto cuidado cree que el recipiente pueda darle a la planta?\n\t1. Poco\n\t2. Regular\n\t3. Mucho")
res = input("")
val = int(res)
if val == 0:
    print("Saltando pregunta...")
elif val == 1:
    facil = facil - 5
elif val == 2:
    normal = normal - 5
elif val == 3:
    dificil = dificil - 5
print("¿Cuantos años tiene el recipiente?\n\t1. Menos de 10\n\t2. Menos de 20\n\t3. Mas de 20")
res = input("")
val = int(res)
if val == 0:
    print("Saltando pregunta...")
elif val == 1:
    menosDe10 = menosDe10 - 5
elif val == 2:
    # An older recipient also satisfies the younger brackets.
    menosDe20 = menosDe20 - 5
    menosDe10 = menosDe10 - 5
elif val == 3:
    masDe20 = masDe20 - 5
    menosDe20 = menosDe20 - 5
    menosDe10 = menosDe10 - 5
# BUGFIX: the color menu listed both "Amarillo" and "Morado" as option 5 and
# never decremented `morado`; the options are renumbered 1-7 and each handled.
print("¿Y que hay del color de la planta?\n\t1. Rojo\n\t2. Blanco\n\t3. Verde\n\t4. Azul\n\t5. Amarillo\n\t6. Morado\n\t7. Exotico")
res = input("")
val = int(res)
if val == 0:
    print("Saltando pregunta...")
elif val == 1:
    rojo = rojo - 5
elif val == 2:
    blanco = blanco - 5
elif val == 3:
    verde = verde - 5
elif val == 4:
    azul = azul - 5
elif val == 5:
    amarillo = amarillo - 5
elif val == 6:
    morado = morado - 5
elif val == 7:
    exotico = exotico - 5
#print ("Nodos: ", G.nodes())
# Per-plant score: the sum of its five attribute weights. Weights that
# matched the user's answers are 0, so the best match has the lowest total.
n1 = amor + mas + normal + menosDe20 + rojo
n2 = religiosa + mas + normal + masDe20 + blanco
n3 = amor + menosDe6M + normal + menosDe20 + exotico
n4 = religiosa + menosDe1A + normal + menosDe20 + exotico
n5 = amistad + menosDe6M + normal + menosDe10 + amarillo
n6 = amistad + menosDe6M + facil + menosDe20 + verde
n7 = amistad + menosDe6M + facil + menosDe20 + verde
n8 = formal + menosDe5A + dificil + masDe20 + morado
n9 = amistad + menosDe5A + dificil + menosDe20 + exotico
n10 = amistad + menosDe5A + dificil + menosDe20 + exotico
n11 = amistad + menosDe5A + dificil + menosDe20 + exotico
n12 = otra + menosDe1A + facil + masDe20 + verde
n13 = amor + mas + normal + masDe20 + exotico
# Add edges: "Inicio" connects to every plant with that plant's score as
# the weight; plant-to-plant edges share single-attribute weights.
G.add_edge("Inicio", "Rosa",weight=n1)
G.add_edge("Inicio", "Rosa blanca", weight=n2)
G.add_edge("Inicio", "Tulipan",weight=n3)
G.add_edge("Inicio", "Lirio",weight=n4)
G.add_edge("Inicio", "Girasol",weight=n5)
G.add_edge("Inicio", "Cactus", weight=n6)
G.add_edge("Inicio", "Suculenta", weight=n7)
G.add_edge("Inicio", "Agapanto", weight=n8)
G.add_edge("Inicio", "Venus flytrap", weight=n9)
G.add_edge("Inicio", "Drosera", weight=n10)
G.add_edge("Inicio", "Nepenthes",weight=n11)
G.add_edge("Inicio", "Helecho", weight=n12)
G.add_edge("Inicio", "Orquidea", weight=n13)
G.add_edge("Rosa", "Tulipan",weight=amor)
G.add_edge("Rosa", "Rosa blanca",weight=amor)
G.add_edge("Orquidea", "Tulipan", weight=exotico)
G.add_edge("Rosa", "Girasol",weight=amistad)
G.add_edge("Lirio", "Girasol",weight=amistad)
G.add_edge("Lirio", "Tulipan",weight=exotico)
G.add_edge("Lirio", "Orquidea",weight=exotico)
G.add_edge("Girasol", "Tulipan",weight=menosDe6M)
G.add_edge("Girasol", "Cactus",weight=amistad)
G.add_edge("Cactus", "Girasol",weight=amistad)
G.add_edge("Cactus", "Agapanto",weight=formal)
G.add_edge("Cactus", "Suculenta", weight=facil)
G.add_edge("Cactus", "Helecho", weight=facil)
G.add_edge("Suculenta", "Tulipan", weight=menosDe20)
G.add_edge("Suculenta", "Agapanto",weight=formal)
G.add_edge("Agapanto", "Tulipan", weight=morado)
G.add_edge("Agapanto", "Drosera", weight=dificil)
G.add_edge("Agapanto", "Venus flytrap", weight=dificil)
G.add_edge("Agapanto", "Nepenthes", weight=dificil)
G.add_edge("Venus flytrap", "Cactus", weight=verde)
G.add_edge("Venus flytrap", "Helecho", weight=verde)
G.add_edge("Venus flytrap", "Agapanto", weight=verde)
G.add_edge("Drosera", "Tulipan", weight=exotico)
G.add_edge("Nepenthes", "Cactus", weight=menosDe20)
# NOTE(review): the Orquidea -> Tulipan edge above is re-added here with the
# same weight; the duplicate is redundant.
G.add_edge("Orquidea", "Tulipan", weight=exotico)
G.add_edge("Orquidea", "Rosa blanca", weight=normal)
#print ("Aristas: ", G.edges())
# single source shortest path with Dijkstra
# BUGFIX: iterating the result dict printed only the node names, dropping
# the computed scores; show each node with its distance, best match first.
# (A stray bare `print` statement -- a Python 2 leftover that is a no-op
# expression in Python 3 -- was also removed.)
print()
print("Salieron los resultados!:\n")
resultados = nx.single_source_dijkstra_path_length(G, "Inicio")
for nodo, distancia in sorted(resultados.items(), key=lambda par: par[1]):
    print(nodo, distancia)
|
#!/usr/bin/python
#-*- coding: utf-8 -*-
"""
"""
import os
import commands
import subprocess
import re
import httplib
import logging
import logging.handlers
import json
import time
def json_response(errno, msg=None):
    """Build a JSON status payload: {"status": errno, "message": [...]}.

    errno is looked up in `error.errors` for its canonical text; msg,
    when given, is appended as an extra message entry.

    NOTE(review): no `error` module is imported in this file's visible
    imports -- confirm it is provided elsewhere, otherwise this raises
    NameError at call time.
    """
    resp = {}
    resp['status'] = errno
    message = [error.errors[errno]]
    if msg:
        message.append(msg)
    resp['message'] = message
    return json.dumps(resp)
def get_log(log_name, log_file_path):
    """Build a DEBUG-level logger that writes to a size-rotated file.

    log_name: name of the logger (the logging.getLogger key).
    log_file_path: path of the log file.
    Returns:
        my_logger -- the configured Logger instance.

    FIXES: the previous docstring claimed `return my_logger, handler`
    but only the logger is returned; the unused `log_dir`/`LOG_FILENAME`
    locals were removed; comments translated to English.
    """
    my_logger = logging.getLogger(log_name)
    my_logger.setLevel(logging.DEBUG)
    max_megabytes = 20  # rotation threshold, in (approx.) megabytes
    # Keep up to 5 rotated backups alongside the active file.
    handler = logging.handlers.RotatingFileHandler(
        log_file_path, maxBytes=1024000 * max_megabytes, backupCount=5
    )
    handler.setLevel(logging.DEBUG)
    formatter = logging.Formatter(
        "%(asctime)s-%(name)s-%(levelname)s-%(message)s")
    handler.setFormatter(formatter)
    # NOTE(review): every call appends a new handler to the (cached)
    # logger; calling twice with the same log_name duplicates records.
    my_logger.addHandler(handler)
    return my_logger
if __name__ == '__main__':
    # Library module: nothing to do when executed directly.
    pass
|
import os
import sys
import importlib
from nab import log
_loaded = False
def load():
    """
    Import all plugins in folder. Will only run once.
    """
    global _loaded
    if _loaded:
        return
    log.log.debug("Loading plugins")
    _loaded = True
    # Walk the plugin tree; every directory goes on sys.path so the
    # modules inside it can be imported by bare name.
    for dirpath, _subdirs, filenames in os.walk("nab/plugins/"):
        sys.path.insert(0, dirpath)
        for entry in filenames:
            module_name, extension = os.path.splitext(entry)
            if extension == '.py' and module_name != "__init__":
                log.log.debug(module_name)
                importlib.import_module(module_name)
|
import os
import time
from threading import Timer
from .transport import protocol
from electrum.util import print_stderr, raw_input, _logger
# Platform detection: default to Android; switch to iOS when the iOS
# environment marker is present.
IS_ANDROID = True
UI_HANDLER = None
if "iOS_DATA" in os.environ:
    # iOS: bridge to the native OKBlueManager class via rubicon-objc.
    from rubicon.objc import ObjCClass
    UI_HANDLER = ObjCClass("OKBlueManager")
    IS_ANDROID = False
elif "ANDROID_DATA" in os.environ:
    # Android: UI messages are delivered through an android.os.Handler.
    from android.os import Handler
class CustomerUI:
def __init__(self):
pass
pin = '' # type: str
passphrase = '' # type: str
user_cancel = 0
pass_state = 0
handler = None
if not IS_ANDROID:
handler = UI_HANDLER.sharedInstance().getNotificationCenter()
# this method must be classmethod in order to keep Memory consistency
@classmethod
def get_pin(cls, code, show_strength=False) -> str:
cls.code = code
cls.user_cancel = 0
cls.pin = ''
if cls.handler:
if code == '2':
if IS_ANDROID:
cls.handler.sendEmptyMessage(2)
else:
cls.handler.postNotificationName_object_("2", None)
elif code == '1':
if IS_ANDROID:
cls.handler.sendEmptyMessage(1)
else:
cls.handler.postNotificationName_object_("1", None)
start = int(time.time())
while True:
wait_seconds = int(time.time()) - start
if cls.user_cancel:
cls.user_cancel = 0
raise BaseException("user cancel")
elif cls.pin != '':
pin_current = cls.pin
cls.pin = ''
return pin_current
elif wait_seconds >= 60:
raise BaseException("waiting pin timeout")
else:
time.sleep(0.0001)
@classmethod
def set_pass_state(cls, state):
cls.pass_state = state
@classmethod
def get_pass_state(cls):
pass_state_current = cls.pass_state
cls.pass_state = 0
return pass_state_current
@classmethod
def get_state(cls):
state_current = cls.state
cls.state = 0
return state_current
@classmethod
def get_passphrase(cls, msg, confirm=None) -> str:
cls.code = msg
cls.passphrase = ''
cls.user_cancel = 0
if cls.pass_state == 0:
return ''
cls.pass_state = 0
if cls.handler:
if msg == "6":
if IS_ANDROID:
cls.handler.sendEmptyMessage(6)
else:
cls.handler.postNotificationName_object_("6", None)
elif msg == "3":
if IS_ANDROID:
cls.handler.sendEmptyMessage(3)
else:
cls.handler.postNotificationName_object_("3", None)
start = int(time.time())
while True:
wait_seconds = int(time.time()) - start
if cls.user_cancel:
cls.user_cancel = 0
raise BaseException("user cancel")
elif cls.passphrase != '':
passphrase_current = cls.passphrase
cls.passphrase = ''
return passphrase_current
elif wait_seconds >= 60:
raise BaseException("waiting passphrase timeout")
else:
time.sleep(0.0001)
#
@classmethod
def button_request(cls, code):
if code == 9:
timer = Timer(1.0, lambda: protocol.notify())
timer.start()
return
if IS_ANDROID:
cls.handler.sendEmptyMessage(code)
else:
cls.handler.postNotificationName_object_(f"{code}", None)
return
def finished(self):
return
def show_message(self, msg, on_cancel=None):
return
def prompt_auth(self, msg):
import getpass
print_stderr(msg)
response = getpass.getpass('')
if len(response) == 0:
return None
return response
def yes_no_question(self, msg):
print_stderr(msg)
return False
def stop(self):
pass
def show_error(self, msg, blocking=False):
print_stderr(msg)
def update_status(self, b):
    """Log the new hardware-device status value."""
    _logger.info(f'hw device status {b}')
|
"""
https://leetcode.com/problems/maximum-subarray/
Given an integer array nums, find the contiguous subarray (containing at least one number) which has the largest sum and return its sum.
Example 1:
Input: nums = [-2,1,-3,4,-1,2,1,-5,4]
Output: 6
Explanation: [4,-1,2,1] has the largest sum = 6.
Example 2:
Input: nums = [1]
Output: 1
Example 3:
Input: nums = [5,4,-1,7,8]
Output: 23
"""
from typing import List
class Solution:
    """Kadane's algorithm for LeetCode 53 (maximum subarray sum)."""

    def maxSubArray(self, nums: List[int]) -> int:
        """Return the largest sum of any non-empty contiguous subarray.

        O(n) time, O(1) extra space. Assumes ``nums`` is non-empty
        (guaranteed by the problem statement).
        """
        best = current = nums[0]
        for value in nums[1:]:
            # Either extend the running subarray or start fresh at value —
            # max() replaces the original hand-rolled conditional.
            current = max(current + value, value)
            best = max(best, current)
        return best
# Smoke-test the solver on sample inputs. The three copy-pasted blocks are
# collapsed into a loop and the "Inpur" typo in the output is fixed.
for nums in ([1, 2], [-2, -1], [-2, 1, -3, 4, -1, 2, 1, -5, 4]):
    print("Input - Nums {}".format(nums))
    ans = Solution().maxSubArray(nums)
    print("Solution - {}".format(ans))
|
import random
from collections import namedtuple
from itertools import product, chain
from enum import Enum
from .tile import Tile
# Movement directions for a 2048-style grid.
Direction = Enum('Direction', "up left down right")
# (row, column) coordinate of a cell on the grid.
Position = namedtuple('Position', "row column")
# Kinds of atomic grid mutations, recorded as GridAction objects.
Actions = Enum('Actions', "spawn move merge")
class SpawnTileError(Exception):
    """Raised when a tile cannot be spawned because no candidate cell is empty."""
    pass
class GridAction(object):
    """Record of a single grid mutation: a spawn, move, or merge."""

    def __init__(self, type, new, old=None):
        # type: one of the Actions enum members.
        self.type = type
        # new/old: Position of the affected cell(s); old is None for spawns.
        self.new = new
        self.old = old

    def __repr__(self):
        origin = ", old=" + str(self.old) if self.old else ""
        return "GridAction({}, new={}{})".format(self.type, self.new, origin)
class Grid(object):
    """A rows x cols 2048 board; each cell holds a Tile instance or None."""

    # Class-level placeholder; every instance gets its own matrix in __init__.
    _grid = [[]]

    def __init__(self, rows=4, cols=4, Tile=Tile):
        # The Tile class is injectable so variants/tests can supply their own.
        self._grid = [[None for _ in range(cols)] for _ in range(rows)]
        self.Tile = Tile

    def __getitem__(self, item):
        # Delegate indexing to the underlying list-of-lists: grid[row][col].
        return self._grid.__getitem__(item)

    def __setitem__(self, item, value):
        return self._grid.__setitem__(item, value)

    def __delitem__(self, item):
        return self._grid.__delitem__(item)

    def __len__(self):
        # Number of rows.
        return len(self._grid)

    def __repr__(self):
        return "Grid(rows={}, cols={})".format(len(self), len(self[0]))

    @property
    def highest_tile(self):
        """The maximum tile on the board, or None when the board is empty."""
        # chain(*self) flattens the matrix; filter(None, ...) drops empty cells.
        ret_tile = None
        for tile in filter(None, chain(*self)):
            ret_tile = tile if not ret_tile else max(ret_tile, tile)
        return ret_tile

    @property
    def possible_moves(self):
        """Directions whose dry-run move would change the board."""
        return [dir for dir in Direction if self.move(dir, apply=False)]

    def spawn_tile(self, row=None, column=None, apply=True, **kwargs):
        """Spawn a new tile on a random empty cell.

        ``row``/``column`` restrict the candidates to one row and/or column;
        ``kwargs`` are forwarded to the Tile constructor. With apply=False
        only the GridAction is returned, the board is untouched.

        Raises:
            SpawnTileError: when no candidate cell is empty.
        """
        rows, cols = len(self), len(self[0])
        empty_tiles = []
        # Collect empty cells, limited to the requested row/column if given.
        for row_idx in range(row or 0, row + 1 if row is not None else rows):
            for col_idx in range(
                    column or 0, column + 1 if column is not None else cols):
                if not self[row_idx][col_idx]:
                    empty_tiles.append((row_idx, col_idx))
        if len(empty_tiles) == 0:
            raise SpawnTileError("no empty tiles")
        row, column = random.choice(empty_tiles)
        if apply:
            self[row][column] = self.Tile(**kwargs)
        return GridAction(Actions.spawn, Position(row, column))

    def move_tile(self, old, new, apply=True):
        """Move the tile at ``old`` to ``new`` and return the GridAction."""
        old, new = Position(*old), Position(*new)
        if apply:
            self[new.row][new.column] = self[old.row][old.column]
            self[old.row][old.column] = None
        return GridAction(Actions.move, new, old)

    def merge_tiles(self, old, new, apply=True):
        """Merge the tile at ``old`` into the equal tile at ``new``.

        Raises:
            ValueError: when ``new`` is empty or the tiles are not equal.
        """
        old, new = Position(*old), Position(*new)
        if (not self[new.row][new.column] or
                self[new.row][new.column] != self[old.row][old.column]):
            raise ValueError
        if apply:
            # Doubling a tile == incrementing its exponent (2048 semantics).
            self[new.row][new.column].exponent += 1
            self[old.row][old.column] = None
        return GridAction(Actions.merge, new, old)

    def move_vertical(self, direction, apply=True):
        """Yield the GridActions of an up/down move, column by column.

        For each target cell, scan the cells "behind" it in the move
        direction: merge an equal neighbour, or slide the nearest tile in
        and then check whether the next tile behind can merge with it.
        NOTE(review): with apply=False the post-move merge check compares
        against the still-unmoved board, so dry runs may report fewer
        merges than an applied move would perform — confirm intended.
        """
        row_idx_iter = range(len(self))
        if direction is Direction.down:
            row_idx_iter = reversed(row_idx_iter)
        # product() materializes both iterables, so the reversed iterator is
        # safely reused across columns.
        for col_idx, row_idx in product(range(len(self[0])), row_idx_iter):
            if direction is Direction.down:
                pos_in_column = range(row_idx - 1, -1, -1)
            else:
                pos_in_column = range(row_idx + 1, len(self))
            for row_pos in pos_in_column:
                if self[row_idx][col_idx]:  # Tile occupied, maybe merge
                    if self[row_pos][col_idx]:
                        if self[row_idx][col_idx] == self[row_pos][col_idx]:
                            old = Position(row_pos, col_idx)
                            new = Position(row_idx, col_idx)
                            yield self.merge_tiles(old, new, apply=apply)
                        # First tile behind blocks everything further away.
                        break
                elif self[row_pos][col_idx]:
                    # Empty target: slide the nearest tile into it ...
                    old = Position(row_pos, col_idx)
                    new = Position(row_idx, col_idx)
                    yield self.move_tile(old, new, apply=apply)
                    # ... then see if the next tile behind merges with it.
                    for row_pos in pos_in_column:
                        if self[row_pos][col_idx]:
                            if self[row_idx][col_idx] == self[row_pos][col_idx]:
                                old = Position(row_pos, col_idx)
                                yield self.merge_tiles(old, new, apply=apply)
                            break
                    break

    def move_horizontal(self, direction, apply=True):
        """Yield the GridActions of a left/right move; mirrors move_vertical."""
        col_idx_iter = range(len(self[0]))
        if direction is Direction.right:
            col_idx_iter = reversed(col_idx_iter)
        for row_idx, col_idx in product(range(len(self)), col_idx_iter):
            if direction is Direction.right:
                pos_in_row = range(col_idx - 1, -1, -1)
            else:
                pos_in_row = range(col_idx + 1, len(self[0]))
            for col_pos in pos_in_row:
                if self[row_idx][col_idx]:
                    if self[row_idx][col_pos]:
                        if self[row_idx][col_idx] == self[row_idx][col_pos]:
                            old = Position(row_idx, col_pos)
                            new = Position(row_idx, col_idx)
                            yield self.merge_tiles(old, new, apply=apply)
                        break
                elif self[row_idx][col_pos]:
                    old = Position(row_idx, col_pos)
                    new = Position(row_idx, col_idx)
                    yield self.move_tile(old, new, apply=apply)
                    for col_pos in pos_in_row:
                        if self[row_idx][col_pos]:
                            if self[row_idx][col_idx] == self[row_idx][col_pos]:
                                old = Position(row_idx, col_pos)
                                yield self.merge_tiles(old, new, apply=apply)
                            break
                    break

    def move(self, direction, apply=True):
        """Apply (or dry-run) a move; return the list of resulting GridActions."""
        if not isinstance(direction, Direction):
            raise TypeError
        if direction in (Direction.up, Direction.down):
            return list(self.move_vertical(direction, apply))
        return list(self.move_horizontal(direction, apply))

    def resize(self, rows=None, cols=None):
        """Grow or shrink the board in place; new cells start empty."""
        if rows:
            if rows < len(self):
                del self[rows:]
            elif rows > len(self):
                columns = cols or len(self[0])
                for _ in range(rows - len(self)):
                    self._grid.append([None for _ in range(columns)])
        if cols:
            for row in self:
                if cols < len(row):
                    del row[cols:]
                elif cols > len(row):
                    row.extend([None for _ in range(cols - len(row))])
|
# coding=utf-8
from django.core import serializers
import sqlite3
import json
from datetime import datetime
from django.conf import settings
from django.contrib.sites.models import Site
from django.utils import feedgenerator
from django.shortcuts import render_to_response, get_object_or_404
from django.template.loader import render_to_string
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.contrib import auth
from django.contrib.auth import authenticate, login
from django.contrib.auth.models import User
from django.core.context_processors import csrf
from django.template import RequestContext
from django.db.models import Min, Max, Avg
from .models import Decision, Board, BoardCategory, Session
from . import forms
from django.core import serializers
from itertools import groupby
def _board_to_json(board):
    # Serialize one board into {name, children: [year -> [session nodes]]}.
    board_node = {'name': board.shortcut, 'children': []}
    for year in Session.objects.filter(board=board).dates('date', 'year'):
        year_node = {'name': year.strftime('%Y'), 'children': []}
        for session in Session.objects.filter(board=board).filter(date__year=year.year):
            year_node['children'].append({
                'name': session.date.strftime('%d.%m.%Y'),
                'id': session.id,
            })
        board_node['children'].append(year_node)
    return board_node


def list(request):  # NOTE: shadows the builtin, but the name is wired into urls
    """Render the session browser.

    Builds the category -> (sub-category ->) board -> year -> session tree,
    dumps it to static/js/dump.json for the JS frontend, and renders the
    list template with the same JSON inline. The board serialization that
    was copy-pasted twice (and contained a duplicated name assignment) now
    lives in _board_to_json(); the dump file is written via ``with`` so the
    handle is always closed.
    """
    json_list = {'name': 'Gremien', 'children': []}
    for board_category in BoardCategory.objects.filter(parent=None):
        category_node = {'name': board_category.name, 'children': []}
        # Boards grouped under sub-categories of this category.
        for sub_category in BoardCategory.objects.filter(parent=board_category):
            sub_node = {'name': sub_category.name, 'children': []}
            for board in sub_category.board_set.all():
                sub_node['children'].append(_board_to_json(board))
            category_node['children'].append(sub_node)
        # Boards attached directly to the top-level category.
        for board in board_category.board_set.all():
            category_node['children'].append(_board_to_json(board))
        json_list['children'].append(category_node)
    json_dump = json.dumps(json_list, indent=4, sort_keys=True)
    with open('static/js/dump.json', 'w+') as json_file:
        json_file.write(json_dump)
    latest_sessions = Session.objects.all()[:3]
    c = {}
    c.update(csrf(request))
    return render_to_response(
        "beschluss/list.html",
        {
            'list_as_json': json_dump,
            'sessions': Session.objects.all(),
            'latest_sessions': latest_sessions,
            'auth_form': forms.AuthForm,
            'session_form': forms.SessionForm,
            'decision_form': forms.DecisionFormSet,
            'c': c
        },
        context_instance=RequestContext(request)
    )
def logUserIn(request):
    """Authenticate a user from POSTed credentials, then redirect home."""
    user = authenticate(
        username=request.POST['username'],
        password=request.POST['password'],
    )
    if user is None:
        # the authentication system was unable to verify the username and password
        print("The username and password were incorrect.")
    elif user.is_active:
        # the password verified for the user
        login(request, user)
        print("User is valid, active and authenticated")
    else:
        print("The password is valid, but the account has been disabled!")
    return HttpResponseRedirect('/')
def logUserOut(request):
    """End the current user's session and redirect to the start page."""
    auth.logout(request)
    return HttpResponseRedirect('/')
def session_send(request):
    """Create a Session plus its inline Decision objects from a POSTed form.

    Always redirects to '/', whether or not anything was saved.
    """
    form = forms.SessionForm()
    decision_formset = forms.DecisionFormSet(instance=Session())
    if request.method == 'POST':
        form = forms.SessionForm(request.POST)
        if form.is_valid():
            # NOTE(review): the session is saved before the decision formset
            # is validated, so an invalid formset leaves a saved,
            # decision-less session behind — confirm this is intended.
            sess = form.save()
            decision_formset = forms.DecisionFormSet(request.POST, instance=sess)
            if decision_formset.is_valid():
                decision_formset.save()
    #s_date = form.cleaned_data['date']
    #s_board = form.cleaned_data['board']
    #s_legislature = form.cleaned_data['legislature']
    #s_report = form.cleaned_data['report']
    #sess = Session(date=s_date, board=s_board, legislature=s_legislature, report=s_report)
    #sess.save()
    return HttpResponseRedirect('/')
def overview(request):
    """Render the dashboard: recent sessions plus aggregate statistics."""
    latest_sessions = Session.objects.all()[:5]
    # average session period: sessions per week across all active boards
    sessions_per_week = sum(
        7 / board.session_period
        for board in Board.objects.filter(active=True, session_period__gt=0)
    )
    return render_to_response(
        "beschluss/overview.html", {
            'latest_sessions': latest_sessions,
            'sessions_per_week': int(sessions_per_week),
            'number_sessions': Session.objects.all().count(),
            'number_decision': Decision.objects.all().count()},
        context_instance=RequestContext(request)
    )
def boards(request):
    """List all top-level board categories."""
    context = {'parent_categories': BoardCategory.objects.filter(parent=None)}
    return render_to_response(
        "beschluss/boards.html",
        context,
        context_instance=RequestContext(request))
def board(request, shortcut):
    """Show a board's sessions, optionally restricted to one year.

    With a ``year`` GET parameter, only that year's sessions are rendered
    (each annotated with its month for regrouping in the template);
    otherwise all sessions are shown, annotated with their year.
    """
    board = get_object_or_404(Board, shortcut=shortcut)
    sessions = []
    # if year is given as get parameter, display sessions from this year
    if "year" in request.GET:
        year = int(request.GET["year"])
        # the sessions should be regroupable by month
        for session in board.session_set.filter(date__year=year).iterator():
            sessions.append({
                'month': session.date.month,
                'object': session
            })
        return render_to_response(
            "beschluss/board_sessions.html",
            {'sessions': sessions, 'board': board},
            context_instance=RequestContext(request)
        )
    # normal board view: simply display all sessions
    # Oldest session year; getattr's default kicks in when the board has no
    # sessions (the aggregate then returns None, which has no .year).
    year_min = getattr(board.session_set.aggregate(
        Min('date'))['date__min'],
        'year', datetime.today().year)
    for session in board.session_set.all().iterator():
        sessions.append({
            'year': session.date.year,
            'object': session
        })
    return render_to_response(
        "beschluss/board.html",
        {'board': board,
         'year_min': year_min,
         'year_now': datetime.now().year,
         'sessions': sessions},
        context_instance=RequestContext(request)
    )
def add_session_to_feed(feed, session):
    """Append one session as an item to the given syndication feed."""
    current_site = Site.objects.get_current()
    url = "http://%s%s" % (current_site.domain, session.get_absolute_url())
    body = render_to_string(
        "beschluss/feed_session.html",
        {'session': session, 'session_absolute_url': url}
    )
    feed.add_item(
        title="%s: %s" % (session.board.shortcut, session.date),
        link=url,
        description=body,
        pubdate=session.date,
        author_name=session.board
    )
def feed_global(request):
    """Atom feed covering the 50 most recent sessions of all boards."""
    site = Site.objects.get_current()
    feed = feedgenerator.Atom1Feed(
        title=settings.ORGANISATION,
        link="http://" + site.domain,
        language=settings.LANGUAGE_CODE,
        description="",
        feed_copyright=settings.FEED_LICENSE
    )
    for sess in Session.objects.all()[:50]:
        add_session_to_feed(feed, sess)
    return HttpResponse(
        feed.writeString('UTF-8'),
        mimetype='application/atom+xml')
def feed_board(request, shortcut):
    """Atom feed covering every session of a single board."""
    board = get_object_or_404(Board, shortcut=shortcut)
    site = Site.objects.get_current()
    feed = feedgenerator.Atom1Feed(
        title=board.name,
        link="http://%s%s" % (site.domain, board.get_absolute_url()),
        language=settings.LANGUAGE_CODE,
        description=board.info,
        feed_copyright=settings.FEED_LICENSE
    )
    for sess in board.session_set.all().iterator():
        add_session_to_feed(feed, sess)
    return HttpResponse(
        feed.writeString('UTF-8'),
        mimetype='application/atom+xml')
def session(request, id):
    """Detail page for a single session (404 when the id does not exist)."""
    return render_to_response(
        "beschluss/session.html",
        {'session': get_object_or_404(Session, pk=id)},
        context_instance=RequestContext(request)
    )
def report(request, session_id):
    """Stream the session's PDF report as an attachment.

    Raises:
        Http404: when the session does not exist or has no report file.
    """
    session = get_object_or_404(Session, pk=session_id)
    if not session.report:
        raise Http404
    # TODO: implement auth here
    # ...
    # response
    from django.core.servers.basehttp import FileWrapper
    from os.path import split
    # Open in binary mode: the report is a PDF; text mode corrupts it on
    # Python 3 (and mangles newlines elsewhere). FileWrapper streams the
    # file, so the handle is closed when the response is consumed.
    report = open(session.report.path, 'rb')
    response = HttpResponse(
        FileWrapper(report),
        content_type='application/pdf')
    response['Content-Disposition'] = 'attachment; filename=%s' % split(
        session.report.path
    )[1]
    return response
from django.contrib.auth.models import AbstractUser
from django.core.validators import RegexValidator
from django.db import models
class User(AbstractUser):
    """Custom user: unique email, optional validated phone number, owner flag."""

    # Redefines AbstractUser.email so every user's address must be unique.
    email = models.EmailField(
        'email address',
        unique=True,
        error_messages={
            'unique': "A user with that email already exists.",
        },
    )
    # Validator for the phone number format.
    phone_regex = RegexValidator(
        regex=r'^\+?1?\d{9,15}$',
        message="Phone number must be entered in the format: '+999999999'. Up to 15 digits allowed."
    )
    # The user's phone number (optional).
    phone_number = models.CharField(
        validators=[phone_regex],
        max_length=17,
        blank=True
    )
    # Whether this account belongs to an owner.
    is_owner = models.BooleanField(
        default=False,
    )

    def __str__(self):
        return str(self.email)
|
# Copyright 2020 Pulser Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for JSON serializations."""
from __future__ import annotations
import warnings
from dataclasses import MISSING, Field
from typing import TYPE_CHECKING, Any, Optional, Sequence
import pulser
from pulser.json.exceptions import AbstractReprError
if TYPE_CHECKING: # pragma: no cover
from pulser.register import QubitId
def get_dataclass_defaults(fields: tuple[Field, ...]) -> dict[str, Any]:
    """Collect the default values of the fields that define one.

    Fields with neither a default nor a default_factory are omitted.
    """
    defaults: dict[str, Any] = {}
    for fld in fields:
        if fld.default is not MISSING:
            defaults[fld.name] = fld.default
        elif fld.default_factory is not MISSING:
            # Call the factory so the caller receives a concrete value.
            defaults[fld.name] = fld.default_factory()
    return defaults
def obj_to_dict(
    obj: object,
    *args: Any,
    _build: bool = True,
    _module: Optional[str] = None,
    _name: Optional[str] = None,
    _submodule: Optional[str] = None,
    **kwargs: Any,
) -> dict[str, Any]:
    """Encodes an object in a dictionary for serialization.

    Args:
        obj: The object to encode in the dictionary.

    Other Parameters:
        _build (bool): Whether the object is to be built on deserialization.
        _module (str): Custom name for the module containing the object.
        _name (str): Custom name of the object.
        _submodule(str): Name of a submodule (e.g. the class holding a
            classmethod). Only used when defined.
        args: If the object is to be built, the arguments to give on creation.
        kwargs: If the object is to be built, the keyword arguments to give on
            creation.

    Returns:
        The dictionary encoding the object.
    """
    cls = obj.__class__
    d: dict[str, Any] = {
        "_build": _build,
        "__module__": _module or cls.__module__,
        "__name__": _name or cls.__name__,
    }
    if _build:
        # Only buildable objects carry their construction arguments.
        d["__args__"] = args
        d["__kwargs__"] = kwargs
    if _submodule:
        d["__submodule__"] = _submodule
    # Reject anything outside the supported serialization surface.
    pulser.json.supported.validate_serialization(d)
    return d
def stringify_qubit_ids(qubit_ids: Sequence[QubitId]) -> list[str]:
    """Converts all qubit IDs into strings and looks for conflicts."""
    non_string_ids = [qid for qid in qubit_ids if not isinstance(qid, str)]
    names = [str(qid) for qid in qubit_ids]
    if non_string_ids:
        warnings.warn(
            "Register serialization to an abstract representation "
            "irreversibly converts all qubit ID's to strings.",
            stacklevel=2,
        )
        # Conversion must stay injective: a non-string ID whose string form
        # matches an existing ID would silently alias another qubit.
        if len(set(names)) < len(names):
            collisions = [
                qid for qid in non_string_ids if str(qid) in qubit_ids
            ]
            raise AbstractReprError(
                "Name collisions encountered when converting qubit IDs to "
                f"strings for IDs: {[(id, str(id)) for id in collisions]}"
            )
    return names
|
# URL configuration for the mvhbc site (legacy Django <=1.7 `patterns` API).
from django.conf.urls import patterns, include, url
from mvhbc import views
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'clubmanager.views.home', name='home'),
    # url(r'^clubmanager/', include('clubmanager.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    url(r'^$', views.home, name="home"),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^about/', views.about, name="about"),
    url(r'^membership/', views.membership, name="membership"),
    url(r'^officers/', views.officers, name="officers"),
    # Login uses the stock auth view (string-path form, pre-Django-1.10).
    url(r'^accounts/login/', 'django.contrib.auth.views.login', {'template_name': 'competition_login.html'}, name="accounts_login"),
    url(r'^competition/', include('competition.urls')),
    url(r'^resources/', views.resources, name="resources"),
)
|
# -*- coding: utf-8 -*-
"""
Simple NLP Encoding
Created on Thu Jan 31 13:51:08 2019
@author: Markus.Meister
"""
import glob
import sys
import os
import torch
import pandas as pd
import numpy as np
import nltk
#from nltk import word_tokenize as tkn
import gensim
from gensim import corpora, models, similarities
# German Punkt tokenizer from NLTK (requires nltk.download('punkt') first).
sentence_detector = nltk.data.load('tokenizers/punkt/german.pickle')
tkn = sentence_detector.tokenize
try:
    cd_here = glob.glob('')
except Exception as e:
    print(e)
    cd_here = []
# Make the local 'utils' package importable when running from the repo root.
# NOTE(review): glob.glob('') always returns [], so this condition never
# holds as written — confirm the intended pattern (e.g. glob.glob('*')).
if 'utils' in cd_here:
    sys.path.append('utils')
import scii_funs
class key_word_encoder:
    """Word2Vec-based encoder for keyword/sentence dictionaries.

    Tokenizes an iterable of sentences, trains a gensim Word2Vec model
    (optionally seeded from a pretrained binary model), and can export the
    tokens as torch tensors with their top-N most similar words.
    """

    # embedding dimensionality
    quote_size = 300
    # minimum corpus frequency for a word to enter the vocabulary
    min_word_quote = 1

    def __init__(self, dictionary=None, modelif=None):
        """Store the tokenized corpus and an optional pretrained-model path.

        Fix: previously the default ``dictionary=None`` crashed here, even
        though encode()/tensorize() explicitly support supplying the
        dictionary later.
        """
        if dictionary is None:
            self.diction = None
        else:
            self.diction = [tkn(sent) for sent in dictionary]
        self.word2vec = gensim.models.Word2Vec
        # Optional path to a pretrained word2vec binary used to seed vectors.
        self.modelif = modelif

    def encode(self, dictionary=None, quote_size=None, min_word_quote=None):
        """Train the Word2Vec model on the stored (or newly given) corpus.

        Returns the trained gensim model (also kept as ``self.wordinfo``).
        """
        if self.diction is None:
            if dictionary is None:
                print("Sorry: No dictionary declared!")
            if isinstance(dictionary, pd.Series):
                dictionary = dictionary.values
            self.diction = [tkn(sent) for sent in dictionary]
        if quote_size:
            self.quote_size = quote_size
        if min_word_quote:
            self.min_word_quote = min_word_quote
        self.wordinfo = self.word2vec(
            min_count=self.min_word_quote, size=self.quote_size
        )
        self.wordinfo.build_vocab(self.diction)
        if self.modelif is not None:
            # Seed vocabulary/weights from the pretrained binary model, then
            # fine-tune on our corpus (lockf=1.0 keeps the vectors trainable).
            model = models.KeyedVectors.load_word2vec_format(self.modelif, binary=True)
            self.wordinfo.build_vocab([list(model.vocab.keys())], update=True)
            self.wordinfo.intersect_word2vec_format(self.modelif, binary=True, lockf=1.0)
        self.wordinfo.train(
            self.diction,
            total_examples=self.wordinfo.corpus_count,
            epochs=self.wordinfo.iter,
        )
        return self.wordinfo

    def save_wv(self, path="wv.model"):
        """Persist the trained word vectors in word2vec binary format."""
        self.wordinfo.wv.save_word2vec_format(path, binary=True)

    def tensorize(self, dictionary=None, quote_size=None, min_word_quote=None, n_most_similar=3):
        """Embed every token plus its n most similar words as torch tensors.

        Returns a dict with the token tensor, the top-N neighbour tensor,
        the flat token array and the item ids produced by unfolding.
        """
        if self.diction is None:
            if dictionary is None:
                print("Sorry: No dictionary declared!")
            if isinstance(dictionary, pd.Series):
                dictionary = dictionary.values
            self.diction = [tkn(scii_funs.unicodeToAscii(sent)) for sent in dictionary]
        if quote_size:
            self.quote_size = quote_size
        if min_word_quote:
            self.min_word_quote = min_word_quote
        # BUG FIX: was hasattr(self, 'worinfo') — a typo that is always False
        # and silently re-trained the model on every tensorize() call.
        if not hasattr(self, 'wordinfo'):
            self.encode()
        my_dictionary, my_iids = scii_funs.unfold_lists(self.diction)
        my_dictionary = np.array(my_dictionary)
        my_iids = np.array(my_iids)
        # malloc
        topNten = torch.zeros([my_dictionary.shape[0], n_most_similar, self.quote_size])
        wordTensors = torch.zeros([my_dictionary.shape[0], self.quote_size])
        # ----------------------------- words --------------------------------
        for j, d in enumerate(my_dictionary):
            topN = self.wordinfo.wv.most_similar(d, topn=n_most_similar)
            topNlist = np.array([self.wordinfo[sim[0]] for sim in topN])
            topNten[j] = torch.from_numpy(topNlist)
            wordTensors[j] = torch.from_numpy(self.wordinfo[d])
        return {
            'tensor': wordTensors,
            'topN': topNten,
            'iids': my_iids,
            'dictionary': my_dictionary,
        }
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.