'''Test ORM '''
import pytest
from flask_sqlalchemy import SQLAlchemy
from news.models import NewsArticle
@pytest.fixture(scope='function')
def db(app):
    db = SQLAlchemy(app)
return db
def test_create_tables(db):
db.create_all()
assert True
db.drop_all()
|
# -*- coding: utf-8 -*-
"""
Least-squares fit of the Holzapfel model parameters using trained
stress-strain neural-network metamodels
"""
import matplotlib.pyplot as plt
import os
import sys
import numpy as np
import pandas as pd
import pickle
import csv
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
from torch.utils.data import Dataset, TensorDataset, DataLoader
import imageio
from time import time
from random import seed, randint
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from scipy import io
from lmfit import Parameters, minimize, report_fit, fit_report
from scipy import interpolate
import Metamodels
import PreProcess
import PostProcess
def MetaModelExport(scaled_params, strain, fiber_param, WDH_param, models, scalers):
# NN device
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Initialize output
n_data_points = len(strain[1])
MDL_data = np.array([]).reshape(n_data_points,0)
# Loop through testing modes
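    # Modes come in groups of three per cube orientation: within each group the
    # first two are shear modes (strain range +/-0.4, two stress components) and
    # the third is a normal mode (strain range +/-0.15, one component).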
for i in range(1,10):
IsShear = (i%3) != 0
# Export cube dimensions for current position
current_pos = ((i-1)//3)*3
WDH_dim = WDH_param[current_pos:current_pos+3]
# Assemble X_input
X_input = np.atleast_2d(np.concatenate((scaled_params,WDH_dim,fiber_param)))
# Scale input
X_scaled = scalers[i].transform(X_input)
# Convert to tensor
X_tensor = torch.from_numpy(X_scaled).float().to(device)
# Run model
fX = models[i](X_tensor)
        fX_np = fX.detach().cpu().numpy()  # detach and move to CPU so this also works when running on GPU
Tdir = fX_np[0,0:50]
Tnor = fX_np[0,50:100] #if applicable
# Interpolate model export
if IsShear:
FEBioStrain = np.linspace(-0.4,0.4,50)
Fx_interp = interpolate.interp1d(FEBioStrain, Tdir, kind='cubic')
Fz_interp = interpolate.interp1d(FEBioStrain, Tnor, kind='cubic')
# Interpolate the strains of interest
Fx_new = Fx_interp(strain[i])
Fz_new = Fz_interp(strain[i])
# Assemble the global vector
MDL_data = np.column_stack((MDL_data,Fx_new))
MDL_data = np.column_stack((MDL_data,Fz_new))
else:
FEBioStrain = np.linspace(-0.15,0.15,50)
Fx_interp = interpolate.interp1d(FEBioStrain, Tdir, kind='cubic')
Fx_new = Fx_interp(strain[i])
# Assemble the global vector
MDL_data = np.column_stack((MDL_data,Fx_new))
return MDL_data
# Objective function
def objective(params, strain, exp_data, fiber_param, WDH_param, models, scalers):
    """Calculate the total residual between the experimental data and the metamodel predictions across all testing modes."""
# Export and scale parameters
scaled_params = [0]*8
scaled_params[0] = params['a'].value
scaled_params[1] = params['b']*1e4
scaled_params[2] = params['af'].value
scaled_params[3] = params['bf']*1e4
scaled_params[4] = params['ash'].value
scaled_params[5] = params['bsh']*1e4
scaled_params[6] = params['afs'].value
scaled_params[7] = params['bfs']*1e4
    # Initialize residual array
resid = 0.0*exp_data[:]
mdl_dat = MetaModelExport(scaled_params, strain, fiber_param, WDH_param, models, scalers)
# make residual per data set
for i in range(15):
resid[:,i] = exp_data[:,i] - mdl_dat[:,i]
# now flatten this to a 1D array, as minimize() needs
return resid.flatten()
if __name__ == "__main__":
# Input (experimental data)
TSLs = [1,2,3,4,5,7,8,9,10,11,12]
n_data_points = 100 # points per stress strain curve
# Load trained NNs
Num_train = 8000
DirName = './ExportedData/FWD_HolzapfelNN_Final/Output_HolzapfelNN_Final_'
# initialize scalers and models
scalers = {x: [] for x in range(1,10)}
models = {x: [] for x in range(1,10)}
for i in range(1,10):
Xscaler_dir = DirName + str(Num_train)+'Mode_' + str(i) + '/Torch_NN_' + str(Num_train) + 'Xscaler.sav'
Settings_dir = DirName + str(Num_train)+'Mode_' + str(i) + '/Torch_NN_' + str(Num_train) + 'Settings.sav'
Model_path = DirName + str(Num_train)+'Mode_' + str(i) + '/Torch_NN_' + str(Num_train) + '.pt'
NN_settings = pickle.load(open(Settings_dir, 'rb'))
NUM_FEATURES = NN_settings['Feat']
HIDDEN_DIM = NN_settings['Dim']
NUM_OUTPUT = NN_settings['Out']
# import scalers and models
scalers[i] = pickle.load(open(Xscaler_dir, 'rb'))
models[i] = Metamodels.NN(feature_dim=NUM_FEATURES, hidden_dim=HIDDEN_DIM, output_dim=NUM_OUTPUT)
models[i].load_state_dict(torch.load(Model_path))
models[i].eval()
for TSLn in TSLs:
print('\n Subject TSL: %i' %(TSLn))
# Read experimental data and parameters
strain, exp_data, fiber_param, WDH_param = PreProcess.load_RV_ExpData(n_data_points,TSLn)
# Initialize parameters to be fitted
fit_params = Parameters()
fit_params.add('a', value=0.001, min=0.0, max=100)
fit_params.add('b', value=0.0010, min=0.0, max=10)
fit_params.add('af', value=0.001, min=0.0, max=100)
fit_params.add('bf', value=0.0010, min=0.0, max=10)
fit_params.add('ash', value=0.001, min=0.0, max=100)
fit_params.add('bsh', value=0.0010, min=0.0, max=10)
fit_params.add('afs', value=0.001, min=0.0, max=100)
fit_params.add('bfs', value=0.0010, min=0.0, max=10)
# Run matlab lsqnonlin equivalent
out = minimize(objective, fit_params, method = 'least_squares', args=(strain, exp_data, fiber_param, WDH_param, models, scalers))
# Inverse scale final outputs
final_params = [0]*8
final_params[0] = out.params['a'].value
final_params[1] = out.params['b']*1e4
final_params[2] = out.params['af'].value
final_params[3] = out.params['bf']*1e4
final_params[4] = out.params['ash'].value
final_params[5] = out.params['bsh']*1e4
final_params[6] = out.params['afs'].value
final_params[7] = out.params['bfs']*1e4
best_fit = MetaModelExport(final_params, strain, fiber_param, WDH_param, models, scalers)
# Plot fits from ML metamodels
export = PostProcess.ExportData('RV_Holz', 'BestFits')
export.RV_StressStrain(strain, exp_data, best_fit, TSLn)
if not os.path.exists('./ExportedData/HolzapfelParams'):
os.makedirs('./ExportedData/HolzapfelParams')
param_path = './ExportedData/HolzapfelParams/TSL' + str(TSLn) + '.csv'
np.savetxt(param_path, final_params, delimiter=",")
|
def max_char(string1):
ASCII=256
L=len(string1)
count=[0]*256
for i in string1:
count[ord(i)]=count[ord(i)]+1
max=0
c=''
for i in string1:
if(count[ord(i)])>max:
max=count[ord(i)]
c=i
    print "The max character is", c, "occurring", max, "times"
string1="abccca"
max_char(string1)
|
# BASIC GOAL: Imagine that your friend is a cashier, but has a hard time counting back change to customers. Create a program that allows him to input a certain amount of change, and then prints how many quarters, dimes, nickels, and pennies are needed to make up that amount.
#
# For example, if he inputs 1.47, the program will tell that he needs 5 quarters, 2 dimes, 0 nickels, and 2 pennies.
#
# SUBGOALS
#
# So your friend doesn't have to calculate how much change is needed, allow him to type in the amount of money given to him and the price of the item. The program should then tell him the amount of each coin he needs like usual.
#
# To make the program even easier to use, loop the program back to the top so your friend can continue to use the program without having to close and open it every time he needs to count change.
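# Before the interactive version below, a minimal sketch of the coin arithmetic on
# its own. The helper name and its use of divmod are additions for illustration and
# are not used by the program that follows.
def coin_breakdown(cents):
    """Return (quarters, dimes, nickels, pennies) for an amount given in whole cents."""
    quarters, rest = divmod(cents, 25)
    dimes, rest = divmod(rest, 10)
    nickels, pennies = divmod(rest, 5)
    return quarters, dimes, nickels, pennies
# Example from the goal above: coin_breakdown(147) == (5, 2, 0, 2).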
import math
while True:
    # Convert dollars to whole cents with round() so values like 1.47 do not
    # lose a cent to floating-point truncation (1.47 * 100 == 146.999...).
    price = round(float(input("Price of item(s): ")) * 100)
    cash = round(float(input("Cash given: ")) * 100)
change = (cash - price)
no_of_quarters = 0
no_of_dimes = 0
no_of_nickels = 0
no_of_pennies = 0
if change >= 25:
no_of_quarters = math.floor(change / 25)
change = change % 25
if change >= 10:
no_of_dimes = math.floor(change / 10)
change = change % 10
if change >= 5:
no_of_nickels = math.floor(change / 5)
change = change % 5
if change >= 1:
no_of_pennies = change
print(str(no_of_quarters) + " quarters")
print(str(no_of_dimes) + " dimes")
print(str(no_of_nickels) + " nickels")
print(str(no_of_pennies) + " pennies")
|
import json
import boto3
from elasticsearch import Elasticsearch, RequestsHttpConnection
from requests_aws4auth import AWS4Auth
TABLE_NAME = 'yelp-restaurants'
SAMPLE_N = '5'
host = 'https://search-cloud-elastic-search-ybmh47fjqd7qokijii7kfebh74.us-east-1.es.amazonaws.com'
queue_url = 'https://sqs.us-east-1.amazonaws.com/089149523310/Cloudqueue'
# credentials = boto3.Session().get_credentials()
# awsauth = AWS4Auth(credentials.access_key, credentials.secret_key, 'us-east-1', 'es',session_token=credentials.token)
sqsclient = boto3.client('sqs',region_name='us-east-1')
es = Elasticsearch(
host,
http_auth = ('username', 'password'),
connection_class = RequestsHttpConnection
)
#check = es.get(index="restaurants", doc_type="Restaurant", id='-KahGyU9G7JT0JmoC_Yc0Q')
#print(check)
def sendsms(number, message):
send_sms = boto3.client('sns',region_name='us-east-1')
smsattrs = {
'AWS.SNS.SMS.SenderID': {
'DataType': 'String',
'StringValue': 'TestSender'
},
'AWS.SNS.SMS.SMSType': {
'DataType': 'String',
'StringValue': 'Transactional' # change to Transactional from Promotional for dev
}
}
response = send_sms.publish(
PhoneNumber=number,
Message=message,
MessageAttributes=smsattrs
)
print(number)
print(response)
print("The message is: ", message)
def search(cuisine):
data = es.search(index="restaurants", body={"query": {"match": {'categories.title':cuisine}}})
print(data)
print("search complete", data['hits']['hits'])
return data['hits']['hits']
def get_restaurant_data(ids):
dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
table = dynamodb.Table('yelp-restaurants')
ans = 'Hi! Here are your suggestions,\n '
i = 1
for id in ids:
if i<6:
response = table.get_item(
Key={
'id': id
}
)
print(response)
response_item = response['Item']
print(response_item)
restaurant_name = response_item['name']
restaurant_address = response_item['address']
# restaurant_city = response_item['city:']
restaurant_zipcode = response_item['zip_code']
restaurant_rating = str(response_item['rating'])
ans += "{}. {}, located at {}\n".format(i, restaurant_name, restaurant_address)
# return ans
i += 1
else:
break
print("db pass")
return ans # string type
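# Handler flow: pull one message from the SQS queue, look up restaurants in
# Elasticsearch by cuisine, fetch their details from DynamoDB, text the
# suggestions to the user via SNS, then delete the processed SQS message.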
def lambda_handler(event=None, context=None):
messages = sqsclient.receive_message(QueueUrl=queue_url, MessageAttributeNames=['All'])
print(messages)
try:
message = messages['Messages'][0]
receipt_handle = message['ReceiptHandle']
req_attributes = message['MessageAttributes']
print(req_attributes)
cuisine = req_attributes['Cuisine']['StringValue']
location = req_attributes['Location']['StringValue']
dining_date = req_attributes['DiningDate']['StringValue']
dining_time = req_attributes['DiningTime']['StringValue']
num_people = req_attributes['PeopleNum']['StringValue']
phone = req_attributes['PhoneNum']['StringValue']
print(location, cuisine, dining_date, dining_time, num_people, phone)
print(phone)
ids = search(cuisine)
ids = list(map(lambda x: x['_id'], ids))
print(ids)
rest_details = get_restaurant_data(ids)
sendsms("+1"+phone, rest_details)
sqsclient.delete_message(
QueueUrl=queue_url,
ReceiptHandle=receipt_handle
)
except Exception as e:
print(e)
return {
'statusCode': 200,
'body': json.dumps('Hello from Lambda LF2!')
}
lambda_handler()
|
from enum import Enum
from random import random
from pydantic import BaseModel, Field
from quart import Quart, abort, jsonify, request
from quart.views import MethodView
from spectree import Response, SpecTree
app = Quart(__name__)
spec = SpecTree("quart")
class Query(BaseModel):
text: str = "default query strings"
class Resp(BaseModel):
label: int
score: float = Field(
...,
gt=0,
lt=1,
)
class Data(BaseModel):
uid: str
limit: int = 5
vip: bool
class Config:
schema_extra = {
"example": {
"uid": "very_important_user",
"limit": 10,
"vip": True,
}
}
class Language(str, Enum):
en = "en-US"
zh = "zh-CN"
class Header(BaseModel):
Lang: Language
class Cookie(BaseModel):
key: str
@app.route(
"/api/predict/<string(length=2):source>/<string(length=2):target>", methods=["POST"]
)
@spec.validate(
query=Query, json=Data, resp=Response("HTTP_403", HTTP_200=Resp), tags=["model"]
)
def predict(source, target):
"""
predict demo
demo for `query`, `data`, `resp`, `x`
query with
``http POST ':8000/api/predict/zh/en?text=hello' uid=xxx limit=5 vip=false ``
"""
print(f"=> from {source} to {target}") # path
print(f"JSON: {request.json}") # Data
print(f"Query: {request.args}") # Query
if random() < 0.5:
abort(403)
return jsonify(label=int(10 * random()), score=random())
@app.route("/api/header", methods=["POST"])
@spec.validate(
headers=Header, cookies=Cookie, resp=Response("HTTP_203"), tags=["test", "demo"]
)
async def with_code_header():
"""
demo for JSON with status code and header
query with ``http POST :8000/api/header Lang:zh-CN Cookie:key=hello``
"""
return jsonify(language=request.headers.get("Lang")), 203, {"X": 233}
class UserAPI(MethodView):
@spec.validate(json=Data, resp=Response(HTTP_200=Resp), tags=["test"])
async def post(self):
return jsonify(label=int(10 * random()), score=random())
# return Resp(label=int(10 * random()), score=random())
if __name__ == "__main__":
"""
cmd:
http :8000/api/user uid=12 limit=1 vip=false
http ':8000/api/predict/zh/en?text=hello' vip=true uid=aa limit=1
http POST :8000/api/header Lang:zh-CN Cookie:key=hello
"""
app.add_url_rule("/api/user", view_func=UserAPI.as_view("user_id"))
spec.register(app)
app.run(port=8000)
|
import pymongo
from random import randint
def save_results(identifier, clusters, set_to_plot, alive_indexes, samples_to_analyze, pids_array):
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
mydb = myclient["results"]
mycol = mydb["elements"]
element_to_insert = {"ident":identifier, "clusters": clusters.tolist(), "set_to_plot": set_to_plot.tolist(), "alive_indexes": alive_indexes.tolist(),
"samples_to_analyze": samples_to_analyze.tolist(), "pids_array": pids_array}
x = mycol.insert_one(element_to_insert)
def load_results(identifier):
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
mydb = myclient["results"]
mycol = mydb["elements"]
myquery = {"ident": identifier}
mydoc = mycol.find(myquery)
for x in mydoc:
return (x)
|
from django.db import models
class student(models.Model):
first_name=models.CharField(max_length=20)
last_name=models.CharField(max_length=30)
contact=models.IntegerField()
email=models.EmailField(max_length=50)
age=models.IntegerField()
|
import sys
import math
def graham_scan(points):
"""Implementation of the Graham Scan Algorithm"""
start_vertex, start_index = find_lowest(points)
if points[0] != start_vertex:
points[0], points[start_index] = points[start_index], points[0]
sorted_points = populate_list(points, start_index)
return_stack = sorted_points[0:3]
remaining_points = sorted_points[3:]
for point in remaining_points:
while not is_left_turn(return_stack[-2], return_stack[-1], point):
return_stack.pop()
return_stack.append(point)
convex_hull = find_indices(points, return_stack)
if start_index in convex_hull:
hull_index = convex_hull.index(start_index)
convex_hull[0], convex_hull[hull_index] = convex_hull[hull_index], convex_hull[0]
else:
convex_hull[0] = start_index
print(convex_hull)
def find_lowest(points):
"""Finds the lowest point on the graph"""
lowest = points[0]
low_index = 0
for index, point in enumerate(points[1:]):
if point[1] < lowest[1]:
lowest = point
low_index = (index+1)
elif point[1] == lowest[1]:
if point[0] > lowest[0]:
lowest = point
low_index = (index+1)
return lowest, low_index
def theta(pointA, pointB):
"""Computes angle from A to B, from the x-axis through a"""
dx = pointB[0] - pointA[0]
dy = pointB[1] - pointA[1]
    if abs(dx) < 1e-6 and abs(dy) < 1e-6:  # math.exp(1e-6) is ~1.0, far too large a tolerance for "same point"
t = 0
else:
t = dy / float(abs(dx) + abs(dy))
if dx < 0:
t = 2 - t
elif dy < 0:
t = 4 + t
return float(t*90)
def populate_list(points, start_index):
"""Sorts the points in the list by angle"""
sorted_list = points
theta_list = [0]
for point in points[1:]:
theta_list.append(float(theta(points[0], point)))
sorted_list = [x for (y,x) in sorted(zip(theta_list,sorted_list))]
return(sorted_list)
def is_left_turn(pointA, pointB, pointC):
"""Checks whether the points make a left turn or not"""
bdx = pointB[0] - pointA[0]
bdy = pointB[1] - pointA[1]
cdx = pointC[0] - pointA[0]
cdy = pointC[1] - pointA[1]
lineFn = (bdx*cdy) - (bdy*cdx)
return (lineFn > 0)
def find_indices(points, stack):
"""Finds the location of all vertices in the list"""
hull = []
for point in stack:
hull.append(points.index(point))
return hull
file = open(sys.argv[1])
n = int(file.readline())
points = file.read().split()
x_val = None
y_val = None
use_points = []
for point in points:
if x_val == None:
x_val = int(point)
elif y_val == None:
y_val = int(point)
use_points.append((x_val, y_val))
x_val = None
y_val = None
points = use_points
graham_scan(points)
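# Expected input file format (inferred from the reading code above): the first
# line gives the number of points, followed by whitespace-separated integer
# x y coordinate pairs.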
|
# Copyright (c) 2013, Indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
def execute(filters=None):
columns, data = [], []
columns = get_columns()
data = get_data(filters)
return columns, data
def get_data(filters):
format = "%H:%i"
return frappe.db.sql(""" select lo.user, count(lo.name), usr.last_login,
TIME_FORMAT(SEC_TO_TIME(AVG(TIME_TO_SEC(timediff(lo.log_out_time, lo.login_time) ))), %(format)s ) as avg_duration
from `tabLogin Log` lo
left join `tabUser` usr
on lo.user = usr.name
where ( lo.login_time between %(start_time)s and %(end_time)s )
or ( lo.log_out_time between %(start_time)s and %(end_time)s )
group by lo.user """, {"start_time":filters.get("start_time"),
"end_time":filters.get("end_time"), "format":format}, as_list = 1)
def get_columns():
return [
_("User") + ":Link/User:200",
_("Logged In Count") + ":Int:150",
_("Last Login") + ":Datetime:180",
_("Average Duration (HH:MM)") + ":Data:200"
]
|
# JTSK-350112
# appropiparam.py
# Taiyr Begeyev
# t.begeyev@jacobs-university.de
from graphics import *
from random import randrange
import sys
def main():
print("Enter the length of the window")
d = int(input())
if d > 1000:
print("Window size shouldn't exceed 1000")
sys.exit()
print("Enter the numbers of points to be generated")
n = int(input())
if n < 0:
print("Should be natural number")
sys.exit()
# create a square window with the side length of d (400x400).
    win = GraphWin("Approximate Pi", d, d)
    # repeatedly create and draw n random points
counter = 0
for i in range(1, n + 1):
# random number generator
x = randrange(0, d)
y = randrange(0, d)
        # classify and plot the point first, then report the running estimate
        if ((x - d / 2) ** 2) + ((y - d / 2) ** 2) <= (d ** 2) / 4:
            # the generated point is inside the inscribed circle
            win.plotPixelFast(x, y, "red")
            counter += 1
        else:
            win.plotPixelFast(x, y, "blue")
        if i % 100 == 0:
            # ratio of points inside the circle to the total number of points, times 4
            ratio = counter / i * 4
            # print the running estimate on the screen
            print("Pi = {}".format(ratio))
# use the plotPixel() method to plot at least one pixel to actually show results
win.plotPixel(100, 100, "green")
win.getMouse()
win.close()
main()
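# Why the running ratio is multiplied by 4: points are uniform over the d x d
# square, and the inscribed circle covers pi*(d/2)^2 / d^2 = pi/4 of it, so
# pi ~= 4 * (points inside the circle) / (total points).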
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import os
from abc import ABC
from dataclasses import dataclass
from typing import ClassVar, Iterable, Mapping, Optional, Sequence, Tuple
from pants.core.util_rules.environments import _warn_on_non_local_environments
from pants.engine.addresses import Addresses
from pants.engine.console import Console
from pants.engine.env_vars import CompleteEnvironmentVars
from pants.engine.environment import EnvironmentName
from pants.engine.fs import Digest
from pants.engine.goal import Goal, GoalSubsystem
from pants.engine.process import InteractiveProcess, InteractiveProcessResult
from pants.engine.rules import Effect, Get, collect_rules, goal_rule
from pants.engine.target import FilteredTargets, Target
from pants.engine.unions import UnionMembership, union
from pants.option.option_types import BoolOption, StrOption
from pants.util.frozendict import FrozenDict
from pants.util.memo import memoized_property
from pants.util.strutil import softwrap
@union(in_scope_types=[EnvironmentName])
@dataclass(frozen=True)
class ReplImplementation(ABC):
"""A REPL implementation for a specific language or runtime.
Proxies from the top-level `repl` goal to an actual implementation.
"""
name: ClassVar[str]
targets: Sequence[Target]
def in_chroot(self, relpath: str) -> str:
return os.path.join("{chroot}", relpath)
@memoized_property
def addresses(self) -> Addresses:
return Addresses(t.address for t in self.targets)
class ReplSubsystem(GoalSubsystem):
name = "repl"
help = "Open a REPL with the specified code loadable."
@classmethod
def activated(cls, union_membership: UnionMembership) -> bool:
return ReplImplementation in union_membership
shell = StrOption(
default=None,
help="Override the automatically-detected REPL program for the target(s) specified.",
)
restartable = BoolOption(
default=False,
help="True if the REPL should be restarted if its inputs have changed.",
)
class Repl(Goal):
subsystem_cls = ReplSubsystem
environment_behavior = Goal.EnvironmentBehavior.LOCAL_ONLY
@dataclass(frozen=True)
class ReplRequest:
digest: Digest
args: Tuple[str, ...]
extra_env: FrozenDict[str, str]
immutable_input_digests: FrozenDict[str, Digest]
append_only_caches: FrozenDict[str, str]
run_in_workspace: bool
def __init__(
self,
*,
digest: Digest,
args: Iterable[str],
extra_env: Optional[Mapping[str, str]] = None,
immutable_input_digests: Mapping[str, Digest] | None = None,
append_only_caches: Mapping[str, str] | None = None,
run_in_workspace: bool = True,
) -> None:
object.__setattr__(self, "digest", digest)
object.__setattr__(self, "args", tuple(args))
object.__setattr__(self, "extra_env", FrozenDict(extra_env or {}))
object.__setattr__(
self, "immutable_input_digests", FrozenDict(immutable_input_digests or {})
)
object.__setattr__(self, "append_only_caches", FrozenDict(append_only_caches or {}))
object.__setattr__(self, "run_in_workspace", run_in_workspace)
@goal_rule
async def run_repl(
console: Console,
repl_subsystem: ReplSubsystem,
specified_targets: FilteredTargets,
union_membership: UnionMembership,
complete_env: CompleteEnvironmentVars,
) -> Repl:
await _warn_on_non_local_environments(specified_targets, "the `repl` goal")
# TODO: When we support multiple languages, detect the default repl to use based
# on the targets. For now we default to the python repl.
repl_shell_name = repl_subsystem.shell or "python"
implementations = {impl.name: impl for impl in union_membership[ReplImplementation]}
repl_implementation_cls = implementations.get(repl_shell_name)
if repl_implementation_cls is None:
available = sorted(implementations.keys())
console.print_stderr(
softwrap(
f"""
{repr(repl_shell_name)} is not a registered REPL. Available REPLs (which may
be specified through the option `--repl-shell`): {available}
"""
)
)
return Repl(-1)
repl_impl = repl_implementation_cls(targets=specified_targets)
request = await Get(ReplRequest, ReplImplementation, repl_impl)
env = {**complete_env, **request.extra_env}
result = await Effect(
InteractiveProcessResult,
InteractiveProcess(
argv=request.args,
env=env,
input_digest=request.digest,
run_in_workspace=request.run_in_workspace,
restartable=repl_subsystem.restartable,
immutable_input_digests=request.immutable_input_digests,
append_only_caches=request.append_only_caches,
),
)
return Repl(result.exit_code)
def rules():
return collect_rules()
|
import sys
def floyd_warshall():
global matrix, N
for k in range(N):
for i in range(N):
for j in range(N):
if matrix[i][j] > matrix[i][k] + matrix[k][j]:
matrix[i][j] = matrix[i][k] + matrix[k][j]
N, M = map(int, raw_input().split())
# Build and populate the matrix
matrix = [[sys.maxint for x in range(N)] for x in range(N)]
for x in range(N): matrix[x][x] = 0
for input in range(M):
x, y, z = map(int, raw_input().split())
matrix[x - 1][y - 1] = z
# Calculate all the shortest paths
floyd_warshall()
# Now check all input
for input in range(int(raw_input())):
x, y = map(int, raw_input().split())
if matrix[x - 1][y - 1] == sys.maxint:
print "-1"
else:
print matrix[x - 1][y - 1]
|
#Create an application to convert Dollar to Rupees in python tkinter
from tkinter import *
from tkinter import messagebox
def convert():
rupee = data.get()*77
    messagebox.showinfo('Converted', 'The rupee amount is: ' + str(rupee))
window =Tk()
window.title("Frame window")
window.geometry('500x200')
frame1=Frame(window,width=100,highlightbackground='red',highlightthickness=3)
frame1.grid(row=0,column=0,padx=20,pady=20,ipadx=20,ipady=20)
l1 = Label(frame1, text="Enter the dollar", fg='blue', font=(16))
l1.grid(row=0,column=0)
data=IntVar()
textbox=Entry(frame1,textvariable=data,fg='blue',font=(16))
textbox.grid(row=0,column=1)
button1=Button(frame1,command=convert,text='Convert to Rupee',fg='blue',font=(16))
button1.grid(row=1,column=1,sticky=W)
window.mainloop()
|
class Queue():
def __init__(self):
self.items = []
def enqueue(self, item):
self.items.insert(0, item)
def dequeue(self):
return self.items.pop()
def isEmpty(self):
return self.items == []
def size(self):
return len(self.items)
if __name__ == "__main__":
queue = Queue()
print queue.isEmpty()
queue.enqueue("Hello")
queue.enqueue("World")
print queue.size()
print queue.isEmpty()
print queue.dequeue()
print queue.dequeue()
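# Design note: list.insert(0, item) makes enqueue O(n). collections.deque offers
# O(1) operations at both ends (appendleft/pop) and would be the idiomatic choice
# if this queue ever needs to handle large workloads.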
|
# something else
import csv
from sqlmethods import PrioriDataDB
db_methods = PrioriDataDB()
class Csv_helper():
def readFile(self, filename):
with open(filename, 'r') as csvfile:
csvfile.readline()
reader = csv.reader(csvfile)
names_ids = []
for item in reader:
names_ids.append({
"app_id": item[0]
})
# names_ids.append(item)
# for item in names_ids:
# print(item['app_id'])
print(len(names_ids))
return names_ids
def apps_to_objs(self):
csv = Csv_helper()
apps = []
# items = csv.readFile('appCheck.csv')
# for item in items:
# apps.append(db_methods.get_metadata_from_appID(item[1])[0])
apps = db_methods.get_app_and_name()
app_objs = []
for app in apps:
app_obj = {
"app_name": app[1],
"app_id": app[0],
'autism specific': 0,
"edutainment":0,
'a1': 0,
'a2': 0,
'a3': 0,
'a4': 0,
'a5': 0,
'b1': 0,
'b2': 0,
'b3': 0,
'c1': 0,
'c2': 0,
'c3': 0,
'c4': 0,
"linked_queries": []
}
app_objs.append(app_obj)
return app_objs
def find_obj_and_append_query(self, app_id, list_of_objs, query, group_title):
for obj in list_of_objs:
for key in obj:
if obj[key] == app_id:
obj['linked_queries'].append(query)
# obj['group_titles'].append(group_title)
obj[group_title] += 1
return list_of_objs
def read_file_for_queries(self, keywords_csv):
apps_objs = self.apps_to_objs()
groupTitles = ['autism specific','edutainment',
"a1", "a2",
"a3","a4", "a5",
"b1", "b2", "b3",
"c1", "c2", "c3", "c4"]
with open(keywords_csv, 'r') as csvfile:
csvfile.readline()
lines = csv.reader(csvfile)
count = 0
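            # Blank rows in the keyword CSV separate keyword groups; `count`
            # tracks which entry of groupTitles the current keywords belong to.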
for line in lines:
if len(line) == 0:
count += 1
else:
results = db_methods.search_table(line[0])
for tuple in results:
apps_objs = self.find_obj_and_append_query(tuple[0], apps_objs, line[0], groupTitles[count])
# print(count, len(groupTitles) - 1)
return apps_objs
def obj_to_csv(self, list_of_objs):
with open('list.csv', 'a', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=',')
for obj in list_of_objs:
listqueries = str(obj['linked_queries']).replace(',', '.').replace('[', '').replace(']', '')
# listgrouping = str(obj['group_titles']).replace(',', '.').replace('[', '').replace(']', '')
writer.writerow([obj["app_name"], obj['app_id'], listqueries, obj["autism specific"],
obj["edutainment"],
obj['a1'], obj['a2'], obj['a3'], obj['a4'],obj['a5'],
obj['b1'], obj['b2'], obj['b3'],
obj['c1'],obj['c2'], obj['c3'], obj['c4']])
print('done')
|
#Created on July 23, 2015
#@author: rspies
# Lynker Technologies
# Python 2.7
# Description: parse through a csv file with date and variable and generate a
# formatted datacard for chps import. Note: script does not check for missing
# time steps
import os
import datetime
import collections
maindir = os.getcwd()
os.chdir("../..")
maindir = os.getcwd() + os.sep
################### user input #########################
station_dir = maindir + 'Calibration_NWS/NERFC/NERFC_FY2016/datacards/MAT/'
csv_dir = station_dir + os.sep + 'chps_modified_csv' + os.sep
out_dir = station_dir + os.sep + 'final_datacard'
variable = 'MAT'
chps_shift = 'yes' # shift time back by 1 timestep (csv from CHPS shifts forward compared to original datacard)
variable_name = 'TEMP' #'inflow' or 'outflow'
start = datetime.datetime(1950,1,1,0)
header_lines = 2 # header lines in csv
timestep = 6 # time step in hours
date_time_format = "%Y-%m-%d %H:%M:%S"
#data_format = 'f9.3'
basin_list = ['CFDM1','NWFM1','JVLV1LWR','JVLV1UPR'] # name of location
loc_name = {'CFDM1':'NARRAGUAGAS CHERRYFD','NWFM1':'SHEEPSCOT NWHITEFLD','JVLV1LWR':'LO JEFFERSONVIL','JVLV1UPR':'UP JEFFERSONVIL'} #
########################################################
var_units = {'QME':'CFSD','MAP':'MM','MAT':'DEGF'}
var_dim = {'QME':'L3','MAP':'L','MAT':'TEMP'}
for basin_id in basin_list:
count = 0
csv_file = csv_dir + basin_id + '_final.csv'
read_file = open(csv_file,'r')
dates = collections.OrderedDict()
print 'parsing file: ' + basin_id + ' -> ' + csv_file
for line in read_file:
count += 1
if count > header_lines:
sep = line.split(',')
date = datetime.datetime.strptime(sep[0], date_time_format)
if chps_shift == 'yes':
date = date - datetime.timedelta(hours=timestep)
data = sep[1].strip()
if data == '':
data = -999
elif float(data) < 0 and variable != 'MAT':
data = -999
if date >= start:
dates[date] = data
#### data output to formatted datacard
write_file = open(out_dir + os.sep + 'modified_' + basin_id + '.' + variable.upper(),'wb')
write_file.write('$ Datacard Time Series created at ' + str(datetime.datetime.now().date()) + ' from CSV Conversion Script\n')
write_file.write('$ Data type: ' + variable_name + '\n')
write_file.write('$ PERIOD OF RECORD= ' + str(min(dates).month)+'/'+str(min(dates).year) + ' THRU ' + str(max(dates).month)+'/'+str(max(dates).year)+'\n')
#write_file.write('$ \n')
write_file.write('$ Symbol for missing data = -999.00 \n')
write_file.write('$ ' + 'TYPE=' + variable + ' ' + 'UNITS=' + var_units[variable] + ' ' + 'DIMENSIONS=' + variable_name + ' ' + 'TIME INTERVAL=' + str(timestep) + ' HOURS\n')
write_file.write('{:12s} {:4s} {:4s} {:4s} {:2d} {:12s} {:12s}'.format('DATACARD', variable, var_dim[variable],var_units[variable],int(timestep),basin_id,loc_name[basin_id]))
write_file.write('\n')
#write_file.write('datacard ' + variable + ' ' + var_dim[variable] + ' '+ var_units[variable] +' ' + str(timestep) + ' '+basin_id+' '+loc_name+'\n')
if min(dates).month >=10 and max(dates).month >= 10:
write_file.write(str(min(dates).month)+' '+str(min(dates).year)+' '+str(max(dates).month)+' '+str(max(dates).year)+' 1 F9.3'+'\n')
elif min(dates).month >=10:
write_file.write(str(min(dates).month)+' '+str(min(dates).year)+' '+str(max(dates).month)+' '+str(max(dates).year)+' 1 F9.3'+'\n')
elif max(dates).month >=10:
write_file.write(' ' + str(min(dates).month)+' '+str(min(dates).year)+' '+str(max(dates).month)+' '+str(max(dates).year)+' 1 F9.3'+'\n')
else:
write_file.write(' ' + str(min(dates).month)+' '+str(min(dates).year)+' '+str(max(dates).month)+' '+str(max(dates).year)+' 1 F9.3'+'\n')
month_prev = min(dates).month
hr_count = 0
for each in dates:
if each.month == month_prev:
hr_count += 1
else:
hr_count = 1
if int(each.month) < 10:
space1 = ' '
else:
space1 = ' '
if hr_count < 10:
space2= ' '
elif hr_count <100:
space2= ' '
else:
space2= ' '
### write data to datacard ###
write_file.write('{:12s}{:2d}{:02d}{:4d}{:9.3f}'.format(basin_id,int(each.month),int(str(each.year)[-2:]),hr_count,float(dates[each])))
write_file.write('\n')
#write_file.write(basin_id + space1 + str(each.month)+str(each.year)[2:] + space2 + str(hr_count) + "%10.2f" % float(dates[each]) + '\n')
month_prev = each.month
write_file.close()
read_file.close()
print 'Completed!'
|
# KVM-based Discoverable Cloudlet (KD-Cloudlet)
# Copyright (c) 2015 Carnegie Mellon University.
# All Rights Reserved.
#
# THIS SOFTWARE IS PROVIDED "AS IS," WITH NO WARRANTIES WHATSOEVER. CARNEGIE MELLON UNIVERSITY EXPRESSLY DISCLAIMS TO THE FULLEST EXTENT PERMITTEDBY LAW ALL EXPRESS, IMPLIED, AND STATUTORY WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT OF PROPRIETARY RIGHTS.
#
# Released under a modified BSD license, please see license.txt for full terms.
# DM-0002138
#
# KD-Cloudlet includes and/or makes use of the following Third-Party Software subject to their own licenses:
# MiniMongo
# Copyright (c) 2010-2014, Steve Lacy
# All rights reserved. Released under BSD license.
# https://github.com/MiniMongo/minimongo/blob/master/LICENSE
#
# Bootstrap
# Copyright (c) 2011-2015 Twitter, Inc.
# Released under the MIT License
# https://github.com/twbs/bootstrap/blob/master/LICENSE
#
# jQuery JavaScript Library v1.11.0
# http://jquery.com/
# Includes Sizzle.js
# http://sizzlejs.com/
# Copyright 2005, 2014 jQuery Foundation, Inc. and other contributors
# Released under the MIT license
# http://jquery.org/license
import logging
import os.path
# Pylon imports.
from pylons import request, response
from pylons.controllers.util import abort
from paste import fileapp
# For serializing JSON data.
import json
# Controller to derive from.
from pycloud.pycloud.pylons.lib.base import BaseController
from pycloud.pycloud.pylons.lib.util import asjson
from pycloud.pycloud.utils import timelog
from pycloud.pycloud.model import App
log = logging.getLogger(__name__)
################################################################################################################
# Class that handles Service VM related HTTP requests to a Cloudlet.
################################################################################################################
class AppPushController(BaseController):
# Maps API URL words to actual functions in the controller.
API_ACTIONS_MAP = {'': {'action': 'getList', 'reply_type': 'json'},
'get': {'action': 'getApp', 'reply_type': 'binary'}}
################################################################################################################
# Called to get a list of apps available at the server.
################################################################################################################
@asjson
def GET_getList(self):
print '\n*************************************************************************************************'
timelog.TimeLog.reset()
timelog.TimeLog.stamp("Request for stored apps received.")
os_name = request.params.get('osName', None)
os_version = request.params.get('osVersion', None)
tags = request.params.get('tags', None)
print "OS Name: ", os_name
print "OS Version: ", os_version
print "Tags: ", tags
query = {}
if tags:
# TODO: modify this to search not only in tags, but in service id as well.
query['tags'] = {"$in": tags.split(',')}
apps = App.find(query)
# Send the response.
timelog.TimeLog.stamp("Sending response back to " + request.environ['REMOTE_ADDR'])
timelog.TimeLog.writeToFile()
return apps
################################################################################################################
# Called to get an app from the server.
################################################################################################################
def GET_getApp(self):
app_id = request.params.get('appId', None)
if not app_id:
            # If we didn't get a valid one, just return an error message.
print "No app name to be retrieved was received."
abort(400, '400 Bad Request - must provide app_id')
print '\n*************************************************************************************************'
timelog.TimeLog.reset()
timelog.TimeLog.stamp("Request to push app received.")
app = App.by_id(app_id)
if not app:
print "No app found for specified id"
abort(404, '404 Not Found - app with id "%s" was not found' % app_id)
# Check that the APK file actually exists.
file_exists = os.path.isfile(app.apk_file)
if not file_exists:
print "No APK found for app with specified id. Path: " + app.apk_file
abort(404, '404 Not Found - APK for app with id "%s" was not found' % app_id)
# Log that we are responding.
timelog.TimeLog.stamp("Sending response back to " + request.environ['REMOTE_ADDR'])
timelog.TimeLog.writeToFile()
# Create a FileApp to return the APK to download. This app will do the actually return execution;
# this makes the current action a middleware for this app in WSGI definitions.
# The content_disposition header allows the file to be downloaded properly and with its actual name.
#fapp = fileapp.FileApp(app.apk_file, content_disposition='attachment; filename="' + app.file_name() + '"')
#reply = fapp(request.environ, self.start_response)
# Instead of returning the file by parts with FileApp, we will return it as a whole so that it can be encrypted.
response.headers['Content-disposition'] = 'attachment; filename="' + app.file_name() + '"'
response.headers['Content-type'] = 'application/vnd.android.package-archive'
with open(app.apk_file, 'rb') as f:
reply = f.read()
# Send the response, serving the apk file back to the client.
return reply
|
#00 01 02 03 04 05 06 07
#10 11 12 13 14 15 16 17
#20 21 22 23 24 25 26 27
def isValid(board,x,y):
    if(x > -1 and x < 8 and y > -1 and y < 8 and board[x][y] == -1):
return True
else:
return False
def solve():
board= [[-1 for i in range(8)]for i in range(8)]
board[0][0] = 0
moveX = [1,2,-1,-2, 1, 2,-1,-2]
moveY = [2,1, 2, 1,-2,-1,-2,-1]
if(solveUtil(board,0,0,moveX,moveY,1)):
print(board)
else:
print(-1)
def solveUtil(board,currX,currY,moveX,moveY,pos):
if pos==64:
return True
for i in range(8):
newX = currX + moveX[i]
newY = currY + moveY[i]
if(isValid(board,newX,newY)):
board[newX][newY] = pos
if(solveUtil(board,newX,newY,moveX,moveY,pos+1)):
return True
else:
board[newX][newY] = -1
return False
solve()
|
def join_batman_network(network_name = 'squids_network',
ap_mac = '02:12:34:56:78:9A',
channel = '1'):
'''Create a BATMAN network using Raspbian.
ARGS:
@network_name -- The name of the network you would like to create
@ap_mac -- The MAC address to assign the Access Point
@channel -- The channel number to join (STRING or INT)
RETURN:
None
'''
import sys
import time
sys.path.append('..')
from batman_setup import load_batman
from run_command import run_command
load_batman.load_batman()
# Configure wlan0 to have a Maximum Transmission Unit to 1532 frames
# This is standard for BATMAN-Advanced. Most protocols only require
# 1500 frames, but BATMAN-Advanced uses the spare 32 frames to append
# its header.
run_command('sudo ip link set up dev eth0')
run_command('sudo ip link set mtu 1532 dev wlan0')
# Configure wlan0 with the specifications given.
run_command('sudo ifconfig wlan0 down && sudo iwconfig wlan0 mode ad-hoc ' +
'essid ' + network_name + ' ap ' + ap_mac + ' channel ' + str(channel))
# Add wlan0 to the list of BATMAN-Advanced available interfaces, then
# start wlan0 and the corresponding BATMAN-Advanced interface.
run_command('sudo batctl if add wlan0')
run_command('sudo ip link set up dev wlan0')
run_command('sudo ip link set up dev bat0')
run_command('sudo batctl gw_mode client')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
    parser.add_argument('-n', '--network', default='squids_network')
    parser.add_argument('-a', '--ap_mac', default='02:12:34:56:78:9A')
    parser.add_argument('-c', '--channel', default='1')
args = parser.parse_args()
join_batman_network(args.network, args.ap_mac, args.channel)
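    # Example invocation (an illustration; assumes this script is run directly and
    # uses the long flags defined above):
    #   sudo python <this_file>.py --network squids_network --ap_mac 02:12:34:56:78:9A --channel 1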
|
import cv2
import numpy as np
import face_recognition
# STEP 1 : Loading the images...
imgNM = face_recognition.load_image_file('ImagesBasic/NM.jpg')
imgNM = cv2.cvtColor(imgNM, cv2.COLOR_BGR2RGB)
imgTest = face_recognition.load_image_file('ImagesBasic/AB.jpg')
imgTest = cv2.cvtColor(imgTest, cv2.COLOR_BGR2RGB)
# STEP 2 : Detecting faces in the images...
# face locations finding process!
faceLoc = face_recognition.face_locations(imgNM)[0]
# Encoding the detected faces!
encodeNM = face_recognition.face_encodings(imgNM)[0]
# print(faceLoc)
# To encircle it with a rectangular box
cv2.rectangle(imgNM, (faceLoc[3], faceLoc[0]), (faceLoc[1], faceLoc[2]), (255, 0, 255), 2)
faceLoctest = face_recognition.face_locations(imgTest)[0]
encodeNMTest = face_recognition.face_encodings(imgTest)[0]
cv2.rectangle(imgTest, (faceLoctest[3], faceLoctest[0]), (faceLoctest[1], faceLoctest[2]), (255, 0, 255), 2)
results = face_recognition.compare_faces([encodeNM], encodeNMTest)
faceDis = face_recognition.face_distance([encodeNM],encodeNMTest)
print(results, faceDis)
cv2.putText(imgTest, f'{results}, {round(faceDis[0],2)}', (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)
cv2.imshow('NAMO', imgNM)
cv2.imshow('NAMO TEST', imgTest)
cv2.waitKey(0)
|
import matplotlib.pyplot as plt
from DataSet.iris import learn_iris
from LogisticRegression import LogisticRegression
# Test of the custom logistic regression implementation
lr = LogisticRegression(n_iter=15,eta=0.01,random_state=1)
# Train it
learn_iris(classifier=lr, title='LogisticRegression')
# Plot how the cost changes over the epochs
plt.plot(range(1, len(lr.cost_)+1), lr.cost_, marker='o')
plt.xlabel('Epochs')
plt.ylabel("Average Cost")
plt.show()
|
import sys
sys.path.append('../500_common')
import lib_taisei
dirname = "kyoto01_kuronet"
lib_taisei.main(dirname)
|
def count_letters(word, letter):
    """
    Count how many times a letter occurs in a word
    :param word: input word
    :param letter: letter to count
    :return: number of occurrences of letter in word
    """
count = 0
for i in word:
if i == letter:
count += 1
    return count
print(count_letters("banana", "n"))
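# Design note: the standard library offers the same count in one expression,
# e.g. collections.Counter("banana")["n"] == 2.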
|
import time
import numpy as np
import pandas as pd
import csv
import itertools
from sklearn.svm import SVC
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score, precision_score
from sklearn.model_selection import KFold, GridSearchCV,cross_val_predict
import matplotlib.pyplot as plt
set_sizes = [100,500,1000,5000,10000,50000,100000,500000,1000000,5000000,10000000,50000000,100000000]
column_names = ["id","vendor_id","pickup_datetime","dropoff_datetime","passenger_count","pickup_longitude","pickup_latitude"
,"dropoff_longitude","dropoff_latitude","store_and_fwd_flag","trip_duration", "Short_or_long"]
"""Read in dataset"""
i=7
dataframe = pd.read_csv("train.csv",
sep=',',header=0,names=column_names,index_col=0,usecols=[0,1,2,3,4,5,6,7,8,10,11] ,nrows =set_sizes[i])
Y = dataframe["Short_or_long"]
X = dataframe[["vendor_id","passenger_count","pickup_longitude","pickup_latitude","dropoff_longitude","dropoff_latitude"]]
svc = SVC()
from sklearn.model_selection import cross_val_score  # cross_val_predict and the classification metrics are already imported above
predicted = cross_val_predict(svc, X, Y, cv=10)
print(accuracy_score(Y, predicted))
#clf = svm.SVC(kernel='linear', C=1)
#scores = cross_val_score(regr, X, Y, cv=5)
print("Mean squared error: %.2f"
% mean_squared_error(Y, predicted))
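# StandardScaler is imported above but never applied; below is a minimal sketch of
# folding feature scaling into the cross-validation with a Pipeline. This is an
# illustrative addition, not part of the original experiment.
from sklearn.pipeline import make_pipeline
scaled_svc = make_pipeline(StandardScaler(), SVC())
scaled_predicted = cross_val_predict(scaled_svc, X, Y, cv=10)
print("Accuracy with scaling: %.3f" % accuracy_score(Y, scaled_predicted))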
|
from queue import PriorityQueue
class Graph:
def __init__(self, vertices):
self.v = vertices
self.edges = [[-1 for i in range(vertices)]
for j in range(vertices)]
self.visited = []
def add_edge(self, u, v, weight):
self.edges[u][v] = weight
self.edges[v][u] = weight
def dijkstra(graph, first):
D = {v:float('inf') for v in range(graph.v)}
    D[first] = 0
pq = PriorityQueue()
pq.put((0, first))
while not pq.empty():
(dist, current_vertex) = pq.get()
graph.visited.append(current_vertex)
for near_node in range(graph.v):
if graph.edges[current_vertex][near_node] != -1:
distance = graph.edges[current_vertex][near_node]
if near_node not in graph.visited:
old_cost = D[near_node]
new_cost = D[current_vertex] + distance
if new_cost < old_cost:
pq.put((new_cost, near_node))
D[near_node] = new_cost
return D
g = Graph(7)
g.add_edge(1, 2, 2)
g.add_edge(1, 3, 4)
g.add_edge(2, 3, 1)
g.add_edge(2, 4, 7)
g.add_edge(3, 5, 3)
g.add_edge(5, 4, 2)
g.add_edge(6, 4, 1)
g.add_edge(6, 5, 5)
D = dijkstra(g, 1)
dist=len(D)
for node in range(1,dist):
print("Distance from Node 1 to node", node, "is", D[node])
|
# Check if a sudoku board is valid
testBoard = """
1 3 7 2 6 4 5 9 8
5 9 2 8 7 3 4 6 1
8 4 6 5 1 9 7 2 3
2 6 3 7 9 5 8 1 4
4 7 5 1 8 6 2 3 9
9 8 1 4 3 2 6 7 5
6 2 9 3 4 8 1 5 7
7 5 8 9 2 1 3 4 6
3 1 4 6 5 7 9 8 2
"""
def parse_board(tb):
tb = tb.rstrip("\n").lstrip("\n")
board = []
rows = tb.split("\n")
for each in rows:
row = []
el = each.split(" ")
for every in el:
if int(every) < 1 or int(every) > 9:
return False, None
row.append(int(every))
board.append(row)
return True, board
def check_duplicate_row(val, row):
duplicates = 0
for each in row:
if val == each:
duplicates += 1
return duplicates == 1
def check_duplicate_col(val, board, current_column):
duplicates = 0
idx = 0
while idx < 9:
if val == board[idx][current_column]:
duplicates += 1
idx += 1
return duplicates == 1
def check_duplicate_grid(val, board, cr, cc):
duplicates = 0
grid_row = int(cr / 3) * 3
grid_col = int(cc / 3) * 3
for i in range(grid_row, grid_row + 3):
for j in range(grid_col, grid_col + 3):
if board[i][j] == val:
duplicates += 1
return duplicates == 1
def main(b):
valid, board = parse_board(b)
if not valid:
return False
current_row = 0
for row in board:
current_column = 0
for val in row:
is_valid = check_duplicate_row(val, row) and check_duplicate_col(val, board, current_column) and check_duplicate_grid(val, board, current_row, current_column)
if not is_valid:
return False
current_column += 1
current_row += 1
return True
print(main(testBoard))
|
# stats
from statsd import StatsClient
# vibe
from vibepy.class_postgres import PostgresManager
from vibepy import read_config
# logging
import logging
import graypy
# local
from exchange_callback import ExchangeCallback
from rabbit_consumer import RabbitConsumerProc, RabbitConsumerThread
class AbstractBot(object):
db = None
statsd = None
def __init__(self, bot_id_setting, config, exchange_callbacks, callback_consumer_num=1, use_threading=False):
self.config = config
self.bot_id = 'bot-' + self.config.get('bot', bot_id_setting)
self.configure_logging()
self.log_config()
sects = self.config.sections()
if 'postgres' in sects:
self.db = self.__build_db_pool__(self.config)
if 'statsd' in sects:
self.statsd = self.__build_statsd_client__(self.config)
if self.statsd is None:
self.logger.error("Statsd config must be supplied! Section [statsd] missing from config")
exit()
if use_threading:
self.rabbit_klass = RabbitConsumerThread
else:
self.rabbit_klass = RabbitConsumerProc
exchange_callbacks = self.__configure_callbacks__(exchange_callbacks, callback_consumer_num)
self.consumers = self.__build_consumers__(exchange_callbacks)
def read_config(self, config_filename, config_filepath):
self.config = read_config.read_config(ini_filename=config_filename,
ini_path=config_filepath)
def stop(self):
self.logger.info("Stopping consumers")
self.__stop_consumers__()
exit()
def start(self):
self.logger.info("Starting consumers")
try:
self.__start_consumers__()
except (KeyboardInterrupt, SystemExit):
self.logger.info('Received keyboard interrupt, quitting')
self.stop()
except Exception as e:
# all other errors
self.logger.info("Unexpected error - {0}".format(e))
self.stop()
raise
def __build_db_pool__(self, config):
logging.info("Creating PostgresManager")
max_connections = 10
        if config.has_option('postgres', 'POSTGRES_MAX_CONNECTIONS'):
max_connections = int(config.get('postgres', 'POSTGRES_MAX_CONNECTIONS'))
return PostgresManager(config, 'postgres', max_connections=max_connections)
def __build_statsd_client__(self, config):
statsdhost = config.get('statsd', 'STATSD_HOST')
statsdport = config.get('statsd', 'STATSD_PORT')
statsd_namespace = config.get('statsd', 'STATSD_NAMESPACE')
statsd_namespace += "." + self.bot_id
self.logger.info("Creating StatsClient with prefix " + statsd_namespace)
return StatsClient(host=statsdhost, port=statsdport, prefix=statsd_namespace, maxudpsize=512)
def __build_consumers__(self, exchange_callbacks):
raise NotImplementedError
def __start_consumers__(self):
raise NotImplementedError
def __stop_consumers__(self):
raise NotImplementedError
def configure_logging(self):
log_format = '%(asctime)s [%(processName)-17.17s] [%(levelname)-5.5s] %(message)s'
if self.config.has_option('logging', 'LOG_FORMAT'):
log_format = self.config.get('logging', 'LOG_FORMAT', raw=True)
log_level = self.config.get('logging', 'LOG_LEVEL')
# Root logging configuration
log_formatter = logging.Formatter(log_format)
root_logger = logging.getLogger()
root_logger.setLevel(log_level)
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_formatter)
root_logger.addHandler(console_handler)
# Graylog configuration
if self.config.has_option('logging', 'GRAYLOG_SERVER'):
graylog_server = self.config.get('logging', 'GRAYLOG_SERVER')
graylog_port = int(self.config.get('logging', 'GRAYLOG_PORT'))
handler = graypy.GELFHandler(graylog_server, graylog_port, facility=self.bot_id)
root_logger.addHandler(handler)
self.logger = logging.getLogger(__name__ + "_" + self.bot_id)
def log_config(self):
config_str = ""
for sect in self.config.sections():
config_str += "[" + sect + "]\n"
for k, v in dict(self.config.items(sect, raw=True)).iteritems():
config_str += "{0} : {1}\n".format(k, v)
config_str += '\n'
self.logger.info("Config:\n" + config_str)
def __configure_callbacks__(self, exch_callbacks, num_callbacks):
if isinstance(exch_callbacks, dict):
new_exch_callbacks = []
for exch, callback in exch_callbacks.iteritems():
new_exch_callbacks.append(ExchangeCallback(exch, callback, num_callbacks))
return new_exch_callbacks
return exch_callbacks
|
__author__ = 'Justin'
from math import ceil
tuple = ['1',[[1,2,3,4],[1,1,1,1],[2,2,2,2]]]
paths = tuple[1]
finalpaths = []
for path in paths:
if(not(path in finalpaths)):
finalpaths.append(path)
paths = finalpaths
print(paths)
finalpaths = []
for path in paths:
for j in range(0,paths.index(path)):
samenodes = 0
for element in path:
if(element in paths[j]):
samenodes +=1
if(samenodes < ceil(len(path)/2)):
finalpaths.append(path)
break
print(finalpaths)
paths = finalpaths
# paths = finalpaths
# for path in paths:
# print('PATH',path)
# print('RANGE',range(0,paths.index(path)))
# for j in range(0,paths.index(path)):
# print('PATH compare',paths[j])
# samenodes = 0
# for element in path:
# if(element in paths[j]):
# samenodes +=1
# if(samenodes >= ceil(len(path)/2)):
# paths.remove(path)
# print('REMOVED',path)
# break
print('LENGTH AFTER',len(paths))
print('PATHS AFTER',paths)
|
from rest_framework.permissions import IsAuthenticated
from rest_framework.views import APIView
from rest_framework import serializers
from rest_framework.response import Response
from rest_framework import status
from .services import *
from .permissions import *
from utils.serializer_validator import validate_serializer
from .models import Cpi
class AddCpiApi(APIView):
permission_classes = [ExpertPermission, ]
class RequestSerializer(serializers.Serializer):
CPI = serializers.FloatField(required=False)
food_service = serializers.FloatField(required=False)
eating_out = serializers.FloatField(required=False)
cereal = serializers.FloatField(required=False)
food = serializers.FloatField(required=False)
beverage_cigarette = serializers.FloatField(required=False)
garment = serializers.FloatField(required=False)
household_equipment = serializers.FloatField(required=False)
housing = serializers.FloatField(required=False)
medicine_medical_service = serializers.FloatField(required=False)
communication = serializers.FloatField(required=False)
telecommunication = serializers.FloatField(required=False)
education = serializers.FloatField(required=False)
culture_entertainment_travel = serializers.FloatField(required=False)
other_good_services = serializers.FloatField(required=False)
base_period = serializers.CharField(max_length=50, required=False)
year = serializers.IntegerField(required=True)
month = serializers.IntegerField(required=True)
class ResponseSerializer(serializers.ModelSerializer):
class Meta:
model = Cpi
fields = '__all__'
def post(self, request):
request_serializer = self.RequestSerializer(data=request.data)
validate_serializer(request_serializer)
self.check_permissions(request=request)
creator = request.user
organization = creator.client
cpi = add_cpi(data=request_serializer.validated_data, creator=creator, organization=organization)
response_serializer = self.ResponseSerializer(cpi)
return Response({
'cpi': response_serializer.data
}, status=status.HTTP_200_OK)
class UpdateCpiApi(APIView):
permission_classes = [OwnerPermission, ]
class RequestSerializer(serializers.Serializer):
CPI = serializers.FloatField(required=False)
food_service = serializers.FloatField(required=False)
eating_out = serializers.FloatField(required=False)
cereal = serializers.FloatField(required=False)
food = serializers.FloatField(required=False)
beverage_cigarette = serializers.FloatField(required=False)
garment = serializers.FloatField(required=False)
household_equipment = serializers.FloatField(required=False)
housing = serializers.FloatField(required=False)
medicine_medical_service = serializers.FloatField(required=False)
communication = serializers.FloatField(required=False)
telecommunication = serializers.FloatField(required=False)
education = serializers.FloatField(required=False)
culture_entertainment_travel = serializers.FloatField(required=False)
other_good_services = serializers.FloatField(required=False)
base_period = serializers.CharField(max_length=50, required=False)
class ResponseSerializer(serializers.ModelSerializer):
class Meta:
model = Cpi
fields = '__all__'
def put(self, request, cpi_id):
request_serializer = self.RequestSerializer(data=request.data)
validate_serializer(request_serializer)
cpi = get_cpi_by(raise_exception=False, id=cpi_id).first()
self.check_object_permissions(request=request, obj=cpi)
cpi = update_cpi(cpi=cpi, **request_serializer.validated_data)
response_serializer = self.ResponseSerializer(cpi)
return Response({
'cpi': response_serializer.data
        }, status=status.HTTP_200_OK)
class DeleteCpiApi(APIView):
permission_classes = [OwnerPermission, ]
class ResponseSerializer(serializers.ModelSerializer):
class Meta:
model = Cpi
fields = '__all__'
def delete(self, request, cpi_id):
cpi = get_cpi_by(raise_exception=False,id=cpi_id).first()
self.check_object_permissions(request=request, obj=cpi)
cpi = delete_cpi(cpi)
response_serializer = self.ResponseSerializer(cpi)
return Response({
'cpi': response_serializer.data
}, status=status.HTTP_200_OK)
class CpiListApi(APIView):
permission_classes = [IsAuthenticated, ]
class ResponseSerializer(serializers.ModelSerializer):
class Meta:
model = Cpi
fields = '__all__'
def get(self, request, start, end):
start_year = int(str(start)[0:4])
start_month = int(str(start)[4:])
end_year = int(str(end)[0:4])
end_month = int(str(end)[4:])
user = request.user
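        # When the requested range spans more than one calendar year, union three
        # querysets: the tail of the start year (months >= start_month), every month
        # of the years strictly in between, and the head of the end year
        # (months <= end_month).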
if start_year != end_year:
cpis = set(Cpi.objects.filter(organization=user.client, month__gte=start_month, year=start_year) | Cpi.objects.filter(organization=user.client, year__gt=start_year, year__lt=end_year) | Cpi.objects.filter(organization=user.client, month__lte=end_month, year=end_year))
else:
cpis = set(Cpi.objects.filter(organization=user.client, month__lte=end_month, month__gte=start_month, year=start_year))
response_serializer = self.ResponseSerializer(cpis, many=True)
return Response({
'cpis': response_serializer.data
}, status=status.HTTP_200_OK)
|
"""
Robotritons in-use module for GPS communication. Based on the Emlid GPS.py example.
Purpose: Define classes to handle communications with the Ublox NEO-M8N Standard Precision GNSS Module and methods to handle data retrieval.
Requirements: The python modules copy, Queue, spidev, math, struct, navio.util, and one one Ublox NEO-M8N Standard Precision GNSS Module.
Use: First make an object of class U_blox(). Initialize communication by sending an I2C poll request "self.bus.xfer2(msg)" or using
the "enable_posllh(self)" method. Finally call GPSfetch() to probe the Ublox module for a message, then store its returned value for use.
The remaining methods control the actual handling of a message and ultimately customize the functionality of GPSfetch().
Updates:
- May 26, 2016. Added a debug object variable "self.debug" which, when True, makes GPSfetch() print strings instead of returning values.
Also defined a new method "fetchSpecial" to test polling the GPS for a more immediate message response. It is accessible through GPSfetch()'s optional argument.
- May 25, 2016. Modified GPSfetch() to print nothing and instead return a valued dictionary.
Resources:
https://www.u-blox.com/sites/default/files/NEO-M8N-FW3_DataSheet_%28UBX-15031086%29.pdf
https://www.u-blox.com/sites/default/files/products/documents/u-blox8-M8_ReceiverDescrProtSpec_%28UBX-13003221%29_Public.pdf
https://pythontips.com/2013/08/04/args-and-kwargs-in-python-explained/
http://www.binaryhexconverter.com/decimal-to-hex-converter
"""
import copy
import Queue
import spidev
import math
import struct
import navio.util
navio.util.check_apm()
waiting_header = 0
msg_class = 1
msg_id = 2
length = 3
payload = 4
checksum = 5
class U_blox_message:
def __init__(self, msg_class = 0, msg_id = 0, msg_length = 0, msg_payload = []):
self.msg_class = msg_class
self.msg_id = msg_id
self.msg_length = msg_length
self.msg_payload = msg_payload
def clear(self):
self.msg_class = 0
self.msg_id = 0
self.msg_length = 0
self.msg_payload = []
class U_blox:
def __init__(self):
self.mess_queue = Queue.Queue()
self.curr_mess = U_blox_message()
self.bus = spidev.SpiDev()
self.bus.open(0,0)
self.state=0
self.counter1=0
self.chk_a=0
self.chk_b=0
self.accepted_chk_a=0
self.accepted_chk_b=0
self.debug=False
def enable_posllh(self):
msg = [0xb5, 0x62, 0x06, 0x01, 0x03, 0x00, 0x01, 0x02, 0x01, 0x0e, 0x47]
self.bus.xfer2(msg)
def enable_posstatus(self):
msg = [0xb5, 0x62, 0x06, 0x01, 0x03, 0x00, 0x01, 0x03, 0x01, 0x0f, 0x49]
self.bus.xfer2(msg)
def scan_ubx(self, byte):
if(self.state == waiting_header):
self.result = [0,0,0,0,0,0,0,0,0]
self.accepted = 0
self.chk_a = 0
self.chk_b = 0
if((self.counter1 == 0) & (byte == 0xb5)):
self.counter1 += 1
elif((self.counter1 == 0) & (byte != 0xb5)):
self.state = waiting_header
self.counter1 = 0
elif((self.counter1 == 1) & (byte == 0x62)):
self.counter1 = 0
self.state = msg_class
elif((self.counter1 == 1) & (byte != 0x62)):
self.counter1 = 0
self.state = waiting_header
elif(self.state == msg_class):
self.chk_a = (self.chk_a + byte)%256
self.chk_b = (self.chk_b + self.chk_a)%256
self.curr_mess.msg_class = byte
self.state = msg_id
elif(self.state == msg_id):
self.chk_a = (self.chk_a + byte)%256
self.chk_b = (self.chk_b + self.chk_a)%256
self.curr_mess.msg_id = byte
self.state = length
elif(self.state == length):
if(self.counter1 == 0):
self.chk_a = (self.chk_a + byte)%256
self.chk_b = (self.chk_b + self.chk_a)%256
self.counter1 += 1
self.curr_mess.msg_length = byte
elif(self.counter1 == 1):
self.chk_a = (self.chk_a + byte)%256
self.chk_b = (self.chk_b + self.chk_a)%256
self.counter1 = 0
self.curr_mess.msg_length = self.curr_mess.msg_length + 256*byte
self.state = payload
elif(self.state == payload):
self.chk_a = (self.chk_a + byte)%256
self.chk_b = (self.chk_b + self.chk_a)%256
self.curr_mess.msg_payload.append(byte)
if(self.counter1 < self.curr_mess.msg_length - 1):
self.counter1 += 1
else:
self.counter1 = 0
self.state = checksum
elif(self.state == checksum):
if(self.counter1 == 0):
self.accepted_chk_a = byte
self.counter1 += 1
elif(self.counter1 == 1):
self.accepted_chk_b = byte
self.counter1 = 0
self.state = waiting_header
self.curr_mess.msg_length = 0
if((self.chk_a == self.accepted_chk_a) & (self.chk_b == self.accepted_chk_b)):
self.mess_queue.put(copy.deepcopy(self.curr_mess))
self.curr_mess.clear()
else:
print("Error! Checksum doesn't match")
def parse_ubx(self):
curr_values = [0,0,0,0,0,0,0]
curr_mess = self.mess_queue.get(False)
#If the buffer held a NAVposllh message
if((curr_mess.msg_class == 0x01) & (curr_mess.msg_id == 0x02)):
#print "NAVposllh message"
msg = NavPosllhMsg()
curr_values = struct.unpack("<IiiiiII", str(bytearray(curr_mess.msg_payload)))
msg.itow = curr_values[0]#Assign the current values into the msg object's parameters
msg.lon = curr_values[1]
msg.lat = curr_values[2]
msg.heightEll = curr_values[3]
msg.heightSea = curr_values[4]
msg.horAcc = curr_values[5]
msg.verAcc = curr_values[6]
if (self.debug == True): return msg
return msg.GPSPosition()
#If the buffer held a NAVstatus message
if((curr_mess.msg_class == 0x01) & (curr_mess.msg_id == 0x03)):
#print "NAVstatus message"
msg = NavStatusMsg()
msg.fixStatus = curr_mess.msg_payload[4]
msg.fixOk = curr_mess.msg_payload[5]
if (self.debug == True): return msg
return msg.GPSStatus()
'''
if((curr_mess.msg_class == 0x06) & (curr_mess.msg_id == 0x00)):
msg = "Found a CFG-PRT I/O message response"
return msg
if((curr_mess.msg_class == 0x06) & (curr_mess.msg_id == 0x01)):
msg = "Found a CFG-MSG poll response"
return msg
'''
return None
#A GPS single communication method
def GPSfetch(self,*args):
if (args):
return self.fetchSpecial()
buffer = self.bus.xfer2([100])
#print buffer
        #yes there is stuff in the buffer, but self.scan_ubx(byt) never returns a valid message after NavStatus successfully runs
        #This problem only happens if you put in a time.sleep() while scanning; even a 0.1 second sleep screws it up.
for byt in buffer:
self.scan_ubx(byt)
if(self.mess_queue.empty() != True):
#print "message"
data = self.parse_ubx()
if (data != None):
if(self.debug == True):
print(data)
else:
return data
return None
def fetchSpecial(self):
"""
(PG 135)
The UBX protocol is designed so that messages can be polled by sending the message required to the receiver
but without a payload (or with just a single parameter that identifies the poll request). The receiver then
responds with the same message with the payload populated
"""
#msg = [0xb5, 0x62, 0x01, 0x03, 0x10,0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x6D] #Status poll?
#msg = [0xb5, 0x62, 0x01, 0x02, 0x1c,0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1F, 0xA6] #Posllh poll?
msg = [0xb5, 0x62, 0x06, 0x01, 0x03, 0x00, 0x01, 0x02, 0x01, 0x0e, 0x47]
buffer = self.bus.xfer2(msg)
for byte in buffer:
self.scan_ubx(byte)
if(self.mess_queue.empty() != True):
data = self.parse_ubx()
if (data != None):
if (self.debug == True):
print(data)
else:
return data
return None
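# The scan_ubx() state machine above verifies the UBX 8-bit Fletcher checksum
# byte by byte. The module-level helper below is an illustrative sketch (not
# part of the original driver) of the same calculation, useful for building
# poll messages such as the hard-coded list in enable_posllh() by hand.
def ubx_checksum(body):
    """Return (CK_A, CK_B) computed over msg class, msg id, length and payload bytes."""
    chk_a = 0
    chk_b = 0
    for byte in body:
        chk_a = (chk_a + byte) % 256
        chk_b = (chk_b + chk_a) % 256
    return chk_a, chk_b
# Example: the CFG-MSG body used by enable_posllh()
# ubx_checksum([0x06, 0x01, 0x03, 0x00, 0x01, 0x02, 0x01]) == (0x0e, 0x47)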
class NavStatusMsg:
def __init__(self):
self.fixOk = 0
self.fixStatus = 0
def __str__(self):
Status = "Reserved value. Current state unknown\n"
if (self.fixStatus == 0x00): Status = "no fix\n"
elif (self.fixStatus == 0x01): Status = "dead reckoning only\n"
elif (self.fixStatus == 0x02): Status = "2D-fix\n"
elif (self.fixStatus == 0x03): Status = "3D-fix\n"
elif (self.fixStatus == 0x04): Status = "GPS + dead reckoning combined\n"
elif (self.fixStatus == 0x05): Status = "Time only fix\n"
return 'Current GPS status:\ngpsFixOk: {}\ngps Fix status: {}'.format(self.fixOk & 0x01, Status)
def GPSStatus(self):
"""
0 = no fix
1 = dead reckoning only
2 = 2D-fix
3 = 3D-fix
4 = GPS + dead reckoning combined
5 = Time only fix
"""
status = {'fStatus':0,'fOk':0}
status['fStatus'] = self.fixStatus
status['fOk'] = self.fixOk
return status
class NavPosllhMsg:
def __init__(self):
self.itow=0
self.lon=0
self.lat=0
self.heightEll=0
self.heightSea=0
self.horAcc=0
self.verAcc=0
def __str__(self):
itow = "GPS Millisecond Time of Week: %d s" % (self.itow/1000)
lon = "Longitude: %.6f" % (self.lon/10000000.0)
lat = "Latitude: %.6f" % (self.lat/10000000.0)
heightEll = "Height above Ellipsoid: %.3f m" % (self.heightEll/1000.0)
heightSea = "Height above mean sea level: %.3f m" % (self.heightSea/1000.0)
horAcc = "Horizontal Accuracy Estateimate: %.3f m" % (self.horAcc/1000.0)
verAcc = "Vertical Accuracy Estateimate: %.3f m" % (self.verAcc/1000.0)
return '{}\n{}\n{}\n{}\n{}\n{}\n{}\n'.format(itow, lon, lat, heightEll, heightSea, horAcc, verAcc)
def GPSPosition(self):
"""Prepares and returns a dictionary holding gps position accuracy, lat, lon, and height"""
position = {'hAcc':0, 'lon':0, 'lat':0, 'hEll':0}
position['hAcc'] = self.horAcc/1000.0
position['lon'] = self.lon/10000000.0
position['lat'] = self.lat/10000000.0
position['hEll'] = self.heightEll/1000.0
return position
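# A minimal usage sketch following the "Use" notes in the module docstring:
# create a U_blox object, enable the POSLLH stream, then poll GPSfetch() in a
# loop and keep the dictionary it returns when it carries a position. Per the
# May 26 note above, no sleep is inserted between fetches. This block is an
# illustration and was not part of the original module.
if __name__ == '__main__':
    ublox = U_blox()
    ublox.enable_posllh()
    while True:
        fix = ublox.GPSfetch()
        # GPSfetch() may return a position dict, a status dict, or None
        if (fix is not None) and ('lat' in fix):
            print('lat=%.6f lon=%.6f hAcc=%.3f m' % (fix['lat'], fix['lon'], fix['hAcc']))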
|
import os
import json
path = f"chest_xray"
path = "state-farm-distracted-driver-detection/train"
path="dogsvscats/train"
db = {"path":path, "lum":-1,"std":-1,"lummin":-1,"lummax":-1, "count":0, "data":[] }
id = 0
def scan(path):
global id
items = os.listdir(path)
for item in items:
if os.path.isdir(f"{path}/{item}"):
scan(f"{path}/{item}")
else:
row = {"id":id, "name":"", "path":"", "lum":-1,"std":-1,"lummin":-1,"lummax":-1}
row["name"] = item
row["path"] = path
db["data"].append(row)
id+=1
if __name__ == '__main__':
print(f"Scan {path}")
scan(path)
db["count"] = len(db["data"])
print(f"Create db.json with {db['count']} items")
with open(f"{path}/db.json", "w") as f:
f.write(json.dumps(db, indent=4))
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import dataclasses
import logging
from typing import Iterable, Mapping, Sequence
from pants.base.build_environment import get_buildroot
from pants.base.deprecated import warn_or_error
from pants.option.arg_splitter import ArgSplitter
from pants.option.config import Config
from pants.option.errors import ConfigValidationError
from pants.option.option_util import is_list_option
from pants.option.option_value_container import OptionValueContainer, OptionValueContainerBuilder
from pants.option.parser import Parser
from pants.option.scope import GLOBAL_SCOPE, GLOBAL_SCOPE_CONFIG_SECTION, ScopeInfo
from pants.util.memo import memoized_method
from pants.util.ordered_set import FrozenOrderedSet, OrderedSet
from pants.util.strutil import softwrap
logger = logging.getLogger(__name__)
class Options:
"""The outward-facing API for interacting with options.
Supports option registration and fetching option values.
Examples:
The value in global scope of option '--foo-bar' (registered in global scope) will be selected
in the following order:
- The value of the --foo-bar flag in global scope.
- The value of the PANTS_GLOBAL_FOO_BAR environment variable.
- The value of the PANTS_FOO_BAR environment variable.
- The value of the foo_bar key in the [GLOBAL] section of pants.toml.
- The hard-coded value provided at registration time.
- None.
The value in scope 'compile.java' of option '--foo-bar' (registered in global scope) will be
selected in the following order:
- The value of the --foo-bar flag in scope 'compile.java'.
- The value of the --foo-bar flag in scope 'compile'.
- The value of the --foo-bar flag in global scope.
- The value of the PANTS_COMPILE_JAVA_FOO_BAR environment variable.
- The value of the PANTS_COMPILE_FOO_BAR environment variable.
- The value of the PANTS_GLOBAL_FOO_BAR environment variable.
- The value of the PANTS_FOO_BAR environment variable.
- The value of the foo_bar key in the [compile.java] section of pants.toml.
- The value of the foo_bar key in the [compile] section of pants.toml.
- The value of the foo_bar key in the [GLOBAL] section of pants.toml.
- The hard-coded value provided at registration time.
- None.
The value in scope 'compile.java' of option '--foo-bar' (registered in scope 'compile') will be
selected in the following order:
- The value of the --foo-bar flag in scope 'compile.java'.
- The value of the --foo-bar flag in scope 'compile'.
- The value of the PANTS_COMPILE_JAVA_FOO_BAR environment variable.
- The value of the PANTS_COMPILE_FOO_BAR environment variable.
- The value of the foo_bar key in the [compile.java] section of pants.toml.
- The value of the foo_bar key in the [compile] section of pants.toml.
- The value of the foo_bar key in the [GLOBAL] section of pants.toml
(because of automatic config file fallback to that section).
- The hard-coded value provided at registration time.
- None.
"""
class DuplicateScopeError(Exception):
"""More than one registration occurred for the same scope."""
class AmbiguousPassthroughError(Exception):
"""More than one goal was passed along with passthrough args."""
@classmethod
def complete_scopes(cls, scope_infos: Iterable[ScopeInfo]) -> FrozenOrderedSet[ScopeInfo]:
"""Expand a set of scopes to include scopes they deprecate.
Also validates that scopes do not collide.
"""
ret: OrderedSet[ScopeInfo] = OrderedSet()
original_scopes: dict[str, ScopeInfo] = {}
for si in sorted(scope_infos, key=lambda _si: _si.scope):
if si.scope in original_scopes:
raise cls.DuplicateScopeError(
softwrap(
f"""
Scope `{si.scope}` claimed by {si}, was also claimed
by {original_scopes[si.scope]}.
"""
)
)
original_scopes[si.scope] = si
ret.add(si)
if si.deprecated_scope:
ret.add(dataclasses.replace(si, scope=si.deprecated_scope))
original_scopes[si.deprecated_scope] = si
return FrozenOrderedSet(ret)
@classmethod
def create(
cls,
env: Mapping[str, str],
config: Config,
known_scope_infos: Iterable[ScopeInfo],
args: Sequence[str],
bootstrap_option_values: OptionValueContainer | None = None,
allow_unknown_options: bool = False,
) -> Options:
"""Create an Options instance.
:param env: a dict of environment variables.
:param config: data from a config file.
:param known_scope_infos: ScopeInfos for all scopes that may be encountered.
:param args: a list of cmd-line args; defaults to `sys.argv` if None is supplied.
:param bootstrap_option_values: An optional namespace containing the values of bootstrap
options. We can use these values when registering other options.
:param allow_unknown_options: Whether to ignore or error on unknown cmd-line flags.
"""
# We need parsers for all the intermediate scopes, so inherited option values
# can propagate through them.
complete_known_scope_infos = cls.complete_scopes(known_scope_infos)
splitter = ArgSplitter(complete_known_scope_infos, get_buildroot())
split_args = splitter.split_args(args)
if split_args.passthru and len(split_args.goals) > 1:
raise cls.AmbiguousPassthroughError(
softwrap(
f"""
Specifying multiple goals (in this case: {split_args.goals})
along with passthrough args (args after `--`) is ambiguous.
Try either specifying only a single goal, or passing the passthrough args
directly to the relevant consumer via its associated flags.
"""
)
)
if bootstrap_option_values:
spec_files = bootstrap_option_values.spec_files
if spec_files:
for spec_file in spec_files:
with open(spec_file) as f:
split_args.specs.extend(
[line for line in [line.strip() for line in f] if line]
)
parser_by_scope = {si.scope: Parser(env, config, si) for si in complete_known_scope_infos}
known_scope_to_info = {s.scope: s for s in complete_known_scope_infos}
return cls(
builtin_goal=split_args.builtin_goal,
goals=split_args.goals,
unknown_goals=split_args.unknown_goals,
scope_to_flags=split_args.scope_to_flags,
specs=split_args.specs,
passthru=split_args.passthru,
parser_by_scope=parser_by_scope,
bootstrap_option_values=bootstrap_option_values,
known_scope_to_info=known_scope_to_info,
allow_unknown_options=allow_unknown_options,
)
def __init__(
self,
builtin_goal: str | None,
goals: list[str],
unknown_goals: list[str],
scope_to_flags: dict[str, list[str]],
specs: list[str],
passthru: list[str],
parser_by_scope: dict[str, Parser],
bootstrap_option_values: OptionValueContainer | None,
known_scope_to_info: dict[str, ScopeInfo],
allow_unknown_options: bool = False,
) -> None:
"""The low-level constructor for an Options instance.
Dependents should use `Options.create` instead.
"""
self._builtin_goal = builtin_goal
self._goals = goals
self._unknown_goals = unknown_goals
self._scope_to_flags = scope_to_flags
self._specs = specs
self._passthru = passthru
self._parser_by_scope = parser_by_scope
self._bootstrap_option_values = bootstrap_option_values
self._known_scope_to_info = known_scope_to_info
self._allow_unknown_options = allow_unknown_options
@property
def specs(self) -> list[str]:
"""The specifications to operate on, e.g. the target addresses and the file names.
:API: public
"""
return self._specs
@property
def builtin_goal(self) -> str | None:
"""The requested builtin goal, if any.
:API: public
"""
return self._builtin_goal
@property
def goals(self) -> list[str]:
"""The requested goals, in the order specified on the cmd line.
:API: public
"""
return self._goals
@property
def unknown_goals(self) -> list[str]:
"""The requested goals without implementation, in the order specified on the cmd line.
:API: public
"""
return self._unknown_goals
@property
def known_scope_to_info(self) -> dict[str, ScopeInfo]:
return self._known_scope_to_info
@property
def known_scope_to_scoped_args(self) -> dict[str, frozenset[str]]:
return {scope: parser.known_scoped_args for scope, parser in self._parser_by_scope.items()}
@property
def scope_to_flags(self) -> dict[str, list[str]]:
return self._scope_to_flags
def verify_configs(self, global_config: Config) -> None:
"""Verify all loaded configs have correct scopes and options."""
section_to_valid_options = {}
for scope in self.known_scope_to_info:
section = GLOBAL_SCOPE_CONFIG_SECTION if scope == GLOBAL_SCOPE else scope
section_to_valid_options[section] = set(self.for_scope(scope, check_deprecations=False))
global_config.verify(section_to_valid_options)
def is_known_scope(self, scope: str) -> bool:
"""Whether the given scope is known by this instance.
:API: public
"""
return scope in self._known_scope_to_info
def register(self, scope: str, *args, **kwargs) -> None:
"""Register an option in the given scope."""
self.get_parser(scope).register(*args, **kwargs)
deprecated_scope = self.known_scope_to_info[scope].deprecated_scope
if deprecated_scope:
self.get_parser(deprecated_scope).register(*args, **kwargs)
def registration_function_for_subsystem(self, subsystem_cls):
"""Returns a function for registering options on the given scope."""
# TODO(benjy): Make this an instance of a class that implements __call__, so we can
# docstring it, and so it's less weird than attaching properties to a function.
def register(*args, **kwargs):
self.register(subsystem_cls.options_scope, *args, **kwargs)
# Clients can access the bootstrap option values as register.bootstrap.
register.bootstrap = self.bootstrap_option_values()
# Clients can access the scope as register.scope.
register.scope = subsystem_cls.options_scope
return register
def get_parser(self, scope: str) -> Parser:
"""Returns the parser for the given scope, so code can register on it directly."""
try:
return self._parser_by_scope[scope]
except KeyError:
raise ConfigValidationError(f"No such options scope: {scope}")
def _check_and_apply_deprecations(self, scope, values):
"""Checks whether a ScopeInfo has options specified in a deprecated scope.
There are two related cases here. Either:
1) The ScopeInfo has an associated deprecated_scope that was replaced with a non-deprecated
scope, meaning that the options temporarily live in two locations.
2) The entire ScopeInfo is deprecated (as in the case of deprecated SubsystemDependencies),
meaning that the options live in one location.
In the first case, this method has the sideeffect of merging options values from deprecated
scopes into the given values.
"""
si = self.known_scope_to_info[scope]
# If this Scope is itself deprecated, report that.
if si.removal_version:
explicit_keys = self.for_scope(scope, check_deprecations=False).get_explicit_keys()
if explicit_keys:
warn_or_error(
removal_version=si.removal_version,
entity=f"scope {scope}",
hint=si.removal_hint,
)
# Check if we're the new name of a deprecated scope, and clone values from that scope.
# Note that deprecated_scope and scope share the same Subsystem class, so deprecated_scope's
# Subsystem has a deprecated_options_scope equal to deprecated_scope. Therefore we must
# check that scope != deprecated_scope to prevent infinite recursion.
deprecated_scope = si.deprecated_scope
if deprecated_scope is not None and scope != deprecated_scope:
# Do the deprecation check only on keys that were explicitly set
# on the deprecated scope.
explicit_keys = self.for_scope(
deprecated_scope, check_deprecations=False
).get_explicit_keys()
if explicit_keys:
# Update our values with those of the deprecated scope.
# Note that a deprecated val will take precedence over a val of equal rank.
# This makes the code a bit neater.
values.update(self.for_scope(deprecated_scope))
warn_or_error(
removal_version=self.known_scope_to_info[
scope
].deprecated_scope_removal_version,
entity=f"scope {deprecated_scope}",
hint=f"Use scope {scope} instead (options: {', '.join(explicit_keys)})",
)
def _make_parse_args_request(
self, flags_in_scope, namespace: OptionValueContainerBuilder
) -> Parser.ParseArgsRequest:
return Parser.ParseArgsRequest(
flags_in_scope=flags_in_scope,
namespace=namespace,
passthrough_args=self._passthru,
allow_unknown_flags=self._allow_unknown_options,
)
# TODO: Eagerly precompute backing data for this?
@memoized_method
def for_scope(self, scope: str, check_deprecations: bool = True) -> OptionValueContainer:
"""Return the option values for the given scope.
Values are attributes of the returned object, e.g., options.foo.
Computed lazily per scope.
:API: public
"""
values_builder = OptionValueContainerBuilder()
flags_in_scope = self._scope_to_flags.get(scope, [])
parse_args_request = self._make_parse_args_request(flags_in_scope, values_builder)
values = self.get_parser(scope).parse_args(parse_args_request)
# Check for any deprecation conditions, which are evaluated using `self._flag_matchers`.
if check_deprecations:
values_builder = values.to_builder()
self._check_and_apply_deprecations(scope, values_builder)
values = values_builder.build()
return values
def get_fingerprintable_for_scope(
self,
scope: str,
daemon_only: bool = False,
):
"""Returns a list of fingerprintable (option type, option value) pairs for the given scope.
Options are fingerprintable by default, but may be registered with "fingerprint=False".
        This method also searches enclosing options scopes of `scope` to determine the set of
fingerprintable pairs.
:param scope: The scope to gather fingerprintable options for.
:param daemon_only: If true, only look at daemon=True options.
"""
pairs = []
parser = self.get_parser(scope)
# Sort the arguments, so that the fingerprint is consistent.
for _, kwargs in sorted(parser.option_registrations_iter()):
if not kwargs.get("fingerprint", True):
continue
if daemon_only and not kwargs.get("daemon", False):
continue
val = self.for_scope(scope)[kwargs["dest"]]
# If we have a list then we delegate to the fingerprinting implementation of the members.
if is_list_option(kwargs):
val_type = kwargs.get("member_type", str)
else:
val_type = kwargs.get("type", str)
pairs.append((val_type, val))
return pairs
def __getitem__(self, scope: str) -> OptionValueContainer:
# TODO(John Sirois): Mainly supports use of dict<str, dict<str, str>> for mock options in tests,
# Consider killing if tests consolidate on using TestOptions instead of the raw dicts.
return self.for_scope(scope)
def bootstrap_option_values(self) -> OptionValueContainer | None:
"""Return the option values for bootstrap options.
General code can also access these values in the global scope. But option registration code
cannot, hence this special-casing of this small set of options.
"""
return self._bootstrap_option_values
def for_global_scope(self) -> OptionValueContainer:
"""Return the option values for the global scope.
:API: public
"""
return self.for_scope(GLOBAL_SCOPE)
|
import argparse
import os
import shutil
from zipfile import ZipFile
parser = argparse.ArgumentParser(description="zip files with the same prefix recursively")
parser.add_argument('to_exclude', type=str, help='The type to be excluded')
parser.add_argument('home', type=str, help='Home of folders')
parser.add_argument('dest', type=str, help='Destination of zips')
parser.add_argument('zipname', type=str, help='Name of the zips')
args = parser.parse_args()
def get_files(prefix:str, home:str, current:str, dest:str, action):
for e in os.listdir(current):
        curr = os.path.join(current, e)  # current already starts at home, so join only the entry name
if os.path.isdir(curr):
destination = curr.replace(home,dest)
yield from get_files(prefix, home,curr, dest, action)
elif os.path.isfile(curr) and ((action == "exclude" and not(e.startswith(prefix))) or
((action =="include_only") and e.startswith(prefix))):
destination = curr.replace(home, dest)
yield destination
def get_wrapper(prefix, home, dest, action):
return get_files(prefix,home,home,dest,action)
def move_files(prefix, home, dest):
for m in get_wrapper(prefix,home, dest, "exclude"):
os.makedirs(os.path.dirname(m), exist_ok=True)
src = m.replace(dest, home)
shutil.copyfile(src,m)
def create_zips(prefix):
with ZipFile(prefix+args.zipname+'.zip', 'w') as zf:
for m in get_wrapper(prefix, args.home, args.dest, "include_only"):
zf.write(m)
if __name__ == "__main__":
move_files(args.to_exclude, args.home, args.dest)
for prefix in ["b_", "c_"]:
create_zips(prefix)
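# Example invocation (the script name, prefix and paths are illustrative):
#
#   python zip_by_prefix.py tmp_ /data/project /data/staging archive
#
# This copies every file that does not start with "tmp_" from /data/project
# into /data/staging, preserving the directory tree, and then builds
# b_archive.zip and c_archive.zip in the current directory from the staged
# files that start with the hard-coded prefixes "b_" and "c_" in __main__.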
|
__author__ = 'martslaaf'
import numpy as np
from random import shuffle
from wavelets import Morlet
etta = 0.01
def from_signal_freq(signal, nyq_freq):
fourier = abs(np.fft.fft(signal))
positive = fourier[:int(fourier.shape[-1]/2)]
needed = sum(list(positive)) * 0.95
maximum = np.argmax(positive)
right_point = maximum
left_point = maximum
while needed > 0:
current = np.argmax(positive)
if current > right_point:
right_point = current
if current < left_point:
left_point = current
needed -= positive[current]
positive[current] = 0
step = nyq_freq / positive.shape[-1]
min_freq = left_point * step if left_point > 0 else 0.5 * step
max_freq = right_point * step
return min_freq, max_freq
def wavelon_class_constructor(motherfunction=None, period=None, frame=None, signal=None, fa=False):
Mtf = motherfunction if motherfunction else Morlet
if period:
freq_Nyquist = 1.0 / (2 * period)
if signal and fa:
min_freq, max_freq = from_signal_freq(signal, freq_Nyquist)
elif signal:
min_freq, max_freq = freq_Nyquist / len(signal), freq_Nyquist
else:
min_freq, max_freq = 0.2e-300, freq_Nyquist
else:
min_freq, max_freq = 0.2e-300, 1
min_dela, max_dela = Mtf.from_freq(max_freq), Mtf.from_freq(max_freq)
if frame:
min_trans, max_trans = frame
else:
min_trans, max_trans = -1, 1
class Wavelon():
def __init__(self, indim, outdim, hiddim):
self.indim = indim
self.outdim = outdim
self.hiddim = hiddim
self.inconnections = np.random.random_sample((indim, hiddim))
self.outconnections = np.random.random_sample((hiddim, outdim))
np.random.seed()
self.summer = np.random.random_sample((1, outdim))
self.translations = np.random.random_sample((1, hiddim)) * (max_trans - min_trans) + min_trans
self.dilations = np.random.random_sample((1, hiddim)) * (max_dela - min_dela) + min_dela
self._mother = Mtf
self.wavemodeon = True
self.old_data = {'summer': 0, 'inconnections': 0, 'outconnections': 0, 'translations': 0, 'dilations': 0}
def forward(self, input):
U = np.reshape(input, (1, self.indim))
a = np.dot(self._mother.function((np.dot(U, self.inconnections) - self.translations)/self.dilations), self.outconnections) + self.summer
return a
def backup(self, delta_Chi, delta_M, delta_Omega, delta_T=None, delta_Lambda=None):
def step(x, y, o):
return x + y * etta + etta * (x - o)
new = {}
new['summer'] = step(self.summer, delta_Chi, self.old_data['summer'])
if self.wavemodeon:
new['dilations'] = step(self.dilations, delta_Lambda, self.old_data['dilations'])
new['translations'] = step(self.translations, delta_T, self.old_data['translations'])
new['inconnections'] = step(self.inconnections, delta_Omega, self.old_data['inconnections'])
new['outconnections'] = step(self.outconnections, delta_M, self.old_data['outconnections'])
self.old_data['summer'] = self.summer
if self.wavemodeon:
self.old_data['dilations'] = self.dilations
self.old_data['translations'] = self.translations
self.old_data['inconnections'] = self.inconnections
self.old_data['outconnections'] = self.outconnections
self.summer = new['summer']
if self.wavemodeon:
self.dilations = new['dilations']
self.translations = new['translations']
self.inconnections = new['inconnections']
self.outconnections = new['outconnections']
def backward(self, error, input):
U = np.reshape(input, (1, self.indim))
Err = np.reshape(error, (1, self.outdim))
Z = self._mother.function((np.dot(U, self.inconnections) - self.translations)/self.dilations)
Zs = self._mother.derivative((np.dot(U, self.inconnections) - self.translations)/self.dilations)
delta_Chi = Err
delta_M = np.dot(Z.transpose(), Err)
# print U.shape
delta_Omega = np.dot(U.transpose(), (np.dot(self.outconnections, Err.transpose()).transpose()*(Zs/self.dilations)))
# print Z.shape, Zs.shape, delta_Chi.shape, delta_M.shape, delta_Omega.shape
if self.wavemodeon:
delta_T = np.dot(Err, self.outconnections.transpose())*(Zs/self.dilations)
delta_Lambda = Zs*((np.dot(U, self.inconnections) - self.translations)/(self.dilations*self.dilations))
self.backup(delta_Chi, delta_M, delta_Omega, delta_T, delta_Lambda)
else:
self.backup(delta_Chi, delta_M, delta_Omega)
return Wavelon
def trainer(epochs, training, validation, net):
track = []
for i in range(epochs):
localtrain = training[:]
shuffle(localtrain)
for element in localtrain:
net.backward(element[1] - net.forward(element[0]), element[0])
local_mse = 0.0
for element in validation:
local_mse += 0.5 * sum((element[1] - net.forward(element[0])) ** 2)
track.append(local_mse / (len(validation) * net.outdim))
return track
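# A minimal end-to-end sketch (not part of the original module): build a
# Wavelon class for a signal sampled every 0.01 s, create a 1-input/1-output
# network with 8 hidden wavelons and train it on toy sine data. It assumes the
# Morlet class imported above provides the function/derivative/from_freq
# interface that Wavelon relies on; the sample sizes and epoch count are arbitrary.
if __name__ == '__main__':
    xs = np.linspace(0, 2 * np.pi, 200)
    samples = [(np.array([x]), np.array([np.sin(x)])) for x in xs]
    training, validation = samples[:150], samples[150:]
    Wavelon = wavelon_class_constructor(period=0.01, frame=(0, 2 * np.pi))
    net = Wavelon(indim=1, outdim=1, hiddim=8)
    mse_track = trainer(epochs=20, training=training, validation=validation, net=net)
    print(mse_track[-1])  # validation MSE after the final epoch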
|
import os
from flask_wtf import Form
from wtforms import StringField, PasswordField, SelectField, BooleanField
from wtforms.validators import DataRequired, Length, StopValidation
class BaseForm(Form):
pass
def is_cf_enabled(form, field):
if not form.setup_cf.data:
# clear out processing errors
field.errors[:] = []
# Stop further validators running
raise StopValidation()
class CreateIdeForm(BaseForm):
display_name = StringField('Display name', validators=[DataRequired(), Length(min=1, max=64)])
timeout = SelectField(u'Timeout', choices=[('15m', '15 minutes'), ('1h', '1 hour'), ('2h', '2 hours'), ('3h', '3 hours')], default="15m")
git_clones = StringField('Git clones', validators=[DataRequired()], default='https://github.com/mcowger/hello-python.git')
setup_cf = BooleanField('CloudFoundry')
cf_api_endpoint = StringField('Api endpoint', validators=[is_cf_enabled, DataRequired()], default='https://api.cfapps.tailab.eu')
cf_username = StringField('Username', validators=[is_cf_enabled, DataRequired()], default='')
cf_password = PasswordField('Password', validators=[is_cf_enabled, DataRequired(), Length(min=1, max=64)], default='')
cf_org = StringField('Organization', validators=[is_cf_enabled, DataRequired()], default='ideaas-test-project')
cf_spc = StringField('Space', validators=[is_cf_enabled, DataRequired()], default='dev')
|
def merge_testcase_special(string):
string = string.replace(' ','')
if len(string) == 0:
return('')
a_raw = string.split(',')
tempt = ''
tempt_t = ''
for i in a_raw:
tempt = i.split('~')
print(tempt)
if tempt[0] == tempt[1]:
tempt_t = tempt_t + ',' +tempt[0]
else:
tempt_t = tempt_t + ',' +tempt[0] + '~' + tempt[1]
return(tempt_t[1:])
#-------------------------------------
my_array = merge_testcase_special('')
print(my_array)
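# A quick illustrative check of the collapsing behaviour: ranges whose two
# endpoints are equal are reduced to a single value, the rest stay "start~end".
# The sample string below is made up for demonstration.
print(merge_testcase_special('1~1, 2~5, 7~7'))  # -> '1,2~5,7'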
|
from src.main.model.model import Config, Parameters
from src.main.dataset.dataset import Dataset
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn import datasets
from src.main.model.classification.kernelsvm import SVM
# model configurations
config = Config(feature_num=2, batch_size=100, learning_rate=0.01, epoche=1000)
#data
(x_vals, y_vals) = datasets.make_circles(n_samples=500, factor=.5,noise=.1)
y_vals = np.array([1 if y==1 else -1 for y in y_vals])
class1_x = [x[0] for i,x in enumerate(x_vals) if y_vals[i]==1]
class1_y = [x[1] for i,x in enumerate(x_vals) if y_vals[i]==1]
class2_x = [x[0] for i,x in enumerate(x_vals) if y_vals[i]==-1]
class2_y = [x[1] for i,x in enumerate(x_vals) if y_vals[i]==-1]
dataset=Dataset(tf.convert_to_tensor(x_vals, dtype=tf.float32), tf.reshape(tf.convert_to_tensor(y_vals, dtype=tf.float32), [-1,1]), batch_size=100)
# model
sess = tf.Session()
model = SVM(dataset=dataset, config=config, parameters=Parameters())
sess.run(tf.global_variables_initializer())
model.training(session=sess)
print(sess.run([model.weights, model.bias]))
x_min, x_max = x_vals[:, 0].min() - 1, x_vals[:, 0].max() + 1
y_min, y_max = x_vals[:, 1].min() - 1, x_vals[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.02),np.arange(y_min, y_max, 0.02))
grid_points = np.c_[xx.ravel(), yy.ravel()]
X=tf.convert_to_tensor(x_vals, dtype=tf.float32)
Y=tf.convert_to_tensor(y_vals, dtype=tf.float32)
kernel=model.kernel(X=X, Y=tf.convert_to_tensor(grid_points, dtype=tf.float32), gamma=-50.)
grid_predictions = sess.run(model.predictions(Y, kernel))
grid_predictions = grid_predictions.reshape(xx.shape)
plt.contourf(xx, yy, grid_predictions, cmap=plt.cm.Paired,
alpha=0.8)
plt.plot(class1_x, class1_y, 'ro', label='Class 1')
plt.plot(class2_x, class2_y, 'kx', label='Class -1')
plt.legend(loc='lower right')
plt.ylim([-1.5, 1.5])
plt.xlim([-1.5, 1.5])
plt.show()
|
"""
Author : Bastien RATAT
Analyzing CAC40 30 best stocks on Yahoo Finance
"""
import re
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
import os
import datetime
cwd = os.getcwd()
# C:\Users\Bastien\Desktop\Python\data_scrapping
_, file_extension = os.path.splitext(
'C:/Users/Bastien/Desktop/Python/data_scrapping/2020-01-03.csv')
dataframes = {
"dates": [],
"files": [],
"df": {}
}
for file in os.listdir(cwd):
file_name, file_extension = os.path.splitext(f'C:/Users/Bastien/Desktop/Python/data_scrapping/{file}')
if file_extension == '.csv':
temp = re.findall(r'\d+\-\d+\-\d+', file_name)[0]
dataframes['dates'].append(temp)
dataframes['files'].append(f"{temp}.csv")
index_1 = 0
for file in dataframes['files']:
dataframes['df'][f'df{index_1}'] = pd.read_csv(file)
dataframes['df'][f'df{index_1}'] = dataframes['df'][f'df{index_1}'].set_index("symbol")
index_1 += 1
index_2 = 0
for date in dataframes["dates"]:
dataframes['df'][f'df{index_2}'] = dataframes['df'][f'df{index_2}'].rename(columns={"last prices":
f"last prices {date}"})
index_2 += 1
print(dataframes['df']['df0'])
print(dataframes['df']['df1'])
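# A small illustrative follow-up (an assumption about intent, not part of the
# original script): align the per-date frames on their shared "symbol" index
# and keep only the renamed price columns, giving one price history per stock.
price_columns = [f"last prices {date}" for date in dataframes["dates"]]
history = pd.concat(list(dataframes['df'].values()), axis=1, join='inner')[price_columns]
print(history.head())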
|
from SignalGenerationPackage.SignalData import SignalData
class EdgeSignalData(SignalData):
def __init__(self):
super().__init__()
self.StartTime = 0
self.AccelerationTime = 0
self.PlateauTime = 0
self.DecelerationTime = 0
self.EndTime = 0
self.WholePeriod = 0
self.LowLevelFrequency = 0
self.HighLevelFrequency = 0
        # Acceleration/deceleration times required
        # by the variable-frequency drive
self.MinFrequency = 0
self.MaxFrequency = 50
self.NecessaryAccelerationTime = 0
self.NecessaryDecelerationTime = 0
        # Critical acceleration parameters
        self.CriticalAcceleration = (self.MaxFrequency - self.MinFrequency) / (15) # from 0 to 50 Hz in 15 s
        self.CriticalDeceleration = (self.MaxFrequency - self.MinFrequency) / (15) # from 50 to 0 Hz in 15 s
        # Acceleration / deceleration (recalculated whenever the signal is edited)
self.AccelerationCoeff = 0
self.DecelerationCoeff = 0
        # Whether to convert to flow rate in the visualization
        # Linear conversion coefficients
self.RecalcFlowrate = False
self.k_flowrate_coefficient = 1.0 # kx + b
self.b_flowrate_coefficient = 0.0 # kx + b
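# A minimal sketch of how the "kx + b" coefficients above are meant to be used
# when RecalcFlowrate is True: map a frequency set point (Hz) to a flow rate
# for visualization. The helper name frequency_to_flowrate and the example
# numbers are illustrative assumptions, not part of the original class.
def frequency_to_flowrate(signal_data, frequency_hz):
    if not signal_data.RecalcFlowrate:
        return frequency_hz
    return signal_data.k_flowrate_coefficient * frequency_hz + signal_data.b_flowrate_coefficient

# Example: with k = 2.0 and b = 5.0 a 10 Hz set point maps to a flow rate of 25.0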
|
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 21 01:31:45 2017
@author: Rafal
"""
from collections import Counter
import pandas as pd
def extract_mdb_to_pd(collection):
# Get data from mongodb and convert into dataframe
col = collection.find({})
return pd.DataFrame(list(col))
def get_frequencies(df, count):
''' Uses Pandas function to group by the feature name, count the frequencies of each value within the feature and sort'''
features = ['Company', 'Title', 'City', 'State']
frequency_series = []
for feature in features:
frequency_series.append({feature: df.groupby(feature)['_id'].size().sort_values(ascending=False).head(count).to_dict()})
return frequency_series
def title_vocabulary(df, top_count):
# Titles Vocabulary contains all words found in titles
title_vocabulary = []
# Break apart words in titles
titles = df['Title'].str.split(' ')
# Push all words into a single array
for word_group in titles:
if word_group:
for word in word_group:
title_vocabulary.append(word.strip())
# Get Word frequency into a dictionary
title_word_count = dict(Counter(title_vocabulary))
# Convert into a dataframe, sort and extract top results
title_df = pd.DataFrame(index=list(title_word_count), data={'count': list(title_word_count.values())}, )
title_df = title_df[~(title_df.index.isin(['And', 'Usa', 'I', 'Li', '', ' ']))]
# Exclude searched words - may be used for alternative positions/keywords
# title_df = title_df[~(title_df.index.isin(['data', 'engineer']))]
top = title_df.sort_values('count', ascending=False).head(top_count)
print('Key Words: ' + str(top))
return title_df
def title(title_df):
    '''
    Determines the seniority levels present in the titles and how often each occurs,
    then returns the title dataframe with the seniority keywords removed.
    '''
# Define title seniority levels and keywords for each
# The keywords should start with a capital letter as every word within the title was converted during collection
rank_keywords = {'intern': ['Intern', 'Internship'],
'junior': ['Jr', 'Junior', 'Entry', 'Associate', 'New'],
'senior': ['Sr', 'Senior', 'Senior '],
'lead': ['Lead', 'Chief', 'Principal', 'Head'],
'manager': ['Manager'],
'director': ['Director', 'Vice', 'President']}
# For each level, sum the number of matched keywords
intern = int(title_df[(title_df.index.isin(rank_keywords['intern']))].sum())
junior = int(title_df[(title_df.index.isin(rank_keywords['junior']))].sum())
senior = int(title_df[(title_df.index.isin(rank_keywords['senior']))].sum())
lead = int(title_df[(title_df.index.isin(rank_keywords['lead']))].sum())
manager = int(title_df[(title_df.index.isin(rank_keywords['manager']))].sum())
director = int(title_df[(title_df.index.isin(rank_keywords['director']))].sum())
# Convert the seniority into a dictionary for charting
seniority_dict = {'Intern': intern,
'Junior': junior,
'Senior': senior,
'Lead': lead,
'Manager': manager,
'Director': director}
print(seniority_dict)
# Remove ranks from top titles
title_df_no_rank = title_df[(~title_df.index.isin(rank_keywords['junior']))]
title_df_no_rank = title_df_no_rank[~(title_df_no_rank.index.isin(rank_keywords['senior']))]
title_df_no_rank = title_df_no_rank[~(title_df_no_rank.index.isin(rank_keywords['lead']))]
title_df_no_rank = title_df_no_rank[~(title_df_no_rank.index.isin(rank_keywords['manager']))]
title_df_no_rank = title_df_no_rank[~(title_df_no_rank.index.isin(rank_keywords['director']))]
# Remove insignificant words
#title_df_no_rank = title_df_no_rank[~(title_df_no_rank.index.isin(['And', 'Usa', ' ']))]
title_df_no_rank = title_df_no_rank.sort_values('count')
    # @TODO: the MongoDB insert should be triggered from a separate function
    # Insert the summarized data into mongodb
    #analyzed_collection = str(collection.name) + '_analyzed'
    #analyzed_collection.insert_many(json.loads(title_df_no_rank.T.to_json()).values())
    return title_df_no_rank
def position_type(title_df):
type_keywords = {'full': ['Full', 'Time', 'Permanent'],
'part': ['Part', 'Diem'],
'temp': ['Temp', 'Temporary'],
'cont': ['Contract', 'Cont', 'Contr']}
# For each level, sum the number of matched keywords
full = int(title_df[(title_df.index.isin(type_keywords['full']))].sum())
part = int(title_df[(title_df.index.isin(type_keywords['part']))].sum())
temp = int(title_df[(title_df.index.isin(type_keywords['temp']))].sum())
cont = int(title_df[(title_df.index.isin(type_keywords['cont']))].sum())
# Convert the seniority into a dictionary for charting
type_dict = {'Full': full,
'Part': part,
'Temp': temp,
'Contract': cont}
print(type_dict)
return type_dict
def salary(df, title_df):
# Get only Salary Columns
salary_df = df[['Salary Min', 'Salary Max']]
# Convert it to numeric
salary_df = salary_df[['Salary Min', 'Salary Max']].apply(pd.to_numeric)
# Drop any rows with salary of 0
salary_df = salary_df[salary_df['Salary Min'] > 0]
salary_df = salary_df[salary_df['Salary Max'] > 0]
# Create a new column - salary average
salary_df['avg'] = (salary_df['Salary Max'] + salary_df['Salary Min']) / 2
salary_df['Title'] = df['Title']
# linear regression on Salary range
#sns.regplot(x="Salary Max", y="Salary Min", data=salary_df)
# Get salary range for top 20 keywords
keywords = title_df.sort_values(by='count', ascending=False)
keywords = keywords[keywords.index != '-'].head(20).index
    kw_worth = []
    for kw in keywords:
        kw_worth.append({kw: list(salary_df[salary_df['Title'].str.contains(kw)]['avg'].astype(float))})
    # Merge the per-keyword dicts; pd.Series pads unequal column lengths with NaN
    kw_worth_df = pd.DataFrame({k: pd.Series(v) for d in kw_worth for k, v in d.items()})
    return kw_worth_df
|
"""Test the analog.utils module."""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from collections import Counter
import os
import tempfile
import textwrap
from analog import utils
def test_analog_argument_parser():
"""Analog uses a custom argumentparser.
It accepts comma/whitespace separated arguments when reading from a file.
"""
parser = utils.AnalogArgumentParser(fromfile_prefix_chars='@')
parser.add_argument('-p', '--path', action='append')
parser.add_argument('-f', '--flag', action='store_true')
parser.add_argument('-s', '--status', action='append')
parser.add_argument('-v', '--verb', action='append')
parser.add_argument('file', action='store')
argfile = tempfile.NamedTemporaryFile(delete=False)
with open(argfile.name, 'w') as fp:
fp.write(textwrap.dedent('''\
-p=/foo/bar
--path /baz/bum
--flag
-s=200 400 500
--verb GET, POST, PUT
somefile.log
'''))
args = parser.parse_args(['@' + argfile.name])
assert args.path == ['/foo/bar', '/baz/bum']
assert args.flag is True
assert args.status == ['200', '400', '500']
assert args.verb == ['GET', 'POST', 'PUT']
assert args.file == 'somefile.log'
os.unlink(argfile.name)
def test_prefix_matching_counter():
"""PrefixMatchingCounter is a Counter that matches string prefixes."""
pmc = utils.PrefixMatchingCounter({'2': 0, '40': 0})
assert isinstance(pmc, Counter)
pmc.inc(200)
pmc.inc(206)
pmc.inc(200)
pmc.inc(404)
pmc.inc(409)
pmc.inc(400)
pmc.inc(302)
pmc.inc(419)
pmc.inc(499)
assert pmc['2'] == 3
assert pmc['40'] == 3
|
from django.urls import path
from .views import (
PostListView,
PostDetailView,
PostCreateView,
PostUpdateView,
PostDeleteView,
UserPostListView
)
from . import views
urlpatterns = [
path('', PostListView.as_view(), name='blog-home'),
path('user/<str:username>', UserPostListView.as_view(), name='user-posts'),
path('post/<int:pk>/', PostDetailView.as_view(), name='post-detail'),
path('post/new/', PostCreateView.as_view(), name='post-create'),
path('post/<int:pk>/update/', PostUpdateView.as_view(), name='post-update'),
path('post/<int:pk>/delete/', PostDeleteView.as_view(), name='post-delete'),
path('about/', views.about, name='blog-about'),
path('quiz/', views.quiz, name='quiz'),
path('upload/', views.upload, name='upload'),
path('books/', views.book_list, name='book_list'),
path('books/upload', views.upload_book, name='upload_book'),
path('books/<int:pk>', views.delete_book, name='delete_book'),
path('index/', views.index, name='index'),
path('<int:question_id>/detail', views.detail, name='detail'),
path('<int:question_id>/results/', views.results, name='results'),
path('<int:question_id>/rper/', views.rper, name='rper'),
path('<int:question_id>/vote/', views.vote, name='vote'),
]
|
import re
def valid_email(email):
"""Checks that email includes '@' and '.'.
Args:
email: String of representation of an email address.
Returns:
        A match object (truthy) if the email address is valid,
        or None if there is no match.
"""
return re.match(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)",
email)
def valid_password(password):
"""Checks that a password is at least 3 characters.
Args:
password: The string representing the password.
Returns:
        A match object (truthy) if the password is valid,
        or None if there is no match.
"""
return re.match(r"[a-zA-Z0-9]{3,}", password)
def check_registration(fields, user_exists):
"""Checks that all fields in registrtion form are valid.
Args:
fields: A dict containing fields from registration form.
user_exists: A boolean value. True if user exists,
false if user does not exist.
Returns:
A dict of any errors. If no errors, returns an empty dict (False).
"""
errors = {}
if user_exists:
errors['user_exists'] = True
if not valid_email(fields['email']):
errors['email'] = True
if not fields['name']:
errors['name'] = True
if not valid_password(fields['password']):
errors['password'] = True
if fields['password'] != fields['verify_password']:
errors['verify_password'] = True
return errors
def check_login(fields):
"""Checks that all fields in login form are valid.
Args:
fields: A dict containing fields from login form.
Returns:
A dict of any errors. If no errors, returns an empty dict (False).
"""
errors = {}
if not valid_email(fields['email']):
errors['email'] = True
if not fields['password']:
errors['password'] = True
return errors
def check_no_blanks(fields):
"""Checks that no fields are empty.
Args:
fields: A dict containing fields from a form.
Returns:
A dict of any errors. If no errors, returns an empty dict (False).
"""
errors = {}
for field in fields:
if fields[field] == '':
errors[field] = True
return errors
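# A small illustrative usage of the validators above; the sample form data is
# made up. An empty dict means the input passed every check.
if __name__ == '__main__':
    sample_fields = {
        'email': 'user@example.com',
        'name': 'Ada',
        'password': 'abc123',
        'verify_password': 'abc123',
    }
    print(check_registration(sample_fields, user_exists=False))  # -> {}
    print(check_login({'email': 'not-an-email', 'password': ''}))
    # -> {'email': True, 'password': True}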
|
from PyQt4.QtGui import *
class MyDialog(QDialog):
def __init__(self):
QDialog.__init__(self)
label = QLabel()
        # Write text on the label
#label.setText("Normal")
label.setText("<a href='https://www.google.com'>www.google.com</a>")
# Layout
layout = QVBoxLayout()
layout.addWidget(label)
# Set layout on a MyDialog
self.setLayout(layout)
# app
app = QApplication([])
dialog = MyDialog()
dialog.show()
app.exec_()
|
import os
from dataclasses import dataclass
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from ray import tune
from ray.tune.schedulers import ASHAScheduler
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
import src.data as dta
import src.training.training_helper as th
import src.utils as utils
from src.constants import Constants as c
from src.model import EcgNetwork, EcgAmigosHead
path_to_src_model: str = c.model_base_path
basepath_to_tuned_model: str = c.model_base_path + "tuned/"
@dataclass
class TuningParams:
batch_size:int = 32
num_workers:int = 1#2
epochs:int = 200
valid_size = 0.2
test_size = 0.1
good_params_for_single_run = {
"finetune": {
"batch_size": 16,
"adam": {"lr": 0.000128268},
"scheduler": {"decay": 0.9}
}
}
def rm_nan(labels, column):
col = labels[:, column]
col[col != col] = 0.0 # remove nans
return col
def binary_labels(labels):
    # most old-fashioned literature just does a high/low valence/arousal classification (so 4 classes)
    # let's first also do this old-fashioned task
labels[:, 0] = rm_nan(labels, 0)
labels[:, 1] = rm_nan(labels, 1)
labels = (labels > 5.0).type(torch.FloatTensor)
return labels
def train_finetune_tune_task(target_dataset: dta.DataSets, target_id, num_samples=10, max_num_epochs=200, gpus_per_trial=0.5):
config = {
"finetune": {
"batch_size": tune.choice([8, 16, 32, 64, 128]),
"adam": {"lr": tune.loguniform(5e-4, 1e-1)},
"scheduler": {
"type": tune.choice(['decay', 'cosine_w_restarts', 'none']),
"decay": tune.loguniform(0.99, 0.90),
"warmup": tune.randint(5, 15),
"cycles": tune.randint(1, 4)
}
}
}
scheduler = ASHAScheduler(
max_t=max_num_epochs,
grace_period=1,
reduction_factor=2)
result = tune.run(
tune.with_parameters(finetune_to_target_full_config, target_dataset=target_dataset, target_id=target_id),
resources_per_trial={"cpu": 3, "gpu": gpus_per_trial},
config=config,
metric="loss",
mode="min",
num_samples=num_samples,
scheduler=scheduler
)
utils.print_ray_overview(result, 'finetuning')
best_trial = result.get_best_trial("loss", "min", "last")
print("Best trial config: {}".format(best_trial.config))
print("Best trial final validation loss: {}".format(
best_trial.last_result["loss"]))
print("Best trial final validation accuracy: {}".format(
best_trial.last_result["accuracy"]))
# dataset = dta.ds_to_constructor[target_dataset](dta.DataConstants.basepath)
best_trained_model = EcgAmigosHead(2) # = EcgAmigosHead(dataset.target_size)
train_on_gpu = torch.cuda.is_available()
if train_on_gpu:
best_trained_model = best_trained_model.cuda()
if torch.cuda.device_count() > 1:
best_trained_model = nn.DataParallel(best_trained_model)
checkpoint_path = os.path.join(best_trial.checkpoint.value, "checkpoint")
device = 'cuda' if train_on_gpu else 'cpu'
model_state, optimizer_state = torch.load(checkpoint_path, map_location=device)
best_trained_model = utils.save_load_state_dict(best_trained_model, model_state)
print('------------------------------------------------------------------------------')
print(' Saving best model from hyperparam search ')
print(' for later use ')
print('------------------------------------------------------------------------------')
torch.save(best_trained_model.state_dict(), f'{basepath_to_tuned_model}tuned_for_{target_id}.pt')
def finetune_to_target_full_config(hyperparams_config, checkpoint_dir=None, target_dataset: dta.DataSets=[], target_id=None, use_tune=True):
default_params = TuningParams()
default_params.batch_size = hyperparams_config['finetune']['batch_size']
train_on_gpu = torch.cuda.is_available()
dataset = dta.ds_to_constructor[target_dataset](dta.DataConstants.basepath)
does_not_matter = len(dta.AugmentationsPretextDataset.STD_AUG) + 1
ecg_net = EcgNetwork(does_not_matter, dataset.target_size)
model = EcgAmigosHead(2)
model.debug_values = False
embedder = ecg_net.cnn
device = 'cuda' if train_on_gpu else 'cpu'
state_dict = torch.load(f'{path_to_src_model}model_embedding.pt', map_location=torch.device(device))
embedder.load_state_dict(state_dict)
# for p in embedder.parameters():
# p.requires_grad = False
def check_zero_grad(gradient):
data = gradient.data.clone().detach()
debug_is_zero = torch.sum(data) == 0
if debug_is_zero:
print(gradient)
embedder.conv_1.weight.register_hook(check_zero_grad)
dataset = dta.EmbeddingsDataset(embedder, dataset, False, dta.EmbeddingsDataset.path_to_cache, target_id, train_on_gpu) # set should cache to false so we see an effect on finetuning
lr = hyperparams_config['finetune']['adam']['lr']
optimizer = torch.optim.Adam([
{'params': model.parameters()},
{'params': embedder.parameters(), 'lr': lr/10}
], lr)
scheduler_info = hyperparams_config['finetune']['scheduler']
if scheduler_info['type'] == 'none':
schedulder = None
elif scheduler_info['type'] == 'decay':
schedulder = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer,
gamma=scheduler_info['decay'])
elif scheduler_info['type'] == 'cosine_w_restarts':
warmup = scheduler_info['warmup']
training = default_params.epochs - warmup
cycles = scheduler_info['cycles']
schedulder = utils.get_cosine_with_hard_restarts_schedule_with_warmup(optimizer, warmup, training, cycles)
criterion = nn.BCEWithLogitsLoss()#nn.CrossEntropyLoss()
# The `checkpoint_dir` parameter gets passed by Ray Tune when a checkpoint
# should be restored.
if checkpoint_dir:
checkpoint = os.path.join(checkpoint_dir, "checkpoint")
device = 'cuda' if train_on_gpu else 'cpu'
model_state, optimizer_state = torch.load(checkpoint, map_location=device)
model = utils.save_load_state_dict(model, model_state)
optimizer.load_state_dict(optimizer_state)
train_on_gpu = torch.cuda.is_available()
if train_on_gpu:
model = model.cuda()
criterion = criterion.cuda()
if torch.cuda.device_count() > 1:
model = nn.DataParallel(model)
finetune(model, optimizer, schedulder, criterion, dataset, train_on_gpu, default_params, target_id, use_tune)
def finetune(model, optimizer, schedulder, criterion, dataset, train_on_gpu: bool, p: TuningParams, target_id, use_tune: bool):
num_train = len(dataset)
indices = list(range(num_train))
train_idx, valid_idx, test_idx = utils.random_splits(indices, p.test_size, p.valid_size)
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
test_sampeler = SubsetRandomSampler(test_idx)
# prepare data loaders (combine dataset and sampler)
train_loader = DataLoader(dataset, batch_size=p.batch_size,
sampler=train_sampler, num_workers=0)
valid_loader = DataLoader(dataset, batch_size=p.batch_size,
sampler=valid_sampler, num_workers=0)
test_loader = DataLoader(dataset, batch_size=p.batch_size,
sampler=test_sampeler, num_workers=0)
print(model)
def compute_loss_and_accuracy(data, labels):
# latent = cnn(data).squeeze()
# y_prime = head(latent)
y_prime = model(data).squeeze()
y = binary_labels(labels)
if train_on_gpu:
y = y.cuda()
loss = criterion(y_prime, y)
# print('loss', loss)
y_sigm = torch.sigmoid(y_prime).detach()
predicted = (y_sigm > 0.5).type(torch.FloatTensor) # torch.argmax(l_prime, dim=1)
if train_on_gpu:
predicted = predicted.cuda()
same = predicted == y
same_sum = torch.sum(same).type(torch.float)
accuracy = same_sum / torch.numel(same)
return loss, accuracy
def save_model():
torch.save(model.state_dict(), f'{basepath_to_tuned_model}tuned_for_{target_id}.pt')
use_scaler = True
th.std_train_loop(p.epochs, p.batch_size, train_loader, valid_loader, model, optimizer, schedulder, use_scaler, compute_loss_and_accuracy, save_model, train_on_gpu, use_tune)
|
# Generated by Django 3.0.7 on 2020-08-20 22:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('qualification', '0004_maindocument_documents'),
]
operations = [
migrations.AddField(
model_name='staffdocument',
name='status',
field=models.CharField(choices=[('Uploaded', 'Uploaded'), ('Submitted', 'Submitted'), ('Accepted', 'Accepted'), ('Rejected', 'Rejected')], default='Uploaded', max_length=10, verbose_name='Status'),
),
]
|
import copy
import json
from typing import Dict, List, Type
from django.db import connection
from duckql import Query, Count, Constant, Operator
from . import Schema
class QueryWrapper:
def __init__(self, query: str, base_model: Type, user=None):
Query.update_forward_refs()
Operator.update_forward_refs()
self._query = Query.parse_raw(query)
self._schema = Schema(base_model, user)
@classmethod
def from_dict(cls, payload: Dict, base_model: Type, user=None):
# So dirty, so nasty, so sad
return QueryWrapper(json.dumps(payload), base_model, user)
@staticmethod
def _execute_query(query: Query) -> List[Dict]:
with connection.cursor() as cursor:
cursor.execute(str(query))
columns = [col[0] for col in cursor.description]
return [dict(zip(columns, row)) for row in cursor.fetchall()]
def execute(self) -> List[Dict]:
return self._execute_query(self._query)
def estimated_total(self) -> int:
subquery = copy.deepcopy(self._query)
subquery.limit = None
subquery.alias = '__s'
query = Query(
entity=subquery,
properties=[
Count(
property=Constant(value='*')
),
],
)
return self._execute_query(query)[0].get('count')
__all__ = [
'QueryWrapper'
]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-25 20:39
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('API', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Autor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Dealer_Catalogo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.CreateModel(
name='Genero',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tipo', models.CharField(max_length=50, unique=True)),
],
),
migrations.CreateModel(
name='Pedido_Lector',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.CreateModel(
name='Pedido_Libro',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.RemoveField(
model_name='comision',
name='pedido',
),
migrations.RenameField(
model_name='direccion',
old_name='nombre',
new_name='calle',
),
migrations.RemoveField(
model_name='libro',
name='pedido',
),
migrations.RemoveField(
model_name='pedido',
name='lector',
),
migrations.AddField(
model_name='ciudad',
name='region',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='API.Region'),
),
migrations.AddField(
model_name='dealer',
name='direccion',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='API.Direccion'),
),
migrations.AddField(
model_name='direccion',
name='ciudad',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='API.Ciudad'),
),
migrations.AddField(
model_name='direccion',
name='departamento',
field=models.CharField(blank=True, max_length=10, null=True),
),
migrations.AddField(
model_name='direccion',
name='numero',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='editorial',
name='direccion',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='API.Direccion'),
),
migrations.AddField(
model_name='lector',
name='direccion',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='API.Direccion'),
),
migrations.AlterField(
model_name='dealer',
name='correo',
field=models.EmailField(default=b'', max_length=254),
),
migrations.AlterField(
model_name='editorial',
name='correo',
field=models.EmailField(default=b'', max_length=254),
),
migrations.AlterField(
model_name='lector',
name='correo',
field=models.EmailField(blank=True, max_length=254, null=True),
),
migrations.AlterField(
model_name='libro',
name='autor',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='API.Autor'),
),
migrations.AlterField(
model_name='libro',
name='descripcion',
field=models.TextField(default=b''),
),
migrations.AlterField(
model_name='libro',
name='editorial',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='API.Editorial'),
),
migrations.AlterField(
model_name='libro',
name='genero',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='API.Genero'),
),
migrations.AlterField(
model_name='pedido',
name='dealer',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='API.Dealer'),
),
migrations.AlterField(
model_name='pedido',
name='total',
field=models.IntegerField(default=0),
),
migrations.AlterField(
model_name='rating',
name='dealer',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='API.Dealer'),
),
migrations.AlterField(
model_name='rating',
name='lector',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='API.Lector'),
),
migrations.DeleteModel(
name='Comision',
),
migrations.AddField(
model_name='pedido_libro',
name='libro',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='API.Libro'),
),
migrations.AddField(
model_name='pedido_libro',
name='pedido',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='API.Pedido'),
),
migrations.AddField(
model_name='pedido_lector',
name='lector',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='API.Lector'),
),
migrations.AddField(
model_name='pedido_lector',
name='pedido',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='API.Pedido'),
),
migrations.AddField(
model_name='dealer_catalogo',
name='dealer',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='API.Dealer'),
),
migrations.AddField(
model_name='dealer_catalogo',
name='libro',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='API.Libro'),
),
]
|
#!/usr/bin/env python
# coding: utf-8
import json
import numpy as np
from time import sleep
from redis import Redis
def range_generator(step=0.1):
""" Simulates range readings from 4 sensors
"""
p = 0.
phase = np.array([0, 0.5, 1, 1.5]) * np.pi
center = 300
k = 200
while True:
radii = center + k * np.cos(p+phase)
yield radii
phase += step
if __name__ == '__main__':
red = Redis('127.0.0.1', 6379)
g = range_generator()
for r in g:
        # redis-py cannot store a Python list directly, so serialize the readings to JSON first
        red.set('loc.post', json.dumps(r.tolist()))
sleep(1)
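        # A consumer can read the latest reading back with, for example:
        #   raw = red.get('loc.post')    # bytes, e.g. b'[500.0, 300.0, 100.0, 300.0]'
        #   radii = json.loads(raw)      # matches the json.dumps used above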
|
from django.urls import path, include, re_path
from shopify import views
urlpatterns = [
re_path(r'order', views.WebHook.as_view()),
]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2020-04-24 21:52:26
# @Author : Fallen (xdd043@qq.com)
# @Link : https://github.com/fallencrasher/python-learning
# @Version : $Id$
# Ask the user for a username, age and gender
# Instantiate a User object from that input
# The user may then type anything (exception handling must not be used):
#   if the input is an attribute name, print the attribute's value
#   if the input is a method name, call that method
#   if the input is neither, do nothing
class User:
def __init__(self,name,age,sex):
self.name = name
self.age = age
self.sex = sex
def eat(self):
print('eating')
def sleep(self):
print('sleeping')
def run():
username = input('username:')
age = input('age:')
sex = input('sex:')
temp = User(username,age,sex)
while True:
judge = input('operation(attribute or method of User), input "q" or "quit" to quit.\n >>>').strip()
if judge.lower()=='q' or judge.lower()=='quit':
break
else:
if hasattr(temp,judge):
if callable(getattr(temp,judge)):
getattr(temp,judge)()
else:
print(getattr(temp,judge))
else:
continue
if __name__ == '__main__':
run()
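# Example session (illustrative input/output):
#   username:alice   age:30   sex:F
#   >>>eat    -> prints 'eating'  (callable attribute, so it is invoked)
#   >>>name   -> prints 'alice'   (plain attribute, so its value is printed)
#   >>>quit   -> leaves the loop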
|
from django.db import models
from tracking.settings import DEFAULT_IDENTIFIER_SHOP
# Create your models here.
class LinkShop(models.Model):
key = models.CharField(max_length = 15, verbose_name = 'code', primary_key = True, default = DEFAULT_IDENTIFIER_SHOP)
link = models.CharField(max_length = 300, verbose_name = 'Enlace', blank = True, null = True)
class Meta:
verbose_name = 'Enlace de tienda'
|
from selenium import webdriver
driver = webdriver.Chrome()
driver.get('https://web.whatsapp.com')
name = input('Enter name of user / group : ')
msg = input('Enter message : ')
count = int(input('Enter count : '))
msgList = msg.split()
input('Enter anything after scanning QR code')
user = driver.find_element_by_xpath('//span[@title = "{}"]'.format(name))
user.click()
msgBox = driver.find_element_by_class_name('_3uMse')
for i in range(count):
for j in range(len(msgList)):
msgBox.send_keys(msgList[j])
button = driver.find_element_by_class_name('_1U1xa')
button.click()
|
__author__ = 'nulysse'
import csv
import simplekml
import os
import ConfigParser
import collections
import sys
from geopy.distance import great_circle
line = collections.namedtuple('line', 'name lat_col_index lon_col_index color mark_time timestep')
_CSV_Path = r'C:\Ascent\Development\A350-FFS\DEVELOPMENT\Tools\CSVToKML\Dumps\TestStation2\A350-FFS_Tue_May_24_07-09-52_2016.bus.csv'
_color_map = {'red': simplekml.Color.red,
'blue': simplekml.Color.blue,
'green': simplekml.Color.green,
'yellow': simplekml.Color.yellow}
time_index = 0
start_time = 5
end_time = 15
ref_speed_in_kts = 250
linelist = list()
def loadConfig(configFilePath, CSVfirstRow):
"""
Load the INI Configuration file data
From the config file, load:
- Start and End time
- name of the time column
- for each trajectory, the name of the column, the sampling time and color
:param configFilePath: path to the INI configuration file
:param CSVfirstRow: first row in the CSV file
"""
global start_time,end_time,time_index,ref_speed_in_kts
config = ConfigParser.RawConfigParser()
config.read(configFilePath)
start_time = config.getfloat('GENERAL', 'STARTTIME')
end_time = config.getfloat('GENERAL', 'ENDTIME')
timeColumnName = config.get('GENERAL', 'TIMECOLNAME')
time_index = CSVfirstRow.index(timeColumnName)
for section in config.sections():
if 'traj' in section:
latcolname = config.get(section, 'LATCOLNAME')
loncolname = config.get(section, 'LONCOLNAME')
try:
latcolindex = CSVfirstRow.index(latcolname)
except ValueError:
sys.stderr.write('could not find {!s} in the CSV file'.format(latcolname))
sys.exit(-1)
try:
loncolindex = CSVfirstRow.index(loncolname)
except ValueError:
sys.stderr.write('could not find {!s} in the CSV file'.format(loncolname))
sys.exit(-1)
try:
ref_speed_in_kts=config.getfloat(section, "REFSPEEDKTS")
except:
ref_speed_in_kts = 250
try:
marktime=config.getboolean(section, "MARKTIME")
except:
marktime = False
try:
timestep=config.getfloat(section, "TIMESTEP")
except:
timestep = 0.01
try:
color= _color_map[config.get(section, "COLOR")]
except:
color = simplekml.Color.red
linelist.append(line(name=section,
lat_col_index=latcolindex,
lon_col_index=loncolindex,
color=color,
mark_time=marktime,
timestep=timestep))
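# A minimal config.ini sketch matching the keys read by loadConfig above
# (section and column names are illustrative; COLOR must be one of red/blue/green/yellow):
#
#   [GENERAL]
#   STARTTIME = 5
#   ENDTIME = 15
#   TIMECOLNAME = Time
#
#   [traj_main]
#   LATCOLNAME = LatDeg
#   LONCOLNAME = LonDeg
#   COLOR = red
#   MARKTIME = true
#   TIMESTEP = 0.5
#   REFSPEEDKTS = 250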
def isValidLine(row, traj_data):
"""
:param row:
:return:
"""
return row[traj_data.lat_col_index]!=' ' and row[traj_data.lon_col_index]!=' '
def isInTimeWindow(row):
return float(row[time_index])>=start_time and float(row[time_index])<=end_time
def get_last_speed(current_time, id):
return great_circle(line_coord[id][-1], line_coord[id][-2]).nm / (current_time - line_times[id]) * 3600
if __name__ == '__main__':
KMLDoc = simplekml.Kml()
timepnt = KMLDoc.newmultigeometry(name="Times")
with open(_CSV_Path, 'rb') as csvfile:
positionreader = csv.reader(csvfile)
firstline = True
linecount = 0
last_time = -10.0
for row in positionreader:
if firstline:
loadConfig(os.path.join(os.path.dirname(_CSV_Path), "config.ini"), row)
line_coord = list()
line_times = list()
for id in range(len(linelist)):
line_coord.append(list())
line_times.append(-10.0)
else:
for id in range(len(linelist)):
if isValidLine(row, linelist[id]) and isInTimeWindow(row) and (float(row[time_index]) - line_times[id])>= linelist[id].timestep:
line_coord[id].append((row[linelist[id].lon_col_index], row[linelist[id].lat_col_index]))
if linelist[id].mark_time:
timepnt = KMLDoc.newpoint(name="Time is {!s}".format(row[time_index]),
description="Time is {!s}".format(row[time_index]),
coords=[(row[linelist[id].lon_col_index], row[linelist[id].lat_col_index])])
if len(line_coord[id])>2:
if get_last_speed(float(row[time_index]), id) > (ref_speed_in_kts + (ref_speed_in_kts*5.0)):
print 'potential jump for trajectory {!s} at time {!s}'.format(linelist[id].name, row[time_index])
line_times[id] = float(row[time_index])
firstline = False
linecount+=1
trajectory = list()
for id in range(len(linelist)):
trajectory.append(KMLDoc.newmultigeometry(name=linelist[id].name))
        trajectory[id].newlinestring(name='test', description=row[time_index], coords=line_coord[id])
trajectory[id].style.linestyle.color = linelist[id].color # Red
trajectory[id].style.linestyle.width = 10 # 10 pixels
trajectory[id].style.iconstyle.scale = 3 # Icon thrice as big
trajectory[id].style.iconstyle.icon.href = 'http://maps.google.com/mapfiles/kml/shapes/info-i.png'
KMLDoc.save(os.path.join(os.path.dirname(_CSV_Path), "lines"+".kml"))
|
# I pledge my Honor that I have abided by the Stevens Honor System
from math import sqrt
global response_3
def intro():
try:
print("\nThis program will allow you to perform either mathematical or string operations")
print("For Mathematical Functions, Please Enter the Number 1")
print("For String Operations, Please Enter the Number 2\n")
response_1 = int(input("Please select the operation you would like to perform:\n"))
if response_1 == 1:
math_intro()
elif response_1 == 2:
string_intro()
else:
print("Your response was invalid, please select an option 1 or 2")
intro()
print("\nIf you would like to go again input 1")
print("If you are finished input 2")
response_3 = int(input("Would you like to go again??\n"))
if response_3 == 1:
intro()
elif response_3 == 2:
exit()
else:
print("Your input wasn't valid, so we will have you go again")
intro()
except ValueError:
print("\nYour input was invalid, please use options 1 or 2")
intro()
def math_intro():
try:
print("\nFor Addition, Please Enter the Number 1")
print("For Subtraction, Please Enter the Number 2")
print("For Multiplication, Please Enter the Number 3")
print("For Division, Please Enter the Number 4")
response_2 = int(input(""))
if response_2 == 1:
x, y = numbers()
z = x + y
print("The sum of", x, "and", y, "is", z)
elif response_2 == 2:
x, y = numbers()
z = x - y
print("The difference of", x, "and", y, "is", z)
elif response_2 == 3:
x, y = numbers()
z = x * y
print("The product of", x, "and", y, "is", z)
elif response_2 == 4:
x, y = numbers()
if y != 0:
z = x / y
print("The quotient of", x, "and", y, "is", z)
elif y == 0:
print("You cannot divide by 0, please reselect your desired math operation and your numbers again")
math_intro()
else:
print("Your response was proper type, but did not select a operation, please selection an option 1 - 4")
math_intro()
except ValueError:
print("Please input a valid response")
math_intro()
def numbers():
try:
x = float(input("\nPlease input your first number: "))
y = float(input("Please input your second number: "))
print("")
return x, y
    except ValueError:
        print("The inputs you provided are invalid for this operation, please provide proper inputs")
        return numbers()
def string_intro():
try:
print("\nTo Determine the Number of Vowels in a String; Enter the Number 1")
print("To Encrypt a String; Enter the Number 2")
response_2 = int(input(""))
if response_2 == 1:
vowel_count()
elif response_2 == 2:
encryption()
else:
print("Please input a valid response")
string_intro()
except ValueError:
print("Please input a valid response")
string_intro()
def vowel_count():
string = str(input("\nPlease input your string below: \n"))
vowel_counter = 0
y_counter = 0
vowel = ["a", "e", "i", "o", "u", "A", "E", "I", "O", "U"]
for i in string:
if i in vowel:
vowel_counter += 1
elif i == "y":
y_counter += 1
elif i == "Y":
y_counter += 1
print("\nThe string has", vowel_counter, "vowels in it.")
if y_counter != 0:
print("Sometimes y is considered a vowel, and your string has", y_counter, "y's in it.")
def encryption():
string = str(input("\nPlease input your string below: \n"))
encryptiod = string.split(" ")
encryptiod = encryptiod[-1]
encryptiod_num = []
string_num = []
for i in encryptiod:
encryptiod_num.append(ord(i))
encryptiod_num = (sum(encryptiod_num) / len(encryptiod_num))
encryptiod_num = round(sqrt(encryptiod_num))
for i in string:
string_num.append(ord(i))
for i in range(len(string_num)):
string_num[i] = string_num[i] + encryptiod_num
string_num[i] = chr(string_num[i])
print("\nYour encrypted message is:")
print(''.join(['%-2s' % (i,) for i in string_num]))
intro()
|
from flask import Flask, render_template, redirect, url_for, flash, abort, request, send_from_directory
from flask_bootstrap import Bootstrap
import os
from datetime import datetime
from os import listdir
from os.path import isfile, join
import xlsxwriter
app = Flask(__name__)
def create_directories():
"""Adds a directory for the day the program is being used to store all of the combined schedules
hat are created"""
current_directory = os.getcwd()
combined_schedules_dir = os.path.join(current_directory, "static", "Files")
today_date = datetime.now().strftime("%I-%M-%S")
today_schedules = os.path.join(combined_schedules_dir, today_date)
if not os.path.exists(combined_schedules_dir):
os.makedirs(combined_schedules_dir)
return today_schedules
def directory_path():
current_directory = os.getcwd()
combined_schedules_dir = os.path.join(current_directory, "static", "Files")
return combined_schedules_dir
@app.route('/', methods=["GET", "POST"])
def home():
workbook = xlsxwriter.Workbook(f"{create_directories()}.xlsx")
worksheet = workbook.add_worksheet()
worksheet.write(0, 0, 25)
workbook.close()
return render_template("index.html")
@app.route('/check-files', methods=["GET"])
def check_files():
onlyfiles = [f for f in listdir(directory_path()) if isfile(join(directory_path(), f))]
print(onlyfiles)
return render_template("excel_files.html", files=onlyfiles)
@app.route('/download', methods=["GET"])
def download():
file = request.args.get("filename")
return send_from_directory("static", filename=f"Files/{file}")
if __name__ == "__main__":
app.run()
|
from maintenance import *
print('*** Maintenance Mode ***')
|
# Generated by Django 3.1.4 on 2021-06-28 13:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Car', '0005_auto_20210627_2013'),
]
operations = [
migrations.AlterField(
model_name='car_rent',
name='status',
field=models.CharField(choices=[('на', 'На рассмотрении'), ('за', 'Оформлено'), ('оп', 'Оплачено'), ('во', 'Возврат средств'), ('дз', 'На доставке заказчику'), ('ак', 'Активно'), ('дл', 'На доставке в локацию'), ('вы', 'Выполнено'), ('от', 'Отменено'), ('шт', 'Ожидание оплаты штрафа')], default='на', max_length=20),
),
]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-18 08:30
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('issue', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='issuerecord',
name='issue_status',
field=models.BooleanField(default=False, verbose_name='发布状态'),
),
migrations.AddField(
model_name='issuerecord',
name='svn_path',
field=models.CharField(max_length=500, null=True, verbose_name='svn路径'),
),
]
|
import unittest
from katas.kyu_6.persistent_bugger import persistence
class PersistenceTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(persistence(39), 3)
def test_equals_2(self):
self.assertEqual(persistence(4), 0)
def test_equals_3(self):
self.assertEqual(persistence(25), 2)
def test_equals_4(self):
self.assertEqual(persistence(999), 4)
|
# Generated by Django 2.0 on 2018-03-16 05:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('stocks', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='stock',
name='price',
),
migrations.AlterField(
model_name='stock',
name='ask',
field=models.FloatField(default=None, null=True),
),
migrations.AlterField(
model_name='stock',
name='bid',
field=models.FloatField(default=None, null=True),
),
migrations.AlterField(
model_name='stock',
name='last',
field=models.FloatField(default=None, null=True),
),
]
|
from rdflib.namespace import RDF
from source.utils import id2uri, g_add_with_valid
import csv
import json
def create_ttl(g, u, row):
"""
geneid: 10018
genename: BCL2L11
geneclaimname: BCL2L11
interactionclaimsource: PharmGKB
interactiontypes: NULL
sid: 103245522
cid: 5291
drugname: IMATINIB
drugclaimname: imatinib
drugclaimprimaryname: imatinib
drugchemblid: CHEMBL941
pmids: 24223824
cmpdname: Imatinib
dois: 10.1371/journal.pone.0078582
"""
gid = id2uri(row["geneid"], "gid")
sid = id2uri(row["sid"], "sid")
cid = id2uri(row["cid"], "cid")
pmid = id2uri(row["pmids"], "pmid")
g_add_with_valid(g, gid, RDF.type, u.gid)
g_add_with_valid(g, gid, u.gid2pmid, pmid)
g_add_with_valid(g, sid, RDF.type, u.sid)
g_add_with_valid(g, sid, u.sid2cid, cid)
g_add_with_valid(g, cid, RDF.type, u.cid)
g_add_with_valid(g, pmid, RDF.type, u.pmid)
return g
|
from django.shortcuts import render
from django.urls import reverse_lazy
from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView
from django.contrib.auth.mixins import PermissionRequiredMixin, UserPassesTestMixin
from django.contrib.admin.views.decorators import staff_member_required
from .models import Author, Genre, Book
# Create your views here.
class StaffRequiredMixin(UserPassesTestMixin):
def test_func(self):
return self.request.user.is_staff
class AuthorView(ListView):
template_name = 'authors.html'
model = Author
class AuthorDetailView(PermissionRequiredMixin, DetailView):
template_name = 'author_detail.html'
model = Author
permission_required = 'viewer.view_author'
class AuthorCreateView(PermissionRequiredMixin, CreateView):
model = Author
fields = '__all__'
template_name = "form.html"
success_url = reverse_lazy('authors')
permission_required = 'viewer.add_author'
class AuthorUpdateView(PermissionRequiredMixin, UpdateView):
template_name = 'form.html'
model = Author
fields = '__all__'
success_url = reverse_lazy('authors')
permission_required = 'viewer.change_author'
class AuthorDeleteView(StaffRequiredMixin, PermissionRequiredMixin, DeleteView):
model = Author
template_name = "author_delete.html"
success_url = reverse_lazy('authors')
permission_required = 'viewer.delete_author'
class GenreView(ListView):
template_name = 'genres.html'
model = Genre
class GenreDetailView(PermissionRequiredMixin, DetailView):
template_name = 'genre_detail.html'
model = Genre
permission_required = 'viewer.view_genre'
class GenreCreateView(PermissionRequiredMixin, CreateView):
model = Genre
fields = '__all__'
template_name = "form.html"
success_url = reverse_lazy('genres')
permission_required = 'viewer.add_genre'
class GenreUpdateView(PermissionRequiredMixin, UpdateView):
template_name = 'form.html'
model = Genre
fields = '__all__'
success_url = reverse_lazy('genres')
permission_required = 'viewer.change_genre'
class GenreDeleteView(StaffRequiredMixin, PermissionRequiredMixin, DeleteView):
model = Genre
template_name = "genre_delete.html"
success_url = reverse_lazy('genres')
permission_required = 'viewer.delete_genre'
class BookView(ListView):
template_name = 'books.html'
model = Book
paginate_by = 2
def get_paginate_by(self, queryset):
return self.request.GET.get("paginate_by", self.paginate_by)
class BookDetailView(PermissionRequiredMixin, DetailView):
template_name = 'book_detail.html'
model = Book
permission_required = 'viewer.view_book'
class BookCreateView(PermissionRequiredMixin, CreateView):
model = Book
fields = '__all__'
template_name = "form.html"
success_url = reverse_lazy('books')
permission_required = 'viewer.add_book'
class BookUpdateView(PermissionRequiredMixin, UpdateView):
template_name = 'form.html'
model = Book
fields = '__all__'
success_url = reverse_lazy('books')
permission_required = 'viewer.change_book'
class BookDeleteView(StaffRequiredMixin, PermissionRequiredMixin, DeleteView):
model = Book
template_name = "book_delete.html"
success_url = reverse_lazy('books')
permission_required = 'viewer.delete_book'
|
#!/usr/bin/env python3
import datetime
import json
import pathlib
import fire
import toml
def main(chain_name, subdir='gaia', skip_sig_checks=True):
if not skip_sig_checks:
# idea is for request submitter to sign with account key
raise Exception('not implemented!')
genesis = dict()
genesis['genesis_time'] = datetime.datetime.utcnow().isoformat()[:19] + 'Z'
genesis['chain_id'] = chain_name
genesis['validators'] = list()
genesis['app_options'] = dict()
genesis['app_options']['accounts'] = list()
# two unclear items
genesis['app_hash'] = ''
genesis['app_options']['plugin_options'] = [
"coin/issuer", {"app": "sigs", "addr": "B01C264BFE9CBD45458256E613A6F07061A3A6B6"}
]
path_base_str = '{chain_name}/{subdir}'.format(**locals())
path_base = pathlib.Path(path_base_str)
for inclusion_request in path_base.glob('genesis-inclusion-requests/*.toml'):
print(":: adding {inclusion_request}".format(**locals()))
request = toml.load(str(inclusion_request))
# add validator node entries
for validator in request['validator']:
print(validator)
validator_entry = {
"name": validator['name'],
"pub_key": {
"data": validator['pub_key'],
"type": "ed25519",
},
"power": validator['power'],
}
genesis['validators'].append(validator_entry)
# add account with coins
account = request['account']
account_entry = {
"name": account['name'],
"address": account['address'],
"coins": [],
}
for coin_name, coin_amount in request['coins'].items():
coin_entry = {
"denom": coin_name,
"amount": coin_amount,
}
account_entry['coins'].append(coin_entry)
genesis['app_options']['accounts'].append(account_entry)
# write genesis.json
with (path_base / 'genesis.json').open('w') as fh:
fh.write(json.dumps(genesis, indent=2))
if '__main__' == __name__:
fire.Fire(main)
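# A sketch of one genesis-inclusion-requests/<name>.toml file as read by main()
# (names, keys, addresses and coin denominations are placeholders):
#
#   [[validator]]
#   name = "my-validator"
#   pub_key = "<base64 ed25519 public key>"
#   power = 10
#
#   [account]
#   name = "my-account"
#   address = "<hex account address>"
#
#   [coins]
#   mycoin = 1000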
|
# Generated by Django 2.0.3 on 2018-03-13 07:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('stock', '0003_auto_20180313_0417'),
]
operations = [
migrations.CreateModel(
name='Pixel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, default=None, max_length=10, verbose_name='наименование P')),
('price', models.IntegerField(blank=True, default=0, verbose_name='цена')),
('vid', models.CharField(choices=[(1, 'Интерьерные'), (2, 'Уличный')], max_length=10, verbose_name='Вид эксплоатации')),
],
options={
'verbose_name': 'модель пиксиля',
'verbose_name_plural': 'модели пиксиля',
},
),
migrations.RemoveField(
model_name='product',
name='prices',
),
migrations.AddField(
model_name='pixel',
name='product',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stock.Product', verbose_name='Товар'),
),
]
|
"""
Ask the user for 3 numbers.
Print all the numbers.
Find and print the maximum and minimum number.
Print the numbers in ascending and descending order.
"""
pervoechislo = int(input("Введите 1 число\n"))
vtoroechislo = int(input("Введите 2 число\n"))
tretiechislo = int(input("Введите 3 число\n"))
max = 0
min = 0
var = 0
print("1 число", pervoechislo, "\n",
"2 число", vtoroechislo, "\n",
"3 число", tretiechislo, "\n")
# Chained comparisons like a > b > c miss cases such as (5, 1, 3), so compare pairwise instead
if pervoechislo >= vtoroechislo and pervoechislo >= tretiechislo:
    max = pervoechislo
elif vtoroechislo >= pervoechislo and vtoroechislo >= tretiechislo:
    max = vtoroechislo
else:
    max = tretiechislo
if pervoechislo <= vtoroechislo and pervoechislo <= tretiechislo:
    min = pervoechislo
elif vtoroechislo <= pervoechislo and vtoroechislo <= tretiechislo:
    min = vtoroechislo
else:
    min = tretiechislo
if max == pervoechislo and min == vtoroechislo:
var = tretiechislo
elif max == vtoroechislo and min == pervoechislo:
var = tretiechislo
elif max == pervoechislo and min == tretiechislo:
var = vtoroechislo
elif max == tretiechislo and min == pervoechislo:
var = vtoroechislo
else:
var = pervoechislo
print("Максимальное число = ", max)
print("Минимальное число = ", min)
print("Числа по возрастанию: ", min," ", var," ", max)
print("Числа по убыванию: ", max," ", var," ", min)
input("Для продолжения нажмите ENTER\n")
|
#!/usr/bin/env python3
#
# A tool to test the effect (number of pgs, objects, bytes moved) of a
# crushmap change. This is a wrapper around osdmaptool, hardly relying
# on its --test-map-pgs-dump option to get the list of changed pgs.
# Additionally it uses pg stats to calculate the numbers of objects
# and bytes moved.
#
# Typical usage:
#
# # Get current crushmap
# $ crushdiff export cm.txt
# # Edit the map
# $ $EDITOR cm.txt
# # Check the result
# $ crushdiff compare cm.txt
# # Install the updated map
# $ crushdiff import cm.txt
#
# By default, crushdiff will use the cluster current osdmap and pg
# stats, which requires access to the cluster. But one can use the
# --osdmap and --pg-dump options to test against previously obtained
# data.
#
import argparse
import re
import json
import os
import sys
import tempfile
#
# Global
#
parser = argparse.ArgumentParser(prog='crushdiff',
description='Tool for updating crush map')
parser.add_argument(
'command',
metavar='compare|export|import',
help='command',
default=None,
)
parser.add_argument(
'crushmap',
metavar='crushmap',
help='crushmap json file',
default=None,
)
parser.add_argument(
'-m', '--osdmap',
metavar='osdmap',
help='',
default=None,
)
parser.add_argument(
'-p', '--pg-dump',
metavar='pg-dump',
help='`ceph pg dump` json output',
default=None,
)
parser.add_argument(
'-v', '--verbose',
action='store_true',
help='be verbose',
default=False,
)
#
# Functions
#
def get_human_readable(bytes, precision=2):
suffixes = ['', 'Ki', 'Mi', 'Gi', 'Ti']
suffix_index = 0
while bytes > 1024 and suffix_index < 4:
# increment the index of the suffix
suffix_index += 1
# apply the division
bytes = bytes / 1024.0
return '%.*f%s' % (precision, bytes, suffixes[suffix_index])
def run_cmd(cmd, verbose=False):
if verbose:
print(cmd, file=sys.stderr, flush=True)
os.system(cmd)
def get_osdmap(file):
with open(file, "r") as f:
return json.load(f)
def get_pools(osdmap):
return {p['pool']: p for p in osdmap['pools']}
def get_erasure_code_profiles(osdmap):
return osdmap['erasure_code_profiles']
def get_pgmap(pg_dump_file):
with open(pg_dump_file, "r") as f:
return json.load(f)['pg_map']
def get_pg_stats(pgmap):
return {pg['pgid']: pg for pg in pgmap['pg_stats']}
def parse_test_map_pgs_dump(file):
# Format:
# pool 1 pg_num 16
# 1.0 [1,0,2] 1
# 1.1 [2,0,1] 2
# ...
# pool 2 pg_num 32
# 2.0 [2,1,0] 2
# 2.1 [2,1,0] 2
# ...
# #osd count first primary c wt wt
# osd.1 208 123 123 0.098587 1
pgs = {}
with open(file, "r") as f:
pool = None
for l in f.readlines():
            m = re.match(r'^pool (\d+) pg_num (\d+)', l)
if m:
pool = m.group(1)
continue
if not pool:
continue
            m = re.match(r'^#osd', l)
if m:
break
            m = re.match(r'^(\d+\.[0-9a-f]+)\s+\[([\d,]+)\]', l)
if not m:
continue
pgid = m.group(1)
osds = [int(x) for x in m.group(2).split(',')]
pgs[pgid] = osds
return pgs
def do_compare(new_crushmap_file_txt, osdmap=None, pg_dump=None, verbose=False):
with tempfile.TemporaryDirectory() as tmpdirname:
new_crushmap_file = os.path.join(tmpdirname, 'crushmap')
run_cmd('crushtool -c {} -o {}'.format(new_crushmap_file_txt,
new_crushmap_file), verbose)
osdmap_file = os.path.join(tmpdirname, 'osdmap')
if osdmap:
run_cmd('cp {} {}'.format(osdmap, osdmap_file), verbose)
else:
run_cmd('ceph osd getmap -o {}'.format(osdmap_file), verbose)
if not pg_dump:
pg_dump = os.path.join(tmpdirname, 'pg_dump.json')
run_cmd('ceph pg dump --format json > {}'.format(pg_dump), verbose)
old_test_map_pgs_dump = os.path.join(tmpdirname, 'pgs.old.txt')
run_cmd('osdmaptool {} --test-map-pgs-dump > {}'.format(
osdmap_file, old_test_map_pgs_dump), verbose)
if verbose:
run_cmd('cat {} >&2'.format(old_test_map_pgs_dump), True)
new_test_map_pgs_dump = os.path.join(tmpdirname, 'pgs.new.txt')
run_cmd(
'osdmaptool {} --import-crush {} --test-map-pgs-dump > {}'.format(
osdmap_file, new_crushmap_file, new_test_map_pgs_dump), verbose)
if verbose:
run_cmd('cat {} >&2'.format(new_test_map_pgs_dump), True)
osdmap_file_json = os.path.join(tmpdirname, 'osdmap.json')
run_cmd('osdmaptool {} --dump json > {}'.format(
osdmap_file, osdmap_file_json), verbose)
osdmap = get_osdmap(osdmap_file_json)
pools = get_pools(osdmap)
ec_profiles = get_erasure_code_profiles(osdmap)
pgmap = get_pgmap(pg_dump)
pg_stats = get_pg_stats(pgmap)
old_pgs = parse_test_map_pgs_dump(old_test_map_pgs_dump)
new_pgs = parse_test_map_pgs_dump(new_test_map_pgs_dump)
diff_pg_count = 0
total_object_count = 0
diff_object_count = 0
for pgid in old_pgs:
objects = pg_stats[pgid]['stat_sum']['num_objects']
total_object_count += objects
if old_pgs[pgid] == new_pgs[pgid]:
continue
pool_id = int(pgid.split('.')[0])
if len(new_pgs[pgid]) < pools[pool_id]['size']:
print("WARNING: {} will be undersized ({})".format(
pgid, new_pgs[pgid]), file=sys.stderr, flush=True)
if not pools[pool_id]['erasure_code_profile'] and \
sorted(old_pgs[pgid]) == sorted(new_pgs[pgid]):
continue
if verbose:
print("{}\t{} -> {}".format(pgid, old_pgs[pgid], new_pgs[pgid]),
file=sys.stderr, flush=True)
diff_pg_count += 1
diff_object_count += objects
print("{}/{} ({:.2f}%) pgs affected".format(
diff_pg_count, len(old_pgs),
100 * diff_pg_count / len(old_pgs) if len(old_pgs) else 0),
flush=True)
print("{}/{} ({:.2f}%) objects affected".format(
diff_object_count, total_object_count,
100 * diff_object_count / total_object_count \
if total_object_count else 0), flush=True)
total_pg_shard_count = 0
diff_pg_shard_count = 0
total_object_shard_count = 0
diff_object_shard_count = 0
total_bytes = 0
diff_bytes = 0
for pgid in old_pgs:
pool_id = int(pgid.split('.')[0])
ec_profile = pools[pool_id]['erasure_code_profile']
if ec_profile:
k = int(ec_profiles[ec_profile]['k'])
m = int(ec_profiles[ec_profile]['m'])
else:
k = 1
m = pools[pool_id]['size'] - 1
bytes = pg_stats[pgid]['stat_sum']['num_bytes'] + \
pg_stats[pgid]['stat_sum']['num_omap_bytes']
objects = pg_stats[pgid]['stat_sum']['num_objects']
total_pg_shard_count += len(old_pgs[pgid])
total_object_shard_count += objects * (k + m)
total_bytes += bytes * (k + m) / k
if old_pgs[pgid] == new_pgs[pgid]:
continue
old_count = diff_pg_shard_count
if ec_profile:
for i in range(len(old_pgs[pgid])):
if old_pgs[pgid][i] != new_pgs[pgid][i]:
diff_pg_shard_count += 1
diff_object_shard_count += objects
diff_bytes += bytes / k
else:
for osd in old_pgs[pgid]:
if osd not in new_pgs[pgid]:
diff_pg_shard_count += 1
diff_object_shard_count += objects
diff_bytes += bytes / k
if old_count == diff_pg_shard_count:
continue
if verbose:
print("{}\t{} -> {}".format(pgid, old_pgs[pgid], new_pgs[pgid]),
file=sys.stderr, flush=True)
print("{}/{} ({:.2f}%) pg shards to move".format(
diff_pg_shard_count, total_pg_shard_count,
100 * diff_pg_shard_count / total_pg_shard_count \
if total_pg_shard_count else 0), flush=True)
print("{}/{} ({:.2f}%) pg object shards to move".format(
diff_object_shard_count, total_object_shard_count,
100 * diff_object_shard_count / total_object_shard_count \
if total_object_shard_count else 0), flush=True)
print("{}/{} ({:.2f}%) bytes to move".format(
get_human_readable(int(diff_bytes)),
get_human_readable(int(total_bytes)),
100 * diff_bytes / total_bytes if total_bytes else 0),
flush=True)
def do_export(crushmap_file_txt, osdmap_file=None, verbose=False):
with tempfile.TemporaryDirectory() as tmpdirname:
if not osdmap_file:
osdmap_file = os.path.join(tmpdirname, 'osdmap')
run_cmd('ceph osd getmap -o {}'.format(osdmap_file), verbose)
crushmap_file = os.path.join(tmpdirname, 'crushmap')
run_cmd('osdmaptool {} --export-crush {}'.format(
osdmap_file, crushmap_file), verbose)
run_cmd('crushtool -d {} -o {}'.format(crushmap_file,
crushmap_file_txt), verbose)
def do_import(crushmap_file_txt, osdmap=None, verbose=False):
with tempfile.TemporaryDirectory() as tmpdirname:
crushmap_file = os.path.join(tmpdirname, 'crushmap')
run_cmd('crushtool -c {} -o {}'.format(crushmap_file_txt,
crushmap_file), verbose)
if osdmap:
run_cmd('osdmaptool {} --import-crush {}'.format(
osdmap, crushmap_file), verbose)
else:
run_cmd('ceph osd setcrushmap -i {}'.format(crushmap_file), verbose)
def main():
args = parser.parse_args()
if args.command == 'compare':
do_compare(args.crushmap, args.osdmap, args.pg_dump, args.verbose)
elif args.command == 'export':
do_export(args.crushmap, args.osdmap, args.verbose)
elif args.command == 'import':
do_import(args.crushmap, args.osdmap, args.verbose)
#
# main
#
main()
|
__all__ = ['zookeeper', 'mcpack']
import zookeeper
import mcpack
|
#!/usr/bin/env python3
""" classify swiss german dialects """
|
#!/usr/bin/python
"""
Using the tau amino acid sequence from 5o3l, this script threads a sliding
frame of 5 tau residues into the substrate of Htra1 protease 3nzi, then runs a
FastRelax. The aim is to determine the most favorable docking points along the
tau chain based only on sequence.
"""
from os import makedirs
from os.path import basename, isdir, join
from pyrosetta import *
from pyrosetta.rosetta.core.id import AtomID
from pyrosetta.rosetta.core.kinematics import FoldTree
from pyrosetta.rosetta.core.pack.task.operation import \
ExtraRotamers, IncludeCurrent, RestrictToRepacking
from pyrosetta.rosetta.protocols.enzdes import ADD_NEW, AddOrRemoveMatchCsts
from pyrosetta.rosetta.protocols.relax import FastRelax
from pyrosetta.teaching import SimpleThreadingMover
from pyrosetta.toolbox import mutate_residue
from sys import exit
tau_seq = 'AKSRLQTAPVPMPDLKNVKSKIGSTENLKHQPGGGK\
VQIVYKPVDLSKVTSKCGSLGNIHHKPGGGQVEVKSEKLDFKDRVQSKIGSLDNITHVPGGGNKKIETHKLTF\
RENAKAKTDHGAEIVYKSPVV'
def apply_constraints(pose):
""" Applies enzdes constraints form the input CST file to a pose """
cstm = AddOrRemoveMatchCsts()
cstm.set_cst_action(ADD_NEW)
cstm.apply(pose)
return pose
def make_fold_tree():
"""
Make a fold tree that connects the first catalytic serine to the
substrate scissile residue. More efficient sampling.
Presently hard-coded for Htra1 protease
S328 is residue 169.
    Last residue of protease chain A is 211
    Scissile residue of substrate chain B is 216
    Substrate chain B is residues 212-216
"""
ft = FoldTree()
ft.add_edge(169, 1, -1)
ft.add_edge(169, 211, -1)
ft.add_edge(169, 216, 1)
ft.add_edge(216 ,212, -1)
assert ft.check_fold_tree()
return ft
def setup_fastrelax(sf):
"""
Creates FastRelax mover with appropriate score function, movemap, and
packer rules. List of neighbor residues was generated using a 10A
neighborhood residue selector around the peptide chain.
"""
relax = FastRelax()
relax.set_scorefxn(sf)
# MoveMap
mm = MoveMap()
mm.set_bb_true_range(212,216)
neighbors = [45, 46, 59, 60, 62, 124, 126, 145, 148, 149, 150, 157, 164,
165, 166, 167, 168, 170, 171, 183, 184, 185, 186, 187, 188, 189, 192,
193, 194, 195, 212, 213, 214, 215, 216] # Did 10A selection separately
for n in neighbors:
mm.set_chi(n, True)
relax.set_movemap(mm)
# Packer tasks
tf = standard_task_factory()
tf.push_back(RestrictToRepacking())
tf.push_back(IncludeCurrent())
tf.push_back(ExtraRotamers(0, 1, 1))
tf.push_back(ExtraRotamers(0, 2, 1))
relax.set_task_factory(tf)
return relax
def thread_to_htra1(sequence, pose):
"""
Uses SimpleThreadingMover to swap out the native substrate and put in a
new test sequence docked with Htra1 protease. The substrate peptide begins
at residue 212 of the pose, based on 3nzi.
"""
assert len(sequence) == 5
# Constructing and applying mover
tm = SimpleThreadingMover(sequence, 212)
threaded_pose = Pose()
threaded_pose.assign(pose)
tm.apply(threaded_pose)
return threaded_pose
def main():
# Destination folder for PDB files
out_dir = 'cat_tau_slide'
if not isdir(out_dir):
makedirs(out_dir)
# Initialize Rosetta
opts = '-enzdes::cstfile htra1_cat_general.cst \
-cst_fa_weight 1.0 -run:preserve_header'
init(opts)
# Score function and starting PDB
sf = create_score_function('ref2015_cst')
pose = pose_from_pdb('cat_relax.pdb')
# Applying fold tree and constraints to the pose, deactivating by mutation
pose.fold_tree(make_fold_tree())
pose = apply_constraints(pose)
mutate_residue(pose, 169, 'A') # Catalytic S328 (169 in pose) mutated to A
# Making FastRelax mover
fr = setup_fastrelax(sf)
    # Going through all 5-residue frames in tau
for frame in range(len(tau_seq))[:-4]:
# Making name from position within tau sequence and the frame sequence
position_name = '{:03d}'.format(frame + 275) # First res is V270
# 275 selected so that name reflects which residue is downstream scissile
seq = tau_seq[frame:frame + 5]
set_name = '_'.join([position_name, seq])
print(set_name)
# Make threaded model
threaded_pose = thread_to_htra1(seq, pose)
threaded_name = 'threaded_htra1_tau_' + set_name + '.pdb'
threaded_pose.dump_pdb(join(out_dir, threaded_name))
# Creating relaxed decoys
decoy_name = join(out_dir, 'relaxed_htra1_tau_' + set_name)
jd = PyJobDistributor(decoy_name, 20, sf)
while not jd.job_complete:
pp = Pose()
pp.assign(threaded_pose)
fr.apply(pp)
jd.output_decoy(pp)
if __name__ == '__main__':
main()
|
from torch.utils.data import Dataset, DataLoader
class TrainSet(Dataset):
def __init__(self, dataset):
self.dataset = dataset
def __getitem__(self, index):
type1, type2, label = self.dataset[index]
return type1, type2, label
def __len__(self):
return len(self.dataset)
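# A minimal usage sketch (assumes each dataset item is a (type1, type2, label) tuple,
# as unpacked in __getitem__ above; the sample data below is illustrative):
#
#   samples = [(0.1, 0.2, 0), (0.3, 0.4, 1)]
#   loader = DataLoader(TrainSet(samples), batch_size=2, shuffle=True)
#   for type1, type2, label in loader:
#       ...  # feed the batch to a model here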
|
from piipod.utils.csp import *
import pytest
@pytest.fixture
def events():
return [chr(97+i) for i in range(3)]
@pytest.fixture
def users():
return list(map(str, range(10)))
def test_signupModel(users, events):
"""test that CSP accurately reflects signup relationships"""
csp = SignupCSP(users, events)
sol = csp.getSolution()
counts = {}
for user in users:
for event in events:
if sol['%s__%s' % (user, event)]:
counts.setdefault(user, 0)
counts.setdefault(event, 0)
counts[user] += 1
counts[event] += 1
for k, v in counts.items():
assert sol[k] == v
def test_userSignupMax(users, events):
"""test that signups per event are enforced"""
limit = 1
csp = SignupCSP(users, events)
for user in users:
csp.setUserSignupMax(user, limit)
sol = csp.getSolution()
for user in users:
assert sol[user] <= limit
assert sum(sol['%s__%s' % (user, event)] for event in events) <= limit
def test_eventSignupMax(users, events):
"""test that signups per event are enforced"""
limit = 4
csp = SignupCSP(users, events)
csp.setEventSignupMax('b', limit)
sol = csp.getSolution()
assert sol['b'] <= limit
assert sum(sol['%s__%s' % (user, 'b')] for user in users) <= limit
def test_userSignupMin(users, events):
"""test that signups per event are enforced"""
limit = 2
csp = SignupCSP(users, events)
for user in users:
csp.setUserSignupMin(user, limit)
sol = csp.getSolution()
for user in users:
assert sol[user] >= limit
assert sum(sol['%s__%s' % (user, event)] for event in events) >= limit
def test_eventSignupMin(users, events):
"""test that signups per event are enforced"""
limit = 8
csp = SignupCSP(users, events)
csp.setEventSignupMin('b', limit)
sol = csp.getSolution()
assert sol['b'] >= limit
assert sum(sol['%s__%s' % (user, 'b')] for user in users) >= limit
|
#! /bin/env python3
# This script is used to load the output data from evaluation.py for guard and exit files
# Further, it is used to truncate the data to the <t> top AS found within the file
# The output files are further used to generate both the client_top and destination_top graphs
import argparse
import csv
import logging
import pathlib
import statistics
def get_default_format():
return '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
def get_console_logger(name):
logger = logging.getLogger(name)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.DEBUG)
# create formatter
log_format = get_default_format()
formatter = logging.Formatter(log_format)
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
logger.setLevel(logging.INFO)
return logger
parser = argparse.ArgumentParser(description='Filter guard files and combine to output file')
parser.add_argument('-i', '--input', required=True, type=str, help='Path to input files')
parser.add_argument('-o', '--output', required=True, type=str, help='Path to output file')
parser.add_argument('-d', '--destinations', action="store_true", default=False, help='Filter on exit instead of guard')
parser.add_argument('-t', '--top', type=int, default=15, help='Load top <t> AS from each file')
parser.add_argument('-n', '--number', type=int, default=5, help='Feature at least X data points')
parser.add_argument('-r', '--asn_list', type=str, help='Path to file containing AS to unconditionally use')
parser.add_argument('-x', '--threshold', type=float, default=1.0, help='Threshold for minimum value')
parser.add_argument('-m', '--mean', action='store_true', default=False, help='Use mean instead of max')
args = parser.parse_args()
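# Example invocation (script and path names are illustrative):
#   python3 filter_top_as.py -i evaluation_output/ -o client_top.csv -t 15 -n 5 -x 1.0
# Add -d to filter on exit stat files instead of guard files, -m to rank by mean instead of max,
# and -r asn_list.txt to always include the AS numbers listed in that file.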
log = get_console_logger("filter_client")
log.setLevel(logging.DEBUG)
input_path = args.input
input_path = pathlib.Path(input_path)
output_file = args.output
output_file = pathlib.Path(output_file)
fix_asn_list = set()
if args.asn_list is not None and args.asn_list != "":
asn_path = pathlib.Path(args.asn_list)
    log.info(f'Found asn file {asn_path}')
with open(asn_path) as asn_file:
for line in asn_file:
line = line.strip()
if 'AS' not in line:
line = f'AS{line}'
fix_asn_list.add(line)
log.info(f'Loaded {len(fix_asn_list)} asn from list')
top_asn = args.top
data_threshold = args.threshold
destinations = args.destinations
number_points = args.number
use_mean = args.mean
ranking = max
if use_mean:
ranking = statistics.mean
if not input_path.exists() or input_path.is_file():
log.error("Input dir does not exist or is file")
exit(1)
log.info(f"Filtering the top {top_asn} ASN from each file")
asn_list = {}
if destinations:
log.info(f'Filtering on exit stat files')
glob_string = "exit_*_stats.tsv"
else:
log.info(f'Filtering on guard stat files')
glob_string = "guard_*_stats.tsv"
max_asn_value = {}
own_asn_value = {}
filter_target_asn = set()
for in_file in input_path.glob(glob_string):
log.info(f'Using file {in_file}')
with open(in_file) as csv_file:
file_name = in_file.name
target_as = file_name.split('_')[1]
filter_target_asn.add(target_as)
use_max = "AS" in file_name
header = [h.strip() for h in csv_file.readline().split('\t')]
reader = csv.DictReader(csv_file, delimiter='\t', fieldnames=header)
for index, line in enumerate(reader):
asn = line['AS'].strip()
if destinations and asn in str(in_file):
log.debug(f"skipping {asn} due to in filename")
continue
p_relay = float(line['Own'].strip())
p_sum = float(line['Sum'].strip())
if asn not in asn_list:
asn_list[asn] = []
own_asn_value[asn] = p_relay
asn_list[asn].append(p_sum)
if use_max:
if asn not in max_asn_value:
max_asn_value[asn] = {'max': 0.0, 'as': ''}
last_max = max_asn_value[asn]['max']
if max_asn_value[asn]['max'] < p_sum:
max_asn_value[asn]['max'] = p_sum
max_asn_value[asn]['as'] = target_as
# at this point we have a dict from ASN -> [percentages]
result_dict = {}
for asn in filter_target_asn:
if asn in asn_list:
del asn_list[asn]
# select all asn from the given list to the result dict
for asn in fix_asn_list:
if asn in asn_list:
log.info(f'Added {asn} per default')
result_dict[asn] = asn_list[asn]
del asn_list[asn]
# do some kind of ranking for the
# max ranking
res = {key: val for key, val in sorted(asn_list.items(), key=lambda ele: ranking(ele[1]), reverse=True) if len(val) >= number_points}
for index, asn in enumerate(res.keys()):
if index >= top_asn:
break
data = asn_list[asn]
max_value = ranking(data)
if max_value < data_threshold:
continue
result_dict[asn] = data
log.info("Writing output file")
with open(output_file, 'w') as output_file_pointer:
fieldnames = ['index', 'AS', 'perc', 'max_target', 'p_relay']
writer = csv.DictWriter(output_file_pointer, fieldnames=fieldnames)
for index, (asn, values) in enumerate(result_dict.items()):
        # default to an empty 'as' entry so the lookup below cannot fail for ASN without a recorded max
        max_target = max_asn_value.get(asn, {'as': ''})
record = {
'index': index,
'AS': asn,
'perc': 0,
'max_target': max_target['as'],
'p_relay': own_asn_value[asn]
}
for value in values:
record['perc'] = value
writer.writerow(record)
|
import os
import re
import requests
import sys
from lxml import etree
import pandas as pd
'''The URLs below are loaded via AJAX, so parsing the HTML returned by the page link directly with
BeautifulSoup or XPath usually yields nothing; for now the request links are copied over manually.'''
urls = ['https://fe-api.zhaopin.com/c/i/sou?pageSize=60&cityId=530&workExperience=-1&education=-1&companyTy'
'pe=-1&employmentType=-1&jobWelfareTag=-1&kw=Java%E5%BC%80%E5%8F%91&kt=3&_v=0.75420128&x-zp-page-re'
'quest-id=c9349f50d8134129828a3fd6cdebb33f-1542252095592-546632','https://fe-api.zhaopin.com/c/i/so'
'u?start=60&pageSize=60&cityId=530&workExperience=-1&education=-1&companyType=-1&employmentType=-'
'1&jobWelfareTag=-1&kw=Java%E5%BC%80%E5%8F%91&kt=3&_v=0.99050332&x-zp-page-request-id=0d1e51287340'
'4c338d4746efade3c24b-1542252174922-491324','https://fe-api.zhaopin.com/c/i/sou?start=120&pageSiz'
'e=60&cityId=530&workExperience=-1&education=-1&companyType=-1&employmentType=-1&jobWelfareTag=-1'
'&kw=Java%E5%BC%80%E5%8F%91&kt=3&_v=0.21924522&x-zp-page-request-id=f65894d42394423e8f24bf38a0ae'
'a0d8-1542252221835-965786','https://fe-api.zhaopin.com/c/i/sou?start=180&pageSize=60&cityId=530&w'
'orkExperience=-1&education=-1&companyType=-1&employmentType=-1&jobWelfareTag=-1&kw=Java%E5%BC%80%E'
'5%8F%91&kt=3&_v=0.73648157&x-zp-page-request-id=43e9b772d1d3405dbace142103faeec0-1542252270832'
'-101478','https://fe-api.zhaopin.com/c/i/sou?start=240&pageSize=60&cityId=530&workExperience=-1&ed'
'ucation=-1&companyType=-1&employmentType=-1&jobWelfareTag=-1&kw=Java%E5%BC%80%E5%8F%91&kt=3&_v=0.'
'74425705&x-zp-page-request-id=6ea6c55a416e4e4c9a30dcb24717bd87-1542252321797-561186']
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36'}
'''Based on inspection of the JSON response, the value of the 'results' key inside the 'data' object is a JSON
array holding the information of every company; each company is one JSON object, for example:
{'number': 'CC398107113J00040907712',
 'jobType': {'items': [{'code': '160000', 'name': '软件/互联网开发/系统集成'}, {'code': '44', 'name': '高级软件工程师'}],
             'display': '软件/互联网开发/系统集成,高级软件工程师'},
 'company': {'number': 'CZ398107110', 'url': 'https://company.zhaopin.com/CZ398107110.htm',
             'name': '北京盛学成长科技有限公司', 'size': {'code': '3', 'name': '100-499人'}, 'type': {'code': '5', 'name': '民营'}},
 'positionURL': 'https://jobs.zhaopin.com/CC398107113J00040907712.htm',
 'workingExp': {'code': '510', 'name': '5-10年'}, 'eduLevel': {'code': '4', 'name': '本科'},
 'salary': '15K-25K', 'emplType': '全职', 'jobName': '高级java开发工程师',
 'industry': '160000,210500,160400', 'recruitCount': 0, 'geo': {'lat': '39.961822', 'lon': '116.464077'},
 'city': {'items': [{'code': '530', 'name': '北京'}], 'display': '北京'}, 'applyType': '1',
 'updateDate': '2018-11-14 19:27:49', 'createDate': '2018-10-08 15:42:12', 'endDate': '2019-08-04 15:42:12',
 'welfare': ['节日福利', '带薪年假', '补充医疗保险', '五险一金', '定期体检'], 'saleType': 0,
 'feedbackRation': 0.5984, 'score': 607.3427, 'resumeCount': 735, 'showLicence': 0, 'interview': 0,
 'companyLogo': 'http://company.zhaopin.com/CompanyLogo/20171122/287207C861B14BA4B277D8A863542FCB.jpg',
 'tags': [], 'vipLevel': 1003, 'expandCount': 0,
 'positionLabel': '{"qualifications":null,"role":null,"level":null,"jobLight":["节日福利","带薪年假","补充医疗保险","五险一金","定期体检","绩效奖金","弹性工作","周末双休"],"companyTag":null,"skill":null,"refreshLevel":2,"chatWindow":20}',
 'duplicated': False, 'futureJob': False, 'selected': False, 'applied': False, 'collected': False,
 'isShow': False, 'timeState': '最新', 'rate': '59%'}
'''
# The lists below hold: job title, job link, company name, company link, company type, company size,
# required work experience, required education, salary, number of hires, city, benefits and working hours.
# They are used to build a DataFrame that is finally exported to an Excel file.
position = []
name_of_company = []
scale_of_company = []
number_required = []
category_of_company = []
experience = []
education = []
salary =[]
city = []
welfare = []
link_of_company = []
link_of_job = []
working_time = []
# Collect the job postings from all five result pages
for url in urls:
response =requests.get(url,headers=headers)
data = response.json()
companies = data['data']['results']
for company in companies:
name_of_company.append(company['company']['name'])
link_of_company.append(company['company']['url'])
scale_of_company.append(company['company']['size']['name'])
category_of_company.append(company['company']['type']['name'])
link_of_job.append(company['positionURL'])
experience.append(company['workingExp']['name'])
education.append(company['eduLevel']['name'])
salary.append(company['salary'])
working_time.append(company['emplType'])
position.append(company['jobName'])
city.append(company['city']['display'])
welfare.append(company['welfare'])
# Build word clouds directly from the list data, which avoids a separate Chinese word-segmentation step
import word_cloud
word_cloud.generate_wordcloud(' '.join(salary))
word_cloud.generate_wordcloud(' '.join(experience))
word_cloud.generate_wordcloud(' '.join(education))
'''Visualize the salary and education requirements with the pyecharts library. Because salaries come in the
form '3K-11K', we define a few salary bands and count how often each band occurs; the frequency of each
education level is needed as well, so the data is tidied up first.
'''
salary_str = ''.join(salary)
# The salary bands matched below are 3000-6000, 6000-12000, 12000-24000, 24000-30000 and 30000-40000
salary1 = re.compile('[3456]K-[3456]K').findall(salary_str)
salary2 = re.compile('[6789]K-[6789]K|[6789]K-1[0-2]K|1[012]K-1[012]K').findall(salary_str)
salary3 = re.compile('1[2-9]K-1[2-9]K|1[2-9]K-2[0-4]K|2[0-4]K-2[0-4]K').findall(salary_str)
salary4 = re.compile('2[4-9]K-2[4-9]K|2[4-9]K-30K').findall(salary_str)
salary5 = re.compile('3[0-9]K-3[0-9]K|3[0-9]K-40K').findall(salary_str)
# Compute the share of postings that falls into each salary band
rate = [len(band) / len(salary) for band in (salary1, salary2, salary3, salary4, salary5)]
import sys
# Add the parent directory to the module search path
sys.path.append('..')
import drawer
drawer.draw_bar(['3K-6K','6K-12K','12K-24K','24K-30K','30K-40K'],rate,'智联招聘Java岗位薪资统计','共'+str(len(salary))+'家公司')
drawer.draw_pie(['3K-6K','6K-12K','12K-24K','24K-30K','30K-40K'],rate,'薪资占比玫瑰图')
frame = pd.DataFrame({'公司名称':name_of_company,'公司链接':link_of_company,'公司规模':scale_of_company,'所在城市':city,
'企业类型':category_of_company,'岗位名称':position,'岗位链接':link_of_job,
'工作经验':experience,'学历起点':education,'薪水':salary,'工作类型':working_time,
'福利':welfare})
frame.to_excel('智联北京Java岗位招聘信息.xlsx',sheet_name='第1页',header='北京Java岗位招聘信息',na_rep='NULL')
|
from unittest import TestCase
from mock import patch, Mock
from app.core import file_operations as file
import mock
class FileOperationTestCase(TestCase):
@patch("app.core.file_operations.get_os_directory")
def test_get_json_file_path_from_data(self, os_dir):
os_dir.return_value = "dir"
self.assertEqual(file.get_json_file_path_from_data('path'), 'dir/data/path.json')
self.assertEqual(file.get_json_file_path_from_data('append'), 'dir/data/append.json')
@patch("os.path.dirname")
def test_get_os_directory(self, dirname):
dirname.return_value = "dir"
self.assertEqual(file.get_os_directory(), "dir")
@patch("os.path.isdir")
@patch("os.listdir")
def test_list_dir(self, listdir, isdir):
isdir.return_value = True
listdir.return_value = ["directory"]
self.assertEqual(file.list_dir("directory"), ["directory"])
isdir.return_value = False
self.assertEqual(file.list_dir("directory"), [])
@patch("app.core.file_operations.get_os_directory")
@patch("app.core.file_operations.get_visual_map_from_db")
def test_get_visual_files(self, get_visual_map_from_db, os_dir):
get_visual_map_from_db.return_value = ["dir1", "dir2", "dir3"]
os_dir.return_value = "dir"
self.assertEqual(file.get_visual_files("fishes"),
["js/visualization/dir1.js", "js/visualization/dir2.js", "js/visualization/dir3.js"])
@patch("app.core.file_operations.get_all_visual_data_files")
def test_get_all_visual_chart_files(self, get_all_visual_data_files):
get_all_visual_data_files.return_value = ['test', 'fishes', 'spiders', 'demo']
self.assertEqual(file.get_all_visual_chart_files(),
["js/visualization/test.js", "js/visualization/fishes.js", "js/visualization/spiders.js", "js/visualization/demo.js"])
def test_get_visual_file(self):
self.assertEqual(file.get_visual_file('fishes'), ["js/visualization/fishes.js"])
@patch("json.load")
@patch("app.core.file_operations.open", create=True)
def test_get_json_file(self, mock_open, json_load):
mock_open.side_effect = [
mock.mock_open(read_data='{"fishes":0, "demo":1}').return_value
]
json_load.return_value = {"fishes":0, "demo":1}
self.assertEqual({"fishes":0, "demo":1}, file.get_json_file("fileA"))
@patch("json.load")
def test_update_json(self, json_load):
json_load.return_value = {"type": {"key": "value"}}
self.assertEqual(file.update_json({}, "somefile"), {"key": "value"})
@patch("app.core.file_operations.update_json")
@patch("app.core.file_operations.open", create=True)
def test_update_json_from_file(self, mock_open, update_json):
update_json.return_value = {"key":"value"}
mock_open.side_effect = [
mock.mock_open(read_data={"type":{"key":"value"}}).return_value
]
self.assertEqual(file.update_json_from_file({}, 'test', "x.txt"), {"key":"value"})
@patch("app.core.file_operations.get_visual_data")
def test_get_visual_map_from_db(self, get_visual_data):
visual_response = ['visual1', 'visual2']
get_visual_data.return_value = visual_response
self.assertEqual(file.get_visual_map_from_db(1), visual_response)
get_visual_data.assert_called_with(1)
|
from django.contrib.auth.models import User, Group
from api.models import Submission, Conference
from rest_framework import serializers as ser
from django_countries.fields import CountryField
class UserSerializer(ser.HyperlinkedModelSerializer):
class Meta:
model = User
fields = ('username', 'email', 'groups')
class GroupSerializer(ser.HyperlinkedModelSerializer):
class Meta:
model = Group
fields = ('id', 'name')
class AuthenticationSerializer(ser.Serializer):
username = ser.CharField(required=True)
password = ser.CharField(required=True)
def validate(self, data):
return data
class ConferenceSerializer(ser.ModelSerializer):
title = ser.CharField(required=True)
city = ser.CharField(required=True)
state = ser.CharField(required=True)
country = CountryField() #get country_dict working later
event_start = ser.DateTimeField(required=True)
event_end = ser.DateTimeField(required=True)
submission_start = ser.DateTimeField(required=True)
submission_end = ser.DateTimeField(required=True)
logo_url = ser.URLField(required=True)
description = ser.CharField(required=True)
site_url = ser.URLField(required=False)
# Later on add tags and sponsors back
class Meta:
model = Conference
fields = ('created', 'modified', 'id', 'title', 'site_url', 'city', 'state', 'country', 'event_start', 'event_end', 'submission_start', 'submission_end', 'logo_url', 'description')
class SubmissionSerializer(ser.HyperlinkedModelSerializer):
conference = ser.PrimaryKeyRelatedField(queryset=Conference.objects.all())
contributors = UserSerializer(many=True)
node_id = ser.CharField(read_only=True)
def create(self, validated_data):
# look up contributors by ID
contributors = validated_data['contributors']
title = validated_data['title']
description = validated_data['description']
conference = validated_data['conference']
submission = Submission.objects.create(title=title, description=description, conference=conference, approved=False)
for contributor in contributors:
submission.contributors.add(contributor)
return submission
class Meta:
model = Submission
fields = ('id', 'node_id', 'title', 'description', 'conference', 'contributors')
|
import numpy as np
import numpy.testing as npt
import pandas as pd
from stumpy import (
gpu_stump,
_get_QT_kernel,
_ignore_trivial_kernel,
_calculate_squared_distance_kernel,
_update_PI_kernel,
)
from stumpy import core, _get_QT
from numba import cuda
import math
import pytest
THREADS_PER_BLOCK = 1
if not cuda.is_available():
pytest.skip("Skipping Tests No GPUs Available", allow_module_level=True)
def naive_mass(Q, T, m, trivial_idx=None, excl_zone=0, ignore_trivial=False):
D = np.linalg.norm(
core.z_norm(core.rolling_window(T, m), 1) - core.z_norm(Q), axis=1
)
if ignore_trivial:
start = max(0, trivial_idx - excl_zone)
stop = min(T.shape[0] - Q.shape[0] + 1, trivial_idx + excl_zone)
D[start:stop] = np.inf
I = np.argmin(D)
P = D[I]
# Get left and right matrix profiles for self-joins
if ignore_trivial and trivial_idx > 0:
PL = np.inf
IL = -1
for i in range(trivial_idx):
if D[i] < PL:
IL = i
PL = D[i]
if start <= IL < stop:
IL = -1
else:
IL = -1
if ignore_trivial and trivial_idx + 1 < D.shape[0]:
PR = np.inf
IR = -1
for i in range(trivial_idx + 1, D.shape[0]):
if D[i] < PR:
IR = i
PR = D[i]
if start <= IR < stop:
IR = -1
else:
IR = -1
return P, I, IL, IR
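# A quick illustration of naive_mass for an A/B join (hypothetical toy inputs,
# not part of the original test suite):
# P, I, IL, IR = naive_mass(np.array([1., 2., 3.]), np.array([0., 1., 2., 3., 4.]), 3)
# P is the smallest z-normalized distance, I is its index, and IL/IR stay -1
# because the left/right profiles are only filled in for self-joins (ignore_trivial=True).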
def replace_inf(x, value=0):
x[x == np.inf] = value
x[x == -np.inf] = value
return
test_data = [
(
np.array([9, 8100, -60, 7], dtype=np.float64),
np.array([584, -11, 23, 79, 1001, 0, -19], dtype=np.float64),
),
(
np.random.uniform(-1000, 1000, [8]).astype(np.float64),
np.random.uniform(-1000, 1000, [64]).astype(np.float64),
),
]
@pytest.mark.parametrize("T_A, T_B", test_data)
def test_get_QT_kernel(T_A, T_B):
m = 3
M_T, Σ_T = core.compute_mean_std(T_B, m)
μ_Q, σ_Q = core.compute_mean_std(T_A, m)
QT, QT_first = _get_QT(0, T_B, T_A, m)
device_T_A = cuda.to_device(T_B)
device_T_B = cuda.to_device(T_A)
device_M_T = cuda.to_device(M_T)
device_Σ_T = cuda.to_device(Σ_T)
device_QT_odd = cuda.to_device(QT)
device_QT_even = cuda.to_device(QT)
device_QT_first = cuda.to_device(QT_first)
threads_per_block = THREADS_PER_BLOCK
blocks_per_grid = math.ceil(QT_first.shape[0] / threads_per_block)
for i in range(1, QT_first.shape[0]):
left = core.sliding_dot_product(T_A[i : i + m], T_B)
_get_QT_kernel[blocks_per_grid, threads_per_block](
i, device_T_A, device_T_B, m, device_QT_even, device_QT_odd, device_QT_first
)
if i % 2 == 0:
right = device_QT_even.copy_to_host()
npt.assert_almost_equal(left, right)
else:
right = device_QT_odd.copy_to_host()
npt.assert_almost_equal(left, right)
@pytest.mark.parametrize("T_A, T_B", test_data)
def test_calculate_squared_distance_kernel(T_A, T_B):
m = 3
for i in range(T_A.shape[0] - m + 1):
Q = T_A[i : i + m]
left = np.linalg.norm(
core.z_norm(core.rolling_window(T_B, m), 1) - core.z_norm(Q), axis=1
)
left = np.square(left)
M_T, Σ_T = core.compute_mean_std(T_B, m)
QT = core.sliding_dot_product(Q, T_B)
μ_Q, σ_Q = core.compute_mean_std(T_A, m)
device_M_T = cuda.to_device(M_T)
device_Σ_T = cuda.to_device(Σ_T)
device_QT_even = cuda.to_device(QT)
device_QT_odd = cuda.to_device(QT)
device_QT_first = cuda.to_device(QT)
device_μ_Q = cuda.to_device(μ_Q)
device_σ_Q = cuda.to_device(σ_Q)
device_D = cuda.device_array(QT.shape, dtype=np.float64)
device_denom = cuda.device_array(QT.shape, dtype=np.float64)
threads_per_block = THREADS_PER_BLOCK
blocks_per_grid = math.ceil(QT.shape[0] / threads_per_block)
_calculate_squared_distance_kernel[blocks_per_grid, threads_per_block](
i,
m,
device_M_T,
device_Σ_T,
device_QT_even,
device_QT_odd,
device_μ_Q,
device_σ_Q,
device_D,
device_denom,
)
right = device_D.copy_to_host()
npt.assert_almost_equal(left, right)
def test_ignore_trivial_kernel():
D = np.random.rand(10)
start_stop = [(0, 3), (4, 6), (7, 9)]
for start, stop in start_stop:
left = D.copy()
left[start:stop] = np.inf
device_D = cuda.to_device(D)
_ignore_trivial_kernel(device_D, start, stop)
right = device_D.copy_to_host()
npt.assert_almost_equal(left, right)
def test_update_PI_kernel():
D = np.random.rand(5, 10)
profile = np.empty((10, 3))
profile[:, :] = np.inf
indices = np.ones((10, 3)) * -1
ignore_trivial = False
left_profile = profile.copy()
left_profile[:, 0] = np.min(D, axis=0)
left_indices = np.ones((10, 3)) * -1
left_indices[:, 0] = np.argmin(D, axis=0)
device_profile = cuda.to_device(profile)
device_indices = cuda.to_device(indices)
for i in range(D.shape[0]):
device_D = cuda.to_device(D[i])
_update_PI_kernel(i, device_D, ignore_trivial, device_profile, device_indices)
right_profile = device_profile.copy_to_host()
right_indices = device_indices.copy_to_host()
npt.assert_almost_equal(left_profile, right_profile)
ignore_trivial = True
left_profile = profile.copy()
left_profile[:, 0] = np.min(D, axis=0)
# for i in range(1, D.shape[1]):
for j in range(D.shape[1]):
for i in range(D.shape[0]):
if i < j and D[i, j] < left_profile[j, 1]:
left_profile[j, 1] = D[i, j]
if i > j and D[i, j] < left_profile[j, 2]:
left_profile[j, 2] = D[i, j]
left_indices = np.ones((10, 3)) * -1
left_indices[:, 0] = np.argmin(D, axis=0)
device_profile = cuda.to_device(profile)
device_indices = cuda.to_device(indices)
for i in range(D.shape[0]):
device_D = cuda.to_device(D[i])
_update_PI_kernel(i, device_D, ignore_trivial, device_profile, device_indices)
right_profile = device_profile.copy_to_host()
right_indices = device_indices.copy_to_host()
npt.assert_almost_equal(left_profile[:, 2], right_profile[:, 2])
@pytest.mark.parametrize("T_A, T_B", test_data)
def test_stump_self_join(T_A, T_B):
m = 3
zone = int(np.ceil(m / 4))
left = np.array(
[
naive_mass(Q, T_B, m, i, zone, True)
for i, Q in enumerate(core.rolling_window(T_B, m))
],
dtype=object,
)
right = gpu_stump(T_B, m, ignore_trivial=True, threads_per_block=THREADS_PER_BLOCK)
replace_inf(left)
replace_inf(right)
npt.assert_almost_equal(left, right)
right = gpu_stump(
pd.Series(T_B), m, ignore_trivial=True, threads_per_block=THREADS_PER_BLOCK
)
replace_inf(right)
npt.assert_almost_equal(left, right)
@pytest.mark.parametrize("T_A, T_B", test_data)
def test_stump_A_B_join(T_A, T_B):
m = 3
left = np.array(
[naive_mass(Q, T_A, m) for Q in core.rolling_window(T_B, m)], dtype=object
)
right = gpu_stump(
T_A, m, T_B, ignore_trivial=False, threads_per_block=THREADS_PER_BLOCK
)
replace_inf(left)
replace_inf(right)
npt.assert_almost_equal(left, right)
right = gpu_stump(
pd.Series(T_A),
m,
pd.Series(T_B),
ignore_trivial=False,
threads_per_block=THREADS_PER_BLOCK,
)
replace_inf(right)
npt.assert_almost_equal(left, right)
|
def _partition(data):
mid = int(len(data) // 2)
return data[:mid], data[mid:]
def _merge(left, right):
result = []
left_idx = right_idx = 0
while True:
if left[left_idx] < right[right_idx]:
result.append(left[left_idx])
left_idx += 1
else:
result.append(right[right_idx])
right_idx += 1
if left_idx == len(left):
result.extend(right[right_idx:])
break
if right_idx == len(right):
result.extend(left[left_idx:])
break
return result
def merge_sort(data):
if len(data) > 1:
left, right = _partition(data)
left = merge_sort(left)
right = merge_sort(right)
return _merge(left, right)
else:
return data
if __name__ == '__main__':
print(merge_sort([]))
print(merge_sort([1]))
list1 = [1,2,10,4,5,1]
print(merge_sort(list1))
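    # A few quick sanity checks (added as a sketch; expected results shown explicitly):
    assert merge_sort([]) == []
    assert merge_sort([3, 1, 2]) == [1, 2, 3]
    assert merge_sort(list1) == [1, 1, 2, 4, 5, 10]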
|
from calc import add,div,name
# print(calc.add(10,20))
# name 'calc' is not defined
print(add(10,20))
print(div(10,20))
#print(sub(10,20))# name 'sub' is not defined
print(name)
|
import requests, os, argparse, dotenv
from dotenv import load_dotenv
def create_bitlink(url_to_check, token):
headers = {"Authorization" : "Bearer {}".format(token)}
url = 'https://api-ssl.bitly.com/v4/bitlinks'
body = {
"long_url": url_to_check
}
response = requests.post(url, headers=headers, json=body)
if response.ok:
return response.json()['id']
def get_bitlink(url_to_check, token):
headers = {"Authorization" : "Bearer {}".format(token)}
url = 'https://api-ssl.bitly.com/v4/bitlinks/{}'.format(url_to_check)
response = requests.get(url, headers = headers)
if response.ok:
return url_to_check
url_to_check_replace = url_to_check.replace('http://','').replace('https://','')
url = 'https://api-ssl.bitly.com/v4/bitlinks/{}'.format(url_to_check_replace)
response = requests.get(url, headers = headers)
if response.ok:
return url_to_check_replace
def get_clicks_number(bitlink, token):
headers = {"Authorization" : "Bearer {}".format(token)}
url = 'https://api-ssl.bitly.com/v4/bitlinks/{}/clicks'.format(bitlink)
payload = {"unit":"day", "units":"-1"}
response = requests.get(url, params=payload, headers = headers)
if response.ok:
return response.json()['link_clicks']
def get_clicks_count(url, token):
clicks_count = dict()
bitlink = get_bitlink(url, token)
if bitlink is None:
return None
for clicks_info in get_clicks_number(bitlink, token):
clicks_count[clicks_info['date']] = clicks_info['clicks']
return clicks_count
def main():
load_dotenv()
token_api_bitly = os.getenv("TOKEN_API_BITLY")
    parser = argparse.ArgumentParser(
        description='''Create a short link, or
        count the number of clicks if the link has already been shortened'''
    )
    parser.add_argument('url', help='Specify a URL')
    args = parser.parse_args()
    url = args.url
    clicks_count = get_clicks_count(url, token_api_bitly)
    if clicks_count is None:
        print('bitlink:', create_bitlink(url, token_api_bitly))
    elif not clicks_count:
        print('The click list is empty')
    else:
        for date, count in clicks_count.items():
            print(f'{date}: {count} clicks')
if __name__ == "__main__":
main()
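# Example invocation (the file name is hypothetical; TOKEN_API_BITLY must be set
# in the environment or in a .env file, see load_dotenv above):
#   python bitly_clicks.py https://example.com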
|
import json,httplib,sys
username = sys.argv[1]
if sys.argv[2] == "final":
    pushmsg = "The previous player skipped their turn. It is your turn to make a whisper!"
elif sys.argv[2] == "warn":
    pushmsg = "Hurry up! Your turn expires in 1 hour!"
connection = httplib.HTTPSConnection('api.parse.com', 443)
connection.connect()
connection.request('POST', '/1/push', json.dumps({
"where": {
"owner": {
"$inQuery": {
"username": username, "className": "_User", "__type": "Pointer"
}
}
}
,
"data": {
"alert": pushmsg
}
}), {
"X-Parse-Application-Id": "PIYnTkrWnsIfFQbdQtRlzxS5QRvtivurmK5BPtxq",
"X-Parse-REST-API-Key": "cXfQEYhDuPsYA6t0tGw0nOFMnDQtVA1gedpiQxT8",
"Content-Type": "application/json"
})
result = json.loads(connection.getresponse().read())
print result
|
from django import forms
from .models import Document, Item
from django.forms.extras import SelectDateWidget
from datetime import date
from django.views.generic.edit import CreateView
class ItemForm(forms.ModelForm):
class Meta:
model = Item
fields = ['name', 'quantity', 'unit_of_measurement', 'price', 'purpose']
class DocumentForm(forms.ModelForm):
    created_date = forms.DateField(widget=SelectDateWidget(), initial=date.today)  # pass the callable so the default date is evaluated per form, not at import time
class Meta:
model = Document
fields = ['number', 'series', 'seller', 'created_date']
|
from __future__ import division
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
from os.path import join
from os import listdir
class Hsv:
def __init__(self, filename, path=None):
if path:
print('fname in hsv ', join(path, filename))
self.img = cv.imread(join(path, filename))
else:
self.img = cv.imread(filename)
def to_hsv(self):
# Convert BGR to HSV
#print (self.img)
#print(self.img)
hsv = cv.cvtColor(self.img, cv.COLOR_BGR2HSV)
return hsv
def blue_mask(self, hsv, low, high, ind):
blue = np.uint8([[[0, 0, 255]]])
hsv_blue = cv.cvtColor(blue,cv.COLOR_BGR2HSV)
hsv_low_blue = np.array(low) #([220,50,50])
hsv_high_blue = np.array(high) #([260, 255, 255])
# Threshold the HSV image to get only blue colors
mask = cv.inRange(hsv, hsv_low_blue, hsv_high_blue)
# Bitwise-AND mask and original image
res = cv.bitwise_and(self.img, self.img, mask= mask)
cv.imshow('frame',self.img)
cv.imshow('mask',mask)
cv.imwrite('mask_new.jpg', mask)
cv.imshow('res',res)
cv.imwrite('res_new.jpg',res)
return res
def load_img(self, img):
return cv.imread(img)
def histogram_rgb(self, image):
# extract a 3D RGB color histogram from the image,
# using 8 bins per channel, normalize, and update
# the index
hist = cv.calcHist([image], [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256])
hist = cv.normalize(hist, None).flatten()
#print(hist)
return hist
def histogram_hsv(self, image):
# extract a 3D RGB color histogram from the image,
# using 8 bins per channel, normalize, and update
# the index
hist = cv.calcHist([image], [0, 1], None, [180, 256], [0, 180, 0, 256])
hist = cv.normalize(hist, None).flatten()
#print(hist)
return hist
def compare_histograms(self, h1, h2):
#print ' correl method'
#result = cv.compareHist(h1, h2, cv.HISTCMP_CORREL)
#print(result)
#print ' chisq method'
result = cv.compareHist(h1, h2, cv.HISTCMP_CHISQR)
return result
def histogram_list(self, folder):
histo_list = []
for img in listdir(folder):
image = cv.imread(folder + '/' + img)
histo = self.histogram_rgb(image)
histo_list.append(histo)
return histo_list
def histo_err(self, histo_to_judge, histo_list):
err = 0
for hs in histo_list:
err += self.compare_histograms(histo_to_judge, hs)
print(err)
return err/len(histo_list)
def compare_img_to_histos(self, img, grass_histo_list, sky_histo_list):
'''
return True if near to sky histograms,
return False if near to grass histograms
'''
histo_to_judge = self.histogram_rgb(img)
grass_err = self.histo_err(histo_to_judge, grass_histo_list)
print ('total grass err ' + str(grass_err))
sky_err = self.histo_err(histo_to_judge, sky_histo_list)
print ('total sky err ' + str(sky_err))
return sky_err < grass_err
def histogram_all(self, image):
grass_histo_list = self.histogram_list('grass_examples')
sky_histo_list = self.histogram_list('sky_examples')
        print(self.compare_img_to_histos(self.img, grass_histo_list, sky_histo_list))
h = Hsv('fisheye_orig_sky_jpg')
h.histogram_all(None)
h = Hsv('fisheye_orig_grass2_jpg')
h.histogram_all(None)
#img2 = h.load_img('fisheye_orig_grass_jpg')
#img3 = h.load_img('fisheye_orig_grass2_jpg')
'''
img1 = h.load_img('hsv_grass.jpg')
img2 = h.load_img('hsv_grass2.jpg')
img3 = h.load_img('hsv_sky.jpg')
img4 = h.load_img('hsv_sky2.jpg')
hist_grass = h.histogram_hsv(img1)
hist_grass2 = h.histogram_hsv(img2)
hist_sky = h.histogram_hsv(img3)
hist_sky2 = h.histogram_hsv(img4)
print 'grass vs sky'
h.compare_histograms(hist_sky, hist_grass)
print 'grass vs grass'
h.compare_histograms(hist_grass2, hist_grass)
'''
#hsv = h.to_hsv()
#h.new_blue_mask(hsv)
#ind = 0
#h.blue_mask(hsv, [100,50,50], [140,255,255], 1)
# [86, 31, 4], [220, 88, 50]
#for low, high in ([[100,50,50],[140,255,255]], [[100,50,50],[140,255,255]], [[100,50,50],[140,255,255]], [[100,50,50],[140,255,255]], [[100,50,50],[140,255,255]]) :
# h.blue_mask(hsv) ##,[110,50,50],[130,255,255])
|
import matplotlib.pyplot as plt
def noah_scatterplot(noah_dataset):
FF_data = noah_dataset[noah_dataset.pitch_type == 'FF']
CH_data = noah_dataset[noah_dataset.pitch_type == 'CH']
CU_data = noah_dataset[noah_dataset.pitch_type == 'CU']
    fig = plt.figure()
    ax = fig.add_subplot(111)  # one shared axes is enough for the three scatter layers
    ax.scatter(FF_data['x'], FF_data['y'], color='DarkBlue', label='FF')
    ax.scatter(CH_data['x'], CH_data['y'], color='DarkGreen', label='CH')
    ax.scatter(CU_data['x'], CU_data['y'], color='Yellow', label='CU')
plt.legend()
plt.show()
def scatterplot(dataset, x, y, labels):
plt.scatter(dataset[x], dataset[y], c=labels, s=10, cmap='viridis')
plt.show()
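# Usage sketch (hypothetical DataFrame; noah_scatterplot expects the columns
# 'pitch_type', 'x', and 'y'):
# import pandas as pd
# df = pd.DataFrame({"pitch_type": ["FF", "CH", "CU"],
#                    "x": [0.1, -0.3, 0.4], "y": [2.1, 1.8, 2.5]})
# noah_scatterplot(df)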
|
import os
input_path = os.path.join(os.path.dirname(__file__), 'input.txt')
with open(input_path) as file:
original_polymer = file.read()
def react(polymer):
reactions = True
while reactions:
reactions = False
        for lowercase in set(x.lower() for x in polymer):
            uppercase = lowercase.upper()  # a unit only reacts with its own letter in the opposite case
while polymer.find(lowercase + uppercase) != -1:
value1 = polymer.find(lowercase + uppercase)
polymer = polymer[:value1] + polymer[value1 + 2:]
reactions = True
while polymer.find(uppercase + lowercase) != -1:
value2 = polymer.find(uppercase + lowercase)
polymer = polymer[:value2] + polymer[value2 + 2:]
reactions = True
return polymer
# Original solution which earned the stars, however takes a number of seconds for part 2 and very inefficient.
# Greatly improved in other solution file
part1 = react(original_polymer)
print "Part 1: Length after all reactions %d" % len(part1)
part2 = min(((len(react(part1.replace(letter, '').replace(letter.upper(), ''))), letter)
for letter in set(letter.lower() for letter in part1)), key=lambda x: x[0])
print "Part 2: Minimum length is %d after removing %s/%s" % (part2[0], part2[1].upper(), part2[1])
|
from googletrans import Translator
sentence=str(input('Say..............'))
translator=Translator()
translated_sentence=translator.translate(sentence,src='en',dest='bn')
print(translated_sentence.text)
|
"""Module containing all SQLalchemy Models."""
from . import recipient_alias, sender_alias # noqa
|
# This is a Python comment
'''And this is a Python
multi-line comment'''
|
from typing import List
class Solution:
def singleNumber(self, nums: List[int]) -> int:
myhash = {}
for i in range(0, len(nums)):
if nums[i] in myhash:
del myhash[nums[i]]
else:
myhash[nums[i]] = 1
key, = myhash
return key
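# An alternative constant-space approach (a sketch, not part of the original
# solution): XOR-ing every value cancels the paired numbers, so only the
# unpaired number survives.
def single_number_xor(nums: List[int]) -> int:
    result = 0
    for num in nums:
        result ^= num  # x ^ x == 0 and x ^ 0 == x
    return result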
|
#import the required functions
import pickle
import numpy as np
import argparse
import sys
import pylab
import matplotlib.pyplot as plt
"""
KNOWN BUGS:
-Certain files are 'unrecognized arguments'
"""
#Handles the arguments given in the console
def parse_args(args):
parser = argparse.ArgumentParser(description='PulsON440 SAR Image former')
parser.add_argument('-f', '--file', action='store', dest='file', help='PulsON 440 data file')
    parser.add_argument('-l', '--legacy', action='store_true', dest='legacy', help='Load legacy format of file')
return parser.parse_args(args)
#Main function, creates the SAR image
def main(args):
    # Parse the command-line arguments passed in by the caller
    args = parse_args(args)
f = open(args.file, 'rb')
data = pickle.load(f)
f.close()
#plot data
Platform = data[0]
Pulses = data[1]
Ranges = data[2]
AbsPulses = np.absolute(Pulses)
RealPulses = np.real(Pulses)
print(Ranges)
#for r in AbsPulses:
#print(np.max(r))
for i in AbsPulses:
pylab.scatter(Ranges,i)
pylab.show()
if __name__ == "__main__":
main(sys.argv[1:])
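# Example invocation (the script name is hypothetical; flags follow parse_args above):
#   python sar_image_former.py -f pulson440_data.pkl
#   python sar_image_former.py -f pulson440_data.pkl -l    # legacy file format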
|
# Copyright 2021 DAI Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
"NodeConnectionException",
"ProcessingException",
"InvalidTransactionHash",
"InvalidEtherscanReturnCodeException",
"FourByteConnectionException",
"FourByteContentException",
"FourByteException",
]
import json
from typing import Dict
class NodeConnectionException(Exception):
"""Node Connection Exception."""
def __init__(self):
super().__init__("Couldn't connect to node(s)")
class ProcessingException(Exception):
"""Processing Exception."""
def __init__(self, msg):
super().__init__("Exception processing: " + msg)
class InvalidTransactionHash(Exception):
"""Invalid Transaction Hash."""
def __init__(self, tx_hash):
super().__init__("Invalid transaction hash provided: " + tx_hash)
class InvalidEtherscanReturnCodeException(Exception):
def __init__(self, returned_code: int, params: Dict = None):
params_msg = " with params: " + json.dumps(params) if params else ""
msg = f"Invalid status code for etherscan request: {returned_code} {params_msg}"
super().__init__(msg)
class FourByteException(Exception):
"""4byte base exception class."""
class FourByteConnectionException(FourByteException):
"""4byte directory connection error."""
def __init__(self, msg: str):
super().__init__(f"Couldn't connect to 4byte.directory: {msg}")
class FourByteContentException(FourByteException):
"""4byte content exception. Missing output."""
def __init__(self, status_code: int, content: bytes):
super().__init__(
f"Wrong response from 4byte.directory. Status code:{status_code}, content: {content}"
)
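# Usage sketch (hypothetical call sites; these exceptions are raised by callers
# of this module rather than here):
# raise InvalidTransactionHash("0xabc123")
# raise InvalidEtherscanReturnCodeException(503, {"module": "account"})
# raise FourByteConnectionException("connection timed out")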
|
# -*- coding: utf-8 -*-
# --------------------------------
# Name: text_normal.py
# Author: devshilei@gmail.com
# @Time 2020/7/14 4:16 PM
# Description:
# --------------------------------
import json
def normal_dbc_to_sbc(text):
"""
description: 对文本进行标准化【全角字符替换为半角字符】
注:此处保留原始中文句号“。”以及顿号“、”,
因其在句子中长用来表平行关系且替换为其他半角字符容易引起歧义。
Double Byte Character,简称:DBC 全角字符
Single Byte Character,简称:SBC 半角字符
:param text: 待处理文本
:return: 输出为标准化之后的文本
"""
mapping_data = json.load(open("dbc_mapping_sbc_data.json", mode="r", encoding="utf8"))
for dbc_char, sbc_char in mapping_data.items():
text = text.replace(dbc_char, sbc_char)
return text
def normal_zh_cn_hant_mapping(text):
"""
description: 中文繁体、简体字标注化,将繁体转为简体字
:param text: 待转化文本
:return: 返回标准化后的文本
"""
mapping_data = json.load(open("zh_cn_hant_mapping_data.json", mode="r", encoding="utf8"))
for trad_char, simp_char in mapping_data.items():
text = text.replace(trad_char, simp_char)
return text
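# Usage sketch (hypothetical strings; assumes the two mapping JSON files sit next
# to this module):
# print(normal_dbc_to_sbc("ABC,123"))      # full-width letters/digits become half-width
# print(normal_zh_cn_hant_mapping("漢語"))  # traditional characters become simplified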
|
numero = int(input("Enter a number: "))
calculo = numero % 15
if calculo == 0:
    print("FizzBuzz")
else:
    print(numero)
|
n1 = int(input('Enter a number: '))
db = n1 * 2
tr = n1 * 3
rq = n1 ** 0.5  # square root
print('The number entered is: {}'.format(n1))
print('Its double is: {}, its triple is: {} and its square root is: {}'.format(db, tr, rq))
|
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""High-level utilities for manipulating image files associated with
music and items' embedded album art.
"""
from tempfile import NamedTemporaryFile
import os
from beets.util import displayable_path, syspath, bytestring_path
from beets.util.artresizer import ArtResizer
import mediafile
def mediafile_image(image_path, maxwidth=None):
"""Return a `mediafile.Image` object for the path.
"""
with open(syspath(image_path), 'rb') as f:
data = f.read()
return mediafile.Image(data, type=mediafile.ImageType.front)
def get_art(log, item):
# Extract the art.
try:
mf = mediafile.MediaFile(syspath(item.path))
except mediafile.UnreadableFileError as exc:
log.warning('Could not extract art from {0}: {1}',
displayable_path(item.path), exc)
return
return mf.art
def embed_item(log, item, imagepath, maxwidth=None, itempath=None,
compare_threshold=0, ifempty=False, as_album=False, id3v23=None,
quality=0):
"""Embed an image into the item's media file.
"""
# Conditions.
if compare_threshold:
is_similar = check_art_similarity(
log, item, imagepath, compare_threshold)
if is_similar is None:
log.warning('Error while checking art similarity; skipping.')
return
elif not is_similar:
log.info('Image not similar; skipping.')
return
if ifempty and get_art(log, item):
log.info('media file already contained art')
return
# Filters.
if maxwidth and not as_album:
imagepath = resize_image(log, imagepath, maxwidth, quality)
# Get the `Image` object from the file.
try:
log.debug('embedding {0}', displayable_path(imagepath))
image = mediafile_image(imagepath, maxwidth)
except OSError as exc:
log.warning('could not read image file: {0}', exc)
return
# Make sure the image kind is safe (some formats only support PNG
# and JPEG).
if image.mime_type not in ('image/jpeg', 'image/png'):
log.info('not embedding image of unsupported type: {}',
image.mime_type)
return
item.try_write(path=itempath, tags={'images': [image]}, id3v23=id3v23)
def embed_album(log, album, maxwidth=None, quiet=False, compare_threshold=0,
ifempty=False, quality=0):
"""Embed album art into all of the album's items.
"""
imagepath = album.artpath
if not imagepath:
log.info('No album art present for {0}', album)
return
if not os.path.isfile(syspath(imagepath)):
log.info('Album art not found at {0} for {1}',
displayable_path(imagepath), album)
return
if maxwidth:
imagepath = resize_image(log, imagepath, maxwidth, quality)
log.info('Embedding album art into {0}', album)
for item in album.items():
embed_item(log, item, imagepath, maxwidth, None, compare_threshold,
ifempty, as_album=True, quality=quality)
def resize_image(log, imagepath, maxwidth, quality):
"""Returns path to an image resized to maxwidth and encoded with the
specified quality level.
"""
log.debug('Resizing album art to {0} pixels wide and encoding at quality \
level {1}', maxwidth, quality)
imagepath = ArtResizer.shared.resize(maxwidth, syspath(imagepath),
quality=quality)
return imagepath
def check_art_similarity(
log,
item,
imagepath,
compare_threshold,
artresizer=None,
):
"""A boolean indicating if an image is similar to embedded item art.
If no embedded art exists, always return `True`. If the comparison fails
for some reason, the return value is `None`.
This must only be called if `ArtResizer.shared.can_compare` is `True`.
"""
with NamedTemporaryFile(delete=True) as f:
art = extract(log, f.name, item)
if not art:
return True
if artresizer is None:
artresizer = ArtResizer.shared
return artresizer.compare(art, imagepath, compare_threshold)
def extract(log, outpath, item):
art = get_art(log, item)
outpath = bytestring_path(outpath)
if not art:
log.info('No album art present in {0}, skipping.', item)
return
# Add an extension to the filename.
ext = mediafile.image_extension(art)
if not ext:
log.warning('Unknown image type in {0}.',
displayable_path(item.path))
return
outpath += bytestring_path('.' + ext)
log.info('Extracting album art from: {0} to: {1}',
item, displayable_path(outpath))
with open(syspath(outpath), 'wb') as f:
f.write(art)
return outpath
def extract_first(log, outpath, items):
for item in items:
real_path = extract(log, outpath, item)
if real_path:
return real_path
def clear(log, lib, query):
items = lib.items(query)
log.info('Clearing album art from {0} items', len(items))
for item in items:
log.debug('Clearing art for {0}', item)
item.try_write(tags={'images': None})
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from tkinter import *
from Node import *
from Edge import *
from Obstacle import *
if len(sys.argv) < 2:
exit(1)
def str_to_bool(s):
return s in ["true", "True", "1", "y", "Y", "yes", "Yes"]
nodes = []
edges = []
obstacles = []
f = open(sys.argv[1])
l = f.readline().split()
if len(l) > 1:
try:
width = int(l[0])
height = int(l[1])
scale = int(l[2])
except ValueError:
exit(2)
except IndexError:
scale = 1
else:
exit(2)
for line in f:
l = line.split()
if len(l) == 0:
continue
if l[0] == "Node":
try:
x = int(l[1])
y = int(l[2])
# b = str_to_bool(l[3])
        except (ValueError, IndexError):
            print("Cannot parse node")
            continue
weight = ""
b = False
if len(l) > 4:
weight = str(int(float(l[3])))
b = str_to_bool(l[4])
elif len(l) > 3:
try:
weight = str(int(float(l[3])))
except ValueError:
b = str_to_bool(l[3])
nodes.append(Node(x, y, weight, b, scale))
elif l[0] == "Edge":
try:
i = int(l[1])
j = int(l[2])
        except (ValueError, IndexError):
            print("Cannot parse edge")
            continue
weight = ""
b = False
if len(l) > 4:
weight = str(int(float(l[3])))
b = str_to_bool(l[4])
elif len(l) > 3:
try:
weight = str(int(float(l[3])))
except ValueError:
b = str_to_bool(l[3])
edges.append(Edge(nodes[i], nodes[j], weight, visited=b))
elif l[0] == "Obstacle":
        if len(l) < 7 or len(l) % 2 == 0:  # need the keyword plus at least three x/y coordinate pairs
print("Cannot parse obstacle")
continue
obstacles.append(Obstacle(l[1:], scale))
root = Tk()
w = Canvas(root, width=width*scale, height=height*scale)
w.pack()
for e in edges:
e.draw(w)
for n in nodes:
n.draw(w)
for o in obstacles:
o.draw(w)
root.mainloop()
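# A minimal example of the input format this script expects (inferred from the
# parsing above; real files may differ):
#
#   20 20 30                <- width height scale
#   Node 2 3 1.5 yes        <- x y [weight] [visited]
#   Node 8 9
#   Edge 0 1 4.0 no         <- node index i, node index j, [weight] [visited]
#   Obstacle 1 1 1 5 5 5    <- x/y pairs describing a polygon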
|
# Collect movie data from Naver Movies
import requests
from bs4 import BeautifulSoup
raw = requests.get("https://movie.naver.com/movie/running/current.nhn#",
                   headers = {"User-Agent":"Mozilla/5.0"})
html = BeautifulSoup(raw.text, 'html.parser')
# Container: dl.lst_dsc
movies = html.select("dl.lst_dsc")
for m in movies:
    # Title: dt.tit > a
    title = m.select_one("dt.tit > a").text
    # Rating: div.star_t1 a span.num
    score = m.select_one("div.star_t1 a span.num").text
    # Genre: dl.lst_dsc dl.info_txt1 dd a
    # Director: dl.lst_dsc dl.info_txt1 dd a
    # Actors: dl.lst_dsc dl.info_txt1 dd a
    # Using the select() function
    info = m.select("dl.info_txt1 dd")  # summary, director, and actor data are returned as a list
    # Genre
    genre = info[0].select("a")  # this itself is yet another list
    # Director
    director = info[1].select("a")
    # Actors
    actor = info[2].select("a")
    print(title)
    print(score)
    for g in genre:
        print(g.text)
    for d in director:
        print(d.text)
    for a in actor:
        print(a.text)
    print("*"*50)
|
from django.db import models
from aristo.models import *
# Create your models here.
class Package(models.Model):
name = models.CharField(max_length=200,verbose_name="License's Name",null=True)
description = models.CharField(max_length=200,verbose_name="Description",null=True)
offered_days = models.IntegerField(verbose_name= 'Offered Days',null=True)
account_count = models.IntegerField(verbose_name= 'Number of IG Accounts',null=True)
package_price = models.IntegerField(verbose_name= 'Package Price',null=True)
def __str__(self):
return str(self.name)
class License(models.Model):
"""Keeps License informations of Main Users"""
main_user = models.ForeignKey(User,on_delete=models.CASCADE,null=True)
package = models.ForeignKey(Package,on_delete=models.CASCADE,null=True)
created_date = models.DateTimeField(auto_now_add=True,verbose_name="Register Date",null=True)
status = models.IntegerField(verbose_name="Status", null=True)
def __str__(self):
return self.main_user.username
class Coupon(models.Model):
name = models.CharField(max_length=200,verbose_name="Coupon Name",null=True)
percentage = models.IntegerField(verbose_name='Percentage', null=True)
status = models.IntegerField(verbose_name='Status', null=True)
amount = models.IntegerField(verbose_name='Amount', null=True)
def __str__(self):
return str(self.name)
class Card(models.Model):
main_user = models.ForeignKey(User,on_delete=models.CASCADE,null=True)
order_id = models.IntegerField(verbose_name= 'Order ID')
package = models.ForeignKey(Package,on_delete=models.CASCADE,null=True)
coupon = models.ForeignKey(Coupon,on_delete=models.CASCADE, null=True)
signature = models.CharField(max_length=400, verbose_name='signature', null=True)
payment_status = models.IntegerField(verbose_name='Payment Status', null=True)
updated_time = models.DateTimeField(auto_now_add=True,verbose_name="Updated Date",null=True)
def __str__(self):
return str(self.package.name)
|
import requests
response = requests.get('http://zhihu.com')
print(response.text)
|