text stringlengths 8 6.05M |
|---|
"""
pytest configuration additions.
"""
from typing import final, Optional
import numpy as np
import pytest
import sacc
from firecrown.likelihood.gauss_family.statistic.statistic import (
Statistic,
DataVector,
TheoryVector,
)
from firecrown import parameters
from firecrown.parameters import (
RequiredParameters,
DerivedParameterCollection,
ParamsMap,
)
from firecrown.modeling_tools import ModelingTools
def pytest_addoption(parser):
    """Register firecrown-specific command-line options with pytest.

    --runslow: opt in to tests marked `slow`, which are skipped by default.
    """
    runslow_kwargs = dict(action="store_true", default=False, help="run slow tests")
    parser.addoption("--runslow", **runslow_kwargs)
def pytest_configure(config):
    """Declare the custom markers used by firecrown's test suite.

    The `slow` marker flags tests taking more than about a second; they run
    only when --runslow is supplied on the command line.
    """
    config.addinivalue_line("markers", "slow: mark test as slow to run")
def pytest_collection_modifyitems(config, items):
    """Attach a skip marker to `slow` tests unless --runslow was requested."""
    if config.getoption("--runslow"):
        # The user explicitly asked for slow tests; leave everything enabled.
        return
    skip_marker = pytest.mark.skip(reason="need --runslow option to run")
    for test_item in items:
        if "slow" in test_item.keywords:
            test_item.add_marker(skip_marker)
# Fixtures
class TrivialStatistic(Statistic):
    """A minimal Statistic implementation used to exercise Gaussian likelihoods."""

    def __init__(self) -> None:
        """Create the statistic with no data loaded and an unset `mean` parameter."""
        super().__init__()
        self.data_vector: Optional[DataVector] = None
        self.mean = parameters.create()
        self.computed_theory_vector = False

    def read(self, sacc_data: sacc.Sacc):
        """Load the 'count' measurements from *sacc_data* into the data vector."""
        counts = sacc_data.get_mean(data_type="count")
        self.data_vector = DataVector.from_list(counts)
        self.sacc_indices = np.arange(len(self.data_vector))

    @final
    def _reset(self):
        """Forget that a theory vector has been computed."""
        self.computed_theory_vector = False

    @final
    def _required_parameters(self) -> RequiredParameters:
        """This statistic declares no required parameters."""
        return RequiredParameters([])

    @final
    def _get_derived_parameters(self) -> DerivedParameterCollection:
        """This statistic produces no derived parameters."""
        return DerivedParameterCollection([])

    def get_data_vector(self) -> DataVector:
        """Return the data vector; read() must have been called first."""
        assert self.data_vector is not None
        return self.data_vector

    def compute_theory_vector(self, _: ModelingTools) -> TheoryVector:
        """Return a constant theory vector holding three copies of `mean`."""
        self.computed_theory_vector = True
        return TheoryVector.from_list([self.mean] * 3)
@pytest.fixture(name="trivial_stats")
def make_stats():
    """Provide a one-element list containing a fresh TrivialStatistic."""
    stats = [TrivialStatistic()]
    return stats
@pytest.fixture(name="trivial_params")
def make_trivial_params() -> ParamsMap:
    """Provide a ParamsMap holding the single parameter `mean`."""
    params = {"mean": 1.0}
    return ParamsMap(params)
@pytest.fixture(name="sacc_data")
def make_sacc_data():
    """Build a trivial sacc.Sacc: three 'count' points and a diagonal covariance."""
    sacc_obj = sacc.Sacc()
    for value in (1.0, 4.0, -3.0):
        sacc_obj.add_data_point("count", (), value)
    sacc_obj.add_covariance(np.diag([4.0, 9.0, 16.0]))
    return sacc_obj
|
#standard import
import pandas as pd
import numpy as np
import itertools
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, HashingVectorizer
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
import matplotlib.pyplot as plt
# read data
df = pd.read_csv('fake_or_real_news.csv')  # expects columns 'Unnamed: 0', 'text', 'label'
df = df.set_index('Unnamed: 0')  # the exported row-id column becomes the index
y = df.label  # classification target -- presumably 'FAKE'/'REAL' strings; confirm against the CSV
df = df.drop('label', axis=1)  # keep only feature columns
# 70/30 train/test split on the raw article text; fixed seed for reproducibility
X_train, X_test, y_train, y_test = train_test_split(df['text'], y, test_size=0.3, random_state=50)
#tfidf
# TF-IDF features; max_df=0.7 drops terms appearing in more than 70% of documents
tfidf_vectorizer = TfidfVectorizer(stop_words='english', max_df=0.7)
tfidf_train = tfidf_vectorizer.fit_transform(X_train)
tfidf_test = tfidf_vectorizer.transform(X_test)  # transform only: no test-set leakage
#count
# Raw term counts (bag of words) over the same split
count_vectorizer = CountVectorizer(stop_words='english')
count_train = count_vectorizer.fit_transform(X_train)
count_test = count_vectorizer.transform(X_test)
# important features function
def most_informative_feature(vectorizer, classifier, n=10):
    """Print the n strongest features for each class of a binary linear model.

    :param vectorizer: fitted vectorizer exposing feature names
    :param classifier: fitted linear classifier with `classes_` and `coef_`
    :param n: number of features to print per class
    """
    class_labels = classifier.classes_
    # `get_feature_names` was removed in scikit-learn 1.2; prefer the modern
    # `get_feature_names_out` when the vectorizer provides it.
    names_fn = getattr(vectorizer, "get_feature_names_out", None)
    if names_fn is None:
        names_fn = vectorizer.get_feature_names
    feature_names = names_fn()
    # Sort (coefficient, feature) pairs once and reuse both ends of the ranking.
    ranked = sorted(zip(classifier.coef_[0], feature_names))
    class1 = ranked[:n]   # most negative coefficients -> first class
    class2 = ranked[-n:]  # most positive coefficients -> second class
    print("Important FAKE news features")
    for _coef, feat in class1:
        print(class_labels[0], feat)
    print()
    print("Important REAL news features")
    for _coef, feat in reversed(class2):  # strongest first
        print(class_labels[1], feat)
# scorer function
def scorer(confusion_m):
    """Print precision, recall and F1 derived from a 2x2 confusion matrix.

    The matrix is expected in sklearn layout: [[tn, fp], [fn, tp]].
    """
    tn, fp, fn, tp = confusion_m.ravel()
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    f1_score = 2 * precision * recall / (precision + recall)
    for label, value in (("Precision", precision), ("Recall", recall), ("F-1 Score", f1_score)):
        print("%s is: %0.3f" % (label, value))
    print()
############ classification
# knn model
print("Result of K-NN model")
knn_matrix_score = []
# Grid-search k in [2, 9]; accuracy measured on the count (bag-of-words) features.
for n_neighbors in np.arange(2,10):
    knn_model = KNeighborsClassifier(n_neighbors=n_neighbors)
    knn_model.fit(count_train, y_train)
    pred_knn = knn_model.predict(count_test)
    knn_score = metrics.accuracy_score(y_test, pred_knn)
    knn_matrix_score.append(knn_score)
knn_max_index = np.argmax(knn_matrix_score) + 2 # neighbor array start from 2
print ("Best number of neighbors is: %d" % knn_max_index)
print("Best accuracy of K-NN: %0.3f" % knn_matrix_score[knn_max_index - 2]) #deduct two print out the right number
# NOTE(review): pred_knn below is from the LAST loop iteration (k=9), not the
# best-scoring k -- the confusion matrix may not match the accuracy just printed.
cm_knn = metrics.confusion_matrix(y_test, pred_knn, labels=['FAKE', 'REAL'])
scorer(cm_knn)
# rf model
print("Result of Random Forest model")
rf_matrix_score = []
# Grid-search max_depth in [2, 5] on the TF-IDF features.
for max_depth in np.arange(2,6):
    rf_model = RandomForestClassifier(max_depth=max_depth, random_state=0)
    rf_model.fit(tfidf_train, y_train)
    pred_rf = rf_model.predict(tfidf_test)
    rf_score = metrics.accuracy_score(y_test, pred_rf)
    rf_matrix_score.append(rf_score)
rf_max_index = np.argmax(rf_matrix_score) + 2 # neighbor array start from 2
print ("Best number of max_depth is: %d" % rf_max_index)
print("Best accuracy of RF: %0.3f" % rf_matrix_score[rf_max_index - 2])
# NOTE(review): as above, pred_rf comes from the last loop iteration (depth 5).
cm_rf = metrics.confusion_matrix(y_test, pred_rf, labels=['FAKE', 'REAL'])
scorer(cm_rf)
# nn model
print("Result of MLP Neural Net model")
nn_model = MLPClassifier(alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=0)
nn_model.fit(tfidf_train, y_train)
pred_nn = nn_model.predict(tfidf_test)
nn_score = metrics.accuracy_score(y_test, pred_nn)
print("Accuracy of Multi-layer Perceptron NN: %0.3f" % nn_score)
cm_nn = metrics.confusion_matrix(y_test, pred_nn, labels=['FAKE', 'REAL'])
scorer(cm_nn)
########### features
# svm model
print("Result of Linear SVM model")
svm_model = LinearSVC(random_state=0, tol=1e-5)
svm_model.fit(tfidf_train, y_train)
# Evaluation disabled; only the fitted coefficients are inspected below.
#pred_svm = svm_model.predict(tfidf_test)
#svm_score = metrics.accuracy_score(y_test, pred_svm)
#print("Accuracy of Linear SVM: %0.3f" % svm_score)
#cm_svm = metrics.confusion_matrix(y_test, pred_svm, labels=['FAKE', 'REAL'])
#scorer(cm_svm)
most_informative_feature(tfidf_vectorizer, svm_model)
# Logistic Regression
print("Result of Logistic Regression model")
log_reg = LogisticRegression(random_state=0)
log_reg.fit(tfidf_train, y_train)
#pred_log = log_reg.predict(tfidf_test)
#log_score = metrics.accuracy_score(y_test, pred_log)
#print("Logistic Regression accuracy: %0.3f" % log_score)
#log_cm = metrics.confusion_matrix(y_test, pred_log, labels=['FAKE', 'REAL'])
#scorer(log_cm)
most_informative_feature(tfidf_vectorizer, log_reg)
|
#!/usr/bin/env python
# -*-coding:utf-8 -*-
# Author:Renkai
# Demonstrate in-place dict merging and key-sorted iteration.
dict1 = {'name': 'wuya', 'age': 18}
dict2 = {'address': 'xian'}

# Merge dict2 into dict1 in place and show the result.
dict1.update(dict2)
print(dict1)

# Reference copy of the expected merged result.
dict3 = {'name': 'wuya', 'age': 18, 'address': 'xian'}

# Items of the merged dict, ordered by key.
print(sorted(dict1.items(), key=lambda kv: kv[0]))
#!/usr/bin/env python
# See the argparse help further down or run [thisprogram] --help for a description of this program.
# IDEAS
# - Make it work for binary files (not using whole lines to calculate the ratio)
# - Make it faster by using an index on the right tree, e.g. by putting 10-char snippets into a snippet-to-[file] dict
from __future__ import print_function
import sys
import os
# Bail out early on interpreter versions older than 2.7 (Python 2) or 3.2 (Python 3).
if sys.version_info < (2,7) or (sys.version_info.major == 3 and sys.version_info < (3,2)):
    sys.stderr.write("Python 3 >= 3.2 or Python 2 >= 2.7 required\n")
    sys.exit(1)
import subprocess
import argparse
import re
from collections import namedtuple
def die(msg, error_code=1):
    """Report *msg* on stderr and terminate with *error_code* (default 1)."""
    full_message = "Error: " + msg
    print(full_message, file=sys.stderr)
    exit(error_code)
class DiffError(Exception):
    """Raised when diff exits with an unexpected return code."""

    def __init__(self, returncode):
        message = "Got bad return code %s from diff" % returncode
        super(DiffError, self).__init__(message)
        self.returncode = returncode
def count_common_lines(left_path, right_path):
    """Return how many lines the two files share, as counted by GNU diff.

    Raises DiffError on any diff exit status other than 0 (identical) or
    1 (files differ).
    """
    # Each unchanged hunk prints its own line count; all other hunks print nothing.
    diff_cmd = [
        "diff",
        "--old-group-format=",
        "--new-group-format=",
        "--unchanged-group-format=%dN\n",
        "--changed-group-format=",
        left_path,
        right_path,
    ]
    proc = subprocess.Popen(diff_cmd, stdout=subprocess.PIPE)
    stdout, _ = proc.communicate()
    if proc.returncode not in (0, 1):
        raise DiffError(proc.returncode)
    # Sum the per-hunk counters emitted on stdout.
    return sum(int(count) for count in stdout.split())
def count_lines(path):
    """Return the number of lines in the file at *path*.

    Opens in binary mode so the count is independent of platform text
    decoding, and closes the handle deterministically -- the original
    leaked the file object. Iterating avoids materializing all lines.
    """
    with open(path, 'rb') as f:
        return sum(1 for _ in f)
def common_lines_ratio(left_path, right_path):
    """
    What fraction of their lines l and r have in common.
    This is calculated as 2 * common_lines / (left_lines + right_lines).
    """
    # open in binary mode to prevent Python 3's platform-dependent encoding (e.g. utf-8)
    left_lines = count_lines(left_path)
    right_lines = count_lines(right_path)
    common_lines = count_common_lines(left_path, right_path)
    # NOTE(review): raises ZeroDivisionError when both files are empty -- confirm intended.
    ratio = 2.0 * common_lines / (left_lines + right_lines)
    return ratio
class CachingDict(dict):
    """ A simple key-value cache based on a dict. """

    # Sentinel distinguishing "key absent" from cached falsy values.
    _MISSING = object()

    def get_or_cache(self, key, val_fn):
        """
        If key in self, return self[key].
        Otherwise set self[key] = val_fn() and return the calculated value.

        Fix: the original used a truthiness test on the cached value, so
        falsy results (0, '', None, ...) were recomputed on every call.
        """
        cached = self.get(key, CachingDict._MISSING)
        if cached is CachingDict._MISSING:
            cached = val_fn()
            self[key] = cached
        return cached
class MimetypeCache(CachingDict):
    """ A file to mimetype cache """

    def mimetype(self, path):
        """
        Returns the mimetype of the given path in the form of file -ib.
        Uses internal caching.
        """
        import subprocess

        def compute():
            # `file -ib` prints e.g. "text/plain; charset=us-ascii"; keep the type only.
            raw = subprocess.check_output(["file", "-ib", path])
            return raw.decode("utf-8").split(";")[0]

        return self.get_or_cache(path, compute)
def build_file_list(dir):
    """ Returns a list of all files in the given directory as relative paths. """
    # Flatten the os.walk traversal into one path list.
    return [
        os.path.join(dirpath, filename)
        for dirpath, _dirnames, filenames in os.walk(dir)
        for filename in filenames
    ]
def find_tree_matches(left, right, prematch_filter=None, silent_diff_errors=False):
    """
    Traverses the directories given as left and right, calculating the ratio of how similar each file in right is to each file in left.
    Runtime of no_files(left) * no_files(right).
    :param prematch_filter If given, all comparisons for which prematch_filter(left_file, right_file) is False are skipped.
    :param silent_diff_errors Prevent printing errors to stderr on bad diff return codes.

    Generator: yields one (left_path, {right_path: ratio}) pair per file in left.
    """
    left_paths = build_file_list(left)
    right_paths = build_file_list(right)
    for left_path in left_paths:
        matches = {} # {right_path: ratio}
        for right_path in right_paths:
            if prematch_filter is None or prematch_filter(left_path, right_path):
                try:
                    ratio = common_lines_ratio(left_path, right_path)
                    matches[right_path] = ratio
                except DiffError as e:
                    # Failed comparisons are reported (unless silenced) and
                    # simply left out of the matches dict.
                    if not silent_diff_errors:
                        explanation = ""
                        if e.returncode == 2:
                            explanation = " - Perhaps the files are binary files"
                        print("Error: %s (for files %s and %s)%s" % (e, left_path, right_path, explanation), file=sys.stderr)
        yield left_path, matches
def copy_full_path(src, dst):
    """Copy the file *src* to *dst*, creating all parent directories of *dst*.

    Fixes two issues: a check-then-create race on the directory (two
    concurrent copies could both see it missing and one makedirs would
    fail), and a crash when *dst* has no directory component (where
    os.makedirs('') raises).
    """
    import shutil
    dst_dir = os.path.dirname(dst)
    if dst_dir:
        try:
            os.makedirs(dst_dir)
        except OSError:
            # Already created (possibly by a concurrent copy); anything
            # else (permissions, a file in the way) is re-raised.
            if not os.path.isdir(dst_dir):
                raise
    shutil.copyfile(src, dst)
class MimetypeFilter(object):
    """ A filter to be passed into rescue_matcher that filters away all files which do not have the same mime type. """

    def __init__(self):
        # Shared cache so each file's mimetype is probed at most once.
        self.mime_cache = MimetypeCache()

    def filter(self, left_path, right_path):
        """True when both paths report the same mimetype."""
        mime = self.mime_cache.mimetype
        return mime(left_path) == mime(right_path)
def rescue_matcher(left_tree, right_tree, min_ratio=0.0, prematch_filters=None, copy_dest=None, copy_least_matching=False):
    """
    Compare all files in left_tree to all files in right_tree, and print out a ratio how similar they are to each other.
    :param min_ratio Ratio output is skipped for all files that have a common files ratio less than this.
    :param prematch_filters A list of objects having a filter() method. For all files (l, r) from (left_tree, right_tree) for which one of the filter()s returns false, comparison and ratio output of l and r are skipped.
    :param copy_dest The best matching from right_tree are copied to this directory, getting the file names of their best matching equivalents in left_tree.
    :param copy_least_matching If True, the file with the lowest ratio bigger than min_ratio is chosen as the best matching file for being copied to copy_dest. Ignored if copy_dest is None.
    """
    # Fix: the original used a mutable default argument ([]), which is
    # shared across calls; None is the conventional safe default.
    if prematch_filters is None:
        prematch_filters = []
    # Filters out a file comparison if one of the prematch_filters filters it out
    def prematch_filter(left_path, right_path):
        return all(f.filter(left_path, right_path) for f in prematch_filters)
    tree_matches = find_tree_matches(left_tree, right_tree, prematch_filter)
    for left_path, matches_dict in tree_matches:
        # all matches >= min_ratio sorted by ratio in descending order
        selected_files = sorted((k for k in matches_dict if matches_dict[k] >= min_ratio), key=matches_dict.get, reverse=True)
        if selected_files:
            print("%s" % left_path)
            for right_path in selected_files:
                ratio = matches_dict[right_path]
                print(" %.4f %s" % (ratio, right_path))
            if copy_dest:
                # Least matching = last entry after the descending sort; best = first.
                best_match_path = selected_files[-1 if copy_least_matching else 0]
                copy_full_path(best_match_path, os.path.join(copy_dest, left_path))
def main():
    """Parse command-line arguments and run the tree matcher."""
    parser = argparse.ArgumentParser(description='Compares two trees of files and tells which ones from the left tree match best with which ones from the right tree.')
    parser.add_argument('left_tree', help='For each file in this tree, it will be tried to find a matching equivalent from right_tree.')
    parser.add_argument('right_tree', help='The tree in which matching files are searched for.')
    parser.add_argument('--min-ratio', type=float, default=0.0, help='Only print matching having a line match ratio >= MIN_RATIO')
    parser.add_argument('--mimetype-filter', action='store_true', help='Skip file matching if mimetypes do not match. Can yield more useful results and speed up the matching process.')
    parser.add_argument('--copy-dest', metavar="DIR", help='If specified, matching files found in right_tree found are saved to DIR, where they get the same path/filename as their their equivalents from left_tree.')
    parser.add_argument('--copy-least-matching', action='store_true', help='Instead of copying the best matching file to the directory given in --copy-dest, copy the least matching one that exceeds MIN_RATIO. This is useful if there are different revisions of a file, with the most matching ones being oldest and the least matching ones the most recent ones (e.g. when a version control system was used).')
    args = parser.parse_args()
    # Handle argument conflicts
    if args.copy_dest is None and args.copy_least_matching:
        die("--copy-dest has to be specified for --copy-least-matching to take effect!")
    prematch_filters = []
    if(args.mimetype_filter):
        prematch_filters.append(MimetypeFilter())
    rescue_matcher(args.left_tree, args.right_tree, args.min_ratio, prematch_filters, args.copy_dest, args.copy_least_matching)
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Exit quietly with status 1 on Ctrl-C instead of dumping a traceback.
        exit(1)
|
class EvenOddVendingMachine(int):
    """An int subclass whose vending() prints the instance's parity.

    NOTE(review): `vending` is written without an explicit `self`; when
    called as `instance.vending()`, the instance is bound to `x`.
    """

    def vending(x):
        # A number is even exactly when it leaves no remainder modulo 2.
        print('Number is even' if x % 2 == 0 else 'Number is odd')
def find_range_stepwise(x, y, z):
    """Print every z-th integer from x (inclusive) up to y (exclusive)."""
    for value in range(x, y, z):
        print(value)
if __name__ == '__main__':
    # Fix: the original called bare `vending(int(x))`, which is a NameError --
    # `vending` only exists as a method of EvenOddVendingMachine.
    x = input('Enter a number: ')
    EvenOddVendingMachine(int(x)).vending()
    find_range_stepwise(int(x), 9, 2)
|
# -*- coding: utf-8 -*-
#########################################################
# python
import os
import sys
import logging
import traceback
import json
import re
import urllib
import requests
import threading
# third-party
# sjva 공용
# 패키지
from .plugin import logger, package_name
from .model import ModelSetting, ModelChannel
from .source_base import SourceBase
#########################################################
class SourceSBS(SourceBase):
    """Channel-list / stream-URL source for SBS (regular and virtual channels)."""

    @classmethod
    def get_channel_list(cls):
        """Fetch the SBS on-air channel lists as ModelChannel objects.

        On error, logs the exception and returns whatever was collected so far.
        """
        ret = []
        try:
            url_list = ['http://static.apis.sbs.co.kr/play-api/1.0/onair/channels', 'http://static.apis.sbs.co.kr/play-api/1.0/onair/virtual/channels']
            for url in url_list:
                data = requests.get(url).json()
                for item in data['list']:
                    # Entries without a 'type' field, or typed 'TV', count as TV channels.
                    is_tv = 'type' not in item or item['type'] == 'TV'
                    c = ModelChannel(cls.source_name, item['channelid'], item['channelname'], None, is_tv)
                    c.current = item['title']
                    ret.append(c)
            return ret
        except Exception as e:
            logger.error('Exception:%s', e)
            logger.error(traceback.format_exc())
            return ret

    @classmethod
    def get_url(cls, source_id, quality, mode):
        """Resolve the playback URL for *source_id*.

        Returns ('return_after_read'|'redirect', url), or None on error.
        """
        try:
            from framework.common.ott import OTTSupport
            url = OTTSupport.get_sbs_url(source_id)
            if mode == 'web_play':
                return 'return_after_read', url
            return 'redirect', url
        except Exception as e:
            logger.error('Exception:%s', e)
            logger.error(traceback.format_exc())
            return

    @classmethod
    def get_return_data(cls, source_id, url, mode):
        """Download *url* and rewrite redirect data; returns None if the download failed."""
        # Fix: `data` was unbound when requests.get itself raised, turning the
        # except path's `return data` into an UnboundLocalError.
        data = None
        try:
            data = requests.get(url).text
            return cls.change_redirect_data(data)
        except Exception as e:
            logger.error('Exception:%s', e)
            logger.error(traceback.format_exc())
            return data
|
import numpy
from qiskit_shor import *
from qiskit import QuantumCircuit, QuantumRegister
from Qubit import *
from qiskit import IBMQ
from qiskit.aqua import QuantumInstance
from qiskit.aqua.algorithms import Shor
def main():
    """Console demo: single-qubit gates, registers, inner/outer products,
    measurement, and side-by-side comparisons against Qiskit."""
    print('################################################################')
    print('############ Noiseless Implementation Demonstration ############')
    print('################################################################', '\n')
    print('We begin with single Qubit objects:')
    phi = Qubit()
    print('phi =', phi.__repr__(), '\n', '\n', '\n', '\n')
    print('We can apply transformations such as the Pauli matrices and the Hadamard Gate:')
    px = phi.pauli('X')
    py = phi.pauli('Y')
    pz = phi.pauli('Z')
    ph = phi.Hadamard()
    print('X * phi = ', px.__repr__())
    print('Y * phi = ', py.__repr__())
    print('Z * phi = ', pz.__repr__())
    print('H * phi = ', ph.__repr__(), '\n', '\n', '\n', '\n')
    print('We now initialize two 3-qubit registers (x and y) to the zero state:')
    x = Register(3)
    y = Register(3)
    print('x =', x.__repr__())
    print('y =', y.__repr__(), '\n', '\n', '\n', '\n')
    print('Create an equal superposition over all of the basis states with register x', '\n',
          ' and take the conjugate transpose of register y to make it a bra:', '\n')
    x = x.walsh()
    print('x =', x.__repr__())
    y.bra()
    print('y =', y.__repr__(), '\n', '\n', '\n')
    print('We expect the purity of x and y to be 1/8 (1/2^n, where n=3) and 1, respectively...')
    print('purity of x: ', x.purity)
    print('purity of y:', y.purity, '\n', '\n', '\n')
    print('Taking their inner product (bra/ket) should result in a scalar value.', '\n',
          'Taking their outer product (ket/bra) should result in an 8x8 matrix operator, which we omit for brevity',
          '\n')
    p1 = y * x
    # p2 (the outer product) is only displayed by the commented-out print below.
    p2 = x * y
    print('<y|x> =', p1, '\n', '\n', '\n', '\n')
    # print('|x><y| =', p2, '\n', '\n')
    print('Measuring our register will return the index of the state measured.', '\n'
          'e.g.) if x.measure() returns 4, we have effectively measured the state |100>')
    m = x.measure()
    my = y.measure()
    print('x after measurement: ', m,
          '--> State measured: ', str(dec_to_bin(m)), '\n')
    print('We should observe the zero state with certainty when measuring y:')
    print('y after measurement: ', my, '\n', '\n')
    print('############################################################################')
    print('############ Basic Functionality Tests Against Control (Qiskit) ############')
    print("############################################################################", '\n')
    # Qiskit reference construction of the |phi+> Bell state.
    qis_x = QuantumCircuit(2, 2)
    qis_x.h(0)
    qis_x.cx(0, 1)
    qis_x.measure([0, 1], [0, 1])
    print("Qiskit bell state construction:")
    print(qis_x)
    x = Register(name='phi_plus')
    print("Our bell state construction:")
    print(x.name, ': ', x.__repr__(), '\n', '\n')
    print('#################################################################################')
    print('############ Implementations of various forms of probabilistic noise ############')
    print('#################################################################################', '\n')
    print('Dephasing: Moving from surface of bloch sphere to the Z axis over time due to magnetic field.', '\n')
    print('Bit Flip: Flip across Z plane. Probability of flipping from 1 to 0. Probability p of applying X.', '\n')
    print('Depolarization: Moving toward origin of sphere. Equal probability p/3 of applying X, Y, or Z.', '\n')
    print("Amplitude Damping: Moving toward north pole `|0>`. Also called Thermal relaxation. Probability of `|1> -> "
          "|0>`", '\n', '\n')
    print('###############################################################################################')
    print('############ Test of Noiseless Implementation of Quantum Factoring Against Control ############')
    print('###############################################################################################', '\n')
# Script entry point.
if __name__ == "__main__":
    main()
|
from rv.readers.reader import Reader, ReaderFinished
from rv.readers.sunsynth import SunSynthReader
from rv.readers.sunvox import SunVoxReader
class InitialReader(Reader):
    """Dispatching reader: hands off to a concrete reader based on the leading chunk ID."""
    def process_SVOX(self, _):
        # 'SVOX' magic: the stream is a whole SunVox project.
        self.object = SunVoxReader(self.f).object
    def process_SSYN(self, _):
        # 'SSYN' magic: the stream is a standalone synth.
        self.object = SunSynthReader(self.f).object
    def process_end_of_file(self):
        # Signal the reader framework that parsing is complete.
        raise ReaderFinished()
|
# File: configure_checker.py
# Aim: Check the configuration
from tools import Configure
config = Configure()  # project-level configuration loader (from tools)
settings = config.getall()  # presumably returns all settings as a mapping -- confirm in tools.Configure
print(settings)
|
from datetime import datetime
import time
import os
from get_db import db
connection = db()  # open a database connection via the project's get_db helper
c = connection.cursor()
# Ten most recent sensor readings, newest first.
c.execute("SELECT * FROM sensor_data ORDER BY timestamp DESC LIMIT 10")
res = c.fetchall()
print("Latest Data:")
for r in res:
    # Assumed row layout: (unix timestamp, temperature C, activity) -- confirm against the schema.
    t = datetime.fromtimestamp(r[0])
    print("Time: ", t, " TempC: ", round(r[1], 2), " Acti:", r[2])
|
"""An evaluation defines how we go from trials per subject and session to a
generalization statistic (AUC score, f-score, accuracy, etc) -- it can be
either within-recording-session accuracy, across-session within-subject
accuracy, across-subject accuracy, or other transfer learning settings."""
# flake8: noqa
from .evaluations import (
CrossSessionEvaluation,
CrossSubjectEvaluation,
WithinSessionEvaluation,
)
from .utils import create_save_path, save_model_cv, save_model_list
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_bcrypt import Bcrypt
from flask_bootstrap import Bootstrap
import psycopg2
db = SQLAlchemy()
bcrypt = Bcrypt()
login_manager = LoginManager()
# Import Blueprints Here
from src.user.routes import upgrade_blueprint
def create_app(config_filename=None):
    """Application factory: build and configure the Flask app.

    :param config_filename: accepted for API compatibility; currently unused.
    :return: the configured Flask application.
    """
    import os  # local import keeps this factory self-contained

    app = Flask(__name__, template_folder='templates')
    # SECURITY: the secret key and database credentials were hard-coded in
    # source (and are now effectively leaked). They are read from the
    # environment first, falling back to the previous literals so existing
    # deployments keep working -- rotate the database password and set
    # SECRET_KEY / DATABASE_URL instead of relying on these defaults.
    app.config['SECRET_KEY'] = os.environ.get('SECRET_KEY', 'sa,ple_secret_key')
    app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get(
        'DATABASE_URL',
        'postgresql://mqegptxnpleilm:84cc6f5c616096e414e520b93216f874394d99c3ef9f48da45a8e1a097f4e77a@ec2-52-45-73-150.compute-1.amazonaws.com:5432/d8f8oqefs37jrp',
    )
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    Bootstrap(app)  # flask-bootstrap registers itself on the app; no handle needed
    # Initialize app
    db.init_app(app)
    print("initialized db")
    app.register_blueprint(upgrade_blueprint)
    return app
|
# -*- encoding: utf-8 -*-
#from flask_sqlalchemy import SQLAlchemy
#db = SQLAlchemy()
from sqlalchemy import Column, Integer, Text
from database import db
from database import Base
class Paste(Base):
    """ORM model for one stored paste (table 'paste')."""
    __tablename__ = 'paste'
    # Auto-incrementing primary key.
    id = Column(Integer, primary_key = True)
    #hash = db.Column(db.String(32), unique = True)
    # Raw paste body.
    data = Column(Text())
    def __init__(self):
        # NOTE(review): an explicit `return None` from __init__ is a no-op;
        # the instance's fields are populated later via put().
        return None
    def __repr__(self):
        return '<Paste %r>' % (self.id)
        #return "<Paste :%s, %s>" % (self.id, self.data)
    def get(self, id):
        # Fetch a paste by primary key. Assumes a `query` property is attached
        # to Base (Flask-SQLAlchemy style) -- confirm in database.py.
        return Paste.query.filter_by(id=id).first()
    def get_id(self):
        return self.id
    def get_data(self):
        return self.data
    def put(self, data):
        # Persist *data* as this paste's body and commit immediately.
        self.data = data
        db.add(self)
        db.commit()
        return self.id
|
#!/usr/bin/python3
import spaceking.net as net
import spaceking.server.net as sv_net
import spaceking.common as com
import spaceking.game as game
import spaceking.log as log
import spaceking.event as ev
import asyncio
import socket
class GameServer(net.NetObserverMixin):
    """Game server: accepts clients, tracks players, and broadcasts game state.

    Network events are routed to the handle_* coroutines registered in
    __init__; `_process` pushes a full state update to every player each tick.
    """

    # Seconds between periodic game-state broadcasts.
    TICK_RATE = 1

    def __init__(self):
        super().__init__()
        self._loop = asyncio.get_event_loop()
        self._con = sv_net.ServerConnection(loop=self._loop)
        # Route network events to the coroutine handlers below.
        self.register_handler(net.EVT_PACKET_RECV, self.handle_packet_recv)
        self.register_handler(sv_net.EVT_SERVER_NEW_CLIENT,
                              self.handle_new_client)
        self.register_handler(sv_net.EVT_SERVER_DISCONNECTED_CLIENT,
                              self.handle_client_disconnect)
        self.register_handler(sv_net.EVT_SERVER_QUIT,
                              self.handle_connection_lost)
        self._players = {}      # uid -> game.Player
        self._entities = {}     # uid -> entity (currently the players themselves)
        self._gamestates = {}   # uid -> list of GameState snapshots
        self._level = "abyss"

    @asyncio.coroutine
    def handle_connection_lost(self, event):
        """The server connection died; currently a no-op."""
        # XXX: Wat do when connection dies. Reopen? Ignore event?
        pass

    @asyncio.coroutine
    def handle_packet_recv(self, event):
        """Dispatch one received packet according to its packet type."""
        # TODO: Add error handling for data stream reads. We can't assume data.
        log.debug("packets: {0}".format(event.pkt))
        success = True
        pkt_type, data = net.pkt_split(event.pkt)
        if pkt_type == game.PKT_CLIENT_MOVED:
            direction, data = com.read_ubyte(data)
            success = self._players[event.uid].move(direction) and success
        elif pkt_type == game.PKT_CLIENT_CINIT:
            # Bug fix: this branch previously compared against `pkt_Type`
            # (capital T), an undefined name, so every client-init packet
            # raised NameError instead of setting the player's name.
            player = self._players[event.uid]
            name_len, data = com.read_ubyte(data)
            player.name, data = com.read_nubytes(data, name_len)
        else:
            log.debug("No packet handler for type={0}".format(pkt_type))

    @asyncio.coroutine
    def handle_new_client(self, event):
        """Register a new player and send it its uid plus the initial world."""
        uid = event.uid
        log.info("New client connected. uid={0}".format(uid))
        player = game.Player(uid)
        self._players[uid] = player
        self._entities[uid] = player
        self._gamestates[uid] = []
        yield from self._send_packet(game.PKT_SERVER_CINIT,
                                     com.int_to_ubyte(uid))
        yield from self._send_update(uid, game.PKT_WORLD_INIT)

    @asyncio.coroutine
    def handle_client_disconnect(self, event):
        """Drop the player and entity entries of a disconnecting client."""
        # NOTE(review): the _gamestates entry is intentionally left in place
        # here (matching the original behavior) -- confirm whether the state
        # history should also be discarded.
        log.info("Client disconnected. uid={0}".format(event.uid))
        del self._players[event.uid]
        del self._entities[event.uid]

    def _create_gamestate(self, uid):
        """Snapshot current entities into a new GameState recorded for *uid*."""
        gamestate = game.GameState(self._entities)
        self._gamestates[uid].append(gamestate)
        return gamestate

    @asyncio.coroutine
    def _send_update(self, uid, sv_command):
        """Marshal a fresh game state and send it to *uid* under *sv_command*."""
        gamestate = self._create_gamestate(uid)
        state_data = gamestate.marshall()
        yield from self._send_packet(sv_command, state_data, uid=uid)

    @asyncio.coroutine
    def _process(self):
        """Broadcast the game state to every player once per tick, forever."""
        while True:
            for uid, player in self._players.items():
                yield from self._send_update(uid, game.PKT_GAME_STATE)
            yield from asyncio.sleep(GameServer.TICK_RATE, loop=self._loop)

    def run(self):
        """Start the connection and the periodic update task, then block on the loop."""
        self._con.start_server()
        self._con.register_observer(self)
        self.register_observer(self._con)
        self._process_task = asyncio.Task(self._process(), loop=self._loop)
        self._loop.run_forever()
        self._quit()

    def _quit(self):
        """Close the event loop; subsequent calls only log a debug message."""
        log.info("Shutting down game server.")
        if self._loop:
            self._loop.close()
            self._loop = None
        else:
            log.debug("Calling quit on a dead server.")
def main():
    """Set up logging and run the game server until the loop stops."""
    # NOTE(review): role=log.ROLE_CLIENT looks wrong for a server entry
    # point -- confirm whether ROLE_SERVER was intended.
    log_ctx = log.LoggingContext(log.server_log_targets, role=log.ROLE_CLIENT)
    log.init_log(log_ctx)
    sv = GameServer()
    sv.run()
if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
import io
import time
import homie
import logging
import threading
import json
import base64
from PIL import Image
import requests
import websocket
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
# Quiet down chatty third-party loggers; keep this module's logger at DEBUG.
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("homie").setLevel(logging.WARNING)
from modules.homiedevice import HomieDevice
from modules.mysql import db
class Daapd(HomieDevice):
_first = True
_playing = False
_progress = 0
_last_time = ''
def on_message(self, message):
js = json.loads(message)
notify = {
"player": self.get_player,
"queue": self.get_queue,
"volume": self.get_volume,
"outputs": self.get_outputs,
}
if "notify" in js:
for ty in js["notify"]:
if ty in notify:
notify[ty]()
def on_open(self):
self._ws.send(json.dumps({
"notify": [
"player",
"outputs",
"volume",
"queue",
]
}))
def on_error(self, error):
logger.warning("daapd: {error}".format(error=str(error)))
def on_close(self):
logger.warning("daapd: websocket closed")
def setup(self):
self._options = {}
config = self._db.pq("""SELECT name, value FROM options WHERE name IN ('daapd_url', 'daapd_port')""")
if len(config):
for row in config:
self._options[row['name']] = row['value']
self._clock = self._homie.Node("clock", "time")
self._clock.advertise("time")
self._library = self._homie.Node("library", "daapd")
self._library.advertise("playlists")
self._meta = self._homie.Node("meta", "daapd")
self._meta.advertise("artist")
self._meta.advertise("aartist")
self._meta.advertise("album")
self._meta.advertise("track")
self._meta.advertise("progress")
self._meta.advertise("length")
self._cover = self._homie.Node("cover", "daapd")
self._cover.advertise("image")
self._cover.advertise("thumb")
self._cover.advertise("mime")
self._status = self._homie.Node("status", "daapd")
self._status.advertise("playing").settable(self.playing_handler)
self._status.advertise("next").settable(self.next_handler)
self._status.advertise("previous").settable(self.previous_handler)
self._queue = self._homie.Node("queue", "daapd")
self._queue.advertise("queue")
self._queue.advertise("add").settable(self.queue_add_handler)
self._queue.advertise("play").settable(self.queue_play_handler)
self._volume = self._homie.Node("volume", "daapd")
self._volume.advertise("volume").settable(self.volume_handler)
self._outputs = self._homie.Node("outputs", "daapd")
self._outputs.advertise("outputs")
self._outputs.advertise("enable").settable(self.out_enable_handler)
self._outputs.advertise("volume").settable(self.out_volume_handler)
conf = self.client('/api/config')
if conf.status_code != 200:
raise Exception("Couldnt get config")
self._config = conf.json()
path = "ws://"+self._options["daapd_url"]+":"+str(self._config["websocket_port"])
self._ws = websocket.WebSocketApp(path,
on_message=self.on_message, on_open=self.on_open, on_error=self.on_error, on_close=self.on_close, subprotocols=["notify"])
self._ws_thread = threading.Thread(target=self._ws.run_forever)
self._ws_thread.daemon = True
self._ws_thread.start()
self._tick_running = True
self._tick_thread = threading.Thread(target=self._tick)
self._tick_thread.daemon = True
self._tick_thread.start()
self._clock_running = True
self._clock_thread = threading.Thread(target=self._clock_fn)
self._clock_thread.daemon = True
self._clock_thread.start()
def loopHandler(self):
if self._first and self._homie.mqtt_connected:
self.get_playlists()
self.get_player()
self.get_queue()
self.get_outputs()
self._first = False
time.sleep(0.1)
def _clock_fn(self):
while self._clock_running:
tm = time.strftime("%a %H:%M")
if tm != self._last_time:
self._clock.setProperty("time").send(tm)
self._last_time = tm
time.sleep(1)
def shutdown(self):
logger.info("Waiting for threads to end")
self._ws.close()
self._ws_thread.join()
self._tick_running = False
self._tick_thread.join()
def client(self, path, method='get', **kwargs):
    """Issue an HTTP request against the daapd REST API.

    *path* is appended to the configured base URL; *method* names the
    ``requests`` function to call ('get', 'put', 'post', ...).  Extra
    keyword arguments are forwarded to requests unchanged.
    """
    return getattr(requests, method)("http://"+self._options["daapd_url"]+":"+str(self._options["daapd_port"])+path, **kwargs)
def get_playlists(self):
    """Fetch the library playlists from daapd and publish them as JSON."""
    resp = self.client("/api/library/playlists")
    if resp.status_code != 200:
        return
    payload = {
        "playlists": [
            {"name": item["name"], "uri": item["uri"]}
            for item in resp.json()["items"]
        ]
    }
    self._library.setProperty("playlists").send(json.dumps(payload))
def get_queue(self):
    """Fetch the current play queue and publish it as JSON (if non-empty)."""
    resp = self.client("/api/queue")
    if resp.status_code != 200:
        return
    queue = resp.json()
    if not queue["items"]:
        return
    entries = [
        {
            "id": entry["id"],
            "uri": entry["uri"],
            "artist": entry["artist"],
            "album": entry["album"],
            "title": entry["title"],
        }
        for entry in queue["items"]
    ]
    self._queue.setProperty("queue").send(json.dumps({ "items": entries }))
def get_player(self):
    """Publish the current player state: play flag, volume, track length and
    progress, cover artwork, and metadata of the currently playing item.
    """
    resp = self.client("/api/player")
    if resp.status_code != 200:
        return
    player = resp.json()
    self._playing = player["state"] == 'play'
    self._status.setProperty('playing').send(1 if self._playing else 0)
    self._volume.setProperty("volume").send(player['volume'])
    # daapd reports times in milliseconds; publish seconds.
    self._meta.setProperty("length").send(player["item_length_ms"]/1000)
    self._meta.setProperty("progress").send(player["item_progress_ms"]/1000)
    self._progress = player["item_progress_ms"]/1000
    if "artwork_url" in player:
        artwork = self.client(player["artwork_url"])
        if artwork.status_code == 200:
            self._cover.setProperty("mime").send(artwork.headers['Content-Type'])
            self._cover.setProperty("image").send(base64.b64encode(artwork.content))
            self._cover.setProperty("thumb").send(self._generate_thumb(artwork.content))
    # Look the playing item up in the queue to publish its metadata.
    resp = self.client("/api/queue")
    if resp.status_code != 200:
        return
    queue = resp.json()
    for item in queue["items"]:
        if item["id"] == player["item_id"]:
            # .encode('ascii', 'ignore') strips non-ASCII and sends bytes --
            # presumably what the MQTT layer expects; confirm against Homie lib.
            self._meta.setProperty("artist").send(item["artist"].encode('ascii','ignore'))
            self._meta.setProperty("aartist").send(item["album_artist"].encode('ascii','ignore'))
            self._meta.setProperty("album").send(item["album"].encode('ascii','ignore'))
            self._meta.setProperty("track").send(item["title"].encode('ascii','ignore'))
            break
def _generate_thumb(self, binary, quality=60):
    """Return a base64-encoded JPEG thumbnail (max 200x200) of *binary*.

    If the encoded result exceeds 30000 bytes, the JPEG quality is halved
    up to five times until it fits.  Returns None for empty input.

    Fix: Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the
    same resampling filter (ANTIALIAS was an alias for it).
    """
    if not binary:
        return None
    img = Image.open(io.BytesIO(binary))
    img.thumbnail([200, 200], Image.LANCZOS)
    buf = io.BytesIO()
    img.save(buf, format='JPEG', subsampling=0, quality=quality)
    b64 = base64.b64encode(buf.getvalue())
    if len(b64) > 30000:
        for _ in range(5):
            quality = quality * 0.5
            buf = io.BytesIO()
            img.save(buf, format='JPEG', subsampling=0, quality=int(quality))
            b64 = base64.b64encode(buf.getvalue())
            if len(b64) < 30000:
                break
    return b64
def get_volume(self):
    """Refresh volume state by re-publishing player and output info."""
    self.get_player()
    self.get_outputs()
def get_outputs(self):
    """Fetch the audio outputs from daapd and publish them as JSON."""
    resp = self.client("/api/outputs")
    if resp.status_code != 200:
        return
    devices = [
        {
            "id": device["id"],
            "name": device["name"],
            "selected": device["selected"],
            "volume": device["volume"],
        }
        for device in resp.json()["outputs"]
    ]
    self._outputs.setProperty("outputs").send(json.dumps({ "outputs": devices }))
def playing_handler(self, mqttc, obj, msg):
    """MQTT callback: start or stop playback based on the message payload."""
    wants_play = msg.payload.decode("UTF-8") == "1"
    endpoint = '/api/player/play' if wants_play else '/api/player/stop'
    self.client(endpoint, method="put")
def next_handler(self, mqttc, obj, msg):
    """MQTT callback: skip to the next track."""
    self.client('/api/player/next', method="put")

def previous_handler(self, mqttc, obj, msg):
    """MQTT callback: jump back to the previous track."""
    self.client('/api/player/previous', method="put")
def queue_play_handler(self, mqttc, obj, msg):
    """MQTT callback: start playback of the queue item id in the payload."""
    item_id = msg.payload.decode("UTF-8")
    # (dropped the unused `resp =` binding -- the response was never read)
    self.client('/api/player/play', method="put", params={
        "item_id": item_id
    })

def queue_add_handler(self, mqttc, obj, msg):
    """MQTT callback: replace the queue with the URI(s) in the payload,
    then start playback."""
    uris = msg.payload.decode("UTF-8")
    # "clear": "true" replaces the current queue instead of appending.
    self.client('/api/queue/items/add',
        method='post',
        params={
            "clear": "true",
            "uris": uris,
        }
    )
    self.client('/api/player/play', method="put")
def volume_handler(self, mqttc, obj, msg):
    """MQTT callback: set the master volume from the message payload."""
    level = msg.payload.decode("UTF-8")
    self.client('/api/player/volume', method="put", params={"volume": level})
def out_volume_handler(self, mqttc, obj, msg):
    """MQTT callback: set one output's volume; payload is "<id>,<volume>".

    Fix: the bare `except:` also swallowed SystemExit/KeyboardInterrupt;
    narrowed to `except Exception` (still logs bad payloads / API errors).
    """
    payload = msg.payload.decode("UTF-8")
    try:
        oid, volume = payload.split(',')
        self.client('/api/outputs/{id}'.format(id=oid), method='put', json={
            "volume": int(volume)
        })
    except Exception:
        logger.warning('out_volume_handler: malformed payload {pl}'.format(pl=payload))

def out_enable_handler(self, mqttc, obj, msg):
    """MQTT callback: enable/disable one output; payload is "<id>,<0|1>".

    Fix: bare `except:` narrowed to `except Exception` (see above).
    """
    payload = msg.payload.decode("UTF-8")
    try:
        oid, enabled = payload.split(',')
        self.client('/api/outputs/{id}'.format(id=oid), method='put', json={
            'selected': enabled == '1'
        })
    except Exception:
        logger.warning('out_enable_handler: malformed payload {pl}'.format(pl=payload))
def _tick(self):
    """Progress thread body: while playing and connected, advance the local
    progress counter (seconds) and publish it once per second."""
    logger.info('Starting tick thread')
    while self._tick_running:
        if self._playing and self._homie.mqtt_connected:
            self._progress += 1
            self._meta.setProperty("progress").send(self._progress)
        time.sleep(1)
def main():
    """Entry point: wire up the Homie MQTT bridge and run the daapd loop."""
    d = db()
    config = homie.loadConfigFile("configs/daapd.json")
    Homie = homie.Homie(config)
    daapd = Daapd(d, Homie)
    Homie.setFirmware("daapd", "1.0.0")
    Homie.setup()
    try:
        while True:
            daapd.loopHandler()  # sleeps ~0.1s per iteration
    finally:
        daapd.shutdown()  # always stop background threads on exit

if __name__ == '__main__':
    try:
        main()
    except (KeyboardInterrupt, SystemExit):
        logger.info("Quitting.")
|
from sublime_lib._util.named_value import NamedValue
from unittest import TestCase
class TestNamedValue(TestCase):
    """Tests for sublime_lib's NamedValue helper."""

    def test_named_value(self):
        text = "Hello, World!"
        self.assertEqual(repr(NamedValue(text)), text)
|
import numpy as np
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn import datasets
from sklearn.metrics import mean_squared_error, explained_variance_score
from sklearn.utils import shuffle
import matplotlib.pyplot as plt
# 1.9 Estimating housing prices
# 1.9.2.02 Load the housing price dataset
# NOTE(review): datasets.load_boston() was deprecated in scikit-learn 1.0
# and removed in 1.2 -- this script requires an older scikit-learn.
housing_data = datasets.load_boston()
# 1.9.2.03 Shuffle the sample ordering (fixed seed for reproducibility)
X, y = shuffle(housing_data.data, housing_data.target, random_state=7)
# 1.9.2.04 Split the data into training and test sets (80/20)
num_training = int(0.8 * len(X))
X_train, y_train = X[:num_training], y[:num_training]
X_test, y_test = X[num_training:], y[num_training:]
# 1.9.2.05 Fit a decision tree capped at depth 4 so it cannot grow arbitrarily deep
dt_regressor = DecisionTreeRegressor(max_depth=4)
dt_regressor.fit(X_train, y_train)
# 1.9.2.06 Fit an AdaBoost ensemble of depth-4 decision trees
ab_regressor = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4), n_estimators=400, random_state=7)
ab_regressor.fit(X_train, y_train)
# 1.9.2.07 Evaluate the plain decision tree regressor
y_pred_dt = dt_regressor.predict(X_test)
mse = mean_squared_error(y_test, y_pred_dt)
evs = explained_variance_score(y_test, y_pred_dt)
print("\n#### Decision Tree performance ####")
print("Mean squared error = ", round(mse, 2))
print("Explained variance score = ", round(evs, 2))
# 1.9.2.08 Evaluate the improvement from AdaBoost
y_pred_ab = ab_regressor.predict(X_test)
mse = mean_squared_error(y_test, y_pred_ab)
evs = explained_variance_score(y_test, y_pred_ab)
print("\n#### AdaBoost performance ####")
print("Mean squared error = ", round(mse, 2))
print("Explained variance score = ", round(evs, 2))
# 1.10 Computing relative feature importance
# 1.10.02 Plotting helper
def plot_feature_importances(feature_importances, title, feature_names):
    """Draw a bar chart of *feature_importances*, scaled so the largest
    value is 100 and sorted from most to least important."""
    scaled = 100.0 * (feature_importances / max(feature_importances))
    order = np.flipud(np.argsort(scaled))
    # Center the x-axis tick labels under the bars.
    positions = np.arange(order.shape[0]) + 0.5
    plt.figure()
    plt.bar(positions, scaled[order], align='center')
    plt.xticks(positions, feature_names[order])
    plt.ylabel('Relative Importance')
    plt.title(title)
    plt.show()

# 1.10.01 Plot importances for both fitted models
plot_feature_importances(dt_regressor.feature_importances_, 'Decision Tree regressor', housing_data.feature_names)
plot_feature_importances(ab_regressor.feature_importances_, 'AdaBoost regressor', housing_data.feature_names)
|
#!/usr/bin/env python
import socket
import time
import sys
import subprocess
from multiprocessing import Process
global clientName
clientName = ""
""" http://docs.python.org/2.7/library/socket.html """
""" returns socket s that is connected to port at ip_connect """
def connect_to(ip_connect, port):
    """Return a socket connected to (ip_connect, port), or None on failure.

    Tries every address family / socket type that getaddrinfo reports for
    the host (IPv4 and IPv6) until one both creates and connects.
    NOTE(review): Python 2 code; also, if getaddrinfo returns no results,
    `s` is unbound and the final check raises NameError -- confirm callers
    always pass resolvable hosts.
    """
    for res in socket.getaddrinfo(ip_connect, port, socket.AF_UNSPEC, socket.SOCK_STREAM):
        af, socktype, proto, canonname, sa = res
        try:
            s = socket.socket(af, socktype, proto)
        except socket.error as msg:
            s = None
            continue
        try:
            s.connect(sa)
        except socket.error as msg:
            s.close()
            s = None
            continue
        break
    if s is None:
        print 'could not open connection'
        return None
    return s
def main():
port = 9999
""" set up parameters needed """
hostname = raw_input("Enter a username : ")
"""ip_connect = "192.168.0.106"""
ip_connect = raw_input("Enter IP to connect to: ")
"""print "Connecting to " + ip_connect"""
""" make the connection on unused port"""
s = connect_to(ip_connect, port)
while True:
if s is None:
print 'port', port, 'busy, trying', port - 1
port =- 1
s = connect_to(ip_connect, port)
else:
break
s.sendall(hostname)
clientName = s.recv(1024)
subprocess.call(["clear"])
print 'Connected to', clientName
print 'To exit chat, type', repr('.logout')
toSend = None
while 1:
toSend = raw_input('>')
if toSend == ".logout": break
s.sendall(toSend)
print clientName, ":", s.recv(1024)
s.close()
print 'connection with ' + clientName + ' closed'
if __name__ == "__main__":
main()
|
import os
from day_9 import main
# Path to the shared fixture file that sits next to this test module.
current_dir = os.path.dirname(os.path.abspath(__file__))
test_input_file = os.path.join(current_dir, 'test_input.txt')

def test_invalid_xmas():
    """First number that is not the sum of two of the 5 preceding ones
    (Advent of Code 2020, day 9 part 1)."""
    test_preable_list, test_number_list = main.convert_input(test_input_file, 5)
    invalid_number = main.search_invalid_xmas(test_preable_list, test_number_list)
    assert invalid_number == 127

def test_break_xmas():
    """Contiguous range summing to the invalid number (day 9 part 2)."""
    test_preable_list, test_number_list = main.convert_input(test_input_file, 5)
    input_list = test_preable_list + test_number_list
    assert main.break_xmas(input_list, 127) == (15, 25, 47, 40)
|
from datetime import datetime
from pytz import timezone
from flask import current_app
from sqlalchemy.orm.exc import NoResultFound, FlushError
from sqlalchemy.exc import IntegrityError
from sqlalchemy import text
import phonenumbers
from phonenumbers import PhoneNumberFormat
from twilio.rest import Client
from .models import IOU, Person, person_to_person
from .database import db
TIMEZONE = timezone('America/Chicago')
class MessageError(Exception):
    """Error raised while handling an inbound SMS.

    Carries the human-readable *message* plus the *from_number* the reply
    should be sent back to.
    """

    def __init__(self, message, from_number):
        self.from_number = from_number
        self.message = message

    def __str__(self):
        return self.message
# Raised when the sender is not allowed to perform an action.
# NOTE(review): shadows the builtin PermissionError within this module;
# renaming would break existing `except PermissionError` callers, so it is
# left as-is and only flagged here.
class PermissionError(MessageError):
    pass
class IOUHandler(object):
    """Parse an inbound SMS *message* from *from_number* and perform the
    matching IOU action: balance inquiry, IOU recording, or adding a person.
    """

    def __init__(self, message, from_number):
        self.message = message.strip()
        self.from_number = from_number

    def handle(self):
        """Dispatch on the message text.

        Returns a reply string, or None when the message matches no
        known command.
        """
        if self.message.lower().startswith('how much'):
            return self.inquiry()
        elif 'owe' in self.message.lower():
            return self.addIOU()
        elif self.message.lower().startswith('add'):
            return self.addPerson()

    def addPerson(self):
        '''
        Add a new person and register them as a friend of the sender.
        Admin-only.

        Example: "Add Eric 3125555555"
        '''
        if self.fromAdmin():
            try:
                _, name, number = self.message.split(' ', 2)
            except ValueError:
                raise MessageError('"Add person" message should '
                                   'look like: "Add <name> <phone number>"',
                                   self.from_number)
            phone_number = self.validatePhoneNumber(number)
            person = Person(name=name,
                            phone_number=phone_number)
            try:
                db.session.add(person)
                db.session.commit()
            except IntegrityError:
                # Phone number already exists; roll back the failed insert.
                # NOTE(review): the format string only uses {1}; {0} (name)
                # is passed but unused.
                db.session.rollback()
                raise MessageError('A person with the phone '\
                    'number {1} already exists'.format(name, phone_number),
                    self.from_number)
            # Record the sender -> new person friendship under the given alias.
            friend = person_to_person.insert().values(from_phone=self.from_number,
                                                      to_phone=phone_number,
                                                      alias=name.lower())
            try:
                db.session.execute(friend)
                db.session.commit()
            except IntegrityError:
                # Sender already has a friend with that alias; report who.
                db.session.rollback()
                real_friend = db.session.query(person_to_person)\
                                        .filter(person_to_person.c.alias == name.lower())\
                                        .filter(person_to_person.c.from_phone == self.from_number)\
                                        .one()
                raise MessageError('You already have a friend named {0} '
                                   'with the number {1}'.format(name, real_friend.to_phone),
                                   self.from_number)
            return '"{name}" with phone number {number} successfully added'.format(name=name,
                                                                                   number=phone_number)
        else:
            raise PermissionError("Sorry, you can't do that", self.from_number)

    def addIOU(self):
        '''
        Record an IOU between the sender and a friend.

        Example: "Eric owes Kristi $100"
                 "I owe Kristi $75"
                 "Kristi owes me $50"
        '''
        ower_name, owee_name, amount, reason = self.parseMessage()
        try:
            amount = amount.replace('$', '')
            amount = float(amount)
        except ValueError:
            raise MessageError('Amount "{}" should be a number'.format(amount),
                               self.from_number)
        sender, receiver, sent_from_ower = self.findRelationship(ower_name, owee_name)
        # Only participants may record an IOU.
        if self.from_number not in [sender.phone_number, receiver.phone_number]:
            raise MessageError("Sorry, you can't record IOUs"
                               "that you are not part of", self.from_number)
        date_added = TIMEZONE.localize(datetime.now())
        # Orient the IOU depending on whether the sender is the ower.
        if sent_from_ower:
            iou = IOU(ower=sender,
                      owee=receiver,
                      date_added=date_added,
                      amount=float(amount),
                      reason=reason)
        else:
            iou = IOU(ower=receiver,
                      owee=sender,
                      date_added=date_added,
                      amount=float(amount),
                      reason=reason)
        db.session.add(iou)
        db.session.commit()
        return self.balance(iou.ower, iou.owee)

    def parseMessage(self):
        """Split an IOU message into (ower_name, owee_name, amount, reason).

        The reason defaults to 'General' when no " for " clause is present.
        """
        try:
            if ' for ' in self.message.lower():
                people, reason = self.message.lower().split(' for ')
                ower_name, owee = people.replace('owes', 'owe').split('owe')
            else:
                ower_name, owee = self.message.lower().replace('owes', 'owe').split('owe')
                reason = 'General'
            owee = owee.strip()
            ower_name = ower_name.strip()
            # The amount is the last whitespace-separated token.
            owee_name, amount = owee.rsplit(' ', 1)
        except ValueError:
            raise MessageError('IOU message should look like: '
                               '"<name> owes <name> <amount> for <reason>"',
                               self.from_number)
        return ower_name, owee_name, amount, reason

    def inquiry(self):
        """
        Example: "How much does Eric owe Kristi?"
                 "How much do I owe Kristi?"
                 "How much does Kristi owe me?"
        """
        try:
            _, ower_name, _, owee_name = self.message.lower().rsplit(' ', 3)
        except ValueError:
            raise MessageError('Balance inquiry should look like '
                               '"How much does <person 1 name> owe '
                               '<person 2 name>?"', self.from_number)
        sender, receiver, sent_from_ower = self.findRelationship(ower_name,
                                                                 owee_name.replace('?', ''))
        if sent_from_ower:
            return self.balance(sender, receiver)
        else:
            return self.balance(receiver, sender)

    def balance(self, ower, owee):
        """Return a human-readable net-balance message between two people."""
        owes = IOU.query.filter(IOU.ower == ower)\
                        .filter(IOU.owee == owee).all()
        ower_total = sum(o.amount for o in owes)
        owed = IOU.query.filter(IOU.ower == owee)\
                        .filter(IOU.owee == ower).all()
        owee_total = sum(o.amount for o in owed)
        # Net balance, truncated to whole dollars.
        balance = int(ower_total - owee_total)
        fmt_args = {
            'ower': ower.name.title(),
            'owee': owee.name.title(),
            'balance': balance,
        }
        if balance == 0:
            message = '{ower} and {owee} are now even'.format(**fmt_args)
        elif balance > 0:
            message = '{ower} now owes {owee} ${balance}'.format(**fmt_args)
        elif balance < 0:
            fmt_args['balance'] = abs(fmt_args['balance'])
            message = '{owee} now owes {ower} ${balance}'.format(**fmt_args)
        return message

    def findRelationship(self, ower_name, owee_name):
        """Resolve the other party's alias to a Person.

        Returns (sender, receiver, sent_from_ower).
        NOTE(review): if neither name refers to the sender ('i'/'me'/their
        own name) and the names differ, *alias* is never bound and an
        UnboundLocalError is raised -- confirm callers guarantee the sender
        is one of the two parties.
        """
        sender = Person.query.get(self.from_number)
        sent_from_ower = False
        if ower_name == owee_name:
            alias = ower_name
        elif ower_name in ['i', sender.name]:
            alias = owee_name
            sent_from_ower = True
        elif owee_name in ['me', sender.name]:
            alias = ower_name
        try:
            relationship = db.session.query(person_to_person)\
                                     .filter(person_to_person.c.from_phone == self.from_number)\
                                     .filter(person_to_person.c.alias == alias.lower())\
                                     .one()
        except NoResultFound:
            raise MessageError('"{0}" not found. '
                               'You can add this person '
                               'by texting back "Add {0} '
                               '<their phone number>'.format(alias),
                               self.from_number)
        receiver = Person.query.get(relationship.to_phone)
        return sender, receiver, sent_from_ower

    def validatePhoneNumber(self, phone_number):
        """Return *phone_number* normalized to E.164, or raise MessageError."""
        parsed = phonenumbers.parse(phone_number, 'US')
        if not phonenumbers.is_valid_number(parsed):
            raise MessageError('"{}" is not a valid '
                               'phone number'.format(phone_number),
                               self.from_number)
        return phonenumbers.format_number(parsed, PhoneNumberFormat.E164)

    def fromAdmin(self):
        """Return the sender's admin flag, or None if the sender is unknown."""
        admin = db.session.query(Person)\
                          .filter(Person.phone_number == self.from_number)\
                          .first()
        if admin:
            return admin.admin
def sendTwilioResponse(message, to_number):
    """Send *message* as an SMS to *to_number* via Twilio.

    Credentials and the sending number come from the Flask app config.
    """
    account_id = current_app.config['TWILIO_ACCOUNT_ID']
    auth_token = current_app.config['TWILIO_AUTH_TOKEN']
    from_number = current_app.config['TWILIO_NUMBER']
    client = Client(account_id, auth_token)
    # NOTE(review): rebinding *message* to the Twilio message object is
    # harmless here, but shadows the text parameter.
    message = client.messages.create(to=to_number,
                                     from_=from_number,
                                     body=message)
|
#-*- coding:utf8 -*-
import sys
import time
import datetime
import json
import urllib
import pycurl
from django.core.management import setup_environ
import settings
setup_environ(settings)
from celery import group
from shopapp.tmcnotify.models import TmcMessage,TmcUser,DEFAULT_GROUP_NAME
from shopapp.tmcnotify.tasks import ProcessMessageTask
from auth import apis
import logging
logger = logging.getLogger('notifyserver.handler')
class EmptyMessage(Exception):
    """Raised when the TMC consume API returns no messages to process."""

    def __init__(self, msg=''):
        self.msg = msg

    def __str__(self):
        return self.msg
class NotifyCommand():
    """Daemon that consumes Taobao TMC messages for one group and hands
    them to the celery ProcessMessageTask (Python 2 code)."""
    c = None
    group_name = None
    user = None
    messageProcessor = ProcessMessageTask()

    def __init__(self, group_name=DEFAULT_GROUP_NAME):
        self.group_name = group_name
        self.user = self.getPrimaryUser(group_name)

    def handle_daemon(self, *args, **options):
        """Consume messages forever; back off briefly on empty queues and
        longer on unexpected errors."""
        if not self.user:
            return
        while 1:
            try:
                self.consume_message()
            except EmptyMessage:
                # No messages available; sleep before polling again.
                # NOTE(review): the original comment said 30s but the code
                # sleeps 15s.
                time.sleep(15)
            except Exception, exc:
                logger.error(u'淘宝消息服务错误:%s' % exc.message, exc_info=True)
                # Sleep 5 minutes after an unexpected failure.
                time.sleep(60 * 5)

    def getPrimaryUser(self, group_name):
        """Return the primary TmcUser of *group_name*, falling back to the
        first valid user; None if the group has no valid users."""
        users = TmcUser.valid_users.filter(group_name=group_name)
        if users.count() == 0:
            return None
        try:
            return users.get(is_primary=True)
        except:
            return users[0]

    def getTotalResults(self, response):
        """Return the total_results field of a consume response."""
        return response['tmc_messages_consume_response'].get('total_results')

    def getMessageFromResp(self, response):
        """Extract the message list from a consume response.

        Raises EmptyMessage when the response carries no messages.
        """
        if not response['tmc_messages_consume_response'].get('messages'):
            raise EmptyMessage(u'暂没有消息可消费')
        return response['tmc_messages_consume_response']['messages']['tmc_message']

    def consume_message(self):
        """Pull one batch of messages from the TMC API and process it."""
        response = apis.taobao_tmc_messages_consume(
            group_name=self.group_name,
            quantity=self.user.quantity,
            tb_user_id=self.user.user_id)
        messages = self.getMessageFromResp(response)
        self.handle_message(messages)

    def handle_message(self, messages):
        """Process messages inline when DEBUG, otherwise fan out via celery."""
        if settings.DEBUG:
            for m in messages:
                print 'debug message:', m
                self.messageProcessor(m)
        else:
            group([self.messageProcessor.s(m) for m in messages]).apply_async()
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print >> sys.stderr, "usage: python *.py <group_name>"
        # Bug fix: execution previously fell through after printing the
        # usage text and crashed with IndexError on sys.argv[1].
        sys.exit(1)
    c = NotifyCommand(group_name=sys.argv[1])
    c.handle_daemon()
    #c.handle_message(ms)
|
"""ST-Link Automation Example"""
import stlink
if __name__ == '__main__':
    # Enumerate attached ST-Link probes, then flash a fixed hex image.
    print(f'List of ST-Link: {stlink.findall()}')
    print('Flashing:...', end='')
    # NOTE(review): hard-coded Windows drive path; adjust for your setup.
    status, checksum = stlink.flash('G:\\test.hex')
    print(status)
|
from django.apps import AppConfig

class GameConnectionsConfig(AppConfig):
    """Django application configuration for the game_connections app."""
    name = 'game_connections'
|
import pyaudio
import wave
def record_for_time(audio_format, audio_channel, bitrate, chunk_size, record_time):
    """Record *record_time* seconds of audio and return the raw frame chunks."""
    pa = pyaudio.PyAudio()
    stream = pa.open(format=audio_format, channels=audio_channel, rate=bitrate,
                     input=True, frames_per_buffer=chunk_size)
    # Number of reads needed to cover the requested duration.
    n_chunks = int(bitrate / chunk_size * record_time)
    audioframes = [stream.read(chunk_size) for _ in range(n_chunks)]
    stream.stop_stream()
    stream.close()
    pa.terminate()
    return audioframes
def write_to_file(audioframes, audio_channel, bitrate, audio_format, file_name):
    """Write recorded audio frames to a WAV file at *file_name*.

    Bug fix: the sample width was taken from an undefined name ``audio``;
    use the module-level pyaudio.get_sample_size(audio_format) instead.
    """
    waveFile = wave.open(file_name, 'wb')
    waveFile.setnchannels(audio_channel)
    waveFile.setsampwidth(pyaudio.get_sample_size(audio_format))
    waveFile.setframerate(bitrate)
    waveFile.writeframes(b''.join(audioframes))
    waveFile.close()
|
# -*- coding: utf-8 -*-
import abc
"""
Clase abstracta para piezas, las piezas
(Torre, caballo, etc) herederán esta clase
y deberán impletementar los métodos
"""
class Pieza():
    """Abstract base class for chess pieces (Rook, Knight, etc.).

    Subclasses must implement get_movimientos().
    """
    # Bug fix: was "__metaclass_" (single trailing underscore), which
    # silently failed to apply ABCMeta.  This is the Python 2 metaclass
    # hook; on Python 3 the attribute is ignored.
    __metaclass__ = abc.ABCMeta

    def __init__(self, _current, color):
        """Create a piece at position *_current* (tuple) with *color*."""
        self._current = _current
        self.color = color

    @abc.abstractmethod
    def get_movimientos(self, tablero):
        """Return a list of tuples with the piece's possible moves."""
        return

    def get_posicion(self):
        """Return the current position as a tuple."""
        return self._current

    def set_posicion(self, act):
        """Update the piece position; *act* is the new position tuple."""
        self._current = act

    def get_color(self):
        """Return the piece color."""
        return self.color

    def enRango(self, n):
        """Return True if index *n* lies within the 8x8 board range."""
        return n <= 7 and n >= 0

    def getClass(self):
        """Return the name of the concrete class."""
        return self.__class__.__name__
|
# __author__ = 'cjweffort'
# -*- coding: utf-8 -*-
from numpy import *
from numpy.linalg import *
"""
5 Linear Algebra
"""
"""
5.1 Simple Array Operations
"""
a = array([[1.0, 2.0], [3.0, 4.0]])
a.transpose()
u = eye(2)
print u
j = array([[0.0, -1.0], [1.0, 0.0]])
dot(j, j)
print trace(u) #矩阵的迹
y = array([[5.], [7.]])
print solve(a, y) #求解方程组
print eig(j) #求解矩阵j的特征值和特征向量
"""
5.2 The Matrix Class
"""
A = matrix('1.0 2.0; 3.0 4.0')
print type(A)
print A.T
X = matrix('5.0 7.0')
Y = X.T
print A * Y #矩阵乘法
print A.I #求解矩阵的逆
print solve(A, Y) #求解方程组的解
"""
5.3 Indexing: Comparing Matrices and 2D Arrays
"""
#(1)
A = arange(12)
A.shape = (3, 4)
M = mat(A.copy())
print type(A), " ", type(M)
print A[:, 1]
print A[:, 1].shape
print M[:, 1]
print M[:, 1].shape
A[1:,].take([1, 3], axis = 1)#等价于print A[1:, [1,3]]
#(2)
print A[0,:] > 1
print A[:, A[0,:] > 1]
print M[0, :] > 1
print M[:, M[0,:] > 1]#选取出来的结果和用2D Arrays选取结果不一样
print M[:, M.A[0,:] > 1]
|
from orun.utils.deprecation import MiddlewareMixin
from .shortcuts import get_current_site
class CurrentSiteMiddleware(MiddlewareMixin):
    """
    Middleware that sets `site` attribute to request object.
    """

    def process_request(self, request):
        # Resolve the current site once per request and attach it.
        request.site = get_current_site(request)
|
import inspect, os, sys
import shutil
import subprocess
import random, string
import re
from shutil import copyfile
import base64
import json
NODE_BIN = '../node/node'
WALLET_BIN = '../wallet/wallet'
VERBOSE = False
def getCurrentDir():
    """Return the absolute directory containing this script."""
    this_file = inspect.getfile(inspect.currentframe())
    return os.path.dirname(os.path.abspath(this_file))
def CleanTestFolders():
    """Delete every sibling directory whose name starts with "test"
    (leftovers from previous test runs)."""
    # Delete all subfolders looking like prev tests
    curdir = getCurrentDir()
    dirs = os.listdir( curdir )
    for folder in dirs:
        if os.path.isdir(curdir + '/' + folder) and folder.startswith("test"):
            shutil.rmtree(curdir + '/' + folder)
    return True

def RemoveTestFolder(path):
    """Remove the given test folder tree."""
    curdir = getCurrentDir()
    shutil.rmtree(path)

def CreateTestFolder(suffix = ""):
    """Create a fresh "test<suffix><5 random chars>" folder next to this
    script and return its absolute path."""
    curdir = getCurrentDir()
    newfolder = 'test'+ suffix + ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(5))
    newfolder = curdir + '/' + newfolder
    os.makedirs(newfolder)
    return newfolder
def Execute(command, verbose = False):
if verbose:
commandtext = ' '.join(command)
print commandtext
sys.Exit(0)
res = subprocess.check_output(command)
if verbose:
print res
return res
def ExecuteHang(command, folder, verbose = False):
    """Run a long-lived command via the _nodeinteractive.py helper,
    passing command and folder base64-encoded on its argv (Python 2 code:
    base64.b64encode here operates on str)."""
    if verbose:
        commandtext = ' '.join(command)
        print commandtext
        sys.exit(0)
    commanddata = base64.b64encode(json.dumps(command))
    folderdata = base64.b64encode(folder)
    res = subprocess.check_output(["python","_nodeinteractive.py", commanddata, folderdata])
    if verbose:
        print res
    return res
def ExecuteNode(args, verbose=False):
    """Run the node binary with *args* and return its output."""
    command = [NODE_BIN] + args
    return Execute(command,verbose)

def ExecuteWallet(args, verbose=False):
    """Run the wallet binary with *args* and return its output."""
    command = [WALLET_BIN] + args
    return Execute(command,verbose)

def ExecuteHangNode(args, folder, verbose=False):
    """Run the node binary as a long-lived (hanging) process."""
    command = [NODE_BIN] + args
    return ExecuteHang(command, folder, verbose)

# NOTE(review): duplicate definition -- ExecuteWallet is defined twice with
# an identical body; this second def silently rebinds the first.  Kept to
# avoid changing module behavior, but one copy should be removed.
def ExecuteWallet(args, verbose=False):
    command = [WALLET_BIN] + args
    return Execute(command,verbose)
def StartTestGroup(title):
    """Print a banner marking the start of a test group."""
    print "==================="+title+"======================"

def StartTest(title):
    """Print a header marking the start of a single test."""
    print "\t----------------"+title

def EndTestSuccess():
    """Print the per-test PASS marker."""
    print "\tPASS"

def EndTestGroupSuccess():
    """Print the per-group PASS marker."""
    print "PASS ==="

def SaveConfigFile(datadir, contents):
    """Write *contents* to <datadir>/config.json."""
    text_file = open(datadir+"/config.json", "w")
    text_file.write(contents)
    text_file.close()

def Exit():
    """Abort the test run by raising (caught by the harness)."""
    raise NameError('Test failed')
def CopyTestData(todir, testset):
    """Copy the fixture files of *testset* from datafortests/ into *todir*,
    renaming them to the names the binaries expect; config and nodeslist
    are optional."""
    srcdir = getCurrentDir()+"/datafortests/"+testset+"/"
    copyfile(srcdir+"blockchain.t", todir + "/blockchain.db")
    copyfile(srcdir+"wallet.t", todir + "/wallet.dat")
    if os.path.isfile(srcdir+"config.t"):
        copyfile(srcdir+"config.t", todir + "/config.json")
    if os.path.isfile(srcdir+"nodeslist.t"):
        copyfile(srcdir+"nodeslist.t", todir + "/nodeslist.db")
#=============================================================================================================
# Assert functions
def Fatal(comment):
    """Print a FAIL message and abort the test run."""
    print "\t\tFAIL: "+comment
    Exit()

def AssertStr(s1, s2, comment):
    """Return True if the strings are equal; otherwise print FAIL + s1."""
    if s1 != s2:
        print "\t\tFAIL: "+comment
        print s1
        return False
    return True

def FatalAssertStr(s1, s2, comment):
    """Like AssertStr, but aborts the run on mismatch."""
    if not AssertStr(s1,s2,comment):
        Exit()

def AssertSubstr(s1, s2, comment):
    """Return True if s2 occurs in s1; otherwise print FAIL + s1."""
    if s2 not in s1:
        print "\t\tFAIL: "+comment
        print s1
        return False
    return True

def FatalAssertSubstr(s1, s2, comment):
    """Like AssertSubstr, but aborts the run on failure."""
    if not AssertSubstr(s1,s2,comment):
        Exit()

def FatalAssertFloat(f1, f2, comment):
    """Abort unless the two values compare equal as floats."""
    if float(f1) != float(f2):
        print "\t\tFAIL: "+comment
        print "Expected: "+str(f1)+" got: "+str(f2)
        Exit()

def Assert(cond, comment):
    """Return *cond*; print FAIL when it is falsy."""
    if not cond:
        print "\t\tFAIL: "+comment
        return False
    return True

def FatalAssert(cond, comment):
    """Like Assert, but aborts the run on failure."""
    if not Assert(cond,comment):
        Exit()

def FatalAssertPIDRunning(pid, comment):
    """ Check For the existence of a unix pid. """
    # os.kill with signal 0 only probes the process; raises if not running.
    try:
        os.kill(pid, 0)
    except OSError:
        print "\t\tFAIL: "+comment
        Exit()

def FatalAssertPIDNotRunning(pid, comment):
    """ Check For the existence of a unix pid. """
    # Inverse of the above: aborts when the process *is* still alive.
    try:
        os.kill(pid, 0)
    except OSError:
        return True
    else:
        print "\t\tFAIL: "+comment
        Exit()

def FatalRegex(expr, text, comment):
    """Abort unless *expr* matches somewhere in *text*."""
    if not re.search(expr,text):
        print "\t\tFAIL: "+comment
        Exit()
|
from selenium import webdriver
from selenium.webdriver.common.by import By
from utilities.handy_wrappers import HandyWrappers
import time
class UsingWrappers():
    """Demo of the HandyWrappers helper against the letskodeit practice page."""

    def test(self):
        """Locate the name field two ways (default locator and xpath) and
        type into / clear it.
        NOTE(review): the driver is never quit, so the browser process is
        leaked when the script ends; the geckodriver path is hard-coded.
        """
        baseURL = "https://letskodeit.teachable.com/pages/practice"
        driver = webdriver.Firefox(executable_path=r"C:\Users\Federico Barderi\workspace_python\drivers\geckodriver.exe")
        driver.maximize_window()
        driver.implicitly_wait(10)
        hw = HandyWrappers(driver)
        driver.get(baseURL)
        textField = hw.getElement("name")
        textField.send_keys("Test")
        time.sleep(2)
        textField2 = hw.getElement("//input[@id='name']", locatorType= "xpath")
        textField2.clear()

# Run the demo on import/execution.
ff = UsingWrappers()
ff.test()
import pandas as pd
import crd_rbd
# example 1
# example of data in list
# example 1
# example data as a plain list of lists
data_ = [
    [20, 16, 26, 26, 34, 28, 20, 18],
    [26, 22, 26, 24, 38, 30, 24, 29],
    [32, 28, 32, 24, 36, 48, 36, 24],
    [48, 42, 34, 36, 42, 54, 50, 45],
]
# give the data unique rows and column labeling and convert to dataframe
data_ = pd.DataFrame(data_, columns=['0', '1', '2', '3',
                                     '4', '5', '6', '7'],
                     index=['a', 'b', 'c', 'd'])
# example 2
# Bug fix: this placeholder read_csv() call was left active, so it
# overwrote the example-1 DataFrame and then crashed on the bogus path --
# the comment above it already said to comment it out.  Uncomment and
# point it at a real, well-labeled .csv/.txt file to use your own data.
# data_ = pd.read_csv('path to .csv file or .txt file')
# now feed the data into the program
m = crd_rbd.CrdRbd(data_)
m.crd()  # for a crd (completely randomized design)
m.rbd()  # for rbd (randomized block design)
|
import matplotlib.pyplot as plt
import seaborn as sns
# Load seaborn's built-in "tips" example dataset.
tips = sns.load_dataset('tips')
print(tips.head())
# Univariate distribution of the bill amounts.
# NOTE(review): sns.distplot is deprecated in seaborn >= 0.11 (removed in
# 0.14) in favor of histplot/displot; this script needs an older seaborn.
sns.distplot(tips['total_bill'])
plt.show()
sns.distplot(tips['total_bill'],kde=False,bins=30)
plt.show()
# Bivariate bill-vs-tip plots in several styles.
sns.jointplot(x='total_bill',y='tip',data=tips,kind='scatter')
plt.show()
sns.jointplot(x='total_bill',y='tip',data=tips,kind='hex')
plt.show()
sns.jointplot(x='total_bill',y='tip',data=tips,kind='reg')
plt.show()
sns.jointplot(x='total_bill',y='tip',data=tips,kind='kde')
plt.show()
# Pairwise relationships over all numeric columns.
sns.pairplot(tips)
plt.show()
sns.pairplot(tips, hue='sex')
plt.show()
sns.pairplot(tips,hue='sex',palette='coolwarm')
plt.show()
# Categorical distribution plots.
sns.boxplot(x="day", y="total_bill", data=tips,palette='rainbow')
plt.show()
sns.boxplot(x="day", y="total_bill", hue="smoker",data=tips, palette="coolwarm")
plt.show()
sns.violinplot(x="day", y="total_bill", data=tips,hue='sex',palette='Set1')
plt.show()
# Grid-based plots.
g = sns.FacetGrid(tips, col="time", row="smoker")
g = g.map(plt.scatter, "total_bill", "tip")
plt.show()
g = sns.JointGrid(x="total_bill", y="tip", data=tips)
g = g.plot(sns.regplot, sns.distplot)
plt.show()
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
from django.forms import ModelForm
from django import forms
from coddy.models import *
class DonateForm(ModelForm):
    """ModelForm for Donate sign-ups (labels/placeholders are Russian UI text)."""
    class Meta:
        model = Donate
        fields = ['name', 'surname', 'email', 'tel']
        labels = {
            'name' : 'Имя',
            'surname' : 'Фамилия',
            'email' : 'E-mail',
            'tel' : 'Телефон',
        }
        widgets = {
            'name': forms.TextInput(attrs={'class': 'field', 'placeholder': 'Иван'}),
            'surname': forms.TextInput(attrs={'class': 'field', 'placeholder': 'Иванович'}),
            'email': forms.EmailInput(attrs={'class': 'field', 'placeholder': 'example@mail.ru'}),
            'tel': forms.TextInput(attrs={'class': 'field', 'placeholder': '+7 900 123 45 67', 'type': 'tel'}),
        }
class VolunteerForm(ModelForm):
    """ModelForm for Volunteer sign-ups.

    NOTE(review): Meta is identical to DonateForm except for the model;
    a shared base Meta could remove the duplication.
    """
    class Meta:
        model = Volunteer
        fields = ['name', 'surname', 'email', 'tel']
        labels = {
            'name' : 'Имя',
            'surname' : 'Фамилия',
            'email' : 'E-mail',
            'tel' : 'Телефон',
        }
        widgets = {
            'name': forms.TextInput(attrs={'class': 'field', 'placeholder': 'Иван'}),
            'surname': forms.TextInput(attrs={'class': 'field', 'placeholder': 'Иванович'}),
            'email': forms.EmailInput(attrs={'class': 'field', 'placeholder': 'example@mail.ru'}),
            'tel': forms.TextInput(attrs={'class': 'field', 'placeholder': '+7 900 123 45 67', 'type': 'tel'}),
        }
# Package version string (PEP 440 pre-release: 7.0.0 alpha 1).
__version__ = '7.0.0a1'
|
"""
Django settings for myproject project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from celery.schedules import crontab
import djcelery
djcelery.setup_loader()

# Periodic task schedule for django-celery beat.
CELERYBEAT_SCHEDULE = {
    'newsletter-every-first-day-of-month': {
        'task': 'signups.newsletter',
        'schedule': crontab(month_of_year='*/12'),
        'args': (16, 16),
    },
}

BASE_DIR = os.path.dirname(os.path.dirname(__file__))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed in source control and should be
# rotated and loaded from the environment instead.
SECRET_KEY = '98!lj8c1kmbhm%n@v%g*1*v+e%uujd@p9bf16%#w*u#m@v%x&t'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# SMTP credentials are re-exported from a non-committed module.
from .email_info import *
TEMPLATE_DEBUG = True
EMAIL_USE_TLS = EMAIL_USE_TLS
EMAIL_HOST = EMAIL_HOST
EMAIL_HOST_USER = EMAIL_HOST_USER
EMAIL_PORT = EMAIL_PORT
EMAIL_HOST_PASSWORD = EMAIL_HOST_PASSWORD

TEMPLATE_DIRS = (
    '/home/bcfobe/webapps/static_media/templates',
)

ALLOWED_HOSTS = ['*']

# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites',
    'django.contrib.comments',
    'south',
    'Articles',
    'whoosh',
    'haystack',
    'djcelery',
    'signups',
    'django_mailer',
)

# Haystack full-text search backed by a local Whoosh index.
WHOOSH_INDEX = os.path.join(BASE_DIR, 'whoosh/')
HAYSTACK_CONNECTIONS = {
    'default' : {
        'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
        'PATH': os.path.join(os.path.dirname(__file__), 'whoosh_index'),
    },
}

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'django_test.urls'
WSGI_APPLICATION = 'django_test.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
# NOTE(review): credentials are hardcoded here; move them to environment
# variables or a secrets file before deploying.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'personal_website_db',
        'USER': 'chaphb',
        'PASSWORD': 'fobe1991',
    }
}

SITE_ID = 1

# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Celery broker/result backend on a local Redis instance.
BROKER_URL = 'redis://127.0.0.1:18806/0'
CELERY_RESULT_BACKEND = BROKER_URL

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = 'http://bcfobe.webfactional.com/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = '/home/bcfobe/webapps/static_media/media/'
STATIC_ROOT = '/home/bcfobe/webapps/static_media/'

AUTH_USER_MODEL = 'signups.User'
|
#Algorithm To implement First Come First Serve scheduling.
# Waiting time of process i = sum of burst times of all earlier processes.
n=int(input('Enter the Number of Process : '))
process=[int(x) for x in input('Enter the Process Number : ').split()]
burst_time=[int(x) for x in input('Enter the burst time of the Process : ').split()]
current_wait = 0
total_waiting_time = 0
for ith_process in range(n):
    print('Waiting time of the Process '+str((ith_process+1))+' is : '+str(current_wait))
    # BUG FIX: the total is the SUM of every process's waiting time; the
    # original accumulated burst times and subtracted the last burst, which
    # yields only the last process's wait, not the total.
    total_waiting_time += current_wait
    current_wait += burst_time[ith_process]
print("Total Waiting time of the Process is : "+str(total_waiting_time))
print("Average Waiting time of the Process is : "+str(total_waiting_time/n))
|
hour = int(input())
minutes = int(input())
# Advance the clock by a quarter hour, wrapping minutes at 60 and hours at 24.
minutes += 15
if minutes >= 60:
    minutes -= 60
    hour += 1
if hour >= 24:
    hour -= 24
print("{0}:{1:02d}".format(hour, minutes))
|
import pytest
import pdb
from fhireval.test_suite.concept_map import example_code_system_source, reset_testdata
from fhirwood.parameters import ConceptMapParameter
test_id = f"{'2.8.2':<10} - ConceptMap Translate"
test_weight = 2
def test_codemap_translate(host):
    """Create a ConceptMap, exercise $translate against it, then delete it."""
    reset_testdata(host)
    create_response = host.post('ConceptMap', example_code_system_source, validate_only=False)
    assert create_response['status_code'] == 201
    concept_map_id = create_response['response']['id']
    # Run a few $translate checks while the concept map exists.
    first_entry = host.get(
        f"ConceptMap/$translate?system=http://hl7.org/fhir/administrative-gender&code=male",
        no_count=True,
    ).entries[0]
    parameter = ConceptMapParameter(first_entry)
    assert parameter.match_count > 0, "Make sure we have at least one result"
    assert parameter.result, "Make sure the first match is a postive"
    assert parameter.match.concept == "M", "Transpose actuall worked"
    removal = host.delete_by_record_id('ConceptMap', concept_map_id)
    assert removal['status_code'] == 200
|
#!/usr/bin/python
#\file scipy_solve_1d_eq.py
#\brief Comparing method for solving 1d variable equation.
#\author Akihiko Yamaguchi, info@akihikoy.net
#\version 0.1
#\date Jan.21, 2021
# NOTE: Python 2 script (uses print statements below).
import scipy.optimize
import time
# Root target: find x with f(x)==0; the minimizers search min of f(x)^2
# so the root becomes a minimum.
f= lambda x: x+x**2+x**3 - 2.5
bounds= [-1.0,1.0]
#bounds= [-1.0,0.5] #NOTE: Setup where the solution is not within the bounds.
#We will solve x for f(x)==0 s.t. x in bounds.
# Each entry: [method label, solution x, residual f(x), elapsed seconds].
results= []
#Using scipy.optimize.minimize.
t_start= time.time()
res= scipy.optimize.minimize(lambda x:f(x[0])**2,[0.0],bounds=[bounds])
results.append(['optimize.minimize\t\t',res.x[0],f(res.x[0]),time.time()-t_start])
#Using scipy.optimize.minimize_scalar. #WARNING: Does not consider bounds.
t_start= time.time()
res= scipy.optimize.minimize_scalar(lambda x:f(x)**2, bracket=bounds, method='brent')
results.append(['optimize.minimize_scalar-brent',res.x,f(res.x),time.time()-t_start])
#Using scipy.optimize.minimize_scalar. #WARNING: Does not consider bounds.
t_start= time.time()
res= scipy.optimize.minimize_scalar(lambda x:f(x)**2, bracket=bounds, method='golden')
results.append(['optimize.minimize_scalar-golden',res.x,f(res.x),time.time()-t_start])
#Using scipy.optimize.minimize_scalar.  Only this method honors the bounds.
t_start= time.time()
res= scipy.optimize.minimize_scalar(lambda x:f(x)**2, bounds=bounds, method='bounded')
results.append(['optimize.minimize_scalar-bounded',res.x,f(res.x),time.time()-t_start])
#Using scipy.optimize.fsolve (root finder, not a minimizer).
t_start= time.time()
res= scipy.optimize.fsolve(lambda x:f(x[0]),[0.0]) #WARNING: Does not consider bounds.
results.append(['optimize.fsolve\t\t\t',res[0],f(res[0]),time.time()-t_start])
# Print the comparison table (Python 2 print statements).
print 'method\t\t\t\t\t x\t\t f(x)\t\t\t time'
for method,x,f_x,t in results:
    print method,':\t',x,'\t',f_x,'\t',t
|
#!/usr/bin/env python
"""
this fabric supposed to speed things up when doing parallel rsync
but in our case it actually slowed things down.
But i decided to leave it for mems :) Maybe someday i'll make it work as expected.
"""
from fabric.api import *
from fabric.contrib import files, project
from fabric.context_managers import path
from fabric.utils import error
from joblib import Parallel, delayed
from subprocess import Popen, PIPE, STDOUT, check_output, call
import os
import random
import sys
import getpass
import shlex
# Build defaults; most of these module globals are overwritten by the
# build() task's command-line arguments before anything uses them.
PROJECT_NAME = 'emo'
BUILD_CFG = 'release' # release or final-release
BUILD_PLATFORM = 'centos'
PRIVATE_BUILD = True
BUILD_LINUX_CLIENT = False
NINJA_ADD_ARGS = ''
REMOTE_DIR = ''
# remote builder settings
env.disable_known_hosts = True
env.key_filename = 'scripts/id_rsa_for_macbuilder'
if 'rsync_key' in env:
    env.key_filename = env.rsync_key
# assign centos builder for each windows builder to avoid centos-builder resources burnout.
# if no environment var found, just pick random builder (of the 4 in total)
try:
    TC_AGENT_NAME=os.environ['TC_AGENT_NAME'].replace('.','-')
except KeyError:
    TC_AGENT_NAME='devserver{0}-agent1'.format(random.sample([1,2,3,4],1)[0])
# remote build machines per platform
BUILD_BOTS = {
    'linux': 'builduser@builder-ubuntu14-02.vm.tgm.local', # emo-ubuntu64
    'pc': 'builduser@192.168.20.3',
    'ps4': 'builduser@192.168.20.3',
    'vagrant_linux': 'vagrant@10.20.30.101',
    'centos': 'builduser@builder-centos7-{0}.tgm.local'.format(TC_AGENT_NAME),
}
# isolate private builds from devservers's
BUILD_BOTS_PRIVATE = {
    #'centos': 'builduser@builder-centos7-private.tgm.local',
    'centos': 'builduser@builder-centos7-02.vm.tgm.local',
}
# used to find proper rsync, echo and other cygwin tools
CYGWIN_PATH = [
    r'd:\cygwin64\bin',
    r'd:\cygwin\bin',
    r'c:\cygwin\bin',
    r'c:\cygwin64\bin',
]
# Patterns never shipped to the builder (docs, IDE junk, other platforms).
RSYNC_EXCLUDES = [
    'ios/',
    'android/',
    'mac/',
    'solaris/',
    '/bin/',
    'fabfile.py',
    'README',
    'README.*',
    'CHANGES',
    'HISTORY',
    'INSTALL',
    'doc/',
    'docs/',
    '/data',
    'temp/',
    '/ipch',
    '*.vsp',
    '/.*',
    '*.log',
    '*.md',
    '*.txt',
    '*.suo',
    '*.sdf',
    '*.pyc',
    '*.pdb',
    '*.VC.db',
    '.ninja_log',
    '.ninja_deps',]
# WARN: do not use --delete-excluded option with parallel directories sync
RSYNC_BASE_OPTS = '-zr --checksum'
RSYNC_EXTRA_OPTS = '--temp-dir=/dev/shm --timeout=180 --delete --no-perms --no-owner --no-group'
# Run in parallel RSYNC_PARALLEL_NUM rsyncs for subdirectories in each given path
# even though having full paths in sync, the subsequental rsyncs
# wasting time when calculating and exchanging crc's. So paralleling helps alot here.
# The dict consists of subdirectory names and additional rsync options.
# For code one should use --checksum rsync option - this will allow rsync
# to not touch files timestamps which could trigger re-build of them.
# Each pathname should start with '/', relative to checkout directory.
# Empty the dict to disable rsync parallel runs.
RSYNC_PARALLEL_PATHS = {}
#RSYNC_PARALLEL_PATHS = {'/code':'',
#                        '/middleware':''}
RSYNC_PARALLEL_NUM = 4
# will be generated out of RSYNC_EXCLUDES and platform-specific lists
# for use with Popen-like system calls
RSYNC_GENERATED_EXCLUDES_CMDARGS = ''
# path to Visual Studio devenv on a remote machine
PC_PATH_TO_DEVENV = r'/cygdrive/c/Program Files (x86)/Microsoft Visual Studio 12.0/Common7/Tools/../IDE/devenv.com'
def fix_rsync_search_path():
    """Add Cygwin search path since Fabric is buggy and can't properly modify paths on Windows"""
    if not sys.platform.startswith('win'):
        return
    # Prepend every Cygwin bin directory so rsync/echo resolve there first.
    os.environ['PATH'] = ';'.join(CYGWIN_PATH + [os.environ['PATH']])
def generate_rsync_excludes():
    """Generate rsync excludes file.

    On Linux-family targets, Windows-only artefacts are also excluded; the
    combined list is rendered once into --exclude command-line arguments.
    """
    global RSYNC_EXCLUDES
    global RSYNC_GENERATED_EXCLUDES_CMDARGS
    if BUILD_PLATFORM in ('centos', 'ubuntu', 'vagrant_linux', 'centos-pxo', 'linux'):
        RSYNC_EXCLUDES += (
            'windows/',
            'Windows/',
            '*.dll',
            '*.lib',
            '*.bat',
            '*.cmd',
            '*.exe',
            '*.vcxproj',
            '*.vcxproj.*',
            '*.sln',
        )
    RSYNC_GENERATED_EXCLUDES_CMDARGS = ' '.join(
        '--exclude "\'{0}\'"'.format(pattern) for pattern in RSYNC_EXCLUDES)
def is_true(arg):
    """Interpret a fab-style string flag: only 'true'/'yes' (any case) are truthy."""
    return str(arg).lower() in ('true', 'yes')
def get_remote_dir():
    """Form remote build directory path name.

    The directory name is PROJECT_NAME plus suffixes for final-release,
    private builds, and an optional build_slot or build_branch env var
    (slot wins over branch).
    """
    project_dir = PROJECT_NAME
    if BUILD_CFG == 'final-release':
        project_dir += '_final_release'
    if PRIVATE_BUILD:
        project_dir += '_private_%s' % getpass.getuser()
    build_slot = os.environ.get('build_slot', '')
    if build_slot != '':
        project_dir += '_%s' % build_slot
    else:
        build_branch = os.environ.get('build_branch', '')
        if build_branch != '':
            project_dir += '_%s' % build_branch
    if BUILD_PLATFORM == 'vagrant_linux':
        return '/project/code/%s' % PROJECT_NAME
    return '~/projects/%s' % project_dir
def psync(src_root, subdirs, rsync_path_opts):
    """Run rsync in Parallel for each subdirectory.

    Subdirectory names are piped to `xargs -P`, which fans out one rsync per
    name; a multiprocessing-based approach is hard to fit on top of fabric.
    """
    combined_opts = ' '.join([RSYNC_BASE_OPTS,
                              RSYNC_EXTRA_OPTS,
                              rsync_path_opts,
                              RSYNC_GENERATED_EXCLUDES_CMDARGS])
    # project.rsync_project is not used here, so iterate env.hosts ourselves.
    for target_host in env.hosts:
        command = ('xargs -rn1 -P{pnum} -I% '
                   'rsync {args} {base_dir}/% {host}:{remote_dir}/{base_dir}/').format(
                       pnum=RSYNC_PARALLEL_NUM,
                       args=combined_opts,
                       base_dir=src_root,
                       host=target_host,
                       remote_dir=REMOTE_DIR)
        proc = Popen(shlex.split(command), stdout=PIPE, stdin=PIPE, stderr=PIPE)
        out, err = proc.communicate(input=b'\n'.join(subdirs))
        if err:
            print(err.decode())
def copy_source_code(src_dir, dst_dir):
    """Sync local source code tree w/ a remote builder.
    Paralleling rsync helps to speed things up at checksums exchange stage
    and for 'heavy' directories transfer. Use with caution: the destination
    device should be parallel-I/O friendly.
    To parallel rsync subdirectories, list them in RSYNC_PARALLEL_PATHS global
    """
    # NOTE: Python 2 only — uses dict.iterkeys()/iteritems() below.
    ssh_opts=('-o BatchMode=yes '
        '-o StrictHostKeyChecking=no '
        '-o Compression=no '
        '-o UserKnownHostsFile=/dev/null '
        '-o LogLevel=error '
        '-o PreferredAuthentications=publickey')
    # this is for parallel rsync
    os.environ['RSYNC_RSH'] = 'ssh {0} -i {1}'.format(ssh_opts, env.key_filename)
    # strip trailing slashes if exist
    while src_dir.endswith('/') or src_dir.endswith('\\'):
        src_dir = src_dir[:len(src_dir)-1]
    # sync everything except paths for parallel sync
    with hide('stdout'):
        project.rsync_project(local_dir=src_dir, remote_dir=dst_dir,
            exclude=RSYNC_EXCLUDES + [k for k in RSYNC_PARALLEL_PATHS.iterkeys()],
            default_opts=RSYNC_BASE_OPTS,
            delete=True,
            ssh_opts=ssh_opts,
            extra_opts=RSYNC_EXTRA_OPTS)
    # run parallel sync for each defined paths
    # RSYNC_PARALLEL_PATHS elements should start with '/'
    for path, rsync_path_opts in RSYNC_PARALLEL_PATHS.iteritems():
        try:
            # NOTE(review): `files` here shadows the fabric.contrib `files`
            # module imported at the top of this file.
            root, subdirs, files = next(os.walk(src_dir + path))
            # sync root subdirectory with its files (if ones exist)
            #print "syncing {0}'s files: {1}".format(root, files)
            with hide('running'):
                project.rsync_project(local_dir=root+'/', remote_dir=REMOTE_DIR+'/'+root+'/',
                    exclude=RSYNC_EXCLUDES + ['*/'],
                    default_opts=RSYNC_BASE_OPTS,
                    delete=True,
                    ssh_opts=ssh_opts,
                    extra_opts=RSYNC_EXTRA_OPTS + ' ' + rsync_path_opts)
            psync(root, subdirs, rsync_path_opts)
        except StopIteration:
            print("[warn] couldnt traverse into {0}".format(path))
def build_linux_client():
    """Build the Linux game client with ninja on the remote builder."""
    cfg_file = ('game-finalrelease.linux.ninja'
                if BUILD_CFG == 'final-release' else 'game-release.linux.ninja')
    with cd(REMOTE_DIR + '/ninja/gamelinux/'):
        run('ninja {0} -f {1}'.format(NINJA_ADD_ARGS, cfg_file), shell_escape=False, pty=False)
def build_linux_servers():
    """Build the server binaries (service container + dedicated server)."""
    # Log the compiler version first for build diagnostics.
    run('g++ --version', shell_escape=False, pty=False)
    with cd(REMOTE_DIR + '/ninja/gamelinux/'):
        for ninja_file in ('servicecontainer-release.linux.ninja',
                           'dedicatedserver-release.linux.ninja'):
            run('ninja {0} -f {1}'.format(NINJA_ADD_ARGS, ninja_file),
                shell_escape=False, pty=False)
def build_linux_stressbot():
    """Build the stress-testing bot binary."""
    with cd(REMOTE_DIR + '/ninja/gamelinux/'):
        run('ninja {0} -f stressbot-release.linux.ninja'.format(NINJA_ADD_ARGS),
            shell_escape=False, pty=False)
def build_linux_stresstool():
    """Build the stress tool from the tools ninja directory."""
    with cd(REMOTE_DIR + '/ninja/tools/'):
        run('ninja {0} -f stresstool-release.linux.ninja'.format(NINJA_ADD_ARGS),
            shell_escape=False, pty=False)
def dump_symbols(args=""):
    """Generate breakpad symbols on the remote builder, forwarding *args* if given."""
    command = 'python scripts/dump_breakpad_symbols.py'
    if args != "":
        command += ' ' + args
    with cd(REMOTE_DIR):
        run(command)
def is_gnudebug_linked(binary_file, section_name='.gnu_debuglink'):
    """Check if binary file contains specified debug section"""
    # warn_only: a grep miss must not abort the whole fabric run.
    with settings(warn_only=True):
        result = run("readelf -S {0} | grep -qF {1}".format(binary_file, section_name))
    return result.return_code == 0
def strip_client():
    """Strip debug info from the client binary."""
    run('strip {0}/bin/linux/Crossout'.format(REMOTE_DIR))
def strip_servers():
    """Strip server binaries, keeping full copies linked via .gnu_debuglink.

    The unstripped copies stay out of SVN but are packed into server images,
    so production binaries can still be profiled/debugged.
    """
    with cd('%s/bin/linux' % REMOTE_DIR):
        for binary in ('ServiceContainer', 'DedicatedServer'):
            if is_gnudebug_linked(binary):
                continue  # already split on a previous run
            run('mv {0} {0}.full &&'
                'strip {0}.full -o {0} &&'
                'objcopy --add-gnu-debuglink={0}.full {0}'.format(binary))
def strip_stressbot():
    """Strip debug info from the stress bot binary."""
    run('strip {0}/bin/linux/StressBot'.format(REMOTE_DIR))
def build_linux():
    """Build Linux targets: optional client, servers, symbols, then strip."""
    if BUILD_LINUX_CLIENT:
        build_linux_client()
    build_linux_servers()
    # Symbols are only archived for shared (non-private) builds.
    if not PRIVATE_BUILD:
        dump_symbols()
    if BUILD_LINUX_CLIENT:
        strip_client()
    strip_servers()
def build_centos():
    """Build the server binaries on CentOS (no client is needed there)."""
    build_linux_servers()
    # Breakpad symbols are only dumped for shared (non-private) builds;
    # binaries are stripped either way.
    if not PRIVATE_BUILD:
        dump_symbols("centos")
    strip_servers()
    # well it's usually broken, so let it rot
    # build_linux_stressbot()
    # strip_stressbot()
def build_centos_pxo():
    """Build servers and the stress tool; binaries stay unstripped."""
    build_linux_servers()
    build_linux_stresstool()
def build_vagrant_linux():
    """Build both client and servers inside the vagrant VM."""
    build_linux_client()
    build_linux_servers()
def build_pc():
    """Build the Win32 Game-Release configuration via Visual Studio devenv."""
    with cd(REMOTE_DIR), hide('stdout'):
        run('"%s" "Project.sln" /build "Game-Release|Win32"' % PC_PATH_TO_DEVENV)
def build_ps4():
    """Build the ORBIS (PS4) Game-Release configuration via devenv."""
    with cd(REMOTE_DIR), hide('stdout'):
        run('"%s" "Project.sln" /build "Game-Release|ORBIS"' % PC_PATH_TO_DEVENV)
def get_build_results():
    """Download the built binaries from the remote builder for the current platform."""
    if BUILD_PLATFORM == 'centos':
        LOCAL_PATH = './bin/linux/centos/'
        # Make sure the local destination exists before fabric `get`.
        if not os.path.exists(LOCAL_PATH):
            os.makedirs(LOCAL_PATH)
        with hide('warnings'):
            get(remote_path=REMOTE_DIR +
                '/bin/linux/DedicatedServer', local_path=LOCAL_PATH)
            get(remote_path=REMOTE_DIR +
                '/bin/linux/ServiceContainer', local_path=LOCAL_PATH)
            # bot executable, if built
            botApp = REMOTE_DIR + '/bin/linux/StressBot'
            if files.exists(botApp):
                get(remote_path=botApp, local_path=LOCAL_PATH)
    elif BUILD_PLATFORM == 'linux':
        with hide('warnings'):
            if BUILD_LINUX_CLIENT:
                get(remote_path=REMOTE_DIR + '/bin/linux/Crossout', local_path='./bin/linux/')
            get(remote_path=REMOTE_DIR +
                '/bin/linux/DedicatedServer', local_path='./bin/linux/')
            get(remote_path=REMOTE_DIR +
                '/bin/linux/ServiceContainer', local_path='./bin/linux/')
            # copy dbg exe as well (for debug purposes)
            dbg_exe = REMOTE_DIR + '/bin/linux/Crossout_dbg'
            if files.exists(dbg_exe):
                get(remote_path=dbg_exe, local_path='./bin/linux/')
    elif BUILD_PLATFORM == 'ps4':
        # NOTE(review): unlike the centos branch, './bin/ps4/' is not created
        # beforehand — confirm fabric `get` handles a missing local dir.
        get(remote_path=REMOTE_DIR +
            '/bin/ps4/game.elf', local_path='./bin/ps4/')
def delete_build_dir():
    """Remove the remote build directory entirely."""
    run('rm -fR {0}'.format(REMOTE_DIR))
def get_private_cpu_limits():
    """ return a tuple of threads num and a load avg cap for -j and -l ninja flags. """
    with hide('output'):
        ncpu = int(run("getconf _NPROCESSORS_ONLN"))
    # NOTE: `/` kept as-is — under Python 2 this is integer division.
    return (ncpu/2, ncpu-2)
@task
def build(platform='centos', cfg='release', build_for='private', cleanup='yes', just_copy_souce_code='no', build_linux_client='no', egoistic='no'):
    """Main fab task: rsync sources to a remote builder and run the build.

    All parameters arrive as strings from the fab command line:
      platform -- key into BUILD_BOTS ('centos', 'linux', 'pc', 'ps4', ...)
      cfg -- 'release' or 'final-release'
      build_for -- 'global' uses shared bots; anything else is a private build
      cleanup -- 'yes' deletes the remote build dir after the build
      just_copy_souce_code -- 'yes' only syncs sources and returns
        (name keeps its historical typo; fab callers pass it by name,
        so renaming it would break them)
      build_linux_client -- 'yes' also builds the Linux client
      egoistic -- 'yes' skips the shared-builder -j/-l ninja CPU limits
    """
    global BUILD_PLATFORM
    global BUILD_CFG
    global BUILD_LOCAL
    global PRIVATE_BUILD
    global BUILD_LINUX_CLIENT
    global NINJA_ADD_ARGS
    global REMOTE_DIR
    BUILD_PLATFORM = platform.lower()
    if BUILD_PLATFORM not in BUILD_BOTS.keys():
        error('Invalid platform: %s' % platform)
    BUILD_CFG = cfg.lower()
    if BUILD_CFG not in ['release', 'final-release']:
        error('Invalid build type: %s' % cfg)
    PRIVATE_BUILD = False if build_for.lower() == 'global' else True
    BUILD_LINUX_CLIENT = is_true(build_linux_client)
    if BUILD_PLATFORM == 'pc' and not (PRIVATE_BUILD and BUILD_CFG == 'release'):
        error('Currently building PC remotely is experimental, you can build private release only')
    REMOTE_DIR = get_remote_dir()
    if not PRIVATE_BUILD:
        env.hosts = [BUILD_BOTS[BUILD_PLATFORM], ]
    else:
        # NOTE(review): BUILD_BOTS_PRIVATE only defines 'centos'; other
        # platforms would raise KeyError here for private builds — confirm.
        env.hosts = [BUILD_BOTS_PRIVATE[BUILD_PLATFORM], ]
    if not is_true(egoistic):
        res = execute(get_private_cpu_limits)
        (max_run_threads, loadavg_cut_threads) = res[BUILD_BOTS_PRIVATE[BUILD_PLATFORM]]
        NINJA_ADD_ARGS = '-j {0} -l {1}'.format(max_run_threads, loadavg_cut_threads)
    fix_rsync_search_path()
    generate_rsync_excludes()
    # NOTE(review): the remote dir is wiped before copying only in the
    # copy-only-and-cleanup case — confirm this combination is intended.
    if is_true(just_copy_souce_code) and is_true(cleanup):
        execute(delete_build_dir)
    execute(copy_source_code, src_dir='.', dst_dir=REMOTE_DIR)
    if is_true(just_copy_souce_code):
        return
    if BUILD_PLATFORM == 'linux':
        execute(build_linux)
    elif BUILD_PLATFORM == 'pc':
        execute(build_pc)
    elif BUILD_PLATFORM == 'ps4':
        execute(build_ps4)
    elif BUILD_PLATFORM == 'vagrant_linux':
        execute(build_vagrant_linux)
    elif BUILD_PLATFORM == 'centos':
        execute(build_centos)
    elif BUILD_PLATFORM == 'centos-pxo':
        execute(build_centos_pxo)
    execute(get_build_results)
    if is_true(cleanup):
        execute(delete_build_dir)
# Guard: this module is a fabfile; invoke tasks via `fab build:...`,
# not by running the file directly.
if __name__ == "__main__":
    print("[warn] this supposed to be run via fab")
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2019 shady <shady@MrRobot.local>
#
import datetime
from peewee import MySQLDatabase
from peewee import (
Model,
CharField,
IntegerField,
BooleanField,
FloatField,
DateTimeField,
PrimaryKeyField,
)
from config import load_config
# Module-level side effect: importing this module loads the app config and
# creates the MySQL database handle used by all models below.
config = load_config()
db = MySQLDatabase(**config["DB_CONFIG"])
class Product(Model):
    """Peewee model for a store item, backed by the MySQL table 'product'."""
    id = PrimaryKeyField()
    # Unique display name, up to 250 chars.
    name = CharField(max_length=250, unique=True)
    # Optional free-text description.
    description = CharField(max_length=600, null=True)
    price = FloatField(default=0)
    # Units in stock.
    inventory = IntegerField(default=0)
    # Creation timestamp; `datetime.now` is evaluated per row, not at import.
    created = DateTimeField(default=datetime.datetime.now)
    class Meta:
        table_name = "product"
        database = db
|
"""Xonsh AST tests."""
from nose.tools import assert_equal
from xonsh import ast
from xonsh.ast import Tuple, Name, Store
def test_gather_names_name():
    """A single Name node in store context yields just its identifier."""
    node = Name(id='y', ctx=Store())
    assert_equal({'y'}, ast.gather_names(node))
def test_gather_names_tuple():
    """A tuple target contributes every contained Name identifier."""
    node = Tuple(elts=[Name(id='y', ctx=Store()), Name(id='z', ctx=Store())])
    assert_equal({'y', 'z'}, ast.gather_names(node))
|
#-*- encoding=utf8 -*-
#!/usr/bin/env python
import re, sys, operator, string
# Input corpora: comma-separated stop-word list and the text to analyse.
path_to_stop_words = './BasicData/stop_words.txt'
path_to_text = './BasicData/Pride_And_Prejudice.txt'
class WordFrequencyFrameWork:
    """Event-driven framework: phases run in order load -> dowork -> end."""
    def __init__(self):
        # BUG FIX: the handler lists used to be CLASS attributes, so every
        # instance shared (and accumulated) the same handlers; they are now
        # per-instance state.
        self._load_event_handlers = []
        self._dowork_event_handlers = []
        self._end_event_handlers = []
    def register_for_load_event(self, handler):
        # handler(path_to_file) is called during the load phase.
        self._load_event_handlers.append(handler)
    def register_for_dowork_event(self, handler):
        # handler() is called during the work phase.
        self._dowork_event_handlers.append(handler)
    def register_for_end_event(self, handler):
        # handler() is called during the end phase.
        self._end_event_handlers.append(handler)
    def run(self, path_to_file):
        """Fire all registered handlers, phase by phase, in registration order."""
        for h in self._load_event_handlers:
            h(path_to_file)
        for h in self._dowork_event_handlers:
            h()
        for h in self._end_event_handlers:
            h()
class DataStorage:
    """Loads the text corpus and emits each non-stop-word to registered handlers."""
    def __init__(self, wfapp, stop_word_filter):
        # BUG FIX: _data/_stop_word_filter/_word_event_handler used to be
        # CLASS attributes shared by all instances; now per-instance state.
        self._data = ''
        self._word_event_handler = []
        self._stop_word_filter = stop_word_filter
        wfapp.register_for_load_event(self.__load)
        wfapp.register_for_dowork_event(self.__produce_words)
    def __load(self, path_to_file):
        # Read the whole file, collapse non-word characters (and '_') to
        # spaces, and lower-case everything.
        with open(path_to_file) as f:
            self._data = f.read()
        pattern = re.compile('[\W_]+')
        self._data = pattern.sub(' ', self._data).lower()
    def __produce_words(self):
        # Emit each word that the stop-word filter does not reject.
        data_str = ''.join(self._data)
        for w in data_str.split():
            if not self._stop_word_filter._is_stop_word(w):
                for h in self._word_event_handler:
                    h(w)
    def register_for_word_event(self, handler):
        # handler(word) fires once per accepted word during the work phase.
        self._word_event_handler.append(handler)
class StopWordFilter:
    """Knows the stop words; loads them on the framework's load event."""
    def __init__(self, wfapp):
        # BUG FIX: _stop_word used to be a CLASS attribute shared across
        # instances; now per-instance state.
        self._stop_word = []
        wfapp.register_for_load_event(self.__load)
    def __load(self, ignore):
        # The load-event path argument is ignored; stop words always come
        # from the module-level path_to_stop_words file.
        with open(path_to_stop_words) as f:
            self._stop_word = f.read().split(',')
        # Single letters also count as stop words.
        self._stop_word.extend(list(string.ascii_lowercase))
    def _is_stop_word(self, word):
        return word in self._stop_word
class WordFrequencyCounter:
_word_freqs = {}
def __init__(self, wfapp,data_storage):
data_storage.register_for_word_event(self.__increment_count)
wfapp.register_for_end_event(self.__print_freqs)
def __increment_count(self, word):
if word in self._word_freqs:
self._word_freqs[word] += 1
else:
self._word_freqs[word] = 1
def __print_freqs(self):
# print "__print_freqs"
word_freqs = sorted(self._word_freqs.iteritems(), key=operator.itemgetter(1), reverse=True)
for (w,c) in word_freqs[0:25]:
# print "Hello w and c"
print w , ' - ' , c
# Main program: wire the components together and run the pipeline.
wfapp = WordFrequencyFrameWork()
stop_word_filter = StopWordFilter(wfapp)
data_storage = DataStorage(wfapp,stop_word_filter)
word_freqs_counter = WordFrequencyCounter(wfapp,data_storage)
wfapp.run(path_to_text)
from onegov.activity import Occasion, OccasionNeed
from onegov.core.security import Secret
from onegov.feriennet import FeriennetApp, _
from onegov.feriennet.exports.base import FeriennetExport
from onegov.feriennet.forms import PeriodExportForm
from sqlalchemy.orm import joinedload, undefer
@FeriennetApp.export(
    id='helfer',
    form_class=PeriodExportForm,
    permission=Secret,
    title=_("Volunteers"),
    explanation=_("Exports volunteers in the given period."),
)
class VolunteerExport(FeriennetExport):
    """Exports the volunteers attached to occasion needs of one period."""
    def run(self, form, session):
        # Entry point invoked by the export machinery.
        return self.rows(session, form.selected_period)
    def query(self, session, period):
        """Build the OccasionNeed query for *period*, eager-loading relations."""
        q = session.query(OccasionNeed)
        # Restrict to needs whose occasion belongs to the selected period.
        q = q.filter(OccasionNeed.occasion_id.in_(
            session.query(Occasion.id)
            .filter(Occasion.period_id == period.id)
            .subquery()
        ))
        q = q.join(Occasion)
        # Eager-load occasion->activity and occasion->period to avoid N+1 queries.
        q = q.options(
            joinedload(OccasionNeed.occasion)
            .joinedload(Occasion.activity)
        )
        q = q.options(
            joinedload(OccasionNeed.occasion)
            .joinedload(Occasion.period)
        )
        # Load all deferred columns up front.
        q = q.options(undefer('*'))
        q = q.order_by(Occasion.activity_id)
        return q
    def rows(self, session, period):
        # One row (a generator of (field, value) pairs) per volunteer per need.
        for need in self.query(session, period):
            for volunteer in need.volunteers:
                yield ((k, v) for k, v in self.fields(volunteer))
    def fields(self, volunteer):
        # Field extraction is delegated to FeriennetExport.volunteer_fields.
        yield from self.volunteer_fields(volunteer)
|
from django.db import models
from django.contrib.auth import get_user_model
from products.models import Product
User = get_user_model()
class Deal(models.Model):
    """A user's sale deal for a product (quantity + confirmation flag)."""
    user = models.ForeignKey(User, on_delete=models.DO_NOTHING,
                             verbose_name='Пользователь',
                             related_name='user_deal')
    product_sell = models.ForeignKey(Product,
                                     on_delete=models.DO_NOTHING,
                                     verbose_name='Продукт',
                                     related_name='user_sell_product')
    quantity = models.DecimalField(max_digits=5, decimal_places=2,
                                   verbose_name='Кол-во товара')
    confirmed_deal = models.BooleanField(verbose_name='Подтверждена',
                                         default=False)
    def __str__(self):
        # BUG FIX: the format string had only three '{}' placeholders for
        # four arguments, so confirmed_deal was silently dropped by
        # str.format (extra positional args are ignored).
        return '{}, {}, {}, {}'.format(self.user, self.product_sell,
                                       self.quantity, self.confirmed_deal)
    class Meta:
        verbose_name = 'Сделка'
        verbose_name_plural = 'Сделки'
|
from __future__ import print_function
class Unit(object):
    """A game unit built from the server's JSON blob."""
    def __init__(self, name, id, team, jsonData):
        self.name = name
        self.id = id
        self.team = team
        self.health = jsonData['Health']
        self.max_health = jsonData['MaxHealth']
        # World position as an (x, y, z) tuple.
        self.location = (jsonData['Loc_X'], jsonData['Loc_Y'], jsonData['Loc_Z'])
    def __repr__(self):
        # Renamed the local (it shadowed the builtin `str`) and added the
        # missing '\n' after the header so it doesn't run into the health
        # line — consistent with Hero.__repr__ below.
        text = 'Unit ID: %d, %s - %s\n' % (self.id, self.name, self.team)
        text += '\tHealth: %d / %d\n' % (self.health, self.max_health)
        return text
class Hero(Unit):
    """A hero unit; its repr additionally shows the world location."""
    def __init__(self, name, id, team, jsonData):
        # No extra state — delegate everything to the base class.
        Unit.__init__(self, name, id, team, jsonData)
    def __repr__(self):
        header = 'Hero ID: %d, %s - %s\n' % (self.id, self.name, self.team)
        health = '\tHealth: %d / %d\n' % (self.health, self.max_health)
        position = '\tLocation: <%f, %f, %f>\n' % self.location
        return header + health + position
x = 10
# Parity check.
# BUG FIX: the original printed 'odd' when x % 2 == 0 (i.e. for EVEN
# numbers) and the typo 'evan' otherwise — the labels were swapped.
if x % 2 == 0:
    print('even')
else:
    print('odd')
# Compare x against 10.
if x > 10:
    print('x bigger than 10')
elif x < 10:
    print('x less than 10')
else:
    print('x equals 10')
ACTUALLY_RELEASE_YEAR = 1991
# Single-guess game: hint whether the guess is too early or too late.
inputYear = int(input("please guess the python release year:"))
if inputYear < ACTUALLY_RELEASE_YEAR:
    print('maybe later')
elif inputYear > ACTUALLY_RELEASE_YEAR:
    print('maybe earlier')
else:
    print('right!')
|
import threading
class InstanceHealth(object):
    """Thread safe object to communicate between the worker thread and the API thread."""
    def __init__(self):
        # key -> list of distinct error strings; guarded by _lock.
        self._errors = {}
        self._lock = threading.Lock()
    def add_degraded(self, key, error):
        """Record *error* under *key*, deduplicating repeats.

        Args:
            key (str)
            error (str)
        """
        with self._lock:
            bucket = self._errors.setdefault(key, [])
            if error not in bucket:
                bucket.append(error)
    def set_ok(self, key):
        """Clear all recorded errors for *key* (no-op if none exist)."""
        with self._lock:
            self._errors.pop(key, None)
    def get_status(self):
        """Return a snapshot {'degraded': <key count>, 'errors': {key: [msgs]}}.

        BUG FIX: this used to return the internal dict itself, letting
        callers read/mutate shared state outside the lock; it now returns
        a copy (including copies of the error lists).
        """
        with self._lock:
            return {'degraded': len(self._errors),
                    'errors': {k: list(v) for k, v in self._errors.items()}}
|
import csv # imports the csv module
import sys # imports the sys module
#import MySQLdb
# Column headers for the CSV: a territory column then one column group per
# year ('01'..'04'), each split by gender (Male/Female/Total).
header_year = [ 'Ter', '01', '02', '03', '04'];
header_gender = ['M', 'F', 'T']
def readCountriesData():
    """Parse '01-04.csv' into {country: {year: {gender: value}}} (Python 2).

    NOTE(review): if csv.reader() itself raised, `countries` would be unbound
    and the `finally` print would raise NameError — confirm acceptable.
    """
    f = open('01-04.csv', 'rb') # opens the csv fil
    rownum = 0
    try:
        reader = csv.reader(f ,delimiter=',') # creates the reader object
        countries = dict();
        for row in reader: # iterates the rows of the file in orders
            colnum = 0;
            years = dict();
            MFT = 0;
            for col in row:
                #print '%-8s: %s' % (header[colnum], col), colnum
                if(colnum ==0):
                    # First cell of the row is the country name.
                    countryName = col;
                    colnum += 1
                else:
                    #print header[colnum], col
                    if(MFT == 0):
                        years[header_year[colnum]] = dict();
                    years[header_year[colnum]][header_gender[MFT]]=col
                    # Cycle gender index M -> F -> T, then advance the year.
                    MFT += 1;
                    if(MFT>2):
                        MFT = 0;
                        colnum += 1
            countries[countryName]=years;
    finally:
        print countries;
        f.close() # closing
    return countries;
def insertCountriesDB(data):
    """Insert parsed country data into MySQL (Python 2; currently unused).

    NOTE(review): `num` collects the per-year dicts (not the M/F/T values of
    one year) and `year` is whatever the last loop iteration left — the
    INSERT row layout looks wrong; confirm intent before enabling this.
    """
    db,cursor = connectDB();
    for key,val in data.items():
        country = key
        years = val
        for key,val in years.items():
            year = key;
            TFM = val;
            #print year, state, totalNum
        num = list();
        for key,val in years.items():
            num.append(val);
        cursor.execute("INSERT IGNORE INTO Countries VALUES (%s, %s, %s, %s, %s) ", (year, country, num[0], num[1], num[2] ) )
    closeDB(db, cursor);
def connectDB():
    """Open the MySQL connection and return (db, cursor).

    NOTE(review): `import MySQLdb` is commented out at the top of this file,
    so calling this raises NameError as written. Credentials are hardcoded —
    move them to configuration.
    """
    db = MySQLdb.connect(host="localhost", # your host, usually localhost
                     user="tinker", # your username
                      passwd="tinker", # your password
                      db="tinker") # name of the data base
    cursor = db.cursor();
    return db,cursor;
def closeDB(db,cursor):
    """Close the cursor, commit pending work, then close the connection."""
    # Order matters: commit must happen before the connection is closed.
    cursor.close()
    db.commit()
    db.close()
if __name__ == "__main__":
    # Parse the CSV; the DB insertion steps are currently disabled.
    states = readCountriesData();
    #insertCountriesDB(states);
    #states = readStateData();
    #insertStateDB(states);
    # NOTE(review): readEthnicity is not defined anywhere in this file —
    # this line raises NameError at runtime.
    states = readEthnicity();
|
# Count distinct integers among n inputs.
n = int(input())
# Use a set for O(1) membership (the original did an O(n) list scan per
# item, O(n^2) overall) and avoid shadowing the builtin `all`.
seen = set()
for _ in range(n):
    seen.add(int(input()))
print(len(seen))
import pandas as pd
# Load the sample stock data and inspect it.
# NOTE(review): df.info() prints to stdout and returns None, so these
# print() calls also emit a literal 'None'.
df = pd.read_csv('stock-data.csv')
print(df.head(),'\n',df.info(),'\n')
print("#문자열 데이터(시리즈 객체)를 판다스 Timestamp로 변환 및 데이터 내용 및 자료형 확인")
# Convert the string 'Date' column into pandas Timestamps.
df['new_Date']=pd.to_datetime(df['Date'])
print(df,'\n')
print(df.info(),'\n')
print(type(df['new_Date'][0]), '\n')
# Promote the converted column to the row index and drop the original
# string 'Date' column.
df = df.set_index('new_Date')
df = df.drop('Date',axis=1)
print("# 시계열 값으로 변환된 열을 새로운 행 인덱스로 지정. 기존 날짜 열은 삭제. 데이터 내용 및 자료형 확인")
print(df.head(), '\n',df.info())
|
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import joblib
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.impute import SimpleImputer
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.compose import ColumnTransformer
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import mean_squared_error, r2_score, explained_variance_score, mean_absolute_error
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
class CombinedAttributesAdder(BaseEstimator, TransformerMixin):
    """Placeholder transformer for engineered features; currently a no-op stub."""
    def __init__(self):
        pass
    def fit(self, x, y=None):
        # Nothing to learn; the scikit-learn API requires returning self.
        return self
    def transform(self, x):
        # here will added new features
        # NOTE(review): stub — returns None as written; the pipeline below
        # keeps this step commented out, so it is never invoked.
        pass
class Predictor:
    """Bundles a trained model with its fitted feature transformer."""
    def __init__(self, model, transformer):
        self.model = model
        self.transformer = transformer
        # Evaluation metrics, filled in later by test_model().
        self.mae = None
        self.mse = None
        self.acc = None
    def get_predict(self, name, year, city, mileage, capacity, bordered):
        """Predict the price for a single car described by scalar features."""
        frame = pd.DataFrame(data={
            'year': [year],
            'city': [city],
            'mileage': [mileage],
            'capacity': [capacity],
            'bordered': [bordered],
            'name': [name],
        })
        return self.model.predict(self.transformer.transform(frame))
def load_data(path):
    """Read the CSV at *path*, retrying with a legacy encoding on decode errors.

    NOTE(review): 'ANSI' is not a Python codec name — the fallback call will
    raise LookupError as written; 'cp1251' or 'mbcs' was probably intended.
    Confirm against the data source's actual encoding.
    """
    try:
        return pd.read_csv(path)
    except UnicodeDecodeError:
        return pd.read_csv(path, encoding='ANSI')
def split_train_test(data, test_ratio, id_column):
    """Stratified train/test split on price bands, then clean both sets.

    *id_column* is the column stratified on (the price); *test_ratio* is the
    held-out fraction.
    """
    strat_train_set = None
    strat_test_set = None
    ceil_k = 2500000  # scaling factor used to cap the number of strata
    sever = 15000000  # price boundary above which all cars share one stratum
    # Temporary stratum label: price bucket, clipped at sever/ceil_k.
    data['_cat'] = np.ceil(data[id_column] / ceil_k)
    # NOTE(review): Series.where(..., inplace=True) is deprecated in newer
    # pandas — verify against the pinned pandas version.
    data['_cat'].where(data['_cat'] < sever / ceil_k, sever / ceil_k, inplace=True)
    # data['_cat'].hist()
    # plt.show()
    split = StratifiedShuffleSplit(n_splits=1, test_size=test_ratio, random_state=42)
    for train_index, test_index in split.split(data, data['_cat']):
        strat_train_set = data.loc[train_index]
        strat_test_set = data.loc[test_index]
    # Drop the helper stratum column from both sets.
    for set_ in (strat_train_set, strat_test_set):
        set_.drop('_cat', axis=1, inplace=True)
    strat_train_set = get_prepared_data(strat_train_set)
    strat_test_set = get_prepared_data(strat_test_set)
    return strat_train_set, strat_test_set
def draw_plots_to_research_data(data):  # just to draw some plots
    """Exploratory plots: price vs year, popular models, Camry price by city."""
    # 1: scatter of price against production year.
    data.plot(kind='scatter', x='year', y='price', alpha=0.1)
    plt.show()
    # 2: pie chart of the 40 most common model names.
    most_pop_cars = data['name'].value_counts()
    most_pop_cars = pd.DataFrame(data={'count': most_pop_cars.values},
                                 index=most_pop_cars.index)
    most_pop_cars.iloc[:40].plot(kind='pie', y='count', figsize=(15, 6))
    plt.show()
    # 3: mean Toyota Camry price for ten cities, sorted ascending.
    brand = data[data['brand'] == 'Toyota']
    models = brand[brand['model'] == 'Camry'].drop(['color', 'transmission', 'latitude', 'longitude', 'mileage',
                                                    'capacity', 'body', 'drive', 'year'], axis=1)
    new = models.groupby('city').mean()
    new[:10].sort_values('price').plot(kind='bar')
    plt.show()
def get_prepared_data(data):
    """Drop unused columns and filter out rare/extreme rows."""
    data = data.drop(['latitude', 'longitude', 'body', 'color', 'transmission', 'drive'], axis=1)
    # delete overprice cars (and implausibly cheap ones)
    cond = data[data['price'] < 500000].index
    data.drop(cond, inplace=True)
    cond = data[data['price'] > 15000000].index
    data.drop(cond, inplace=True)
    # concat brand and model into a single 'name' feature
    name = data[['brand', 'model']].agg(' '.join, axis=1)
    data = data.drop(['brand', 'model'], axis=1)
    data['name'] = name
    # deleting cites with low metrics: rare cities collapse into one bucket
    cites = data['city'].value_counts()
    to_remove = cites[cites < 400].index
    data.replace(to_remove, 'Остальное', inplace=True)
    # keep only model names with more than 300 listings
    models = data['name'].value_counts()
    data = data.loc[data['name'].isin(models.index[models > 300])]
    # NOTE(review): this keeps rows whose year-value COUNT is < 1980, which
    # reads like it was meant to filter on the year value itself
    # (e.g. data['year'] > 1980) — confirm the intended semantics.
    year = data['year'].value_counts()
    data = data.loc[data['year'].isin(year.index[year < 1980])]
    return data
def preprocessing(train_set):
    """Split features/labels and fit the full preprocessing pipeline.

    Returns (prepared_data, data_labels, fitted_pipeline); the pipeline is
    reused later to transform test/prediction inputs.
    """
    data = train_set.drop('price', axis=1)
    data_labels = train_set['price'].copy()
    # Numeric columns are everything except the categorical three below.
    data_num = data.drop(['name', 'city', 'bordered'], axis=1)
    num_pipeline = Pipeline([
        ('imputer', SimpleImputer(strategy='median')),
        # ('attribs_adder', CombinedAttributesAdder()),
        ('std_scaler', StandardScaler())
    ])
    num_attribs = list(data_num)
    cat_attribs = ['name', 'city', 'bordered']
    full_pipeline = ColumnTransformer([
        ('num', num_pipeline, num_attribs),
        ('cat', OneHotEncoder(), cat_attribs)
    ])
    prepared_data = full_pipeline.fit_transform(data)
    return prepared_data, data_labels, full_pipeline
def loading_models(f):
    """Decorator: cache the decorated model factory's result on disk via joblib.

    The wrapped function gains a `load` keyword (default True): when True it
    first tries to load 'models/<kind>-<funcname>.pkl'; on a cache miss (or
    load=False) it calls the factory, persists the result, and returns it.
    """
    from functools import wraps

    @wraps(f)  # FIX: preserve the factory's __name__/__doc__ for introspection
    def wrap_loading(*args, load=True):
        # args[0] is the model kind; the filename combines it with the
        # factory's name so different factories don't collide.
        name = 'models/' + str(args[0]) + '-' + f.__name__ + '.pkl'
        if load:
            try:
                return joblib.load(name)
            except FileNotFoundError:
                print(f'{name} model not saved. Creating new..')
        result = f(*args)
        joblib.dump(result, name)
        return result
    return wrap_loading
@loading_models
def get_model(kind, prepared_data, data_labels):
    """Fit a fresh regressor of the requested *kind* on the prepared data."""
    models = {
        'LR': LinearRegression,
        'DTR': DecisionTreeRegressor,
        'RFR': RandomForestRegressor
    }
    try:
        model_cls = models[kind]
    except KeyError:
        raise KeyError(f'{kind} not using as model, please choose from: {models}')
    model = model_cls()
    model.fit(prepared_data, data_labels)
    return model
@loading_models
def get_tuned_model(kind, model, prepared_data, data_labels):
    """Tune *model* with GridSearchCV using a per-kind parameter grid.

    :param kind: model key — 'LR', 'DTR' or 'RFR'.
    :param model: a fitted or unfitted estimator of that kind.
    :raises KeyError: if *kind* has no parameter grid defined.
    :return: the fitted GridSearchCV object (best model in .best_estimator_).
    """
    params = \
        {'RFR':
            [
                {'n_estimators': [3, 10, 30], 'max_features': [2, 4, 6, 8]},
                {'bootstrap': [False], 'n_estimators': [3, 10], 'max_features': [2, 3, 4]}
            ],
         'LR':
            [
                {}
            ],
         'DTR':
            [
                {'max_features': [10, 20, 30, 40]}
            ]}
    try:
        param_grid = params[kind]
    except KeyError as exc:
        # chain the lookup error and list the valid keys
        raise KeyError(
            f'{kind} haven\'t parameters for GridSearchCV, please choose from: {list(params)}'
        ) from exc
    grid_search = GridSearchCV(model, param_grid, cv=5,
                               scoring='neg_mean_squared_error',
                               return_train_score=True)
    grid_search.fit(prepared_data, data_labels)
    return grid_search
def check_cross_val_score(model, prepared_data, data_labels):
    """Print the per-fold RMSE of *model* under 5-fold cross-validation."""
    neg_mse = cross_val_score(model, prepared_data, data_labels,
                              scoring='neg_mean_squared_error', cv=5)
    rmse_per_fold = np.sqrt(-neg_mse)
    print(rmse_per_fold)
def test_model(predictor, test_set):
    """Score *predictor* on *test_set*, storing MAE, MSE and R2 on it.

    Mutates the predictor in place: sets .mae, .mse and .acc.
    """
    features = test_set.drop('price', axis=1)
    truth = test_set['price'].copy()
    prepared = predictor.transformer.transform(features)
    predicted = predictor.model.predict(prepared)
    predictor.mae = mean_absolute_error(truth, predicted)
    predictor.mse = mean_squared_error(truth, predicted)
    predictor.acc = r2_score(truth, predicted)
def get_predict_model(path, model_kind):
    """Load data from *path*, train and tune a *model_kind* regressor, and
    return a Predictor already scored against the held-out test set."""
    data = load_data(path).drop('url', axis=1)
    train_set, test_set = split_train_test(data, 0.2, 'price')
    prepared_data, data_labels, full_pipeline = preprocessing(train_set)
    base_model = get_model(model_kind, prepared_data, data_labels, load=True)
    search = get_tuned_model(model_kind, base_model, prepared_data, data_labels, load=True)
    predictor = Predictor(search.best_estimator_, full_pipeline)
    test_model(predictor, test_set)
    return predictor
|
#import sys
#input = sys.stdin.readline
from collections import deque
def main():
    """Multi-source BFS printing each node's distance to the nearest source.

    Reads N nodes, M edges and K sources from stdin; each undirected edge
    is kept only in the direction from the lower peak height to the higher
    (ties go from b to a). Unreachable nodes print -1.
    """
    n, m, _k = map(int, input().split())
    heights = list(map(int, input().split()))
    sources = [int(x) - 1 for x in input().split()]
    edge_list = [tuple(int(x) - 1 for x in input().split()) for _ in range(m)]
    adjacency = [[] for _ in range(n)]
    for a, b in edge_list:
        # direct each edge uphill; equal heights point b -> a
        if heights[a] < heights[b]:
            adjacency[a].append(b)
        else:
            adjacency[b].append(a)
    # print(adjacency)
    distance = [-1] * n
    queue = deque()
    for src in sources:
        queue.append(src)
        distance[src] = 0
    while queue:
        node = queue.popleft()
        step = distance[node] + 1
        for nxt in adjacency[node]:
            if distance[nxt] == -1:
                distance[nxt] = step
                queue.append(nxt)
    print("\n".join(map(str, distance)))


if __name__ == '__main__':
    main()
|
# Attributes and Methods for fallback_mode
# Auto-execute numpy method when corresponding cupy method is not found
# "NOQA" to suppress flake8 warning
from cupyx.fallback_mode.fallback import numpy # NOQA
|
""" Contains upgrade tasks that are executed when the application is being
upgraded on the server. See :class:`onegov.core.upgrade.upgrade_task`.
"""
from onegov.core.upgrade import upgrade_task
from onegov.core.orm.types import UTCDateTime
from sqlalchemy import Column
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from onegov.core.upgrade import UpgradeContext
@upgrade_task('Add scheduled column')
def add_scheduled_column(context: 'UpgradeContext') -> None:
    """Add a nullable UTC 'scheduled' timestamp column to 'newsletters'."""
    context.operations.add_column('newsletters', Column(
        'scheduled', UTCDateTime, nullable=True
    ))
|
def _makeHexStr(a):
return format(a, '#04x')
def _generateTenAccounts():
prefixes = [_makeHexStr(i) for i in range(10)]
postfix = "195c933ff445314e667112ab22f4a7404bad7f9746564eb409b9bb8c6aed32"
return [prefix + postfix for prefix in prefixes]
tenAccounts = _generateTenAccounts()
|
#!/usr/bin/python
# coding: utf-8
# Copyright 2013 The Font Bakery Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See AUTHORS.txt for the list of Authors and LICENSE.txt for the License.
#
# Based on http://typophile.com/node/13081
# Also see http://typophile.com/node/13081
from __future__ import print_function
import collections
import os
import sys
if sys.version_info.major >= 3:
from io import StringIO
else:
from StringIO import StringIO
from fontTools.ttLib import TTLibError
def metricview(fonts):
    """Build a vertical-metrics report for *fonts* and return its text.

    Fonts that fail to open are reported to stdout and skipped.
    """
    from bakery_cli.ttfont import Font
    view = TextMetricsView()
    for path in fonts:
        try:
            font_metrics = Font(path)
        except TTLibError as ex:
            print("ERROR: %s" % ex)
            continue
        view.add_metric(os.path.basename(path), font_metrics)
    view.print_metrics()
    return view.get_contents()
def get_metric_view(fonts):
    """Build a vertical-metrics report for *fonts* and return the view object.

    Same collection logic as metricview(), but hands back the
    TextMetricsView itself rather than its rendered contents.
    """
    from bakery_cli.ttfont import Font
    view = TextMetricsView()
    for path in fonts:
        try:
            font_metrics = Font(path)
        except TTLibError as ex:
            print("ERROR: %s" % ex)
            continue
        view.add_metric(os.path.basename(path), font_metrics)
    view.print_metrics()
    return view
def metricfix(fonts):
    """Unify ascent/descent across *fonts* to their common bounding box.

    First pass finds the extreme ymin/ymax over all fonts (seeded at 0);
    second pass writes those values (and a zero line gap) into each font,
    saving the result as '<path>.fix'.
    """
    from bakery_cli.ttfont import Font
    ymin = ymax = 0
    for path in fonts:
        low, high = Font(path).get_bounding()
        ymin = min(low, ymin)
        ymax = max(high, ymax)
    for path in fonts:
        font = Font(path)
        font.ascents.set(ymax)
        font.descents.set(ymin)
        font.linegaps.set(0)
        font.save(path + '.fix')
class TextMetricsView(object):
    """Accumulates vertical-metric values across fonts and renders plain-text
    reports (warnings, comparison table, extreme glyphs, inconsistencies)
    into an in-memory stream, retrievable via get_contents()."""

    def __init__(self):
        # All report output is written here and read back via get_contents().
        self.outstream = StringIO()
        self._its_metrics_header = ['Parameter ']
        # first column has a length of largest parameter
        # named OS/2.sTypoDescender
        self._its_metrics = collections.OrderedDict([
            ('ymax', []),
            ('hhea.ascent', []),
            ('OS/2.sTypoAscender', []),
            ('OS/2.usWinAscent', []),
            ('ymin', []),
            ('hhea.descent', []),
            ('OS/2.sTypoDescender', []),
            ('OS/2.usWinDescent', []),
            ('hhea.lineGap', []),
            ('OS/2.sTypoLineGap', []),
            ('hhea total', []),
            ('typo total', []),
            ('win total', []),
            ('UPM:Heights', []),
            ('UPM:Heights %', [])
        ])
        # Metric keys whose values differ between the fonts seen so far.
        self._inconsistent = set()
        # metric key -> [{'value': v, 'fonts': [font names with that value]}]
        self._inconsistent_table = {}
        self._warnings = []
        # font name -> (highest glyphs, lowest glyphs)
        self.glyphs = collections.OrderedDict()

    def add_to_table(self, fontname, key, value):
        """Record *value* for metric *key* of *fontname*, tracking any
        disagreement with values recorded for earlier fonts."""
        if self._its_metrics[key] and value not in self._its_metrics[key]:
            self._inconsistent.add(key)
            if key not in self._inconsistent_table:
                self._inconsistent_table[key] = []
            # lookup row with value and append fontname to `fonts` key, eg.:
            # {'hhea.ascent': [{'value': 390,
            #                   'fonts': ['fontname.ttf', 'fontname2.ttf']}]}
            #
            # It looks like json groupped by metrics key
            row = {}
            for r in self._inconsistent_table[key]:
                if r['value'] == value:
                    row = r
            if not row:
                row = {'value': value, 'fonts': []}
                self._inconsistent_table[key].append(row)
            row['fonts'].append(fontname)
        self._its_metrics[key].append(value)

    def add_metric(self, font_name, vmet):
        """Append one font's vertical metrics to the table.

        *vmet* provides get_bounding(), ascents/descents/linegaps groups,
        get_upm_height() and get_highest_and_lowest().  Also records a
        warning when the hhea/typo/win totals disagree.
        """
        ymin, ymax = vmet.get_bounding()
        self._its_metrics_header.append(font_name)
        self.add_to_table(font_name, 'hhea.ascent', vmet.ascents.hhea)
        self.add_to_table(font_name, 'OS/2.sTypoAscender', vmet.ascents.os2typo)
        self.add_to_table(font_name, 'OS/2.usWinAscent', vmet.ascents.os2win)
        self.add_to_table(font_name, 'hhea.descent', vmet.descents.hhea)
        self.add_to_table(font_name, 'OS/2.sTypoDescender', vmet.descents.os2typo)
        self.add_to_table(font_name, 'OS/2.usWinDescent', vmet.descents.os2win)
        self.add_to_table(font_name, 'hhea.lineGap', vmet.linegaps.hhea)
        self.add_to_table(font_name, 'OS/2.sTypoLineGap', vmet.linegaps.os2typo)
        self._its_metrics['ymax'].append(ymax)
        self._its_metrics['ymin'].append(ymin)
        # total glyph height and its ratio to the units-per-em
        value = abs(ymin) + ymax
        upm = '%s:%s' % (vmet.get_upm_height(), value)
        self._its_metrics['UPM:Heights'].append(upm)
        value = (value / float(vmet.get_upm_height())) * 100
        self._its_metrics['UPM:Heights %'].append('%d %%' % value)
        hhea_total = vmet.ascents.hhea + abs(vmet.descents.hhea) + vmet.linegaps.hhea
        self._its_metrics['hhea total'].append(hhea_total)
        typo_total = vmet.ascents.os2typo + abs(vmet.descents.os2typo) + vmet.linegaps.os2typo
        self._its_metrics['typo total'].append(typo_total)
        win_total = vmet.ascents.os2win + abs(vmet.descents.os2win)
        self._its_metrics['win total'].append(win_total)
        if len(set([typo_total, hhea_total, win_total])) > 1:
            self._warnings.append('%s has NOT even heights' % font_name)
        self.glyphs[font_name] = vmet.get_highest_and_lowest()

    def print_metrics(self):
        """Render every report section to the output stream, in order."""
        self.print_warnings()
        self.print_metrics_table()
        self.print_high_glyphs()
        self.print_low_glyphs()
        self.print_inconsistent_table()

    def print_warnings(self):
        """Write the inconsistent-metric summary and collected warnings."""
        if self._inconsistent:
            _ = 'WARNING: Inconsistent {}'
            print(_.format(' '.join([str(x) for x in self._inconsistent])),
                  end='\n\n', file=self.outstream)
        if self._warnings:
            for warn in self._warnings:
                print('WARNING: %s' % warn, file=self.outstream)

    def print_metrics_table(self):
        """Write the header row, then one row per metric; each column is
        left-aligned and sized to its header plus 4 spaces."""
        formatstring = ''
        for k in self._its_metrics_header:
            print(('{:<%s}' % (len(k) + 4)).format(k), end='', file=self.outstream)
            formatstring += '{:<%s}' % (len(k) + 4)
        print(file=self.outstream)
        for k, values in self._its_metrics.items():
            print(formatstring.format(*([k] + values)), file=self.outstream)

    def print_high_glyphs(self):
        """List each font's highest glyphs; section header is printed lazily."""
        header_printed = False
        for font, glyphs in self.glyphs.items():
            if glyphs[0]:
                if not header_printed:
                    print(file=self.outstream)
                    print('High Glyphs', file=self.outstream)
                    header_printed = True
                print(font + ':', ' '.join(glyphs[0]), file=self.outstream)

    def print_low_glyphs(self):
        """List each font's lowest glyphs; section header is printed lazily."""
        header_printed = False
        for font, glyphs in self.glyphs.items():
            if glyphs[1]:
                if not header_printed:
                    print(file=self.outstream)
                    print('Low Glyphs', file=self.outstream)
                    header_printed = True
                print(font + ':', ' '.join(glyphs[1]), file=self.outstream)

    def print_inconsistent_table(self):
        """For each inconsistent metric, list the fonts that deviate from the
        most common value."""
        print(file=self.outstream)
        for metrickey, row in self._inconsistent_table.items():
            value = self.find_max_occurs_from_metrics_key(row)
            tbl = {}
            for r in row:
                if r['value'] == value:
                    continue
                if metrickey not in tbl:
                    tbl[metrickey] = []
                tbl[metrickey] += r['fonts']
            for k, r in tbl.items():
                print('Inconsistent %s:' % k, ', '.join(r), file=self.outstream)

    def find_max_occurs_from_metrics_key(self, metricvalues):
        """Return the value held by the largest number of fonts (0 if empty)."""
        result = 0
        occurs = 0
        for v in metricvalues:
            if len(v['fonts']) > occurs:
                occurs = len(v['fonts'])
                result = v['value']
        return result

    def get_contents(self):
        """Return everything written to the report stream so far."""
        self.outstream.seek(0)
        return self.outstream.read()
|
import re
from itertools import permutations

# Advent of Code 2016 day 21: puzzle input and worked example.
text_file = open('Day 21\\Input.csv')
test_file = open('Day 21\\Test.csv')
lines = text_file.read().split('\n')
test_lines = test_file.read().split('\n')
# Part 1: password to scramble.  Part 2: scrambled password to invert.
inp_p1 = 'abcdefgh'
inp_p2 = 'fbgdceah'
test_inp = 'abcde'
def swap_pos(inp, line):
    """Swap the characters at the two positions named in *line*."""
    first, second = (int(num) for num in re.findall(r"\d+", line)[:2])
    chars = list(inp)
    chars[first], chars[second] = chars[second], chars[first]
    return ''.join(chars)
def swap_let(inp, line):
    """Swap every occurrence of the two letters named in *line*."""
    words = line.split(' ')
    first, second = words[2], words[5]
    exchange = {first: second, second: first}
    return ''.join(exchange.get(ch, ch) for ch in inp)
def reverse(inp, line):
    """Reverse the inclusive span between the two positions named in *line*."""
    start, stop = (int(num) for num in re.findall(r"\d+", line)[:2])
    flipped = inp[start:stop + 1][::-1]
    return inp[:start] + flipped + inp[stop + 1:]
def rotate_lr(inp, line):
    """Rotate *inp* left or right by the step count named in *line*.

    An unrecognized direction yields an empty string, matching the
    fall-through of the original instruction set.
    """
    words = line.split(' ')
    direction, steps = words[1], int(words[2])
    indices = []
    if direction == 'left':
        indices = list(range(steps, len(inp))) + list(range(0, steps))
    if direction == 'right':
        indices = list(range(len(inp) - steps, len(inp))) + list(range(0, len(inp) - steps))
    return ''.join(inp[i] for i in indices)
def move(inp, line):
    """Remove the character at the first position named in *line* and
    reinsert it at the second position."""
    src, dst = (int(num) for num in re.findall(r"\d+", line)[:2])
    chars = list(inp)
    chars.insert(dst, chars.pop(src))
    return ''.join(chars)
def rotate_pos(inp, line):
    """Rotate right based on the index of the letter named in *line*:
    1 + index steps, plus one extra step when the index is at least 4."""
    letter = line.split(' ')[6]
    where = inp.find(letter)
    extra = 2 if where >= 4 else 1
    rot = (where + extra) % len(inp)
    return inp[-rot:] + inp[:-rot]
def find_function(line):
    """Map an instruction *line* to its index in the `options` dispatch table.

    Returns None for an unrecognized instruction.
    """
    # checked in the same priority order as the original if/elif chain
    dispatch = [
        ('swap position', 0),
        ('swap letter', 1),
        ('reverse', 2),
        ('rotate left', 3),
        ('rotate right', 3),
        ('move', 4),
        ('rotate based', 5),
    ]
    for needle, code in dispatch:
        if needle in line:
            return code
    return None
def scramble(inp, lines):
    """Apply every scrambling instruction in *lines* to *inp*, in order."""
    result = inp
    for instruction in lines:
        operation = options[find_function(instruction)]
        result = operation(result, instruction)
    return result
def unscramble(inp, lines, target=None):
    """Brute-force the permutation of *inp*'s letters that scrambles to *target*.

    :param inp: any string containing the letters of the password.
    :param lines: the scrambling instructions.
    :param target: scrambled password to match; defaults to the module-level
        ``inp_p2`` so existing callers keep their behavior.
    :return: the matching plaintext, or None if no permutation matches.
    """
    if target is None:
        target = inp_p2  # backward-compatible default (part-2 puzzle input)
    for candidate in permutations(inp):
        attempt = ''.join(candidate)
        if scramble(attempt, lines) == target:
            return attempt
# Dispatch table: find_function() result -> operation implementation.
options = {
    0: swap_pos,
    1: swap_let,
    2: reverse,
    3: rotate_lr,
    4: move,
    5: rotate_pos,
}
print("Test: " + scramble(test_inp, test_lines))  # Correct!
print("Part 1: " + scramble(inp_p1, lines))  # Correct!
print("Part 2: " + unscramble(inp_p1, lines))  # Correct!
import string
from selenium import webdriver
from wordcloud import WordCloud
from Read import getUser, getMessage
from Socket import openSocket, sendMessage
from Initialize import joinRoom
import time
# Python 2 script: reads a Twitch IRC chat stream and builds a word cloud
# from collected messages, either on demand ("makecloud" in chat) or every
# five minutes.
time1 = int(time.time())
time2 = int(time.time())+300  # deadline for the next automatic word cloud
s = openSocket()
joinRoom(s)
readbuffer = ""
messageList = []
#driver=webdriver.Chrome()
#driver.get("https://twitch.tv/proleaguecsgo")
while True:
    # accumulate raw IRC data; the last (possibly partial) line is kept back
    readbuffer = readbuffer + s.recv(1024)
    temp = string.split(readbuffer, "\n")
    readbuffer = temp.pop()
    for line in temp:
        user = getUser(line)
        message = str(getMessage(line)).replace("\r","")
        messageList.append(message)
        time1 = int(time.time())
        print user + " typed : " + message + " " + str(time2-time1)
        #word cloud
        if "makecloud" in message or (time2-time1)<=0:
            # reset the five-minute timer and render the collected messages
            time1 = int(time.time())
            time2 = time1+300
            messageListFinal=str(messageList)
            messageListFinal=messageListFinal.replace(","," ")
            messageListFinal=messageListFinal.replace("'","")
            wordcloud = WordCloud(width=1200, height=1200).generate(messageListFinal)
            image = wordcloud.to_image()
            image.show()
            messageList=[]
            print("i hope it worked")
|
"""
Author: Nicholas Baron 830278807
Date: 3/25/2018
Description: This is a class that will allow the operator to communicate and write speeds to Robosub's ESCs axialy.
"""
import sys
sys.path.append("/home/nick/python_driver/Adafruit_Python_PCA9685")
sys.path.append("/home/nick/github/Controls/RaspberryPi/")
import Adafruit_PCA9685
from RaspberryPi.helpers.helpers import map
class MotorController:
    """Drives Robosub's six ESCs (two per axis: x, y, z) through an Adafruit
    PCA9685 PWM hat, translating microsecond pulse widths into the hat's
    12-bit duty-cycle counts."""

    armed = False
    frequency = 50 #in hz
    # Calibrated 12-bit counts for neutral / full-forward / full-reverse
    # pulses at 50 Hz (see get_bit).
    dead_bit = 320
    max_bit = 425
    min_bit = 213
    # ESC pulse widths in microseconds: neutral, maximum, minimum.
    pwm_norm = 1500
    pwm_max = 2000
    pwm_min = 1000
    act_num = 50 #the number of us off norm for the class to start writing information to the hat. Default 50 (same as dead band for esc)

    def __init__(self):
        """
        Initializes the motorcontroller setting the headers for the datalogger and starting the adafruit pwm hat.
        """
        self.pwm = Adafruit_PCA9685.PCA9685()
        self.pwm.set_pwm_freq(self.frequency)
        self.pulse_per_bit = self.get_ppb(self.frequency)
        self.headers = ["x1", "x2", "y1", "y2", "z1", "z2"]
        self.data = [0, 0, 0, 0, 0, 0]

    def get_ppb(self, freq):
        """
        This allows the user to get the approximate bit value for a certain us value. It is not exact. The values can be tuned in a similar way to get_bit.
        :param freq: This is the frequency you would like to get the bit value for. Note: this will set the frequency for the pi hat in the process.
        :return: Returns and float that is the amount of us per bit of the adafruit hat.
        """
        self.pwm.set_pwm_freq(freq)
        pulse_length = 1000000.0  # 1,000,000 us per second
        pulse_length = pulse_length / freq
        print(str(pulse_length) + "us per period")
        pulse_length = pulse_length / 4096  # 12 bits of resolution
        print(str(pulse_length) + "us per bit")
        return pulse_length

    def get_bit(self, microsecond):
        """
        This is a finely tuned version of get_ppb. It will allow the user to get the exact bit value between 1000us and 2000us at 50Hz for the adafruit shield. USE THIS one.
        :param microsecond: This is the motorspeed that your want to get the bit value for.
        :return: The bit value corresponding to the input time.
        """
        #320 = 1500us
        #425 = 2000us
        #213 = 1000us
        if microsecond == 0:
            bit = 0
        elif microsecond >= self.pwm_max:
            bit = self.max_bit
        elif microsecond <= self.pwm_min:
            bit = self.min_bit
        elif microsecond > self.pwm_norm+self.act_num and microsecond < self.pwm_max:
            # `map` is the helper imported from RaspberryPi.helpers (not the
            # builtin) — presumably an Arduino-style range remap; confirm.
            bit = map(microsecond, self.pwm_norm, self.pwm_max, self.dead_bit, self.max_bit)
        elif microsecond > self.pwm_min and microsecond < self.pwm_norm-self.act_num:
            bit = map(microsecond, self.pwm_min, self.pwm_norm, self.min_bit, self.dead_bit)
        else:
            # within +/- act_num of neutral: hold the dead-band count
            bit = 320
        #bit = int(round(microsecond / self.pulse_per_bit))
        print("us: " + str(microsecond) + " => bit:" + str(bit))
        return int(round(bit))

    def set_batch_microseconds(self, us_array):
        """
        This will set all of the motors to the given values passed the the function.
        :param us_array: This is an array of 6 int that represent the motorspeeds. [x1, x2, y1, y2, z1, z2]
        :return: Nothing

        NOTE(review): unlike the other setters, values here are written to
        set_pwm without get_bit() conversion — confirm callers pass raw bit
        counts rather than microseconds.
        """
        self.data = us_array
        for i in range(0, 6):
            self.pwm.set_pwm(i, 0, us_array[i])

    def set_microseconds(self, channel, microsecond):
        """
        This will set the us of a specific channel to the given value.
        :param channel: The channel (0-6)
        :param microsecond: This is the time high that you want it set to.
        :return: Nothing
        """
        self.data[channel] = microsecond
        self.pwm.set_pwm(channel, 0, self.get_bit(microsecond))

    def set_all_microseconds(self, microsecond):
        """
        This function sets all 6 axial speeds to the same value.
        :param microsecond: the speed to set all motors to.
        :return: nothing
        """
        for i in range(0, 6):
            self.data[i] = microsecond
            self.pwm.set_pwm(i, 0, self.get_bit(microsecond))

    def get_headers(self):
        """
        Gets the headers for the datalogger.
        :return: An array of 6 strings representing the names of each bit of data for the datalogger.
        """
        return self.headers

    def get_data(self):
        """
        Gets the values of the motors for the data logger.
        :return: An array of 6 strings that are the data for each value of the datalogger.
        """
        return [str(self.data[0]), str(self.data[1]), str(self.data[2]), str(self.data[3]),
                str(self.data[4]), str(self.data[5])]

    def get_speeds(self):
        """
        Gets the values that the motors are currently set to.
        :return: An array of 6 ints representing the us values of the motors.
        """
        return self.data

    def arm(self):
        """
        This tells the motor controller to arm the motors. Sets the motors from 0us to 1500us.
        :return: Nothing.
        """
        print("Arm")
        self.set_all_microseconds(1500)
        self.armed = True

    def disarm(self):
        """
        This tells the motor controller to disarm the motors. Sets all motors to 0us time high.
        :return: Nothing
        """
        print("Disarm")
        self.set_all_microseconds(0)
        self.armed = False

    def write(self, axis, ms0, ms1):
        """
        This allows the user to write to the motor controller axialy. This is intended to be used for forward, up, and down movement.
        :param axis: The axis that you are writing to. [x, y, z]
        :param ms0: Motor-speed 0.
        :param ms1: Motor-speed 1.
        :return:
        """
        print("Write")
        # silently ignored unless arm() has been called
        if self.armed:
            self.set_microseconds(2 * axis, ms0)
            self.set_microseconds(2 * axis + 1, ms1)
import cv2

# Detect faces in 'news.jpg' with a Haar cascade and draw blue boxes on them.
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
img=cv2.imread("news.jpg")
img_g =cv2.imread("news.jpg",0)  # same image loaded grayscale for detection
faces=face_cascade.detectMultiScale(img_g,scaleFactor=1.1,minNeighbors=5)
for x,y,w,h in faces:
    # draw on the color image; BGR (255,0,0) is blue, 5px border
    img_updated=cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),5)
print(faces)
print(type(faces))
cv2.imshow("face",img_updated)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
from django.conf.urls import url
from . import views
# Fixed pages first; the remaining routes are scoped to a tournament by its
# tour_name slug (anything without a slash).
urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^match/(?P<match_pk>[0-9]+)/$', views.match, name='match'),
    url(r'^benchmark/(?P<benchmark_pk>[0-9]+)/$', views.benchmark, name='benchmark'),
    url(r'^(?P<tour_name>[^/]+)/$', views.submit, name='submit'),
    url(r'^(?P<tour_name>[^/]+)/predictions/$', views.predictions, name='predictions'),
    url(r'^(?P<tour_name>[^/]+)/table/$', views.table, name='table'),
    url(r'^(?P<tour_name>[^/]+)/table/(?P<org_name>[^/]+)/$', views.org_table, name='org_table'),
    url(r'^(?P<tour_name>[^/]+)/join/$', views.join, name='join'),
    url(r'^(?P<tour_name>[^/]+)/results/$', views.results, name='results'),
    url(r'^(?P<tour_name>[^/]+)/rules/$', views.rules, name='rules'),
    url(r'^(?P<tour_name>[^/]+)/benchmark/$', views.benchmark_table, name='benchmark_table'),
]
|
# Print each name that occurs more than once in the list.
list1 = ["Derbes", "Azamat", "Dauren", "Dana", "Derbes", "Derbes", "Dias"]
set1 = set(list1)
for name in set1:
    if list1.count(name) > 1:
        print(name)
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2018-03-13 10:38
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: replaces the 'permission' model's custom
    permissions (labels contain escaped Chinese section names)."""

    dependencies = [
        ('nova', '0078_permission'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='permission',
            options={'permissions': (('access_asset', '\u8d44\u4ea7\u7ba1\u7406 access_asset'), ('access_app', '\u7cfb\u7edf\u7ba1\u7406 access_app'), ('access_database', '\u6570\u636e\u5e93\u7ba1\u7406 access_database'), ('access_monitor', '\u76d1\u63a7\u7ba1\u7406 access_monitor'), ('access_task', '\u4efb\u52a1\u7ba1\u7406 access_task'), ('access_file', '\u6587\u4ef6\u7ba1\u7406 access_file'), ('access_log', '\u65e5\u5fd7\u7ba1\u7406 access_log'))},
        ),
    ]
|
from Cases.Takeaway.Codes.TakeawayCase import PreProcessingTakeaway
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import numpy as np
import nltk
#nltk.download('punkt') #one-time download
#nltk.download('stopwords') #one-time download
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
#Question 1) What is the relation between the reviews and the helpfulness?
def CreatingDfBasedOnColumns(columns):
    """Return the module-level combined review dataframe restricted to *columns*."""
    helpfulness_reviews_df = df_review_combined[columns]
    return helpfulness_reviews_df
def parseStr(s):
    """Parse a bracketed list string like "[3, 5]" into a list of ints.

    Every '[' and ']' is removed (wherever it appears), then the remainder
    is split on commas and each piece converted to int.

    :raises ValueError: if any comma-separated piece is not an integer.
    """
    # comprehension instead of the original append loop, and no local
    # variable shadowing the builtin `list`
    cleaned = s.replace("[", "").replace("]", "")
    return [int(piece.strip()) for piece in cleaned.split(',')]
def HavingHelpfulPercentage(helpfulness_reviews_df):
    """Add helpfulclick, totalclick and helpfulness_percentage columns to
    *helpfulness_reviews_df*, parsed from its raw 'helpful' column
    (strings like "[3, 5]" meaning 3 helpful votes of 5 total).

    Mutates the dataframe in place and returns nothing.
    NOTE(review): assigning columns to a dataframe sliced via df[cols] can
    trigger pandas chained-assignment warnings — confirm the caller expects
    in-place mutation of the shared frame.
    """
    helpfulness_reviews_df['helpful_list'] = helpfulness_reviews_df['helpful'].map(lambda name: parseStr(name))
    # expand the two-element lists into a two-column frame
    helpfullnessscores_df = helpfulness_reviews_df['helpful_list'].apply(pd.Series)
    mapping_columnnames = {helpfullnessscores_df.columns[0]: 'helpfulclick',
                           helpfullnessscores_df.columns[1]: 'totalclick'}
    helpfullnessscores_df = helpfullnessscores_df.rename(columns=mapping_columnnames)
    helpfullnessscores_df['helpfulclick'] = pd.to_numeric(helpfullnessscores_df['helpfulclick'])
    helpfullnessscores_df['totalclick'] = pd.to_numeric(helpfullnessscores_df['totalclick'])
    helpfulness_percentage = []
    # percentage = helpfulclick / totalclick * 100, or 0 when nobody voted
    for index, row in helpfullnessscores_df.iterrows():
        if row["totalclick"] == 0:
            helpfulness_percentage.append(0)
        else:
            per = row["helpfulclick"] / row["totalclick"] * 100
            helpfulness_percentage.append(per)
    helpfullnessscores_df['helpfulness_percentage'] = helpfulness_percentage
    helpfulness_reviews_df['helpfulness_percentage'] = helpfullnessscores_df['helpfulness_percentage']
    helpfulness_reviews_df['helpfulclick'] = helpfullnessscores_df['helpfulclick']
    helpfulness_reviews_df['totalclick'] = helpfullnessscores_df['totalclick']
def GetDistinctNonstopWordsLength(text):
    """Count the distinct tokens in *text* excluding English stopwords."""
    meaningful = set(word_tokenize(text)) - set(stopwords.words('english'))
    return len(meaningful)
def CheckingCorrelations(columnnames):
    """Print and heat-map the correlation matrix of *columnnames* from the
    module-level helpfulness_reviews_df.

    Bug fix: the axis tick labels previously came from the global
    ``corrnames`` instead of the *columnnames* argument, mislabeling the
    plot whenever different columns were passed; the tick count was also
    hard-coded to 3.
    """
    correlations = helpfulness_reviews_df[columnnames].corr()
    print(correlations)
    fig = plt.figure(figsize=[8, 6])
    ax = fig.add_subplot(111)
    cax = ax.matshow(correlations, vmin=-1, vmax=1)
    fig.colorbar(cax)
    ticks = np.arange(0, len(columnnames), 1)  # one tick per column
    ax.set_xticks(ticks)
    ax.set_yticks(ticks)
    ax.set_xticklabels(columnnames)
    ax.set_yticklabels(columnnames)
    plt.show()
if __name__ == '__main__':
    # Question 1 driver: correlate review helpfulness with the star rating
    # and the review's distinct non-stopword count.
    ta_data_root = "C:/Users/bengisu.oniz/PyProjects/Main/Cases/Takeaway/Data/"
    df_review_combined = PreProcessingTakeaway(ta_data_root)
    columnnames_helpfulness_reviews = ["metadataid", 'asin', 'reviewid', 'reviewerid', 'reviewername', 'helpful',
                                       "reviewtext", "overall"]
    helpfulness_reviews_df = CreatingDfBasedOnColumns(columnnames_helpfulness_reviews)
    HavingHelpfulPercentage(helpfulness_reviews_df)
    # review length proxy: distinct non-stopword tokens per review
    helpfulness_reviews_df['review_word_count'] = helpfulness_reviews_df['reviewtext'].map(
        lambda text: GetDistinctNonstopWordsLength(text))
    corrnames = ['helpfulness_percentage', 'overall', 'review_word_count']
    CheckingCorrelations(corrnames)
|
from flask import Flask, request, jsonify, json
from cassandra.cluster import Cluster
import requests
import sys
# Cassandra connection; the host name 'cassandra' is resolved by the
# container network.
cluster = Cluster(['cassandra'])
session = cluster.connect()
app = Flask(__name__)
# Upstream catalogue used for price/description lookups.
base_url = "http://makeup-api.herokuapp.com/api/v1/products.json?brand=maybelline"
@app.route('/')
def hello():
    """Greeting page; the optional ?name= query parameter overrides 'World'."""
    who = request.args.get('name', 'World')
    return '<h1>Hello, {}!<h1>'.format(who)
#get product from database
@app.route('/product/<int:id>', methods=['GET'])
def get_product_by_id(id):
rows = session.execute("""SELECT * FROM makeup.products WHERE id=%(id)s""",{'id': id})
data = None
for row in rows:
data = row
print(row)
#product_url = basee_url.format()
resp = requests.get(base_url)
if resp.ok:
# print(resp.json())
res = resp.json()
matched = None
for product in res:
if product['id'] == id:
matched = product
break
item = {
'id': id,
'name': data.name,
'price': matched['price'],
'description': matched['description']
}
return jsonify(item), resp.status_code
else:
return resp.reason
#insert product into database
@app.route('/items', methods=['POST'])
def create_product():
    """Insert a product (form fields id/name/description) into Cassandra,
    taking its price from the matching record in the remote makeup API.

    Responds 404 when the id has no upstream match (previously crashed on
    matched['price']) and surfaces upstream failures instead of returning
    None. Also drops a dead COUNT(*) query against smartcart.products whose
    result (last_id) was computed but never used.
    """
    resp = requests.get(base_url)
    if not resp.ok:
        return resp.reason
    matched = None
    print('request', request.form['id'])
    for product in resp.json():
        if product['id'] == int(request.form['id']):
            matched = product
            break
    print(request.form['id'])
    print(request.form['name'])
    print(matched)
    if matched is None:
        return jsonify({'message': 'product not found upstream'}), 404
    session.execute(
        "INSERT INTO makeup.products(id,brand,description,name,price) VALUES(%s, %s, %s, %s, %s)",
        (int(request.form['id']), 'maybelline', request.form['description'],
         request.form['name'], matched['price']))
    print('done')
    return jsonify({'message': 'added'}), 201
#delete product from database by itemid
@app.route('/deleteproduct/<int:id>', methods = ['DELETE'])
def delete_product_by_id(id):
print('before delete')
resp = session.execute("""DELETE FROM makeup.products WHERE id={}""".format(id))
return jsonify({'message': 'deleted'.format(id)}), 200
#edit product into database
@app.route('/editproduct/<int:id>', methods = ['PUT'])
def update_product(id):
print('inside put')
print('inside update')
rows = session.execute("""UPDATE makeup.products SET name=%(name)s, brand=%(brand)s ,description=%(description)s,price=%(price)s WHERE id=%(id)s""", {'name': request.form['name'], 'id': id, 'brand': 'maybelline', 'description':request.form['description'], 'price': request.form['price']})
print('after update')
return jsonify({'message':'1'.format(id)}), 200
if __name__ == '__main__':
    # bind to all interfaces so the service is reachable from outside the container
    app.run(host='0.0.0.0', port=8080)
|
# (c) 2012 Urban Airship and Contributors
from django.test import TestCase
from mithril.models import Whitelist
from mithril.tests.utils import fmt_ip
import random
import netaddr
class WhitelistTestCase(TestCase):
    """Checks Whitelist.okay against netaddr's CIDR matching as an oracle."""

    def test_netaddr_integration(self):
        """Random CIDR ranges: okay(ip) must agree with whether netaddr finds
        any matching CIDR for the same ip."""
        # just a tiny range, here
        test_ip = random.randint(0, 0xFFFFFFFF)
        num_ranges = random.randint(1, 10)
        whitelist = Whitelist.objects.create(
            name='anything',
            slug='anything'
        )
        cidrs = []
        for idx in range(num_ranges):
            r = whitelist.range_set.create(
                ip = fmt_ip(random.randint(0, 0xFFFFFFFF)),
                cidr = random.randint(1, 32)
            )
            cidrs.append('%s/%d' % (r.ip, r.cidr))
        # NOTE(review): test_ip is a raw int — assumes both okay() and
        # netaddr.all_matching_cidrs accept integer addresses; confirm.
        self.assertEqual(
            whitelist.okay(test_ip),
            len(netaddr.all_matching_cidrs(test_ip, cidrs)) > 0
        )
|
import numpy as np
import pandas as pd
# Python 2 script: print the median (50th percentile) of each hardware spec
# column from the computer-configuration dataset.
data = pd.read_csv('../data/computer-configuration.csv')
cores = data['cores']
frenquecy_per_core = data['frenquecy.per.core']  # (sic) column name as in the CSV
video_mem = data['video.mem']
ram = data['ram']
print 'Percentile Results - 50%'
print '* *'
print 'CORES -> {r}'.format(r = np.percentile(cores, 50))
print 'FREQUENCY PER CORE -> {r}'.format(r = np.percentile(frenquecy_per_core, 50))
print 'VIDEO MEMORY -> {r}'.format(r = np.percentile(video_mem, 50))
print 'RAM MEMORY -> {r}'.format(r = np.percentile(ram, 50))
|
import pygame
class Score:
    """On-screen score counter rendered with the Hylia Serif font."""

    def __init__(self, app):
        self.screen = app.screen
        self.score = 0
        self.font = None  # loaded lazily on the first update()

    def draw(self):
        """Blit the most recently rendered score text; update() must have
        run at least once beforehand."""
        self.screen.blit(self.score_letters, self.score_rect)

    def update(self):
        """Re-render the score text, centred at (100, 20).

        Perf fix: the font file was previously re-loaded from disk on every
        call; it is now loaded once and cached.
        """
        if self.font is None:
            self.font = pygame.font.Font("assets/fonts/HyliaSerif.ttf", 32)
        self.score_letters = self.font.render("Score : %d " % (self.score), True, (0, 0, 0))
        self.score_rect = self.score_letters.get_rect()
        self.score_rect.center = (100, 20)
import datetime
import jwt
import bcrypt
from server.flask_app import app, db
from sqlalchemy.ext.declarative import declarative_base
# Declarative base is created here but unused below — the models derive
# from db.Model instead.
Base = declarative_base()

# Many-to-many association tables linking tags to works and bookmarks.
work_tag_table = db.Table('work_tag_table',
    db.Column('tag_id', db.Integer, db.ForeignKey('tags.id'), primary_key=True),
    db.Column('work_id', db.Integer, db.ForeignKey('works.id'), primary_key=True)
)
bookmark_tag_table = db.Table('bookmark_tag_table',
    db.Column('tag_id', db.Integer, db.ForeignKey('tags.id'), primary_key=True),
    db.Column('bookmark_id', db.Integer, db.ForeignKey('bookmarks.id'), primary_key=True)
)
# Self-referential parent/child tables for threading comments and messages.
comment_to_comment = db.Table("comment_to_comment",
    db.Column("parent_comment_id", db.Integer, db.ForeignKey("comments.id"), primary_key=True),
    db.Column("child_comment_id", db.Integer, db.ForeignKey("comments.id"), primary_key=True)
)
message_to_message = db.Table("message_to_message",
    db.Column("parent_message_id", db.Integer, db.ForeignKey("messages.id"), primary_key=True),
    db.Column("child_message_id", db.Integer, db.ForeignKey("messages.id"), primary_key=True)
)
class User(db.Model):
    """Account model: credentials, profile fields, JWT auth helpers, and
    relationships to the user's content."""

    __tablename__ = "users"
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    email = db.Column(db.String(255), unique=True, nullable=False)
    username = db.Column(db.String(255), unique=True, nullable=True)
    password = db.Column(db.String(255), nullable=False)  # bcrypt hash, never plaintext
    bio = db.Column(db.String(600), nullable=True)
    registered_on = db.Column(db.DateTime, nullable=False)
    admin = db.Column(db.Boolean, nullable=False, default=False)
    banned = db.Column(db.Boolean, nullable=True, default=False)
    comments = db.relationship('Comment', backref='comment_user',
                               lazy='dynamic')
    works = db.relationship('Work', backref='work_user',
                            lazy='dynamic')
    bookmarks = db.relationship('Bookmark', backref='bookmark_user',
                                lazy='dynamic')
    notifications = db.relationship('Notification', backref='notification_user',
                                    lazy='dynamic')
    # Messages are split by direction via explicit foreign keys.
    received_messages = db.relationship('Message', foreign_keys="[Message.to_user_id]",
                                        lazy='dynamic')
    sent_messages = db.relationship('Message', foreign_keys="[Message.from_user_id]",
                                    lazy='dynamic')

    def __init__(self, email, password, admin=False, username=None):
        """Store *password* as a bcrypt hash and stamp the registration time."""
        self.email = email
        password_bytes = bytes(password, 'utf-8')
        self.password = bcrypt.hashpw(
            password_bytes, bcrypt.gensalt()
        ).decode()
        self.registered_on = datetime.datetime.now()
        self.admin = admin
        self.username = username

    def encode_auth_token(self, user_id):
        """Return a signed HS256 JWT for *user_id*, valid for 14 days.

        Note: *user_id* is taken as an argument — instance state is not
        consulted.  On failure the exception object is returned, not raised.
        """
        try:
            payload = {
                'exp': datetime.datetime.utcnow() + datetime.timedelta(days=14, seconds=0),
                'iat': datetime.datetime.utcnow(),
                'sub': user_id
            }
            return jwt.encode(
                payload,
                app.config.get('SECRET_KEY'),
                algorithm='HS256'
            )
        except Exception as e:
            return e

    @staticmethod
    def decode_auth_token(auth_token):
        """Return the user id stored in *auth_token*, or a human-readable
        error string when the token is blacklisted, expired, or invalid.

        NOTE(review): jwt.decode is called without an `algorithms=` list,
        which PyJWT >= 2 requires — confirm the pinned PyJWT version.
        """
        try:
            payload = jwt.decode(auth_token, app.config.get('SECRET_KEY'))
            is_blacklisted_token = BlacklistToken.check_blacklist(auth_token)
            if is_blacklisted_token:
                return 'Token blacklisted. Please log in again.'
            else:
                return payload['sub']
        except jwt.ExpiredSignatureError:
            return 'Signature expired. Please log in again.'
        except jwt.InvalidTokenError:
            return 'Invalid token. Please log in again.'
class BlacklistToken(db.Model):
    """Stores JWTs that have been invalidated (e.g. on logout) so they can
    no longer authenticate."""

    __tablename__ = 'blacklist_tokens'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    token = db.Column(db.String(500), unique=True, nullable=False)
    blacklisted_on = db.Column(db.DateTime, nullable=False)

    def __init__(self, token):
        self.token = token
        self.blacklisted_on = datetime.datetime.now()

    def __repr__(self):
        return '<id: token: {}'.format(self.token)

    @staticmethod
    def check_blacklist(auth_token):
        """Return True if *auth_token* is present in the blacklist table."""
        # check whether auth token has been blacklisted
        res = BlacklistToken.query.filter_by(token=str(auth_token)).first()
        if res:
            return True
        else:
            return False
class Work(db.Model):
    """A posted work (story) with chapters, bookmarks, tags and a type."""
    __tablename__ = 'works'
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(200))
    work_summary = db.Column(db.String)
    work_notes = db.Column(db.String)
    is_complete = db.Column(db.Integer)
    word_count = db.Column(db.Integer)
    cover_url = db.Column(db.String)
    cover_alt_text = db.Column(db.String)
    created_on = db.Column(db.DateTime, server_default=db.func.now())
    updated_on = db.Column(db.DateTime, server_default=db.func.now(), server_onupdate=db.func.now())
    # deleting a work cascades to its chapters and bookmarks
    chapters = db.relationship('Chapter', backref='chapter_work',
                               lazy='dynamic', cascade='all,delete')
    bookmarks = db.relationship('Bookmark', backref='bookmark_work',
                                lazy='dynamic', cascade='all,delete')
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    # NOTE(review): User.works is declared with backref='work_user' while
    # this side uses back_populates='works' — SQLAlchemy rejects that mix;
    # confirm which configuration is intended.
    user = db.relationship('User', back_populates='works')
    type_id = db.Column(db.Integer, db.ForeignKey('work_types.id') )
    work_type = db.relationship('WorkType', backref='type_works')
    # many-to-many tagging through the work_tag association table
    tags = db.relationship('Tag', secondary=work_tag_table,
                           backref=db.backref('work_tags', lazy='dynamic'), lazy='dynamic')

    def __repr__(self):
        return '<Work: {}>'.format(self.id)
class WorkType(db.Model):
    """Lookup table for the category/type a Work belongs to."""
    __tablename__ = 'work_types'
    id = db.Column(db.Integer, primary_key=True)
    type_name = db.Column(db.String(200))

    def __init__(self, type_name):
        self.type_name = type_name

    def __repr__(self):
        return '<WorkType: {}>'.format(self.id)
class Chapter(db.Model):
    """A single chapter of a Work, with optional audio/image attachments."""
    __tablename__ = 'chapters'
    id = db.Column(db.Integer, primary_key=True)
    created_on = db.Column(db.DateTime, server_default=db.func.now())
    updated_on = db.Column(db.DateTime, server_default=db.func.now(), server_onupdate=db.func.now())
    title = db.Column(db.String(200))
    number = db.Column(db.Integer)
    text = db.Column(db.String)
    audio_url = db.Column(db.String)
    image_url = db.Column(db.String)
    image_alt_text = db.Column(db.String)
    summary = db.Column(db.String)
    # NOTE(review): Comment.chapter uses back_populates='comments' while this
    # side declares a backref — conflicting relationship configuration.
    comments = db.relationship('Comment', backref='comment_chapter',
                               lazy='dynamic')
    work_id = db.Column(db.Integer, db.ForeignKey('works.id', ondelete='CASCADE'))
    work = db.relationship('Work', back_populates='chapters')

    def __repr__(self):
        return '<Chapter: {}>'.format(self.id)
class Comment(db.Model):
    """A user comment on a chapter or bookmark; supports threaded replies."""
    __tablename__ = 'comments'
    id = db.Column(db.Integer, primary_key=True)
    text = db.Column(db.String)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id', ondelete='CASCADE'))
    # NOTE(review): the User/Chapter/Bookmark sides of these relationships
    # are declared with backrefs, while these use back_populates — SQLAlchemy
    # treats that as a conflicting double configuration; confirm intent.
    user = db.relationship('User', back_populates='comments')
    chapter_id = db.Column(db.Integer, db.ForeignKey('chapters.id', ondelete='CASCADE'))
    chapter = db.relationship('Chapter', back_populates='comments')
    bookmark_id = db.Column(db.Integer, db.ForeignKey('bookmarks.id', ondelete='CASCADE'))
    bookmark = db.relationship('Bookmark', back_populates='comments')
    # self-referential many-to-many modelling parent/child reply threads
    comments = db.relationship("Comment",
                               secondary=comment_to_comment,
                               primaryjoin=id==comment_to_comment.c.parent_comment_id,
                               secondaryjoin=id==comment_to_comment.c.child_comment_id,
                               backref="parent_comment"
                               )

    def __repr__(self):
        return '<Comment: {}>'.format(self.id)
class Tag(db.Model):
    """Free-text tag, categorized by a TagType; attached to works and
    bookmarks through association tables."""
    __tablename__ = 'tags'
    id = db.Column(db.Integer, primary_key=True)
    text = db.Column(db.String(120))
    tag_type_id = db.Column(db.Integer, db.ForeignKey('tag_types.id', ondelete='CASCADE'))
    tag_type = db.relationship('TagType', back_populates='tags')

    def __repr__(self):
        return '<Tag: {}>'.format(self.id)
class TagType(db.Model):
    """Category of tags (e.g. a label grouping related Tag rows)."""
    __tablename__ = 'tag_types'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    label = db.Column(db.String(200))
    tags = db.relationship('Tag', back_populates='tag_type')

    def __init__(self, label=None):
        self.label = label

    def __repr__(self):
        return '<TagType: {}>'.format(self.id)
class Bookmark(db.Model):
    """A user's bookmark/recommendation of a work, with rating, tags,
    external links and comments."""
    __tablename__ = 'bookmarks'
    id = db.Column(db.Integer, primary_key=True)
    curator_title = db.Column(db.String(200))
    rating = db.Column(db.Integer)
    description = db.Column(db.String)
    created_on = db.Column(db.DateTime, server_default=db.func.now())
    updated_on = db.Column(db.DateTime, server_default=db.func.now(), server_onupdate=db.func.now())
    user_id = db.Column(db.Integer, db.ForeignKey('users.id', ondelete='CASCADE'))
    user = db.relationship('User')
    # private bookmarks are only visible to their owner (default public)
    is_private = db.Column(db.Boolean, nullable=True, default=False)
    work_id = db.Column(db.Integer, db.ForeignKey('works.id', ondelete='CASCADE'))
    work = db.relationship('Work', back_populates='bookmarks')
    tags = db.relationship('Tag', secondary=bookmark_tag_table,
                           backref=db.backref('bookmark_tags', lazy='dynamic'), lazy='dynamic')
    comments = db.relationship('Comment', backref='comment_bookmark',
                               lazy='dynamic')
    links = db.relationship('BookmarkLink', backref='link_bookmark',
                            lazy='dynamic')

    def __repr__(self):
        return '<Bookmark: {}>'.format(self.id)
class BookmarkLink(db.Model):
    """An external URL (with display text) attached to a Bookmark."""
    __tablename__ = 'bookmark_links'
    id = db.Column(db.Integer, primary_key=True)
    link = db.Column(db.String(200))
    text = db.Column(db.String(200))
    bookmark_id = db.Column(db.Integer, db.ForeignKey('bookmarks.id', ondelete='CASCADE'))
    bookmark = db.relationship('Bookmark', back_populates='links')

    def __repr__(self):
        return '<BookmarkLink: {}>'.format(self.id)
class Message(db.Model):
    """A private message between two users, with threaded replies."""
    __tablename__ = 'messages'
    id = db.Column(db.Integer, primary_key=True)
    message_subject = db.Column(db.String(200))
    message_content = db.Column(db.String)
    message_read = db.Column(db.Boolean, default=False)
    # two FKs into users (recipient and sender); foreign_keys disambiguates
    to_user_id = db.Column(db.Integer, db.ForeignKey('users.id', ondelete='CASCADE'))
    to_user = db.relationship('User', back_populates='received_messages', foreign_keys=[to_user_id])
    from_user_id = db.Column(db.Integer, db.ForeignKey('users.id', ondelete='CASCADE'))
    from_user = db.relationship('User', back_populates='sent_messages',foreign_keys=[from_user_id])
    # self-referential many-to-many modelling reply threads
    replies = db.relationship("Message",
                              secondary=message_to_message,
                              primaryjoin=id==message_to_message.c.parent_message_id,
                              secondaryjoin=id==message_to_message.c.child_message_id,
                              backref="parent_message", lazy='dynamic'
                              )
class Notification(db.Model):
    """An in-app notification shown to a user."""
    __tablename__ = 'notifications'
    id = db.Column(db.Integer, primary_key=True)
    content = db.Column(db.String(200))
    date_created = db.Column(db.DateTime)
    # presumably the in-app link target for the notification — confirm
    route = db.Column(db.String)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id', ondelete='CASCADE'))
    user = db.relationship('User')
    notification_type_id = db.Column(db.Integer, db.ForeignKey('notification_types.id', ondelete='CASCADE'))
    notification_type = db.relationship('NotificationType')
class NotificationType(db.Model):
    """Lookup table of notification categories."""
    __tablename__ = 'notification_types'
    id = db.Column(db.Integer, primary_key=True)
    type_label = db.Column(db.String(200))
    # whether this notification type also triggers an email
    send_email = db.Column(db.Boolean)

    def __init__(self, type_label, send_email):
        self.type_label = type_label
        self.send_email = send_email

    def __repr__(self):
        # bug fix: repr previously reported '<Message: {}>' (copy-paste from
        # another model), which made debugging output misleading
        return '<NotificationType: {}>'.format(self.id)
|
#!/usr/bin/env python
# encoding: utf-8
import numpy as np
from pandas import read_csv
from sklearn.model_selection import KFold
from pandas import DataFrame
import sys
from sklearn import preprocessing
class logistic_regression:
    """Logistic-regression classifier trained with gradient-descent variants
    (adam / adagrad / gd / sgd) and evaluated by 10-fold cross-validation.

    NOTE(review): appears written for Python 2 — ``read_input`` wraps
    ``zip(...)`` in ``np.array``, which under Python 3 produces a 0-d array
    of a zip object; confirm the target interpreter.
    """

    def __init__(self, eta, optimizer):
        # eta: learning rate; optimizer: "adam" or anything else for adagrad
        # (see logistic())
        self.eta = eta
        self.optimizer = optimizer

    def read_input(self, file_path):
        """Load a CSV, insert a bias column of 1.0 and pair features with
        labels as (x, y) tuples."""
        raw = read_csv(file_path, header=None)
        raw.insert(1, 'bias', 1.0)
        # columns 1..57 = bias + features; last column = label
        x = raw.iloc[:, 1:58]
        y = raw.iloc[:, -1:]
        return np.array(zip(x.values, y.values))

    def sigmoid(self, z):
        """Logistic function 1 / (1 + e^-z)."""
        return 1.0 / (1.0 + np.exp(-z))

    def adam(self, train, batch_size=32, epochs=200, beta_1=0.9, beta_2=0.99, epsilon=1e-8):
        """Train with Adam; return the weight vector (bias included).

        NOTE(review): ``X.shape[0] / batch_size`` is a float under Python 3;
        get_batches needs an integer batch count — confirm.
        """
        print("======= running adam with learning rate: %.3f" % self.eta)
        X, Y = zip(*train)
        X = np.array(X)
        Y = np.array([y[0] for y in Y])
        num_of_feature = X.shape[1]
        w = np.zeros(num_of_feature)  # for feature weight and bias
        # first/second moment accumulators and timestep
        m_t = 0
        v_t = 0
        t = 0
        for epoch in range(1, epochs + 1):
            # NOTE(review): shuffles `train`, but batches are drawn from the
            # already-extracted X/Y, so this shuffle has no effect here.
            np.random.shuffle(train)
            for x, y in get_batches(X, Y, X.shape[0] / batch_size):
                Z = np.dot(w, x.T)  # 1 * (n + 1) dot (n + 1) * batch_size
                A = 1.0 / (1.0 + np.exp(-Z))
                dZ = A - y  # 1 * m
                dw = 1 / batch_size * np.matmul(dZ, x)
                t += 1
                # bias-corrected first/second moment estimates
                m_t = beta_1 * m_t + (1 - beta_1) * dw
                v_t = beta_2 * v_t + (1 - beta_2) * (dw**2)
                mhat = m_t / (1 - beta_1**t)
                vhat = v_t / (1 - beta_2 **t)
                w -= self.eta * mhat / (np.sqrt(vhat) + epsilon)
            progress = (epoch * 1.0 / epochs) * 100
            sys.stdout.write("\r%d%%" % progress)
            sys.stdout.flush()
        sys.stdout.write("\nEnd of optimization!\n")
        sys.stdout.flush()
        return w

    def adagrad(self, train, epochs=2000, batch_size=1, epsilon=1e-8):
        """Train with AdaGrad; return the weight vector (bias included)."""
        X, Y = zip(*train)
        X = np.array(X)
        Y = np.array([y[0] for y in Y])
        num_of_feature = X.shape[1]
        w = np.zeros(num_of_feature)  # for feature weight and bias
        # running sum of squared gradients (note: assigned twice below,
        # the second assignment is redundant)
        gti = np.zeros(num_of_feature)
        dw = np.zeros(num_of_feature)
        gti = np.zeros(num_of_feature)
        for epoch in range(1, epochs + 1):
            np.random.shuffle(train)
            for x, y in get_batches(X, Y, X.shape[0] / batch_size):
                Z = np.dot(w, x.T)  # 1 * (n + 1) dot (n + 1) * batch_size
                A = 1.0 / (1.0 + np.exp(-Z))
                dZ = A - y  # 1 * m
                dw = 1 / batch_size * np.matmul(dZ, x)
                gti += dw ** 2
                # per-parameter adaptive learning rate
                w -= self.eta * dw / ((gti + epsilon) ** 0.5)
            progress = (epoch * 1.0 / epochs) * 100
            sys.stdout.write("\r%d%%" % progress)
            sys.stdout.flush()
        sys.stdout.write("\nEnd of optimization!\n")
        sys.stdout.flush()
        return w

    def gd(self, train, epoch=200):
        """Full-batch gradient descent.

        NOTE(review): `X` here is the raw tuple from zip(*train) (tuples have
        no .shape) and the gradient line references an undefined name `x`,
        so this method cannot run as written — confirm it is dead code.
        """
        X, Y = zip(*train)
        # X: m * (n + 1), Y: m * 1
        num_of_feature = X.shape[1]
        w = np.zeros(num_of_feature)  # 1 * (n+1)
        dw = np.zeros(num_of_feature)
        num_of_example = X.shape[0]
        for i in range(epoch):
            Z = np.dot(w, X.T)  # 1 * (n + 1) dot (n + 1) * m
            A = 1.0 / (1.0 + np.exp(-Z))
            dZ = A - Y  # 1 * m
            dw = 1/num_of_example * np.matmul(dZ, x)
            w = w - self.eta * dw
            # NOTE(review): epoch/epoch is always 1, so this prints 100%
            progress = (epoch * 1.0 / epoch) * 100
            sys.stdout.write("\r%d%%" % progress)
            sys.stdout.flush()
        sys.stdout.write("\nEnd of optimization!\n")
        sys.stdout.flush()
        return w

    def sgd(self, train, epoch=200, batch_size=32):
        """Mini-batch stochastic gradient descent.

        NOTE(review): `X` is the tuple from zip(*train) (no .shape); the loop
        variable `epoch` also shadows the parameter of the same name.
        """
        X, Y = zip(*train)
        num_of_feature = X.shape[1]
        w = np.zeros(num_of_feature)  # for feature weight and bias
        dw = np.zeros(num_of_feature)
        for epoch in range(1, epoch + 1):
            for x, y in get_batches(X, Y, batch_size):
                Z = np.dot(w, x.T)  # 1 * (n + 1) dot (n + 1) * batch_size
                A = 1.0 / (1.0 + np.exp(-Z))
                dZ = A - y  # 1 * m
                dw = 1 / batch_size * np.matmul(dZ, x)
                w = w - self.eta * dw
            # NOTE(review): epoch/epoch is always 1, so this prints 100%
            progress = (epoch * 1.0 / epoch) * 100
            sys.stdout.write("\r%d%%" % progress)
            sys.stdout.flush()
        sys.stdout.write("\nEnd of optimization!\n")
        sys.stdout.flush()
        return w

    def accuracy(self, actual, predicted):
        """Percentage of predictions equal to the actual labels."""
        correct = 0
        for i in range(len(actual)):
            if actual[i] == predicted[i]:
                correct += 1
        return correct / float(len(actual)) * 100.0

    def cost(self, data, w, epsilon=1e-10):
        """Total cross-entropy loss of w over data; epsilon guards log(0)."""
        result = 0
        for x, y in data:
            yhat = self.predict(x, w)
            result += -(y * np.log(yhat + epsilon) + (1 - y) * np.log(1 - yhat + epsilon))
        return result

    def predict(self, feature, w):
        """Probability estimate sigmoid(w . feature)."""
        yhat = 0
        feature = np.array(feature)
        yhat += w.T.dot(feature)
        return self.sigmoid(yhat)

    def logistic(self, train, test):
        """Train with the configured optimizer, then return rounded (0/1)
        predictions for each row of test."""
        coefs = self.adam(train) if self.optimizer == "adam" else self.adagrad(train)
        predicts = list()
        for data in test:
            yhat = self.predict(data, coefs)
            yhat = round(yhat)
            predicts.append(yhat)
        return predicts

    def evaluate(self, data):
        """10-fold cross-validation; prints per-fold scores and the mean."""
        kf = KFold(n_splits=10)
        scores = list()
        np.random.shuffle(data)
        for train_index, validation_index in kf.split(data):
            train = data[train_index]
            # normalize training data
            train_x = np.array([x for x, y in train])
            train_y = np.array([y for x, y in train])
            # scalar = preprocessing.StandardScaler().fit(train_x)
            # normalized_train_x = scalar.transform(train_x)
            # normalize testing data
            validation = data[validation_index]
            validation_x = np.array([x for x, y in validation])
            # normalized_validation_x = scalar.transform(validation_x)
            validation_y = [y[0] for x, y in validation]
            predictions = self.logistic(zip(train_x, train_y), validation_x)
            score = self.accuracy(validation_y, predictions)
            print("score: %.3f" % score)
            scores.append(score)
        print('Scores: %s with Mean Accuracy: %.3f%% with %.3f' % (scores, (sum(scores) / float(len(scores))), self.eta))

    def normalized(self, train):
        """Standardize train; return (normalized data, means, variances)."""
        scalar = preprocessing.StandardScaler().fit(train)
        normalized_train = scalar.transform(train)
        return normalized_train, scalar.mean_, scalar.var_

    def dataset_minmax(self, data):
        """Return a list of per-feature (min, max) pairs over the dataset."""
        min_max = list()
        for i in range(len(data[0][0])):
            col_values = list()
            for x, y in data:
                col_values.append(x[i])
            min_value = min(col_values)
            max_value = max(col_values)
            min_max.append((min_value, max_value))
        return min_max

    def normalize_dataset(self, data, mean, standard_deviation):
        # NOTE(review): the comprehension iterates features in the outer loop
        # and rows in the inner loop, flattening feature-major — confirm the
        # intended element ordering before using the result.
        return np.array([(x[i] - mean[i]) / standard_deviation[i] for i in range(len(mean)) for x in data])

    def testing(self, filepath, outputpath, eta=0.01):
        """Predict labels for a test CSV and write an (id, values) CSV.

        NOTE(review): trains on the module-level global ``dataset``; the
        ``eta`` parameter is accepted but never used.
        """
        test = read_csv(filepath, header=None)
        test_x = test.iloc[:, 1:]
        predicted = self.logistic(dataset, test_x.values)
        result = DataFrame.from_dict(dict({"id": list(range(1, len(predicted) + 1)), "values": predicted}))
        result.to_csv(outputpath, index=False)
        print("Testing done...")
def get_batches(x, y, n_batches=10):
    """Yield (X, Y) mini-batches drawn in order from parallel sequences.

    The first ``n_batches - 1`` batches contain exactly
    ``len(x) // n_batches`` items; the last batch also absorbs the remainder.

    ``n_batches`` is coerced to int so callers may pass the result of true
    division (e.g. ``X.shape[0] / batch_size``, a float on Python 3) without
    ``range()`` raising a TypeError.
    """
    n_batches = int(n_batches)
    batch_size = len(x) // n_batches
    for ii in range(0, n_batches * batch_size, batch_size):
        # If we're not on the last batch, grab data with size batch_size
        if ii != (n_batches - 1) * batch_size:
            X, Y = x[ii: ii + batch_size], y[ii: ii + batch_size]
        # On the last batch, grab the rest of the data
        else:
            X, Y = x[ii:], y[ii:]
        yield X, Y
if __name__ == '__main__':
    path = './data/spam_train.csv'
    # learning rate 0.04, AdaGrad optimizer
    regressor = logistic_regression(0.04, "adagrad")
    # note: `dataset` is also read as a global by logistic_regression.testing
    dataset = regressor.read_input(path)
    #evaluate(dataset, 0.0000005, "sgd")
    regressor.evaluate(dataset)
    # test_path = '/home/vincent/machine_learning/LogisticRegression/data/spam_test.csv'
    # output_path = "/home/vincent/machine_learning/LogisticRegression/data/test_result.csv"
    # testing(test_path, output_path)
|
def rotate(nums, k):
    """Rotate `nums` right by `k` positions in place.

    Modifies the list and returns None (classic three-reversal trick).
    """
    def flip(lo, hi):
        # reverse nums[lo..hi] in place
        while lo < hi:
            nums[lo], nums[hi] = nums[hi], nums[lo]
            lo += 1
            hi -= 1

    n = len(nums)
    k = k % n
    if not k:
        return
    flip(0, n - 1)   # reverse everything
    flip(0, k - 1)   # restore order of the first k
    flip(k, n - 1)   # restore order of the rest
arr = [1,2,3,4]
rotate(arr, 1)
print(arr)
#
# Copyright 2008,2009 Free Software Foundation, Inc.
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
# The presence of this file turns this directory into a Python package
'''
This is the GNU Radio AMR module. Place your Python package
description here (python/__init__.py).
'''
import os
# import pybind11 generated symbols into the amr namespace
try:
# this might fail if the module is python-only
from .amr_python import *
except ModuleNotFoundError:
pass
# import any pure python here
from .packet_detector import packet_detector
from .fsk_demod_ff import fsk_demod_ff
from .symbols_to_bits import symbols_to_bits
#
|
import time
import praw
#identifies the bot to reddit
r = praw.Reddit('Dogecoin giveaway tipper')
#input username and password the bot will use here.
r.login("USERNAME","PASSWORD")
already_done = set()
words = ['Giveaway', 'giveaway']
def find_giveaway():
    """Scan the newest /r/dogecoin posts and tip the first qualifying
    'giveaway' post (Python 2 / old PRAW API).

    A post qualifies when its title contains 'giveaway', it has more than 10
    upvotes, and it has not been commented on before (checked against both
    the in-memory set and the alreadyseen.txt log).

    NOTE(review): the log file is reopened for every submission and only
    closed on the commenting path — confirm the handle lifecycle is intended.
    """
    print 'Starting...'
    subreddit = r.get_subreddit('dogecoin')
    # Gets the newest submissions from /r/dogecoin
    subreddit_submissions = subreddit.get_new(limit=100)
    print 'Looking for giveaway post'
    for submission in subreddit_submissions:
        post_title = submission.title.lower()
        # Log file of submission ids the bot has already seen/handled
        obj = open('alreadyseen.txt', 'ab+')
        # True if the title contains any of the trigger words
        has_word = any(string in post_title for string in words)
        link = submission.permalink
        sub_id = submission.id
        if sub_id not in open("alreadyseen.txt").read() and has_word:
            # Checks to see if it has enough upvotes
            if submission.ups>10:
                if sub_id not in already_done:
                    print 'Found post that qualifies! Commenting...'
                    # This is the comment the bot leaves on the giveaway, change it to suit your needs.
                    submission.add_comment('+/u/dogetipbot 50 doge \n\n^^Please ^^consider ^^tipping ^^this ^^bot ^^to ^^keep ^^me ^^running ^^and ^^to ^^see ^^larger ^^tips! \n\n^^Owned ^^by ^^/u/cbg119. ^^Problems? ^^Shoot ^^me ^^a ^^message!')
                    already_done.add(sub_id)
                    obj.write(sub_id + ' ')
                    obj.close()
                    time.sleep(30)
                    break
#loops the defined function
# Poll forever: rescan for qualifying giveaway posts every 30 seconds.
while True:
    find_giveaway()
    print 'Done. Starting over in 30 seconds.'
    time.sleep(30)
|
from django.contrib import admin
# Register your models here.
from domain.models import UrlModel
class UrlAdmin(admin.ModelAdmin):
    """Admin list view for UrlModel: show the name and target URL columns."""
    list_display = ['url_name', 'url']
# expose UrlModel in the Django admin with the configuration above
admin.site.register(UrlModel, UrlAdmin)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-07-23 01:06
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an optional `slug` field to the Causa model."""

    dependencies = [
        ('metodo', '0003_auto_20180720_1544'),
    ]

    operations = [
        migrations.AddField(
            model_name='causa',
            name='slug',
            field=models.SlugField(blank=True, max_length=100, null=True),
        ),
    ]
|
from typing import List
from daos.book_dao import BookDAO
from entities.book import Book
from exceptions.book_unavailable_error import BookUnavailableError
from exceptions.not_found_exception import ResourceNotFoundError
from services.book_service import BookService
import time
class BookServiceImpl(BookService):
    """Business-logic layer for books.

    Composition / dependency injection: the service is handed a BookDAO and
    delegates all persistence to it. This decoupling keeps the code modular,
    easier to test, and easier to refactor.
    """

    def __init__(self, book_dao: BookDAO):
        self.book_dao = book_dao

    def add_book(self, book: Book):
        """Persist a new book and return the created record."""
        return self.book_dao.create_book(book)

    def retrieve_all_books(self):
        """Return every book in the store."""
        return self.book_dao.get_all_books()

    def retrieve_book_by_id(self, book_id: int):
        """Return the book with the given id."""
        return self.book_dao.get_book_by_id(book_id)

    def update_book(self, book: Book):
        """Persist changes to an existing book."""
        return self.book_dao.update_book(book)

    def remove_book(self, book_id: int):
        """Delete a book; raise ResourceNotFoundError if it does not exist."""
        result = self.book_dao.delete_book(book_id)
        # guard clause instead of if/else: fail fast when nothing was deleted
        if not result:
            raise ResourceNotFoundError(f"book with the id of {book_id} could not be found")
        return result

    def find_books_by_tile_containing(self, phrase: str) -> List[Book]:
        """Return all books whose title contains `phrase`.

        (Method name typo "tile" is kept for backward compatibility.)
        """
        # comprehension replaces the manual filter-and-append loop
        return [book for book in self.book_dao.get_all_books()
                if phrase in book.title]

    def checkout_book(self, book_id: int) -> bool:
        """Check a book out for two weeks; raise if already checked out."""
        book = self.book_dao.get_book_by_id(book_id)
        # idiomatic truthiness test instead of `== False`
        if not book.available:
            raise BookUnavailableError(f"the book with id {book_id} is currently unavailable")
        book.available = False
        # due back two weeks (1,209,600 seconds) from now
        book.return_date = time.time() + 1_209_600
        self.book_dao.update_book(book)
        return True
|
import json
import urllib2
class NYTimesScraper():
    """Thin client for the NYT Article Search API v2 (Python 2 / urllib2)."""

    def __init__(self, apikey):
        # Creates a new NYTimesScraper Object using the apikey that was included.
        self.key = apikey
        self.url = 'http://api.nytimes.com/svc/search/v2/articlesearch.json?'

    def _build_params(self, params):
        """Join the params dict into a 'k=v&k=v' query string; raise if empty."""
        if not params:
            raise Exception('no search parameters!')
        else:
            return '&'.join([k + '=' + v for k,v in params.iteritems()])

    def search(self, params=None):
        """Run a search and return the decoded JSON response.

        Fixed the mutable default argument (`params={}`) — a `None` sentinel
        avoids sharing one dict across calls; behavior is unchanged (an empty
        dict still raises in _build_params).
        """
        params = params if params is not None else {}
        url = self.url + self._build_params(params)
        url = url + '&api-key=%s' % self.key
        req = urllib2.Request(url)
        data = urllib2.urlopen(req).read()
        return json.loads(data)
nytimes = NYTimesScraper(apikey='')
articles = api.search({'q':'malaysia', 'begin_date': '20140101'})
for article in articles['response']['docs']:
print '"' + '","'.join([article['byline']['person'][0]['lastname'],
article['pub_date'],
article['section_name'],
article['subsection_name'],
article['word_count'],
article['web_url']]) + '"'
filename = 'nytimesdata.csv'
writer = open(filename, 'w')
# Fill in the column names here as one string
writer.write('')
for article in articles['response']['docs']:
# Write the article data you want to collect here
|
# The approach I first thought of turns out to be correct.
# It times out on two of the test cases.
# The version that does not special-case `mid` did not time out — compare and analyze the two solutions.
def solution(n, times):
    """Binary-search a time `mid` at which exactly n people have been
    examined, then return the actual finish time of the last examination.

    Kept behaviorally identical to the original, including its reliance on
    hitting an exact count of n (per the original notes, this variant timed
    out on some test cases).
    """
    lo, hi = times[0], times[-1] * n
    while True:
        mid = (lo + hi) // 2
        # how many people all examiners can finish within `mid` minutes
        handled = sum(mid // t for t in times)
        if handled == n:
            # latest completion time among the examiners within `mid`
            return max((mid // t) * t for t in times)
        if handled > n:
            hi = mid - 1
        else:
            lo = mid + 1
# The version that does not special-case `mid`
def solution2(n, times):
    """Parametric binary search: smallest time T such that the examiners can
    collectively handle at least n people within T minutes."""
    lo, hi = 1, max(times) * n
    while lo < hi:
        mid = (lo + hi) // 2
        capacity = sum(mid // t for t in times)
        if capacity < n:
            lo = mid + 1
        else:
            hi = mid
    return lo
print(solution(6, [7, 10]))
print(solution2(6, [7, 10]))
#_*_coding:utf-8_*_
from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView
# NOTE(review): django.conf.urls.patterns() with string view paths targets an
# old Django release (patterns() was removed in Django 1.10) — these URLconfs
# will need url()/path() with imported view callables on modern Django.
# Account auth: login/register/logout plus profile check/save.
urlpatterns = patterns('',
    url(r'^$','apps.accounts.views.login',name='login'),
    url(r'^register/?$','apps.accounts.views.register',name='register'),
    url(r'^register/invate_code/(.+)$','apps.accounts.views.register',name='invate_code'),
    url(r'^login/?$','apps.accounts.views.login',name='login'),
    url(r'^logout/?$','apps.accounts.views.logout',name='logout'),
    #url(r'^info/?$','apps.accounts.views.info',name='info'),
    url(r'^check/?$','apps.accounts.views.check',name='check'),
    url(r'^save/?$','apps.accounts.views.save',name='save'),
    #url(r'^reset/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/$', 'django.contrib.auth.views.password_reset_confirm'),
    #url(r'^reset/done/$', 'django.contrib.auth.views.password_reset_complete'),
    #url(r'^admin/password_reset/$', 'django.contrib.auth.views.password_reset', name='admin_password_reset'),
    #url(r'^admin/password_reset/done/$', 'django.contrib.auth.views.password_reset_done'),
)
# Misc: terms-of-use page and a kx API endpoint.
urlpatterns += patterns('',
    url(r'^protocol/?$','apps.accounts.views.protocol',name='protocol'),
    url(r'^cadd/?$','apps.kx.api.views.cadd',name='cadd'),
)
# Account activation / verification and invitations.
urlpatterns += patterns('',
    url(r'^verify_success/$','apps.accounts.views.verify_success',name='verify_success'),
    url(r'^account_verify/?$','apps.accounts.views.account_verify',name='account_verify'),
    url(r'^activate/verify/(.+)$','apps.accounts.views.activate',name='activate'),
    url(r'^invate/$','apps.kx.api.views.invate',name='invate'),
    url(r'^to_active/?$','apps.accounts.views.to_active',name='to_active'),
    url(r'^invite_msg/(\w+)/?$','apps.accounts.views.invite_msg',name='invite_msg'),
)
# Password recovery and change.
urlpatterns += patterns('',
    url(r'^findPwd/?$','apps.accounts.views.findPwd',name='findPwd'),
    url(r'^resetPwd/verify/(.+)$','apps.accounts.views.resetPwd',name='resetPwd'),
    url(r'^rePwd/?$','apps.accounts.views.rePwd',name='rePwd'),
    url(r'^changePWD/?$','apps.accounts.views.chpasswd',name='chpasswd'),
)
# User dashboard: profile, avatar, friends, printers and print records.
urlpatterns += patterns('',
    url(r'^index/?$', 'apps.accounts.views.index',name='accounts_index'),
    url(r'^info_new/?$','apps.accounts.views.new_info',name='accounts_info'),
    url(r'^avatar/?$','apps.accounts.views.avatar',name='avatar'),
    url(r'^friend/?$', TemplateView.as_view(template_name="user/friend.html")),
    url(r'^friendAdd/?$', TemplateView.as_view(template_name="user/friend_add.html")),
    url(r'^printer/auth/?$','apps.accounts.views.printer_auth',name='printer_auth'),
    url(r'^printer/do_auth/?$','apps.accounts.views.do_auth',name='do_auth'),
    url(r'^print_record/?$','apps.accounts.views.print_record',name='print_record'),
    url(r'^my_printer/?$','apps.accounts.views.my_printer',name='my_printer'),
    url(r'^user/my_issue/?$','apps.accounts.views.my_issue',name='my_issue'),
    url(r'^printer/is_auth/?$','apps.accounts.views.authed_printer',name='accounts_authed_printer'),
)
|
# Generated by Django 2.2.7 on 2019-11-20 21:11
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make Product.name unique and set its Russian verbose name."""

    dependencies = [
        ('main', '0044_auto_20191121_0302'),
    ]

    operations = [
        migrations.AlterField(
            model_name='product',
            name='name',
            field=models.CharField(max_length=200, unique=True, verbose_name='Наименование продукта'),
        ),
    ]
|
# input the photo in datas to faceset, set ID as its dir name
import os
from os import path
import face_API as face
import pre_process as pp
data_path = '/Users/xander/Documents/code/classroom_recognization_system/datas'
def updateall():
    """Rebuild the remote faceset from every photo under data_path.

    Each subdirectory name is a numeric ID; non-numeric entries are skipped.
    Every .jpg/.jpeg/.png inside an ID directory is uploaded under that ID.
    """
    pp.small_datas()
    # drop the existing remote faceset before re-uploading everything
    face.clear_faceset()
    os.chdir(data_path)
    pa = os.getcwd()
    for ID in os.listdir(pa):
        try: # all ID is int number
            int(ID)
        except ValueError:
            continue
        else:
            imgpath = path.join(pa,ID)
            for img in os.listdir(imgpath):
                (filename,extension) = path.splitext(img)
                if extension in ('.jpg','.jpeg','.png'):
                    face.single_addface(img_url = path.join(imgpath,img) , ID = ID)
def updateID(ID):
    """Upload every photo under data_path/<ID> to the faceset for that ID.

    NOTE(review): int(ID) only raises ValueError here; the FileNotFoundError
    in the except clause is presumably meant for the listdir below — confirm.
    """
    try:
        int(ID)
    except (ValueError,FileNotFoundError):
        print("No such an ID!")
        return
    else:
        imgpath = path.join(data_path,ID)
        # shrink/pre-process the images before uploading
        pp.small_paths(imgpath)
        for img in os.listdir(imgpath):
            (filename,extension) = path.splitext(img)
            if extension in ('.jpg','.jpeg','.png'):
                face.single_addface(img_url = path.join(imgpath,img) , ID = ID)
|
from django.db import models
from phonenumber_field.modelfields import PhoneNumberField
# Create your models here.
class Student(models.Model):
    """A student who can apply to fraternities and attend events."""
    # university ID used as the primary key
    puid = models.PositiveIntegerField(primary_key = True)
    name = models.CharField(max_length = 200)
    email = models.EmailField(blank = True)
    phone = PhoneNumberField(blank = True)
    major = models.CharField(max_length = 200, blank = True)
    gpa = models.DecimalField(max_digits = 3, decimal_places = 2, null = True)
    hometown = models.CharField(max_length = 200, blank = True)
    # class-year choices stored as two-letter codes
    FRESHMAN = 'FR'
    SOPHOMORE = 'SO'
    JUNIOR = 'JR'
    SENIOR = 'SR'
    GRADE_CHOICES = [(FRESHMAN, 'Freshman'),(SOPHOMORE, 'Sophomore'),(JUNIOR, 'Junior'),(SENIOR, 'Senior & Above'),]
    grade = models.CharField(max_length = 2, choices = GRADE_CHOICES, blank = True)
class Fraternity(models.Model):
    """A fraternity account with login credentials and its applicants."""
    # automatically generates an ID#
    name = models.CharField(max_length = 200)
    email = models.EmailField(unique = True)
    # NOTE(review): stored as a plain CharField — the password appears to be
    # kept in plaintext; confirm hashing happens elsewhere before relying on it.
    password = models.CharField(max_length = 100)
    applicants = models.ManyToManyField(Student, blank=True)
class Event(models.Model):
    """An event hosted by a fraternity, with student attendees."""
    name = models.CharField(max_length = 200)
    # indexed: events are looked up by date
    date = models.DateField(db_index = True)
    host = models.ForeignKey(Fraternity, on_delete = models.CASCADE)
    # fix: removed a stray trailing '|' artifact that made this line a syntax error
    attendees = models.ManyToManyField(Student,blank = True)
from __future__ import print_function
import sys
import sgf
from pyspark import SparkContext#, HiveContext
from pyspark.sql import SQLContext, Row, DataFrame #,SparkSession
from pyspark.streaming import StreamingContext
from pyspark.streaming.kafka import KafkaUtils
def getSparkSessionInstance():
    """Return a process-wide singleton Hive-enabled SparkSession.

    NOTE(review): the SparkSession import at the top of this file is
    commented out, so calling this raises NameError as written — confirm
    before use (the __main__ block indeed leaves it commented out).
    """
    if ('sparkSessionSingletonInstance' not in globals()):
        # lazily create and cache the session in the module globals
        globals()['sparkSessionSingletonInstance'] = SparkSession \
            .builder \
            .appName("Python Spark SQL Hive integration example") \
            .config("hive.metastore.uris", "thrift://127.0.0.1:9083") \
            .enableHiveSupport() \
            .getOrCreate()
    return globals()['sparkSessionSingletonInstance']
if __name__ == "__main__":
    # spark = getSparkSessionInstance()
    print("start.....")
    if len(sys.argv) != 3:
        print("Usage: direct_kafka_wordcount.py <broker_list> <topic>", file=sys.stderr)
        sys.exit(-1)
    sc = SparkContext.getOrCreate()
    #sqlc = HiveContext(sc)
    sqlContext = SQLContext(sc)
    sc.setLogLevel("WARN")
    # micro-batch every two seconds
    ssc = StreamingContext(sc, 2)
    brokers, topic = sys.argv[1:]
    #sqlc.sql("create table if not exists record(id string, key string, value string)")
    #spark.sql("create table if not exists record(id string, key string, value string)")
    kvs = KafkaUtils.createDirectStream(ssc, [topic], {"metadata.broker.list": brokers})
    lines = kvs.map(lambda x: x[1])
    # keep lines shaped "<prefix>-..." whose prefix is non-zero, split on
    # spaces and build Row(id, key, value) records
    rowRdd = lines.filter(lambda line: "-" in line and line[0:line.index("-")] != "0")\
        .flatMap(lambda line: line.split(" "))\
        .map(lambda p: Row(id=p[0], key=p[1], value=p[2]))
    sgfDF = sqlContext.createDataFrame(rowRdd)
    sgfDF.registerTempTable("record")
    sgfDF.write.parquet("record.parquet")
    # bug fix: read back the file written just above ("record.parquet");
    # this previously read "people.parquet", a leftover from the Spark SQL
    # documentation example, which does not exist here
    parquetFile = sqlContext.read.parquet("record.parquet")
    parquetFile.registerTempTable("parquetFile");
    datasToShow = sqlContext.sql("SELECT id, key, value FROM parquetFile")
    for oneData in datasToShow.collect():
        print(oneData)
    #res.createOrReplaceTempView("mytempTable")
    # sqlc.sql("create table if not exists mytable as select * from mytempTable");
    #kvs.write.mode("append").saveAsTable("record")
    #sqlc.sql("select * from record").show()
    print("end....pprint")
    ssc.start()
    ssc.awaitTermination()
    print("end.....")
    # rowRdd = rdd.map(lambda p: Row(movie_id=long(p[0]), budget=long(p[1]), popularity=float(p[2]), release_year=p[3], release_month=p[4], revenue=long(p[5]), title=p[6], voting_score=float(p[7]), voting_count=float(p[8])))
    # sgfDF = spark.createDataFrame(rowRdd)
    # newSgfDF = sgfDF[~sgfDF.movie_id.isin(existedMovieIdList)]
    # newSgfDF.write.mode("append").saveAsTable("default.movie")
from collections import deque
import sys
input = sys.stdin.readline
##########################################################
######################## global variables ########################
##########################################################
actions = [lambda x: x+1, lambda x: x-1, lambda x: x*2]
##########################################################
######################## functions ########################
##########################################################
def bfs(N, K):
    """Minimum number of +1 / -1 / *2 moves to get from N to K (0..100000).

    Level-order BFS; returns the depth (move count) at which K is first
    reached, or -1 if K is unreachable within the bounds.

    Improvement over the original: the move functions are defined locally
    instead of via `global actions`, so the function carries no hidden
    module-level state and is self-contained.
    """
    LIMIT = 100001  # valid positions are 0..100000 inclusive
    moves = (lambda p: p + 1, lambda p: p - 1, lambda p: p * 2)
    visited = [False] * LIMIT
    visited[N] = True
    queue = deque([N])
    depth = 0
    while queue:
        # expand exactly one BFS level per pass so `depth` counts moves
        for _ in range(len(queue)):
            pos = queue.popleft()
            if pos == K:
                return depth
            for move in moves:
                nxt = move(pos)
                if 0 <= nxt < LIMIT and not visited[nxt]:
                    visited[nxt] = True
                    queue.append(nxt)
        depth += 1
    return -1  # if fails
##########################################################
######################## input ########################
##########################################################
# Example input: "5 17" — Subin's position N, the sibling's position K
N, K = map(int, input().split())
ans = bfs(N, K)
print(ans)
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from functools import partial
from json import dumps, loads
from collections import defaultdict
from future.utils import viewitems
from tornado.web import authenticated
from qiita_core.qiita_settings import qiita_config
from qiita_core.util import execute_as_transaction
from qiita_core.qiita_settings import r_client
from qiita_pet.handlers.base_handlers import BaseHandler
from qiita_pet.handlers.util import download_link_or_path
from qiita_pet.handlers.analysis_handlers import check_analysis_access
from qiita_pet.util import is_localhost
from qiita_db.util import generate_analysis_list
from qiita_db.analysis import Analysis
from qiita_db.processing_job import ProcessingJob
from qiita_db.software import Parameters
from qiita_db.artifact import Artifact
from qiita_db.software import Software
class ListAnalysesHandler(BaseHandler):
    """Page handler listing the current user's analyses and public analyses,
    and queueing analysis deletions."""

    @authenticated
    @execute_as_transaction
    def get(self):
        """Render the analysis list, including delete-job status messages."""
        user = self.current_user
        is_local_request = is_localhost(self.request.headers['host'])
        # analyses the user can see directly (shared with them + their own)
        uanalyses = user.shared_analyses | user.private_analyses
        user_analysis_ids = set([a.id for a in uanalyses])
        panalyses = Analysis.get_by_status('public')
        public_analysis_ids = set([a.id for a in panalyses])
        # avoid listing an analysis twice when the user also owns/shares it
        public_analysis_ids = public_analysis_ids - user_analysis_ids
        user_analyses = generate_analysis_list(user_analysis_ids)
        public_analyses = generate_analysis_list(public_analysis_ids, True)
        dlop = partial(download_link_or_path, is_local_request)
        messages = {'info': '', 'danger': ''}
        for analysis_id in user_analysis_ids:
            # redis key set by post() when a delete job was queued
            job_info = r_client.get('analysis_delete_%d' % analysis_id)
            if job_info:
                job_info = defaultdict(lambda: '', loads(job_info))
                job_id = job_info['job_id']
                job = ProcessingJob(job_id)
                job_status = job.status
                processing = job_status not in ('success', 'error')
                if processing:
                    messages['info'] += (
                        'Analysis %s is being deleted<br/>' % analysis_id)
                elif job_status == 'error':
                    messages['danger'] += (
                        job.log.msg.replace('\n', '<br/>') + '<br/>')
                else:
                    # NOTE(review): this branch initialises the entry as a
                    # list and then '+='s a *string* onto it, which extends
                    # the list character by character — it looks like it
                    # should mirror the string concatenation above; confirm.
                    if job_info['alert_type'] not in messages:
                        messages[job_info['alert_type']] = []
                    messages[job_info['alert_type']] += (
                        job.log.msg.replace('\n', '<br/>') + '<br/>')
        self.render("list_analyses.html", user_analyses=user_analyses,
                    public_analyses=public_analyses, messages=messages,
                    dlop=dlop)

    @authenticated
    @execute_as_transaction
    def post(self):
        """Queue a plugin job that deletes the given analysis."""
        analysis_id = int(self.get_argument('analysis_id'))
        user = self.current_user
        # raises if the user may not touch this analysis
        check_analysis_access(user, Analysis(analysis_id))
        qiita_plugin = Software.from_name_and_version('Qiita', 'alpha')
        cmd = qiita_plugin.get_command('delete_analysis')
        params = Parameters.load(cmd, values_dict={'analysis_id': analysis_id})
        job = ProcessingJob.create(user, params, True)
        # Store the job id attaching it to the sample template id
        r_client.set('analysis_delete_%d' % analysis_id,
                     dumps({'job_id': job.id}))
        job.submit()
        self.redirect("%s/analysis/list/" % (qiita_config.portal_dir))
class AnalysisSummaryAJAX(BaseHandler):
    """AJAX endpoint returning summary data for the user's default analysis."""

    @authenticated
    @execute_as_transaction
    def get(self):
        # Serialize the default-analysis summary straight into the response.
        summary = self.current_user.default_analysis.summary_data()
        self.write(dumps(summary))
class SelectedSamplesHandler(BaseHandler):
    """Page showing the samples currently selected in the default analysis."""

    @authenticated
    @execute_as_transaction
    def get(self):
        # Group the selected samples by study, and gather per-artifact
        # display information keyed by artifact id.
        samples_by_study = defaultdict(dict)
        artifact_info = {}
        selected = self.current_user.default_analysis.samples
        for artifact_id, samples in viewitems(selected):
            artifact = Artifact(artifact_id)
            samples_by_study[artifact.study][artifact_id] = samples
            artifact_info[artifact_id] = {
                'processed_date': str(artifact.timestamp),
                'merging_scheme': artifact.merging_scheme,
                'data_type': artifact.data_type
            }
        self.render("analysis_selected.html", sel_data=samples_by_study,
                    proc_info=artifact_info)
|
#!/usr/bin/env python3
# coding=utf-8
from Functions import *
import multiprocessing
from multiprocessing import Process
import sys
def ShowIcon():
    """Run the status-bar/notification UI (blocks until the UI exits)."""
    # 状态栏及通知 ("status bar & notifications") comes from the star
    # import of Functions at the top of the file.
    状态栏及通知().run()
def AlwaysCheck(pid):
    """Log in, then poll the network every 3 s while the icon process lives.

    pid: process id (int or numeric string) of the tray-icon process.
    When that process disappears this worker terminates the interpreter.
    """
    pid = int(pid)  # convert once, instead of on every loop iteration
    开始登陆 = 执行()
    开始登陆.登陆()
    while 1:
        开始登陆.检查网络()
        time.sleep(3)
        if CheckProcess(pid) is False:
            # Icon process is gone: exit immediately, skipping cleanup.
            os._exit(0)
def CheckProcess(pid):
    """Return True if a process with *pid* appears to be alive.

    Sends signal 0 via ``os.kill``, which performs the existence and
    permission checks without actually delivering a signal.
    """
    try:
        os.kill(pid, 0)
    except ProcessLookupError:
        # No such process.
        return False
    except PermissionError:
        # BUG FIX: EPERM means the process exists but belongs to another
        # user; the old bare except wrongly reported it as dead.
        return True
    except OSError:
        # Any other OS-level failure: treat as not running.
        return False
    else:
        return True
if __name__=="__main__":
    # Directory the script was launched from; used to update the plist.
    CurrentPath = os.path.dirname(sys.argv[0])
    初始化().更新plist(CurrentPath)
    #初始化().test()
    multiprocessing.freeze_support()
    # Start the tray-icon UI in its own process.
    ProcessIcon=Process(target=ShowIcon,args=())
    ProcessIcon.start()
    ProcessID = ProcessIcon.pid
    #print(int(ProcessID))
    # Watchdog process: keeps the session alive and exits when the icon
    # process dies (see AlwaysCheck).
    ProcessAlwaysCheck = Process(target=AlwaysCheck,args=(str(ProcessID),))
    ProcessAlwaysCheck.start()
    ProcessIcon.join()
    ProcessAlwaysCheck.join()
|
# Generated by Django 2.0.6 on 2018-06-28 09:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: relax the ``transfer`` FK and redefine
    the ``transfer_status`` choices."""

    dependencies = [
        ('sale', '0007_auto_20180628_1632'),
    ]
    operations = [
        migrations.AlterField(
            model_name='transfer',
            name='transfer_position',
            field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='sale.Position', verbose_name='门店'),
        ),
        migrations.AlterField(
            model_name='transfer',
            name='transfer_status',
            # NOTE(review): default '1000' is not one of the declared
            # choices — confirm this is intended.
            field=models.CharField(choices=[('000101', '入库'), ('100001', '销售'), ('011001', '调入'), ('011002', '调出')], default='1000', max_length=6, verbose_name='分类'),
        ),
    ]
|
from flask import Flask, render_template, request, redirect
from flask_mysqldb import MySQL
import yaml
app = Flask(__name__)

# config db -- safe_load refuses to construct arbitrary Python objects from
# the YAML file, and the context manager closes the handle (the original
# used the deprecated bare yaml.load and leaked the open file).
with open("config.yaml") as _config_file:
    conf = yaml.safe_load(_config_file)
app.config["MYSQL_HOST"] = conf['mysql_host']
app.config["MYSQL_USER"] = conf['mysql_name']
app.config["MYSQL_PASSWORD"] = conf['mysql_password']
# NOTE(review): the DB name reuses the 'mysql_name' key (same as the user);
# confirm the config file really has no separate database-name entry.
app.config["MYSQL_DB"] = conf['mysql_name']
mysql = MySQL(app)
@app.route("/")
def my_home():
    """Serve the landing page."""
    context = {"title": "My Portfolio"}
    return render_template("index.html", **context)
@app.route("/<string:page_name>")
def html_page(page_name):
    """Render a top-level template named after the URL segment.

    NOTE(review): page_name comes straight from the URL; Jinja's loader
    blocks path traversal, but confirm only intended pages are exposed.
    """
    return render_template(f"{page_name}.html", title="My Portfolio")
def save_data(data):
    """Insert a contact-form submission into the Contacts table.

    data: dict with 'email', 'subject' and 'message' keys.
    Returns an error string on DB failure, otherwise None.
    """
    # BUG FIX: the original only compared 'message' against "" and relied
    # on truthiness for the other two; require all three to be non-empty.
    if data["email"] != "" and data["subject"] != "" and data["message"] != "":
        try:
            cur = mysql.connection.cursor()
            cur.execute(
                "INSERT INTO Contacts(email, subject, message) VALUES(%s, %s, %s)",
                (data["email"], data["subject"], data["message"]),
            )
            mysql.connection.commit()
            cur.close()
        except Exception:
            # Best-effort behaviour kept, but with a readable message
            # (was the typo "somthing worng wents").
            return "something went wrong"
@app.route("/submit-form", methods=["POST", "GET"])
def submit_form():
    """Handle the contact form: persist the data, then redirect."""
    if request.method == "POST":
        try:
            data = request.form.to_dict()
            # All three fields must be non-empty (the original only
            # compared 'message' against "").
            if data["email"] != "" and data["subject"] != "" and data["message"] != "":
                save_data(data)
                return redirect("thanks")
            else:
                return redirect("404")
        except Exception:
            # Was the typo "somthing worng went".
            return "something went wrong"
# url for admin to see all contact messages
@app.route("/<string:admin>/<int:password>")
def get_data(admin, password):
    """Admin-only listing of all contact messages.

    NOTE(review): credentials embedded in the URL path end up in proxy
    logs and browser history — consider a real auth mechanism.
    """
    if admin == conf['url_name'] and password == conf['url_password']:
        cur = mysql.connection.cursor()
        result_count = cur.execute("SELECT * FROM Contacts")
        try:
            if result_count > 0:
                userData = cur.fetchall()
                return render_template("people.html", userData=userData)
        finally:
            # BUG FIX: the cursor previously leaked when the table was empty.
            cur.close()
    return "something went wrong"
if __name__ == '__main__':
    # Debug mode: auto-reload and in-browser tracebacks (development only).
    app.debug = True
    app.run()
from PyQt4.QtGui import *
from . import editconnectiondialog
class TStandardConnectionDialog(QDialog):
    """Dialog that lists saved connections and lets the user add, edit and
    delete them. ``getConnectionList`` returns the (possibly modified)
    connection dicts after the dialog closes."""

    def __init__(self, p_connections, parent=None):
        """Build the UI and populate the table from *p_connections*, a list
        of dicts with 'name', 'username', 'password' and 'url' keys."""
        QDialog.__init__(self, parent)
        l_top = QVBoxLayout(self)

        # Top row of action buttons.
        l_buttonTopBox = QDialogButtonBox(self)
        l_top.addWidget(l_buttonTopBox)
        l_addButton = l_buttonTopBox.addButton("&Add", QDialogButtonBox.ActionRole)
        l_addButton.clicked.connect(self.showAdd)
        self.editButton = l_buttonTopBox.addButton("&Edit", QDialogButtonBox.ActionRole)
        self.editButton.setEnabled(False)
        self.editButton.clicked.connect(self.editConnection)
        self.deleteButton = l_buttonTopBox.addButton("&Delete", QDialogButtonBox.ActionRole)
        self.deleteButton.setEnabled(False)
        self.deleteButton.clicked.connect(self.removeConnection)

        # Read-only, single-row-selection table of connections.
        self.connectionList = QTableWidget()
        self.connectionList.setEditTriggers(QAbstractItemView.NoEditTriggers)
        l_top.addWidget(self.connectionList)

        l_buttonBottomBox = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
        l_top.addWidget(l_buttonBottomBox)
        l_buttonBottomBox.accepted.connect(self.accept)
        l_buttonBottomBox.rejected.connect(self.reject)

        self.connectionList.setColumnCount(3)
        self.connectionList.setRowCount(len(p_connections))
        self.connectionList.setHorizontalHeaderItem(0, QTableWidgetItem("Connection name"))
        self.connectionList.setHorizontalHeaderItem(1, QTableWidgetItem("Username"))
        self.connectionList.setHorizontalHeaderItem(2, QTableWidgetItem("Url"))
        self.connectionList.setSelectionBehavior(QAbstractItemView.SelectRows)
        self.connectionList.setSelectionMode(QAbstractItemView.SingleSelection)
        self.connectionList.itemSelectionChanged.connect(self.selectionChanged)

        for l_rowCnt, l_list in enumerate(p_connections):
            self.setConnectListRow(l_rowCnt, l_list)
        # BUG FIX: the original was missing the call parentheses, so the
        # columns were never actually resized.
        self.connectionList.resizeColumnsToContents()

    def removeConnection(self):
        """Delete the currently selected row."""
        l_model = self.connectionList.selectionModel()
        l_selectedRows = l_model.selectedRows()
        if len(l_selectedRows) == 1:
            self.connectionList.removeRow(l_selectedRows[0].row())

    def setConnectListRow(self, p_rowNo, p_data):
        """Fill row *p_rowNo* from the connection dict *p_data*; the full
        dict is stashed on the first column's item under role 32
        (Qt.UserRole)."""
        l_name = QTableWidgetItem(p_data["name"])
        l_name.setData(32, p_data)
        self.connectionList.setItem(p_rowNo, 0, l_name)
        self.connectionList.setItem(p_rowNo, 1, QTableWidgetItem(p_data["username"]))
        self.connectionList.setItem(p_rowNo, 2, QTableWidgetItem(p_data["url"]))

    def editConnection(self):
        """Open the edit dialog for the selected connection and, on accept,
        write the updated values back into the table row."""
        l_model = self.connectionList.selectionModel()
        l_selectedRows = l_model.selectedRows()
        if len(l_selectedRows) == 1:
            l_row = l_selectedRows[0].row()
            l_selectedItem = self.connectionList.item(l_row, 0)
            if l_selectedItem:
                l_data = l_selectedItem.data(32)
                # TODO(review): 'editTEditConnectionDialog' is inconsistent
                # with 'TEditConnectionDialog' used in showAdd() — confirm
                # which name the editconnectiondialog module exports.
                l_dialog = editconnectiondialog.editTEditConnectionDialog(l_data)
                l_dialog.exec_()
                if l_dialog.result() == 1:
                    self.setConnectListRow(l_row, l_dialog.getData())

    def getConnectionList(self):
        """Return the connection dicts currently held in the table."""
        l_connections = []
        for l_cnt in range(0, self.connectionList.rowCount()):
            l_connections.append(self.connectionList.item(l_cnt, 0).data(32))
        return l_connections

    def selectionChanged(self):
        """Enable Edit/Delete only while a row is selected."""
        l_buttonState = self.connectionList.selectionModel().hasSelection()
        self.editButton.setEnabled(l_buttonState)
        self.deleteButton.setEnabled(l_buttonState)

    def showAdd(self):
        """Open the edit dialog with an empty connection and append the
        result as a new row if the user accepts."""
        l_dialog = editconnectiondialog.TEditConnectionDialog(
            {"username": "", "name": "", "password": "", "url": ""})
        l_dialog.exec_()
        if l_dialog.result() == 1:
            l_rowNum = self.connectionList.rowCount()
            self.connectionList.insertRow(l_rowNum)
            self.setConnectListRow(l_rowNum, l_dialog.getData())
#!/usr/bin/env python
# coding: utf-8
# Copyright 2013 The Font Bakery Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See AUTHORS.txt for the list of Authors and LICENSE.txt for the License.
from __future__ import print_function
import argparse
from fontTools.ttLib import TTLibError
from bakery_cli.ttfont import Font
from bakery_cli.scripts import vmet
parser = argparse.ArgumentParser()

# Ascent parameters.
parser.add_argument('-a', '--ascents', type=int,
                    help=("Set new ascents value in 'Horizontal Header'"
                          " table"))
parser.add_argument('-ah', '--ascents-hhea', type=int,
                    help=("Set new ascents value in 'Horizontal Header'"
                          " table ('hhea'). This argument"
                          " cancels --ascents."))
# BUG FIX (help text): the OS/2 and OS/2.Win variants previously claimed
# to target the 'Horizontal Header' table.
parser.add_argument('-at', '--ascents-typo', type=int,
                    help=("Set new ascents value in 'OS/2'"
                          " table (sTypoAscender). This argument"
                          " cancels --ascents."))
parser.add_argument('-aw', '--ascents-win', type=int,
                    help=("Set new ascents value in 'OS/2'"
                          " table (usWinAscent). This argument"
                          " cancels --ascents."))

# Descent parameters.
parser.add_argument('-d', '--descents', type=int,
                    help=("Set new descents value in 'Horizontal Header'"
                          " table"))
parser.add_argument('-dh', '--descents-hhea', type=int,
                    help=("Set new descents value in 'Horizontal Header'"
                          " table ('hhea'). This argument"
                          " cancels --descents."))
parser.add_argument('-dt', '--descents-typo', type=int,
                    help=("Set new descents value in 'OS/2'"
                          " table (sTypoDescender). This argument"
                          " cancels --descents."))
parser.add_argument('-dw', '--descents-win', type=int,
                    help=("Set new descents value in 'OS/2'"
                          " table (usWinDescent). This argument"
                          " cancels --descents."))

# Linegap parameters.
parser.add_argument('-l', '--linegaps', type=int,
                    help=("Set new linegaps value in 'Horizontal Header'"
                          " table"))
parser.add_argument('-lh', '--linegaps-hhea', type=int,
                    help=("Set new linegaps value in 'Horizontal Header'"
                          " table ('hhea')"))
parser.add_argument('-lt', '--linegaps-typo', type=int,
                    help=("Set new linegaps value in 'OS/2'"
                          " table (sTypoLineGap)"))

parser.add_argument('--autofix', action="store_true",
                    help="Autofix font metrics")
parser.add_argument('ttf_font', nargs='+', metavar='ttf_font',
                    help="Font file in OpenType (TTF/OTF) format")
options = parser.parse_args()
fonts = options.ttf_font

# BUG FIX: the original chained the option values with ``or``, so a
# legitimate metric value of 0 was treated as "not given" and, when all
# supplied values were 0, processing was skipped entirely. Compare against
# None instead.
metric_overrides = (options.ascents, options.descents, options.linegaps,
                    options.ascents_hhea, options.ascents_typo,
                    options.ascents_win, options.descents_hhea,
                    options.descents_typo, options.descents_win,
                    options.linegaps_hhea, options.linegaps_typo)

if any(value is not None for value in metric_overrides):
    for f in fonts:
        try:
            metrics = Font(f)
        except TTLibError as ex:
            print('Error: {0}: {1}'.format(f, ex))
            continue

        # set ascents, descents and linegaps. FontVerticalMetrics will
        # not set those values if None, and overwrite them if concrete
        # argument has been passed
        metrics.ascents.set(options.ascents)
        metrics.descents.set(options.descents)
        metrics.linegaps.set(options.linegaps)

        metrics.ascents.hhea = options.ascents_hhea
        metrics.ascents.os2typo = options.ascents_typo
        metrics.ascents.os2win = options.ascents_win

        metrics.descents.hhea = options.descents_hhea
        metrics.descents.os2typo = options.descents_typo
        metrics.descents.os2win = options.descents_win

        metrics.linegaps.hhea = options.linegaps_hhea
        metrics.linegaps.os2typo = options.linegaps_typo
        metrics.save(f + '.fix')
elif options.autofix:
    vmet.metricfix(fonts)
else:
    print(vmet.metricview(fonts))
|
import requests, json

# Fetch the notices endpoint and dump the raw body to stdout.
response = requests.get('https://kmuin.com/api/v1/notices/')
print(response.text)
|
import boto3
from botocore.client import Config

ID = 'xxxx'  # AWS access ID (placeholder — never commit real credentials)
SECRET = 'xxx'  # AWS secret access key (placeholder)
BUCKET_NAME = 'xxxxx'  # s3 Bucket name

s3 = boto3.resource(
    's3',
    aws_access_key_id=ID,
    aws_secret_access_key=SECRET,
    config=Config(signature_version='s3v4')
)

# BUG FIX: the file handle was opened and never closed; the context
# manager releases it once the upload finishes.
with open('agne.jpg', 'rb') as data:
    s3.Bucket(BUCKET_NAME).put_object(Key='agne.jpg', Body=data)
print("Done")
|
from django.urls import path
from sign import views_if
# URL routes for the sign app's interface views.
urlpatterns = [
    path('test/', views_if.test, name='test'),
    path('add_event/', views_if.add_event, name='add_event'),
    path('get_event_list/', views_if.get_event_list, name='get_event_list'),
]
|
from . import base, launch, util
from .base import config_for_dir
|
# Generated by Django 3.0.7 on 2020-10-09 08:40
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: add payment/voucher bookkeeping fields to
    ``postaud`` and relax several existing columns to allow NULLs."""

    dependencies = [
        ('cl_app', '0002_auto_20201009_0613'),
        ('cl_table', '0016_postaud'),
    ]
    operations = [
        migrations.AddField(
            model_name='postaud',
            name='Appointment',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='cl_table.Appointment'),
        ),
        migrations.AddField(
            model_name='postaud',
            name='ItemSIte_Codeid',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='cl_app.ItemSitelist'),
        ),
        migrations.AddField(
            model_name='postaud',
            name='billable_amount',
            field=models.FloatField(null=True),
        ),
        migrations.AddField(
            model_name='postaud',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True, null=True),
        ),
        migrations.AddField(
            model_name='postaud',
            name='credit_debit',
            field=models.BooleanField(default=False, null=True),
        ),
        migrations.AddField(
            model_name='postaud',
            name='discount_amt',
            field=models.FloatField(null=True),
        ),
        migrations.AddField(
            model_name='postaud',
            name='is_voucher',
            field=models.BooleanField(default=False, null=True),
        ),
        migrations.AddField(
            model_name='postaud',
            name='pay_premise',
            field=models.BooleanField(default=False, null=True),
        ),
        migrations.AddField(
            model_name='postaud',
            name='points',
            field=models.BooleanField(default=False, null=True),
        ),
        migrations.AddField(
            model_name='postaud',
            name='posdaudlineamountassign',
            field=models.CharField(blank=True, db_column='posdaudLineAmountAssign', max_length=500, null=True),
        ),
        migrations.AddField(
            model_name='postaud',
            name='posdaudlineamountused',
            field=models.FloatField(blank=True, db_column='posdaudLineAmountUsed', null=True),
        ),
        migrations.AddField(
            model_name='postaud',
            name='posdaudlineno',
            field=models.CharField(blank=True, db_column='POSDAUDLineNo', max_length=50, null=True),
        ),
        migrations.AddField(
            model_name='postaud',
            name='pp_bal',
            field=models.FloatField(blank=True, db_column='PP_Bal', null=True),
        ),
        migrations.AddField(
            model_name='postaud',
            name='prepaid',
            field=models.BooleanField(default=False, null=True),
        ),
        migrations.AddField(
            model_name='postaud',
            name='subtotal',
            field=models.FloatField(null=True),
        ),
        migrations.AddField(
            model_name='postaud',
            name='tax',
            field=models.FloatField(null=True),
        ),
        migrations.AddField(
            model_name='postaud',
            name='updated_at',
            field=models.DateTimeField(auto_now=True, null=True),
        ),
        migrations.AddField(
            model_name='postaud',
            name='voucher_amt',
            field=models.FloatField(null=True),
        ),
        migrations.AddField(
            model_name='postaud',
            name='voucher_no',
            field=models.CharField(db_column='Voucher_No', max_length=50, null=True),
        ),
        migrations.AlterField(
            model_name='postaud',
            name='cas_logno',
            field=models.CharField(blank=True, max_length=20, null=True),
        ),
        migrations.AlterField(
            model_name='postaud',
            name='dt_lineno',
            field=models.IntegerField(null=True),
        ),
        migrations.AlterField(
            model_name='postaud',
            name='itemsite_code',
            field=models.CharField(db_column='ItemSIte_Code', max_length=10, null=True),
        ),
        migrations.AlterField(
            model_name='postaud',
            name='pay_status',
            field=models.BooleanField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='postaud',
            name='sa_date',
            field=models.DateTimeField(auto_now_add=True, null=True),
        ),
        migrations.AlterField(
            model_name='postaud',
            name='sa_time',
            field=models.DateTimeField(auto_now_add=True, null=True),
        ),
        migrations.AlterField(
            model_name='postaud',
            name='sa_transacno',
            field=models.CharField(blank=True, max_length=20, null=True),
        ),
    ]
|
from django.shortcuts import render,redirect, get_object_or_404, get_list_or_404
from django.views.decorators import gzip
from django.http import StreamingHttpResponse, HttpResponseServerError
import cv2, time, operator, datetime
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import img_to_array
import numpy as np
from os.path import split
import os
from statistics import mode
from smile.models import PHRASE, FACE, USER
from random import randint
from django.utils import timezone
# Prompt text shown to the user between capture steps.
msg = "Please click the next button"
# Best captured frame per smile level: {level: [probability, image_path]};
# None means that level has not been captured yet.
emotion_image_data = {0: None, # neutral face
                      1: None, # level_1
                      2: None, # level_2
                      3: None, # level_3
                      }
# Phrase-of-the-day records, filled by VideoCamera_smile.today_phrase().
phraseList = {}
# model path
# # Daeyun's machine
# detection_model_path = 'C:/dev/finalProject2/project/smile/detection_models/haarcascade_frontalface_default.xml'
# emotion_model_path = 'C:/dev/finalProject2/project/smile/emotion_models/_vgg16_01_.34-0.77-0.6478.h5'
# Chanwook's machine
# detection_model_path = 'C:/Users/acorn-519/PycharmProjects/finalProject/project/smile/detection_models/haarcascade_frontalface_default.xml'
# emotion_model_path = 'C:/Users/acorn-519/PycharmProjects/finalProject/project/smile/emotion_models/_vgg16_01_.34-0.77-0.6478.h5'
# Ayoung's machine (currently active)
# NOTE(review): absolute, developer-specific paths — should move to settings.
detection_model_path = 'C:/Users/acorn-508/PycharmProjects/finalProject/project/smile/detection_models/haarcascade_frontalface_default.xml'
emotion_model_path = 'C:/Users/acorn-508/PycharmProjects/finalProject/project/smile/emotion_models/_vgg16_01_.34-0.77-0.6478.h5'
# Labels matching the emotion classifier's output indices.
emotion_labels = ["happy", "angry", "sad", "neutral", "surprise"]
# initialization
frame_window = 30        # rolling window size for emotion smoothing
emotion_window = []      # recent emotion label texts
best_prob_level = [None] # single-slot holder for the best [prob, jpeg] pair
# Random suffix so saved image filenames don't collide between sessions.
randInt = randint(1,9999999)
def index(request):
    """Route the user to the next capture page based on which smile levels
    have been captured so far.

    Levels must be filled in order (neutral, then 1..3); any other
    combination falls through to the main page. Uses ``is not None``
    instead of the original ``== None`` chains (PEP 8).
    """
    captured = tuple(emotion_image_data[i] is not None for i in range(4))
    # Exact fill patterns accepted by the original if/elif chain.
    pages = {
        (False, False, False, False): 'smile/neutral.html',
        (True, False, False, False): 'smile/smile_1.html',
        (True, True, False, False): 'smile/smile_2.html',
        (True, True, True, False): 'smile/smile_3.html',
        (True, True, True, True): 'smile/smile_3.html',
    }
    return render(request, pages.get(captured, 'service/mainpage1.html'))
def ListPhrase(request):
    """Render the detection page with the collected phrase records."""
    #context = {'phraseList':phraseList}
    # NOTE(review): this passes the bound ``dict.values`` method, not its
    # result; Django templates call callables on resolution so it can work,
    # but ``phraseList.values()`` would be clearer — confirm before changing.
    context ={'phraseList':phraseList.values}
    print(context)
    return render(request, 'smile/emotion_detection_2.html',context)
def streamingImages(request):
    """Render the level-1 page with the saved image records."""
    #saved_url
    # NOTE(review): passes the bound ``dict.values`` method (no call) —
    # relies on Django templates invoking callables; confirm intent.
    context = {'imgUrl' :emotion_image_data.values}
    return render(request, 'smile/smile_1.html',context)
def warmup(request):
    """Render the camera warm-up page."""
    return render(request, 'smile/warmup.html')
def result(request):
    """Render the final result page."""
    return render(request, 'smile/result.html')
class ImgCamera_smile:
    """Serves previously saved level images from disk as JPEG bytes."""

    def get_frame_img(self,level_index):
        """Return the saved image for *level_index* re-encoded as JPEG bytes.

        NOTE(review): falls through (returning None) when the saved file no
        longer exists; callers concatenate the result with bytes, so that
        would raise there — confirm the file cannot vanish in practice.
        """
        # emotion_image_data[level] == [probability, image_path]
        path = emotion_image_data[level_index][1]
        if os.path.exists(path):
            img = cv2.imread(path,cv2.IMREAD_COLOR)
            success, frame = cv2.imencode('.jpg',img)
            img = frame.tobytes()
            return img
        pass
class VideoCamera_smile:
    """Webcam pipeline: detects faces, classifies emotion with a Keras CNN,
    and records results into the module globals (``emotion_image_data``,
    ``phraseList``, ``best_prob_level``).

    NOTE(review): ``global`` statements in a class body have no effect on
    the methods below — module globals are reachable from methods anyway.
    """
    global detection_model_path
    global emotion_model_path
    global emotion_labels
    global frame_window
    global emotion_window
    global best_prob_level
    global emotion_image_data
    global phraseList
    def __init__(self):
        # Device-0 webcam, Haar face detector, and the emotion classifier.
        self.video = cv2.VideoCapture(0)
        self.cascade = cv2.CascadeClassifier(detection_model_path)
        self.emotion_classifier = load_model(emotion_model_path, compile=False)
        self.emotion_target_size = self.emotion_classifier.input_shape[1:3] # emotion_target_size = (48,48)
        self.smile_count = 0        # frames captured so far for the current level
        self.save_file_count = 0
        self.smile_data = {} # {count:percent}
        self.emotion_label_list =[]   # labels gathered for the daily phrase
        self.emo_label_exist = False  # True once the daily phrase was chosen
        self.emo_image_exist = False  # True once a level image was saved
        self.frame_count = 0
        self.today_emotion_label = []
    def __del__(self):
        # Release the webcam when the camera object is garbage-collected.
        self.video.release()
    # self.save_file_count
    def warmUp(self):
        """Return one raw webcam frame as JPEG bytes (no detection)."""
        success_warmup, frame_warmup = self.video.read()
        # self.gray = cv2.cvtColor(frame_warmup, cv2.COLOR_BGR2GRAY)
        # self.faces = self.cascade.detectMultiScale(self.gray, scaleFactor=1.1, minNeighbors=5)
        # for face_coordinates in self.faces:
        success, jpeg = cv2.imencode('.jpg', frame_warmup)
        return jpeg.tobytes()
    # Phrase of the day
    def today_phrase(self, img_count):
        """Sample the user's emotion over *img_count* frames, then store a
        random PHRASE row matching the dominant emotion in ``phraseList``.
        Returns the current frame as JPEG bytes each call."""
        while self.emo_label_exist != True:
            success, frame = self.video.read()
            self.gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            self.faces = self.cascade.detectMultiScale(self.gray, scaleFactor=1.1, minNeighbors=5)
            for face_coordinates in self.faces:
                # Crop the face region and prepare it for the classifier.
                gray_face = self.gray[face_coordinates[1]:face_coordinates[1] + face_coordinates[3],
                            face_coordinates[0]:face_coordinates[0] + face_coordinates[2]]
                gray_face = cv2.resize(gray_face, (48, 48))
                gray_face = gray_face.astype("float") / 255.0
                gray_face = img_to_array(gray_face)
                gray_face = np.expand_dims(gray_face, axis=0) # (48,48,1)
                emotion_prediction = self.emotion_classifier.predict(gray_face)[0]
                emotion_probability = np.max(emotion_prediction)
                emotion_probability = round(emotion_probability * 100, 2) # round to 2 decimals, e.g. 90.12
                emotion_label = np.argmax(emotion_prediction)
                emotion_text = emotion_labels[emotion_label] # happy, sad, surprise
                emotion_window.append(emotion_text)
                # Still collecting: record the label and return this frame.
                while self.frame_count < img_count:
                    self.frame_count += 1
                    draw_rectangle(face_coordinates, frame, (0, 255, 100))
                    put_text(face_coordinates, frame, "checking", (0, 255, 100)),
                    success, jpeg = cv2.imencode('.jpg', frame)
                    jpeg_tobytes = jpeg.tobytes()
                    self.emotion_label_list.append(emotion_label)
                    return jpeg_tobytes
                # Enough samples: pick the dominant emotion and a phrase.
                while self.frame_count >= img_count:
                    # 'surprise' (index 4) is mapped to 'happy' (index 0)
                    if mode(self.emotion_label_list) == 4:
                        self.today_emotion_label.append(0)
                    self.today_emotion_label.append(mode(self.emotion_label_list))
                    print(self.today_emotion_label)
                    # fetch the phrase of the day
                    i = randint(1,80)
                    Phrase_list = get_list_or_404(PHRASE, EMOTION_KIND=self.today_emotion_label[0])[i-1]
                    print(type(Phrase_list))
                    phraseList[0] = Phrase_list
                    print(phraseList[0])
                    print(type(phraseList[0]))
                    success, jpeg = cv2.imencode('.jpg', frame)
                    jpeg_tobytes = jpeg.tobytes()
                    self.frame_count = 0
                    self.emo_label_exist = True
                    self.emotion_label_list.clear()
                    self.today_emotion_label.clear()
                    return jpeg_tobytes
        # print(phraseList)
        # Phrase already chosen: just stream annotated frames.
        success_next, frame_next = self.video.read()
        self.faces = self.cascade.detectMultiScale(self.gray, scaleFactor=1.1, minNeighbors=5)
        for face_coordinates in self.faces:
            put_text_info(face_coordinates, frame_next, "", (0, 255, 100))
        success, jpeg = cv2.imencode('.jpg', frame_next)
        return jpeg.tobytes()
    # Capture smiling / neutral expressions for a level
    def get_frame(self, img_count, level_index, emotion='happy'):
        """Capture frames until *img_count* frames match *emotion*, then save
        the highest-probability frame for *level_index* via ``imgwrite``.
        Returns the current frame as JPEG bytes each call."""
        global emotion_image_data
        # If there is no saved capture for this session yet:
        # while len(emotion_image_data) == 0:
        # while emotion_image_data[0] == None:
        #while emotion_image_data[level_index] == None:
        while self.emo_image_exist != True:
            success, frame = self.video.read()
            success_saved, frame_saved = self.video.read()
            success_next, frame_next = self.video.read()
            self.gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            self.rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) # added 09/24
            self.faces = self.cascade.detectMultiScale(self.gray, scaleFactor=1.1, minNeighbors=5)
            for face_coordinates in self.faces:
                gray_face = self.gray[face_coordinates[1]:face_coordinates[1] + face_coordinates[3],
                            face_coordinates[0]:face_coordinates[0] + face_coordinates[2]]
                gray_face = cv2.resize(gray_face, (48, 48))
                gray_face = gray_face.astype("float") / 255.0
                gray_face = img_to_array(gray_face)
                gray_face = np.expand_dims(gray_face, axis=0) # (48,48,1)
                emotion_prediction = self.emotion_classifier.predict(gray_face)[0]
                emotion_probability = np.max(emotion_prediction)
                emotion_probability = round(emotion_probability*100,2) # round to 2 decimals, e.g. 90.12
                emotion_label = np.argmax(emotion_prediction)
                emotion_text = emotion_labels[emotion_label] # happy, sad, surprise
                emotion_window.append(emotion_text)
                # Overlay colour keyed on the detected emotion (unused below).
                if emotion_text == 'happy':
                    color = emotion_probability * np.asarray((255, 0, 0))
                elif emotion_text == 'angry':
                    color = emotion_probability * np.asarray((0, 0, 255))
                elif emotion_text == 'sad':
                    color = emotion_probability * np.asarray((255, 255, 0))
                elif emotion_text == 'neutral':
                    color = emotion_probability * np.asarray((0, 255, 255))
                else:
                    color = emotion_probability * np.asarray((0, 255, 0))
                color = color.astype(int)
                color = color.tolist()
                if emotion_text == emotion:
                    # Still collecting matching frames for this level.
                    while self.smile_count < img_count: # 30
                        self.smile_count += 1
                        draw_rectangle(face_coordinates, frame, (0, 0, 250))
                        # put_text(face_coordinates, frame, (str(self.smile_count)),(0, 255, 100))
                        success, jpeg = cv2.imencode('.jpg', frame)
                        jpeg_tobytes = jpeg.tobytes()
                        success_saved, jpeg_saved = cv2.imencode('.jpg', frame_saved)
                        # smile percent
                        emotion_probability
                        self.smile_data[self.smile_count] = [emotion_probability, jpeg_saved] # dictionary
                        return jpeg_tobytes
                    # Quota reached: keep the best-probability frame.
                    while self.smile_count >= img_count and self.smile_count % img_count == 0:
                        prob_list = []
                        for keys, values in self.smile_data.items():
                            prob_list.append(values) # values = [prob, img]
                        # if best_prob_level[0] == None:
                        best_prob_level[0] = max(prob_list) # [[prob, img]]
                        draw_rectangle(face_coordinates, frame, (0, 255, 100))
                        # put_text(face_coordinates, frame),
                        # (0, 255, 100))
                        success, jpeg = cv2.imencode('.jpg', frame)
                        imgwrite(best_prob_level, emotion_image_data, level_index,randInt)
                        self.smile_count = 0
                        self.emo_image_exist = True
                        print(emotion_image_data)
                        # else:
                        # best_prob_level[0] = None
                else:
                    # Expression broke: restart the count for this level.
                    self.smile_count = 0
                success, jpeg = cv2.imencode('.jpg', frame)
                return jpeg.tobytes()
        # Data already saved for this level: stream "SUCCESS" frames.
        success_next, frame_next = self.video.read()
        self.faces = self.cascade.detectMultiScale(self.gray, scaleFactor=1.1, minNeighbors=5)
        for face_coordinates in self.faces:
            put_text_info(face_coordinates, frame_next, "SUCCESS", (0, 255, 100))
        success, jpeg = cv2.imencode('.jpg', frame_next)
        return jpeg.tobytes()
#-------------------------------------------------------------------------------------------------------
def video(request):
    # NOTE(review): ``render`` requires a template name — as written this
    # raises TypeError when hit; confirm which template was intended.
    return render(request)
def video_today_phrase(request):
    """Stream phrase-of-the-day detection frames as multipart MJPEG."""
    try:
        time.sleep(3)  # give the camera time to come up
        return StreamingHttpResponse(gen_today_phrase(VideoCamera_smile(), frame_count=45),
                                     content_type="multipart/x-mixed-replace;boundary=frame")
    except HttpResponseServerError as e:
        print("aborted", e)  # typo fix: was "asborted"
def video_neutral(request):
    """Stream neutral-face capture frames (level 0) as multipart MJPEG."""
    try:
        time.sleep(3)  # give the camera time to come up
        return StreamingHttpResponse(gen_non_smile(VideoCamera_smile(), frame_count=15, level_index=0),
                                     content_type="multipart/x-mixed-replace;boundary=frame")
    except HttpResponseServerError as e:
        print("aborted", e)  # typo fix: was "asborted"
def video_smile_level1(request):
    """Stream level-1 smile capture frames as multipart MJPEG."""
    try:
        time.sleep(3)  # give the camera time to come up
        return StreamingHttpResponse(gen_level(VideoCamera_smile(), frame_count=10, level_index=1),
                                     content_type="multipart/x-mixed-replace;boundary=frame")
    except HttpResponseServerError as e:
        print("aborted", e)  # typo fix: was "asborted"
def video_smile_level2(request):
    """Stream level-2 smile capture frames as multipart MJPEG."""
    try:
        time.sleep(3)  # give the camera time to come up
        return StreamingHttpResponse(gen_level(VideoCamera_smile(), frame_count=20, level_index=2),
                                     content_type="multipart/x-mixed-replace;boundary=frame")
    except HttpResponseServerError as e:
        print("aborted", e)  # typo fix: was "asborted"
def video_smile_level3(request):
    """Stream level-3 smile capture frames as multipart MJPEG."""
    try:
        time.sleep(3)  # give the camera time to come up
        return StreamingHttpResponse(gen_level(VideoCamera_smile(), frame_count=30, level_index=3),
                                     content_type="multipart/x-mixed-replace;boundary=frame")
    except HttpResponseServerError as e:
        print("aborted", e)  # typo fix: was "asborted"
def video_warmup(request):
    """Stream raw warm-up frames (no detection) as multipart MJPEG."""
    try:
        return StreamingHttpResponse(gen_warmup(VideoCamera_smile()),
                                     content_type="multipart/x-mixed-replace;boundary=frame")
    except HttpResponseServerError as e:
        print("aborted", e)  # typo fix: was "asborted"
def img_smile_neutral(request):
    """Stream the saved neutral-face image (level 0) as multipart MJPEG."""
    try:
        return StreamingHttpResponse(gen_img(ImgCamera_smile(), level_index=0),
                                     content_type="multipart/x-mixed-replace;boundary=frame")
    except HttpResponseServerError as e:
        print("aborted", e)  # typo fix: was "asborted"
def img_smile_level_1(request):
    """Stream the saved level-1 image as multipart MJPEG."""
    try:
        return StreamingHttpResponse(gen_img(ImgCamera_smile(), level_index=1),
                                     content_type="multipart/x-mixed-replace;boundary=frame")
    except HttpResponseServerError as e:
        print("aborted", e)  # typo fix: was "asborted"
def img_smile_level_2(request):
    """Stream the saved level-2 image as multipart MJPEG."""
    try:
        return StreamingHttpResponse(gen_img(ImgCamera_smile(), level_index=2),
                                     content_type="multipart/x-mixed-replace;boundary=frame")
    except HttpResponseServerError as e:
        print("aborted", e)  # typo fix: was "asborted"
def img_smile_level_3(request):
    """Stream the saved level-3 image as multipart MJPEG."""
    try:
        return StreamingHttpResponse(gen_img(ImgCamera_smile(), level_index=3),
                                     content_type="multipart/x-mixed-replace;boundary=frame")
    except HttpResponseServerError as e:
        print("aborted", e)  # typo fix: was "asborted"
def get_best_smile_img(request):
    """Stream the saved image of the smile level (1-3) with the highest
    classifier probability; falls back to level 3."""
    # emotion_image_data[level] == [probability, image_path]
    best_prob = max([emotion_image_data[1], emotion_image_data[2],
                     emotion_image_data[3]])[0]
    # BUG FIX: the original loop reset img_keys to 3 on every non-matching
    # entry, so the final value was always 3 (dict iterates 0..3) and the
    # best level was never actually selected.
    img_keys = 3
    for keys, values in emotion_image_data.items():
        if keys != 0 and values[0] == best_prob:
            img_keys = keys
            break
    print(img_keys)
    try:
        return StreamingHttpResponse(gen_img(ImgCamera_smile(), level_index=img_keys),
                                     content_type="multipart/x-mixed-replace;boundary=frame")
    except HttpResponseServerError as e:
        print("aborted", e)  # typo fix: was "asborted"
# _______________________________________________________________________
def gen_today_phrase(camera, frame_count):
    """Yield one multipart-MJPEG chunk per today_phrase frame, forever."""
    header = b'--frame\r\nContent-Type: image/jpeg\r\n\r\n'
    while True:
        yield header + camera.today_phrase(frame_count) + b'\r\n\r\n'
def gen_non_smile(camera, frame_count, level_index=0):
    """Yield one multipart-MJPEG chunk per neutral-capture frame, forever."""
    header = b'--frame\r\nContent-Type: image/jpeg\r\n\r\n'
    while True:
        frame_bytes = camera.get_frame(frame_count, level_index, 'neutral')
        yield header + frame_bytes + b'\r\n\r\n'
def gen_warmup(camera):
    """Yield one multipart-MJPEG chunk per warm-up frame, forever."""
    header = b'--frame\r\nContent-Type: image/jpeg\r\n\r\n'
    while True:
        yield header + camera.warmUp() + b'\r\n\r\n'
def gen_level(camera, frame_count, level_index=1):
    """Yield one multipart-MJPEG chunk per smile-capture frame, forever."""
    header = b'--frame\r\nContent-Type: image/jpeg\r\n\r\n'
    while True:
        yield header + camera.get_frame(frame_count, level_index) + b'\r\n\r\n'
def gen_img(camera, level_index):
    """Yield one multipart-MJPEG chunk per saved image read, forever."""
    header = b'--frame\r\nContent-Type: image/jpeg\r\n\r\n'
    while True:
        yield header + camera.get_frame_img(level_index) + b'\r\n\r\n'
# --------------------------------------------------------------------------
'''
def img_sort(best_prob, rank=3):
best_prob_sort = sorted(best_prob.items(), key=operator.itemgetter(1), reverse=True)
best_prob_sort = best_prob_sort[0:rank] # 랭크몇위입력
return best_prob_sort
def imgwrite_new(rank=3):
best_prob_sort = img_sort(best_prob_level)
for i in range(rank):
data_rank = best_prob_sort[i][1][1]
img = cv2.imdecode(data_rank, cv2.IMREAD_COLOR)
cv2.imwrite('C:/dev/finalProject/aiProject/' + str(i) + '.png', img)
def imgwrite(best_prob_level, emotion_image_data):
data_img = best_prob_level[0][1]
img = cv2.imdecode(data_img, cv2.IMREAD_COLOR)
cv2.imwrite('C:/dev/finalProject/aiProject/' + 'best_level' + str(0 + 1) + '.png', img)
emotion_image_data[0] = best_prob_level
print(emotion_image_data)
print(len(emotion_image_data))
'''
def imgwrite(best_prob_level, emotion_image_data, level_index, randInt):
    """Write the best-probability face image for one level to disk and cache it.

    best_prob_level[0] is expected to be a (probability, encoded_image) pair;
    the image is decoded with OpenCV, written under the static faces folder,
    and emotion_image_data[level_index] is set to [probability, image_path]
    for the later DB insert.
    """
    data_prob = best_prob_level[0][0]
    data_img = best_prob_level[0][1]
    # NOTE(review): machine-specific absolute path (one per developer in the
    # original); this should come from project settings, not be hard-coded.
    path = "C:/Users/acorn-508/PycharmProjects/finalProject/project/smile/static/smile/faces/"
    filename = f"{randInt}_level_0{level_index}_.png"
    img = cv2.imdecode(data_img, cv2.IMREAD_COLOR)
    cv2.imwrite(path + filename, img)
    # The original split the freshly built path with os.path.split (shadowing
    # the builtin `dir`) only to rejoin it with '/'; the result is the same
    # path, so build it directly.
    img_path = path + filename
    emotion_image_data[level_index] = [data_prob, img_path]  # cached for imageToDB
    print(emotion_image_data)          # debug output
    print(emotion_image_data[0][1])    # debug: neutral image path
def draw_rectangle(face_coordinates, image_array, color):
    """Draw a 2-px rectangle around a face box (x, y, w, h) on image_array in place."""
    left, top, width, height = face_coordinates
    cv2.rectangle(image_array, (left, top), (left + width, top + height), color, 2)
def put_text(coordinates, image_array, text, color, font_scale=2, thickness=2):
    """Draw text at the first two values of *coordinates* on image_array in place."""
    anchor = tuple(coordinates[:2])
    cv2.putText(image_array, text, anchor, cv2.FONT_HERSHEY_SIMPLEX,
                font_scale, color, thickness)
def put_text_info(coordinates, image_array, text, color, font_scale=1,
                  thickness=3, x_pixel=-60, y_pixel=-120):
    """Draw informational text offset from a reference point, in place.

    NOTE(review): the offsets are *subtracted*, so the negative defaults move
    the text right/down by 60/120 px — confirm this double negation is intended.
    """
    base_x, base_y = coordinates[:2]
    anchor = (base_x - x_pixel, base_y - y_pixel)
    cv2.putText(image_array, text, anchor, cv2.FONT_HERSHEY_SIMPLEX,
                font_scale, color, thickness)
def reset_today_phrase():
    """Empty the module-level phraseList in place (same list object is kept)."""
    del phraseList[:]
def reset(request):
    """Clear all four cached emotion-image slots and render the start page."""
    for slot in range(4):
        emotion_image_data[slot] = None
    return render(request, 'smile/start.html')
# def toMainpage(request):
# emotion_image_data[0] = "None"
# emotion_image_data[1] = "None"
# emotion_image_data[2] = "None"
# emotion_image_data[3] = "None"
# return render(request, 'service/mainpage1.html')
def imageToDB(request):
    """Persist the captured emotion images as one FACE row for the session user.

    Reads the module-level emotion_image_data cache, where slots 0-3 hold
    [probability, absolute_image_path] pairs for the neutral face and smile
    levels 1-3 (filled earlier by imgwrite). Clears the cache and renders the
    main page.

    NOTE(review): if any slot is still None or the session lacks "userEmail",
    this raises — there is no guard here. The cache is cleared *before*
    q.save(); a save failure still loses the cached data.
    """
    # print("test=====")
    print("test=====" , request.session["userEmail"] )  # debug: session user key
    user = USER.objects.get(pk=request.session["userEmail"])
    # .split('project/smile')[1] turns the absolute file path into the
    # site-relative form stored in the DB: /static/smile/faces/<name>.png
    q=FACE(EMAIL=user ,
           STUDY_DATE=datetime.datetime.now(),
           NEUTRAL_PATH=emotion_image_data[0][1].split('project/smile')[1],
           NEUTRAL_PERCENT=emotion_image_data[0][0],
           SMILE1_PATH=emotion_image_data[1][1].split('project/smile')[1],
           SMILE1_PERCENT=emotion_image_data[1][0],
           SMILE2_PATH=emotion_image_data[2][1].split('project/smile')[1],
           SMILE2_PERCENT=emotion_image_data[2][0],
           SMILE3_PATH=emotion_image_data[3][1].split('project/smile')[1],
           SMILE3_PERCENT=emotion_image_data[3][0],)
    # Reset the cache so the next capture session starts clean.
    emotion_image_data[0] = None
    emotion_image_data[1] = None
    emotion_image_data[2] = None
    emotion_image_data[3] = None
    q.save()
    return render(request, 'service/mainpage1.html')
|
class Student:
    """A minimal student record: a name, an age, and a study action."""

    def __init__(self, name, age):
        # Plain attribute storage; no validation is performed.
        self.name = name
        self.age = age

    def study(self, couser_name):  # NOTE(review): parameter name typo kept for compatibility
        # Prints "<name> is studying <course>." (message text is Chinese).
        print("%s正在学习%s." % (self.name, couser_name))
def main():
    """Demo entry point: create a Student and have them study Python."""
    learner = Student('sherlock', 18)
    learner.study('Python')


if __name__ == '__main__':
    main()
|
# Build an n x m matrix of zeros. A comprehension creates a *fresh* row per
# iteration (the pitfall `[[0] * m] * n` would alias one row n times, making
# a[1][0] become 5 below).
n = 3
m = 4
a = [[0] * m for _ in range(n)]
a[0][0] = 5
# Prints 0: row 1 is independent of row 0, so the assignment above did not
# leak into it.
print(a[1][0])
|
from flask import Flask, abort, request
import json
from file_functions import crear_archivo, dar_archivos, eliminar_archivos
from last_files import get_last_files
app = Flask(__name__)
@app.route('/archivos', methods=['POST'])
def crear():
    """Create a file from a JSON body of the form {"filename": ..., "content": ...}.

    Returns 200 on success, 400 when the name is missing or the file could
    not be created.
    """
    cont_json = request.get_json(silent=False, force=True)
    # .get avoids an unhandled KeyError (HTTP 500) when a key is absent from
    # the JSON body; a missing name now falls through to the 400 below.
    filename = cont_json.get('filename')
    content = cont_json.get('content')
    if not filename:
        return 'No ha asignado un nombre al archivo!', 400
    if crear_archivo(filename, content):
        return 'Se ha creado exitosamente el archivo', 200
    return 'No se pudo crear el archivo', 400
@app.route('/archivos', methods=['GET'])
def listar():
    """Return a JSON object listing every stored file under the "files" key."""
    return json.dumps({"files": dar_archivos()})
@app.route('/archivos', methods=['DELETE'])
def eliminar():
    """Delete the file named in the JSON body key "name_delete".

    Returns 200 on success, 400 when no name was given or deletion failed.
    """
    cont_json = request.get_json(silent=False, force=True)
    # .get avoids an unhandled KeyError (HTTP 500) when the key is absent;
    # a missing name now falls through to the explicit 400 below.
    name_delete = cont_json.get('name_delete')
    if not name_delete:
        return 'No ha definido que archivo quiere eliminar!', 400
    if eliminar_archivos(name_delete):
        return 'Archivos eliminados exitosamente', 200
    return 'Imposible eliminar los archivos del directorio', 400
@app.route('/archivos', methods=['PUT'])
def colocar():
    """PUT is not supported on /archivos; always respond 404."""
    abort(404)
@app.route('/archivos/ultimos', methods=['GET'])
def listar_ultimos():
    """Return a JSON object with the most recent files under the "recent" key."""
    return json.dumps({"recent": get_last_files()})
@app.route('/archivos/ultimos', methods=['POST'])
def crear_ultimos():
    """POST is not supported on /archivos/ultimos; always respond 404."""
    abort(404)


@app.route('/archivos/ultimos', methods=['DELETE'])
def eliminar_ultimos():
    """DELETE is not supported on /archivos/ultimos; always respond 404."""
    abort(404)


@app.route('/archivos/ultimos', methods=['PUT'])
def colocar_ultimos():
    """PUT is not supported on /archivos/ultimos; always respond 404."""
    abort(404)
if __name__ == "__main__":
app.run(host='0.0.0.0',port=10500,debug='True') |
import sys
import math
from fractions import Fraction
# Cache of already-computed probabilities, keyed by (i, n).
memo = {}
# Number of cache hits, reported by the script for diagnostics.
saved = 0


def p(i: Fraction, n: Fraction) -> Fraction:
    """Survival probability for bridge-group index *i* after *n* steps.

    Recurrence: p(i, n) = (p(i, n-1) + p(i-1, n-1)) / 2, with base cases
    p(i, 0) = 1 and p(1, n) = 2**-n. Non-base results are memoized in
    `memo`; `saved` counts cache hits.
    """
    global saved
    assert i >= 1 and n >= 0
    key = (i, n)
    cached = memo.get(key)
    if cached is not None:
        saved += 1
        return cached
    if n == 0:
        return Fraction(1)  # base case — cheap, not worth caching
    if i == 1:
        return 2 ** -n  # closed form — cheap, not worth caching
    result = (p(i, n - 1) + p(i - 1, n - 1)) / 2
    memo[key] = result
    return result
# Command-line driver: one argument, the number of bridge groups.
if len(sys.argv) != 2:
    print(f'usage: {sys.argv[0]} [bridge groups]', file=sys.stderr)
    sys.exit(1)

n = int(sys.argv[1])
i_m = n + 1
# Field width needed to right-align the largest index.
digits = len(str(i_m))
print('survival odds:')
for i in range(1, i_m + 1):
    frac = p(Fraction(i), Fraction(n))
    print(f'i = {i:{digits}}: {float(frac):.8f} ({frac})')
print(f'saved: {saved}')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.