text stringlengths 8 6.05M |
|---|
from typing import TypeVar, Union
from pydantic import BaseModel
# Module-level sentinel and type aliases shared across the API layer.

# Unique marker used to distinguish "argument not supplied" from None.
sentinel = object()

# Readability alias: values of this type are expected to be URLs.
Url = str

# JSON-compatible value types (note: float is intentionally absent here).
JsonType = Union[int, str, bool, list, dict]

# Generic, unconstrained type variable.
T = TypeVar('T')

# Constrained, covariant type variables for request/response components.
T_body = TypeVar('T_body', BaseModel, int, str, bool, covariant=True)
T_json_obj = TypeVar('T_json_obj', BaseModel, list, int, str, bool, covariant=True)
T_headers = TypeVar('T_headers', BaseModel, dict, float, int, str, bool, covariant=True)
T_queries = TypeVar('T_queries', BaseModel, dict, float, int, str, bool, covariant=True)
T_path_args = TypeVar('T_path_args', BaseModel, dict, float, int, str, bool, covariant=True)
T_json = TypeVar('T_json', list, int, str, bool, covariant=True)

# Type variable bound to pydantic models.
T_model = TypeVar('T_model', bound=BaseModel)
T_client = TypeVar("T_client")

# Alias for the type of None (kept for pre-3.10 compatibility).
NoneType = type(None)

# Broader JSON union that also admits float and pydantic models.
JSONType = Union[int, float, str, bool, dict, list, BaseModel]

# String aliases for tokens and message/event identifiers.
Token = str
StrOrBytesMsg = Union[str, bytes]
JsonMsg = Union[int, str, bool, dict]
EventData = MsgDict = dict
RpcID = SequenceID = EventName = MsgID = MsgStr = str
SeqNum = int
|
import asyncio
import logging
import gzip
from io import BytesIO
from datetime import datetime
from aiowebsocket.converses import AioWebSocket
import json
import sqlite3
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
conn = sqlite3.connect('btc_tick.db')
cursor = conn.cursor()
cursor.execute('create table if not exists tick (tradeId varchar(20) primary key, price float, amount float, ts bigint, direction varchar(20))')
cursor.close()
conn.commit()
async def startup(uri):
    """Subscribe to the Huobi BTC/USDT trade feed over a websocket and
    persist each trade tick into the module-level sqlite database.

    Parameters
    ----------
    uri : str
        Websocket endpoint to connect to.

    Runs until cancelled; answers server 'ping' frames with the matching
    'pong' so the connection stays alive.
    """
    # BUG FIX: the original connected to the global `remote` instead of the
    # `uri` argument, silently ignoring the caller's endpoint.
    async with AioWebSocket(uri) as aws:
        converse = aws.manipulator
        # Subscribe to the trade-detail channel.
        reqMsg = json.dumps({'sub': 'market.btcusdt.trade.detail', 'id': 1})
        await converse.send(reqMsg)
        while True:
            rec = await converse.receive()
            # Huobi frames arrive gzip-compressed; inflate to JSON text.
            buff = BytesIO(rec)
            f = gzip.GzipFile(fileobj=buff)
            res = f.read().decode('utf-8')
            rj = json.loads(res)
            if 'ping' in rj:
                # Heartbeat: echo the ping value back as a pong.
                backmsg = json.dumps({'pong': rj['ping']})
                await converse.send(backmsg)
                print(res, backmsg)
            if 'tick' in rj:
                cursor = conn.cursor()
                for tick in rj['tick']['data']:
                    try:
                        cursor.execute(
                            'insert into tick (tradeId, price, amount, ts, direction) values (?,?,?,?,?)',
                            (tick['tradeId'], tick['price'], tick['amount'], tick['ts'], tick['direction']))
                    except sqlite3.IntegrityError:
                        # BUG FIX: the original printed the exception *class*
                        # object; report the duplicate trade id instead.
                        print('duplicate tradeId, skipped:', tick['tradeId'])
                cursor.close()
                conn.commit()
                print(rj['tick']['data'])
            else:
                print(rj)
if __name__ == '__main__' :
    logging.basicConfig(level=logging.DEBUG)
    # Huobi public market-data websocket endpoint.
    remote = 'wss://api.huobi.pro/ws'
    try:
        # Run the subscriber until interrupted with Ctrl-C.
        asyncio.get_event_loop().run_until_complete(startup(remote))
    except KeyboardInterrupt as exc :
        logging.info('Quit.')
import unittest
from app import formatDate
from datetime import date
import database_api
from sqlalchemy import create_engine
from database import ExchangeRateModel,Base
from sqlalchemy.orm import sessionmaker
class DateTestCases(unittest.TestCase):
    """Unit tests for app.formatDate (date-string parsing)."""

    def test_date(self):
        # formatDate should tolerate both zero-padded and bare month/day.
        self.assertEqual(date(2018,1,1),formatDate("2018-01-01"))
        self.assertEqual(date(2018,9,2),formatDate("2018-9-02"))
        self.assertEqual(date(2018,8,2),formatDate("2018-8-02"))
        self.assertEqual(date(2017,11,1),formatDate("2017-11-1"))
class DatabaseTestCases(unittest.TestCase):
    """Integration tests for database_api against a throwaway sqlite file.

    NOTE(review): engine/session/model are created at class-definition time,
    so 'test_rate.db' is touched as soon as the module is imported — confirm
    that is intended; creating them in setUpClass would be cleaner.
    """
    engine = create_engine('sqlite:///test_rate.db')
    Base.metadata.create_all(engine)
    Base.metadata.bind = engine
    DBSession = sessionmaker(bind=engine)
    session = DBSession()
    # Shared fixture row, inserted once for the whole class.
    model = ExchangeRateModel(base='USD',currency='EUR',date=date(2018,1,1),rate=2.1)

    @classmethod
    def setUpClass(cls):
        # Persist the shared fixture row before any test runs.
        cls.session.add(cls.model)
        cls.session.commit()

    @classmethod
    def tearDownClass(cls):
        # Drop all tables so repeated runs start from a clean schema.
        Base.metadata.drop_all(cls.engine)

    def test_query_rate(self):
        # Looking up the fixture's key triple must return the fixture row.
        result = database_api.getRateForDate(session = self.session,base=self.model.base,currency=self.model.currency,date=self.model.date)
        self.assertEqual(self.model,result)

    def test_insert_rate(self):
        # A freshly inserted row must be retrievable by the same key triple.
        model = ExchangeRateModel(base='USD',currency='EUR',date=date(2018,1,2),rate=2.1)
        database_api.insert(self.session,model)
        result = database_api.getRateForDate(session = self.session,base=model.base,currency=model.currency,date=model.date)
        self.assertEqual(model,result)
if __name__ == '__main__':
    # Discover and run all test cases defined in this module.
    unittest.main()
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 24 20:40:49 2021
@author: jfalk
Pulling stock data using pandas-datareader
Analyzing it...somehow
FUCKING TENDIES BABYYYYYYYYYYYYYY
Want to estimate the low for the day to guess best time to buy
"""
import pandas as pd
import numpy as np
import os
import datetime
import matplotlib.pyplot as plt
import pandas_datareader.data as web
# Date window for the historical price pull.
startdate = datetime.datetime(2010,12,1)
enddate = datetime.datetime(2021,1,1)
# Daily OHLCV data for ticker TXMD from Yahoo Finance.
pull = web.DataReader("TXMD", "yahoo", startdate, enddate)
pull.Close.plot()
# NOTE(review): the correlation matrix is computed but its result is
# discarded — presumably meant for interactive inspection.
pull.corr()
cs = pull.Close
# Forward EWMA followed by a reversed-pass EWMA: a crude zero-lag smoother.
ema = cs.ewm(span=20).mean()
ema = ema[::-1].ewm(span=20).mean()[::-1]
# Raw closes in blue, smoothed series in red.
plt.plot(pull.Close.values,color='b')
plt.plot(ema.values,color='r')
plt.show()
#!/bin/python3
"""
Compute the edit distance between two strings
"""
def main():
    """Read two lines from stdin and print their edit distance."""
    print(edit_distance(input(), input()))
def edit_distance(A, B):
    """Return the Levenshtein (edit) distance between strings A and B.

    Uses the classic dynamic program with a (len(A)+1) x (len(B)+1) table,
    where dist[i][j] is the distance between A[:i] and B[:j].

    BUG FIX: the original -1-wrapping table layout raised IndexError when
    either string was empty; the standard table handles that case naturally.
    """
    a_len = len(A)
    b_len = len(B)
    dist = [[0] * (b_len + 1) for _ in range(a_len + 1)]
    # Row 0 / column 0 hold the cost of deleting/inserting a whole prefix.
    for i in range(a_len + 1):
        dist[i][0] = i
    for j in range(b_len + 1):
        dist[0][j] = j
    for i in range(1, a_len + 1):
        for j in range(1, b_len + 1):
            substitution = 0 if A[i - 1] == B[j - 1] else 1
            dist[i][j] = min(
                dist[i][j - 1] + 1,                # insert into A
                dist[i - 1][j] + 1,                # delete from A
                dist[i - 1][j - 1] + substitution, # substitute / match
            )
    return dist[a_len][b_len]
if __name__ == "__main__":
main()
|
import numpy as np
import torch
from sklearn.metrics import roc_curve
import sklearn.metrics as sk_metrics
def most_recent_n(x, orig, n, metric):
    """Evaluate `metric` after hiding each row's n most recent entries.

    Assumptions (confirm against callers):
      x      : prediction array, same shape as `orig`.
      orig   : 2-D array whose nonzero entries mark observed values.
      n      : number of right-most positive entries to zero out per row.
      metric : callable taking (sign-masked predictions, masked targets).
    """
    sgn = np.sign(orig)
    orig_mask = orig.copy()
    for i in range(orig_mask.shape[0]):
        # Count observed (nonzero) entries in this row.
        num_revs = orig_mask[i,:].astype(bool).sum()
        if n > num_revs:
            # Fewer than n observations: hide the entire row.
            orig_mask[i,:] = 0
        else:
            # Zero the last n positive entries (the most recent ones).
            # NOTE(review): uses `> 0`, so negative entries are never
            # masked — confirm orig is nonnegative.
            orig_mask[i,:][np.where(orig_mask[i,:] > 0)[0][-n:]] = 0
    return metric(x * sgn, orig_mask)
|
"""Hypergeometric Distribution
Gendankenexperiment:
Foreground and background sequence sets are pre-defined.
Given N foreground sequences and M-N background sequences,
we randomly select N sequences from M. We consider the consensus
residue in the foreground as being type I and ask what is the probability
of observing at least as many type I sequences in our selection as we see
in the foreground.
"""
from math import ceil
from scipy.stats import hypergeom
from biofrills import consensus, alnutils
from .shared import count_col, combined_frequencies
def compare_cols(fg_col, fg_cons, fg_size, fg_weights,
                 bg_col, bg_cons, bg_size, bg_weights,
                 aa_freqs, pseudo_size):
    """Compare alignments using the hypergeometric model.

    Returns the p-value of seeing at least the observed number of
    consensus-type residues in the foreground column when drawing
    fg_size sequences from the combined fg+bg pool.

    NOTE(review): bg_cons, aa_freqs and pseudo_size are accepted for
    interface compatibility but unused here.
    """
    # Number of consensus-type residues in the foreground column
    fg_cons_count = count_col(fg_col, fg_weights)[fg_cons]
    # Consensus residue count in the combined alignment column
    p_j = count_col(bg_col, bg_weights)[fg_cons] + fg_cons_count
    # Round fg counts & size to nearest integer for hypergeometric test
    fg_cons_count_i = max(1, int(ceil(fg_cons_count)))
    fg_size_i = int(ceil(fg_size))
    bg_size_i = int(ceil(bg_size))
    # Probability of fg col conservation vs. the combined/main set:
    # 1 - cdf(k-1) == P[X >= fg_cons_count_i].
    pvalue = 1-hypergeom.cdf(fg_cons_count_i-1,fg_size_i+bg_size_i,
                             p_j, fg_size_i)
    return pvalue
def compare_one(col, cons_aa, aln_size, weights, aa_freqs, pseudo_size):
    """Column probability using the hypergeometric model.

    NOTE(review): the hypergeometric computation is commented out below;
    the value actually returned is the weighted consensus fraction of the
    column. p_j and size_i are computed but only used by the disabled
    branch.
    """
    # cons_count = col.count(cons_aa)
    cons_count = count_col(col, weights)[cons_aa]
    cons_count_i = int(ceil(cons_count))
    p_j = int(ceil(aa_freqs[cons_aa]*aln_size))
    size_i = int(ceil(aln_size))
    pvalue = float(cons_count_i)/len(col)
    #pvalue = hypergeom.cdf(cons_count_i-1,size_i,
    #max(cons_count_i,p_j), len(col))
    return pvalue
|
from openspending.model import Classifier
from openspending.test import DatabaseTestCase, helpers as h
def make_classifier():
    """Build an unsaved Classifier fixture with fixed test values."""
    return Classifier(name='classifier_foo',
                      label='Foo Classifier',
                      level='1',
                      taxonomy='class.foo',
                      description='Denotes the foo property.',
                      parent='class')
class TestClassifier(DatabaseTestCase):
    """Round-trip test: save the fixture classifier, read its fields back."""

    def setup(self):
        super(TestClassifier, self).setup()
        self.cla = make_classifier()
        self.cla.save()

    def test_classifier_properties(self):
        # Each persisted attribute must match the fixture values exactly.
        h.assert_equal(self.cla.label, 'Foo Classifier')
        h.assert_equal(self.cla.level, '1')
        h.assert_equal(self.cla.taxonomy, 'class.foo')
        h.assert_equal(self.cla.description, 'Denotes the foo property.')
        h.assert_equal(self.cla.parent, 'class')
# Generated by Django 3.1.7 on 2021-04-07 07:16
from django.db import migrations, models
class Migration(migrations.Migration):
    """Strip Django's built-in permission machinery from the custom user
    model and lock down the email field (unique, not editable).

    Auto-generated migration; operations must not be reordered.
    """

    dependencies = [
        ('authentication', '0005_auto_20210401_1635'),
    ]

    operations = [
        # Authorization is evidently handled outside Django's
        # groups/permissions framework for this app.
        migrations.RemoveField(
            model_name='user',
            name='groups',
        ),
        migrations.RemoveField(
            model_name='user',
            name='is_superuser',
        ),
        migrations.RemoveField(
            model_name='user',
            name='user_permissions',
        ),
        migrations.AlterField(
            model_name='user',
            name='email',
            field=models.EmailField(editable=False, max_length=255, unique=True),
        ),
    ]
|
#coding=utf-8
from django import forms
from Apps.Materia.models import Materia
#formulario para crear materia
class FormCrearMateria(forms.ModelForm):
    """ModelForm for creating a Materia (course subject)."""

    class Meta:
        model = Materia
        fields = [
            'id',
            'nombre',
            'intensidad',
        ]
        # Spanish UI labels rendered next to each input.
        labels = {
            'id': 'Identificación',
            'nombre': 'Nombre',
            'intensidad': 'Intensidad horaria',
        }
        # Bootstrap styling for every widget.
        widgets = {
            'id': forms.TextInput(attrs={'class': 'form-control'}),
            'nombre': forms.TextInput(attrs={'class': 'form-control'}),
            'intensidad': forms.TextInput(attrs={'class': 'form-control'}),
        }
#formulario para crear materia
class FormActualizarMateria(forms.ModelForm):
    """ModelForm for updating a Materia; the primary key is rendered
    read-only so it cannot be edited from the form."""

    class Meta:
        model = Materia
        fields = [
            'id',
            'nombre',
            'intensidad',
        ]
        # Spanish UI labels rendered next to each input.
        labels = {
            'id': 'Identificación',
            'nombre': 'Nombre',
            'intensidad': 'Intensidad horaria',
        }
        # Bootstrap styling; id is read-only in update mode.
        widgets = {
            'id': forms.TextInput(attrs={'readonly':'readonly', 'class': 'form-control form-control-sm'}),
            'nombre': forms.TextInput(attrs={'class': 'form-control form-control-sm'}),
            'intensidad': forms.TextInput(attrs={'class': 'form-control form-control-sm'}),
        }
|
import logging
import math
from backend.group_service.group.domain.group import Group, GroupSerializer
from backend.user_service.user.domain.rider import Rider
from backend.user_service.user.domain.driver import Driver
from backend.common.messaging.infra.redis.redis_message_publisher \
import RedisMessagePublisher
from backend.common.event.group_created_event \
import GroupCreatedEvent
from backend.common.event.group_driver_updated_event \
import GroupDriverUpdatedEvent
from backend.common.event.group_cost_updated_event \
import GroupCostUpdatedEvent
logger = logging.getLogger(__name__)
class GroupApplicationService:
    """Application-service facade for Group lifecycle operations.

    Each method mutates the Group aggregate (plus related riders/drivers),
    publishes the corresponding domain event on Redis, and returns the
    serialized group.
    """

    def create_group(self, rider_id_list, from_location, to_location):
        """Create a group, attach the given riders, publish GroupCreatedEvent."""
        group = Group.objects.create(
            from_location=from_location,
            to_location=to_location)
        # Attach every rider to the new group.
        # (Idiom fix: iterate the ids directly instead of range(len(...)).)
        for rider_id in rider_id_list:
            rider = Rider.objects.get(pk=rider_id)
            rider.group = group
            rider.save()
        event = GroupCreatedEvent(
            group_id=group.id,
            rider_id_list=rider_id_list,
            from_location=from_location,
            to_location=to_location)
        # Consistency fix: use the module logger (previously defined but
        # unused) instead of bare print().
        logger.info('group created: %s', event)
        RedisMessagePublisher().publish_message(event)
        return GroupSerializer(group).data

    def driver_update_group(self, group_id, driver_id):
        """Assign a driver to the group and publish GroupDriverUpdatedEvent."""
        group = Group.objects.get(pk=group_id)
        group.driver_id = driver_id
        group.save()
        driver = Driver.objects.get(pk=driver_id)
        driver.group = group
        driver.save()
        event = GroupDriverUpdatedEvent(
            group_id=group.id,
            driver_id=driver_id,
            rider_id_list=[rider.id for rider in Rider.objects.filter(group_id=group_id)],
            from_location=group.from_location,
            to_location=group.to_location,
        )
        logger.info('group driver updated: %s', event)
        RedisMessagePublisher().publish_message(event)
        return GroupSerializer(group).data

    def cost_update_group(self, group_id, cost):
        """Set the group's total cost, split it per rider (rounded up to the
        nearest 100), and publish GroupCostUpdatedEvent."""
        group = Group.objects.get(pk=group_id)
        group.cost = cost
        group.save()
        rider_id_list = [rider.id for rider in Rider.objects.filter(group_id=group_id)]
        # Per-rider share, rounded UP to the nearest 100 currency units.
        rider_cost = math.ceil(cost / len(rider_id_list) / 100) * 100
        event = GroupCostUpdatedEvent(
            group_id=group.id,
            rider_id_list=rider_id_list,
            total_cost=cost,
            rider_cost=rider_cost,
        )
        logger.info('group cost updated: %s', event)
        RedisMessagePublisher().publish_message(event)
        return GroupSerializer(group).data
from echo_server import create_server_socket, send_msg, recv_msg
import socket
if __name__ == '__main__':
    # Connect to the local echo server, send one line, print the echo.
    client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client_socket.connect(('localhost', 8000))
    msg = input('Type the message:')
    try:
        send_msg(client_socket, msg)
        print('Send %s' % msg)
        msg = recv_msg(client_socket)
        print(msg)
    except ConnectionError:
        # Server dropped the connection mid-exchange.
        print('Socket is closed prematurely')
    finally:
        # Always release the socket, even on error.
        client_socket.close()
        print('Connection is closed')
|
from typing import Counter
def solution(string):
    """Return the first character of *string* that occurs exactly once;
    return the NUL character ('\\0') when every character repeats."""
    counts = Counter(string)
    for ch in string:
        if counts[ch] == 1:
            return ch
    return '\0'
print(solution('fjdjfljfdslfjdsj\n'))
"""
题目二:字符流中第一个只出现一次的字符
"""
class Solution:
    """Track a stream of single characters and report the first one that
    has appeared exactly once so far ('\\0' when there is none).

    Only supports characters with ord(ch) < 256.
    """

    def __init__(self):
        # _container[code] is -1 (unseen), -2 (repeated), or the stream
        # index at which the character first appeared.
        self._container = [-1] * 256
        self.index = 0

    def insert(self, ch):
        """Feed one character from the stream."""
        code = ord(ch)
        state = self._container[code]
        if state == -1:
            # First occurrence: remember its position in the stream.
            self._container[code] = self.index
        elif state >= 0:
            # Second occurrence: mark as repeated for good.
            self._container[code] = -2
        self.index += 1

    def first_appearing_once(self):
        """Return the earliest character currently seen exactly once."""
        best_char = '\0'
        best_index = float("inf")
        for code in range(256):
            position = self._container[code]
            if 0 <= position < best_index:
                best_index = position
                best_char = chr(code)
        return best_char
# Manual smoke test of the streaming solution.
s = Solution()
s.insert('a')
print(s.first_appearing_once())  # expected: a
s.insert("b")
print(s.first_appearing_once())  # expected: a
s.insert("a")
print(s.first_appearing_once())  # expected: b
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: Gusseppe Bravo <gbravor@uni.pe>
# License: BSD 3 clause
"""
This module provides a few of useful functions (actually, methods)
for describing the dataset which is to be studied.
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from pandas.tools.plotting import scatter_matrix
#try:
# sc.stop()
#except:
# pass
__all__ = [
'read', 'description', 'classBalance', 'hist', 'density']
class Analyze():
    """A class for exploratory data analysis over a Spark-backed dataset.

    Every plotting helper converts the data with `toPandas()`, so the
    dataset is assumed to fit in driver memory.
    """

    def __init__(self, definer):
        """The init class.

        Parameters
        ----------
        definer : object
            Analysis configuration carrying:
            problem_type (classification/regression), infer_algorithm,
            response (name of the class column), data_path, and data.
        """
        self.problem_type = definer.problem_type
        self.infer_algorithm = definer.infer_algorithm
        self.response = definer.response
        self.data_path = definer.data_path
        self.data = definer.data

    def pipeline(self):
        """Run the standard sequence of visual analyzers; return self."""
        analyzers = [self.hist, self.density, self.corr, self.scatter]
        for analyze in analyzers:
            analyze()
        return self

    def description(self):
        """Shows a basic data description.

        Returns
        -------
        out : pandas.DataFrame
            Summary statistics (count, mean, std, quantiles, ...).
        """
        return pd.DataFrame(self.data.describe())

    def classBalance(self):
        """Shows how balanced the class values are.

        Returns
        -------
        out : pandas.core.series.Series
            Series with the count of rows per class value.
        """
        return self.data.toPandas().groupby(self.response).size()

    def hist(self, ax=None):
        """Histogram of every column; shows the figure when no axes given."""
        self.data.toPandas().hist(color=[(0.196, 0.694, 0.823)], ax=ax, label='frecuencia')
        plt.legend(loc='best')
        if ax is None:
            plt.show()

    def density(self, ax=None):
        """Per-column kernel-density plots on a 3x3 grid."""
        self.data.toPandas().plot(kind='density',
                subplots=True, layout=(3, 3), sharex=False, ax=ax)
        if ax is None:
            plt.show()

    def corr(self, ax=None):
        """Correlation-matrix heatmap with a colorbar.

        BUG FIX: the original called `ax.set_xticks(...)` etc. even when
        `ax` was None (raising AttributeError); ticks and labels are now
        applied to whichever axes is actually drawn on.
        """
        corr = self.data.toPandas().corr()
        names = list(self.data.toPandas().columns.values)
        fig, ax1 = plt.subplots()
        target_ax = ax if ax is not None else ax1
        bar = target_ax.matshow(corr, vmin=-1, vmax=1)
        fig.colorbar(bar)
        target_ax.set_xticks(range(len(corr.columns)))
        target_ax.set_yticks(range(len(corr.columns)))
        target_ax.set_xticklabels(names)
        target_ax.set_yticklabels(names)
        if ax is None:
            plt.show()

    def scatter(self, ax=None):
        """Pairwise scatter matrix with KDE on the diagonal."""
        scatter_matrix(self.data.toPandas(), alpha=0.7, figsize=(6, 6), diagonal='kde', ax=ax)
        if ax is None:
            plt.show()

    def box(self, ax=None):
        """Per-column box plots on a 3x3 grid."""
        self.data.toPandas().plot(kind="box", subplots=True, layout=(3, 3), sharex=False, sharey=False, ax=ax)
        if ax is None:
            plt.show()
|
# Placeholder for a Google API key — replace with a real key, and load it
# from an environment variable rather than committing it to source control.
gkey = "get google api"
|
from typing import Optional, Dict
from summer import StrainStratification, Multiply
from autumn.models.covid_19.parameters import VocComponent
from autumn.models.covid_19.constants import DISEASE_COMPARTMENTS, Strain, INFECTION
def get_strain_strat(voc_params: Optional[Dict[str, VocComponent]]):
    """
    Stratify the model by strain, with one stratum for the wild/"ancestral"
    virus type plus one stratum per variant of concern ("VoC").

    Args:
        voc_params: All the VoC parameters (one VocComponent parameters
            object for each VoC), or None/empty when no VoCs are requested.

    Returns:
        The strain stratification summer object
    """
    # Robustness fix: the parameter is typed Optional, so treat None as
    # "no VoCs" instead of raising AttributeError on .keys().
    voc_names = list(voc_params.keys()) if voc_params else []

    # Stratify model
    strain_strat = StrainStratification("strain", [Strain.WILD_TYPE] + voc_names, DISEASE_COMPARTMENTS)

    # Start the entire population in the wild-type stratum, with no
    # transmissibility adjustment for wild type.
    population_split = {Strain.WILD_TYPE: 1.}
    transmissibility_adjustment = {Strain.WILD_TYPE: None}
    for voc_name in voc_names:
        population_split[voc_name] = 0.
        transmissibility_adjustment[voc_name] = Multiply(voc_params[voc_name].contact_rate_multiplier)

    # Apply population split
    strain_strat.set_population_split(population_split)

    # Apply transmissibility adjustments
    strain_strat.set_flow_adjustments(INFECTION, transmissibility_adjustment)

    return strain_strat
|
#Weighted Average of 4 items
#Has many issues
while True:
w1 = float(raw_input('Enter Weight 1\n'))
if w1 == 'next':
break
w2 = float(raw_input('Enter Weight 2\n'))
if w2 == 'next':
break
w3 = float(raw_input('Enter Weight 3\n'))
if w3 == 'next':
break
w4 = float(raw_input('Enter Weight 4\n'))
if w4 == 'next':
break
if (w1 + w2 + w3 + w4) != 1:
print 'Invalid Weights'
break
g1 = float(raw_input('Enter Grade 1\n'))
if g1 == 'next':
break
g2 = float(raw_input('Enter Grade 2\n'))
if g2 == 'next':
break
g3 = float(raw_input('Enter Grade 3\n'))
if g3 == 'next':
break
g4 = float(raw_input('Enter Grade 4\n'))
if (g1 + g2 + g3 + g4) >= 400:
print 'Enter Grades 0-100'
break
print 'Weighted Average is', float(w1*g1 + w2*g2 + w3*g3 + w4*g4), '%' |
import numpy as np
import pyqg
import pytest
import unittest
def QG():
return pyqg.QGModel
def Layered():
return pyqg.LayeredModel
def SQG():
return pyqg.SQGModel
def BT():
return pyqg.BTModel
@pytest.fixture(params=[QG, Layered, SQG, BT])
def model(request):
    # Parametrized over all four model factories: every test taking this
    # fixture runs once per model class. One forward step populates the
    # diagnostic fields that the parameterizations read.
    klass = request.param()
    model = klass()
    model._step_forward()
    return model
def test_smagorinsky(model):
smag = pyqg.Smagorinsky()
du, dv = smag(model)
def test_backscatter_biharmonic(model):
back = pyqg.BackscatterBiharmonic()
dq = back(model)
def test_zb2020(model):
zb20 = pyqg.ZannaBolton2020()
du, dv = zb20(model)
def test_addition_and_scaling(model, rtol=1e-11):
    # A linear combination of parameterizations must produce the same
    # tendencies as combining their individual outputs.
    back = pyqg.BackscatterBiharmonic()
    smag = pyqg.Smagorinsky()
    zb20 = pyqg.ZannaBolton2020()
    comb = 0.5*smag + 0.75*zb20
    du, dv = comb(model)
    np.testing.assert_allclose(du, 0.5*smag(model)[0] + 0.75*zb20(model)[0],
            rtol=rtol)
    np.testing.assert_allclose(dv, 0.5*smag(model)[1] + 0.75*zb20(model)[1],
            rtol=rtol)
    # can't add uv and q parameterizations
    with pytest.raises(AssertionError):
        back + smag
def test_ring(model):
ring = pyqg.RingForcing()
dq = ring(model)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python
import sys
import os
import optparse
import urlparse
from util import open_xml, filter_manifest, ProjectJSONEncoder
from project import Project, project_differences
import json
def resolve_manifest(node, filters=None, exclude=True):
    """Resolve a repo manifest XML node into a {path: Project} mapping.

    Applies optional project filters, then expands each <project> element's
    remote/revision against the manifest's <default> element.

    NOTE(review): the `exclude` parameter is accepted but never used —
    confirm whether filter_manifest was meant to receive it.
    """
    if filters:
        node = filter_manifest(node, filters)
    default_node = node.find('default')
    default_remote = default_node.get('remote')
    default_remote_url = node.find(".//remote[@name='%s']" % default_remote).get('fetch')
    default_revision = default_node.get('revision', 'HEAD')
    projects = {}
    for p_node in node.findall('project'):
        # Per-project remote overrides the manifest default.
        if p_node.get('remote'):
            url_base = node.find(".//remote[@name='%s']" % p_node.get('remote')).get('fetch')
        else:
            url_base = default_remote_url
        # This is what repo does, seems legit: prefix a throwaway scheme so
        # urljoin treats the base as absolute; it is stripped again below.
        if url_base.index(':') == url_base.index('/')-1:
            url_base = "gopher://" + url_base
        url = urlparse.urljoin(url_base, p_node.get('name'))
        if url.startswith('gopher://'):
            url = url[9:]
        p = Project(path=p_node.get('path'),
                    revision=p_node.get('revision', default_revision),
                    remote=p_node.get('remote', default_remote),
                    url=url,
                    upstream=p_node.get('upstream', None)
                    )
        projects[p.path] = p
    return projects
def print_summary(output, same, only_in_left, only_in_right, different, left_name, right_name):
    """Write a human-readable manifest diff report to `output`.

    Sections: unchanged projects, projects unique to each side, then a
    field-by-field breakdown of projects that exist on both sides but
    differ.
    """
    summary=[]
    # Make sure the reader can tell which side is which.
    if not 'left' in left_name:
        left_name += ' (left)'
    if not 'right' in right_name:
        right_name += ' (right)'
    summary.append("No differences for:")
    if len(same) > 0:
        summary.append("\n".join([" * '%s'" % x.path for x in same]))
    summary.append("Only in %s:" % left_name)
    for only_l in only_in_left:
        summary.append(" * '%s'" % only_l.path)
    summary.append("Only in %s:" % right_name)
    for only_r in only_in_right:
        summary.append(" * '%s'" % only_r.path)
    summary.append("Different:")
    for d in different:
        summary.append(" * '%s' -- %s vs %s" % (d[0].path, left_name, right_name))
        diffs = project_differences(d[0], d[1])
        # NOTE(review): the loop below rebinds `d`, shadowing the outer
        # loop variable; safe only because `d` is not used again after it.
        for d in diffs['different'].keys():
            summary.append(" * %s: '%s' vs '%s'" % (d, diffs['different'][d]['left'], diffs['different'][d]['right']))
        for s in [x for x in diffs['same'].keys() if x != 'path']:
            summary.append(" * %s: %s is the same" % (s, diffs['same'][s]))
    print >> output, "\n".join(summary)
def print_json_summary(output, same, only_in_left, only_in_right, different, left_name, right_name):
    """Emit the manifest diff as JSON to `output`.

    BUG FIX: the original ignored the `output` stream and always printed to
    stdout, so writing to a file produced an empty file; write to `output`
    the same way print_summary does.
    """
    diffs = {'left_name': left_name, 'right_name': right_name,
             'same': same,
             'only_in_left': only_in_left,
             'only_in_right': only_in_right}
    diffs['differences'] = {}
    for l, r in different:
        diffs['differences'][l.path] = project_differences(l, r)
    print >> output, json.dumps(diffs, cls=ProjectJSONEncoder, indent=2, sort_keys=True)
def diff_manifest_content(left, right, output_func, output, filters=None):
    """Resolve two manifest files and hand the categorized differences to
    `output_func` (a report or JSON writer).

    `output` may be a writable file object or a path; paths are opened in
    binary write mode.
    """
    left_projects = resolve_manifest(open_xml(left), filters)
    right_projects = resolve_manifest(open_xml(right), filters)
    # Use the trailing path component of each manifest as its side label.
    left_name = os.path.split(left.rstrip(os.sep))[1]
    right_name = os.path.split(right.rstrip(os.sep))[1]
    only_in_left = [] # list of projects that are only on the left
    only_in_right = [] # ditto, right
    in_both_sides = [] # list of paths that are on both sides
    different = [] # list of L/R pairs
    same = []
    for l_key in sorted(left_projects.keys()):
        if l_key not in right_projects.keys():
            only_in_left.append(left_projects[l_key])
        else:
            in_both_sides.append(l_key)
    for r_key in sorted(right_projects.keys()):
        if r_key not in left_projects.keys():
            only_in_right.append(right_projects[r_key])
    # Classify the shared paths as identical or different.
    for key in in_both_sides:
        if left_projects[key] == right_projects[key]:
            same.append(left_projects[key])
        else:
            different.append((left_projects[key], right_projects[key]))
    if hasattr(output, 'write'):
        output_func(output, same, only_in_left, only_in_right, different, left_name, right_name)
    else:
        with open(output, 'wb') as f:
            output_func(f, same, only_in_left, only_in_right, different, left_name, right_name)
def diff(left, right, output, output_format, filters):
    """Diff two manifest files and emit a 'report' or 'json' summary.

    BUG FIX: the directory-check error path referenced undefined names
    (`args`, `parser`); report the offending path directly and exit via
    sys.exit.
    """
    for i in (left, right):
        if os.path.isdir(i):
            print >> sys.stderr, "ERROR: %s is a directory" % i
            sys.exit(1)
    # Once there are more modes, this should error out if more than one mode is specified
    if output_format.lower() == 'json':
        out_func = print_json_summary
    elif output_format.lower() == 'report':
        out_func = print_summary
    else:
        print >> sys.stderr, "ERROR: Invalid output format selected"
        sys.exit(1)
    # Default to stdout when no output destination was given.
    if not output:
        output = sys.stdout
    diff_manifest_content(left, right, out_func, output=output, filters=filters)
|
import torch.optim as optim
class Train:
    """Trains a siamese network from a pair sampler with a pairwise
    criterion. Requires CUDA (inputs and model are moved with .cuda())."""

    def __init__(self, data_sampler, model, criterion):
        # data_sampler.sample() must yield (input_1, input_2, label) tensors.
        self.data_sampler = data_sampler
        self.criterion = criterion
        self.model = model

    def train(self, iterations, lr):
        """Run `iterations` Adam steps at learning rate `lr`, printing the
        mean loss every 50 steps."""
        optimizer = optim.Adam(self.model.parameters(), lr)
        avg_loss = 0
        for e in range(iterations):
            siamese_1, siamese_2, label = self.data_sampler.sample()
            siamese_1, siamese_2, label = siamese_1.cuda(), siamese_2.cuda(), label.cuda()
            optimizer.zero_grad()
            output1, output2 = self.model(siamese_1, siamese_2)
            loss = self.criterion(output1, output2, label)
            avg_loss = avg_loss + float(loss.item())
            loss.backward()
            optimizer.step()
            if e % 50 == 49:
                # Report the running average over the last 50 steps.
                loss = avg_loss / 50
                print("Step {} - lr {} - loss: {}".format(e, lr, loss))
                avg_loss = 0
        # error = self.siamese_nn.loss_func(2 ** 8)
        # self.siamese_nn.append(error.detach())
|
import psycopg2
import pandas as pd
import ast
import random
import datetime
def random_date():
    """Return a uniformly random date in [1950-01-01, 2002-01-01)."""
    earliest = datetime.date(1950, 1, 1)
    latest = datetime.date(2002, 1, 1)
    span_days = (latest - earliest).days
    # Single randrange draw, so seeded runs are reproducible.
    offset = random.randrange(span_days)
    return earliest + datetime.timedelta(days=offset)
# Database connection. NOTE(review): credentials are hard-coded — move them
# to environment variables / a config file before sharing this script.
connection = psycopg2.connect("host='localhost' dbname='movies_db_demo' user='postgres' password='tiendat148'")
mycursor = connection.cursor()
# Distinct user ids taken from the ratings export.
ratings = pd.read_csv('./data/ratings_70.csv')
users = ratings['userId'].drop_duplicates().tolist()
len_users = len(users)
# Candidate names and addresses: each file holds one Python-literal list
# on its first line, parsed safely with ast.literal_eval.
file_names = open('./data/names.txt','r')
names = file_names.readline()
names = ast.literal_eval(names)
file_names.close()
file_adress = open('./data/adress.txt','r')
adress = file_adress.readline()
adress = ast.literal_eval(adress)
file_adress.close()
# Populate the customer table with one synthetic profile per user id.
dem = 1  # progress counter
for userId in users:
    name_index = random.randint(0, len(names) - 1)
    adress_index = random.randint(0, len(adress) - 1)
    user_name = 'user_' + str(userId)
    password = 'user_' + str(userId)
    name = names[name_index]
    email = str(name).lower() + '@gmail.com'
    location = adress[adress_index]
    birthday = random_date()
    gender = 'male' if name_index < 100 else 'female'
    # SECURITY FIX: use a parameterized query instead of string-concatenated
    # SQL — names/addresses may contain quotes, and concatenation invites
    # SQL injection.
    sql = "INSERT INTO customer VALUES (%s, %s, %s, %s, %s, %s, %s, %s)"
    params = (userId, user_name, password, name, email, location, str(birthday), gender)
    try:
        mycursor.execute(sql, params)
        connection.commit()
        print(dem, '/', len_users)
        dem += 1
    except psycopg2.Error:
        # Robustness fix: catch only database errors (the original bare
        # `except` also swallowed KeyboardInterrupt and real bugs).
        connection.rollback()
import numpy
import pandas
from bokeh import models
from bokeh.models import HoverTool
from bokeh.plotting import figure, show
from bokeh import palettes
from sqlalchemy.orm import Session
from ticclat.flask_app.db import database
def corpus_size():
    """Build a horizontal bar chart of the word count per corpus.

    Word counts are scaled by 1e8 in SQL, so axis and tooltip values are
    in units of 10^8 tokens.

    BUG FIX: the hover tooltip referenced the non-existent column
    `@word_count`; the query aliases it as `sum_word_count`.
    """
    query = """
    SELECT SUM(word_count) / 1e8 AS sum_word_count,
           c.name AS name
    FROM documents
    LEFT JOIN corpusId_x_documentId cIxdI on documents.document_id = cIxdI.document_id
    LEFT JOIN corpora c on cIxdI.corpus_id = c.corpus_id
    GROUP BY c.corpus_id, c.name
    ORDER BY c.name ASC
    """
    connection = database.session.connection()
    df = pandas.read_sql(query, connection)
    p = figure(
        title="Corpus size",
        sizing_mode='stretch_both',
        y_range=df['name'],
        tools=['hover', 'pan', 'wheel_zoom', 'save', 'reset']
    )
    # Category10 palettes only exist for 3..10 entries: clamp, then slice
    # to the actual number of corpora.
    clipped_df_len = numpy.clip(len(df), 3, 10)
    df['color'] = palettes.Category10[clipped_df_len][0:len(df)]
    p.hbar(
        y='name',
        right='sum_word_count',
        source=models.ColumnDataSource(df),
        color='color',
        height=1,
        fill_alpha=0.9,
        line_width=0,
        muted_alpha=0,
    )
    p.xaxis.axis_label = 'Number of words (tokens) [× 10^8]'
    p.yaxis.axis_label = 'Corpus'
    hover = p.select(dict(type=HoverTool))
    hover.tooltips = [
        ("Corpus", "@name"),
        # Fixed column reference (was "@word_count", not present in df).
        ("Number of words;", "@sum_word_count × 10^8")
    ]
    hover.mode = 'mouse'
    return p
if __name__ == '__main__':
    # Stand-alone mode: open a DB session and show the chart in a browser.
    database.setup()
    database.session = Session(bind=database.engine.connect())
    p = corpus_size()
    show(p)
|
#!/usr/bin/python3
#above is path to the interpreter which the script will use
print("Hello, World!")
|
#Sort Stack : have all mins at the top
class Stack:
    """A minimal LIFO stack over a Python list (tail == top).

    Exposes `stack` (the backing list) and `size` because callers inspect
    both attributes directly.
    """

    def __init__(self):
        self.stack, self.size = [], 0

    def push(self , val):
        """Place val on top and log the new contents."""
        self.size += 1
        self.stack.append(val)
        print("Stack contents: " , self.stack)

    def pop(self):
        """Remove and return the top value; popping empty is a caller bug."""
        assert not self.isEmpty(), "Empty Stack"
        self.size -= 1
        return self.stack.pop()

    def peek(self):
        """Return (without removing) the top value, logging state first."""
        print(self.stack , self.size)
        return self.stack[-1]

    def isEmpty(self):
        """True when nothing is stored."""
        return self.size == 0
class SortStack:
    """Sorts a stack using one auxiliary stack (classic two-stack sort).

    NOTE(review): after sortMin() the sorted contents live in `self.buff`
    (largest value on top, smallest at the bottom) and `self.orig` is left
    empty. Despite the file's "mins at the top" comment, popping `buff`
    yields values in descending order — confirm which order callers expect.
    """

    def __init__(self):
        self.orig = Stack()   # unsorted input stack
        self.buff = Stack()   # auxiliary stack, kept sorted during sortMin

    def push(self , val):
        # New elements go onto the unsorted input stack.
        self.orig.push(val)

    def sortMin(self):
        # Repeatedly move the top of `orig` into its sorted position in
        # `buff`, spilling larger buff elements back onto `orig` first.
        while not self.orig.isEmpty():
            tmp = self.orig.pop()
            print('tmp: ' , tmp)
            print('buff: ' , self.buff.stack)
            while not self.buff.isEmpty() and self.buff.peek() > tmp:
                self.orig.push(self.buff.pop())
            self.buff.push(tmp)
            print('Orig: ', self.orig.stack)
            print('Buff: ', self.buff.stack)
# Demo: push a few values, then sort them into s.buff.
s = SortStack()
s.push(12)
s.push(8)
s.push(5)
s.push(7)
s.push(13)
s.sortMin()
|
# Dice game: for n rolls of three dice, compute the prize for each roll
# and print the maximum prize.
n = int(input())
res = 0
arr = [list(map(int, input().split())) for _ in range(n)]
for i in range(n):
    #tmp=input().split()
    tmp = arr[i]
    # Sort the three dice so equal values are adjacent and c is the max.
    tmp.sort()
    a,b,c = map(int, tmp)
    if a == b and b == c:
        # Three of a kind: 10,000 + die * 1,000.
        money = 10000+a*1000
    elif a == b or a == c:
        # A pair involving the smallest value: 1,000 + die * 100.
        money = 1000+a*100
    elif b == c:
        money = 1000+b*100
    else:
        # Sorted, so c is the largest value.
        money = c*100
    if money>res:
        res = money
print(res)
from .anyapi import AnyAPI
|
#!/usr/bin/env python
''' Make flat ntuple from GEN data tier
'''
#
# Standard imports and batch mode
#
import ROOT
import os, sys
ROOT.gROOT.SetBatch(True)
import itertools
from math import sqrt, cos, sin, pi, acos
import imp
#RootTools
from RootTools.core.standard import *
#TopEFT
from TopEFT.Tools.user import skim_output_directory
from TopEFT.Tools.helpers import deltaPhi, deltaR, deltaR2, cosThetaStar, closestOSDLMassToMZ
from TopEFT.Tools.DelphesReader import DelphesReader
from TopEFT.Tools.objectSelection import isGoodDelphesJet, isGoodDelphesLepton
#
# Arguments
#
import argparse
argParser = argparse.ArgumentParser(description = "Argument parser")
argParser.add_argument('--logLevel', action='store', default='INFO', nargs='?', choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'TRACE', 'NOTSET'], help="Log level for logging")
argParser.add_argument('--small', action='store_true', help='Run only on a small subset of the data?')#, default = True)
argParser.add_argument('--overwrite', action='store', nargs='?', choices = ['none', 'all', 'target'], default = 'none', help='Overwrite?')#, default = True)
argParser.add_argument('--targetDir', action='store', default='v5')
argParser.add_argument('--sample', action='store', default='fwlite_ttZ_ll_LO_scan', help="Name of the sample loaded from fwlite_benchmarks. Only if no inputFiles are specified")
argParser.add_argument('--inputFiles', action='store', nargs = '*', default=[])
argParser.add_argument('--targetSampleName', action='store', default=None, help="Name of the sample in case inputFile are specified. Otherwise ignored")
argParser.add_argument('--nJobs', action='store', nargs='?', type=int, default=1, help="Maximum number of simultaneous jobs.")
argParser.add_argument('--job', action='store', nargs='?', type=int, default=0, help="Run only job i")
args = argParser.parse_args()
#
# Logger
#
import TopEFT.Tools.logger as _logger
import RootTools.core.logger as _logger_rt
logger = _logger.get_logger( args.logLevel, logFile = None)
logger_rt = _logger_rt.get_logger(args.logLevel, logFile = None)
# Load the sample either from explicit --inputFiles or from the sample library module.
# NOTE(review): FWLiteSample and the RootTools names used below come from the
# star-import of RootTools.core.standard at the top of the file.
if len(args.inputFiles)>0:
    logger.info( "Input files found. Ignoring 'sample' argument. Files: %r", args.inputFiles)
    sample = FWLiteSample( args.targetSampleName, args.inputFiles)
else:
    sample_file = "$CMSSW_BASE/python/TopEFT/samples/delphes_upgrade.py"
    samples = imp.load_source( "samples", os.path.expandvars( sample_file ) )
    sample = getattr( samples, args.sample )
logger.debug( 'Loaded sample %s with %i files.', sample.name, len(sample.files) )
maxEvents = -1
if args.small:
    # quick test mode: cap the event count and use a single input file
    args.targetDir += "_small"
    maxEvents=5000 # Number of files
    sample.files=sample.files[:1]
# Per-event weight that normalises the sample to 1/fb of integrated luminosity.
xsec = sample.xsec
nEvents = sample.chain.GetEntries()
lumiweight1fb = xsec * 1000. / nEvents
logger.info( "Calculated lumiweight1fb %4.3f (xsec %4.3f, nEvents %i)", lumiweight1fb, xsec, nEvents )
# output directory
output_directory = os.path.join(skim_output_directory, 'delphes', args.targetDir, sample.name)
if not os.path.exists( output_directory ):
    os.makedirs( output_directory )
    logger.info( "Created output directory %s", output_directory )
# Load reweight pickle file if supposed to keep weights.
extra_variables = []
# Run only job number "args.job" from total of "args.nJobs"
if args.nJobs>1:
    n_files_before = len(sample.files)
    sample = sample.split(args.nJobs)[args.job]
    n_files_after = len(sample.files)
    logger.info( "Running job %i/%i over %i files from a total of %i.", args.job, args.nJobs, n_files_after, n_files_before)
def varnames( vec_vars ):
    """Strip the '/TYPE' suffix from each entry of a comma-separated branch spec."""
    return [spec.partition('/')[0] for spec in vec_vars.split(',')]
def vecSumPt(*args):
    """Transverse momentum of the vector sum of the given objects.

    Each argument is a dict with 'pt' and 'phi' entries.
    """
    px = sum( ( o['pt']*cos(o['phi']) for o in args ), 0. )
    py = sum( ( o['pt']*sin(o['phi']) for o in args ), 0. )
    return sqrt( px**2 + py**2 )
def addIndex( collection ):
    """Attach each object's position in *collection* as its 'index' entry (in place)."""
    for position, obj in enumerate(collection):
        obj['index'] = position
# Flat ntuple branch definitions (RootTools TreeVariable strings, "name/TYPE").
variables = []
# Lumi weight 1fb
variables += ["lumiweight1fb/F"]
# reconstructed bosons
variables += ["Z_l1_index/I", "Z_l2_index/I", "nonZ_l1_index/I", "nonZ_l2_index/I", "Z_pt/F", "Z_eta/F", "Z_phi/F", "Z_mass/F", "Z_lldPhi/F", "Z_lldR/F", "Z_cosThetaStar/F"]
# reconstructed leptons
lep_vars = "pt/F,eta/F,phi/F,pdgId/I,charge/I,isolationVar/F,isolationVarRhoCorr/F,sumPtCharged/F,sumPtNeutral/F,sumPtChargedPU/F,sumPt/F,ehadOverEem/F"
variables += ["lep[%s]"%lep_vars]
lep_varnames = varnames( lep_vars )
# object multiplicities
variables += ["nBTag/I", "nMuons/I", "nElectrons/I"]
# reconstructed jets
jet_vars = 'pt/F,eta/F,phi/F,bTag/F,bTagPhys/I,bTagAlgo/I'
variables += ["jet[%s]"%jet_vars]
jet_write_varnames = varnames( jet_vars )
# leading and sub-leading b-jet candidates are stored as flat scalar branches
variables += ["bj0_%s"%var for var in jet_vars.split(',')]
variables += ["bj1_%s"%var for var in jet_vars.split(',')]
jet_varnames= varnames( jet_vars )
# missing transverse energy
variables += ["met_pt/F", "met_phi/F"]
def fill_vector_collection( event, collection_name, collection_varnames, objects):
setattr( event, "n"+collection_name, len(objects) )
for i_obj, obj in enumerate(objects):
for var in collection_varnames:
getattr(event, collection_name+"_"+var)[i_obj] = obj[var]
def fill_vector( event, collection_name, collection_varnames, obj):
    """Copy one object-dict into flat attributes ``<collection_name>_<var>``.

    A ``None`` object is silently ignored (used for absent b-jets).
    """
    if obj is not None:
        for var in collection_varnames:
            setattr(event, "%s_%s" % (collection_name, var), obj[var])
reader = DelphesReader( sample )
def filler( event ):
    """Fill one output ntuple event from the current Delphes event.

    Python 2 code: filter() returns lists here, so in-place .sort() works.
    """
    if reader.position % 100==0: logger.info("At event %i/%i", reader.position, reader.nEvents)
    event.lumiweight1fb = lumiweight1fb
    # read jets
    jets = filter( lambda j: isGoodDelphesJet(j), reader.jets())
    jets.sort( key = lambda p:-p['pt'] )
    addIndex( jets )
    # make b jets
#    for j in jets:
#        print j['pt'], j['eta'], j['bTag'], j['bTagPhys'], j['bTagAlgo']
    bJets = filter( lambda j:j['bTagPhys']>=4, jets )
    # BUG FIX: this previously selected "not (bTagPhys<4)", which is the SAME
    # set as bJets, so bj0/bj1 never fell back to untagged jets. nonBJets must
    # be the complement of bJets.
    nonBJets = filter( lambda j:j['bTagPhys']<4, jets )
    # bj0/bj1: leading two jets, preferring b-tagged ones; None-padded
    bj0, bj1 = ( bJets + nonBJets + [None, None] )[:2]
    fill_vector( event, "bj0", jet_write_varnames, bj0)
    fill_vector( event, "bj1", jet_write_varnames, bj1)
    event.nBTag = len( bJets )
    # read leptons
    allLeps = reader.muonsTight() + reader.electrons()
    allLeps.sort( key = lambda p:-p['pt'] )
    leps = filter( isGoodDelphesLepton, allLeps )
    # cross-cleaning of reco-objects
#    leps = filter( lambda l: (min([999]+[deltaR2(l, j) for j in jets if j['pt']>30]) > 0.3**2 ), leps )
    # give index to leptons
    addIndex( leps )
    # Store
    fill_vector_collection( event, "lep", lep_varnames, leps )
    fill_vector_collection( event, "jet", jet_varnames, jets )
    event.nMuons = len( filter( lambda l:abs(l['pdgId'])==13, leps ) )
    event.nElectrons = len( filter( lambda l:abs(l['pdgId'])==11, leps ) )
    # MET
    met = reader.met()[0]
    event.met_pt = met['pt']
    event.met_phi = met['phi']
    # search for the opposite-sign same-flavour pair closest to the Z mass
    (event.Z_mass, Z_l1_index, Z_l2_index) = closestOSDLMassToMZ(leps)
    nonZ_indices = [ i for i in range(len(leps)) if i not in [Z_l1_index, Z_l2_index] ]
    event.Z_l1_index = leps[Z_l1_index]['index'] if Z_l1_index>=0 else -1
    event.Z_l2_index = leps[Z_l2_index]['index'] if Z_l2_index>=0 else -1
    event.nonZ_l1_index = leps[nonZ_indices[0]]['index'] if len(nonZ_indices)>0 else -1
    event.nonZ_l2_index = leps[nonZ_indices[1]]['index'] if len(nonZ_indices)>1 else -1
    # Store Z information
    if event.Z_mass>=0:
        # sanity check: the chosen pair must be opposite sign, same flavour
        if leps[event.Z_l1_index]['pdgId']*leps[event.Z_l2_index]['pdgId']>0 or abs(leps[event.Z_l1_index]['pdgId'])!=abs(leps[event.Z_l2_index]['pdgId']):
            raise RuntimeError( "not a Z! Should not happen" )
        Z_l1 = ROOT.TLorentzVector()
        Z_l1.SetPtEtaPhiM(leps[event.Z_l1_index]['pt'], leps[event.Z_l1_index]['eta'], leps[event.Z_l1_index]['phi'], 0 )
        Z_l2 = ROOT.TLorentzVector()
        Z_l2.SetPtEtaPhiM(leps[event.Z_l2_index]['pt'], leps[event.Z_l2_index]['eta'], leps[event.Z_l2_index]['phi'], 0 )
        Z = Z_l1 + Z_l2
        event.Z_pt = Z.Pt()
        event.Z_eta = Z.Eta()
        event.Z_phi = Z.Phi()
        event.Z_lldPhi = deltaPhi(leps[event.Z_l1_index]['phi'], leps[event.Z_l2_index]['phi'])
        event.Z_lldR = deltaR(leps[event.Z_l1_index], leps[event.Z_l2_index])
        # cosThetaStar is computed w.r.t. the negatively charged lepton (pdgId > 0)
        lm_index = event.Z_l1_index if leps[event.Z_l1_index]['pdgId'] > 0 else event.Z_l2_index
        event.Z_cosThetaStar = cosThetaStar(event.Z_mass, event.Z_pt, event.Z_eta, event.Z_phi, leps[lm_index]['pt'], leps[lm_index]['eta'], leps[lm_index]['phi'] )
# Book the output file/tree and run the event loop.
tmp_dir = ROOT.gDirectory
output_filename = os.path.join(output_directory, sample.name + '.root')
_logger. add_fileHandler( output_filename.replace('.root', '.log'), args.logLevel )
_logger_rt.add_fileHandler( output_filename.replace('.root', '_rt.log'), args.logLevel )
# Respect an existing output file unless --overwrite says otherwise.
if os.path.exists( output_filename ) and args.overwrite =='none' :
    logger.info( "File %s found. Quit.", output_filename )
    sys.exit(0)
output_file = ROOT.TFile( output_filename, 'recreate')
output_file.cd()
maker = TreeMaker(
    sequence = [ filler ],
    variables = [ TreeVariable.fromString(x) for x in variables ] + extra_variables,
    treeName = "Events"
    )
tmp_dir.cd()
# Event loop: TreeMaker fills one output entry per input event.
counter = 0
reader.start()
maker.start()
while reader.run( ):
    maker.run()
    counter += 1
    # maxEvents == -1 means "all events" (counter never reaches -1)
    if counter == maxEvents: break
logger.info( "Done with running over %i events.", reader.nEvents )
output_file.cd()
maker.tree.Write()
output_file.Close()
logger.info( "Written output file %s", output_filename )
|
from DataStructures.chapter05.排序.bubblesort import bubble_sort2, bubble_sort
from DataStructures.chapter05.排序.selectionsort import selectionsort
from DataStructures.chapter05.排序.insectionsort import insertion_sort
from DataStructures.chapter05.排序.shellsort import shell_sort
from DataStructures.chapter05.排序.归并排序 import merge_sort
from DataStructures.chapter05.排序.quicksort import quick_sort
from timeit import Timer
import random
def random_list(n, lower=0, upper=1000):
    """Return a list of ``n`` random integers drawn uniformly from [lower, upper].

    The bounds default to the previously hard-coded 0..1000, so existing
    callers behave exactly as before.
    """
    return [random.randint(lower, upper) for _ in range(n)]
# Benchmark the sorting implementations on one shared random list.
list_ = random_list(500)
print(list_)
# Each Timer statement sorts a fresh copy (list_[:]) so runs are independent.
bubble = Timer("bubble_sort(list_[:])", "from __main__ import bubble_sort, list_")
bubble2 = Timer("bubble_sort2(list_[:])", "from __main__ import bubble_sort2, list_")
select = Timer("selectionsort(list_[:])", "from __main__ import selectionsort, list_")
insertion = Timer("insertion_sort(list_[:])", "from __main__ import insertion_sort, list_")
shell = Timer("shell_sort(list_[:])", "from __main__ import shell_sort, list_")
merge = Timer("merge_sort(list_[:])", "from __main__ import merge_sort, list_")
quick = Timer("quick_sort(list_[:])", "from __main__ import quick_sort, list_")
# Total wall time for 100 repetitions of each sort.
bubble_time = bubble.timeit(100)
bubble2_time = bubble2.timeit(100)
select_time = select.timeit(100)
insertion_time = insertion.timeit(100)
shell_time = shell.timeit(100)
merge_time = merge.timeit(100)
quick_time = quick.timeit(100)
print(list_)
print("冒泡排序用时:", bubble_time)
# NOTE(review): the label below repeats "bubble sort" but reports bubble_sort2's time
print("冒泡排序用时:", bubble2_time)
print("选择排序用时:", select_time)
print("插入排序用时:", insertion_time)
print("希尔排序用时:", shell_time)
print("归并排序用时:", merge_time)
print("快速排序用时:", quick_time)
print("*"*30)
print()
print("*"*30)
|
# Deployment configuration template: fill in real values before use.
# NOTE: keep real credentials out of version control.
ejDBServer = ""
ejDBUsername = ""
ejDBPassword = ""
ejDBDBName = ""
ejMailHost = ""
# BUG FIX: this line was a bare "ejMailPort =" (a SyntaxError that made the
# module unimportable). 0 is a neutral placeholder; set the real SMTP port
# (e.g. 587) for deployment.
ejMailPort = 0
ejMailUser = ""
ejMailPassword = ""
ejMailSender = ""
ejMailReceiver = ""
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
from typing import Tuple, Union
import pendulum
def utcnow() -> datetime.datetime:
    """Return the current UTC time as a *naive* datetime (no tzinfo attached)."""
    return datetime.datetime.utcnow()
def _utctoday(now: datetime.datetime) -> datetime.date:
    """Return the date portion of the given (UTC) instant."""
    return now.date()
def utctoday() -> datetime.date:
    """Return today's date in UTC."""
    return datetime.datetime.utcnow().date()
def convert_timezone(
    dt: Union[pendulum.DateTime, datetime.datetime], timezone: str
) -> pendulum.DateTime:
    """Convert *dt* into the IANA timezone named by *timezone* via pendulum."""
    tz = pendulum.tz.timezone(timezone)
    # type: ignore silences the checker; pendulum's convert() is loosely typed
    return tz.convert(dt) # type: ignore
def expand_datetime(
    dt: pendulum.DateTime,
) -> Tuple[pendulum.DateTime, pendulum.DateTime]:
    """Return *dt* together with the instant exactly one day later."""
    return dt, dt.add(days=1)
|
#03d: Greedy Motif Search
#http://rosalind.info/problems/3d/
#Given: Integers k and t, followed by a collection of strings Dna.
# Example defaults; overwritten by the file parsed below.
k = 3
t = 5
Dna = ['GGCGTTCAGGCA', 'AAGAATCAGTCA', 'CAAGGAGTTCGC', 'CACGTCAATCAC', 'CAATAATATTCG']
#If parsing from file:
f = open('rosalind_3d.txt', 'r')
contents = f.read().strip().split('\n')
f.close()
# First line holds "k t"; the remaining lines are the DNA strings.
kt = contents.pop(0).strip()
k, t = [int(i) for i in kt.split(' ')]
Dna = contents
#Return: A collection of strings BestMotifs resulting from applying GreedyMotifSearch. If at any step you find more than one Profile-most probable k-mer in a given string, use the one occurring first.
import operator
import bio
def greedyMotifSearch(Dna, k, t):
    """Greedy motif search: return the best-scoring list of t k-mers, one per
    DNA string, preferring the first occurrence on ties.

    Python 2 code (xrange, builtin reduce, list-returning map/zip).
    """
    def initialise():
        # starting guess: the first k-mer of every string
        return [string[:k] for string in Dna]
    def getProfile(motifs):
        # per-position A/C/G/T counts; returned transposed (one row per position)
        arr = [list(m) for m in motifs]
        trans = map(list, zip(*arr))
        trans_string = [''.join(t) for t in trans]
        length = len(motifs[0])
        profile = [[0]*length for x in xrange(4)]
        for i in xrange(length):
            profile[0][i] = trans_string[i].count('A')
            profile[1][i] = trans_string[i].count('C')
            profile[2][i] = trans_string[i].count('G')
            profile[3][i] = trans_string[i].count('T')
        return map(list, zip(*profile))
    # from most probable kmer problem
    def getMostProbable(Text, k, Profile):
        keys = {}
        keys['A'] = 0
        keys['C'] = 1
        keys['G'] = 2
        keys['T'] = 3
        def getScore(kmer):
            # product of per-position profile counts (shadows the outer getScore)
            scores = [Profile[index][keys[k]] for index, k in enumerate(kmer)]
            return reduce(operator.mul, scores, 1)
        scores = []
        for i in range(k,len(Text)+1):
            scores.append((Text[i-k:i], getScore(Text[i-k:i])))
        # max() keeps the first of equally scored k-mers, as the problem requires
        return max(scores, key = lambda c: c[1])[0]
    def getConsensus(sequences):
        # most frequent base at every position
        string_length = len(sequences[0])
        base_counts = {}
        base_counts['A'] = [0]*string_length
        base_counts['C'] = [0]*string_length
        base_counts['G'] = [0]*string_length
        base_counts['T'] = [0]*string_length
        consensus = ''
        for s in sequences:
            for i in range(string_length):
                base_counts[s[i]][i] += 1
        for i in range(string_length):
            consensus += max(base_counts, key = lambda x: base_counts[x][i])
        return consensus
    def getScore(motifs):
        # total Hamming distance to the consensus; lower is better
        consensus = getConsensus(motifs)
        score = 0
        for m in motifs:
            score += bio.hammingDistance(consensus, m)
        return score
    best_motifs = initialise()
    for i in range(len(Dna[0]) - k + 1):
        # seed with each k-mer of the first string, then extend greedily
        motif1 = Dna[0][i:i+k]
        motifs = [motif1]
        for j in range(1, t):
            profile = getProfile(motifs)
            motifs.append(getMostProbable(Dna[j], k, profile))
        if getScore(motifs) < getScore(best_motifs):
            best_motifs = motifs
    return best_motifs
# Run the search and print one motif per line (Python 2 print statement).
best_motifs = greedyMotifSearch(Dna, k, t)
print '\n'.join(best_motifs)
|
from django.contrib import admin
from socials.models import Keyword
@admin.register(Keyword)
class KeywordModelAdmin(admin.ModelAdmin):
    """Django admin configuration for the Keyword model."""
    # Extend (rather than replace) the ModelAdmin defaults.
    list_display = admin.ModelAdmin.list_display + (
        'name',
        'created_at',
        'updated_at',
    )
    list_filter = admin.ModelAdmin.list_filter + (
        'created_at',
        'updated_at',
    )
    search_fields = (
        'name',
    )
    # no inline editors registered yet
    inlines = (
    )
|
import cv2
import numpy as np
# To draw a circle we use cv2.circle, passing the center (x, y), the radius,
# an RGB color and the line thickness.
img = cv2.imread("image.jpg")
print(img.shape)
# cv2.circle(img,(x,y),radius,(R,G,B),THICKNESS)
# x: distance from the x axis
# y: distance from the y axis
# radius: circle radius (integer)
# R,G,B: color as an RGB tuple, e.g. (255, 255, 0)
# thickness: line thickness (integer)
cv2.circle(img, (200, 130), 90, (255, 255, 0), 2)
cv2.imshow("Image", img)
cv2.waitKey(0)
|
from .S2Identified import S2Identified
from .terms import SBOL2
from rdflib import URIRef
from rdflib.namespace import RDF
class S2Attachment(S2Identified):
    """SBOL2 Attachment record: exposes the attachment's source URI, format,
    size and hash as properties backed by the typed RDF accessors inherited
    from S2Identified.
    """
    def __init__(self, g, uri):
        super(S2Attachment, self).__init__(g, uri)
    @property
    def source(self):
        # URI the attached file can be retrieved from
        return self.get_uri_property(SBOL2.source)
    @source.setter
    def source(self, source):
        self.set_uri_property(SBOL2.source, source)
    @property
    def format(self):
        # format identifier URI of the attachment
        return self.get_uri_property(SBOL2.format)
    @format.setter
    def format(self, format):
        self.set_uri_property(SBOL2.format, format)
    @property
    def size(self):
        # attachment size (integer-typed literal; presumably bytes -- confirm)
        return self.get_integer_property(SBOL2.size)
    @size.setter
    def size(self, size):
        self.set_integer_property(SBOL2.size, size)
    @property
    def hash(self):
        # content hash string; the hash algorithm is not recorded here
        return self.get_string_property(SBOL2.hash)
    @hash.setter
    def hash(self, hash):
        self.set_string_property(SBOL2.hash, hash)
|
from wingedsheep.carcassonne.objects.actions.action import Action
from wingedsheep.carcassonne.objects.coordinate import Coordinate
from wingedsheep.carcassonne.objects.tile import Tile
class TileAction(Action):
    """Action placing *tile* at *coordinate* after applying
    ``tile_rotations`` rotations (presumably quarter turns -- confirm
    against the game engine)."""
    def __init__(self, tile: Tile, coordinate: Coordinate, tile_rotations: int):
        self.tile = tile
        self.coordinate = coordinate
        self.tile_rotations = tile_rotations
|
from django.conf.urls import url, include
from rest_framework import routers
# import pdb;pdb.set_trace()
from organization_test_task import views
# DRF router + explicit routes for the company API.
router = routers.DefaultRouter()
# NOTE(review): `base_name` was renamed `basename` in DRF 3.9+; this targets an older DRF.
router.register(r'companies', views.CompanyListViewSet, base_name='company')
# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
urlpatterns = [
    url(r'^', include(router.urls)),
    # manual route for updating a company's headquarter, beside the router URLs
    url(r'^companies/(?P<pk>\d+)/update$', views.HeadquarterUpdateView.as_view(), name='update-company-headquarter'),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
]
|
class Solution:
    def runningSum(self, nums):
        """Return the running (prefix) sums of *nums*.

        runningSum([1, 2, 3]) -> [1, 3, 6]; an empty input yields [].
        Uses the stdlib itertools.accumulate instead of the manual
        append-loop; observable behavior is unchanged.
        """
        from itertools import accumulate  # local import keeps the module's import surface unchanged
        return list(accumulate(nums))
|
"""
use linked list to accomplish a queue ADT,
store both head and tail pointers
"""
class Empty(Exception):
    """Raised when an operation requires a non-empty queue."""
    pass
class Node:
    """Singly linked node holding one element and the link to its successor."""
    __slots__ = '_element', '_next'
    def __init__(self, element, next):
        self._element = element
        self._next = next
class QueueUseLinkedList:
    """a queue using linked list"""
    def __init__(self):
        self._head = None   # front of the queue (dequeue side)
        self._tail = None   # back of the queue (enqueue side)
        self._size = 0      # element count, so len() is O(1)
    def __len__(self):
        return self._size
    def is_empty(self):
        return self._size == 0
    def first(self):
        """Peek at the front element without removing it."""
        if self.is_empty():
            raise Empty("The queue is empty")
        return self._head._element
    def enqueue(self, value):
        """Append value at the tail."""
        fresh = Node(value, None)
        if self.is_empty():
            # first element: head and tail must reference the very same node
            self._head = fresh
        else:
            self._tail._next = fresh
        self._tail = fresh
        self._size += 1
    def dequeue(self):
        """Remove and return the front element; raise Empty when there is none."""
        if self.is_empty():
            raise Empty("The queue is already empty")
        front = self._head
        self._head = front._next
        self._size -= 1
        if self.is_empty():
            # the queue emptied out: drop the stale tail reference too
            self._tail = None
        return front._element
    def show(self): # just for debug purpose
        """Print the queue contents from head to tail."""
        if self.is_empty():
            print("Empty queue")
            return
        print("head => tail")
        node = self._head
        while node is not None:
            print(node._element, end=" ")
            node = node._next
        print()
if __name__ == '__main__':
    # Smoke test for QueueUseLinkedList.
    q = QueueUseLinkedList()
    for i in range(20): q.enqueue(i)
    q.show()
    for _ in range(10): print(q.dequeue())
    # BUG FIX: show() prints and returns None, so the original
    # ``print(q.show())`` emitted a spurious "None" line after the contents.
    q.show()
    print("length ", len(q))
    print("first ", q.first())
    print("is empty check: ", q.is_empty())
|
import discord
from discord.ext import commands
from discord.voice_client import VoiceClient
# Extensions (cogs) loaded at startup; the bot answers to the "xD" prefix.
startup_extensions = ["Music"]
bot = commands.Bot("xD")
@bot.event
async def on_ready():
    # Runs once the gateway connection is ready.
    # NOTE(review): concatenating bot.user.id works on discord.py 0.16 where
    # ids are str; on discord.py 1.0+ ids are int and this raises TypeError.
    print("bot online")
    print("Name " + bot.user.name)
    print("ID " + bot.user.id)
class Main_Commands():
    # Currently unused container; the commands below register on the bot directly.
    def __init__(self, bot):
        self.bot = bot
@bot.command(pass_context=True)
async def ping(ctx):
    # Joke ping response (bot.say is the discord.py 0.x API).
    await bot.say("nahh, it's just your internet that sucks")
@bot.command(pass_context=True)
async def hello(ctx):
    # Greets only the hard-coded owner id; everyone else is turned away.
    if ctx.message.author.id == '282789892843634688':
        await bot.say("uwu")
    else:
        await bot.say("you're not Nue, go away")
if __name__ == "__main__":
    # Load each extension, reporting failures without aborting the others.
    for extension in startup_extensions:
        try:
            bot.load_extension(extension)
        except Exception as e:
            exc = "{} : {}" . format(type(e) . __name__, e)
            print("Failed to load extension {}\n{}" . format(extension, exc))
    # NOTE(review): "token" is a placeholder; supply the real bot token
    # (and keep it out of version control).
    bot.run("token")
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 2 10:03:31 2019
@author: saib
"""
# Demo: several ways to traverse a dict (keys, values, items).
dict1 = {'name':'Saikumar', 'id':1235, 'cell':92992992, 'extn':8768}
print(dict1.keys())
print(dict1.values())
# iterate over the keys and look each value up
for key in dict1.keys():
    print(key, "=>", dict1[key])
print(dict1.items())
# iterate over (key, value) pairs directly
for key, value in dict1.items():
    print(key, "=>", value)
|
import requests
import json
from itty import *
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from tabledef import *
#For SQL database
engine = create_engine('sqlite:///brainSparkRequests.db', echo=False)
# create a Session
Session = sessionmaker(bind=engine)
session = Session()
#################################
# CHANGE VARIABLES TO MATCH BOT #
#################################
#managerRoomID = "Y2lzY29zcGFyazovL3VzL1JPT00vNGU1Yzk3YjAtNDQ3YS0xMWU4LWFjMjMtZjMwYWJmMTViZWNk" #with Paulo
#managerRoomID = "Y2lzY29zcGFyazovL3VzL1JPT00vNGU3NTczNzAtNGM2OC0xMWU4LTg3NmEtNGYyZTM0MDAyNjRm" #without Paulo
managerRoomID = "Y2lzY29zcGFyazovL3VzL1JPT00vOGRiN2I3NTAtODhkMS0xMWU4LTkzNzYtYzU0YzIzNDE1ZTdj" #Demo
# SECURITY NOTE(review): the access token is committed in clear text; move it
# to an environment variable / config file and rotate the exposed value.
ACCESS_TOKEN = "MjA1ODg3MGYtMWVhYi00OTBlLTkwYmQtZDUyOTAzODAzN2I4ZmU5NmRmNDAtY2Ex"
bot_email = "brainsparktest@sparkbot.io"
botName = "BrainSparkTest"
#Global variables, don't change:
conversationCounter = 0
commentList = []
#temp variable
test = True
def CreateRequest(_requestText, _requesterRoomId):
    """Register a new feedback request: ack the requester, create a
    resolution room, announce it in the manager room and persist the ids."""
    # NOTE(review): row count + 1 as an id is race-prone with concurrent requests
    ID = session.query(Request).count() + 1
    #confirm submission of question to user
    PostSparkMessage("Thank you for your feedback, we will respond as soon as possible. If you'd like to add anything to your feedback, please add #" + str(ID) + " to the message", _requesterRoomId)
    #create responseRoom
    responseRoomID = CreateSparkRoom(str(ID))
    #post to managerRoom and get id of the message
    messageId = PostSparkMessage("A new question has been asked: " + str(_requestText) + " --- Type '@" + str(botName) + " claim #" + str(ID) + "' in order to get added to the resolution Space for this question", managerRoomID)
    #Post all information to the db
    request = Request(_requesterRoomId, responseRoomID, messageId)
    session.add(request)
    session.commit()
    #post question in newly created room
    PostSparkMessage("The following question was asked: " + _requestText, responseRoomID)
    PostSparkMessage("In order to reply to this question, please address the answer to @" + str(botName), responseRoomID)
def findRoom(the_header,room_name):
    """Return the id of the Spark room whose title equals room_name, or None
    when no such room exists."""
    roomId=None
    uri = 'https://api.ciscospark.com/v1/rooms'
    resp = requests.get(uri, headers=the_header)
    resp = resp.json()
    for room in resp["items"]:
        if room["title"] == room_name:
            # BUG FIX: this previously assigned roomId=None on a match, so an
            # existing room was never reported and CreateSparkRoom created a
            # duplicate room every time. Return the matched room's id.
            roomId=room["id"]
            break
    return(roomId)
#Create a sparkroom to answer the question
def CreateSparkRoom(_requestID):
    """Return the id of the resolution room for this request id, creating the
    room through the Spark API when it does not exist yet."""
    global managerRoomID
    the_header = setHeaders()
    room_name = "resolve comment #" + _requestID
    roomId=findRoom(the_header,room_name)
    if roomId==None:
        # no such room yet: create it and use the id from the API response
        roomInfo = {"title":room_name}
        uri = 'https://api.ciscospark.com/v1/rooms'
        resp = requests.post(uri, json=roomInfo, headers=the_header)
        var = resp.json()
        roomId = var["id"]
    return roomId
#Looks through the text to look for any #xxx and returns those in a set.
def FindTags(_text):
    """Return one "#"-stripped tag found in _text.

    NOTE(review): a set of all tags is built but set.pop() returns an
    arbitrary element, so with several tags the choice is undefined; with no
    tag at all this raises KeyError.
    """
    tags = str(_text)
    # debug output (Python 2 print statements)
    print ""
    print tags
    print ""
    x = {tag.strip("#") for tag in tags.split() if tag.startswith("#")}
    return x.pop()
#Find the room that corresponds to the hashtag
def FindRoomToAdd(_text):
    """Return the resolution room id for the tag embedded in _text, or False
    when no matching request exists."""
    roomFound = False
    tag = FindTags(_text)
    print tag
    #checks if a room for that tag exists.
    for x in session.query(Request).filter(Request.id == int(tag)):
        # returns on the first match, so roomFound is effectively redundant
        roomFound = True
        return x.resolutionRoomID
    if roomFound == False:
        return False
#Find the room that corresponds to the hashtag
def AddToExistingComment(_text, _userRoomID):
    """Forward a follow-up message (tagged #<id>) from the original requester
    into that request's resolution room."""
    commentFound = False
    tag = FindTags(_text)
    # strip the "#<id>" marker (with or without a trailing space) from the text
    text = _text.replace("#" + tag + " ", "")
    text = text.replace("#" + tag, "")
    #checks if a room for that tag exists.
    for x in session.query(Request).filter(Request.id == int(tag)):
        # only the original requester may extend their own request
        if str(x.requesterID) == str(_userRoomID):
            commentFound = True
            PostSparkMessage("New message has been send by requester: " + text, x.resolutionRoomID)
    if commentFound == False:
        PostSparkMessage("room not found", _userRoomID)
#Add a user to the room
def AddToRoom(_roomID, _userID):
    """Add the given person to the Spark room as a non-moderator member."""
    header = setHeaders()
    member = {"roomId":_roomID,"personId": _userID, "isModerator": False}
    uri = 'https://api.ciscospark.com/v1/memberships'
    resp = requests.post(uri, json=member, headers=header)
#Post a spark message
def PostSparkMessage(message, roomId):
    """Post plain-text *message* to *roomId*; return the new message's id."""
    header = setHeaders()
    message = {"roomId":roomId,"text":message}
    uri = 'https://api.ciscospark.com/v1/messages'
    resp = requests.post(uri, json=message, headers=header)
    var = resp.json()
    messageID = var["id"]
    return messageID
#Relay the message that was posted to the user who requested the case
def RelayManagerMessage(_message, _roomID):
    """Relay an answer written in a resolution room back to the requester of
    the matching request."""
    global botName
    #TODO: extract text and roomID from _message (for testing we user _message as being the text)
    # drop the leading "@<botName> " mention from the relayed text
    text = _message.replace(botName + " ", "")
    roomID = _roomID
    roomFound = False
    for x in session.query(Request).filter(Request.resolutionRoomID == str(roomID)):
        roomFound = True
        PostSparkMessage("You've received the following response: " + text + " --- to respond, use #" + str(x.id), x.requesterID)
        PostSparkMessage("If this fully answered your question, type '#" + str(x.id) + " resolved'", x.requesterID)
    if roomFound == False:
        PostSparkMessage("Something has gone wrong with this request: requester ID not found", roomID)
#Create the Header for the Spark API
def setHeaders():
    """Build the authorization/content-type header dict for Spark REST calls."""
    return {
        'Authorization': 'Bearer ' + ACCESS_TOKEN,
        'Content-Type': 'application/json; charset=utf-8',
    }
def GetMessageText(_messageID):
    """Fetch a message from the Spark API and return its text with a few
    smart-punctuation characters normalised to ASCII."""
    header = setHeaders()
    uri = "https://api.ciscospark.com/v1/messages/" + str(_messageID)
    resp = requests.get(uri, headers=header)
    resp = resp.json()
    tempText = resp["text"]
    # json.dumps escapes non-ASCII chars as literal \uXXXX sequences, which is
    # what the replace() calls below match (Python 2 byte strings).
    text = json.dumps(tempText)
    text = text.strip('"')
    text = text.replace("\u2019", "\'")
    text = text.replace("\u201c", "\"")
    text = text.replace("\u201d", "\"")
    return text
def RemoveMessage(_messageId):
    """Delete a Spark message by id via the REST API."""
    header = setHeaders()
    uri = "https://api.ciscospark.com/v1/messages/" + str(_messageId)
    resp = requests.delete(uri, headers=header)
def CloseRoom(_roomID):
    """Delete a Spark room by id via the REST API."""
    header = setHeaders()
    uri = "https://api.ciscospark.com/v1/rooms/" + str(_roomID)
    resp = requests.delete(uri, headers=header)
def ResolveRoom(_text):
    """Close out the request tagged in _text: delete its resolution room and
    the manager-room announcement, then blank the stored ids."""
    tag = FindTags(_text)
    for x in session.query(Request).filter(Request.id == int(tag)):
        CloseRoom(x.resolutionRoomID)
        RemoveMessage(x.messageID)
        # keep the row (its id stays reserved) but clear the live references
        x.requesterID = None
        x.resolutionRoomID = None
        x.messageID = None
        session.commit()
@post('/')
def index(request):
    """Webhook endpoint: dispatch each incoming Spark message.

    Routing: in the manager room, "claim #id" adds the sender to that
    request's resolution room; elsewhere, a "#id" message either resolves or
    extends an existing request, a message mentioning the bot inside a
    resolution room is relayed back to the requester, and anything else
    opens a new request.
    """
    global managerRoomID
    global bot_email
    global botName
    webhook = json.loads(request.body) # get payload from webhook
    room_id = webhook['data']['roomId'] # get room id from message
    message_id = webhook['data']['id'] # get message id
    sender_id = webhook['data']['personId'] #get id of the sender
    sender_email = webhook['data']['personEmail']
    messageText = GetMessageText(message_id) #get text in the message
    # ignore the bot's own messages to avoid feedback loops
    if str(sender_email) != str(bot_email):
        if str(room_id) == str(managerRoomID):
            if "claim #" in messageText.lower():
                roomID = FindRoomToAdd(messageText)
                if roomID != False:
                    AddToRoom(roomID, sender_id)
                else:
                    PostSparkMessage("Tag not found", managerRoomID)
        else:
            if "#" in messageText:
                if "resolved" in messageText.lower():
                    ResolveRoom(messageText)
                else:
                    AddToExistingComment(messageText, room_id)
            elif str(botName) in messageText:
                RelayManagerMessage(messageText, room_id)
            else:
                CreateRequest(messageText, room_id)
    return "true"
run_itty(server='wsgiref', host='0.0.0.0', port=10010)
# epochs 100적용
# validation_split, callback 적용
# early_stopping 5 적용
# Reduce LR 3 적용
# modelcheckpoint 폴더에 hdf5 파일 적용
import numpy as np
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Dropout, Input
from tensorflow.keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# 1. Data / preprocessing: one-hot labels, flattened 28x28 images scaled to [0, 1]
from tensorflow.keras.utils import to_categorical
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
x_train = x_train.reshape(60000, 28*28).astype('float32') / 255.
x_test = x_test.reshape(10000, 28*28).astype('float32') / 255.
# 2. Model
def build_model(drop=0.5, optimizer='adam'):
    """Build and compile a 3-hidden-layer MLP for 10-class MNIST.

    drop: dropout rate applied after every hidden layer.
    optimizer: optimizer name/instance accepted by Keras compile().
    """
    inputs = Input(shape=(28*28,), name='input')
    x = Dense(512, activation='relu', name='hidden1')(inputs)
    x = Dropout(drop)(x)
    # BUG FIX: hidden2 and hidden3 were fed `inputs` instead of `x`, so the
    # earlier layers were dead branches and the network collapsed to a single
    # 128-unit hidden layer. Chain each layer on the previous output.
    x = Dense(256, activation='relu', name='hidden2')(x)
    x = Dropout(drop)(x)
    x = Dense(128, activation='relu', name='hidden3')(x)
    x = Dropout(drop)(x)
    outputs = Dense(10, activation='softmax', name='outpus')(x)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['acc'])
    return model
def create_hyperparameters():
    """Hyperparameter search space for the scikit-learn search wrappers.

    Returns a dict keyed by the parameter names KerasClassifier expects:
    batch_size (fit), optimizer and drop (build_model arguments).
    """
    batches = [10, 20, 30, 40, 50]
    # BUG FIX: 'rmseprop' is not a Keras optimizer identifier and raised
    # ValueError whenever the search sampled it; the correct name is 'rmsprop'.
    optimizers = ['rmsprop', 'adam', 'adadelta']
    dropout = [0.1, 0.2, 0.3]
    return {"batch_size":batches, "optimizer":optimizers, "drop":dropout}
hyperparameters = create_hyperparameters()
model2 = build_model()
# Wrap the Keras model so scikit-learn's search API can drive it
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
model2 = KerasClassifier(build_fn=build_model, verbose=1, epochs=3)
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
search = RandomizedSearchCV(model2, hyperparameters, cv=3)
# search = GridSearchCV(model2, hyperparameters, cv=3)
filepath = '../data/modelcheckpoint/k61_mnist_{epoch:02d}-{val_loss:.4f}.hdf5'
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
es = EarlyStopping(monitor='val_loss', patience=5, mode='auto')
cp = ModelCheckpoint(filepath=filepath, monitor='val_loss', save_best_only=True, mode='auto')
lr = ReduceLROnPlateau(monitor='val_loss', patience=3, factor=0.5, mode='auto')
# NOTE(review): cp (ModelCheckpoint) is built but not passed in callbacks
# below, so no .hdf5 checkpoints are written -- confirm whether intended.
search.fit(x_train, y_train, validation_split=0.2,verbose=1, epochs=100, callbacks=[es, lr])
print(search.best_params_)
print(search.best_estimator_)
# cross-validated score; differs from the held-out test score below
print(search.best_score_)
acc = search.score(x_test, y_test)
print('최종 acc :', acc)
"""project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from service import views as service_views
from smile import views as smile_views
from django.conf.urls.static import static
from django.conf import settings
# URL routes for the smile-training service views.
urlpatterns = [
    path('', service_views.mainpage, name='mainpage'),
    path('smile0/', service_views.smile_prepare, name='smile_prepare'),
    path('smile/', service_views.smile_study, name='smile_study'),
    path('empathy/', service_views.empathy_training, name='empathy_training'),
    path('compare/', service_views.compare_photos, name='compare'),
]
# Serve collected static files at STATIC_URL (development convenience).
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
# import sqlite3 package
import sqlite3
def insert_db(db_path="classroomDB.db"):
    """Insert the sample classroom records into the ``classroom`` table.

    db_path: path to the SQLite database file; the default preserves the
    original hard-coded behavior. The ``classroom`` table must already exist.

    Uses a parameterized executemany instead of string-formatted SQL: this
    avoids SQL injection / quoting bugs and batches all inserts in one call.
    The connection is closed even if an insert fails.
    """
    # sample data: (student_id, name, gender, physics, chemistry, mathematics)
    classroom_data = [(1, "Raj", "M", 70, 84, 92),
                      (2, "Poonam", "F", 87, 69, 93),
                      (3, "Nik", "M", 65, 83, 90),
                      (4, "Rahul", "F", 83, 76, 89)]
    # open connection
    connection = sqlite3.connect(db_path)
    try:
        cursor = connection.cursor()
        # insert all student records in one parameterized batch
        cursor.executemany(
            "INSERT INTO classroom "
            "(student_id, name, gender, physics_marks, chemistry_marks, mathematics_marks) "
            "VALUES (?, ?, ?, ?, ?, ?);",
            classroom_data,
        )
        # commit the changes
        connection.commit()
    finally:
        # close the connection
        connection.close()
if __name__ == "__main__":
    # NOTE(review): the message says "Creating Data Base" but insert_db only
    # inserts rows; the classroom table must already exist.
    print("Creating Data Base")
    insert_db()
|
from nose.tools import with_setup, ok_, eq_, assert_almost_equal, nottest, assert_not_equal
import torch
from gtnlplib.constants import *
import numpy as np
#7.1a
def test_model_en_dev_accuracy1():
    # NOTE(review): `scorer` is never imported in this module, so these tests
    # raise NameError as written -- confirm the intended import.
    confusion = scorer.get_confusion(DEV_FILE,'model-dev-en.preds')
    acc = scorer.accuracy(confusion)
    ok_(acc > .85)
#7.1b
def test_model_en_dev_accuracy2():
    # English dev-set accuracy, tighter threshold
    confusion = scorer.get_confusion(DEV_FILE,'model-dev-en.preds')
    acc = scorer.accuracy(confusion)
    ok_(acc > .86)
#7.1c
def test_model_en_dev_accuracy3():
    # English dev-set accuracy, tightest threshold
    confusion = scorer.get_confusion(DEV_FILE,'model-dev-en.preds')
    acc = scorer.accuracy(confusion)
    ok_(acc > .87)
#7.1d
def test_model_en_test_accuracy1():
    # English test-set accuracy at increasing thresholds (.84/.85/.86)
    confusion = scorer.get_confusion(TEST_FILE,'model-te-en.preds')
    acc = scorer.accuracy(confusion)
    ok_(acc > .84)
#7.1e
def test_model_en_test_accuracy2():
    confusion = scorer.get_confusion(TEST_FILE,'model-te-en.preds')
    acc = scorer.accuracy(confusion)
    ok_(acc > .85)
#7.1f
def test_model_en_test_accuracy3():
    confusion = scorer.get_confusion(TEST_FILE,'model-te-en.preds')
    acc = scorer.accuracy(confusion)
    ok_(acc > .86)
#7.1g
def test_model_nr_dev_accuracy1():
    # NR dev-set accuracy at increasing thresholds (.85/.86/.87)
    confusion = scorer.get_confusion(NR_DEV_FILE,'model-dev-nr.preds')
    acc = scorer.accuracy(confusion)
    ok_(acc > .85)
#7.1h
def test_model_nr_dev_accuracy2():
    confusion = scorer.get_confusion(NR_DEV_FILE,'model-dev-nr.preds')
    acc = scorer.accuracy(confusion)
    ok_(acc > .86)
#7.1i
def test_model_nr_dev_accuracy3():
    confusion = scorer.get_confusion(NR_DEV_FILE,'model-dev-nr.preds')
    acc = scorer.accuracy(confusion)
    ok_(acc > .87)
#7.1j
def test_model_nr_test_accuracy1():
    # NR test-set accuracy at increasing thresholds (.84/.85/.86)
    confusion = scorer.get_confusion(NR_TEST_FILE,'model-te-nr.preds')
    acc = scorer.accuracy(confusion)
    ok_(acc > .84)
#7.1k
def test_model_nr_test_accuracy2():
    confusion = scorer.get_confusion(NR_TEST_FILE,'model-te-nr.preds')
    acc = scorer.accuracy(confusion)
    ok_(acc > .85)
#7.1L
def test_model_nr_test_accuracy3():
    confusion = scorer.get_confusion(NR_TEST_FILE,'model-te-nr.preds')
    acc = scorer.accuracy(confusion)
    ok_(acc > .86)
|
from unittest import TestCase
from src.d3_network import ip_provider
class TestStaticIpProvider(TestCase):
    """Unit test for ip_provider.StaticIpProvider."""
    def test_given_host_ip_when_create_static_ip_provider_then_get_host_ip_return_host_ip(self):
        # the provider should echo back the ip it was constructed with
        static_host_ip = '10.42.0.78'
        scanner = ip_provider.StaticIpProvider(static_host_ip)
        self.assertEqual(scanner.get_host_ip(), static_host_ip)
|
#!/usr/bin/env python
import pigpio
from grove import grove_pwm_buzzer,grove_4_digit_display, grove_led, grove_slide_potentiometer
from grove.grove_button import GroveButton
# Hardware wiring (GPIO pin numbers for the Grove peripherals).
servo_pin = 18
buzzer_pin = 12
# 4-digit display on pins 16/17 at maximum brightness.
disp = grove_4_digit_display.Grove(16,17, brightness=grove_4_digit_display.BRIGHT_HIGHEST)
# NOTE(review): `A0` is not defined in this file and is not brought in by the
# imports above -- as written this line raises NameError. TODO: define A0 (the
# potentiometer's analog channel) or import it from the grove package.
poti = grove_slide_potentiometer.Grove(A0)
led = grove_led.Grove(5)
btn = GroveButton(4)
|
from onegov.ballot.models import ElectionCompound
from onegov.core.collection import Pagination
from sqlalchemy import cast
from sqlalchemy import desc
from sqlalchemy import distinct
from sqlalchemy import extract
from sqlalchemy import Integer
class ElectionCompoundCollectionPagination(Pagination):
    """Pagination over election compounds, optionally restricted to a year."""

    def __init__(self, session, page=0, year=None):
        self.session = session
        self.page = page
        self.year = year

    def __eq__(self, other):
        # Two paginations are equal when they point at the same year and page.
        return (self.year, self.page) == (other.year, other.page)

    def subset(self):
        """Return the ordered (and optionally year-filtered) query."""
        compounds = self.query().order_by(
            desc(ElectionCompound.date),
            ElectionCompound.shortcode,
            ElectionCompound.title,
        )
        if self.year:
            compounds = compounds.filter(
                extract('year', ElectionCompound.date) == self.year
            )
        return compounds

    @property
    def page_index(self):
        return self.page

    def page_by_index(self, index):
        # Same collection, different page.
        return self.__class__(self.session, index, self.year)

    def for_year(self, year):
        # New collection restricted to *year*, starting at the first page.
        return self.__class__(self.session, 0, year)
class ElectionCompoundCollection(ElectionCompoundCollectionPagination):
    """Provides queries over all election compounds in the session."""

    def query(self):
        return self.session.query(ElectionCompound)

    def get_latest(self):
        """ Returns the election compounds with the latest date. """
        newest = (
            self.query()
            .with_entities(ElectionCompound.date)
            .order_by(desc(ElectionCompound.date))
            .limit(1)
            .scalar()
        )
        return self.by_date(newest) if newest else None

    def get_years(self):
        """ Returns a list of years for which there are election compounds.
        """
        year_column = cast(extract('year', ElectionCompound.date), Integer)
        rows = (
            self.session.query(distinct(year_column))
            .order_by(desc(year_column))
            .all()
        )
        return [row[0] for row in rows]

    def by_date(self, date):
        """ Returns the election compounds on the given date. """
        return (
            self.query()
            .filter(ElectionCompound.date == date)
            .order_by(ElectionCompound.shortcode, ElectionCompound.title)
            .all()
        )

    def by_year(self, year=None):
        """ Returns the election compounds for the current/given year. """
        target = year or self.year
        return (
            self.query()
            .filter(extract('year', ElectionCompound.date) == target)
            .order_by(
                ElectionCompound.date,
                ElectionCompound.shortcode,
                ElectionCompound.title,
            )
            .all()
        )
|
# Always prefer setuptools over distutils
from setuptools import setup
# To use a consistent encoding
from codecs import open
from os import path
# Get the long description from the README file
with open(
    path.join(path.dirname(__file__), "README.md"), encoding="utf-8"
) as f:
    long_description = f.read()
# Arguments marked as "Required" below must be included for upload to PyPI.
# Fields marked as "Optional" may be commented out.
setup(
    name="jitcache",
    version="0.32",
    # Bug fix: the two implicitly-concatenated fragments used to join as
    # "...cache that isthread/process safe" -- a trailing space was missing.
    description="jitcache is a just-in-time key-value cache that is "
    "thread/process safe",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/sjtrny/jitcache",
    author="Stephen Tierney",
    author_email="sjtrny@gmail.com",
    keywords="cache jit key value dictionary thread process",
    py_modules=["jitcache"],  # single-module distribution (jitcache.py)
    install_requires=[],  # no runtime dependencies
    python_requires=">=3",
    classifiers=["License :: OSI Approved :: MIT License"],
)
|
import pytest
import numpy as np
from pathlib import Path
from spikeinterface import NumpySorting
from spikeinterface import download_dataset
from spikeinterface import extract_waveforms
from spikeinterface.core import get_noise_levels
from spikeinterface.extractors import read_mearec
from spikeinterface.sortingcomponents.matching import find_spikes_from_templates, matching_methods
# Resolve the cache directory: use pytest's shared test folder when the
# project's pytest configuration provides one, a local fallback otherwise.
if hasattr(pytest, "global_test_folder"):
    cache_folder = pytest.global_test_folder / "sortingcomponents"
else:
    cache_folder = Path("cache_folder") / "sortingcomponents"
def test_find_spikes_from_templates():
    """Smoke-test every registered template-matching method.

    Downloads a small MEArec dataset, extracts waveforms once, then runs
    ``find_spikes_from_templates`` for each method and wraps the detected
    spikes in a NumpySorting. No accuracy assertion is made -- this only
    checks that each matching pipeline runs end to end.
    """
    repo = 'https://gin.g-node.org/NeuralEnsemble/ephy_testing_data'
    remote_path = 'mearec/mearec_test_10s.h5'
    local_path = download_dataset(
        repo=repo, remote_path=remote_path, local_folder=None)
    recording, gt_sorting = read_mearec(local_path)
    folder = cache_folder / 'waveforms_mearec'
    # load_if_exists avoids re-extracting on repeated local runs.
    we = extract_waveforms(recording, gt_sorting, folder, load_if_exists=True,
                           ms_before=1, ms_after=2., max_spikes_per_unit=500, return_scaled=False,
                           n_jobs=1, chunk_size=10000)
    method_kwargs = {
        'waveform_extractor': we,
        'noise_levels': get_noise_levels(recording),
    }
    sampling_frequency = recording.get_sampling_frequency()
    result = {}
    for method in matching_methods.keys():
        if method == 'circus-omp':
            # too slow to be tested on CI
            continue
        spikes = find_spikes_from_templates(recording, method=method, method_kwargs=method_kwargs,
                                            n_jobs=2, chunk_size=1000, progress_bar=True)
        result[method] = NumpySorting.from_times_labels(
            spikes['sample_ind'], spikes['cluster_ind'], sampling_frequency)
    # debug
    # import matplotlib.pyplot as plt
    # import spikeinterface.full as si
    # metrics = si.compute_quality_metrics(we, metric_names=['snr'], load_if_exists=True, )
    # comparisons = {}
    # for method in matching_methods.keys():
    #     comp = si.compare_sorter_to_ground_truth(gt_sorting, result[method])
    #     comparisons[method] = comp
    #     si.plot_agreement_matrix(comp)
    #     plt.title(method)
    #     si.plot_sorting_performance(comp, metrics, performance_name='accuracy', metric_name='snr',)
    #     plt.title(method)
    # plt.show()
# Allow running this test directly as a script.
if __name__ == '__main__':
    test_find_spikes_from_templates()
|
#!/usr/bin/python
#ref: http://stackoverflow.com/questions/14508906/sending-messages-between-class-threads-python
#http://ja.pymotw.com/2/Queue/
import threading
import Queue
import time
Trigger= False #global variable to communicate btwn the threads
QTrigger= Queue.Queue()
IsActive= True
def Func1():
    # Busy-wait worker: polls the global Trigger flag. Not started below
    # (Func2's queue-based variant is used instead); kept for comparison.
    global IsActive,Trigger
    while IsActive:
        if Trigger:
            Trigger= False
            print 'p is pushed!!!'
        #time.sleep(0.001) #Cpu usage is 100% if this is not used
def Func2():
    # Queue-based worker: blocks on QTrigger.get(), so it uses no CPU while
    # idle. The 'q' message doubles as a wake-up so the loop can terminate.
    while IsActive:
        e= QTrigger.get()
        if e=='p':
            print 'p is pushed!!!'
        elif e=='q':
            print 'bye-bye!!!'
def MainThread():
    # Reads commands from stdin and notifies the workers via both the global
    # flag (for Func1) and the queue (for Func2).
    global IsActive
    while IsActive:
        line= raw_input('q to quit, p to print > ')
        if line == 'q':
            IsActive= False
            # Wake Func2 so it can observe IsActive == False and exit.
            QTrigger.put('q')
            break
        elif line == 'p':
            global Trigger
            Trigger= True
            QTrigger.put('p')
        else:
            print ' entered: ',line
# Start the queue-based worker and the stdin reader, then wait for both.
#t1= threading.Thread(name='func1', target=Func1)
t1= threading.Thread(name='func2', target=Func2)
t2= threading.Thread(name='main', target=MainThread)
#t1.setDaemon(True)
#t2.setDaemon(True)
t1.start()
t2.start()
t1.join()
t2.join()
print 'Finished'
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from usermanagement.models import *
from patientmanagement.models import *
from appointments.models import DoctorSlots
from datetime import datetime
from appointments.models import *
# Create your models here.
from django.utils import timezone
#
class consultfee(models.Model):
    """Consultation fee owed by a patient to a doctor."""
    patient = models.ForeignKey(Patient,on_delete=models.CASCADE)
    doctor = models.ForeignKey(User,on_delete=models.CASCADE)
    amount = models.IntegerField(null=True)
    is_paid=models.BooleanField(default=False)
    # Stored as free text rather than a DateField -- presumably historical;
    # verify existing data before migrating to a date type.
    generate_date = models.CharField(max_length=300, null=True, blank=True)
class Consultion(models.Model):
    """Billing record for a consultation appointment.

    NOTE(review): the class name looks like a typo for "Consultation";
    renaming requires a migration and caller updates, so it is kept.
    """
    appointment= models.ForeignKey(Appointment,on_delete=models.CASCADE)
    amount = models.IntegerField(null=True)
    is_paid = models.BooleanField(default= False)
    timestamp = models.DateTimeField(default=timezone.now)
class BillGroup(models.Model):
    """Categorises bills by purpose (OPD consultation, lab work, medication)."""
    purpose = [
        (1, "OPD"),
        (2, "LAB"),
        (3, "MED"),
    ]
    group = models.PositiveSmallIntegerField(choices=purpose)

    def __str__(self):
        # Bug fix: the old implementation returned "LAB" for both LAB (2) and
        # MED (3). Look the label up from the declared choices instead; fall
        # back to the raw value for anything unexpected.
        return dict(self.purpose).get(self.group, str(self.group))
class Bill(models.Model):
    """A bill issued for an appointment, identified by a generated slug id."""
    billid = models.SlugField(editable=False, primary_key=True)
    appointment = models.ForeignKey(Appointment,on_delete=models.CASCADE,null=True)
    group = models.ForeignKey(BillGroup,on_delete=models.CASCADE,null=True)
    amount = models.IntegerField(null=True,blank=True)

    def save(self, *args, **kwargs):
        # Generate the id once, on first save (removed a stray no-op
        # `count + 1` statement from the original).
        if not self.billid:
            # NOTE(review): count()-based ids race under concurrent saves and
            # collide after deletions -- consider a DB sequence or UUID.
            count = Bill.objects.all().count()+1
            group = self.group
            self.billid = "{}-{}-{:06d}".format('LHMR',group, count)
        super(Bill, self).save(*args, **kwargs)
import common_vars as c_vars
from sklearn.model_selection import train_test_split
import pandas as pd
# Load the full training set as a raw matrix (label assumed in the last column).
# NOTE(review): DataFrame.as_matrix() was removed in pandas 1.0 -- switch to
# .to_numpy() / .values if this script must run on a modern pandas.
df = pd.read_csv(c_vars.train_file).as_matrix()
# df = pd.read_csv(c_vars.train_sample_file).as_matrix()
# Stratified 90/10 train/validation split on the label column.
df_train, df_val = train_test_split(df, test_size = 0.1, random_state = 42, stratify = df[:,-1])
df_train = pd.DataFrame(df_train, columns = c_vars.header)
df_val = pd.DataFrame(df_val, columns = c_vars.header)
df_train.to_csv(c_vars.train_split_train, index = False)
df_val.to_csv(c_vars.train_split_val, index = False)
# split the train set into a smaller file for feature selection
# (a stratified 10% sample of the training split).
df_train = df_train.as_matrix()
_, df_val = train_test_split(df_train, test_size = 0.1, random_state = 42, stratify = df_train[:,-1])
df_val = pd.DataFrame(df_val, columns = c_vars.header)
df_val.to_csv(c_vars.train_split_train_sample, index = False)
from collections import defaultdict
def percentage_commission_per_row(row):
    """
    Compute the percentage-based commission for a single order row.

    Each row is a list of results from cloud_sql.get_all_data_per_shop(), which represents
    one order promoted by one inf for one campaign and one shop
    the row is ordered in:
    subtotal_price, uid, campaign_id, commission, commission_type, commission_percentage, order_complete.shop
    :return: (campaign_id, uid, commission) for this order.
        (Doc fix: the original docstring claimed a single number was returned.)
    """
    subtotal_price = float(row[0])
    uid = row[1]
    campaign_id = row[2]
    # A missing percentage counts as zero commission.
    commission_percentage = float(row[5]) if row[5] else 0
    if not commission_percentage or commission_percentage <= 0:
        return campaign_id, uid, 0
    if commission_percentage > 1:
        # Values above 1 are whole percentages (e.g. 25 means 25%).
        commission_percentage = commission_percentage / 100.0
    return campaign_id, uid, subtotal_price * commission_percentage
def percentage_commission_per_shop(sqldata):
    """
    Get and calculate all percentage based commission for given shop
    :param sqldata: rows as returned by cloud_sql.get_all_data_per_shop()
    :return: dict {
        total_percentage_commission,
        per_campaign_percentage_commission : {campaign_id: per_campaign_percentage_commission}
    }
    """
    result = {'total_percentage_commission': 0, 'per_campaign_percentage_commission': {}}
    if not sqldata or len(sqldata) == 0:
        return result
    # Accumulate commission per campaign, then total them up.
    per_campaign = {}
    for row in sqldata:
        campaign_id, _uid, commission = percentage_commission_per_row(row)
        per_campaign[campaign_id] = per_campaign.get(campaign_id, 0) + commission
    result['total_percentage_commission'] = sum(per_campaign.values())
    result['per_campaign_percentage_commission'] = per_campaign
    return result
def fixed_commission_per_shop(sql_data):
    """
    Sum the fixed commissions for a shop.

    Per row fields: fixed_commission, shop, campaign_id
    :return: {'total_fixed_commission': total,
              'per_campaign_fixed_commission': {campaign_id: fixed_commission}}
        (the per-campaign key is absent when there is no data)
    """
    result = {'total_fixed_commission': 0}
    if not sql_data or len(sql_data) == 0:
        return result
    per_campaign = {}
    total = 0
    for row in sql_data:
        fixed_comm = float(row[0]) if row[0] else 0
        total += fixed_comm
        # NOTE: a later row for the same campaign overwrites the earlier one
        # (original behavior, preserved).
        per_campaign[row[2]] = fixed_comm
    result['per_campaign_fixed_commission'] = per_campaign
    result['total_fixed_commission'] = total
    return result
def combine_final_commissions(fixed_commission, percentage_commission):
    """
    combine the two results dict from both percentage_commission_per_shop and fixed_commission_per_shop results
    :return: dict with final commission for each campaign, and total commission
    """
    final_results = {}
    # .get(..., 0) so a dict missing a total (e.g. empty data) does not raise.
    final_results['total_commission'] = (
        fixed_commission.get('total_fixed_commission', 0)
        + percentage_commission.get('total_percentage_commission', 0)
    )
    per_campaign = {}
    # Bug fix: this membership test used to be run against
    # percentage_commission, so the per-campaign fixed amounts were silently
    # dropped from the combined totals.
    if 'per_campaign_fixed_commission' in fixed_commission:
        for campaign_id, fixed_comm in fixed_commission['per_campaign_fixed_commission'].items():
            per_campaign[campaign_id] = fixed_comm
    if 'per_campaign_percentage_commission' in percentage_commission:
        for campaign_id, per_comm in percentage_commission['per_campaign_percentage_commission'].items():
            per_campaign[campaign_id] = per_campaign.get(campaign_id, 0) + per_comm
    final_results['per_campaign_total'] = per_campaign
    final_results['per_campaign_fixed'] = fixed_commission
    final_results['per_campaign_percentage'] = percentage_commission
    return final_results
def count_visits_daily(sqldata):
    """
    Aggregate visit counts per day.
    :param sqldata: results from the SQL query
        select COUNT(*) as visits, track_visit.shop, DATE(track_visit.timestamp) as visit_date
    :return: {'visit_counts': total, 'daily_visit': {date: count}}
        NOTE: daily_visit is an empty *list* when there is no data -- kept for
        backward compatibility with existing consumers.
    """
    if not sqldata or len(sqldata) == 0:
        return {'visit_counts': 0, 'daily_visit': []}
    daily = {}
    total = 0
    for row in sqldata:
        day = str(row[2])
        n_visits = int(row[0])
        daily[day] = daily.get(day, 0) + n_visits
        total += n_visits
    return {'visit_counts': total, 'daily_visit': daily}
def calculate_shop_daily_revenue(sqldata):
    """
    :param sqldata: results from the following schema
        SELECT SUM(subtotal_price) AS revenue, tracker_id.shop, order_date, campaign_id
    :return: list of {'daily_revenue': float, 'order_date': ...} entries,
        one per input row (empty list for no data).
    """
    if not sqldata:
        return []
    return [
        {'daily_revenue': float(row[0]), 'order_date': row[2]}
        for row in sqldata
    ]
def calculate_campaign_daily_revenue(sqldata):
    """
    Total revenue per campaign, summed over all days.

    :param sqldata: rows of
        SELECT SUM(subtotal_price) AS revenue, tracker_id.shop, order_date, campaign_id
    :return: {campaign_id: total_revenue}
        (Doc fix: despite the old name/comments no time series is produced;
        the unused `campaign_revenue_ts` accumulator was removed.)
    """
    campaign_revenue = {}
    if sqldata:
        for row in sqldata:
            campaign_id = row[3]
            campaign_revenue[campaign_id] = campaign_revenue.get(campaign_id, 0) + float(row[0])
    return campaign_revenue
def calculate_per_inf_roi(sqldata):
    """
    Each row is a list of results from cloud_sql.get_all_data_per_shop_per_campaign(), which represents
    one order promoted by one inf for one campaign and one shop
    the row is ordered in:
    subtotal_price, uid, campaign_id, commission, commission_type, commission_percentage, order_date, order_complete.shop
    :return: a series of data representing ROI per influencer:
        [{'uid', 'roi', 'total_revenue', 'total_commission', 'revenue_ts'}, ...]
    """
    total_revenue = {}
    total_fixed_commission = {}
    total_percentage_commission = {}
    total_commission = {}
    revenue_ts = defaultdict(dict)
    total_roi = {}
    for row in sqldata:
        uid = row[1]
        cur_rev = float(row[0])
        total_revenue[uid] = total_revenue.get(uid, 0.0) + cur_rev
        # Missing commission fields count as zero.
        cur_fixed_commission = float(row[3] or 0)
        total_fixed_commission[uid] = (
            total_fixed_commission.get(uid, 0.0) + cur_fixed_commission
        )
        cur_commission_percentage = float(row[5] or 0)
        total_percentage_commission[uid] = (
            total_percentage_commission.get(uid, 0.0)
            + cur_commission_percentage * cur_rev
        )
        # Daily revenue time series per influencer, keyed by mm/dd/yyyy.
        order_date = row[6].strftime("%m/%d/%Y")
        day_map = revenue_ts[uid]
        day_map[order_date] = day_map.get(order_date, 0.0) + cur_rev
    for uid, fixed_comm in total_fixed_commission.items():
        commission = fixed_comm + total_percentage_commission[uid]
        total_commission[uid] = commission
        if commission:
            # ROI is clamped at zero (no negative ROI reported).
            total_roi[uid] = max((total_revenue[uid] - commission) / commission, 0)
        else:
            # Bug fix: the original raised ZeroDivisionError for influencers
            # with no commission at all; report zero ROI instead.
            total_roi[uid] = 0
    res = []
    for uid, roi in total_roi.items():
        res.append({
            'uid': uid,
            'roi': float(roi),
            'total_revenue': float(total_revenue[uid]),
            'total_commission': float(total_commission[uid]),
            'revenue_ts': revenue_ts[uid]
        })
    return res
|
import pytest
from tests import config as conf
from tests import experiment as exp
@pytest.mark.nightly  # type: ignore
def test_cifar10_pytorch_accuracy() -> None:
    """Nightly check: the cifar10 PyTorch example must reach > 0.74 accuracy."""
    config = conf.load_config(conf.official_examples_path("cifar10_cnn_pytorch/const.yaml"))
    experiment_id = exp.run_basic_test_with_temp_config(
        config, conf.official_examples_path("cifar10_cnn_pytorch"), 1
    )
    trials = exp.experiment_trials(experiment_id)
    trial_metrics = exp.trial_metrics(trials[0]["id"])
    # Collect the validation accuracy of every step that ran validation.
    validation_errors = []
    for step in trial_metrics["steps"]:
        if step.get("validation"):
            validation_errors.append(
                step["validation"]["metrics"]["validation_metrics"]["validation_accuracy"]
            )
    target_accuracy = 0.74
    assert max(validation_errors) > target_accuracy, (
        "cifar10_cnn_pytorch did not reach minimum target accuracy {} in {} steps."
        " full validation error history: {}".format(
            target_accuracy, len(trial_metrics["steps"]), validation_errors
        )
    )
|
"""proxyserver URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.conf import settings
from django.views.generic import RedirectView
from proxyserver.views import Proxy, AuthProxy
# Upstream service roots, configured in Django settings.
URLS = settings.URLS
urlpatterns = [
    # Land on /main by default (temporary redirect so it stays overridable).
    url(r'^$', RedirectView.as_view(url='main', permanent=False)),
    # Plain proxying to the web frontend (no auth check in the proxy layer).
    url(r'^admin/(?P<path>.*)', Proxy.as_view(upstream=URLS['webfront'] + 'admin/')),
    url(r'^login/$', Proxy.as_view(upstream=URLS['webfront'] + 'login/')),
    url(r'^signup/$', Proxy.as_view(upstream=URLS['webfront'] + 'signup/')),
    url(r'^reset_password/(?P<path>.*)', Proxy.as_view(upstream=URLS['webfront'] + 'reset_password/')),
    # Authenticated proxying to the task service API and the main web app.
    url(r'^api/v1/(?P<path>.*)', AuthProxy.as_view(upstream=URLS['taskservice'] + 'api/v1/')),
    url(r'^main/(?P<path>.*)', AuthProxy.as_view(upstream=URLS['webmain'])),
]
|
''' Analysis script for 1D 2l plots (RootTools)
'''
#Standard imports
import ROOT
from math import sqrt, cos, sin, pi, acos
import itertools,os
import copy
from operator import mul
import argparse
# NOTE(review): a second, fuller ArgumentParser is built below and its
# parse_args() call defines `args` before first use. The duplicate parser that
# used to live here knew only --logLevel, so its parse_args() aborted the
# script whenever any other option (e.g. --plot_directory) was passed on the
# command line; the duplicate parse has been removed.
#RootTools
from RootTools.core.standard import *
from TopEFT.Tools.user import data_directory
from TopEFT.samples.color import color
from TopEFT.Tools.cutInterpreter import cutInterpreter
from TopEFT.Tools.objectSelection import getFilterCut
from TopEFT.Tools.helpers import getObjDict, getCollection, getObjFromFile
#
# Arguments
#
import argparse
argParser = argparse.ArgumentParser(description = "Argument parser")
argParser.add_argument('--logLevel', action='store', default='INFO', nargs='?', choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'TRACE', 'NOTSET'], help="Log level for logging")
argParser.add_argument('--plot_directory', action='store', default='94X_nonpromptClosure')
argParser.add_argument('--selection', action='store', default='njet0p-btag0p')
argParser.add_argument('--year', action='store', default=2017, type=int, help="Which year?" )
args = argParser.parse_args()
# Logger
import TopEFT.Tools.logger as logger
import RootTools.core.logger as logger_rt
logger = logger.get_logger( args.logLevel, logFile = None)
logger_rt = logger_rt.get_logger(args.logLevel, logFile = None)
# Samples
# NOTE(review): data_directory / postProcessing_directory are rebound before
# each sample import; the values in effect when `directories` is built below
# are the last ones set (2017 data dir, TopEFT_PP_2017_mva_v8/trilep/).
data_directory = "/afs/hephy.at/data/dspitzbart02/cmgTuples/"
postProcessing_directory = "TopEFT_PP_2016_mva_v7/trilep/"
from TopEFT.samples.cmgTuples_Summer16_mAODv2_postProcessed import *
postProcessing_directory = "TopEFT_PP_2016_mva_v7/trilep/"
from TopEFT.samples.cmgTuples_Data25ns_80X_03Feb_postProcessed import *
data_directory = "/afs/hephy.at/data/dspitzbart02/cmgTuples/"
postProcessing_directory = "TopEFT_PP_2017_mva_v7/trilep/"
from TopEFT.samples.cmgTuples_Fall17_94X_mAODv2_postProcessed import *
postProcessing_directory = "TopEFT_PP_2017_mva_v7/trilep/"
from TopEFT.samples.cmgTuples_Data25ns_94X_Run2017_postProcessed import *
postProcessing_directory = "TopEFT_PP_2017_mva_v8/trilep/"
# Build the sample objects used for the closure test.
dirs = {}
dirs['DY'] = ["DYJetsToLL_M50_ext"]
dirs['TTbar'] = ["TTLep_pow"]
directories = { key : [ os.path.join( data_directory, postProcessing_directory, dir) for dir in dirs[key]] for key in dirs.keys()}
DY = Sample.fromDirectory(name="DY", treeName="Events", isData=False, color=color.DY, texName="DY", directory=directories['DY'])
TT = Sample.fromDirectory(name="TT", treeName="Events", isData=False, color=0, texName="t#bar{t}", directory=directories['TTbar'])
# The sample under study (ttbar by default; alternatives kept commented).
sample = TT
#sample = DY_HT_LO_17 if args.year == 2017 else DY_HT_LO
#sample = TTLep_pow_17 if args.year == 2017 else TTLep_pow
# Trigger
from TopEFT.Tools.triggerSelector import triggerSelector
tr = triggerSelector(args.year)
# Event selection
# Sideband: >=3 fakeable (FO) leptons, vetoing fully tight 3l/4l events.
nonpromptSelection = "nLeptons_FO_3l>=3 && Sum$((lep_tight_3l*(lep_pt - lep_ptCorr) + lep_ptCorr)>40&&lep_FO_3l>0)>0&&Sum$((lep_tight_3l*(lep_pt - lep_ptCorr) + lep_ptCorr)>20&&lep_FO_3l>0)>1&&Sum$((lep_tight_3l*(lep_pt - lep_ptCorr) + lep_ptCorr)>10&&lep_FO_3l>0)>2 && !(nLeptons_tight_4l>=4) && nLeptons_tight_3l<3"
# Signal-like: exactly 3 tight leptons with at most 2 gen-prompt FO leptons.
promptSelection = "nLeptons_tight_3l==3 && nLeptons_FO_3l_genPrompt<=2 && Sum$(lep_pt>40&&lep_tight_3l>0)>0&&Sum$(lep_pt>20&&lep_tight_3l>0)>1&&Sum$(lep_pt>10&&lep_tight_3l>0)>2 && !(nLeptons_tight_4l>=4)"
#np_sel_string = "&&".join([getFilterCut(isData=False, year=args.year), tr.getSelection("MC"), nonpromptSelection, cutInterpreter.cutString(args.selection), "abs(Z_mass - 91.2)<10"])
#sel_string = "&&".join([getFilterCut(isData=False, year=args.year), tr.getSelection("MC"), promptSelection, cutInterpreter.cutString(args.selection), "abs(Z_mass - 91.2)<10 && Z_fromTight>0"])
np_sel_string = "&&".join([getFilterCut(isData=False, year=args.year), tr.getSelection("MC"), nonpromptSelection, cutInterpreter.cutString(args.selection)])
sel_string = "&&".join([getFilterCut(isData=False, year=args.year), tr.getSelection("MC"), promptSelection, cutInterpreter.cutString(args.selection)])
# weights
weight_central = "weight*reweightPU36fb*reweightBTagDeepCSV_SF*41.2"
# preparation for looper
loose_ID = "FO_3l"
tight_ID = "tight_3l"
nLeptons = 3
variables = map( TreeVariable.fromString, ["run/I", "lumi/I", "evt/I", "Z_pt/F", "cosThetaStar/F", "weight/F", "met_pt/F", "Z_mass/F", "nJetSelected/I", "nBTag/I", 'Z_l1_index/I', 'Z_l2_index/I', 'nonZ_l1_index/I', 'nonZ_l2_index/I', 'met_pt/F', 'nMuons_FO_3l/I'])
if not sample.isData: variables += map( TreeVariable.fromString, ['reweightPU36fb/F', 'reweightBTagDeepCSV_SF/F' ] )
variables += [VectorTreeVariable.fromString('lep[pt/F,ptCorr/F,eta/F,phi/F,FO_3l/I,tight_3l/I,FO_SS/I,tight_SS/I,jetPtRatiov2/F,pdgId/I]')]
# Get histograms directly from MC
plotvars = ["Z_mass", "nJetSelected", "met_pt", "nBTag", "nMuons_FO_3l", "lep_pt_trail"]
hists = {}
hists_pred = {}
hists["Z_mass"] = sample.get1DHistoFromDraw( "Z_mass", selectionString = sel_string, binning = [20,80,100], addOverFlowBin = 'upper', weightString = weight_central )
hists["met_pt"] = sample.get1DHistoFromDraw( "met_pt", selectionString = sel_string, binning = [20,0,200], addOverFlowBin = 'upper', weightString = weight_central )
hists["nJetSelected"] = sample.get1DHistoFromDraw( "nJetSelected", selectionString = sel_string, binning = [8,0,8], addOverFlowBin = 'upper', weightString = weight_central )
hists["nBTag"] = sample.get1DHistoFromDraw( "nBTag", selectionString = sel_string, binning = [4,0,4], addOverFlowBin = 'upper', weightString = weight_central )
# NOTE(review): this histogram is keyed "nMuons_FO_3l" but draws the variable
# "nMuons_tight_3l" -- confirm which multiplicity is intended.
hists["nMuons_FO_3l"] = sample.get1DHistoFromDraw( "nMuons_tight_3l", selectionString = sel_string, binning = [4,0,4], addOverFlowBin = 'upper', weightString = weight_central )
hists["lep_pt_trail"] = sample.get1DHistoFromDraw( "lep_pt[2]", selectionString = sel_string, binning = [10,0,100], addOverFlowBin = 'upper', weightString = weight_central )
# Run the tree reader for cases with more complicated plots
# (the trailing-lepton pT cannot be expressed as a simple Draw string).
hists["lep_pt_trail"].Reset()
sample.setSelectionString(sel_string)
reader = sample.treeReader( variables = variables )
reader.start()
while reader.run():
    nLep = len([ l for l in reader.event.lep_pt if l > 0])
    lep = [getObjDict(reader.event, "lep"+'_', ["pt", "ptCorr", "eta", "phi", "FO_3l", "FO_SS", "tight_3l", "tight_SS", "pdgId","jetPtRatiov2"], i) for i in range(nLep) ]
    # get the relevant leptons
    lep = [ l for l in lep if l[tight_ID] ]
    if len(lep) != nLeptons: print "bug"
    allweights = ["weight", "reweightPU36fb", "reweightBTagDeepCSV_SF"]
    if sample.isData:
        weight = 1
    else:
        weights = [ getattr( reader.event, w ) for w in allweights ]
        weight = reduce(mul, weights, 1)
    # 41.2 = luminosity scale used throughout this script.
    hists["lep_pt_trail"].Fill(lep[2]['pt'], ( weight * 41.2 ))
# Clone empty copies of every histogram to hold the fake-rate prediction.
for var in plotvars:
    hists_pred[var] = hists[var].Clone()
    hists_pred[var].Reset()
# Get the nonprompt prediction
muFile = os.path.expandvars("$CMSSW_BASE/src/TopEFT/Tools/data/FRData/muFR_all.root")
elFile = os.path.expandvars("$CMSSW_BASE/src/TopEFT/Tools/data/FRData/elFR_all.root")
muMap = getObjFromFile(muFile, "passed")
elMap = getObjFromFile(elFile, "passed")
# Fill the fake-rate prediction from the loose (sideband) selection.
sample.setSelectionString(np_sel_string)
reader = sample.treeReader( variables = variables )
reader.start()
while reader.run():
    nLep = len([ l for l in reader.event.lep_pt if l > 0])
    lep = [getObjDict(reader.event, "lep"+'_', ["pt", "ptCorr", "eta", "phi", "FO_3l", "FO_SS", "tight_3l", "tight_SS", "pdgId","jetPtRatiov2"], i) for i in range(nLep) ]
    # get the relevant leptons
    lep = [ l for l in lep if l[loose_ID] ]
    # get tight and loose separately
    looseNotTight = [ l for l in lep if not l[tight_ID] ]
    tight = [ l for l in lep if l[tight_ID] ]
    nLooseNotTight = len( looseNotTight )
    nTight = len( tight )
    # Really get ALL possible combinations.
    allCombinations = itertools.combinations(tight+looseNotTight, nLeptons)
    for comb in allCombinations:
        FR = 1.
        nLooseNotTight = 0
        # Tight leptons use their measured pT, loose ones the cone-corrected pT.
        pts = [ l['pt'] if l[tight_ID] else l['ptCorr'] for l in comb ]
        pts = sorted(pts, reverse=True)
        for l in comb:
            if l[tight_ID]:
                continue
            else:
                if abs(l['pdgId']) == 11: FRmap = elMap
                elif abs(l['pdgId']) == 13: FRmap = muMap
                else: raise NotImplementedError
                # Clamp the pT used for the map lookup to its last bin.
                ptCut = 45. if sample.isData else 99.
                ptCorrected = l['ptCorr'] if l['ptCorr'] < ptCut else (ptCut-1)
                #print ptCorrected
                FR_from_map = FRmap.GetBinContent(FRmap.FindBin(ptCorrected, abs(l['eta'])))
                if sample.isData:
                    FR *= FR_from_map/(1-FR_from_map)
                else:
                    FR *= FR_from_map
                nLooseNotTight += 1
        # Alternating sign over the loose-lepton multiplicity.
        FR *= (-1)**(nLooseNotTight+1)
        allweights = ["weight", "reweightPU36fb", "reweightBTagDeepCSV_SF"]
        if sample.isData:
            weight = 1
        else:
            weights = [ getattr( reader.event, w ) for w in allweights ]
            weight = reduce(mul, weights, 1)
        for var in plotvars:
            if not var == "lep_pt_trail":
                hists_pred[var].Fill(getattr(reader.event, var), ( weight * FR * 41.2 ))
            else:
                hists_pred[var].Fill(pts[2], ( weight * FR * 41.2 ))
def drawObjects( ):
    # Returns the TLatex header objects ("CMS Simulation" plus the dataset
    # label) that plotting.draw places on each canvas.
    tex = ROOT.TLatex()
    tex.SetNDC()
    tex.SetTextSize(0.04)
    tex.SetTextAlign(11) # 11 = left/bottom (the old comment said "right")
    lines = [
        (0.15, 0.95, 'CMS Simulation'),
        (0.65, 0.95, '%s MC (13 TeV)'%args.year )
    ]
    return [tex.DrawLatex(*l) for l in lines]
# Axis titles for each plotted variable.
texX = {
    "Z_mass": "M(ll) (GeV)",
    "met_pt": "E_{T}^{miss} (GeV)",
    "nJetSelected": "N_{jet}",
    "nBTag": "N_{b-tag}",
    "nMuons_FO_3l": "N_{#mu}",
    "lep_pt_trail": "p_{T}(trailing l) (GeV)",
}
# Draw the observed MC distribution against the fake-rate prediction,
# with a ratio pad, for every variable.
for var in plotvars:
    hists[var].style = styles.errorStyle( ROOT.kBlack )
    hists_pred[var].style = styles.fillStyle( ROOT.kBlue-1 )
    hists[var].legendText = sample.texName
    hists_pred[var].legendText = "prediction"
    plots = [[ hists_pred[var] ], [hists[var]] ]
    print plots
    plotting.draw(
        Plot.fromHisto("%s_%s"%(sample.name, var),
            plots,
            texX = texX[var]
        ),
        plot_directory = "/afs/hephy.at/user/d/dspitzbart/www/TopEFT/nonprompt/",
        logX = False, logY = False, sorting = True,
        #yRange = (0.008,3.),
        ratio = {'yRange': (0.1, 2.4), 'texX':'MC/pred'},
        drawObjects = drawObjects(),
        copyIndexPHP = True
    )
|
# Librerias Django
from django.contrib.auth.models import User
from django.db import models
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
# Librerias en carpetas locales
from .submodels.department import PyDepartment
from .submodels.employee import PyEmployee
|
import tensorflow_hub as hub
import numpy as np
import tensorflow_text
# Some texts of different lengths.
english_sentences = ["dog", "Puppies are nice.", "I enjoy taking long walks along the beach with my dog."]
italian_sentences = ["cane", "I cuccioli sono carini.", "Mi piace fare lunghe passeggiate lungo la spiaggia con il mio cane."]
japanese_sentences = ["犬", "子犬はいいです", "私は犬と一緒にビーチを散歩するのが好きです"]
# Load the multilingual Universal Sentence Encoder from TF-Hub (downloads on
# first use; requires tensorflow_text to be imported for the ops to register).
embed = hub.load("https://tfhub.dev/google/universal-sentence-encoder-multilingual-large/3")
# Compute embeddings.
en_result = embed(english_sentences)
it_result = embed(italian_sentences)
ja_result = embed(japanese_sentences)
# Compute similarity matrix. Higher score indicates greater similarity.
# Rows correspond to English sentences, columns to the other language's.
similarity_matrix_it = np.inner(en_result, it_result)
similarity_matrix_ja = np.inner(en_result, ja_result)
print(similarity_matrix_it, similarity_matrix_ja)
"""
only use + and //, write a recursion, calculate the integer part of log2(n).
example:
log2(50) => 5
log2(31) => 4
log2(32) => 5
log2(1) => 0
log2(16) => 4
# the input n should be > 0
"""
def log2(n):
    """
    Return the integer part of log2(n), using only + and // recursively.

    method: count how many times n can be floor-divided by 2; log2(1) == 0.
    :raises ValueError: if n <= 0 (log2 is undefined there).
        Bug fix: n == 0 used to return 0, although the module docstring
        requires n > 0 and log2(0) is mathematically undefined.
    """
    if n <= 0:
        raise ValueError("Wrong input")
    if n == 1:
        return 0
    return 1 + log2(n // 2)
if __name__ == '__main__':
    # Smoke-test the recursion on a few sample values.
    for sample in (50, 1000, 500):
        print(log2(sample))
|
def welcome():
    """Print the game's welcome banner."""
    banner = (
        '+------------------------------------------------+'
        '\nWELCOME TO TIC TAC TOE\n'
        '+------------------------------------------------+\n'
    )
    print(banner)
def board(p):
    """Print the 3x3 board; `p` is a 9-element list of ' ', 'X' or 'O'
    (cells in numpad order, left-to-right, top-to-bottom)."""
    print(
        '+-------+-------+-------+\n'+
        '| | | |\n'+
        '| {} | {} | {} |\n'.format(p[0], p[1], p[2])+
        '| | | |\n'+
        '+-------+-------+-------+\n'+
        '| | | |\n'+
        '| {} | {} | {} |\n'.format(p[3], p[4], p[5])+
        '| | | |\n'+
        '+-------+-------+-------+\n'+
        '| | | |\n'+
        '| {} | {} | {} |\n'.format(p[6], p[7], p[8])+
        '| | | |\n'+
        '+-------+-------+-------+')
def instructions():
    """Print how to choose cells (numbers 1-9 in grid order) and who starts."""
    print(
        '+------------------------------------------------+'+
        '\nINSTRUCTIONS\n'+
        '+------------------------------------------------+'+
        '\nUsing a numpad as a reference, place your X or O'+
        '\non the board using the numbers 1-9 corresponding'+
        '\nto the grid from left to right and top to bottom.'+
        '\nThe player who chooses X will start the game.'+
        '\n+------------------------------------------------+\n')
# Shared game state.
placement = [' ']*9   # board cells, ' ' = empty
x = True              # True while it is X's turn
o = False             # True while it is O's turn
player_1 = ' '        # marker chosen by player 1 ('x' or 'o')
# Outputs the blank board and instructions; player 1 chooses a marker.
def player_start():
    """Show the intro screens and let Player 1 pick X or O.

    Sets the module-level `player_1` marker. Bug fix: the original gave up
    after a single invalid input, leaving `player_1` unset; this version
    re-prompts until a valid choice is entered.
    """
    global player_1
    welcome()
    board(placement)
    instructions()
    while True:
        player_type = input('\nWould you like to be X or O? ')
        choice = player_type.lower()
        if choice == 'x':
            print('\nOkay, Player 1 will go first as X')
            print ('\nPlayer 1, ')
            player_1 = 'x'
            break
        elif choice == 'o':
            print('Okay, Player 2 will go first as X')
            print ('\nPlayer 2, ')
            player_1 = 'o'
            break
        else:
            print('Try that again. Please enter X or O to start the game: ')
#plays game while no winners detected
def player_input():
    """Main input loop: alternately prompt X and O for a cell (1-9),
    update the board and check for a winner after each move.

    Turn order is driven by the global `x` / `o` flags, which `win_game`
    clears when the game ends.
    """
    global x
    global o
    global placement
    try:
        while x == True or o == True:
            try:
                while x:
                    place_x = int(input('Where would you like to place your X? '))
                    if 1 <= place_x <= 9:
                        try:
                            # Cell numbers are 1-based; the list is 0-based.
                            if placement[place_x-1] == ' ':
                                placement[place_x-1] = 'X'
                                board(placement)
                                x = False
                                o = True
                                win_game(placement)
                            else:
                                raise ValueError
                        except ValueError:
                            print('Try that again. Someone has already played there.')
                    else:
                        x = True
                        raise ValueError
            except ValueError:
                print('Try again. Please enter a number 1-9.')
            try:
                while o:
                    place_o = int(input('Where would you like to place your O? '))
                    if 1 <= place_o <= 9:
                        try:
                            if placement[place_o-1] == ' ':
                                placement[place_o-1] = 'O'
                                board(placement)
                                o = False
                                x = True
                                win_game(placement)
                            else:
                                raise ValueError
                        except ValueError:
                            print('Try that again. Someone has already played there.')
                    else:
                        o = True
                        raise ValueError
            except ValueError:
                print('Try again. Please enter a number 1-9.')
    except ValueError:
        print('Try again.')
def win_game(p):
    """Check board p for a win or a tie.

    If the game ended: stop both turns, announce the result, reset the board
    and offer a rematch via play_again().  Otherwise do nothing.
    """
    global x
    global o
    global placement
    # index triples that form a winning line (rows, columns, diagonals)
    lines = (
        (0, 1, 2), (3, 4, 5), (6, 7, 8),
        (2, 5, 8), (1, 4, 7), (0, 3, 6),
        (6, 4, 2), (0, 4, 8),
    )
    wins = tuple((p[a], p[b], p[c]) for a, b, c in lines)
    if ('X', 'X', 'X') in wins:
        x = False
        o = False
        winner = 'Player 1' if player_1 == 'x' else 'Player 2'
        print('{} wins as X!'.format(winner))
        placement = [' ']*9
        play_again()
    elif ('O', 'O', 'O') in wins:
        x = False
        o = False
        winner = 'Player 2' if player_1 == 'x' else 'Player 1'
        print('{} wins as O!'.format(winner))
        placement = [' ']*9
        play_again()
    elif ' ' not in placement:
        x = False
        o = False
        print("It's a tie!")
        placement = [' ']*9
        play_again()
#clears board if player wants a rematch
def play_again():
    """After a finished game, ask for a rematch; on yes, restart with X's turn."""
    global x
    while x == False and o == False:
        answer = input('Do you want a rematch? Y/N: ').lower()
        if answer in ('y', 'yes'):
            x = True
            board(placement)
            break
        elif answer in ('n', 'no'):
            print('Thanks for Playing!')
            break
        # invalid answer: report and loop back to the prompt
        print('Try that again. Please enter Y for yes or N for no.')
# Entry point: choose marks, then run the game loop until the players quit.
player_start()
player_input()
# Copyright (C) 2016 Nokia Corporation and/or its subsidiary(-ies).
"""
Collection of helpers to run commands both on local and remote hosts.
"""
import datetime
import os
from logging import getLogger
import subprocess
import select
import signal
logger = getLogger(__name__)
class Host(object):
    """Connection details needed to reach a server over SSH and run commands.

    Args:
        name (str): server hostname (or IP address)
        username (str): username to connect as
        port (int): SSH port to use
    """
    def __init__(self, name, username, port=22):
        self.name, self.username, self.port = name, username, port

    @classmethod
    def from_server(klass, server, username):
        """Alternate constructor from a models.Server record.

        Args:
            server (models.Server)
            username (str)
        """
        return klass(server.name, username, server.port)
def run_cmd_by_ssh(host, cmd, timeout=900):
    """Run `cmd` on `host` through the local `ssh` client binary.

    Args:
        host (Host)
        cmd (list of str): command to run
        timeout (int): return (with a status code 1) if the command did
            not complete before this time
    Returns:
        tuple: see exec_cmd documentation
    """
    ssh_prefix = ['ssh', '{}@{}'.format(host.username, host.name), '-p', str(host.port)]
    return exec_cmd(ssh_prefix + cmd, timeout=timeout)
def exec_script(working_directory, script_name, params=None):
    """Run a local shell script if it exists; missing scripts are not an error.

    Args:
        working_directory: use this working directory to run the script
        script_name: path to the script (absolute, or relative to the working directory)
        params: optional list of extra arguments passed to the script
    Returns:
        tuple: see exec_cmd documentation (code 0 with a message if absent)
    """
    script_args = [] if params is None else params
    script_path = os.path.join(working_directory, script_name)
    if not os.path.exists(script_path):
        return (0, "No script '{}'.".format(script_name), None)
    return exec_cmd(['bash', script_name] + script_args, working_directory, use_shell=False)
def remote_check_file_exists(path, host):
    """Returns true if 'ssh $username@$hostname stat $path' exits with code 0"""
    exit_code = run_cmd_by_ssh(host, ['stat', path])[0]
    return exit_code == 0
def exec_script_remote(host, remote_working_directory, script_name, params=None):
    """Run a (remote) script on a remote host, using SSH.

    Missing remote scripts return success with a message, mirroring exec_script.
    """
    extra = params if params is not None else []
    remote_script = os.path.join(remote_working_directory, script_name)
    if not remote_check_file_exists(remote_script, host):
        return (0, "No remote script '{}'".format(script_name), None)
    # cd first so relative paths inside the script resolve remotely
    return run_cmd_by_ssh(host, ['cd', remote_working_directory, '&&', 'bash', script_name] + extra)
def exec_cmd(cmd, current_working_directory=None, timeout=600, use_shell=None):
    """
    Execute a command on the local machine.
    Args:
        cmd (list): command to execute. First element is the executable name, other elements are the parameters
        current_working_directory (str): if provided, the command will be executed in a shell with the working directory set to this.
        timeout (int): timeout in seconds for the command to complete. If the timeout is reached, returns immediately and set the exit code to 1.
        use_shell (bool): passed to subprocess.Popen; defaults to True exactly when a working directory is given.
    Returns
        a tuple: (exit code, stdout, stderr)
    """
    try:
        if use_shell is None:
            use_shell = current_working_directory is not None
        # bufsize left at its default: line buffering (bufsize=1) is not
        # supported for binary pipes on Python 3 and only produced a warning.
        p = subprocess.Popen(cmd, shell=use_shell, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=current_working_directory)
        reads = [p.stdout.fileno(), p.stderr.fileno()]
        outputs = ([], [])  # collected stdout chunks, stderr chunks (bytes)
        # Wait for completion, draining both pipes as data arrives so the
        # child can never block on a full pipe buffer.
        start = datetime.datetime.now()
        while p.poll() is None:
            ret = select.select(reads, [], [], 0.2)
            for fd in ret[0]:
                for i in range(0, len(reads)):
                    if fd == reads[i]:
                        outputs[i].append(os.read(fd, 4096))
                        break
            # Check whether timeout is exceeded
            now = datetime.datetime.now()
            if (now - start) > datetime.timedelta(seconds=timeout):
                os.kill(p.pid, signal.SIGKILL)
                os.waitpid(p.pid, 0)
                # Fix: os.read returns bytes, so the old "".join(...) raised
                # TypeError on Python 3; join with b"" and decode instead.
                stdout = b"".join(outputs[0]).decode(errors='replace')
                stderr = b"".join(outputs[1]).decode(errors='replace')
                logger.error("cmd:[%s] timeout! so far: stdout:[%s] stderr:[%s]", cmd, stdout, stderr)
                return (1, stdout, "Timeout (the command took more than {}s to return)\n\n{}".format(timeout, stderr))
        # Read remaining data left in the pipes after the process exited
        performed_read = True
        while performed_read:
            performed_read = False
            ret = select.select(reads, [], [], 0)
            for fd in ret[0]:
                for i in range(0, len(reads)):
                    if fd == reads[i]:
                        data = os.read(fd, 4096)
                        if len(data) > 0:
                            performed_read = True
                            outputs[i].append(data)
                        break
        stdout = b"".join(outputs[0]).decode(errors='replace')
        stderr = b"".join(outputs[1]).decode(errors='replace')
        logger.debug("cmd:[%s] stdout:[%s] stderr:[%s]", cmd, stdout, stderr)
        return (p.returncode, stdout, stderr)
    except Exception as e:
        logger.exception("error:[%s] cmd:[%s]", str(e), cmd)
        return (1, "", str(e))
|
import allure
import pytest, logging
from allure_commons.types import AttachmentType
from selenium import webdriver
from selenium.webdriver.support.events import EventFiringWebDriver, AbstractEventListener
# Root logging config: all test logs go to logs/test.log (directory must exist).
FORMAT = '%(name)s : %(asctime)-15s : %(filename)s : %(levelname)s : %(message)s'
logging.basicConfig(level=logging.INFO, filename="logs/test.log", format=FORMAT)
def pytest_addoption(parser):
    """Register the suite's command-line options (browser and Selenoid settings)."""
    # string-valued options with defaults
    for name, default in (("--browser", "chrome"),
                          ("--executor", "127.0.0.1"),
                          ("--bver", "89.0")):
        parser.addoption(name, action="store", default=default)
    # boolean Selenoid toggles
    for name in ("--vnc", "--logs", "--videos"):
        parser.addoption(name, action="store_true", default=False)
    parser.addoption("--mobile", action="store_true")
    parser.addoption("--maximized", action="store_true", default=False)
    parser.addoption("--URL", action="store", default="http://demo-opencart.ru", help="Base url for web site")
@pytest.fixture
def browser(request):
    """Remote (Selenoid) WebDriver wrapped in an event-firing listener.

    Reads the browser name/version and the Selenoid toggles from the
    command-line options registered in pytest_addoption, and quits the
    driver at fixture teardown.
    """
    browser = request.config.getoption("--browser")
    executor = request.config.getoption("--executor")
    version = request.config.getoption("--bver")
    vnc = request.config.getoption("--vnc")
    logs = request.config.getoption("--logs")
    videos = request.config.getoption("--videos")
    maximized = request.config.getoption("--maximized")
    executor_url = f"http://{executor}:4444/wd/hub"
    # Desired capabilities: browser matching plus Selenoid VNC/video/log flags.
    caps = {
        "browserName": browser,
        "browserVersion": version,
        "screenResolution": "1280x720",
        "name": "mkazantsev",
        "selenoid:options": {
            "enableVNC": vnc,
            "enableVideo": videos,
            "enableLog": logs
        },
        'acceptSslCerts': True,
        'acceptInsecureCerts': True,
        'timeZone': 'Europe/Moscow',
        'goog:chromeOptions': {
            'args': []
        }
    }
    # Wrap the remote driver so ExceptionListener (below) can attach
    # screenshots to Allure on errors and navigations.
    driver = EventFiringWebDriver(webdriver.Remote(
        command_executor=executor_url,
        desired_capabilities=caps
    ), ExceptionListener())
    def fin():
        driver.quit()
    # NOTE(review): maximize_window() runs before the finalizer is registered;
    # if it raises, the driver is never quit — consider registering fin first.
    if maximized:
        driver.maximize_window()
    request.addfinalizer(fin)
    return driver
@pytest.fixture
def base_url(request):
    """Base URL of the application under test (from the --URL option)."""
    url_option = request.config.getoption("--URL")
    return url_option
@pytest.fixture(scope="session")
def generate_env(request):
with open("allure-results/environment.properties", "w") as f:
f.writelines("Browser=" + request.config.getoption("--browser") + "\n")
f.writelines("Browser.Version=" + request.config.getoption("--bver") + "\n")
f.writelines("Stand=" + request.config.getoption("--URL") + "\n")
class ExceptionListener(AbstractEventListener):
    """WebDriver event listener: screenshots to Allure on errors and navigations."""
    def on_exception(self, exception, driver):
        # attach a screenshot of the failure state and log the exception
        png = driver.get_screenshot_as_png()
        allure.attach(png, name="Скриншот ошибки.png", attachment_type=AttachmentType.PNG)
        logging.error(exception)
    def after_navigate_to(self, url, driver):
        # record what the page looked like right after each navigation
        png = driver.get_screenshot_as_png()
        allure.attach(png, name=f"Переход на {url}", attachment_type=AttachmentType.PNG)
|
from django.conf.urls import include, url
from rest_framework.routers import DefaultRouter
from TestOnline.company.paper.views import *
# DRF router: auto-generates CRUD routes for papers under company/papers/.
router = DefaultRouter()
router.register(r'company/papers',PaperViewSet)
urlpatterns = [
    # url(r'^company/createPaper/$',createPaper)
    url(r"^company/getPapers/$",getPapers),
    url(r"^company/addQuestionToPaper/$", addQuestionToPaper),
]
# Append the router-generated routes to the explicit ones above.
urlpatterns += router.urls
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 20 12:51:34 2018
@author: shams
"""
import numpy as np
import pandas as pd
from keras.preprocessing import sequence
from keras.models import load_model
from keras.layers import Dense, Input, LSTM
from keras.models import Model
import h5py
# Slack-style metadata dumps (absolute Windows paths: machine-specific).
user_file = pd.read_json('C:/Users/shams/OneDrive/Documents/Projects/Insight/datasets/users.json')
channel_file = pd.read_json('C:/Users/shams/OneDrive/Documents/Projects/Insight/datasets/channels.json')
# loading input file
user= 'U0AB5K6HY'
filename = 'C:/Users/shams/OneDrive/Documents/Projects/Insight/datasets/'+user+'.json'
# defining the batch size and number of epochs
# per day
batch_size = 50
epochs = 100
timesteps = 7
#per week
# NOTE(review): these immediately overwrite the per-day settings above —
# only the per-week values (10 / 100 / 1) are ever used.
batch_size = 10
epochs = 100
timesteps = 1
def get_train_length(dataset, batch_size, test_percent):
    """Largest batch-size multiple just below the train/test split boundary.

    Reserves `test_percent` of the dataset for the test set, then returns the
    biggest x in [boundary-100, boundary) with x % batch_size == 0 (so the
    training length is an exact multiple of the batch size).
    """
    boundary = int(len(dataset) * (1 - test_percent))
    candidates = [x for x in range(boundary - 100, boundary) if x % batch_size == 0]
    return max(candidates)
# NOTE(review): df_data_1 is never defined in this script — presumably a
# per-user activity DataFrame built from the JSON files above; confirm.
length = get_train_length(df_data_1, batch_size, 0.1)
upper_train = length + timesteps*2
df_data_1_train = df_data_1[0:upper_train]
# replace NaNs with zeros before scaling/training
training_set = np.nan_to_num(df_data_1_train.loc[:,:].values)
training_set.shape
# Feature Scaling
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler(feature_range = (0, 1))
#training_set_scaled = sc.fit_transform(np.float64(training_set.reshape(-1,1)))
training_set_scaled = sc.fit_transform(np.float64(training_set))
training_set_scaled.shape
x_train = []
y_train = []
# Creating a data structure with n timesteps
"""
for i in range(timesteps, length + timesteps):
    x_train.append(training_set_scaled[i-timesteps:i,0])
    y_train.append(training_set_scaled[i:i+timesteps,0])
    #y_train.append(training_set_scaled[i:i+1,0])
"""
# Creating a data structure with n timesteps: MULTIVARIATE
# (windows keep every feature column; targets use only column 0)
for i in range(timesteps, length + timesteps):
    x_train.append(training_set_scaled[i-timesteps:i,:])
    y_train.append(training_set_scaled[i:i+timesteps,0])
    #y_train.append(training_set_scaled[i:i+1,0])
print (length + timesteps)
print (len(x_train))
print (len (y_train))
print (np.array(x_train).shape)
print (np.array(y_train).shape)
"""
# Reshaping
X_train, y_train = np.array(x_train), np.array(y_train)
X_train = np.reshape(x_train, (X_train.shape[0], X_train.shape[1], ))
y_train = np.reshape(y_train, (y_train.shape[0], y_train.shape[1], 1))
"""
# Reshaping to (samples, timesteps, features) / (samples, timesteps, 1)
x_train, y_train = np.array(x_train), np.array(y_train)
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], x_train.shape[2]))
y_train = np.reshape(y_train, (y_train.shape[0], y_train.shape[1], 1))
from keras.layers import Dense, Input, LSTM
from keras.models import Model
import h5py
# Initialising the LSTM Model with MAE Loss-Function
# Using Functional API
# stateful LSTMs require a fixed batch_shape
inputs_layer = Input(batch_shape=(batch_size,timesteps,x_train.shape[2]))
lstm_1 = LSTM(10, stateful=True, return_sequences=True)(inputs_layer)
lstm_2 = LSTM(10, stateful=True, return_sequences=True)(lstm_1)
output_layer = Dense(units = 1)(lstm_2)
regressor_mae = Model(inputs=inputs_layer, outputs = output_layer)
regressor_mae.compile(optimizer='adam', loss = 'mae')
regressor_mae.summary()
regressor_mae.save(filepath='C:/Users/shams/OneDrive/Desktop/Insight/models/test_model.h5')
# Stateful training: one epoch at a time, resetting LSTM states in between
# so state never leaks across epochs.
for i in range(epochs):
    print("Epoch: " + str(i))
    # Fix: the reshaped tensors are x_train/y_train (lowercase); the old
    # X_train reference raised NameError because the uppercase name only
    # exists inside the commented-out string block above.
    regressor_mae.fit(x_train, y_train, shuffle=False, epochs = 1, batch_size = batch_size)
    regressor_mae.reset_states()
|
import re
import sys
import numpy as np
def readPFM(file):
    """Load a PFM image file into a numpy array.

    Color files ('PF') come back as (H, W, 3) float arrays, grayscale
    ('Pf') as (H, W).  Rows are flipped on return because PFM stores
    scanlines bottom-to-top.
    """
    with open(file, 'rb') as f:
        header = f.readline().decode('utf-8')
        if 'PF' in header:
            color = True
        elif 'Pf' in header:
            color = False
        else:
            raise Exception('Not a PFM file.')
        dim_match = re.match(r'^(\d+)\s(\d+)\s$', f.readline().decode('utf-8'))
        if not dim_match:
            raise Exception('Malformed PFM header.')
        width, height = (int(v) for v in dim_match.groups())
        # a negative scale value marks little-endian sample data
        scale = float(f.readline().rstrip())
        endian = '<' if scale < 0 else '>'
        data = np.fromfile(f, endian + 'f')
    shape = (height, width, 3) if color else (height, width)
    return np.flipud(np.reshape(data, shape))
def writePFM(file, image, scale=1):
    """Save a float32 numpy image (H x W, H x W x 1 or H x W x 3) as PFM."""
    with open(file, 'wb') as f:
        if image.dtype.name != 'float32':
            raise Exception('Image dtype must be float32.')
        image = np.flipud(image)  # PFM scanlines are stored bottom-to-top
        ndim = len(image.shape)
        if ndim == 3 and image.shape[2] == 3:  # color image
            color = True
        elif ndim == 2 or (ndim == 3 and image.shape[2] == 1):  # grayscale
            color = False
        else:
            raise Exception('Image must have H x W x 3, H x W x 1 or H x W dimensions.')
        f.write(b'PF\n' if color else b'Pf\n')
        f.write(b'%d %d\n' % (image.shape[1], image.shape[0]))
        endian = image.dtype.byteorder
        if endian == '<' or (endian == '=' and sys.byteorder == 'little'):
            scale = -scale  # negative scale flags little-endian payload
        f.write(b'%f\n' % scale)
        image.tofile(f)
def cal_avgerr(GT, disp):
    """Mean absolute disparity error over valid (non-inf) ground-truth pixels."""
    valid = GT != np.inf
    return np.abs(GT - disp)[valid].mean()
# -*- coding:utf-8 -*-
from robot.models import Edu_School_Class, Edu_School_Class_User, Edu_School_Notice, EduWxRobotChatRoomData, \
EduWxRobotChatFriendData, EduWxRobotFriend, EduWxRobotChatRoom, EduWxRobotChatRoomMember, EduWxRobot, \
EduWxRobotChatRoomFiles, EduWxRobot
from robot.dao import dao_common as common
def _insert_new(model_cls, data):
    """Create a `model_cls` record, copy `data` onto it, and insert it.

    All add_* helpers below delegate here: they previously repeated the same
    three lines each, which invited copy-paste drift.
    """
    record = model_cls()
    common.set_data_to_record(record, data)
    return common.insert_data(record)

def add_friend_chat_data(data):
    """Insert a one-to-one chat message record."""
    return _insert_new(EduWxRobotChatFriendData, data)

def add_edu_friend_data(data):
    """Insert a robot friend record."""
    return _insert_new(EduWxRobotFriend, data)

def add_room_files_data(data):
    """Insert a chat-room file record."""
    return _insert_new(EduWxRobotChatRoomFiles, data)

def add_school_class_data(data):
    """Insert a school class record."""
    return _insert_new(Edu_School_Class, data)

def add_school_notice_data(data):
    """Insert a school notice record."""
    return _insert_new(Edu_School_Notice, data)

def add_edu_robot_data(data):
    """Insert a robot record."""
    return _insert_new(EduWxRobot, data)

def add_edu_robot_room_data(data):
    """Insert a chat-room record."""
    return _insert_new(EduWxRobotChatRoom, data)

def add_edu_robot_room_member_data(data):
    """Insert a chat-room member record."""
    return _insert_new(EduWxRobotChatRoomMember, data)

def add_edu_robot_room_chat_data(data):
    """Insert a chat-room message record."""
    return _insert_new(EduWxRobotChatRoomData, data)
"""
code test using 5.1
"""
import sys
# Grow a list one element at a time and report how its allocated size changes.
data = []
for _ in range(26):
    length, nbytes = len(data), sys.getsizeof(data)
    print("Length: {0:3d}; Size in bytes: {1:4d}".format(length, nbytes))
    data.append(None)
"""
Length: 0; Size in bytes: 56
Length: 1; Size in bytes: 88
Length: 2; Size in bytes: 88
Length: 3; Size in bytes: 88
Length: 4; Size in bytes: 88
Length: 5; Size in bytes: 120
Length: 6; Size in bytes: 120
....
The answers are similar to the book's, except that the starting size here is smaller than the one the book reports.
The size grows in 32-byte increments, i.e. four 8-byte (64-bit) pointer slots per resize step.
""" |
"""
Time/Space Complexity = O(N)
"""
#TLE
class Solution:
    def rob(self, nums: list) -> int:
        """House Robber II (circular street), plain recursion — exponential;
        kept as the TLE baseline for the memoized version below.

        Fix: the annotation used `List[int]` without importing typing.List,
        which raises NameError when this module is imported standalone.
        """
        def rob(indx = 0, start = 0):
            if indx >= len(nums):
                return 0
            if start == 0 and indx == len(nums) - 1:
                # a robbery that started at house 0 cannot also take the
                # last house (they are adjacent on the circle)
                return 0
            # take house indx, then skip 1 or 2 houses ahead
            return nums[indx] + max(rob(indx+2, start), rob(indx+3, start))
        if len(nums) == 1:
            return nums[0]
        elif len(nums) == 2:
            return max(nums[0], nums[1])
        else:
            # the optimal plan must take one of the first three houses
            return max(rob(0,0), rob(1,1), rob(2,2))
#DP
from functools import lru_cache
class Solution:
    def rob(self, nums: list) -> int:
        """House Robber II with lru_cache memoization — O(n) distinct states.

        Fix: the annotation used `List[int]` without importing typing.List
        (NameError on standalone import).  NOTE(review): this second
        `Solution` shadows the TLE version above when the module is imported
        top to bottom — presumably intentional in this scratch file.
        """
        @lru_cache(None)
        def rob(indx = 0, start = 0):
            if indx >= len(nums):
                return 0
            if start == 0 and indx == len(nums) - 1:
                # started at house 0 -> the last house is off limits
                return 0
            return nums[indx] + max(rob(indx+2, start), rob(indx+3, start))
        if len(nums) == 1:
            return nums[0]
        elif len(nums) == 2:
            return max(nums[0], nums[1])
        else:
            # the optimal plan must take one of the first three houses
            return max(rob(0,0), rob(1,1), rob(2,2))
#!/usr/bin/env python
# encoding: utf-8
"""
random_permutation.py
Created by Jakub Konka on 2011-05-15.
Copyright (c) 2011 University of Strathclyde. All rights reserved.
"""
from __future__ import division
import sys
import os
import numpy as np
import math
from itertools import permutations
def generate(n):
    """Return a uniformly random permutation of 1..n as a tuple.

    Implements the Fisher-Yates shuffle the comments describe: walk i from
    n-1 down to 1 and swap position i with a uniformly chosen position in
    0..i.  (The previous version drew the swap index from 0..n-1 at every
    step — the classic biased "naive shuffle" — which contradicts its own
    step-2/3 description and does not yield uniform permutations.)
    """
    # 1. Generate ordered list of n integers
    numbers = [i+1 for i in range(n)]
    # 2-3. Swap a uniformly chosen position in 0..i with position i,
    # for i = n-1 down to 1.
    for i in reversed(range(1, n)):
        u = np.random.uniform(0, 1)
        # map one uniform draw onto an integer in [0, i] (min guards the
        # measure-zero case u == 1.0)
        index = min(int(u * (i + 1)), i)
        numbers[index], numbers[i] = numbers[i], numbers[index]
    return tuple(numbers)
if __name__ == '__main__':
    n = 3
    num = 100000
    # Verification
    # Generate num of random permutations of n numbers
    result = [generate(n) for i in range(num)]
    # Gather statistics (here, prob of occurrence of each permutation)
    count = result.count
    freq = [count(item) for item in set(result)]
    # Fix: on Python 3 `map` is lazy, so the old `print(map(...))` printed
    # "<map object ...>" instead of the probabilities; build a list instead.
    prob = [f / num for f in freq]
    print(prob)
|
import sys
import os
import weakref
import SocketServer
from SimpleXMLRPCServer import SimpleXMLRPCServer
from SimpleXMLRPCServer import SimpleXMLRPCRequestHandler
sys.path.append(os.path.realpath('.'))
sys.path.append(os.path.realpath('../'))
from application.helpers.instrumentsmanager.instrumentmgr import InstrumentManager
# Restrict XML-RPC requests to the conventional /RPC2 path.
class RequestHandler(SimpleXMLRPCRequestHandler):
    rpc_paths = ('/RPC2',)
class AsyncXMLRPCServer(SocketServer.ThreadingMixIn, SimpleXMLRPCServer):
    """XML-RPC server that handles each request in its own thread."""
    pass
class MyFuncs:
    """Sample functions that could be exposed over XML-RPC."""
    def div(self, x, y):
        # floor division, matching the original behaviour
        quotient = x // y
        return quotient
if __name__ == '__main__':
    myManager = InstrumentManager()
    # Serve the instrument manager over XML-RPC on localhost:8000,
    # one thread per request, None values allowed on the wire.
    server = AsyncXMLRPCServer(
        ("localhost", 8000), requestHandler=RequestHandler, allow_none=True)
    server.register_introspection_functions()
    # NOTE(review): RemoteInstrumentManager is neither defined nor imported in
    # this file — presumably a wrapper around InstrumentManager; confirm the
    # missing import before running.
    server.register_instance(RemoteInstrumentManager(myManager))
    server.serve_forever()
|
class Compartment:
    """
    A tuberculosis model compartment.

    The values are the string names used to identify compartments in the model.
    """
    SUSCEPTIBLE = "susceptible"
    EARLY_LATENT = "early_latent"
    LATE_LATENT = "late_latent"
    INFECTIOUS = "infectious"
    ON_TREATMENT = "on_treatment"
    RECOVERED = "recovered"
# Every compartment of the base TB model.
BASE_COMPARTMENTS = [
    Compartment.SUSCEPTIBLE,
    Compartment.EARLY_LATENT,
    Compartment.LATE_LATENT,
    Compartment.INFECTIOUS,
    Compartment.ON_TREATMENT,
    Compartment.RECOVERED,
]
# Compartments whose occupants can transmit infection.
INFECTIOUS_COMPS = [
    Compartment.INFECTIOUS,
    Compartment.ON_TREATMENT,
]
# Infected but non-infectious (latent) compartments.
LATENT_COMPS = [
    Compartment.EARLY_LATENT,
    Compartment.LATE_LATENT,
]
|
import string
class Solution:
    """Word Ladder II: find all shortest transformation sequences.

    BFS expands the word graph layer by layer (recording, for each word, the
    set of next-layer neighbours in `keylist`), then a DFS reconstructs every
    shortest beginWord -> endWord ladder from that map.
    """
    def __init__(self):
        self.keylist = {}          # word -> set of next-layer neighbours
        self.res = []              # collected shortest ladders
        self.find = False          # set True once endWord is reachable
        self.end = None            # target word
        self.can_trans_map = {}    # legacy cache, kept for interface parity

    def can_trans(self, left, wordList):
        """Return the set of words in wordList exactly one letter from `left`."""
        theset = set()
        left_list = list(left)
        for j in range(len(left)):
            origin_char = left_list[j]
            for k in string.ascii_lowercase:
                left_list[j] = k
                next_word = ''.join(left_list)
                # Fix: the old code compared next_word (a whole word) against
                # origin_char (a single letter), which can never be equal;
                # the intended check is to skip the unchanged word itself.
                if next_word == left:
                    continue
                if next_word in wordList:
                    theset.add(next_word)
            left_list[j] = origin_char
        return theset

    def find_xulie(self, key, nowlist):
        """DFS from `key` through keylist, appending complete ladders to res."""
        anowlist = nowlist[:] + [key]
        if self.end in anowlist:
            self.res.append(anowlist)
        else:
            if key in self.keylist:
                for i in self.keylist[key]:
                    self.find_xulie(i, anowlist)

    def findLadders(self, beginWord: str, endWord: str, wordList: [str]) -> [[str]]:
        self.end = endWord
        wordList = set(wordList)
        can_remove = {beginWord}
        thislayer = {beginWord}
        if not wordList or not endWord in wordList:
            return []
        while True:
            # drop the previous layer so paths can never loop backwards
            wordList = wordList - can_remove
            can_remove = set()
            for j in thislayer:
                if not (j in self.keylist):
                    self.keylist[j] = set()
                j_kid = set()
                a = self.can_trans(j, wordList)
                if a:
                    j_kid |= a
                    if endWord in a:
                        self.find = True
                self.keylist[j] = self.keylist[j] | set(j_kid)
                can_remove = can_remove | (j_kid)
            thislayer = can_remove
            if not thislayer or endWord in thislayer:
                break
        if not self.find:
            return []
        else:
            self.find_xulie(beginWord, [])
            return self.res
# print(Solution().can_trans("dog","lkg"))
print(Solution().findLadders(
"hit", "cog", ["hot", "dot", "dog", "lot", "log", "cog"]
))
|
"""
base codec class with helpers for Sixteen Fourteen Hex Encoding
"""
class BaseBitCodec(object):
    """Bit-manipulation helpers for Sixteen Fourteen Hex Encoding."""

    @staticmethod
    def mask(num, masker):
        """
        preserves only the bits of num that are set by the masker
        :param num: integer to be masked
        :param masker: integer whose bits indicate which bits to preserve
        :return: result of num AND masker
        Example, assume num=1240, masker=127
        num in binary is       10011011000
        masker in binary is    00001111111
        num AND masker         00001011000
        mask(num, masker) would return 88
        """
        return num & masker

    @staticmethod
    def leftshift(num, n):
        """
        returns the result of num shifted to the left n times
        :param num: integer to be shifted
        :param n: number of times to shift. Must be a whole number
        :return: returns the result of shifting the bits of num
        "n" times to the left
        Example, assume num=1240, n=2
        num in binary is:       0010011011000
        num after left shift:   1001101100000
        leftshift(1240, 2) would return 4960
        """
        return num << n

    @staticmethod
    def rightshift(num, n):
        """
        returns the result of num shifted to the right n times
        :param num: integer to be shifted
        :param n: number of times to shift. Must be a whole number
        :return: returns the result of shifting the bits of num
        "n" times to the right
        Example, assume num=1240, n=2
        num in binary is:       0010011011000
        num after right shift:  0000100110110
        rightshift(1240, 2) would return 310
        (Fix: the old docstring claimed 434 and said "left shift";
        1240 >> 2 is 310.)
        """
        return num >> n

    @staticmethod
    def mergebits(x, y):
        """
        merges the bits of x and y together
        :param x: integer to be merged
        :param y: integer to be merged
        :return: result of x OR y
        Example, assume x=218, y=36
        x in binary is:  11011010
        y in binary is:  00100100
        mergebits(x, y): 11111110
        """
        return x | y
|
#!/usr/bin/env python3
'''Reads emails generated by the filter script and submits patches/make comments'''
import os
import re
import time
import datetime
from collections import namedtuple
import git
import gitlab
import cfg
import db_helper
import mail_helper
# Do some initialization early so we can abort in case of failure
LOCAL_REPO = git.Repo(cfg.LOCAL_REPO_PATH)
assert not LOCAL_REPO.bare
assert not LOCAL_REPO.is_dirty()
assert LOCAL_REPO.head.ref == LOCAL_REPO.heads.master
LOCAL_REPO_GIT = LOCAL_REPO.git
# Ensure we are up to date
LOCAL_REPO_GIT.fetch('upstream')
LOCAL_REPO_GIT.merge('upstream/master')
# Bot credentials: fork repo receives branch pushes, main repo hosts the MRs.
GITLAB = gitlab.Gitlab.from_config(cfg.BOT_LOGIN_CFG_NAME, [])
assert GITLAB is not None
FORK_REPO = GITLAB.projects.get(cfg.FORK_REPO_ID)
assert FORK_REPO is not None
MAIN_REPO = GITLAB.projects.get(cfg.MAIN_REPO_ID)
assert MAIN_REPO is not None
# Admin credentials: needed to toggle discussion locking on merge requests.
GITLAB_ADMIN = gitlab.Gitlab.from_config(cfg.ADMIN_LOGIN_CFG_NAME, [])
assert GITLAB_ADMIN is not None
ADMIN_PROJECT_VIEW = GITLAB_ADMIN.projects.get(cfg.MAIN_REPO_ID)
assert ADMIN_PROJECT_VIEW is not None
# Utility functions
def is_full(array):
    """Return True when every slot in `array` is filled (no None entries).

    Checks identity against None, not truthiness, so 0 and "" count as full.
    """
    return all(element is not None for element in array)
# End Utility Functions
# A patch email saved to disk: file path, its Message-Id, and the mail subject.
Patch = namedtuple('Patch', 'path msgid subject')
def create_or_update_merge_request(mr, title, author, description, patches, prologue_msg_id):
    """Apply a complete patchset to a branch and create/refresh its merge request.

    mr is None for a brand-new patchset; otherwise the existing MR is reset to
    master and the new series is re-applied on its source branch.  On success a
    Gitlab discussion is created (and linked in the DB) for each patch mail.
    """
    # create the local git branch
    branch_name = None
    if mr is None:
        branch_name = 'ml-patchset-{0}'.format(time.time())
        LOCAL_REPO_GIT.checkout('HEAD', b=branch_name)
    else:
        # updated series: rebuild the MR's source branch from scratch
        branch_name = mr.source_branch
        LOCAL_REPO_GIT.checkout(branch_name)
        LOCAL_REPO_GIT.reset('master', hard=True)
    # apply the patches
    try:
        for patch in patches:
            LOCAL_REPO_GIT.am(str(patch.path))
    except:
        print('Failed to apply patches, discarding patchset')
        # TODO: make more robust, and send email back if it didn't apply
        if mr is not None:
            LOCAL_REPO_GIT.reset('origin/'+branch_name, hard=True)
        LOCAL_REPO_GIT.checkout('master')
        if mr is None:
            LOCAL_REPO_GIT.branch(D=branch_name)
        return
    finally:
        # the on-disk patch files are consumed either way
        for patch in patches:
            patch.path.unlink()
    LOCAL_REPO_GIT.checkout('master')
    # push work to origin
    LOCAL_REPO_GIT.push('origin', branch_name, force=True)
    # create merge request
    if mr is None:
        mr = FORK_REPO.mergerequests.create({'source_branch': branch_name,
                                             'target_project_id': cfg.MAIN_REPO_ID,
                                             'target_branch': 'master',
                                             'title': title if title is not None else 'Multi-Patch Patchset from Mailing List',
                                             'description': description})
        if not cfg.BIDIRECTIONAL_COMM:
            # one-way mode: lock Gitlab-side discussion via the admin view
            admin_mr = ADMIN_PROJECT_VIEW.mergerequests.get(mr.id)
            admin_mr.discussion_locked = True
            admin_mr.save()
        # send email to mailing list as a place to put MR comments
        if prologue_msg_id is None:
            if len(patches) == 1:
                db_helper.link_discussion_to_mail(db_helper.Discussion(mr.id, 0), patches[0].msgid)
                return
            # send meta email if prologue wasn't sent
            mail_helper.send_mail('Gitlab discussion thread for recent patchset by' + author,
                                  'Merge-Request Link: ' + mr.web_url)
        else:
            db_helper.link_discussion_to_mail(db_helper.Discussion(mr.id, 0), prologue_msg_id)
    elif prologue_msg_id and len(patches) != 1:
        extra_discussion = mr.discussions.create({'body': 'Discussion on updated commits'})
        db_helper.link_discussion_to_mail(db_helper.Discussion(mr.id, extra_discussion.id), prologue_msg_id)
    # create a discussion for each patch
    for patch in patches:
        patch_discussion = mr.discussions.create({'body': 'Discussion for {0}'.format(patch.subject)})
        # link mail thread to discussion
        db_helper.link_discussion_to_mail(db_helper.Discussion(mr.id, patch_discussion.id), patch.msgid)
# A parsed email: Message-Id, In-Reply-To, From header, and raw body text.
Mail = namedtuple('Mail', 'msg_id reply_to sender body')
def format_email_body(raw_body):
    # for now, fence the whole email in a code block so markdown cannot
    # mangle patch contents
    # TODO: detect patches and put them in codeblocks, with the right language set
    fenced = '```\n{}\n```'.format(raw_body)
    return fenced
# if it's not a patch, see if it's a comment on a MR thread
def process_standard_mail(mail):
    """Mirror a plain mailing-list reply into its linked MR discussion.

    Looks up which MR discussion the mail's thread maps to; if none is known
    the mail is ignored (just printed).  Temporarily unlocks the MR if the
    admin had locked its discussion, posts the comment, then relocks.
    """
    root_msg = db_helper.get_root_msg_id(mail.reply_to)
    discussion_entry = db_helper.lookup_discussion(root_msg)
    if discussion_entry is None:
        print(mail.reply_to, root_msg)
        return
    # remember this message as part of the thread for future replies
    db_helper.add_child(root_msg, mail.msg_id)
    # TODO: Handle fancy emails
    comment_body = 'Mail from {0} on mailing list:\n\n{1}'.format(mail.sender, format_email_body(mail.body))
    print(comment_body)
    mr = MAIN_REPO.mergerequests.get(discussion_entry.mr_id)
    # get the discussion id, if present
    discussion = mr.discussions.get(discussion_entry.disc_id) if discussion_entry.disc_id != 0 else None
    # NOTE(review): in one-way mode, mails on bot-authored MRs are skipped here —
    # confirm this matches the intended direction of the BIDIRECTIONAL_COMM flag.
    if (not cfg.BIDIRECTIONAL_COMM and mr.author['id'] == cfg.BOT_GITLAB_ID):
        return
    admin_mr = ADMIN_PROJECT_VIEW.mergerequests.get(mr.id)
    relock = False
    if admin_mr.discussion_locked:
        admin_mr.discussion_locked = False
        admin_mr.save()
        relock = True
    if discussion is None:
        mr.notes.create({'body': comment_body})
    else:
        discussion.notes.create({'body': comment_body})
    if relock:
        admin_mr.discussion_locked = True
        admin_mr.save()
def find_root_mr(author_email, title):
    """Find the MR whose first commit author and title match, else None.

    Used to attach a re-rolled (v2+) patch series to its original MR.
    """
    for mr in MAIN_REPO.mergerequests.list(all=True):
        # Fix: generators have no .next() method on Python 3 — the old
        # mr.commits().next() raised AttributeError; use next(iter(...)).
        first_commit = next(iter(mr.commits()))
        if first_commit.author_email == author_email and mr.title == title:
            return mr
    return None
def main():
    """Drain the patches directory, assembling complete patchsets into MRs.

    Each pass over the directory gathers one author's complete series
    (PATCH i/n); plain replies are forwarded to their MR discussion, and
    versioned series (v2+) refresh the original MR found via find_root_mr.
    """
    out_of_patches = False
    processed_patch_files = []
    while not out_of_patches:
        patches = None
        prologue_msg_id = None
        # Set to subject of either PATCH[0/n], or PATCH[1/1]
        patchset_title = None
        # If PATCH 0 exists, set the patchset description to the content of the email.
        patchset_description = ''
        current_author = None
        mr = None
        out_of_patches = True
        for file_path in cfg.PATCHES_PATH.iterdir():
            # discard if we've reached timeout
            create_time = datetime.datetime.fromtimestamp(os.path.getctime(file_path))
            if create_time < cfg.PATCH_PROCESS_TIMEOUT:
                file_path.unlink()
                continue
            if file_path.name in processed_patch_files:
                continue
            out_of_patches = False
            with file_path.open() as file:
                mail_contents = file.read()
            author = None
            email = None
            subject = None
            msg_id = None
            reply_to = None
            try:
                # a missing header makes .group() blow up on None -> AttributeError
                author = re.search(r'(?m)^From: (.*)$', mail_contents).group(1)
                subject = re.search(r'(?m)^Subject: (.*)$', mail_contents).group(1)
                msg_id = re.search(r'(?m)^Message-Id: (.*)$', mail_contents).group(1)
            except AttributeError:
                print('Invalid Message')
                file_path.unlink()
                continue
            search = re.search(r'(?m)^In-Reply-To: (.*)$', mail_contents)
            reply_to = search.group(1) if search is not None else None
            patch_prefix = re.search(r'^\[PATCH(?: v(?P<version>\d+))?(?: (?P<patch_idx>\d+)/(?P<patch_total>\d+))?\]', subject)
            author_search = re.search(r'^\"?(?P<name>[^\"]*)\"? <(?P<email>[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+)>$', author)
            email = author_search.group('email') if author_search is not None else None
            # ignore unparsable senders and the bot's own mail
            if email is None or email == cfg.BOT_MAIL_ADDRESS:
                file_path.unlink()
                continue
            if patch_prefix is None:
                # not a patch: treat as a discussion comment (body after headers)
                process_standard_mail(Mail(msg_id, reply_to, author, mail_contents[mail_contents.find('\n\n'):]))
                file_path.unlink()
                continue
            if 'resend' in patch_prefix.group(0):
                file_path.unlink()
                continue
            # only collect one author's series per outer pass
            if current_author is not None and author != current_author:
                continue
            version = patch_prefix.group('version')
            patch_idx = patch_prefix.group('patch_idx')
            patch_total = patch_prefix.group('patch_total')
            if patch_total is None:
                # bare [PATCH]: a single-patch series
                patch_total = 1
                patch_idx = 1
                patchset_title = subject[patch_prefix.end() + 1:]
            patch_idx = int(patch_idx)
            patch_total = int(patch_total)
            # NOTE(review): `version` is a regex group (str or None), so
            # `version != 1` is always True when present — presumably
            # int(version) != 1 was intended; confirm.
            if version is not None and version != 1 and patch_idx == 1:
                # Right now we only use patch 1 data to find the MR
                mr = find_root_mr(email, subject[patch_prefix.end() + 1:])
                if mr is None:
                    print('unable to find MR for versioned patch')
                    file_path.unlink()
                    continue
            if patch_total < patch_idx:
                file_path.unlink()
                continue
            if patches is None:
                patches = [None] * patch_total
            elif len(patches) != patch_total:
                continue
            current_author = author
            processed_patch_files.append(file_path.name)
            if patch_idx == 0:
                # PATCH 0/n prologue carries the series title and thread root
                patchset_title = subject[patch_prefix.end() + 1:]
                prologue_msg_id = msg_id
                continue
            patches[patch_idx - 1] = Patch(file_path, msg_id, subject)
            if is_full(patches):
                create_or_update_merge_request(mr, patchset_title, current_author, patchset_description, patches, prologue_msg_id)
                break
main()
|
# __author__ = 'cjweffort'
# -*- coding: utf-8 -*-
# NOTE(review): Python 2 syntax throughout (print statements, xrange);
# this walks through the NumPy tutorial chapter on fancy indexing.
from numpy import *
"""
4 Fancy indexing and index tricks
"""
"""
4.1 Indexing with Arrays of Indices
"""
# Select from a 1-D array using an array of indices
a = arange(12) ** 2
i = array([1, 1, 3, 8, 5])
print a[i]
j = array([[3, 4], [9, 7]])
print a[j]
# Index a 2-D array along a single dimension
palette = array([[0, 0, 0],
[255, 0, 0],
[0, 255, 0],
[0, 0, 255],
[255, 255, 255]])
image = array([[0, 1, 2, 0], [0, 3, 4, 0]])
print palette[image]
# Index a 2-D array along both dimensions at once
a = arange(12).reshape(3, 4)
i = array([[0, 1], [1, 2]])
j = array([[2, 1], [3, 3]])
print a[i, j]# equivalent to: l = (i, j); print a[l]
print a[:, j]
#search of the maximum value of time-dependent series
time = linspace(20, 145, 5)
data = sin(arange(20).reshape(5, 4))
ind = data.argmax(axis = 0)
time_max = time[ind]
data_max = data[ind, xrange(data.shape[1])]
all(data_max == data.max(axis = 0))
a = arange(5)
a[[1, 3, 4]] = 0 # reassign the values at those indices
a[[0, 0, 2]] = [1, 2, 3] # a repeated index is assigned repeatedly (last wins)
a[[0, 0, 2]] += 1 # with +=, a repeated index is NOT necessarily incremented twice
"""
4.2 Indexing with Boolean Arrays
"""
#First Way
a = arange(12).reshape(3,4)
b = a > 4
print b
print a[b] #1d array with the selected elems
a[b] = 0
print a
#Second Way
a = arange(12).reshape(3,4)
b1 = array([False,True,True])
b2 = array([True, False, True, False])
print a[b1, :]
# NOTE(review): `a[: b2]` is almost certainly a typo for `a[:, b2]`
# (column selection, as in the NumPy tutorial) — confirm before fixing.
print a[: b2]
print a[b1, b2]
"""
4.3 The ix_() Function
"""
a = array([2, 3, 4, 5])
b = array([8, 5, 4])
c = array([5, 4, 6, 8, 3])
ax, bx, cx = ix_(a, b, c)
print ax.shape, bx.shape, cx.shape
result = ax + bx * cx
print result
print result[3, 2, 4]
print a[3] + b[2] * c[4]
def ufunc_reduce(ufct, *vectors):
    """Reduce *ufct* over the open mesh of *vectors* (via ix_).

    Starts from the ufunc's identity element and folds each open-grid
    axis in, broadcasting to the full result shape.
    """
    result = ufct.identity
    for grid_axis in ix_(*vectors):
        result = ufct(result, grid_axis)
    return result
print ufunc_reduce(add, a, b, c)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# =============================================================================
# Created By : Krikor Herlopian
# Created Date: Wed May 12 2021
# Email Address: kherl1@unh.newhaven.edu
# =============================================================================
"""Count the letters and digits in a line typed by the user."""
sentence = input("Type your sentence \n")
# Tallies for alphabetic and numeric characters; everything else is ignored.
letters, digits = 0, 0
for c in sentence:
    # A character cannot be both a letter and a digit, so elif is exact.
    # (The original's `else: pass` paired only with the isdigit() check,
    # which made the "ignore everything else" comment misleading; the
    # needless list(sentence) conversion is also dropped — iterating a
    # string yields the same characters.)
    if c.isalpha():
        letters += 1
    elif c.isdigit():
        digits += 1
print("Letters:", letters)
print("Digits:", digits)
# Python 2 script for the pythonchallenge.com "linkedlist" puzzle:
# repeatedly fetch the page and extract the next "nothing" number.
import urllib2 as ur
import re
link = "http://www.pythonchallenge.com/pc/def/linkedlist.php?nothing="
data = "44827"
while True:
    try:
        # The page text contains the next number; join all digit runs found.
        data = "".join(re.findall('\d+',ur.urlopen(link+data).read()))
    except:
        # NOTE(review): bare except hides the real failure, and once any
        # fetch fails this prints `data` forever in a tight loop —
        # presumably a `break` after the print was intended.
        print data
|
# -*- coding:UTF-8 -*-
from rest_framework.viewsets import GenericViewSet
from rest_framework import mixins
class CreateOnlyViewSet(mixins.CreateModelMixin, GenericViewSet):
    """
    A viewset that provides default `create()` actions.
    """
    pass
class CreateListDeleteViewSet(mixins.CreateModelMixin,
                              mixins.ListModelMixin,
                              mixins.DestroyModelMixin,
                              GenericViewSet):
    """Viewset providing `create()`, `list()` and `destroy()` actions."""
    pass
class ListOnlyViewSet(mixins.ListModelMixin,
                      GenericViewSet):
    """Viewset providing only the `list()` action."""
    pass
class CreateListViewSet(mixins.CreateModelMixin,
                        mixins.ListModelMixin,
                        GenericViewSet):
    """Viewset providing `create()` and `list()` actions."""
    pass
class RetrieveUpdateViewSets(mixins.RetrieveModelMixin,
                             mixins.UpdateModelMixin,
                             GenericViewSet):
    """Viewset providing `retrieve()` and `update()` actions."""
    pass
class RetrieveOnlyViewSets(mixins.RetrieveModelMixin,
                           GenericViewSet):
    """Viewset providing only the `retrieve()` action."""
    pass
class ListRetrieveDeleteViewSets(mixins.ListModelMixin,
                                 mixins.RetrieveModelMixin,
                                 mixins.DestroyModelMixin,
                                 GenericViewSet):
    """Viewset providing `list()`, `retrieve()` and `destroy()` actions.

    Fix: unlike every sibling class in this module, the original omitted
    the `GenericViewSet` base, so the mixins had no queryset/serializer/
    `as_view()` machinery and the class was unusable as a viewset.
    """
    pass
class ListDeleteViewSet(mixins.ListModelMixin,
                        mixins.DestroyModelMixin,
                        GenericViewSet):
    """Viewset providing `list()` and `destroy()` actions."""
    pass
class ListDetailDeleteViewSet(mixins.ListModelMixin,
                              mixins.RetrieveModelMixin,
                              mixins.DestroyModelMixin,
                              GenericViewSet):
    """Viewset providing `list()`, `retrieve()` and `destroy()` actions."""
    pass
class ListRetrieveCreateViewSets(mixins.ListModelMixin,
                                 mixins.RetrieveModelMixin,
                                 mixins.CreateModelMixin,
                                 GenericViewSet):
    """Viewset providing `list()`, `retrieve()` and `create()` actions."""
    pass
|
import os
import os.path
import re
import subprocess
import datetime
import sys
import glob
import math
import logging
import numpy as np
import pyproj
logger=logging.getLogger("pyfarms.util")
def gitinfo():
    """Return a version string for the current git checkout.

    Returns '<remote-url>:<commit-hash>' when the origin URL can be read,
    just the hash when it cannot, and None when git is unavailable or the
    working directory is not a repository.
    """
    try:
        git_hash = subprocess.Popen(
            ['git', 'log', '--pretty=%H', 'HEAD^..HEAD'],
            stdout=subprocess.PIPE).communicate()[0]
    except Exception as e:
        print(e)
        return None
    if not git_hash:
        # git ran but produced nothing (e.g. not a repository).
        return None
    git_hash = git_hash.strip().decode()
    try:
        remote = subprocess.Popen(
            ['git', 'remote', 'show', 'origin'],
            stdout=subprocess.PIPE).communicate()[0].decode()
    except Exception:
        return None
    # Raw string: the original pattern 'URL:\s*(\S+)\n' relied on
    # non-raw escapes, which warn on modern CPython.
    match = re.search(r'URL:\s*(\S+)\n', remote)
    if match:
        return '{0}:{1}'.format(match.group(1), git_hash)
    return git_hash
def makefile():
    """Return the text of ./Makefile.

    Fix: the original left the file handle open (resource leak); the
    `with` block guarantees it is closed.
    """
    with open("Makefile") as handle:
        return handle.read()
def when():
    """Current local time as an ISO-8601 string."""
    now = datetime.datetime.now()
    return now.isoformat()
def effectively_readable(path):
    """Return True if this process can read *path* with its EFFECTIVE
    uid/gid.

    os.access() answers for the *real* ids, which differ from the
    effective ones under setuid/setgid; in that case the mode bits are
    inspected directly (POSIX semantics assumed).
    """
    import os, stat
    real_uid, eff_uid = os.getuid(), os.geteuid()
    real_gid, eff_gid = os.getgid(), os.getegid()
    # This is probably true most of the time, so just let os.access()
    # handle it. Avoids potential bugs in the rest of this function.
    if real_uid == eff_uid and real_gid == eff_gid:
        return os.access(path, os.R_OK)
    mode = os.stat(path)
    # This may be wrong depending on the semantics of your OS.
    # i.e. if the file is -------r--, does the owner have access or not?
    if mode.st_uid == eff_uid:
        return bool(mode.st_mode & stat.S_IRUSR)
    # Same caveat as the owner check above, but for the group bit.
    if mode.st_gid == eff_gid or mode.st_gid in os.getgroups():
        return bool(mode.st_mode & stat.S_IRGRP)
    return bool(mode.st_mode & stat.S_IROTH)
def check_filename(filename, argument):
    """
    Given a command line argument that should be a readable file,
    ensure it is a readable file and give informative messages if not.
    If it's not a file but a directory, tell me. If the path is wrong,
    tell me which part of the path is wrong. If the file exists but
    I don't have permission to read it, tell me that. Gosh!

    Returns the (glob-expanded) filename; raises RuntimeError otherwise.
    """
    if filename is None or filename == "":
        raise RuntimeError("No filename given for {0}".format(argument))
    # Expand the ~ into the home directory in case shell expansion failed.
    filename = re.sub(r"^\~", os.environ["HOME"], filename)
    candidates = glob.glob(filename)
    # Fix: the original used `is` / `is not` against int and str literals,
    # which tests object identity, not equality — a SyntaxWarning on
    # modern CPython and only accidentally correct via interning.
    if len(candidates) == 1:
        filename = candidates[0]
    elif len(candidates) > 1:
        raise RuntimeError("More than one file matches {0}".format(candidates))
    else:  # no glob match: walk up the path to report which part is wrong
        basepath = filename
        prevpath = basepath
        while basepath != "" and not os.path.exists(basepath):
            prevpath = basepath
            basepath = os.path.dirname(basepath)
        if prevpath != filename:
            raise RuntimeError(("The path to {0} doesn't exist so {1} "+
                "cannot be read.").format(basepath, filename))
        else:
            raise RuntimeError(("The file {0} doesn't exist in that "+
                "directory").format(filename))
    if not os.path.isfile(filename):
        raise RuntimeError("The path {0} isn't a file.".format(filename))
    if not effectively_readable(filename):
        raise RuntimeError(("The file {0} exists, "+
            "but this process cannot read it").format(filename))
    return filename
# Degrees -> radians factor, and spherical-earth radians -> kilometres:
# 180*60/pi gives arc-minutes per radian, times 1.852 km per nautical mile.
_degrees_to_radians=np.pi/180
_radians_km=180*60*1.852/np.pi
def distancekm(latlon1, latlon2):
    """Distance computed on a spherical earth.
    Taken from http://williams.best.vwh.net/avform.htm.

    Inputs are (lat, lon) in degrees and must support elementwise
    multiplication (e.g. numpy arrays, not plain tuples); returns km.
    """
    ll1=latlon1*_degrees_to_radians
    ll2=latlon2*_degrees_to_radians
    # Haversine great-circle formula, scaled by the module-level
    # radians->km constant.
    return _radians_km*(2*np.arcsin(np.sqrt(np.power(np.sin((ll1[0]-ll2[0])/2),2)+
        np.cos(ll1[0])*np.cos(ll2[0])*np.power(np.sin((ll1[1]-ll2[1])/2), 2))))
def GIS_default_projection(latlon):
    """
    Given an array of latitude and longitude, return the same projection
    NAADSM uses.

    latlon: (n, 2) array of (lat, lon) degrees. Builds an Albers
    equal-area projection centred on the data's mean longitude with
    standard parallels at 1/6 and 5/6 of the latitude span, then projects
    every point.
    """
    minlat=np.min(latlon[:,0])
    maxlat=np.max(latlon[:,0])
    avglat=np.mean(latlon[:,0])  # NOTE(review): computed but unused
    minlon=np.min(latlon[:,1])
    maxlon=np.max(latlon[:,1])
    avglon=np.mean(latlon[:,1])
    projstr="+ellps=WGS84 +units=km +lon_0={0} +proj=aea +lat_0={1} +lat_1={2} +lat_2={3}".format(
        avglon, minlat, minlat+(maxlat-minlat)/6, maxlat-(maxlat-minlat)/6)
    logger.debug("Projection string {0}".format(projstr))
    pp=pyproj.Proj(projstr)
    projected=np.zeros(latlon.shape, dtype=np.double)
    for idx in range(latlon.shape[0]):
        # NOTE(review): proj string requests +units=km yet the result is
        # divided by 1000 again — possibly double-scaling; confirm against
        # the pyproj version in use.
        projected[idx,:]=np.array(pp(latlon[idx,1], latlon[idx,0]))/1000
    logger.debug("latlon {0}".format(latlon))
    logger.debug("Projected GIS {0}".format(projected))
    return projected
def GIS_distance(latlon1, latlon2):
    """
    This function, named GIS_distance in NAADSM 3.2, is exactly the same.
    Not kidding.

    Plain planar Euclidean distance between two coordinate pairs.
    """
    dx = latlon1[0] - latlon2[0]
    dy = latlon1[1] - latlon2[1]
    return math.sqrt(dx * dx + dy * dy)
class ChunkIter(object):
    """
    Someone asks for `total` iterations but wants them in pieces of
    `chunk`, so this parcels them out as (1-based index, size) pairs:
    e.g. chunk=250, total=2100 yields (1, 250), (2, 250), ..., (9, 100).
    Calling iter() restarts the sequence.
    """
    def __init__(self, chunk, total):
        self.chunk = chunk
        self.total = total
    def __iter__(self):
        self.idx = 0
        return self
    def __next__(self):
        start = self.idx * self.chunk
        if start >= self.total:
            raise StopIteration()
        stop = min(self.total, start + self.chunk)
        self.idx += 1
        return (self.idx, stop - start)
|
# Generated by Django 2.2.1 on 2019-07-27 15:57
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds a supervisor FK to IABatchRoomMapping and relaxes the IA
    start/end time fields to plain TimeFields.

    NOTE(review): the field name 'suervisor' is misspelled, but it must
    match the model definition at this migration state; renaming it
    requires a separate RenameField migration.
    """
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('faculty', '0023_auto_20190727_2117'),
    ]
    operations = [
        migrations.AddField(
            model_name='iabatchroommapping',
            name='suervisor',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='ia',
            name='ia_end_time',
            field=models.TimeField(),
        ),
        migrations.AlterField(
            model_name='ia',
            name='ia_start_time',
            field=models.TimeField(),
        ),
    ]
|
# Simple number-guessing game: the program picks 1..10 and counts the
# user's attempts.
import random
number = random.randint(1,10)
tries = 1
uname = input("Hello, What is ur user name?")
print("Hello", uname + ".",)
question = input("Would you like to play a game(Y/N)?")
if question == 'N' or question == 'n':
    print("Ok.. Bye")
if question == 'Y' or question == 'y':
    guess = int(input("Can You guess the number I am thinking?"))
    while guess != number:
        tries += 1
        if guess > number:
            print("Guess Lower...")
        if guess < number:
            print("Guess Higher")
        guess = int(input("Thats wrong! Try Again?"))
    # Fix: this success message was at module level, so it printed even
    # when the user declined to play; it belongs inside the 'Y' branch.
    print("well done you guessed it right in", tries, "tries")
|
import django
import sys, os
import pandas as pd
#import matplotlib.pyplot as plt
sys.path.append('/home/galm/software/django/tmv/BasicBrowser/')
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "BasicBrowser.settings")
django.setup()
from scoping.models import *
# Re-filter the documents of query 3771 with climate-change regexes over
# title, abstract and keywords, then attach the hits to a new Query row.
q = Query.objects.get(pk=3771)
docs = q.doc_set.all()
# Necessary regular expressions
# NOTE(review): in r1 there is no '|' between "(\wclimate\w* warming\*)"
# and "(\w*global temperature\w*)", so those two groups must match
# consecutively, and "\*" matches a literal asterisk — almost certainly a
# typo for "...warming\w*)|(\w*global temperature..."; confirm against the
# published Haunschild query before fixing.
r1 = "(\w*climat\w* chang\w*)|(\wclimate\w* warming\*)(\w*global temperature\w*)|(\w*global warming\w*)|(\w*greenhouse gas\w*)|(\w*greenhouse effect\w*)|(\w*greenhouse warming\w*)"
r2 = "(\w*climat\w*)"
r3 = "(\w*acclimat\w*)"
r4 = "(\bclimat\w*)"
# Title search: r1 hits, plus r2 hits that are not pure "acclimat..." matches
step2_1 = set(docs.filter(title__iregex=r1).values_list('pk',flat=True))
step2_2 = set(docs.filter(title__iregex=r2).values_list('pk',flat=True))
step2_3 = set(docs.filter(pk__in=step2_2,title__iregex=r3).exclude(title__iregex=r4).values_list('pk',flat=True))
step2 = step2_1 | (step2_2 - step2_3)
# Abstracts
step3 = set(docs.filter(content__iregex=r1).values_list('pk',flat=True))
# Keywords
step4_1 = set(docs.filter(wosarticle__de__iregex=r1).values_list('pk',flat=True))
step4_2 = set(docs.filter(wosarticle__de__iregex=r2).values_list('pk',flat=True))
step4_3 = set(docs.filter(
    pk__in=step4_2,
    wosarticle__de__iregex=r3
).exclude(
    wosarticle__de__iregex=r4
).values_list('pk',flat=True))
step4 = step4_1 | (step4_2 - step4_3)
# Keywords plus
step4_1_2 = set(docs.filter(wosarticle__kwp__iregex=r1).values_list('pk',flat=True))
step4_2_2 = set(docs.filter(wosarticle__kwp__iregex=r2).values_list('pk',flat=True))
# NOTE(review): this filter restricts to pk__in=step4_2 (the author-keyword
# set) rather than step4_2_2 (the keywords-plus set) — looks like a
# copy-paste slip; confirm intent.
step4_3_2 = set(docs.filter(
    pk__in=step4_2,
    wosarticle__kwp__iregex=r3
).exclude(
    wosarticle__kwp__iregex=r4
).values_list('pk',flat=True))
step4__2 = step4_1_2 | (step4_2_2 - step4_3_2)
# Union of all match routes
step5 = step2 | step3 | step4 | step4__2
s5docs = Doc.objects.filter(pk__in=step5)
print(s5docs.filter(
    PY__in=list(range(1980,2015)),
    wosarticle__dt__in=["Article","Review"]
).count())
new_q, created = Query.objects.get_or_create(
    title="Haunschild - updated",
    text="Manually filter Haunschild query",
    project=q.project,
    creator=User.objects.get(username="galm")
)
for d in s5docs:
    d.query.add(new_q)
|
from __future__ import print_function
from googleapiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools
import os
# Full-access Gmail OAuth scope.
SCOPES = 'https://mail.google.com/'
# Relative directory that holds client_secret.json.
DIR_ = "app/controllers/email/"
def auth(token_, token_dir='/Users/charlkruger/Desktop/AtSchool/server/app/controllers/email/tokens/'):
    """ Authenticates user from a token.json file enabling
    get request from the Google APIs.

    Args:
        token_: basename (without '.json') of the stored credential file.
        token_dir: directory containing the token files. Parameterized
            (default keeps the original value) so the function is no
            longer hard-wired to one developer's absolute path.
    Returns:
        An authorized googleapiclient Gmail v1 service object.
    """
    print('\n88992031:', token_, '\n')  # debug marker kept from original
    store = file.Storage(token_dir + token_ + '.json')
    creds = store.get()
    if not creds or creds.invalid:
        # No or expired credential: run the OAuth installed-app flow.
        flow = client.flow_from_clientsecrets(DIR_+'client_secret.json', SCOPES)
        creds = tools.run_flow(flow, store)
    return build('gmail', 'v1', http=creds.authorize(Http()))
if __name__ == '__main__':
    # Smoke test: authenticate with the 'token' credential and show the
    # type of the returned service object.
    print(type(auth('token')))
from ftplib import FTP
import pygame,os, datetime, time, fileinput
import pygame.camera
from pygame.locals import *
# Python 2 data-acquisition loop: every 15 minutes grab a webcam frame,
# wrap it in a small HTML page and upload both to an FTP server.
print 'Starting DAQ'
#---------------------------Loop for Repeat--------------------------------
while True:
    print 'Grabing Data'
    #-----------------------Set Desktop Path-------------------------------
    desktop = os.path.join(os.path.join(os.path.expanduser('~')),'Desktop')
    os.chdir(desktop)
    #-----------------------Get Time Date----------------------------------
    _dateTime = str(datetime.datetime.now())
    #-----------------------Get Camera Image-------------------------------
    # NOTE(review): pygame.init()/camera.init() are re-run every iteration;
    # initialising once before the loop would likely suffice — confirm.
    pygame.init()
    pygame.camera.init()
    camlist = pygame.camera.list_cameras()
    if camlist:
        cam = pygame.camera.Camera(camlist[0],(640,480))
        cam.start()
        img = cam.get_image()
        pygame.image.save(img,"picFile.jpg")
        cam.stop()
    #-----------------------Build HTML File--------------------------------
    # NOTE(review): `file` shadows the Python 2 builtin of the same name.
    file = open("index.html","w")
    file.write('<!DOCTYPE html>')
    file.write('<html>')
    file.write('<head>')
    file.write('<style>')
    file.write('img {')
    file.write('display: block;')
    file.write('margin-left: auto;')
    file.write('margin-right: auto;')
    file.write('}')
    file.write('h2 {')
    file.write('width: 100%;')
    file.write('text-align:center;')
    file.write('}')
    file.write('</style>')
    file.write('</head>')
    file.write('<body>')
    file.write('<img src="picFile.jpg" alt="cam" style="width:50%;">')
    file.write('<h2>')
    file.write(_dateTime)
    file.write('</h2>')
    file.write('</body>')
    file.write('</html>')
    file.close()
    #-----------------------UpLoad Files To FTP----------------------------
    # NOTE(review): empty host/user/password are placeholders to be filled
    # in before deployment.
    ftp = FTP("")
    ftp.login("","")
    ftp.cwd('/public_html/')
    fp = open('index.html','rb')
    ftp.storbinary('STOR %s' % os.path.basename('index.html'),fp,1024)
    fp = open('picFile.jpg','rb')
    ftp.storbinary('STOR %s' % os.path.basename('picFile.jpg'),fp,1024)
    ftp.quit()
    #-----------------------Sleep Wait-------------------------------------
    print 'New Data Ready'
    count = 900
    for i in range(900):
        time.sleep(1)
        count = count - 1
        msg = str(count) + ' Seconds until next DAQ'
        print msg
|
from flask_wtf import FlaskForm
from wtforms import TextAreaField
from wtforms.validators import DataRequired, ValidationError
from app.models import Comment
from datetime import datetime
def date_exists(form, field):
    """WTForms validator: reject a date that is already registered.

    Fix: the original filtered with ``Appointment.date == date`` where
    ``date`` was the (not yet bound) local on the same line, raising
    UnboundLocalError at call time; compare against the submitted value
    ``field.data`` instead.

    NOTE(review): ``Appointment`` is not imported in this module — it must
    be added to the imports for this validator to run; confirm its origin.
    """
    existing = Appointment.query.filter(Appointment.date == field.data).first()
    if existing:
        raise ValidationError("Date is already registered.")
class CommentForm(FlaskForm):
    """Single required text-area field for submitting a comment."""
    comment = TextAreaField('comment', validators=[DataRequired()])
|
from tkinter import *
def doNothing():
    """Placeholder callback for menu and toolbar items."""
    message = "Fine then..."
    print(message)
# Tkinter demo: a window with a cascading menu bar and a simple toolbar.
root = Tk()
# **** MAIN MENU ****
# Add a menu
menu = Menu(root)
root.config(menu=menu)
# Create sub menus and append them to the main menu.
subMenu = Menu(menu)
sub2Menu = Menu(menu)
menu.add_cascade(label="Example", menu=subMenu)
menu.add_cascade(label="2", menu=sub2Menu)
# Add to the cascading submenu
subMenu.add_command(label="Do Nothing", command=doNothing)
subMenu.add_command(label="Again", command=doNothing)
subMenu.add_command(label="Last Time", command=doNothing)
subMenu.add_separator()
subMenu.add_command(label="Quit", command=root.quit)
# **** TOOLBAR ****
toolbar = Frame(root, bg="blue")
insertButt = Button(toolbar, text="Insert", command=doNothing)
insertButt.pack(side=LEFT, padx=2, pady=2)
# NOTE(review): 'inertButt' is presumably a typo for a print button name.
inertButt = Button(toolbar, text="Print", command=doNothing)
inertButt.pack(side=LEFT, padx=2, pady=2)
toolbar.pack(side=TOP, fill=X)
root.mainloop()
|
import pytorch_lightning as pl
import numpy as np
import torch
import torch.nn as nn
import torch.functional as F
import util
from argparse import ArgumentParser
#from dataset import get_dataloader
from models import AutoEncoder
class AutoencoderModel(pl.LightningModule):
    """Lightning wrapper around ``AutoEncoder`` for reconstructing sparse
    rating rows.

    Zero entries are treated as unobserved: predictions are multiplied by
    ``sign(x)`` so only rated positions contribute to the MSE loss.
    hparams keys used: latent_dim, sigmoid, reduction, optimizer, lr,
    momentum (SGD only), plus whatever ``util.set_schedule`` reads.
    """
    def __init__(self, hparams):
        super().__init__()
        self.val_dict = {}        # per-epoch buckets for masked val losses
        self.train_losses = []    # batch losses, averaged at epoch end
        self.hparams = hparams
        self.hparams["tpu_cores"] = 0
        self.loss = self.get_loss_fn()
        # you can get fancier here of course, we will likely have a separate
        # class for the model
        self.model = AutoEncoder(
            self.hparams["latent_dim"]
        )
    def forward(self, inputs):
        """Run the autoencoder; returns dict(latent=..., predicted=...)."""
        output = self.model(inputs)
        return dict(latent=output[0], predicted=output[1])
    def training_step(self, batch, batch_idx):
        x = batch
        # sign(x) masks the loss to observed (non-zero) entries.
        sign = torch.sign(x)
        _, preds = self.model(x)
        if self.hparams['sigmoid']:
            preds = torch.sigmoid(preds)
        preds = preds * sign
        loss = self.loss(preds, x)
        self.train_losses.append(loss.detach().cpu().item())
        self.log(
            "train_loss",
            loss,
            on_epoch=True,
            on_step=True,
            logger=True,
            prog_bar=True,
        )
        return loss
    def training_epoch_end(self, training_result):
        self.log(
            "epoch_train_loss",
            sum(self.train_losses)/len(self.train_losses),
            on_epoch=True,
            logger=True,
        )
        self.train_losses = []
    def validation_step(self, batch, batch_idx):
        x = batch
        sign = torch.sign(x)
        _, preds = self.model(x)
        if self.hparams['sigmoid']:
            preds = torch.sigmoid(preds)
        loss = self.loss(preds * sign, x)
        self.log("val_loss", loss, on_epoch=True, on_step=False, )
        # Also evaluate reconstruction when a user's last n ratings are
        # hidden from the input (rows with fewer than n ratings are fully
        # zeroed).
        for n in [1, 5, 10, 20]:
            x_mask = x.clone().detach()
            for i in range(x_mask.shape[0]):
                num_revs = x_mask[i, :].bool().sum()
                if n > num_revs:
                    x_mask[i, :] = 0
                else:
                    # Fix: torch.where(cond) returns a TUPLE of index
                    # tensors; the original sliced the tuple with [-n:]
                    # (a no-op on a 1-tuple), which zeroed ALL rated
                    # entries instead of the last n. Index tensor [0]
                    # first, then take its last n entries.
                    x_mask[i, :][torch.where(x_mask[i, :] > 0)[0][-n:]] = 0
            _, preds = self.model(x_mask)
            if self.hparams['sigmoid']:
                preds = torch.sigmoid(preds)
            loss = self.loss(preds * sign, x)
            self.log(f"val_last_{n}_loss", loss, on_epoch=True, on_step=False, )
            self.val_dict.setdefault(f"val_last_{n}_loss", []).append(loss.detach().cpu().item())
    def validation_epoch_end(self, validation_result):
        for k, v in self.val_dict.items():
            self.log(f"epoch_{k}", sum(v) / len(v), on_epoch=True, logger=True)
        self.val_dict = {}
    def get_loss_fn(self):
        """MSE loss, sum- or mean-reduced per hparams['reduction']."""
        if self.hparams['reduction'] == "sum":
            return nn.MSELoss(reduction='sum')
        return nn.MSELoss()
    def configure_optimizers(self):
        """Adam or SGD per hparams, wrapped by util.set_schedule."""
        if self.hparams["optimizer"] == "Adam":
            optim = torch.optim.Adam(self.model.parameters(), lr=self.hparams["lr"])
        else:
            optim = torch.optim.SGD(
                self.model.parameters(),
                lr=self.hparams["lr"],
                momentum=self.hparams["momentum"],
            )
        # test this
        return util.set_schedule(self, optim)
    #def __dataloader(self, split):
    #    return get_dataloader(split, self.hparams)
    #def val_dataloader(self):
    #    return self.__dataloader("valid")
    #def train_dataloader(self):
    #    return self.__dataloader("train")
    #def test_dataloader(self):
    #    return self.__dataloader("test")
    @staticmethod
    def add_model_specific_args(parent_parser):
        """Register this model's CLI flags on a copy of *parent_parser*."""
        parser = ArgumentParser(parents=[parent_parser], add_help=False)
        parser.add_argument("--latent_dim", type=int, default=256)
        parser.add_argument("--scheduler", type=str, default="none")
        parser.add_argument("--reduction", type=str, default="mean")
        parser.add_argument("--normalize", type=util.str2bool, default=False)
        parser.add_argument("--sigmoid", type=util.str2bool, default=False)
        return parser
|
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
import os
# Number of corpus subsets to read (files out_ss_01.csv .. out_ss_10.csv).
numSubsets = 10
def compute_d_N(df):
    """Return (number of distinct words, total word count) from a word
    counter CSV whose final row holds the total.

    NOTE(review): the total is recovered by str()-formatting the tail
    Series and splitting on spaces, which depends on pandas' repr layout
    — fragile; something like df['i'].iloc[-1] would read it directly.
    Confirm the CSV layout before changing.
    """
    lastRow = df.tail(1)['i']
    lastRow = str(lastRow).split(" ")
    totalNumWords = int(lastRow[4])
    #print(lastRow)
    #print(totalNumWords)
    df = df[:-1] # Delete last row (contains total number of words)
    df = df.drop(df.columns[2], axis=1)
    # Each remaining row is one distinct word.
    totalDiffWords = len(df)
    return totalDiffWords, totalNumWords
def heaps_law(N, k, Beta):
    """Heaps' law: estimated vocabulary size d = k * N**Beta for a text
    of N total words."""
    vocabulary_size = k * N ** Beta
    return vocabulary_size
def main():
    """Fit Heaps' law to the per-subset (N, d) counts and plot the fit."""
    d = []  # distinct-word counts per subset
    N = []  # total word counts per subset
    for i in range(numSubsets):
        num_subset = (str(i + 1).zfill(2))
        counterCsv = "../files/out_ss_{}.csv".format(num_subset)
        ds, Ns = compute_d_N(pd.read_csv(counterCsv))
        d.append(ds)
        N.append(Ns)
    ##### CURVE FITTING #####
    xdata = N
    ydata = d
    print(xdata)
    print(ydata)
    popt, pcov = curve_fit(heaps_law, xdata, ydata)
    # NOTE(review): from here on `xdata` holds log(N); the "non-log" plot
    # below therefore evaluates heaps_law on LOGGED x while plotting
    # against raw N — this looks like a bug; confirm intended axes.
    xdata = np.log(xdata)
    fit = np.log(heaps_law(xdata, *popt))
    print("OPT: {}".format(popt))
    ##### LOG. PLOT #######
    plt.plot(xdata, fit, 'r-', label='')
    plt.legend(loc='lower right')
    plt.ylabel("Num. of diff words (d)")
    plt.xlabel("Total num. of words (N)")
    plt.title("Heaps law log-log plot")
    plt.show()
    ##### NON-LOG. PLOT #######
    fit2 = (heaps_law(xdata, *popt))
    plt.plot(N, fit2, 'r-', label='')
    plt.legend(loc='lower right')
    plt.ylabel("Num. of diff words (d)")
    plt.xlabel("Total num. of words (N)")
    plt.title("Heaps law plot")
    plt.show()
# Script entry point.
if __name__ == '__main__':
    main()
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from datetime import datetime
# Naver blog crawler: for each of 30 search keywords (3 per category),
# scrape up to 300 result pages from the last year and append the post
# titles/bodies to one text file per category.
category = ["소통", "팀워크", "도전", "능동", "성실", "정직", "인내심", "창의", "글로벌역량", "주인의식"]# 10 categories
keyword = ["소통", "대화", "교류",
"팀워크", "협력", "협동",
"도전", "모험", "용기",
"능동", "열정", "적극",
"성실", "근면", "충실",
"정직", "진실", "솔직",
"인내심", "끈기", "근성",
"창의", "창조", "독창",
"문화", "국제", "외국",
"책임", "의무", "자기주도"]# 30 keywords, 3 per category
# Compute today's date and the date exactly one year ago
today = datetime.today()
past = datetime(today.year-1, today.month, today.day)
today = today.strftime("%Y.%m.%d")
past = past.strftime("%Y.%m.%d")
baseUrl = "https://section.blog.naver.com/Search/Post.nhn"
baseUrl = baseUrl + "?startDate="+past+"&endDate="+today + "&rangeType=PERIOD"
driver = webdriver.Chrome("C:/Users/abcd_/workspace/chromedriver.exe")
driver.get(baseUrl)
for i in range(len(keyword)):
    # Build the output path (one file per category) and open in append mode
    filename = category[i//3]
    savePath = "C:/blog/" + filename + ".txt"
    saveFile = open(savePath, 'a', encoding='utf-8')
    # Clear the search box
    driver.find_element_by_name("sectionBlogQuery").clear()
    # Type the keyword into the search box
    elem = driver.find_element_by_name("sectionBlogQuery")
    elem.send_keys(keyword[i])
    # Click the search button
    elem = driver.find_element_by_xpath('//*[@id="header"]/div[1]/div/div[2]/form/fieldset/a[1]')
    elem.click()
    # Scrape 300 pages of blog results (each pass scrapes the page that is
    # currently loaded, then navigates to the next one)
    for page in range(1, 301):
        blogs = driver.find_elements_by_class_name("list_search_post")
        for blog in blogs:
            try:
                # Post title
                title = blog.find_element_by_class_name("title").text
                saveFile.write(title)
                print(title)
                # Post body text
                body = blog.find_element_by_class_name("text").text
                saveFile.write(body)
            except NoSuchElementException:
                print("Error")
                continue
        driver.get(baseUrl + "&pageNo=" + str(page+1)+"&keyword="+keyword[i])
    ###############
    saveFile.close()
|
class Solution(object):
    """LeetCode "Two Sum" solver."""
    def twoSum(self, nums, target):
        """Return a tuple (i, j), i < j, with nums[i] + nums[j] == target,
        or None when no pair sums to target. Single O(n) pass over nums,
        remembering each value's index in a dict."""
        seen = {}
        for idx, value in enumerate(nums):
            complement = target - value
            if complement in seen:
                return (seen[complement], idx)
            seen[value] = idx
if __name__ == '__main__':
    # Python 2 driver (print statement); expected output: index1=1, index2=2
    print "index1=%d, index2=%d" % Solution().twoSum((3, 2, 4), 6)
|
from django.db import models
from rest_framework import serializers
# Create your models here.
class OnlineUser(models.Model):
    """A logged-in device row in the legacy `kx_userlogin` table, keyed by
    its MAC address."""
    #owner = models.ForeignKey(KxUser, to_field='email', db_column='ower_email')
    # Device MAC address, used as the primary key.
    mac = models.CharField(max_length=100, primary_key=True)
    email = models.CharField(max_length=50)
    #nick = models.CharField(max_length=20, db_column='nick')
    lan_ip = models.CharField(max_length=50)
    wlan_ip = models.CharField(max_length=50)
    class Meta:
        db_table = 'kx_userlogin'
    def __unicode__(self):
        # Python 2 string representation (MAC identifies the row).
        return self.mac
class OnlineUserSerializer(serializers.ModelSerializer):
    """DRF serializer exposing the OnlineUser fields."""
    class Meta:
        model = OnlineUser
        fields = ('mac', 'lan_ip', 'wlan_ip', 'email')
|
# Program: act8.py
# Purpose: read a grade, an age and a sex and report whether the
#          applicant is accepted.
# Author: Jose Manuel Serrano Palomo.
# Date: 17/10/2019
#
# Variables:
#   nota (grade), edad (age) and sexo (sex) are entered by the user
#
# Algorithm:
#   READ nota, edad, sexo
#   if grade >= 5 and age >= 18 and sex is F -> print ACEPTADA (accepted)
#   if the same holds but sex is M -> print POSIBLE (possible)
#   otherwise -> print NO ACEPTADA (not accepted)
print("ver si esta aceptada o no")
print("---------------------\n")
# Read the inputs (Spanish prompts kept: they are user-facing strings)
nota = int(input("Introduce la nota: "))
edad = int(input("Introduce la edad: "))
sexo = str(input("Introduce el sexo: "))
# Apply the acceptance rules
if nota >= 5 and edad >= 18 and sexo == 'F':
    print("ACEPTADA")
elif nota >= 5 and edad >= 18 and sexo == 'M':
    print("POSIBLE")
else:
    print("NO ACEPTADA")
|
from onegov.org.utils import annotate_html, remove_empty_paragraphs
from onegov.form.fields import HtmlField as HtmlFieldBase
class HtmlField(HtmlFieldBase):
    """A textfield with html with integrated sanitation and annotation,
    cleaning the html and adding extra information including setting the
    image size by querying the database.
    """
    def pre_validate(self, form):
        # Run the base field's sanitation first, then annotate the HTML
        # (links, image sizes, ...) and strip any paragraphs that end up
        # empty after that pass.
        super(HtmlField, self).pre_validate(form)
        self.data = remove_empty_paragraphs(
            annotate_html(
                self.data, form.request
            )
        )
|
from Actor import Actor
#
# A Tree object
# Spawns fruit to eat
#
class Fruit(Actor):
    """An edible apple spawned by a Tree; rots away after 50 ticks and
    asks its tree to replenish the cell when it dies."""
    # Init
    def __init__(self, tree):
        Actor.__init__(self, "An apple", 0x147514)  # name + display colour
        self.ignoreBlocking = True  # does not block other actors' movement
        self.lifetime = 0           # ticks since this fruit appeared
        self.tree = tree            # owning Tree, asked to respawn on death
        self.edible = True
        self.hungerValue = 10       # hunger restored when eaten
        self.canPass = True
    #
    # Tick
    #
    def tick(self, world, tick):
        self.lifetime += 1
        # Go rotten after 50 ticks
        if self.lifetime > 50:
            self.kill()
    #
    # On kill, respawn
    #
    def kill(self):
        # NOTE(review): self.cell is never assigned in this class —
        # presumably set by the engine/Actor when placed in the world;
        # confirm before relying on it here.
        self.tree.doIn(15, "replenish", self.cell)
        Actor.kill(self)
import json
import os
import praw
import requests
from list_of_subreddits import SUBREDDITS
def set_environment_variables():
    """Load config.json from the working directory and export every
    key/value pair as a (string-valued) environment variable.

    Fix: dict.iteritems() is Python 2 only and raises AttributeError on
    Python 3, which the rest of this file targets; use items().
    """
    with open('config.json', 'rb') as f:
        environment_variables = json.loads(f.read())
    for key, value in environment_variables.items():
        os.environ[key] = str(value)
def handle():
    """Post the hottest submissions of each subreddit in SUBREDDITS to a
    Slack channel via an incoming webhook.

    All credentials/settings come from environment variables, which are
    first loaded out of config.json.
    """
    set_environment_variables()
    CLIENT_ID = os.getenv('CLIENT_ID')
    CLIENT_SECRET = os.getenv('CLIENT_SECRET')
    NUMBER_OF_SUBMISSIONS = int(os.getenv('NUMBER_OF_SUBMISSIONS'))
    PASSWORD = os.getenv('PASSWORD')
    SLACK_WEBHOOK_URL= os.getenv('SLACK_WEBHOOK_URL')
    SLACK_CHANNEL = os.getenv('SLACK_CHANNEL')
    USER_AGENT = os.getenv('USER_AGENT')
    USERNAME = os.getenv('USERNAME')
    reddit = praw.Reddit(client_id=CLIENT_ID,client_secret=CLIENT_SECRET,username=USERNAME,password=PASSWORD,user_agent=USER_AGENT)
    for subreddit_name in SUBREDDITS:
        subreddit = reddit.subreddit(str(subreddit_name))
        submission = subreddit.hot(limit=NUMBER_OF_SUBMISSIONS)
        for sub in submission:
            url = sub.url
            # unfurl_links makes Slack expand each posted URL into a preview.
            slack_payload = {'unfurl_links': True, 'channel': SLACK_CHANNEL}
            slack_payload['text'] = url
            requests.post(SLACK_WEBHOOK_URL, json=slack_payload)
handle()
|
# Prints each (a, b) pair whose members are equal.
# NOTE(review): `all_sqrt`, `max_num` and the initial `b = 1` are never
# used — the loop immediately rebinds a and b. They look like leftovers
# from a larger exercise; confirm before deleting.
all_sqrt = []
max_num = 9
b = 1
for a, b in [[1, 2], [1,1]]:
    if a == b:
        print(a, ' ', b)
# Association Rules
# NOTE(review): DataCamp-style exercise snippets; they assume `import
# numpy as np` and a one-hot DataFrame `books` with boolean columns
# 'Hunger', 'Potter', 'Twilight' defined elsewhere.
# Recommending books with support
# Compute support for Hunger and Potter
supportHP = np.logical_and(books['Hunger'], books['Potter']).mean()
# Compute support for Hunger and Twilight
supportHT = np.logical_and(books['Hunger'], books['Twilight']).mean()
# Compute support for Potter and Twilight
supportPT = np.logical_and(books['Potter'], books['Twilight']).mean()
# Print support values
print("Hunger Games and Harry Potter: %.2f" % supportHP)
print("Hunger Games and Twilight: %.2f" % supportHT)
# Refining support with confidence
# Compute support for Potter and Twilight
supportPT = np.logical_and(books['Potter'], books['Twilight']).mean()
# Compute support for Potter
supportP = books['Potter'].mean()
# Compute support for Twilight
supportT = books['Twilight'].mean()
# Compute confidence for both rules
confidencePT = supportPT / supportP
confidenceTP = supportPT / supportT
# Print results
print('{0:.2f}, {1:.2f}'.format(confidencePT, confidenceTP))
# Further refinement with lift
# Compute support for Potter and Twilight
supportPT = np.logical_and(books['Potter'], books['Twilight']).mean()
# Compute support for Potter
supportP = books['Potter'].mean()
# Compute support for Twilight
supportT = books['Twilight'].mean()
# Compute lift
lift = supportPT / (supportP * supportT)
# Print lift
print("Lift: %.2f" % lift)
# Computing conviction
# Compute support for Potter AND Hunger
supportPH = np.logical_and(books['Potter'], books['Hunger']).mean()
# Compute support for Potter
supportP = books['Potter'].mean()
# Compute support for NOT Hunger
supportnH = 1.0 - books['Hunger'].mean()
# Compute support for Potter and NOT Hunger
supportPnH = supportP - supportPH
# Compute and print conviction for Potter -> Hunger
# NOTE(review): this `conviction` variable is shadowed by the function of
# the same name defined just below.
conviction = supportP * supportnH / supportPnH
print("Conviction: %.2f" % conviction)
# Computing conviction with a function
def conviction(antecedent, consequent):
    """Conviction of the rule antecedent -> consequent.

    conviction = support(A) * support(not C) / support(A and not C),
    for boolean arrays/Series of equal length.
    """
    support_a = antecedent.mean()
    support_both = np.logical_and(antecedent, consequent).mean()
    support_not_c = 1.0 - consequent.mean()
    support_a_not_c = support_a - support_both
    return support_a * support_not_c / support_a_not_c
# Promoting ebooks with conviction
# NOTE(review): `twilight`, `potter` and `hunger` are boolean Series
# defined elsewhere in the lesson.
# Compute conviction for twilight -> potter and potter -> twilight
convictionTP = conviction(twilight, potter)
convictionPT = conviction(potter, twilight)
# Compute conviction for twilight -> hunger and hunger -> twilight
convictionTH = conviction(twilight, hunger)
convictionHT = conviction(hunger, twilight)
# Compute conviction for potter -> hunger and hunger -> potter
convictionPH = conviction(potter, hunger)
convictionHP = conviction(hunger,potter)
# Print results
# NOTE(review): the label says "Harry Potter -> Twilight" but convictionHT
# is hunger -> twilight — possible label/variable mismatch; confirm.
print('Harry Potter -> Twilight: ', convictionHT)
print('Twilight -> Potter: ', convictionTP)
# Computing association and dissociation
# Compute the support of Twilight and Harry Potter
supportT = books['Twilight'].mean()
supportP = books['Potter'].mean()
# Compute the support of both books
supportTP = np.logical_and(books['Twilight'], books['Potter']).mean()
# Complete the expressions for the numerator and denominator
numerator = supportTP - supportT*supportP
denominator = max(supportTP*(1-supportT), supportT*(supportP-supportTP))
# Compute and print Zhang's metric
zhang = numerator / denominator
print(zhang)
# Defining Zhang's metric
# Define a function to compute Zhang's metric
def zhang(antecedent, consequent):
    """Zhang's metric for the rule antecedent -> consequent, in [-1, 1]
    (positive = association, negative = dissociation)."""
    support_a = antecedent.mean()
    support_c = consequent.mean()
    support_both = np.logical_and(antecedent, consequent).mean()
    numerator = support_both - support_a * support_c
    denominator = max(support_both * (1 - support_a),
                      support_a * (support_c - support_both))
    return numerator / denominator
# Applying Zhang's metric
# NOTE(review): `itemsets` and the `rules` DataFrame come from earlier
# lesson steps not shown here.
# Define an empty list for Zhang's metric
zhangs_metric = []
# Loop over lists in itemsets
for itemset in itemsets:
    # Extract the antecedent and consequent columns
    antecedent = books[itemset[0]]
    consequent = books[itemset[1]]
    # Complete Zhang's metric and append it to the list
    zhangs_metric.append(zhang(antecedent, consequent))
# Print results
rules['zhang'] = zhangs_metric
print(rules)
# Filtering with support and conviction
# Preview the rules DataFrame using the .head() method
print(rules.head())
# Select the subset of rules with antecedent support greater than 0.05
rules = rules[rules['antecedent support'] > 0.05]
# Select the subset of rules with a consequent support greater than 0.01
rules = rules[rules['consequent support'] > 0.01]
# Select the subset of rules with a conviction greater than 1.01
rules = rules[rules['conviction'] > 1.01]
# Print remaining rules
print(rules)
# Using multi-metric filtering to cross-promote books
# Set the lift threshold to 1.5
rules = rules[rules['lift'] > 1.5]
# Set the conviction threshold to 1.0
rules = rules[rules['conviction']>1.0]
# Set the threshold for Zhang's rule to 0.65
rules = rules[rules['zhang']>0.65]
# Print rule
print(rules[['antecedents','consequents']])
# NOTE(review): supportPT was last reassigned far above; this trailing
# print looks displaced from the first exercise block.
print("Harry Potter and Twilight: %.2f" % supportPT)
#!/usr/bin/python
# Python 2 stdin filter: digits 0-4 become '<', digits 6-9 become '>'
# ('5' is left unchanged).
import sys, re
for line in sys.stdin.readlines():
    line = re.sub("[0-4]", "<", line)
    line = re.sub("[6-9]", ">", line)
    # NOTE(review): Python 2 `print` appends a newline on top of the one
    # already in `line`, double-spacing the output; sys.stdout.write(line)
    # was probably intended.
    print line
|
# Mad Libs
# Extended version
noun = input("Enter a noun: ")
verb = input("Enter a verb: ")
adjective = input("Enter an adjective: ")
adverb1 = input("Enter an adverb: ")
adverb2 = input("Enter another adverb: ")
print("Do you {0} your {1} {2} {3}? That's {4}!".format(verb, adjective, noun, adverb1, adverb2))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.