# --------------------------------------------------------------------------- #
# Network in Network, ICML2014, https://arxiv.org/abs/1312.4400
# pytorch implementation by Haiyang Liu (haiyangliu1997@gmail.com)
# --------------------------------------------------------------------------- #
import torch
import torch.nn as nn
import torch.nn.functional as F
__all__ = ['NIN']
class NIN(nn.Module):
def __init__(self):
super(NIN,self).__init__()
self.net_layers=nn.Sequential(
nn.Conv2d(3,96,11,4,0),
nn.ReLU(),
nn.Conv2d(96,96,1,1,0),
nn.ReLU(),
nn.Conv2d(96,96,1,1,0),
nn.ReLU(),
nn.Conv2d(96,256,5,1,2),
nn.ReLU(),
nn.Conv2d(256,256,1,1,0),
nn.ReLU(),
nn.Conv2d(256,256,1,1,0),
nn.ReLU(),
nn.MaxPool2d(2,2),
nn.Conv2d(256,384,3,1,1),
nn.ReLU(),
nn.Conv2d(384,384,1,1,0),
nn.ReLU(),
nn.Conv2d(384,384,1,1,0),
nn.ReLU(),
nn.MaxPool2d(2,2),
nn.Conv2d(384,1024,3,1,1),
nn.ReLU(),
nn.Conv2d(1024,1024,1,1,0),
nn.ReLU(),
nn.Conv2d(1024,1024,1,1,0),
nn.ReLU(),
nn.MaxPool2d(2,2),
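# NIN's hallmark: stacks of 1x1 "mlpconv" convolutions followed by global average pooling
# instead of fully-connected layers; the Softmax below acts over the 1024 channel dimension.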
nn.AdaptiveAvgPool2d((1,1)),
nn.Softmax(dim=1),
)
self.initialization()
def initialization(self):
_flag = 1
for layer in self.net_layers:
if isinstance(layer,nn.Conv2d) or isinstance(layer,nn.Linear):
nn.init.normal_(layer.weight, mean=0.0, std=0.01)
print('init layer:',_flag,' weight success')
if _flag == 1 or _flag == 3:
nn.init.constant_(layer.bias, 0.0)
print('init layer:',_flag,'bias to 0')
else:
nn.init.constant_(layer.bias, 1.0)
print('init layer:',_flag,'bias to 1')
_flag += 1
def forward(self,input_):
x = self.net_layers(input_)
return x
def _test():
from torchsummary import summary
model = NIN()
model = model.cuda()
summary(model,input_size=(3,224,224))
if __name__ == "__main__":
_test()
# ------------------------------- model summary ----------------------------- #
# Conv2d-21 [-1, 1024, 13, 13] 3,539,968
# ReLU-22 [-1, 1024, 13, 13] 0
# Conv2d-23 [-1, 1024, 13, 13] 1,049,600
# ReLU-24 [-1, 1024, 13, 13] 0
# Conv2d-25 [-1, 1024, 13, 13] 1,049,600
# ReLU-26 [-1, 1024, 13, 13] 0
# MaxPool2d-27 [-1, 1024, 6, 6] 0
# AdaptiveAvgPool2d-28 [-1, 1024, 1, 1] 0
# Softmax-29 [-1, 1024, 1, 1] 0
# ================================================================
# Total params: 7,619,776
# Trainable params: 7,619,776
# Non-trainable params: 0
# ----------------------------------------------------------------
# Input size (MB): 0.57
# Forward/backward pass size (MB): 69.94
# Params size (MB): 29.07
# Estimated Total Size (MB): 99.58
# ---------------------------------- end ------------------------------------ #
|
import math
n,m=[int(x) for x in raw_input().split(" ")]
listA=[int(x) for x in raw_input().split(" ")]
listM=[]
for i in range(m):
listM.append([int(x) for x in raw_input().split(" ")])
sum=0
Phi=(1+math.sqrt(5))/2.0
phi=(1-math.sqrt(5))/2.0
diff=Phi-phi
def NthFib(x):
return int((math.pow(Phi,x) - math.pow(phi,x))/diff)
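# NthFib evaluates Binet's closed form F(x) = (Phi**x - phi**x) / sqrt(5). Note that int()
# truncates rather than rounds, and floating-point error makes the result unreliable for
# large x (roughly x > 70).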
for i in range(m):
if listM[i][0]==1:
for j in range(listM[i][1],listM[i][2]+1):
listA[j-1]+=NthFib(j-listM[i][1]+1)
else:
for j in range(listM[i][1],listM[i][2]+1):
sum += listA[j-1]
print sum
sum=0
|
import csv
import re
from Levenshtein import distance
from .itm2utm import itm2geo
L_FACTOR = 3
F_CENTRES = ('datasets/Centres_of_Population_-_OSi_'
'National_Placenames_Gazetteer.csv')
F_TOWNLANDS = ('datasets/Townlands_-_OSi_'
'National_Placenames_Gazetteer.csv')
F_COUNTIES = ('datasets/Counties_-_OSi_National_Placenames_Gazetteer.csv')
def read_centres():
with open(F_CENTRES) as file:
reader = csv.DictReader(file)
yield from reader
def _read_townlands():
with open(F_TOWNLANDS) as file:
reader = csv.DictReader(file)
yield from reader
def read_counties():
with open(F_COUNTIES) as file:
reader = csv.DictReader(file)
yield from reader
def read_townlands():
mult = ' or '
output = []
for item in _read_townlands():
if mult in item['English_Name']:
words = item['English_Name'].split(mult)
new = dict(item)
item['English_Name'] = words[0].strip()
output += [item]
new['English_Name'] = words[1].strip()
output += [new]
else:
output += [item]
return output
def cleanup(term):
term = term.upper()
term = term.strip()
terms = r"[^a-zA-Z0-9, ]|^CO\.? | PO$"
term = re.sub(terms, ' ', term, flags=re.I)
return term.strip()
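# Illustrative example: cleanup('Co. Dublin PO') -> 'DUBLIN' (uppercase, then strip a leading
# "CO." county prefix, a trailing " PO", and punctuation).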
ds_centres = read_centres()
ds_townlands = read_townlands()
ds_counties = read_counties()
comparers = [
(ds_centres, 'centre'),
(ds_townlands, 'town'),
(ds_counties, 'countie'),
]
def assemble_comparison(english_name, county, item):
compare_result = {
'query_english_name': english_name,
'query_county': county,
'item_english_name': item['English_Name'].upper(),
'item_county': item['County'].upper(),
'fullitem': item
}
compare_result['cdist'] = distance(
compare_result['item_county'], compare_result['query_county']
)
compare_result['edist'] = distance(
compare_result['item_english_name'],
compare_result['query_english_name']
)
compare_result['equals'] = \
compare_result['cdist'] < L_FACTOR and \
compare_result['edist'] < L_FACTOR
compare_result['exact'] = \
not compare_result['cdist'] and not compare_result['edist']
compare_result['distance'] = \
compare_result['cdist'] + compare_result['edist']
return compare_result
def base_filter(english_name, county, dataset):
compare_result = map(
lambda item: assemble_comparison(
english_name,
county,
item
),
dataset
)
filter_result = filter(
lambda item: item['equals'],
compare_result
)
sorted_result = sorted(
filter_result,
key=lambda item: item['distance']
)
return list(sorted_result)
def extract_prefered_addresses(filtereds):
exact = list(
filter(
lambda item: item['exact'],
filtereds
)
)
if exact:
return exact
return filtereds
def serialize(address_list, level):
for item in address_list:
item['geo'] = itm2geo(
float(item['fullitem']['ITM_E']),
float(item['fullitem']['ITM_N'])
)
item['level'] = level
return address_list
def geocode(query):
query = query.split(',')
query = [cleanup(item) for item in query]
# search for a full address.
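# Compare each query component, starting from the rightmost, against each gazetteer dataset
# (centres, townlands, counties), always using the last component as the county hint.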
for i in reversed(range(len(query))):
for dataset, level in comparers:
base_filtereds = base_filter(query[i], query[-1], dataset)
filtereds = extract_prefered_addresses(base_filtereds)
if filtereds:
return serialize(filtereds, level)
return []
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.objects.qos import policy
_QOS_POLICY_CLS = policy.QosPolicy
_VALID_CLS = (
_QOS_POLICY_CLS,
)
_VALID_TYPES = [cls.obj_name() for cls in _VALID_CLS]
# Supported types
QOS_POLICY = _QOS_POLICY_CLS.obj_name()
_TYPE_TO_CLS_MAP = {
QOS_POLICY: _QOS_POLICY_CLS,
}
def get_resource_type(resource_cls):
if not resource_cls:
return None
if not hasattr(resource_cls, 'obj_name'):
return None
return resource_cls.obj_name()
def is_valid_resource_type(resource_type):
return resource_type in _VALID_TYPES
def get_resource_cls(resource_type):
return _TYPE_TO_CLS_MAP.get(resource_type)
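# e.g. get_resource_type(policy.QosPolicy) returns the same string as QOS_POLICY, and
# is_valid_resource_type(QOS_POLICY) is True.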
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
import re
# Set of strings to exclude
exclude_words = set()
def split_space(text):
'''
Extract words from the specified text.
@param text: unicode
@return: iter
'''
words = re.split(u"\s+", text.strip())
for word in words:
if word not in exclude_words:
yield word
def ngram(text, n=2):
'''
Decompose the specified text into N-grams.
@param text: unicode
'''
cur = 0
while(len(text) >= cur + n):
yield text[cur:n+cur]
cur += 1
|
train_results_root_folder = "/change_this_placeholder_folder" # For example "/Users/your-user/train-results"
trained_using_aws_spot_instance = False
|
r"""
Utilities (:mod:`meshless.utils`)
=================================
"""
import numpy as np
def area_of_polygon(x, y):
"""Area of an arbitrary 2D polygon given its vertices
"""
area = 0.0
for i in range(-1, len(x)-1):
area += x[i] * (y[i+1] - y[i-1])
return abs(area) / 2.0
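# Illustrative check (shoelace formula): the unit square x=[0, 1, 1, 0], y=[0, 0, 1, 1]
# gives area_of_polygon(x, y) == 1.0.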
def unitvec(vector):
"""Return the unit vector
"""
return vector / np.linalg.norm(vector)
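# e.g. unitvec(np.array([3.0, 4.0])) -> array([0.6, 0.8])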
def getMid(elem):
"""Get mid xyz coordinates given a pyNastran Element
"""
return elem.get_node_positions().mean(axis=0)
|
import unittest
import json
from .common import ApiTestBase, compat_mock
class CollectionsTests(ApiTestBase):
"""Tests for CollectionsEndpointsMixin."""
@staticmethod
def init_all(api):
return [
{
'name': 'test_create_collection',
'test': CollectionsTests('test_create_collection', api),
},
{
'name': 'test_create_collection_mock',
'test': CollectionsTests('test_create_collection_mock', api),
},
{
'name': 'test_collection_feed',
'test': CollectionsTests('test_collection_feed', api),
},
{
'name': 'test_edit_collection',
'test': CollectionsTests('test_edit_collection', api),
},
{
'name': 'test_edit_collection_mock',
'test': CollectionsTests('test_edit_collection_mock', api),
},
{
'name': 'test_delete_collection',
'test': CollectionsTests('test_delete_collection', api),
},
{
'name': 'test_delete_collection_mock',
'test': CollectionsTests('test_delete_collection_mock', api),
},
]
def test_collection_feed(self):
results = self.api.list_collections()
self.assertTrue(results.get('items'), 'No collections')
first_collection_id = results['items'][0]['collection_id']
results = self.api.collection_feed(first_collection_id)
self.assertEqual(results.get('status'), 'ok')
self.assertEqual(str(results.get('collection_id', '')), first_collection_id)
self.assertIsNotNone(results.get('items'))
@unittest.skip('Modifies data.')
def test_create_collection(self):
name = 'A Collection'
results = self.api.create_collection(name)
self.assertEqual(results.get('status'), 'ok')
self.assertIsNotNone(results.get('collection_id'))
self.assertEqual(results.get('collection_name'), name)
@compat_mock.patch('instapi.Client._call_api')
def test_create_collection_mock(self, call_api):
name = 'A Collection'
call_api.return_value = {
'status': 'ok',
'collection_id': 123,
'collection_name': name,
}
media_ids = ['1495028858729943288_25025320']
params = {
'name': name,
'added_media_ids': json.dumps(media_ids, separators=(',', ':')),
}
params.update(self.api.authenticated_params)
self.api.create_collection(name, media_ids)
call_api.assert_called_with('collections/create/', params=params)
self.api.create_collection(name, media_ids[0])
call_api.assert_called_with('collections/create/', params=params)
@unittest.skip('Modifies data.')
def test_edit_collection(self):
results = self.api.list_collections()
self.assertTrue(results.get('items'), 'No collections')
first_collection_id = results['items'][0]['collection_id']
results = self.api.edit_collection(
first_collection_id, ['1495028858729943288_25025320']
)
self.assertEqual(results.get('status'), 'ok')
self.assertIsNotNone(results.get('collection_id'))
@compat_mock.patch('instapi.Client._call_api')
def test_edit_collection_mock(self, call_api):
collection_id = 123
call_api.return_value = {
'status': 'ok',
'collection_id': collection_id,
'collection_name': 'A Collection',
}
media_ids = ['1495028858729943288_25025320']
params = {'added_media_ids': json.dumps(media_ids, separators=(',', ':'))}
params.update(self.api.authenticated_params)
self.api.edit_collection(collection_id, media_ids)
call_api.assert_called_with(
'collections/{collection_id!s}/edit/'.format(
**{'collection_id': collection_id}
),
params=params,
)
self.api.edit_collection(collection_id, media_ids[0])
call_api.assert_called_with(
'collections/{collection_id!s}/edit/'.format(
**{'collection_id': collection_id}
),
params=params,
)
@unittest.skip('Modifies data.')
def test_delete_collection(self):
results = self.api.list_collections()
self.assertTrue(results.get('items'), 'No collections')
first_collection_id = results['items'][0]['collection_id']
results = self.api.delete_collection(first_collection_id)
self.assertEqual(results.get('status'), 'ok')
@compat_mock.patch('instapi.Client._call_api')
def test_delete_collection_mock(self, call_api):
collection_id = 123
call_api.return_value = {'status': 'ok'}
self.api.delete_collection(collection_id)
call_api.assert_called_with(
'collections/{collection_id!s}/delete/'.format(
**{'collection_id': collection_id}
),
params=self.api.authenticated_params,
)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Cidade',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('nome', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Endereco',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('logradouro', models.CharField(max_length=255)),
('complemento', models.CharField(max_length=255, null=True, blank=True)),
('bairro', models.CharField(max_length=100)),
('cep', models.CharField(max_length=9, blank=True)),
('cidade', models.ForeignKey(to='localizacao.Cidade')),
],
options={
'verbose_name': 'Endere\xe7o',
'verbose_name_plural': 'Endere\xe7os',
},
),
migrations.CreateModel(
name='Pais',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('nome', models.CharField(unique=True, max_length=100)),
('sigla', models.CharField(unique=True, max_length=2)),
],
options={
'verbose_name': 'Pa\xeds',
'verbose_name_plural': 'Pa\xedses',
},
),
migrations.CreateModel(
name='Uf',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('nome', models.CharField(unique=True, max_length=100)),
('sigla', models.CharField(unique=True, max_length=2)),
('pais', models.ForeignKey(verbose_name=b'Pa\xc3\xads', to='localizacao.Pais')),
],
options={
'verbose_name': 'Uf',
'verbose_name_plural': 'Ufs',
},
),
migrations.AddField(
model_name='cidade',
name='uf',
field=models.ForeignKey(to='localizacao.Uf'),
),
]
|
#!/usr/bin/env /data/mta/Script/Python3.8/envs/ska3-shiny/bin/python
#################################################################################################
# #
# find_cron_records.py:reads cron job file and find newly recorded error message of each job #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# Last Update: Oct 27, 2021 #
# #
#################################################################################################
import sys
import os
import string
import re
import getpass
import socket
import random
import time
import Chandra.Time
import datetime
import codecs
#
#--- reading directory list
#
path = '/data/mta/Script/Cron_check/house_keeping/dir_list_py'
with open(path, 'r') as f:
data = [line.strip() for line in f.readlines()]
for ent in data:
atemp = re.split(':', ent)
var = atemp[1].strip()
line = atemp[0].strip()
exec("%s = %s" %(var, line))
#
#--- append paths to private folders to the python path
#
sys.path.append(bin_dir)
sys.path.append(mta_dir)
#
#--- check whose account, and set a path to temp location
#
user = getpass.getuser()
user = user.strip()
#
#---- find host machine name
#
machine = socket.gethostname()
machine = machine.strip()
atemp = re.split('\.', machine)
machine = atemp[0]
#
#--- possible machine names and user name lists
#
cpu_list = ['colossus-v', 'c3po-v', 'r2d2-v', 'boba-v']
usr_list = ['mta', 'cus']
cpu_usr_list = ['colossus-v_mta', 'r2d2-v_mta', 'r2d2-v_cus', 'c3po-v_mta', 'c3po-v_cus',\
'boba-v_mta', 'boba-v_cus']
#
#--- temp writing file name
#
rtail = int(time.time() * random.random())
zspace = '/tmp/zspace' + str(rtail)
#
#--- a list of error signatures
#
elist = ['error', 'cannot', 'permission denied', 'not found', 'failed', 'invalid',\
'out of range', 'undefined',"Can't Access", "Execution halted",\
"Unable to connect to remote host", "UnboundLocalError" , "Illegal division by zero"]
#
#--- a list of non-error signatures (to be ignored)
#
nlist = ['cleartool', 'file exists', 'cannot remove', 'cannot stat', '\/usr\/bin\/du']
#--------------------------------------------------------------------------------------------
#-- check_cron_errors: reads cron job file and find newly recorded error message of each job
#--------------------------------------------------------------------------------------------
def check_cron_records():
"""
driving script: reads cron job file and find newly recorded error message of each job
Input: none, but uses the cron job listing for the <user> on <machine>
it also reads <house_keeping>/Records/<machine>_<user> for the past record
the errors are read from /home/<user>/Logs/xxx.cron files
Output: <house_keeping>/Records/<machine>_<user> --- updated
<house_keeping>/Records/<machine>_<user>_error_list --- a list of the errors
"""
#
#--- setup a record file name depending on the user and the machine
#
cfile = house_keeping + 'Records/' + machine + '_' + user
#
#--- if there is a past record, read it
#
if os.path.isfile(cfile):
[pname, ptime, psize] = get_prev_data(cfile)
#
#--- move the previous record
#
cmd = 'mv ' + cfile + ' ' + cfile + '~'
os.system(cmd)
lname = extract_cron_file_name()
[cname, ctime, csize] = update_record_file(cfile, lname)
#
#--- find error messages and create error list
#
compare_and_find(cname, ctime, csize, pname, ptime, psize)
#
#--- if this is the first time, just create a record file
#
else:
#
#--- create a list of cron jobs
#
lname = extract_cron_file_name()
#
#--- find the last update time and the file size of the files in the list
#
[cname, ctime, csize] = update_record_file(cfile, lname)
#---------------------------------------------------------------------------------------------
#-- update_record_file: for each cron job, find the last updated time and the current file length
#---------------------------------------------------------------------------------------------
def update_record_file(cfile, lname):
"""
for each cron job, find the last updated time and the current file length (in line #)
Input: cfile --- output file name <house_keeping>/Records/<machine>_<user>
lname --- a list of cron jobs
Output: cfile --- an updated recorded file in <house_keeping>/Records/
cname --- a list of the current file names
ctime --- a list of the last updated time of each cron job
csize --- a list of the current file length in line # of each cron record file
"""
cname = []
ctime = []
csize = []
sline = ''
for ent in lname:
ifile = '/home/' + user + '/Logs/' + ent
if os.path.isfile(ifile):
time = modification_date(ifile)
fsize = file_length(ifile)
cname.append(ent)
ctime.append(time)
csize.append(fsize)
sline = sline + ifile + ' : '+ str(time) + ' : ' + str(fsize) + '\n'
if sline != '':
with open(cfile, 'w') as fo:
fo.write(sline)
return [cname, ctime, csize]
#---------------------------------------------------------------------------------------------
#-- get_prev_data: read the last recorded data from <house_keeping>/<machine>_<user> ---
#---------------------------------------------------------------------------------------------
def get_prev_data(cfile):
"""
read the last recorded data from <house_keeping>/<machine>_<user>
Input: cfile --- the input file name: <house_keeping>/<machine>_<user>
Output: jname --- a list of the cron job names
jtime --- a list of the last updated time of each cron job
jsize --- a list of the file size of each cron job
"""
prev = read_data_file(cfile)
jname = []
jtime = []
jsize = []
for ent in prev:
atemp = re.split(' : ', ent)
try:
val = int(float(atemp[1]))
val2 = int(float(atemp[2]))
jname.append(atemp[0])
jtime.append(val)
jsize.append(val2)
except:
continue
return [jname, jtime, jsize]
#--------------------------------------------------------------------------------------------------
#--- extract_cron_file_name: extract cron error message file names for the current user/machine ---
#--------------------------------------------------------------------------------------------------
def extract_cron_file_name():
"""
extract cron error message file names for the current user/machine
output: cron_file_name: a list of cron file names (file names only, no directory path)
"""
try:
cmd = 'crontab -l >' + zspace
os.system(cmd)
data = read_data_file(zspace, remove=1)
except:
exit(1)
cron_file_name = []
for ent in data:
m = re.search('Logs', ent)
if m is not None and ent[0] != '#':
try:
atemp = re.split('Logs/', ent)
btemp = re.split('2>&1', atemp[1])
cron = btemp[0]
cron = cron.strip()
cron_file_name.append(cron)
except:
continue
#
#--- removing duplicated lines
#
cron_file_name = list(set(cron_file_name))
return cron_file_name
#--------------------------------------------------------------------------------------------------
#-- modification_date: find the time the file was modified ---
#--------------------------------------------------------------------------------------------------
def modification_date(filename):
"""
find the time the file was modified
http://stackoverflow.com/questions/237079/how-to-get-file-creation-modification-date-times-in-python
Input: filename --- file name
Output: time --- integer time stamp of the form YYYYMMDDHHMMSS (see convert_time_format)
"""
t = os.path.getmtime(filename)
ltime = datetime.datetime.fromtimestamp(t)
return convert_time_format(ltime)
#--------------------------------------------------------------------------------------------------
#-- convert_time_format: convert time from datetime format to an integer time stamp (YYYYMMDDHHMMSS) ---
#--------------------------------------------------------------------------------------------------
def convert_time_format(ltime):
"""
convert time format from datetime format to an integer time stamp
Input: ltime --- time from datetime.datetime function
Output: stime --- integer time stamp of the form YYYYMMDDHHMMSS
(the Chandra.Time conversion to seconds from 1998.1.1 is commented out below)
"""
line = ltime.strftime("%Y%m%d%H%M%S")
#stime = Chandra.Time.DateTime(line).secs
stime = int(float(line))
return stime
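# e.g. datetime.datetime(2021, 10, 27, 3, 15, 0) -> 20211027031500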
#--------------------------------------------------------------------------------------------------
#-- file_length: find a file length in line number --
#--------------------------------------------------------------------------------------------------
def file_length(filename):
"""
find a file length in line number
Input: filename --- inputfile name
Output: length --- the file length in line number
"""
data = read_data_file(filename)
return len(data)
#-------------------------------------------------------------------------------------------------
#-- compare_and_find: compare the current cron job output to find new error messages
#--------------------------------------------------------------------------------------------------
def compare_and_find(cname, ctime, csize, pname, ptime, psize):
"""
compare the current cron job output to that of the past and find new error messages
(we assume that the cron files accumulate, so that any new portion indicated
by line # is the new record)
Input: cname --- a list of the cron job record output file names
ctime --- a list of the latest update time of each cron job
csize --- a list of the latest size of the cron job record output file
pname --- a list of the cron job record output file names in the last recorded file
ptime --- a list of the last update time of each cron job
psize --- a list of the last size of the cron job record output file
Output: a file <house_keeping>/Records/<machine>_<user>_error_list
"""
file_changed = []
for i in range(0, len(cname)):
for j in range(0, len(pname)):
m = re.search(cname[i], pname[j])
if m is not None:
if ctime[i] > ptime[j]:
if csize[i] <= psize[j]:
#
#--- if the file size is the same as before or smaller, we assume that the file was replaced
#
error = check_for_error(pname[j])
if error != 'na':
record_error(cname[i], ctime[i], error)
#
#--- if the file size is larger this round, then new output from the cron job
#--- has been appended to the old file
#
if csize[i] > psize[j]:
error = check_for_error(pname[j], start = int(psize[j]))
if error != 'na':
record_error(cname[i], ctime[i], error)
break
#-------------------------------------------------------------------------------------------------
#-- check_for_error: find errors reported in the file. check only newly appended part --
#--------------------------------------------------------------------------------------------------
def check_for_error(ifile, start=0):
"""
find errors reported in the file. check only newly appended part
Input: ifile --- cron job output file name
start --- the last line of the last record. if this is not given,
check from the beginning
Output: error_list --- a list of error messages collected from the file
"""
#
#--- ignore idl processes
#
mc = re.search('idl', ifile)
if mc is not None:
return 'na'
data = read_data_file(ifile)
error_list = []
for i in range(start, len(data)):
ent = data[i]
lent = ent.lower()
#
#--- check whether the line contains error signatures
#
#
#--- ignore some idl specific error messages
#
ms1 = re.search('arithmetic', lent)
ms2 = re.search('underflow', lent)
if (ms1 is not None) and (ms2 is not None):
continue
ms3 = re.search('% Type conversion error', lent)
if (ms3 is not None):
continue
#
#--- ignore 'ls ' error
#
ms4 = re.search('ls: cannot access', lent)
if ms4 is not None:
continue
#
#--- ignore 'image header' error
#
ms5 = re.search('improper image header', lent)
if ms5 is not None:
continue
ms8 = re.search('convert: no images defined', lent)
if ms8 is not None:
continue
#
#--- ignore 'warning'
#
ms6 = re.search('RuntimeWarning', lent)
if ms6 is not None:
continue
chk = 0
for test in elist:
m = re.search(test, lent)
if m is not None:
chk = 1
break
#
#--- check for non-significant errors (to be ignored)
#
if chk == 1:
for test in nlist:
m = re.search(test, lent)
if m is not None:
chk = 0
break
if chk > 0:
error_list.append(ent)
error_list = list(set(error_list))
if len(error_list) == 0:
return 'na'
else:
return error_list
#-----------------------------------------------------------------------------------------------
#-- record_error: append the error messages in the error record file ---
#-----------------------------------------------------------------------------------------------
def record_error(fname, stime, error_list):
"""
append the error messages in the error record file
Input: fname --- a cron job output file name
stime --- the time of the last updated
error_list --- a list of the error detected from the file
Output: <house_keeping>/Records/<machine>_<user>_error_list
--- the file contains error messages
"""
ofile = house_keeping + 'Records/' + machine + '_' + user + '_error_list'
with open(ofile, 'a') as fo:
for ent in error_list:
line = fname + ' : ' + str(stime) + ' : ' + ent + "\n"
fo.write(line)
#--------------------------------------------------------------------------
#-- read_data_file: read a data file and create a data list --
#--------------------------------------------------------------------------
def read_data_file(ifile, remove=0, ctype='r'):
"""
read a data file and create a data list
input: ifile --- input file name
remove --- if > 0, remove the file after reading it
ctype --- reading type such as 'r' or 'rb'
output: data--- a list of data
"""
#
#--- if a file specified does not exist, return an empty list
#
if not os.path.isfile(ifile):
return []
try:
with open(ifile, ctype) as f:
data = [line.strip() for line in f.readlines()]
except:
with codecs.open(ifile, ctype, encoding='utf-8', errors='ignore') as f:
data = [line.strip() for line in f.readlines()]
#
#--- if asked, remove the file after reading it
#
if remove > 0:
cmd = 'rm -rf ' + ifile
os.system(cmd)
return data
#------------------------------------------------------------------------------------------------
if __name__ == '__main__':
check_cron_records()
|
def bubble(lst):
length = len(lst) - 1
result = []
no_swap = False
while not no_swap:
no_swap = True
for a in range(length):
if lst[a] > lst[a + 1]:
lst[a], lst[a + 1] = lst[a + 1], lst[a]
result.append(list(lst))
no_swap = False
return result
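# Illustrative usage: each snapshot records the list state right after a swap, ending with
# the sorted order, e.g. bubble([3, 1, 2]) -> [[1, 3, 2], [1, 2, 3]].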
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from textwrap import dedent # noqa: PNT20
from pants.backend.helm.util_rules.chart_metadata import DEFAULT_API_VERSION, ChartType
def gen_chart_file(
name: str,
*,
version: str,
description: str | None = None,
type: ChartType = ChartType.APPLICATION,
api_version: str = DEFAULT_API_VERSION,
icon: str | None = None,
) -> str:
metadata_yaml = dedent(
f"""\
apiVersion: {api_version}
name: {name}
version: {version}
type: {type.value}
"""
)
if description:
metadata_yaml += f"description: {description}\n"
if icon:
metadata_yaml += f"icon: {icon}\n"
return metadata_yaml
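# For example, gen_chart_file("mychart", version="0.1.0", description="A demo chart") returns
# the four metadata lines above plus a trailing "description: A demo chart" line.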
HELM_CHART_FILE = gen_chart_file("mychart", version="0.1.0")
HELM_CHART_WITH_DEPENDENCIES_FILE = dedent(
"""\
apiVersion: v2
name: mychart
description: A Helm chart for Kubernetes
version: 0.1.0
icon: https://www.example.com/icon.png
dependencies:
- name: other_chart
repository: "@myrepo"
version: "~0.1.0"
alias: dependency_alias
"""
)
HELM_CHART_FILE_V1_FULL = dedent(
"""\
name: foo
version: 0.1.0
kubeVersion: 1.17
description: The foo chart
keywords:
- foo
- chart
home: https://example.com
sources:
- https://example.com/git
dependencies:
- name: bar
version: 0.2.0
repository: https://example.com/repo
condition: bar.enabled
tags:
- foo
- bar
import-values:
- data
alias: bar-alias
maintainers:
- name: foo
email: bar@example.com
url: https://example.com/foo
icon: https://example.com/icon.png
appVersion: 0.1.0
deprecated: true
annotations:
example: yes
name: foo
"""
)
HELM_CHART_FILE_V2_FULL = dedent(
"""\
apiVersion: v2
name: quxx
version: 0.1.0
kubeVersion: 1.17
description: The foo chart
type: library
keywords:
- foo
- chart
home: https://example.com
sources:
- https://example.com/git
dependencies:
- name: bar
version: 0.2.0
repository: https://example.com/repo
condition: bar.enabled
tags:
- foo
- bar
import-values:
- data
alias: bar-alias
maintainers:
- name: foo
email: bar@example.com
url: https://example.com/foo
icon: https://example.com/icon.png
appVersion: 0.1.0
deprecated: true
annotations:
example: yes
name: quxx
"""
)
K8S_SERVICE_TEMPLATE = dedent(
"""\
apiVersion: v1
kind: Service
metadata:
name: {{ template "fullname" . }}
labels:
chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.externalPort }}
targetPort: {{ .Values.service.internalPort }}
protocol: TCP
name: {{ .Values.service.name }}
selector:
app: {{ template "fullname" . }}
"""
)
K8S_INGRESS_TEMPLATE_WITH_LINT_WARNINGS = dedent(
"""\
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: {{ template "fullname" . }}
labels:
chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
spec:
rules:
- host: example.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: {{ template "fullname" . }}
port:
name: http
"""
)
K8S_POD_TEMPLATE = dedent(
"""\
apiVersion: v1
kind: Pod
metadata:
name: {{ template "fullname" . }}
labels:
chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
spec:
containers:
- name: myapp-container
image: busybox:1.28
initContainers:
- name: init-service
image: busybox:1.29
"""
)
K8S_POD_FILE = dedent(
"""\
apiVersion: v1
kind: Pod
metadata:
name: foo
labels:
chart: foo-bar
spec:
containers:
- name: myapp-container
image: busybox:1.28
initContainers:
- name: init-service
image: busybox:1.29
"""
)
K8S_CRD_FILE = dedent(
"""\
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
# name must match the spec fields below, and be in the form: <plural>.<group>
name: myplatforms.contoso.com
spec:
# group name to use for REST API: /apis/<group>/<version>
group: contoso.com
names:
# plural name to be used in the URL: /apis/<group>/<version>/<plural>
plural: myplatforms
# singular name to be used as an alias on the CLI and for display
singular: myplatform
# kind is normally the CamelCased singular type. Your resource manifests use this.
kind: MyPlatform
# shortNames allow shorter string to match your resource on the CLI
shortNames:
- myp
# either Namespaced or Cluster
scope: Namespaced
versions:
- name: v1alpha1
# Each version can be enabled/disabled by Served flag.
served: true
# One and only one version must be marked as the storage version.
storage: true
schema:
openAPIV3Schema:
type: object
properties:
spec:
type: object
properties:
appId:
type: string
language:
type: string
enum:
- csharp
- python
- go
os:
type: string
enum:
- windows
- linux
instanceSize:
type: string
enum:
- small
- medium
- large
environmentType:
type: string
enum:
- dev
- test
- prod
replicas:
type: integer
minimum: 1
required: ["appId", "language", "environmentType"]
required: ["spec"]
"""
)
K8S_CUSTOM_RESOURCE_FILE = dedent(
"""\
apiVersion: myplatforms.contoso.com/v1alpha1
kind: MyPlatform
metadata:
name: cr_foo
spec:
appId: foo
language: python
environmentType: test
"""
)
HELM_TEMPLATE_HELPERS_FILE = dedent(
"""\
{{- define "fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
"""
)
HELM_VALUES_FILE = dedent(
"""\
service:
name: test
type: ClusterIP
externalPort: 80
internalPort: 1223
"""
)
HELM_BATCH_HOOK_TEMPLATE = dedent(
"""\
apiVersion: batch/v1
kind: Job
metadata:
name: "{{ .Release.Name }}"
labels:
app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
app.kubernetes.io/instance: {{ .Release.Name | quote }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
annotations:
# This is what defines this resource as a hook. Without this line, the
# job is considered part of the release.
"helm.sh/hook": post-install
"helm.sh/hook-weight": "-5"
"helm.sh/hook-delete-policy": hook-succeeded
spec:
template:
metadata:
name: "{{ .Release.Name }}"
labels:
app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
app.kubernetes.io/instance: {{ .Release.Name | quote }}
helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
spec:
restartPolicy: Never
containers:
- name: post-install-job
image: "alpine:3.3"
command: ["/bin/sleep","{{ default "10" .Values.sleepyTime }}"]
"""
)
|
name=[]
age=[]
gender=[]
location=[]
flag=0
no= int(input("Enter number of customers :"))
for i in range(no):
n=input("Enter name : ")
name.append(n)
a=input("Enter age : ")
age.append(a)
g=input("Enter gender : ")
gender.append(g)
l=input("Enter location : ")
location.append(l)
s = input("Enter loacation to search for : ")
for i in range(no):
if(location[i]==s):
flag=1
print(name[i])
print(age[i])
print(gender[i])
else:
flag=0
if(flag==0):
print("No customer found in location")
|
import numpy as np
import tensorflow as tf
from tf_util.tf_util import get_array_str
import dsn.util.tf_integrals as tfi
from dsn.util.tf_langevin import bounded_langevin_dyn, bounded_langevin_dyn_np
import dsn.util.np_integrals as npi
import os
DTYPE = tf.float64
def rank1_spont_static_solve(
mu_init, delta_0_init, g, Mm, Mn, Sm, num_its, eps, gauss_quad_pts=50
):
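# Mean-field self-consistency solve for a rank-1 spontaneous network: relax the fixed-point
# equations for the mean field mu and variance delta_0 with a bounded Langevin-like update
# (presumably following the framework cited below as "M&O 2018").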
# convergence equations used for the Langevin-like dynamics solver
def f(x):
mu = x[:, 0]
delta_0 = x[:, 1]
Phi = tfi.Phi(mu, delta_0, num_pts=gauss_quad_pts)
PhiSq = tfi.PhiSq(mu, delta_0, num_pts=gauss_quad_pts)
F = Mm * Mn * Phi
H = (g ** 2) * PhiSq + (Sm ** 2) * (Mn ** 2) * Phi ** 2
return tf.stack([F, H], axis=1)
x_init = tf.stack([mu_init, delta_0_init], axis=1)
non_neg = [False, True]
xs_end = bounded_langevin_dyn(f, x_init, eps, num_its, non_neg)
mu = xs_end[:, 0]
delta_0 = xs_end[:, 1]
return mu, delta_0
def rank1_spont_chaotic_solve(
mu_init,
delta_0_init,
delta_inf_init,
g,
Mm,
Mn,
Sm,
num_its,
eps,
gauss_quad_pts=50,
db=False,
):
# convergence equations used for the Langevin-like dynamics solver
def f(x):
mu = x[:, 0]
delta_0 = x[:, 1]
delta_inf = x[:, 2]
Phi = tfi.Phi(mu, delta_0, num_pts=gauss_quad_pts)
PrimSq = tfi.PrimSq(mu, delta_0, num_pts=gauss_quad_pts)
IntPrimPrim = tfi.IntPrimPrim(mu, delta_0, delta_inf, num_pts=gauss_quad_pts)
IntPhiPhi = tfi.IntPhiPhi(mu, delta_0, delta_inf, num_pts=gauss_quad_pts)
F = Mm * Mn * Phi
G_squared = delta_inf ** 2 + 2 * (
(g ** 2) * (PrimSq - IntPrimPrim)
+ (Mn ** 2) * (Sm ** 2) * (Phi ** 2) * (delta_0 - delta_inf)
)
G = tf.sqrt(tf.nn.relu(G_squared))
H = (g ** 2) * IntPhiPhi + (Mn ** 2) * (Sm ** 2) * (Phi ** 2)
return tf.stack([F, G, H], axis=1)
x_init = tf.stack([mu_init, delta_0_init, delta_inf_init], axis=1)
non_neg = [False, True, True]
if db:
xs_end, xs = bounded_langevin_dyn(f, x_init, eps, num_its, non_neg, db=db)
else:
xs_end = bounded_langevin_dyn(f, x_init, eps, num_its, non_neg, db=db)
mu = xs_end[:, 0]
delta_0 = xs_end[:, 1]
delta_inf = xs_end[:, 2]
if db:
return mu, delta_0, delta_inf, xs
else:
return mu, delta_0, delta_inf
def rank1_input_chaotic_solve(
mu_init,
kappa_init,
delta_0_init,
delta_inf_init,
g,
Mm,
Mn,
MI,
Sm,
Sn,
SmI,
SnI,
Sperp,
num_its,
eps,
gauss_quad_pts=50,
db=False,
):
square_diff_init = (tf.square(delta_0_init) - tf.square(delta_inf_init)) / 2.0
SI_squared = (SmI ** 2 / Sm ** 2) + (SnI ** 2) / (Sn ** 2) + Sperp ** 2
# convergence equations used for the Langevin-like dynamics solver
def f(x):
mu = x[:, 0]
kappa = x[:, 1]
square_diff = x[:, 2]
delta_inf = x[:, 3]
delta_0 = tf.sqrt(2 * square_diff + tf.square(delta_inf))
Phi = tfi.Phi(mu, delta_0, num_pts=gauss_quad_pts)
Prime = tfi.Prime(mu, delta_0, num_pts=gauss_quad_pts)
PrimSq = tfi.PrimSq(mu, delta_0, num_pts=gauss_quad_pts)
IntPrimPrim = tfi.IntPrimPrim(mu, delta_0, delta_inf, num_pts=gauss_quad_pts)
IntPhiPhi = tfi.IntPhiPhi(mu, delta_0, delta_inf, num_pts=gauss_quad_pts)
F = Mm * kappa + MI # mu
G = Mn * Phi + SnI * Prime
H = tf.square(g) * (PrimSq - IntPrimPrim) + (
tf.square(Sm) * tf.square(kappa) + 2 * SmI * kappa + SI_squared
) * (delta_0 - delta_inf)
I = (
tf.square(g) * IntPhiPhi
+ tf.square(Sm) * tf.square(kappa)
+ 2 * SmI * kappa
+ SI_squared
)
return tf.stack([F, G, H, I], axis=1)
x_init = tf.stack([mu_init, kappa_init, square_diff_init, delta_inf_init], axis=1)
non_neg = [False, False, True, True]
if db:
xs_end, xs = bounded_langevin_dyn(f, x_init, eps, num_its, non_neg, db=db)
else:
xs_end = bounded_langevin_dyn(f, x_init, eps, num_its, non_neg, db=db)
mu = xs_end[:, 0]
kappa = xs_end[:, 1]
square_diff = xs_end[:, 2]
delta_inf = xs_end[:, 3]
delta_0 = tf.sqrt(2 * square_diff + tf.square(delta_inf))
if db:
return mu, kappa, delta_0, delta_inf, xs
else:
return mu, kappa, delta_0, delta_inf
def rank2_CDD_static_solve(
kappa1_init,
kappa2_init,
delta_0_init,
cA,
cB,
g,
rhom,
rhon,
betam,
betan,
gammaA,
gammaB,
num_its,
eps,
gauss_quad_pts=50,
db=False,
):
# Use equations 159 and 160 from M&O 2018
SI = 1.2
Sy = 1.2
# convergence equations used for the Langevin-like dynamics solver
def f(x):
kappa1 = x[:, 0]
kappa2 = x[:, 1]
delta_0 = x[:, 2]
mu = tf.zeros((1,), dtype=DTYPE)
Prime = tfi.Prime(mu, delta_0, num_pts=gauss_quad_pts)
PhiSq = tfi.PhiSq(mu, delta_0, num_pts=gauss_quad_pts)
F = (
rhom * rhon * kappa1
+ betam * betan * (kappa1 + kappa2)
+ cA * (SI ** 2)
+ rhon * gammaA
) * Prime
G = (
rhom * rhon * kappa2
+ betam * betan * (kappa1 + kappa2)
+ cB * (SI ** 2)
+ rhon * gammaB
) * Prime
H = (g ** 2) * PhiSq
H += ((Sy ** 2) + tf.square(betam)) * (tf.square(kappa1) + tf.square(kappa2))
H += (
(SI ** 2) * (cA ** 2 + cB ** 2)
+ tf.square(rhom * kappa1 + gammaA)
+ tf.square(rhom * kappa2 + gammaB)
)
return tf.stack([F, G, H], axis=1)
x_init = tf.stack([kappa1_init, kappa2_init, delta_0_init], axis=1)
non_neg = [False, False, True]
if db:
xs_end, xs = bounded_langevin_dyn(f, x_init, eps, num_its, non_neg, db=db)
else:
xs_end = bounded_langevin_dyn(f, x_init, eps, num_its, non_neg, db=db)
kappa1 = xs_end[:, 0]
kappa2 = xs_end[:, 1]
delta_0 = xs_end[:, 2]
mu = tf.zeros((1,), dtype=DTYPE)
Prime = tfi.Prime(mu, delta_0, num_pts=gauss_quad_pts)
z = betam * (kappa1 + kappa2) * Prime
if db:
return kappa1, kappa2, delta_0, z, xs
else:
return kappa1, kappa2, delta_0, z
def rank2_CDD_chaotic_solve(
kappa1_init,
kappa2_init,
delta_0_init,
delta_inf_init,
cA,
cB,
g,
rhom,
rhon,
betam,
betan,
gammaA,
gammaB,
num_its,
eps,
gauss_quad_pts=50,
db=False,
):
SI = 1.2
Sy = 1.2
SyA = Sy
SyB = Sy
SIA = SI
SIB = SI
SIctxA = 1.0
SIctxB = 1.0
Sw = 1.0
Sm1 = SyA + rhom * SIctxA + betam * Sw
Sm2 = SyB + rhom * SIctxB + betam * Sw
square_diff_init = (tf.square(delta_0_init) - tf.square(delta_inf_init)) / 2.0
# convergence equations used for the Langevin-like dynamics solver
def f(x):
kappa1 = x[:, 0]
kappa2 = x[:, 1]
square_diff = x[:, 2]
delta_inf = x[:, 3]
mu = tf.zeros((1,), dtype=DTYPE)
delta_0 = tf.sqrt(2 * square_diff + tf.square(delta_inf))
Prime = tfi.Prime(mu, delta_0, num_pts=gauss_quad_pts)
PrimSq = tfi.PrimSq(mu, delta_0, num_pts=gauss_quad_pts)
IntPrimPrim = tfi.IntPrimPrim(mu, delta_0, delta_inf, num_pts=gauss_quad_pts)
IntPhiPhi = tfi.IntPhiPhi(mu, delta_0, delta_inf, num_pts=gauss_quad_pts)
noise_corr = (
(Sw ** 2 + tf.square(betam)) * (tf.square(kappa1) + tf.square(kappa2))
+ (SI ** 2) * (cA ** 2 + cB ** 2)
+ tf.square(rhom * kappa1 + gammaA)
+ tf.square(rhom * kappa2 + gammaB)
)
F = (
rhom * rhon * kappa1
+ betam * betan * (kappa1 + kappa2)
+ cA * SI
+ rhon * gammaA
) * Prime
G = (
rhom * rhon * kappa2
+ betam * betan * (kappa1 + kappa2)
+ cB * SI
+ rhon * gammaB
) * Prime
H = tf.square(g) * (PrimSq - IntPrimPrim) + noise_corr * (delta_0 - delta_inf)
I = tf.square(g) * IntPhiPhi + noise_corr
return tf.stack([F, G, H, I], axis=1)
x_init = tf.stack(
[kappa1_init, kappa2_init, square_diff_init, delta_inf_init], axis=1
)
non_neg = [False, False, True, True]
if db:
xs_end, xs = bounded_langevin_dyn(f, x_init, eps, num_its, non_neg, db=db)
else:
xs_end = bounded_langevin_dyn(f, x_init, eps, num_its, non_neg, db=db)
kappa1 = xs_end[:, 0]
kappa2 = xs_end[:, 1]
square_diff = xs_end[:, 2]
delta_inf = xs_end[:, 3]
mu = tf.zeros((1,), dtype=DTYPE)
delta_0 = tf.sqrt(2 * square_diff + tf.square(delta_inf))
Prime = tfi.Prime(mu, delta_0, num_pts=gauss_quad_pts)
z = betam * (kappa1 + kappa2) * Prime
if db:
return kappa1, kappa2, delta_0, delta_inf, z, xs
else:
return kappa1, kappa2, delta_0, delta_inf, z
def rank1_spont_static_solve_np(mu_init, delta_0_init, g, Mm, Mn, Sm, num_its, eps):
def f(x):
mu = x[:, 0]
delta_0 = x[:, 1]
Phi = npi.Phi(mu, delta_0)
PhiSq = npi.PhiSq(mu, delta_0)
F = Mm * Mn * Phi
H = (g ** 2) * PhiSq + (Sm ** 2) * (Mn ** 2) * Phi ** 2
return np.stack([F, H], axis=1)
x_init = np.stack([mu_init, delta_0_init], axis=1)
non_neg = [False, True]
xs_end = bounded_langevin_dyn_np(f, x_init, eps, num_its, non_neg)
mu = xs_end[:, 0]
delta_0 = xs_end[:, 1]
return mu, delta_0
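# Note: mirrors rank1_spont_static_solve above, but uses np_integrals and
# bounded_langevin_dyn_np so the same fixed-point solve can run in plain NumPy,
# outside a TensorFlow graph.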
def rank1_input_chaotic_solve_np(
mu_init,
kappa_init,
delta_0_init,
delta_inf_init,
g,
Mm,
Mn,
MI,
Sm,
Sn,
SmI,
SnI,
Sperp,
num_its,
eps,
gauss_quad_pts=50,
db=False,
):
square_diff_init = (np.square(delta_0_init) - np.square(delta_inf_init)) / 2.0
SI_squared = (SmI ** 2 / Sm ** 2) + (SnI ** 2) / (Sn ** 2) + Sperp ** 2
# convergence equations used for the Langevin-like dynamics solver
def f(x):
mu = x[:, 0]
kappa = x[:, 1]
square_diff = x[:, 2]
delta_inf = x[:, 3]
delta_0 = np.sqrt(2 * square_diff + np.square(delta_inf))
Phi = npi.Phi(mu, delta_0, num_pts=gauss_quad_pts)
Prime = npi.Prime(mu, delta_0, num_pts=gauss_quad_pts)
PrimSq = npi.PrimSq(mu, delta_0, num_pts=gauss_quad_pts)
IntPrimPrim = npi.IntPrimPrim(mu, delta_0, delta_inf, num_pts=gauss_quad_pts)
IntPhiPhi = npi.IntPhiPhi(mu, delta_0, delta_inf, num_pts=gauss_quad_pts)
F = Mm * kappa + MI # mu
G = Mn * Phi + SnI * Prime
H = np.square(g) * (PrimSq - IntPrimPrim) + (
np.square(Sm) * np.square(kappa) + 2 * SmI * kappa + SI_squared
) * (delta_0 - delta_inf)
I = (
np.square(g) * IntPhiPhi
+ np.square(Sm) * np.square(kappa)
+ 2 * SmI * kappa
+ SI_squared
)
return np.stack([F, G, H, I], axis=1)
x_init = np.stack([mu_init, kappa_init, square_diff_init, delta_inf_init], axis=1)
non_neg = [False, False, True, True]
if db:
xs_end, xs = bounded_langevin_dyn_np(f, x_init, eps, num_its, non_neg, db=db)
else:
xs_end = bounded_langevin_dyn_np(f, x_init, eps, num_its, non_neg, db=db)
mu = xs_end[:, 0]
kappa = xs_end[:, 1]
square_diff = xs_end[:, 2]
delta_inf = xs_end[:, 3]
delta_0 = np.sqrt(2 * square_diff + np.square(delta_inf))
if db:
return mu, kappa, delta_0, delta_inf, xs
else:
return mu, kappa, delta_0, delta_inf
def rank2_CDD_static_solve_np(
kappa1_init,
kappa2_init,
delta_0_init,
cA,
cB,
g,
rhom,
rhon,
betam,
betan,
gammaA,
gammaB,
num_its,
eps,
num_pts=200,
db=False,
):
# Use equations 159 and 160 from M&O 2018
SI = 1.2
Sy = 1.2
# convergence equations used for the Langevin-like dynamics solver
def f(x):
kappa1 = x[:, 0]
kappa2 = x[:, 1]
delta_0 = x[:, 2]
mu = np.zeros((1,))
Prime = npi.Prime(mu, delta_0, num_pts=num_pts)
PhiSq = npi.PhiSq(mu, delta_0, num_pts=num_pts)
F = (
rhom * rhon * kappa1
+ betam * betan * (kappa1 + kappa2)
+ cA * (SI ** 2)
+ rhon * gammaA
) * Prime
G = (
rhom * rhon * kappa2
+ betam * betan * (kappa1 + kappa2)
+ cB * (SI ** 2)
+ rhon * gammaB
) * Prime
H = (g ** 2) * PhiSq
H += ((Sy ** 2) + np.square(betam)) * (np.square(kappa1) + np.square(kappa2))
H += (
(SI ** 2) * (cA ** 2 + cB ** 2)
+ np.square(rhom * kappa1 + gammaA)
+ np.square(rhom * kappa2 + gammaB)
)
return np.stack([F, G, H], axis=1)
x_init = np.stack([kappa1_init, kappa2_init, delta_0_init], axis=1)
non_neg = [False, False, True]
if db:
xs_end, xs = bounded_langevin_dyn_np(f, x_init, eps, num_its, non_neg, db=db)
else:
xs_end = bounded_langevin_dyn_np(f, x_init, eps, num_its, non_neg, db=db)
kappa1 = xs_end[:, 0]
kappa2 = xs_end[:, 1]
delta_0 = xs_end[:, 2]
mu = np.zeros((1,))
Prime = npi.Prime(mu, delta_0)
z = betam * (kappa1 + kappa2) * Prime
if db:
return kappa1, kappa2, delta_0, z, xs
else:
return kappa1, kappa2, delta_0, z
def warm_start(system):
assert system.name == "LowRankRNN"
ws_filename = get_warm_start_dir(system)
print(ws_filename)
ws_its = 1500
if not os.path.isfile(ws_filename):
rank = system.model_opts["rank"]
behavior_type = system.behavior["type"]
if rank == 2 and behavior_type == "CDD":
cAs = [0, 1]
cBs = [0, 1]
a = system.a
b = system.b
step = system.warm_start_grid_step
grid_vals_list = []
nvals = []
j = 0
for i in range(len(system.all_params)):
param = system.all_params[i]
if param in system.free_params:
vals = np.arange(a[j], b[j] + step, step)
j += 1
else:
vals = np.array([system.fixed_params[param]])
grid_vals_list.append(vals)
nvals.append(vals.shape[0])
m = np.prod(np.array(nvals))
grid = np.array(np.meshgrid(*grid_vals_list))
grid = np.reshape(grid, (len(system.all_params), m))
solution_grids = np.zeros((2, 2, m, 3))
for cA in cAs:
for cB in cBs:
_cA = cA * np.ones((m,))
_cB = cB * np.ones((m,))
kappa1_init = -5.0 * np.ones((m,))
kappa2_init = -4.0 * np.ones((m,))
delta0_init = 2.0 * np.ones((m,))
kappa1, kappa2, delta_0, z, xs = rank2_CDD_static_solve_np(
kappa1_init,
kappa2_init,
delta0_init,
_cA,
_cB,
grid[0],
grid[1],
grid[2],
grid[3],
grid[4],
grid[5],
grid[6],
ws_its,
system.solve_eps,
num_pts=50,
db=True,
)
solution_grids[cA, cB] = np.stack((kappa1, kappa2, delta_0), axis=1)
elif rank == 1 and behavior_type == "BI":
step = system.warm_start_grid_step
a = system.a
b = system.b
free_param_inds = []
grid_vals_list = []
nvals = []
j = 0
for i in range(len(system.all_params)):
param = system.all_params[i]
if param in system.free_params:
vals = np.arange(a[j], b[j] + step, step)
free_param_inds.append(i)
j += 1
else:
vals = np.array([system.fixed_params[param]])
grid_vals_list.append(vals)
nvals.append(vals.shape[0])
print("nvals", nvals)
m = np.prod(np.array(nvals))
print("m", m)
grid = np.array(np.meshgrid(*grid_vals_list))
grid = np.reshape(grid, (len(system.all_params), m))
mu_init = 5.0 * np.ones((m,))
kappa_init = 5.0 * np.ones((m,))
delta_0_init = 5.0 * np.ones((m,))
delta_inf_init = 4.0 * np.ones((m,))
mu, kappa, delta_0, delta_inf, xs = rank1_input_chaotic_solve_np(
mu_init,
kappa_init,
delta_0_init,
delta_inf_init,
grid[0],
grid[1],
grid[2],
grid[3],
grid[4],
grid[5],
grid[6],
grid[7],
grid[8],
ws_its,
system.solve_eps,
gauss_quad_pts=50,
db=True,
)
solution_grid = np.stack((mu, kappa, delta_0, delta_inf), axis=1)
np.savez(
ws_filename,
param_grid=grid[free_param_inds, :],
solution_grid=solution_grid,
xs=xs,
)
else:
print("Already warm_started.")
print(ws_filename)
npzfile = np.load(ws_filename)
xs = npzfile["xs"]
return ws_filename, xs
def get_warm_start_dir(system):
rank = system.model_opts["rank"]
type = system.behavior["type"]
a_str = get_array_str(system.a)
b_str = get_array_str(system.b)
step = system.warm_start_grid_step
ws_dir = "data/warm_starts/"
if not os.path.isdir(ws_dir):
os.makedirs(ws_dir)
ws_filename = ws_dir + "rank%d_%s_a=%s_b=%s_step=%.2E.npz" % (
rank,
type,
a_str,
b_str,
step,
)
return ws_filename
|
from city import City
from population import Population
from tour import Tour
from tour_manager import TourManager
import algorithm
def parse_args():
import argparse
parser = argparse.ArgumentParser(description='Using Genetic Algorithm to solve Traveling Salesman Problem.')
parser.add_argument('-g', '--generations',
help='Number of generations to iterate',
dest='generations',
required=True,
type=int)
parser.add_argument('-n', '--num-cities',
help='Number of cities',
dest='num_cities',
required=True,
type=int)
parser.add_argument('-p', '--population-size',
help='Population size',
dest='population_size',
required=True,
type=int)
args = parser.parse_args()
num_cities = args.num_cities
population_size = args.population_size
generations = args.generations
return generations, num_cities, population_size
def main():
generations, num_cities, population_size = parse_args()
tour_manager = TourManager()
tour_manager.create_tour_cities(num_cities)
population = Population(population_size, True, tour_manager)
print "Initial distance = %d" % (population.get_fittest().get_distance(),)
for i in range(0, generations):
population = algorithm.evolve_population(population)
print "generation=%d shortest_distance=%d" % (i, population.get_fittest().get_distance())
fittest = population.get_fittest()
print """
Finished
Final distance=%d
Solution
--------
%s
""" % (fittest.get_distance(), fittest)
if __name__ == '__main__':
main()
|
import cv2 as cv
import numpy as np
def nothing(position):
print(position)
img = np.zeros((300, 512, 3), np.uint8)
cv.namedWindow('image')
cv.createTrackbar('B', 'image', 0, 255, nothing)
cv.createTrackbar('G', 'image', 0, 255, nothing)
cv.createTrackbar('R', 'image', 0, 255, nothing)
switch = '0 : OFF\n 1 : ON'
cv.createTrackbar(switch, 'image', 0, 1, nothing)
while True:
cv.imshow('image', img)
key = cv.waitKey(1) & 0xFF
blue = cv.getTrackbarPos('B', 'image')
green = cv.getTrackbarPos('G', 'image')
red = cv.getTrackbarPos('R', 'image')
switch_bar = cv.getTrackbarPos(switch, 'image')
if switch_bar == 0:
img[:] = 0
else:
img[:] = [blue, green, red]
if key == 27:
break
cv.destroyAllWindows()
|
from flask import render_template, request
from app import app
import flask_mobility.decorators as mobdec
from scriptMaps import n1_MapaclassMaxPB
from scriptDivCompare import Compareclass
from scriptPacotes import n1_PacotesclassMaxPB
from pprint import pprint
from dbmongo import *
from datetime import *
import time
@app.route('/')
@app.route('/index')
def index():
return render_template("untitled2teste.html")
################################################################### MAPA #######################################
@app.route('/maps')
def n0MapaMaxPB():
listEquipment=""
try:
a = Compareclass()
listEquipment = a.leitura()
except:
print "Error"
a = 0
return render_template("n0_tabMapMaxPB.html", listEquipment=listEquipment)
@app.route('/n1_Maps')
def n1MapaMaxPB():
lista=""
try:
serial1 = request.args['serial1']
limiteinferior = request.args['linf']
limitesuperior = request.args['lsup']
anoS, mesS, diaS, horaS, minutoS, segundoS = limitesuperior.split(':')
anoI, mesI, diaI, horaI, minutoI, segundoI = limiteinferior.split(':')
dtsup = datetime(int(anoS),int(mesS),int(diaS),int(horaS),int(minutoS),int(segundoS))
dtinf = datetime(int(anoI),int(mesI),int(diaI), int(horaI),int(minutoI),int(segundoI))
a = n1_MapaclassMaxPB(serial1,dtsup ,dtinf)
print "01"
lista = a.leitura()
except:
print "Error"
a = 0
return render_template("n1_Maps.html", lista=lista)
################################################################### PACOTES ######################################
@app.route('/pacotesgprs')
def n0PacotesMaxPB():
listEquipment=""
try:
a = Compareclass()
listEquipment = a.leitura()
except:
print "Error"
a = 0
return render_template("n0_tabPacotesMaxPB.html", listEquipment=listEquipment)
@app.route('/n1_Pacotes')
def n1PacotesMaxPB():
lista=""
try:
serial1 = request.args['serial1']
limiteinferior = request.args['linf']
limitesuperior = request.args['lsup']
anoS, mesS, diaS, horaS, minutoS, segundoS = limitesuperior.split(':')
anoI, mesI, diaI, horaI, minutoI, segundoI = limiteinferior.split(':')
dtsup = datetime(int(anoS),int(mesS),int(diaS),int(horaS),int(minutoS),int(segundoS))
dtinf = datetime(int(anoI),int(mesI),int(diaI), int(horaI),int(minutoI),int(segundoI))
a = n1_PacotesclassMaxPB(serial1,dtsup ,dtinf)
lista = a.leitura()
except:
print "Error"
a = 0
return render_template("n1_Pacotes.html", lista=lista)
|
import solver
from ..translators.caesartranslator import *
from ..keygenerators.numberkeygenerator import *
from ..keygenerators.keygenerator import *
from ..scorers.czechscorer import *
class BruteForceSolver(solver.Solver):
"""Tries out all possible solutions"""
def __init__(self, keyGenerator=NumberKeyGenerator(), translator=CaesarTranslator(), scorer=CzechScorer(), quiet=False):
"""keyGenerator can be either KeyGenerator or iterable, to silence text output use quiet"""
solver.Solver.__init__(self, keyGenerator, translator, scorer)
if (quiet):
self.printer = lambda *x: None
self.lastPrint = lambda *x: None
def solve(self, text=None, return_all_keys=False):
best = (0.0, None)
all_keys = []
gen = self.keyGenerator
if (isinstance(self.keyGenerator, KeyGenerator)): # otherwise iterable
gen = self.keyGenerator.getAllKeys()
for key in gen:
score, ciphered_text = self.score(key, text)
if (return_all_keys):
all_keys.append((score, key))
self.printer(key, score, ciphered_text)
if (score > best[0]):
best = (score, key)
self.lastPrint(best[1], best[0], self.score(best[1], text)[1])
if (return_all_keys):
return sorted(all_keys, key=lambda x: -x[0])
return best
def lastPrint(self, key, score, text=None):
print
print "=====Best Solution====="
print "Score:", score
print "Key:", "".join(key)
print "Text:", text
def setKeyGenerator(self, keyGenerator):
self.keyGenerator = keyGenerator
def setStartingPoint(self, startingPoint):
raise NotImplementedError()
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012, Clément MATHIEU
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import base64
import bs4
import datetime
import errno
from infoqscraper import client
from infoqscraper import utils
import os
import re
import shutil
import subprocess
import tempfile
import urllib
def get_summaries(client, filter=None):
""" Generate presentation summaries in a reverse chronological order.
A filter class can be supplied to filter summaries or bound the fetching process.
"""
try:
index = 0
while True:
rb = _RightBarPage(client, index)
summaries = rb.summaries()
if filter is not None:
summaries = filter.filter(summaries)
for summary in summaries:
yield summary
index += len(summaries)
except StopIteration:
pass
class MaxPagesFilter(object):
""" A summary filter set an upper bound on the number fetched pages"""
def __init__(self, max_pages):
self.max_pages = max_pages
self.page_count = 0
def filter(self, presentation_summaries):
if self.page_count >= self.max_pages:
raise StopIteration
self.page_count += 1
return presentation_summaries
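# Usage sketch (not part of the original module; `iq_client` is a hypothetical,
# already-configured infoqscraper client): bound the scraping to two
# /rightbar pages while iterating over the summaries.
#
#   for summary in get_summaries(iq_client, filter=MaxPagesFilter(2)):
#       print summary['title'], summary['url']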
class Presentation(object):
""" An InfoQ presentation.
"""
def __init__(self, client, id):
self.client = client
self.id = id
self.soup = self._fetch()
def _fetch(self):
"""Download the page and create the soup"""
url = client.get_url("/presentations/" + self.id)
content = self.client.fetch_no_cache(url).decode('utf-8')
return bs4.BeautifulSoup(content, "html.parser")
@property
def metadata(self):
def get_title(pres_div):
return pres_div.find('h1', class_="general").div.get_text().strip()
def get_date(pres_div):
str = pres_div.find('span', class_='author_general').contents[2]
str = str.replace(u'\n', u' ')
str = str.replace(u'\xa0', u' ')
str = str.split("on ")[-1]
str = str.strip()
return datetime.datetime.strptime(str, "%b %d, %Y")
def get_author(pres_div):
return pres_div.find('span', class_='author_general').contents[1].get_text().strip()
def get_timecodes(pres_div):
for script in pres_div.find_all('script'):
mo = re.search("TIMES\s?=\s?new\s+Array.?\((\d+(,\d+)+)\)", script.get_text())
if mo:
return [int(tc) for tc in mo.group(1).split(',')]
def get_slides(pres_div):
for script in pres_div.find_all('script'):
mo = re.search("var\s+slides\s?=\s?new\s+Array.?\(('.+')\)", script.get_text())
if mo:
return [client.get_url(slide.replace('\'', '')) for slide in mo.group(1).split(',')]
def get_video(pres_div):
for script in pres_div.find_all('script'):
mo = re.search('var jsclassref = \'(.*)\';', script.get_text())
if mo:
b64 = mo.group(1)
path = base64.b64decode(b64)
# Older presentations use flv and the video path does not contain
# the extension. Newer presentations use mp4 and include the extension.
if path.endswith(".mp4"):
return "mp4:%s" % path
elif path.endswith(".flv"):
return "flv:%s" % path[:-4]
else:
raise Exception("Unsupported video type: %s" % path)
def get_bio(div):
return div.find('p', id="biotext").get_text(strip=True)
def get_summary(div):
return "".join(div.find('p', id="summary").get_text("|", strip=True).split("|")[1:])
def get_about(div):
return div.find('p', id="conference").get_text(strip=True)
def add_pdf_if_exist(metadata, pres_div):
# The markup is not the same if authenticated or not
form = pres_div.find('form', id="pdfForm")
if form:
metadata['pdf'] = client.get_url('/pdfdownload.action?filename=') + urllib.quote(form.input['value'], safe='')
else:
a = pres_div.find('a', class_='link-slides')
if a:
metadata['pdf'] = client.get_url(a['href'])
def add_mp3_if_exist(metadata, bc3):
# The markup is not the same if authenticated or not
form = bc3.find('form', id="mp3Form")
if form:
metadata['mp3'] = client.get_url('/mp3download.action?filename=') + urllib.quote(form.input['value'], safe='')
else:
a = bc3.find('a', class_='link-mp3')
if a:
metadata['mp3'] = client.get_url(a['href'])
if not hasattr(self, "_metadata"):
pres_div = self.soup.find('div', class_='presentation_full')
metadata = {
'url': client.get_url("/presentations/" + self.id),
'title': get_title(pres_div),
'date' : get_date(pres_div),
'auth' : get_author(pres_div),
'timecodes': get_timecodes(self.soup),
'slides': get_slides(self.soup),
'video_url': "rtmpe://video.infoq.com/cfx/st/",
'video_path': get_video(self.soup),
'bio': get_bio(pres_div),
'summary': get_summary(pres_div),
'about': get_about(pres_div),
}
add_mp3_if_exist(metadata, pres_div)
add_pdf_if_exist(metadata, pres_div)
self._metadata = metadata
return self._metadata
class Downloader(object):
def __init__(self, presentation, ffmpeg="ffmpeg", rtmpdump="rtmpdump", swfrender="swfrender"):
self.presentation = presentation
self.ffmpeg = ffmpeg
self.rtmpdump = rtmpdump
self.swfrender = swfrender
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
shutil.rmtree(self.tmp_dir)
@property
def tmp_dir(self):
if not hasattr(self, "_tmp_dir"):
self._tmp_dir = tempfile.mkdtemp(prefix="infoq")
return self._tmp_dir
@property
def _audio_path(self):
return os.path.join(self.tmp_dir, "audio.ogg")
@property
def _video_path(self):
return os.path.join(self.tmp_dir, 'video.avi')
def create_presentation(self, output_path=None):
""" Create the presentation.
The audio track is mixed with the slides. The resulting file is saved as output_path
DownloadFailedException is raised if some resources cannot be fetched.
"""
try:
audio = self.download_mp3()
except client.DownloadError:
audio = self.download_video()
raw_slides = self.download_slides()
# Convert slides into JPG since ffmpeg does not support SWF
jpg_slides = self._convert_slides(raw_slides)
# Create one frame per second using the timecode information
frame_pattern = self._prepare_frames(jpg_slides)
# Now Build the video file
output = self._assemble(audio, frame_pattern, output=output_path)
return output
def download_video(self, output_path=None):
"""Downloads the video.
If self.client.cache_enabled is True, then the disk cache is used.
Args:
output_path: Where to save the video if not already cached. A
file in temporary directory is used if None.
Returns:
            The path where the video has been saved. Note that it may differ from
            output_path if the video was served from the cache.
Raises:
DownloadFailedException: If the video cannot be downloaded.
"""
rvideo_path = self.presentation.metadata['video_path']
if self.presentation.client.cache:
video_path = self.presentation.client.cache.get_path(rvideo_path)
if not video_path:
video_path = self.download_video_no_cache(output_path=output_path)
self.presentation.client.cache.put_path(rvideo_path, video_path)
else:
video_path = self.download_video_no_cache(output_path=output_path)
return video_path
def download_video_no_cache(self, output_path=None):
"""Downloads the video.
Args:
output_path: Where to save the video. A file in temporary directory is
used if None.
Returns:
The path where the video has been saved.
Raises:
DownloadFailedException: If the video cannot be downloaded.
"""
video_url = self.presentation.metadata['video_url']
video_path = self.presentation.metadata['video_path']
if not output_path:
output_path = self._video_path
try:
cmd = [self.rtmpdump, '-q', '-r', video_url, '-y', video_path, "-o", output_path]
utils.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
try:
os.unlink(output_path)
except OSError:
pass
raise client.DownloadError("Failed to download video at %s: rtmpdump exited with %s" % (video_url, e.returncode))
return output_path
def download_slides(self, output_dir=None):
""" Download all SWF slides.
If output_dir is specified slides are downloaded at this location. Otherwise the
tmp_dir is used. The location of the slides files are returned.
        A DownloadFailedException is raised if at least one of the slides cannot be downloaded.
"""
if not output_dir:
output_dir = self.tmp_dir
return self.presentation.client.download_all(self.presentation.metadata['slides'], output_dir)
def download_mp3(self, output_path=None):
""" Download the audio track.
If output_path is specified the audio track is downloaded at this location. Otherwise
the tmp_dir is used. The location of the file is returned.
A DownloadFailedException is raised if the file cannot be downloaded.
"""
if not output_path:
output_path = self._audio_path
dir = os.path.dirname(output_path)
filename = os.path.basename(output_path)
return self.presentation.client.download(self.presentation.metadata['mp3'], dir, filename=filename)
def _assemble(self, audio, frame_pattern, output=None):
if not output:
output = os.path.join(self.tmp_dir, "output.avi")
try:
# Try to be compatible as much as possible with old ffmpeg releases (>= 0.7)
# - Do not use new syntax options
# - Do not use libx264, not available on old Ubuntu/Debian
# - Do not use -threads auto, not available on 0.8.*
# - Old releases are very picky regarding arguments position
#
# 0.5 (Debian Squeeze & Ubuntu 10.4) is not supported because of
# scaling issues with image2.
cmd = [
self.ffmpeg, "-v", "0",
"-i", audio,
"-f", "image2", "-r", "1", "-s", "hd720","-i", frame_pattern,
"-map", "1:0", "-acodec", "libmp3lame", "-ab", "128k",
"-map", "0:1", "-vcodec", "mpeg4", "-vb", "2M",
output
]
utils.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
raise Exception("Failed to create final movie as %s.\n"
"\tExit code: %s\n"
"\tOutput:\n%s"
% (output, e.returncode, e.output))
return output
def _convert_slides(self, slides):
swf_render = utils.SwfConverter(swfrender_path=self.swfrender)
def convert(slide):
if slide.endswith("swf"):
return swf_render.to_jpeg(slide)
elif slide.endswith("jpg"):
return slide
else:
raise Exception("Unsupported slide type: %s" % slide)
return [convert(s) for s in slides]
def _prepare_frames(self, slides, ext="jpg"):
timecodes = self.presentation.metadata['timecodes']
frame = 0
for slide_index in xrange(len(slides)):
src = slides[slide_index]
for remaining in xrange(timecodes[slide_index], timecodes[slide_index+1]):
dst = os.path.join(self.tmp_dir, "frame-{0:04d}." + ext).format(frame)
try:
os.link(src, dst)
except OSError as e:
if e.errno == errno.EMLINK:
# Create a new reference file when the upper limit is reached
# (previous to Linux 3.7, btrfs had a very low limit)
shutil.copyfile(src, dst)
src = dst
else:
raise e
frame += 1
return os.path.join(self.tmp_dir, "frame-%04d." + ext)
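    # Worked example of the mapping above (sketch): with timecodes [0, 5, 9] and
    # two slides, slide 0 is hard-linked as frame-0000..frame-0004 and slide 1 as
    # frame-0005..frame-0008, i.e. one frame per second of audio, which matches
    # the "-r 1" frame rate passed to ffmpeg in _assemble().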
class _RightBarPage(object):
"""A page returned by /rightbar.action
This page lists all available presentations with pagination.
"""
def __init__(self, client, index):
self.client = client
self.index = index
@property
def soup(self):
"""Download the page and create the soup"""
try:
return self._soup
except AttributeError:
url = client.get_url("/presentations/%s" % self.index)
content = self.client.fetch_no_cache(url).decode('utf-8')
            self._soup = bs4.BeautifulSoup(content, "html.parser")
return self._soup
def summaries(self):
"""Return a list of all the presentation summaries contained in this page"""
def create_summary(div):
def get_id(div):
return get_url(div).rsplit('/')[-1]
def get_url(div):
return client.get_url(div.find('h2', class_='itemtitle').a['href'])
def get_desc(div):
return div.p.get_text(strip=True)
def get_auth(div):
return div.find('span', class_='author').a['title']
def get_date(div):
str = div.find('span', class_='author').get_text()
str = str.replace(u'\n', u' ')
str = str.replace(u'\xa0', u' ')
match = re.search(r'on\s+(\w{3} [0-9]{1,2}, 20[0-9]{2})', str)
return datetime.datetime.strptime(match.group(1), "%b %d, %Y")
def get_title(div):
return div.find('h2', class_='itemtitle').a['title']
return {
'id': get_id(div),
'url': get_url(div),
'desc': get_desc(div),
'auth': get_auth(div),
'date': get_date(div),
'title': get_title(div),
}
videos = self.soup.findAll('div', {'class': 'news_type_video'})
return [create_summary(div) for div in videos]
|
# -*- encoding:utf-8 -*-
# __author__=='Gan'
# In a forest, each rabbit has some color. Some subset of rabbits (possibly all of them) tell you how many other
# rabbits have the same color as them. Those answers are placed in an array.
# Return the minimum number of rabbits that could be in the forest.
# Examples:
# Input: answers = [1, 1, 2]
# Output: 5
# Explanation:
# The two rabbits that answered "1" could both be the same color, say red.
# The rabbit than answered "2" can't be red or the answers would be inconsistent.
# Say the rabbit that answered "2" was blue.
# Then there should be 2 other blue rabbits in the forest that didn't answer into the array.
# The smallest possible number of rabbits in the forest is therefore 5: 3 that answered plus 2 that didn't.
# Input: answers = [10, 10, 10]
# Output: 11
# Input: answers = []
# Output: 0
# Note:
# answers will have length at most 1000.
# Each answers[i] will be an integer in the range [0, 999].
# 54 / 54 test cases passed.
# Status: Accepted
# Runtime: 52 ms
from collections import Counter
class Solution(object):
def numRabbits(self, answers):
"""
:type answers: List[int]
:rtype: int
"""
counter = Counter(answers)
res = counter[0]
for i in counter:
if i:
while counter[i] > 0:
res += i + 1
counter[i] -= i + 1
return res
if __name__ == '__main__':
print(Solution().numRabbits([1, 1, 2]))
print(Solution().numRabbits([10, 10, 10]))
print(Solution().numRabbits([0, 0, 1, 1, 1]))
print(Solution().numRabbits([2, 1, 2, 2, 2, 2, 2, 2, 1, 1]))
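    # Equivalent closed form (sketch): rabbits giving answer i must come in groups
    # of size i + 1, so each answer value contributes ceil(count / (i + 1)) * (i + 1):
    #
    #   total = sum(-(-cnt // (i + 1)) * (i + 1)
    #               for i, cnt in Counter(answers).items())
    #
    # which matches the loop above (5, 11, 6 and 13 for the four calls here).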
|
name = 1
age = 1
name = 2
age = 2
|
import torch
from torch import nn
from torch.nn import functional as F
class ResBlk(nn.Module):
"""
resnet block
"""
def __init__(self, ch_in, ch_out, stride=1):
super(ResBlk, self).__init__()
self.conv1 = nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=stride, padding=1)
self.bn1 = nn.BatchNorm2d(ch_out)
self.conv2 = nn.Conv2d(ch_out, ch_out, kernel_size=3, stride=1, padding=1)
self.bn2 = nn.BatchNorm2d(ch_out)
self.extra = nn.Sequential()
if stride!=1 or ch_out != ch_in:
self.extra = nn.Sequential(
nn.Conv2d(ch_in, ch_out, kernel_size=1, stride=stride),
nn.BatchNorm2d(ch_out)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out = self.extra(x) + out
out = F.relu(out)
return out
class ResNet18(nn.Module):
"""
resnet model
"""
def __init__(self):
super(ResNet18, self).__init__()
self.conv1= nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3)
self.bn1 = nn.BatchNorm2d(64)
self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1,return_indices=True)
# followed 4 blocks
self.blk11 = ResBlk(64, 64, stride=1)
self.blk12 = ResBlk(64, 64, stride=1)
self.blk21 = ResBlk(64, 128, stride=2)
self.blk22 = ResBlk(128, 128, stride=1)
self.blk31 = ResBlk(128, 256, stride=2)
self.blk32 = ResBlk(256, 256, stride=1)
self.blk41 = ResBlk(256, 512, stride=2)
self.blk42 = ResBlk(512, 512, stride=1)
self.outlayer = nn.Linear(512 * 1 * 1, 10)
def forward(self, x):
x = F.relu(self.bn1(self.conv1(x)))
x,indices=self.pool(x)
x = self.blk11(x)
x = self.blk12(x)
x = self.blk21(x)
x = self.blk22(x)
x = self.blk31(x)
x = self.blk32(x)
x = self.blk41(x)
x = self.blk42(x)
x = F.adaptive_avg_pool2d(x, [1, 1])
# print('after pool:', x.shape)
x = x.view(x.size(0), -1)
x = self.outlayer(x)
return x,indices
class FirstLayer(nn.Module):
"""
FirstLayer
"""
def __init__(self):
super(FirstLayer, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3)
self.bn1 = nn.BatchNorm2d(64)
def forward(self, x):
x = self.conv1(x)
x=self.bn1(x)
x=F.relu(x)
return x
class LastLayer(nn.Module):
"""
LastLayer
"""
def __init__(self):
super(LastLayer, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3)
self.bn1 = nn.BatchNorm2d(64)
self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, return_indices=True)
# followed 4 blocks
self.blk11 = ResBlk(64, 64, stride=1)
self.blk12 = ResBlk(64, 64, stride=1)
self.blk21 = ResBlk(64, 128, stride=2)
self.blk22 = ResBlk(128, 128, stride=1)
self.blk31 = ResBlk(128, 256, stride=2)
self.blk32 = ResBlk(256, 256, stride=1)
self.blk41 = ResBlk(256, 512, stride=2)
self.blk42 = ResBlk(512, 512, stride=1)
def forward(self, x):
x = F.relu(self.bn1(self.conv1(x)))
x, indices = self.pool(x)
x = self.blk11(x)
x = self.blk12(x)
x = self.blk21(x)
x = self.blk22(x)
x = self.blk31(x)
x = self.blk32(x)
x = self.blk41(x)
x = self.blk42(x)
return x,indices
class BackResBlk(nn.Module):
def __init__(self, ch_in, ch_out, stride=1):
super(BackResBlk, self).__init__()
self.deconv1 = nn.ConvTranspose2d(ch_in, ch_in, kernel_size=3, stride=1, padding=1)
if ch_in == ch_out:
self.deconv2 = nn.ConvTranspose2d(ch_in, ch_out, 3, stride=stride, padding=1)
else:
self.deconv2 = nn.ConvTranspose2d(ch_in, ch_out, 3, stride=stride, padding=1, output_padding=1)
self.extra = nn.Sequential()
if stride!=1 or ch_out != ch_in:
self.extra = nn.Sequential(
nn.ConvTranspose2d(ch_in, ch_out, kernel_size=1, stride=stride,output_padding=1),
)
def forward(self, x):
x_res = self.extra(x)
x = F.relu(self.deconv1(x))
x=self.deconv2(x)
x = F.relu(x_res + x)
return x
class BackFirstLayer(nn.Module):
def __init__(self):
super(BackFirstLayer, self).__init__()
self.deconv1 = nn.ConvTranspose2d(64, 3, kernel_size=7, stride=2, padding=3,output_padding=1)
def forward(self, x):
x = self.deconv1(x)
# x = F.relu(x)
return x
class BackLastLayer(nn.Module):
def __init__(self,indices):
super(BackLastLayer, self).__init__()
self.deblk11 = BackResBlk(512,512,1)
self.deblk12 = BackResBlk(512,256,2)
self.deblk21 = BackResBlk(256,256,1)
self.deblk22 = BackResBlk(256,128,2)
self.deblk31 = BackResBlk(128,128,1)
self.deblk32 = BackResBlk(128,64 ,2)
self.deblk41 = BackResBlk(64, 64, 1)
self.deblk42 = BackResBlk(64 ,64 ,1)
self.max_unpool = nn.MaxUnpool2d(kernel_size=3, stride=2, padding=1)
self.deconv1 = nn.ConvTranspose2d(64, 3, kernel_size=7, stride=2, padding=3,output_padding=1)
self.indices = indices
def forward(self, x):
x = self.deblk11(x)
x = self.deblk12(x)
x = self.deblk21(x)
x = self.deblk22(x)
x = self.deblk31(x)
x = self.deblk32(x)
x = self.deblk41(x)
x = self.deblk42(x)
x = self.max_unpool (x,self.indices,output_size=torch.Size([x.size()[0], 64, 112, 112]))
x = self.deconv1(x)
x = F.relu(x)
return x
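# Quick shape check (sketch, not part of the original file; assumes 224x224
# inputs): push a random batch through ResNet18, then mirror the encoder
# features back to image space with LastLayer / BackLastLayer.
if __name__ == '__main__':
    logits, _ = ResNet18()(torch.randn(2, 3, 224, 224))
    print(logits.shape)                      # torch.Size([2, 10])
    feats, pool_idx = LastLayer()(torch.randn(2, 3, 224, 224))
    recon = BackLastLayer(pool_idx)(feats)
    print(feats.shape, recon.shape)          # [2, 512, 7, 7] and [2, 3, 224, 224]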
|
import unittest
import best_travel as b
class BestTravelTest(unittest.TestCase):
def test_best_sum(self):
self.assertEqual(b.choose_best_sum([50, 55, 57, 58, 60], 3, 174), [55,58,60])
|
#!/usr/bin/env python3
# Copyright (c) 2021 Mahdi Biparva, mahdi.biparva@gmail.com
# miTorch: Medical Imaging with PyTorch
# Deep Learning Package for 3D medical imaging in PyTorch
# Implemented by Mahdi Biparva, April 2021
# Brain Imaging Lab, Sunnybrook Research Institute (SRI)
import os
import time
import socket
import pprint
import numpy as np
from trainer import Trainer
from evaluator import Evaluator
import utils.logging as logging
import utils.misc as misc
import utils.checkpoint as checkops
import utils.distributed as du
import torch
from torch.utils.tensorboard import SummaryWriter
from netwrapper.net_wrapper import NetWrapperHFB, NetWrapperWMH
logger = logging.get_logger(__name__)
class EpochLoop:
def __init__(self, cfg):
self.cfg = cfg
self.trainer, self.evaluator = None, None
self.device, self.net_wrapper = None, None
self.tb_logger_writer = None
self.best_eval_metric = float('inf')
self.setup_gpu()
def setup_gpu(self):
        cuda_device_id = self.cfg.GPU_ID
        if self.cfg.USE_GPU and torch.cuda.is_available():
            # only select the CUDA device when a GPU is actually requested and available
            torch.cuda.set_device(cuda_device_id)
            self.device = torch.device('cuda:{}'.format(cuda_device_id))
print('cuda available')
print('device count is', torch.cuda.device_count())
print(self.device, 'will be used ...')
else:
self.device = torch.device('cpu')
def setup_tb_logger(self):
self.tb_logger_writer = SummaryWriter(self.cfg.OUTPUT_DIR)
with open(os.path.join(self.cfg.OUTPUT_DIR, 'cfg.yml'), 'w') as outfile:
            # pyyaml can crash on CfgNodes nested in tuples, so dump a cloned copy of the config
cfg = self.cfg.clone()
cfg.dump(stream=outfile)
def tb_logger_update(self, e, worker):
if not (self.cfg.DDP and self.cfg.DDP_CFG.RANK): # no ddp or rank zero
if e == 0 and self.tb_logger_writer is None:
self.setup_tb_logger()
worker.tb_logger_update(self.tb_logger_writer, e)
def save_checkpoint(self, cur_epoch, eval_metric, every_epoch=False):
if checkops.is_checkpoint_epoch(cur_epoch, self.cfg.TRAIN.CHECKPOINT_PERIOD):
if self.cfg.DDP and self.cfg.DDP_CFG.RANK:
return
if every_epoch:
self.net_wrapper.save_checkpoint(self.cfg.OUTPUT_DIR, cur_epoch, best=False)
logger.info(f'checkpoint saved at epoch {cur_epoch} in the path {self.cfg.OUTPUT_DIR}')
# add if it is the best, save it separately too
self.best_eval_metric = min(eval_metric, self.best_eval_metric)
if eval_metric == self.best_eval_metric:
logger.info('--- best snapshot taken. current {} | best {}'.format(eval_metric, self.best_eval_metric))
self.net_wrapper.save_checkpoint(self.cfg.OUTPUT_DIR, cur_epoch, best=True)
logger.info(f'best checkpoint saved at epoch {cur_epoch} in the path {self.cfg.OUTPUT_DIR}')
def load_checkpoint(self):
if self.cfg.TRAIN.AUTO_RESUME and checkops.has_checkpoint(self.cfg.OUTPUT_DIR):
logger.info("Load from last checkpoint.")
last_checkpoint = checkops.get_last_checkpoint(self.cfg.OUTPUT_DIR)
checkpoint_epoch = self.net_wrapper.load_checkpoint(last_checkpoint)
start_epoch = checkpoint_epoch + 1
elif self.cfg.TRAIN.CHECKPOINT_FILE_PATH != "":
logger.info("Load from given checkpoint file.")
checkpoint_epoch = self.net_wrapper.load_checkpoint(self.cfg.TRAIN.CHECKPOINT_FILE_PATH)
start_epoch = checkpoint_epoch + 1
else:
start_epoch = 0
du.synchronize()
return start_epoch
def check_if_validating(self, cur_epoch):
if misc.is_eval_epoch(self.cfg, cur_epoch):
self.evaluator_epoch_loop(cur_epoch)
logger.info('*** Done validating at epoch {}'.format(cur_epoch))
return self.evaluator.meters.get_epoch_loss()
return None
def lr_scheduling(self, eval_loss_avg_last):
self.net_wrapper.schedule_step(eval_loss_avg_last)
def trainer_epoch_loop(self, start_epoch):
for cur_epoch in range(start_epoch, self.cfg.SOLVER.MAX_EPOCH):
self.trainer.set_net_mode(self.net_wrapper.net_core)
if self.cfg.DDP:
self.trainer.data_container.sampler.set_epoch(cur_epoch)
self.trainer.meters.reset()
self.trainer.batch_loop(self.net_wrapper, cur_epoch)
self.trainer.meters.log_epoch_stats(cur_epoch, 'train')
self.tb_logger_update(cur_epoch, self.trainer)
eval_loss_avg_last = self.check_if_validating(cur_epoch)
self.save_checkpoint(cur_epoch, eval_metric=eval_loss_avg_last)
self.lr_scheduling(eval_loss_avg_last)
def evaluator_epoch_loop(self, start_epoch):
self.evaluator.set_net_mode(self.net_wrapper.net_core)
self.evaluator.meters.reset()
self.evaluator.batch_loop(self.net_wrapper, start_epoch)
self.evaluator.meters.log_epoch_stats(start_epoch, 'valid')
self.tb_logger_update(start_epoch, self.evaluator)
def main_setup(self):
np.random.seed(self.cfg.RNG_SEED)
torch.manual_seed(self.cfg.RNG_SEED)
socket_name = socket.gethostname()
logging.setup_logging(
output_dir=self.cfg.OUTPUT_DIR if 'scinet' in socket_name or 'computecanada' in socket_name else None
)
logger.info("Train with config:")
logger.info(pprint.pformat(self.cfg))
if self.cfg.DDP:
            logger.info('DDP is on. The DDP config is:')
logger.info(pprint.pformat(self.cfg.DDP_CFG))
def create_sets(self):
self.trainer = Trainer(self.cfg, self.device) if self.cfg.TRAIN.ENABLE else None
self.evaluator = Evaluator(self.cfg, self.device) if self.cfg.VALID.ENABLE else None
def setup_net(self):
if self.cfg.WMH.ENABLE:
self.net_wrapper = NetWrapperWMH(self.device, self.cfg)
else:
self.net_wrapper = NetWrapperHFB(self.device, self.cfg)
def run(self, start_epoch):
logger.info("Start epoch: {}".format(start_epoch + 1))
if self.cfg.TRAIN.ENABLE:
self.trainer_epoch_loop(start_epoch)
elif self.cfg.VALID.ENABLE:
self.evaluator_epoch_loop(0)
elif self.cfg.TESTING:
raise NotImplementedError('TESTING mode is not implemented yet')
else:
raise NotImplementedError('One of {TRAINING, VALIDATING, TESTING} must be set to True')
if not (self.cfg.DDP and self.cfg.DDP_CFG.RANK):
self.tb_logger_writer.close()
def main(self):
self.main_setup()
self.create_sets()
self.setup_net()
start_epoch = self.load_checkpoint()
self.run(start_epoch)
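# Entry-point sketch (assuming an already-parsed `cfg` CfgNode from the project
# config system): EpochLoop(cfg).main() sets up logging, builds the trainer and
# evaluator, restores the latest checkpoint when AUTO_RESUME is set, and then
# enters the epoch loop.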
|
import os
import re
dirpath = os.getcwd()
dirpath+='/'
chr_names=['chr1','chr2','chr3','chr4',
'chr5','chr6','chr7','chr8',
'chr9','chr10','chr11','chr12',
'chr13','chr14','chr15','chr16',
'chr17','chr18','chr19','chr20',
'chr21','chr22','chrX','chrY']
outp=open('ref_chr_startpos.txt','w')
for CHR in chr_names:
inp = open(dirpath+CHR+'.maf', "r")
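    # Each MAF alignment block holds 's' (sequence) lines, e.g.
    #   s hg38.chr1 10000 50 + 248956422 ACGT...
    # so re.findall(r'\w+', line) puts the 0-based start at token index 3 and the
    # aligned block length at index 4; only the first hg38 line per file is kept.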
for line in inp:
if "hg38" in line and line[0] == 's':
startpos =int(re.findall(r'\w+', line)[3])
firstlen =int(re.findall(r'\w+', line)[4])
outp.write(CHR+' '+str(startpos)+'\n')
break
inp.close()
outp.close()
|
#!/usr/bin/env python
import re
import os
import time
import hashlib
orig_text = open('pf.scala').read()
def compute_tempvalue(prog_text):
    # encode explicitly so the same code works under Python 2 and 3 (hashlib wants bytes)
    digest = hashlib.sha1(prog_text.encode('utf-8')).hexdigest()
    tempvalue = hashlib.sha1(('42' + digest).encode('utf-8')).hexdigest()
    return tempvalue
for x in range(10000, 99999):
new_text = re.sub(r'z\.contains\("\w+"\)',
'z.contains("{}")'.format(x),
orig_text)
if x % 1000 == 0:
print("--- Attempt {}".format(x))
tempvalue = compute_tempvalue(new_text)
if str(x) in tempvalue:
print('SUCCESS!')
print(x)
break
|
from itertools import combinations
import pandas as pd
from sklearn.preprocessing import (PolynomialFeatures, OneHotEncoder,
StandardScaler)
def onehot_conversion(X_cat, model=None):
if model:
X_onehot = pd.DataFrame(model.transform(X_cat),
columns=model.get_feature_names(
input_features=X_cat.columns))
X_onehot.index = X_cat.index
return X_onehot
    model = OneHotEncoder(sparse=False)
    model.fit(X_cat)
    X_onehot = pd.DataFrame(model.transform(X_cat),
                            columns=model.get_feature_names(
                                input_features=X_cat.columns))
    X_onehot.index = X_cat.index
    cmap = {}
    for col in X_cat.columns:
        cmap[col] = list(pd.get_dummies(X_cat[col]).columns)
    return X_onehot, model, cmap
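# Usage sketch (hypothetical frames): fit the encoder once on the training
# categoricals, then reuse the fitted model on new data via the `model` argument.
#
#   train_oh, enc, cmap = onehot_conversion(train_cat)
#   test_oh = onehot_conversion(test_cat, model=enc)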
def poly_generation(dataframe, n=3, model=None):
if model:
df_poly = pd.DataFrame(model.transform(dataframe),
columns=model.get_feature_names(
input_features=dataframe.columns))
return df_poly
model = PolynomialFeatures(degree=n)
model.fit(dataframe)
df_poly = pd.DataFrame(model.transform(dataframe),
columns=model.get_feature_names(
input_features=dataframe.columns))
return df_poly, model
def standard_scaler(dataframe, model=None):
if model:
X_stsc = model.transform(dataframe)
df_stsc = pd.DataFrame(X_stsc, columns=dataframe.columns, index=dataframe.index)
return df_stsc
model = StandardScaler()
model.fit(dataframe)
X_stsc = model.transform(dataframe)
df_stsc = pd.DataFrame(X_stsc, columns=dataframe.columns, index=dataframe.index)
return df_stsc, model
|
from . import inline
|
EnsureSConsVersion(1,2)
import os
import sys
import inspect
import platform
import re
import subprocess
from SCons import SConf
def getTools():
result = []
if os.name == 'nt':
result = ['nvcc', 'default', 'msvc']
elif os.name == 'posix':
result = [ 'nvcc', 'default','g++']
else:
result = [ 'nvcc', 'default']
return result
OldEnvironment = Environment
# this dictionary maps the name of a compiler program to a dictionary mapping the name of
# a compiler switch of interest to the specific switch implementing the feature
gCompilerOptions = {
'gcc' : {'warn_all' : '-Wall',
'warn_errors' : '-Werror',
'optimization' : '-O3', 'debug' : '-g',
'exception_handling' : '', 'standard': ''},
'clang' : {'warn_all' : '-Wall',
'warn_errors' : '-Werror',
'optimization' : '-O3', 'debug' : '-g',
'exception_handling' : '', 'standard': ''},
'g++' : {'warn_all' : '-Wall',
'warn_errors' : '-Werror',
'optimization' : '-O3', 'debug' : '-g',
'exception_handling' : '', 'standard': '-std=c++11'},
'c++' : {'warn_all' : '-Wall',
'warn_errors' : '-Werror',
'optimization' : '-O3', 'debug' : '-g',
'exception_handling' : '',
'standard': ['-stdlib=libc++', '-std=c++0x', '-pthread']},
'clang++' : {'warn_all' : '-Wall',
'warn_errors' : '-Werror',
'optimization' : ['-O3'], 'debug' : ['-g'],
'exception_handling' : '',
'standard': ['-stdlib=libc++', '-std=c++11', '-pthread']},
'cl' : {'warn_all' : '/Wall',
'warn_errors' : '/WX',
'optimization' : ['/Ox', '/MD', '/Zi', '/DNDEBUG'],
'debug' : ['/Zi', '/Od', '/D_DEBUG', '/RTC1', '/MDd'],
'exception_handling': '/EHsc',
'standard': ['/GS', '/GR', '/Gd', '/fp:precise',
'/Zc:wchar_t','/Zc:forScope', '/DYY_NO_UNISTD_H']}
}
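# e.g. gCompilerOptions['g++']['warn_all'] is '-Wall', while the MSVC entry
# gCompilerOptions['cl']['optimization'] is the list ['/Ox', '/MD', '/Zi', '/DNDEBUG']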
# this dictionary maps the name of a linker program to a dictionary mapping the name of
# a linker switch of interest to the specific switch implementing the feature
gLinkerOptions = {
'gcc' : {'debug' : '', 'libraries' : ''},
'clang' : {'debug' : '', 'libraries' : ''},
'g++' : {'debug' : '', 'libraries' : ''},
'c++' : {'debug' : '', 'libraries' : '-lc++'},
'clang++' : {'debug' : '', 'libraries' : '-lc++'},
'link' : {'debug' : '/debug', 'libraries' : ''}
}
def getCFLAGS(mode, warn, warnings_as_errors, CC):
result = []
if mode == 'release':
# turn on optimization
result.append(gCompilerOptions[CC]['optimization'])
elif mode == 'debug':
# turn on debug mode
result.append(gCompilerOptions[CC]['debug'])
result.append('-DPRNN_DEBUG')
if warn:
# turn on all warnings
result.append(gCompilerOptions[CC]['warn_all'])
if warnings_as_errors:
# treat warnings as errors
result.append(gCompilerOptions[CC]['warn_errors'])
result.append(gCompilerOptions[CC]['standard'])
return result
def getLibCXXPaths():
"""Determines libc++ path
returns (inc_path, lib_path)
"""
# determine defaults
if os.name == 'posix':
inc_path = '/usr/include'
lib_path = '/usr/lib/libc++.so'
else:
raise ValueError, 'Error: unknown OS. Where is libc++ installed?'
    # override with environment variables
if 'LIBCXX_INC_PATH' in os.environ:
inc_path = os.path.abspath(os.environ['LIBCXX_INC_PATH'])
if 'LIBCXX_LIB_PATH' in os.environ:
lib_path = os.path.abspath(os.environ['LIBCXX_LIB_PATH'])
return (inc_path, lib_path)
def getCXXFLAGS(mode, warn, warnings_as_errors, CXX):
result = []
if mode == 'release':
# turn on optimization
result.append(gCompilerOptions[CXX]['optimization'])
elif mode == 'debug':
# turn on debug mode
result.append(gCompilerOptions[CXX]['debug'])
# enable exception handling
result.append(gCompilerOptions[CXX]['exception_handling'])
if warn:
# turn on all warnings
result.append(gCompilerOptions[CXX]['warn_all'])
if warnings_as_errors:
# treat warnings as errors
result.append(gCompilerOptions[CXX]['warn_errors'])
result.append(gCompilerOptions[CXX]['standard'])
return result
def getLINKFLAGS(mode, LINK):
result = []
if mode == 'debug':
# turn on debug mode
result.append(gLinkerOptions[LINK]['debug'])
result.append(gLinkerOptions[LINK]['libraries'])
return result
def cuda_exists(env):
if not env['with_cuda']:
return False
return os.path.exists(env['cuda_path'])
def getExtraLibs(env):
if os.name == 'nt':
return []
else:
if cuda_exists(env):
return ['cudart_static']
else:
return []
def importEnvironment():
env = { }
if 'PATH' in os.environ:
env['PATH'] = os.environ['PATH']
if 'CXX' in os.environ:
env['CXX'] = os.environ['CXX']
if 'CC' in os.environ:
env['CC'] = os.environ['CC']
if 'TMP' in os.environ:
env['TMP'] = os.environ['TMP']
if 'LD_LIBRARY_PATH' in os.environ:
env['LD_LIBRARY_PATH'] = os.environ['LD_LIBRARY_PATH']
return env
def updateEnvironment(env):
originalEnvironment = importEnvironment()
for key, value in originalEnvironment.iteritems():
env[key] = value
def BuildEnvironment():
vars = Variables()
# add a variable to handle RELEASE/DEBUG mode
vars.Add(EnumVariable('mode', 'Release versus debug mode', 'debug',
allowed_values = ('release', 'debug')))
# add a variable to handle warnings
vars.Add(BoolVariable('Wall', 'Enable all compilation warnings', 1))
# shared or static libraries
libraryDefault = 'shared'
vars.Add(EnumVariable('library', 'Build shared or static library',
libraryDefault, allowed_values = ('shared', 'static')))
# add a variable to treat warnings as errors
vars.Add(BoolVariable('Werror', 'Treat warnings as errors', 1))
# enable_cuda
vars.Add(BoolVariable('with_cuda', 'Enable cuda', 1))
# add a variable to determine the install path
default_install_path = '/usr/local'
if 'PRNN_INSTALL_PATH' in os.environ:
default_install_path = os.environ['PRNN_INSTALL_PATH']
vars.Add(PathVariable('install_path', 'The prnn install path',
default_install_path, PathVariable.PathIsDirCreate))
vars.Add(BoolVariable('install', 'Include prnn install path in default '
'targets that will be built and configure to install in the '
'install_path (defaults to false unless one of the targets is '
'"install")', 0))
# add a variable to handle cuda install path
cuda_path = "/usr/local/cuda"
if 'CUDA_PATH' in os.environ:
cuda_path = os.environ['CUDA_PATH']
vars.Add(PathVariable('cuda_path', 'Cuda toolkit install path', cuda_path,
PathVariable.PathAccept))
# add a variable to handle cuda architecture
default_cuda_arch = 'sm_30'
if 'CUDA_ARCH' in os.environ:
default_cuda_arch = os.environ['CUDA_ARCH']
vars.Add(EnumVariable('cuda_arch', 'Cuda architecture', default_cuda_arch,
allowed_values = ('sm_30', 'sm_35', 'sm_50', 'sm_52', 'sm_75')))
# create an Environment
env = OldEnvironment(ENV = importEnvironment(), \
tools = getTools(), variables = vars)
updateEnvironment(env)
# set the version
env.Replace(VERSION = "0.1")
# always link with the c++ compiler
if os.name != 'nt':
env['LINK'] = env['CXX']
# get C compiler switches
env.AppendUnique(CFLAGS = getCFLAGS(env['mode'], env['Wall'], \
env['Werror'], env.subst('$CC')))
# get CXX compiler switches
env.AppendUnique(CXXFLAGS = getCXXFLAGS(env['mode'], env['Wall'], \
env['Werror'], env.subst('$CXX')))
# get linker switches
env.AppendUnique(LINKFLAGS = getLINKFLAGS(env['mode'], env.subst('$LINK')))
# Install paths
if env['install']:
env.Replace(INSTALL_PATH = os.path.abspath(env['install_path']))
else:
env.Replace(INSTALL_PATH = os.path.abspath('.'))
# get libc++
if env['CXX'] == 'c++':
env.AppendUnique(CPPPATH = getLibCXXPaths()[0])
# set extra libs
env.Replace(EXTRA_LIBS=getExtraLibs(env))
# set the build path
env.Replace(BUILD_ROOT = str(env.Dir('.').abspath))
env.AppendUnique(CPPPATH = os.path.join(env['BUILD_ROOT'], 'include'))
# set prnn include path
if env['install']:
env.AppendUnique(LIBPATH = os.path.abspath(os.path.join(env['install_path'], 'lib')))
else:
env.AppendUnique(LIBPATH = os.path.abspath('.'))
# we need librt on linux
if sys.platform == 'linux2':
env.AppendUnique(EXTRA_LIBS = ['-lrt'])
    # we need libdl on mac and linux
if os.name != 'nt':
env.AppendUnique(EXTRA_LIBS = ['-ldl'])
# generate help text
Help(vars.GenerateHelpText(env))
return env
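# Typical use from an SConstruct (sketch): call env = BuildEnvironment() and
# override the Variables declared above on the scons command line, e.g.
#   scons mode=release library=static cuda_arch=sm_52 install=1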
|
from django.core.cache import get_cache
cachekey_usr_session_profix = 'usr_session_'#roleid
class cache:
@staticmethod
def loc_setValue(key, val):
"""
        Set a value in the local in-memory cache.
"""
c = get_cache('in_memery')
c.set(key, val)
@staticmethod
def loc_getValue(key):
"""
        Get a value from the local in-memory cache.
"""
c = get_cache('in_memery')
return c.get(key)
@staticmethod
def loc_delete(key):
"""
        Delete a key from the local in-memory cache.
"""
c = get_cache('in_memery')
c.delete(key)
@staticmethod
def mc_setValue(key, val):
"""
        Set a value in the default memcached cache.
"""
c = get_cache('default')
c.set(key, val)
@staticmethod
def mc_getValue(key):
"""
        Get a value from the default memcached cache.
"""
c = get_cache('default')
return c.get(key)
@staticmethod
    def mc_delete(key):
        """
        Delete a key from the default memcached cache.
        """
        c = get_cache('default')
        c.delete(key)
@staticmethod
def mc_hasKey(key):
"""
        Check whether a key exists in the default memcached cache.
"""
c = get_cache('default')
return c.has_key(key)
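# Usage sketch (hypothetical role id / profile object): per-role session data is
# keyed off the prefix defined above.
#
#   cache.mc_setValue(cachekey_usr_session_profix + str(role_id), profile)
#   profile = cache.mc_getValue(cachekey_usr_session_profix + str(role_id))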
|
# I pledge my honor that I have abided by the Stevens Honor System.
# Jeffrey Eng
def sum(p):
total = 0
for i in p:
total += float(i)
return total
def main():
numbers = input("Enter a list of numbers separated by spaces: ")
x = numbers.split()
print("The sum is", sum(x))
main()
|
import os
import math
from work.models import Site, ShiftedQty, ProgressQty, SurveyQty, ShiftedQtyExtra, ProgressQtyExtra, SiteExtra, DprQty, Log, Resolution, ResolutionLink, Loa
import pandas as pd
from work.data import DISTRICTS_ALLOWED, DIVISIONS_ALLOWED, PROGRESS_QFIELDS, SURVEY_QFIELDS, REVIEW_QFIELDS, DPR_INFRA_FIELDS, SHIFTED_QFIELDS
from django.db.models import F, Sum, Count, Q, FileField
from work.functions import formatString
from work.models import getHabID
from django.contrib.auth import authenticate, login
from django.contrib.auth.models import Permission, User, Group
from django.contrib.auth.decorators import login_required
import pdb
import numpy as np
def getSite(census, habitation):
site = None
additional = False
habid = getHabID(census=census, habitation=formatString(habitation))
site = Site.objects.filter(hab_id=habid).first()
if(site):
if(site.origin):
site = site.origin
else:
#: if None, look into additional
sitex = SiteExtra.objects.filter(hab_id=habid).first()
if(sitex):
if(sitex.site):
site = sitex.site
additional = True
return site, additional
def getSiteData(census, habitation):
site = None
survey = None
progress = None
site, isAdd = getSite(census, habitation)
if(not isAdd):
survey = SurveyQty.objects.filter(site=site).first()
progress = ProgressQty.objects.filter(site=site).first()
return site, survey, progress
def getSiteProgressdf(district=None):
site_fields = ['origin__hab_id','hab_id', 'village', 'census', 'habitation', 'district', 'division', 'category', 'project__name']
qty_field = ['ht', 'pole_ht_8m', 'lt_3p', 'lt_1p', 'pole_lt_8m', 'dtr_100', 'dtr_63', 'dtr_25']
# dfP = pd.DataFrame(ProgressQty.objects.all().values(*num_fields))
# dfP.set_index('site__hab_id', inplace=True)
# dfP['rem'] = 'site'
# dfPX = pd.DataFrame(ProgressQtyExtra.objects.all().values(*num_fields))
# dfPX.set_index('site__hab_id', inplace=True)
# dfPX['rem'] = 'extra'
#df = dfP.add(dfPX, fill_value=0, numeric_only=True)
# df = pd.concat([dfP, dfPX])
if(district == None):
# scope = Site.objects.exclude((Q(progressqty=None) & Q(surveyqty=None)) | Q(surveyqty__status='canceled') | Q(progressqty__status='canceled'))
scope = Site.objects.exclude((Q(progressqty=None)) | Q(progressqty__status='canceled'))
else:
# scope = Site.objects.filter(district=str.upper(district)).exclude((Q(progressqty=None) & Q(surveyqty=None)) | Q(surveyqty__status='canceled') | Q(progressqty__status='canceled'))
scope = Site.objects.filter(district=str.upper(district)).exclude((Q(progressqty=None)) | Q(progressqty__status='canceled'))
sfield = ['surveyqty__' + f for f in qty_field]
pfield = ['progressqty__'+f for f in qty_field]
data = scope.values(*site_fields, *sfield, 'surveyqty__status', *pfield, 'progressqty__status', 'dprqty__project', 'progressqty__cert','progressqty__review', 'billing__status', 'billing__billno', 'billing__remark')
df = pd.DataFrame(data)
# copy progress data if survey data is blank.
_survey_infra = sum([df['surveyqty__' + f] for f in qty_field])
_progress_infra = sum([df['progressqty__' + f] for f in qty_field])
df['survey_infra'] = _survey_infra
df['progress_infra'] = _progress_infra
df = df[(_survey_infra > 0) | (_progress_infra > 0)]
# df[(_survey_infra > 0)].loc[:,sfield] = df[(_survey_infra > 0)].loc[:,pfield]
df.to_excel('outputs/progress_sites.xlsx')
return df
def checkInfraNil(progress, shifted):
hasError = False
res = []
# values = [getattr(progress, f, 0) or 0 for f in PROGRESS_QFIELDS]
# qtysum = sum([v for v in values if not v == None])
try:
pqtysum = sum([getattr(progress, f, 0) or 0 for f in PROGRESS_QFIELDS])
sqtysum = sum([getattr(shifted, f, 0) or 0 for f in SHIFTED_QFIELDS])
if(not (pqtysum > 0 and sqtysum > 0)):
hasError = True
res.append({'class': 'error', 'text': '{}: infra is nil'.format(progress)})
return hasError, res
except Exception as ex:
hasError = True
res.append({'class': 'error', 'text': '{}: {}'.format(progress.site, ex.__str__())})
return hasError, res
def checkAgainstSurvey(site, progress):
survey = SurveyQty.objects.filter(site=site).first()
res = []
hasError = False
for f in PROGRESS_QFIELDS:
# print('comparing {} against survey'.format(f))
try:
            diff = (getattr(progress, f, 0) or 0) - (getattr(survey, f, 0) or 0)
            # flag only when executed progress exceeds the surveyed quantity by more than 20%
            comp = (getattr(progress, f, 0) or 0) > 1.2 * (getattr(survey, f, 0) or 0)
if(comp):
res.append(
{'class': 'warning', 'text': 'excess {} {}:\t\t{} \tby {}'.format(site.census, site.habitation, f, round(diff,1))})
except Exception as ex:
res.append(
{'class': 'error', 'text': '{}: {}'.format(progress.site, ex.__str__())})
return hasError, res
def validateProgressFile(file):
status = []
hasError = False
try:
df = pd.read_excel(file, sheet_name='upload', header=None)
except Exception as ex:
hasError=True
status.append({'class':'error','text': ex.__str__()})
return hasError, status, None
data_row = 6
# check format
dfTemplate = pd.read_excel('files/progress_report.xlsx', header=None)
try:
columns = dfTemplate.iloc[data_row-1]
for i in range(24):
if(df.iloc[data_row-1, i] != columns[i]):
status.append(
{'class': 'error', 'text': 'Format error @: {}'.format(columns[i])})
hasError = True
if(hasError):
return hasError, status, None
df_data = df[data_row:]
        df_data.iloc[:, 7:23] = df_data.iloc[:, 7:23].fillna(0).replace('', 0)
df_data = df_data.rename(columns=df.iloc[data_row-1, :])
df_data = df_data.fillna('')
except Exception as ex:
return True, [{'class':'error', 'text': ex.__str__()}], None
return hasError, status, df_data
def _assignQty(pqty, sqty, data):
fields_shifted = ['acsr', 'cable_3p', 'cable_1p',
'pole_8m', 'pole_9m', 'dtr_100', 'dtr_63', 'dtr_25']
for field in fields_shifted:
setattr(sqty, field, getattr(data, field + '_shifted', 0))
fields_progress = ['ht', 'pole_ht_8m', 'lt_3p', 'lt_1p',
'pole_lt_8m', 'dtr_100', 'dtr_63', 'dtr_25', 'remark', 'status']
for field in fields_progress:
setattr(pqty, field, getattr(data, field, 0))
return pqty, sqty
def updateProgressSingle(progressdata, updateid, isTest):
updated = False
status = []
hasError = False
village = formatString(progressdata['village'])
census = progressdata['census']
habitation = formatString(progressdata['habitation'])
site, additional = getSite(census, habitation)
pqty = None
sqty = None
print('Processing... {}'.format(site))
if(site and not additional):
sqty = ShiftedQty.objects.filter(site=site).first()
if(not sqty):
sqty = ShiftedQty(site=site)
pqty = ProgressQty.objects.filter(site=site).first()
if(not pqty):
pqty = ProgressQty(site=site)
status.append(
{'class': 'success', 'text': 'Updating: {village} {census} {habitation}'.format(**progressdata)})
elif(site and additional):
sqty = ShiftedQtyExtra.objects.filter(site=site).first()
if(not sqty):
sqty = ShiftedQtyExtra(site=site)
pqty = ProgressQtyExtra.objects.filter(site=site).first()
if(not pqty):
pqty = ProgressQtyExtra(site=site)
# _assignQty(pqty, sqty, progressdata)
# pqty.changeid = updateid
# sqty.changeid = updateid
status.append(
{'class': 'success', 'text': 'Updating (additional): {village} {census} {habitation}'.format(**progressdata)})
else:
#: another additional site... requires formal approval
status.append(
{'class': 'error', 'text': "Unknown site: {village} {census} {habitation}".format(**progressdata)})
hasError = True
if(pqty):
# skip update if...
# if((not pqty.review == 'not reviewed' ) or (pqty.status == 'completed') or (pqty.cert == True)):
if((not pqty.review == 'not reviewed' ) or (pqty.cert == True)):
status.append(
{'class': 'info', 'text': "skipped: {village} {census} {habitation} completed, under review".format(**progressdata)})
return False, status, False
_assignQty(pqty, sqty, progressdata)
hasError, warnings = checkAgainstSurvey(site, pqty)
status.extend(warnings)
pqty.changeid = updateid
sqty.changeid = updateid
hasError, errors = checkInfraNil(pqty, sqty)
status.extend(errors)
# input('check')
if(not isTest and not hasError):
pqty.save()
sqty.save()
print('saving...')
status.append(
{'class': 'success', 'text': "Updated {village} {census} {habitation}".format(**progressdata)})
updated = True
# print(status)
return hasError, status, updated
def UpdateProgress(file, updateid, isTest):
status = []
hasError, dfstatus, dfProgress = validateProgressFile(file)
updated = False
if(hasError):
status.extend(dfstatus)
# print(status)
return status
for index, row in dfProgress.iterrows():
iferror, stat, updated = updateProgressSingle(row, updateid, isTest)
status.extend(stat)
if(updated and not isTest):
log = Log(model='Progress', changeid=updateid)
log._save()
# print(status)
return status
def getDistrictProgressSummary():
num_fields = ['ht', 'ht_conductor', 'pole_ht_8m', 'lt_3p', 'lt_1p',
'pole_lt_8m', 'dtr_100', 'dtr_63', 'dtr_25']
df_district = pd.DataFrame(ProgressQty.objects.exclude(status='canceled').values(
district=F('site__district')).annotate(
*[Sum(f) for f in num_fields],
completed=Count('status', filter=Q(status='completed')),
cert=Count('cert', filter=Q(cert=True))
))
df_district.set_index('district', inplace=True)
print(df_district)
df_district['LT'] = df_district['lt_1p__sum'] + df_district['lt_3p__sum']
df_district['DTR#'] = (df_district['dtr_100__sum'] + df_district['dtr_63__sum'] + df_district['dtr_25__sum'])
df_district['MVA'] = (df_district['dtr_100__sum']*100 + df_district['dtr_63__sum']*63 + df_district['dtr_25__sum']*25)/1000
df_extra = pd.DataFrame(ProgressQtyExtra.objects.values(
district=F('site__district')).annotate(
*[Sum(f) for f in num_fields],
completed=Count(
'status', filter=Q(status='completed')),
cert=Count(
'cert', filter=Q(cert=True))))
if(not df_extra.empty):
df_extra.set_index('district', inplace=True)
if(not df_extra.empty):
df_district = df_district.add(df_extra, fill_value=0)
dfProgress = df_district.copy()
# Add approved hab count from SurveyQty
sqty = SurveyQty.objects.filter(status='approved')
dfSuveyed = pd.DataFrame(sqty.values(
district=F('site__district')).annotate(approved=Count('site')))
dfSuveyed.set_index('district', inplace=True)
df_district['approved'] = dfSuveyed
dpr = DprQty.objects.filter(has_infra=True)
dfDPR = pd.DataFrame(dpr.values(
district=F('site__district')).annotate(approved=Count('site')))
dfDPR.set_index('district', inplace=True)
df_district['DPRHabs'] = dfDPR
#Scope
scope = Site.objects.exclude((Q(progressqty=None) & Q(surveyqty=None)) | Q(surveyqty__status='canceled') | Q(progressqty__status='canceled'))
# scope = Site.objects.exclude((Q(progressqty=None) & Q(surveyqty=None)) | Q(surveyqty__status='canceled') | Q(progressqty__status='canceled'))
# scope = Site.objects.exclude(Q(surveyqty=None) & Q(progressqty=None)).exclude(surveyqty__status='canceled').exclude(progressqty__status='canceled')
# dfsites = pd.DataFrame(scope.values('hab_id','district'))
# dfsites.to_excel('../sites.xlsx')
dfScope = pd.DataFrame(scope.values('district').annotate(scope=Count('id')))
print(dfScope)
dfScope.set_index('district', inplace=True)
df_district['scope'] = dfScope
df_district['Scope cat I'] = pd.DataFrame(scope.filter(category="I").values('district').annotate(scope_catI=Count('id'))).set_index('district')
df_district['Scope cat II'] = pd.DataFrame(scope.filter(category="II").values('district').annotate(scope_catI=Count('id'))).set_index('district')
df_district['Scope cat III'] = pd.DataFrame(scope.filter(category="III").values('district').annotate(scope_catI=Count('id'))).set_index('district')
dfDprqty = pd.DataFrame(dpr.values(district=F('site__district')).annotate(
*[Sum(f) for f in [*DPR_INFRA_FIELDS, 'ht_conductor']]))
dfDprqty.columns = [f.replace('__sum', '') for f in dfDprqty.columns]
dfDprqty.set_index('district', inplace=True)
dfDprqty['section'] = '1. DPR'
# dfDprqty['pole_ht_8m'] = pd.np.ceil(dfDprqty['ht'] * 15).astype(int)
# dfDprqty['pole_lt_8m'] = pd.np.ceil(dfDprqty['lt_3p'] * 25 + dfDprqty['lt_1p'] * 22).astype(int)
dfDprqty['pole_ht_8m'] = dfDprqty['ht'] * 14
dfDprqty['pole_lt_8m'] = dfDprqty['lt_3p'] * 25 + dfDprqty['lt_1p'] * 22
dfDprqty['pole_9m'] = (dfDprqty['dtr_100'] +
dfDprqty['dtr_63'] + dfDprqty['dtr_25'])*2
dfDprqty['pole_8m'] = dfDprqty['pole_ht_8m'] + dfDprqty['pole_lt_8m']
loa = Loa.objects.all()
dfLoa = pd.DataFrame(loa.values())
dfLoa['district'] = dfLoa['area']
dfLoa.set_index('district', inplace=True)
dfLoa['pole_8m'] = dfLoa['pole_ht_8m'] + dfLoa['pole_lt_8m']
dfLoa['section'] = '2. LOA'
dfPQty = df_district.copy()
dfPQty.columns = [f.replace('__sum', '') for f in dfPQty.columns]
dfPQty['section'] = '6. Executed'
dfPQty['pole_9m'] = (dfPQty['dtr_100'] +
dfPQty['dtr_63'] + dfPQty['dtr_25'])*2
dfPQty['pole_8m'] = dfPQty['pole_ht_8m'] + dfPQty['pole_lt_8m']
sscope = SurveyQty.objects.exclude(status='canceled')
dfScopeSurvQty = pd.DataFrame(sscope.values(district=F('site__district')).annotate(*[Sum(f) for f in SURVEY_QFIELDS]))
dfScopeSurvQty.columns = [f.replace('__sum', '') for f in dfScopeSurvQty.columns]
dfScopeSurvQty['section'] = '3. Scope'
dfScopeSurvQty.set_index('district', inplace=True)
dfScopeSurvQty['pole_8m'] = dfScopeSurvQty['pole_ht_8m'] + dfScopeSurvQty['pole_lt_8m']
dfConsolidatedScope = pd.DataFrame(list(map(consolidatedScope,sscope))).groupby('district').sum()
dfConsolidatedScope['section'] = '3. Scope Consolidated'
dfConsolidatedScope['pole_8m'] = dfConsolidatedScope['pole_ht_8m'] + dfConsolidatedScope['pole_lt_8m']
dfSurvQty = pd.DataFrame(sqty.values(district=F('site__district')).annotate(
*[Sum(f) for f in SURVEY_QFIELDS]))
dfSurvQty.columns = [f.replace('__sum', '') for f in dfSurvQty.columns]
dfSurvQty['section'] = '4. Approved'
dfSurvQty.set_index('district', inplace=True)
dfSurvQty['pole_8m'] = dfSurvQty['pole_ht_8m'] + dfSurvQty['pole_lt_8m']
sfield = [*SURVEY_QFIELDS, 'pole_8m']
dfQtyBal = dfLoa[sfield].subtract(dfSurvQty[sfield])
dfQtyBal['section'] = '5. Approval Balance'
dfExePc = (dfPQty[sfield]/dfLoa[sfield] * 100).fillna(0).astype(int)
dfExePc['section'] = '7. Completed %'
completed = ProgressQty.objects.filter(status='completed')
dfCompleted = pd.DataFrame(completed.values(district=F('site__district')).annotate(*[Sum(f) for f in SURVEY_QFIELDS]))
dfCompleted.columns = [f.replace('__sum', '') for f in dfCompleted.columns]
dfCompleted['pole_8m'] = dfCompleted['pole_ht_8m'] + dfCompleted['pole_lt_8m']
dfCompleted['pole_9m'] = (dfCompleted['dtr_100'] +
dfCompleted['dtr_63'] + dfCompleted['dtr_25'])*2
# dfCompleted['section'] = '7. completed'
dfCompleted.set_index('district', inplace=True)
dfQtyComBal = dfLoa[sfield].subtract(dfPQty[sfield])
dfQtyComBal['section'] = '8. LOA - Executed'
dfOngoing = dfPQty[sfield].subtract(dfCompleted[sfield])
notCompleted = SurveyQty.objects.exclude(site__progressqty__status='completed', status='canceled')
dfNotCompleted = pd.DataFrame(notCompleted.values(district=F('site__district')).annotate(*[Sum(f) for f in SURVEY_QFIELDS]))
dfNotCompleted.columns = [f.replace('__sum', '') for f in dfNotCompleted.columns]
dfNotCompleted['pole_8m'] = dfNotCompleted['pole_ht_8m'] + dfNotCompleted['pole_lt_8m']
dfNotCompleted.set_index('district', inplace=True)
dfNotCompleted = dfNotCompleted[sfield].subtract(dfOngoing[sfield])
dfNotCompleted['section'] = '9. To Execute'
# Compile into one table
dfQty = pd.concat([dfDprqty, dfLoa, dfSurvQty, dfScopeSurvQty, dfPQty,
dfQtyBal, dfExePc, dfNotCompleted, dfQtyComBal, dfConsolidatedScope], sort=False)
dfQty.sort_values(by=['district', 'section'], inplace=True)
dfQty.set_index([dfQty.index, dfQty['section']], inplace=True)
del dfQty['section']
display_fields = ['ht_conductor', *DPR_INFRA_FIELDS, 'pole_8m', 'pole_9m']
dfQty = dfQty[display_fields]
dfQty.loc[('TOTAL', '1. DPR'),
display_fields] = dfDprqty[display_fields].sum()
dfQty.loc[('TOTAL', '2. LOA'),
display_fields] = dfLoa[display_fields].sum()
dfQty.loc[('TOTAL', '3. Scope'),
display_fields] = dfScopeSurvQty[display_fields].sum()
dfQty.loc[('TOTAL', '3. Scope Consolidated'),
display_fields] = dfConsolidatedScope[display_fields].sum()
dfQty.loc[('TOTAL', '4. Approved'),
display_fields] = dfSurvQty[display_fields].sum()
dfQty.loc[('TOTAL', '5. Approval Balance'),
display_fields] = dfQtyBal[display_fields].sum()
dfQty.loc[('TOTAL', '6. Executed'),
display_fields] = dfPQty[display_fields].sum()
dfQty.loc[('TOTAL', '7. Completed %'), display_fields] = dfQty.loc[(
'TOTAL', '6. Executed'), display_fields]/dfQty.loc[('TOTAL', '2. LOA'), display_fields]*100
dfQty.loc[('TOTAL', '8. LOA - Executed'),
display_fields] = dfQtyComBal[display_fields].sum()
dfQty.loc[('TOTAL', '9. To Execute'),
display_fields] = dfNotCompleted[display_fields].sum()
dfQty = np.around(dfQty, 1)
intFields = [f for f in display_fields if ('pole' in f or 'dtr' in f)]
dfQty.fillna(0, inplace=True)
dfQty[intFields] = dfQty[intFields].astype(int)
dfQty.to_excel('outputs/balance_progress.xlsx')
# additional sites are those not included in DPR
dfNotDPR = pd.DataFrame(SurveyQty.objects.exclude(site__in=DprQty.objects.values('site')).values(
district=F('site__district')).annotate(additional=Count('site')))
dfNotDPR.set_index('district', inplace=True)
df_district['Non DPR'] = dfNotDPR
# if(not dfNotDPR.empty):
# df_district = df_district.add(dfNotDPR, fill_value=0)
#: non approved
# nonapproved = ProgressQty.objects.exclude(site__in=sqty.values('site')).values(
# district=F('site__district')).annotate(non_approved=Count('site'))
# dfNonapproved = pd.DataFrame(nonapproved)
# dfNonapproved.set_index('district', inplace=True)
# nonapprovednosite = ProgressQtyExtra.objects.exclude(site__site__in=SurveyQty.objects.all(
# ).values('site')).values(district=F('site__district')).annotate(non_approved=Count('site'))
# dfNonapprovedNonSite = pd.DataFrame(nonapprovednosite)
# if(not dfNonapprovedNonSite.empty):
# dfNonapprovedNonSite.set_index('district', inplace=True)
# if(not dfNonapprovedNonSite.empty):
# dfNonapproved.add(dfNonapprovedNonSite, fill_value=0)
df_district['Non approved'] = df_district['scope'] - df_district['approved']
df_district.fillna(0, inplace=True)
df_district.loc['∑'] = df_district.sum(numeric_only=True)
int_fields = ['completed', 'cert', 'approved','scope','Scope cat I','Scope cat II','Scope cat III', 'DPRHabs', 'Non DPR', 'Non approved', *[f+'__sum' for f in ['pole_ht_8m',
'pole_lt_8m', 'dtr_100', 'dtr_63', 'dtr_25']], 'DTR#']
df_district[int_fields] = df_district[int_fields].astype(int)
df_district.columns = [c.replace('__sum', '') for c in df_district.columns]
# materials shifted qtys
num_fields = ['acsr', 'cable_3p', 'cable_1p',
'pole_8m', 'pole_9m', 'dtr_100', 'dtr_63', 'dtr_25']
df_shifted = pd.DataFrame(ShiftedQty.objects.values(district=F('site__district')).annotate(*[Sum(f) for f in
num_fields
]))
df_shifted.set_index('district', inplace=True)
df_shiftedExtra = pd.DataFrame(ShiftedQtyExtra.objects.values(district=F('site__district')).annotate(*[Sum(f) for f in
num_fields
]))
if(not df_shiftedExtra.empty):
df_shiftedExtra.set_index('district', inplace=True)
df_shifted = df_shifted.add(df_shiftedExtra, fill_value=0)
df_shifted.loc['∑'] = df_shifted.sum()
int_fields = [f+'__sum' for f in ['pole_8m',
'pole_9m', 'dtr_100', 'dtr_63', 'dtr_25']]
df_shifted[int_fields] = df_shifted[int_fields].astype(int)
df_shifted.columns = [c.replace('__sum', '') for c in df_shifted.columns]
df_summary = df_shifted.join(df_district, lsuffix="(shifted)")
df_summary.columns = [c.replace('__sum', '') for c in df_summary.columns]
df_summary.to_excel('outputs/progess_summary.xlsx')
# completed sites
dfCatsAll = pd.DataFrame(DprQty.objects.filter(has_infra=True).values(district=F('site__district')).annotate(
dprhab_II=Count('category', filter=Q(category="II")), dprhab_III=Count('category', filter=Q(category="III"))))
dfCatsAll.set_index('district', inplace=True)
dfCatsAll['DPR total'] = dfDPR
completedSites = ProgressQty.objects.filter(
status='completed').values('site')
dfCats = pd.DataFrame(DprQty.objects.filter(site__in=completedSites).values(district=F('site__district')).annotate(
completed_II=Count('category', filter=Q(category="II")), completed_III=Count('category', filter=Q(category="III"))))
dfCats.set_index('district', inplace=True)
dfCats['completed_unassigned'] = (df_district['completed'] - dfCats['completed_II'] -
dfCats['completed_III']).fillna(0).astype(int)
dfCats['completed_total'] = df_district['completed']
approvedSites = SurveyQty.objects.all().values('site')
dfCatsSurv = pd.DataFrame(DprQty.objects.filter(site__in=completedSites).values(district=F('site__district')).annotate(
approved_II=Count('category', filter=Q(category="II")), approved_III=Count('category', filter=Q(category="III"))))
dfCatsSurv.set_index('district', inplace=True)
dfCatsSurv['approved_unassigned'] = (df_district['approved'] - dfCatsSurv['approved_II'] -
dfCatsSurv['approved_III']).fillna(0).astype(int)
dfCatsSurv['approved_total'] = df_district['approved']
dfCats = pd.concat([dfCatsSurv, dfCatsAll, dfCats], axis=1, sort=True)
dfCats.loc['∑'] = dfCats.sum()
fs = ['approved_II', 'approved_III', 'approved_unassigned', 'approved_total', 'dprhab_II',
'dprhab_III', 'DPR total', 'completed_II', 'completed_III', 'completed_unassigned', 'completed_total']
dfCats[fs] = dfCats[fs].fillna(0).astype(int)
# remove ht_conductor for progress display
del df_district['ht_conductor']
result1 = df_district.to_html(
na_rep="", justify="center", classes=['datatable'])
result2 = df_shifted.to_html(
na_rep="", justify="center", classes=['datatable'])
result3 = dfCats.to_html(
na_rep="", justify="center", classes=['datatable'])
result4 = dfQty.to_html(
na_rep="", justify="center", classes=['datatable'])
result = result1 + '<br>Shifted Qty<br>' + result2 + \
'<br>Categorywise' + result3 + '<BR>Balance' + result4
result = result.replace("_", " ").title()
return result
def getLog():
return Log.objects.values()[::-1][:20]
def createVariation(site, **kwargs):
xsite = SiteExtra()
xsite.village = kwargs['village']
xsite.census = kwargs['census']
xsite.habitation = kwargs['habitation']
xsite.district = kwargs['district']
xsite.division = kwargs['division']
xsite.category = kwargs['category']
xsite.block = kwargs['block']
xsite.site = site
xsite.save()
def switchSite(from_site_id, to_site_id):
try:
fromSite = Site.objects.get(id=from_site_id)
toSite = Site.objects.get(id=to_site_id)
for model in ['surveyqty', 'progressqty', 'shiftedqty', 'dprqty']:
obj = getattr(fromSite, model, None)
if(obj):
if(not getattr(toSite,model,None)):
obj.site = toSite
obj.save()
createVariation(toSite, **vars(fromSite))
# fromSite.delete()
return [{'class':'success', 'text':'site switched successfully'}]
except Exception as ex:
return [{'class':'bad', 'text':ex.__str__()}]
def consolidatedScope(svqty):
qty = svqty
try:
if(svqty.site.progressqty.review == 'ok'):
qty = svqty.site.progressqty
except:
pass
res = {x:getattr(qty,x,0) for x in SURVEY_QFIELDS}
res['district'] = svqty.site.district
return res
|
from itertools import groupby
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 15 12:04:27 2019
@author: CNEA
"""
class MATERIAL(object):
_KEY__ = ''
_NUM__ = None
_TYPE__ = 'MATERIAL'
def __init__(self, *args, **kwargs):
self._NUM__ = args
self._KEY__ = kwargs['KEY']
def string(self, *args, **kwargs):
return '^^ ' + self._TYPE__ + ' = ' + \
'(' * (len(self._NUM__) != 1) + \
' '.join(map(str, self._NUM__)) + \
')' * (len(self._NUM__) != 1) + \
(' KEY= ' + self._KEY__) * (
            1 if not hasattr(self, '_ax') else 0)  # TODO: this line is very messy
class FUEL_ELEMENT(MATERIAL):
_AX_ELEM = 1
def __init__(self, *args, **kwargs):
super(FUEL_ELEMENT, self).__init__(*args, **kwargs)
try:
Ax_elements = iter(kwargs['AXIAL'])
self._AX_ELEM = sum(map(lambda ax_slice: ax_slice[1].stop - ax_slice[1].start, Ax_elements))
self._ax = kwargs['AXIAL']
except TypeError as te:
self._AX_ELEM = kwargs['AXIAL']
self._ax = [self._KEY__ for _ in range(self._AX_ELEM)]
self._TYPE__ = 'FUEL ELEMENT'
@property
def ax_elem(self):
return self._AX_ELEM
def __setitem__(self, ax_slice, key):
        assert isinstance(key, str), 'Got variable of type ' + str(type(key)) + ' where str was expected'
if hasattr(ax_slice, 'indices'):
for k in range(*ax_slice.indices(self._AX_ELEM)):
self._ax[k] = key
else:
self._ax[ax_slice] = key
def string(self, *args, **kwargs):
_ax = []
for group, items in groupby(enumerate(self._ax), lambda a: a[1]):
_group = list(items)
start = _group[0][0]
stop = _group[-1][0]
_ax.append((group, slice(start, stop)))
return super(FUEL_ELEMENT, self).string(*args, **kwargs) + (
' *\n' + ' ' * len(super(FUEL_ELEMENT, self).string(*args, **kwargs))).join(
map(lambda ax_slice:
' KEY= ' + ax_slice[0] + ' AXIAL ELEMENT={0:2d} TO {1:2d}'.format(
ax_slice[1].start + 1, ax_slice[1].stop + 1),
_ax))
pass # FUEL_ELEMENT
class CONTROL_ROD(FUEL_ELEMENT):
def __init__(self, *args, **kwargs):
assert ('IN' in kwargs)
Kwargs = kwargs.copy()
Kwargs.update({'KEY': kwargs['KEY'] + '/IN=' + kwargs['IN']})
super(CONTROL_ROD, self).__init__(*args, **Kwargs)
self._TYPE__ = 'CONTROL ROD'
pass # CONTROL_ROD
class SECCION8(object):
def __init__(self, library):
self.__library = library
self.__materials = []
def AddMaterial(self, material):
assert isinstance(material, MATERIAL)
self.__materials.append(material)
def to_file(self, file):
with open(file, 'w') as fod:
fod.write('^^ LIBRARY = ' + self.__library + '\n')
fod.write('^^ SEC 8 GROUP from library\n')
AX_ELEM = 0
for material in self.__materials:
if hasattr(material, 'ax_elem') and material.ax_elem > AX_ELEM:
AX_ELEM = material.ax_elem
fod.write(material.string() + '\n')
# print(AX_ELEM)
# print(material._get_ax_elem())
fod.write('^^ INSERTION MAPPING 0 {}\n'.format(AX_ELEM))
fod.write('^^ FISSION SPEC FROM LIBRARY\n')
return
def ChangeMaterial(self, NewKeyword, Start, Stop, *MatIndex):
Start = int(Start)
Stop = int(Stop)
if Start != Stop:
for Indx in MatIndex:
self.__materials[Indx][Start:Stop] = NewKeyword
else:
for Indx in MatIndex:
self.__materials[Indx][Start] = NewKeyword
return
pass # SECCION8
def main_test():
print(MATERIAL(1, KEY='AGUA').string())
print(FUEL_ELEMENT(2, KEY='TIPO5', AXIAL=10).string())
print(CONTROL_ROD(4, 3, 4, 5, 6, KEY='TIPO9', AXIAL=9, IN='TIPO3').string())
return
def main():
aer_sc8 = SECCION8('aer')
aer_sc8.AddMaterial(MATERIAL(1, KEY='AGUA'))
alf = FUEL_ELEMENT(2, KEY='TIPO5', AXIAL=10)
alf[1:5] = 'TIPO6'
aer_sc8.AddMaterial(alf)
aer_sc8.AddMaterial(CONTROL_ROD(4, 3, 4, 5, 6, KEY='TIPO9', AXIAL=9, IN='TIPO3'))
aer_sc8.to_file('aer_output_python.mat')
return
class C:
def __init__(self, **kwargs):
self._val = 1
self.__b = {0: 1}
# self.axial = kwargs['AXIAL']
@property
def val(self):
return self._val
@property
def b(self):
return self.__b
def __getitem__(self, items):
print(items)
return items
def __setitem__(self, key, value):
self[key] = value
def show(self, *args):
print(args)
return
if __name__ == '__main__':
# aer_sc8 = SECCION8('aer')
# aer_sc8.AddMaterial(MATERIAL(1, KEY='AGUA'))
alf = FUEL_ELEMENT(2, KEY='TIPO5', AXIAL=10)
# alf[1:5] = 'TIPO6'
# aer_sc8.AddMaterial(alf)
# aer_sc8.AddMaterial(CONTROL_ROD(4, 3, 4, 5, 6, KEY='TIPO9', AXIAL=9, IN='TIPO3'))
# aer_sc8.to_file('aer_output_python.mat')
# c = C()
# c.show(*map(lambda CR: '{}b'.format(CR), [21, 23]))
|
#### Class 03
#### Reading and writing files
## Reading text files ------------------------------------------------
import sys
import os
os.chdir("C:/Users/wooki/Documents/GitHub/pythoncourse2018/day04")
## Read all lines as one string
with open('test_readfile.txt') as f:
the_whole_thing = f.read()
print the_whole_thing
## Read line by line
with open('test_readfile.txt') as f:
lines_list = f.readlines()
for l in lines_list:
print l
## More efficiently we can loop over the file object
## (i.e. we don't need the variable lines)
with open('test_readfile.txt') as f:
for l in f:
print l
## We can also manually open and close files,
## now we need to handle exceptions and close
## I never do this
f = open('test_readfile.txt')
print f.read()
f.close()
## Writing text files ------------------------------------------------
## Writing files is easy,
## open command takes r, w, a, plus some others
with open('test_writefile.txt', 'w') as f:
## wipes the file clean and opens it
f.write("Hi guys.\n")
f.write("Does this go on the second line?\n")
f.writelines(['a\n', 'b\n', 'c\n'])
with open('test_writefile.txt', 'a') as f:
## appends
f.write("I got appended!")
## Writing csv files ------------------------------------------------
import csv
## Open a file stream and create a CSV writer object
with open('test_writecsv.txt', 'wb') as f:
my_writer = csv.writer(f)
for i in range(1, 100):
my_writer.writerow([i, i-1])
## Now read in the csv
with open('test_writecsv.txt', 'rb') as f:
my_reader = csv.reader(f)
mydat = []
for row in my_reader:
mydat.append(row)
print mydat
## Adding column names
with open('test_csvfields.csv', 'wb') as f:
my_writer = csv.DictWriter(f, fieldnames = ("A", "B"))
my_writer.writeheader()
for i in range(1, 100):
my_writer.writerow({"B":i, "A":i-1})
with open('test_csvfields.csv', 'rb') as f:
my_reader = csv.DictReader(f)
for row in my_reader:
print row
# Copyright (c) 2014 Matt Dickenson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
|
import numpy as np
import math
class Pegasos(object):
"""docstring for Pegasos"""
def __init__(self, reg, k, maxiter = 1000000, X=None, Y=None, check = False ):
super(Pegasos, self).__init__()
self.reg = reg
self.k = k
self.W = None
self.iteration = 2
self.maxiter = maxiter
self.X = X
self.Y = Y
self.check = check
def update(self, X, Y):
if self.iteration > self.maxiter:
return
x_shape = X.shape
y_shape = Y.shape
if self.W is None:
self.W = 1/math.sqrt(self.reg) * np.random.randn(x_shape[1] , y_shape[1])
#Learning Rate
eta = 1.0 / (self.reg * self.iteration)
#Evaluation
z = np.multiply(np.dot(X, self.W), Y)
#Stochastic Gradient Descent Step
self.W = (1.0 - eta * self.reg) * self.W - eta / self.k * self.vectorize_dloss(z) * np.multiply(X, Y.T).T
#Projection Step
projection = 1.0 / math.sqrt(self.reg) / (np.linalg.norm(self.W))
if projection < 1.0:
self.W = projection * self.W
if self.check:
#Sanity Check on Loss Function
if self.iteration % 1000 == 0:
print "loss", self.loss(self.X, self.Y)
self.iteration = self.iteration + 1
def solve(self):
pass
def vectorize_dloss(self, z):
#L1-loss vectorize
dloss = np.zeros(z.shape)
dloss[z<1] = -1
return dloss
def score(self, X, Y):
#binary svm
if Y.shape[1] == 1:
prediction = np.multiply(np.dot(X, self.W), Y)
return len(prediction[prediction > 0]) / float(len(prediction))
#ova svm
else:
#prediction
prediction = np.argmax(np.dot(X, self.W), axis = 1) - np.argmax(Y, axis = 1)
return len(prediction[prediction == 0]) / float(len(prediction))
def loss(self, X, Y):
loss = 1 - np.multiply(np.dot(X, self.W), Y)
return np.sum(loss[loss > 0])
def infer(self, X):
return np.dot(X, self.W)
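# --- Minimal usage sketch (added for illustration; not part of the original class). ---
# Assumptions: binary labels in {-1, +1} stored as an (n, 1) column, and one sample
# per update() call, which is the shape the broadcasting in update() supports.
if __name__ == "__main__":
    np.random.seed(0)
    n, d = 200, 5
    X = np.random.randn(n, d)
    true_w = np.random.randn(d, 1)
    Y = np.sign(X.dot(true_w))  # (n, 1) labels in {-1, +1}
    model = Pegasos(reg=0.1, k=1, maxiter=10000)
    for epoch in range(25):
        for i in np.random.permutation(n):
            model.update(X[i:i + 1], Y[i:i + 1])  # single-sample mini-batch
    print("training accuracy: {:.3f}".format(model.score(X, Y)))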
|
import time
x=int(input("Please enter the first number :"))
y=int(input("Please enter the second number :"))
i=1
factorsof_x=set()
factorsof_y=set()
for i in range(1,x+1):
if x%i==0:
factorsof_x.add(i)
print("Calculating factors of",x,"...")
time.sleep(1)
print(factorsof_x)
for i in range(1,y+1):
if y%i==0:
factorsof_y.add(i)
print("Calculating factors of",y,"...")
time.sleep(1)
print(factorsof_y)
time.sleep(1)
common_fact=factorsof_x.intersection(factorsof_y)
print("Calculating common factors ... ")
time.sleep(1)
print(common_fact)
time.sleep(1)
common_list=list(common_fact)
print("Calculating GCD of",x,"and",y,"...")
time.sleep(1)
print(max(common_list))
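
# For comparison, a minimal sketch of Euclid's algorithm, which gives the same
# result without enumerating every factor (Python's built-in math.gcd does this too):
def gcd_euclid(a, b):
    while b:
        a, b = b, a % b
    return a

print("GCD via Euclid's algorithm:", gcd_euclid(x, y))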
|
import numpy as np
import logging
from omegaconf import DictConfig
import torch
import torch.nn.functional as F
from torch.utils.data import IterableDataset
from torch.nn.utils.rnn import pad_sequence
from transformers import AutoTokenizer
from typing import Dict, Tuple
logger = logging.getLogger(__name__)
# pylint:disable=no-member
class FullDocDataset(IterableDataset):
def __init__(self, data, max_seq_len: int = 128, sep_token_id: int = -1, allow_cross_doc=True, cycle=False):
self.data = data
self.max_seq_len = max_seq_len
self.sep_token_id = sep_token_id
self.allow_cross_doc = allow_cross_doc
self.cycle = cycle
if isinstance(self.data[0][0], torch.Tensor):
self.sep_token_id = torch.tensor([self.sep_token_id], dtype=self.data[0][0].dtype)[0]
def __iter__(self):
"""
Returns:
sequence: torch.LongTensor
document_end: bool whether it is the end of the document
"""
indices = np.arange(len(self.data))
np.random.shuffle(indices)
seq_buffer = []
document_end = False
past_pointer = 0
# then we sample a sequence with the desired sequence length
for index in indices:
document = self.data[index]
if not self.allow_cross_doc:
for i in range(0, len(document), self.max_seq_len):
document_end = i + self.max_seq_len > len(document)
yield self.wrap_output(document[i:i + self.max_seq_len]), document_end
else:
while past_pointer < len(document):
# history pointer for the current document
next_pointer = past_pointer + self.max_seq_len - len(seq_buffer)
segment = document[past_pointer:next_pointer]
seq_buffer.extend(segment)
if len(seq_buffer) == self.max_seq_len:
document_end = (past_pointer + self.max_seq_len >= len(document)) | document_end
yield self.wrap_output(seq_buffer), document_end
seq_buffer = []
document_end = False
past_pointer += len(segment)
# if the document is over
past_pointer = 0
if len(seq_buffer) > 0:
seq_buffer.append(self.sep_token_id)
document_end = True
def wrap_output(self, x):
x = torch.stack(x, dim=0)
return x
def __len__(self):
"Estimated value. Only use for debug!"
return sum([len(item) for item in self.data]) // self.max_seq_len
class FullDocCollate:
def __init__(self, config):
self.config = config
self.tokenizer = AutoTokenizer.from_pretrained("roberta-base")
self.pad_token_id = self.tokenizer.pad_token_id
self.mask_token_id = self.tokenizer.mask_token_id
def __call__(self, batch):
batch_input_ids, _ = zip(*batch)
batch_input_ids = pad_sequence(batch_input_ids, batch_first=True, padding_value=self.pad_token_id)
batch_input_ids = F.pad(batch_input_ids, (1, 0), value=self.tokenizer.cls_token_id)
batch_input_ids = F.pad(batch_input_ids, (0, 1), value=self.tokenizer.sep_token_id)
batch_input_ids = batch_input_ids.long()
batch_input_ids, labels = self.mask_tokens(batch_input_ids)
batch = {"input_ids": batch_input_ids, "labels": labels}
return batch
def mask_tokens(self, inputs: torch.Tensor, mlm_probability: float = 0.15) -> Tuple[torch.Tensor, torch.Tensor]:
""" Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. """
if self.tokenizer.mask_token is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer."
)
labels = inputs.clone()
# We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa)
probability_matrix = torch.full(labels.shape, mlm_probability)
special_tokens_mask = [
self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
]
probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
if self.pad_token_id is not None:
padding_mask = labels.eq(self.pad_token_id)
probability_matrix.masked_fill_(padding_mask, value=0.0)
masked_indices = torch.bernoulli(probability_matrix).bool()
labels[~masked_indices] = -100 # We only compute loss on masked tokens
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
inputs[indices_replaced] = self.mask_token_id
# 10% of the time, we replace masked input tokens with random word
indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
random_words = torch.randint(self.config.model.vocab_size, labels.shape, dtype=torch.long)
inputs[indices_random] = random_words[indices_random]
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
return inputs, labels
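# --- Minimal usage sketch (illustrative only). It assumes `data` is a list of
# documents, each a list of 0-dim token-id tensors, matching the isinstance check
# in FullDocDataset.__init__; FullDocCollate is left out here because it downloads
# the roberta-base tokenizer. ---
if __name__ == "__main__":
    toy_docs = [[torch.tensor(tok) for tok in range(40)] for _ in range(3)]
    dataset = FullDocDataset(toy_docs, max_seq_len=16, sep_token_id=2)
    for sequence, document_end in dataset:
        print(sequence.shape, document_end)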
|
import pandas as pd
import numpy as np
df = pd.read_csv('../dataset/googleplaystore.csv')
df['new'] = pd.to_datetime(df['Last Updated'])
df['lastupdate'] = (df['new'] - df['new'].max()).dt.days
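# For reference: 'lastupdate' is the signed difference in days from the most
# recent 'Last Updated' date in the file, so the newest apps get 0 and older
# apps get negative values (e.g. an app updated 30 days earlier gets -30).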
df.to_csv('../dataset/pre-processed/lastUpdated.csv')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__mtime__ = '2019/5/5'
from selenium import webdriver
from time import sleep
import unittest
class Login(unittest.TestCase):
'''
    ZenTao login
'''
@classmethod
def setUpClass(cls):
cls.driver = webdriver.Firefox()
def setUp(self):
self.driver.get("http://127.0.0.1:82/zentao/user-login-L3plbnRhby8=.html")
def is_login_success(self):
try:
text = self.driver.find_element_by_css_selector(".user-name").text
return text
except:
return ""
def is_alert_exist(self):
try:
sleep(3)
            alert = self.driver.switch_to.alert
text = alert.text
sleep(2)
alert.accept()
return text
except Exception as info:
print(info)
return ""
def test_01_(self):
'''
        Test case 1: successful login
:return:
'''
sleep(2)
self.driver.find_element_by_css_selector("#account").send_keys("admin")
self.driver.find_element_by_css_selector("[name='password']").send_keys("Yanfengmusic521")
self.driver.find_element_by_css_selector("#submit").click()
sleep(3)
text = self.is_login_success()
self.assertTrue(text=='admin')
def test_02(self):
'''
        Test case 2: failed login
'''
sleep(2)
self.driver.find_element_by_css_selector("#account").send_keys("admin")
self.driver.find_element_by_css_selector("[name='password']").send_keys("Yanfengmusic52")
self.driver.find_element_by_css_selector("#submit").click()
sleep(3)
text = self.is_login_success()
print("登录失败,获取结果为%s" % text)
        self.assertTrue(text == "")
def tearDown(self):
self.is_alert_exist()
self.driver.delete_all_cookies()
self.driver.refresh()
@classmethod
def tearDownClass(cls):
cls.driver.quit()
# if __name__ == '__main__':
# unittest.main()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import *
# Register your models here.
admin.site.register(Movie)
admin.site.register(Keyword)
admin.site.register(Actor)
admin.site.register(UserMovie)
|
from Pyskell.Language.PyskellTypeSystem.TypeSignature import *
from Pyskell.Language.PyskellTypeSystem.TypedFunction import *
from Pyskell.Language.PyskellTypeSystem.AlgebraicDataType import *
from Pyskell.Language.PyskellTypeSystem.TypeClass import *
from Pyskell.Language.PyskellTypeSystem.PatternMatching import *
|
from app.models.article import Tags
async def create_tag(name: str):
return await Tags.objects.create(name=name)
async def get_tags():
return await Tags.objects.all()
|
#!/usr/bin/env python
"""
This uses the spotify web API to get analysis data for the track.
The analysis is used to create a "smart fast forward" that advances
to sections of the song (n-key).
It also uses the analysis data to send note data over OSC to a synthesizer
creating a very strange sort of automatic accompaniment.
Credentials must be set in environment variables before running this.
Here's a shell script that can do that:
----------------
#!/bin/bash
#call this pseudoscript like:
# $ . spotcreds.bash
# or
# $ source spotcreds.bash
export SPOTIPY_CLIENT_ID='f6b5e0293b1446fbbd9402c1f365085e'
export SPOTIPY_CLIENT_SECRET='ec56c12525ce49fbb19f442e0916a52b'
export SPOTIPY_REDIRECT_URI='http://localhost:8888/callback'
export LD_LIBRARY_PATH=/usr/local/lib
python play_analysis.py
----------------
"""
from __future__ import unicode_literals
import sys
from keys import KBHit
import threading
import time
import spotify
from track_features import get_analysis
from osc_tx import oscsend
from creds import spot_username, spot_password
if sys.argv[1:]:
track_uri = sys.argv[1]
else:
track_uri = 'spotify:track:1ZPlNanZsJSPK5h9YZZFbZ'
#some other tracks: 7Ke18a4dLDyjdBRNd5iLLM 5uNlgK7FEg6r9BGy12P9Sx 5GgUWb9o5ga3F7o6MYyDHO 1VsNbze4CN1b1QgVdWlc3K 11hqMWwX7sF3sOGdtijofF
#track Keyboard presses
kb = KBHit()
# Assuming a spotify_appkey.key in the current dir
session = spotify.Session()
# Process events in the background
loop = spotify.EventLoop(session)
loop.start()
# Connect an audio sink
audio = spotify.AlsaSink(session)
# Events for coordination
logged_in = threading.Event()
end_of_track = threading.Event()
section_times = list()
timecount = 0
def clamp(n, smallest, largest): return max(smallest, min(n, largest))
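# e.g. clamp(5000, 0, 3600000) -> 5000; clamp(-200, 0, 3600000) -> 0;
# clamp(4000000, 0, 3600000) -> 3600000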
def on_connection_state_updated(session):
if session.connection.state is spotify.ConnectionState.LOGGED_IN:
logged_in.set()
def on_end_of_track(self):
end_of_track.set()
def to_next(t):
global starttime
ms = (1000 * section_times[t])-500
totime = clamp (int( ms ), 0, 3600000) #keep it between 0ms and an hour
starttime = time.time() - (totime * 0.001) #reset
print ('next section at time: '+str(totime))
session.player.seek(totime)
oscsend('/key',track_data.key)
oscsend('/majmin',track_data.maj_or_min)
oscsend('/tempo',track_data.bpm)
# Register event listeners
session.on(
spotify.SessionEvent.CONNECTION_STATE_UPDATED, on_connection_state_updated)
session.on(spotify.SessionEvent.END_OF_TRACK, on_end_of_track)
# Block until Login is complete
print('Waiting for Login to Complete...')
session.login(spot_username, spot_password, remember_me=True)
logged_in.wait()
print('logged in!')
# Assuming a previous login with remember_me=True and a proper logout
#session.relogin()
#logged_in.wait()
# Play a track
tid = [ track_uri.split(':')[2] ]
print(tid)
track_data = get_analysis(tid)
section_times = track_data.section_times
print(section_times)
track = session.get_track(track_uri).load()
session.player.load(track)
session.player.play()
starttime = time.time()
current_segment = 0
segs = track_data.segments[0]
et = 0
pitchnames = ['c','c#','d','d#','e','f','f#','g','g#','a','a#','b']
# Wait for playback to complete or Ctrl+C or, better, Ctrl+Shift+\. There is probably a better way to do this.
try:
while not end_of_track.wait(0.1):
et = (time.time() - starttime) #elapsed time
#print('tick %d',et)
if segs[current_segment]['start'] < et:
            #get the segment pitches and keep only items with strong presence (> 0.8). Send list of 1/0 to pd
pitchlist = segs[current_segment]['pitches']
pitchbools = [int(x>0.8) for x in pitchlist]
#print('pitchlist ' + str(segs[current_segment]['start'])+' - '+str(pitchlist) )
pitchchar = list()
presentpitches = list()
#massage the pitch array into something we can use in pure data. Instead of bools at note position, create a list of note numbers
for p in range ( 0, len(pitchlist) ):
if pitchbools[p] == 1:
presentpitches.append(p)
#pitchchar.append(pitchnames[p])
oscsend('/pitches',presentpitches)
#print('pitchchar ' + str(pitchchar) )
current_segment = current_segment + 1
if kb.kbhit():
c = kb.getch()
if ord(c) == 27: # ESC
break
if ord(c) == 110: # n
to_next( timecount % len(section_times) )
timecount = timecount+1
if ord(c) == 115: # s
print('s key')
pass
except KeyboardInterrupt:
pass
|
# -*- coding: utf-8 -*-
class Solution:
def wordBreak(self, s, wordDict):
if not s:
return True
wordSet = set(wordDict)
result = [False] * (len(s) + 1)
for i in range(len(s) + 1):
if not result[i] and s[: i + 1] in wordSet:
result[i] = True
if result[i]:
for j in range(i + 1, len(s) + 1):
if not result[j] and s[i + 1 : j + 1] in wordSet:
result[j] = True
return result[-1]
if __name__ == "__main__":
solution = Solution()
assert solution.wordBreak("leetcode", ["leet", "code"])
assert solution.wordBreak("applepenapple", ["apple", "pen"])
assert not solution.wordBreak("catsandog", ["cats", "dog", "sand", "and", "cat"])
assert not solution.wordBreak(150 * "a" + "b", ["a", "aa"])
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.python.target_types import (
PythonRequirementFindLinksField,
PythonRequirementModulesField,
PythonRequirementResolveField,
PythonRequirementsField,
PythonRequirementTarget,
)
from pants.engine.rules import collect_rules, rule
from pants.engine.target import (
COMMON_TARGET_FIELDS,
BoolField,
GeneratedTargets,
GenerateTargetsRequest,
StringField,
TargetGenerator,
)
from pants.engine.unions import UnionMembership, UnionRule
from pants.util.strutil import help_text
from pants.version import PANTS_SEMVER
class PantsRequirementsTestutilField(BoolField):
alias = "testutil"
default = True
help = "If true, include `pantsbuild.pants.testutil` to write tests for your plugin."
class PantsRequirementsVersionSpecField(StringField):
alias = "version_spec"
default = f"== {PANTS_SEMVER.public}"
help = help_text(
"""
The PEP 440 version specifier version of Pants to target.
E.g. `== 2.15.*`, or `>= 2.16.0, < 2.17.0`
"""
)
class PantsRequirementsTargetGenerator(TargetGenerator):
alias = "pants_requirements"
help = help_text(
"""
Generate `python_requirement` targets for Pants itself to use with Pants plugins.
This is useful when writing plugins so that you can build and test your
plugin using Pants.
The generated targets will have the correct version based on the exact `version` in your
`pants.toml`, and they will work with dependency inference. They're pulled directly from
our GitHub releases, using the relevant platform markers.
(If this versioning scheme does not work for you, you can directly create
`python_requirement` targets for `pantsbuild.pants` and `pantsbuild.pants.testutil`. We
also invite you to share your ideas at
https://github.com/pantsbuild/pants/issues/new/choose)
"""
)
generated_target_cls = PythonRequirementTarget
core_fields = (
*COMMON_TARGET_FIELDS,
PantsRequirementsVersionSpecField,
PantsRequirementsTestutilField,
PythonRequirementFindLinksField,
)
copied_fields = COMMON_TARGET_FIELDS
moved_fields = (PythonRequirementResolveField,)
class GenerateFromPantsRequirementsRequest(GenerateTargetsRequest):
generate_from = PantsRequirementsTargetGenerator
@rule
def generate_from_pants_requirements(
request: GenerateFromPantsRequirementsRequest, union_membership: UnionMembership
) -> GeneratedTargets:
generator = request.generator
version_spec = generator[PantsRequirementsVersionSpecField].value
def create_tgt(dist: str, module: str) -> PythonRequirementTarget:
return PythonRequirementTarget(
{
PythonRequirementsField.alias: (f"{dist} {version_spec}",),
PythonRequirementFindLinksField.alias: ("https://wheels.pantsbuild.org/simple",),
PythonRequirementModulesField.alias: (module,),
**request.template,
},
request.template_address.create_generated(dist),
union_membership,
)
result = [create_tgt("pantsbuild.pants", "pants")]
if generator[PantsRequirementsTestutilField].value:
result.append(create_tgt("pantsbuild.pants.testutil", "pants.testutil"))
return GeneratedTargets(generator, result)
def rules():
return (
*collect_rules(),
UnionRule(GenerateTargetsRequest, GenerateFromPantsRequirementsRequest),
)
|
import gpt_2_simple as gpt2
import pandas as pd
from tqdm import tqdm
import os
input_data_file = './data/2-quotes_filtered.csv'
output_folder = "output/"
split_quotes = pd.read_csv(input_data_file)
print(split_quotes.head())
## These were the top 20 most common topics in the dataset
topics_to_keep = ['life', 'love','inspirational', 'humor',
'death', 'art', 'education', 'books', 'change', 'time',
'beauty', 'god', 'happiness', 'children', 'work', 'faith',
'funny', 'good', 'family', 'friendship']
quotes_to_keep = split_quotes[split_quotes['topic'].isin(topics_to_keep)]
## Create the GPT-ready dataset
file_name = os.path.join(output_folder, "processed_quotes.txt")
with open(file_name, mode='w') as open_file:
    for index, row in quotes_to_keep.iterrows():
open_file.write("_TOPIC_ {} _QUOTE_ {} _AUTHOR_ {} _END_\n".format(row['topic'], row['quote'], row['author']))
## Encode it to make loading faster during training
gpt2.encode_dataset(file_name, out_path=os.path.join(output_folder,'text_encoded.npz'))
|
# -*-coding: utf-8-*-
import numpy as np
'''Read the data from the file into a matrix'''
def openFile(filename):
file_object = open(filename)
fileContext = file_object.readlines()
tempData = np.array(fileContext)
# print tempData
size = tempData.size
# print size
data = np.zeros([size, 5], dtype=basestring)
for i in range(0, size):
for j in range(0, 5):
data[i][j] = tempData[i].split('\t')[j]
if j == 4:
data[i][j] = data[i][j].strip('\n')
# print data
return data, size
'''Quantify the matrix's text attributes numerically and normalize them'''
def turnTheMatrix(data, size):
myData = np.zeros([size, 5])
operator = [{'young':0, 'pre':0.5, 'presbyopic': 1}, {'myope': 0, 'hyper': 1}, {'yes': 0, 'no': 1},
{'reduced': 0, 'normal': 1}, {'no lenses':0, 'soft':0.5, 'hard':1}]
for j in range(0, 5):
for i in range(0, size):
myData[i][j] = operator[j].get(data[i][j])
return myData
def counter(lineNum, aimer_1, data, size, aimer_2 = None):
    if aimer_2 is None:
temp = 0
for i in range(0, size):
if data[i][lineNum] == aimer_1:
temp = temp + 1
else:
temp = 0
for i in range(0, size):
if data[i][lineNum] == aimer_1 and data[i][4] == aimer_2:
temp = temp + 1
return temp
def pOfClass(data, size):
c = np.zeros(3, dtype=float)
p = np.zeros(3, dtype=float)
for i in range(size):
if data[i][4] == 0:
c[0] = c[0] + 1
elif data[i][4] == 0.5:
c[1] = c[1] + 1
elif data[i][4] == 1:
c[2] = c[2] + 1
for i in range(3):
p[i] = c[i] / size
return c, p
def classifier(data, testData, size):
# print '\n' + str(testData)
    probability = np.ones(3, dtype=float)  # array of predicted class probabilities
    condition = testData  # target condition (feature) array
    # probability of the combined condition (the evidence)
temp = 0
for i in range(size):
flag = True
for j in range(0, 4):
if data[i][j] != condition[j]:
flag = False
break
if flag == True:
temp = temp + 1
# print 'temp = ' + str(temp)
    # prior probability of each target class
num_result, pResult = pOfClass(data, size)
    # P(condition | class)
p_condition = np.zeros([3, 4], dtype=float)
feature = 0.0
for j in range(3):
for i in range(0, 4):
count = counter(i, condition[i], data, size, feature)
p_condition[j][i] = count / num_result[j] * 1.0
# print '-----' + str(count) + '--' + str(num_result[j])
feature = feature + 0.5
# print '=====' + str(p_condition)
    # posterior probability for each case
for q in range(3):
for j in range(0, 4):
probability[q] = probability[q] * p_condition[q][j]
        probability[q] = probability[q] * pResult[q] / (temp / size + 0.0000001)  # add 0.0000001 to avoid division by zero
pred = 0
if probability[0] >= probability[1] and probability[0] >= probability[2]:
pred = 0
elif probability[1] >= probability[0] and probability[1] >= probability[2]:
pred = 0.5
elif probability[2] >= probability[0] and probability[2] >= probability[1]:
pred = 1
return pred
if __name__ == '__main__':
sourceData, size = openFile('lenses.txt')
data = turnTheMatrix(sourceData, size)
#print data
prediction = np.zeros(size, dtype=float)
temp = 0.0
for i in range(size):
prediction[i] = classifier(data, data[i], size)
if prediction[i] == data[i][4]:
temp = temp + 1
print 'prediction:\n' + str(prediction)
print 'class:\n' + str(data[:, 4])
print 'the precision: ' + str(round(temp / size * 100, 2)) + '%'
|
import pyrealsense2 as rs
import numpy as np
import cv2
import open3d as o3d
from floodfill import Grid
import scipy
import matplotlib.pyplot as plt
import time
from tracking import processObjects
NUMBER_OF_OBJECTS = 100
MAX_DIST = 0.5
config_perception = {
"IMAGE_WIDTH":640,
"IMAGE_HEIGHT":480,
"samplesX":32,
"samplesY":32,
"pruneThreshold":0.8,
"greedThreshold":0.01,
"mergeThreshold":0.1}
def main():
# objects = np.empty(NUMBER_OF_OBJECTS, dtype=object)
objects = []
old_time = time.time()
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('./data/output.avi', fourcc, 30.0, (1280, 720))
pipeline = rs.pipeline()
queue = rs.frame_queue(5000, keep_frames=True)
config = rs.config()
vis = o3d.visualization.Visualizer()
vis.create_window('PCD', width=1280, height=720)
pointcloud = o3d.geometry.PointCloud()
vis.add_geometry(pointcloud)
geom_added = False
# note: using 640 x 480 depth resolution produces smooth depth boundaries
    # using rs.format.rgb8 for the color stream (note: OpenCV expects BGR channel order)
config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 1280, 720, rs.format.rgb8, 30)
config.enable_device_from_file("./data/realsense.bag", repeat_playback=True)
# Start streaming
profile = pipeline.start(config, queue)
depth_sensor = profile.get_device().first_depth_sensor()
# Getting the depth sensor's depth scale (see rs-align example for explanation)
depth_scale = depth_sensor.get_depth_scale()
# We will not display the background of objects more than
# clipping_distance_in_meters meters away
    clipping_distance_in_meters = 4  # 4 meters
clipping_distance = clipping_distance_in_meters / depth_scale
# filters
# decimation filter
decimation = rs.decimation_filter()
decimation.set_option(rs.option.filter_magnitude, 4)
# spatial filter
spatial = rs.spatial_filter()
spatial.set_option(rs.option.filter_magnitude, 5)
spatial.set_option(rs.option.filter_smooth_alpha, 1)
spatial.set_option(rs.option.filter_smooth_delta, 50)
temporal = rs.temporal_filter()
hole_filling = rs.hole_filling_filter()
depth_to_disparity = rs.disparity_transform(True)
disparity_to_depth = rs.disparity_transform(False)
# Create an align object
# rs.align allows us to perform alignment of depth frames to others frames
# The "align_to" is the stream type to which we plan to align depth frames.
align_to = rs.stream.color
align = rs.align(align_to)
transformation_matrix = [[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]
inv_transform = np.linalg.inv(transformation_matrix)
#grid = Grid(config_perception)
#grid.initialize()
# Streaming loop
frame_count = 0
intrinsics = profile.get_stream(rs.stream.depth).as_video_stream_profile().get_intrinsics()
pcd_old = o3d.geometry.PointCloud()
while frame_count < 1000:
cycle_time_start = time.time()
# Get frameset of color and depth
# frames = pipeline.wait_for_frames()
frames = queue.wait_for_frame().as_frameset()
current_time = frames.get_frame_metadata(rs.frame_metadata_value.time_of_arrival)
start = time.time()
# Align the depth frame to color frame
aligned_frames = align.process(frames)
# Get aligned frames
depth_frame = aligned_frames.get_depth_frame()
color_frame = aligned_frames.get_color_frame()
# Validate that both frames are valid
if not depth_frame or not color_frame:
continue
depth_image = np.asanyarray(depth_frame.get_data())
color_image = np.asanyarray(color_frame.get_data())
end = time.time()
#print("Frame Post Processing took: " + str((end - start) * 1000) + "ms")
#############
####
## Create Point Cloud
####
#############
start = time.time()
img_depth = o3d.geometry.Image(depth_image)
img_color = o3d.geometry.Image(color_image)
rgbd = o3d.geometry.RGBDImage.create_from_color_and_depth(
img_color, img_depth, depth_trunc=clipping_distance_in_meters, convert_rgb_to_intensity=False)
pinhole_camera_intrinsic = o3d.camera.PinholeCameraIntrinsic(intrinsics.width, intrinsics.height, intrinsics.fx,
intrinsics.fy,
intrinsics.ppx, intrinsics.ppy)
pcd = o3d.geometry.PointCloud.create_from_rgbd_image(rgbd, pinhole_camera_intrinsic)
pcd = pcd.remove_non_finite_points(remove_nan=True ,remove_infinite=True)
pcd = pcd.voxel_down_sample(voxel_size=0.05)
points = np.asarray(pcd.points)
pcd_tree = o3d.geometry.KDTreeFlann(pcd)
start = time.time()
[k, idx, _] = pcd_tree.search_knn_vector_3d(pcd.points[1500], 4)
end = time.time()
print("Neighbour search: " + str((end - start) * 1000) + "ms")
print(idx)
#grid.update(points)
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
A=int(input())
list=['программист','программиста', 'программистов']
iA = A % 100
if iA >=9 and iA <=20:
print(str(A) + " " + list[2])
else:
iA = iA % 10
if iA == 1:
print(str(A) + " " + list[0])
elif iA > 1 and iA < 5:
print(str(A) + " " + list[1])
else:
print(str(A) + " " + list[2])# put your python code here
|
import unittest
from expense import Expense
class TestExpense(unittest.TestCase):
def setUp(self):
self.expense = Expense(10, 'Food', '12-12-1234')
def test_str(self):
self.assertEqual(str(self.expense), '10$ - Food - 12-12-1234 - Expense')
def test_repr(self):
self.assertEqual(repr(self.expense), '10$ - Food - 12-12-1234 - Expense')
def test_eq(self):
self.assertTrue(self.expense == Expense(10, 'Food', '12-12-1234'))
if __name__ == '__main__':
unittest.main()
|
from xml.etree import ElementTree as et
import os
from sys import exit
from time import sleep
import traceback as trace
def main():
for filename in os.listdir(path=os.getcwd() + "\\xml"):
if filename.endswith(".xml"):
root = et.parse("xml/" + filename).getroot()
for element in root[0][1][0]:
if element.find("Denominazione") != None:
denominazione = element.find("Denominazione").text
break
for element in root[1][0]:
if element.find("Data") != None:
data = element.find("Data").text
numero = element.find("Numero").text
numero = numero.replace("/","-")
break
try:
path = os.getcwd() + "\\pdf\\" + data + "_" + denominazione + "_" + numero + ".txt"
pdfcreator = "PDFCreator.exe /PrintFile=\"" + path + "\""
with open(path, "w") as f:
f.write("----------------------------- TESTA FATTURA -----------------------------\n")
for element in root.iter("*"):
if element.tag == "DatiTrasmissione" or element.tag == "CedentePrestatore" or element.tag == "CessionarioCommittente":
f.write("\n:> " + element.tag + "\n")
elif element.tag == "FatturaElettronicaBody":
f.write("\n\n----------------------------- DETTAGLIO FATTURA -----------------------------\n\n")
elif element.tag == "NumeroLinea":
f.write("\n")
elif element.tag == "ds:Signature" or element.tag == "Allegati":
break
                        if len(element) == 0:
f.write(element.tag + ": " + element.text + "\n")
except OSError:
trace.print_exc()
break
exit(-1)
except (DeprecationWarning, TypeError):
continue
finally:
os.system(pdfcreator)
sleep(3)
for filename in os.listdir(path=os.getcwd() + "\\pdf"):
if filename.endswith(".txt"):
os.remove(os.getcwd() + "\\pdf" + "\\" + filename)
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
import math
import numpy as np
from numpy import array
from operator import add
from regions import *
################################################################################
############################### Vertices buffer ################################
################################################################################
_bytes_alignment = 4
class Vertices(object):
def __init__(self, d = 3, cache = 1024, dtype = np.float64):
self.d, self.dtype, self.cache, self.used = d, dtype, cache, 0
self.d_aligned = d_aligned = \
int(math.ceil(self.d / float(_bytes_alignment)) * _bytes_alignment)
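        # e.g. with d = 3 and _bytes_alignment = 4, d_aligned becomes 4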
self.array = np.ndarray(d_aligned * cache, dtype = dtype)
self.__objects = []
def _resize_cache(self, new_size):
cache = self.cache
self.array = np.append(
self.array,
np.ndarray((new_size - cache) * self.d_aligned, dtype = self.dtype)
)
self.cache = new_size
def _check_cache(self):
used, cache = self.used, self.cache
if used >= cache:
self._resize_cache(int(cache * 1.25))
def append(self, vertex):
self._check_cache()
d, d_aligned, used = self.d, self.d_aligned, self.used
self.array[used * d_aligned : used * d_aligned + d] = vertex
self.__objects.append(vertex)
vertex.container, vertex.index = self, used
vertex.dtype = self.dtype
self.used += 1
def extend(self, lst):
map(self.append, lst)
def __getitem__(self, i):
return self.__objects[i]
class Vertex(np.ndarray): # TODO: make vertex be just a view of vertices array
def __new__(cls, coords):
result = np.ndarray.__new__(cls, len(coords))
result[:] = coords
return result
def __init__(self, *args, **kwargs):
super(Vertex, self).__init__(*args, **kwargs)
self.container = None
self.index = None
self.elements = []
class Elements(Vertices): # TODO?
def __init__(self, *args, **kwargs):
kwargs['dtype'] = np.int32
        super(Elements, self).__init__(*args, **kwargs)
def append(self, element):
self._check_cache()
d, used = self.d, self.used
# . . .
################################################################################
################################# Mesh objects #################################
################################################################################
# grids
class Mesh(object):
def __init__(self):
self.vertices = Vertices()
self.elements = []
############################################################
###################### Mesh elements #######################
############################################################
class Element(object):
def __init__(self):
self.adjacent = set()
def __contains__(self, r):
raise NotImplementedError
def square(self):
raise NotImplementedError
class ComposedElement(Element):
def __init__(self):
super(Mesh.ComposedElement, self).__init__()
self.elements = []
def __contains__(self, r):
return any([r in el for el in self.elements])
def square(self):
return sum([el.square() for el in self.elements])
def _process_elements(self):
# just in case: we should still count adjacent elements.
# ...
pass
class PolygonalElement(Element):
def __init__(self):
super(Mesh.PolygonalElement, self).__init__()
self.vertices = []
def __contains__(self, r):
raise NotImplementedError # TODO
def _process_vertices(self):
# find adjacent shapes
candidates = set(reduce(add, map(lambda v: v.elements, self.vertices), []))
is_adjacent = lambda el: len(set(el.vertices) & set(self.vertices)) >= 2
self.adjacent = self.adjacent | set(filter(is_adjacent, candidates))
self.adjacent.discard(self) # just in case...
# register self in adjacent elements
[el.adjacent.add(self) for el in self.adjacent]
# register self in vertices
[v.elements.append(self) for v in self.vertices]
class Triangle(PolygonalElement):
def __init__(self, v1, v2, v3):
super(Mesh.Triangle, self).__init__()
# TODO: sort points in CW or CCW order?
self.vertices = [v1, v2, v3]
self._process_vertices()
# linear algebra data
self.__cross = np.cross(v2 - v1, v3 - v1)
self.__mtx = np.array([v2 - v1, v3 - v1, self.__cross]).T
self.__mtx_inv = np.linalg.inv(self.__mtx)
        def __contains__(self, r):
            v1, v2, v3 = self.vertices
            # barycentric-style coordinates taken relative to v1
            coords = np.dot(self.__mtx_inv, r - v1)
            return coords[0] >= 0. and coords[1] >= 0. and \
                   coords[0] + coords[1] <= 1. and \
                   abs(coords[2]) < 1E-6
def square(self):
v1, v2, v3 = self.vertices
return 0.5 * np.linalg.norm(self.__cross)
def center(self):
v1, v2, v3 = self.vertices
return (v1 + v2 + v3) / 3.
#################### Uniform grid ####################
class UniformGrid(Mesh):
def __init__(self, h):
super(UniformGrid, self).__init__()
self.h = h
def __call__(self, region, **kwargs):
cube = region.container()
h = self.h
n, d = int(cube.edge / h), cube.d
# FIXME: using a dumb way
a = np.zeros([n] * d)
it = np.nditer(a, flags=['multi_index'])
while not it.finished:
            pt = np.array(it.multi_index) * h
if pt in cube:
self.vertices.append(array(pt))
pt += 0.5 * h # TEST
if pt in cube:
self.shapes.append(Mesh.Square(pt, 0.5 * h))
if len(self.vertices) == 0 and len(self.shapes) == 0:
raise ValueError("Mesh type incompatible with given region!")
return self
|
from waitress import serve
from newProject.wsgi import application
if __name__ == '__main__':
serve(application, host = 'localhost', port='8080')
|
"""This contains all of the model filters for the Ghostwriter application."""
import django_filters
from django import forms
from .models import Client, Project
class ClientFilter(django_filters.FilterSet):
"""Filter used to search the `Client` model."""
name = django_filters.CharFilter(lookup_expr='icontains')
class Meta:
model = Client
fields = ['name']
class ProjectFilter(django_filters.FilterSet):
"""Filter used to search the `Project` model."""
start_date = django_filters.DateFilter(lookup_expr=('gt'),)
end_date = django_filters.DateFilter(lookup_expr=('lt'))
start_date_range = django_filters.DateRangeFilter(field_name='start_date')
end_date_range = django_filters.DateRangeFilter(field_name='end_date')
STATUS_CHOICES = (
(0, 'All Projects'),
(1, 'Completed'),
)
complete = django_filters.ChoiceFilter(choices=STATUS_CHOICES,
empty_label=None,
label='Project status')
class Meta:
        model = Project
fields = ['complete',]
|
# preprocessing for machine learning
import numpy as np
import pandas as pd
import pickle
def process_data_for_labels(ticker):
hm_days = 7
df = pd.read_csv('sp500_joined_closes.csv', index_col=0)
tickers = df.columns.values.tolist()
df.fillna(0, inplace=True)
for i in range(1, hm_days+1):
# np.log(df/df.shift(1)).dropna()
df['{}_{}d'.format(ticker,i)] = (df[ticker].shift(-i) - df[ticker])/df[ticker]
df.fillna(0, inplace=True)
print(df)
return tickers, df
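# Worked example of the '{ticker}_{i}d' columns (hypothetical closes): with
# closes [100, 102, 99], the 1-day-ahead column holds (102-100)/100 = 0.02,
# (99-102)/102 ~= -0.0294, and NaN for the last row (filled with 0 above).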
if __name__ == "__main__":
process_data_for_labels('XOM')
|
import numpy as np
import h5py
input_lm_files = ['_prerun_result.lm']
output_figfile_prefix = "Pre"
output_figfile_dir = 'figs'
## Offscreen rendering
# mlab.options.offscreen = True
## Define molecules and volume
cyt = 1
NA = 6.022e23
f = h5py.File(input_lm_files[0],'r')
data = f['Model']['Diffusion']['LatticeSites'][()]
num_voxels = np.count_nonzero(data == cyt)
Spacing = f['Model']['Diffusion'].attrs['latticeSpacing']
volume_in_L = num_voxels * Spacing * Spacing * Spacing * 1000
mnames = f['Parameters'].attrs['speciesNames'].decode().split(',')
S = {}
for i in range(len(mnames)):
S[mnames[i]] = i+1
f.close()
##
Timepoints = [0]
Numbers = np.zeros((1,len(S)),int)
for i, lmfile in enumerate(input_lm_files):
print('file :', lmfile)
f = h5py.File(lmfile,'r')
tmp = f['Simulations']['0000001']['LatticeTimes'][()]
tmp = tmp + Timepoints[-1]
tmp = tmp.tolist()
Timepoints.extend(tmp[1:])
#
tmp = f['Simulations']['0000001']['SpeciesCounts'][()]
Numbers = np.append(Numbers, tmp[1:,:], axis=0)
if i == 0:
print(S.keys())
print('Initial numbers: ', tmp[0,:])
#
f.close()
#print('TimePoints: ', Timepoints)
#print('Numbers : ', Numbers)
print('Num voxels : ', num_voxels)
print('Spacing : ', Spacing)
print('Volume in fL: ', volume_in_L * 1e15)
uMs = Numbers / NA * 1e6 / volume_in_L
Numbers = uMs
import matplotlib.pyplot as plt
# Select the species to plot; options: 'Ca', 'NMDAR', 'CaM', 'CaN2', 'CaN'
Targ = 'CaN'
fig = plt.figure(figsize=(6,4))
ax=fig.add_subplot(111)
if Targ == 'Ca':
ax.plot(Timepoints, Numbers[:,S[Targ]-1], label=Targ)
elif Targ == 'CaN':
CaN = Numbers[:,S['N0C0_CN']-1] + Numbers[:,S['N0C1_CN']-1] + Numbers[:,S['N0C2_CN']-1]\
+ Numbers[:,S['N1C0_CN']-1] + Numbers[:,S['N1C1_CN']-1] + Numbers[:,S['N1C2_CN']-1]\
+ Numbers[:,S['N2C0_CN']-1] + Numbers[:,S['N2C1_CN']-1] + Numbers[:,S['N2C2_CN']-1]
ax.plot(Timepoints, CaN, label=Targ)
elif Targ == 'CaN2':
CaNs = ['CN','N0C0_CN', 'N0C1_CN', 'N0C2_CN', 'N1C0_CN', 'N1C1_CN', 'N1C2_CN',\
'N2C0_CN','N2C1_CN','N2C2_CN']
for name in CaNs:
ax.plot(Timepoints, Numbers[:,S[name]-1], label=name )
elif Targ == 'CaM':
CaMs = ['N0C0','N0C1' ];
#CaMs = ['N0C2','N1C0', 'N1C1','N1C2','N2C0', 'N2C1', 'N2C2'];
for cam in CaMs:
ax.plot(Timepoints, Numbers[:,S[cam]-1], label=cam )
elif Targ == 'NMDAR':
NRs = ['NR_Glu','NR_O'];
for name in NRs:
ax.plot(Timepoints, Numbers[:,S[name]-1], label=name )
ax.set_position([0.2,0.2,0.7,0.6])
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
plt.title(Targ)
plt.xlabel('Time (s)')
#plt.ylabel('Number')
plt.ylabel('(uM)')
hans, labs = ax.get_legend_handles_labels()
ax.legend(handles=hans,labels=labs, frameon=False)
plt.savefig(output_figfile_dir+'/'+ output_figfile_prefix + '_' + Targ + '.pdf')
plt.savefig(output_figfile_dir+'/'+ output_figfile_prefix + '_' + Targ + '.png',dpi=150)
plt.show()
|
# coding: utf-8
# Part of PIT Solutions AG. See LICENSE file for full copyright and licensing details.
import datetime
import logging
from odoo import fields, models, api, _
from odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT
_logger = logging.getLogger(__name__)
EXCEPTION_LOG_TYPE = [
    ('red', _("Danger")),
    ('olive', _("Warning")),
    ('gray', _("Info")),
    ('green', _("Success")),
]
class PaymentAcquirerLog(models.Model):
_name = "payment.acquirer.log"
_description = "Payment acquirer log details"
_order = "id desc"
name = fields.Char(string="Description", required=True)
detail = fields.Html(string="Detail",)
origin = fields.Char(string="Origin", default='wallee', readonly=True)
type = fields.Selection(EXCEPTION_LOG_TYPE, string="Type",
default='gray', readonly=True, required=True)
@api.model
def clean_old_logging(self, days=90):
"""
Function called by a cron to clean old loggings.
@return: True
"""
last_days = datetime.datetime.now() +\
datetime.timedelta(days=-days)
domain = [
('create_date', '<', last_days.strftime(
DEFAULT_SERVER_DATETIME_FORMAT))
]
logs = self.search(domain)
logs.unlink()
message = " %d logs are deleted" % (len(logs))
return self._post_log({'name': message})
@api.model
def _post_log(self, vals):
self.create(vals)
self.env.cr.commit()
|
# Generated by Django 3.1.3 on 2021-01-04 14:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0011_booking_service'),
]
operations = [
migrations.AlterField(
model_name='booking',
name='date',
field=models.DateField(verbose_name='Date Time Field'),
),
migrations.AlterField(
model_name='booking',
name='service',
field=models.CharField(choices=[('Phone', 'Phone'), ('Email', 'Email'), ('Office', 'Office')], default='Phone', max_length=6, verbose_name='Get Service By'),
),
]
|
# _______________________________________________
## Introduction
# _______________________________________________
# Creating a prognostic model from Wisconsin Breast Cancer Data
# by Victor Wan
# Desc: Visualising Breast Cancer Wisconsin data and creating a predictive model based on nuclear features
# Importing libraries
print('Creating a prognostic model from Wisconsin Breast Cancer Data\n~by Victor Wan\nDesc: Visualising Breast Cancer Wisconsin data and creating a predictive model based on nuclear features')
# used to find breast_cancer_data.csv
import os
# numpy is used to manipulate arrays (used in this project for .column_stack())
import numpy as np
# panda for data analysis (used for reading in data and converting to DataFrame)
import pandas as pd
# libraries for plotting data
import matplotlib.pyplot as plt
from matplotlib import rcParams
import seaborn as sb
# libraries for logistic regression
import sklearn
from sklearn import preprocessing
from sklearn.preprocessing import scale
from sklearn import datasets
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import classification_report
import statsmodels.api as sm
import statsmodels.formula.api as smf
# Setting graph styles
# %matplotlib inline
# rcParams['figure.figsize'] = 5, 4
print('Setting graph styles...')
sb.set_style('whitegrid')
# Locate and read data
print('Locating and reading data...')
address = os.path.realpath(
os.path.join(os.getcwd(), 'breast_cancer_data.csv'))
df=pd.read_csv(address)
# column_names=["id","diagnosis","radius_mean","texture_mean","perimeter_mean","area_mean","smoothness_mean","compactness_mean","concavity_mean","concave points_mean","symmetry_mean","fractal_dimension_mean","radius_se","texture_se","perimeter_se","area_se","smoothness_se","compactness_se","concavity_se","concave points_se","symmetry_se","fractal_dimension_se","radius_worst","texture_worst","perimeter_worst","area_worst","smoothness_worst","compactness_worst","concavity_worst","concave points_worst","symmetry_worst","fractal_dimension_worst"]
# df.columns=column_names
# used https://github.com/patrickmlong/Breast-Cancer-Wisconsin-Diagnostic-DataSet/blob/master/Breast%20Cancer%20Wisconsin%20(Diagnostic)%20DataSet_Orignal_Data_In_Progress.ipynb as a guide
# define functions
print('Defining functions...')
def avg(a):
    '''returns each count as a proportion of the total counts in a series
'''
return(a/sum(a))
def count_categorical_to_df(a):
'''input a categorical variable and create a DataFrame of counts for each level of the categorical variable.
'''
value_counts=a.value_counts()
return(pd.DataFrame(value_counts))
def append_first_10_columns_to_tuple(a):
'''input a dataframe and create a 2D tuple with only columns 0-9
'''
columns_1_to_10_list = []
for column in range(10):
columns_1_to_10_list.append(a.iloc[:,column])
return(tuple(columns_1_to_10_list))
def non_numeric_to_NA(col):
'''Check whether a column's values are numeric, and if not numeric, modify to NA.
'''
    for id in range(len(col)):
if type(col[id]) == int or type(col[id]) == np.float64:
pass
else:
col[id]=np.float64(col[id])
## creating string and binary diagnosis variables
# saving string version of Diagnosis for future reference (when plotting)
diagnosis_str = df.diagnosis
# remapping M and B to 1 and 0 respectively
diagnosis_coder = {'M':1, 'B':0}
df.diagnosis = df.diagnosis.map(diagnosis_coder)
diagnosis_int = df.diagnosis
# create separate dataframes for graphing later on
df_b = df[df['diagnosis'] == 0]
df_m = df[df['diagnosis'] == 1]
# dropping unnecessary columns
# ID is not necessary for analysis, diagnosis is removed for rearranging, Unnamed: 32 is an unknown column.
df.drop(['id', 'diagnosis', 'Unnamed: 32'], axis = 1, inplace = True)
df['diagnosis'] = diagnosis_int
# checking if all values in df.texture_mean are numpy.float64, and converting to NA if false
non_numeric_to_NA(df.texture_mean)
# peeking at data
print('Peeking at data...')
print(df.head())
print(df.info())
# _______________________________________________
## Visualise data
# _______________________________________________
print('Visualising data...')
print('Visualising the proportion of benign and malignant cases...')
# creating a dataframe table and bar chart comparing the amount of benign and malignant cases
t_diagnosis = count_categorical_to_df(df['diagnosis'])
diagnosis_value_counts=df['diagnosis'].value_counts()
t_diagnosis=pd.DataFrame(diagnosis_value_counts)
t_diagnosis['percent']=100*avg(diagnosis_value_counts)
print(t_diagnosis)
diagnosis_value_counts.plot(kind='bar')
print('There are more benign than malignant cases in the Wisconsin dataset')
# Create list of df column names
mean_features = []
for column in df.columns[0:10]:
mean_features.append(column)
# Create dataframe where only mean features and diagnosis are included
df_10 = df.loc[:,mean_features]
df_10['diagnosis_str']=diagnosis_str
# creating a pairplot of data
print('Creating pairplot of data...')
sb.pairplot(df_10, hue='diagnosis_str', palette='hls')
plt.show()
# Creating a matrix of boxplots for mean features
print('Creating boxplots showing the distribution of each mean feature for benign vs malignant cases...')
fig = plt.figure()
for i,b in enumerate(list(df.columns[0:10])):
# enumerate starts at index 0, need to add 1 for subplotting
i +=1
# creating subplots
ax = fig.add_subplot(3,4,i)
ax.boxplot([df_b[b], df_m[b]])
ax.set_title(b)
plt.tight_layout()
plt.legend()
plt.show()
print('Plots show distinct patterns\n1. radius/area/perimeter/compactness/concavity/concave_points features have distinct Benign and Malignant populations\n2. Smoothness/symmetry are very homogeneous\nConcavity and concave_points seem to have the strongest positive relationship with other variables.')
# _______________________________________________
## Logistic Regression
# _______________________________________________
print('Performing logistic regression analysis...')
# creating a tuple dataframe for the first 10 columns of df (ie. the columns which show mean characteristics).
columns_1_to_10_tuple = append_first_10_columns_to_tuple(df)
# defining the x and y variables for logistic regression
y = diagnosis_int
x = np.column_stack(columns_1_to_10_tuple)
x = sm.add_constant(x,prepend=True)
# creating logistic regression
x_train,x_test,y_train,y_test = train_test_split(x,y,random_state=0)
logreg = LogisticRegression().fit(x_train,y_train)
logreg
print("Training set score: {:.3f}".format(logreg.score(x_train,y_train)))
print("Test set score: {:.3f}".format(logreg.score(x_test,y_test)))
# create a confusion matrix
y_predict = logreg.predict(x_test)
print(classification_report(y_test,y_predict))
# cross validating
scores = cross_val_score(logreg, x_train, y_train, cv=5)
scores
print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
print('\nConducted logistic regression because the output only has two possibilities. The model does not assume a normal distribution, which is ideal as the pair plot shows some skewed distributions.\nA random forest would also have worked, but logistic regression is faster and more interpretable. This is significant considering the size of the dataframe.\nAlso, the accuracy is high while the standard deviation of the accuracy is small')
|
# __init__ is the constructor method
class Personel:
isim = ""
soyisim = ""
yas = 0
def __str__(self):
return "{} {}".format(self.isim,self.soyisim)
def __init__(self, firstname, lastname, age):
self.isim = firstname
self.soyisim = lastname
self.yas = age
# since the __init__ method now runs, the manual attribute assignments below are no longer needed
#personel = Personel()
#personel.isim = "simge"
#personel.soyisim = "karademir"
#personel.yas = 100
#print(personel)
employee = Personel("simge","karademir",100)
print(employee)
emp = employee
|
import tkinter as tk
from tkinter import messagebox
class Application(tk.Frame):
def __init__(self,master=None):
super().__init__(master)
self.master=master
self.pack()
self.createWidget()
def createWidget(self):
self.btn01= tk.Button(self,text='insert point',command=self.insertpoint)
self.btnquit=tk.Button(self,text='insert end',command=self.insertend)
self.e = tk.Entry(self,show="*")
self.t = tk.Text(self,height=2)
self.e.pack()
self.t.pack()
self.btn01.pack()
self.btnquit.pack()
def insertpoint(self):
var=self.e.get()
self.t.insert('insert',var)
def insertend(self):
var = self.e.get()
self.t.insert("end",var)
if __name__=='__main__':
root = tk.Tk()
root.geometry("400x400+400+400")
root.title("经典GUI程序")
app = Application(master = root)
root.mainloop()
|
import os
import math
import logging
import time
import pickle
import pandas as pd
import numpy as np
import datetime
from pygooglenews import GoogleNews
import nltk
from nltk.sentiment.vader import SentimentIntensityAnalyzer
def tanh(x):
t=(np.exp(x)-np.exp(-x))/(np.exp(x)+np.exp(-x))
return t
def get_state(data, t, n_days):
if t<n_days:
block = [data[0]]*(n_days-t)+data[:t]
else:
block = data[t-n_days:t]
res = [[0 for _ in range(len(data[0]))]]
#print(block[0])
for i in range(n_days-1):
#print([10*(block[i+1][x] - block[i][x])/block[i][x] for x in range(len(block[0]))])
#time.sleep(1)
res.append([10*(block[i+1][x] - block[i][x])/block[0][x] for x in range(len(block[0]))])
#print(res)
return np.array([res])
def get_stock_data_sentiment(stock_file, e=False):
df = pd.read_csv(stock_file)
#stockdat = list(df['Close'])
datedat = list(df['Date'])
#nltk.download('vader_lexicon')
sid = SentimentIntensityAnalyzer()
output = []
name = stock_file.split("/")[2][:-4]
gn = GoogleNews(lang = 'en')
lastdate = datetime.datetime.strptime(datedat[-1], "%Y-%m-%d")
lastdate = lastdate+datetime.timedelta(days=1)
datedat.append(lastdate.strftime("%Y-%m-%d"))
for date in range(len(datedat)-1):
search = gn.search("{} stock".format(name), from_=datedat[date], to_=datedat[date+1])
#time.sleep(.001)
search = [i["title"] for i in search["entries"]]
d = sid.polarity_scores(". ".join(search))
print("{} stock".format(name), int(100*date/(len(datedat)-1)))
output.append([d["neu"], d["pos"], d["neg"]])
with open(stock_file[:-4]+'.pkl', 'wb') as f:
pickle.dump(output, f)
return output
def get_stock_data(stock_file, e=False, d = None):
if d is not None: df = d
else: df = pd.read_csv(stock_file)
out = np.dstack((np.array(df["Open"]), np.array(df["High"]), np.array(df["Low"]), np.array(df["Close"]), np.array(df["Volume"]))).tolist()[0]
latest_nonzero = out[0]
for i in range(len(out)):
#if 0 in latest_nonzero: print(latest_nonzero)
#if 0 in out[i]: print(latest_nonzero)
for feature in range(len(out[i])):
if out[i][feature] == 0 or math.isnan(out[i][feature]):
#print(latest_nonzero[0]==0)
#print(latest_nonzero[feature])
if feature == 0:
try: out[i][feature]=out[i-1][3]
except: out[i][feature]=out[i][3]
#print(out[i][feature], out[i][3])
else: out[i][feature] = latest_nonzero[feature]
#print(out[i])
#print(i, out[i], latest_nonzero[feature])
else:
latest_nonzero[feature] = out[i][feature]
return out
def get_stock_price(stock_file, e=False):
df = pd.read_csv(stock_file)
return list(df['Close'])
def get_stock_volume(stock_file, e=False):
df = pd.read_csv(stock_file)
return list(df['Volume'])
def get_dates_data(stock_file):
df = pd.read_csv(stock_file)
return list(df['Date'])
|
def sort_me(lst):
    # Sort in place by the last character of each element's string representation.
    lst.sort(key=lambda a: str(a)[-1])
    return lst
    # non-mutating alternative: return sorted(lst, key=lambda a: str(a)[-1])
|
import sys
import requests
from datetime import datetime
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import sessionmaker
engine = create_engine('postgresql+psycopg2://likit@localhost/research_dev')
Base = automap_base()
Base.prepare(engine, reflect=True)
Session = sessionmaker(bind=engine)
session = Session()
ScopusSubjArea = Base.classes.scopus_subj_areas
API_KEY = '871232b0f825c9b5f38f8833dc0d8691'
# https://api.elsevier.com/documentation/search/SCOPUSSearchTips.htm
def main(affil, abbr, year):
subj_areas = {'COMP': 0, 'CENG': 0, 'CHEM': 0,
'PHAR': 0, 'AGRI': 0, 'ARTS': 0,
'BIOC': 0, 'BUSI': 0, 'DECI': 0,
'DENT': 0, 'EART': 0, 'ECON': 0,
'ENER': 0, 'ENGI': 0, 'ENVI': 0,
'HEAL': 0, 'IMMU': 0, 'MATE': 0,
'MATH': 0, 'MEDI': 0, 'NEUR': 0,
'NURS': 0, 'PHYS': 0, 'PSYC': 0,
'SOCI': 0, 'VETE': 0, 'MULT': 0}
citations = {'COMP': 0, 'CENG': 0, 'CHEM': 0,
'PHAR': 0, 'AGRI': 0, 'ARTS': 0,
'BIOC': 0, 'BUSI': 0, 'DECI': 0,
'DENT': 0, 'EART': 0, 'ECON': 0,
'ENER': 0, 'ENGI': 0, 'ENVI': 0,
'HEAL': 0, 'IMMU': 0, 'MATE': 0,
'MATH': 0, 'MEDI': 0, 'NEUR': 0,
'NURS': 0, 'PHYS': 0, 'PSYC': 0,
'SOCI': 0, 'VETE': 0, 'MULT': 0}
    query = 'AFFILORG(%s) ' \
            'AND PUBYEAR IS %s' % (affil, year)
params = {'apiKey': API_KEY, 'query': query, 'httpAccept': 'application/json'}
url = 'http://api.elsevier.com/content/search/scopus'
for subj in subj_areas:
params['subj'] = subj
r = requests.get(url, params=params).json()
total_results = int(r['search-results']['opensearch:totalResults'])
subj_areas[subj] = total_results
for a in r['search-results']['entry']:
cite = a.get('citedby-count', '')
if cite != '':
citations[subj] += int(cite)
for subj in subj_areas:
a = ScopusSubjArea(year=year, area=subj,
articles=subj_areas[subj],
affil_abbr=abbr,
citations=citations[subj])
session.add(a)
session.commit()
if __name__=='__main__':
main(sys.argv[1], sys.argv[2], sys.argv[3])
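# Usage sketch (the script name, affiliation, abbreviation, and year below are
# hypothetical example values, not taken from the original project):
#   python scopus_subj_areas.py "Mahidol University" MU 2016
# The script tallies article and citation counts per Scopus subject area for the
# given affiliation and year, then stores one ScopusSubjArea row per area.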
|
"""
original author: Andreas Rene Geist
email: andreas.geist@tuhh.de
website: https://github.com/AndReGeist
license: BSD
additions and modifications to this file by Patrick Phillips, summer 2019
email: pphill10@u.rochester.edu
website: https://github.com/peweetheman
"""
import time
# import resource
import os
import numpy as np
from scipy import sqrt
from control_algorithms import control_scripts
import Config
from gp_scripts import gp_scripts
import plot_scripts
from true_field import true_field
for iter in range(Config.iterations):
# AUV starting state
x_auv = Config.x_auv
trajectory_1 = np.array(x_auv).reshape(1, 3)
# Initialize Field
true_field1 = true_field(False)
# Calculate and set plot parameters
plot_settings = {"vmin": np.amin(true_field1.z_field) - 0.1, "vmax": np.amax(true_field1.z_field) + 0.1, "var_min": 0,
"var_max": 3, "levels": np.linspace(np.amin(true_field1.z_field) - 0.1, np.amax(true_field1.z_field) + 0.1, 20),
"PlotField": False, "LabelVertices": True}
# Initialize plots
if Config.plot is True:
fig1, hyper_x, hyper_y, bottom, colors = plot_scripts.initialize_animation1(true_field1, **plot_settings)
# Initialize data collection
if Config.collect_data is True:
filename = os.path.join('data', Config.control_algo + '_runtime' + str(Config.max_runtime) + '_pathlength' + str(Config.simulation_max_dist) + "_" + str(iter))
print("data file: ", filename)
data = np.zeros(shape=(5, 1))
# Initialize GMRF
time_1 = time.time()
gmrf1 = gp_scripts.GMRF(Config.gmrf_dim, Config.alpha_prior, Config.kappa_prior, Config.set_Q_init)
# print(gmrf1.__dict__)
time_2 = time.time()
print("Time for GMRF init: /", "{0:.2f}".format(time_2 - time_1))
# Initialize Controller
u_optimal = np.zeros(shape=(Config.N_horizon, 1))
"""#####################################################################################"""
"""START SIMULATION"""
# resource.setrlimit(resource.RLIMIT_STACK, (1e10, 1e12)) # I haven't really found a great way to set resource limits
total_calc_time_control_script = 0
path_length = 0
myplot = None
for time_in_ms in range(0, Config.simulation_end_time): # 1200 ms
if time_in_ms % Config.sample_time_gmrf < 0.0000001:
# Compute discrete observation vector and new observation
sd_obs = [int((x_auv[1]) * 1e2), int((x_auv[0]) * 1e2)]
# print("sd_obs", sd_obs, np.array(true_field1.z_field[sd_obs[0], sd_obs[1]]))
# print("or with f", sd_obs, np.array(true_field1.f(x_auv[0], x_auv[1])))
y_t = np.array(true_field1.f(x_auv[0], x_auv[1])) + np.random.normal(loc=0.0, scale=sqrt(Config.sigma_w_squ), size=1)
# Update GMRF belief
time_3 = time.time()
mue_x, var_x, pi_theta = gmrf1.gmrf_bayese_update(x_auv, y_t)
time_4 = time.time()
# print("Calc. time GMRF: /", "{0:.2f}".format(time_4 - time_3))
# Run control algorithm to calculate new path. Select which one in Config file.
if Config.control_algo == 'PI':
u_optimal, tau_x, tau_optimal = control_scripts.pi_controller(x_auv, u_optimal, var_x, Config.pi_parameters, gmrf1.params, Config.field_dim, Config.set_sanity_check)
x_auv = Config.auv_dynamics(x_auv, u_optimal[0], 0, Config.sample_time_gmrf / 100, Config.field_dim)
control = None
else:
control = Config.control_algorithm(start=x_auv, u_optimal=u_optimal, gmrf_params=gmrf1.params, var_x=var_x, max_dist=Config.simulation_max_dist-path_length, plot=myplot)
path_optimal, u_optimal, tau_optimal = control.control_algorithm()
x_auv = Config.auv_dynamics(x_auv, u_optimal[0], 0, Config.sample_time_gmrf / 100, Config.field_dim, tau_optimal[:, 4])
tau_x = None
trajectory_1 = np.vstack([trajectory_1, x_auv])
time_5 = time.time()
control_calc_time = time_5 - time_4
print("Calc. time control script: /", "{0:.2f}".format(time_5 - time_4))
# Plot new GMRF belief and optimal control path. Comment out this region for quick data collection
if Config.plot is True:
traj, myplot = plot_scripts.update_animation1(control, pi_theta, fig1, hyper_x, hyper_y, bottom, colors, true_field1, x_auv, mue_x, var_x, gmrf1.params, trajectory_1, tau_x, tau_optimal, **plot_settings)
time_6 = time.time()
print("Calc. time Plot: /", "{0:.2f}".format(time_6 - time_5))
# Calculate trajectory length and terminate after trajectory length exceeds bound
path_length = 0
for kk in range(1, np.size(trajectory_1, axis=0)):
            path_length += sqrt((trajectory_1[kk, 0] - trajectory_1[kk-1, 0]) ** 2 + (trajectory_1[kk, 1] - trajectory_1[kk-1, 1]) ** 2)
print("path_length: ", path_length)
if path_length >= Config.simulation_max_dist-.5:
print("END DUE TO MAX PATH LENGTH")
break
# CODE FOR BENCHMARKING
if Config.collect_data is True:
(lxf, lyf, dvx, dvy, lx, ly, n, p, de, l_TH, p_THETA, xg_min, xg_max, yg_min, yg_max) = gmrf1.params
# sum of variances
total_variance_sum = np.sum(gmrf1.var_x)
field_variance_sum = 0
for nx in range(15, 65):
for ny in range(15, 40):
field_variance_sum += gmrf1.var_x[(ny * lx) + nx]
# RMSE of mean in field bounds
mean_RMSE = 0
for nx in range(15, 65):
for ny in range(15, 40):
# print("gmrf mean x, y, z: ", de[0] * nx + xg_min, de[1] * ny + yg_min, gmrf1.mue_x[(ny * lx) + nx])
# print("true field mean x, y, z: ", de[0] * nx + xg_min, de[1] * ny + yg_min, true_field1.f(de[0] * nx + xg_min, de[1] * ny + yg_min))
mean_RMSE += (gmrf1.mue_x[(ny * lx) + nx] - true_field1.f(de[0] * nx + xg_min, de[1] * ny + yg_min)) ** 2
mean_RMSE = sqrt(mean_RMSE)
# organize data to write to file
col = np.vstack((path_length, total_variance_sum, field_variance_sum, mean_RMSE, control_calc_time))
data = np.concatenate((data, col), axis=1)
if Config.collect_data is True:
np.save(filename, data)
|
from django.db import models
from django.urls import reverse
class Roaster(models.Model):
name = models.CharField(max_length=120)
city = models.CharField(max_length=120)
country = models.CharField(max_length=120)
website = models.CharField(max_length=120)
def get_absolute_url(self):
return reverse("roasters:roaster-detail", kwargs={'id': self.id})
def __str__(self):
return self.name
|
def reverse(self):
llist = self.__reverse_recursive(self.begin)
llist.next = None
self.begin, self.tail = self.tail, llist
def __reverse_recursive(self, curr):
if curr.next == None:
return curr
else:
node = self.__reverse_recursive(curr.next)
node.next = curr
return curr
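# A minimal, self-contained sketch showing where the two methods above live.
# The Node and SinglyLinkedList names here are hypothetical; the original class
# these methods belong to is not shown in this file. reverse() assumes a
# non-empty list: it relinks every node via the recursion, then swaps begin/tail.
class Node:
    def __init__(self, value):
        self.value = value
        self.next = None

class SinglyLinkedList:
    def __init__(self):
        self.begin = None
        self.tail = None

    def push(self, value):
        node = Node(value)
        if self.begin is None:
            self.begin = self.tail = node
        else:
            self.tail.next = node
            self.tail = node

    def values(self):
        out, curr = [], self.begin
        while curr:
            out.append(curr.value)
            curr = curr.next
        return out

    def reverse(self):
        llist = self.__reverse_recursive(self.begin)
        llist.next = None
        self.begin, self.tail = self.tail, llist

    def __reverse_recursive(self, curr):
        if curr.next == None:
            return curr
        else:
            node = self.__reverse_recursive(curr.next)
            node.next = curr
            return curr

if __name__ == '__main__':
    ll = SinglyLinkedList()
    for v in (1, 2, 3):
        ll.push(v)
    ll.reverse()
    print(ll.values())  # [3, 2, 1]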
|
# /*
# Copyright 2011, Lightbox Technologies, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# */
import md5
keyType = 'text'
valueType = 'text'
def mapper(key, entry, context):
path = None
hashValue = None
good = False
try:
path = entry.fullPath()
size = entry['size']
except:
pass
else:
if (size > 0):
try:
body = entry.getStream()
except Exception, ex:
context.warning("could not open %s. %s" % (path, str(ex)))
else:
try:
digest = md5.new()
s = body.read(1024)
while (len(s) > 0):
digest.update(s)
s = body.read(1024)
hashValue = digest.hexdigest()
good = True
except Exception, ex:
context.warning("problem reading %s. %s" % (path, str(ex)))
if (path and hashValue and good):
context.emit(path, hashValue)
|
t = int(input())
while t > 0:
a,b,c,d = map(int,input().split())
x,y,x1,y1,x2,y2 = map(int,input().split())
x = x + (b - a)
y = y + (d - c)
    # Any horizontal movement is impossible if the corridor has zero width.
    if abs(a + b) > 0 and x1 == x2:
        print("NO")
    # Likewise, any vertical movement is impossible in a zero-height corridor.
    elif abs(c + d) > 0 and y1 == y2:
        print("NO")
    # Otherwise it is enough for the final position to land inside the rectangle.
    elif x1 <= x <= x2 and y1 <= y <= y2:
        print("YES")
    else:
        print("NO")
#print(x,y)
t = t-1
|
import sqlite3
conexion = sqlite3.connect("Tienda de mascotas")
puntero = conexion.cursor()
###########################################
"""
puntero.execute('''
CREATE TABLE MASCOTA(
ID_MASCOTA INTEGER PRIMARY KEY AUTOINCREMENT,
NOMBRE VARCHAR(10),
PESO INTEGER)
''')
"""
"""
puntero.execute('INSERT INTO MASCOTA VALUES(NULL, "Nanu", 11)')
"""
"""
variasMascotas = [
("Rocco", 35),
("Luna", 8)
]
puntero.executemany("INSERT INTO MASCOTA VALUES(NULL,?,?)", variasMascotas)
"""
"""
#READ
puntero.execute('SELECT * FROM MASCOTA WHERE PESO = 11')
seleccion = puntero.fetchall()
print(seleccion)
"""
"""
#UPDATE
puntero.execute('UPDATE MASCOTA SET NOMBRE = "Nanu" WHERE NOMBRE = "Nani"')
"""
"""
#DELETE
puntero.execute("DELETE FROM MASCOTA WHERE NOMBRE = 'Nanu'")
"""
###########################################
conexion.commit()
conexion.close()
|
#modified maka package from https://github.com/gfhuertac/maka
import os
import sys
import json
from os.path import join, dirname
from random import randint
from queue import Queue
from threading import Thread
from time import sleep
from dotenv import load_dotenv
from optparse import IndentedHelpFormatter, OptionGroup, OptionParser
load_dotenv('.env')
try:
import maka.classes as classes
import maka.inquirer as inquirer
except ImportError:
import inspect
CURRENT_DIR = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
PARENT_DIR = os.path.dirname(CURRENT_DIR)
os.sys.path.insert(0, PARENT_DIR)
import classes
import inquirer
DELAY = 1
NUM_QUERIER_THREADS = 2
ROOT = {'author': None, 'aliases': [], 'articles': []}
THE_QUEUE = Queue()
QUITTHREAD = False
def find_article(article, parent=None):
if parent is not None:
for p in ROOT['articles']:
if p['id'] == parent and p['cites']:
for art in p['cites']:
if art['id'] == article:
return art
else:
for art in ROOT['articles']:
if art['id'] == article:
return art
return None
def querier_enclosure(i, q):
"""
Wrapper for the query procedure in order to be used in a Worker
"""
while not QUITTHREAD:
print('Worker {}: Looking for the next query'.format(i))
args = q.get()
query = inquirer.AcademicQuerier(args['query_type'], args['payload'])
if query is not None:
results = query.post()
if results:
if args['query_type'] == inquirer.AcademicQueryType.INTERPRET:
expr = 'OR({})'.format(','.join([interpretation['rules'][0]['value']
for interpretation in results]))
THE_QUEUE.put({
'query_type': inquirer.AcademicQueryType.EVALUATE,
'payload': {
'expr': expr,
'attributes': '*'
},
'parent': None
})
elif args['query_type'] == inquirer.AcademicQueryType.EVALUATE:
parent = args.get('parent', None)
branch = ROOT['articles'] if parent is None else (find_article(parent))['cites']
for result in results:
article = find_article(result['id'], parent)
if article is None:
branch.append(result)
if parent is None:
expr = 'RId={}'.format(result['id'])
THE_QUEUE.put({
'query_type': inquirer.AcademicQueryType.EVALUATE,
'payload': {
'expr': expr,
'attributes': '*'
},
'parent': result['id']
})
total = len(branch)
if total%50 == 0:
new_payload = args['payload'].copy()
new_payload['offset'] = total
THE_QUEUE.put({
'query_type': args['query_type'],
'payload': new_payload,
'parent': args['parent']
})
q.task_done()
sleep(DELAY)
def authors(author):
    global ROOT, QUITTHREAD
ROOT = {'author': None, 'aliases': [], 'articles': []}
workers = []
for i in range(NUM_QUERIER_THREADS):
worker = Thread(target=querier_enclosure, args=(i, THE_QUEUE,))
workers.append(worker)
worker.setDaemon(True)
worker.start()
ROOT['author'] = author
THE_QUEUE.put({
'query_type': inquirer.AcademicQueryType.INTERPRET,
'payload': {
'query': 'papers by {}'.format(author)
}
})
THE_QUEUE.join()
print('Done')
QUITTHREAD = True
return ROOT
#json.dump(ROOT, outfile, cls=classes.AcademicEncoder, indent=4)
import pandas as pd
import numpy as np
from maka.classes import AcademicPaper, AcademicAuthor
import pickle
import pprint
import matplotlib.pylab as plt
# load the previously dumped ROOT here, because re-running the code above many times is slow and might hit the Microsoft Academic API access limit
pkl_file = open('xiaolicheng.pkl', 'rb')
ROOT= pickle.load(pkl_file)
pprint.pprint(ROOT)
# collect each parent article's id together with the ids of the articles that cite it
#ROOT['articles'][3]['references'][0]['id']
dict_id={}
for article in ROOT['articles']:
if article['id'] not in dict_id:
dict_id[article['id']]=[]
for cite in article['cites']:
dict_id[article['id']].append(cite['id'])
dict_id
# get the article objects for the citing articles
list1 = []
for key, value in dict_id.items():
list2 = []
for id in value:
art = find_article(id, parent=key)
list2.append(art)
list1.append(list2)
list1[0]
def count_year(y, list):
count = 0
for ob in list:
year = ob['year']
if year <= y:
count += 1
# print(count)
return count
# count_year(2002,list1[0])
def find_start_year(list):
start = 2018
for ob in list:
year = ob['year']
if year < start:
start = year
return start
# find_start_year(list1[0])
# build a dictionary whose keys are years and whose values are lists of cumulative citation counts (one entry per article)
dict_h = {}
for list in list1:
start = find_start_year(list)
years = np.arange(start,2019)
for i in years:
if i not in dict_h:
dict_h[i] = []
dict_h[i].append(count_year(i,list))
else:
dict_h[i].append(count_year(i,list))
# print(i, [count_year(i,list)])
dict_h
# take the citation-count lists from dict_h to calculate the h-index; :type citations: List[int]
#method from: https://github.com/kamyu104/LeetCode/blob/master/Python/h-index.py
def cal_hindex(citations):
citations.sort(reverse=True)
h=0
for x in citations:
if x>= h + 1:
h += 1
else:
break
return h
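# Worked example (illustrative): for citation counts [3, 0, 6, 1, 5] the sorted
# order is [6, 5, 3, 1, 0]; three papers have at least 3 citations each, so the
# h-index is 3. Note that cal_hindex sorts its argument in place.
assert cal_hindex([3, 0, 6, 1, 5]) == 3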
dict_hindex={}
for key in dict_h.keys():
cit = dict_h[key]
if key not in dict_hindex:
dict_hindex[key]=cal_hindex(cit)
dict_hindex
#plot h-index variation in years
#method from https://stackoverflow.com/questions/37266341/plotting-a-python-dict-in-order-of-key-values/37266356
h_list=sorted(dict_hindex.items())
x, y = zip(*h_list)
plt.plot(x, y, marker='.', markersize=12)
plt.title("H-index variation")
plt.xlabel("Year")
plt.ylabel("H-index")
plt.show()
h_list
|
#!/usr/bin/env python3
import hanlp, re #, pdb
from .ret_semantics import analyze_ret
from .causal_semantics import analyze_causal
from .arg_semantics import analyze_arg_pre, analyze_arg_post
'''
dependency labels: https://universaldependencies.org/u/dep/
pos labels: https://universaldependencies.org/u/pos/
'''
HanLP = hanlp.load(hanlp.pretrained.mtl.UD_ONTONOTES_TOK_POS_LEM_FEA_NER_SRL_DEP_SDP_CON_XLMR_BASE)
split_sent = hanlp.load(hanlp.pretrained.eos.UD_CTB_EOS_MUL) # end of sentence, i.e., sentence boundary detection.
class Dep_analyzer:
def __init__(self, sentences, need_display=False):
self.sentences = split_sent(sentences)
self.dep_info, self.root_ids = self.__get_dep_info(need_display)
'''
    Display the output of dep_info()
'''
def __display_sent_dep(self, dep):
num = 0
for sent_dep in dep:
num += 1
print(f"-------------- sentence {num} --------------")
for dep_edge in sent_dep:
print('id: {}\tword: {}\tpos: {}\thead id: {}\thead: {}\tdeprel: {}\tchild_id: {}'.format(
dep_edge['id'], dep_edge['tok'], dep_edge['pos'],
dep_edge['head_id'], dep_edge['head_tok'], dep_edge['deprel'],
dep_edge['child_id']), sep='\n')
print(f"--------------------------------------------")
'''
Get the formatted dependency parsing results.
'''
def __get_dep_info(self, need_display=False):
        # xxx: Better not to lowercase; some function names/arguments may be case-sensitive.
# doc = doc.lower()
sentences = self.sentences
result = HanLP(sentences)
sentence_dep = []
root_ids = []
for i in range(len(sentences)):
dep_edge = []
root_id = None
# Unify the format.
for n, (dep_head, deprel) in enumerate(result['dep'][i]):
# Note:the index of hanlp is from 0, while the dep_head is from 1.
# three items: edge.target, edge.source, edge.dep
# format: [current_id, current_tok, current_pos, head_id, head_tok, deprel]
dep_edge.append({
'id': n,
'tok': result['tok'][i][n],
'pos': result['pos'][i][n],
'head_id': dep_head-1,
'head_tok': result['tok'][i][dep_head - 1] if dep_head > 0 else "root",
'child_id' : [],
'deprel': deprel
})
if dep_head == 0:
root_id = n
for n, dep in enumerate(dep_edge):
if dep['head_id'] >= 0:
dep_edge[dep['head_id']]['child_id'].append(n)
sentence_dep.append(dep_edge)
root_ids.append(root_id)
if need_display:
print("sentence's number:", len(sentences))
self.__display_sent_dep(sentence_dep)
return sentence_dep, root_ids
def __find_conjunct(self, sentence_dep, src_id):
conjuncts = []
src_dep = sentence_dep[src_id]
for child_id in src_dep['child_id']:
if sentence_dep[child_id]['deprel'] in ['conj', 'parataxis']:
conjuncts.append(child_id)
elif sentence_dep[child_id]['deprel'] == 'appos':
conjuncts += self.__find_conjunct(sentence_dep, child_id)
return conjuncts
def retrive_related_tok(self, sentense_dep, src_id, target_relation=""):
result_tok, result_id = "", -1
src_edge = sentense_dep[src_id]
for child_id in src_edge['child_id']:
if sentense_dep[child_id]['deprel'] != target_relation:
continue
result_tok, result_id = sentense_dep[child_id]['tok'], child_id
break
return result_tok, result_id
'''
Preprocess sentences by their formatted dependency information.
Output: {
"dep_info": dep_info,
"root": [],
"subject": [],
"action": [],
"object": [],
"clause": {head_id: [clause_id]},
}
'''
def preprocess_dep(self):
result_list = []
for sentence_index, sentence_dep in enumerate(self.dep_info):
result_dict = {
"dep_info": sentence_dep,
"root": [],
"subject": [],
"action": [],
"object": [],
"clause": {},
}
            # First retrieve the root node.
root_id = self.root_ids[sentence_index]
if root_id != None:
dep_edge = sentence_dep[root_id]
result_dict['root'].append(root_id)
result_dict['root'] += self.__find_conjunct(sentence_dep, root_id)
if dep_edge['pos'] == "VERB":
result_dict['action'].append(root_id)
result_dict['action'] += self.__find_conjunct(sentence_dep, root_id)
            # Then analyze all the content.
for n, dep_edge in enumerate(sentence_dep):
deprel = dep_edge['deprel']
# For main clause's subject & object
if dep_edge['head_id'] in result_dict['root']:
if 'nsubj' in deprel:
result_dict['subject'].append(n)
result_dict['subject'] += self.__find_conjunct(sentence_dep, n)
elif deprel in ['obj', 'iobj']:
result_dict['object'].append(n)
result_dict['object'] += self.__find_conjunct(sentence_dep, n)
# For sub clause
if deprel in ['acl', 'acl:relcl', 'advcl']:
if dep_edge['head_id'] not in result_dict['clause']:
result_dict['clause'][dep_edge['head_id']] = [n]
else:
result_dict['clause'][dep_edge['head_id']].append(n)
for cl_id in self.__find_conjunct(sentence_dep, n):
result_dict['clause'][dep_edge['head_id']].append(cl_id)
result_list.append(result_dict)
return result_list
'''
Desc: Analyze sentences and extract semantics.
Input: sentences - different classified sentences in the documentation about an API.
'''
def main(sentences, cur_func, func_list={}, display=False, target_types=[]):
feature = {}
# For return
if "return" in target_types and "ret" in sentences:
# ret_feature format: {'value': [], 'cond': []}
ret_desc = sentences['ret']
if ret_desc != {}:
ret_dep = Dep_analyzer(ret_desc, display)
ret_feature = analyze_ret(ret_dep, display)
if ret_feature != {}:
feature['ret'] = ret_feature
# For arguments
if "args" in target_types and "args" in sentences:
args_desc = {'pre': [], 'post': []}
# Filter out irrelevant sentences
for arg_desc in sentences['args']:
pre_desc = ""
post_desc = ""
pattern_pre = re.compile(r'\b(should|must)\b (not)? *be', re.IGNORECASE)
status_words = re.compile(r'(success|fail|error|status)', re.IGNORECASE)
for sentence in arg_desc.split("."):
if pattern_pre.search(sentence) != None:
pre_desc += sentence + ". "
# Ignore the sentences which do not have status words.
elif status_words.search(sentence) != None:
post_desc += sentence + ". "
args_desc['pre'].append(pre_desc)
args_desc['post'].append(post_desc)
# arg_feature format: {'pre.check': bool, 'post.check': bool}
arg_feature = {'arg.pre': [False for _ in range(len(cur_func['args_name']))],
'arg.post': [False for _ in range(len(cur_func['args_name']))]}
for i, desc in enumerate(args_desc['pre']):
dep = Dep_analyzer(desc, display)
arg_feature['arg.pre'][i] = analyze_arg_pre(dep, cur_func['args_name'][i])
        # Only consider functions that do not use the return value as a status.
for i, desc in enumerate(args_desc['post']):
# In case of imprecise definition analysis
if i == len(cur_func['args_type']):
break
dep = Dep_analyzer(desc, display)
arg_feature['arg.post'][i] = analyze_arg_post(dep, cur_func['args_name'][i], cur_func['args_type'][i])
if display:
print('arg:', arg_feature)
for need_check in arg_feature['arg.pre']:
if need_check == True:
feature['arg.pre'] = arg_feature['arg.pre']
break
for need_check in arg_feature['arg.post']:
if need_check == True:
feature['arg.post'] = arg_feature['arg.post']
break
# For causality
if "causality" in target_types and "causality" in sentences:
causal_desc = ""
# Filter out irrelevant sentences
sensitive_word = re.compile(r' \b(must|free|clear|clean|initiate|allocate|release|open|close|' +
r'frees|clears|cleans|initiates|allocates|releases|opens|closes|' +
r'freed|cleared|cleaned|initiated|allocated|released|opened|closed)\b ', re.IGNORECASE)
# causal_pattern = re.compile(r'(earlier|previous|after|before|later)', re.IGNORECASE)
for sentence in sentences['causality'].split("."):
if sensitive_word.search(sentence) != None: # or causal_pattern.search(sentence) != None:
causal_desc += sentence + ". "
# causal_feature format: {'pre': [], 'post': []}
        if causal_desc != "":
causal_dep = Dep_analyzer(causal_desc, display)
causal_feature = analyze_causal(causal_dep, cur_func, func_list, display)
if causal_feature != {}:
feature['casuality'] = causal_feature
return feature
if __name__ == '__main__':
pass
|
from appium.webdriver.webdriver import WebDriver
class Page(object):
""" Base Class for all pageobject classes. """
def __init__(self, driver: WebDriver):
self.driver = driver
def set_driver(self, driver: WebDriver):
""" Set class attribute driver with the input WebDriver element
:param driver: WebDriver element
"""
self.driver = driver
|
from pandac.PandaModules import loadPrcFileData # loading prc files
loadPrcFileData("", "framebuffer-multisample 1")
loadPrcFileData("", "multisamples 1")
#loadPrcFileData("", "fullscreen #t")
#loadPrcFileData("", "window-resolution x y")
# global python imports
import math, sys, random
#-----------------------------------------------------------------------------
# Panda imports
import direct.directbase.DirectStart #starts panda
from pandac.PandaModules import * #basic Panda modules
from direct.showbase.DirectObject import DirectObject #for event handling
from direct.actor.Actor import Actor #for animated models
from direct.interval.IntervalGlobal import * #for compound intervals
from direct.task import Task #for update functions
from panda3d.core import Shader #attempted to get shaders working
from direct.particles.ParticleEffect import ParticleEffect #attempted to get particle effects working
class FlameTest:
def __init__(self):
self.setupLights()
self.loadItem()
self.loadParticles()
def loadItem(self):
self.itemNode = loader.loadModel('../Models/torch')
self.itemNode.setColor(Vec4(1,1,1,1))
self.itemNode.setScale(2)
self.itemNode.reparentTo(render)
self.itemNode.setPos(0,0,0)
def loadParticles(self):
base.enableParticles()
self.rFlame = ParticleEffect()
self.rFlame.loadConfig("../Models/fire.ptf")
self.rFlame.start(self.itemNode)
pos = self.itemNode.getPos()
self.rFlame.setPos(pos[0], pos[1], pos[2] + 4)
lightNode = NodePath('flame')
lightNode.reparentTo(self.rFlame)
lightNode.setZ(lightNode.getZ() + 0.5)
flame = PointLight('flame-light')
flameNP = lightNode.attachNewNode(flame)
flameNP.node().setColor(Vec4(0.9, 0.7, 0.5, 1.0))
# flameNP.node().setAttenuation(Vec3(0, 0.001, 0.000009))
flameNP.setZ(flameNP.getZ() + 0.6)
render.setLight(flameNP)
def setupLights(self):
# set up an ambient light
self.ambientLight = AmbientLight("ambientLight")
#for setting colors, alpha is largely irrelevant
# slightly blue to try and produce a wintry, snowy look
self.ambientLight.setColor((0.1, 0.1, 0.1, 1.0))
#create a NodePath, and attach it directly into the scene
self.ambientLightNP = render.attachNewNode(self.ambientLight)
#the node that calls setLight is what's illuminated by the given light
#you can use clearLight() to turn it off
render.setLight(self.ambientLightNP)
test = FlameTest()
run()
|
from django.shortcuts import get_object_or_404, render
from django.http import HttpResponseRedirect
from django.template import loader
from django.urls import reverse
from django.views import generic
class IndexView(generic.ListView):
template_name = 'typo/index.html'
def get_queryset(self):
"""Return the last five published questions."""
return "hello"
# Create your views here.
|
import tensorflow.keras as keras
import tensorflow as tf
import pandas
tf.random.set_seed(1)
dataframe = pandas.read_csv("data/breast-cancer/data.csv", index_col="id")
y = dataframe.diagnosis
x = dataframe.drop("diagnosis", 1)
print(x.shape)
model = keras.Sequential([
keras.layers.Dense(25, input_shape=(x.shape[1],)),
keras.layers.Dense(20, activation="relu"),
keras.layers.Dense(10, activation="relu"),
keras.layers.Dense(1)
])
model.compile(loss="mse", metrics="accuracy")
model.summary()
history = model.fit(x, y, epochs=100)
predicted = model.predict(x)
print(model.evaluate(x,y))
import numpy as np
print(predicted)
predicted = np.where(predicted > 0.5,1,0)
predicted = predicted.reshape(-1)
print(predicted)
errors = predicted - y
print(errors)
print(len(errors[errors == 0]) / len(y))
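# An alternative sketch (an assumption, not part of the original script): for a 0/1
# target like the one fitted above, a sigmoid output trained with binary
# cross-entropy is the more conventional setup and makes the 0.5 threshold explicit.
alt_model = keras.Sequential([
    keras.layers.Dense(25, input_shape=(x.shape[1],), activation="relu"),
    keras.layers.Dense(10, activation="relu"),
    keras.layers.Dense(1, activation="sigmoid"),
])
alt_model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
alt_model.summary()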
|
#!/usr/bin/env pybricks-micropython
from pybricks.hubs import EV3Brick
from pybricks.ev3devices import ColorSensor, Motor
from pybricks.parameters import Port, Color
from pybricks.robotics import DriveBase

ev3 = EV3Brick()
colourLeft = ColorSensor(Port.S2)
# Wheel diameter and axle track are placeholder values (in mm); adjust them to the robot.
steering_drive = DriveBase(Motor(Port.B), Motor(Port.C), wheel_diameter=55.5, axle_track=104)

def testing_blackline(correction, speed):
    # Proportional line follower: steer in proportion to how far the reading is from the target of 40.
    # Only the left sensor is defined above, so it is used here for the reading.
    while True:
        cur_RLI = colourLeft.reflection()
        error = cur_RLI - 40
        steering = error * correction
        steering_drive.drive(speed, steering)

testing_blackline(0.5, 5)
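# Worked example of the proportional correction above (illustrative numbers):
# a reflection reading of 60 against the target of 40 gives error = 20, so with
# correction = 0.5 the steering value passed to the drive is 20 * 0.5 = 10.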
#!/usr/bin/env pybricks-micropython
import time
from sys import stderr
ev3 = EV3Brick()
colourLeft = ColorSensor(Port.S2)
def RLI_testing2():
    # Count how many reflected-light readings can be taken in one second.
    x = 0
    start_time = time.time()
    while time.time() < start_time + 1:
        RLI = colourLeft.reflection()
        x = x + 1
    print(x, file=stderr)
|
import jsonfield
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.db import models
# Create your models here.
TIME_UNIT_SELECTION = (
('0', 'Hours'),
('1', 'Days')
)
# COMPLEXITY_SELECTION = (
# ('0', '1'),
# ('1', '2'),
# ('2', '3'),
# ('3', '4'),
# ('4', '5')
# )
COMPLEXITY_SELECTION = (
('0', '1'),
('1', '2')
)
class ComplexityRate(models.Model):
"""ComplexityRate model for defining rates
Use Case [Lower -> Higher]
1 - x
2 - 2x
3 - 3x
4 - 4x
5 - 5x
"""
    complexity_rate = models.CharField(
        _('complexity_rate'),
        max_length=1, choices=COMPLEXITY_SELECTION, default='0')
    # Alternative kept for reference (note: IntegerField does not accept max_length):
    # complexity_rate = models.IntegerField(
    #     _('complexity_rate'), default=1)
def save(self, *args, **kwargs):
super(ComplexityRate, self).save(*args, **kwargs)
class HourRate(models.Model):
"""HourRate model for defining hourly rates
Use Case - Min. hrs before new set of rates
2 - 6x
5 - 5x
8 - 4x
12 - 3x
16 - 2x
24 - x
"""
hour_rate = jsonfield.JSONField(
_('hour_rate'), default='{}', max_length=9999)
def save(self, *args, **kwargs):
super(HourRate, self).save(*args, **kwargs)
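# Illustrative shape only (an assumption based on the docstring table above):
# hour_rate maps a minimum-hour threshold to its rate multiplier, e.g.
# HourRate.objects.create(hour_rate={"2": 6, "5": 5, "8": 4, "12": 3, "16": 2, "24": 1})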
class PricingModel(models.Model):
"""Pricing Model to estimate price"""
time_unit_selection = models.CharField(
_('time_unit_selection'),
max_length=1,
choices=TIME_UNIT_SELECTION,
default='0'
)
estimated_time = models.DecimalField(
_('estimated_time'),
decimal_places=2,
max_digits=100,
default=1.0
)
complexity = models.CharField(
_('complexity'),
max_length=1,
choices=COMPLEXITY_SELECTION,
default='0'
)
# complexity = models.IntegerField(_('complexity'), max_length=2, default=1)
discount = models.DecimalField(
_('discount'),
decimal_places=2,
max_digits=100,
default=0
)
def save(self, *args, **kwargs):
super(PricingModel, self).save(*args, **kwargs)
|
#coding:utf-8
#!/usr/bin/env python
from gclib.facility import facility
from gclib.utility import currentTime, is_same_day, randint
from game.utility.config import config
from game.utility.email import email
class infection_arena(facility):
def __init__(self):
"""
        Initialize
"""
facility.__init__(self)
self.battle = {}
self.user = {}
self.prestige_ladder = {}
self.damage_ladder = {}
self.last_update_time = 0
def getData(self):
"""
        Get the data dict
"""
data = {}
data['battle'] = self.battle
data['user'] = self.user
data['prestige_ladder'] = self.prestige_ladder
data['damage_ladder'] = self.damage_ladder
data['last_update_time'] = self.last_update_time
return data
def load(self, name, data):
"""
        Load
"""
facility.load(self, name, data)
self.battle = data['battle']
self.user = data['user']
self.prestige_ladder = data['prestige_ladder']
self.damage_ladder = data['damage_ladder']
self.last_update_time = data['last_update_time']
@staticmethod
def make_user(name):
"""
        Create a player record
"""
return {'prestige':0, 'last_hit_time': 0, 'damage':0, 'level':1, 'infection_list':[], 'name': name, 'prestige_score': 0 ,'last_award_prestige_score' : 0}
@staticmethod
def make_relief(battle):
"""
        Create reinforcement data
"""
data = {}
data['roleid'] = battle['roleid']
data['rolename'] = battle['rolename']
data['create_time'] = battle['create_time']
data['quality'] = battle['quality']
data['level'] = battle['level']
return data
@staticmethod
def battle_total_hp(battle):
"""
        Total HP
"""
totalhp = 0
for mon in battle['monster']:
totalhp = totalhp + mon['hp']
return totalhp
@staticmethod
def battle_is_finish(battle, now, gameConf):
"""
        Whether the battle is finished
"""
return infection_arena.battle_is_escape(battle, now, gameConf) or infection_arena.battle_is_clear(battle)
@staticmethod
def battle_is_clear(battle):
"""
        Whether the battle is fully cleared
"""
return battle.has_key('last_hit')
@staticmethod
def battle_clear_time(battle):
"""
        Full-clear time
"""
return battle[battle['last_hit']]['last_hit_time']
@staticmethod
def battle_is_escape(battle, now, gameConf):
"""
        Whether the monsters have escaped
"""
quality = battle['quality']
escape_time = gameConf['infection_quality'][quality]['escape_time']
if battle['create_time'] + escape_time < now:
return True
return False
@staticmethod
def battle_escapse_time(battle, now, gameConf):
"""
        Escape time
"""
quality = battle['quality']
escape_time = gameConf['infection_quality'][quality]['escape_time']
return battle['create_time'] + escape_time
def encounter(self, roleid, name):
"""
        Encounter an enemy
"""
gameConf = config.getConfig('game')
now = currentTime()
self.update_battle(now, gameConf)
if self.battle.has_key(roleid):
battle = self.battle[roleid][-1]
if battle and (not infection_arena.battle_is_finish(battle, now, gameConf)):
return {'msg': 'infection_battle_not_finish'}
rd = randint()
quality = -1
for (i, qualityInfo) in enumerate(gameConf['infection_quality']):
if rd > qualityInfo['probability']:
rd = rd - qualityInfo['probability']
else:
quality = i
if quality < 0:
return {'msg':'infection_bad_quality'}
if not self.user.has_key(roleid):
self.user[roleid] = infection_arena.make_user(name)
level = self.user[roleid]['level']
self.update_prestige(roleid, now)
infectionBattleConf = config.getConfig('infection_battle')
infectionBattleInfo = infectionBattleConf[str(quality)][level - 1]
if not infectionBattleInfo:
return {'msg':'infection_battle_not_exist'}
battle = {}
battle['monster'] = []
totalhp = 0
monsterConf = config.getConfig('monster')
for monsterid in infectionBattleInfo['monster']:
monsterInfo = monsterConf[monsterid]
monster = {}
monster['monsterid'] = monsterid
monster['hp'] = monsterInfo['hp']
totalhp = totalhp + monsterInfo['hp']
battle['monster'].append(monster)
battle['monster_total_hp'] = totalhp
battle['quality'] = quality
battle['level'] = level
battle['roleid'] = roleid
battle['create_time'] = now
battle['user'] = {}
battle['rolename'] = name
self.battle[roleid] = []
self.battle[roleid].append(battle)
self.user[roleid]['infection_list'].append(infection_arena.make_relief(battle))
self.save()
return {'battle':self.battle[roleid]}
def beat(self, roleid, rolelevel, rolename, battleRoleid, damage):
"""
        Attack (deal damage)
"""
if not self.battle.has_key(battleRoleid):
return {'msg': 'infection_battle_not_exist'}
if not self.battle[battleRoleid]:
return {'msg': 'infection_battle_not_exist'}
now = currentTime()
gameConf = config.getConfig('game')
battle = self.battle[battleRoleid][-1]
if infection_arena.battle_is_finish(battle, now, gameConf):
return {'msg': 'infection_battle_finish'}
self.update_battle(now, gameConf)
canCall = False
if battle['monster_total_hp'] == infection_arena.battle_total_hp(battle):
canCall = True
totaldamage = sum(damage)
lefthp = 0
for (i, monster) in enumerate(battle['monster']):
monster['hp'] = monster['hp'] - damage[i]
if monster['hp'] < 0:
totaldamage = totaldamage + monster['hp']
monster['hp'] = 0
lefthp = lefthp + monster['hp']
if not battle['user'].has_key(roleid):
battle['user'][roleid] = {}
if not battle['user'][roleid].has_key('damage'):
battle['user'][roleid]['damage'] = 0
battle['user'][roleid]['damage'] = battle['user'][roleid]['damage'] + totaldamage
battle['user'][roleid]['last_hit_time'] = now
infectionBattleConf = config.getConfig('infection_battle')
infectionBattleInfo = infectionBattleConf[str(battle['quality'])][battle['level'] - 1]
prestige = int((float(totaldamage) / battle['monster_total_hp'] * infectionBattleInfo['prestige']) * gameConf['infection_quality'][battle['quality']]['prestige_rate'])
if not self.user.has_key(roleid):
self.user[roleid] = infection_arena.make_user(rolename)
now = currentTime()
self.update_prestige(roleid, now)
self.user[roleid]['last_hit_time'] = now
self.user[roleid]['prestige'] = self.user[roleid]['prestige'] + prestige
self.user[roleid]['prestige_score'] = self.user[roleid]['prestige_score'] + prestige
prestige_ladder_position = self.update_prestige_ladder(roleid, rolelevel, self.user[roleid]['prestige'], gameConf)
if battle['user'][roleid]['damage'] > self.user[roleid]['damage']:
self.user[roleid]['damage'] = battle['user'][roleid]['damage']
self.update_damage_ladder(roleid, rolelevel, self.user[roleid]['damage'], gameConf)
data = {}
if lefthp == 0:
if roleid == battleRoleid and self.user[roleid]['level'] <= len(infectionBattleConf['0']):
self.user[roleid]['level'] = self.user[roleid]['level'] + 1
battle['last_hit'] = roleid
self.save()
data['total_damage'] = totaldamage
data['left_hp'] = lefthp
data['prestige'] = prestige
data['prestige_score'] = self.user[roleid]['prestige_score']
data['prestige_ladder_position'] = prestige_ladder_position
data['can_call'] = canCall
return data
def update_prestige_ladder(self, roleid, rolelevel, prestige, gameConf):
"""
        Update the prestige ladder
"""
levelGroup = gameConf['infection_ladder_level_group'][-1]
for lg in gameConf['infection_ladder_level_group']:
if rolelevel < lg:
levelGroup = lg
if not self.prestige_ladder.has_key(lg):
self.prestige_ladder[lg] = []
else:
if roleid in self.prestige_ladder[lg]:
self.prestige_ladder[lg].remove(roleid)
prestige_position = -1
for(i, rid) in enumerate(self.prestige_ladder[levelGroup]):
if self.user[rid]['prestige'] < prestige:
prestige_position = i
if prestige_position < 0 and (len(self.prestige_ladder[levelGroup]) < gameConf['infection_ladder_max_size']):
self.prestige_ladder[levelGroup].append(roleid)
prestige_position = len(self.prestige_ladder[levelGroup]) - 1
elif prestige_position >= 0:
self.prestige_ladder[levelGroup].insert(prestige_position, roleid)
return prestige_position
def update_damage_ladder(self, roleid, rolelevel, damage, gameConf):
"""
        Update the damage ladder
"""
levelGroup = gameConf['infection_ladder_level_group'][-1]
for lg in gameConf['infection_ladder_level_group']:
if rolelevel < lg:
levelGroup = lg
if not self.damage_ladder.has_key(lg):
self.damage_ladder[lg] = []
else:
if roleid in self.damage_ladder[lg]:
self.damage_ladder[lg].remove(roleid)
damage_position = -1
for (i, rid) in enumerate(self.damage_ladder[levelGroup]):
if self.user[rid]['damage'] < damage:
damage_position = i
if damage_position < 0 and (len(self.damage_ladder[levelGroup]) < gameConf['infection_ladder_max_size']):
self.damage_ladder[levelGroup].append(roleid)
elif damage_position >=0:
self.damage_ladder[levelGroup].insert(damage_position, roleid)
def update_prestige(self, roleid, now):
"""
        Update prestige
"""
if not is_same_day(self.user[roleid]['last_hit_time'], now):
self.user[roleid]['prestige'] = 0
self.user[roleid]['prestige_score'] = 0
def call_relief(self, roleid, friend):
"""
        Call for reinforcements
"""
if not self.battle.has_key(roleid):
return {'msg': 'infection_battle_not_exist'}
if not self.battle[roleid]:
return {'msg': 'infection_battle_not_exist'}
now = currentTime()
gameConf = config.getConfig('game')
battle = self.battle[roleid][-1]
if infection_arena.battle_is_finish(battle, now, gameConf):
return {'msg': 'infection_battle_finish'}
for f in friend:
if not self.user.has_key(f[0]):
self.user[f[0]] = infection_arena.make_user(f[1])
reliefBattle = infection_arena.make_relief(battle)
self.user[f[0]]['infection_list'].append(reliefBattle)
self.save()
return {}
def get_infection_battle(self, roleid):
"""
        Get reinforcement battles
"""
if not self.user.has_key(roleid):
return {'msg': 'infection_not_exist'}
now = currentTime()
gameConf = config.getConfig('game')
self.update_battle(now, gameConf)
data = {}
data['battle'] = []
for inf in self.user[roleid]['infection_list']:
battleRoleid = inf['roleid']
b = {}
if self.battle.has_key(battleRoleid):
for battle in self.battle[battleRoleid]:
if battle['create_time'] == inf['create_time']:
b['monster'] = battle['monster']
b['create_time'] = battle['create_time']
b['roleid'] = battle['roleid']
b['rolename'] = battle['rolename']
b['quality'] = battle['quality']
b['level'] = battle['level']
if not b:
b['total_hp'] = 0
b['create_time'] = 0
b['roleid'] = inf['roleid']
b['rolename'] = inf['rolename']
b['quality'] = inf['quality']
b['level'] = inf['level']
data['battle'].append(b)
return data
def get_battle_award(self, roleid, battleRoleid, create_time):
"""
        Get battle rewards
"""
if not self.battle.has_key(battleRoleid):
return {'msg': 'infection_battle_not_exist'}
if not self.battle[battleRoleid]:
return {'msg': 'infection_battle_not_exist'}
now = currentTime()
gameConf = config.getConfig('game')
self.update_battle(now, gameConf)
battle = {}
for b in self.battle[battleRoleid]:
if b['create_time'] == create_time:
battle = b
break
if not battle:
return {'msg': 'infection_battle_not_exist'}
if infection_arena.battle_total_hp(battle) > 0:
return {'msg': 'infection_battle_not_finish'}
callerDropid = ''
lastHitDropid = ''
hitDropid = ''
quality = battle['quality']
level = battle['level']
infectionBattleConf = config.getConfig('infection_battle')
infectionBattleInfo = infectionBattleConf[str(quality)][level - 1]
if battle['roleid'] == roleid:
if (not battle.has_key('caller_award')) or ( not battle['caller_award']):
callerDropid = infectionBattleInfo['caller_dropid']
battle['caller_award'] = {'roleid':roleid, 'dropid':callerDropid}
if battle['last_hit'] == roleid:
if (not battle.has_key('last_hit_award')) or ( not battle['last_hit_award']):
lastHitDropid = infectionBattleInfo['last_hit_dropid']
battle['last_hit_award'] = {'roleid':roleid, 'dropid':lastHitDropid}
if battle['user'].has_key(roleid):
if not battle['user'][roleid].has_key('hit_award'):
hitDropid = infectionBattleInfo['hit_dropid']
battle['user'][roleid]['hit_award'] = {'roleid':roleid, 'dropid':hitDropid}
if (not callerDropid) or (not lastHitDropid) or (not hitDropid):
return {'msg': 'infection_battle_no_award'}
self.save()
data = {}
if callerDropid:
data['call_dropid'] = callerDropid
if lastHitDropid:
data['last_hit_dropid'] = lastHitDropid
if hitDropid:
data['hit_dropid'] = hitDropid
return data
def get_prestige_award(self, roleid, rolelevel):
"""
        Get prestige rewards
"""
infectionPrestigePriceConf = config.getConfig('infection_prestige_price')
gameConf = config.getConfig('game')
levelGroup = gameConf['infection_ladder_level_group'][-1]
for lg in gameConf['infection_ladder_level_group']:
if rolelevel < lg:
levelGroup = lg
break
infectionPrestigePriceInfo = infectionPrestigePriceConf[str(levelGroup)]
last_award_prestige_score = self.user[roleid]['last_award_prestige_score']
prestige_score = self.user[roleid]['prestige_score']
award = []
next_awrd_score = last_award_prestige_score
for key in infectionPrestigePriceInfo:
pp = int(key)
if pp > last_award_prestige_score and pp <= prestige_score:
award.append(key)
if next_awrd_score < pp:
next_awrd_score = pp
self.user[roleid]['last_award_prestige_score'] = next_awrd_score
data = {}
data['award'] = award
return data
def update_battle(self, now, gameConf):
"""
        Update battles
"""
if is_same_day(self.last_update_time, now):
return
email.send_ladder_email(self.damage_ladder, '4')
email.send_ladder_email(self.prestige_ladder, '4')
for roleid in self.battle.keys():
battleRemoveList = []
for battle in self.battle[roleid]:
quality = battle['quality']
if infection_arena.battle_is_escape(battle, now, gameConf):
if not is_same_day(battle['create_time'] + gameConf['infection_quality'][quality]['escape_time'], now):
battleRemoveList.append(battle)
elif infection_arena.battle_is_clear(battle):
hit_time = battle['user'][battle['last_hit']]['last_hit_time']
if not is_same_day(hit_time, now):
battleRemoveList.append(battle)
for rb in battleRemoveList:
self.battle[roleid].remove(rb)
if not self.battle[roleid]:
del self.battle[roleid]
self.last_update_time = now
def prestige_ladder_list(self, rolelevel):
"""
        Prestige ladder list
"""
gameConf = config.getConfig('game')
levelGroup = gameConf['infection_ladder_level_group'][-1]
for lg in gameConf['infection_ladder_level_group']:
if rolelevel < lg:
levelGroup = lg
break
data = {}
data['prestige_ladder'] = []
for (i, roleid) in enumerate(self.prestige_ladder[str(levelGroup)]):
item = {}
item['position'] = i
item['name'] = self.user[roleid]['name']
item['prestige'] = self.user[roleid]['prestige']
data['prestige_ladder'].append(item)
return data
def damdage_ladder_list(self, rolelevel):
"""
        Damage ladder list
"""
gameConf = config.getConfig('game')
levelGroup = gameConf['infection_ladder_level_group'][-1]
for lg in gameConf['infection_ladder_level_group']:
if rolelevel < lg:
levelGroup = lg
data = {}
data['damdage_ladder'] = []
for (i, roleid) in enumerate(self.damage_ladder[str(levelGroup)]):
item = {}
item['position'] = i
item['name'] = self.user[roleid]['name']
item['prestige'] = self.user[roleid]['prestige']
data['damdage_ladder'].append(item)
return data
def user_info(self, roleid):
"""
        Player info
"""
if self.user.has_key(roleid):
data = {}
data['prestige'] = self.user[roleid]['prestige']
data['prestige_score'] = self.user[roleid]['prestige_score']
data['prestige_ladder_position'] = -1
if roleid in self.prestige_ladder:
data['prestige_ladder_position'] = self.prestige_ladder.index(roleid)
return data
else:
data = {}
data['prestige'] = 0
data['prestige_score'] = 0
data['prestige_ladder_position'] = -1
return data
def breset_prestige_score(self, roleid):
"""
        Reset prestige score
"""
if not self.user.has_key(roleid):
return {'prestige_score':0}
self.user[roleid]['prestige_score'] = 0
return {'prestige_score':0}
|
locators = {
    'url': 'https://qa-pro.domclick.ru/',
    'Шапка': {  # "Header"
        'Войти в шапке': ['[class^="js-topline-auth-button-text topline__icon-link__text"]', 0]  # "Log in (header)"
    },
    'Лендинг': {  # "Landing page"
        'Кнопка Зарегистрироваться': ['[class^="ui bulky green button head-button js-signup"]', 0],  # "Sign up button"
        'Смотреть видео': ['[class^="features-video__image"]', 0],  # "Watch video"
        'Закрыть видео': ['[class^="features-video__close js-close-video"]', 0],  # "Close video"
        'Доступно в гугл-плей': ['[class^="input__control"]', 0],  # "Available on Google Play"
        'Доступно в ап-стор': ['[class^="input__control"]', 1],  # "Available on the App Store"
        'FAQ-1': ['[class^="accordion-item__title"]', 0],
        'FAQ-2': ['[class^="accordion-item__title"]', 1],
        'FAQ-3': ['[class^="accordion-item__title"]', 2],
        'FAQ-4': ['[class^="accordion-item__title"]', 3],
        'FAQ-5': ['[class^="accordion-item__title"]', 4],
        'Войти из тела': ['[class^="ui button bulky green auth-button js-partner-link"]', 0]  # "Log in (page body)"
    }
}
|
# -*- coding: utf-8 -*-
# flake8: noqa
# Model changes with django-simple-history 1.5.4
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('webplatformcompat', '0006_add_user_changesets_related_name'),
]
operations = [
migrations.AlterModelOptions(
name='historicalbrowser',
options={'ordering': ('-history_date', '-history_id'), 'get_latest_by': 'history_date', 'verbose_name': 'historical browser'},
),
migrations.AlterModelOptions(
name='historicalfeature',
options={'ordering': ('-history_date', '-history_id'), 'get_latest_by': 'history_date', 'verbose_name': 'historical feature'},
),
migrations.AlterModelOptions(
name='historicalmaturity',
options={'ordering': ('-history_date', '-history_id'), 'get_latest_by': 'history_date', 'verbose_name': 'historical maturity', 'verbose_name_plural': 'historical_maturities'},
),
migrations.AlterModelOptions(
name='historicalsection',
options={'ordering': ('-history_date', '-history_id'), 'get_latest_by': 'history_date', 'verbose_name': 'historical section'},
),
migrations.AlterModelOptions(
name='historicalspecification',
options={'ordering': ('-history_date', '-history_id'), 'get_latest_by': 'history_date', 'verbose_name': 'historical specification'},
),
migrations.AlterModelOptions(
name='historicalsupport',
options={'ordering': ('-history_date', '-history_id'), 'get_latest_by': 'history_date', 'verbose_name': 'historical support'},
),
migrations.AlterModelOptions(
name='historicalversion',
options={'ordering': ('-history_date', '-history_id'), 'get_latest_by': 'history_date', 'verbose_name': 'historical version'},
),
migrations.AlterField(
model_name='historicalbrowser',
name='history_user',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='historicalfeature',
name='history_user',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='historicalmaturity',
name='history_user',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='historicalsection',
name='history_user',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='historicalspecification',
name='history_user',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='historicalsupport',
name='history_user',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='historicalversion',
name='history_user',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, null=True),
preserve_default=True,
),
]
|
from django.contrib import admin
from .models import Stock, Company, Excel # .models is models file in current directory
# Register your models here.
admin.site.register(Excel)
admin.site.register(Company)
admin.site.register(Stock)
|
import numpy as np
# Sigmoid function
def sigmoid(x):
    return 1.0/(1.0+np.exp(-x))
# Derivative of the sigmoid function
def sigmoid_grad(x):
    return (1.0 - sigmoid(x)) * sigmoid(x)
# Step function
def step(x):
    y = x > 0
    return y.astype(int)
# ReLU function (element-wise max with 0; np.max(0, x) would treat x as an axis argument)
def relu(x):
    return np.maximum(0, x)
# Identity function
def identity(x):
    return x
# For the overflow countermeasure, see p.69 of "Deep Learning from Scratch";
# the softmax used on p.118 has been changed as follows.
def softmax(x):
    # For batch processing
    if x.ndim == 2:
        # Transpose first, otherwise np.max would not reduce along the intended axis
        x = x.T
        x = x - np.max(x, axis=0)  # overflow countermeasure
        y = np.exp(x) / np.sum(np.exp(x), axis=0)
        return y.T
    x = x - np.max(x)  # overflow countermeasure
    return np.exp(x) / np.sum(np.exp(x))
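# Example of the overflow countermeasure above: softmax is unchanged when a constant
# is subtracted from every input, so even very large values are handled safely:
#   softmax(np.array([1010.0, 1000.0, 990.0]))  # ~ [1.0, 4.5e-05, 2.1e-09], no overflow warnings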
# Mean squared error
def mean_square_error(y, t):
    return 0.5 * np.sum((y-t)**2)
# Cross-entropy error (batch-capable version)
# y : predicted data, a 1-D array or a 2-D array (when batched)
# t : teacher (target) data
def cross_entropy_error(y, t):
    # If the input is 1-D, reshape it into a 2-D batch of size 1
    if y.ndim == 1:
        t = t.reshape(1, t.size)
        y = y.reshape(1, y.size)
    # If the teacher data is one-hot encoded, convert it to correct-label indices
    if y.size == t.size:
        t = t.argmax(axis=1)
    batch_size = y.shape[0]
    # Label representation (the correct index is given directly, e.g. 2)
    return -np.sum(np.log(y[np.arange(batch_size), t])) / batch_size
# Numerical differentiation (central difference)
def diff(f, x):
    h = 1e-4
    return (f(x+h) - f(x-h)) / (2*h)
# Partial derivative with respect to x[idx]
def partial_diff(f, x, idx):
    h = 1e-4
    val = x[idx]
    # compute f(x+h)
    x[idx] = val + h
    fxh1 = f(x)
    # compute f(x-h)
    x[idx] = val - h
    fxh2 = f(x)
    # central difference
    ret = (fxh1 - fxh2) / (2*h)
    # restore the original value
    x[idx] = val
    return ret
# Gradient computation
# See the column on p.112 of "Deep Learning from Scratch"
def gradient(f, x):
    h = 1e-4  # 0.0001
    grad = np.zeros_like(x)
    # Use an iterator so every element of x can be read and written even when x is multi-dimensional
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        idx = it.multi_index
        tmp_val = x[idx]
        x[idx] = float(tmp_val) + h
        fxh1 = f(x)  # f(x+h)
        x[idx] = tmp_val - h
        fxh2 = f(x)  # f(x-h)
        grad[idx] = (fxh1 - fxh2) / (2 * h)
        x[idx] = tmp_val  # restore the original value
        it.iternext()
    return grad
def gradient_descent(f, init_x, lr=0.01, step_num=100):
x = init_x
for i in range(step_num):
grad = gradient(f, x)
x -= lr*grad
return x
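# Usage sketch: minimise f(x) = x0**2 + x1**2 from the starting point (-3, 4).
# gradient() approximates the gradient numerically, so after 100 steps with
# lr=0.1 the result is very close to the origin.
if __name__ == '__main__':
    def _sample_f(x):
        return np.sum(x ** 2)
    print(gradient_descent(_sample_f, np.array([-3.0, 4.0]), lr=0.1, step_num=100))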
|
"""
CCT 建模优化代码
GPU 加速示例(3)
作者:赵润晓
日期:2021年5月6日
"""
# 因为要使用父目录的 cctpy 所以加入
from os import error, path
import sys
sys.path.append(path.dirname(path.abspath(path.dirname(__file__))))
from hust_sc_gantry import HUST_SC_GANTRY
from cctpy import *
ga32 = GPU_ACCELERATOR(float_number_type=GPU_ACCELERATOR.FLOAT32)
ga64 = GPU_ACCELERATOR(float_number_type=GPU_ACCELERATOR.FLOAT64,block_dim_x=512)
# ----- track_one_particle_with_multi_qs -----
# Create a beamline with only one qs
bl = HUST_SC_GANTRY().create_second_bending_part_beamline()
# Create a particle (the ideal particle)
particle = ParticleFactory.create_proton_along(bl,kinetic_MeV=215)
# Make three copies
particle_cpu = particle.copy()
particle_gpu32 = particle.copy()
particle_gpu64 = particle.copy()
# Run
footstep=100*MM
ParticleRunner.run_only(particle_cpu,bl,bl.get_length(),footstep=footstep)
ga32.track_one_particle_with_multi_qs(bl,particle_gpu32,bl.get_length(),footstep=footstep)
ga64.track_one_particle_with_multi_qs(bl,particle_gpu64,bl.get_length(),footstep=footstep)
print("track_one_particle_with_multi_qs")
print("CPU计算结果: ",particle_cpu.detailed_info())
print("GPU32计算结果: ",particle_gpu32.detailed_info())
print("GPU64计算结果: ",particle_gpu64.detailed_info())
print("GPU32计算和CPU对比: ",(particle_cpu-particle_gpu32).detailed_info())
print("GPU64计算和CPU对比: ",(particle_cpu-particle_gpu64).detailed_info())
# track_one_particle_with_multi_qs
# CPU计算结果: Particle[p=(7.409509849267735, -0.028282989447753218, 5.0076184754665586e-05), v=(1809891.9615852616, -174308430.5414393, -330480.4098605619)], rm=2.0558942080656965e-27, e=1.6021766208e-19, speed=174317774.94179922, distance=7.104727865682728]
# GPU32计算结果: Particle[p=(7.409510612487793, -0.02828289568424225, 5.0118236686103046e-05), v=(1809917.875, -174308416.0, -330476.3125)],
# rm=2.0558942007434142e-27, e=1.602176597458587e-19, speed=174317776.0, distance=7.104727745056152]
# GPU64计算结果: Particle[p=(7.409509849267735, -0.028282989447752843, 5.0076184754525616e-05), v=(1809891.961585234, -174308430.54143927, -330480.409860578)], rm=2.0558942080656965e-27, e=1.6021766208e-19, speed=174317774.94179922, distance=7.104727865682728]
# GPU32计算和CPU对比: Particle[p=(-7.632200578200354e-07, -9.376351096934687e-08, -4.2051931437459694e-08), v=(-25.91341473837383, -14.541439294815063, -4.097360561892856)], rm=7.322282306994799e-36, e=2.3341413164924317e-27, speed=-1.0582007765769958, distance=1.2062657539502197e-07]
# GPU64计算和CPU对比: Particle[p=(0.0, -3.7470027081099033e-16, 1.3997050046787862e-16), v=(2.7706846594810486e-08, -2.9802322387695312e-08, 1.6123522073030472e-08)], rm=0.0, e=0.0, speed=0.0, distance=0.0]
# comparison with track_one_particle_with_single_qs
# CPU计算结果: Particle[p=(7.409509849267735, -0.028282989447753218, 5.0076184754665586e-05), v=(1809891.9615852616, -174308430.5414393, -330480.4098605619)], rm=2.0558942080656965e-27, e=1.6021766208e-19, speed=174317774.94179922, distance=7.104727865682728]
# GPU32计算结果: Particle[p=(7.409510612487793, -0.02828289568424225, 5.0118236686103046e-05), v=(1809917.875, -174308416.0, -330476.3125)], rm=2.0558942007434142e-27, e=1.602176597458587e-19, speed=174317776.0, distance=7.104727745056152]
# GPU64计算结果: Particle[p=(7.409509849267735, -0.028282989447752843, 5.0076184754525616e-05), v=(1809891.961585234, -174308430.54143927, -330480.409860578)], rm=2.0558942080656965e-27, e=1.6021766208e-19, speed=174317774.94179922, distance=7.104727865682728]
# GPU32计算和CPU对比: Particle[p=(-7.632200578200354e-07, -9.376351096934687e-08, -4.2051931437459694e-08), v=(-25.91341473837383, -14.541439294815063, -4.097360561892856)], rm=7.322282306994799e-36, e=2.3341413164924317e-27, speed=-1.0582007765769958, distance=1.2062657539502197e-07]
# GPU64计算和CPU对比: Particle[p=(0.0, -3.7470027081099033e-16, 1.3997050046787862e-16), v=(2.7706846594810486e-08, -2.9802322387695312e-08, 1.6123522073030472e-08)], rm=0.0, e=0.0, speed=0.0, distance=0.0]
# ------------ a true multi-QS beamline -------
# create a beamline with 3 QS magnets
bl = HUST_SC_GANTRY().create_first_bending_part_beamline()
# create the particle (ideal/reference particle)
particle = ParticleFactory.create_proton_along(bl,kinetic_MeV=215)
# make three copies
particle_cpu = particle.copy()
particle_gpu32 = particle.copy()
particle_gpu64 = particle.copy()
# run the tracking
footstep=100*MM
ParticleRunner.run_only(particle_cpu,bl,bl.get_length(),footstep=footstep)
ga32.track_one_particle_with_multi_qs(bl,particle_gpu32,bl.get_length(),footstep=footstep)
ga64.track_one_particle_with_multi_qs(bl,particle_gpu64,bl.get_length(),footstep=footstep)
print("track_one_particle_with_multi_qs 2 ")
print("CPU计算结果: ",particle_cpu.detailed_info())
print("GPU32计算结果: ",particle_gpu32.detailed_info())
print("GPU64计算结果: ",particle_gpu64.detailed_info())
print("GPU32计算和CPU对比: ",(particle_cpu-particle_gpu32).detailed_info())
print("GPU64计算和CPU对比: ",(particle_cpu-particle_gpu64).detailed_info())
# track_one_particle_with_multi_qs 2
# CPU计算结果: Particle[p=(3.687315812380205, 1.548315945537494, -0.003352065021200123), v=(119474899.55705348, 126923892.97270872, -352485.58348381834)], rm=2.0558942080656965e-27, e=1.6021766208e-19, speed=174317774.94179922, distance=4.149802255227576]
# GPU32计算结果: Particle[p=(3.6873157024383545, 1.5483157634735107, -0.0033521109726279974), v=(119474888.0, 126923888.0, -352490.09375)], rm=2.0558942007434142e-27, e=1.602176597458587e-19, speed=174317776.0, distance=4.149802207946777]
# GPU64计算结果: Particle[p=(3.687315812380205, 1.5483159455374929, -0.0033520650212005175), v=(119474899.55705343, 126923892.97270869, -352485.58348386886)], rm=2.0558942080656965e-27, e=1.6021766208e-19, speed=174317774.94179922, distance=4.149802255227576]
# GPU32计算和CPU对比: Particle[p=(1.0994185029034043e-07, 1.8206398322284656e-07, 4.595142787458539e-08), v=(11.557053476572037, 4.9727087169885635, 4.51026618166361)], rm=7.322282306994799e-36, e=2.3341413164924317e-27, speed=-1.0582007765769958, distance=4.728079883165037e-08]
# GPU64计算和CPU对比: Particle[p=(0.0, 1.1102230246251565e-15, 3.946495907847236e-16), v=(4.470348358154297e-08, 2.9802322387695312e-08, 5.052424967288971e-08)], rm=0.0, e=0.0, speed=0.0, distance=0.0]
|
import wx, yaml
import wx.lib.scrolledpanel as scrolled
from passlib.hash import sha256_crypt
import collections
from collections import OrderedDict
import serial, glob, sys
import time
import struct
class PasswordSettings(wx.Frame):
def __init__(self, config, config_file_name):
wx.Frame.__init__(self, None, title="Password Settings", pos=(250,250))
self.config = config
self.config_file_name = config_file_name
self.Bind(wx.EVT_CLOSE, self.OnClose)
panel = wx.Panel(self)
box = wx.BoxSizer(wx.VERTICAL)
prompt_text = wx.StaticText(panel, -1, 'Please enter your current password:')
box.Add(prompt_text, 0, wx.ALL)
current_password = wx.TextCtrl(panel, -1, '', size=(300,-1), pos=(10,10), style = wx.TE_PASSWORD)
self.Bind(wx.EVT_TEXT, lambda evt: self.OnTryPassword(evt, current_password, new_password), current_password)
box.Add(current_password, 0, wx.ALL)
new_prompt_text = wx.StaticText(panel, -1, 'Please enter a new password:')
box.Add(new_prompt_text, 0, wx.ALL)
new_password = wx.TextCtrl(panel, -1, '', size=(300,-1), pos=(10,10), style = wx.TE_PASSWORD)
box.Add(new_password, 0, wx.ALL)
self.Bind(wx.EVT_TEXT, lambda evt: self.OnTypeNewPassword(evt, confirm_password), new_password)
new_password.Disable()
confirm_prompt_text = wx.StaticText(panel, -1, 'Please confirm the new password:')
box.Add(confirm_prompt_text, 0, wx.ALL)
confirm_password = wx.TextCtrl(panel, -1, '', size=(300,-1), pos=(10,10), style = wx.TE_PASSWORD)
box.Add(confirm_password, 0, wx.ALL)
self.Bind(wx.EVT_TEXT, lambda evt: self.OnConfirmNewPassword(evt, new_password, enter_button), confirm_password)
confirm_password.Disable()
enter_button = wx.Button(panel, -1, 'Change')
self.Bind(wx.EVT_BUTTON, lambda evt: self.OnEnterButton(evt, new_password.GetValue()), enter_button)
enter_button.Disable()
box.Add(enter_button, 0, wx.ALL)
panel.SetSizer(box)
panel.Layout()
def OnTryPassword(self, evt, field_to_compare, field_to_unlock):
outcome = sha256_crypt.verify(field_to_compare.GetValue(), self.config["password"])
        if outcome:
field_to_unlock.Enable()
else:
field_to_unlock.Disable()
def OnTypeNewPassword(self, evt, field_to_unlock):
if evt.GetEventObject().GetValue() != '':
field_to_unlock.Enable()
else:
field_to_unlock.Disable()
def OnConfirmNewPassword(self, evt, field_to_compare, button_to_unlock):
if evt.GetEventObject().GetValue() == field_to_compare.GetValue():
button_to_unlock.Enable()
else:
button_to_unlock.Disable()
def OnEnterButton(self, evt, new_password):
dlg = wx.MessageDialog(self, "Are you sure you want to change your password?", "Confirm Password Change", wx.YES_NO|wx.ICON_QUESTION)
result = dlg.ShowModal()
dlg.Destroy()
if result == wx.ID_YES:
self.config["password"] = sha256_crypt.encrypt(new_password)
with open(self.config_file_name, "w") as u_cfg:
yaml.dump(self.config, u_cfg)
self.MakeModal(False)
self.Destroy()
info_dlg = wx.MessageDialog(self, "Password has been changed.", "Change Password", wx.OK| wx.ICON_INFORMATION)
info_result = info_dlg.ShowModal()
def OnClose(self, evt):
self.MakeModal(False)
evt.Skip()
|
#! -*- coding: utf-8 -*-
from libs.pontovitoria import PontoVitoria
from linhas import Linha
class RotaIndireta():
def __init__(self, p1,p2, l1,l2, pontos_intersecao):
self.l1 = l1
self.l2 = l2
self.p1 = p1
self.p2 = p2
self.pontos_intersecao = pontos_intersecao
self.pontos_de_parada = []
def get_pontos_de_parada(self):
"""
        To distinguish between outbound and return stops, only the stops whose
        order on the destination line is lower than the destination stop's order are selected.
"""
if len(self.pontos_de_parada):
return self.pontos_de_parada
pontos_de_parada = []
ordem_destino = self.l2.get_ordem_do_ponto(self.p2)
for p in self.pontos_intersecao:
if self.l2.get_ordem_do_ponto(p) < ordem_destino:
pontos_de_parada.append(p)
self.pontos_de_parada = pontos_de_parada
return pontos_de_parada
def to_json(self):
"""
        converts this route into its JSON representation
"""
pontosjson = ""
for p in self.get_pontos_de_parada():
pontosjson +="\"%s\", " % p
        pontosjson = pontosjson[:-2]  # drop the trailing comma
return "{ \"linha1\":\"%s\", \"linha2\":\"%s\", \"pontos_de_parada\": [%s] }" % (self.l1.linha_id, self.l2.linha_id, pontosjson)
def __repr__(self):
return u"%s --> %s\t (%d, %d, %d, %d)" % (self.l1.linha_id,self.l2.linha_id, len(self.l1.grafo.vertices), len(self.l2.grafo.vertices),len(self.pontos_intersecao),len(self.get_pontos_de_parada()))
class Rotas:
"""
    This class finds which direct and indirect bus lines serve a given journey
>>> e = Rotas(6151,5059)
>>> e.linhas_diretas
[]
>>> len(e.linhas_indiretas)
26
>>> e.save_json()
"""
def __init__(self, p_inicio, p_destino):
self.p1 = p_inicio
self.p2 = p_destino
pv = PontoVitoria()
linhas_diretas = pv.linhasQueFazemPercurso(p_inicio, p_destino)
outras_linhas = set(pv.linhasQuePassamNoPonto(p_inicio)) - set(linhas_diretas)
linhas_destino = set(pv.linhasQuePassamNoPonto(p_destino)) - set(linhas_diretas)
linhas_indiretas = []
for l in outras_linhas:
l1 = Linha(l)
for ld in linhas_destino:
l2 = Linha(ld)
pontos_de_intersecao = l1.pontos_de_intersecao_com(l2)
if pontos_de_intersecao:
linhas_indiretas.append( RotaIndireta(p_inicio,p_destino,l1,l2,pontos_de_intersecao))
self.linhas_diretas = linhas_diretas
self.linhas_indiretas = []
for r in linhas_indiretas:
if len(r.get_pontos_de_parada())>0:
self.linhas_indiretas.append(r)
def save_json(self):
"""
        saves the discovered routes to a file named STARTPOINT_DESTINATIONPOINT.json
"""
rotas_diretas = ""
for r in self.linhas_diretas:
rotas_diretas +="\"%s\", " % r
rotas_diretas= rotas_diretas[:-2]
rotas_indiretas = ""
for r in self.linhas_indiretas:
rotas_indiretas +="%s, " % r.to_json()
rotas_indiretas = rotas_indiretas[:-2]
texto_json = "{ \"ponto_inicial\":\"%s\", \"ponto_destino\":\"%s\",\n\"linhas_diretas\":[%s],\n\"linhas_indiretas\":[%s] }" % (self.p1,self.p2, rotas_diretas, rotas_indiretas)
with open("dados/rotas/%s_%s.json" % (self.p1,self.p2),'w') as f:
f.write(texto_json)
def __repr__(self):
texto = "ROTA: %s\t->\t%s\n" % (self.p1, self.p2)
texto+= "Linhas diretas: %s\n" % self.linhas_diretas
texto+= "linhas indiretas:\nLinha 1 --> Linha2\t (n_pontos1, n_pontos2, n_pontos_intersecao,n_pontos_parada)\n"
for l in self.linhas_indiretas:
texto+=str(l)+"\n"
return texto
def test():
pontos= [
['6043','Maria Ortiz' ],
['5059', 'Shopping Vitoria'],
['5029', 'Shopping Bullevard'],
['6166', 'UFES'],
['4033', 'UFES - campus Maruipe'],
['7041', 'São Pedro'],
['2137', 'Rodoviária de Vitoria'],
['6163', 'Aeroporto']
]
for p in pontos:
for p2 in pontos:
if p != p2:
r=Rotas(p[0],p2[0])
print "rota: %s --> %s calculada" %(p[0],p2[0])
r.save_json()
if __name__== "__main__":
test()
|
"""
Pooja would like to withdraw X $US from an ATM. The cash machine will only accept the transaction if X is a multiple of 5, and Pooja's account balance has enough cash to perform the withdrawal transaction (including bank charges). For each successful withdrawal the bank charges 0.50 $US. Calculate Pooja's account balance after an attempted transaction.
Input
Positive integer 0 < X <= 2000 - the amount of cash which Pooja wishes to withdraw.
Nonnegative number 0<= Y <= 2000 with two digits of precision - Pooja's initial account balance.
Output
Output the account balance after the attempted transaction, given as a number with two digits of precision. If there is not enough money in the account to complete the transaction, output the current bank balance.
Example - Successful Transaction
Input:
30 120.00
Output:
89.50
Example - Incorrect Withdrawal Amount (not multiple of 5)
Input:
42 120.00
Output:
120.00
Example - Insufficient Funds
Input:
300 120.00
Output:
120.00
"""
x,y = input().split()
x = int(x)
y = float(y)
if x % 5 == 0 and y >= x + 0.5:
    print("%.2f" % (y - x - 0.5))
else:
    print("%.2f" % y)
|
import os
def repo_exists(repo):
os.chdir(f"D:\\MyProjects\\Python Projects")
return os.path.isdir(repo)
def check_status(repo):
os.chdir(f"D:\\MyProjects\\Python Projects\\{repo}")
os.system("git status")
os.chdir(f"D:\\MyProjects\\Python Projects")
def delete_local_repo(repo):
os.chdir("D:\\MyProjects\\Python Projects")
os.system(f"RD /S /Q {repo}")
def delete_env(repo):
os.system(f"conda remove --name {repo} --all --yes")
|
# -*- coding: utf-8 -*-
import json, base64, traceback, logging
import inject, psycopg2
import pytz, datetime
import dateutil.parser
import uuid
from model.systems.assistance.date import Date
from model.systems.issue.issue import Issue
class IssueModel:
issue = inject.attr(Issue)
'''
    ' Recursive deletion of an issue and all of its children
'''
def __deleteIssue(self, con, id):
cur = con.cursor()
ids = self.issue.getChildsId(con, id)
for idChild in ids:
self.__deleteIssue(con, idChild)
self.issue.deleteStatesFromIssue(con, id)
self.issue.deleteIssue(con, id)
'''
    ' Delete an issue and its children
'''
def deleteIssue(self, con, id):
self.__deleteIssue(con, id)
events = []
e = {
'type':'IssueDeletedEvent',
'data':id,
}
events.append(e)
return events
'''
    ' Insert data: the request payload and its initial state are stored
'''
def insert(self,con,request,officeId,requestorId,created,priority,visibility,relatedRequestId, state):
createdutc = created.astimezone(pytz.utc)
id = str(uuid.uuid4())
self.issue.insertIssue(con, id, request, officeId, requestorId, createdutc, priority, visibility, relatedRequestId)
self.issue.insertState(con, id, requestorId, createdutc, state)
events = []
e = {
'type':'IssueInsertedEvent',
'data':{
'id':id,
'request':request,
'officeId':officeId,
'requestorId':requestorId,
'created':createdutc,
'priority':priority,
'visibility':visibility,
'relatedRequestId':relatedRequestId,
'state':state,
'nodes':[],
}
}
events.append(e)
return events
|
'''
Given a string, compute recursively (no loops) a new string where all the lowercase 'x' chars have been changed to 'y' chars.
'''
def replaceX(s):
    print 'input:', s
    if len(s) == 0:
        return s
    if len(s) == 1:
        return 'y' if s == 'x' else s
    mid = len(s) // 2
    return replaceX(s[:mid]) + replaceX(s[mid:])
print replaceX('abcxabc')
|
#!/usr/bin/env python2
#
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import json
import os
import sys
import time
import threading
sys.path.append("utils")
import bmv2
import helper
from convert import *
from p4.v1 import p4runtime_pb2
# Helper function to add entries to the Ethernet table.
# The router cares about frames that match the local MAC or the broadcast MAC.
# Nothing else.
def AddEthernetEntries(sw, helper, local_mac, ingress_port):
table_entry = helper.buildTableEntry(
table_name = "cis553Ingress.tiHandleEthernet",
match_fields = { "hdr.ethernet.dstAddr": local_mac,
"standard_metadata.ingress_port" : ingress_port},
action_name = "cis553Ingress.aiForMe")
sw.WriteTableEntry(table_entry);
table_entry = helper.buildTableEntry(
table_name = "cis553Ingress.tiHandleEthernet",
match_fields = { "hdr.ethernet.dstAddr": "ff:ff:ff:ff:ff:ff",
"standard_metadata.ingress_port" : ingress_port},
action_name = "cis553Ingress.aiForMe")
sw.WriteTableEntry(table_entry);
# Helper function to add an LPM entry to the IPv4 forwarding table.
# prefix_len = 32 indicates that the address must match the full string
def AddRoutingEntry(sw, helper, ip, mac_sa, mac_da, egress_port, prefix_len = 32):
table_entry = helper.buildTableEntry(
table_name = "cis553Ingress.tiIpv4Lpm",
match_fields = { "hdr.ipv4.dstAddr": [ ip, prefix_len ] },
action_name = "cis553Ingress.aiForward",
action_params = { "mac_sa": mac_sa,
"mac_da": mac_da,
"egress_port": egress_port } )
sw.WriteTableEntry(table_entry);
# Helper function to add an entry to the ARP response table.
# oper = 1 checks that it is a request
def AddARPResponse(sw, helper, target_pa, mac_sa, oper = 1):
table_entry = helper.buildTableEntry(
table_name = "cis553Ingress.tiArpResponse",
match_fields = { "hdr.arp.oper": oper,
"hdr.arp.targetPA" : target_pa },
action_name = "cis553Ingress.aiArpResponse",
action_params = { "mac_sa": mac_sa } )
sw.WriteTableEntry(table_entry);
def ProgramSwitch(sw, id, p4info_helper):
mac_h1_to_s1 = "00:00:00:00:01:01"
mac_h2_to_s2 = "00:00:00:00:02:02"
mac_h3_to_s3 = "00:00:00:00:03:03"
mac_s1_to_h1 = "00:00:00:01:01:00"
mac_s1_to_s2 = "00:00:00:01:02:00"
mac_s1_to_s3 = "00:00:00:01:03:00"
mac_s2_to_s1 = "00:00:00:02:02:00"
mac_s2_to_h2 = "00:00:00:02:01:00"
mac_s2_to_s3 = "00:00:00:02:03:00"
mac_s3_to_s1 = "00:00:00:03:02:00"
mac_s3_to_s2 = "00:00:00:03:03:00"
mac_s3_to_h3 = "00:00:00:03:01:00"
if id == 1:
AddEthernetEntries(sw, p4info_helper, mac_s1_to_h1, 1);
AddEthernetEntries(sw, p4info_helper, mac_s1_to_s2, 2);
AddEthernetEntries(sw, p4info_helper, mac_s1_to_s3, 3);
AddRoutingEntry(sw, p4info_helper,
"10.0.1.1", mac_s1_to_h1, mac_h1_to_s1, 1);
AddRoutingEntry(sw, p4info_helper,
"10.0.2.2", mac_s1_to_s2, mac_s2_to_s1, 2);
AddRoutingEntry(sw, p4info_helper,
"10.0.3.3", mac_s1_to_s3, mac_s3_to_s1, 3);
AddARPResponse(sw, p4info_helper, "10.0.1.100", mac_s1_to_h1);
elif id == 2:
AddEthernetEntries(sw, p4info_helper, mac_s2_to_h2, 1);
AddEthernetEntries(sw, p4info_helper, mac_s2_to_s1, 2);
AddEthernetEntries(sw, p4info_helper, mac_s2_to_s3, 3);
AddRoutingEntry(sw, p4info_helper,
"10.0.1.1", mac_s2_to_s1, mac_s1_to_s2, 2);
AddRoutingEntry(sw, p4info_helper,
"10.0.2.2", mac_s2_to_h2, mac_h2_to_s2, 1);
AddRoutingEntry(sw, p4info_helper,
"10.0.3.3", mac_s2_to_s3, mac_s3_to_s2, 3);
AddARPResponse(sw, p4info_helper, "10.0.2.100", mac_s2_to_h2);
elif id == 3:
AddEthernetEntries(sw, p4info_helper, mac_s3_to_h3, 1);
AddEthernetEntries(sw, p4info_helper, mac_s3_to_s1, 2);
AddEthernetEntries(sw, p4info_helper, mac_s3_to_s2, 3);
AddRoutingEntry(sw, p4info_helper,
"10.0.1.1", mac_s3_to_s1, mac_s1_to_s3, 2);
AddRoutingEntry(sw, p4info_helper,
"10.0.2.2", mac_s3_to_s2, mac_s2_to_s3, 3);
AddRoutingEntry(sw, p4info_helper,
"10.0.3.3", mac_s3_to_h3, mac_h3_to_s3, 1);
AddARPResponse(sw, p4info_helper, "10.0.3.100", mac_s3_to_h3);
#while True:
# This control plane is 100% static! We don't need to loop.
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='CIS553 P4Runtime Controller')
parser.add_argument("-b", '--bmv2-json',
help="path to BMv2 switch description (json)",
type=str, action="store", default="build/basic.json")
parser.add_argument("-c", '--p4info-file',
help="path to P4Runtime protobuf description (text)",
type=str, action="store", default="build/basic.p4info")
args = parser.parse_args()
if not os.path.exists(args.p4info_file):
parser.error("File %s does not exist!" % args.p4info_file)
if not os.path.exists(args.bmv2_json):
parser.error("File %s does not exist!" % args.bmv2_json)
p4info_helper = helper.P4InfoHelper(args.p4info_file)
threads = []
print "Connecting to P4Runtime server on s1..."
sw1 = bmv2.Bmv2SwitchConnection('s1', "127.0.0.1:50051", 0)
sw1.MasterArbitrationUpdate()
sw1.SetForwardingPipelineConfig(p4info = p4info_helper.p4info,
bmv2_json_file_path = args.bmv2_json)
t = threading.Thread(target=ProgramSwitch, args=(sw1, 1, p4info_helper))
t.start()
threads.append(t)
print "Connecting to P4Runtime server on s2..."
sw2 = bmv2.Bmv2SwitchConnection('s2', "127.0.0.1:50052", 1)
sw2.MasterArbitrationUpdate()
sw2.SetForwardingPipelineConfig(p4info = p4info_helper.p4info,
bmv2_json_file_path = args.bmv2_json)
t = threading.Thread(target=ProgramSwitch, args=(sw2, 2, p4info_helper))
t.start()
threads.append(t)
print "Connecting to P4Runtime server on s3..."
sw3 = bmv2.Bmv2SwitchConnection('s3', "127.0.0.1:50053", 2)
sw3.MasterArbitrationUpdate()
sw3.SetForwardingPipelineConfig(p4info = p4info_helper.p4info,
bmv2_json_file_path = args.bmv2_json)
t = threading.Thread(target=ProgramSwitch, args=(sw3, 3, p4info_helper))
t.start()
threads.append(t)
for t in threads:
t.join()
|
from django.shortcuts import render, redirect,HttpResponseRedirect
from .forms import RegisterForm
from users.models import Resulttable,Insertposter
from django.db import models
def register(request):
    # only when the request is a POST has the user submitted the registration form
if request.method == 'POST':
form = RegisterForm(request.POST)
        # validate the submitted data
if form.is_valid():
            # if the data is valid, call the form's save() method to store the user in the database
form.save()
            # registration succeeded; redirect back to the home page
return redirect('/')
else:
        # the request is not a POST, so the user is viewing the page; show an empty registration form
form = RegisterForm()
    # render the template
    # if the user is just visiting the registration page, an empty form is rendered
    # if the user submitted the form but validation failed, the form is re-rendered with error messages
return render(request, 'users/register.html', context={'form': form})
def index(request):
return render(request, 'users/..//index.html')
# why this odd template path?
def check(request):
    return render(request, 'users/..//index.html')
# def showregist(request):
# pass
def showmessage(request):
usermovieid = []
usermovietitle = []
data=Resulttable.objects.filter(userId=1001)
for row in data:
usermovieid.append(row.imdbId)
try:
conn = get_conn()
cur = conn.cursor()
#Insertposter.objects.filter(userId=USERID).delete()
for i in usermovieid:
cur.execute('select * from moviegenre3 where imdbId = %s',i)
rr = cur.fetchall()
for imdbId,title,poster in rr:
usermovietitle.append(title)
print(title)
# print(poster_result)
finally:
conn.close()
return render(request, 'users/message.html', locals())
# USERID = 1002
def recommend1(request):
USERID = int(request.GET["userIdd"]) + 1000
Insertposter.objects.filter(userId=USERID).delete()
#selectMysql()
    read_mysql_to_csv('users/static/users_resulttable.csv',USERID)  # append data to speed things up
ratingfile = os.path.join('users/static', 'users_resulttable.csv')
usercf = UserBasedCF()
    userid = str(USERID)  # the current user's id
print(userid)
usercf.generate_dataset(ratingfile)
usercf.calc_user_sim()
    usercf.recommend(userid)  # produces the recommended imdbId values
    # first delete all existing rows
try:
conn = get_conn()
cur = conn.cursor()
#Insertposter.objects.filter(userId=USERID).delete()
for i in matrix:
cur.execute('select * from moviegenre3 where imdbId = %s',i)
rr = cur.fetchall()
for imdbId,title,poster in rr:
                    #print(value)  # value is the actual poster URL
if(Insertposter.objects.filter(title=title)):
continue
else:
Insertposter.objects.create(userId=USERID, title=title, poster=poster)
# print(poster_result)
finally:
conn.close()
    #results = Insertposter.objects.all()  # this is what gets passed to the HTML template
results = Insertposter.objects.filter(userId=USERID)
return render(request,'users/movieRecommend.html', locals())
# return render(request, 'users/..//index.html', locals())
def recommend2(request):
# USERID = int(request.GET["userIddd"]) + 1000
USERID = 1001
Insertposter.objects.filter(userId=USERID).delete()
#selectMysql()
    read_mysql_to_csv2('users/static/users_resulttable2.csv',USERID)  # append data to speed things up
ratingfile2 = os.path.join('users/static', 'users_resulttable2.csv')
itemcf = ItemBasedCF()
#userid = '1001'
    userid = str(USERID)  # the current user's id
print(userid)
itemcf.generate_dataset(ratingfile2)
itemcf.calc_movie_sim()
    itemcf.recommend(userid)  # produces the recommended imdbId values
    # first delete all existing rows
try:
conn = get_conn()
cur = conn.cursor()
#Insertposter.objects.filter(userId=USERID).delete()
for i in matrix2:
cur.execute('select * from moviegenre3 where imdbId = %s',i)
rr = cur.fetchall()
for imdbId,title,poster in rr:
                    #print(value)  # value is the actual poster URL
if(Insertposter.objects.filter(title=title)):
continue
else:
Insertposter.objects.create(userId=USERID, title=title, poster=poster)
# print(poster_result)
finally:
conn.close()
    results = Insertposter.objects.filter(userId=USERID)  # this is what gets passed to the HTML template
return render(request, 'users/movieRecommend2.html',locals())
# return HttpResponseRedirect('movieRecommend.html', locals())
def insert(request):
# MOVIEID = int(request.GET["movieId"])
global USERID
USERID = int(request.GET["userId"])+1000
# USERID = {{}}
RATING = float(request.GET["rating"])
IMDBID = int(request.GET["imdbId"])
Resulttable.objects.create(userId=USERID, rating=RATING,imdbId=IMDBID)
#print(USERID)
# return HttpResponseRedirect('/')
return render(request, 'index.html',{'userId':USERID,'rating':RATING,'imdbId':IMDBID})
import sys
import random
import os,math
from operator import itemgetter
import pymysql
import csv
from django.http import HttpResponse
import codecs
def get_conn():
conn = pymysql.connect(host='127.0.0.1', port=3307, user='root', passwd='admin', db='MovieData', charset='utf8')
return conn
def query_all(cur, sql, args):
cur.execute(sql, args)
return cur.fetchall()
def read_mysql_to_csv(filename,user):
with codecs.open(filename=filename, mode='w', encoding='utf-8') as f:
write = csv.writer(f, dialect='excel')
conn = get_conn()
cur = conn.cursor()
cur.execute('select * from users_resulttable')
#sql = ('select * from users_resulttable WHERE userId = 1001')
rr = cur.fetchall()
#results = query_all(cur=cur, sql=sql, args=None)
for result in rr:
#print(result)
write.writerow(result[:-1])
def read_mysql_to_csv2(filename,user):
with codecs.open(filename=filename, mode='a', encoding='utf-8') as f:
write = csv.writer(f, dialect='excel')
conn = get_conn()
cur = conn.cursor()
cur.execute('select * from users_resulttable')
sql = ('select * from users_resulttable WHERE userId = 1001')
rr = cur.fetchall()
results = query_all(cur=cur, sql=sql, args=None)
for result in results:
#print(result)
write.writerow(result[:-1])
import sys
import random
import math
import os
from operator import itemgetter
random.seed(0)
user_sim_mat = {}
matrix = [] #全局变量
matrix2 = []
class UserBasedCF(object):
''' TopN recommendation - User Based Collaborative Filtering '''
def __init__(self):
        self.trainset = {}    # training set
        self.testset = {}     # test set
        self.initialset = {}  # data for the user we are recommending to
self.n_sim_user = 30
self.n_rec_movie = 10
self.movie_popular = {}
        self.movie_count = 0  # total number of movies
print('Similar user number = %d' % self.n_sim_user, file=sys.stderr)
print('recommended movie number = %d' %
self.n_rec_movie, file=sys.stderr)
@staticmethod
def loadfile(filename):
''' load a file, return a generator. '''
fp = open(filename, 'r', encoding='UTF-8')
for i, line in enumerate(fp):
yield line.strip('\r\n')
# if i % 100000 == 0:
# print ('loading %s(%s)' % (filename, i), file=sys.stderr)
fp.close()
print('load %s success' % filename, file=sys.stderr)
def initial_dataset(self, filename1):
initialset_len = 0
for lines in self.loadfile(filename1):
users, movies, ratings = lines.split(',')
self.initialset.setdefault(users, {})
self.initialset[users][movies] = (ratings)
initialset_len += 1
def generate_dataset(self, filename2, pivot=1.0):
''' load rating data and split it to training set and test set '''
trainset_len = 0
testset_len = 0
for line in self.loadfile(filename2):
# user, movie, rating, _ = line.split('::')
user, movie, rating = line.split(',')
# split the data by pivot
            if random.random() < pivot:  # pivot=0.7 would mean a 7:3 train/test split
self.trainset.setdefault(user, {})
                self.trainset[user][movie] = (rating)  # trainset[user][movie] holds this user's rating for the movie
trainset_len += 1
else:
self.testset.setdefault(user, {})
self.testset[user][movie] = (rating)
testset_len += 1
print('split training set and test set succ', file=sys.stderr)
print('train set = %s' % trainset_len, file=sys.stderr)
print('test set = %s' % testset_len, file=sys.stderr)
def calc_user_sim(self):
movie2users = dict()
for user, movies in self.trainset.items():
for movie in movies:
# inverse table for item-users
if movie not in movie2users:
movie2users[movie] = set()
                movie2users[movie].add(user)  # ids of the users who rated this movie
                # print(movie)               # the movieId
                # print(movie2users[movie])  # {'userId', ...}
                # print(movie2users)         # movieId: {'userId', 'userId', ...}
# count item popularity at the same time
if movie not in self.movie_popular:
self.movie_popular[movie] = 0
self.movie_popular[movie] += 1
# print ('build movie-users inverse table succ', file=sys.stderr)
# save the total movie number, which will be used in evaluation
self.movie_count = len(movie2users)
print('total movie number = %d' % self.movie_count, file=sys.stderr)
        # count co-rated items between users
usersim_mat = user_sim_mat
# print ('building user co-rated movies matrix...', file=sys.stderr)
        for movie, users in movie2users.items():  # iterate over every (movie, users) pair in the inverse table
for u in users:
for v in users:
if u == v:
continue
usersim_mat.setdefault(u, {})
usersim_mat[u].setdefault(v, 0)
                    usersim_mat[u][v] += 1 / math.log(1 + len(users))  # popularity-weighted count of movies co-rated by users u and v
# print ('build user co-rated movies matrix succ', file=sys.stderr)
# calculate similarity matrix
# print ('calculating user similarity matrix...', file=sys.stderr)
simfactor_count = 0
PRINT_STEP = 20000
for u, related_users in usersim_mat.items():
for v, count in related_users.items():
usersim_mat[u][v] = count / math.sqrt(
len(self.trainset[u]) * len(self.trainset[v]))
simfactor_count += 1
def recommend(self, user):
''' Find K similar users and recommend N movies. '''
        matrix.clear()  # clear recommendations left over from a previous call
        K = self.n_sim_user   # number of similar users to consider
        N = self.n_rec_movie  # number of movies to recommend
        rank = dict()  # predicted interest of the user in each candidate movie
# print(self.initialset[user])
        watched_movies = self.trainset[user]  # movies the user has already rated (training set only)
        # ideally this should not be limited to the training set
# watched_movies = self.initialset[user]
for similar_user, similarity_factor in sorted(user_sim_mat[user].items(),
key=itemgetter(1), reverse=True)[
                0:K]:  # itemgetter(1) sorts by the similarity field; reverse=True gives descending order
            for imdbid in self.trainset[similar_user]:  # movies rated by each of the K most similar users
if imdbid in watched_movies:
                    continue  # skip movies the user has already seen
# predict the user's "interest" for each movie
                rank.setdefault(imdbid, 0)  # default interest of 0
                rank[imdbid] += similarity_factor  # accumulate the similarity of every neighbour who rated this movie
                # i.e. neighbour similarities are summed; each similarity is the co-rated count normalised by the two users' rating-set sizes
#print(rank[movie])
# return the N best movies
# rank_ = dict()
        rank_ = sorted(rank.items(), key=itemgetter(1), reverse=True)[0:N]  # now a list of (movie, score) pairs, not a dict
for key,value in rank_:
            matrix.append(key)  # matrix collects the recommended imdbId values
            # print(key)  # the recommended movies' imdbId values
print(matrix)
#return sorted(rank.items(), key=itemgetter(1), reverse=True)[0:N]
return matrix
class ItemBasedCF(object):
''' TopN recommendation - Item Based Collaborative Filtering '''
def __init__(self):
self.trainset = {}
self.testset = {}
self.n_sim_movie = 20
self.n_rec_movie = 10
self.movie_sim_mat = {}
self.movie_popular = {}
self.movie_count = 0
@staticmethod
def loadfile(filename):
''' load a file, return a generator. '''
fp = open(filename, 'r', encoding='UTF-8')
for i, line in enumerate(fp):
yield line.strip('\r\n')
fp.close()
print('load %s succ' % filename, file=sys.stderr)
def generate_dataset(self, filename, pivot=1.0):
''' load rating data and split it to training set and test set '''
trainset_len = 0
testset_len = 0
for line in self.loadfile(filename):
user, movie, rating = line.split(',')
rating = float(rating)
# split the data by pivot
if random.random() < pivot:
self.trainset.setdefault(user, {})
self.trainset[user][movie] = float(rating)
trainset_len += 1
else:
self.testset.setdefault(user, {})
self.testset[user][movie] = float(rating)
testset_len += 1
print('train set = %s' % trainset_len, file=sys.stderr)
print('test set = %s' % testset_len, file=sys.stderr)
def calc_movie_sim(self):
''' calculate movie similarity matrix '''
print('counting movies number and popularity...', file=sys.stderr)
for user, movies in self.trainset.items():
for movie in movies:
# count item popularity
if movie not in self.movie_popular:
self.movie_popular[movie] = 0
self.movie_popular[movie] += 1
# print('count movies number and popularity succ', file=sys.stderr)
# save the total number of movies
self.movie_count = len(self.movie_popular)
print('total movie number = %d' % self.movie_count, file=sys.stderr)
# count co-rated users between items
itemsim_mat = self.movie_sim_mat
# print('building co-rated users matrix...', file=sys.stderr)
for user, movies in self.trainset.items():
for m1 in movies:
for m2 in movies:
if m1 == m2:
continue
itemsim_mat.setdefault(m1, {})
itemsim_mat[m1].setdefault(m2, 0)
itemsim_mat[m1][m2] += 1 / math.log(1 + len(movies) * 1.0)
simfactor_count = 0
PRINT_STEP = 2000000
for m1, related_movies in itemsim_mat.items():
for m2, count in related_movies.items():
itemsim_mat[m1][m2] = count / math.sqrt(
self.movie_popular[m1] * self.movie_popular[m2])
simfactor_count += 1
if simfactor_count % PRINT_STEP == 0:
print('calculating movie similarity factor(%d)' %
simfactor_count, file=sys.stderr)
def recommend(self, user):
''' Find K similar movies and recommend N movies. '''
K = self.n_sim_movie
N = self.n_rec_movie
matrix2.clear()
rank = {}
watched_movies = self.trainset[user]
for movie, rating in watched_movies.items():
for related_movie, similarity_factor in sorted(self.movie_sim_mat[movie].items(),
key=itemgetter(1), reverse=True)[:K]:
if related_movie in watched_movies:
continue
rank.setdefault(related_movie, 0)
rank[related_movie] += similarity_factor * rating
# return the N best movies
rank_ = sorted(rank.items(), key=itemgetter(1), reverse=True)[:N]
for key,value in rank_:
            matrix2.append(key)  # matrix2 collects the recommended imdbId values
            # print(key)  # the recommended movies' imdbId values
print(matrix2)
return matrix2
#
if __name__ == '__main__':
    ratingfile2 = os.path.join('static', 'users_resulttable.csv')  # 671 users in total
usercf = UserBasedCF()
userId = '1'
# usercf.initial_dataset(ratingfile1)
usercf.generate_dataset(ratingfile2)
usercf.calc_user_sim()
# usercf.evaluate()
usercf.recommend(userId)
    # recommend 10 movies to the user; the output is (movieId, interest score)
|
"""
https://leetcode.com/problems/jump-game-vi/
You are given a 0-indexed integer array nums and an integer k.
You are initially standing at index 0. In one move, you can jump at most k steps forward without going outside the boundaries of the array. That is, you can jump from index i to any index in the range [i + 1, min(n - 1, i + k)] inclusive.
You want to reach the last index of the array (index n - 1). Your score is the sum of all nums[j] for each index j you visited in the array.
Return the maximum score you can get.
"""
from typing import List
class Solution:
def maxResult(self, nums: List[int], k: int) -> int:
if len(nums) == 1:
return nums[0]
dp = [0]* len(nums)
dp[0] = nums[0]
for i in range(1, len(dp)):
            window = dp[i - k:i] if k < i else dp[0:i]
            dp[i] = max(window) + nums[i]
return dp[-1]
nums = [10,-5,-2,4,0,3]
k = 3
Solution().maxResult(nums, k)
nums = [1,-1,-2,4,-7,3]
k = 2
Solution().maxResult(nums, k=2)
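# ---------------------------------------------------------------------------
# Added sketch: the slice-based DP above recomputes a window maximum for every
# index, which is O(n*k). A monotonic deque keeps the window maximum in O(n)
# overall. The function name below is illustrative, not part of the original.
from collections import deque
from typing import List

def max_result_deque(nums: List[int], k: int) -> int:
    dp = [0] * len(nums)
    dp[0] = nums[0]
    dq = deque([0])  # indices whose dp values are kept in decreasing order, within the last k positions
    for i in range(1, len(nums)):
        while dq and dq[0] < i - k:        # drop indices that fell out of the window
            dq.popleft()
        dp[i] = dp[dq[0]] + nums[i]        # best reachable score plus the current value
        while dq and dp[dq[-1]] <= dp[i]:  # keep the deque's dp values decreasing
            dq.pop()
        dq.append(i)
    return dp[-1]

assert max_result_deque([10, -5, -2, 4, 0, 3], 3) == 17
assert max_result_deque([1, -1, -2, 4, -7, 3], 2) == 7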
|
import math
import os
import random
import re
import sys
#
# Complete the 'nonDivisibleSubset' function below.
#
# The function is expected to return an INTEGER.
# The function accepts following parameters:
# 1. INTEGER k
# 2. INTEGER_ARRAY s
#
def nonDivisibleSubset(k, s):
# Write your code here
# Create an exclude dict
excl = {}
excl_sizes = [0] * len(s)
for i in range(len(s)):
excl[i] = []
for j in range(len(s)):
if i == j: continue
if (s[i] + s[j]) % k == 0:
excl[i].append(s[j])
excl_sizes[i] = len(excl[i])
for s_id, excludes in excl.items():
print(s_id, s[s_id], excludes, excl_sizes[s_id])
# Build s' for each starting point
s_p = [0] * len(s)
for s_id, excludes in excl.items():
s_p[s_id] = 1
includes = [s[s_id]]
for j in range(len(s)):
if s_id == j: continue
if s[j] not in excludes:
s_p[s_id] += 1
includes.append(s[j])
for x in excl[j]:
if x not in excludes:
excludes.append(x)
print(f's_p[{s_id}]={s_p[s_id]} includes={includes} excludes={excludes}')
print(f'max non-divisible set size = {max(s_p)}')
return max(s_p)
if __name__ == '__main__':
#n = 15
#k = 7
#s_str = "278 576 496 727 410 124 338 149 209 702 282 718 771 575 436"
n = 10
k = 5
s_str ="770528134 663501748 384261537 800309024 103668401 538539662 385488901 101262949 557792122 46058493"
s = list(map(int, s_str.split()))
result = nonDivisibleSubset(k, s)
print(result)
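# ---------------------------------------------------------------------------
# Added sketch: the pairwise/greedy construction above is not guaranteed to be
# optimal. The usual approach groups the numbers by their remainder mod k,
# keeps at most one number with remainder 0 (and at most one with remainder
# k/2 when k is even), and for every complementary pair of remainders keeps
# the larger group. The function name below is illustrative, not part of the
# original submission.
from collections import Counter

def non_divisible_subset_by_remainders(k, s):
    counts = Counter(x % k for x in s)
    size = min(counts.get(0, 0), 1)               # at most one multiple of k
    for r in range(1, k // 2 + 1):
        if k % 2 == 0 and r == k // 2:
            size += min(counts.get(r, 0), 1)      # at most one from the middle remainder
        else:
            size += max(counts.get(r, 0), counts.get(k - r, 0))
    return size

# Sanity check against the commented-out k=7 sample above, whose answer is 11.
_sample = list(map(int, "278 576 496 727 410 124 338 149 209 702 282 718 771 575 436".split()))
assert non_divisible_subset_by_remainders(7, _sample) == 11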
|
from re import compile, match
REGEX = compile(r'((25[0-5]|2[0-4]\d|1\d\d|[1-9]\d|\d)\.){3}'
r'(25[0-5]|2[0-4]\d|1\d\d|[1-9]\d|\d)$')
def is_valid_IP(strng):
""" is_valid_ip == PEP8 (forced mixedCase by CodeWars) """
return bool(match(REGEX, strng))
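# Added illustrative checks (inputs chosen here, not part of the kata's tests):
assert is_valid_IP("127.0.0.1")
assert is_valid_IP("0.0.0.0")
assert is_valid_IP("255.255.255.255")
assert not is_valid_IP("256.1.1.1")    # octet out of range
assert not is_valid_IP("01.02.03.04")  # leading zeros are rejected
assert not is_valid_IP("1.2.3")        # too few octets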
|
#
# Copyright © 2021 Uncharted Software Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import numpy as np
import pandas as pd
from tqdm import tqdm
from time import time
import torch
from torch import nn
from torch.utils.data import TensorDataset, DataLoader
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.modeling import (
BertModel,
PreTrainedBertModel,
BertForSequenceClassification,
)
from pytorch_pretrained_bert.optimization import BertAdam
from .base import DistilBaseModel
import logging
logger = logging.getLogger(__name__)
# --
# Helpers
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
return tokens_a, tokens_b
def examples2dataset(examples, max_seq_length, tokenizer):
features = []
for (ex_index, example) in enumerate(examples):
tokens_a = tokenizer.tokenize(example["text_a"])
tokens_b = tokenizer.tokenize(example["text_b"])
tokens_a, tokens_b = _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
tokens = ["[CLS]"] + tokens_a + ["[SEP]"] + tokens_b + ["[SEP]"]
segment_ids = ([0] * (len(tokens_a) + 2)) + ([1] * (len(tokens_b) + 1))
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
features.append(
{
"input_ids": input_ids,
"input_mask": input_mask,
"segment_ids": segment_ids,
"label_id": example["label"],
}
)
all_input_ids = torch.LongTensor([f["input_ids"] for f in features])
all_input_mask = torch.LongTensor([f["input_mask"] for f in features])
all_segment_ids = torch.LongTensor([f["segment_ids"] for f in features])
all_label_ids = torch.LongTensor([f["label_id"] for f in features])
return TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
# --
# Model helpers
def warmup_linear(x, warmup=0.002):
if x < warmup:
return x / warmup
else:
return 1.0 - x
class QAModel(PreTrainedBertModel):
def __init__(self, config, num_labels=2, weights=None):
super().__init__(config)
self.num_labels = num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, num_labels)
self.apply(self.init_bert_weights)
if weights is not None:
self.loss_fn = nn.CrossEntropyLoss(torch.FloatTensor(weights))
else:
self.loss_fn = nn.CrossEntropyLoss()
self.use_classifier = True
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
_, pooled_output = self.bert(
input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False
)
if not self.use_classifier:
return pooled_output
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
if labels is not None:
loss = self.loss_fn(logits.view(-1, self.num_labels), labels.view(-1))
return logits, loss
else:
return logits
# --
# Wrapper
class BERTPairClassification(DistilBaseModel):
def __init__(
self,
model_path,
vocab_path,
columns=["question", "sentence"],
batch_size=32,
learning_rate=5e-5,
epochs=3,
warmup_proportion=0.1,
seed=123,
device="cuda",
):
self.columns = columns
self.batch_size = batch_size
self.learning_rate = learning_rate
self.epochs = epochs
self.warmup_proportion = warmup_proportion
self.model_path = model_path
self.do_lower_case = True
self.device = device
self.tokenizer = BertTokenizer.from_pretrained(
vocab_path, do_lower_case=self.do_lower_case
)
_ = np.random.seed(seed)
_ = torch.manual_seed(seed + 1)
_ = torch.cuda.manual_seed_all(seed + 2)
def _set_lr(self, progress):
lr_this_step = self.learning_rate * warmup_linear(
progress, self.warmup_proportion
)
for param_group in self.optimizer.param_groups:
param_group["lr"] = lr_this_step
def _row2example(self, row):
return {
"text_a": row[self.columns[0]],
"text_b": row[self.columns[1]],
"label": int(row["_label"]),
}
def fit(self, X_train, y_train, U_train=None):
# --
# Prep
X_train = X_train.copy()
X_train["_label"] = y_train
train_examples = list(X_train.apply(self._row2example, axis=1))
label_list = list(set(X_train._label.astype(str)))
self.num_labels = len(label_list)
num_train_steps = int(
len(train_examples) / self.batch_size * float(self.epochs)
)
q_lens = X_train[self.columns[0]].apply(
lambda x: len(self.tokenizer.tokenize(x))
)
s_lens = X_train[self.columns[1]].apply(
lambda x: len(self.tokenizer.tokenize(x))
)
self.max_seq_len = int(np.percentile(q_lens + s_lens, 99) + 1)
train_dataset = examples2dataset(
train_examples, self.max_seq_len, self.tokenizer
)
dataloaders = {
"train": DataLoader(
dataset=train_dataset,
shuffle=True,
batch_size=self.batch_size,
num_workers=4,
),
}
# --
# Define model
self.model = QAModel.from_pretrained(
self.model_path,
num_labels=self.num_labels,
# weights=[0.1, 1],
).to(self.device)
# --
# Optimizer
params = list(self.model.named_parameters())
no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
grouped_params = [
{
"params": [p for n, p in params if not any(nd in n for nd in no_decay)],
"weight_decay": 0.01,
},
{
"params": [p for n, p in params if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
self.optimizer = BertAdam(
params=grouped_params,
lr=self.learning_rate,
warmup=self.warmup_proportion,
t_total=num_train_steps,
)
# --
# Train
train_step = 0
_ = self.model.train()
for epoch_idx in tqdm(range(self.epochs), desc="Epoch"):
train_loss_hist = []
gen = tqdm(dataloaders["train"], desc="train iter")
for step, batch in enumerate(gen):
input_ids, input_mask, segment_ids, label_ids = tuple(
t.to(self.device) for t in batch
)
_, loss = self.model(input_ids, segment_ids, input_mask, label_ids)
loss.backward()
train_loss_hist.append(loss.item())
self._set_lr(train_step / num_train_steps)
self.optimizer.step()
self.optimizer.zero_grad()
train_step += 1
gen.set_postfix(loss=loss.item())
self.train_loss_hist = train_loss_hist
return self
def predict(self, X):
# --
# Prep
X = X.copy()
X["_label"] = -1
examples = list(X.apply(self._row2example, axis=1))
dataset = examples2dataset(examples, self.max_seq_len, self.tokenizer)
dataloaders = {
"test": list(
DataLoader(
dataset=dataset,
shuffle=False,
batch_size=self.batch_size,
num_workers=4,
)
),
}
# --
# Predict
_ = self.model.eval()
all_logits = []
gen = tqdm(dataloaders["test"], desc="score iter")
for step, batch in enumerate(gen):
input_ids, input_mask, segment_ids, _ = tuple(
t.to(self.device) for t in batch
)
with torch.no_grad():
logits = self.model(input_ids, segment_ids, input_mask)
logits = logits.detach().cpu().numpy()
all_logits.append(logits)
return np.vstack(all_logits).argmax(axis=-1)
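# ---------------------------------------------------------------------------
# Added usage sketch: the paths, example rows and labels below are placeholders,
# not files that ship with this repo. It only illustrates the intended call
# pattern: a DataFrame with the two configured text columns plus integer labels
# goes into fit(), and predict() returns the argmax class index per row.
if __name__ == "__main__":
    MODEL_PATH = "/path/to/bert-base-uncased"            # placeholder checkpoint directory
    VOCAB_PATH = "/path/to/bert-base-uncased-vocab.txt"  # placeholder vocab file

    X = pd.DataFrame({
        "question": ["what color is the sky?", "who wrote hamlet?"],
        "sentence": ["the sky is blue.", "the recipe calls for two eggs."],
    })
    y = pd.Series([1, 0])  # 1 = the sentence answers the question, 0 = it does not

    clf = BERTPairClassification(
        model_path=MODEL_PATH,
        vocab_path=VOCAB_PATH,
        columns=["question", "sentence"],
        batch_size=2,
        epochs=1,
        device="cpu",  # the class defaults to "cuda"
    )
    clf.fit(X, y)
    preds = clf.predict(X)  # numpy array of predicted class indices
    print(preds)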
|
from baselines import main
from dp_utils import get_noise_mul, get_renyi_divergence
MAX_GRAD_NORM = 0.1
MAX_EPS = 5
BATCH_SIZES = [512, 1024, 2048, 4096, 8192, 16384]
BASE_LRS = [0.125, 0.25, 0.5, 1.0]
TARGET_EPS = 3
TARGET_EPOCHS = [30, 60, 120]
BN_MULS = [6, 8]
GROUPS = [9, 27, 81]
for target_epoch in TARGET_EPOCHS:
for bs in BATCH_SIZES:
for bn_mul in BN_MULS:
rdp_norm = 2 * get_renyi_divergence(1.0, bn_mul)
mul = get_noise_mul(50000, bs, TARGET_EPS, target_epoch, rdp_init=rdp_norm)
for base_lr in BASE_LRS:
lr = (bs // 512) * base_lr
print(f"epoch={target_epoch}, bs={bs}, bn_mul={bn_mul}, lr={base_lr}*{bs//512}={lr}, mul={mul}")
logdir = f"logs/baselines/cifar10/bs={bs}_lr={lr}_mul={mul:.2f}_bn={bn_mul}"
main(dataset="cifar10", max_grad_norm=MAX_GRAD_NORM,
lr=lr, batch_size=bs, noise_multiplier=mul,
input_norm="BN", bn_noise_multiplier=bn_mul,
max_epsilon=MAX_EPS, logdir=logdir, epochs=150)
for target_epoch in TARGET_EPOCHS:
for bs in BATCH_SIZES:
for group in GROUPS:
mul = get_noise_mul(50000, bs, TARGET_EPS, target_epoch)
for base_lr in BASE_LRS:
lr = (bs // 512) * base_lr
print(f"epoch={target_epoch}, bs={bs}, GN={group}, lr={base_lr}*{bs//512}={lr}, mul={mul}")
logdir = f"logs/baselines/cifar10/bs={bs}_lr={lr}_mul={mul:.2f}_GN={group}"
main(dataset="cifar10", max_grad_norm=MAX_GRAD_NORM,
lr=lr, batch_size=bs, noise_multiplier=mul,
input_norm="GroupNorm", num_groups=group,
max_epsilon=MAX_EPS, logdir=logdir, epochs=150)
|
# Generated by Django 2.0.3 on 2018-04-08 10:41
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('recipes', '0004_auto_20180407_1005'),
]
operations = [
migrations.AlterField(
model_name='medicinedosage',
name='medicine',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='recipes.MedicineName'),
),
migrations.AlterField(
model_name='medicinerequest',
name='apothecary',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='recipes.Apothecary'),
),
migrations.AlterField(
model_name='medicinerequest',
name='medicine_count',
field=models.SmallIntegerField(default=0),
),
migrations.AlterField(
model_name='medicinerequest',
name='request_confirmation_time',
field=models.DateTimeField(null=True),
),
]
|
#!/usr/bin/env python
# coding: utf-8
import os
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import torch
import core.transforms as T
from utils.plot_utils import plot_single_image
from datasets.coco_to_pd import read_coco_classes
from torchvision.models.detection.faster_rcnn import fasterrcnn_resnet50_fpn
def get_transform(train):
transforms = [T.ToTensor()]
if train:
transforms.append(T.RandomHorizontalFlip(0.5))
return T.Compose(transforms)
# === pathes
pathes = {}
pathes['project_root'] = '../'
pathes['data'] = os.path.join(pathes['project_root'], 'materials/images/')
# === load image
img_path = os.path.join(pathes['data'], '2007_000720.jpg')
img = Image.open(img_path)
# === load model
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
model = fasterrcnn_resnet50_fpn(pretrained=True)
model.to(device)
# === transform image
tr_imgs = get_transform(False)
img_tensor, _ = tr_imgs(img, None)
# === run model
model.eval()
with torch.no_grad():
prediction = model([img_tensor.to(device)])
predict_boxes = prediction[0]['boxes'].cpu().numpy()
predict_labels = prediction[0]['labels'].cpu().numpy()
predict_scores = prediction[0]['scores'].cpu().numpy()
predict_n = len(predict_labels)
# TODO: make use of mask also in plotting the results
if 'masks' in prediction[0].keys():
predict_masks = np.squeeze(prediction[0]['masks'].mul(255).byte().cpu().numpy())
# === threshold
vis_thresh = 0.5
# === prepare results for plotting
coco_classes, _ = read_coco_classes(
os.path.join(pathes['project_root'], 'data/coco_classes.txt'))
coco_list = [x[1] for x in sorted([(k, coco_classes[k]) for k in coco_classes], key=lambda x: x[0])]
bbox_text = []
bboxes = []
for k in range(predict_n):
# if predict_scores[k] > vis_thresh and coco_classes[predict_labels[k]] == 'laptop':
if predict_scores[k] > vis_thresh:
current_box = [predict_boxes[k][0], predict_boxes[k][1],
predict_boxes[k][2] - predict_boxes[k][0], predict_boxes[k][3] - predict_boxes[k][1]]
bboxes.append(current_box)
bbox_text.append(coco_classes[predict_labels[k]] + '[{:3.2f}]'.format(predict_scores[k]))
box_info = {'bbox_text': bbox_text,
'bbox_text_pred': [],
'bbox_text_pred_2': [],
'bboxes': bboxes,
'bboxes_pred': [],
'bboxes_pred_2': [],
'path_image': img_path}
plot_single_image(box_info, fig_size=18, make_print=False)
plt.savefig('../materials/images/out_demo.png')
|
import multiprocessing
import argparse
import pprint
import sys
#=========1=========2=========3=========4=========5=========6=========7=
def load_arguments():
argparser = argparse.ArgumentParser(sys.argv[0])
argparser.add_argument('--dataset_path',
type=str,
default='')
argparser.add_argument('--plot_extensions',
type=str,
default='n')
argparser.add_argument('--convert',
type=str,
default='n')
argparser.add_argument('--cluster_struct',
type=str,
default='y')
argparser.add_argument('--cluster_text',
type=str,
default='y')
argparser.add_argument('--num_extensions',
type=int,
default=15)
argparser.add_argument('--num_processes',
type=int,
default=multiprocessing.cpu_count()-1)
argparser.add_argument('--fill_threshold',
type=int,
default=0.4)
argparser.add_argument('--overwrite_distmat_struct',
type=str,
default='n')
argparser.add_argument('--overwrite_plot_struct',
type=str,
default='n')
argparser.add_argument('--overwrite_tokens_text',
type=str,
default='n')
argparser.add_argument('--overwrite_clusters_text',
type=str,
default='n')
argparser.add_argument('--minibatch_kmeans',
type=str,
default='n')
argparser.add_argument('--num_clusters_start',
type=int,
default=10)
argparser.add_argument('--num_clusters_end',
type=int,
default=0)
args = argparser.parse_args()
print('------------------------------------------------')
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(vars(args))
print('------------------------------------------------')
return args
|