blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
08e5f8f4f69fbeabf6340c5094a824952f014325 | 8304e371eff46389acf84c250dbb95af7087d260 | /sports_team_system/catalog/migrations/0016_auto_20200607_2019.py | 8e6f4a76e130a18ecf1eca058b40590a4cfb6f52 | [] | no_license | COYADI/System-Analysis | 601dbda423ce6e1549ade2c8864dd53b1ae346d6 | 308b19f13123edeba4f53b91f2a76d3305b39813 | refs/heads/master | 2022-11-02T21:04:55.664105 | 2020-06-11T03:24:20 | 2020-06-11T03:24:20 | 254,568,785 | 0 | 15 | null | 2020-06-08T11:33:55 | 2020-04-10T07:12:24 | HTML | UTF-8 | Python | false | false | 1,041 | py | # Generated by Django 3.0.5 on 2020-06-07 12:19
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django (see file header); migration files are replayed
    # verbatim on every environment, so only comments are added here.

    dependencies = [
        ('catalog', '0015_auto_20200607_2018'),
    ]

    operations = [
        # NOTE(review): these DateTimeField defaults are fixed datetimes that
        # were captured at `makemigrations` time — the classic symptom of the
        # model using default=datetime.now() (called once) instead of the
        # callable default=timezone.now. Confirm and fix in the model, which
        # will generate a corrective follow-up migration.
        migrations.AlterField(
            model_name='noticing',
            name='create_time',
            field=models.DateTimeField(default=datetime.datetime(2020, 6, 7, 20, 19, 20, 812109)),
        ),
        migrations.AlterField(
            model_name='training',
            name='create_time',
            field=models.DateTimeField(default=datetime.datetime(2020, 6, 7, 20, 19, 20, 808117)),
        ),
        migrations.AlterField(
            model_name='voting',
            name='create_time',
            field=models.DateTimeField(default=datetime.datetime(2020, 6, 7, 20, 19, 20, 810108)),
        ),
        migrations.AlterField(
            model_name='voting',
            name='expire_time',
            field=models.DateTimeField(blank=True, null=True),
        ),
    ]
| [
"b06705020@ntu.edu.tw"
] | b06705020@ntu.edu.tw |
04c25ae249b069385ac6991f6b7f60b72769500e | 6af5601738fbb38ace3454e88ddd773a64c72314 | /exercises/petting_zoo/slither_inn.py | 4d4d8bc226a6667de7e78e6aef1a069893f8b743 | [] | no_license | morriscodez/critters-and-croquettes | 82178ed673daa8f4988f40386ea9ca86f462c6d7 | 22368c14cea6a42bcaa9a800ca29242cfd29b741 | refs/heads/main | 2023-04-05T01:47:29.054819 | 2021-04-23T19:55:10 | 2021-04-23T19:55:10 | 360,280,011 | 0 | 0 | null | 2021-04-23T19:55:10 | 2021-04-21T19:13:45 | Python | UTF-8 | Python | false | false | 166 | py | class SlitherInn:
def __init__(self, name):
    """Create a 'Slither Inn' attraction with the given display name.

    Args:
        name: Human-readable name shown for this attraction.
    """
    self.attraction_name = name
    self.description = "safe place to fall asleep"
    # Critters currently housed here; callers append to this after creation.
    self.animals = []
"dylanrobertmorris@gmail.com"
] | dylanrobertmorris@gmail.com |
78d9bf44728572f4d21268ca5a3a81c35e52cf7e | 3dfb23604deb956cabd3e7d014389548f2b14e27 | /app/trade/apps.py | 33c34302eb16e58b685b3962cd5f3962890a1753 | [] | no_license | huanpython/mysite | 74c285f093a4af888d554d780997a23c25bc626e | 05bdba6174446117efd01f6f8c7d94768cb330fa | refs/heads/master | 2020-07-02T09:25:43.061976 | 2019-08-14T04:03:20 | 2019-08-14T04:03:20 | 201,485,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 123 | py | from django.apps import AppConfig
class TradeConfig(AppConfig):
    """Django application configuration for the trade app."""

    # Dotted module path Django uses to locate the app package.
    name = 'app.trade'
    # Human-readable label shown in the Django admin ("trade management").
    verbose_name = "交易管理"
"huanfuan@163.com"
] | huanfuan@163.com |
b49321d49f458783ff2e504b5b2c980bfaac3d46 | d686dd84682038efe027d6ba14a77282b3786287 | /src/e2e/parsers/downstream_config.py | 69bad6b06ae6a14096171b4ec094793c8d511cee | [
"Apache-2.0",
"MIT"
] | permissive | idiap/apam | 2d7e2cfee9a4fab7e194f4aee059d8684d932210 | b1ba6087dcb8d2b864b4a99979bb325fb17f3b99 | refs/heads/main | 2023-03-02T10:00:44.992938 | 2021-02-15T11:00:54 | 2021-02-15T11:00:54 | 335,578,798 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,152 | py | """ Parser for asr training options """
# Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Written by Apoorv Vyas <apoorv.vyas@idiap.ch>
import argparse
import sys
def add_downstream_options(parser):
    """Register the downstream (ASR fine-tuning) command-line options.

    Args:
        parser: An ``argparse.ArgumentParser`` to extend in place.

    Returns:
        The same parser, for call chaining.
    """
    # (flag, keyword arguments) pairs, registered in display order.
    string_options = [
        ('--ckpt',
         dict(default='',
              type=str,
              help='Path to upstream pre-trained checkpoint, required if using other than baseline',
              required=True)),
        ('--config',
         dict(default='config/asr-downstream.yaml',
              type=str,
              help='Path to downstream experiment config.',
              required=True)),
        ('--upconfig',
         dict(default='default',
              type=str,
              help='Path to the option upstream config. Pass default to use from checkpoint')),
    ]
    for flag, kwargs in string_options:
        parser.add_argument(flag, **kwargs)
    # Boolean switch: presence of --cpu disables GPU training.
    parser.add_argument('--cpu', action='store_true', help='Disable GPU training.')
    return parser
def print_downstream_options(args):
    """Write a human-readable summary of the parsed options to stderr."""
    # vars(args) converts the argparse.Namespace into a dict so the named
    # placeholders in the template can be filled by str.format.
    sys.stderr.write("""
Downstream Config:
Checkpoint: {ckpt}
ASR Config: {config}
Upconfig: {upconfig}
CPU Training: {cpu}
""".format(**vars(args)))
"philip.abbet@idiap.ch"
] | philip.abbet@idiap.ch |
7b535be1a7823f72cded96377305a79b7e8e5f84 | a408ccea1036482792a79eee9f5b835c1e4a4c8e | /Bolivian_Lowlands/Scenarios/Scenario_2_new.py | fb9dec645e487634be15ef5054d543ff77a4d59e | [
"MIT"
] | permissive | CIE-UMSS/VLIR_Energy_Demand | ae399ace372a7e5263b3276bb1a0ecded937d227 | 3a9c7a034ac6ff668c7734597daf4696f62ef671 | refs/heads/main | 2023-09-01T01:33:37.832158 | 2021-09-25T20:51:11 | 2021-09-25T20:51:11 | 353,682,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,569 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Sep 21 10:25:06 2021
@author: Clau
"""
'''
Paper: Energy sufficiency, lowlands.
SCENARIO 2
'''
from core import User, np
# Registry of every demand "user" in the village model; consumed downstream
# by the load-profile generator.
User_list = []

# Defining users.
# NOTE(review): User(name, n) presumably follows the RAMP convention of
# (user-type name, number of identical units of that type) — confirm in core.py.
H1 = User("low income", 73)
User_list.append(H1)
H2 = User("high income", 53)
User_list.append(H2)
Public_lighting = User("Public lighting ", 2)
User_list.append(Public_lighting)
HP = User("Health post", 1)
User_list.append(HP)
SA = User("School type A", 1)
User_list.append(SA)
Church = User("Church", 1)
User_list.append(Church)
WSS = User("water supply system", 1)
User_list.append(WSS)
Coliseum = User("Coliseum", 1)
User_list.append(Coliseum)
R = User("Restaurant", 1)
User_list.append(R)
GS = User("Grocery Store 1", 2)
User_list.append(GS)
EB = User("Entertainment Business", 3)
User_list.append(EB)
WS = User("Workshop", 2)
User_list.append(WS)
LAU = User("Lowlands agro-productive unit", 1)
User_list.append(LAU)
IW = User("Irrigation Water", 7)
User_list.append(IW)
# Appliances.
# NOTE(review): per RAMP conventions the positional Appliance args appear to be
# (user, number, power [W], n_windows, daily functioning time [min],
# random time variability, minimum functioning cycle [min], [fixed,
# fixed_cycle]); .windows(w1, w2, rand, [w3]) are minute-of-day intervals.
# Confirm against core.py before relying on these labels.

# Low Income Households.
H1_indoor_bulb = H1.Appliance(H1,3,7,2,120,0.2,10)
H1_indoor_bulb.windows([1082,1440],[0,30],0.35)
H1_outdoor_bulb = H1.Appliance(H1,1,13,2,600,0.2,10)
H1_outdoor_bulb.windows([0,330],[1082,1440],0.35)
H1_TV = H1.Appliance(H1,1,60,3,90,0.1,5)
H1_TV.windows([750,840],[1082,1440],0.35,[0,30])
H1_Antenna = H1.Appliance(H1,1,8,3,90,0.1,5)
H1_Antenna.windows([750,840],[1082,1440],0.35,[0,30])
H1_Phone_charger = H1.Appliance(H1,2,2,1,300,0.2,5)
H1_Phone_charger.windows([1080,1440],[0,0],0.35)

# High income households (adds freezer, mixer, fan, laptop).
H2_indoor_bulb = H2.Appliance(H2,4,7,2,120,0.2,10)
H2_indoor_bulb.windows([1082,1440],[0,30],0.35)
H2_outdoor_bulb = H2.Appliance(H2,2,13,2,600,0.2,10)
H2_outdoor_bulb.windows([0,330],[1082,1440],0.35)
H2_TV = H2.Appliance(H2,2,60,2,120,0.1,5)
H2_TV.windows([1082,1440],[0,60],0.35)
H2_DVD = H2.Appliance(H2,1,8,2,40,0.1,5)
H2_DVD.windows([1082,1440],[0,60],0.35)
H2_Antenna = H2.Appliance(H2,1,8,2,80,0.1,5)
H2_Antenna.windows([1082,1440],[0,60],0.35)
H2_Radio = H2.Appliance(H2,1,36,2,60,0.1,5)
H2_Radio.windows([390,450],[1082,1260],0.35)
H2_Phone_charger = H2.Appliance(H2,4,2,2,300,0.2,5)
H2_Phone_charger.windows([1110,1440],[0,30],0.35)
# Freezer runs continuously with duty cycles (compressor on/off patterns).
H2_Freezer = H2.Appliance(H2,1,200,1,1440,0,30, 'yes',2)
H2_Freezer.windows([0,1440],[0,0])
H2_Freezer.specific_cycle_1(5,15,200,15)
H2_Freezer.specific_cycle_2(200,10,5,20)
H2_Freezer.cycle_behaviour([480,1200],[0,0],[0,479],[1201,1440])
H2_Mixer = H2.Appliance(H2,1,50,3,30,0.1,1, occasional_use = 0.33)
H2_Mixer.windows([420,450],[660,750],0.35,[1020,1170])
H2_Fan = H2.Appliance(H2,1,171,1,220,0.27,60)
H2_Fan.windows([720,1080],[0,0])
H2_Laptop = H2.Appliance(H2,1,70,1,90,0.3,30)
H2_Laptop.windows([960,1200],[0,0])
# Health post: lighting, ICT, medical equipment and a continuously-running
# vaccine fridge with three compressor duty cycles.
HP_indoor_bulb = HP.Appliance(HP,12,7,2,690,0.2,10)
HP_indoor_bulb.windows([480,720],[870,1440],0.35)
HP_outdoor_bulb = HP.Appliance(HP,1,13,2,690,0.2,10)
HP_outdoor_bulb.windows([0,342],[1037,1440],0.35)
HP_Phone_charger = HP.Appliance(HP,5,2,2,300,0.2,5)
HP_Phone_charger.windows([480,720],[900,1440],0.35)
HP_TV = HP.Appliance(HP,1,150,2,360,0.1,60)
HP_TV.windows([480,720],[780,1020],0.2)
HP_radio = HP.Appliance(HP,1,40,2,360,0.3,60)
HP_radio.windows([480,720],[780,1020],0.35)
HP_PC = HP.Appliance(HP,1,200,2,300,0.1,10)
HP_PC.windows([480,720],[1050,1440],0.35)
HP_printer = HP.Appliance(HP,1,100,1,60,0.3,10)
HP_printer.windows([540,1020],[0,0],0.35)
HP_fan = HP.Appliance(HP,2,60,1,240,0.2,60)
HP_fan.windows([660,960],[0,0],0.35)
HP_sterilizer_stove = HP.Appliance(HP,1,600,2,120,0.3,30)
HP_sterilizer_stove.windows([540,600],[900,960],0.35)
HP_needle_destroyer = HP.Appliance(HP,1,70,1,60,0.2,10)
HP_needle_destroyer.windows([540,600],[0,0],0.35)
HP_water_pump = HP.Appliance(HP,1,400,1,30,0.2,10)
HP_water_pump.windows([480,510],[0,0],0.35)
HP_Fridge = HP.Appliance(HP,3,150,1,1440,0,30, 'yes',3)
HP_Fridge.windows([0,1440],[0,0])
HP_Fridge.specific_cycle_1(150,20,5,10)
HP_Fridge.specific_cycle_2(150,15,5,15)
HP_Fridge.specific_cycle_3(150,10,5,20)
HP_Fridge.cycle_behaviour([580,1200],[0,0],[420,579],[0,0],[0,419],[1201,1440])
# School type A: lighting plus AV equipment with 50% occasional use.
SA_indoor_bulb = SA.Appliance(SA,6,7,2,120,0.25,30)
SA_indoor_bulb.windows([480,780],[840,1140],0.2)
SA_outdoor_bulb = SA.Appliance(SA,1,13,1,60,0.2,10)
SA_outdoor_bulb.windows([1007,1080],[0,0],0.35)
SA_TV = SA.Appliance(SA,1,60,2,120,0.1,5, occasional_use = 0.5)
SA_TV.windows([480,780],[840,1140],0.2)
SA_radio = SA.Appliance(SA,3,4,2,120,0.1,5, occasional_use = 0.5)
SA_radio.windows([480,780],[840,1140],0.2)
SA_DVD = SA.Appliance(SA,1,8,2,120,0.1,5, occasional_use = 0.5)
SA_DVD.windows([480,780],[840,1140],0.2)

# Public lighting: street lamps, dusk-to-dawn, flat (non-stochastic) profile.
Public_lighting_lamp_post = Public_lighting.Appliance(Public_lighting,12,40,2,310,0,300, 'yes', flat = 'yes')
Public_lighting_lamp_post.windows([0,362],[1082,1440],0.1)

# Church: evening services only.
Ch_indoor_bulb = Church.Appliance(Church,10,26,1,210,0.2,60,'yes', flat = 'yes')
Ch_indoor_bulb.windows([1200,1440],[0,0],0.1)
Ch_outdoor_bulb = Church.Appliance(Church,7,26,1,240,0.2,60, 'yes', flat = 'yes')
Ch_outdoor_bulb.windows([1200,1440],[0,0],0.1)
Ch_speaker = Church.Appliance(Church,1,100,1,240,0.2,60)
Ch_speaker.windows([1200,1350],[0,0],0.1)

# Water supply system: pump used on roughly 1 day in 3.
WSS_water_pump = WSS.Appliance(WSS,1,1700,2,60,0.2,10,occasional_use = 0.33)
WSS_water_pump.windows([420,720],[840,1020],0.35)

# Coliseum: floodlighting, flat profile.
Lights = Coliseum.Appliance(Coliseum,25,150,2,310,0.1,300, 'yes', flat = 'yes')
Lights.windows([0,336],[1110,1440],0.2)

# Grocery Store: lighting, radio and a duty-cycled freezer.
GS_indoor_bulb = GS.Appliance(GS,2,7,2,120,0.2,10)
GS_indoor_bulb.windows([1107,1440],[0,30],0.35)
GS_outdoor_bulb = GS.Appliance(GS,1,13,2,600,0.2,10)
GS_outdoor_bulb.windows([0,330],[1107,1440],0.35)
GS_freezer = GS.Appliance(GS,1,200,1,1440,0,30,'yes',3)
GS_freezer.windows([0,1440],[0,0])
GS_freezer.specific_cycle_1(200,20,5,10)
GS_freezer.specific_cycle_2(200,15,5,15)
GS_freezer.specific_cycle_3(200,10,5,20)
GS_freezer.cycle_behaviour([480,1200],[0,0],[300,479],[0,0],[0,299],[1201,1440])
GS_Radio = GS.Appliance(GS,1,36,2,60,0.1,5)
GS_Radio.windows([390,450],[1140,1260],0.35)

# Restaurant.
R_indoor_bulb = R.Appliance(R,2,7,2,120,0.2,10)
R_indoor_bulb.windows([1107,1440],[0,30],0.35)
R_Blender = R.Appliance(R,1,350,2,20,0.375,5)
R_Blender.windows([420,480],[720,780],0.5)
R_freezer = R.Appliance(R,1,200,1,1440,0,30,'yes',3)
R_freezer.windows([0,1440],[0,0])
R_freezer.specific_cycle_1(200,20,5,10)
R_freezer.specific_cycle_2(200,15,5,15)
R_freezer.specific_cycle_3(200,10,5,20)
R_freezer.cycle_behaviour([480,1200],[0,0],[300,479],[0,0],[0,299],[1201,1440])

# Entertainment Business: AV equipment plus freezer.
EB_indoor_bulb = EB.Appliance(EB,2,7,2,120,0.2,10)
EB_indoor_bulb.windows([1107,1440],[0,30],0.35)
EB_outdoor_bulb = EB.Appliance(EB,1,13,2,600,0.2,10)
EB_outdoor_bulb.windows([0,330],[1107,1440],0.35)
EB_Stereo = EB.Appliance(EB,1,150,2,90,0.1,5, occasional_use = 0.33)
EB_Stereo.windows([480,780],[0,0],0.35)
EB_TV = EB.Appliance(EB,1,60,2,120,0.1,5, occasional_use = 0.5)
EB_TV.windows([480,780],[840,1140],0.2)
EB_PC = EB.Appliance(EB,1,50,2,210,0.1,10)
EB_PC.windows([480,780],[840,1140],0.35)
EB_freezer = EB.Appliance(EB,1,200,1,1440,0,30,'yes',3)
EB_freezer.windows([0,1440],[0,0])
EB_freezer.specific_cycle_1(200,20,5,10)
EB_freezer.specific_cycle_2(200,15,5,15)
EB_freezer.specific_cycle_3(200,10,5,20)
EB_freezer.cycle_behaviour([480,1200],[0,0],[300,479],[0,0],[0,299],[1201,1440])

# Workshop: high-power tools with low daily-use probability.
WS_indoor_bulb = WS.Appliance(WS,2,7,2,120,0.2,10)
WS_indoor_bulb.windows([1107,1440],[0,30],0.35)
WS_welding_machine = WS.Appliance(WS,1,5500,1,60,0.5,30,occasional_use = 0.3)
WS_welding_machine.windows([0,1440],[0,0],0.35)
WS_grinding_machine = WS.Appliance(WS,1,750,1,480,0.125,60,occasional_use = 0.3)
WS_grinding_machine.windows([0,1440],[0,0],0.35)
WS_Radio = WS.Appliance(WS,1,36,2,60,0.1,5)
WS_Radio.windows([390,450],[1140,1260],0.35)

# Lowlands agro-productive unit (transformation machinery).
LAU_GD = LAU.Appliance(LAU,1,9360,1,180,0.2,30,occasional_use = 0.33)
LAU_GD.windows([420,1080],[0,0],0.35)
LAU_VW = LAU.Appliance(LAU,1,1170,1,480,0.2,15,occasional_use = 0.82)
LAU_VW.windows([420,1140],[0,0],0.35)
LAU_BT = LAU.Appliance(LAU,1,370,2,900,0.2,180)
LAU_BT.windows([360,930],[1080,1440],0.35)

# Irrigation pumping.
IW_water_pump = IW.Appliance(IW,1,1700,2,60,0.2,10,occasional_use = 0.33)
IW_water_pump.windows([420,720],[840,1020],0.35)
"noreply@github.com"
] | noreply@github.com |
d2880bfa73bd2dcfe254701168d1ff3a803181e6 | 04d30611d6ccc31b1e204bfa7f83efa50bab5ae6 | /github_data_pipeline_api.py | 483e28940fbc9a501a0cbd12166c7817c0f3d192 | [] | no_license | jerryshenyizhou/crypto_github_data | 957cdee889b0107960eb7669046857ec40b846cb | 2c510b9fbbc3233f4f663c3b36aa42bdf47f764e | refs/heads/master | 2021-09-05T21:46:36.994743 | 2018-01-31T07:12:40 | 2018-01-31T07:12:40 | 115,180,988 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,659 | py | # coding: utf-8
# In[2]:
import datetime
import json
import warnings
import pandas as pd
import requests
from pandas.io.json import json_normalize
warnings.filterwarnings('ignore')
import sys
reload(sys)
sys.setdefaultencoding('utf8')
sys.path.append('./utils')
from google_sheet_update import *
# In[3]:
# utility function, http request
def getjson(url, auth=0):
    """HTTP GET *url* with basic auth and return the decoded JSON body.

    Args:
        url: Fully-qualified URL to request.
        auth: (username, password) tuple for HTTP basic auth. The sentinel
            default ``0`` means "load GitHub credentials from
            ./utils/secret.json".

    Returns:
        The response body parsed as JSON (dict or list).
    """
    if auth == 0:
        # Fall back to the GitHub credentials stored alongside the project.
        with open('./utils/secret.json') as json_file:
            secret = json.load(json_file)
        auth = (str(secret['github']['username']), str(secret['github']['password']))
    # BUGFIX: this header was previously built but never attached to the
    # request; it is now actually sent.
    header = {'x-requested-with': 'XMLHttpRequest'}
    mainPage = requests.get(url, auth=auth, headers=header)
    data = mainPage.json()
    return data
# In[4]:
# ingest coin github org
def coin_github_org_ingestion():
    """Pull the coin -> GitHub-organization mapping from the tracking sheet."""
    return get_googlesheet_data(
        '1tpOAiuRo9RNKnyPCVTGjc3H9S1miIJD1AimFLg8sv4E', 'organization')
# ingest coin github exclude data
def coin_github_exclusion_ingestion():
    """Pull the list of repos to exclude from the tracking sheet."""
    return get_googlesheet_data(
        '1tpOAiuRo9RNKnyPCVTGjc3H9S1miIJD1AimFLg8sv4E', 'excluding_repos')
# In[5]:
# ingest coinmarketcap data
def coin_marketcap_ingestion(limit=200):
    """Fetch the top *limit* coins from the CoinMarketCap ticker as a DataFrame."""
    ticker_url = "https://api.coinmarketcap.com/v1/ticker/?limit=" + str(limit)
    return json_normalize(getjson(ticker_url))
# In[6]:
# ingest github repo data
def github_repo_ingestion(github_org_data, trunc_date=datetime.date(2017, 1, 1)):
start_time = datetime.datetime.today()
data = pd.DataFrame()
for symbol in github_org_data.symbol.unique():
for github_org in list(github_org_data[github_org_data.symbol == symbol].github_org):
try:
data_repo = getjson("https://api.github.com/users/" + str(
github_org) + "/repos?sort=updated&direction=desc&per_page=100")
repo_dict = json_normalize(data_repo).set_index('name')
repo_dict['updated_at'] = pd.to_datetime(repo_dict['updated_at'])
repo_dict['symbol'] = symbol
repo_list = repo_dict[repo_dict.updated_at >= trunc_date].index
data = data.append(repo_dict)
print str(github_org) + ' completed!'
except:
print str(github_org) + ' failed!'
pass
# pd.DataFrame.to_csv(token_repo_df,'./data/token_repo_dictionary_'+str(today)+'.csv')
minutes_passed = (datetime.datetime.today() - start_time).seconds / 60
data.pushed_at = pd.to_datetime(data.pushed_at)
print 'finished ingesting coin github repo data! used ' + str(minutes_passed) + ' minutes!'
return data
# In[7]:
# write github org google sheet with coins that needs to be updated with github orgs
def update_no_org_coins(coin_github_org_data, coin_marketcap_data):
    """Push coins that lack a GitHub-org mapping back to the tracking sheet.

    Returns the list of undocumented symbols.
    """
    documented = set(coin_github_org_data.symbol.unique())
    tracked = set(coin_marketcap_data.symbol.unique())
    coin_gap_list = list(tracked - documented)
    gap_df = coin_marketcap_data[coin_marketcap_data.symbol.isin(coin_gap_list)][['symbol', 'id']]
    # Write starting at cell A1 of the "undocumented" tab.
    write_cells(gap_df,
                '1tpOAiuRo9RNKnyPCVTGjc3H9S1miIJD1AimFLg8sv4E',
                'undocumented_top_200_coins',
                'A', 1, transpose=0)
    return coin_gap_list
# In[8]:
# full contribution list per repo
def get_full_contribution_history(coin_github_repo_data):
start_time = datetime.datetime.today()
data_contributions_entry = pd.DataFrame()
for repo_name in coin_github_repo_data.full_name.unique():
try:
data_repo_contributors = json_normalize(getjson(
"https://api.github.com/repos/" + repo_name + "/stats/contributors?sort=total&direction=desc&per_page=100"))
data_repo_contributors['repo_full_name'] = repo_name
data_repo_contributors = \
data_repo_contributors.dropna(subset=['author.login']).set_index(['repo_full_name', 'author.login'])[
['weeks']]
data_repo_contributors = data_repo_contributors.weeks.apply(pd.Series)
data_repo_contributors = pd.DataFrame(data_repo_contributors.stack())[0].apply(pd.Series)
data_repo_contributors = data_repo_contributors[data_repo_contributors.c > 0]
data_contributions_entry = data_contributions_entry.append(data_repo_contributors)
memory = (data_contributions_entry.memory_usage()).sum() / (1024 ** 2)
minutes_passed = (datetime.datetime.today() - start_time).seconds / 60
print 'repo ' + repo_name + ' flattern completed! used ' + str(
minutes_passed) + ' minutes! ' + 'memory used ' + str(memory) + 'MB'
del data_repo_contributors
except:
print 'repo ' + repo_name + ' flattern failed! used ' + str(
minutes_passed) + ' minutes! ' + 'memory used ' + str(memory) + 'MB'
pass
minutes_passed = (datetime.datetime.today() - start_time).seconds / 60
print 'finished ingesting coin contribution data! used ' + str(minutes_passed) + ' minutes!'
data_contributions_entry['w'] = pd.to_datetime(data_contributions_entry.w, unit='s')
data_contributions_entry = data_contributions_entry.reset_index().drop(['level_2'], axis=1)
data_contributions_entry = data_contributions_entry.rename(
columns={'w': 'week', 'c': 'commits', 'a': 'additions', 'd': 'deletions', 'author.login': 'login'})
return data_contributions_entry
# In[52]:
# pulling repo lists that need to be updated
def generate_update_repo_list(data_contributions_entry_existing, coin_github_repo_data):
    """Return repos whose GitHub history is newer than the stored snapshot.

    A repo needs refreshing when its last GitHub push is more than a week
    after our last stored commit week, and the push happened within a week
    of the snapshot's overall freshness point. Never-seen repos always
    qualify (their stored week is set to a 1900 sentinel).
    """
    # Keep only rows with real commits and normalise both date columns.
    data_contributions_entry_existing = data_contributions_entry_existing[data_contributions_entry_existing.commits > 0]
    data_contributions_entry_existing.week = pd.to_datetime(data_contributions_entry_existing.week)
    coin_github_repo_data.pushed_at = pd.to_datetime(coin_github_repo_data.pushed_at)
    # Most recent week for which we hold any contribution data at all.
    last_refresh = data_contributions_entry_existing.week.max()
    # Per-repo: latest stored commit week vs latest push reported by GitHub.
    stored = pd.DataFrame(
        data_contributions_entry_existing.groupby('repo_full_name').week.max()).reset_index()
    live = coin_github_repo_data[['full_name', 'pushed_at']].rename(
        columns={'full_name': 'repo_full_name'})
    merged = stored.merge(live, how='right')
    # Repos we have never ingested get a sentinel week far in the past.
    merged.week = pd.to_datetime(merged.week).fillna(datetime.datetime(1900, 1, 1))
    stale = ((merged.pushed_at - merged.week).dt.days > 7) & \
            (merged.pushed_at > last_refresh - datetime.timedelta(7))
    return merged[stale].repo_full_name
# In[ ]:
# In[23]:
# full contribution list per repo
def update_contribution_history(data_contributions_entry_existing, coin_github_repo_data):
# generate repo lists that needs to be updated
repo_update_list = generate_update_repo_list(data_contributions_entry_existing, coin_github_repo_data)
print 'number of repos needed to be updated: ' + str(len(repo_update_list))
start_time = datetime.datetime.today()
data_contributions_entry = pd.DataFrame()
for repo_name in repo_update_list:
try:
data_repo_contributors = json_normalize(getjson(
"https://api.github.com/repos/" + repo_name + "/stats/contributors?sort=total&direction=desc&per_page=100"))
data_repo_contributors['repo_full_name'] = repo_name
data_repo_contributors = \
data_repo_contributors.dropna(subset=['author.login']).set_index(['repo_full_name', 'author.login'])[
['weeks']]
data_repo_contributors = data_repo_contributors.weeks.apply(pd.Series)
data_repo_contributors = pd.DataFrame(data_repo_contributors.stack())[0].apply(pd.Series)
data_repo_contributors = data_repo_contributors[data_repo_contributors.c > 0]
data_contributions_entry = data_contributions_entry.append(data_repo_contributors)
memory = (data_contributions_entry.memory_usage()).sum() / (1024 ** 2)
minutes_passed = (datetime.datetime.today() - start_time).seconds / 60
print 'repo ' + repo_name + ' flattern completed! used ' + str(
minutes_passed) + ' minutes! ' + 'memory used ' + str(memory) + 'MB'
del data_repo_contributors
except:
print 'repo ' + repo_name + ' flattern failed! used ' + str(
minutes_passed) + ' minutes! ' + 'memory used ' + str(memory) + 'MB'
pass
minutes_passed = (datetime.datetime.today() - start_time).seconds / 60
print 'finished ingesting coin contribution data! used ' + str(minutes_passed) + ' minutes!'
data_contributions_entry['w'] = pd.to_datetime(data_contributions_entry.w, unit='s')
data_contributions_entry = data_contributions_entry.reset_index().drop(['level_2'], axis=1)
data_contributions_entry = data_contributions_entry.rename(
columns={'w': 'week', 'c': 'commits', 'a': 'additions', 'd': 'deletions', 'author.login': 'login'})
data_contributions_entry_updated = data_contributions_entry_existing[
(~data_contributions_entry_existing.repo_full_name.isin(repo_update_list)) &
(data_contributions_entry_existing.commits > 0)].append(data_contributions_entry)
data_contributions_entry_updated.week = pd.to_datetime(data_contributions_entry_updated.week)
data_contributions_entry_updated = data_contributions_entry_updated[
data_contributions_entry_updated.week >= datetime.date(2009, 1, 1)]
return data_contributions_entry_updated
# In[11]:
# main function, update
# ---- Pipeline driver (runs when the script is executed/imported) ---------
print 'start github_data_pipeline! UTC time: '+str(datetime.datetime.today())
# 1. Ingest reference data: org mapping, market caps, repo metadata, exclusions.
coin_github_org_data = coin_github_org_ingestion()
coin_marketcap_data = coin_marketcap_ingestion()
coin_github_repo_data = github_repo_ingestion(coin_github_org_data)
coin_github_exclude_data = coin_github_exclusion_ingestion()
coin_gap_list = update_no_org_coins(coin_github_org_data, coin_marketcap_data)
# 2. Incrementally update contribution data from the last saved snapshot.
data_contributions_entry_existing = pd.DataFrame.from_csv('./data/latest_data/top_coin_repo_contributions_entry.csv')
data_contributions_entry = update_contribution_history(data_contributions_entry_existing, coin_github_repo_data)
# Drop repos the tracking sheet explicitly excludes.
data_contributions_entry = data_contributions_entry[~data_contributions_entry.repo_full_name.isin(coin_github_exclude_data.repo_full_name)]
# Full rebuild alternative (slow; re-downloads everything):
# data_contributions_entry = get_full_contribution_history(coin_github_repo_data)
# 3. Persist: overwrite the "latest" snapshots and keep a dated archive copy.
today = datetime.date.today()
pd.DataFrame.to_csv(coin_marketcap_data, './data/latest_data/coin_marketcap_data.csv')
# NOTE(review): double slash in this path looks accidental (harmless on POSIX).
pd.DataFrame.to_csv(coin_github_repo_data, './data/latest_data//top_coin_repo_list.csv')
pd.DataFrame.to_csv(data_contributions_entry, './data/latest_data/top_coin_repo_contributions_entry.csv')
# archiving just token contribution data
pd.DataFrame.to_csv(data_contributions_entry,
                    './data/archive_data/top_coin_repo_contributions_entry_' + str(today) + '.csv')
print 'finished github_data_pipeline! UTC time: '+str(datetime.datetime.today())
"jerryshenyizhou@gmail.com"
] | jerryshenyizhou@gmail.com |
3b6282d79f208c7ee3adea1226ff383f5e2e6fd3 | 4edbd8a42011e7f6db0cecf55a3f87a647c2ac1e | /expected_move.py | 9cfae8fc004e6c88df9dab63e846d2516a4b0d8a | [] | no_license | k6116/zebra | fe220ff0c1278f6ea20a06d030080345b540902d | 0aa0ba9a294d557d41377b112a624a294adbebf5 | refs/heads/master | 2022-11-08T07:54:20.415663 | 2020-06-27T19:03:43 | 2020-06-27T19:03:43 | 275,326,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,378 | py | import models
from database import SessionLocal, engine
from models import Stock, Option
from sqlalchemy.orm import Session
from sqlalchemy import and_, or_, desc, asc
import math
import tools
import numpy as np
def get_expected_move(symbol, underlying_price, dte):
    """Expected price move implied by near-the-money option IVs.

    Returns None when the underlying trades outside the available strike
    range for this expiry (the straddling strikes would not exist).
    """
    two_atm_call_iv = None
    strikes = tools.strike_increments(symbol, dte)
    # Only proceed when the price lies strictly inside the strike range,
    # so two_ntm_strikes can always find a neighbour on each side.
    if strikes[0] < underlying_price and underlying_price < strikes[len(strikes) - 1]:
        two_atm_strikes = two_ntm_strikes(strikes, underlying_price)
        # NOTE(review): impliedVolatility is assumed to be quoted in percent
        # (calc_expected_move_iv divides by 100) — confirm in tools.
        two_atm_call_iv = tools.get_option_prop(symbol, two_atm_strikes, 'CALL', 'impliedVolatility', dte)
        two_atm_put_iv = tools.get_option_prop(symbol, two_atm_strikes, 'PUT', 'impliedVolatility', dte)
        print('dte: ' + str(dte))
        print('two_atm_strikes')
        print(two_atm_strikes)
        expected_move_iv = calc_expected_move_iv(underlying_price, two_atm_call_iv, two_atm_put_iv, dte)
        return expected_move_iv
    else:
        return None
def calc_expected_move_iv(underlying_price, call_iv, put_iv, dte):
    """Expected move from the average implied volatility.

    Formula: ``S * (IV / 100) * sqrt(DTE) / sqrt(365)``.

    Args:
        underlying_price: Current underlying price.
        call_iv: Iterable of call implied volatilities in percent.
        put_iv: Iterable of put implied volatilities in percent.
        dte: Days to expiration.

    Returns:
        Expected absolute price move over the option's remaining life.
    """
    # The hand-rolled accumulation loops were replaced with sum(); the
    # average is taken over however many quotes were supplied (the caller
    # passes two calls + two puts, so this matches the original /4).
    ivs = list(call_iv) + list(put_iv)
    avg_iv = sum(ivs) / float(len(ivs))
    return float(underlying_price) * (float(avg_iv) / 100) * (math.sqrt(int(dte)) / math.sqrt(365))
def two_ntm_strikes(strikes, underlying_price):
    """Return the two strikes straddling *underlying_price*, sorted ascending.

    Relies on the caller having verified that the price lies strictly inside
    the strike range; otherwise strike_2 could index out of bounds (or -1,
    silently wrapping to the last strike).
    """
    # Index of the single closest (at-the-money) strike.
    strike_1 = tools.find_atm_strike_index(strikes, underlying_price)
    # Pick the neighbour on the side where the underlying actually trades.
    if (underlying_price < strikes[strike_1]):
        strike_2 = strike_1 - 1
    else:
        strike_2 = strike_1 + 1
    return sorted([strikes[strike_1], strikes[strike_2]], key=float)
def get_expected_move_premium(symbol, underlying_price, dte):
    """Expected move estimated from near-the-money straddle premiums.

    Interpolates call/put mid premiums between the two strikes straddling
    the underlying, then applies the 85%-of-straddle rule in
    calc_expected_move_premium.

    Note: implicitly returns None when the price is outside the strike
    range or only one strike exists (both branches fall through).
    """
    strikes = tools.strike_increments(symbol, dte)
    if strikes[0] < underlying_price and underlying_price < strikes[len(strikes) - 1]:
        if len(strikes) > 1:
            two_atm_strikes = two_ntm_strikes(strikes, underlying_price)
            two_premium_calls_bids = tools.get_option_prop(symbol, two_atm_strikes, 'CALL', 'bid', dte)
            two_premium_calls_asks = tools.get_option_prop(symbol, two_atm_strikes, 'CALL', 'ask', dte)
            two_premium_puts_bids = tools.get_option_prop(symbol, two_atm_strikes, 'PUT', 'bid', dte)
            two_premium_puts_asks = tools.get_option_prop(symbol, two_atm_strikes, 'PUT', 'ask', dte)
            # Since the underlying price won't sit exactly on a strike,
            # weight the premium difference by how far the price sits from
            # the upper strike (0 = at upper strike, 1 = at lower strike).
            strike_diff = abs(two_atm_strikes[1] - two_atm_strikes[0])
            price_distance = abs(underlying_price - two_atm_strikes[1])
            price_distance_percent = price_distance / strike_diff
            # Mid prices per strike (bid/ask lists come back strike-aligned).
            two_premium_calls_mid = (np.array(two_premium_calls_bids) + np.array(two_premium_calls_asks)) / 2.0
            two_premium_puts_mid = (np.array(two_premium_puts_bids) + np.array(two_premium_puts_asks)) / 2.0
            two_premium_calls_mid_diff = abs(two_premium_calls_mid[1] - two_premium_calls_mid[0])
            two_premium_puts_mid_diff = abs(two_premium_puts_mid[1] - two_premium_puts_mid[0])
            # Calls gain and puts lose premium moving down from the upper strike.
            premium_call = two_premium_calls_mid[1] + (two_premium_calls_mid_diff * price_distance_percent)
            premium_put = two_premium_puts_mid[1] - (two_premium_puts_mid_diff * price_distance_percent)
            expected_move_premium = calc_expected_move_premium(underlying_price, premium_call, premium_put, dte)
            return expected_move_premium
        else:
            return None
def calc_expected_move_premium(underlying_price, prem_call, prem_put, dte):
    """Expected move as ~85% of the at-the-money straddle premium.

    Args:
        underlying_price: Current underlying price.
        prem_call: ATM call mid premium.
        prem_put: ATM put mid premium.
        dte: Days to expiration (unused; kept for signature symmetry with
            calc_expected_move_iv).

    Returns:
        Expected absolute price move.
    """
    total_prem = prem_call + prem_put
    # Percent-of-underlying form; multiplying back by the price cancels the
    # underlying, so this is equivalent to 0.85 * straddle premium.
    expected_move_premium_percent = total_prem * 85 / underlying_price
    return expected_move_premium_percent / 100 * underlying_price
| [
"2784285+k6116@users.noreply.github.com"
] | 2784285+k6116@users.noreply.github.com |
4a3ade146a01bc93108ba525a191d0f4fc777c9b | 811f4cdb25e26f3b27640aaa2e2bca93e660d2d7 | /src/anomalib/models/components/flow/all_in_one_block.py | f2ab1e17c372351bdd22788c8bdee20d621f06a3 | [
"CC-BY-SA-4.0",
"CC-BY-SA-3.0",
"CC-BY-NC-SA-4.0",
"Python-2.0",
"Apache-2.0"
] | permissive | openvinotoolkit/anomalib | 4467dfc392398845e816387267cdf979ff76fe15 | 4abfa93dcfcb98771bc768b334c929ff9a02ce8b | refs/heads/main | 2023-09-03T16:49:05.019269 | 2023-08-28T14:22:19 | 2023-08-28T14:22:19 | 423,775,360 | 2,325 | 454 | Apache-2.0 | 2023-09-14T11:21:33 | 2021-11-02T09:11:38 | Python | UTF-8 | Python | false | false | 12,649 | py | """All In One Block Layer."""
# Copyright (c) https://github.com/vislearn/FrEIA
# SPDX-License-Identifier: MIT
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import warnings
from typing import Callable
import torch
import torch.nn.functional as F
from FrEIA.modules import InvertibleModule
from scipy.stats import special_ortho_group
from torch import Tensor, nn
def _global_scale_sigmoid_activation(input: Tensor) -> Tensor:
    """Shifted, scaled sigmoid used for the global affine scale.

    Args:
        input (Tensor): Pre-activation global scale parameter.

    Returns:
        Tensor: ``10 * sigmoid(input - 2)``, i.e. values in (0, 10).
    """
    shifted = input - 2.0
    return torch.sigmoid(shifted) * 10
def _global_scale_softplus_activation(input: Tensor) -> Tensor:
    """Scaled softplus used for the global affine scale.

    Args:
        input (Tensor): Pre-activation global scale parameter.

    Returns:
        Tensor: ``0.1 * softplus(input)`` with ``beta=0.5`` (always positive).
    """
    # Functional form of nn.Softplus(beta=0.5); identical computation.
    return F.softplus(input, beta=0.5) * 0.1
def _global_scale_exp_activation(input: Tensor) -> Tensor:
    """Exponential activation used for the global affine scale.

    Args:
        input (Tensor): Pre-activation global scale parameter.

    Returns:
        Tensor: Element-wise ``exp(input)`` (always positive).
    """
    return input.exp()
class AllInOneBlock(InvertibleModule):
    """Module combining the most common operations in a normalizing flow or similar model.
    It combines affine coupling, permutation, and global affine transformation
    ('ActNorm'). It can also be used as GIN coupling block, perform learned
    householder permutations, and use an inverted pre-permutation. The affine
    transformation includes a soft clamping mechanism, first used in Real-NVP.
    The block as a whole performs the following computation:
    .. math::
        y = V\\,R \\; \\Psi(s_\\mathrm{global}) \\odot \\mathrm{Coupling}\\Big(R^{-1} V^{-1} x\\Big)+ t_\\mathrm{global}
    - The inverse pre-permutation of x (i.e. :math:`R^{-1} V^{-1}`) is optional (see
      ``reverse_permutation`` below).
    - The learned householder reflection matrix
      :math:`V` is also optional all together (see ``learned_householder_permutation``
      below).
    - For the coupling, the input is split into :math:`x_1, x_2` along
      the channel dimension. Then the output of the coupling operation is the
      two halves :math:`u = \\mathrm{concat}(u_1, u_2)`.
    .. math::
        u_1 &= x_1 \\odot \\exp \\Big( \\alpha \\; \\mathrm{tanh}\\big( s(x_2) \\big)\\Big) + t(x_2) \\\\
        u_2 &= x_2
    Because :math:`\\mathrm{tanh}(s) \\in [-1, 1]`, this clamping mechanism prevents
    exploding values in the exponential. The hyperparameter :math:`\\alpha` can be adjusted.
    """
    def __init__(
        self,
        dims_in,
        dims_c=[],  # NOTE(review): mutable default argument — appears to be read-only here, but confirm
        subnet_constructor: Callable | None = None,
        affine_clamping: float = 2.0,
        gin_block: bool = False,
        global_affine_init: float = 1.0,
        global_affine_type: str = "SOFTPLUS",
        permute_soft: bool = False,
        learned_householder_permutation: int = 0,
        reverse_permutation: bool = False,
    ):
        """
        Args:
            subnet_constructor:
                class or callable ``f``, called as ``f(channels_in, channels_out)`` and
                should return a torch.nn.Module. Predicts coupling coefficients :math:`s, t`.
            affine_clamping:
                clamp the output of the multiplicative coefficients before
                exponentiation to +/- ``affine_clamping`` (see :math:`\\alpha` above).
            gin_block:
                Turn the block into a GIN block from Sorrenson et al, 2019.
                Makes it so that the coupling operations as a whole is volume preserving.
            global_affine_init:
                Initial value for the global affine scaling :math:`s_\\mathrm{global}`.
            global_affine_type:
                ``'SIGMOID'``, ``'SOFTPLUS'``, or ``'EXP'``. Defines the activation to be used
                on the beta for the global affine scaling (:math:`\\Psi` above).
            permute_soft:
                bool, whether to sample the permutation matrix :math:`R` from :math:`SO(N)`,
                or to use hard permutations instead. Note, ``permute_soft=True`` is very slow
                when working with >512 dimensions.
            learned_householder_permutation:
                Int, if >0, turn on the matrix :math:`V` above, that represents
                multiple learned householder reflections. Slow if large number.
                Dubious whether it actually helps network performance.
            reverse_permutation:
                Reverse the permutation before the block, as introduced by Putzky
                et al, 2019. Turns on the :math:`R^{-1} V^{-1}` pre-multiplication above.
        """
        super().__init__(dims_in, dims_c)
        channels = dims_in[0][0]
        # rank of the tensors means 1d, 2d, 3d tensor etc.
        self.input_rank = len(dims_in[0]) - 1
        # tuple containing all dims except for batch-dim (used at various points)
        self.sum_dims = tuple(range(1, 2 + self.input_rank))
        if len(dims_c) == 0:
            self.conditional = False
            self.condition_channels = 0
        else:
            # conditions must match the input on every non-channel dimension
            assert tuple(dims_c[0][1:]) == tuple(
                dims_in[0][1:]
            ), f"Dimensions of input and condition don't agree: {dims_c} vs {dims_in}."
            self.conditional = True
            self.condition_channels = sum(dc[0] for dc in dims_c)
        # channel split for the coupling: the first half receives the extra channel when odd
        split_len1 = channels - channels // 2
        split_len2 = channels // 2
        self.splits = [split_len1, split_len2]
        try:
            # a linear map / 1x1 convolution of matching rank implements the permutation
            self.permute_function = {0: F.linear, 1: F.conv1d, 2: F.conv2d, 3: F.conv3d}[self.input_rank]
        except KeyError:
            raise ValueError(f"Data is {1 + self.input_rank}D. Must be 1D-4D.")
        self.in_channels = channels
        self.clamp = affine_clamping
        self.GIN = gin_block
        self.reverse_pre_permute = reverse_permutation
        self.householder = learned_householder_permutation
        if permute_soft and channels > 512:
            warnings.warn(
                (
                    "Soft permutation will take a very long time to initialize "
                    f"with {channels} feature channels. Consider using hard permutation instead."
                )
            )
        # global_scale is used as the initial value for the global affine scale
        # (pre-activation). It is computed such that
        # global_scale_activation(global_scale) = global_affine_init
        # the 'magic numbers' (specifically for sigmoid) scale the activation to
        # a sensible range.
        if global_affine_type == "SIGMOID":
            global_scale = 2.0 - torch.log(torch.tensor([10.0 / global_affine_init - 1.0]))
            self.global_scale_activation = _global_scale_sigmoid_activation
        elif global_affine_type == "SOFTPLUS":
            global_scale = 2.0 * torch.log(torch.exp(torch.tensor(0.5 * 10.0 * global_affine_init)) - 1)
            self.global_scale_activation = _global_scale_softplus_activation
        elif global_affine_type == "EXP":
            global_scale = torch.log(torch.tensor(global_affine_init))
            self.global_scale_activation = _global_scale_exp_activation
        else:
            raise ValueError('Global affine activation must be "SIGMOID", "SOFTPLUS" or "EXP"')
        self.global_scale = nn.Parameter(torch.ones(1, self.in_channels, *([1] * self.input_rank)) * global_scale)
        self.global_offset = nn.Parameter(torch.zeros(1, self.in_channels, *([1] * self.input_rank)))
        if permute_soft:
            # random rotation matrix sampled from SO(channels)
            w = special_ortho_group.rvs(channels)
        else:
            # random hard permutation matrix
            indices = torch.randperm(channels)
            w = torch.zeros((channels, channels))
            w[torch.arange(channels), indices] = 1.0
        if self.householder:
            # instead of just the permutation matrix w, the learned housholder
            # permutation keeps track of reflection vectors vk, in addition to a
            # random initial permutation w_0.
            self.vk_householder = nn.Parameter(0.2 * torch.randn(self.householder, channels), requires_grad=True)
            self.w_perm = None
            self.w_perm_inv = None
            self.w_0 = nn.Parameter(torch.FloatTensor(w), requires_grad=False)
        else:
            # fixed (non-trainable) permutation; inverse is the transpose
            self.w_perm = nn.Parameter(
                torch.FloatTensor(w).view(channels, channels, *([1] * self.input_rank)), requires_grad=False
            )
            self.w_perm_inv = nn.Parameter(
                torch.FloatTensor(w.T).view(channels, channels, *([1] * self.input_rank)), requires_grad=False
            )
        if subnet_constructor is None:
            # NOTE(review): the adjacent string literals concatenate without a
            # space ("...subnet_constructorfunction...") — message typo only.
            raise ValueError("Please supply a callable subnet_constructor" "function or object (see docstring)")
        self.subnet = subnet_constructor(self.splits[0] + self.condition_channels, 2 * self.splits[1])
        self.last_jac = None
    def _construct_householder_permutation(self):
        """Computes a permutation matrix from the reflection vectors that are
        learned internally as nn.Parameters."""
        w = self.w_0
        for vk in self.vk_householder:
            # Householder reflection: I - 2 * v v^T / (v^T v)
            w = torch.mm(w, torch.eye(self.in_channels).to(w.device) - 2 * torch.ger(vk, vk) / torch.dot(vk, vk))
        for i in range(self.input_rank):
            # append singleton spatial dims so w can act as a 1x1 conv kernel
            w = w.unsqueeze(-1)
        return w
    def _permute(self, x, rev=False):
        """Performs the permutation and scaling after the coupling operation.
        Returns transformed outputs and the LogJacDet of the scaling operation."""
        if self.GIN:
            # GIN mode is volume preserving: no global scaling, zero log-det
            scale = 1.0
            perm_log_jac = 0.0
        else:
            scale = self.global_scale_activation(self.global_scale)
            perm_log_jac = torch.sum(torch.log(scale))
        if rev:
            return ((self.permute_function(x, self.w_perm_inv) - self.global_offset) / scale, perm_log_jac)
        else:
            return (self.permute_function(x * scale + self.global_offset, self.w_perm), perm_log_jac)
    def _pre_permute(self, x, rev=False):
        """Permutes before the coupling block, only used if
        reverse_permutation is set"""
        if rev:
            return self.permute_function(x, self.w_perm)
        else:
            return self.permute_function(x, self.w_perm_inv)
    def _affine(self, x, a, rev=False):
        """Given the passive half, and the pre-activation outputs of the
        coupling subnetwork, perform the affine coupling operation.
        Returns both the transformed inputs and the LogJacDet."""
        # the entire coupling coefficient tensor is scaled down by a
        # factor of ten for stability and easier initialization.
        a *= 0.1
        ch = x.shape[1]
        # soft clamp of the multiplicative component to +/- self.clamp
        sub_jac = self.clamp * torch.tanh(a[:, :ch])
        if self.GIN:
            # subtract the channel-mean so the coupling is volume preserving
            sub_jac -= torch.mean(sub_jac, dim=self.sum_dims, keepdim=True)
        if not rev:
            return (x * torch.exp(sub_jac) + a[:, ch:], torch.sum(sub_jac, dim=self.sum_dims))
        else:
            return ((x - a[:, ch:]) * torch.exp(-sub_jac), -torch.sum(sub_jac, dim=self.sum_dims))
    def forward(self, x, c=[], rev=False, jac=True):
        """See base class docstring"""
        if self.householder:
            # recomputed every pass because the reflection vectors are trained
            self.w_perm = self._construct_householder_permutation()
            if rev or self.reverse_pre_permute:
                self.w_perm_inv = self.w_perm.transpose(0, 1).contiguous()
        if rev:
            x, global_scaling_jac = self._permute(x[0], rev=True)
            x = (x,)
        elif self.reverse_pre_permute:
            x = (self._pre_permute(x[0], rev=False),)
        x1, x2 = torch.split(x[0], self.splits, dim=1)
        if self.conditional:
            x1c = torch.cat([x1, *c], 1)
        else:
            x1c = x1
        if not rev:
            a1 = self.subnet(x1c)
            x2, j2 = self._affine(x2, a1)
        else:
            a1 = self.subnet(x1c)
            x2, j2 = self._affine(x2, a1, rev=True)
        log_jac_det = j2
        x_out = torch.cat((x1, x2), 1)
        if not rev:
            x_out, global_scaling_jac = self._permute(x_out, rev=False)
        elif self.reverse_pre_permute:
            x_out = self._pre_permute(x_out, rev=True)
        # add the global scaling Jacobian to the total.
        # trick to get the total number of non-channel dimensions:
        # number of elements of the first channel of the first batch member
        n_pixels = x_out[0, :1].numel()
        log_jac_det += (-1) ** rev * n_pixels * global_scaling_jac
        return (x_out,), log_jac_det
    def output_dims(self, input_dims):
        """The block is shape-preserving: output dims equal input dims."""
        return input_dims
| [
"noreply@github.com"
] | noreply@github.com |
3ad493b7dfecdacdc6ea5bd86467d39b3b95ef44 | 8b9dfacd464558d1aacdef387ec3078a03d59158 | /aruco_detect_master/aruco_detect.py | f4ac1f0f3472cc04ad87f0aa78aeb49df4970415 | [] | no_license | Taospirit/HITsz_Course_Code | 56bb4a4327b39d9c45405e367dafc4211b74930b | b2f33bb3e5ce31894b12bfbf2f42cbf482933ed0 | refs/heads/master | 2020-04-10T20:30:07.952396 | 2019-07-10T13:53:38 | 2019-07-10T13:53:38 | 161,269,766 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,294 | py | import cv2 as cv
import cv2.aruco as aruco
import numpy as np
import copy
# IMG_WIDTH =
camera_matrix = np.array(([693.2, 0, 666.8], # 内参矩阵
[0, 693.4, 347.7],
[0, 0, 1]), dtype=np.double)
dist_coefs = np.array([-0.050791, 0.217163, 0.0000878, -0.000388, -0.246122],
dtype=np.double) # k1 k2 p1 p2 k3
VIDEO_WIDTH, VIDEO_HEIGHT = 640, 480
SHOW_WIDTH = 550
def drawPolyLines(img, raw_point_list):
    """Draw one closed yellow polyline through the (x, y) pairs of raw_point_list onto img."""
    xy_pairs = [[p[0], p[1]] for p in raw_point_list]
    vertices = np.array(xy_pairs, np.int32).reshape((-1, 1, 2))
    cv.polylines(img, [vertices], True, (0, 255, 255))
def saveVideo(cap_save, num):
    """Record frames from cap_save into ./aurco_test<num>.avi until 's' is pressed
    or a frame grab fails."""
    codec = cv.VideoWriter_fourcc(*'XVID')
    writer = cv.VideoWriter('./aurco_test'+str(num)+'.avi', codec, 20.0, (VIDEO_WIDTH, VIDEO_HEIGHT))
    while cap_save.isOpened():
        ok, frame = cap_save.read()
        if not ok:
            print ('ret is False...break out!')
            break
        writer.write(frame)
        cv.imshow('frame', frame)
        if cv.waitKey(1) & 0xFF == ord('s'):
            print ('End record video!')
            break
    writer.release()
def detectMarkersOrigin(img_origin):
    """Detect 6x6 ArUco markers on a copy of img_origin, draw pose axes and id
    labels for each hit, and display the result in the 'Marker_Detect' window."""
    canvas = copy.deepcopy(img_origin)
    gray = cv.cvtColor(canvas, cv.COLOR_BGR2GRAY)
    marker_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, marker_dict)
    if ids is None:
        cv.putText(canvas, "No Aruco_Markers in sight!", (50, 50), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2, cv.LINE_AA)
    else:
        # (id, x, y) of the first corner of each detected marker, for labeling
        labels = [[ids[k][0], corners[k][0][0][0], corners[k][0][0][1]] for k in range(len(corners))]
        rvec, tvec, _ = aruco.estimatePoseSingleMarkers(corners, 0.05, camera_matrix, dist_coefs)
        for k in range(rvec.shape[0]):
            aruco.drawAxis(canvas, camera_matrix, dist_coefs, rvec[k, :, :], tvec[k, :, :], 0.03)
        aruco.drawDetectedMarkers(canvas, corners, ids)
        for marker_id, x, y in labels:
            cv.putText(canvas, 'id='+str(marker_id), (x, y), cv.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2, cv.LINE_AA)
    cv.namedWindow('Marker_Detect', cv.WINDOW_NORMAL)
    cv.resizeWindow('Marker_Detect', (SHOW_WIDTH, int(SHOW_WIDTH*480/640)))
    cv.moveWindow('Marker_Detect', 50, 50)
    cv.imshow('Marker_Detect', canvas)
def detectMarkersMaster(img_origin):
    """Experimental detection pipeline: shows the original frame, its Canny edge
    map, probabilistic Hough line segments (green) and Harris corners (red).

    Improvements vs. the original: removed the large block of commented-out
    sub-pixel-corner dead code, dropped the unused `drawing` buffer, and
    translated the Chinese comments to English. Behavior is unchanged.

    :param img_origin: BGR frame; a deep copy is annotated, the input is untouched.
    """
    img = copy.deepcopy(img_origin)
    cv.namedWindow('Origin_Img', cv.WINDOW_NORMAL)
    cv.moveWindow('Origin_Img', 650, 50)
    cv.resizeWindow('Origin_Img', (SHOW_WIDTH, int(SHOW_WIDTH*480/640)))
    cv.imshow('Origin_Img', img)
    cv.namedWindow('Canny_Img', cv.WINDOW_NORMAL)
    cv.moveWindow('Canny_Img', 1250, 50)
    cv.resizeWindow('Canny_Img', (SHOW_WIDTH, int(SHOW_WIDTH*480/640)))
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    edges = cv.Canny(gray, 100, 200)
    cv.imshow('Canny_Img', edges)
    # TODO: tune the Hough parameters; current values look empirically chosen.
    lines_p = cv.HoughLinesP(edges, 0.5, np.pi / 180, 90, minLineLength=10, maxLineGap=15)
    if lines_p is not None:
        # draw each detected segment in green (visible in the 'dst' window below)
        for line in lines_p:
            x1, y1, x2, y2 = line[0]
            cv.line(img, (x1, y1), (x2, y2), (0, 255, 0), 3, lineType=cv.LINE_AA)
    # Find Harris corners and overlay them in red on top of the Hough lines.
    gray = np.float32(gray)
    dst = cv.cornerHarris(gray, 2, 3, 0.04)
    dst = cv.dilate(dst, None)
    img[dst > 0.01*dst.max()] = [0, 0, 255]
    cv.imshow('dst', img)
def main():
    """Open camera 1 and run both marker-detection pipelines on every frame.
    ESC quits; 's' starts a recording via saveVideo()."""
    cap = cv.VideoCapture(1)
    num = 1
    if not cap.isOpened():
        print ('Failed to open the camera...')
        return -1
    while cap.isOpened():
        ret, frame = cap.read()
        detectMarkersOrigin(frame)
        detectMarkersMaster(frame)
        key = cv.waitKey(1) & 0xff
        if key == 27:
            print ("close window for keyboard break")
            break
        elif key == ord('s'):
            print ('Start to record video...')
            saveVideo(cap, num)
            num += 1
    cap.release()
    cv.destroyAllWindows()
# Script entry point: only run the capture loop when executed directly.
if __name__ == "__main__":
    main()
| [
"lintao209@outlook.com"
] | lintao209@outlook.com |
65c961193678438ef37cfc7bff2d0c2383ddd805 | d767da4400de5b0d17bab56eeb678b4ff1052913 | /harifile.py | 1b506a1cd942ed997fb8479f420f87147594750d | [] | no_license | HARI5KRISHNAN/newproject | 9b792f8960bb0656b8e82e9d4a15284f0e72aff1 | d3dec2ce5b025e730157cf83d9df82278d9dda93 | refs/heads/master | 2021-01-05T05:00:00.639829 | 2020-02-16T12:34:21 | 2020-02-16T12:34:21 | 240,888,234 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28 | py | print("welocme to eduerka")
| [
"root@ip-172-31-25-170.ap-southeast-1.compute.internal"
] | root@ip-172-31-25-170.ap-southeast-1.compute.internal |
106c381e4786484cc282efd00c668dcd74a0a99b | 420b062ab05f6dcbe718acfbb7fa92e380b03d25 | /runit_sfdc.py | de72019a86fd4cd675992b4640518d513d99dc89 | [] | no_license | fkoncz/sfdc---PoC-code | e130e5f8addfbd7dcbca55b5a93a3657baa694b6 | 076f27cddac17a7be65a04469cd509373ae62156 | refs/heads/master | 2022-04-03T10:23:24.234233 | 2020-02-14T03:10:00 | 2020-02-14T03:10:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,884 | py | import requests
import xml.dom.minidom
import collections
import time
import os
import zipfile
import base64
import beatbox
from runit_sfdc_ui import *
from random import choice
from string import ascii_lowercase
from Config.config_sfdc import *
from simple_salesforce import Salesforce
# Module-level Salesforce client used by every helper below.
# NOTE(review): this logs in at import time, so importing the module performs
# network I/O — confirm that is intended.
sf = Salesforce(username=ADMIN1_USERNAME, password=ADMIN1_PASSWORD, security_token=ADMIN1_TOKEN)
def main():
    """Drive the whole PoC scenario: prepare admin access, create four test
    users, then walk four attack paths (account compromise, data exfiltration
    and two insider-threat variants) to trigger the corresponding alerts.

    BUG FIX vs. the original: user 4's profile name was built from two adjacent
    string literals with no separating space ('Force.com - App' 'Subscription
    User' -> 'Force.com - AppSubscription User'), so the profile lookup could
    never match the 'Force.com - App Subscription User' profile that is used
    again at the end of this function. It is now a single correct literal.
    """
    # -----Admin 1--Getting global Administrator Session ID.
    admin_sid = get_user_sid(ADMIN1_USERNAME, ADMIN1_PTK)
    # Admin 1--Making sure we will be able to manipulate without any identification
    set_ip_range(sysadmin_profile_name, admin_sid)
    # -----Super-Admin-----
    # -----Admin 1--Because of weak lockout policy, it triggers
    # Security Control: Lockout effective period -super-admin
    change_lockout_period(admin_sid)
    # -----Admin 1--Disable clickjack protection for customer Visualforce pages with standard headers
    disable_clickjack_with_standard_headers(admin_sid)
    # -----Admin 1--Creating 4 users - due to license limitations,
    # the other 2 will be Force.com Free users.
    create_user(LSL_USER1_USERNAME, LSL_USER1_ALIAS, LSL_USER1_USERNAME, LSL_USER1_USERNAME, 'Standard Platform User')
    create_user(LSL_USER2_USERNAME, LSL_USER2_ALIAS, LSL_USER2_USERNAME, LSL_USER2_USERNAME, 'Force.com - Free User')
    create_user(LSL_USER3_USERNAME, LSL_USER3_ALIAS, LSL_USER3_USERNAME, LSL_USER3_USERNAME, 'Force.com - Free User')
    create_user(LSL_USER4_USERNAME, LSL_USER4_ALIAS, LSL_USER4_USERNAME, LSL_USER4_USERNAME,
                'Force.com - App Subscription User')
    # -----Admin 1--set IP range (for admin profile) - making sure we
    # will be able to manipulate without any identification
    set_ip_range(sysadmin_profile_name, admin_sid)
    # Path 1: Account compromise -- User1
    # -----User 1--brute force login, Attacker brute forced account successfully,
    # triggers Threat: Failed login(e.g. 5 average, 2x)
    switch_user_profile_or_role(LSL_USER1_USERNAME, 'System Administrator')
    # failUserLogins(SFDC_TEST_USER1, "X", num_failed_attempts)
    # -----User 1--Login from remote triggers UBA Risk User: High, activity from unseen browser,
    # device, OS, unseen location(including unseen IPs v2) (score approx: 45-50)
    # failUserLogins(SFDC_TEST_USER1, SFDC_TEST_USER1_PASSWORD, num_failed_attempts, tor_proxy_ip,
    # tor_proxy_port, "Mozilla/1.0 (Windows CE 0.1; Win63; x63; rv:1.1) GeckoX/20100101 Firebug/0.1")
    # -----User 1-----UBA Risk User: 10x High, Data export --- Instead of this,
    # Attacker set Trusted IP Range to enable backdoor access, triggers Policy alert.
    # To verify, in the UI this is at "Network Access"
    set_trusted_ip_range(howmany_trusted_ip_range_sets, 'lsl-TrustRange-' + random_string_generator(4), '192.168.0.11',
                         '192.168.0.200', LSL_USER1_USERNAME, default_user_password)
    switch_user_profile_or_role(LSL_USER1_USERNAME, 'Standard Platform User')
    # Path 2: Data exfiltration -- User2
    # -----User 2--Grant Admin permissions
    switch_user_profile_or_role(LSL_USER2_USERNAME, 'System Administrator')
    # -----User 2--60+(configurable) Mass Transfer to another account,
    # triggers UBA Risk User: Medium, Mass Transfer+After-hr.
    # Creating given numbers of mockup account data to have something to transfer.
    LSL_USER2_FULLNAME = get_user_full_name(LSL_USER2_USERNAME)
    admin1_full_name = get_user_full_name(ADMIN1_USERNAME)
    create_mockup_account(howManyMockupAccounts, ADMIN1_USERNAME)
    mass_transfer(LSL_USER2_USERNAME, default_user_password, admin1_full_name, LSL_USER2_FULLNAME,
                  how_many_mass_transfers)
    switch_user_profile_or_role(LSL_USER2_USERNAME, 'Force.com - Free User')
    # Path#3: Insider Threat--User3
    # -----User 3--Admin grant excessive permissions to insider user, triggers Policy alert:
    # Profile/Change user permissions
    switch_user_profile_or_role(LSL_USER3_USERNAME, 'System Administrator')
    # -----User 3--We deploy new Sharing Rules as an insider threat.
    # We have some static XML content and if we want to add multiple rules,
    # don't want to add the header all the time.
    # create some mockup sharing rules.
    create_zip_objects()
    add_lead_sharing_rule(how_many_sharing_rules, "Read")
    close_rules()
    deploy_zipfile(LSL_USER3_USERNAME, default_user_password)
    # -----User 3--3-Insider user is corrupted by a vendor, he helped vendor to extend
    # contract term, triggers Policy alert: Contract Create+Update
    response = create_mockup_contract(LSL_USER3_USERNAME, "lsl-Account-firstMockup", "3", "2016-03-01")
    update_contract(response['id'])
    # -----User 3--4-Before termination, insider user also Mass deleting data,
    # triggers UBA Risk User: High, Mass Delete
    for x in range(0, howManyMassDelete):
        create_mockup_account(howManyMockupAccounts, LSL_USER3_USERNAME)
        mass_delete(LSL_USER3_USERNAME, default_user_password)
        print("Mass Delete iteration nr.: " + str(x))
    # -----User 3--Policy alert: Change user profile
    switch_user_profile_or_role(LSL_USER3_USERNAME, 'Force.com - Free User')
    # Path 4: Insider Threat--User4
    # -----User 4--UBA Risk User: 20x Medium, Reports export, Report Run
    # 2 - The 3rd party has the permission to access sensitive data and function,
    # he run and export the reports, sale to competitor, triggers UBA Risk User: Medium,
    # Reports exported, Report Run
    # 3 - The 3rd party also export data, triggers UBA Risk User: High, Data Export
    # 4 - For all report activities by the 3rd party, stand out in KSI:
    # Top customer report run and Top customer report exported
    switch_user_profile_or_role(LSL_USER4_USERNAME, 'System Administrator')
    report_name = create_report(howManyReportsCreate, LSL_USER4_USERNAME, default_user_password, "Accounts")
    export_report(how_many_export_reports, report_name, LSL_USER4_USERNAME, default_user_password)
    switch_user_profile_or_role(LSL_USER4_USERNAME, 'Force.com - App Subscription User')
# Creating a user
def create_user(username, alias, email, last_name, profile_name):
    """Create a Salesforce user with fixed locale defaults and set the default password.

    If creation fails (typically because the user already exists), fall back to
    re-activating the existing user and resetting the password; if even that
    fails, just reset the password.

    :param username: Salesforce username
    :param alias: short user alias
    :param email: user email address
    :param last_name: user last name
    :param profile_name: Profile name, resolved to an Id via get_profile_id()
    :return: None
    """
    profile_id = get_profile_id(profile_name)
    try:
        # NOTE(review): REST field names are usually 'Username'/'ProfileId';
        # the lowercase 'profile_id' key here looks suspect — confirm the
        # profile is actually applied on creation.
        sf.User.create({'userName': username,
                        'Alias': alias,
                        'Email': email,
                        'lastName': last_name,
                        'EmailEncodingKey': 'UTF-8',
                        'TimeZoneSidKey': 'America/New_York',
                        'LocaleSidKey': 'en_US',
                        'profile_id': profile_id,
                        'LanguageLocaleKey': 'en_US'})
        set_password(username, default_user_password)
    except Exception as e:  # broad on purpose: any create failure falls through to re-activation
        try:
            activate_user(username)
            set_password(username, default_user_password)
        except Exception as e:  # last resort: only reset the password
            set_password(username, default_user_password)
def get_user_full_name(username):
    """Return "First Last" for the given username, or just the last name when
    FirstName is null.

    BUG FIX: the original called .pop() on the record list twice — once to
    extract the first name and again for the last name — which raised
    IndexError whenever the query returned a single record (the normal case).
    The record is now popped once and both fields read from it.

    :param username: Salesforce username to look up
    :return: full name string
    """
    userinfo = sf.query("SELECT FirstName, LastName FROM User WHERE username = '" + username + "'")
    records = list(collections.OrderedDict(userinfo).values())[2]
    record = collections.OrderedDict(records.pop())
    firstname = list(record.values())[1]
    lastname = list(record.values())[2]
    if firstname is None:
        return lastname
    return firstname + " " + lastname
# Resetting a user's password
def set_password(username, default_user_password):
    """Reset a user's password to the given value via the beatbox SOAP client.

    :param username: Salesforce username whose password is reset
    :param default_user_password: new password to set
    :return: None
    """
    uid = get_user_id(username)
    print("\nDefaulting Password for user with UID: " + uid + "\n")
    # beatbox is used here because simple_salesforce has no setPassword call
    sf2 = beatbox.PythonClient()
    sf2.login(ADMIN1_USERNAME, ADMIN1_PASSWORD)
    try:
        sf2.setPassword(uid, default_user_password)
    except Exception as e:
        # NOTE(review): failures are silently swallowed — confirm intended
        pass
# Login for all users, keep session Ids
def get_user_sid(username, password):
    """Log in through the SOAP partner API and return the session Id.

    :param username: Salesforce username
    :param password: password (plus security token where required) for the SOAP login
    :return: session Id string on success; None (implicitly) after printing the
        raw response on failure
    """
    login_headers = {
        'content-type': 'text/xml',
        'charset': 'UTF-8',
        'SOAPAction': 'login'
    }
    login_envelope = """
    <soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" xmlns:urn="urn:enterprise.soap.sforce.com">
    <soapenv:Header>
    </soapenv:Header>
    <soapenv:Body>
    <urn:login>
    <urn:username>""" + '' + username + '' + """</urn:username>
    <urn:password>""" + '' + password + '' + """</urn:password>
    </urn:login>
    </soapenv:Body>
    </soapenv:Envelope>
    """
    login_response = requests.post(partnerURL, login_envelope, headers=login_headers)
    dom = xml.dom.minidom.parseString(login_response.text)
    user_sid_result = dom.getElementsByTagName('sessionId')
    # NOTE(review): on a failed login the response has no <sessionId> element,
    # so user_sid_result[0] raises IndexError before the None check runs.
    if user_sid_result[0].firstChild.nodeValue is None:
        print("\nI wasn't successful. Error was:\n")
        print(login_response.text + '\n')
    else:
        user_sid = user_sid_result[0].firstChild.nodeValue
        return user_sid
# This is useful in general to manipulate any user's details
def get_user_id(username):
    """Return the Salesforce Id of the user with the given username.

    The query result is an OrderedDict whose third value is the record list;
    the Id is the second value of the (single) record.
    """
    result = sf.query("SELECT Id FROM User WHERE username = '" + username + "'")
    records = list(collections.OrderedDict(result).values())[2]
    record = collections.OrderedDict(records.pop())
    return list(record.values())[1]
def get_user_profile_id(which_user):
    """Return the ProfileId assigned to the given user, or None when the
    query comes back empty (a message is printed in that case)."""
    outcome = sf.query("SELECT ProfileId FROM User where username = '" + which_user + "'")
    records = list(collections.OrderedDict(outcome).values())[2]
    if not records:
        print("Could not get System Administrator Profile Id. Continuing...\n")
        return None
    return list(collections.OrderedDict(records.pop()).values())[1]
def get_profile_id(profile_name):
    """Return the Id of the Profile with the given name, or None when the
    query comes back empty (a message is printed in that case)."""
    outcome = sf.query("SELECT Id FROM Profile WHERE name = '" + profile_name + "'")
    records = list(collections.OrderedDict(outcome).values())[2]
    if not records:
        print("Could not get System Administrator Profile Id. Continuing...\n")
        return None
    return list(collections.OrderedDict(records.pop()).values())[1]
def switch_user_profile_or_role(user1, user1_profile, user2_profile=None, how_many_times=None):
    """Assign `user1_profile` to `user1`, or — when `how_many_times` is given —
    repeatedly flip the user between `user1_profile` and `user2_profile`.

    :param user1: username whose profile is changed
    :param user1_profile: profile name to assign (and to flip back to)
    :param user2_profile: second profile name, only used in flip-flop mode
    :param how_many_times: number of flip-flop iterations; None = single assignment
    :return: None
    """
    if how_many_times is None:
        user_id = get_user_id(user1)
        switch_to_profile_id = get_profile_id(user1_profile)
        sf.User.update(user_id, {'ProfileId': '' + switch_to_profile_id + ''})
    else:
        while how_many_times > 0:
            user_id = get_user_id(user1)
            # NOTE(review): return value discarded — this only issues an extra
            # query; confirm whether these two calls can be removed.
            get_user_profile_id(user1)
            switch_between1 = get_profile_id(user1_profile)
            switch_between2 = get_profile_id(user2_profile)
            sf.User.update(user_id, {'ProfileId': '' + switch_between2 + ''})
            print("The " + user1 + "'s profile switched from " + switch_between1 + " to " + switch_between2 +
                  " Profile Id.")
            get_user_profile_id(user1)
            sf.User.update(user_id, {'ProfileId': '' + switch_between1 + ''})
            print("The " + user1 + "'s profile switched from " + switch_between2 + " to " + switch_between1 +
                  " Profile Id.")
            print("UserProfile switches left: " + str(how_many_times - 1))
            how_many_times -= 1
# Reactivate a user if existing
def activate_user(username):
    """Re-activate the named user when the IsActive flag is false.

    BUG FIX: the original contained `itemlist = (userinfo.values())[2]` — dict
    views are not subscriptable in Python 3, so every call raised TypeError
    before doing any work. The dead, broken assignment has been removed.

    :param username: Salesforce username to check/activate
    :return: None
    """
    userinfo = sf.query("SELECT IsActive FROM User WHERE username = '" + username + "'")
    records = list(collections.OrderedDict(userinfo).values())[2]
    is_active = list(collections.OrderedDict(records.pop()).values())[1]
    if not is_active:
        print("User exists, but is not active. Activating.")
        sf.User.update(get_user_id(username), {'IsActive': 'true'})
    else:
        print("User is active, no need to re-enable.")
def create_mockup_account(how_many, owner):
    """Create `how_many` throwaway Account records owned by `owner`.

    The first account always gets the fixed name 'lsl-Account-firstMockup'
    (other parts of the scenario reference it by name); the rest get random
    suffixes. Returns the list of created account names.
    """
    owner_id = get_user_id(owner)
    sf.Account.create({'type': 'Account',
                       'Name': 'lsl-Account-firstMockup',
                       'Website': 'http://www.IamJustAtestWebSite.com',
                       'owner_id': owner_id})
    acc_list = ['lsl-Account-firstMockup']
    for _ in range(how_many - 1):
        acc_name = "lsl-Account-" + random_string_generator(8)
        owner_id = get_user_id(owner)
        sf.Account.create({'type': 'Account',
                           'Name': acc_name,
                           'Website': 'http://www.IamJustAtestWebSite.com',
                           'owner_id': owner_id})
        print("Some mockup Account " + acc_name + " for user: " + owner + " created.")
        acc_list.append(acc_name)
    print("Following mockup Accounts have been created: " + str(acc_list))
    return acc_list
def get_account_id(account_name):
    """Return the Salesforce Id of the Account with the given name."""
    result = sf.query("SELECT Id FROM Account WHERE Name = '" + account_name + "'")
    records = list(collections.OrderedDict(result).values())[2]
    return list(collections.OrderedDict(records.pop()).values())[1]
def create_mockup_contract(owner, account_name, contract_term, start_date):
    """Create a mockup Contract on the named Account.

    :param owner: username that becomes the contract owner
    :param account_name: Account name to attach the contract to
    :param contract_term: contract term (months), as a string
    :param start_date: contract start date, 'YYYY-MM-DD'
    :return: the sf.Contract.create() response (contains the new record 'id')
    """
    account_id = get_account_id(account_name)
    response = sf.Contract.create({'AccountId': account_id,
                                   'ContractTerm': contract_term,
                                   'StartDate': start_date,
                                   'owner_id': get_user_id(owner)})
    print("Mockup contract for Account " + account_id + " created.")
    return response
def update_contract(user_id):
    """Extend an existing Contract's term to 75 months.

    :param user_id: Contract record Id. NOTE(review): misnamed — callers pass a
        Contract Id (see main()), not a User Id; kept for interface compatibility.
    :return: None
    """
    sf.Contract.update(user_id, {'ContractTerm': '75'})
def set_ip_range(profile_name, admin_sid):
    """Open the login IP range 0.0.0.0-255.255.255.255 on the given profile via
    the Metadata API updateMetadata call, so logins are never blocked by IP.

    BUG FIX: the <success> element carries the *string* "true"/"false"; the
    original did a truthiness test on it, and any non-empty string is truthy,
    so failures ("false") were reported as success. The value is now compared
    to "true" explicitly (and a missing element is treated as failure instead
    of raising IndexError).

    :param profile_name: full name of the Profile metadata record to update
    :param admin_sid: administrator session Id for the SOAP call
    :return: None (prints the outcome; dumps the raw response on failure)
    """
    update_metadata_envelope = """
    <env:Envelope xmlns:env="http://schemas.xmlsoap.org/soap/envelope/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
    <env:Header>
    <urn:SessionHeader xmlns:urn="http://soap.sforce.com/2006/04/metadata">
    <urn:sessionId>""" + admin_sid + """</urn:sessionId>
    </urn:SessionHeader>
    </env:Header>
    <env:Body>
    <updateMetadata xmlns="http://soap.sforce.com/2006/04/metadata">
    <metadata xsi:type="Profile">
    <fullName>""" + profile_name + """</fullName>
    <loginIpRanges>
    <endAddress>255.255.255.255</endAddress>
    <startAddress>0.0.0.0</startAddress>
    </loginIpRanges>
    </metadata>
    </updateMetadata>
    </env:Body>
    </env:Envelope>
    """
    soap_response = requests.post(metadata_url, update_metadata_envelope, headers=updateMetadataHeader)
    dom = xml.dom.minidom.parseString(soap_response.text)
    result_element = dom.getElementsByTagName('success')
    if result_element and result_element[0].firstChild.nodeValue == "true":
        print("Login IP range successfully set.")
    else:
        print("I've encountered an issue. Request response:\n")
        print(soap_response.text + "\n")
        return None
def change_lockout_period(admin_sid):
    """Weaken the org's password policy by setting the lockout interval to the
    configured `lockout_interval` via the Metadata API.

    BUG FIX: same truthiness defect as set_ip_range() — <success> holds the
    string "true"/"false", and a non-empty "false" is truthy, so failures were
    reported as success. Now compared to "true" explicitly.

    :param admin_sid: administrator session Id for the SOAP call
    :return: None (prints the outcome; dumps the raw response on failure)
    """
    soap_body = """
    <env:Envelope xmlns:env="http://schemas.xmlsoap.org/soap/envelope/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
    <env:Header>
    <urn:SessionHeader xmlns:urn="http://soap.sforce.com/2006/04/metadata">
    <urn:sessionId>""" + admin_sid + """</urn:sessionId>
    </urn:SessionHeader>
    </env:Header>
    <env:Body>
    <updateMetadata xmlns="http://soap.sforce.com/2006/04/metadata">
    <metadata xsi:type="SecuritySettings">
    <fullName>*</fullName>
    <passwordPolicies>
    <lockoutInterval>""" + lockout_interval + """</lockoutInterval>
    </passwordPolicies>
    </metadata>
    </updateMetadata>
    </env:Body>
    </env:Envelope>
    """
    soap_result = requests.post(metadata_url, soap_body, headers=updateMetadataHeader)
    dom = xml.dom.minidom.parseString(soap_result.text)
    result_element = dom.getElementsByTagName('success')
    if result_element and result_element[0].firstChild.nodeValue == "true":
        print("New Lockout time successfully set.")
    else:
        print("I've encountered an issue. Request response:\n")
        print(soap_result.text + "\n")
        return None
def disable_clickjack_with_standard_headers(admin_sid):
    """Disable clickjack protection for customer Visualforce pages with
    standard headers via the Metadata API session settings.

    BUG FIX: same truthiness defect as set_ip_range() — <success> holds the
    string "true"/"false", and a non-empty "false" is truthy, so failures were
    reported as success. Now compared to "true" explicitly.

    :param admin_sid: administrator session Id for the SOAP call
    :return: None (prints the outcome; dumps the raw response on failure)
    """
    soap_body = """
    <env:Envelope xmlns:env="http://schemas.xmlsoap.org/soap/envelope/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
    <env:Header>
    <urn:SessionHeader xmlns:urn="http://soap.sforce.com/2006/04/metadata">
    <urn:sessionId>""" + admin_sid + """</urn:sessionId>
    </urn:SessionHeader>
    </env:Header>
    <env:Body>
    <updateMetadata xmlns="http://soap.sforce.com/2006/04/metadata">
    <metadata xsi:type="SecuritySettings">
    <fullName>*</fullName>
    <sessionSettings>
    <enableClickjackNonsetupUser>false</enableClickjackNonsetupUser>
    </sessionSettings>
    </metadata>
    </updateMetadata>
    </env:Body>
    </env:Envelope>
    """
    soap_result = requests.post(metadata_url, soap_body, headers=updateMetadataHeader)
    dom = xml.dom.minidom.parseString(soap_result.text)
    result_element = dom.getElementsByTagName('success')
    if result_element and result_element[0].firstChild.nodeValue == "true":
        print("Successfully disabled clickjack protection for customer Visualforce pages with standard headers.")
    else:
        print("I've encountered an issue. Request response:\n")
        print(soap_result.text + "\n")
        return None
def random_string_generator(nr):
    """Return a random string of `nr` lowercase ASCII letters.

    :param nr: number of characters to generate
    :return: random lowercase string of length `nr`
    """
    letters = [choice(ascii_lowercase) for _ in range(nr)]
    return ''.join(letters)
def create_zip_objects():
    """Create the staging directory and write the XML preambles for the
    SharingRules rule file and the deployment package manifest.

    IMPROVEMENT: uses os.makedirs(..., exist_ok=True) instead of the original
    exists-check followed by a bare `except: pass`, which silently swallowed
    every error (including permission problems), not just "already exists".

    :return: None
    """
    os.makedirs(os.path.dirname(rulefile), exist_ok=True)
    with open(rulefile, "w") as f:
        f.write("""<?xml version="1.0" encoding="UTF-8"?>
<SharingRules xmlns="http://soap.sforce.com/2006/04/metadata">""" + "\n")
    with open('./tmp/unpackaged/package.xml', "w") as f:
        f.write("""<?xml version="1.0" encoding="UTF-8"?>
<Package xmlns="http://soap.sforce.com/2006/04/metadata">
    <types>
        <members>*</members>
        <name>SharingRules</name>
    </types>
    <version>35.0</version>
</Package>""" + "\n")
def add_lead_sharing_rule(how_many, access_level):
    """Append `how_many` randomly named <sharingOwnerRules> entries to the rule
    file opened in append mode; each grants `access_level` to all internal users.

    :param how_many: number of rules to append
    :param access_level: access level string written into each rule (e.g. "Read")
    :return: None
    """
    for _ in range(how_many):
        full_name = "lsl_" + random_string_generator(4)
        label = "lsl-" + random_string_generator(5)
        with open(rulefile, "a") as f:
            f.write(""" <sharingOwnerRules>
        <full_name>""" + full_name + """</full_name>
        <accessLevel>""" + access_level + """</accessLevel>
        <label>""" + label + """</label>
        <sharedTo>
            <allInternalUsers></allInternalUsers>
        </sharedTo>
        <sharedFrom>
            <allInternalUsers></allInternalUsers>
        </sharedFrom>
    </sharingOwnerRules>""" + "\n")
        print("Lead sharing rule with label: " + label + " successfully created.")
def close_rules():
    """Append the closing </SharingRules> tag, finishing the rule file."""
    with open(rulefile, "a+") as f:
        f.write("</SharingRules>" + "\n")
def get_report_id(report_name, as_user, as_password):
    """Resolve the ID of the report named ``report_name``, logged in as ``as_user``.

    :param report_name: exact report name to look up (interpolated into SOQL).
    :param as_user: username to authenticate with.
    :param as_password: password for ``as_user``.
    :return: ``(report_id, user_sid)`` on success, ``None`` if no report matched.
    """
    user_sid = get_user_sid(as_user, as_password)
    sf2 = Salesforce(instance_url=instanceURL, session_id=user_sid)
    # NOTE(review): report_name is concatenated straight into the SOQL string;
    # a name containing a quote will break (or subvert) the query. Kept as-is
    # since this is an internal tool, but worth sanitising.
    query = sf2.query("SELECT Id FROM Report WHERE Name = '" + report_name + "'")
    # The query response is an OrderedDict whose third value is the 'records'
    # list. The previous code pop()ed that list *twice* (once to extract the
    # Id, once for the emptiness check), which mutated it and raised
    # IndexError whenever exactly one record was returned. Check first, then
    # read the last record without mutating.
    records = list(collections.OrderedDict(query).values())[2]
    if len(records) == 0:
        print("Could not get report_id.\n")
        return None
    # Each record is an OrderedDict of ('attributes', 'Id'); take the Id value
    # of the last record, matching the original pop() semantics.
    report_id = list(collections.OrderedDict(records[-1]).values())[1]
    return report_id, user_sid
def export_report(how_many, report_name, as_user, as_password):
    """Download the report ``report_name`` as CSV ``how_many`` times.

    Each export is saved to a randomly named ``lsl-report-<rand>.csv`` file in
    the current directory.

    :param how_many: number of times to download the report.
    :param report_name: name of the report to export.
    :param as_user: username to authenticate with.
    :param as_password: password for ``as_user``.
    :return: None (also when the report could not be resolved).
    """
    lookup = get_report_id(report_name, as_user, as_password)
    # get_report_id() returns None when the report does not exist; the previous
    # unconditional tuple unpacking crashed with a TypeError in that case.
    if lookup is None:
        print("Could not export report, report ID lookup failed.\n")
        return None
    (report_id, user_sid) = lookup
    for _ in range(how_many):
        response = requests.get(instanceURL + "/" + report_id + "?view=d&snip&export=1&enc=UTF-8&excel=1",
                                headers=sf.headers, cookies={'sid': user_sid})
        # Context manager guarantees the handle is closed even if write() fails.
        with open("lsl-report-" + random_string_generator(4) + ".csv", 'w') as f:
            f.write(response.text)
def deploy_zipfile(as_user, as_password):
    """Zip the ./tmp directory and deploy it via the Metadata API SOAP endpoint.

    Authenticates as ``as_user``, packages everything under ``./tmp`` into the
    module-level ``packageZipFile``, submits it with a ``deploy`` SOAP call,
    then (after a fixed 2 s wait) polls once with ``checkDeployStatus``.

    :param as_user: username to authenticate with.
    :param as_password: password for ``as_user``.
    :return: None -- both on failure paths and after a successful deploy.
    """
    user_sid = get_user_sid(as_user, as_password)
    # Build the deployment zip from everything under ./tmp, stripping the
    # "./tmp" prefix so archive paths are relative to the package root.
    new_zip = zipfile.ZipFile(packageZipFile, "w")
    dir_path = './tmp'
    len_dir_path = len(dir_path)
    for root, _, files in os.walk(dir_path):
        for file in files:
            file_path = os.path.join(root, file)
            new_zip.write(file_path, file_path[len_dir_path:])
    new_zip.close()
    # The Metadata API expects the zip as a base64-encoded string in the
    # <ZipFile> element of the deploy request.
    with open(packageZipFile, "rb") as f:
        bytes_read = f.read()
    encoded = base64.b64encode(bytes_read)
    b64code = encoded.decode("utf-8")
    deploy_headers = {
        'content-type': 'text/xml',
        'charset': 'UTF-8',
        'SOAPAction': 'deploy'
    }
    deploy_body = """<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" xmlns:met="http://soap.sforce.com/2006/04/metadata">
    <soapenv:Header>
        <met:SessionHeader>
            <sessionId>""" + user_sid + """</sessionId>
        </met:SessionHeader>
    </soapenv:Header>
    <soapenv:Body>
        <deploy xmlns="http://soap.sforce.com/2006/04/metadata">
            <ZipFile>""" + b64code + """</ZipFile>
            <DeployOptions>
                <allowMissingFiles>false</allowMissingFiles>
                <autoUpdatePackage>true</autoUpdatePackage>
                <checkOnly>false</checkOnly>
                <ignoreWarnings>false</ignoreWarnings>
                <performRetrieve>false</performRetrieve>
                <rollbackOnError>true</rollbackOnError>
                <runAllTests>false</runAllTests>
                <singlePackage>false</singlePackage>
            </DeployOptions>
        </deploy>
    </soapenv:Body>
    </soapenv:Envelope>"""
    soap_result = requests.post(metadata_url, deploy_body, headers=deploy_headers)
    # A successful deploy call returns an async process ID in an <id> element;
    # anything else is treated as a failure and the raw response is dumped.
    dom = xml.dom.minidom.parseString(soap_result.text)
    result_element = dom.getElementsByTagName('id')
    result_value = result_element[0].firstChild.nodeValue
    if len(result_value) == 0:
        print("I've encountered an issue. Request response:\n")
        print(soap_result.text + "\n")
        return None
    else:
        if result_element[0].firstChild.nodeValue:
            print("Got deployment ID.")
            did = result_element[0].firstChild.nodeValue
        else:
            print("I've encountered an issue. Request response:\n")
            print(soap_result.text + "\n")
            return None
    # NOTE(review): single fixed 2 s wait, then a *single* status poll -- a
    # deployment still in progress after 2 s will be reported as a failure.
    time.sleep(2)
    check_deploy_headers = {
        'content-type': 'text/xml',
        'charset': 'UTF-8',
        'SOAPAction': 'checkDeployStatus'
    }
    check_deploy_status = """<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" xmlns:met="http://soap.sforce.com/2006/04/metadata">
    <soapenv:Header>
        <met:SessionHeader>
            <sessionId>""" + user_sid + """</sessionId>
        </met:SessionHeader>
    </soapenv:Header>
    <soapenv:Body>
        <met:checkDeployStatus>
            <met:asyncProcessId>""" + did + """</met:asyncProcessId>
            <met:includeDetails>true</met:includeDetails>
        </met:checkDeployStatus>
    </soapenv:Body>
    </soapenv:Envelope>"""
    soap_result = requests.post(metadata_url, check_deploy_status, headers=check_deploy_headers)
    # Only the literal status 'Succeeded' counts as success.
    dom = xml.dom.minidom.parseString(soap_result.text)
    result_element = dom.getElementsByTagName('status')
    result_value = result_element[0].firstChild.nodeValue
    if len(result_value) == 0:
        print("I've encountered an issue. Request response:\n")
        print(soap_result.text + "\n")
        return None
    else:
        if result_element[0].firstChild.nodeValue == 'Succeeded':
            print("Deployment succeeded.")
        else:
            print("I've encountered an issue. Request response:\n")
            print(soap_result.text + "\n")
            return None
# UBA Risk User: 10x High, Set Trusted IP range.
def set_trusted_ip_range(count, description, start_ip, end_ip, owner, password):
    """Repeatedly add and then remove a trusted IP range via SecuritySettings.

    Each loop iteration posts an ``updateMetadata`` call that *sets* the
    network access to the single given range, immediately followed by a call
    with an empty ``<networkAccess>`` element that *clears* it -- i.e. the
    range is added and deleted ``count`` times in total (used to generate
    UBA risk events; see the comment above this function).

    :param count: number of add/delete cycles to perform.
    :param description: description text for the IP range.
    :param start_ip: first address of the range.
    :param end_ip: last address of the range.
    :param owner: username to authenticate with.
    :param password: password for ``owner``.
    :return: None
    """
    user_sid = get_user_sid(owner, password)
    # SOAP envelope split around the <networkAccess> body so the same skeleton
    # can be posted with or without an <ipRanges> element.
    soap_body_part1 = """
    <env:Envelope xmlns:env="http://schemas.xmlsoap.org/soap/envelope/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
    <env:Header>
        <urn:SessionHeader xmlns:urn="http://soap.sforce.com/2006/04/metadata">
            <urn:sessionId>""" + user_sid + """</urn:sessionId>
        </urn:SessionHeader>
    </env:Header>
    <env:Body>
        <updateMetadata xmlns="http://soap.sforce.com/2006/04/metadata">
            <metadata xsi:type="SecuritySettings">
                <fullName>*</fullName>
                <networkAccess>"""
    soap_body_part2 = """
            </networkAccess>
        </metadata>
    </updateMetadata>
    </env:Body>
    </env:Envelope>
    """
    while count > 0:
        ip_range = """
        <ipRanges>
            <description>""" + description + """</description>
            <start>""" + start_ip + """</start>
            <end>""" + end_ip + """</end>
        </ipRanges>"""
        # Add the range ...
        requests.post(metadata_url, soap_body_part1 + ip_range + soap_body_part2, headers=updateMetadataHeader)
        # NOTE(review): the messages report str(count), which counts *down*
        # from the initial value, not the number of operations performed so far.
        print("Added trusted IP Range " + str(count) + " time(s).")
        # ... and immediately clear it again (empty networkAccess).
        requests.post(metadata_url, soap_body_part1 + soap_body_part2, headers=updateMetadataHeader)
        print("Deleted trusted IP Ranges " + str(count) + " times.")
        count -= 1
# Script entry point when executed directly; main() is defined earlier in this file.
if __name__ == "__main__":
    main()
| [
"fkoncz@outlook.com"
] | fkoncz@outlook.com |
61bb250a081ee773aa1aa16082aa203ec3a1eefd | 0378c82c8bcd5501732acc27d1d33b3230f9a393 | /KYLIN_USB/sources/spark/load/sources/020-mds_elt_niveau.py | 1d935431623fd660c2a5018af23ee80f7d2fefbf | [] | no_license | bmwalid/Controlla | 85718a94e47acdc2a90ee06b951282954dc1ed11 | c63febd9aeb1ae268b79ade1880d867a888554d0 | refs/heads/master | 2020-04-29T16:15:27.646599 | 2019-03-19T16:40:36 | 2019-03-19T16:40:36 | 176,252,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,726 | py | # import Libraries
from pyspark.conf import SparkConf
from pyspark.sql import SparkSession
from pyspark.sql.types import *
# init sparkConf: Kryo serialization and executor sizing for this job
conf = SparkConf()
conf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer") \
    .set("spark.executor.cores", "1") \
    .set("spark.executor.memory", "1G")
# Initialize Spark Session.
# BUG FIX: the session was previously built with config(conf=SparkConf()),
# i.e. a brand-new, empty SparkConf that silently discarded the serializer
# and executor settings configured above. Pass the prepared `conf` instead.
spark = SparkSession.builder.appName("020-mds_elt_niveau").config(conf=conf).enableHiveSupport().getOrCreate()
# Data path: gzipped, pipe-delimited, headerless extracts on S3
path = "s3://decathlon-ods/ods/mds_elt_niveau/*.gz"
# From gz files on S3 to Spark Dataframe
df = spark.read.option("header", "false").option("delimiter", "|").csv(path)
# Rename and cast the positional CSV columns, then write the result as
# snappy-compressed parquet into the Hive table kylin_usb_mqb.mds_elt_niveau
# (overwriting any previous contents, repartitioned to 80 files).
df.select("_c0", "_c1", "_c2", "_c6", "_c7", "_c3", "_c4", "_c5") \
    .withColumn("org_num_organisation_niv", df["_c0"].cast(IntegerType())) \
    .withColumn("niv_num_niveau_niv", df["_c1"].cast(StringType())) \
    .withColumn("eln_num_elt_niveau", df["_c2"].cast(IntegerType())) \
    .withColumn("tlb_typ_libelle_lib", df["_c6"].cast(StringType())) \
    .withColumn("lib_num_libelle_lib", df["_c7"].cast(IntegerType())) \
    .withColumn("org_num_organisation_sup", df["_c3"].cast(IntegerType())) \
    .withColumn("niv_num_niveau_sup", df["_c4"].cast(IntegerType())) \
    .withColumn("eln_num_elt_niveau_sup", df["_c5"].cast(IntegerType())) \
    .select("org_num_organisation_niv", "niv_num_niveau_niv", "eln_num_elt_niveau", "tlb_typ_libelle_lib",
            "lib_num_libelle_lib", "org_num_organisation_sup", "niv_num_niveau_sup", "eln_num_elt_niveau_sup") \
    .repartition(80).write.option("compression", "snappy").mode("overwrite").format("parquet").saveAsTable(
    "kylin_usb_mqb.mds_elt_niveau")
# stopping session
spark.sparkContext.stop()
| [
"bmwalide@gmail.com"
] | bmwalide@gmail.com |
413fe0cf74f78a5479abcffb6ba6f1b944f65717 | 59b87e4892a583e1eafaeca8582320b3db6e4435 | /.c9/metadata/environment/products/views.py | 756c77246def161c889dabccb347e037f5dd2284 | [] | no_license | cgaynor91/E-Commerce | 65e112b4a2c66725d27a65847686c497574d1f58 | f7e3e81358f494cd16768e4aba73b19bc16a29ab | refs/heads/master | 2021-07-11T23:26:22.895787 | 2020-03-11T21:47:28 | 2020-03-11T21:47:28 | 246,412,701 | 0 | 0 | null | 2021-06-10T22:39:00 | 2020-03-10T21:32:01 | Python | UTF-8 | Python | false | false | 15,846 | py | {"filter":false,"title":"views.py","tooltip":"/products/views.py","ace":{"folds":[],"scrolltop":0,"scrollleft":0,"selection":{"start":{"row":6,"column":65},"end":{"row":6,"column":65},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":0},"hash":"43957824f031b60e8831b61c96c6e4a720a0ef77","undoManager":{"mark":39,"position":39,"stack":[[{"start":{"row":1,"column":0},"end":{"row":1,"column":1},"action":"insert","lines":["d"],"id":2}],[{"start":{"row":1,"column":0},"end":{"row":1,"column":1},"action":"remove","lines":["d"],"id":3}],[{"start":{"row":1,"column":0},"end":{"row":1,"column":1},"action":"insert","lines":["f"],"id":4},{"start":{"row":1,"column":1},"end":{"row":1,"column":2},"action":"insert","lines":["r"]},{"start":{"row":1,"column":2},"end":{"row":1,"column":3},"action":"insert","lines":["o"]},{"start":{"row":1,"column":3},"end":{"row":1,"column":4},"action":"insert","lines":["m"]}],[{"start":{"row":1,"column":4},"end":{"row":1,"column":5},"action":"insert","lines":[" 
"],"id":5},{"start":{"row":1,"column":5},"end":{"row":1,"column":6},"action":"insert","lines":["."]},{"start":{"row":1,"column":6},"end":{"row":1,"column":7},"action":"insert","lines":["m"]},{"start":{"row":1,"column":7},"end":{"row":1,"column":8},"action":"insert","lines":["o"]},{"start":{"row":1,"column":8},"end":{"row":1,"column":9},"action":"insert","lines":["d"]},{"start":{"row":1,"column":9},"end":{"row":1,"column":10},"action":"insert","lines":["e"]},{"start":{"row":1,"column":10},"end":{"row":1,"column":11},"action":"insert","lines":["l"]},{"start":{"row":1,"column":11},"end":{"row":1,"column":12},"action":"insert","lines":["s"]}],[{"start":{"row":1,"column":12},"end":{"row":1,"column":13},"action":"insert","lines":[" "],"id":6},{"start":{"row":1,"column":13},"end":{"row":1,"column":14},"action":"insert","lines":["i"]},{"start":{"row":1,"column":14},"end":{"row":1,"column":15},"action":"insert","lines":["m"]},{"start":{"row":1,"column":15},"end":{"row":1,"column":16},"action":"insert","lines":["p"]},{"start":{"row":1,"column":16},"end":{"row":1,"column":17},"action":"insert","lines":["o"]},{"start":{"row":1,"column":17},"end":{"row":1,"column":18},"action":"insert","lines":["r"]},{"start":{"row":1,"column":18},"end":{"row":1,"column":19},"action":"insert","lines":["t"]}],[{"start":{"row":1,"column":19},"end":{"row":1,"column":20},"action":"insert","lines":[" 
"],"id":7},{"start":{"row":1,"column":20},"end":{"row":1,"column":21},"action":"insert","lines":["P"]},{"start":{"row":1,"column":21},"end":{"row":1,"column":22},"action":"insert","lines":["r"]},{"start":{"row":1,"column":22},"end":{"row":1,"column":23},"action":"insert","lines":["o"]},{"start":{"row":1,"column":23},"end":{"row":1,"column":24},"action":"insert","lines":["d"]},{"start":{"row":1,"column":24},"end":{"row":1,"column":25},"action":"insert","lines":["u"]},{"start":{"row":1,"column":25},"end":{"row":1,"column":26},"action":"insert","lines":["c"]},{"start":{"row":1,"column":26},"end":{"row":1,"column":27},"action":"insert","lines":["t"]}],[{"start":{"row":3,"column":0},"end":{"row":4,"column":0},"action":"insert","lines":["",""],"id":8},{"start":{"row":4,"column":0},"end":{"row":4,"column":1},"action":"insert","lines":["d"]},{"start":{"row":4,"column":1},"end":{"row":4,"column":2},"action":"insert","lines":["e"]},{"start":{"row":4,"column":2},"end":{"row":4,"column":3},"action":"insert","lines":["f"]}],[{"start":{"row":4,"column":3},"end":{"row":4,"column":4},"action":"insert","lines":[" 
"],"id":9},{"start":{"row":4,"column":4},"end":{"row":4,"column":5},"action":"insert","lines":["a"]},{"start":{"row":4,"column":5},"end":{"row":4,"column":6},"action":"insert","lines":["l"]},{"start":{"row":4,"column":6},"end":{"row":4,"column":7},"action":"insert","lines":["l"]},{"start":{"row":4,"column":7},"end":{"row":4,"column":8},"action":"insert","lines":["_"]},{"start":{"row":4,"column":8},"end":{"row":4,"column":9},"action":"insert","lines":["p"]},{"start":{"row":4,"column":9},"end":{"row":4,"column":10},"action":"insert","lines":["r"]},{"start":{"row":4,"column":10},"end":{"row":4,"column":11},"action":"insert","lines":["o"]},{"start":{"row":4,"column":11},"end":{"row":4,"column":12},"action":"insert","lines":["d"]},{"start":{"row":4,"column":12},"end":{"row":4,"column":13},"action":"insert","lines":["u"]},{"start":{"row":4,"column":13},"end":{"row":4,"column":14},"action":"insert","lines":["c"]},{"start":{"row":4,"column":14},"end":{"row":4,"column":15},"action":"insert","lines":["t"]}],[{"start":{"row":4,"column":15},"end":{"row":4,"column":16},"action":"insert","lines":["s"],"id":10}],[{"start":{"row":4,"column":16},"end":{"row":4,"column":18},"action":"insert","lines":["()"],"id":11}],[{"start":{"row":4,"column":17},"end":{"row":4,"column":18},"action":"insert","lines":["r"],"id":12},{"start":{"row":4,"column":18},"end":{"row":4,"column":19},"action":"insert","lines":["e"]},{"start":{"row":4,"column":19},"end":{"row":4,"column":20},"action":"insert","lines":["q"]},{"start":{"row":4,"column":20},"end":{"row":4,"column":21},"action":"insert","lines":["u"]},{"start":{"row":4,"column":21},"end":{"row":4,"column":22},"action":"insert","lines":["e"]},{"start":{"row":4,"column":22},"end":{"row":4,"column":23},"action":"insert","lines":["s"]},{"start":{"row":4,"column":23},"end":{"row":4,"column":24},"action":"insert","lines":["t"]}],[{"start":{"row":4,"column":25},"end":{"row":4,"column":26},"action":"insert","lines":[":"],"id":13}],[{"start":{"row":4,"column
":26},"end":{"row":5,"column":0},"action":"insert","lines":["",""],"id":14},{"start":{"row":5,"column":0},"end":{"row":5,"column":4},"action":"insert","lines":[" "]},{"start":{"row":5,"column":4},"end":{"row":5,"column":5},"action":"insert","lines":["p"]},{"start":{"row":5,"column":5},"end":{"row":5,"column":6},"action":"insert","lines":["r"]},{"start":{"row":5,"column":6},"end":{"row":5,"column":7},"action":"insert","lines":["o"]},{"start":{"row":5,"column":7},"end":{"row":5,"column":8},"action":"insert","lines":["d"]},{"start":{"row":5,"column":8},"end":{"row":5,"column":9},"action":"insert","lines":["u"]},{"start":{"row":5,"column":9},"end":{"row":5,"column":10},"action":"insert","lines":["c"]},{"start":{"row":5,"column":10},"end":{"row":5,"column":11},"action":"insert","lines":["t"]},{"start":{"row":5,"column":11},"end":{"row":5,"column":12},"action":"insert","lines":["s"]}],[{"start":{"row":5,"column":12},"end":{"row":5,"column":13},"action":"insert","lines":[" "],"id":15},{"start":{"row":5,"column":13},"end":{"row":5,"column":14},"action":"insert","lines":["="]}],[{"start":{"row":5,"column":14},"end":{"row":5,"column":15},"action":"insert","lines":[" 
"],"id":16},{"start":{"row":5,"column":15},"end":{"row":5,"column":16},"action":"insert","lines":["P"]},{"start":{"row":5,"column":16},"end":{"row":5,"column":17},"action":"insert","lines":["r"]},{"start":{"row":5,"column":17},"end":{"row":5,"column":18},"action":"insert","lines":["o"]},{"start":{"row":5,"column":18},"end":{"row":5,"column":19},"action":"insert","lines":["d"]},{"start":{"row":5,"column":19},"end":{"row":5,"column":20},"action":"insert","lines":["u"]},{"start":{"row":5,"column":20},"end":{"row":5,"column":21},"action":"insert","lines":["c"]},{"start":{"row":5,"column":21},"end":{"row":5,"column":22},"action":"insert","lines":["t"]}],[{"start":{"row":5,"column":22},"end":{"row":5,"column":23},"action":"insert","lines":["."],"id":17},{"start":{"row":5,"column":23},"end":{"row":5,"column":24},"action":"insert","lines":["o"]},{"start":{"row":5,"column":24},"end":{"row":5,"column":25},"action":"insert","lines":["b"]},{"start":{"row":5,"column":25},"end":{"row":5,"column":26},"action":"insert","lines":["j"]},{"start":{"row":5,"column":26},"end":{"row":5,"column":27},"action":"insert","lines":["e"]},{"start":{"row":5,"column":27},"end":{"row":5,"column":28},"action":"insert","lines":["c"]},{"start":{"row":5,"column":28},"end":{"row":5,"column":29},"action":"insert","lines":["t"]},{"start":{"row":5,"column":29},"end":{"row":5,"column":30},"action":"insert","lines":["s"]}],[{"start":{"row":5,"column":30},"end":{"row":5,"column":31},"action":"insert","lines":["."],"id":18},{"start":{"row":5,"column":31},"end":{"row":5,"column":32},"action":"insert","lines":["a"]},{"start":{"row":5,"column":32},"end":{"row":5,"column":33},"action":"insert","lines":["l"]},{"start":{"row":5,"column":33},"end":{"row":5,"column":34},"action":"insert","lines":["l"]}],[{"start":{"row":5,"column":34},"end":{"row":5,"column":36},"action":"insert","lines":["()"],"id":19}],[{"start":{"row":5,"column":35},"end":{"row":6,"column":0},"action":"insert","lines":["",""],"id":20},{"start":{"row
":6,"column":0},"end":{"row":6,"column":8},"action":"insert","lines":[" "]}],[{"start":{"row":6,"column":4},"end":{"row":6,"column":8},"action":"remove","lines":[" "],"id":21},{"start":{"row":6,"column":0},"end":{"row":6,"column":4},"action":"remove","lines":[" "]},{"start":{"row":5,"column":35},"end":{"row":6,"column":0},"action":"remove","lines":["",""]}],[{"start":{"row":5,"column":36},"end":{"row":6,"column":0},"action":"insert","lines":["",""],"id":22},{"start":{"row":6,"column":0},"end":{"row":6,"column":4},"action":"insert","lines":[" "]},{"start":{"row":6,"column":4},"end":{"row":6,"column":5},"action":"insert","lines":["r"]},{"start":{"row":6,"column":5},"end":{"row":6,"column":6},"action":"insert","lines":["e"]},{"start":{"row":6,"column":6},"end":{"row":6,"column":7},"action":"insert","lines":["u"]}],[{"start":{"row":6,"column":6},"end":{"row":6,"column":7},"action":"remove","lines":["u"],"id":23}],[{"start":{"row":6,"column":6},"end":{"row":6,"column":7},"action":"insert","lines":["t"],"id":24},{"start":{"row":6,"column":7},"end":{"row":6,"column":8},"action":"insert","lines":["i"]},{"start":{"row":6,"column":8},"end":{"row":6,"column":9},"action":"insert","lines":["r"]},{"start":{"row":6,"column":9},"end":{"row":6,"column":10},"action":"insert","lines":["n"]}],[{"start":{"row":6,"column":9},"end":{"row":6,"column":10},"action":"remove","lines":["n"],"id":25},{"start":{"row":6,"column":8},"end":{"row":6,"column":9},"action":"remove","lines":["r"]},{"start":{"row":6,"column":7},"end":{"row":6,"column":8},"action":"remove","lines":["i"]}],[{"start":{"row":6,"column":7},"end":{"row":6,"column":8},"action":"insert","lines":["u"],"id":26},{"start":{"row":6,"column":8},"end":{"row":6,"column":9},"action":"insert","lines":["r"]},{"start":{"row":6,"column":9},"end":{"row":6,"column":10},"action":"insert","lines":["n"]}],[{"start":{"row":6,"column":10},"end":{"row":6,"column":11},"action":"insert","lines":[" 
"],"id":27},{"start":{"row":6,"column":11},"end":{"row":6,"column":12},"action":"insert","lines":["r"]},{"start":{"row":6,"column":12},"end":{"row":6,"column":13},"action":"insert","lines":["e"]},{"start":{"row":6,"column":13},"end":{"row":6,"column":14},"action":"insert","lines":["d"]}],[{"start":{"row":6,"column":13},"end":{"row":6,"column":14},"action":"remove","lines":["d"],"id":28}],[{"start":{"row":6,"column":13},"end":{"row":6,"column":14},"action":"insert","lines":["n"],"id":29},{"start":{"row":6,"column":14},"end":{"row":6,"column":15},"action":"insert","lines":["d"]},{"start":{"row":6,"column":15},"end":{"row":6,"column":16},"action":"insert","lines":["e"]},{"start":{"row":6,"column":16},"end":{"row":6,"column":17},"action":"insert","lines":["r"]}],[{"start":{"row":6,"column":17},"end":{"row":6,"column":19},"action":"insert","lines":["()"],"id":30}],[{"start":{"row":6,"column":18},"end":{"row":6,"column":19},"action":"insert","lines":["r"],"id":31},{"start":{"row":6,"column":19},"end":{"row":6,"column":20},"action":"insert","lines":["e"]},{"start":{"row":6,"column":20},"end":{"row":6,"column":21},"action":"insert","lines":["q"]},{"start":{"row":6,"column":21},"end":{"row":6,"column":22},"action":"insert","lines":["u"]},{"start":{"row":6,"column":22},"end":{"row":6,"column":23},"action":"insert","lines":["e"]},{"start":{"row":6,"column":23},"end":{"row":6,"column":24},"action":"insert","lines":["s"]},{"start":{"row":6,"column":24},"end":{"row":6,"column":25},"action":"insert","lines":["t"]},{"start":{"row":6,"column":25},"end":{"row":6,"column":26},"action":"insert","lines":[","]}],[{"start":{"row":6,"column":26},"end":{"row":6,"column":27},"action":"insert","lines":[" 
"],"id":32}],[{"start":{"row":6,"column":27},"end":{"row":6,"column":29},"action":"insert","lines":["\"\""],"id":33}],[{"start":{"row":6,"column":28},"end":{"row":6,"column":29},"action":"insert","lines":["p"],"id":34},{"start":{"row":6,"column":29},"end":{"row":6,"column":30},"action":"insert","lines":["r"]},{"start":{"row":6,"column":30},"end":{"row":6,"column":31},"action":"insert","lines":["o"]},{"start":{"row":6,"column":31},"end":{"row":6,"column":32},"action":"insert","lines":["d"]},{"start":{"row":6,"column":32},"end":{"row":6,"column":33},"action":"insert","lines":["u"]},{"start":{"row":6,"column":33},"end":{"row":6,"column":34},"action":"insert","lines":["c"]},{"start":{"row":6,"column":34},"end":{"row":6,"column":35},"action":"insert","lines":["t"]},{"start":{"row":6,"column":35},"end":{"row":6,"column":36},"action":"insert","lines":["s"]},{"start":{"row":6,"column":36},"end":{"row":6,"column":37},"action":"insert","lines":["."]},{"start":{"row":6,"column":37},"end":{"row":6,"column":38},"action":"insert","lines":["h"]},{"start":{"row":6,"column":38},"end":{"row":6,"column":39},"action":"insert","lines":["t"]},{"start":{"row":6,"column":39},"end":{"row":6,"column":40},"action":"insert","lines":["m"]},{"start":{"row":6,"column":40},"end":{"row":6,"column":41},"action":"insert","lines":["l"]}],[{"start":{"row":6,"column":42},"end":{"row":6,"column":43},"action":"insert","lines":[","],"id":35}],[{"start":{"row":6,"column":43},"end":{"row":6,"column":44},"action":"insert","lines":[" 
"],"id":36}],[{"start":{"row":6,"column":44},"end":{"row":6,"column":46},"action":"insert","lines":["{}"],"id":37}],[{"start":{"row":6,"column":45},"end":{"row":6,"column":47},"action":"insert","lines":["\"\""],"id":38}],[{"start":{"row":6,"column":46},"end":{"row":6,"column":47},"action":"insert","lines":["p"],"id":39},{"start":{"row":6,"column":47},"end":{"row":6,"column":48},"action":"insert","lines":["r"]},{"start":{"row":6,"column":48},"end":{"row":6,"column":49},"action":"insert","lines":["o"]},{"start":{"row":6,"column":49},"end":{"row":6,"column":50},"action":"insert","lines":["d"]},{"start":{"row":6,"column":50},"end":{"row":6,"column":51},"action":"insert","lines":["u"]},{"start":{"row":6,"column":51},"end":{"row":6,"column":52},"action":"insert","lines":["c"]},{"start":{"row":6,"column":52},"end":{"row":6,"column":53},"action":"insert","lines":["t"]},{"start":{"row":6,"column":53},"end":{"row":6,"column":54},"action":"insert","lines":["s"]}],[{"start":{"row":6,"column":55},"end":{"row":6,"column":56},"action":"insert","lines":[":"],"id":40}],[{"start":{"row":6,"column":56},"end":{"row":6,"column":57},"action":"insert","lines":[" "],"id":41},{"start":{"row":6,"column":57},"end":{"row":6,"column":58},"action":"insert","lines":["p"]},{"start":{"row":6,"column":58},"end":{"row":6,"column":59},"action":"insert","lines":["r"]},{"start":{"row":6,"column":59},"end":{"row":6,"column":60},"action":"insert","lines":["o"]},{"start":{"row":6,"column":60},"end":{"row":6,"column":61},"action":"insert","lines":["d"]},{"start":{"row":6,"column":61},"end":{"row":6,"column":62},"action":"insert","lines":["u"]},{"start":{"row":6,"column":62},"end":{"row":6,"column":63},"action":"insert","lines":["c"]},{"start":{"row":6,"column":63},"end":{"row":6,"column":64},"action":"insert","lines":["t"]},{"start":{"row":6,"column":64},"end":{"row":6,"column":65},"action":"insert","lines":["s"]}]]},"timestamp":1583336017716} | [
"ubuntu@ip-172-31-36-97.ec2.internal"
] | ubuntu@ip-172-31-36-97.ec2.internal |
d9cb619d465d33a6f910d50958beb4ca360e904b | 47a15501446aa286f89e9ac0f751945d6b86829b | /bqdc.py | 261399d44024c3c85083a7f55f72e7373b1f8f09 | [
"MIT"
] | permissive | karlo0/bqdc | 219ddbf71c0b85a4419eb2759c3a7b57c87afe33 | 1aca9dcd2c519e9ade1988bdee47cf98d51c10f8 | refs/heads/master | 2023-08-24T00:42:19.044385 | 2023-08-14T23:43:35 | 2023-08-17T22:18:04 | 203,542,535 | 0 | 0 | MIT | 2023-08-17T22:18:05 | 2019-08-21T08:39:04 | Python | UTF-8 | Python | false | false | 51,143 | py | r"""
bqdc.py
Python Module to download, upload metadata (Datacatalog Tags, BigQuery table, field descriptions and schema) from Bigquery Tables and its attached tags in DataCatalog.
It can synchronize table descriptions and field descriptions from within BigQuery and construct tags in DataCatalog for this metadata.
The main functions are the following:
- The download function stores metadata in an Excel .xlsx file
- The upload function uses the metadata from an Excel .xlsx file (e.g. obtained from a previous download which has since then been updated) and uploads it to BigQuery and DataCatalog
- The synchronize function downloads the metadata and uploads it again to BigQuery and DataCatalog. This can be used to synchronize table and field description metadata that is found in
one of the two GCP apps to upload it to the other too, if it has not been there before
Please check the jupyter notebooks for more detailed information.
The module can only be used when the following conditions are met:
- 2 tag templates in DataCatalog are specified:
- A tag template that is used to attach tags to whole BigQuery tables, in the following referred to as table tag template
- A tag template that is used to attach tags to fields of BigQuery tables, in the following referred to as field tag template
- The table tag template is required to have an attribute with key name 'table_description', that is intended to store table descriptions similar to the attribute 'description' of the BigQuery 'Table' class
- The field tag template is required to have an attribute with key name 'field_description', that is intended to store field/column descriptions similar to the attribute 'description' of the BigQuery 'SchemaField' class
Version: 0.1
Author: Karsten Leonhardt
Date: 21.08.2019
"""
# Connect to the Google Data Catalog python modul
from google.cloud import datacatalog_v1beta1
# Connect to the Google BigQuery python modul
from google.cloud import bigquery
# Connect to the Google Authentification python modul
from google.oauth2 import service_account
import pandas as pd
from collections import OrderedDict
import os
import shutil
import re
import glob
from functools import reduce
class clients:
    """Holds authenticated Google Cloud clients for Data Catalog and BigQuery.

    Instantiated with a GCP project ID and a service-account key file; exposes
    ``dc_client`` (Data Catalog) and ``bq_client`` (BigQuery) plus a few
    shared constants used by subclasses.
    """
    # Alias for BigQuery's table reference type, kept as a class attribute.
    type_table_ref_bq = bigquery.TableReference
    # BigQuery maximum string length
    bq_max_string_length = 1024
    # DataCatalog maximum string length
    dc_max_string_length = 2000
    # the current path
    CDIR = os.path.dirname(os.path.realpath(__file__))
    def __init__(self, PROJECT_ID, PATH_SERVICE_ACCOUNT_KEY):
        """Create credentials and both GCP clients for project ``PROJECT_ID``.

        :param PROJECT_ID: GCP project whose BigQuery/Data Catalog resources
            will be accessed.
        :param PATH_SERVICE_ACCOUNT_KEY: full path to the service-account
            JSON key file used for authentication.
        """
        self.P_ID = PROJECT_ID
        # Dataset ID is populated later by callers; starts unset.
        self.DS_ID = None
        """get credentials through service account file"""
        self.credentials = service_account.Credentials.from_service_account_file(PATH_SERVICE_ACCOUNT_KEY)
        """establish a datacatalog client"""
        self.dc_client = datacatalog_v1beta1.DataCatalogClient(credentials=self.credentials)
        """establish a BigQuery client"""
        self.bq_client = bigquery.Client(project=PROJECT_ID, credentials = self.credentials)
class toolbox(clients):
    """Main workhorse built on top of :class:`clients`.

    Downloads, uploads and synchronizes metadata (table/field descriptions,
    Data Catalog tags) between BigQuery tables and Data Catalog, using Excel
    files as the on-disk exchange format (see the module docstring).
    """
    # Captures a BigQuery table description up to a trailing "Table attributes"
    # section, so the pure description text can be extracted.
    pattern_table_descr_bq_pure = re.compile(r"(?P<descr>^[\s\S]*?)\s*Table attributes")
    # max length of sheet names in Excel
    excel_max_sheet_name_length = 31
    # Name of the overview worksheet written to the Excel export.
    overview_sheet_name = 'metadata_of_tables'
    def __init__(self, PROJECT_ID, PATH_SERVICE_ACCOUNT_KEY = None, prefer_bq_for_downlad_update = True, logfile = '', do_print_log = False):
        """
        Establish connections to the BigQuery and DataCatalog clients and set up
        the internal state used for downloading/uploading/synchronizing metadata.

        Parameters:
        -----------
        PROJECT_ID: String
            GCP project ID whose BigQuery and DataCatalog resources are requested.
        PATH_SERVICE_ACCOUNT_KEY: String, None (Default)
            Full path to the JSON service-account key file. If None, a single
            ``*.json`` file is searched for in this module's directory and used;
            an exception is raised when zero or several candidates are found.
        prefer_bq_for_downlad_update: False, True (Default)
            (sic -- the parameter name contains the typo "downlad".) When True,
            the BigQuery table description is preferred over the DataCatalog
            description when both exist.
        logfile: String, '' (Default)
            When non-empty, log messages are collected for a logfile of this
            name; when empty, no logfile is written.
        do_print_log: True, False (Default)
            If True, print log status messages to stdout.

        Return:
        -------
        Instance of class 'toolbox'
        """
        # NOTE(review): assert is stripped under "python -O"; an explicit raise
        # would be more robust for input validation.
        assert isinstance(PROJECT_ID, str), "The 'PROJECT_ID' argument requires a string to specify the project ID to the GCP project for which BigQuery and DataCatalog resources are requested."
        if PATH_SERVICE_ACCOUNT_KEY is None:
            # Auto-discover the key: accept exactly one .json file next to this module.
            service_key_list = glob.glob('*.json')
            if len(service_key_list) == 1:
                PATH_SERVICE_ACCOUNT_KEY = os.path.join(self.CDIR, service_key_list[0])
            elif len(service_key_list) == 0:
                raise Exception("No service account key found in the current folder. Please initialise the object with the 'PATH_SERVICE_ACCOUNT_KEY' argument set to the full path (including the json filename with .json extension) of the service account key")
            else:
                raise Exception("There are more than one .json files in the current folder. Please initialise the object with the 'PATH_SERVICE_ACCOUNT_KEY' argument set to the full path (including the json filename with .json extension) of the service account key")
        # Delegate client construction to the clients base class.
        super().__init__(PROJECT_ID, PATH_SERVICE_ACCOUNT_KEY)
        # Per-table working state, populated by later download/upload calls.
        self.sheet = None
        self.ds_table_tags_df = None
        self.ds_field_tags_dicts = None
        self.table_instance_dc = None
        self.table_instance_bq = None
        self.__table_id = None
        self.__table_id_dc = ''
        self.__prefer_bq_for_downlad_update = prefer_bq_for_downlad_update
        self.__update = False
        self.__do_print_log = do_print_log
        # Only enable log accumulation when a logfile name was provided.
        if len(logfile) > 0:
            self.__do_log = True
            self.__log = ''
            self.__logfile = logfile
        else:
            self.__do_log = False
def init_tag_templates(self, table_tag_template_str = None, field_tag_template_str = None, LOCATION_TAGS = 'us-central1', table_tag_fields_keys_ordered = [], field_tag_fields_keys_ordered = []):
"""
Initializes tag templates. The whole class requires 2 tag templates:
- a tag template whose id is specified by the 'table_tag_template_str' argument and which is used to attach tags to tables themselves
- a tag template whose id is specified by the 'field_tag_template_str' argument and which is used to attach tags to fields of tables
table_tag_template_str: String, None (Default)
specifies an ID of a tag template that is used to attach tags to tables. The default is None, however the initialisation fails when no string is provided
field_tag_template_str: String, None (Default)
specifies an ID of a tag template that is used to attach tags to fields of tables. The default is None, however the initialisation fails when no string is provided
LOCATION_TAGS: String, 'us-central1' (Default)
The location of the tags. At the moment only 'us-central1' is supported
table_tag_fields_keys_ordered: List of Strings, Empty lis (Default)
A list of the table tag template attribute keys ordered in a list. If this is not provided the internal ordering of the attribute keys is used to set up DataFrame columns
field_tag_fields_keys_ordered: List of Strings, Empty lis (Default)
A list of the field tag template attribute keys ordered in a list. If this is not provided the internal ordering of the attribute keys is used to set up DataFrame columns
"""
assert isinstance(table_tag_template_str, str), "A string must be passed for the 'table_tag_template_str' argument to specify an ID of a tag template that is used to attach tags to tables"
assert isinstance(field_tag_template_str, str), "A string must be passed for the 'field_tag_template_str' argument to specify an ID of a tag template that is used to attach tags to fields of tables"
self.TABLE_TAG_TEMPLATE_STR = table_tag_template_str
self.FIELD_TAG_TEMPLATE_STR = field_tag_template_str
TABLE_TAG_TEMPLATE_PATH=self.dc_client.tag_template_path(self.P_ID, LOCATION_TAGS, self.TABLE_TAG_TEMPLATE_STR)
FIELD_TAG_TEMPLATE_PATH=self.dc_client.tag_template_path(self.P_ID, LOCATION_TAGS, self.FIELD_TAG_TEMPLATE_STR)
try:
self.table_tag_template = self.dc_client.get_tag_template(TABLE_TAG_TEMPLATE_PATH)
except:
msg = "Referencing the tag template used for attaching tags to whole tables failed"
raise Exception(msg)
try:
self.field_tag_template = self.dc_client.get_tag_template(FIELD_TAG_TEMPLATE_PATH)
except:
msg = "Referencing the tag template used for attaching tags to fields of tables failed"
raise Exception(msg)
assert 'table_description' in self.table_tag_template.fields.keys(), "The tag template used for attaching tags to whole tables must contain an attribute with key ID = 'table_description'"
assert 'field_description' in self.field_tag_template.fields.keys(), "The tag template used for attaching tags to fields of tables must contain an attribute with key ID = 'field_description'"
self.__table_tag_fields_keys_ordered = self.__check_complete_ordered_list_of_keys(table_tag_fields_keys_ordered, self.table_tag_template.fields.keys())
self.__field_tag_fields_keys_ordered = self.__check_complete_ordered_list_of_keys(field_tag_fields_keys_ordered, self.field_tag_template.fields.keys())
self.__field_keys_to_ndx = {field_keys: k for k, field_keys in enumerate(self.__field_tag_fields_keys_ordered)}
self.__upload_table_description_bq_init()
pass
def __check_complete_ordered_list_of_keys(self, ordered_keys_in, keys):
set_keys_intersect = set(ordered_keys_in).intersection(set(keys))
set_remaining_keys = set(keys) - set_keys_intersect
ordered_keys = [key for key in ordered_keys_in if key in set_keys_intersect]
for key in set_remaining_keys:
ordered_keys.append(key)
return ordered_keys
def set_dataset(self, DS_ID):
self.DS_ID = DS_ID
pass
def get_table_instance_dc(self, table_id, return_instance = False):
resource_name = "//bigquery.googleapis.com/projects/{}/datasets/{}/tables/{}".format(self.P_ID, self.DS_ID, table_id)
self.table_instance_dc = self.dc_client.lookup_entry(linked_resource=resource_name)
if self.__table_id != table_id:
self.__table_id = table_id
if return_instance:
return self.table_instance_dc
else:
pass
def get_table_instance_bq(self, table_x, return_instance = False):
if(isinstance(table_x, self.type_table_ref_bq)):
self.table_instance_bq = self.bq_client.get_table(table_x)
elif(isinstance(table_x, str)):
try:
self.table_instance_bq = self.bq_client.get_table(self.P_ID+'.'+self.DS_ID+'.'+table_x)
except:
raise Exception('The table can not be found under the specified PROJECT_ID/DATASET_ID')
pass
else:
raise Exception('String or table_reference required as argument')
if self.__table_id != self.table_instance_bq.table_id:
self.__table_id = self.table_instance_bq.table_id
self.get_bq_schema_metadata()
if return_instance:
return self.table_instance_bq
else:
pass
def list_all_tags_entry(self, entry = None):
"""
Prints all the tags attached to an entry (here an entry is a table instance)
"""
if entry is None:
entry = self.table_instance_dc
if entry is not None:
for tag in self.dc_client.list_tags(entry.name):
print(tag)
else:
raise Exception('\nNo datacatalog entry instance provided. Call method again as ''list_all_tags_entry(entry)'' with entry a datacatalog entry instance')
pass
    def get_all_tags_table(self, entry = None, delete_tags_not_in_bq_schema = False, make_field_sheet_df = False):
        """
        Collect all Data Catalog tags of a table entry into ``self.tags``.

        ``self.tags`` maps the tag's column name (lower-cased for field tags;
        the raw column - usually '' - for non-field tags such as the table tag)
        to the tag object, or is set to None when the entry carries no tags.

        Parameters
        ----------
        entry: Data Catalog entry of the table; defaults to the cached
            ``self.table_instance_dc``.
        delete_tags_not_in_bq_schema: when True, field tags whose column no
            longer exists in the BigQuery schema are deleted from Data Catalog
            instead of being collected.
        make_field_sheet_df: when True, additionally build ``self.sheet``: the
            BigQuery schema DataFrame joined with the field tag values (one row
            per field) and a single merged 'field_description' column.
        """
        if entry is None:
            entry = self.table_instance_dc
        if entry is not None:
            tags = self.dc_client.list_tags(entry.name)
            # Make sure the cached BigQuery instance/schema belong to the
            # current table; refresh when missing or stale.
            update_table_instance_bq = False
            try:
                if self.__table_id != self.table_instance_bq.table_id:
                    update_table_instance_bq = True
            except:
                update_table_instance_bq = True
            if update_table_instance_bq:
                self.get_table_instance_bq(self.__table_id)
            tag_columns = []
            tag_list = []
            if make_field_sheet_df:
                # One value list per field-tag attribute, in template-key order.
                field_vals = [[] for i in range(len(self.__field_keys_to_ndx))]
                field_names = []
            for tag in tags:
                if tag.template == self.field_tag_template.name:
                    tag_column_lower = tag.column.lower()
                    if tag_column_lower in self.schema_bq_df.index:
                        tag_columns.append(tag_column_lower)
                        tag_list.append(tag)
                        if make_field_sheet_df:
                            field_names.append(tag_column_lower)
                            for attr in self.__field_keys_to_ndx.keys():
                                if attr in tag.fields.keys():
                                    field_vals[self.__field_keys_to_ndx[attr]].append(tag.fields[attr].string_value)
                                else:
                                    field_vals[self.__field_keys_to_ndx[attr]].append('')
                    else:
                        # Field tag for a column no longer in the BQ schema.
                        if delete_tags_not_in_bq_schema:
                            self.dc_client.delete_tag(tag.name)
                else:
                    # Non-field tag (e.g. the table-level tag with column '').
                    tag_columns.append(tag.column)
                    tag_list.append(tag)
            if make_field_sheet_df:
                field_tags_df = pd.DataFrame.from_dict(dict(zip(self.__field_tag_fields_keys_ordered, field_vals))).set_index(pd.Index(field_names)).applymap(lambda x: '' if x is None else x).astype(str).fillna('')
                self.sheet = self.schema_bq_df.join(field_tags_df,lsuffix='_bq', rsuffix='_dc').fillna('')
                n_cols = len(self.sheet.columns)
                # Merge the BQ and DC descriptions into one column: prefer the
                # longer text; when the BQ text is exactly at the BQ length
                # limit (i.e. presumably truncated on upload), re-append the
                # tail stored in Data Catalog.
                self.sheet.insert(n_cols - 1,'field_description', [
                    row['field_description_dc']
                    if ( row['field_description_bq'] is None or len(row['field_description_bq']) == 0 )
                    else row['field_description_bq']
                    if ( len(row['field_description_dc']) == 0 or len(row['field_description_dc']) < len(row['field_description_bq']) )
                    else row['field_description_bq']+row['field_description_dc'][self.bq_max_string_length:]
                    if len(row['field_description_bq']) == self.bq_max_string_length
                    else row['field_description_dc']
                    for index, row in self.sheet.iterrows()
                ])
                self.sheet = self.sheet.drop(columns=['field_description_dc', 'field_description_bq']).astype(str).fillna('').set_index('field_name')
            self.tags = dict(zip(tag_columns, tag_list))
            if len(self.tags) == 0:
                self.tags = None
        else:
            raise Exception('\nNo datacatalog entry instance provided. Call method again as ''list_all_tags_entry(entry)'' with entry a datacatalog entry instance')
        pass
def lookup_and_list_all_tags_entry(self, table_id):
self.list_all_tags_entry(self.get_table_instance_dc(table_id))
pass
def delete_all_tags_entry(self, entry = None):
"""
Deletes all the tags attached to an entry (here an entry is a table instance)
"""
if entry is None:
entry = self.table_instance_dc
if entry is not None:
for tag in self.dc_client.list_tags(entry.name):
self.dc_client.delete_tag(tag.name)
else:
raise Exception('\nNo datacatalog entry instance provided. Call method again as ''delete_all_tags_entry(entry)'' with entry a datacatalog entry instance')
pass
def get_bq_schema_metadata(self, table_instance_bq = None):
if table_instance_bq is None:
table_instance_bq = self.table_instance_bq
if table_instance_bq is not None:
self.schema_bq = table_instance_bq.schema[:]
self.schema_bq_df = pd.DataFrame.from_records((schemafield._key()[0:4] for schemafield in self.schema_bq), columns = ['field_name', 'field_type', 'field_mode', 'field_description']).applymap(lambda x: '' if x is None else x).astype('str').assign(field_name_lower=lambda x: x.field_name.apply(lambda y: y.lower())).set_index('field_name_lower').fillna('')
else:
raise Exception('\nNo BigQuery table instance provided. Call method again as ''get_bq_schema_metadata(entry)'' with entry a BigQuery table instance')
pass
def update_field_tag(self, field_entry_dict, table_instance_dc = None, dict_tags = None):
"""
This function tries to find a field tag with a column field (which is the actual table field name \
and in the code below accessed by tag.column) equals the requested field name as specified with \
field_entry_dict['field_name'].
If such a tag can be found in the DataCatalog for the table instance, then it checks whether the \
field attributes ( specified as the field values of the tag.fields[key] below, where key is a specific \
tag field attribute name (field_format, field_description, field_example)) of the DataCatalog tag have \
different values as the requested/new tag field attributes (which are specified as the values of \
field_entry_dict['field_attributes'][key] where key is again a tag field attribute name)
Only if the new field attribute values differ from the ones in the tag already on Datacatalog,
the tag will be updated.
The function returns:
- True: when the tag has either been updated or does not need to be updated
- False: when the requested tag has not been found, indictating the tag needs to be newly created
"""
if table_instance_dc is None:
table_instance_dc = self.table_instance_dc
if dict_tags is None:
dict_tags = self.tags
found_tag = False
if dict_tags is not None:
field_name = field_entry_dict['field_name']
try:
tag = dict_tags[field_name]
found_tag = True
except KeyError:
pass
update_tag = False
if found_tag:
for key, value in field_entry_dict['field_attributes'].items():
if len(value) > 0:
if key in self.field_tag_template.fields.keys():
if tag.fields[key].string_value != value:
tag.fields[key].string_value = value
update_tag = True
if update_tag:
self.dc_client.update_tag(tag)
return found_tag
    def create_field_tag(self, field_entry_dict, table_instance_dc = None, dict_tags = None):
        """
        Create a field tag on the table entry in Data Catalog, unless an
        existing tag for the same column could be updated instead (see
        update_field_tag).

        Parameters
        ----------
        field_entry_dict: dict with keys
            'field_name': the column the tag belongs to
            'field_attributes': dict mapping tag-template attribute keys to the
                string values to store; empty values and keys unknown to the
                field tag template are skipped.
        table_instance_dc: Data Catalog entry of the table (as returned by
            lookup_entry); defaults to the cached self.table_instance_dc. Its
            'name' member is used as the parent when creating the tag.
        dict_tags: mapping column name -> existing tag; defaults to self.tags.
        """
        if table_instance_dc is None:
            table_instance_dc = self.table_instance_dc
        if dict_tags is None:
            dict_tags = self.tags
        # Only create a new tag when no existing one could be updated.
        if(not self.update_field_tag(field_entry_dict, table_instance_dc = table_instance_dc, dict_tags = dict_tags)):
            new_field_tag = datacatalog_v1beta1.types.Tag()
            new_field_tag.template = self.field_tag_template.name
            create_tag = False
            field_name = field_entry_dict['field_name']
            if(field_name != ''):
                for key, value in field_entry_dict['field_attributes'].items():
                    if len(value) > 0:
                        if key in self.field_tag_template.fields.keys():
                            new_field_tag.fields[key].string_value = value
                            create_tag = True
                if(create_tag):
                    new_field_tag.column = field_name
            # Skip creation entirely when no attribute carried a value.
            if create_tag:
                try:
                    self.dc_client.create_tag(parent=table_instance_dc.name,tag=new_field_tag)
                except:
                    # Best effort: log the failure instead of aborting the
                    # whole upload of the remaining fields.
                    self.to_log('\t\tProblem to write tag to field {} of table {}\n'.format(field_name, self.__table_id))
        pass
def update_table_tag(self, table_entry_dict, table_instance_dc = None, dict_tags = None):
"""
This function tries to find a table tag for the table instance.
If such a tag can be found in the DataCatalog, then it checks whether the field attributes \
( specified as the field values of the tag.fields[key] below, where key is a specific \
tag field attribute name (table_description, table_data_source)) of the DataCatalog tag have \
different values as the requested/new field attributes (which are specified as the values of \
table_entry_dict[key] where key is again a tag field attribute)
Only if the new tag field attribute values differ from the ones in the tag already on Datacatalog,
the tag will be updated.
The function returns:
- True: when the tag has either been updated or does not need to be updated
- False: when the requested tag has not been found, indictating the tag needs to be newly created
"""
if table_instance_dc is None:
table_instance_dc = self.table_instance_dc
if dict_tags is None:
dict_tags = self.tags
found_tag = False
if dict_tags is not None:
try:
tag = dict_tags['']
if tag.template == self.table_tag_template.name:
found_tag = True
except KeyError:
pass
update_tag = False
if found_tag:
for key, value in table_entry_dict.items():
if len(value) > 0:
if key in self.table_tag_template.fields.keys():
if tag.fields[key].string_value != value:
tag.fields[key].string_value = value
update_tag = True
if update_tag:
self.dc_client.update_tag(tag)
return found_tag
    def create_table_tag(self, table_entry_dict, table_instance_dc = None, dict_tags = None):
        """
        Create the table-level tag on the entry in Data Catalog, unless an
        existing table tag could be updated instead (see update_table_tag).

        Parameters
        ----------
        table_entry_dict: dict mapping table-tag-template attribute keys to the
            string values to store; empty values and keys unknown to the table
            tag template are skipped.
        table_instance_dc: Data Catalog entry of the table; defaults to the
            cached self.table_instance_dc.
        dict_tags: mapping column name -> existing tag; defaults to self.tags.
        """
        if table_instance_dc is None:
            table_instance_dc = self.table_instance_dc
        if dict_tags is None:
            dict_tags = self.tags
        # Only create a new tag when no existing one could be updated.
        if(not self.update_table_tag(table_entry_dict, table_instance_dc, dict_tags)):
            new_table_tag = datacatalog_v1beta1.types.Tag()
            new_table_tag.template = self.table_tag_template.name
            create_tag = False
            for key, value in table_entry_dict.items():
                if len(value) > 0:
                    if key in self.table_tag_template.fields.keys():
                        new_table_tag.fields[key].string_value = value
                        create_tag = True
            # Skip creation entirely when no attribute carried a value.
            if create_tag:
                self.dc_client.create_tag(parent=table_instance_dc.name,tag=new_table_tag)
        pass
    def download(self, tables = None, DS_ID = None, PATH=None):
        """
        Downloads metadata of tables in a dataset specified by DS_ID.
        - By default metadata for all tables in the dataset is downloaded in an Excel .xlsx file in a folder that has the name of the dataset. For each table a separate sheet of that .xlsx file is created containing the field_names, field_descriptions and more.
        - Specifying the parameter 'tables' allows to download metadata for a single or a list of tables.
        - For all tables in the dataset table tags metadata is written to a sheet with the name 'metadata_of_tables'
        - The PATH specifies the path where the metadata shall be written.
        - When called from synchronize() (self.__update is True) no Excel file is written; the dataframes only stay in memory.
        Parameters
        ----------
        tables: String, List of Strings, None (default)
            A String or List of Strings specifying the table_ids for which metadata should be downloaded.
            If not provided, metadata for all tables in the dataset is downloaded
        DS_ID: String, None (default)
            dataset_id for which metadata shall be downloaded.
            If no dataset_id is provided via DS_ID, the one specified by the member attribute .DS_ID is used.
        PATH: String, None (default)
            The PATH where the metadata shall be written.
        """
        assert isinstance(tables, list) or isinstance(tables, str) or tables is None, "'Tables' parameter must be String, List or None"
        assert isinstance(DS_ID, str) or DS_ID is None, "'DS_ID' parameter must be String or None"
        assert isinstance(PATH, str) or PATH is None, "'PATH' parameter must be String or None"
        # Remember the active dataset so it can be restored at the end.
        DS_ID_old = self.DS_ID
        if DS_ID is None:
            DS_ID = self.DS_ID
        else:
            self.set_dataset(DS_ID)
        if DS_ID is not None:
            if not self.__update:
                # Plain download: prepare an output folder named after the dataset.
                self.to_log('# Download\n')
                if PATH is not None:
                    PATH_OUT = os.path.join(PATH, DS_ID)
                else:
                    PATH_OUT = os.path.join(self.CDIR, DS_ID)
                if not os.path.exists(PATH_OUT):
                    os.makedirs(PATH_OUT)
            else:
                self.to_log('\n\t# Download\n')
            # Collectors for table-level tags (overview) and per-table field sheets.
            self.overview_sheet = construct_overview_sheet(self.table_tag_template, attributes = self.__table_tag_fields_keys_ordered)
            table_sheets = construct_table_sheets()
            if tables is None:
                # No explicit selection: iterate over every table of the dataset.
                tables = self.bq_client.list_tables("{}.{}".format(self.P_ID, self.DS_ID))
            elif isinstance(tables, str):
                tables = [tables]
            for table in tables:
                # 'table' is either a BigQuery list item (has .table_id) or a plain string.
                try:
                    self.__table_id = table.table_id
                except:
                    self.__table_id = table
                self.to_log('\t{}'.format("Table '{}'".format(self.__table_id)))
                self.to_log('\t\t{}'.format('get BigQuery table instance'))
                self.get_table_instance_bq(self.__table_id)
                self.to_log('\t\t{}'.format('get DataCatalog table instance'))
                self.get_table_instance_dc(self.__table_id)
                self.to_log('\t\t{}'.format('get all tags and create dataframe with out of field tags and BigQuery schema'))
                self.get_all_tags_table(make_field_sheet_df = True)
                self.to_log('\t\t{}'.format('append fields dataframe to dict'))
                table_sheets.append(self.__table_id, self.sheet)
                self.to_log('\t\t{}'.format('append table tag to overview sheet variable'))
                self.append_to_overview_sheet()
            self.to_log('\n\t{}'.format('make Dictionary out of field metadata dataframes for all specified tables'))
            self.ds_field_tags_dicts = table_sheets.get_dict()
            self.to_log('\t{}'.format('make Dataframe out of table tag metadata for all specified tables'))
            self.ds_table_tags_df = self.overview_sheet.get_dataframe()
            if not self.__update:
                # Write one overview sheet plus one sheet per table to the workbook.
                FULLPATH = os.path.join(PATH_OUT, DS_ID+'.xlsx')
                self.to_log('\twrite to {}\n'.format(FULLPATH))
                with pd.ExcelWriter(FULLPATH) as writer:
                    self.ds_table_tags_df.to_excel(writer, sheet_name=self.overview_sheet_name, header=True, index=True)
                    for table_id, table_df in self.ds_field_tags_dicts.items():
                        table_df.to_excel(writer, sheet_name=self.shorten_string(table_id, self.excel_max_sheet_name_length), header=True, index=True)
            self.set_dataset(DS_ID_old)
        else:
            raise Exception("No Dataset specified. Please call the function as 'download(DS_ID=dataset_id)' again with dataset_id a string specifying a dataset ID")
        pass
    def append_to_overview_sheet(self):
        """
        Append the current table's table-level metadata as one row to the
        overview sheet collector (``self.overview_sheet``).

        The row is built from the table tag stored under the '' column key in
        ``self.tags``; when preferring BigQuery and a BQ description exists,
        the BQ table description overrides the tag's 'table_description'.
        When no table tag is available (``self.tags`` is None or has no ''
        entry) only the BQ description - if any - is recorded.
        """
        table_description_bq = self.table_instance_bq.description
        # Strip the auto-generated "Table attributes" section and normalize.
        table_description_bq = self.clean_sentence_string(self.pure_table_description_bq(table_description_bq))
        dict_table_descr_bq = None
        if len(table_description_bq) > 0:
            dict_table_descr_bq = {'table_description': table_description_bq}
        try:
            # Raises (KeyError/TypeError) when no table tag is available,
            # which is handled by the except branch below.
            table_tag = self.tags['']
            # NOTE: the attribute name keeps the original 'downlad' misspelling
            # used at initialisation.
            if self.__prefer_bq_for_downlad_update:
                if len(table_description_bq) > 0:
                    self.overview_sheet.append(self.__table_id, table_tag, dict_table_descr_bq)
                else:
                    self.overview_sheet.append(self.__table_id, table_tag)
            else:
                self.overview_sheet.append(self.__table_id, table_tag)
        except:
            self.overview_sheet.append(self.__table_id, alt_tag_vals = dict_table_descr_bq)
        pass
    def upload(self, tables = None, DS_ID = None, PATH = None, delete_old_tags_before_upload = False, delete_sheet_after_upload = True, upload_from_backup = False):
        """
        uploads metadata of tables in a dataset specified by DS_ID.
        - By default metadata for all tables in the dataset is uploaded from an Excel .xlsx file in a folder that has the name of the dataset. For each table a separate sheet of that .xlsx file is created containing the field_names, field_descriptions and more.
        - Specifying the parameter 'tables' allows to download metadata for a single or a list of tables.
        - For all tables in the dataset table tags metadata is in a sheet with the name 'metadata_of_tables'
        - The PATH specifies the path where the Excel .xlsx file is contained.
        - When called from synchronize() (self.__update is True) the in-memory dataframes from the preceding download() are used instead of an Excel file.
        Parameters
        ----------
        tables: String, List of Strings, None (default)
            A String or List of Strings specifying the table_ids for which metadata should be uploaded.
            If not provided, metadata for all tables in the dataset is uploaded
        DS_ID: String, None (default)
            dataset_id for which metadata shall be uploaded.
            If no dataset_id is provided via DS_ID, the one specified by the member attribute .DS_ID is used.
        PATH: String, None (default)
            The PATH where the metadata shall be read from.
        delete_old_tags_before_upload: True, False (Default)
            If set to True it deletes all tags in the datacatalog for a table instance before writing new ones. If set False the tags in datacalog are updated with the new information but not deleted.
        delete_sheet_after_upload: False, True (Default)
            If True, the folder including the sheet that has been uploaded will be deleted.
        upload_from_backup: True, False (Default)
            if True, use the backup Excel sheets for upload
        """
        assert isinstance(tables, list) or isinstance(tables, str) or tables is None, "'Tables' parameter must be String, List or None"
        assert isinstance(DS_ID, str) or DS_ID is None, "'DS_ID' parameter must be String or None"
        assert isinstance(PATH, str) or PATH is None, "'PATH' parameter must be String or None"
        # Remember the active dataset so it can be restored afterwards.
        DS_ID_old = self.DS_ID
        if DS_ID is None:
            DS_ID = self.DS_ID
        else:
            self.set_dataset(DS_ID)
        self.delete_old_tags_before_upload = delete_old_tags_before_upload
        if DS_ID is not None:
            if not self.__update:
                # Plain upload: read the metadata back from the Excel workbook.
                self.to_log('\n# Upload\n')
                if PATH is None:
                    PATH = os.path.join(self.CDIR, DS_ID)
                if upload_from_backup:
                    PATH = os.path.join(os.path.join(self.CDIR, 'backup_sheets'), DS_ID)
                excel_files = glob.glob(os.path.join(PATH, r"*.xlsx"))
                assert len(excel_files) > 0, "No .xlsx files under the path {}".format(PATH)
                FULLPATH = os.path.join(PATH, DS_ID+'.xlsx')
                try:
                    self.ds_table_tags_df = pd.read_excel(FULLPATH, sheet_name=self.overview_sheet_name, index_col = 0, dtype = str).fillna('').astype(str).applymap(lambda x: x.strip())
                except:
                    msg = 'Reading {} was not successful. Check path and existence of file.'.format(FULLPATH)
                    self.to_log('\t\n{}\n'.format(msg))
                    raise Exception(msg)
                if tables is None:
                    tables = self.ds_table_tags_df.index.to_list()
                else:
                    diff_keys_set = set(tables) - set(self.ds_table_tags_df.index)
                    assert len(diff_keys_set) == 0, "The tables {} are not contained in the spreadsheet.".format(diff_keys_set)
                # Map table_id -> Excel sheet number (sheet 0 is the overview sheet).
                table_to_ndx = {table_id: k+1 for k, table_id in enumerate(self.ds_table_tags_df.index) if table_id in tables}
                self.ds_field_tags_dicts = pd.read_excel(FULLPATH, sheet_name=list(table_to_ndx.values()), index_col = 0, dtype = str)
            else:
                # Update mode (called from synchronize()): reuse the in-memory
                # dataframes produced by the preceding download().
                if tables is None:
                    tables = self.ds_table_tags_df.index.to_list()
                else:
                    diff_keys_set = set(tables) - set(self.ds_table_tags_df.index)
                    assert len(diff_keys_set) == 0, "The tables {} are not contained in the spreadsheet.".format(diff_keys_set)
                self.to_log('\n\t# Upload\n')
                # In update mode the per-table dict is keyed by table_id itself.
                table_to_ndx = {table_id: table_id for table_id in self.ds_table_tags_df.index if table_id in tables}
            for table_id, k in table_to_ndx.items():
                self.to_log('\t{}'.format("Table '{}'".format(table_id)))
                self.__table_id = table_id
                self.to_log('\t\t{}'.format('get BigQuery table instance'))
                self.get_table_instance_bq(table_id)
                self.to_log('\t\t{}'.format('get DataCatalog table instance'))
                self.get_table_instance_dc(table_id)
                self.to_log('\t\t{}'.format('get all tags'))
                self.get_all_tags_table(delete_tags_not_in_bq_schema=True)
                self.to_log('\t\t{}'.format('create table tag dictionary'))
                self.table_tag_dict = dict(self.ds_table_tags_df.loc[table_id])
                self.to_log('\t\t{}'.format('upload table tag'))
                self.upload_table_tag()
                self.to_log('\t\t{}'.format('upload BigQuery table description'))
                self.__upload_table_description_bq()
                self.sheet = self.ds_field_tags_dicts[k].fillna('').astype(str).applymap(lambda x: x.strip())
                self.to_log('\t\t{}'.format('upload BigQuery and DataCatalog field information'))
                self.upload_fields_sheet()
            if not self.__update and delete_sheet_after_upload and not upload_from_backup:
                shutil.rmtree(PATH)
            self.set_dataset(DS_ID_old)
            self.write_log()
        else:
            raise Exception("No Dataset specified. Please call the function as 'upload(DS_ID=dataset_id)' again with dataset_id a string specifying a dataset ID")
        pass
def synchronize(self, tables = None, DS_ID = None):
"""
Synchronizes metadata between Bigquery and Datacatalog of tables in a dataset specified by DS_ID.
- By default metadata for all tables in the dataset is downloaded in an Excel .xlsx file in a folder that has the name of the dataset. For each table a separate sheet of that .xlsx file is created containing the field_names, field_descriptions and more.
- Specifying the parameter 'tables' allows to download metadata for a single or a list of tables.
- For all tables in the dataset table tags metadata is written to a sheet with the name 'metadata_of_tables'
Parameters
----------
tables: String, List of Strings, None (default)
A String or List of Strings specifying the table_ids for which metadata should be downloaded.
If not provided, metadata for all tables in the dataset is downloaded
DS_ID: String, None (default)
dataset_id for which metadata shall be downloaded.
If no dataset_id is provided via DS_ID, the one specified by the member attribute .DS_ID is used which is by default 'sadc_generated'.
"""
assert isinstance(tables, list) or isinstance(tables, str) or tables is None, "'Tables' parameter must be String, List or None"
assert isinstance(DS_ID, str) or DS_ID is None, "'DS_ID' parameter must be String or None"
DS_ID_old = self.DS_ID
if DS_ID is None:
DS_ID = self.DS_ID
else:
self.set_dataset(DS_ID)
if DS_ID is not None:
self.to_log('\n# Synchronize\n')
self.__update = True
self.download(tables=tables, DS_ID = DS_ID)
self.upload(tables=tables, DS_ID = DS_ID, delete_sheet_after_upload = False)
self.__update = False
else:
raise Exception("No Dataset specified. Please call the function as 'synchronize(DS_ID=dataset_id)' again with dataset_id a string specifying a dataset ID")
pass
    def upload_fields_sheet(self):
        """
        Push the field metadata of ``self.sheet`` (one row per column, indexed
        by field name) to Data Catalog field tags and mirror the cleaned field
        descriptions into the BigQuery table schema, which is then written
        back to BigQuery with up to 10 retries.
        """
        for column_name, row in self.sheet.iterrows():
            if len(column_name) > 0:
                try:
                    # this tries to get a numeric key value for the column name by checking first whether
                    # the column name is in the table schema of BQ
                    # if it is not found means that this column field is no longer part of the schema
                    # and skips over that entry (via the KeyError handler below)
                    num_index = self.schema_bq_df.index.get_loc(column_name.lower())
                    has_descr = False
                    if 'field_description' in row.keys():
                        has_descr = True
                        field_description = self.clean_sentence_string(row['field_description'])
                        field_attributes_dc = {**{key: self.clean_string(row[key]) for key in row.keys() if key not in ['field_description']}, 'field_description': self.clean_sentence_string(row['field_description'])}
                    else:
                        field_attributes_dc = {key: self.clean_string(row[key]) for key in row.keys()}
                    field_entry_dict = {'field_name': column_name.lower(), 'field_attributes': field_attributes_dc}
                    self.create_field_tag(field_entry_dict)
                    field_bq = self.schema_bq[num_index]
                    field_bq_name = field_bq.name
                    field_bq_field_type = field_bq.field_type
                    field_bq_mode = field_bq.mode
                    if has_descr:
                        # BigQuery descriptions are length-limited: truncate to the maximum.
                        field_description_bq = self.shorten_string(field_description, self.bq_max_string_length)
                        self.schema_bq[num_index] = bigquery.SchemaField(name=field_bq_name, field_type=field_bq_field_type, mode=field_bq_mode, description=field_description_bq)
                except KeyError:
                    pass
            else:
                # NOTE(review): stops at the first empty row index - assumed to
                # mark the end of the filled rows; later rows would be skipped.
                break
        self.check_non_matching_columns_bq_excel()
        self.table_instance_bq.schema = self.schema_bq
        # Writing the schema can fail transiently; retry up to 10 times.
        num_trials = 1
        update_schema = False
        while num_trials < 11 and not update_schema:
            try:
                self.table_instance_bq = self.bq_client.update_table(self.table_instance_bq, ["schema"])
                update_schema = True
            except Exception as e:
                if hasattr(e, 'message'):
                    err = e.message
                else:
                    err = e
                num_trials = num_trials + 1
                if num_trials == 11:
                    self.to_log("\t\t\terror while trying to write schema to BigQuery:")
                    # NOTE(review): to_log ignores non-string messages, so when
                    # 'err' is an exception object it is silently dropped.
                    self.to_log(err)
                    self.to_log("\t\t\terror occured, this was the last attempt\n")
                else:
                    self.to_log("\t\t\terror while trying to write schema to BigQuery:\n")
                    self.to_log(err)
                    self.to_log("\t\t\terror occured, start {}. attempt\n".format(num_trials))
        pass
def upload_table_tag(self):
diff_keys_set = set(self.table_tag_dict.keys()) - set(self.table_tag_template.fields.keys())
assert len(diff_keys_set) == 0, "The attribute names {} are no attribute names of the tag template {}".format(diff_keys_set, self.table_tag_template.name)
self.create_table_tag(self.table_tag_dict)
def __upload_table_description_bq_init(self):
"""
This function is only executed during initialisation of the class instance to set parameter for the function upload_table_description_bq
"""
self.__table_attrs = [attr for attr in self.table_tag_template.fields.keys() if attr not in ['table_description']]
max_str_len_extra_metadata_keys = reduce((lambda x,y: max(x,y)), map( lambda x: len(x) , self.table_tag_template.fields.keys()) )
self.__n_int_tab = 5
self.__max_n_tabs = (max_str_len_extra_metadata_keys+1)//self.__n_int_tab
def __upload_table_description_bq(self):
table_description = self.clean_sentence_string(self.table_tag_dict['table_description'])
extra_metadata_string = '\n\nTable attributes:\n\n'
has_extra_metadata = False
for column in self.__table_attrs:
if len(self.table_tag_dict[column]) > 0:
has_extra_metadata = True
column_first_part = column[6:9]
if column[6:9] == 'gcp':
column_first_part = 'GCP'
else:
column_first_part = column[6].upper() + column[7:9]
n_tabs = self.__max_n_tabs - ((len(column)+1)//self.__n_int_tab) + 1
extra_metadata_string = extra_metadata_string + column_first_part \
+ re.sub(r'_+',' ', column[9:]) + ":" + "\t"*n_tabs \
+ self.table_tag_dict[column]
if extra_metadata_string[-1] != '\n':
extra_metadata_string = extra_metadata_string + "\n"
if has_extra_metadata:
self.table_instance_bq.description = table_description + extra_metadata_string
else:
self.table_instance_bq.description = table_description
self.table_instance_bq = self.bq_client.update_table(self.table_instance_bq, ["description"])
pass
def check_non_matching_columns_bq_excel(self, table_instance_dc = None, excel_column_names = None, bq_column_names = None):
if table_instance_dc is None:
table_instance_dc = self.table_instance_dc
if excel_column_names is None:
excel_column_names = self.sheet.index
if bq_column_names is None:
bq_column_names = self.schema_bq_df.index
set_excel_column_fields = set(excel_column_names.map(lambda x: x.lower()))
set_bq_column_fields = set(bq_column_names)
set_not_in_bq = set_excel_column_fields.difference(set_bq_column_fields)
set_not_in_excel = set_bq_column_fields.difference(set_excel_column_fields)
if bool(set_not_in_bq) or bool(set_not_in_excel):
self.to_log('\t\t\tFor the table at the BigQuery path\n \'{}\''.format(table_instance_dc.linked_resource))
self.to_log('\t\t\tIn the following list, entries prefixed with:')
self.to_log('\t\t\t \'<\':\tare contained in the Excel spreadsheet but not in the BigQuery table schema (anymore).\n\t\t\tPlease delete them in the Excel spreadsheet!')
self.to_log('\t\t\t \'>\':\tare contained in the BigQuery table schema but not in the Excel spreadsheet.\n\t\t\t\tPlease add them in the Excel spreadsheet!\n')
if bool(set_not_in_bq):
for column_name in set_not_in_bq:
self.to_log('\t\t\t\t< {}'.format(column_name))
if bool(set_not_in_excel):
if bool(set_not_in_bq):
self.to_log('\n')
for column_name in set_not_in_excel:
self.to_log('\t\t\t\t> {}'.format(column_name))
def to_log(self, message = None):
if isinstance(message, str):
if self.__do_log:
self.__log = self.__log + message
if self.__do_print_log:
print(message)
pass
def write_log(self):
if self.__do_log:
F = open(self.__logfile, "w")
F.write(self.__log)
self.__log = ''
F.close()
@staticmethod
def clean_string(string):
string = string.strip()
if len(string) > 0:
string = re.sub(r'\s+',' ', string)
return string
@classmethod
def clean_sentence_string(cls, string):
string = cls.clean_string(string)
if len(string) > 0:
string = string[0].upper() + string[1:]
if string[-1] != r"." and string[-1] != r"]":
string = string + r"."
return string
@staticmethod
def shorten_string(string, n):
if len(string) < n:
return string
else:
return string[:n]
@classmethod
def pure_table_description_bq(cls, table_description_bq):
if table_description_bq is not None:
try:
table_description_bq_pure = cls.pattern_table_descr_bq_pure.search(table_description_bq).group('descr')
except:
table_description_bq_pure = table_description_bq
return table_description_bq_pure
else:
return ''
class construct_overview_sheet:
def __init__(self, tag_template, attributes = None):
self.__dict_attributes = {item[0]: k for k, item in enumerate(tag_template.fields.items())}
self.__num_el = len(self.__dict_attributes)
self.__list_attributes = [[] for i in range(self.__num_el)]
self.__list_table_id = []
if attributes is None:
self.__attributes_ordered = list(tag_template.fields.keys())
else:
assert isinstance(attributes, list), "'attributes' parameter must be a list"
assert len(set(tag_template.fields.keys()) - set(attributes)) == 0, "The provided attributes are no permutation of the field keys of the provided tag_template"
self.__attributes_ordered = attributes
def append(self, table_id, tag = None, alt_tag_vals = None):
assert isinstance(alt_tag_vals, dict) or alt_tag_vals is None, "'alt_tag_vals' must be of type dict or None"
if alt_tag_vals is None:
alt_tag_vals = {}
self.__list_table_id.append(table_id)
if tag is not None:
for attr, index in self.__dict_attributes.items():
alt_val_not_avail = True
if attr in alt_tag_vals.keys():
self.__list_attributes[index].append(alt_tag_vals[attr])
alt_val_not_avail = False
if alt_val_not_avail:
try:
if(attr == 'table_description'):
self.__list_attributes[index].append(toolbox.clean_sentence_string(tag.fields[attr].string_value))
else:
self.__list_attributes[index].append(tag.fields[attr].string_value)
except:
self.__list_attributes[index].append('')
else:
for attr, index in self.__dict_attributes.items():
if attr in alt_tag_vals.keys():
self.__list_attributes[index].append(alt_tag_vals[attr])
else:
self.__list_attributes[index].append('')
def get_dataframe(self):
return pd.DataFrame.from_dict({'table_id': self.__list_table_id, **{attr: self.__list_attributes[index] for attr, index in self.__dict_attributes.items()}}).fillna('').astype(str).applymap(lambda x: x.strip()).set_index('table_id')[self.__attributes_ordered]
def set_datframe(self, return_df = False):
self.df = self.get_dataframe()
if return_df:
return self.df
else:
pass
class construct_table_sheets:
def __init__(self):
self.__list_table_id = []
self.__list_of_sheet_df = []
def append(self, table_id, sheet):
self.__list_table_id.append(table_id)
self.__list_of_sheet_df.append(sheet)
def get_dict(self):
return OrderedDict(zip(self.__list_table_id, self.__list_of_sheet_df))
def set_dict(self, return_dict = False):
self.dict_sheets = self.get_dict()
if return_dict:
return self.dict_sheets
else:
pass
| [
"karlo1986@gmx.de"
] | karlo1986@gmx.de |
6747e33efcd4f93c3dbf79fe12368de440154955 | b45e649b4580692dd1b8bf63ad29befb3daad95a | /spark/src/main/python/preprocBinning.py | 6c21866ee6f9e294698dfe7cff5be5841bf1c7fa | [] | no_license | xu-hao/FHIR-PIT | 21ea0e5b8796d86f3a931b99e3e7a3f1e58b04a2 | db2fb04e2cc0d9fce2f8043f594f60fdb8f5a8e8 | refs/heads/master | 2021-05-25T09:49:48.084629 | 2021-05-19T20:17:11 | 2021-05-19T20:17:11 | 127,015,534 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 660 | py | import os
import sys
import json
from preprocPatient import *
from preprocVisit import *
year_start, year_end, config_file, input_dir, output_dir = sys.argv[1:]
for year in range(int(year_start), int(year_end) + 1):
print(year)
input_file_p = f"{input_dir}/{year}/all_patient"
output_file_p = f"{output_dir}/{year}patient"
preproc_patient(config_file, input_file_p, output_file_p)
input_file_v = f"{input_dir}/{year}/all_visit"
output_file_v = f"{output_dir}/{year}visit"
preproc_visit(config_file, input_file_v, output_file_v)
| [
"xuh@cs.unc.edu"
] | xuh@cs.unc.edu |
9125851b1d61361d8d141a4dca0d69cccfebbf50 | b62d6c292e573dca91d33dfbe61a417e18330d50 | /app/KS/image/helpers.py | b773d4bef9998935d10af95089338174274ed702 | [] | no_license | MCapallera/PatternRecognition_JungleSpeed | 7dd2e7854df8c4585612e5455505fcfb6f443972 | d8254abc26152af449fc17881283da46359b712d | refs/heads/master | 2020-05-01T17:54:37.985790 | 2019-05-26T17:22:27 | 2019-05-26T17:22:27 | 177,611,998 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | import numpy
from skimage.filters import threshold_yen
def get_background_color(img):
threshold = threshold_yen(img)
return numpy.mean(img[img > threshold])
| [
"melania.grotti@unifr.ch"
] | melania.grotti@unifr.ch |
89390f2b4c8bd7d25ec8c7791c9b3502343ed13a | ebe20199181927f3eb36aedfe66d5d179f28628f | /concierge/event/models.py | d66ac24619b9e9b28680c89204f8dfd3e80b9f4c | [] | no_license | rajat404/concierge | 8f1c1d826334f8a0364d4b64efebc722b6f6f061 | 60290e70741060c78c860779a19bf81a90058675 | refs/heads/master | 2021-07-01T08:28:27.020737 | 2017-09-03T22:23:12 | 2017-09-03T22:23:12 | 103,437,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,103 | py | # Third Party Imports
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from simple_history.models import HistoricalRecords
# Concierge Imports
from concierge.base.models import SlugModel, TimeStampedModel, UUIDModel
from concierge.quiz.models import Quiz
class Speaker(UUIDModel, TimeStampedModel):
history = HistoricalRecords(table_name='event_speaker_history')
first_name = models.CharField(max_length=120)
last_name = models.CharField(max_length=120)
email = models.EmailField(unique=True, db_index=True)
about = models.TextField(blank=True)
class Meta:
db_table = 'event_speaker'
verbose_name = _('Speaker')
verbose_name_plural = _('Speakers')
def __str__(self):
return '{} {}'.format(self.first_name, self.last_name)
class Event(UUIDModel, TimeStampedModel, SlugModel):
EVENT_CHOICES = (
('EVENT', 'EVENT'),
('SESSION', 'SESSION'),
('MEETUP', 'MEETUP'),
('CONFERENCE', 'CONFERENCE'),
('TALK', 'TALK'),
('WORKSHOP', 'WORKSHOP'),
('DEV_SPRINT', 'DEV SPRINT'),
('PANEL_DISCUSSION', 'PANEL DISCUSSION'),
# TODO: BOF & Open Spaces
)
VISIBILITY_CHOICES = (
('PUBLIC', 'PUBLIC'),
('PRIVATE', 'PRIVATE'),
)
# Need to be nullable, as the value will be populated after creation of the `Event` instance
registration_quiz = models.ForeignKey(Quiz, related_name='event_registration', null=True)
feedback_quiz = models.ForeignKey(Quiz, related_name='event_feedback', null=True)
history = HistoricalRecords(table_name='event_event_history')
kind = models.CharField(max_length=15, choices=EVENT_CHOICES)
happening = models.ForeignKey('self', blank=True, null=True)
speaker = models.ForeignKey(Speaker, related_name='events', null=True, blank=True)
venue = models.CharField(max_length=100, null=True, blank=True)
description = models.TextField(blank=True)
start = models.DateTimeField()
end = models.DateTimeField()
participation_open = models.BooleanField(default=False, help_text='can a user participate in this event')
participation_start = models.DateTimeField(null=True, blank=True)
participation_end = models.DateTimeField(null=True, blank=True)
is_offline = models.BooleanField(default=True)
class Meta:
db_table = 'event_event'
verbose_name = _('Event')
verbose_name_plural = _('Events')
def __str__(self):
return self.slug
def can_participate(self):
# Valiation for `participation_start` & `participation_end` is handled by the serializer
# These value cannot be None
return bool(self.participation_open and (self.participation_start <= timezone.now() < self.participation_end))
class OfflineEvent(UUIDModel, TimeStampedModel):
history = HistoricalRecords(table_name='event_offline_event_history')
event = models.OneToOneField(Event, related_name='offline')
longitude = models.DecimalField(max_digits=9, decimal_places=6, null=True, blank=True)
latitude = models.DecimalField(max_digits=9, decimal_places=6, null=True, blank=True)
address = models.TextField()
address_guidelines = models.TextField()
rsvp_open = models.BooleanField(default=False, help_text='can a participant RSVP for this event')
rsvp_start = models.DateTimeField(null=True, blank=True)
rsvp_end = models.DateTimeField(null=True, blank=True)
class Meta:
db_table = 'event_offline_event'
verbose_name = _('Offline Event')
verbose_name_plural = _('Offline Events')
def __str__(self):
return self.event.slug
class Organisation(UUIDModel, TimeStampedModel, SlugModel):
ORG_CHOICES = (
('HOST', 'HOST'),
('SPONSOR', 'SPONSOR'),
('OTHER', 'OTHER'),
)
history = HistoricalRecords(table_name='organisation_organisation_history')
kind = models.CharField(max_length=15, choices=ORG_CHOICES)
class Meta:
db_table = 'organisation_organisation'
verbose_name = _('Organisation')
verbose_name_plural = _('Organisations')
def __str__(self):
return self.slug
class SponsorCategory(models.Model):
"""To be added via Admin Panel(or Fixture), prior to adding Sponsors"""
name = models.CharField(max_length=50, unique=True)
class Meta:
db_table = 'event_sponsor_category'
verbose_name = _('Sponsor Category')
verbose_name_plural = _('Sponsor Categories')
def __str__(self):
return self.name
class Sponsor(TimeStampedModel):
history = HistoricalRecords(table_name='event_sponsor_history')
event = models.ForeignKey(Event)
organisation = models.ForeignKey(Organisation)
category = models.ForeignKey(SponsorCategory, to_field='name')
class Meta:
db_table = 'event_sponsor'
verbose_name = _('Sponsor')
verbose_name_plural = _('Sponsors')
def __str__(self):
return '{}--{}'.format(self.organisation, self.event)
| [
"404rajat@gmail.com"
] | 404rajat@gmail.com |
cf8828a7f7d1d13855bca279de6c79655a778dcd | 97825c904d512fdfd6a4418d6820eb8fd3cdd202 | /largest_number.py | cf7f5a702a9e0821a28b1fd3e4074a3fe1484bf5 | [] | no_license | charliedmiller/coding_challenges | 97426741778d0498321cb2fec2e7d3c3d3c0b26e | ea6aa0cffb55b2d88f71f108f16c34e9779da9c7 | refs/heads/master | 2023-02-21T21:54:26.500399 | 2021-01-28T23:50:42 | 2021-01-28T23:50:42 | 287,142,628 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,186 | py | # Charlie Miller
# Leetcode - 179. Largest Number
# https://leetcode.com/problems/largest-number/
"""
I had the right idea, though needed to look at the sln to get there
https://leetcode.com/problems/largest-number/solution/
Create a comparator that compares the numbers created by ordering
them differently (a then b or b then a) Sort using this
"""
from functools import cmp_to_key
class Solution:
#see what the numbers would look like using each order
def cmp(self,a,b):
a_first = int(a+b)
b_first = int(b+a)
#return the LARGER number as less than (before)
return b_first - a_first
#for edge cases where there are leading zeros
def remove_leading_zeros(self,string):
for start in range(len(string)):
if string[start] != "0":
return string[start:]
return string[len(string)-1:]
def largestNumber(self, nums: List[int]) -> str:
stringified = [str(num) for num in nums]
stringified.sort(key=cmp_to_key(self.cmp))
whole = ''.join(stringified)
return self.remove_leading_zeros(whole) | [
"charliedmiller@gmail.com"
] | charliedmiller@gmail.com |
69b0fa5230cadb504175821f7cb8097e99df18c4 | 901658f002f0d996fe17b9f1a241ccf95bdb82e3 | /home/migrations/0002_auto_20200801_1224.py | 76aa831a18ba7eb063bd34d75b1394c39ac08103 | [] | no_license | OnurBoynuegri/RentHome | 2d0fb308664d1095eaddafd5838982751a497c37 | c20c934f9edbc8d9c7215d0d3f462769b5d048c7 | refs/heads/master | 2022-11-28T04:42:45.158688 | 2020-08-11T15:09:38 | 2020-08-11T15:09:38 | 280,904,461 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,729 | py | # Generated by Django 3.0.8 on 2020-08-01 09:24
import ckeditor_uploader.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ContactFormMessage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=20)),
('email', models.CharField(blank=True, max_length=50)),
('subject', models.CharField(blank=True, max_length=50)),
('message', models.CharField(blank=True, max_length=255)),
('status', models.CharField(choices=[('New', 'New'), ('Read', 'Read')], default='New', max_length=10)),
('ip', models.CharField(blank=True, max_length=20)),
('note', models.CharField(blank=True, max_length=100)),
('create_at', models.DateTimeField(auto_now_add=True)),
('update_at', models.DateTimeField(auto_now=True)),
],
),
migrations.AlterField(
model_name='setting',
name='aboutus',
field=ckeditor_uploader.fields.RichTextUploadingField(blank=True),
),
migrations.AlterField(
model_name='setting',
name='contact',
field=ckeditor_uploader.fields.RichTextUploadingField(blank=True),
),
migrations.AlterField(
model_name='setting',
name='references',
field=ckeditor_uploader.fields.RichTextUploadingField(blank=True),
),
]
| [
"onurboynueğri@gmail.com"
] | onurboynueğri@gmail.com |
1ed5d148e48f6eaac83bf81e500e00be6515e921 | e9534ef4fbaea3cfee556e5da32927ba3860834c | /autoScale/main.py | 0f5a11984e217850e3df919739199b47384e4c69 | [] | no_license | joaoleite/marathon-autoscaling-app | 91791b831428105f4c4a778ffcacecad3edefe99 | ce45b562f006958dc14b88af6611092604cf4cfb | refs/heads/master | 2021-01-19T19:05:17.509686 | 2017-05-09T00:35:50 | 2017-05-09T00:35:50 | 88,398,742 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,288 | py | from marathonAPI import MarathonAPI
from rabbitMQ import rabbitMQ
from settings import VAR_MARATHON_PORT, VAR_MARATHON_USE_HTTPS, VAR_MARATHON_PASSWORD, VAR_MARATHON_USER, \
VAR_RABBITMQ_WEB_PORT, VAR_RABBITMQ_PASSWORD, VAR_RABBITMQ_USER, VAR_RABBITMQ_HOST
from settings import MARATHON_INTERVAL_REFRESH_APP
from settings import logger, VAR_MARATHON_HOST
logger.info('Configurating MarathonAPI...')
host = MarathonAPI(host=VAR_MARATHON_HOST, port=VAR_MARATHON_PORT, use_https=VAR_MARATHON_USE_HTTPS, user=VAR_MARATHON_USER, password=VAR_MARATHON_PASSWORD)
logger.info('Configurating RabbitMQ...')
target = rabbitMQ(host=VAR_RABBITMQ_HOST, user=VAR_RABBITMQ_USER, password=VAR_RABBITMQ_PASSWORD, web_port=VAR_RABBITMQ_WEB_PORT)
import asyncio
def callback(n, loop):
try:
host.findAppsWithAutoscaleLabels()
host.scaleApps(target)
except Exception as e:
logger.error(e)
finally:
now = loop.time()
loop.call_at(now + n, callback, n, loop)
async def main(loop):
delta_time = MARATHON_INTERVAL_REFRESH_APP
loop.call_soon(callback, delta_time, loop)
while True:
await asyncio.sleep(1)
event_loop = asyncio.get_event_loop()
try:
event_loop.run_until_complete(main(event_loop))
finally:
event_loop.close()
| [
"joaoleite@gmail.com"
] | joaoleite@gmail.com |
f748579f1a20a23d901c31b88322c26f451af433 | 44ce370130c7cd9a6a763be8dcc62362f57724e7 | /vagrant/data/Math2/cont2_8/run | eb3597973d86ef0dc6f0ed2817058872d9b99942 | [] | no_license | akemery/cnp3-docker | c122aac493868ec4fa2b7795b1fd7110998f93dc | 3f24649ab81e828cf6babcfac7965251cb4dc7e2 | refs/heads/main | 2023-06-21T10:06:05.668090 | 2021-07-26T07:39:42 | 2021-07-26T07:39:42 | 363,279,299 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 666 | #! /usr/bin/python3
# -*- coding: utf-8 -*-
import sys
sys.path.insert(1, '/course/common')
from inginious import input, feedback, rst
from parsingDomain import compareDomains
correct = "{}" # à modifier /!\
answer = input.get_input("q1")
grade = 0
result = compareDomains(answer, correct)
if result[0]:
feedback.set_problem_result("success","q1")
feedback.set_problem_feedback("Bravo!","q1")
grade += 100
else:
feedback.set_problem_result("failed","q1")
feedback.set_problem_feedback(result[1],"q1")
feedback.set_grade(grade)
if grade == 100 :
feedback.set_global_result("success")
else :
feedback.set_global_result("failed")
| [
"assogba.emery@gmail.com"
] | assogba.emery@gmail.com | |
bed6c7b8a9b18521ccb6830724efd339b5523cb9 | a8cb99c512946691b6c53cf14538b44c39c62e88 | /models.py | a8f9314eba0b89e27e3eaf4139a8ce19fb2b8f63 | [] | no_license | Louise-LuLin/debias-gcn | 3d3f0c57cd22ed6506841c463820eac980ae4769 | 82ee00cce6f5672c8823cf31a2fe9e5b54eed56e | refs/heads/main | 2023-05-03T05:24:45.506722 | 2021-05-17T18:30:15 | 2021-05-17T18:30:15 | 360,814,170 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,250 | py | import dgl
from dgl.nn import SAGEConv # Define a GraphSAGE model
from dgl.nn import GATConv # Define a GAT model
import torch
import torch.nn as nn
import torch.nn.functional as F
######################################################################
# build a two-layer GraphSAGE model
class GraphSAGE(nn.Module):
def __init__(self, graph, in_dim, hidden_dim, out_dim):
super(GraphSAGE, self).__init__()
self.conv1 = SAGEConv(in_feats=in_dim,
out_feats=hidden_dim,
aggregator_type='mean')
self.conv2 = SAGEConv(in_feats=hidden_dim,
out_feats=out_dim,
aggregator_type='mean')
self.graph = graph
def forward(self, in_feat):
h = self.conv1(self.graph, in_feat)
h = F.relu(h)
h = self.conv2(self.graph, h)
return h
######################################################################
# build a two-layer GAT model
class GATLayer(nn.Module):
def __init__(self, graph, in_dim, out_dim):
super(GATLayer, self).__init__()
self.graph = graph
self.fc = nn.Linear(in_dim, out_dim, bias=False)
self.attn_fc = nn.Linear(2 * out_dim, 1, bias=False)
self.reset_parameters()
def reset_parameters(self):
"""Reinitialize learnable parameters."""
gain = nn.init.calculate_gain('relu')
nn.init.xavier_normal_(self.fc.weight, gain=gain)
nn.init.xavier_normal_(self.attn_fc.weight, gain=gain)
def edge_attention(self, edges):
# attention
z2 = torch.cat([edges.src['z'], edges.dst['z']], dim=1)
a = self.attn_fc(z2)
return {'e': F.leaky_relu(a)}
def message_func(self, edges):
return {'z': edges.src['z'], 'e': edges.data['e']}
def reduce_func(self, nodes):
alpha = F.softmax(nodes.mailbox['e'], dim=1)
h = torch.sum(alpha * nodes.mailbox['z'], dim=1)
return {'h': h}
def forward(self, in_feat):
z = self.fc(in_feat)
self.graph.ndata['z'] = z
self.graph.apply_edges(self.edge_attention)
self.graph.update_all(self.message_func, self.reduce_func)
return self.graph.ndata.pop('h')
class MultiHeadGATLayer(nn.Module):
def __init__(self, graph, in_dim, out_dim, num_heads, merge='cat'):
super(MultiHeadGATLayer, self).__init__()
self.heads = nn.ModuleList()
for i in range(num_heads):
self.heads.append(GATLayer(graph, in_dim, out_dim))
self.merge = merge
def forward(self, h):
head_outs = [attn_head(h) for attn_head in self.heads]
if self.merge == 'cat':
return torch.cat(head_outs, dim=1)
else:
return torch.mean(torch.stack(head_outs))
class GAT(nn.Module):
def __init__(self, graph, in_dim, hidden_dim, out_dim, num_heads):
super(GAT, self).__init__()
self.layer1 = MultiHeadGATLayer(graph, in_dim, hidden_dim, num_heads)
self.layer2 = MultiHeadGATLayer(graph, hidden_dim * num_heads, out_dim, 1)
def forward(self, in_feat):
h = self.layer1(in_feat)
h = F.elu(h)
h = self.layer2(h)
return h | [
"lulin199209@gmail.com"
] | lulin199209@gmail.com |
da6084b7a1b5f3aa319565d6778d11e1160946a3 | d7b403a8e4b124e42feb0b72c502df438f5552f6 | /speech/TextToSpeech.py | afda438ef79dc3cb346c9c3df8dea77969416e10 | [] | no_license | Mallington/Hacker-The-Hardware-bear | ea3b7b7bd1a908ff613cce414d623b2d263c0955 | 4f21318879bedac726c00a26f9c80095f73c1881 | refs/heads/master | 2020-09-16T04:10:39.319071 | 2019-11-24T11:01:43 | 2019-11-24T11:01:43 | 223,648,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py | from gtts import gTTS
import os
from datetime import date
class tts():
def __init__(self):
pass
def say(self, message):
tts = gTTS(text=message, lang='en')
now = date.today()
tts.save("{}.mp3".format(now))
os.system("mpg321 {}.mp3".format(now))
| [
"funkymallington@gmail.com"
] | funkymallington@gmail.com |
f93a39f3c7ce5dc35b811f46c70586ec4a00c270 | 4d93acd63ce2835fcd7ea610fcd412b727a4f03e | /08-Markov/decay.py | aa454eea1ad7fb4d3765d62e0e5f8e83dfc8525a | [] | no_license | jsbarbosa/JuanBarbosa_MCA | 41ebcc27bb7dd8a886c9b4c1b416bd7e3cad2e57 | 4f49d17282679ae1fa81d7cc892b6560edf93828 | refs/heads/master | 2021-01-11T17:53:44.115810 | 2017-04-24T17:58:09 | 2017-04-24T17:58:09 | 79,863,469 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,590 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 13 18:21:00 2017
@author: juan
"""
import numpy as np
import matplotlib.pyplot as plt
obs = np.array([1.5, 1.7, 2])
def rand():
return 2*np.random.random() - 1
def integral(a, b, lm):
return -lm*(func(b, lm) - func(a, lm))
def func(x, lm):
return np.exp(-x/lm)
def probability(x, lm):
p = 1
z = integral(1, 20, lm)
for x_ in x:
p *= func(x_, lm)/z
return p
def bayesian(x, lm):
return probability(x, lm)
def hastings(N, dx = 1):
lambdas = np.ones(N+1)
lambdas[0] = np.random.random()*10.0
for i in range(N):
second = lambdas[i] + dx*rand()
q = bayesian(obs, second)/bayesian(obs, lambdas[i])
alpha = min(q, 1.0)
u = np.random.random()
if u <= alpha and second > 0:
lambdas[i+1] = second
else:
lambdas[i+1] = lambdas[i]
return lambdas
def rubin(N, M, dl):
avs = np.zeros(M)
vas = np.zeros(M)
R = np.zeros(N-2)
chains = np.array([hastings(N, dl) for i in range(M)])
for j in range(2, N):
for i in range(M):
avs[i] = np.mean(chains[i, :j])
vas[i] = np.std(chains[i, :j])**2
total = np.mean(avs)
B = j/(M-1)*np.sum((avs-total)**2)
W = vas.mean()
R[j-2] = (j-1)/j + (B/W)*(M+1)/(j*M)
return R
N = 10000
lm = np.logspace(-3, 3, 5)
for l in lm:
R = rubin(N, 5, l)
plt.plot(R, label="%f"%l)
plt.xscale('log')
plt.yscale('log')
plt.legend()
plt.show()
| [
"js.barbosa10@uniandes.edu.co"
] | js.barbosa10@uniandes.edu.co |
b892725d3b4dcb01c4b54bf97ca0a1f759db0640 | 87a7e63bf86dad9ca5dbe812ea8caaa013ab1856 | /Step6RectangleDivision.py | 31a102235257fba937444e2cf0323933e2ceefe3 | [] | no_license | sugoigroup/CodeTestPython | 884ee5aa7abec588eb04df930b32c824b1a37a93 | ba08dc8c2a7c8f385055b4e21a6de10e73f7e8fd | refs/heads/master | 2023-03-18T12:01:30.055923 | 2021-03-15T13:32:58 | 2021-03-15T13:32:58 | 347,641,273 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 609 | py | # 긴변의 길이가 1000이하에서 만들어질수 있는 정사각형 개수가 딱 20개인 직사각형의 가로세로 길이 쌍이 몇 쌍인지 구하시요? 응?
# 단 직사각형의 가로세로 길ㄹ이를 바꾼 경우는 하나로 취급됨.
# 뭔개소리냐
W, N = 1000, 20
def cut(w, h, n):
if w==h:
return n==0
if w>h:
w, h = h, w
q, r = divmod(h, w)
if (n-q<0) or (r==0):
return (n-q==0)
else:
return cut(w,r,n-q)
cnt = 0
for i in range(1, W+1):
for j in range(i, W+1):
if cut(i, j, N):
cnt += 1
print(cnt) | [
""
] | |
ecca9f66970644f8ebd7962b370b64e54427a5c2 | 339ec05910ea45e91222a33ef926d8f108f87486 | /server.py | 24f5afea45ca70ff80b5c4b8f1bc8566d864ad53 | [] | no_license | ajeebbasheer/chat-server | 199eda1b67a2449e300ba693a1f735521c89d236 | 9ffdd57a617bed06f256e3fe2fd1926c34064cc9 | refs/heads/master | 2021-09-13T17:45:04.167524 | 2018-05-02T15:44:19 | 2018-05-02T15:44:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,247 | py | #/usr/bin/env python
import socket
import sys
import select
CONNECTION_LIST=[]
RECV_BUFFER=4096
PORT=5000
def broadcast(sock,message):
for s in CONNECTION_LIST:
if s != server_socket and socket!=sock:
try:
s.send(message)
except:
s.close()
CONNECTION_LIST.remove(socket)
server_socket=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
server_socket.bind(("localhost", PORT))
server_socket.listen(10)
CONNECTION_LIST.append(server_socket)
print "Chat server started on port " + str(PORT)
while 1:
read_sockets,write_sockets,error_sockets = select.select(CONNECTION_LIST,[],[])
for sock in read_sockets:
if sock==server_socket:
sockfd, addr = server_socket.accept()
CONNECTION_LIST.append(sockfd)
print "client (%s,%s) is connected" %addr
broadcast(sockfd,"[%s:%s] entered room\n" %addr)
else:
try:
data=sock.recv(RECV_BUFFER)
if data:
broadcast(sock, "\r" + 'machan::<' + str(sock.getpeername()) + '> ::' + data)
except:
broadcast(sock, "client(%s,%s) is offline" %addr)
print "client(%s,%s) is offline " %addr
server_socket.close()
CONNECTION_LIST.remove(sock)
continue
server_socket.close()
| [
"to.ajeeb@gmail.com"
] | to.ajeeb@gmail.com |
96522b5e865f8371b780903ffd0f11fcf1ca2dfa | 6a18c479a83e238484fd481c97fdca776091d4aa | /deepracer/RL_Model_Training/benchmark_dr.py | 4d10f2d5ad61ca5e66c85c7cf425a6a26321f5ae | [
"BSD-3-Clause",
"MIT"
] | permissive | Currycurrycurry/Time-in-State-RL | f07e91fc3f6111869dfa380de87970086895e046 | f106d92c3556b955c108bf3e147bb0b12e60259c | refs/heads/main | 2023-03-20T16:13:53.414126 | 2021-03-09T02:24:31 | 2021-03-09T02:24:31 | 345,659,435 | 0 | 0 | BSD-3-Clause | 2021-03-08T13:07:45 | 2021-03-08T13:07:45 | null | UTF-8 | Python | false | false | 24,399 | py | # Code credits: The model loading code is taken from open AI baselines with modifications done to allow
# variable timing characteristics during evaluation. The deepracer environment is taken from the aws deepracer github
# code with modifications for the variable timing characteristics.
# Changing the sampling and latency input to the model
# --------------------------------------------------------------------------
# Benchmark configuration
# --------------------------------------------------------------------------
# Frozen TensorFlow graph(s) to evaluate. Add more `pathN` variables and
# extend `paths` to benchmark several checkpoints in a single invocation.
path1 = 'Deepracer-checkpoints/Latency_138.pb'
paths = [path1]

# Output directory; one experiment name per entry in `paths` — results for
# each model are written under data_folder/<experiment name>.
data_folder = 'data_dr/'
experiments = ['dr_1']

# Per-run timing characteristics, index-aligned: each run i uses
# latencies[i] (ms), sampling_sleeps[i] (seconds between observations) and
# directions[i] (track direction, 1 or 2).
latencies = [20, 20, 40, 40, 60, 60,
             80, 80, 100, 100, 120, 120]
sampling_sleeps = [0.033, 0.033, 0.040, 0.040, 0.060, 0.060,
                   0.080, 0.080, 0.100, 0.100, 0.120, 0.120]
directions = [1, 2, 1, 2, 1, 2,
              1, 2, 1, 2, 1, 2]

# Number of continuous simulation steps to drive the car per run.
steps = 5000
# Changing the sampling and latency input to the model
import time
import gym
import queue
import numpy as np
from gym import spaces
from PIL import Image
import os
import math
from rotation import Rotation
from collections import OrderedDict
import random
import bisect
import json
import math
# Type of worker: this process either drives the Gazebo simulation or runs
# SageMaker training; several code paths below branch on this role.
SIMULATION_WORKER = "SIMULATION_WORKER"
SAGEMAKER_TRAINING_WORKER = "SAGEMAKER_TRAINING_WORKER"
# Role is taken from the NODE_TYPE environment variable; defaults to the
# simulation worker when unset.
node_type = os.environ.get("NODE_TYPE", SIMULATION_WORKER)
# pickle is used for saving the debug data
import pickle
if node_type == SIMULATION_WORKER:
import rospy
from ackermann_msgs.msg import AckermannDriveStamped
from gazebo_msgs.msg import ModelState
from gazebo_msgs.srv import GetLinkState, GetModelState, JointRequest
from gazebo_msgs.srv import SetModelState
from std_msgs.msg import Float64
from sensor_msgs.msg import Image as sensor_image
from deepracer_msgs.msg import Progress
from shapely.geometry import Point, Polygon
from shapely.geometry.polygon import LinearRing, LineString
# Size (width, height) the camera frame is resized to before inference.
TRAINING_IMAGE_SIZE = (160, 120)
FINISH_LINE = 1000

# Reward values used by the environment.
CRASHED = -30.0
NO_PROGRESS = -1
FINISHED = 10000000.0
MAX_STEPS = 100000000

# Names of the available track worlds.
EASY_TRACK_WORLD = 'easy_track'
MEDIUM_TRACK_WORLD = 'medium_track'
HARD_TRACK_WORLD = 'hard_track'

# Normalized track distance to advance the start position with each reset.
ROUND_ROBIN_ADVANCE_DIST = 0.02

# Required velocity command topics, one per wheel.
VELOCITY_TOPICS = ['/racecar/{}_wheel_velocity_controller/command'.format(wheel)
                   for wheel in ('left_rear', 'right_rear',
                                 'left_front', 'right_front')]

# Required steering hinge command topics.
STEERING_TOPICS = ['/racecar/{}_steering_hinge_position_controller/command'.format(side)
                   for side in ('left', 'right')]

# All effort joints: the four wheel joints followed by both steering hinges.
EFFORT_JOINTS = (['/racecar/{}_wheel_joint'.format(wheel)
                  for wheel in ('left_rear', 'right_rear',
                                'left_front', 'right_front')]
                 + ['/racecar/{}_steering_hinge_joint'.format(side)
                    for side in ('left', 'right')])

# Radius of the wheels of the car in meters.
WHEEL_RADIUS = 0.1

# Size of the image queue buffer; kept at 1 so exactly one image is consumed
# at a time (may change as more algorithms are added).
IMG_QUEUE_BUF_SIZE = 1
### Gym Env ###
class DeepRacerEnv(gym.Env):
    def __init__(self):
        """Set up timing parameters, Gym spaces and — for simulation
        workers — the ROS publishers/subscribers and track geometry.

        Statement order matters in the SIMULATION_WORKER branch:
        rospy.init_node must run before any publisher/subscriber is
        created, and set_waypoints must run before the center line is
        derived from self.waypoints.
        """
        # --- timing characteristics of the control loop ---
        self.sampling_rate = 30.0
        # Seconds between consecutive observations (inverse of the rate).
        self.sampling_sleep = (1.0/self.sampling_rate)
        #self.sampling_rates = [15.0, 30.0]
        self.sampling_rates = [30.0, 30.0]
        self.sampling_rate_index = 0
        # Candidate latency values (milliseconds) cycled during evaluation.
        self.latencies = [10.0, 20.0, 40.0, 60.0, 80.0, 100.0, 120.0]
        self.latency_index = 0
        self.latency_max_num_steps = 500 # for these steps latency will be fixed or change on reset or done after 500.
        self.latency_steps = 0
        self.latency = 10.0 #10 is the starting latency
        self.model_running_time = (2.0/1000.0) #model runtime
        screen_height = TRAINING_IMAGE_SIZE[1]
        screen_width = TRAINING_IMAGE_SIZE[0]
        # --- pose/progress telemetry, refreshed by the ROS callbacks ---
        self.on_track = 0
        self.progress = 0
        self.yaw = 0
        self.x = 0
        self.y = 0
        self.z = 0
        self.distance_from_center = 0
        self.distance_from_border_1 = 0
        self.distance_from_border_2 = 0
        self.steps = 0
        self.progress_at_beginning_of_race = 0
        self.reverse_dir = False
        # Normalized distance along the center line for the next spawn point.
        self.start_ndist = 0.0
        # actions -> steering angle, throttle
        self.action_space = spaces.Box(low=np.array([-1, 0]), high=np.array([+1, +1]), dtype=np.float32)
        # given image from simulator (grayscale, single channel)
        self.observation_space = spaces.Box(low=0, high=255,
                                            shape=(screen_height, screen_width, 1), dtype=np.uint8)
        self.allow_servo_step_signals = True
        # stores the time when camera images are received
        self.cam_update_time=[]
        # stores the time when consecutive actions are sent
        self.cons_action_send_time=[]
        # stores the time when progress updates are received
        self.progress_update_time = []
        # NOTE(review): named like a path but initialized as a list —
        # confirm intended use before relying on it.
        self.debug_data_folder = []
        self.debug_index = 0
        if node_type == SIMULATION_WORKER:
            # ROS initialization (must precede publisher/subscriber setup)
            rospy.init_node('rl_coach', anonymous=True)
            self.ack_publisher = rospy.Publisher('/vesc/low_level/ackermann_cmd_mux/output',
                                                 AckermannDriveStamped, queue_size=100)
            self.racecar_service = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
            self.clear_forces_client = rospy.ServiceProxy('/gazebo/clear_joint_forces',
                                                          JointRequest)
            # Subscribe to ROS topics and register callbacks
            # (callback_progress / callback_image are methods of this class,
            # defined outside this excerpt).
            rospy.Subscriber('/progress', Progress, self.callback_progress)
            rospy.Subscriber('/camera/zed/rgb/image_rect_color', sensor_image, self.callback_image)
            # World name is hard-coded here instead of read from ROS params.
            self.world_name = 'hard_track'#rospy.get_param('WORLD_NAME')
            self.set_waypoints()
            waypoints = self.waypoints
            # Closed-loop track iff the first and last waypoints coincide.
            is_loop = np.all(waypoints[0,:] == waypoints[-1,:])
            if is_loop:
                self.center_line = LinearRing(waypoints[:,0:2])
            else:
                self.center_line = LineString(waypoints[:,0:2])
            # Normalized distance of each waypoint along the center line,
            # with 1.0 appended for the final point.
            self.center_dists = [self.center_line.project(Point(p), normalized=True) for p in self.center_line.coords[:-1]] + [1.0]
            self.track_length = self.center_line.length
        self.reward_in_episode = 0
        self.prev_progress = 0
        self.steps = 0
        # Create the publishers for sending speed and steering info to the car
        self.velocity_pub_dict = OrderedDict()
        self.steering_pub_dict = OrderedDict()
        for topic in VELOCITY_TOPICS:
            self.velocity_pub_dict[topic] = rospy.Publisher(topic, Float64, queue_size=1)
        for topic in STEERING_TOPICS:
            self.steering_pub_dict[topic] = rospy.Publisher(topic, Float64, queue_size=1)
def get_data_debug(self):
print("center_line",self.center_line)
print("track_length",self.track_length)
def reset(self,inp_x=1.75,inp_y=0.6):
if node_type == SAGEMAKER_TRAINING_WORKER:
return self.observation_space.sample()
# print('Total Reward Reward=%.2f' % self.reward_in_episode,
# 'Total Steps=%.2f' % self.steps)
#self.send_reward_to_cloudwatch(self.reward_in_episode)
self.reward_in_episode = 0
self.reward = None
self.done = False
self.next_state = None
self.image = None
self.steps = 0
self.prev_progress = 0
# Reset car in Gazebo
self.send_action(0, 0) # set the throttle to 0
self.racecar_reset(0, 0)
self.infer_reward_state(0, 0)
self.cam_update_time = []
self.cons_action_send_time = []
self.progress_update_time = []
self.debug_index= self.debug_index+1
return self.next_state
def add_latency_to_image(self, observation):
    """Return the observation with an explicit trailing channel axis (H, W, 1).

    Historically this method also encoded the simulated latency and sampling
    rate into fixed corner pixels; that experiment is disabled, so only the
    reshape remains.
    """
    height, width = observation.shape[0], observation.shape[1]
    return observation.reshape(height, width, 1)
def convert_rgb_to_gray(self, observation):
    """Collapse an RGB image (H, W, 3) to grayscale (H, W) using BT.601 luma weights."""
    weights = (0.2989, 0.5870, 0.1140)
    return sum(w * observation[:, :, channel] for channel, w in enumerate(weights))
def set_next_state(self):
    """Convert the latest camera frame into the observation for the next step.

    Reads ``self.image`` (the most recent ROS sensor image message), resizes
    it to TRAINING_IMAGE_SIZE, converts it to grayscale and adds the trailing
    channel axis, then stores the result in ``self.next_state``.  Does
    nothing if no frame has been received yet.
    """
    # `is not None` instead of the original `!= None`: identity is the
    # correct idiom for a missing-value check and avoids invoking any
    # (potentially overloaded) equality operator on the message object.
    if self.image is not None:
        image_data = self.image
        # Read the raw RGB bytes at the camera's native resolution, then
        # downscale to the training resolution.
        image = Image.frombytes('RGB', (image_data.width, image_data.height), image_data.data, 'raw', 'RGB', 0, 1)
        image = image.resize(TRAINING_IMAGE_SIZE, resample=2)
        image = np.array(image)
        image = self.convert_rgb_to_gray(image)
        image = self.add_latency_to_image(image)
        self.next_state = image
def racecar_reset(self, ndist, next_index):
    """Teleport the car to the current round-robin start pose in Gazebo.

    NOTE(review): the ``ndist``/``next_index`` parameters are ignored — the
    start position is derived from ``self.start_ndist``; confirm callers.

    Side effects: zeroes the car's velocities, clears all joint forces,
    flips the driving direction for the next episode and advances the
    round-robin start distance along the track.
    """
    rospy.wait_for_service('gazebo/set_model_state')
    #random_start = random.random()
    prev_index, next_index = self.find_prev_next_waypoints(self.start_ndist)
    # Compute the starting position and heading: the heading points from the
    # start position towards the next waypoint on the center line.
    #start_point = self.center_line.interpolate(ndist, normalized=True)
    start_point = self.center_line.interpolate(self.start_ndist, normalized=True)
    start_yaw = math.atan2(self.center_line.coords[next_index][1] - start_point.y,
                           self.center_line.coords[next_index][0] - start_point.x)
    start_quaternion = Rotation.from_euler('zyx', [start_yaw, 0, 0]).as_quat()
    # Construct the model state and send to Gazebo.
    model_state = ModelState()
    model_state.model_name = 'racecar'
    model_state.pose.position.x = start_point.x
    model_state.pose.position.y = start_point.y
    model_state.pose.position.z = 0
    model_state.pose.orientation.x = start_quaternion[0]
    model_state.pose.orientation.y = start_quaternion[1]
    model_state.pose.orientation.z = start_quaternion[2]
    model_state.pose.orientation.w = start_quaternion[3]
    # Zero all velocities so the car starts from rest.
    model_state.twist.linear.x = 0
    model_state.twist.linear.y = 0
    model_state.twist.linear.z = 0
    model_state.twist.angular.x = 0
    model_state.twist.angular.y = 0
    model_state.twist.angular.z = 0
    self.racecar_service(model_state)
    for joint in EFFORT_JOINTS:
        self.clear_forces_client(joint)
    # Keep track of where to start the car next episode: alternate the
    # driving direction and advance the start point round-robin style.
    self.reverse_dir = not self.reverse_dir
    self.start_ndist = (self.start_ndist + ROUND_ROBIN_ADVANCE_DIST) % 1.0
    self.progress_at_beginning_of_race = self.progress
def find_prev_next_waypoints(self, ndist):
    """Locate the waypoints bracketing the normalized distance *ndist*.

    Returns a ``(prev_index, next_index)`` pair into ``self.center_dists``,
    taking the current driving direction (``self.reverse_dir``) into account
    and wrapping around at the ends of the track.
    """
    count = len(self.center_dists)
    if self.reverse_dir:
        # Driving backwards: the "next" waypoint is the one just below ndist.
        nxt = bisect.bisect_left(self.center_dists, ndist) - 1
        prv = nxt + 1
        if nxt < 0:
            nxt = count - 1
    else:
        nxt = bisect.bisect_right(self.center_dists, ndist)
        prv = nxt - 1
        if nxt >= count:
            nxt = 0
    return prv, nxt
def step(self, action):
    """Apply one continuous action ``[steering_angle, speed]`` and return
    ``(next_state, reward, done, info)`` in the Gym convention.

    Simulated actuation latency (``self.latency``, milliseconds) is injected
    by sleeping before the action is sent; a second sleep then pads the step
    so the overall rate matches ``self.sampling_sleep`` after subtracting
    the model inference time and the injected latency.
    """
    self.latency_steps = self.latency_steps + 1
    #print('latency set in env:',self.latency)
    #bookeeping when the action was send
    #self.cons_action_send_time.append([self.steps,time.time()])
    # NOTE(review): the code subtracts 2 ms (the original comment claimed
    # 10 ms) as the machine's baseline latency — confirm which calibration
    # is current.  Anything below 1 ms is treated as zero.
    latency = (self.latency - 2.0) / 1000.0
    if latency > 0.001:
        time.sleep(latency)
    else:
        latency = 0.0
    # Initialize next state, reward, done flag.
    self.next_state = None
    self.reward = None
    self.done = False
    # Send this action to Gazebo and increment the step count.
    self.steering_angle = float(action[0])
    self.speed = float(action[1])
    self.send_action(self.steering_angle, self.speed)
    self.steps += 1
    # Sleep so one full step takes ~sampling_sleep wall-clock seconds.
    to_sleep = (self.sampling_sleep - self.model_running_time - latency)
    if to_sleep > 0.001:
        time.sleep(to_sleep)
    # (disabled) periodic re-randomization of latency / sampling rate:
    # if self.latency_steps == self.latency_max_num_steps:
    #     #update the latency
    #     self.latency_index = (self.latency_index+1) % (len(self.latencies))
    #     self.latency = self.latencies[self.latency_index]
    #     #update the sampling rate
    #     self.sampling_rate_index = random.randint(0,1)
    #     self.sampling_rate = self.sampling_rates[self.sampling_rate_index]
    #     self.sampling_sleep = (1.0/self.sampling_rate)
    #     if (self.latency/1000.0)> self.sampling_sleep: # match sampling input to the model and latency
    #         self.sampling_rate = 1000.0/self.latency
    #     self.latency_steps = 0
    # Compute the next state and reward.
    self.infer_reward_state(self.steering_angle, self.speed)
    return self.next_state, self.reward, self.done, {}
def send_action(self, steering_angle, speed):
    """Publish the commanded steering angle and wheel speed to every car topic.

    The linear speed is converted to a wheel angular rate (v / r) before
    being published to the velocity controllers.
    """
    wheel_rpm = speed / WHEEL_RADIUS
    for publisher in self.velocity_pub_dict.values():
        publisher.publish(wheel_rpm)
    for publisher in self.steering_pub_dict.values():
        publisher.publish(steering_angle)
def callback_image(self, data):
    """ROS subscriber callback: cache the most recent camera frame message."""
    self.image = data
def callback_progress(self, data):
    """ROS subscriber callback: mirror the car's progress message onto the env.

    Copies pose, progress and distance fields verbatim; ``on_track`` is the
    negation of the message's ``off_track`` flag.
    """
    for field in ('progress', 'yaw', 'x', 'y', 'z',
                  'distance_from_center',
                  'distance_from_border_1',
                  'distance_from_border_2'):
        setattr(self, field, getattr(data, field))
    self.on_track = not (data.off_track)
def reward_function(self, on_track, x, y, distance_from_center,
                    throttle, steering, track_width):
    """Centerline-following reward, scaled by distance bands and throttle.

    The base reward is ``track_width - distance_from_center``; it is boosted
    when the car hugs the centerline, shrunk near the edges, and nearly
    zeroed when the car is about to leave the track.  Slow throttle values
    are mildly penalized (twice below 1.4).
    """
    marker_close = 0.1 * track_width
    marker_mid = 0.15 * track_width
    marker_far = 0.20 * track_width
    reward = track_width - distance_from_center
    if 0.0 <= distance_from_center <= marker_close:
        reward = reward * 2.5    # hugging the centerline: boosted
    elif distance_from_center <= marker_mid:
        reward = reward * 1.33
    elif distance_from_center <= marker_far:
        reward = reward * 0.71
    else:
        reward = 0.001           # may go close to off track
    # Penalize slow actions: each threshold the throttle is under costs 5%.
    for threshold in (1.6, 1.4):
        if throttle < threshold and reward > 0:
            reward *= 0.95
    return float(reward)
def infer_reward_state(self, steering_angle, throttle):
    """Compute ``self.next_state``, ``self.reward`` and ``self.done`` from
    the latest simulator feedback.

    Going off track ends the episode with the CRASHED penalty; otherwise
    the shaped centerline reward is used.  Episodes are capped at 500
    steps, with a 5x bonus on the final reward if the car survives.
    """
    # State has to be set first, because we need the most accurate reward signal.
    self.set_next_state()
    on_track = self.on_track
    done = False
    if on_track != 1:
        reward = CRASHED
        done = True
    else:
        reward = self.reward_function(on_track, self.x, self.y, self.distance_from_center,
                                      throttle, steering_angle, self.road_width)
        # After 500 steps in an episode we want to restart it.
        if self.steps == 500:
            done = True
            if reward > 0:  # car is not crashed
                reward = reward * 5.0  # bonus on completing 500 steps
    self.reward_in_episode += reward
    self.reward = reward
    self.done = done
def set_waypoints(self):
    """Populate ``self.waypoints`` (Nx2 center-line vertices) and
    ``self.road_width`` for the world selected by ``self.world_name``.

    Three hard-coded tracks are known: the medium 8-vertex loop, the easy
    2-point straight, and (default) the 30-vertex hard track.
    """
    if self.world_name.startswith(MEDIUM_TRACK_WORLD):
        # Medium track: 8-vertex loop, 0.50 road width.
        self.waypoints = vertices = np.zeros((8, 2))
        self.road_width = 0.50
        vertices[0][0] = -0.99; vertices[0][1] = 2.25;
        vertices[1][0] = 0.69; vertices[1][1] = 2.26;
        vertices[2][0] = 1.37; vertices[2][1] = 1.67;
        vertices[3][0] = 1.48; vertices[3][1] = -1.54;
        vertices[4][0] = 0.81; vertices[4][1] = -2.44;
        vertices[5][0] = -1.25; vertices[5][1] = -2.30;
        vertices[6][0] = -1.67; vertices[6][1] = -1.64;
        vertices[7][0] = -1.73; vertices[7][1] = 1.63;
    elif self.world_name.startswith(EASY_TRACK_WORLD):
        # Easy track: straight line between two points, 0.90 road width.
        self.waypoints = vertices = np.zeros((2, 2))
        self.road_width = 0.90
        vertices[0][0] = -1.08; vertices[0][1] = -0.05;
        vertices[1][0] = 1.08; vertices[1][1] = -0.05;
    else:
        # Default (hard track): 30 vertices, 0.44 road width.
        self.waypoints = vertices = np.zeros((30, 2))
        self.road_width = 0.44
        vertices[0][0] = 1.5; vertices[0][1] = 0.58;
        vertices[1][0] = 5.5; vertices[1][1] = 0.58;
        vertices[2][0] = 5.6; vertices[2][1] = 0.6;
        vertices[3][0] = 5.7; vertices[3][1] = 0.65;
        vertices[4][0] = 5.8; vertices[4][1] = 0.7;
        vertices[5][0] = 5.9; vertices[5][1] = 0.8;
        vertices[6][0] = 6.0; vertices[6][1] = 0.9;
        vertices[7][0] = 6.08; vertices[7][1] = 1.1;
        vertices[8][0] = 6.1; vertices[8][1] = 1.2;
        vertices[9][0] = 6.1; vertices[9][1] = 1.3;
        vertices[10][0] = 6.1; vertices[10][1] = 1.4;
        vertices[11][0] = 6.07; vertices[11][1] = 1.5;
        vertices[12][0] = 6.05; vertices[12][1] = 1.6;
        vertices[13][0] = 6; vertices[13][1] = 1.7;
        vertices[14][0] = 5.9; vertices[14][1] = 1.8;
        vertices[15][0] = 5.75; vertices[15][1] = 1.9;
        vertices[16][0] = 5.6; vertices[16][1] = 2.0;
        vertices[17][0] = 4.2; vertices[17][1] = 2.02;
        vertices[18][0] = 4; vertices[18][1] = 2.1;
        vertices[19][0] = 2.6; vertices[19][1] = 3.92;
        vertices[20][0] = 2.4; vertices[20][1] = 4;
        vertices[21][0] = 1.2; vertices[21][1] = 3.95;
        vertices[22][0] = 1.1; vertices[22][1] = 3.92;
        vertices[23][0] = 1; vertices[23][1] = 3.88;
        vertices[24][0] = 0.8; vertices[24][1] = 3.72;
        vertices[25][0] = 0.6; vertices[25][1] = 3.4;
        vertices[26][0] = 0.58; vertices[26][1] = 3.3;
        vertices[27][0] = 0.57; vertices[27][1] = 3.2;
        vertices[28][0] = 1; vertices[28][1] = 1;
        vertices[29][0] = 1.25; vertices[29][1] = 0.7;
def get_closest_waypoint(self):
    """Return the index of the waypoint nearest to the car's current (x, y).

    Uses squared distances, which preserves the argmin while avoiding a
    sqrt per waypoint; ties resolve to the lowest index, matching the
    original strict-less-than scan.  Returns 0 when there are no waypoints
    (the original loop's default).
    """
    if len(self.waypoints) == 0:
        return 0
    x, y = self.x, self.y
    return min(
        range(len(self.waypoints)),
        key=lambda i: (self.waypoints[i][0] - x) ** 2 + (self.waypoints[i][1] - y) ** 2,
    )
class DeepRacerDiscreteEnv(DeepRacerEnv):
    """DeepRacerEnv with a discrete action space.

    The action table is read from ``custom_files/model_metadata.json``;
    each entry maps an action index to a steering angle (degrees) and a
    speed value.
    """
    def __init__(self):
        DeepRacerEnv.__init__(self)
        with open('custom_files/model_metadata.json', 'r') as f:
            model_metadata = json.load(f)
        self.json_actions = model_metadata['action_space']
        self.action_space = spaces.Discrete(len(self.json_actions))
        print("Intialized action space")
        print(self.json_actions)
        print("num of actions", self.action_space)
    def step(self, action):
        """Map a discrete action index to the continuous pair and delegate."""
        action = int(action)
        # Convert discrete to continuous: metadata stores steering in
        # degrees, the continuous env expects radians.
        steering_angle = float(self.json_actions[action]['steering_angle']) * math.pi / 180.0
        throttle = float(self.json_actions[action]['speed'])
        continous_action = [steering_angle, throttle]
        return super().step(continous_action)
# TensorFlow 1.x session configuration: grow GPU memory on demand instead
# of reserving it all up front.
import tensorflow as tf
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
import json
# Discrete action table shared by the environment and the test driver below.
with open('custom_files/model_metadata.json', 'r') as f:
    model_metadata = json.load(f)
json_actions = model_metadata['action_space']
def get_session(frozen_graph):
    """Load a frozen TF1 graph from disk and return ``(session, x, y)``.

    ``x`` is the observation placeholder and ``y`` the policy output tensor
    of the PPO2 model; the tensor names are fixed by the export pipeline.
    Uses the module-level ``config`` (GPU memory growth enabled).
    """
    with tf.gfile.GFile(frozen_graph, "rb") as f:
        restored_graph_def = tf.GraphDef()
        restored_graph_def.ParseFromString(f.read())
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(restored_graph_def, name="", input_map=None)
    x = graph.get_tensor_by_name('ppo2_model/Ob:0')
    y = graph.get_tensor_by_name('ppo2_model/pi_1/add:0')
    sess = tf.Session(graph=graph, config=config)
    return sess, x, y
# Automate the testing of the models.
def test_in_simulator(sess, latency, sampling_sleep, total_steps, direction, x, y):
    """Roll out the frozen policy in the simulator for ``total_steps`` steps.

    Configures a fresh DeepRacerDiscreteEnv with the requested latency and
    sampling period (``direction == 2`` reverses the driving direction),
    then repeatedly runs episodes, picking the argmax action from the
    policy tensor ``y`` fed with observations on placeholder ``x``.

    Returns ``(total_rewards, Actions_Taken, dist_and_speed, time_taken)``
    — per-step rewards, [steering, throttle] pairs, the env's collected
    distance/speed log, and per-step wall-clock durations.
    """
    time_taken = []
    Actions_Taken = []  # stores speed and steering of the actions taken
    total_rewards = []
    env = DeepRacerDiscreteEnv()
    env.sampling_sleep = sampling_sleep
    env.sampling_rate = 1.0 / (env.sampling_sleep)
    #print(env.sampling_sleep, env.sampling_rate)
    env.latency = latency
    env.dist_and_speed = []
    if direction == 2:  # when 2, we want to reverse the direction
        env.reverse_dir = not env.reverse_dir
    steps_done = 0
    local_steps = 0
    obs = env.reset()
    # Warmup inference call so one-time graph initialization is not timed.
    obs = obs.reshape(1, 120, 160, 1)
    action = sess.run(y, feed_dict={x: obs})
    while local_steps <= total_steps:
        done = False
        obs = env.reset()
        while not done and local_steps <= total_steps:
            t1 = time.time()
            obs = obs.reshape(1, 120, 160, 1)
            action = sess.run(y, feed_dict={x: obs})
            action = np.argmax(action)
            steering_angle = json_actions[action]['steering_angle']
            throttle = json_actions[action]['speed']
            Actions_Taken.append([steering_angle, throttle])
            # Updating the exact model runtime so the env can pad its sleep.
            env.model_running_time = (time.time() - t1)
            obs, rew, done, _ = env.step(action)
            total_rewards.append(rew)
            t2 = time.time()
            time_taken.append(t2 - t1)
            local_steps = local_steps + 1
            if done:
                obs = env.reset()
    dist_and_speed = env.dist_and_speed
    del env
    return total_rewards, Actions_Taken, dist_and_speed, time_taken
# save the data
import pickle
def save_data(path, total_rewards, dist_and_speed, Actions_Taken):
    """Pickle the collected run statistics to *path* as a three-element list."""
    with open(path, 'wb') as f:
        print("Saving the data", path)
        payload = [total_rewards, dist_and_speed, Actions_Taken]
        pickle.dump(payload, f)
def do_testing(sess, x, y, exp_name):
    """Run the simulator sweep for one experiment.

    For every configured (latency, sampling_sleep, direction) triple,
    rolls out the policy and pickles the results under ``data_folder``.
    Relies on module-level ``latencies``, ``sampling_sleeps``,
    ``directions``, ``steps`` and ``data_folder`` defined elsewhere in
    the file.
    """
    for i in range(len(latencies)):
        latency = latencies[i]
        sampling_sleep = sampling_sleeps[i]
        direction = directions[i]
        total_rewards, Actions_Taken, dist_and_speed, time_taken = test_in_simulator(sess, latency, sampling_sleep, steps, direction, x, y)
        # Output name encodes experiment, direction and latency setting.
        path = data_folder + exp_name + '_' + str(direction) + '_' + str(latency)
        save_data(path, total_rewards, dist_and_speed, Actions_Taken)
        del total_rewards, Actions_Taken, dist_and_speed, time_taken
# Evaluate every frozen model: build a session per graph, run the full
# latency/direction sweep, then release the TF session before the next model.
for i in range(len(paths)):
    frozen_graph = paths[i]
    exp_name = experiments[i]
    sess, x, y = get_session(frozen_graph)
    do_testing(sess, x, y, exp_name)
    del sess, x, y
print('done')
| [
"sandha.iitr@gmail.com"
] | sandha.iitr@gmail.com |
d39c8a61833fc2f4123d6803bf8dce614ed0a12a | cfc9a8831e5946d738329fad2763d643dec8566f | /src/encoded/tests/test_create_mapping.py | 44d89dea3b1ec2d190ef281061e331a2302547be | [
"MIT"
] | permissive | emi80/encoded | 8e244a66b0d36610dcf8d9a47d385640dfa7987d | 2fe2c2afbd3be21b65b10a189a3bd623ecdaee37 | refs/heads/master | 2021-01-18T12:34:56.060690 | 2015-03-05T21:56:05 | 2015-03-05T21:56:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 299 | py | import pytest
from ..loadxl import ORDER
@pytest.mark.parametrize('item_type', ORDER)
def test_create_mapping(registry, item_type):
    """Smoke-test that a type mapping can be built for every item type in
    load order (``registry`` is supplied by a pytest fixture)."""
    from ..commands.create_mapping import type_mapping
    from ..contentbase import TYPES
    mapping = type_mapping(registry[TYPES], item_type)
    assert mapping
| [
"laurence@lrowe.co.uk"
] | laurence@lrowe.co.uk |
9c93902848978428f5ced5f6c21786783cea6d85 | a4623b72797d87baf17ca48406e36da8af78e7eb | /backend/settings.py | b4dcb752ef5348911c4f31d20ef9747037555f2c | [] | no_license | janefwp/task | f410965a381769864f51d485f72e5c4a0738ebad | f7366c9f8e77a3927acaff3758916779e670cf53 | refs/heads/main | 2023-04-13T19:47:36.614907 | 2021-04-25T09:02:43 | 2021-04-25T09:02:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,957 | py | """
Django settings for backend project.
Generated by 'django-admin startproject' using Django 3.1.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from datetime import timedelta
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '#js5i*nzoc5w(4a2v@+m@i0j8z^7()6+ne#^@q^%iur06fdg&7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', 'localhost', '.herokuapp.com',
'https://hospitalbooksystem.herokuapp.com/']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'corsheaders',
'storages',
'base.apps.BaseConfig',
]
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_simplejwt.authentication.JWTAuthentication',
)
}
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'backend.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'frontend/build')
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'backend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/images/'
STATICFILES_DIRS = [
BASE_DIR / 'static',
BASE_DIR / 'frontend/build/static'
]
MEDIA_ROOT = BASE_DIR / 'static/images'
STATIC_ROOT = BASE_DIR / 'staticfiles'
CORS_ALLOW_ALL_ORIGINS = True
if os.getcwd() == '/app':
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# SECURE_SSL_REDIRECT = True
DEBUG = False
| [
"cuij1012@gmail.com"
] | cuij1012@gmail.com |
7dc54bea595127ad1357734ba33347c1f4cb9188 | c26e98676846ab321065e416ee8b3e2f5b1d4b43 | /PyPoll_Challenge.py | 6fa47eaa8b51b4dfb0beb98ed3f51a144c38e5f7 | [] | no_license | jaredcclarke/Election_Analysis | d510f401555515fdf8e601bfdab5b401b9ef9de9 | 512c2ee41ed8aec440e6e3b7f3459e58211a4e51 | refs/heads/main | 2022-12-26T15:11:30.816855 | 2020-10-12T06:03:51 | 2020-10-12T06:03:51 | 301,816,774 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,497 | py | # -*- coding: UTF-8 -*-
# """PyPoll Homework Challenge Solution."""
# Add our dependencies.
import csv
import os
# Add a variable to load a file from a path.
file_to_load = os.path.join("Resources", "election_results.csv")
# Add a variable to save the file to a path.
file_to_save = os.path.join("analysis", "election_analysis.txt")
# Initialize a total vote counter.
total_votes = 0
total_county_votes = 0
# Candidate Options and candidate votes.
candidate_options = []
candidate_votes = {}
# 1: Create a county list and county votes dictionary.
county_list = []
county_votes = {}
# Track the winning candidate, vote count and percentage
winning_candidate = ""
winning_count = 0
winning_percentage = 0
# 2: Track the largest county and county voter turnout.
county_largest_turnout = ""
largest_turnout_count = 0
largest_county_percentage = 0
# Read the election CSV and tally votes per candidate and per county.
with open(file_to_load) as election_data:
    reader = csv.reader(election_data)
    # Skip the header row.
    header = next(reader)
    # For each ballot row in the CSV file:
    for row in reader:
        # Add to the total vote count.
        total_votes = total_votes + 1
        # Get the candidate name from each row.
        candidate_name = row[2]
        # First time we see a candidate, register them with a zero count.
        if candidate_name not in candidate_options:
            candidate_options.append(candidate_name)
            candidate_votes[candidate_name] = 0
        # Add a vote to that candidate's count.
        candidate_votes[candidate_name] += 1
        # NOTE(review): this rebinds total_county_votes to total_votes + 1
        # on every row instead of accumulating anything, and the value is
        # never read afterwards — confirm whether it can be removed.
        total_county_votes = total_votes + 1
        # Extract the county name from each row.
        county_name = row[1]
        # First time we see a county, register it with a zero count.
        if county_name not in county_list:
            county_list.append(county_name)
            county_votes[county_name] = 0
        # Add a vote to that county's vote count.
        county_votes[county_name] += 1
# Save the results to our text file (and echo them to the terminal).
with open(file_to_save, "w") as txt_file:
    # Overall header with the grand total of votes.
    election_results = (
        f"\nElection Results\n"
        f"-------------------------\n"
        f"Total Votes: {total_votes:,}\n"
        f"-------------------------\n\n"
        f"County Votes:\n")
    print(election_results, end="")
    txt_file.write(election_results)
    # Per-county results: percentage of the grand total plus raw count.
    for county_name in county_votes:
        votes_county = county_votes[county_name]
        county_vote_percentage = float(votes_county) / float(total_votes) * 100
        county_results = (
            f"{county_name}: {county_vote_percentage:.1f}% ({votes_county:,})\n")
        print(county_results)
        txt_file.write(county_results)
        # Track the county with the largest turnout while iterating.
        if (votes_county > largest_turnout_count) and (county_vote_percentage > largest_county_percentage):
            largest_turnout_count = votes_county
            largest_county_percentage = county_vote_percentage
            county_largest_turnout = county_name
    # Summary of the county with the largest turnout.
    largest_turnout_summary = (
        f"\n"
        f"-------------------------\n"
        f"Largest County Turnout: {county_largest_turnout}\n"
        f"-------------------------\n")
    print(largest_turnout_summary)
    txt_file.write(largest_turnout_summary)
    # Per-candidate results, plus running winner determination.
    for candidate_name in candidate_votes:
        votes = candidate_votes.get(candidate_name)
        vote_percentage = float(votes) / float(total_votes) * 100
        candidate_results = (
            f"{candidate_name}: {vote_percentage:.1f}% ({votes:,})\n")
        print(candidate_results)
        txt_file.write(candidate_results)
        # Determine winning vote count, winning percentage, and candidate.
        if (votes > winning_count) and (vote_percentage > winning_percentage):
            winning_count = votes
            winning_candidate = candidate_name
            winning_percentage = vote_percentage
    # Summary of the winning candidate.
    winning_candidate_summary = (
        f"-------------------------\n"
        f"Winner: {winning_candidate}\n"
        f"Winning Vote Count: {winning_count:,}\n"
        f"Winning Percentage: {winning_percentage:.1f}%\n"
        f"-------------------------\n")
    print(winning_candidate_summary)
    txt_file.write(winning_candidate_summary)
| [
"jared@Jareds-Air.fios-router.home"
] | jared@Jareds-Air.fios-router.home |
007d263722520390990a2b56a70c7cb4328ec8b9 | 2846b0779aec695c3d2b6673e274a14f5bad374b | /Session info to table/macOS app/Tracks to Table/tracks_to_table.py | f7d4b2789f5e0e70306dadea74eacbeb13a6aed0 | [
"MIT"
] | permissive | fantopop/post-production-scripts | 43aeeec3cd2f0e21b487ce1e8a6d762f4b79a333 | 8192499d6ba716a0f72094c63c167cd2ae384eab | refs/heads/master | 2022-04-30T15:44:37.249831 | 2022-04-19T15:32:34 | 2022-04-19T15:32:34 | 91,468,867 | 24 | 1 | null | null | null | null | UTF-8 | Python | false | false | 17,676 | py | #!/usr/bin/python
"""
tracks_to_table.py
Author: Ilya Putilin
https://github.com/fantopop/post-production-scripts
Special thanks to Philippe Lagadec for HTML.py module for generating HTML tables.
http://www.decalage.info/python/html
This scripts converts .txt file, that could be exported from Pro Tools
using "Export Session Info as Text" command into .csv file.
This CSV file can be easily opened with Number app.
There are two formats available:
- with TRACK_NAME column as one table.
- grouped by TRACK NAME with [--tracks] option.
"""
import sys, csv, argparse
from os import path
# Separator.
sep = '\t'
header = ['#', 'EVENT', 'START', 'END', 'DURATION']
footer = [''] * 5
# TABLE_STYLE_THINBORDER = "border: 1px solid #000000; border-collapse: collapse;"
TABLE_STYLE_THINBORDER = ""
table_style = 'table {border-collapse: collapse;} th, td {border: 1px solid #ccc;padding: 8px;}'
#--- CONSTANTS -----------------------------------------------------------------
# Table style to get thin black lines in Mozilla/Firefox instead of 3D borders
TABLE_STYLE_THINBORDER = "border: 1px solid #000000; border-collapse: collapse;"
#TABLE_STYLE_THINBORDER = "border: 1px solid #000000;"
#=== CLASSES ===================================================================
class TableCell (object):
    """
    a TableCell object is used to create a cell in a HTML table. (TD or TH)

    Attributes:
    - text: text in the cell (may contain HTML tags). May be any object which
      can be converted to a string using str().
    - header: bool, false for a normal data cell (TD), true for a header cell (TH)
    - bgcolor: str, background color
    - width: str, width
    - align: str, horizontal alignement (left, center, right, justify or char)
    - char: str, alignment character, decimal point if not specified
    - charoff: str, see HTML specs
    - valign: str, vertical alignment (top|middle|bottom|baseline)
    - style: str, CSS style
    - attribs: dict, additional attributes for the TD/TH tag

    Reference: http://www.w3.org/TR/html4/struct/tables.html#h-11.2.6
    """

    def __init__(self, text="", bgcolor=None, header=False, width=None,
                 align=None, char=None, charoff=None, valign=None, style=None,
                 attribs=None):
        """TableCell constructor"""
        self.text = text
        self.bgcolor = bgcolor
        self.header = header
        self.width = width
        self.align = align
        self.char = char
        self.charoff = charoff
        self.valign = valign
        self.style = style
        self.attribs = attribs
        # `is None` (identity) rather than `== None`; also avoids sharing a
        # mutable default dict between instances.
        if attribs is None:
            self.attribs = {}

    def __str__(self):
        """return the HTML code for the table cell as a string"""
        attribs_str = ""
        # Fold the convenience attributes into the generic attribs dict.
        if self.bgcolor: self.attribs['bgcolor'] = self.bgcolor
        if self.width: self.attribs['width'] = self.width
        if self.align: self.attribs['align'] = self.align
        if self.char: self.attribs['char'] = self.char
        if self.charoff: self.attribs['charoff'] = self.charoff
        if self.valign: self.attribs['valign'] = self.valign
        if self.style: self.attribs['style'] = self.style
        for attr in self.attribs:
            attribs_str += ' %s="%s"' % (attr, self.attribs[attr])
        # Only a missing (None) or genuinely empty text falls back to a
        # non-breaking space; falsy-but-real values such as 0 must render.
        # (The original `if self.text:` blanked out numeric zeros.)
        if self.text is None or str(self.text) == "":
            # An empty cell should at least contain a non-breaking space
            text = '&nbsp;'
        else:
            text = str(self.text)
        if self.header:
            return ' <TH%s>%s</TH>\n' % (attribs_str, text)
        else:
            return ' <TD%s>%s</TD>\n' % (attribs_str, text)
#-------------------------------------------------------------------------------
class TableRow (object):
    """
    a TableRow object is used to create a row in a HTML table. (TR tag)

    Attributes:
    - cells: list, tuple or any iterable, containing one string or TableCell
      object for each cell
    - header: bool, true for a header row (TH), false for a normal data row (TD)
    - bgcolor: str, background color
    - col_align, col_valign, col_char, col_charoff, col_styles: see Table class
    - attribs: dict, additional attributes for the TR tag

    Reference: http://www.w3.org/TR/html4/struct/tables.html#h-11.2.5
    """

    def __init__(self, cells=None, bgcolor=None, header=False, attribs=None,
                 col_align=None, col_valign=None, col_char=None,
                 col_charoff=None, col_styles=None):
        """TableRow constructor"""
        self.bgcolor = bgcolor
        self.cells = cells
        self.header = header
        self.col_align = col_align
        self.col_valign = col_valign
        self.col_char = col_char
        self.col_charoff = col_charoff
        self.col_styles = col_styles
        self.attribs = attribs
        if attribs is None:
            self.attribs = {}

    def __str__(self):
        """return the HTML code for the table row as a string"""
        attribs_str = ""
        if self.bgcolor: self.attribs['bgcolor'] = self.bgcolor
        for attr in self.attribs:
            attribs_str += ' %s="%s"' % (attr, self.attribs[attr])
        if self.header:
            result = '<THEAD>'
        else:
            result = ''
        result += ' <TR%s>\n' % attribs_str
        # enumerate() instead of the original `self.cells.index(cell)`:
        # list.index returns the FIRST equal element, so rows containing
        # duplicate cell values applied the wrong per-column attributes
        # (and cost O(n^2)).
        for col, cell in enumerate(self.cells):
            if not isinstance(cell, TableCell):
                cell = TableCell(cell, header=self.header)
            # apply column alignment if specified:
            if self.col_align and cell.align is None:
                cell.align = self.col_align[col]
            if self.col_char and cell.char is None:
                cell.char = self.col_char[col]
            if self.col_charoff and cell.charoff is None:
                cell.charoff = self.col_charoff[col]
            if self.col_valign and cell.valign is None:
                cell.valign = self.col_valign[col]
            # apply column style if specified:
            if self.col_styles and cell.style is None:
                cell.style = self.col_styles[col]
            result += str(cell)
        result += ' </TR>\n'
        if self.header:
            result += '</THEAD>'
        return result
#-------------------------------------------------------------------------------
class Table (object):
    """
    a Table object is used to create a HTML table. (TABLE tag)

    Attributes:
    - rows: list, tuple or any iterable, containing one iterable or TableRow
      object for each row
    - header_row: list, tuple or any iterable, containing the header row (optional)
    - border: str or int, border width
    - style: str, table style in CSS syntax (thin black borders by default)
    - width: str, width of the table on the page
    - attribs: dict, additional attributes for the TABLE tag
    - col_width: list or tuple defining width for each column
    - col_align: list or tuple defining horizontal alignment for each column
    - col_char: list or tuple defining alignment character for each column
    - col_charoff: list or tuple defining charoff attribute for each column
    - col_valign: list or tuple defining vertical alignment for each column
    - col_styles: list or tuple of HTML styles for each column

    Reference: http://www.w3.org/TR/html4/struct/tables.html#h-11.2.1
    """

    def __init__(self, rows=None, border='1', style=None, width=None,
                 cellspacing=None, cellpadding=4, attribs=None, header_row=None,
                 col_width=None, col_align=None, col_valign=None,
                 col_char=None, col_charoff=None, col_styles=None):
        """Table constructor"""
        self.border = border
        self.style = style
        # style for thin borders by default (`is None`, not `== None`)
        if style is None: self.style = TABLE_STYLE_THINBORDER
        self.width = width
        self.cellspacing = cellspacing
        self.cellpadding = cellpadding
        self.header_row = header_row
        self.rows = rows
        if not rows: self.rows = []
        self.attribs = attribs
        if not attribs: self.attribs = {}
        self.col_width = col_width
        self.col_align = col_align
        self.col_char = col_char
        self.col_charoff = col_charoff
        self.col_valign = col_valign
        self.col_styles = col_styles

    def __str__(self):
        """return the HTML code for the table as a string"""
        attribs_str = ""
        if self.border: self.attribs['border'] = self.border
        if self.style: self.attribs['style'] = self.style
        if self.width: self.attribs['width'] = self.width
        if self.cellspacing: self.attribs['cellspacing'] = self.cellspacing
        if self.cellpadding: self.attribs['cellpadding'] = self.cellpadding
        for attr in self.attribs:
            attribs_str += ' %s="%s"' % (attr, self.attribs[attr])
        result = '<TABLE%s>\n' % attribs_str
        # insert column tags and attributes if specified:
        if self.col_width:
            for width in self.col_width:
                result += ' <COL width="%s">\n' % width
        # First insert a header row if specified:
        if self.header_row:
            if not isinstance(self.header_row, TableRow):
                result += str(TableRow(self.header_row, header=True))
            else:
                result += str(self.header_row)
        # Then all data rows:
        for row in self.rows:
            if not isinstance(row, TableRow):
                row = TableRow(row)
            # apply column alignments and styles to each row if specified:
            # (Mozilla bug workaround)
            if self.col_align and not row.col_align:
                row.col_align = self.col_align
            if self.col_char and not row.col_char:
                row.col_char = self.col_char
            if self.col_charoff and not row.col_charoff:
                row.col_charoff = self.col_charoff
            if self.col_valign and not row.col_valign:
                row.col_valign = self.col_valign
            if self.col_styles and not row.col_styles:
                row.col_styles = self.col_styles
            result += str(row)
        result += '</TABLE>'
        return result
def table(*args, **kwargs):
    """Convenience wrapper: build a Table and render it to an HTML string.

    See the Table class for the accepted parameters.
    """
    rendered = Table(*args, **kwargs)
    return str(rendered)
#-------------------------------------------------------------------------------
tab = ' '
class Tag():
    '''
    Context-manager helper that emits a matched pair of opening and
    closing tags, indenting nested content through an HTML instance.
    Intended usage (with the "with" statement):
        with Tag('tag', HTML-instance):
            <code>
    '''
    def __init__(self, name, HTML):
        self.name = name
        self.HTML = HTML
    def __enter__(self):
        doc = self.HTML
        doc.content += '%s<%s>\n' % (tab * doc.indent, self.name)
        doc.indent += 1
    def __exit__(self, exc_type, exc_value, traceback):
        doc = self.HTML
        doc.indent -= 1
        doc.content += '%s</%s>\n' % (tab * doc.indent, self.name)
class HTML():
    '''
    Accumulates generated HTML source, tracking the current indentation
    level, and exposes common html tags as methods that return Tag()
    context managers. Typical usage:
        h = HTML()
        with h.html():
            with h.head():
                with h.title()
                    h.add('Hello world page')
            with h.body():
                with h.h1():
                    h.add('Hello World!')
                with h.p():
                    h.add('This is the HTML code')
        print(str(h))
    '''
    def __init__(self):
        # Every document starts with the HTML5 doctype at indent level 0.
        self.indent = 0
        self.content = '<!DOCTYPE html>\n'
    def __str__(self):
        return self.content
    def add(self, text):
        # Prefix every line of (possibly multi-line) text with the
        # current indentation before appending it to the document.
        prefix = tab * self.indent
        self.content += ''.join('%s%s\n' % (prefix, line) for line in text.split('\n'))
    def _tag(self, name):
        # Shared factory behind all of the tag helper methods below.
        return Tag(name, self)
    def html(self):
        return self._tag('html')
    def body(self):
        return self._tag('body')
    def head(self):
        return self._tag('head')
    def title(self):
        return self._tag('title')
    def h1(self):
        return self._tag('h1')
    def h2(self):
        return self._tag('h2')
    def style(self):
        return self._tag('style')
    def p(self):
        return self._tag('p')
#-------------------------------------------------------------------------------
class Track():
    '''
    Container for one track: its name plus an (initially empty) list of
    EDL events, each of the form
    [NUMBER, CLIP_NAME, START TC, END TC, DURATION TC]
    '''
    def __init__(self, name):
        # Events are appended later while the session file is parsed.
        self.name = name
        self.events = list()
class Session():
    '''
    Session() instance reads .txt file, exported from Pro Tools and
    stores every tracks EDL as list of Track() instances.
    Supports export to .csv and .html formats.
    '''
    def __init__(self, filename):
        """Parse the tab-separated session info from *filename* (an already
        open file object) into self.tracks."""
        # Open session info file for reading
        csv_reader = csv.reader(filename, dialect='excel-tab')
        # Create array for Track() objects
        self.tracks = []
        for raw_row in csv_reader:
            # Check, whether the row is not empty.
            if raw_row:
                # Remove all whitespaces from start and end of the cells.
                row = [cell.strip() for cell in raw_row]
                # Get track name.
                if row[0].startswith('TRACK NAME:'):
                    track = Track(name=row[1])
                    self.tracks.append(track)
                    continue
                # Skip original header lines.
                if row[0].startswith('CHANNEL'):
                    continue
                # Data rows: keep columns 1..5 (event number .. duration TC).
                if len(row) > 6:
                    track.events.append(row[1:6])
    def to_csv(self, filename):
        """Write every track's EDL to *filename* as tab-separated text.

        NOTE(review): 'header' and 'footer' appear to be module-level
        constants defined elsewhere in this file — confirm.
        """
        with open(filename, 'w') as outputfile:
            csv_writer = csv.writer(outputfile, dialect='excel-tab')
            for track in self.tracks:
                csv_writer.writerow([''] + [track.name] + ['']*3)
                csv_writer.writerow(header)
                for line in track.events:
                    csv_writer.writerow(line)
                csv_writer.writerow(footer)
    def to_html(self, filename):
        """Render every track as an HTML table and write the page to
        *filename*, with print-friendly CSS in the document head."""
        h = HTML()
        with h.html():
            with h.head():
                h.add('<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">')
                with h.title():
                    # Add document title
                    h.add(filename.split('.')[-2].split('/')[-1])
                with h.style():
                    h.add('@media print {')
                    h.indent += 1
                    # Add page break after each track's table when printing
                    h.add('TABLE { page-break-after: always}')
                    # Configure correct display of table over multiple printing pages
                    h.add('TR { page-break-inside:avoid; page-break-after:auto }')
                    h.add('TD { page-break-inside:avoid; page-break-after:auto }')
                    h.add('THEAD { display:table-header-group }')
                    h.add('TFOOT { display:table-footer-group }')
                    # Set default landscape orientation when printing
                    h.add('@page {size: landscape;}}')
                    h.indent -= 1
                    h.add(table_style)
            with h.body():
                for track in self.tracks:
                    # Add track name as header
                    with h.h2():
                        h.add(track.name)
                    # Add track's EDL table
                    h.add(table(track.events,
                                header_row=header,
                                width='100%',
                                border=None,
                                cellpadding=None,
                                col_width=['2.5%', '', '5%', '5%', '5%'],
                                col_align=['center', 'left', 'center', 'center', 'center'],
                                style=TABLE_STYLE_THINBORDER
                                ))
        with open(filename, 'w') as outputfile:
            outputfile.write(str(h))
    def export(self, filename, to):
        """Export the session to 'csv' or 'html' next to *filename*,
        deriving the output name via outname()."""
        outputfile = outname(filename, to)
        if to == 'csv':
            self.to_csv(outputfile)
        else:
            self.to_html(outputfile)
        print('Source: ' + filename)
        print('Result: ' + outputfile)
def outname(filename, ext='csv'):
    """
    Constructs output filename from input file by replacing its
    extension with *ext* (default 'csv'); a missing extension is
    simply appended.
    Example:
        input.txt >>> input.csv
    """
    # os.path.splitext handles multi-dot names and dotfiles correctly,
    # unlike the previous manual '.'-splitting (which also duplicated
    # the directory component for extension-less paths).
    root, _ = path.splitext(path.basename(filename))
    return path.join(path.dirname(filename), root + '.' + ext)
def main():
    """Command-line entry point: parse arguments, read the Pro Tools
    session-info text file and export it as .csv or .html."""
    parser = argparse.ArgumentParser(
        description="Converts '.txt' file from Pro Tools 'Export Session Info as Text' command to '.csv' or '.html' file")
    parser.add_argument(
        # The universal-newline 'U' mode flag was deprecated and finally
        # removed in Python 3.11; plain 'r' already gives universal
        # newlines in Python 3, so behavior is unchanged.
        'txt', metavar='textfile', type=argparse.FileType(mode='r'),
        help='session info text file from Pro Tools')
    parser.add_argument(
        '--to', choices=['csv', 'html'], required=True,
        help='export format: "csv" or "html"')
    args = parser.parse_args()
    # Read session info to Session() object
    session = Session(args.txt)
    args.txt.close()
    # Export to the file of chosen format.
    session.export(filename=args.txt.name, to=args.to)
if __name__ == '__main__':
    main()
| [
"fantopop@gmail.com"
] | fantopop@gmail.com |
04d3c52147bf2d6f5af145bd01926191bd945680 | d4af57cf5b00e13e78966b20b21c0a052d078d3a | /Lab1/Source/wsgi.py | 617a28297304cf4bfa7b756309ba4adf0c6fed80 | [] | no_license | Bersik/Web-Technology-Course | e717aab6df994e21bc6bb4c3944d094bf95328f9 | 3d955921b8187987d86e3339aedba6c4bf9cf01c | refs/heads/master | 2021-05-04T06:56:27.566335 | 2016-12-13T02:14:35 | 2016-12-13T02:14:35 | 70,544,089 | 0 | 1 | null | 2016-12-03T21:45:11 | 2016-10-11T01:32:59 | Python | UTF-8 | Python | false | false | 388 | py | """
WSGI config for Lab1 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project's settings module before building the app.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Source.settings")
# Module-level WSGI callable picked up by WSGI servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| [
"k.sergyk@gmail.com"
] | k.sergyk@gmail.com |
d547d4935857ad9e36b591b0da66d196ef409bef | 51903bfb827916bcf437cb29bf0eeefc438685e3 | /337easy.py | c18de0d1e97c766789122fad9d5b0573eb2f973d | [] | no_license | flyingdan/dailyprogrammer | 186a82e9324a7948058fe66291f9b9ab1828a8f8 | 684f3cb6d83478ddeb125805d69b81a773e22196 | refs/heads/master | 2021-07-19T06:35:20.670278 | 2017-10-25T20:10:07 | 2017-10-25T20:10:07 | 105,844,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | # https://www.reddit.com/r/dailyprogrammer/comments/784fgr/20171023_challenge_337_easy_minimize_maximize/
# Area of circle = (pi)r^2
# Circumference of circle = 2(pi)r
# Area of sector covering x of circumference = rx/2
# x - angle subtended by arc | [
"1051205+flyingdan@users.noreply.github.com"
] | 1051205+flyingdan@users.noreply.github.com |
8fc10d35f9fa5cced3f4939ab0d2ca50d42ab5cb | b5dbf732d26a2a924c85c5a107035be48bfe69cd | /2.7.py | a41cca6bfe45aaf10f7b7a81df3ea5680c11f318 | [] | no_license | Beks667/2.7Hw | 2435bfa58e252357c46819f6987639ca025549be | 4e03706bdfc70f2f94145a50f493f36995d08cdb | refs/heads/main | 2023-04-19T13:10:24.348768 | 2021-05-07T12:44:27 | 2021-05-07T12:44:27 | 365,230,069 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,341 | py | # class Phone :
# def __init__ (self,brand,model,color):
# self.brand = brand
# self.model = model
# self.color = color
# def show (self):
# print(f"{self.brand},{self.model},{self.color}")
# phone = Phone("Apple", "XS", "black")
# phone.show()
# class Monkey:
# max_age = 12
# loves_bananas = True
# def climb(self):
# print('I am climbing the tree')
# abc = Monkey()
# abc.climb()
# print(abc.max_age)
# abc.climb()
# print(abc.loves_bananas)
# Это через input----------------------------------------------------------------
# class Person:
# def __init__(self,name,age,gender):
# self.name = name
# self.age = age
# self.gender = gender
# def calculate_age(self):
# self.number = int(input('enter year:'))
# print(self.age + self.number)
# p = Person('John', 23, 'male')
# p.calculate_age()
# #Это через self-----------------------------------------------------------------------
# class Person:
# def __init__(self,name,age,gender):
# self.name = name
# self.age = age
# self.gender = gender
# def calculate_age(self,year):
# self.year = year
# print(self.age + self.year)
# p = Person('John', 23, 'male')
# p.calculate_age(10)
# | [
"you@example.com"
] | you@example.com |
f7076115a366f407af38c60d3ee22cb4242a040a | 4a008af61a508c73a41d6907b57272b16bbf4b32 | /ephios/modellogging/models.py | eaddb22fb930e22cc477e58f5723af00c7e68613 | [
"MIT"
] | permissive | alexanderKuehling/ephios | dbe6cf9198d1f9b5c9bb27927726c32271c11eda | 2bec784b1bf5300201701ae1710f699b95cdd0e3 | refs/heads/main | 2023-05-08T02:22:23.163347 | 2021-05-06T16:20:07 | 2021-05-06T16:20:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,744 | py | from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.functional import cached_property
from django.utils.translation import gettext_lazy as _
from ephios.modellogging.json import LogJSONDecoder, LogJSONEncoder
from ephios.modellogging.recorders import (
InstanceActionType,
capitalize_first,
recorder_types_by_slug,
)
# pylint: disable=protected-access
class LogEntry(models.Model):
    """Audit-log entry recording an action performed on a model instance.

    The entry references two (possibly identical) objects via generic
    foreign keys: ``content_object`` is the object the entry describes,
    while ``attached_to_object`` is a second object the entry is
    associated with.
    """
    # Generic FK (content type + id) to the logged object itself.
    content_type = models.ForeignKey(
        ContentType,
        on_delete=models.CASCADE,
        related_name="logentries",
    )
    content_object_id = models.PositiveIntegerField(db_index=True)
    content_object = GenericForeignKey("content_type", "content_object_id")
    # Generic FK to the object this entry is attached to.
    attached_to_object_type = models.ForeignKey(
        ContentType, on_delete=models.CASCADE, related_name="associated_logentries"
    )
    attached_to_object_id = models.PositiveIntegerField(db_index=True)
    attached_to_object = GenericForeignKey("attached_to_object_type", "attached_to_object_id")
    datetime = models.DateTimeField(auto_now_add=True)
    # Acting user; PROTECT keeps the audit trail from losing its author.
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        null=True,
        blank=True,
        on_delete=models.PROTECT,
        related_name="logging_entries",
    )
    action_type = models.CharField(
        max_length=255, choices=[(value, value) for value in InstanceActionType]
    )
    # max_length=36 fits a canonical UUID string — presumably a per-request id.
    request_id = models.CharField(max_length=36, null=True, blank=True)
    data = models.JSONField(default=dict, encoder=LogJSONEncoder, decoder=LogJSONDecoder)
    class Meta:
        ordering = ("-datetime", "-id")
        verbose_name = _("Log entry")
        verbose_name_plural = _("Log entries")
    @cached_property
    def records(self):
        """Yield deserialized recorder objects stored in ``data``.

        NOTE(review): this is a generator cached by ``cached_property``,
        so it can only be iterated once per instance — confirm callers
        expect that.
        """
        recorder_types = recorder_types_by_slug(self.content_type.model_class())
        for recorder in self.data.values():
            # Skip values that are not recorder payloads (e.g. the stored
            # "__str__" entry read by content_object_or_str below).
            if not isinstance(recorder, dict) or "slug" not in recorder:
                continue
            yield recorder_types[recorder["slug"]].deserialize(
                recorder["data"], self.content_type.model_class(), self.action_type
            )
    @property
    def content_object_classname(self):
        """Capitalized verbose name of the logged object's model class."""
        return capitalize_first(self.content_type.model_class()._meta.verbose_name)
    @property
    def content_object_or_str(self):
        """The live object, or its stored string representation otherwise."""
        return self.content_object or self.data.get("__str__")
    def __str__(self):
        if self.content_object:
            return f"{self.action_type} {type(self.content_object)._meta.verbose_name} {str(self.content_object)}"
        return f"{self.action_type} {self.content_type.model} {self.content_object_or_str}"
| [
"noreply@github.com"
] | noreply@github.com |
5934e0899c738d89b998b2594786891958736c99 | 933a4f98b3ab1df987bce525d20ca904b225140f | /scripts/common/gerrit/query.py | ffea6bdf65921d27e1df63902163c310d54f0365 | [
"BSD-3-Clause"
] | permissive | mcgreevy/chromium-build | 3881c489b4d9be2f113da755487808b3593f8156 | f8e42c70146c1b668421ee6358dc550a955770a3 | refs/heads/master | 2020-12-30T12:32:15.685191 | 2017-05-17T06:58:18 | 2017-05-17T06:58:18 | 91,419,271 | 0 | 2 | NOASSERTION | 2020-07-22T09:27:35 | 2017-05-16T05:52:45 | Python | UTF-8 | Python | false | false | 5,859 | py | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import urllib
################################################################################
# Gerrit API
################################################################################
class QueryBuilder(object):
  """Class to iteratively construct a Gerrit query string.
  This functions as a helper class to simplify explicit versus implicit
  quoting and nesting of Gerrit query strings.
  Gerrit query semantics are documented here:
  https://gerrit-review.googlesource.com/Documentation/user-search.html
  """
  # NOTE: Python 2 code — relies on 'basestring' and 'urllib.quote',
  # neither of which exists under those names in Python 3.
  def __init__(self, terms, operator):
    """
    Initializes a Gerrit query object. This should not be used directly;
    instead, one of the supplied constructors (New, NewOR, NewAND) should be
    used to create a new builder.
    Args:
      terms: (list) A list of explicit query parameters to start with. If
          'terms' is an existing Query instance, the current instance will be
          initialized as a clone.
      operator: (str) If not 'None', this term will be implicitly added after
          each explicit query term. Suggested values are 'AND' and 'OR'.
    """
    # Terms are stored as an immutable tuple; builders are value-like and
    # every add* method returns a new clone rather than mutating self.
    self._terms = tuple(terms)
    self._operator = operator
  @classmethod
  def _New(cls, terms, operator=None):
    return cls(
        [cls._prepareTerm(t) for t in terms],
        operator)
  @classmethod
  def New(cls, *terms):
    return cls._New(terms)
  @classmethod
  def NewOR(cls, *terms):
    return cls._New(terms, operator='OR')
  @classmethod
  def NewAND(cls, *terms):
    return cls._New(terms, operator='AND')
  @classmethod
  def _prepareTerm(cls, value):
    """Analyze the type of 'value' and generates a term from it (see 'add()')"""
    if isinstance(value, basestring):
      # A 'key:value' string becomes a selector; anything else is a plain
      # quoted term.
      parts = value.split(':', 1)
      if len(parts) == 2:
        return cls._prepareSelector(parts[0], parts[1])
      else:
        return cls._prepareString(value, quoted=True)
    if isinstance(value, QueryBuilder):
      # Return its query verbatim, enclosed in parenthesis
      return list(value.termiter())
    # Try iterator
    it = None
    try:
      it = iter(value)
    except TypeError:
      pass
    if it is not None:
      # Nested iterables become nested (parenthesized) term groups.
      return tuple(cls._prepareTerm(x) for x in it)
    # Default to stringify
    return cls._prepareString(str(value), quoted=True)
  @classmethod
  def _prepareString(cls, value, quoted=False):
    """Constructs a string term."""
    if quoted:
      value = urllib.quote(value)
    return value
  @classmethod
  def _prepareSelector(cls, key, value):
    """Constructs a selector (e.g., 'label:Code-Review+1') term"""
    # Quote key/value individually; the colon does not get quoted
    return '%s:%s' % (
        cls._prepareString(key, quoted=True),
        cls._prepareString(value, quoted=True))
  def _cloneWithTerms(self, *terms):
    """Creates a new 'QueryBuilder' with an augmented term set."""
    new_terms = self._terms + terms
    return self.__class__(new_terms, self._operator)
  def add(self, *values):
    """Adds a new query term to the Query.
    This is a generic 'add' function that infers how to add 'value' based on
    its type and contents. For more specific control, use the specialised
    'add*' functions.
    The query term ('value') may be any of the following:
    - A key:value term, in which case the key and value are quoted but the
      colon is left unquoted.
    - A single term string, in which case the entire term is quoted
    - A QueryBuilder instance, in which case it is embedded as a single term
      bounded by parenthesis.
    - An iterable of query terms, in which case each term will be formatted
      recursively and placed inside parenthesis.
    Args:
      values: The query term to add (see above).
    Returns: (Query) this Query object
    """
    terms = []
    for value in values:
      term = self._prepareTerm(value)
      if term is not None:
        terms.append(term)
    if len(terms) == 0:
      # Nothing to add: return self unchanged (no pointless clone).
      return self
    return self._cloneWithTerms(*terms)
  def addSelector(self, key, value):
    """Adds a 'key:value' term to the query.
    The 'key' and 'value' terms will be URL quoted.
    Args:
      key: (str) the key
      value: (str) the value
    Returns: (Query) this Query object
    """
    return self._cloneWithTerms(self._prepareSelector(key, value))
  def addQuoted(self, value):
    """Adds a URL-quoted term to the query.
    Args:
      value: (str) the value to quote and add
    Returns: (Query) this Query object
    """
    return self._cloneWithTerms(self._prepareString(value, quoted=True))
  def addUnquoted(self, value):
    """Directly adds a term to the query.
    Args:
      value: (str) the value to add
    Returns: (Query) this Query object
    """
    return self._cloneWithTerms(self._prepareString(value, quoted=False))
  @classmethod
  def _formatQuery(cls, terms):
    """Recursive method to convert internal nested string/list to a query"""
    formatted_terms = []
    for term in terms:
      if isinstance(term, (list, tuple)):
        if len(term) == 0:
          continue
        # Nested term groups render inside parenthesis.
        term = '(%s)' % (cls._formatQuery(term))
      formatted_terms.append(term)
    return '+'.join(formatted_terms)
  def termiter(self):
    """Iterator overload to iterate over individual query terms"""
    # Interleaves the implicit operator (if any) between explicit terms.
    first = True
    for param in self._terms:
      if first:
        first = False
      elif self._operator is not None:
        yield self._operator
      yield param
  def __len__(self):
    """Returns: (int) the number of explicit query terms"""
    return len(self._terms)
  def __str__(self):
    """Constructs a URL-quoted query string from this query's terms"""
    return self._formatQuery(self.termiter())
| [
"zty@chromium.org"
] | zty@chromium.org |
c2b8476c5f2885a7c220049fadf41636f702b471 | f2e063c84f0cfa36b47ca5ee0752790ce7ae7a7a | /22 april/second.py | 85c9b8cf0f154f60577713c5eb85ff1cdcd2501c | [] | no_license | niksanand1717/TCS-434 | a1b1ca9ca96b389cb30fb3a104dda122408a52c9 | 0c1c16426d127d84733d26475278f148a1e512d8 | refs/heads/master | 2023-04-17T12:48:11.475162 | 2021-05-01T19:05:38 | 2021-05-01T19:05:38 | 347,687,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 550 | py | # Input n strings in a list and print all the strings ending with a specific character provided by the user
def seive(string, suffix=None):
    """Return *string* when it ends with *suffix*, else None.

    *suffix* defaults to the module-level ``match`` (set from user input
    below), which keeps the original ``filter(seive, strs)`` usage
    working unchanged.
    """
    if suffix is None:
        suffix = match
    # str.endswith replaces the original error-prone manual index
    # arithmetic (slice-from-computed-offset comparison).
    return string if string.endswith(suffix) else None
# Read the number of strings, then the strings themselves.
strnum = int(input("Enter num of strings: "))
strs = []
for i in range(0, strnum):
    strs.append(input(f"Enter string {i+1}: "))
# NOTE(review): 'global' at module level is a no-op; 'match' is simply a
# module-level name that seive() reads.
global match
matchstr: str = input("Enter the matching character at end of string: ")
match = matchstr
# Keep only the strings that end with the requested suffix.
output = list(filter(seive, strs))
print(f"Strings ending with {matchstr}:", output)
"nikhilanandgaya01@gmail.com"
] | nikhilanandgaya01@gmail.com |
e1bccde57c18d31ab7ae91528e51e89563c8c9b2 | 3e7b2ebb64e9e324ce47d19def21ae62cc1e56a6 | /Problem-solving/HackerRank/p14- sWAP cASE.py | 5f4f5a0512103085cb85a010c0c4672a7a9a5c87 | [] | no_license | shuvo14051/python-data-algo | 9b6622d9260e95ca9ffabd39b02996f13bdf20d1 | 8f66ff6f2bd88a0ae48dac72e4ea6c5382a836ec | refs/heads/master | 2023-02-03T03:04:01.183093 | 2020-12-13T10:13:15 | 2020-12-13T10:13:15 | 274,106,480 | 0 | 0 | null | 2020-07-05T06:33:28 | 2020-06-22T10:24:05 | Python | UTF-8 | Python | false | false | 194 | py | # n = input()
#
# swaped_n = n.swapcase()
#
# print(swaped_n)
def swap_case(s):
    """Return *s* with each uppercase letter lowered and vice versa."""
    swapped = s.swapcase()
    return swapped
if __name__ == '__main__':
    # Read one line from stdin and echo it with the letter cases swapped.
    s = input()
    result = swap_case(s)
    print(result)
"shuvo1137017@gmail.com"
] | shuvo1137017@gmail.com |
14c4afa7c0d18bc0eb4ddc092102498ed554f5ad | 1af050f5fce1e500d688e325876107d696eb8159 | /pythonprac/bin/easy_install-3.7 | 55d12364ae5d829cf69d629922b828061c904d7d | [] | no_license | Novandev/pythonprac | 30c587b1eb6ba6c1cd9a6f66b59c03c5c98ec0d4 | 78eded9f1d6d9b2f9ffc16e57139b10cc13355e1 | refs/heads/master | 2022-12-13T20:13:53.976110 | 2020-07-27T05:04:55 | 2020-07-27T05:04:55 | 108,077,649 | 0 | 0 | null | 2022-12-08T01:30:30 | 2017-10-24T04:42:31 | Python | UTF-8 | Python | false | false | 278 | 7 | #!/Users/novan/Desktop/Github/pythonprac/pythonprac/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    # Strip setuptools' '-script.py'/'.exe' wrapper suffix from argv[0]
    # so the tool reports the bare command name, then dispatch to
    # setuptools' easy_install main().
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"donovan.adams@students.makeschool.com"
] | donovan.adams@students.makeschool.com |
91a9bfc31c26aa2120baa0b5004b7fc8989683ab | 2fb2291259b27291d379df07712c5449819992a5 | /config.py | 31aadb55b848c588abbf76a2d9b57469d09fbf93 | [] | no_license | ChoungJX/doweb_server | 6d4dcb68c4eab5eda8125543f4c644bdaa4d7dfb | bbbfa34713badcf7f8e58c23171269dca0085437 | refs/heads/master | 2021-07-13T15:59:40.674541 | 2021-04-27T17:23:03 | 2021-04-27T17:23:03 | 244,540,264 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | import datetime
class index():
    """Configuration object; the keys follow Flask / Flask-SQLAlchemy naming."""
    # Local SQLite database used by SQLAlchemy.
    SQLALCHEMY_DATABASE_URI = 'sqlite:///service.db'
    SQLALCHEMY_TRACK_MODIFICATIONS = True
    # Sessions expire after 2 h 30 min.
    PERMANENT_SESSION_LIFETIME = datetime.timedelta(hours=2,minutes=30)
    # Client-side cache lifetime for files served via send_file.
    SEND_FILE_MAX_AGE_DEFAULT = datetime.timedelta(days=7)
    #PERMANENT_SESSION_LIFETIME = datetime.timedelta(seconds=10)
    DEBUG = True
"zhenglinfeng43@gmail.com"
] | zhenglinfeng43@gmail.com |
ecd72f46add5e5f213fc1533ff3e25f25160af31 | 9de18e1e39c941aeba1781630711cef1d3d4d44c | /experiments/cifar10/conv.py | 41757c9d21758f8c35cf7d9e176d18cd6ff88602 | [] | no_license | BINDS-LAB-UMASS/bindsnet_experiments | cee786ae7e087845f58e0af4a49fa319d4fb81d5 | 8a20be9d1ede021b70ff95cc7e85024ff5a222db | refs/heads/master | 2022-11-12T11:33:20.451028 | 2019-10-01T15:40:28 | 2019-10-01T15:40:28 | 135,615,246 | 41 | 10 | null | 2022-10-28T00:35:03 | 2018-05-31T17:28:04 | Python | UTF-8 | Python | false | false | 14,333 | py | import os
import sys
import torch
import argparse
import numpy as np
import matplotlib.pyplot as plt
from time import time as t
sys.path.append('..')
from utils import print_results, update_curves
from bindsnet.datasets import CIFAR10
from bindsnet.network import Network
from bindsnet.learning import Hebbian
from bindsnet.encoding import bernoulli
from bindsnet.network.monitors import Monitor
from bindsnet.network.nodes import Input, DiehlAndCookNodes
from bindsnet.evaluation import update_ngram_scores, assign_labels
from bindsnet.network.topology import Conv2dConnection, SparseConnection
from bindsnet.analysis.plotting import plot_input, plot_spikes, plot_conv2d_weights
print()
# ---- Command-line interface: hyperparameters of the experiment --------------
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--n_train', type=int, default=60000)
parser.add_argument('--n_test', type=int, default=10000)
parser.add_argument('--kernel_size', type=int, nargs='+', default=[16])
parser.add_argument('--stride', type=int, nargs='+', default=[4])
parser.add_argument('--n_filters', type=int, default=25)
parser.add_argument('--padding', type=int, default=0)
parser.add_argument('--inhib', type=float, default=100.0)
parser.add_argument('--time', type=int, default=100)
parser.add_argument('--dt', type=float, default=1.0)
parser.add_argument('--intensity', type=float, default=0.5)
parser.add_argument('--progress_interval', type=int, default=10)
parser.add_argument('--update_interval', type=int, default=250)
parser.add_argument('--train', dest='train', action='store_true')
parser.add_argument('--test', dest='train', action='store_false')
parser.add_argument('--plot', dest='plot', action='store_true')
parser.add_argument('--gpu', dest='gpu', action='store_true')
parser.set_defaults(plot=False, gpu=False, train=True)
args = parser.parse_args()
# Unpack arguments into module-level names used throughout the script.
# NOTE(review): 'time' shadows the usual module name; the time() function
# was imported as 't' above for this reason.
seed = args.seed
n_train = args.n_train
n_test = args.n_test
kernel_size = args.kernel_size
stride = args.stride
n_filters = args.n_filters
padding = args.padding
inhib = args.inhib
time = args.time
dt = args.dt
intensity = args.intensity
progress_interval = args.progress_interval
update_interval = args.update_interval
train = args.train
plot = args.plot
gpu = args.gpu
# A single value means a square kernel / stride.
if len(kernel_size) == 1:
    kernel_size = [kernel_size[0], kernel_size[0]]
if len(stride) == 1:
    stride = [stride[0], stride[0]]
args = vars(args)
print('\nCommand-line argument values:')
for key, value in args.items():
    print('-', key, ':', value)
print()
model = 'conv'
data = 'cifar10'
assert n_train % update_interval == 0 and n_test % update_interval == 0, \
    'No. examples must be divisible by update_interval'
# The parameter list doubles as the on-disk model identifier.
params = [seed, n_train, kernel_size, stride, n_filters,
          padding, inhib, time, dt, intensity, update_interval]
model_name = '_'.join([str(x) for x in params])
if not train:
    test_params = [seed, n_train, n_test, kernel_size, stride, n_filters,
                   padding, inhib, time, dt, intensity, update_interval]
np.random.seed(seed)
if gpu:
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
    torch.cuda.manual_seed_all(seed)
else:
    torch.manual_seed(seed)
n_examples = n_train if train else n_test
input_shape = [32, 32, 3]
# Spatial size of the convolutional layer's output map.
if kernel_size == input_shape:
    conv_size = [1, 1]
else:
    conv_size = (int((input_shape[0] - kernel_size[0]) / stride[0]) + 1,
                 int((input_shape[1] - kernel_size[1]) / stride[1]) + 1)
n_classes = 10
n_neurons = n_filters * np.prod(conv_size)
per_class = int(n_neurons / n_classes)
total_kernel_size = int(np.prod(kernel_size))
total_conv_size = int(np.prod(conv_size))
# Build network.
network = Network()
input_layer = Input(n=32*32*3, shape=(1, 3, 32, 32), traces=True)
# 'Y' is the plastic (learning) layer; 'Y_' shares its weights but has no
# learning rule and is used for clean spike recording.
conv_layer = DiehlAndCookNodes(n=n_filters * total_conv_size, shape=(1, n_filters, *conv_size),
                               thresh=-64.0, traces=True, theta_plus=0.05, refrac=0)
conv_layer2 = DiehlAndCookNodes(n=n_filters * total_conv_size, shape=(1, n_filters, *conv_size), refrac=0)
conv_conn = Conv2dConnection(input_layer, conv_layer, kernel_size=kernel_size, stride=stride, update_rule=Hebbian,
                             norm=0.5 * int(np.sqrt(total_kernel_size)), nu=(1e-3, 1e-3), wmax=2.0)
conv_conn2 = Conv2dConnection(input_layer, conv_layer2, w=conv_conn.w, kernel_size=kernel_size, stride=stride,
                              update_rule=None, nu=(0, 1e-3), wmax=2.0)
# Recurrent inhibition: every neuron inhibits every other neuron (the
# diagonal is zeroed), stored as a sparse matrix.
w = torch.ones(1, n_filters, conv_size[0], conv_size[1], 1, n_filters, conv_size[0], conv_size[1])
for f in range(n_filters):
    for i in range(conv_size[0]):
        for j in range(conv_size[1]):
            w[0, f, i, j, 0, f, i, j] = 0
w = w.view(conv_layer.n, conv_layer.n)
i = w.nonzero()
v = -inhib * torch.ones(i.shape[0])
w = torch.sparse.FloatTensor(i.t(), v, w.size())
# (Earlier experimental variants of the inhibition wiring — e.g. only
# between different filters, or only at identical spatial positions —
# were removed here; see version control history if needed.)
recurrent_conn = SparseConnection(conv_layer, conv_layer, w=w)
network.add_layer(input_layer, name='X')
network.add_layer(conv_layer, name='Y')
network.add_layer(conv_layer2, name='Y_')
network.add_connection(conv_conn, source='X', target='Y')
network.add_connection(conv_conn2, source='X', target='Y_')
network.add_connection(recurrent_conn, source='Y', target='Y')
# Voltage recording for excitatory and inhibitory layers.
voltage_monitor = Monitor(network.layers['Y'], ['v'], time=time)
network.add_monitor(voltage_monitor, name='output_voltage')
# Load CIFAR-10 data.
dataset = CIFAR10(path=os.path.join('..', '..', 'data', 'CIFAR10'), download=True)
if train:
    images, labels = dataset.get_train()
else:
    images, labels = dataset.get_test()
images *= intensity
# Record spikes during the simulation.
spike_record = torch.zeros(update_interval, time, n_neurons)
# Neuron assignments and spike proportions.
if train:
    assignments = -torch.ones_like(torch.Tensor(n_neurons))
    proportions = torch.zeros_like(torch.Tensor(n_neurons, n_classes))
    rates = torch.zeros_like(torch.Tensor(n_neurons, n_classes))
    ngram_scores = {}
else:
    # Test mode: restore label assignments learned during training.
    path = os.path.join('..', '..', 'params', data, model)
    path = os.path.join(path, '_'.join(['auxiliary', model_name]) + '.pt')
    assignments, proportions, rates, ngram_scores = torch.load(open(path, 'rb'))
# Sequence of accuracy estimates.
curves = {'all': [], 'proportion': [], 'ngram': []}
if train:
    best_accuracy = 0
# One spike monitor per layer, registered under '<layer>_spikes'.
spikes = {}
for layer in set(network.layers):
    spikes[layer] = Monitor(network.layers[layer], state_vars=['s'], time=time)
    network.add_monitor(spikes[layer], name='%s_spikes' % layer)
# Train the network.
if train:
    print('\nBegin training.\n')
else:
    print('\nBegin test.\n')
# Plot handles (lazily created on first plot iteration).
inpt_ims = None
inpt_axes = None
spike_ims = None
spike_axes = None
weights_im = None
start = t()
for i in range(n_examples):
    if i % progress_interval == 0:
        print('Progress: %d / %d (%.4f seconds)' % (i, n_train, t() - start))
        start = t()
    # Every update_interval examples: evaluate, checkpoint and re-assign labels.
    if i % update_interval == 0 and i > 0:
        if i % len(labels) == 0:
            current_labels = labels[-update_interval:]
        else:
            current_labels = labels[i % len(images) - update_interval:i % len(images)]
        # Update and print accuracy evaluations.
        curves, predictions = update_curves(
            curves, current_labels, n_classes, spike_record=spike_record, assignments=assignments,
            proportions=proportions, ngram_scores=ngram_scores, n=2
        )
        print_results(curves)
        if train:
            if any([x[-1] > best_accuracy for x in curves.values()]):
                print('New best accuracy! Saving network parameters to disk.')
                # Save network to disk.
                path = os.path.join('..', '..', 'params', data, model)
                if not os.path.isdir(path):
                    os.makedirs(path)
                network.save(os.path.join(path, model_name + '.pt'))
                path = os.path.join(path, '_'.join(['auxiliary', model_name]) + '.pt')
                torch.save((assignments, proportions, rates, ngram_scores), open(path, 'wb'))
                best_accuracy = max([x[-1] for x in curves.values()])
            # Assign labels to excitatory layer neurons.
            assignments, proportions, rates = assign_labels(spike_record, current_labels, n_classes, rates)
            # Compute ngram scores.
            ngram_scores = update_ngram_scores(spike_record, current_labels, n_classes, 2, ngram_scores)
        print()
    # Get next input sample.
    image = images[i].permute(2, 0, 1)
    sample = bernoulli(datum=image, time=time, dt=dt, max_prob=1.0).unsqueeze(1)
    inpts = {'X': sample}
    # Run the network on the input.
    network.run(inpts=inpts, time=time)
    # Re-encode and re-run (up to 3 times) if the output barely spiked.
    retries = 0
    while spikes['Y_'].get('s').sum() < 5 and retries < 3:
        retries += 1
        sample = bernoulli(datum=image, time=time, dt=dt, max_prob=1.0).unsqueeze(1)
        inpts = {'X': sample}
        network.run(inpts=inpts, time=time)
    # Add to spikes recording.
    spike_record[i % update_interval] = spikes['Y_'].get('s').view(time, -1)
    # Optionally plot various simulation information.
    if plot:
        _input = image.permute(1, 2, 0).float()
        _input /= _input.max()
        reconstruction = inpts['X'].sum(0).view(3, 32, 32).permute(1, 2, 0).float()
        reconstruction /= reconstruction.max()
        w = conv_conn.w
        _spikes = {'X': spikes['X'].get('s').view(32*32*3, time),
                   'Y': spikes['Y'].get('s').view(n_filters * total_conv_size, time),
                   'Y_': spikes['Y_'].get('s').view(n_filters * total_conv_size, time)}
        inpt_axes, inpt_ims = plot_input(
            images[i].view(32, 32, 3), reconstruction, label=labels[i], ims=inpt_ims, axes=inpt_axes
        )
        spike_ims, spike_axes = plot_spikes(spikes=_spikes, ims=spike_ims, axes=spike_axes)
        weights_im = plot_conv2d_weights(w, im=weights_im, wmax=0.1)
        plt.pause(1e-8)
    network.reset_()  # Reset state variables.
print(f'Progress: {n_examples} / {n_examples} ({t() - start:.4f} seconds)')
# Final evaluation on the last (possibly partial) window of labels.
i += 1
if i % len(labels) == 0:
    current_labels = labels[-update_interval:]
else:
    current_labels = labels[i % len(images) - update_interval:i % len(images)]
# Update and print accuracy evaluations.
curves, predictions = update_curves(
    curves, current_labels, n_classes, spike_record=spike_record, assignments=assignments,
    proportions=proportions, ngram_scores=ngram_scores, n=2
)
print_results(curves)
if train:
    if any([x[-1] > best_accuracy for x in curves.values()]):
        print('New best accuracy! Saving network parameters to disk.')
        # Save network to disk.
        path = os.path.join('..', '..', 'params', data, model)
        if not os.path.isdir(path):
            os.makedirs(path)
        network.save(os.path.join(path, model_name + '.pt'))
        path = os.path.join(path, '_'.join(['auxiliary', model_name]) + '.pt')
        torch.save((assignments, proportions, rates, ngram_scores), open(path, 'wb'))
        best_accuracy = max([x[-1] for x in curves.values()])
if train:
    print('\nTraining complete.\n')
else:
    print('\nTest complete.\n')
print('Average accuracies:\n')
for scheme in curves.keys():
    print('\t%s: %.2f' % (scheme, np.mean(curves[scheme])))
# Save accuracy curves to disk.
path = os.path.join('..', '..', 'curves', data, model)
if not os.path.isdir(path):
    os.makedirs(path)
if train:
    to_write = ['train'] + params
else:
    to_write = ['test'] + params
to_write = [str(x) for x in to_write]
# NOTE(review): 'f' is used both as a filename string here and as a file
# handle below — intentional reuse, but easy to misread.
f = '_'.join(to_write) + '.pt'
torch.save((curves, update_interval, n_examples), open(os.path.join(path, f), 'wb'))
# Save results to disk.
path = os.path.join('..', '..', 'results', data, model)
if not os.path.isdir(path):
    os.makedirs(path)
results = [
    np.mean(curves['all']), np.mean(curves['proportion']), np.mean(curves['ngram']),
    np.max(curves['all']), np.max(curves['proportion']), np.max(curves['ngram'])
]
if train:
    to_write = params + results
else:
    to_write = test_params + results
to_write = [str(x) for x in to_write]
name = 'train.csv' if train else 'test.csv'
# Write the CSV header once, on first creation of the results file.
if not os.path.isfile(os.path.join(path, name)):
    with open(os.path.join(path, name), 'w') as f:
        if train:
            columns = [
                'seed', 'n_train', 'kernel_size', 'stride', 'n_filters', 'padding', 'inhib', 'time', 'dt',
                'intensity', 'update_interval', 'mean_all_activity', 'mean_proportion_weighting',
                'mean_ngram', 'max_all_activity', 'max_proportion_weighting', 'max_ngram'
            ]
            header = ','.join(columns) + '\n'
            f.write(header)
        else:
            columns = [
                'seed', 'n_train', 'n_test', 'kernel_size', 'stride', 'n_filters', 'padding', 'inhib', 'time',
                'dt', 'intensity', 'update_interval', 'mean_all_activity', 'mean_proportion_weighting',
                'mean_ngram', 'max_all_activity', 'max_proportion_weighting', 'max_ngram'
            ]
            header = ','.join(columns) + '\n'
            f.write(header)
# Append this run's parameter/result row.
with open(os.path.join(path, name), 'a') as f:
    f.write(','.join(to_write) + '\n')
print()
| [
"djsaunde@umass.edu"
] | djsaunde@umass.edu |
048078b6d92b6a6cea61415ef5bbcb2f3fb1edbb | aef857a784b9028e6f13eddb3584660ac7575c3a | /Homeworks/Homework 2/Q5/Q5b.py | c2c7ea469d32a4a082125212b241531d5c65f572 | [] | no_license | pratikmistry0410/DSA-Spring2020 | 2a72cd21cefb0cce9e5c679be6825c3bbe74503d | 4b6d9a7a0d6ce025cdf0084de99ccface45be2cb | refs/heads/master | 2022-07-19T09:49:20.843643 | 2020-05-19T19:52:45 | 2020-05-19T19:52:45 | 259,977,141 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,145 | py | import time
# Global variable to calculate the complexity for algorithm and store the cutoff value to insertion sort
cutoff = 6  # Slice size at/below which quicksort falls back to insertion sort
complexity_count = 0  # Global counter of basic operations, incremented by the sorts
# Function to read the dataset
def readDataset():
filename = "data0.32768" # Dataset file name
file = "/Users/learning/Documents/Pratik Mistry/Sem2/Data Structures and Algorithms/dataset-problem2-hw2/" + filename # Complete File Path
file_object = open(file,"r")
lines = file_object.readlines() # Reading all the lines from the file opened
dataset = []
for line in lines:
line = line.strip()
dataset.append(int(line)) # Casting to int as numbers are read as strings while reading file
return dataset # Return the dataset
# Function to sort the dataset using insertion sort
def insertionSort(arr,low,high):
global complexity_count # Referring global scope variable for counting complexity
for i in range(low+1,high+1,1): # Traversing each array element
temp = arr[i]
index = i
complexity_count+=1 # Increment the count complexity
while index > 0 and arr[index-1] > temp: # Sort the left subarray of the current index
complexity_count+=1 # Increment the count complexity
arr[index] = arr[index-1]
index-=1
arr[index] = temp
# Function to calculate median of the array
def medianOf3(arr,low,mid,high):
if arr[low] > arr [high]:
if arr[high] > arr[mid]:
return high
elif arr[mid]> arr[low]:
return low
else:
return mid
else:
if arr[low] > arr[mid]:
return low
elif arr[mid] > arr[high]:
return high
else:
return mid
# Function to quick sort the array with median of 3 and Cutoff to insertion method
def medianQuickSort_CutOff(data_list,low,high):
if (high - low + 1)<= cutoff: # Base condition to stop resursion while sorting the elements using insertion sort
insertionSort(data_list,low,high)
return
mid = int((low+high)/2)
median = medianOf3(data_list,low,mid,high) # Calculate the median of array
swap(data_list,low,median) # Swap median with lowest index of the array
pivot_partition = partition(data_list,low,high) # Find the pivot/partition
medianQuickSort_CutOff(data_list,low,pivot_partition-1) # Apply quick sort to left subarray
medianQuickSort_CutOff(data_list,pivot_partition+1,high) # Apply quick sort to right subarray
# Function to partition the array and returning the pivot element
def partition(arr,low,high):
global complexity_count # Referring global scope variable for counting complexity
pivot = arr[low] # Selecting lowest element as pivot
left = low
right = high
while left < right:
while arr[right] >= pivot and left < right: # Move from right towards left and check for element less than pivot
complexity_count +=1 # Increment the count complexity
right-=1
if right!=left:
arr[left] = arr[right] # Swap the smaller element at the right to the left of pivot
left+=1
while arr[left] <= pivot and left < right: # Move from left towards right and check for element greater than pivot
complexity_count +=1 # Increment the count complexity
left += 1
if right!=left:
arr[right] = arr[left] # Swap the greater element at the left to the right of pivot
right-=1
arr[left] = pivot
return left
# Function to swap the median and lowest index of the subarray
def swap(data_list,low,median):
temp = data_list[median]
data_list[median] = data_list[low]
data_list[low] = temp
# Driver/Main program to read dataset, and call quick sort with median of 3 and cutoff to insertion printing output
if __name__ == "__main__":
data_list = readDataset() # Reading the dataset
start = time.time()
medianQuickSort_CutOff(data_list,0,len(data_list)-1) # Calling Quick Sort: cutoff to insertion sort
end = time.time()
total_time = end-start # Calculating physical clock time
# Printing the outputs
print("\nThe sorted list using quick sort with cutoff to insertion sort is: ")
print(data_list)
print("\nThe total time taken for quick sort with cutoff to insertion sort is:",total_time*1000 , " ms")
print("\nThe complexity count for quick sort with cutoff to insertion sort is:",complexity_count) | [
"noreply@github.com"
] | noreply@github.com |
16a8903ece9e78204eed8acc202a5f650cf59dd2 | 8a932cf081cdbcdee998b2e71ff5cc57a4799cbb | /pentakillers.py | dadc28ec0e1d3ffb3f9aae8a2664550075afd334 | [
"MIT"
] | permissive | vitaum88/pentakillers | 2c0b910daf38b9b47f315361e0046e4fc6e992ff | c930493bb4ff3ced65a48492569a7f5770c88d41 | refs/heads/main | 2023-06-30T13:42:08.659121 | 2021-08-02T19:04:20 | 2021-08-02T19:04:20 | 392,063,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,711 | py | import cassiopeia as cass
import arrow
import pandas as pd
import time
import requests
'''
Go to https://developer.riotgames.com/ and create a LOGIN. After that, you'll be taken to a screen with the API key.
There are 3 types of API keys in Riot Games:
- Development API (which is the default once you create a developer account): it's a key that needs to be refreshed every 24h
- Personal API: after registering a product (I didn't do it, so the API I've been using is Development), you don't need to
refresh your API key. There are some restrictions in the access (such as how many calls per minute/hour etc)
- Production API: this is for a real product, deployed, etc. I didn't even read details about it because it's way out of
the scope of this project.
You can get reference for them in https://developer.riotgames.com/docs/portal#product-registration_application-process
'''
API_KEY = "RGAPI-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxx"  # Riot developer API key (placeholder; dev keys expire every 24h)
REGION = 'NA' # can be any region (NA, BR, TR, etc)
def get_curr_data(pentakiller, kill, start_time):
    '''
    Flatten one pentakill (summoner, champion, final item build, kill position
    and timestamp) into a row-dict ready for the CSV export.
    '''
    # Normalise the six item slots once (the original rebuilt this list for
    # every slot, six times).
    items = [item if item else "empty slot" for item in pentakiller.get("items")]
    x_pos, y_pos = tuple(kill.get('position').values())[:2]
    curr_data = {
        "summoner": pentakiller['summoner'],
        "match id": pentakiller['match'],
        "champion": pentakiller['champion'],
        "region": REGION,
        "x_pos": x_pos,
        "y_pos": y_pos,
        "item_1": items[0],
        "item_2": items[1],
        "item_3": items[2],
        "item_4": items[3],
        "item_5": items[4],
        "item_6": items[5],
        "timestamp": start_time
    }
    return curr_data
def new_kills_heatmap(self):
    '''
    MonkeyPatched replacement for cassiopeia.core.match.Match.kills_heatmap:
    identical to the upstream method except that it RETURNS the matplotlib
    figure, so callers can save it to a file.
    '''
    # In-game coordinate bounding boxes; only the two common maps are supported.
    if self.map.name == "Summoner's Rift":
        rx0, ry0, rx1, ry1 = 0, 0, 14820, 14881
    elif self.map.name == "Howling Abyss":
        rx0, ry0, rx1, ry1 = -28, -19, 12849, 12858
    else:
        # Fixed: the original did `raise NotImplemented`, which is a constant,
        # not an exception, and itself raises a TypeError.
        raise NotImplementedError
    imx0, imy0, imx1, imy1 = self.map.image.image.getbbox()
    def position_to_map_image_coords(position):
        # Linearly rescale in-game (x, y) into pixel coordinates of the map image.
        x, y = position.x, position.y
        x -= rx0
        x /= (rx1 - rx0)
        x *= (imx1 - imx0)
        y -= ry0
        y /= (ry1 - ry0)
        y *= (imy1 - imy0)
        return x, y
    import matplotlib.pyplot as plt
    size = 8
    fig = plt.figure(figsize=(size, size)) # updated this line
    plt.imshow(self.map.image.image.rotate(-90))
    # One dot per champion kill, coloured by the killer's team side.
    for p in self.participants:
        for kill in p.timeline.champion_kills:
            x, y = position_to_map_image_coords(kill.position)
            if p.team.side == cass.data.Side.blue:
                plt.scatter([x], [y], c="b", s=size * 10)
            else:
                plt.scatter([x], [y], c="r", s=size * 10)
    plt.axis('off')
    plt.show()
    return fig # added this line

cass.core.match.Match.kills_heatmap = new_kills_heatmap # updating the method
def setup(key, region):
    '''
    Basic setup for the cassiopeia module: logging, API key and default region.

    Fixed: the parameters were previously ignored in favour of the module
    globals.  The only call site passes (API_KEY, REGION), so using the
    parameters is behaviourally identical while making the function honest.
    '''
    cass.apply_settings({"logging": {
        "print_calls": False,
        "print_riot_api_key": False,
        "default": "WARNING",
        "core": "WARNING"
    }})
    cass.set_riot_api_key(key)
    cass.set_default_region(region)
def get_week_matches(summoner):
    '''
    Return the summoner's match history covering the last seven whole days
    (both endpoints floored to the start of the day, UTC).
    '''
    now = arrow.utcnow()
    until = now.floor('day')
    since = now.shift(days=-7).floor('day')
    return cass.get_match_history(summoner, begin_time=since, end_time=until)
def get_uri_region(region=REGION):
    '''
    Map a human region code (e.g. 'NA') to the platform id used in the REST
    API hostnames (e.g. 'NA1').  Returns None for unknown codes.
    '''
    platform_ids = {
        'BR': 'BR1', 'EUNE': 'EUN1', 'EUW': 'EUW1', 'JP': 'JP1',
        'KR': 'KR', 'LAN': 'LA1', 'LAS': 'LA2', 'NA': 'NA1',
        'OCE': 'OC1', 'TR': 'TR1', 'RU': 'RU',
    }
    return platform_ids.get(region)
def get_diamonds(page, tier):
    '''
    Generator yielding summoner ids for Diamond players of the given tier/page.

    Cass has no helper for Diamond (the player pool is vast), so this hits the
    league-v4 REST endpoint directly.  Handle with care not to overload the
    server with thousands of requests.
    '''
    headers_dict = {"X-Riot-Token": API_KEY}
    region_api = get_uri_region(REGION).lower()
    URL = f"https://{region_api}.api.riotgames.com/lol/league/v4/entries/RANKED_SOLO_5x5/DIAMOND/{tier}?page={page}"
    response = requests.get(URL, headers=headers_dict)
    # Fail loudly on HTTP errors (rate limit, bad key) instead of trying to
    # iterate an error payload, which produced confusing downstream failures.
    response.raise_for_status()
    for entry in response.json():
        yield entry.get('summonerId')
def get_masters():
    '''
    Yield every player entry in the Master league (ranked solo queue).
    '''
    yield from cass.get_master_league(queue=cass.Queue.ranked_solo_fives)
def get_grandmasters():
    '''
    Yield every player entry in the Grandmaster league (ranked solo queue).
    '''
    yield from cass.get_grandmaster_league(queue=cass.Queue.ranked_solo_fives)
def get_challengers():
    '''
    Yield every player entry in the Challenger league (ranked solo queue).
    '''
    yield from cass.get_challenger_league(queue=cass.Queue.ranked_solo_fives)
def get_participant_info(match):
    '''
    Return a dict describing a pentakiller in this match, or None when nobody
    scored a pentakill.  (If several players did, the last one found wins,
    matching the original behaviour.)
    '''
    found = None
    for p in match.participants:
        if p.stats.largest_multi_kill < 5:
            continue
        found = {
            'summoner': p.summoner.name,
            'match': match.id,
            'region': match.region.value,
            'champion': p.champion.name,
            'participant': p,
            'participant_id': p.id,
            # Item objects -> names; empty slots stay None.
            'items': [it.name if it is not None else None for it in p.stats.items],
        }
    return found
def get_kills_dict(participant_no, match_id):
    '''
    Return the CHAMPION_KILL timeline events where the given participant was
    the killer, in chronological order.

    Walks every frame of the match timeline, flattens the events to dicts,
    keeps only those with a 'killerId' (player kills, not NPC events) and
    filters down to PvP champion kills by this participant.
    '''
    match = cass.get_match(match_id)
    events = []
    for frame in match.timeline.frames:
        events.extend(ev.to_dict() for ev in frame.events)
    return [
        ev for ev in events
        if 'killerId' in ev
        and ev['killerId'] == participant_no
        and ev['type'] == 'CHAMPION_KILL'
    ]
def get_pentakill(kills_list):
    '''
    Return the kill event that starts the first pentakill in *kills_list*,
    or None if no qualifying window exists.

    Per the LoL wiki, consecutive kills must come within 10 s of each other up
    to the 4th kill, and the 5th within 30 s of the 4th.  Timestamps are in
    milliseconds, hence the 1000x factors.

    Bug fix: the original iterated the *global* ``kills_dict`` instead of its
    own parameter (it worked only by accident at the existing call site), and
    could index past the end of the list while scanning.
    '''
    for i in range(len(kills_list) - 4):
        window = kills_list[i:i + 5]
        gaps = [window[j + 1]['timestamp'] - window[j]['timestamp'] for j in range(4)]
        # First three gaps <= 10 s, final gap <= 30 s.
        if gaps[0] <= 10000 and gaps[1] <= 10000 and gaps[2] <= 10000 and gaps[3] <= 30000:
            return window[0]
    return None
def generate_heatmap(match_id):
    '''
    Fetch the match and save its kills heatmap to "<match_id>_heatmap.png".
    Relies on the monkey-patched kills_heatmap() above returning the figure.
    '''
    match = cass.get_match(match_id)
    fig = match.kills_heatmap()
    fig.savefig(f"{match_id}_heatmap.png")
setup(API_KEY, REGION)

MATCH_LIST = []         # match ids already processed, to avoid duplicate API calls
PENTAKILLERS_LIST = []  # pentakill info dicts collected across all leagues

MAX_PLAYERS_PER_LEAGUE = 50  # stop early so we do not hammer the API


def _scan_league(start_label, end_label, players, resolve_summoner):
    """Scan up to MAX_PLAYERS_PER_LEAGUE players from the *players* generator,
    collecting pentakill data from each player's matches of the last 7 days.

    The original file repeated this loop four times, once per league, with
    only the labels and the summoner lookup differing.  *resolve_summoner*
    turns one item yielded by *players* into a Summoner (Challenger/GM/Master
    entries already carry one; Diamond yields raw summoner ids).
    """
    print(f'Fetching data for {start_label}:\n')
    counter = 0
    player = next(players, None)  # None once the generator is exhausted
    while player:
        counter += 1
        summoner = resolve_summoner(player)
        print(f"\n{counter}. Evaluating Player: {summoner.name}")
        matches = get_week_matches(summoner)
        if not matches:
            print(f"No matches in the last 7 days for {summoner.name}")
            player = next(players, None)
            continue
        for i, match in enumerate(matches):
            print(f"Fetching data for Match {i+1}/{len(matches)}")
            if MATCH_LIST.count(match.id):
                print("Already fetched this Match")
                continue
            MATCH_LIST.append(match.id)
            pentakillers = get_participant_info(match)
            if not pentakillers:
                print(f"Match {match.id} did not have any pentakillers...")
                continue
            print(f"Pentakillers on Match {match.id}: {pentakillers}")
            PENTAKILLERS_LIST.append(pentakillers)
        print(f"Finished fetching data for Player: {summoner.name}")
        print('\n--- Waiting 5 seconds to start next Player ---\n')  # avoid rate limits
        time.sleep(5)
        player = next(players, None)
        if counter == MAX_PLAYERS_PER_LEAGUE:
            break
    print(f"Finished fetching data for {end_label}.\n")


# Scan each league with the same procedure (labels match the original output).
_scan_league('Challengers League', 'Challenger League',
             get_challengers(), lambda p: p.summoner)
_scan_league('GrandMasters League', 'GrandMaster League',
             get_grandmasters(), lambda p: p.summoner)
_scan_league('Masters League', 'Master League',
             get_masters(), lambda p: p.summoner)
# Diamond yields summoner ids, so resolve them through the API.
_scan_league('Diamond League', 'Diamond League',
             get_diamonds(page=1, tier='I'), lambda p: cass.get_summoner(id=p))
data = []
'''
general printing and returning images for the pentakills
'''
# For every pentakill found above: save a heatmap, locate the pentakill's
# first kill in the timeline, print a summary and collect a CSV row.
for pentakiller in PENTAKILLERS_LIST:
    print(f"Fetching data for Pentakiller '{pentakiller['summoner']}' in Match {pentakiller['match']}:")
    print("Generating kills heatmap...",end=' ')
    generate_heatmap(pentakiller['match'])
    print("Done!")
    kills_dict = get_kills_dict(pentakiller['participant_id'], pentakiller['match'])
    kill = get_pentakill(kills_dict)
    # Timestamps are milliseconds; convert to a zero-padded mm:ss string.
    minutes = kill['timestamp']//60000
    seconds = int(60*(kill['timestamp']/60000 - minutes))
    start_time = f"{minutes:02}:{seconds:02}"
    print(f"The Pentakill started at the {start_time} mark, with coordinates {tuple(kill.get('position').values())}.")
    print(f"The player finished the game with the following items:\n{pentakiller.get('items')}")
    data.append(get_curr_data(pentakiller, kill, start_time))
    print('\n')
# Export the collected rows to a CSV file.
pd.DataFrame(data).to_csv('pentakills.csv', index=False, header=True, encoding='utf-8')
| [
"noreply@github.com"
] | noreply@github.com |
824806407a297dd5bce984576799615452155162 | ee6fc02e8392ff780a4f0d1a5789776e4d0b6a29 | /code/practice/abc/abc068/a.py | b3800d499a667ec8af18bf78ae9c85b83569e746 | [] | no_license | mollinaca/ac | e99bb5d5c07159b3ef98cd7067424fa2751c0256 | 2f40dd4333c2b39573b75b45b06ad52cf36d75c3 | refs/heads/master | 2020-12-22T11:02:13.269855 | 2020-09-18T01:02:29 | 2020-09-18T01:02:29 | 236,757,685 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 67 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
print("ABC"+input()) | [
"morisyot@square-enix.com"
] | morisyot@square-enix.com |
4e9439bccd64614f8be9f6d1df393b1b365247a7 | 86059cbbba04e62a1a6b217ea043081d10c55174 | /Tutorial 2/Code/Lambda/lambda2.py | cd103bfd0dd8cfcf1ccfe882d4d4c68f8bb162ff | [] | no_license | AdityaJNair/PythonAssignment | 4d0190a8bf2576fcf7863fea5cd7b195e6060bc5 | 3c0b8cb1d47b29382bc40239fe4735034db1965e | refs/heads/master | 2021-01-20T11:50:55.030343 | 2016-08-18T05:21:20 | 2016-08-18T05:21:20 | 65,005,206 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 718 | py | #!/usr/bin/env python3
OPERATION_COUNT = 4
simple_collection_of_lambdas = [None] * OPERATION_COUNT
# Populate the calculator's operation table: add, subtract, multiply, divide.
def setup_lambdas():
    simple_collection_of_lambdas[:] = [
        lambda x, y: x + y,  # add
        lambda x, y: x - y,  # subtract
        lambda x, y: x * y,  # multiply
        divide,              # divide (plain function, defined below)
    ]
def divide(x, y):
    """Return x divided by y (true division)."""
    return x / y
if __name__ == '__main__':
    setup_lambdas()
    # Read the two operands, then apply every operation in table order
    # (add, subtract, multiply, divide) and print each result.
    first = int(input("Enter the first number: "))
    second = int(input("Enter the second number: "))
    for operation in simple_collection_of_lambdas:
        print(operation(first, second))
| [
"adijnair@gmail.com"
] | adijnair@gmail.com |
191e1a055ae6199a5c8835115f3f4c9f0708d3b9 | 68151600bd725c87047020e3f3e10e7b609ef113 | /main/migrations/0013_auto_20200527_2144.py | 55849a1241f1fa2d2e4b1b47eb46e5d3d8f0a01c | [] | no_license | rezendi/scanvine | 8a8bff3dad5342bf257319a35fbc0dd7b322e438 | cc8a41112e2f178617faa42056189d54b0d99785 | refs/heads/master | 2023-07-03T20:39:36.060393 | 2020-11-24T20:15:44 | 2020-11-24T20:15:44 | 257,799,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,162 | py | # Generated by Django 3.0.5 on 2020-05-27 21:44
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django schema migration: alters Noticing.create_time's
    # default and introduces the FeedShare join table.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('catalog', '0015_auto_20200607_2018'),
    ]

    operations = [
        migrations.AlterField(
            model_name='noticing',
            name='create_time',
            # NOTE(review): the default is the fixed datetime captured when the
            # migration was generated, not "now" — confirm this is intended.
            field=models.DateTimeField(default=datetime.datetime(2020, 6, 7, 20, 19, 20, 812109)),
        ),
        migrations.CreateModel(
            name='FeedShare',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('share', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Share')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"jon@rezendi.com"
] | jon@rezendi.com |
4e2cb97de7241505f45b49f05c410dc4676b145b | e94dc3776101d5f3083cb2321ff47f76a269791d | /examples_save/examples_save/medusaCsvProcess.py | 0364397467cc80c76fad33293461c82638994854 | [] | no_license | nanjunda/fc_iscsi_scripts | d004b57c1d5b95eb9a1f196238d887640ecaf96f | 8ea16efb39833daa52223a1fcbd9a8dabe84589e | refs/heads/nanjunda-dev | 2021-09-04T10:11:49.252915 | 2018-01-17T21:38:59 | 2018-01-17T21:38:59 | 112,405,356 | 0 | 0 | null | 2018-01-17T21:35:36 | 2017-11-29T00:26:22 | Python | UTF-8 | Python | false | false | 499 | py | import csv
# Average columns 4, 8 and 11 of x.csv, skipping the header row and any
# pre-computed summary rows whose column 4 contains "Avg".
with open('x.csv') as csvfile:
    readCSV = csv.reader(csvfile, delimiter=',')
    next(readCSV)  # skip header row
    count = 0
    sum4 = 0.0
    sum8 = 0.0
    sum11 = 0.0
    for row in readCSV:
        if "Avg" in str(row[4]):
            continue  # skip existing average rows
        count += 1
        print(row[4], row[8], row[11])
        sum4 += float(row[4])
        sum8 += float(row[8])
        sum11 += float(row[11])
    # Guard the division: the original raised ZeroDivisionError when the file
    # had no data rows.
    if count:
        print(sum4 / count, sum8 / count, sum11 / count)
    else:
        print("No data rows found")
| [
"noreply@github.com"
] | noreply@github.com |
0d8d56fe358d827f22efe436159e711e93ae5a8c | 87ba7263b2dcffcd952d0dee8dd42a9cc5142286 | /tensforflow_mnist.py | 4568bbd70fd6d989aa360fb55688e845d8d71b85 | [] | no_license | kpodlaski/NeuralNetworks2018 | 40064acf14522229a66333523a3cc7342ce507b4 | 1d143bc51bce94e80eb9e9d6c9b465ef174689ee | refs/heads/master | 2020-04-01T13:39:57.231298 | 2019-01-15T11:02:31 | 2019-01-15T11:02:31 | 153,261,867 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,235 | py | import tensorflow as tf
import numpy as np
# Build a small MLP: flatten 28x28 input -> 30 sigmoid units -> 10-way softmax.
net = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(30, activation=tf.nn.sigmoid),  # tf.nn.relu
    tf.keras.layers.Dense(10, activation=tf.nn.softmax),
])

# Plain SGD with cross-entropy on integer class labels.
net.compile(optimizer=tf.train.GradientDescentOptimizer(0.5),
            loss='sparse_categorical_crossentropy',
            metrics=['accuracy'])

# Load MNIST and scale pixel values into [0, 1].
mnist = tf.keras.datasets.mnist
(train_inputs, train_labels), (test_inputs, test_labels) = mnist.load_data()
train_inputs, test_inputs = train_inputs / 255.0, test_inputs / 255.0

net.fit(train_inputs, train_labels, epochs=10, batch_size=100)

test_loss, test_acc = net.evaluate(test_inputs, test_labels)
print('Test accuracy:', test_acc)
print('Test loss:', test_loss)

# Recompute accuracy by hand from the raw predictions as a sanity check.
predictions = net.predict(test_inputs)
print("Result : ")
good_response = sum(
    1 for pred, label in zip(predictions, test_labels) if np.argmax(pred) == label
)
last = len(test_inputs) - 1
print(predictions[last])
print(np.argmax(predictions[last]))
print(test_labels[last])
print(good_response / len(test_inputs) * 100.0)
"podlaski@uni.lodz.pl"
] | podlaski@uni.lodz.pl |
4676da0d782299dd3de0559176956456b31fd9e1 | 5bf1c5acaa09e7c3604a08cadac05fd913401491 | /run_grain_facet_from_params.py | 3b6dbf7a87ba349cac4c67f36cbd37db19a1195d | [] | no_license | gregtucker/mountain_front_model | b836641521c4627e2c6f2267cb7b4449a088d87f | 49ad583c55dc20aaa2bc08729068f2ec39e56f57 | refs/heads/master | 2021-01-17T14:46:53.591850 | 2018-09-24T13:45:04 | 2018-09-24T13:45:04 | 48,451,368 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,208 | py | # -*- coding: utf-8 -*-
"""
run_grain_facet_from_params.py: demonstrates how to instantiate and run
a GrainFacetSimulator from another Python script, passing parameters via
a dictionary rather than using a separate input file.
A note on time scales, in seconds:
Duration
(sec) (equiv)
--------------
1 s ~ 1 s
10 s ~ 1 min
100 s ~ 1 min
1000 s ~ 1 hr
10,000 s ~ 1 hr
10^5 s ~ 1 day (28 hrs)
10^6 s ~ 1 week (12 days)
10^7 s ~ 3 months
10^8 s ~ 3 years
Created on Sun Jun 26 09:13:46 2016
@author: gtucker
"""
import time
from grain_facet_model import GrainFacetSimulator
# Simulation parameters; time-like values are in seconds (see the time-scale
# table in the module docstring above).
params = {
    'number_of_node_rows' : 20,
    'number_of_node_columns' : 31,
    'report_interval' : 5.0,     # progress-report cadence
    'run_duration' : 150.0,      # total simulated time
    'output_interval' : 1000.0,
    'plot_interval' : 10.0,
    'uplift_interval' : 10.0,
    'disturbance_rate' : 0.01,
    'weathering_rate' : 0.002,
    'friction_coef' : 1.0,
    'fault_x' : 8.0,
    'cell_width' : 1.0
}

start = time.time()
gridsize = (params['number_of_node_rows'], params['number_of_node_columns'])
# NOTE(review): gridsize duplicates the row/column entries also passed via
# **params — the simulator evidently accepts both; confirm against its API.
gfs = GrainFacetSimulator(gridsize, **params)
gfs.run()
print('Run complete. Run time (sec):')
print(time.time() - start)
| [
"gtucker@colorado.edu"
] | gtucker@colorado.edu |
67ef6df236a7de311ac3d78f352c53cc03be5d79 | e278055ba8efb122e88e6af2bff1e56b207942de | /extractors.py | f48d8f939973ded002832670ca897cbac198a5e1 | [] | no_license | ilay32/wals-project | 3e7f789bda1874909c02c2ea204b66de672f3a7f | 9ff9d16326d4af88655efb952b98a304bb80d028 | refs/heads/master | 2021-04-06T10:59:34.236027 | 2018-03-16T08:40:51 | 2018-03-16T08:40:51 | 83,216,194 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,079 | py | import re,logging
### globals ####

# Match the leading numeric sequence as group(1) and require that no other
# digits follow it.  Raw strings avoid invalid-escape warnings for \d.
numerizer = re.compile(r"(^\d+)([^\d]*$)")

# (tmp?) fallback for failed numerization: just grab the leading digits.
simplenumerizer = re.compile(r"(^\d+)")
### extractors ###
def natural(c):
    """
    Identity extractor: just return the cell's leading numeric value.
    """
    return numerize(c)
def mult2bin(target_value, value):
    """
    Build a binarizer for a multi-valued feature: the returned function maps a
    cell to *value* when its numeric content equals *target_value*, and to
    -value otherwise.
    """
    def which(cell):
        return value if numerize(cell) == target_value else -value
    return which
### helpers ###
def numerize(txt):
    """
    Extract the leading integer from *txt*.

    If the strict pattern fails (there is more than one numeric sequence in
    the cell), log the cell contents so we can see what's what, then fall back
    to grabbing just the leading digits.  Raises ValueError when the cell has
    no leading number at all (the original crashed with an opaque
    AttributeError on None there).
    """
    m = numerizer.match(txt)
    if m:
        return int(m.group(1))
    logging.warning("can't numerize cell contents: %s", txt)
    fallback = simplenumerizer.match(txt)
    if fallback is None:
        raise ValueError("no leading number in cell: %r" % (txt,))
    return int(fallback.group(1))
| [
"silayster@gmail.com"
] | silayster@gmail.com |
866ea041035d1cf1c46a7455c4a1351d919e81e8 | 5b3eb673f6597b90acc98b48852417982924c5d6 | /users/views.py | 1919f9cdb270a67aa66acf8dce15ecc79da27b2b | [] | no_license | ritiksingh8/Buy-It-An-Ecommerce-WebApplication | 3e78d8ca35d167660bdbc5092ddabef41211335c | 6b9fd48e30660fd58ee0b6f3256fdab1b6a9b9a9 | refs/heads/master | 2020-12-01T18:13:46.229547 | 2020-02-16T12:34:15 | 2020-02-16T12:34:15 | 230,723,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,594 | py | from django.shortcuts import render, redirect
from django.contrib import messages
from .forms import UserRegisterForm
from shop.models import Products
from django.contrib.auth.models import User
from .models import Cart
from django.contrib.auth.decorators import login_required
def register(request):
    """Sign-up view: on a valid POST, create the account, flash a success
    message and redirect to the index; otherwise render the (possibly bound)
    registration form."""
    form = UserRegisterForm(request.POST) if request.method == 'POST' else UserRegisterForm()
    if request.method == 'POST' and form.is_valid():
        form.save()
        username = form.cleaned_data.get('username')
        messages.success(request, f'Account created for {username}!')
        return redirect('index')
    return render(request, 'users/register.html', {'form': form})
@login_required
def new_cart(request, param1, param2):
    """Add ('add') or remove ('remove') one unit of the product titled
    *param2* from the current user's cart, then render the cart page."""
    user = request.user
    product = Products.objects.filter(title=param2).first()
    cart_row = Cart.objects.filter(user=user).filter(product=product).first()
    if param1 == 'add':
        if cart_row is None:
            Cart(user=user, product=product).save()  # first unit of this product
        else:
            cart_row.quantity = cart_row.quantity + 1
            cart_row.save()
    elif param1 == 'remove' and cart_row is not None:
        # Guard fixed: removing a product that is not in the cart used to
        # crash with AttributeError on None.
        cart_row.quantity = cart_row.quantity - 1
        cart_row.save()
        if cart_row.quantity == 0:
            cart_row.delete()
    cart_items = Cart.objects.filter(user=user)
    empty = len(cart_items) == 0
    return render(request, 'users/cart.html',
                  {'cart_items': cart_items, 'add': 'add', 'remove': 'remove', 'empty': empty})
| [
"ritik.singh@spit.ac.in"
] | ritik.singh@spit.ac.in |
601c2feab2272fe4859dece473351049ed440a94 | 9117cee84a90c3c8a93028b5af67799e7ac5a802 | /CCC/CCC '12 S5 - Mouse Journey.py | 8e5e740163672ae2e0b5794ac50f7ba607b0f032 | [] | no_license | Stevan-Zhuang/DMOJ | 7a1fc3b00d553dcbb2e7069d046c4922fdf2286e | b0c7b1e52473e71e3b4a1f15fc34e35c9f5bd92c | refs/heads/master | 2023-06-16T16:34:36.497282 | 2021-07-15T20:09:32 | 2021-07-15T20:09:32 | 292,061,961 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 673 | py | num_rows, num_cols = [int(data) for data in input().split()]
# DP over the grid: num_paths[r][c] = number of mouse paths from (0,0) to
# (r,c) moving only right/down; cells holding a cat are marked -1 and
# contribute 0 to their neighbours.
num_paths = [[0] * num_cols for row in range(num_rows)]
num_paths[0][0] = 1
num_cats = int(input())
for cat in range(num_cats):
    row, col = input().split()
    row, col = int(row) - 1, int(col) - 1  # input is 1-indexed
    num_paths[row][col] = -1
for row in range(num_rows):
    for col in range(num_cols):
        if num_paths[row][col] != -1:
            # NOTE(review): at row 0 / col 0 the [col-1]/[row-1] indices wrap
            # to the last column/row, which still hold 0 (or -1, treated as 0)
            # when first read — the wraparound is harmless here but relied on
            # implicitly.
            left = num_paths[row][col - 1] if num_paths[row][col - 1] != -1 else 0
            up = num_paths[row - 1][col] if num_paths[row - 1][col] != -1 else 0
            cur = num_paths[row][col]
            num_paths[row][col] = max(left + up, cur)
print(num_paths[-1][-1])
| [
"noreply@github.com"
] | noreply@github.com |
f6d5d30640cd3cfa6990ed9c790c621a34e1b867 | db7aa767430bab121142bf00812751ac6c0da3cc | /bin/2020_day_06.py | f12a032c0798fe86ed0c73d74032f4a5cb698525 | [] | no_license | osterbek/adventofcode | 64f4383e1532987732d1ed9fa673f279c4106bd3 | fdb017f0efdf4fdccc4e41874579e826ec1d02fc | refs/heads/main | 2023-02-05T19:30:25.868594 | 2020-12-25T09:18:18 | 2020-12-25T09:18:18 | 320,618,733 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 909 | py | from pathlib import Path
if __name__ == '__main__':
    # Each blank-line-separated paragraph is one group; each line one person.
    content = Path('../input/input_2020_06.txt').read_text().split('\n\n')
    dataset = [group.split('\n') for group in content]
    LETTERS = frozenset(chr(code) for code in range(97, 123))  # 'a'..'z'
    # solution[0]: part 1, letters anyone in the group answered yes to;
    # solution[1]: part 2, letters everyone in the group answered yes to.
    solution = [0, 0]
    for group in dataset:
        # Per-person answer sets, restricted to a-z (set algebra replaces the
        # original O(26 * people) membership loops).
        per_person = [LETTERS & set(person) for person in group]
        solution[0] += len(set().union(*per_person))
        solution[1] += len(LETTERS.intersection(*per_person))
    print('Answer part 1 = {:d} '.format(solution[0]))
    print('Answer part 2 = {:d} '.format(solution[1]))
| [
"noreply@github.com"
] | noreply@github.com |
1a5134d5029d306a341b613d4b8f710505dd01e7 | 2d18404c29d4031d92d2eea5fdb583d01822824c | /Ecommerce_Project/App_Order/apps.py | 0db55b0dd6104f9a9a0f4c17eed8e851574a5c95 | [] | no_license | ShamimMSAS7/CSE470-Project | 5eb22eb189f51131c27b843f69470c3459579b83 | c0063b42612cead988e5b13c652f18459931a3f9 | refs/heads/main | 2023-08-04T17:00:02.200085 | 2021-09-17T17:33:20 | 2021-09-17T17:33:20 | 402,445,678 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | from django.apps import AppConfig
class AppOrderConfig(AppConfig):
    """Django AppConfig for the App_Order application."""

    # Use 64-bit integer primary keys for models that don't declare one.
    default_auto_field = 'django.db.models.BigAutoField'
    # Label under which Django registers this application.
    name = 'App_Order'
| [
"shamimmsas7@gmail.com"
] | shamimmsas7@gmail.com |
e8b0c3235cae1f212444dfb8a51751d4dc4ad88f | c0717724c7dc3937252bb4a7bd7c796088db4c5d | /solutions/rijeci.py | f7bb3e0093b8d9f71b50155f01803ba70acde38f | [] | no_license | matthew-cheney/kattis-solutions | 58cd03394ad95e9ca7ffa3de66b69d90647b31ff | d9397ca4715a3ad576046a62bdd6c0fb9542d838 | refs/heads/main | 2023-01-24T12:49:18.871137 | 2020-12-10T04:10:48 | 2020-12-10T04:10:48 | 318,857,227 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 89 | py | K = int(input())
# Iterate the pair K times; each step maps (A, B) -> (B, A + B),
# a Fibonacci-style recurrence, then report the final counts.
A, B = 1, 0
step = 0
while step < K:
    A, B = B, A + B
    step += 1
print(A, B)
"m.cheney95@outlook.com"
] | m.cheney95@outlook.com |
edb367901d6a40a0fc07e0cb1fbefce67e8619fe | 08b640c667f69b449f02ff6b41d9c6a4bc804947 | /csduarte/ex35/sd2.py | a346aaf891197513b52bd00e419bef3593e7c2f1 | [] | no_license | csduarte/FunPy | db2ad9e60849820a823b0fcf7dd5c15d70ec0c5a | 2a73e975dc6617f1fe11fc5b5ed7243f95bb865d | refs/heads/master | 2021-05-04T11:21:02.010283 | 2016-10-07T00:24:13 | 2016-10-07T00:24:13 | 48,732,399 | 0 | 0 | null | 2016-10-07T00:24:14 | 2015-12-29T06:48:26 | Python | UTF-8 | Python | false | false | 50 | py | # Looks good. Cleared up a few caps and what not.
| [
"csduarte@gmail.com"
] | csduarte@gmail.com |
eccd89a0b9800c91f111c41cba906f0bace5a4ff | f361f4477577716ab3e8796c787157e43b2f9178 | /training/python-training/number_game_2.py | ac3f9357ec9eb22351ea7a4f999f5241704c8e9f | [] | no_license | drewmullen/personal | edba910f5ea3a6311094d2b2047e522e700e5219 | 37995ad786bf1926d02cdb02ac7bb11ead9641ae | refs/heads/master | 2021-09-13T02:48:53.239667 | 2018-04-24T03:15:00 | 2018-04-24T03:15:00 | 83,120,385 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 896 | py | import random
# Secret target for this round, drawn uniformly from 1..10 inclusive.
secret_num = random.randint(1,10)
def main():
    """Run up to six guessing rounds, then declare the game lost."""
    for _ in range(6):
        input_eval(take_input())
    print("Too many guesses, you lose!")
    exit()
def take_input():
    """Prompt until the user enters an integer in 1..10, then return it.

    The original version retried via recursive calls but discarded their
    return values, so a corrected guess was lost (returning None or raising
    UnboundLocalError).  A loop keeps every retry's result.
    """
    while True:
        try:
            guess = int(input("Guess a number between 1 and 10: "))
        except ValueError:
            print("That's not a number! Try again...")
            continue
        if guess > 10:
            print("Thats too high! Try again...")
        elif guess <= 0:
            print("Thats too low! Try again...")
        else:
            return guess
def input_eval(guess):
    """Compare *guess* to the secret number; terminate the program on a win."""
    if guess != secret_num:
        print("That's not it! Try again...")
        return
    print("You got it! The number was {}.".format(secret_num))
    exit()
main()  # start the guessing game when the script is executed
| [
"drew.mullen@ge.com"
] | drew.mullen@ge.com |
e957134d472c84d28c43e089b40cdb76ab62d681 | 64314270bfa5d14d13944ccf9d1fb15085a1ca68 | /Problema 1/naive_bayes/naive_bayes.py | 3a8f4ac2c700be49419fb56625b482b91b6f575c | [
"MIT"
] | permissive | jarvis-fga/Projetos | bfef5becc31e15850410194a847a3e4396dd31a3 | 59b89527e8b9ddb4caf353eb85b229ec27eae2a3 | refs/heads/master | 2021-01-19T13:41:15.855101 | 2017-11-28T17:12:24 | 2017-11-28T17:12:24 | 100,855,314 | 2 | 1 | null | 2017-11-28T17:12:24 | 2017-08-20T10:54:56 | Jupyter Notebook | UTF-8 | Python | false | false | 974 | py | import csv
def carregar_acessos(arquivo_nome):
    """Load a CSV of float features plus a final 'Origem' label per row.

    Args:
        arquivo_nome: path to a CSV whose first row is a header and whose
            remaining rows are feature columns followed by the label column.
    Returns:
        (dados, marcacoes): list of float feature rows and list of labels.

    Fixes over the original: `leitor.next()` was Python-2 only, the file was
    opened in binary mode (csv needs text mode on Python 3) and it was never
    closed.
    """
    dados = []
    marcacoes = []
    # newline='' is the csv-module recommendation for text-mode files.
    with open(arquivo_nome, newline='') as arquivo:
        leitor = csv.reader(arquivo)
        next(leitor)  # skip the header row
        for linha in leitor:
            # All columns but the last are features; the last is the label.
            *atributos, origem = linha
            dados.append([float(valor) for valor in atributos])
            marcacoes.append(origem)
    return dados, marcacoes
def taxa_acerto(resultado, gabarito):
    """Return the percentage (0-100) of predictions matching the reference.

    Args:
        resultado: sequence of predicted labels.
        gabarito: sequence of ground-truth labels, aligned with *resultado*.
    Returns:
        float accuracy percentage; 0.0 for empty input.

    Fixes over the original: the index variable was never incremented, so
    every prediction was compared against gabarito[0]; empty input raised
    ZeroDivisionError.
    """
    if not resultado:
        return 0.0
    acertos = sum(r == g for r, g in zip(resultado, gabarito))
    return 100.0 * acertos / len(resultado)
# Train a multinomial Naive Bayes classifier on the prepared data and report
# its accuracy (%) on the held-out test file.
dados, marcacoes = carregar_acessos('dados_tratados.csv')
teste, marcacoes_teste = carregar_acessos('dados_teste.csv')
from sklearn.naive_bayes import MultinomialNB
modelo = MultinomialNB()
modelo.fit(dados, marcacoes)
resultado1 = modelo.predict(teste)
# Compare predictions against the ground-truth labels of the test set.
taxa_final = taxa_acerto(resultado1, marcacoes_teste)
print("Taxa de acerto em % :")
print(taxa_final)
| [
"lucasandradeunb@gmail.com"
] | lucasandradeunb@gmail.com |
52f7b8091977541e4b8412d8743831f8ae5c963c | 28c69e41de8054279d6b8ebf2788693c1dfde159 | /33 雷家硕 上海/第一次作业/第二节课之熟悉embedding.py | 3aeb07dacb8bdd2d6d78da08db406710abad0ac9 | [] | no_license | edmlover/badouai-tujiban | 45260aad27da78c6c34c991f06568fe2adb7c324 | 088080ea1f617e0ce64655f8389288101e277702 | refs/heads/main | 2023-08-15T11:33:57.830821 | 2021-10-16T14:25:31 | 2021-10-16T14:25:31 | 409,547,422 | 0 | 2 | null | 2021-09-23T10:33:02 | 2021-09-23T10:33:01 | null | UTF-8 | Python | false | false | 422 | py | import torch
import torch.nn as nn
num_embeddings = 6  # size of the character vocabulary
embedding_dim = 3  # dimensionality of each character's vector
# Lookup table of shape (num_embeddings, embedding_dim), randomly initialised.
embedding_layer = nn.Embedding(num_embeddings, embedding_dim)
print(embedding_layer.weight, "随机初始化矩阵")
# Input: the ids of the characters to embed.
x = torch.LongTensor([1, 2, 5])
embedding_out = embedding_layer(x)
print(embedding_out)
| [
"noreply@github.com"
] | noreply@github.com |
b1c18147b5e2ae4f5dac680af85aad00eeae7872 | 1d8b108cb5720917c2de0b87f58db40349b82c3d | /get_data_gov_sg_met.py | 06904d7660f5904214d6e6b1282f0d4256dbab09 | [
"MIT"
] | permissive | tangshiyuan/access-data-gov-sg | baff5b3124d2b17bc7b3006fbdbf26d0d15f8ec3 | a5b013475162c6ea2deccf42e4ab9d0ea2d97dd5 | refs/heads/master | 2020-03-11T14:31:45.266000 | 2018-04-23T14:09:42 | 2018-04-23T14:09:42 | 130,057,244 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,666 | py | #!/usr/bin/env python3
"""
get_data_gov_sg_met.py:
Download meteorological station data (and/or air quality data) for a specific month via the
data.gov.sg APIs.
API key requirement:
In order to use this script, an API key needs to be obtained via
https://developers.data.gov.sg.
Usage:
To download a specific month and variable, specify the month (e.g. 2017_02) and variable, e.g.:
./get_data_gov_sg_met.py 2017_02 rainfall
To download data for all variables in a specific month, specify just the month:
./get_data_gov_sg_met.py 2017_02
To download data for all variables from last month, just run the script with no command-line
arguments:
./get_data_gov_sg_met.py
Output files:
Gzipped CSV files, corresponding to different variables, will be saved in data_gov_sg_met_v1/
The file naming convention is as follows:
data_gov_sg_met_v1/<variable>_<yyyy-mm>_c<today>.csv.gz
where <today> is the date on which the file was created.
For example,
data_gov_sg_met_v1/wind-speed_2017-02_c20170526.csv.gz
Information about input data:
For information about the input data used to derive the output CSV files, please see
https://developers.data.gov.sg, https://data.gov.sg/open-data-licence, and
https://data.gov.sg/api-terms.
Author:
Benjamin S. Grandey, 2017
"""
import calendar
import os
import pandas as pd
import requests
import sys
import time
# Get my API keys
from my_api_keys import my_api_dict
# Note: this module, containing my API keys, will not be shared via GitHub
# You can obtain your own API key(s) by registering at https://developers.data.gov.sg
my_key = my_api_dict['data.gov.sg']  # API key for data.gov.sg
# Output directory for the gzipped CSV files produced by download_month()
here = os.path.abspath(os.path.dirname(__file__))
data_dir = os.path.join(here, 'data_gov_sg_met_v1')
# If directory does not exist, create it
if not os.path.exists(data_dir):
    os.makedirs(data_dir)
    print('Created {}'.format(data_dir))
def retrieve_data_via_api(variable, dt, n_attempts=10):
    """
    Function to attempt to retrieve data for a specific datetime.
    Args:
        variable: string of variable name used by API (e.g. 'rainfall')
        dt: pd.datetime, corresponding to 'date_time' in the API
        n_attempts: number of attempts to retry if API connection fails
            (HTTP/connection failures retry recursively every 10 s until the
            budget is spent; a KeyError retries once only)
    Returns:
        pd.DataFrame containing data (if successful), or None
    """
    try:
        # Try to connect to API
        r = requests.get('https://api.data.gov.sg/v1/environment/{}'.format(variable),
                         headers={'api-key': my_key},
                         params={'date_time': dt.strftime('%Y-%m-%dT%H:%M:%S')},
                         timeout=30)
        if r.status_code == 200:
            # If API connection was successful, load data into DataFrame, unless no data present
            if len(r.json()['items'][0]['readings']) >= 1:
                result = pd.DataFrame(r.json()['items'][0]['readings'])
                if variable == 'pm25': # necessary due to diff in pm25 API return format
                    result = result.reset_index()
                    result = result.rename(columns={'index': 'region'})
                # Keep the local (SGT) timestamp; strip the UTC offset suffix.
                result['timestamp_sgt'] = pd.to_datetime(r.json()['items'][0]['timestamp']
                                                         .split('+')[0])
            else:
                result = None
        else:
            # If API query failed, sleep 10s, then retry recursively (up to n_attempts)
            if n_attempts > 1:
                print('    dt = {}, r.status_code = {}, (n_attempts-1) = {}. '
                      'Retrying in 10s.'.format(dt, r.status_code, (n_attempts-1)))
                time.sleep(10)
                result = retrieve_data_via_api(variable, dt, n_attempts=(n_attempts-1))
            else:
                print('    dt = {}, r.status_code = {}, (n_attempts-1) = {}. '
                      'FAILED TO RETRIEVE DATA.'.format(dt, r.status_code, (n_attempts-1)))
                result = None
        r.close()
    except (requests.exceptions.SSLError, requests.exceptions.ConnectionError,
            requests.exceptions.ConnectTimeout, requests.exceptions.ReadTimeout):
        # If connection failed, sleep 10s, then retry recursively (up to n_attempts)
        if n_attempts > 1:
            print('    dt = {}, error = {}, (n_attempts-1) = {}. '
                  'Retrying in 10s.'.format(dt, sys.exc_info()[0], (n_attempts-1)))
            time.sleep(10)
            result = retrieve_data_via_api(variable, dt, n_attempts=(n_attempts-1))
        else:
            print('    dt = {}, error = {}, (n_attempts-1) = {}. '
                  'FAILED TO CONNECT.'.format(dt, sys.exc_info()[0], (n_attempts-1)))
            result = None
    except KeyError:
        # KeyError is encountered, sleep 10s, then retry once only
        # (a malformed/unexpected JSON payload is unlikely to fix itself).
        if n_attempts > 1:
            print('    dt = {}, error = {}, (n_attempts-1) = {}. '
                  'Retrying ONCE in 10s.'.format(dt, sys.exc_info()[0], (n_attempts-1)))
            time.sleep(10)
            result = retrieve_data_via_api(variable, dt, n_attempts=1)
        else:
            print('    dt = {}, error = {}, (n_attempts-1) = {}. '
                  'FAILED TO RETRIEVE DATA.'.format(dt, sys.exc_info()[0], (n_attempts - 1)))
            result = None
    return result
def download_month(variable, yyyy, mm):
    """
    Function to attempt to retrieve data for a specific month.
    Args:
        variable: string of variable name used by API (e.g. 'rainfall')
        yyyy: string containing year (e.g. '2017')
        mm: string containing month (e.g. '05')
    Output file:
        CSV file:
            data_gov_sg_met_v1/<variable>_<yyyy-mm>_c<today>.csv
        where <today> is today's date.
    """
    print('variable = {}, yyyy = {}, mm = {}'.format(variable, yyyy, mm))
    # Number of days in month
    ndays = calendar.monthrange(int(yyyy), int(mm))[1] # supports leap years
    # Time interval dependent on variable
    if variable == 'rainfall':
        freq = '5 min'
        periods = (ndays * 24 * 12) + 1
    elif variable == 'pm25':
        freq = '60 min'
        periods = (ndays * 24 * 1) + 1
    else:
        freq = '1 min'
        periods = (ndays * 24 * 60) + 1
    # Datetime range to search through
    datetime_range = pd.date_range('{}-{}-01 00:00:00'.format(yyyy, mm),
                                   periods=periods, freq=freq)
    # Loop over datetimes
    for dt, i in zip(datetime_range, range(len(datetime_range))):
        # Attempt to retrieve data via API
        temp_df = retrieve_data_via_api(variable, dt)
        # If data available and timestamp indicates correct month, then append to DataFrame df
        if temp_df is not None:
            if temp_df['timestamp_sgt'].loc[0].month == int(mm): # querying 00:00 on 1st day may
                try: # may return 23:59 on prev. day
                    # NOTE(review): DataFrame.append was removed in pandas 2.0;
                    # pd.concat is the modern replacement.
                    df = df.append(temp_df, ignore_index=True)
                except UnboundLocalError: # 1st time, initialise df
                    df = temp_df
        # Indicate progress
        perc = i / periods * 100 # percentage progress
        print('    {:000.1f}%'.format(perc), end='\r', flush=True)
    print() # start new line
    # Print summary of number of records
    # NOTE(review): if no data at all was retrieved this month, df is never
    # bound and the next line raises NameError — confirm intended behaviour.
    print('    {} records'.format(len(df)))
    # Remove duplicates
    df = df.drop_duplicates()
    print('    {} records after removing duplicates'.format(len(df)))
    # Save DataFrame to CSV file
    out_filename = '{}/{}_{}_{}_c{}.csv.gz'.format(data_dir, variable, yyyy, mm,
                                                   pd.datetime.today().strftime('%Y%m%d'))
    df.to_csv(out_filename, index=False, compression='gzip')
    print('    Written {}'.format(out_filename))
    return 0
if __name__ == '__main__':
    # Year and month to get data for
    try:
        yyyy, mm = sys.argv[1].split('_') # if specified via command-line
    except IndexError: # otherwise get data for last month
        # NOTE(review): pd.datetime was removed in pandas 2.0; prefer the
        # stdlib datetime module here.
        month_ago = (pd.datetime.today() - pd.Timedelta(1, 'M')) # ~1 month ago (not exact)
        yyyy, mm = month_ago.strftime('%Y_%m').split('_')
    # Variable(s) to get data for
    try:
        variables = [sys.argv[2], ] # if specified via command-line
    except IndexError: # otherwise get data for all variables
        variables = ['rainfall', 'wind-speed', 'wind-direction', 'air-temperature',
                     'relative-humidity', 'pm25']
    # Loop over variables
    for variable in variables:
        download_month(variable, yyyy, mm)
| [
"benjamin.grandey@gmail.com"
] | benjamin.grandey@gmail.com |
545c14b1b076d85bfb13ffa68e1b7ccb88aab197 | 8ec32cd65ba298a194887566ba9b4d0f8cd18893 | /moviedatabase.py | ea0bf255e32330d3eab089ee0229fca0cc70aa02 | [] | no_license | yvonnebutselaar/data-driven-design | b82f4ec2ab6efbf80852cd654e9e69b337af44ef | 7a723fb57d04eb64a15a5521cd4d87324599ebb7 | refs/heads/master | 2020-03-29T15:49:43.483908 | 2019-01-17T19:41:11 | 2019-01-17T19:41:11 | 150,082,676 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | import json
# Load the movie catalogue once; the context manager closes the file, so the
# original trailing f.close() after the with-block was redundant and removed.
with open("movies.json") as f:
    movies = json.load(f)
ayear = input("What year do you want to see?")
# Print every movie released in the requested year.
for items in movies:
    if str(items["year"]) == ayear:
        print(f'{items["title"]} is a movie from {items["year"]}')
"yvonnebutselaar@gmail.com"
] | yvonnebutselaar@gmail.com |
3e63ae681a9a108917e4e8eb68534010225514bd | cfc804a95325bba903300bdc0984d57316e20861 | /ID Scanner/yolo/frontend.py | 83633e175c23fcf9d88e6e36a170c019421bf846 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Haomin-Yu/Smart-Bartender | 3408d849779d3cec56a8dfb4822790eb5b39afc0 | 52e9ff7e7ab8ffc13e248f20cebf110ed9897a5d | refs/heads/master | 2022-07-22T18:02:04.083038 | 2019-12-10T19:56:33 | 2019-12-10T19:56:33 | 213,276,187 | 1 | 0 | MIT | 2022-07-06T20:25:19 | 2019-10-07T02:00:31 | Jupyter Notebook | UTF-8 | Python | false | false | 6,181 | py | # -*- coding: utf-8 -*-
# This module is responsible for communicating with the outside of the yolo package.
# Outside the package, someone can use yolo detector accessing with this module.
import os
import numpy as np
from yolo.backend.decoder import YoloDecoder
from yolo.backend.loss import YoloLoss
from yolo.backend.network import create_yolo_network
from yolo.backend.batch_gen import create_batch_generator
from yolo.backend.utils.fit import train
from yolo.backend.utils.annotation import get_train_annotations, get_unique_labels
from yolo.backend.utils.box import to_minmax
def get_object_labels(ann_directory):
    """Collect the unique object labels found in every annotation file
    inside *ann_directory*."""
    annotation_paths = [os.path.join(ann_directory, fname)
                        for fname in os.listdir(ann_directory)]
    return get_unique_labels(annotation_paths)
# Default anchor pairs (w, h); kept as an immutable tuple so no caller can
# accidentally mutate the shared default.
_DEFAULT_ANCHORS = (0.57273, 0.677385, 1.87446, 2.06253, 3.33843,
                    5.47434, 7.88282, 3.52778, 9.77052, 9.16828)

def create_yolo(architecture,
                labels,
                input_size=416,
                anchors=None,
                coord_scale=1.0,
                class_scale=1.0,
                object_scale=5.0,
                no_object_scale=1.0):
    """Factory wiring a network, loss and decoder into a YOLO detector.

    # Args
        architecture : str, backbone name understood by create_yolo_network
        labels : list of class-name strings
        input_size : int, input image resolution
        anchors : flat list of (w, h) anchor pairs; None selects the defaults
        coord_scale / class_scale / object_scale / no_object_scale : loss weights

    # Returns
        YOLO instance
    """
    # Fix for the mutable-default-argument pitfall: the previous default was
    # a single shared list object.  Passing anchors explicitly behaves as before.
    if anchors is None:
        anchors = list(_DEFAULT_ANCHORS)
    n_classes = len(labels)
    n_boxes = len(anchors) // 2  # anchors come in (w, h) pairs
    yolo_network = create_yolo_network(architecture, input_size, n_classes, n_boxes)
    yolo_loss = YoloLoss(yolo_network.get_grid_size(),
                         n_classes,
                         anchors,
                         coord_scale,
                         class_scale,
                         object_scale,
                         no_object_scale)
    yolo_decoder = YoloDecoder(anchors)
    return YOLO(yolo_network, yolo_loss, yolo_decoder, labels, input_size)
class YOLO(object):
    """Facade over the yolo backend: runs detection on images and trains the
    underlying network on annotated data."""

    def __init__(self,
                 yolo_network,
                 yolo_loss,
                 yolo_decoder,
                 labels,
                 input_size = 416):
        """
        # Args
            yolo_network : network wrapper (forward / get_model / get_grid_size)
            yolo_loss : YoloLoss instance
            yolo_decoder : YoloDecoder instance
            labels : list of class-name strings
            input_size : int, network input resolution
        """
        self._yolo_network = yolo_network
        self._yolo_loss = yolo_loss
        self._yolo_decoder = yolo_decoder
        self._labels = labels
        # Only used when building training batches.
        self._input_size = input_size

    def load_weights(self, weight_path, by_name=False):
        """Load pre-trained weights from *weight_path* if the file exists."""
        if os.path.exists(weight_path):
            print("Loading pre-trained weights in", weight_path)
            self._yolo_network.load_weights(weight_path, by_name=by_name)
        else:
            print("Fail to load pre-trained weights. Make sure weight file path.")

    def predict(self, image, threshold=0.3):
        """
        # Args
            image : 3d-array (BGR ordered)
            threshold : float, minimum confidence for a detection
        # Returns
            boxes : array, shape of (N, 4), pixel min/max coordinates
            probs : array, shape of (N, nb_classes)
        """
        def _to_original_scale(boxes):
            # Decoded boxes are normalised to [0, 1]; scale back to pixels.
            height, width = image.shape[:2]
            minmax_boxes = to_minmax(boxes)
            minmax_boxes[:,0] *= width
            minmax_boxes[:,2] *= width
            minmax_boxes[:,1] *= height
            minmax_boxes[:,3] *= height
            # Fix: np.int was removed in NumPy 1.24; it was only an alias for
            # the builtin int, which is used here instead.
            return minmax_boxes.astype(int)

        netout = self._yolo_network.forward(image)
        boxes, probs = self._yolo_decoder.run(netout, threshold)
        if len(boxes) > 0:
            boxes = _to_original_scale(boxes)
            return boxes, probs
        else:
            return [], []

    def train(self,
              img_folder,
              ann_folder,
              nb_epoch,
              saved_weights_name,
              batch_size=8,
              jitter=True,
              learning_rate=1e-4,
              train_times=1,
              valid_times=1,
              valid_img_folder="",
              valid_ann_folder="",
              first_trainable_layer=None,
              is_only_detect=False):
        """Train the network on the annotated images under *img_folder* /
        *ann_folder*; weights are written to *saved_weights_name* by the
        backend training loop."""
        # 1. get annotations
        train_annotations, valid_annotations = get_train_annotations(self._labels,
                                                                     img_folder,
                                                                     ann_folder,
                                                                     valid_img_folder,
                                                                     valid_ann_folder,
                                                                     is_only_detect)
        # 2. get batch generators (validation batches are never jittered)
        train_batch_generator = self._get_batch_generator(train_annotations, batch_size, train_times, jitter=jitter)
        valid_batch_generator = self._get_batch_generator(valid_annotations, batch_size, valid_times, jitter=False)
        # 3. get the keras model instance & the loss function
        model = self._yolo_network.get_model(first_trainable_layer)
        loss = self._get_loss_func(batch_size)
        # 4. run the training loop
        train(model,
              loss,
              train_batch_generator,
              valid_batch_generator,
              learning_rate = learning_rate,
              nb_epoch = nb_epoch,
              saved_weights_name = saved_weights_name)

    def _get_loss_func(self, batch_size):
        """Return the backend loss closure for the given batch size."""
        return self._yolo_loss.custom_loss(batch_size)

    def _get_batch_generator(self, annotations, batch_size, repeat_times=1, jitter=True):
        """
        # Args
            annotations : Annotations instance
            batch_size : int
            jitter : bool
        # Returns
            batch_generator : BatchGenerator instance
        """
        batch_generator = create_batch_generator(annotations,
                                                 self._input_size,
                                                 self._yolo_network.get_grid_size(),
                                                 batch_size,
                                                 self._yolo_loss.anchors,
                                                 repeat_times,
                                                 jitter=jitter,
                                                 norm=self._yolo_network.get_normalize_func())
        return batch_generator
| [
"xinyug2@uw.edu"
] | xinyug2@uw.edu |
d5c8cacfbea754c24822fed7168c9ea69133b51e | 56fe5ad50f01e347182f75b984e97f5b7ac4d647 | /security_in_memory_db.py | b3c8dd1f3784fda121e1d4c45d15b0505afd46e2 | [] | no_license | leenjiru/Sonia_books | e6a18033155e808339b6786018c3bdaca99fcf72 | 3cb752160eb789d9155482701fd581eb4aa8d170 | refs/heads/master | 2023-03-24T11:37:41.607157 | 2020-07-22T23:38:17 | 2020-07-22T23:38:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 778 | py | # this applies when using an in memory db
from models.users import UserModel
from werkzeug.security import safe_str_cmp
# Legacy plain-dict user record kept for reference only.
# users = [
#     {
#         'id': 1,
#         'username': 'Nick',
#         'Password': 'password'
#     }
# ]
# In-memory "database": a single hard-coded user.
users = [
    UserModel(1, 'Nicky', 'password1')
]
# Index the users by username (login) and by id (JWT identity) for O(1) lookup.
username_mapping = {u.username: u for u in users}
user_id_mapping = {u.id: u for u in users}
# Legacy shape of the id index, kept for reference only.
# user_id_mapping = {
#     1: {
#         'id': 1,
#         'username': 'Nick',
#         'Password': 'password'
#     }
# }
def authenticate(username, password):
    """Return the matching user when the password checks out, else None."""
    candidate = username_mapping.get(username)
    # safe_str_cmp performs a constant-time comparison.
    if candidate and safe_str_cmp(candidate.password, password):
        return candidate
def identity(payload):
    """Resolve a decoded JWT payload to its user via the in-memory id index."""
    return user_id_mapping.get(payload['identity'])
| [
"www.leenick@gmail.com"
] | www.leenick@gmail.com |
adef9bc871758731eed912cc5563b6f355987ed3 | ec03d9949eb202c9de6c163566d7edffd583db93 | /dags/hello_world.py | 515b4e25dbc49b07eb81865d92302d7164e7deff | [] | no_license | tomatohope/airflow | 770ba89dd68259566969259c08bb63071d24248a | 8cbb9f94edf10fb66bddcd522fa71c8a186b4e6d | refs/heads/master | 2021-07-21T03:26:34.251577 | 2021-01-08T09:31:44 | 2021-01-08T09:31:44 | 231,941,543 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,614 | py | # coding: utf-8
import os
import airflow
from airflow import DAG
from airflow.operators.python_operator import PythonOperator
from datetime import datetime as datetime1, timedelta
import datetime as datetime2
# Anchor the start date one day in the past, consistent with
# schedule_interval=timedelta(days=1) below.
dt = datetime1.now() - datetime2.timedelta(days=1)
airflow_home = '/root/airflow'
os.environ['airflow_home'] = str(airflow_home)
# Default arguments applied to every task in this DAG.
default_args = {
    'owner': 'user1',
    'depends_on_past': False,
    # start time: year month day hour minutes seconds
    'start_date': datetime1(dt.year, dt.month, dt.day, 10, 2, 0),
    'email': ['user1@xxx.com', 'user2@xxx.com'],
    'email_on_failure': True,
    'email_on_retry': True,
    'retries': 0,
    'retry_delay': timedelta(seconds=5)
}
# Define the DAG itself.
dag = DAG(
    # Name displayed in the Airflow UI.
    dag_id='test',
    default_args=default_args,
    # Run once per day; keep consistent with the dt offset above.
    schedule_interval=timedelta(days=1),
    # Disable backfill: never run catch-up instances for past dates.
    catchup=False
)
def hello_world_1(ds, **kwargs):
    """Append a marker line to /tmp/a; a failed assert would fail the task."""
    print("hello_world1")
    with open("/tmp/a", "at") as log_file:
        log_file.write("hello----word" + "\n")
    # In Airflow, an assert (or any raised exception) marks the task failed.
    assert 1 == 1
def hello_world_2(ds, **kwargs):
    """Run the helper script; hard-exit the process on a non-zero exit code."""
    exit_code = os.system("python $airflow_home/python_file/print.py")
    # Non-zero shell status -> abort immediately so Airflow records a failure.
    if exit_code != 0:
        os._exit(-1)
    print("Continued....")
# Task 1: runs hello_world_1 once per DAG run.
t1 = PythonOperator(
    task_id='hello_world_1',
    # Callable the task executes.
    python_callable=hello_world_1,
    # Pass the Airflow context (ds, etc.) into the callable.
    provide_context=True,
    dag=dag,
    retries=0,
)
# Task 2: runs hello_world_2.
t2 = PythonOperator(
    task_id='hello_world_2',
    python_callable=hello_world_2,
    provide_context=True,
    dag=dag,
)
# Task ordering.
#t2.set_upstream(t1) # t2 depends on t1; equivalent to t1.set_downstream(t2) and to dag.set_dependency('hello_world_1', 'hello_world_2')
# t2 only runs after t1 has completed successfully.
# t1 ##only t1
# t1 >> t2 ## t1 first success && t2
t1 >> t2
# airflow.note
# http://note.youdao.com/noteshare?id=bb4888b561b3468e732361de74c7794e&sub=FD605AE047F04575A92C1DF2BCF9E7EA
# exec time
###############################################
# start_date + schedule_interval
# https://www.jianshu.com/p/5aa4447f48ea
#
# # start_date
#
# if now time ago:
# real
# start
# time: now
# time + schedule_interval
#
# # schedule_interval
# if cron:
# not now
# time: now
# time + schedule_interval | [
"hope.gong@jingdigital.com"
] | hope.gong@jingdigital.com |
c4a735ef7e38d09ec514e43a3557b1d581e8db67 | b3e39afef4ddac88f1391964995c2eefd02818e7 | /CantUseFour.py | e114529ae5c0e7df55d188f30bf5f8355f1ac7c3 | [] | no_license | ommmishra/randomPrograms | 44628e3c4b381e14b8d908342dcb9b7a854284c2 | f52f3b0b485b310b9d5429667fba9422b6af045c | refs/heads/master | 2022-02-25T08:12:25.656096 | 2022-01-19T15:08:14 | 2022-01-19T15:08:14 | 179,322,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 90 | py | n = "44523432342343243234237"
# Replace every digit 4 with 3, then show the altered string and how much
# the substitution shrank the number.
x = n.translate(str.maketrans("4", "3"))
z = int(n) - int(x)
print(x)
print(z)
"ommmishra830@gmail.com"
] | ommmishra830@gmail.com |
4e8a125a7458dd004507e648e9417922ad85affe | 14d7f5f83b6f84871ff6ebfa0af4c17b7115a33f | /eco_models/mpb/integration_stop.py | f391a20c2a14bae90e14d4ebe8bd5777a3fa7d32 | [] | no_license | tonychangmsu/Python_Scripts | 8ca7bc841c94dcab36743bce190357ac2b1698a5 | 036f498b1fc68953d90aac15f0a5ea2f2f72423b | refs/heads/master | 2016-09-11T14:32:17.133399 | 2016-03-28T16:34:40 | 2016-03-28T16:34:40 | 10,370,475 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 736 | py | # Title: integration_stop.py
# Author: Tony Chang
# Date: 10.26.2015
# Abstract: Attempt to find a solution to determining where the cumulative sum
# (numerical integration) of an array of 2D matrices sums up to one (find the index).
import numpy as np
# First suppose we have a 3D array (time, y, x) of values under 1.
G = np.random.uniform(0,.05, (365,500,400))
# Now develop a cumulative sum for each step along the time axis.
integral_G = np.cumsum(G, axis =0)
# Per cell, find the first index on axis 0 where the running total exceeds one.
# (np.argmax returns the first True, and 0 when the threshold is never reached.)
index = np.argmax(integral_G>1, axis = 0)
# If any of these equals 0 then we have a development that didn't complete, and
# we have a problem: more time is needed (i.e. more years to inspect).
# done!
| [
"tony.chang@msu.montana.edu"
] | tony.chang@msu.montana.edu |
986d5bf4bc52b34aa41124711f6c80c7e1957253 | 4538728d33a8700e5bec08ec0d7b05480e042902 | /utils/send_email.py | 7f3a23ccb391e13913a08307e567cd922bcea390 | [] | no_license | Faouzizi/Alerting-System | 5b841a7374da0012bc0a9a8111d8925139ce89a7 | 98fdf263efb0f3e007bf666fb355ec3cede44076 | refs/heads/main | 2023-01-19T02:00:19.773848 | 2020-11-26T01:11:01 | 2020-11-26T01:11:01 | 314,676,909 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,533 | py | #############################################################################
########### Import python packages
#############################################################################
import smtplib
import config
from utils.get_templates import get_html_template
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
#############################################################################
########### Send the email
#############################################################################
def send_alerting_email(alert_message, type_alert):
    """Send *alert_message* (as the subject) to every configured recipient,
    using the HTML template registered for *type_alert* as the body."""
    template = get_html_template(type_alert)
    # Open an authenticated, TLS-protected session with Gmail's SMTP relay.
    smtp_session = smtplib.SMTP(host='smtp.gmail.com', port=587)
    smtp_session.starttls()
    smtp_session.login(config.smtp_email, config.smtp_password)
    for recipient in config.recipient_list:
        # Build a fresh MIME message per recipient.
        msg = MIMEMultipart()
        msg['From'] = config.smtp_email
        msg['To'] = recipient
        msg['Subject'] = alert_message
        msg.attach(MIMEText(template.substitute(), 'html'))
        smtp_session.send_message(msg)
    # Terminate the SMTP session and close the connection.
    smtp_session.quit()
    return 'email sent :)'
| [
"noreply@github.com"
] | noreply@github.com |
25dd87758892b414426ec0e9c48e05fb4ac4a527 | a4a44ad46cd1306e2da72ff89483b0102fc9787d | /SamplePython/Developer Tool/fab_fabric/pengenalan_dr_web/11_settings.py | 8a018fc9d73ccd0692dcf6cf3d3a40dad2777d35 | [] | no_license | okipriyadi/NewSamplePython | 640eb3754de98e6276f0aa1dcf849ecea22d26b1 | e12aeb37e88ffbd16881a20a3c37cd835b7387d0 | refs/heads/master | 2020-05-22T01:15:17.427350 | 2017-02-21T04:47:08 | 2017-02-21T04:47:08 | 30,009,299 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 333 | py | """
settings (fabric.context_managers.settings)
When you need to temporarily override env values (e.g. for a certain
chain of commands), you can use the settings context manager.
Usage examples:
"""
from fabric.api import settings, sudo
# Perform actions as a different *user* by temporarily overriding env.user
# for the duration of the with-block only.
with settings(user="avionics"):
    sudo("cmd")
| [
"oki.priyadi@pacificavionics.net"
] | oki.priyadi@pacificavionics.net |
ad5ff59ea22ba3812b95850738f8c40ca4e9036d | d19a463c154da8e66330450d8462d1c4d6de3cc1 | /server/data/sp500.py | c725d6384d1d1d94739c2f624948942607e13ade | [] | no_license | nhatduy227/StockApp | 7209ede008205de1f8b2997ed2d8d8677bad43ea | 5a2dc2153d92eb8afba1dfd4b61b6a849f237b9b | refs/heads/master | 2023-07-14T06:48:02.937346 | 2021-09-02T18:22:13 | 2021-09-02T18:22:13 | 390,561,887 | 3 | 1 | null | 2021-08-29T23:35:36 | 2021-07-29T01:33:13 | JavaScript | UTF-8 | Python | false | false | 1,728 | py | '''S&P 500 Stock Reader
The script reads in all companies in the Standard & Poor Index, provided by Wikipedia.
It then grabs the historical stock price of each ticker and saves them in PyStore. Then,
continuously adds in end-of-day stocks daily.
'''
import pandas as pd
import yfinance as yf
from sqlalchemy import create_engine
import psycopg2
import io
def read_in_sp500(
    URL: str = 'https://en.wikipedia.org/wiki/List_of_S%26P_500_companies',
) -> pd.DataFrame:
    '''Read in all of Standard & Poor's 500 largest companies
    Args:
        URL (str): a Wikipedia url holding a data table of all S&P 500 companies
    Returns:
        pd.DataFrame: a data frame with info on all S&P 500 companies
    '''
    sp500_df = pd.read_html(URL)[0]
    # Yahoo Finance tickers use '-' where Wikipedia uses '.' (e.g. BRK.B -> BRK-B).
    # regex=False is essential: with regex=True, '.' is a pattern matching ANY
    # character, which would replace every character of every symbol with '-'.
    sp500_df['Symbol'] = sp500_df['Symbol'].str.replace('.', '-', regex=False)
    return sp500_df
# ----------------- Run only once -------------------
def get_stock_data(
    interval: str = '1d',
) -> pd.DataFrame:
    '''Retrieves all previous day-by-day stock prices in the S&P500
    Note: This function should only run once
    Args:
        interval (str): bar size passed to yfinance (default: one day)
    Returns:
        pd.DataFrame: long-format frame with one row per (date, ticker)
    '''
    sp500_tickers = read_in_sp500()['Symbol']
    # Batch calls all stock tickers
    sp500_df = yf.download(
        tickers=sp500_tickers.to_list(),
        interval=interval,
    )
    # Reshape wide (field, ticker) columns into long rows keyed by date+ticker.
    sp500_df = sp500_df.stack().reset_index().rename(
        columns={'level_1': 'Ticker'})
    # Normalise column names to snake_case for the database.
    sp500_df.columns = sp500_df.columns.str.lower().str.replace(' ', '_')
    return sp500_df
# ---------------------------------------------------
if __name__ == '__main__':
    # NOTE(review): database credentials are hard-coded; move them to an
    # environment variable or config file before real deployment.
    db_url = 'postgresql://postgres:password@localhost:5432/stocks'
    engine = create_engine(db_url)
    df = get_stock_data()
    df.to_sql('stock_data', engine)
| [
"aowangphilly@gmail.com"
] | aowangphilly@gmail.com |
e8a67b91c6b02523eb9741b4118764bca75190c1 | cbb29f7a30b5de0dc2cb421edc259d6ce2586279 | /tests/fixtures/readme_book.py | a9e759336c311c41d1228ce0baafcb0cb304adf2 | [] | no_license | Madoshakalaka/python-typing-to-typescript | a4ebd58ff22f85fe1d2affcd32390ae5e8f19104 | d5fd19b7f804a5e95f8b62a4d208c98d5b830593 | refs/heads/master | 2023-02-02T05:55:23.095977 | 2020-12-22T22:38:23 | 2020-12-22T22:38:23 | 321,219,843 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 291 | py | from typing import TypedDict
from typing import List, Union

# Classes are ordered so each name exists before it is referenced: the
# original referenced Chapter/Paragraph (and List/Union, which were never
# imported) before definition, raising NameError at import time because
# TypedDict class-body annotations are evaluated eagerly.

class Paragraph(TypedDict):
    content: str


class Chapter(TypedDict):
    title: str
    # short chapters only have one paragraph
    content: Union[str, List[Paragraph]]


class Book(TypedDict):
    pages: int
    chapters: List[Chapter]
    Authors: List[str]
"syan4@ualberta.ca"
] | syan4@ualberta.ca |
64940b59557a57f6050239f90c6e4c8057f3ca09 | 7becb767c5536f450d9aa83821c2a62a0b66175a | /admintools/round_lib/get_round_id.py | 90fc6b42835245bf12461b4b1a8a8f3cf9a678a8 | [] | no_license | urirot/planetWarsRunner | af69837cc8f83902505bbe1f4628aaee7476348a | 76e0458115ebbe581c104d569ad17899dae78fb3 | refs/heads/master | 2021-01-01T17:55:33.012912 | 2018-07-04T19:06:04 | 2018-07-04T19:06:04 | 98,202,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 582 | py | #! /usr/bin/python
from urllib import urlopen
import json
import sys
from config import *
# NOTE: this is a Python 2 script (print statements, urllib.urlopen).
if len(sys.argv) != 3:
    print "Usage: ./get_round_id.py <tournament id> <round number>"
    sys.exit(2)
tourn_id = sys.argv[1]
round_number = sys.argv[2]
# HOST comes from the star-import of config above.
url = HOST + "tournaments/%s/round_by_name?name=%s" % (tourn_id, round_number)
result = urlopen(url).read()
# An empty body means the API found no round with that name.
if not result:
    sys.stderr.write("Can't find this round (round number = %s). Are you sure you created it?\n" % round_number)
    sys.exit(1)
round_id = str(json.loads(result)["id"])
print round_id
| [
"gazolin@gmail.com"
] | gazolin@gmail.com |
9ae067e5cd4eccc2e3a324cc2e07669caccf8637 | 6630694f401f6f475dd81bb01ff9368db844ccff | /configs/_base_/models/hrnet/hrnet-w48.py | f0604958481ba2af277e3a0f9515dc1423def6c6 | [
"Apache-2.0"
] | permissive | open-mmlab/mmpretrain | 98a4d6b3bb747efc3d50decebf84fc3ffa41076a | d2ccc44a2c8e5d49bb26187aff42f2abc90aee28 | refs/heads/main | 2023-08-30T19:11:24.771498 | 2023-08-23T02:45:18 | 2023-08-23T02:45:18 | 278,415,292 | 652 | 186 | Apache-2.0 | 2023-09-08T08:01:40 | 2020-07-09T16:25:04 | Python | UTF-8 | Python | false | false | 418 | py | # model settings
# HRNet-W48 ImageNet classifier for the mmpretrain config system: each dict
# is a registry spec ('type' selects the class, the rest are constructor args).
model = dict(
    type='ImageClassifier',
    backbone=dict(type='HRNet', arch='w48'),
    neck=[
        # Fuse the four HRNet branch resolutions into one feature map,
        # then global-average-pool it for the linear head.
        dict(type='HRFuseScales', in_channels=(48, 96, 192, 384)),
        dict(type='GlobalAveragePooling'),
    ],
    head=dict(
        type='LinearClsHead',
        in_channels=2048,
        num_classes=1000,
        loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
        # Report top-1 and top-5 accuracy.
        topk=(1, 5),
    ))
| [
"noreply@github.com"
] | noreply@github.com |
170f4291b543e014fadf954a0e8b37173c22f52f | 965e1e205bf053d93b32be0dab4d45455b42b3a2 | /NLP/PartsSpeech.py | 29aa35ab37a1d1ca416e2d528400a686da8f4ba4 | [] | no_license | panditdandgule/DataScience | 9e58867dd960ec554e0bbb8e4ce93baa226ab927 | 3eb59c129d81a6ba6b45e24113e25e63d19c60cb | refs/heads/master | 2021-07-22T21:44:12.700518 | 2020-05-14T12:01:05 | 2020-05-14T12:01:05 | 166,497,260 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 669 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 15 19:59:50 2018
@author: pandit
"""
import nltk
from nltk.corpus import state_union
from nltk.tokenize import PunktSentenceTokenizer
# Train a Punkt sentence tokenizer on the 2005 State of the Union address
# and apply it to the same text (train and sample are the same file here).
train_text=state_union.raw("2005-GWBush.txt")
sample_text=state_union.raw("2005-GWBush.txt")
custom_sent_tokenizer=PunktSentenceTokenizer(train_text)
tokenized =custom_sent_tokenizer.tokenize(sample_text)
def process_content():
    """Print the NLTK part-of-speech tags for every tokenized sentence."""
    try:
        for i in tokenized:
            words=nltk.word_tokenize(i)
            tagged=nltk.pos_tag(words)
            print(tagged)
    except Exception as e:
        # NOTE(review): broad catch turns any failure into a printout and
        # silently stops tagging — consider narrowing or re-raising.
        print(str(e))
process_content()
| [
"panditdandgule777@gmail.com"
] | panditdandgule777@gmail.com |
d4a278c814384d490f690a077bab77a109b60b57 | 0ad79e7104500b5988f07e9f19f17a540f07605a | /Python算法指南/动态规划/67_最小和子数组_灵活运用动态规划.py | 3273d8c3606054f4d873463057975d507015c93a | [] | no_license | tonyyo/PycharmProjects | f0ce458ed662e33e75ddffbfcf28b0d1ed638743 | a28620923336c352103858e0ccfc4117d1c4ea01 | refs/heads/master | 2022-09-19T02:02:15.919201 | 2020-06-03T12:57:38 | 2020-06-03T12:57:38 | 263,204,131 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | class Solution:
def minSubArray(self, nums):
MIN = 65536
SUM = nums[0]
for i in range(1, len(nums)):
SUM = SUM + nums[i] if SUM < 0 else nums[i] # SUM > 0有害于最小和
MIN = min(MIN, SUM)
return MIN
if __name__ == '__main__':
    # Smoke test: print each input list and its minimum subarray sum.
    temp = Solution()
    List1 = [1, -1, -2, 1]
    List2 = [3, -2, 2, 1]
    print("输入:" + str(List1))  # "input:"
    print(("输出:" + str(temp.minSubArray(List1))))  # "output:"
    print("输入:" + str(List2))
    print(("输出:" + str(temp.minSubArray(List2))))
| [
"1325338208@qq.com"
] | 1325338208@qq.com |
5d9a526f41762caa210b4add3fe922444501227d | 1546f47a68577c55371d61d9805d890fddf843d5 | /Arthur/events/filters.py | 4c0aeb65c0fcf2f8dd463090bb5dc37135f85a58 | [] | no_license | tadgh/Arthur | b24299fc4edc5eba424e3007389e2a469a38e1a0 | 6ff839904973b8c3ad3eecb677cb9f3e3bbc1934 | refs/heads/master | 2022-12-14T13:34:44.610442 | 2020-04-28T04:37:07 | 2020-04-28T04:37:07 | 189,875,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py |
from django_filters import rest_framework as filters
class LeaderboardFilter(filters.FilterSet):
posted = filters.DateFromToRangeFilter(field_name='date') | [
"garygrantgraham@gmail.com"
] | garygrantgraham@gmail.com |
455eb5dcfc9b2227454cfd2a1ff46a485409a8ad | 3b7d8027e34f2338517d9c34f82bb02eb88de201 | /Vote.py | b8027b299ae5b4a3e556f3b8b20d8304c2ad44ca | [] | no_license | amishofficial/decentralizeVoting | 44dd2dd0fb4f4779a59503ff87e8cbfeea9d0028 | e957e317958173b4ba006518c8b87dfbb765593a | refs/heads/main | 2023-03-08T13:03:45.835971 | 2021-02-24T11:27:40 | 2021-02-24T11:27:40 | 341,876,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 681 | py | from collections import OrderedDict
from utility.printable import Printable
class Vote(Printable):
    """A vote which can be added to a vote_block in the votechain.

    Attributes:
        voter: Identity of the party casting the vote.
        vote_to: Identity of the vote's recipient.
        signature: Signature covering the vote (stored, not serialized).
    """
    def __init__(self, voter, vote_to, signature):
        self.voter = voter
        self.vote_to = vote_to
        self.signature = signature
    def to_ordered_dict(self):
        """Converts this transaction into a (hashable) OrderedDict.

        NOTE(review): the signature is deliberately left out here —
        presumably so the serialized form matches the signed payload;
        confirm against the signing/verification code.
        """
        return OrderedDict([('voter', self.voter), ('vote_to', self.vote_to)])
| [
"noreply@github.com"
] | noreply@github.com |
f70d4e2e4894ba7b8637af7ba93f753c0b5faa18 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /115_testing/examples/Github/_Level_1/python_unittests-master/sample_functions.py | 8d63bc99d2a30ac0321b97976440c0d8474e1244 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 204 | py |
def sum(a, b):
return a + b
def contains_numbers(alpha_num_str):
    """Return True if *alpha_num_str* contains at least one decimal digit.

    Bug fix: the original returned False from inside the loop as soon as a
    digit was found, so it could never return True for any input.
    """
    for char in alpha_num_str:
        if char.isdigit():
            return True
    return False
def div(a, b):
return a / b | [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
d88adc7061a3c48c6d2afe7420444e5c70762261 | 5297bdd3ccf64be915c05bfc599cb66d52fa6b17 | /memword/api/lessons.py | d2fb313eb68375d2f5e5ce06628007a4cb09c546 | [] | no_license | scdekov/memword | ee04ef4d4ca55084bf678d354ff77b0cb42403ba | bd011358c252ac6e3930dcae15df76c7103c61e0 | refs/heads/master | 2022-12-11T09:13:27.507111 | 2019-11-13T21:37:10 | 2019-11-13T21:37:10 | 146,096,522 | 1 | 1 | null | 2022-12-08T02:50:40 | 2018-08-25T13:14:04 | Python | UTF-8 | Python | false | false | 5,519 | py | from django.contrib.auth import get_user_model
from django.shortcuts import get_object_or_404
from django.utils import timezone
from rest_framework import serializers, viewsets, decorators, status
from rest_framework.response import Response
from memword.api.serializers import TargetSerializer
from memword.models.lesson import Lesson, Question
from memword.logic.target_picker import TargetPicker
from memword.logic.learning_intervals_manager import LearningIntervalsManager
User = get_user_model()
class SubmitQuestionSerializer(serializers.Serializer):
confidence_level = serializers.IntegerField()
question_id = serializers.IntegerField()
answer = serializers.CharField(allow_blank=True, required=False)
def validate(self, data):
if self.context['lesson'].lesson_type == 'exam' and not data.get('answer'):
raise serializers.ValidationError('answer is required when submitting exam question')
return data
def validate_confidence_level(self, confidence_level):
if confidence_level not in range(1, 11):
raise serializers.ValidationError('confidence_level should be between 1 and 10')
return confidence_level
class QuestionSerializer(serializers.ModelSerializer):
class Meta:
model = Question
fields = ('id', 'target', 'passed', 'correct', 'confidence_level')
target = TargetSerializer()
class LessonSerializer(serializers.ModelSerializer):
class Meta:
model = Lesson
fields = ('id', 'student_id', 'questions', 'lesson_type', 'start_time',
'end_time', 'expected_duration', 'title', 'target_ids', 'planned_start_time')
questions = QuestionSerializer(many=True, read_only=True)
lesson_type = serializers.ChoiceField(allow_blank=True, default=Lesson.TYPE_LECTURE, choices=Lesson.TYPES)
target_ids = serializers.ListField(child=serializers.IntegerField(), write_only=True)
planned_start_time = serializers.DateTimeField(default=timezone.now)
expected_duration = serializers.DurationField(default='60')
def save(self):
# target_ids may need to be validated if they belongs to the current user
target_ids = self.validated_data.pop('target_ids', [])
student_id = self.context['request'].user.id
lesson = super().save(student_id=student_id)
Question.objects.bulk_create([Question(lesson=lesson, target_id=target_id) for target_id in target_ids])
return lesson
class TopTargetsQuerySerializer(serializers.Serializer):
targets_count = serializers.IntegerField(required=False, default=10)
class LessonsViewSet(viewsets.ModelViewSet):
queryset = Lesson.objects.all().order_by('-id')
serializer_class = LessonSerializer
def filter_queryset(self, queryset):
queryset = super().filter_queryset(queryset)
return queryset.filter(student=self.request.user)
@decorators.action(detail=True, methods=['POST'], url_path='@submit-answer')
def submit_answer(self, request, pk):
lesson = self.get_object()
serializer = SubmitQuestionSerializer(data=request.data,
context={'request': request, 'lesson': lesson})
serializer.is_valid(raise_exception=True)
question = get_object_or_404(Question, lesson_id=pk, id=serializer.validated_data['question_id'])
question.confidence_level = serializer.validated_data['confidence_level']
question.passed = True
question.pass_time = timezone.now()
if lesson.lesson_type == 'exam':
question.correct = serializer.validated_data['answer'] == question.target.description
question.save()
if question.lesson.should_finish():
question.lesson.finalize()
LearningIntervalsManager.handle_submitted_question(question)
return Response({'question': QuestionSerializer(question).data})
@decorators.action(detail=True, methods=['POST'], url_path='@start')
def start(self, request, **kwargs):
lesson = self.get_object()
lesson.start_time = timezone.now()
lesson.save()
return Response({'lesson': LessonSerializer(lesson).data})
@decorators.action(detail=True, methods=['POST'], url_path='@duplicate')
def duplicate(self, request, **kwargs):
original_lesson = self.get_object()
# this is suposed to be in atomic transactions
new_lesson = Lesson.objects.create(student_id=request.user.id,
lesson_type=original_lesson.lesson_type,
expected_duration=original_lesson.expected_duration,
planned_start_time=timezone.now())
# start time should be calculated somehow
Question.objects.bulk_create([Question(target_id=question.target_id, lesson_id=new_lesson.id)\
for question in original_lesson.questions.all()])
return Response({'lesson': LessonSerializer(new_lesson).data}, status=status.HTTP_201_CREATED)
@decorators.action(detail=False, url_path='@get-top-targets')
def get_top_targets(self, request):
serializer = TopTargetsQuerySerializer(data=request.GET)
serializer.is_valid(raise_exception=True)
top_targets = TargetPicker.pick_top(request.user, serializer.validated_data['targets_count'])
return Response({'targets': TargetSerializer(top_targets, many=True).data})
| [
"svetlio1994@gmail.com"
] | svetlio1994@gmail.com |
13989c3455665529fd642327e8ef34cd1679e3aa | 743057b36fda035eb8e6378f0c192ca4e2abf76e | /toMap.py | 4e527f566d14f32355e1c1308db25b20b02582a2 | [] | no_license | haegray/Python-and-Java-Files | f53ffa0a6872d7f29328353bbf7e63d98b796250 | 0b6fcc0158711b184ee1bf4b59f6dc835361e03a | refs/heads/master | 2021-01-13T14:39:06.230924 | 2016-12-23T17:10:45 | 2016-12-23T17:10:45 | 76,681,884 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 109 | py | #toNumbers.py
def toMap(strList):
    # NOTE(review): this returns on the very first iteration, so only the
    # first element of strList is ever used, and in Python 3 the result is
    # a lazy map object rather than a list. Presumably the intent was to
    # convert every element to float — confirm before relying on this.
    for i in strList:
        return (map(float,i))
toMap()
| [
"anaisgray.ag@gmail.com"
] | anaisgray.ag@gmail.com |
b593fcc836a48a7354d9240bfcd92cf6de664747 | 992f080016e7de4e3de9ff875a9778404c6c8fdc | /app.py | 528c58018f342d6301ce0278c345f7c43269f087 | [] | no_license | Garasuno/lineBot2 | 9c50903c26fc27947dfca7b5fd8915779aa2b1a4 | 034cf2ed295710cb3f04483cc36f8d0bc15a462d | refs/heads/master | 2020-06-10T13:37:15.646984 | 2019-09-03T07:09:11 | 2019-09-03T07:09:11 | 193,651,533 | 0 | 0 | null | 2019-06-25T06:51:46 | 2019-06-25T06:51:46 | null | UTF-8 | Python | false | false | 1,592 | py | from flask import Flask, jsonify, request
import os
import json
import requests
app = Flask(__name__)
@app.route('/')
def index():
    # Reads the 'Authorization' environment variable into an unused local.
    # NOTE(review): the only effect is a KeyError if the variable is missing
    # — confirm whether this early sanity check is intentional.
    a=os.environ['Authorization']
    # Returns the author's name/class (Thai) as a liveness response.
    return "นางสาวนฤภร สัมมา เลขที่ 10 ชั้น ม.4/9"
@app.route("/webhook", methods=['POST'])
def webhook():
if request.method == 'POST':
return "OK"
@app.route('/callback', methods=['POST'])
def callback():
    """Handle a Dialogflow fulfillment request and reply via the LINE API.

    Bug fix: the original stored the intent name in ``userText`` but
    branched on an undefined ``usertext`` and called an undefined
    ``senttext``, so every request raised NameError. Names are now
    consistent (``userText`` / ``sendText``).
    """
    # Round-trip through json.dumps/json.loads normalizes the request body.
    json_line = request.get_json()
    json_line = json.dumps(json_line)
    decoded = json.loads(json_line)
    #user = decoded["events"][0]['replyToken']
    # NOTE(review): 'playload' and 'displayname' look like typos for
    # 'payload' / 'displayName' — confirm against the actual Dialogflow
    # request body before changing the keys.
    user = decoded['originalDetectIntentRequest']['playload']['data']['replyToken']
    #usertext = decoded["events"][0]['message']['text']
    userText = decoded['queryResult']['intent']['displayname']
    if (userText == 'สวัสดี'):
        sendText(user, 'เอ่อดีด้วย')
    elif (userText == 'บายจ้า'):
        sendText(user, 'บาย')
    else:
        sendText(user, 'กาว')
    return '',200
def sendText(user, text):
    """POST a single text reply to the LINE Messaging API.

    :param user: reply token identifying the conversation to answer.
    :param text: message body to send.
    """
    LINE_API = 'https://api.line.me/v2/bot/message/reply'
    headers = {
      'Content-Type': 'application/json; charset=UTF-8',
      'Authorization': os.environ['Authorization'] # set Config vars in Heroku with the access token value
    }
    data = json.dumps({
        "replyToken":user,
        "messages":[{"type":"text","text":text}]
    })
    # NOTE(review): the response is never checked, so API errors are
    # silently ignored.
    r = requests.post(LINE_API, headers=headers, data=data) # send the data
if __name__ == '__main__':
    # Run the Flask development server when executed directly.
    app.run()
| [
"noreply@github.com"
] | noreply@github.com |
ee1b43a7f7c4f3012ce2cae10f1509013f318252 | 4564b5786932bd5a94f442a2eae170564e12640b | /python/NaverMovieCrawling.py | 9f08c4b89a304193341b12545e911ac0ae60723a | [] | no_license | slomeer/sparta | 60feb9d5e9ecebedca1d08735f4c99912deb39c6 | bd96e3700c10bebd0c3e742fb35f4151f88a7e89 | refs/heads/master | 2021-03-15T04:44:54.479873 | 2020-06-15T06:44:46 | 2020-06-15T06:44:46 | 246,825,166 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,127 | py | import requests
from bs4 import BeautifulSoup
# Fetch the HTML of the target URL (Naver movie ranking page),
headers = {'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'}
data = requests.get('https://movie.naver.com/movie/sdb/rank/rmovie.nhn?sel=pnt&date=20200303',headers=headers)
# then parse the HTML with the BeautifulSoup library into a searchable form.
# The variable `soup` ends up holding the "easy-to-query parsed html";
# from here the needed parts can be extracted with selectors.
soup = BeautifulSoup(data.text, 'html.parser') # data.text is the html we receive from the website
# The parsed html document is now available in `soup`.
movies = soup.select('#old_content > table > tbody > tr')
for i, movie in enumerate(movies):
    # If the row contains a title link (an <a> tag),
    a_tag = movie.select_one('td.title > div > a')
    rate = movie.select_one('td.point')
    if a_tag is not None:
        # print the rank index, the title text and the rating.
        print(i, end=' ')
        print(a_tag.text, end=' ')
        print(rate.text)
| [
"61731412+slomeer@users.noreply.github.com"
] | 61731412+slomeer@users.noreply.github.com |
6c34ab7d080c5769a3dcf15d2a5cfd4dce22ea9d | 394742b366c0eed8997e8c4058daa1e122fffdf3 | /Lesson23/manage.py | 3081204679636e0df707aa01ca17a805669e6dc5 | [
"MIT"
] | permissive | IslamRaslambekov/HomeWork | e293468d73998f7f5e5e8f50a318546a01a6d593 | a1454c3539edf7475de686383cee8db9f1bdf448 | refs/heads/master | 2022-05-07T02:46:48.916601 | 2022-04-29T01:38:42 | 2022-04-29T01:38:42 | 243,369,694 | 0 | 1 | MIT | 2022-04-14T10:25:28 | 2020-02-26T21:29:16 | CSS | UTF-8 | Python | false | false | 664 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # NOTE(review): settings module is 'Lesson24.settings' while the file
    # appears to live under Lesson23/ — confirm the package name matches.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Lesson24.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| [
"rodoslav12345@gmail.com"
] | rodoslav12345@gmail.com |
0dac53703ab198d385005c1bd7a5a57d670af88e | caee06b143be5117a667e0f14ed6cf54958e85c1 | /page/showreview.py | 0b3dcc9721d9eed2b09256eae20318e2959a16f8 | [
"Apache-2.0",
"MIT"
] | permissive | ctesting/critic | 720b68331aac81443e53ccd0c4c6cb4c3b75b5ec | 8ba956d124279d0fca9d4522fb0ee6970e863588 | refs/heads/master | 2021-01-17T13:43:41.205293 | 2013-03-15T20:34:47 | 2013-03-15T20:34:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49,272 | py | # -*- mode: python; encoding: utf-8 -*-
#
# Copyright 2012 Jens Lindström, Opera Software ASA
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import time
import re
import datetime
import calendar
import traceback
import dbutils
import gitutils
import htmlutils
import page.utils
import log.html
import review.utils as review_utils
import review.html as review_html
import review.comment as review_comment
import configuration
import diff
import profiling
import linkify
import extensions
# Prefer the site-specific implementation when available; otherwise fall
# back to a simple "first path component" heuristic.
try:
    from customization.paths import getModuleFromFile
except ImportError:
    # Narrowed from a bare `except:` so unrelated errors in the
    # customization module are no longer silently swallowed.
    def getModuleFromFile(repository, filename):
        """Return the top-level directory of *filename* as "base/", or None
        when the path has no directory component.  *repository* is unused by
        this fallback but kept for interface compatibility."""
        try:
            base, rest = filename.split("/", 1)
            return base + "/"
        except ValueError:
            # No "/" present: split() yields one element and unpacking fails.
            return None
class SummaryColumn(log.html.SummaryColumn):
    """Commit-log summary column that also annotates each commit with the
    reviewers who still have pending changes in it."""
    def __init__(self, review, linkToCommit):
        log.html.SummaryColumn.__init__(self, linkToCommit)
        self.__review = review
        # Maps commit id -> set of user ids with pending assignments.
        self.__cache = {}
    def fillCache(self, db, review):
        """Populate the commit -> pending-reviewer cache for *review*."""
        cursor = db.cursor()
        cursor.execute("""SELECT DISTINCT assignee, child
                            FROM fullreviewuserfiles
                            JOIN changesets ON (changesets.id=changeset)
                           WHERE review=%s
                             AND state='pending'""",
                       (review.id,))
        for user_id, commit_id in cursor:
            self.__cache.setdefault(commit_id, set()).add(user_id)
    def render(self, db, commit, target):
        """Render the summary cell; adds a "critic-reviewers" attribute
        listing "fullname:status" for each reviewer with pending work."""
        user_ids = self.__cache.get(commit.getId(db))
        if user_ids:
            users = ["%s:%s" % (user.fullname, user.status) for user in dbutils.User.fromIds(db, [user_id for user_id in user_ids])]
            target.setAttribute("critic-reviewers", ",".join(sorted(users)))
        log.html.SummaryColumn.render(self, db, commit, target)
class ApprovalColumn:
    """Commit-log column showing review progress per commit.

    The shared *cache* maps commit id -> 12-element list:
      [0..2]  all reviewers: total nfiles / deleted / inserted
      [3..5]  all reviewers: reviewed nfiles / deleted / inserted
      [6..8]  this user:     total nfiles / deleted / inserted
      [9..11] this user:     reviewed nfiles / deleted / inserted
    """
    # Column flavors: remaining-work percentage vs. total line counts.
    APPROVED = 1
    TOTAL = 2
    def __init__(self, user, review, type, cache):
        self.__user = user
        self.__review = review
        self.__type = type
        self.__cache = cache
    @staticmethod
    def fillCache(db, user, review, cache, profiler):
        """Fill *cache* (see class docstring) with per-commit progress for
        *review*, both overall and restricted to *user*'s assignments."""
        cursor = db.cursor()
        profiler.check("fillCache")
        # Overall progress: every file of every changeset in the review.
        cursor.execute("""SELECT child, state, COUNT(*), SUM(deleted), SUM(inserted)
                            FROM changesets
                            JOIN reviewfiles ON (changeset=changesets.id)
                           WHERE review=%s
                        GROUP BY child, state""",
                       (review.id,))
        for commit_id, state, nfiles, deleted, inserted in cursor:
            data = cache.get(commit_id)
            if not data: data = cache[commit_id] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
            if state == 'reviewed':
                data[3] += nfiles
                data[4] += deleted
                data[5] += inserted
            data[0] += nfiles
            data[1] += deleted
            data[2] += inserted
        profiler.check("fillCache: total")
        # Per-user progress: only files assigned to *user*, with draft
        # state changes (reviewfilechanges) taking precedence.
        cursor.execute("""SELECT child, COALESCE(reviewfilechanges.to, reviewfiles.state) AS effective_state, COUNT(*), SUM(deleted), SUM(inserted)
                            FROM changesets
                            JOIN reviewfiles ON (changeset=changesets.id)
                            JOIN reviewuserfiles ON (reviewuserfiles.file=reviewfiles.id)
                 LEFT OUTER JOIN reviewfilechanges ON (reviewfilechanges.file=reviewfiles.id
                                                   AND reviewfilechanges.uid=reviewuserfiles.uid
                                                   AND reviewfilechanges.state='draft')
                           WHERE review=%s
                             AND reviewuserfiles.uid=%s
                        GROUP BY child, effective_state""",
                       (review.id, user.id))
        for commit_id, state, nfiles, deleted, inserted in cursor:
            data = cache.get(commit_id)
            if state == 'reviewed':
                data[9] += nfiles
                data[10] += inserted if False else inserted
                data[11] += inserted
            data[6] += nfiles
            data[7] += deleted
            data[8] += inserted
        profiler.check("fillCache: user")
    def __calculate(self, db, commit):
        # All-zero fallback keeps the unpacking sites below simple.
        return self.__cache.get(commit.id, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    def className(self, db, commit):
        """CSS class: "approval"/"total", plus " user" while this user's
        own assignments for the commit are not fully reviewed."""
        if commit:
            (total_nfiles, total_deleted, total_inserted,
             approved_nfiles, approved_deleted, approved_inserted,
             user_total_nfiles, user_total_deleted, user_total_inserted,
             user_approved_nfiles, user_approved_deleted, user_approved_inserted) = self.__calculate(db, commit)
            if user_approved_nfiles == user_total_nfiles:
                category = ""
            else:
                category = " user"
        else:
            category = ""
        if self.__type == ApprovalColumn.APPROVED:
            return "approval" + category
        else:
            return "total" + category
    def heading(self, target):
        if self.__type == ApprovalColumn.APPROVED:
            target.text("Pending")
        else:
            target.text("Total")
    def render(self, db, commit, target):
        """Render either the remaining-work percentage (APPROVED flavor) or
        the -deleted/+inserted line counts (TOTAL flavor); the user's own
        numbers win over the overall ones while the user has pending work."""
        (total_nfiles, total_deleted, total_inserted,
         approved_nfiles, approved_deleted, approved_inserted,
         user_total_nfiles, user_total_deleted, user_total_inserted,
         user_approved_nfiles, user_approved_deleted, user_approved_inserted) = self.__calculate(db, commit)
        if self.__type == ApprovalColumn.APPROVED:
            if user_approved_nfiles == user_total_nfiles:
                if approved_nfiles == total_nfiles:
                    target.text()
                elif approved_deleted == total_deleted and approved_inserted == total_inserted:
                    # Files remain but no line delta — percentage undefined.
                    target.span().text("?? %")
                else:
                    target.span().text("%d %%" % int(100.0 * ((total_deleted + total_inserted) - (approved_deleted + approved_inserted)) / (total_deleted + total_inserted)))
            elif user_approved_deleted == user_total_deleted and user_approved_inserted == user_total_inserted:
                target.span().text("?? %")
            else:
                target.span().text("%d %%" % int(100.0 * ((user_total_deleted + user_total_inserted) - (user_approved_deleted + user_approved_inserted)) / (user_total_deleted + user_total_inserted)))
        else:
            if user_approved_deleted == user_total_deleted and user_approved_inserted == user_total_inserted:
                target.span().text("-%d/+%d" % (total_deleted, total_inserted))
            else:
                target.span().text("-%d/+%d" % (user_total_deleted, user_total_inserted))
def notModified(req, db, user, review):
    """Return True when the client's cached copy is still current, i.e. the
    If-None-Match ETag it sent matches the review's current ETag for
    this user."""
    client_etag = req.getRequestHeader("If-None-Match")
    current_etag = review.getETag(db, user)
    return current_etag == client_etag
def usesExperimentalFeature(req, db, review):
    # Stub: experimental-feature detection is currently disabled (see the
    # commented-out caller below); arguments are kept for compatibility.
    return False
def renderShowReview(req, db, user):
profiler = profiling.Profiler()
cursor = db.cursor()
if user.getPreference(db, "commit.diff.compactMode"): default_compact = "yes"
else: default_compact = "no"
compact = req.getParameter("compact", default_compact) == "yes"
highlight = req.getParameter("highlight", None)
review_id = req.getParameter("id", filter=int)
review = dbutils.Review.fromId(db, review_id, load_commits=False, profiler=profiler)
profiler.check("create review")
if not review:
raise page.utils.DisplayMessage, ("Invalid Review ID", "%d is not a valid review ID." % review_id)
if review.getETag(db, user) == req.getRequestHeader("If-None-Match"):
raise page.utils.NotModified
profiler.check("ETag")
# if usesExperimentalFeature(req, db, review):
# def renderMessage(target):
# url = "%s/r/%d" % (configuration.URL_PER_TYPE['development'], review.id)
# p = target.p(style="padding-top: 1em")
# p.text("Sorry, this review uses experimental features currently only available in the development version of Critic. Because of that, it can only be displayed there.")
# p = target.p(style="padding-top: 1em")
# p.b().a(href=url).text(url)
# yield page.utils.displayMessage(db, req, user, "Experimental Feature Alert!", message=renderMessage)
# return
repository = review.repository
prefetch_commits = {}
cursor.execute("""SELECT sha1, child
FROM changesets
JOIN reviewchangesets ON (reviewchangesets.changeset=changesets.id)
JOIN commits ON (commits.id=changesets.child)
WHERE review=%s""",
(review.id,))
prefetch_commits.update(dict(cursor))
profiler.check("commits (query)")
cursor.execute("""SELECT old_head, commits1.sha1, new_head, commits2.sha1, new_upstream, commits3.sha1
FROM reviewrebases
LEFT OUTER JOIN commits AS commits1 ON (commits1.id=old_head)
LEFT OUTER JOIN commits AS commits2 ON (commits2.id=new_head)
LEFT OUTER JOIN commits AS commits3 ON (commits3.id=new_upstream)
WHERE review=%s""",
(review.id,))
rebases = cursor.fetchall()
if rebases:
has_finished_rebases = False
for old_head_id, old_head_sha1, new_head_id, new_head_sha1, new_upstream_id, new_upstream_sha1 in rebases:
if old_head_id:
prefetch_commits[old_head_sha1] = old_head_id
if new_head_id:
prefetch_commits[new_head_sha1] = new_head_id
has_finished_rebases = True
if new_upstream_id:
prefetch_commits[new_upstream_sha1] = new_upstream_id
profiler.check("auxiliary commits (query)")
if has_finished_rebases:
cursor.execute("""SELECT commits.sha1, commits.id
FROM commits
JOIN reachable ON (reachable.commit=commits.id)
WHERE branch=%s""",
(review.branch.id,))
prefetch_commits.update(dict(cursor))
profiler.check("actual commits (query)")
prefetch_commits = gitutils.FetchCommits(repository, prefetch_commits)
document = htmlutils.Document(req)
html = document.html()
head = html.head()
body = html.body(onunload="void(0);")
def flush(target=None):
return document.render(stop=target, pretty=not compact)
def renderHeaderItems(target):
has_draft_items = review_utils.renderDraftItems(db, user, review, target)
target = target.div("buttons")
if not has_draft_items:
if review.state == "open":
if review.accepted(db):
target.button(id="closeReview", onclick="closeReview();").text("Close Review")
else:
if user in review.owners or user.getPreference(db, "review.pingAnyReview"):
target.button(id="pingReview", onclick="pingReview();").text("Ping Review")
if user in review.owners or user.getPreference(db, "review.dropAnyReview"):
target.button(id="dropReview", onclick="dropReview();").text("Drop Review")
if user in review.owners and not review.description:
target.button(id="writeDescription", onclick="editDescription();").text("Write Description")
else:
target.button(id="reopenReview", onclick="reopenReview();").text("Reopen Review")
target.span("buttonscope buttonscope-global")
profiler.check("prologue")
page.utils.generateHeader(body, db, user, renderHeaderItems)
cursor.execute("SELECT 1 FROM fullreviewuserfiles WHERE review=%s AND state='pending' AND assignee=%s", (review.id, user.id))
hasPendingChanges = bool(cursor.fetchone())
if hasPendingChanges:
head.setLink("next", "showcommit?review=%d&filter=pending" % review.id)
profiler.check("header")
document.addExternalStylesheet("resource/showreview.css")
document.addExternalStylesheet("resource/review.css")
document.addExternalStylesheet("resource/comment.css")
document.addExternalScript("resource/showreview.js")
document.addExternalScript("resource/review.js")
document.addExternalScript("resource/comment.js")
document.addExternalScript("resource/autocomplete.js")
document.addInternalScript(user.getJS())
document.addInternalScript("var owners = [ %s ];" % ", ".join(owner.getJSConstructor() for owner in review.owners))
document.addInternalScript("var updateCheckInterval = %d;" % user.getPreference(db, "review.updateCheckInterval"));
log.html.addResources(document)
document.addInternalScript(review.getJS())
target = body.div("main")
basic = target.table('paleyellow basic', align='center')
basic.col(width='10%')
basic.col(width='60%')
basic.col(width='30%')
h1 = basic.tr().td('h1', colspan=3).h1()
h1.text("r/%d: " % review.id)
h1.span(id="summary").text("%s" % review.summary, linkify=linkify.Context(db=db, review=review))
h1.a("edit", href="javascript:editSummary();").text("[edit]")
def linkToCommit(commit):
cursor.execute("SELECT 1 FROM commits JOIN changesets ON (child=commits.id) JOIN reviewchangesets ON (changeset=changesets.id) WHERE sha1=%s AND review=%s", (commit.sha1, review.id))
if cursor.fetchone():
return "%s/%s?review=%d" % (review.repository.name, commit.sha1, review.id)
return "%s/%s" % (review.repository.name, commit.sha1)
def row(heading, value, help, right=None, linkify=False, cellId=None):
main_row = basic.tr('line')
main_row.td('heading').text("%s:" % heading)
if right is False: colspan = 2
else: colspan = None
if callable(value): value(main_row.td('value', id=cellId, colspan=colspan).preformatted())
else: main_row.td('value', id=cellId, colspan=colspan).preformatted().text(value, linkify=linkify, repository=review.repository)
if right is False: pass
elif callable(right): right(main_row.td('right', valign='bottom'))
else: main_row.td('right').text()
if help: basic.tr('help').td('help', colspan=3).text(help)
def renderBranchName(target):
target.code("branch").text(review.branch.name, linkify=linkify.Context())
if repository.name != user.getPreference(db, "defaultRepository"):
target.text(" in ")
target.code("repository").text("%s:%s" % (configuration.base.HOSTNAME, repository.path))
cursor.execute("""SELECT id, remote, remote_name, disabled, previous
FROM trackedbranches
WHERE repository=%s
AND local_name=%s""",
(repository.id, review.branch.name))
row = cursor.fetchone()
if row:
trackedbranch_id, remote, remote_name, disabled, previous = row
target.p("tracking disabled" if disabled else "tracking").text("tracking")
target.code("branch").text(remote_name, linkify=linkify.Context(remote=remote))
target.text(" in ")
target.code("repository").text(remote, linkify=linkify.Context())
if previous:
target.span("lastupdate").script(type="text/javascript").text("document.write('(last fetched: ' + shortDate(new Date(%d)) + ')');" % (calendar.timegm(previous.utctimetuple()) * 1000))
if user in review.owners:
buttons = target.div("buttons")
if disabled:
buttons.button("enabletracking", onclick="enableTracking(%d);" % trackedbranch_id).text("Enable Tracking")
else:
buttons.button("disabletracking", onclick="triggerUpdate(%d);" % trackedbranch_id).text("Update Now")
buttons.button("disabletracking", onclick="disableTracking(%d);" % trackedbranch_id).text("Disable Tracking")
def renderReviewers(target):
if review.reviewers:
for index, reviewer in enumerate(review.reviewers):
if index != 0: target.text(", ")
span = target.span("user %s" % reviewer.status)
span.span("name").text(reviewer.fullname)
if reviewer.status == 'absent':
span.span("status").text(" (%s)" % reviewer.getAbsence(db))
elif reviewer.status == 'retired':
span.span("status").text(" (retired)")
else:
target.i().text("No reviewers.")
cursor.execute("""SELECT reviewfilters.id, reviewfilters.uid, reviewfilters.directory, reviewfilters.file
FROM reviewfilters
JOIN users ON (reviewfilters.uid=users.id)
WHERE reviewfilters.review=%s
AND reviewfilters.type='reviewer'
AND users.status!='retired'""",
(review.id,))
rows = cursor.fetchall()
reviewer_filters_hidden = []
if rows:
table = target.table("reviewfilters reviewers")
row = table.thead().tr("h1")
row.th("h1", colspan=4).text("Custom filters:")
filter_data = {}
reviewfilters = {}
for filter_id, user_id, directory_id, file_id in rows:
filter_user = dbutils.User.fromId(db, user_id)
if file_id: path = dbutils.describe_file(db, file_id)
else: path = dbutils.describe_directory(db, directory_id) + "/"
reviewfilters.setdefault(filter_user.fullname, []).append(path)
filter_data[(filter_user.fullname, path)] = (filter_id, filter_user)
count = 0
tbody = table.tbody()
for fullname in sorted(reviewfilters.keys()):
original_paths = sorted(reviewfilters[fullname])
trimmed_paths = diff.File.eliminateCommonPrefixes(original_paths[:])
first = True
for original_path, trimmed_path in zip(original_paths, trimmed_paths):
row = tbody.tr("filter")
if first:
row.td("username", rowspan=len(original_paths)).text(fullname)
row.td("reviews", rowspan=len(original_paths)).text("reviews")
first = False
row.td("path").span().innerHTML(trimmed_path)
filter_id, filter_user = filter_data[(fullname, original_path)]
href = "javascript:removeReviewFilter(%d, %s, 'reviewer', %s, %s);" % (filter_id, filter_user.getJSConstructor(), htmlutils.jsify(original_path), "true" if filter_user != user else "false")
row.td("remove").a(href=href).text("[remove]")
count += 1
tfoot = table.tfoot()
tfoot.tr().td(colspan=4).text("%d line%s hidden" % (count, "s" if count > 1 else ""))
if count > 10:
tbody.setAttribute("class", "hidden")
reviewer_filters_hidden.append(True)
else:
tfoot.setAttribute("class", "hidden")
reviewer_filters_hidden.append(False)
buttons = target.div("buttons")
if reviewer_filters_hidden:
buttons.button("showfilters", onclick="toggleReviewFilters('reviewers', $(this));").text("%s Custom Filters" % ("Show" if reviewer_filters_hidden[0] else "Hide"))
if review.applyfilters and review.repository.parent and not review.applyparentfilters:
buttons.button("applyparentfilters", onclick="applyParentFilters();").text("Apply Upstream Filters")
buttons.button("addreviewer", onclick="addReviewer();").text("Add Reviewer")
buttons.button("manage", onclick="location.href='managereviewers?review=%d';" % review.id).text("Manage Assignments")
def renderWatchers(target):
if review.watchers:
for index, watcher in enumerate(review.watchers):
if index != 0: target.text(", ")
span = target.span("user %s" % watcher.status)
span.span("name").text(watcher.fullname)
if watcher.status == 'absent':
span.span("status").text(" (%s)" % watcher.getAbsence(db))
elif watcher.status == 'retired':
span.span("status").text(" (retired)")
else:
target.i().text("No watchers.")
cursor.execute("""SELECT reviewfilters.id, reviewfilters.uid, reviewfilters.directory, reviewfilters.file
FROM reviewfilters
JOIN users ON (reviewfilters.uid=users.id)
WHERE reviewfilters.review=%s
AND reviewfilters.type='watcher'
AND users.status!='retired'""",
(review.id,))
rows = cursor.fetchall()
watcher_filters_hidden = []
if rows:
table = target.table("reviewfilters watchers")
row = table.thead().tr("h1")
row.th("h1", colspan=4).text("Custom filters:")
filter_data = {}
reviewfilters = {}
for filter_id, user_id, directory_id, file_id in rows:
filter_user = dbutils.User.fromId(db, user_id)
if file_id: path = dbutils.describe_file(db, file_id)
else: path = dbutils.describe_directory(db, directory_id) + "/"
reviewfilters.setdefault(filter_user.fullname, []).append(path)
filter_data[(filter_user.fullname, path)] = (filter_id, filter_user)
count = 0
tbody = table.tbody()
for fullname in sorted(reviewfilters.keys()):
original_paths = sorted(reviewfilters[fullname])
trimmed_paths = diff.File.eliminateCommonPrefixes(original_paths[:])
first = True
for original_path, trimmed_path in zip(original_paths, trimmed_paths):
row = tbody.tr("filter")
if first:
row.td("username", rowspan=len(original_paths)).text(fullname)
row.td("reviews", rowspan=len(original_paths)).text("watches")
first = False
row.td("path").span().innerHTML(trimmed_path)
filter_id, filter_user = filter_data[(fullname, original_path)]
href = "javascript:removeReviewFilter(%d, %s, 'watcher', %s, %s);" % (filter_id, filter_user.getJSConstructor(), htmlutils.jsify(original_path), "true" if filter_user != user else "false")
row.td("remove").a(href=href).text("[remove]")
count += 1
tfoot = table.tfoot()
tfoot.tr().td(colspan=4).text("%d line%s hidden" % (count, "s" if count > 1 else ""))
if count > 10:
tbody.setAttribute("class", "hidden")
watcher_filters_hidden.append(True)
else:
tfoot.setAttribute("class", "hidden")
watcher_filters_hidden.append(False)
buttons = target.div("buttons")
if watcher_filters_hidden:
buttons.button("showfilters", onclick="toggleReviewFilters('watchers', $(this));").text("%s Custom Filters" % ("Show" if watcher_filters_hidden[0] else "Hide"))
buttons.button("addwatcher", onclick="addWatcher();").text("Add Watcher")
if user not in review.reviewers and user not in review.owners:
if user not in review.watchers:
buttons.button("watch", onclick="watchReview();").text("Watch Review")
elif review.watchers[user] == "manual":
buttons.button("watch", onclick="unwatchReview();").text("Stop Watching Review")
    def renderEditOwners(target):
        # Right-hand cell of the "Owners" row: a button that opens the
        # owner-editing dialog (editOwners() is defined client-side).
        target.button("description", onclick="editOwners();").text("Edit Owners")
    def renderEditDescription(target):
        # Right-hand cell of the "Description" row: a button that opens the
        # description-editing dialog (editDescription() is defined client-side).
        target.button("description", onclick="editDescription();").text("Edit Description")
    def renderRecipientList(target):
        # Render the "Recipient List" row: which reviewers/watchers will
        # actually receive e-mails about this review.  The
        # reviewrecipientfilters table stores per-user include/exclude
        # overrides; a row with uid 0 encodes the review-wide default.
        cursor.execute("SELECT uid, fullname, include FROM reviewrecipientfilters JOIN users ON (uid=id) WHERE review=%s", (review.id,))
        default_include = True
        # Owners are always implicitly included.
        included = dict((owner.fullname, owner.id) for owner in review.owners)
        excluded = {}
        for user_id, fullname, include in cursor:
            if user_id == 0: default_include = include
            elif include: included[fullname] = user_id
            elif user_id not in review.owners: excluded[fullname] = user_id
        mode = None
        users = None
        buttons = []
        opt_in_button = False
        opt_out_button = False
        # Decide which of the four textual modes applies, and whether the
        # current user should be offered an opt-in or an opt-out button.
        if default_include:
            if excluded:
                mode = "Everyone except "
                users = excluded
                opt_out_button = user.fullname not in excluded
                opt_in_button = not opt_out_button
            else:
                mode = "Everyone."
                opt_out_button = True
        else:
            if included:
                mode = "No-one except "
                users = included
                opt_in_button = user.fullname not in included
                opt_out_button = not opt_in_button
            else:
                mode = "No-one at all."
                opt_in_button = True
        # Only users already associated with the review may toggle themselves.
        if user in review.owners or user in review.reviewers or user in review.watchers:
            if opt_in_button:
                buttons.append(("Include me, please!", "includeRecipient(%d);" % user.id))
            if opt_out_button:
                buttons.append(("Exclude me, please!", "excludeRecipient(%d);" % user.id))
        target.span("mode").text(mode)
        if users:
            # Comma-separated list of the excepted/included users.
            container = target.span("users")
            first = True
            for fullname in sorted(users.keys()):
                if first: first = False
                else: container.text(", ")
                container.span("user", critic_user_id=users[fullname]).text(fullname)
            container.text(".")
        if buttons:
            container = target.div("buttons")
            for label, onclick in buttons:
                container.button(onclick=onclick).text(label)
row("Branch", renderBranchName, "The branch containing the commits to review.", right=False)
row("Owner%s" % ("s" if len(review.owners) > 1 else ""), ", ".join(owner.fullname for owner in review.owners), "The users who created and/or owns the review.", right=renderEditOwners)
if review.description:
row("Description", review.description, "A longer description of the changes to be reviewed.", linkify=linkToCommit, cellId="description", right=renderEditDescription)
row("Reviewers", renderReviewers, "Users responsible for reviewing the changes in this review.", right=False)
row("Watchers", renderWatchers, "Additional users who receive e-mails about updates to this review.", right=False)
row("Recipient List", renderRecipientList, "Users (among the reviewers and watchers) who will receive any e-mails about the review.", right=False)
profiler.check("basic")
review_state = review.getReviewState(db)
profiler.check("review state")
progress = target.table('paleyellow progress', align='center')
progress_header = progress.tr().td('h1', colspan=3).h1()
progress_header.text("Review Progress")
progress_header_right = progress_header.span("right")
progress_header_right.text("Display log: ")
progress_header_right.a(href="showreviewlog?review=%d&granularity=module" % review.id).text("[per module]")
progress_header_right.text()
progress_header_right.a(href="showreviewlog?review=%d&granularity=file" % review.id).text("[per file]")
progress_h1 = progress.tr().td('percent', colspan=3).h1()
title_data = { 'id': 'r/%d' % review.id,
'summary': review.summary,
'progress': str(review_state) }
if review.state == "closed":
progress_h1.img(src=htmlutils.getStaticResourceURI("seal-of-approval-left.png"),
style="position: absolute; margin-left: -80px; margin-top: -100px")
progress_h1.text("Finished!")
elif review.state == "dropped":
progress_h1.text("Dropped...")
elif review.state == "open" and review_state.accepted:
progress_h1.img(src=htmlutils.getStaticResourceURI("seal-of-approval-left.png"),
style="position: absolute; margin-left: -80px; margin-top: -100px")
progress_h1.text("Accepted!")
progress_h1.div().span("remark").text("Hurry up and close it before anyone has a change of heart.")
else:
progress_h1.text(review_state.getProgress())
if review_state.issues:
progress_h1.span("comments").text(" and ")
progress_h1.text("%d" % review_state.issues)
progress_h1.span("comments").text(" issue%s" % (review_state.issues > 1 and "s" or ""))
if review_state.getPercentReviewed() != 100.0:
cursor = db.cursor()
cursor.execute("""SELECT 1
FROM reviewfiles
LEFT OUTER JOIN reviewuserfiles ON (reviewuserfiles.file=reviewfiles.id)
WHERE reviewfiles.review=%s
AND reviewfiles.state='pending'
AND reviewuserfiles.uid IS NULL""",
(review.id,))
if cursor.fetchone():
progress.tr().td('stuck', colspan=3).a(href="showreviewlog?review=%d&granularity=file&unassigned=yes" % review.id).text("Not all changes have a reviewer assigned!")
cursor.execute("""SELECT uid, MIN(reviewuserfiles.time)
FROM reviewfiles
JOIN reviewuserfiles ON (reviewuserfiles.file=reviewfiles.id)
WHERE reviewfiles.review=%s
AND reviewfiles.state='pending'
GROUP BY reviewuserfiles.uid""",
(review.id,))
    def total_seconds(delta):
        # timedelta -> whole seconds, microseconds dropped.  Presumably a
        # manual re-implementation for pre-2.7 compatibility
        # (datetime.timedelta.total_seconds() was added in Python 2.7).
        return delta.days * 60 * 60 * 24 + delta.seconds
now = datetime.datetime.now()
pending_reviewers = [(dbutils.User.fromId(db, user_id), total_seconds(now - timestamp)) for (user_id, timestamp) in cursor.fetchall() if total_seconds(now - timestamp) > 60 * 60 * 8]
if pending_reviewers:
progress.tr().td('stragglers', colspan=3).text("Needs review from")
for reviewer, seconds in pending_reviewers:
if reviewer.status == 'retired': continue
elif reviewer.status == 'absent': warning = " absent"
elif not reviewer.getPreference(db, "email.activated"): warning = " no-email"
else: warning = ""
if seconds < 60 * 60 * 24:
hours = seconds / (60 * 60)
duration = " (%d hour%s)" % (hours, "s" if hours > 1 else "")
elif seconds < 60 * 60 * 24 * 7:
days = seconds / (60 * 60 * 24)
duration = " (%d day%s)" % (days, "s" if days > 1 else "")
elif seconds < 60 * 60 * 24 * 30:
weeks = seconds / (60 * 60 * 24 * 7)
duration = " (%d week%s)" % (weeks, "s" if weeks > 1 else "")
else:
duration = " (wake up!)"
progress.tr().td('straggler' + warning, colspan=3).text("%s%s" % (reviewer.fullname, duration))
if user in review.owners:
progress.tr().td('pinging', colspan=3).span().text("Send a message to these users by pinging the review.")
title_format = user.getPreference(db, 'ui.title.showReview')
try:
document.setTitle(title_format % title_data)
except Exception, exc:
document.setTitle(traceback.format_exception_only(type(exc), exc)[0].strip())
profiler.check("progress")
check = profiler.start("ApprovalColumn.fillCache")
    def linkToCommit(commit):
        # Abbreviated-SHA1 link target that preserves the review context in
        # the query string.
        return "%s?review=%d" % (commit.sha1[:8], review.id)
approval_cache = {}
ApprovalColumn.fillCache(db, user, review, approval_cache, profiler)
check.stop()
summary_column = SummaryColumn(review, linkToCommit)
summary_column.fillCache(db, review)
profiler.check("SummaryColumn.fillCache")
columns = [(10, log.html.WhenColumn()),
(60, summary_column),
(16, log.html.AuthorColumn()),
(7, ApprovalColumn(user, review, ApprovalColumn.APPROVED, approval_cache)),
(7, ApprovalColumn(user, review, ApprovalColumn.TOTAL, approval_cache))]
def renderReviewPending(db, target):
if not user.isAnonymous():
target.text("Filter: ")
if hasPendingChanges:
target.a(href="showcommit?review=%d&filter=pending" % review.id, title="All changes you need to review.").text("[pending]")
target.text()
if user in review.reviewers:
target.a(href="showcommit?review=%d&filter=reviewable" % review.id, title="All changes you can review, including what you've already reviewed.").text("[reviewable]")
target.text()
target.a(href="showcommit?review=%d&filter=relevant" % review.id, title="All changes that match your filters.").text("[relevant]")
target.text()
target.text("Manual: ")
target.a(href="filterchanges?review=%d" % review.id, title="Manually select what files to display of the changes from all commits.").text("[full]")
target.text()
target.a(href="javascript:void(filterPartialChanges());", title="Manually select what what files to display of the changes in a selection of commits.").text("[partial]")
req.addResponseHeader("ETag", review.getETag(db, user))
if user.getPreference(db, "review.useMustRevalidate"):
req.addResponseHeader("Cache-Control", "must-revalidate")
yield flush(target)
try:
try: prefetch_commits.getCommits(db)
except AttributeError: raise Exception, prefetch_commits.error
profiler.check("FetchCommits.getCommits()")
cursor.execute("""SELECT child
FROM changesets
JOIN reviewchangesets ON (reviewchangesets.changeset=changesets.id)
JOIN commits ON (commits.id=changesets.child)
WHERE review=%s""",
(review.id,))
commits = [gitutils.Commit.fromId(db, repository, commit_id) for (commit_id,) in cursor]
cursor.execute("""SELECT id, old_head, new_head, new_upstream, uid, branch
FROM reviewrebases
WHERE review=%s""",
(review.id,))
all_rebases = [(rebase_id,
gitutils.Commit.fromId(db, repository, old_head),
gitutils.Commit.fromId(db, repository, new_head) if new_head else None,
dbutils.User.fromId(db, user_id),
gitutils.Commit.fromId(db, repository, new_upstream) if new_upstream is not None else None,
branch_name)
for rebase_id, old_head, new_head, new_upstream, user_id, branch_name in cursor]
bottom_right = None
finished_rebases = filter(lambda item: item[2] is not None, all_rebases)
current_rebases = filter(lambda item: item[2] is None, all_rebases)
if current_rebases:
assert len(current_rebases) == 1
            def renderCancelRebase(db, target):
                # Bottom-right button offered to the user who prepared the
                # in-progress rebase, letting them abort it.
                target.button("cancelrebase").text("Cancel Rebase")
if user == current_rebases[0][3]:
bottom_right = renderCancelRebase
else:
            def renderPrepareRebase(db, target):
                # Bottom-right button shown when no rebase is in progress,
                # letting any user start preparing one.
                target.button("preparerebase").text("Prepare Rebase")
bottom_right = renderPrepareRebase
if finished_rebases:
cursor.execute("""SELECT commit
FROM reachable
WHERE branch=%s""",
(review.branch.id,))
actual_commits = [gitutils.Commit.fromId(db, repository, commit_id) for (commit_id,) in cursor]
else:
actual_commits = []
log.html.render(db, target, "Commits (%d)", commits=commits, columns=columns, title_right=renderReviewPending, rebases=finished_rebases, branch_name=review.branch.name, bottom_right=bottom_right, review=review, highlight=highlight, profiler=profiler, user=user, extra_commits=actual_commits)
yield flush(target)
profiler.check("log")
except gitutils.GitError, error:
div = target.div("error")
div.h1().text("Error!")
if error.sha1:
div.text("The commit %s is missing from the repository." % error.sha1)
else:
div.text("Failed to read commits from the repository: %s" % error.message)
all_chains = review.getCommentChains(db, user, skip=set(['commits', 'lines']))
profiler.check("chains (load)")
if all_chains:
issue_chains = filter(lambda chain: chain.type == "issue", all_chains)
draft_issues = filter(lambda chain: chain.state == "draft", issue_chains)
open_issues = filter(lambda chain: chain.state == "open", issue_chains)
addressed_issues = filter(lambda chain: chain.state == "addressed", issue_chains)
closed_issues = filter(lambda chain: chain.state == "closed", issue_chains)
note_chains = filter(lambda chain: chain.type == "note", all_chains)
draft_notes = filter(lambda chain: chain.state == "draft", note_chains)
open_notes = filter(lambda chain: chain.state != "draft" and chain.state != "empty", note_chains)
else:
open_issues = []
open_notes = []
chains = target.table("paleyellow comments", align="center", cellspacing=0)
h1 = chains.tr("h1").td("h1", colspan=3).h1().text("Comments")
links = h1.span("links")
if all_chains:
links.a(href="showcomments?review=%d&filter=all" % review.id).text("[display all]")
if not user.isAnonymous():
links.a(href="showcomments?review=%d&filter=all&blame=%s" % (review.id, user.name)).text("[in my commits]")
cursor.execute("""SELECT count(commentstoread.comment) > 0
FROM commentchains
JOIN comments ON (comments.chain=commentchains.id)
JOIN commentstoread ON (commentstoread.comment=comments.id)
WHERE commentchains.review=%s
AND commentstoread.uid=%s""",
[review.id, user.id])
if cursor.fetchone()[0]:
links.a(href="showcomments?review=%d&filter=toread" % review.id).text("[display unread]")
    def renderChains(target, chains):
        # One table row per comment chain: author, linked leader text and a
        # reply/unread summary cell.
        for chain in chains:
            row = target.tr("comment %s %s" % (chain.type, chain.state))
            row.td("author").text(chain.user.fullname)
            row.td("title").a(href="showcomment?chain=%d" % chain.id).innerHTML(chain.leader())
            ncomments = chain.countComments()
            nunread = chain.countUnread()
            cell = row.td("when")
            # The leader itself counts as a comment, hence "No replies" for a
            # chain of one and "ncomments - 1 replies" otherwise.
            if ncomments == 1:
                if nunread: cell.b().text("Unread")
                else: cell.text("No replies")
            else:
                if nunread: cell.b().text("%d of %d unread" % (nunread, ncomments))
                else: cell.text("%d repl%s" % (ncomments - 1, "ies" if ncomments > 2 else "y"))
if draft_issues:
h2 = chains.tr("h2", id="draft-issues").td("h2", colspan=3).h2().text("Draft Issues")
h2.a(href="showcomments?review=%d&filter=draft-issues" % review.id).text("[display all]")
h2.a(href="showcomments?review=%d&filter=draft-issues&blame=%s" % (review.id, user.name)).text("[in my commits]")
renderChains(chains, draft_issues)
if open_issues:
h2 = chains.tr("h2", id="open-issues").td("h2", colspan=3).h2().text("Open Issues")
h2.a(href="showcomments?review=%d&filter=open-issues" % review.id).text("[display all]")
h2.a(href="showcomments?review=%d&filter=open-issues&blame=%s" % (review.id, user.name)).text("[in my commits]")
renderChains(chains, open_issues)
if addressed_issues:
h2 = chains.tr("h2", id="addressed-issues").td("h2", colspan=3).h2().text("Addressed Issues")
h2.a(href="showcomments?review=%d&filter=addressed-issues" % review.id).text("[display all]")
h2.a(href="showcomments?review=%d&filter=addressed-issues&blame=%s" % (review.id, user.name)).text("[in my commits]")
renderChains(chains, addressed_issues)
if closed_issues:
h2 = chains.tr("h2", id="closed-issues").td("h2", colspan=3).h2().text("Resolved Issues")
h2.a(href="showcomments?review=%d&filter=closed-issues" % review.id).text("[display all]")
h2.a(href="showcomments?review=%d&filter=closed-issues&blame=%s" % (review.id, user.name)).text("[in my commits]")
renderChains(chains, closed_issues)
if draft_notes:
h2 = chains.tr("h2", id="draft-notes").td("h2", colspan=3).h2().text("Draft Notes")
h2.a(href="showcomments?review=%d&filter=draft-notes" % review.id).text("[display all]")
h2.a(href="showcomments?review=%d&filter=draft-notes&blame=%s" % (review.id, user.name)).text("[in my commits]")
renderChains(chains, draft_notes)
if open_notes:
h2 = chains.tr("h2", id="notes").td("h2", colspan=3).h2().text("Notes")
h2.a(href="showcomments?review=%d&filter=open-notes" % review.id).text("[display all]")
h2.a(href="showcomments?review=%d&filter=open-notes&blame=%s" % (review.id, user.name)).text("[in my commits]")
renderChains(chains, open_notes)
buttons = chains.tr("buttons").td("buttons", colspan=3)
buttons.button(onclick="CommentChain.create('issue');").text("Raise Issue")
buttons.button(onclick="CommentChain.create('note');").text("Write Note")
profiler.check("chains (render)")
yield flush(target)
cursor.execute("""SELECT DISTINCT reviewfiles.file, theirs.uid
FROM reviewfiles
JOIN reviewuserfiles AS yours ON (yours.file=reviewfiles.id)
JOIN reviewuserfiles AS theirs ON (theirs.file=yours.file AND theirs.uid!=yours.uid)
WHERE reviewfiles.review=%s
AND yours.uid=%s""",
(review.id, user.id))
rows = cursor.fetchall()
profiler.check("shared assignments (query)")
if rows:
reviewers = {}
for file_id, user_id in rows:
reviewers.setdefault(file_id, {})[user_id] = set()
shared = target.table('paleyellow shared', align='center', cellspacing=0)
row = shared.tr('h1')
shared_header = row.td('h1', colspan=2).h1()
shared_header.text("Shared Assignments")
shared_buttons = row.td('buttons', colspan=2).span(style="display: none")
shared_buttons.button("confirm").text("Confirm")
shared_buttons.button("cancel").text("Cancel")
granularity = "module"
        def moduleFromFile(file_id):
            # Map a file id to its module path, falling back to the file's
            # own path when it does not belong to a module.
            filename = dbutils.describe_file(db, file_id)
            return getModuleFromFile(repository, filename) or filename
        def formatFiles(files):
            # Human-readable list of paths for a set of file ids, collapsed to
            # modules unless per-file granularity was requested.
            paths = sorted([dbutils.describe_file(db, file_id) for file_id in files])
            if granularity == "file":
                return diff.File.eliminateCommonPrefixes(paths)
            else:
                modules = set()
                files = []
                for path in paths:
                    # NOTE(review): moduleFromFile above calls
                    # getModuleFromFile(repository, filename) with two
                    # arguments; here it is called with one — confirm the
                    # expected signature.
                    module = getModuleFromFile(path)
                    if module: modules.add(module)
                    else: files.append(path)
                return sorted(modules) + diff.File.eliminateCommonPrefixes(files)
files_per_team = review_utils.collectReviewTeams(reviewers)
teams_per_modules = {}
profiler.check("shared assignments (collect teams)")
for team, files in files_per_team.items():
modules = set()
for file_id in files:
modules.add(moduleFromFile(file_id))
teams_per_modules.setdefault(frozenset(modules), set()).update(team)
for modules, team in teams_per_modules.items():
row = shared.tr("reviewers")
cell = row.td("reviewers")
members = sorted([dbutils.User.fromId(db, user_id).fullname for user_id in team])
for member in members: cell.text(member).br()
row.td("willreview").innerHTML("<span class='also'>also</span> review changes in")
cell = row.td("files")
for path in diff.File.eliminateCommonPrefixes(sorted(modules)):
cell.span("file").innerHTML(path).br()
directory_ids = "[ %s ]" % ", ".join([str(dbutils.find_directory(db, path=path[:-1])) for path in modules if path.endswith("/")])
file_ids = "[ %s ]" % ", ".join([str(dbutils.find_file(db, path=path)) for path in modules if not path.endswith("/")])
user_ids = "[ %s ]" % ", ".join(map(str, team))
cell = row.td("buttons")
cell.button("accept", critic_directory_ids=directory_ids, critic_file_ids=file_ids, critic_user_ids=user_ids).text("I will review this!")
cell.button("deny", critic_directory_ids=directory_ids, critic_file_ids=file_ids, critic_user_ids=user_ids).text("They will review this!")
yield flush(target)
profiler.check("shared assignments")
cursor.execute("SELECT batches.id, users.fullname, batches.comment, batches.time FROM batches JOIN users ON (users.id=batches.uid) WHERE batches.review=%s ORDER BY batches.id DESC", [review.id])
rows = cursor.fetchall()
if rows:
notes = dict([(chain.id, chain) for chain in open_notes])
batches = target.table("paleyellow batches", align="center", cellspacing=0)
batches.tr().td("h1", colspan=3).h1().text("Work Log")
for batch_id, user_fullname, chain_id, when in rows:
row = batches.tr("batch")
row.td("author").text(user_fullname)
title = "<i>No comment</i>"
if chain_id:
if chain_id in notes:
title = notes[chain_id].leader()
else:
for chain in all_chains:
if chain.id == chain_id:
title = chain.leader()
break
row.td("title").a(href="showbatch?batch=%d" % batch_id).innerHTML(title)
row.td("when").text(time.strftime("%Y-%m-%d %H:%M", when.timetuple()))
profiler.check("batches")
profiler.output(db, user, target)
yield flush()
if review.branch.head:
try: head_according_to_git = repository.revparse(review.branch.name)
except: head_according_to_git = None
head_according_to_us = review.branch.head.sha1
if head_according_to_git != head_according_to_us:
# The git repository disagrees with us. Potentially harmful updates
# to the branch will be rejected by the git hook while this is the
# case, but this means that "our" head might not be referenced at
# all and thus that it might be GC:ed by the git repository at some
# point. To avoid that, add a keepalive reference.
repository.keepalive(head_according_to_us)
yield "\n<!-- branch head mismatch: git=%s, us=%s (corrected) -->" % (head_according_to_git[:8] if head_according_to_git else "N/A", head_according_to_us[:8])
| [
"jl@opera.com"
] | jl@opera.com |
a15574410724ba4ab74e9a614969967bd761fc75 | 242fc8beff7e1e51b5cdd9bfa3d118267f56861d | /shunting_yard/parser.py | dd42d72177d0963e445e9387f19599b0eaaefeaf | [] | no_license | gmkvaal/shunting_yard | dac0dd780cf2c779f410edad54f72618c5379f80 | d84b0c4aa3c36aff435ede5252f143182f656fc0 | refs/heads/master | 2021-07-23T12:14:48.666536 | 2017-11-03T13:08:13 | 2017-11-03T13:08:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,794 | py | from typing import List
from collections import namedtuple
from enum import Enum, auto
from .tokenizer import tokenizer
# (next_state, increment): the state function the engine should run next, and
# whether it should advance to the following token before calling it.
StateRet = namedtuple('StateRet', ['next_state', 'increment'])
class StateType(Enum):
    # Token category used by the tokenizer for numeric literals.  All other
    # categories are plain strings ('OPERATOR', 'FUNCTION', ...) —
    # NOTE(review): consider unifying everything on this enum.
    NUMBER = auto()
def classify_token(token: dict, operator_stack: List[dict], output_queue: List[dict]) -> StateRet:
    """Dispatch on the type of the currently read token.

    Numbers go straight to the output queue; operators, functions and
    parentheses are routed to their dedicated handler states; SKIP tokens
    are consumed without effect.

    Args:
        token: Currently read token.
        operator_stack: Stack of operators.
        output_queue: Tokens in RPN order.

    Returns:
        Tuple of: next state, whether to advance to the next token.

    Raises:
        ValueError: If the token's type is not recognised.
    """
    if token['type'] == StateType.NUMBER:
        output_queue.append(token)
        return StateRet(classify_token, True)

    if token['type'] == 'OPERATOR':
        return StateRet(operator, False)

    if token['type'] == 'FUNCTION':
        operator_stack.append(token)
        return StateRet(classify_token, True)

    if token['type'] == 'LEFT_PARENTHESIS':
        operator_stack.append(token)
        return StateRet(classify_token, True)

    if token['type'] == 'RIGHT_PARENTHESIS':
        return StateRet(right_parenthesis, False)

    if token['type'] == 'SKIP':
        return StateRet(classify_token, True)

    # Previously an unrecognised type fell through and returned None (and a
    # leftover debug print spammed stdout); the engine then crashed later
    # with an opaque AttributeError.  Fail loudly and immediately instead.
    raise ValueError("Unknown token type: {!r}".format(token['type']))
def operator(token: dict, operator_stack: List[str], output_queue: List[str]) -> StateRet:
    """Handle a token classified as an operator.

    The operator is pushed immediately when the stack is empty, when the
    stack top is not an operator (functions/parentheses have precedence
    None), or when the token is right-associative.  Otherwise control is
    handed to pop_operators, which unwinds higher-precedence operators
    before the push.

    Args:
        token: Currently read token.
        operator_stack: Stack of operators.
        output_queue: Tokens in RPN order.

    Returns:
        Tuple of: next state, whether to advance to the next token.
    """
    del output_queue  # Not used in this state

    stack_is_empty = not operator_stack
    top_is_not_operator = (not stack_is_empty
                           and operator_stack[-1]['precedence'] is None)

    if stack_is_empty or top_is_not_operator or token['associativity'] == 'RIGHT':
        operator_stack.append(token)
        return StateRet(classify_token, True)

    return StateRet(pop_operators, False)
def pop_operators(token: dict, operator_stack: List[str], output_queue: List[str]) -> StateRet:
    """Unwind the operator stack before pushing the current operator.

    Moves one operator per call from the stack to the output queue while
    the stack top is a left-associative operator whose precedence is at
    least the current token's; once that no longer holds, the token is
    pushed and scanning resumes.

    Args:
        token: Currently read token.
        operator_stack: Stack of operators.
        output_queue: Tokens in RPN order.

    Returns:
        Tuple of: next state, whether to advance to the next token.
    """
    if operator_stack:
        top = operator_stack[-1]
        should_pop = (top['precedence'] is not None
                      and top['precedence'] >= token['precedence']
                      and top['associativity'] == 'LEFT')
        if should_pop:
            output_queue.append(operator_stack.pop())
            return StateRet(pop_operators, False)

    operator_stack.append(token)
    return StateRet(classify_token, True)
def right_parenthesis(token: dict, operator_stack: List[str], output_queue: List[str]) -> StateRet:
    """Match a right parenthesis against the operator stack.

    One operator per call is moved from the stack to the output queue until
    the matching left parenthesis is found; the pair is then discarded.

    Args:
        token: Currently read token.
        operator_stack: Stack of operators.
        output_queue: Tokens in RPN order.

    Returns:
        Tuple of: next state, whether to advance to the next token.

    Raises:
        Exception: If the stack empties before a left parenthesis appears
            (mismatched parentheses).
    """
    del token  # Not used in this state

    if not operator_stack:
        raise Exception('Mismatching parentheses')

    if operator_stack[-1]['type'] == 'LEFT_PARENTHESIS':
        operator_stack.pop()
        return StateRet(post_right_parenthesis, False)

    output_queue.append(operator_stack.pop())
    return StateRet(right_parenthesis, False)
def post_right_parenthesis(token: dict, operator_stack: List[dict], output_queue: List[dict]) -> StateRet:
    """Called after a parenthesis pair has been matched.

    If a function sits atop the stack (the parentheses were its argument
    list), it is popped to the output queue.  Scanning then resumes past
    the right parenthesis in either case.

    Args:
        token: Currently read token.
        operator_stack: Stack of operators.
        output_queue: Tokens in RPN order.

    Returns:
        Tuple of: next state, whether to advance to the next token.
    """
    if len(operator_stack) > 0 and operator_stack[-1]['type'] == 'FUNCTION':
        output_queue.append(operator_stack.pop())
    # Bug fix: this return used to live inside the if-branch, so a plain
    # parenthesised expression (no function call) made the state return
    # None and crashed the engine on return_state.increment.
    return StateRet(classify_token, True)
def empty_operator_stack(operator_stack: List[str], output_queue: List[str]) -> None:
    """Drain the remaining operators from the stack into the output queue.

    Called once all input tokens have been consumed; operators come off the
    stack in LIFO order.

    Args:
        operator_stack: Stack of operators (emptied in place).
        output_queue: Tokens in RPN order (appended to in place).
    """
    while operator_stack:
        output_queue.append(operator_stack.pop())
def shunting_yard(input_string: str) -> List[str]:
    """Engine of the shunting-yard parser finite state machine.

    Tokenizes the input, then repeatedly invokes the current state function
    on the current token; each state returns the next state and whether the
    token position should advance.  When all tokens are consumed, remaining
    operators are flushed to the output queue.

    Args:
        input_string: A mathematical expression.

    Returns:
        A list of tokens ordered in Reverse Polish Notation.
    """
    operator_stack = []
    output_queue = []
    token_list = tokenizer(input_string)

    state = classify_token
    position = 0
    while True:
        current_token = token_list[position]
        next_state, increment = state(current_token, operator_stack, output_queue)
        if increment:
            position += 1
        state = next_state
        if position == len(token_list):
            empty_operator_stack(operator_stack, output_queue)
            break

    return output_queue
| [
"guttorm.kvaal@gmail.com"
] | guttorm.kvaal@gmail.com |
3f2d06c3d1274caa259fdb14604ed63acc54c4a3 | e950cafc9feeeacba9d40b18477dab43bb8737bf | /src/lab1/tfs_client.py | a184cf7077222781caa9649835a758e543e1b1d3 | [
"MIT-0"
] | permissive | aws-samples/aws-inf1-gcr-workshop | fe21b4637f09a2c51a977aaea999a20c31b43277 | a5712c17a66809fc60cd57a056a00df3b9b2fc8e | refs/heads/master | 2022-04-23T20:34:53.914422 | 2020-04-28T02:36:42 | 2020-04-28T02:36:42 | 254,085,220 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,791 | py | '''
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
'''
import numpy as np
import grpc
import tensorflow as tf
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.resnet50 import preprocess_input
from tensorflow.keras.applications.resnet50 import decode_predictions
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
tf.keras.backend.set_image_data_format('channels_last')
if __name__ == '__main__':
    # Open an insecure (plaintext) gRPC channel to a TensorFlow Serving
    # instance on the local host's default gRPC port.
    channel = grpc.insecure_channel('localhost:8500')
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
    # Download a sample image and preprocess it for ResNet50; [None, ...]
    # adds the leading batch dimension.
    img_file = tf.keras.utils.get_file(
        "./kitten_small.jpg",
        "https://raw.githubusercontent.com/awslabs/mxnet-model-server/master/docs/images/kitten_small.jpg")
    img = image.load_img(img_file, target_size=(224, 224))
    img_array = preprocess_input(image.img_to_array(img)[None, ...])
    # Build a PredictRequest against the served model 'resnet50_inf1_serve',
    # attaching the image under the input tensor name 'input'.
    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'resnet50_inf1_serve'
    request.inputs['input'].CopyFrom(
        tf.contrib.util.make_tensor_proto(img_array, shape=img_array.shape))
    # Run remote inference and decode the returned 'output' tensor into
    # human-readable ImageNet class labels.
    result = stub.Predict(request)
    prediction = tf.make_ndarray(result.outputs['output'])
    print(decode_predictions(prediction))
| [
"zhazhn@amazon.com"
] | zhazhn@amazon.com |
716139a42c9d3176dc33f024f607d86df864e7be | b5cd2a7828acdaebd49964ac93be7085bea0d011 | /carte.py | c4e4e9aa68d71d16ec5026c7e3144cfd0e940827 | [] | no_license | stevchen99/pythonRoboc | e5576930cd9295c08336ad3754c7a4e326e5a8a5 | 5301be8e918ce6b3c36b491e02c8aaddd520eb4e | refs/heads/master | 2020-12-08T16:44:46.439760 | 2020-01-17T18:03:39 | 2020-01-17T18:03:39 | 233,036,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 340 | py | # -*-coding:Utf-8 -*
"""Ce module contient la classe Carte."""
class Carte:
    """Transition object between a map file and a labyrinth.

    Attributes:
        nom: Display name of the map.
        labyrinthe: Labyrinth built from the map's raw string representation.
    """

    def __init__(self, nom, chaine):
        # NOTE(review): creer_labyrinthe_depuis_chaine is neither defined nor
        # imported in this module as shown — confirm the missing import
        # (presumably from the labyrinthe module).
        self.nom = nom
        self.labyrinthe = creer_labyrinthe_depuis_chaine(chaine)

    def __repr__(self):
        # Unambiguous representation, e.g. <Carte facile>.
        return "<Carte {}>".format(self.nom)
| [
"stevchen99@gmail.com"
] | stevchen99@gmail.com |
962ad189b3695ad55e5db43027b6e869b2817147 | fb408595c1edee0be293302c6d7bfc0c77d37c46 | /python/DP/DP_2096.py | a5753e0e8dda2057310f4dee0f056e7940fbb74d | [] | no_license | as950118/Algorithm | 39ad25519fd0e42b90ddf3797a61239862ad79b5 | 739a7d4b569057cdb6b6faa74254512b83d02bb1 | refs/heads/master | 2023-07-21T12:38:00.653579 | 2023-07-19T06:57:17 | 2023-07-19T06:57:17 | 125,176,176 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 610 | py | n = int(input())
arr = [0]*(3)
dp_max = [0]*(3)
dp_min = [0]*(3)
arr = list(map(int, input().split()))
temp = arr[:]
dp_max = temp[:]
dp_min = temp[:]
for i in range(1, n):
arr = list(map(int, input().split()))
temp[0] = max(dp_max[0], dp_max[1]) + arr[0]
temp[1] = max(dp_max[0], dp_max[1], dp_max[2]) + arr[1]
temp[2] = max(dp_max[1], dp_max[2]) + arr[2]
dp_max = temp[:]
temp[0] = min(dp_min[0], dp_min[1]) + arr[0]
temp[1] = min(dp_min[0], dp_min[1], dp_min[2]) + arr[1]
temp[2] = min(dp_min[1], dp_min[2]) + arr[2]
dp_min = temp[:]
print(max(dp_max), min(dp_min))
| [
"na_qa@icloud.com"
] | na_qa@icloud.com |
f443e27275903b151314c40311f6464aafca1b44 | 72784799e5436e8a96462bdbcb29baeb644dcc7f | /utilities/animate.py | 2c562e41c8ec2e736db293f0f772a55ff0091345 | [] | no_license | simonsben/undergrad_thesis | 31dd205cb734f7c876b5053040e5ab0bf8fbd5cb | 8458d00ae6525602b944279c2c280149a5957cb1 | refs/heads/master | 2020-04-02T10:46:55.255322 | 2019-04-08T06:01:48 | 2019-04-08T06:01:48 | 154,354,775 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 979 | py | from matplotlib.pylab import figure, show, savefig, title, axis, draw
from networkx import spring_layout, draw_networkx_edges, draw_networkx_nodes
from matplotlib.animation import FuncAnimation
def add_node(graph, i, plot_layout):
    """Animation frame callback for FuncAnimation (see animate_creation).

    NOTE(review): this is a stub -- the networkx drawing calls are commented
    out, so each frame only increments a local counter and redraws the figure.
    """
    # draw_networkx_edges(graph, plot_layout, alpha=.3)
    # draw_networkx_nodes(node, plot_layout, node_size=100, edgecolors='k', node_color='w')
    i += 1  # local rebinding only; the caller's value is not affected
    draw()
def animate_creation(network, blocking=True, save_plot=True):
    """Draw the first nodes/edges of the network and set up a FuncAnimation.

    NOTE(review): `blocking` and `save_plot` are currently unused, and the
    FuncAnimation object is created after show() and never returned, so it
    may be garbage-collected before running -- TODO confirm intent.
    """
    _title = 'Free-Scale Network'
    fig = figure(_title)
    axis('off')
    graph = network.network_plot  # presumably a networkx graph exposed by the wrapper -- verify
    plot_layout = spring_layout(graph)
    init_nodes = graph.nodes[:3]  # NOTE(review): slicing NodeView/EdgeView fails on modern networkx -- confirm version
    init_edges = graph.edges[:2]
    draw_networkx_nodes(graph, plot_layout, nodelist=init_nodes, node_size=100, edgecolors='k', node_color='w')
    draw_networkx_edges(graph, plot_layout, edgelist=init_edges, alpha=.3)
    draw()
    show()
    i = 3  # next node index to animate (first three are drawn statically above)
    animation = FuncAnimation(fig, add_node, fargs=(graph, i, plot_layout))
| [
"simons.ben0@gmail.com"
] | simons.ben0@gmail.com |
52a608c85aa5b18e530c6cb0cae1d8d2f58b7ec4 | 14d8418ca5990217be67aee89fdaa310db03fbba | /test_stats_d_graph_display.py | fffe014750a15f323e8f39408530e03c6133cae4 | [
"Apache-2.0"
] | permissive | sachanta/lm-sdk-python | 3a16457bd2d5b880a0d238a88a9d1d5b8d9675f0 | e476d415c7279457f79b5d032a73d950af2fe96b | refs/heads/master | 2023-08-03T08:39:42.842790 | 2021-09-13T07:20:56 | 2021-09-13T07:20:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,445 | py | # coding: utf-8
"""
LogicMonitor REST API
LogicMonitor is a SaaS-based performance monitoring platform that provides full visibility into complex, hybrid infrastructures, offering granular performance monitoring and actionable data and insights. logicmonitor_sdk enables you to manage your LogicMonitor account programmatically. Note: For Python SDKs, the REQUEST parameters can contain camelCase or an underscore. However, the RESPONSE parameters will always contain an underscore. For example, the REQUEST parameter can be testLocation or test_location. The RESPONSE parameter will be test_location. # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import logicmonitor_sdk
from logicmonitor_sdk.models.stats_d_graph_display import StatsDGraphDisplay # noqa: E501
from logicmonitor_sdk.rest import ApiException
class TestStatsDGraphDisplay(unittest.TestCase):
    """StatsDGraphDisplay unit test stubs (no assertions implemented yet)."""

    def setUp(self):
        """No fixtures are needed for these generated stubs."""

    def tearDown(self):
        """Nothing to clean up."""

    def testStatsDGraphDisplay(self):
        """Test StatsDGraphDisplay"""
        # FIXME: construct object with mandatory attributes with example values
        # model = logicmonitor_sdk.models.stats_d_graph_display.StatsDGraphDisplay()  # noqa: E501
if __name__ == '__main__':
unittest.main()
| [
"bamboo@build01.us-west-1.logicmonitor.net"
] | bamboo@build01.us-west-1.logicmonitor.net |
d5816bf17fecd71e306ba321ee4fd6bda1913e63 | 2c4f00e65671467ed14b33f4f6b574bd9944eaea | /test.py | eceeb7bd27460d1c67779648392af96cb4ff1456 | [
"MIT"
] | permissive | ashafer01/chain | 20a459a7a4f0bc43668cfe71f58d568689c61f23 | e7ea9b3fb2b92459c581dfc0ebc424300cd333c0 | refs/heads/master | 2020-07-06T14:18:42.242072 | 2019-08-18T20:37:19 | 2019-08-18T20:37:19 | 203,046,524 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,107 | py | import unittest
from chain import chain, args
class TestChain(unittest.TestCase):
    """Behavioral tests for chain() and the args() argument wrapper."""

    def test_chain(self):
        """chain() must apply piped callables left to right, mixing in args()."""
        def _append_x(value):
            return value + 'x'

        def _append_y(value):
            return value + 'y'

        def _append_z(value):
            return value + 'z'

        def _join_two(first, second):
            return first + second

        def _join_three(first, second, third):
            return first + second + third

        chain_res = chain(args('w') | _append_x | _append_y | args('2'), _join_two, _append_z, args('3', '4'), _join_three)
        native_res = _join_three(_append_z(_join_two(_append_y(_append_x('w')), '2')), '3', '4')
        self.assertEqual(chain_res, native_res)

    def test_return_args(self):
        """A step may return args(...) to pass positional and keyword arguments on."""
        def _prefix_one(value):
            return '1 ' + value

        def _suffix_two(value):
            return value + ' 2'

        def _wrap_as_args(value):
            return args('hello', world=value)

        def _consume_args(hello, world=''):
            return hello + ' ' + world

        res = chain(args('test') | _prefix_one | _suffix_two | _wrap_as_args | _consume_args)
        expected = 'hello 1 test 2'
        self.assertEqual(res, expected)
| [
"ashafer@pm.me"
] | ashafer@pm.me |
7bc0cb59175906afc9c0c195273cb6c3843800f3 | f213d968b6d43ca27c52d8d61311c054be0e204c | /tutorial2.py | 84a2831dee2e41157438f719de429b18aaf113bf | [] | no_license | datasqr/OpenCV | f9a8c161735fd3b31f6a65b73b41e5ed7fda783c | 3fe8d52e1ea133f3c402ae7fd2606d1f716e0215 | refs/heads/master | 2021-01-25T04:57:50.930615 | 2015-01-07T19:25:34 | 2015-01-07T19:25:34 | 28,535,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 655 | py | # http://docs.opencv.org/trunk/doc/py_tutorials/py_gui/py_drawing_functions/py_drawing_functions.html
#Drawing Functions
import numpy as np
import cv2
# Create a black image
img = np.zeros((250,250,3), np.uint8)  # 250x250, 3 channels (OpenCV uses BGR); zero-filled = black
print img  # Python 2 print statement -- this tutorial script predates Python 3
# Draw a diagonal blue line with thickness of 5 px
#cv2.line(img,(0,0),(511,511),(255,0,0),5)
#scv2.rectangle(img,(384,0),(510,128),(0,255,0),3)
#cv2.circle(img,(447,63), 63, (0,0,255), -1)
#cv2.ellipse(img,(256,256),(100,50),0,0,180,255,-1)
# Build a closed yellow polyline from three vertices; OpenCV expects shape (N, 1, 2).
pts = np.array([[10,5],[20,30],[50,10]], np.int32)
pts = pts.reshape((-1,1,2))
cv2.polylines(img,[pts],True,(0,255,255))
# Show the image in a window until any key is pressed.
cv2.imshow('image',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
| [
"mateusz.zbikowski@gmail.com"
] | mateusz.zbikowski@gmail.com |
5e85990864fdcde2e416a6a7d1e2c645b29cd5de | 93b866284ca1ac29c5005555f2cb30454a0fb5cf | /Problems/59-Problem/Problem 59.py | 7f38e437f006d4d25166047a46688def172fbf69 | [] | no_license | FrancoisdeFouchecour/Projet-Euler | c2b17d1e35fbd10a708ba3221825a62a17818382 | 0cf70457c0418264c2eff7cdd0e92a07b61ecb07 | refs/heads/master | 2021-12-25T05:44:08.054648 | 2021-11-27T21:47:42 | 2021-11-27T21:47:42 | 168,253,571 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,087 | py | import time
import numpy as np
problem_number = 59
def decimal_to_binar(n):
    """Return the binary representation of n written as a base-10 integer.

    Example: 5 -> 101. Intended for non-negative integers only.
    """
    if n == 0:
        return 0
    quotient, bit = divmod(n, 2)
    return bit + 10 * decimal_to_binar(quotient)
def binar_to_decimal(n):
    """Inverse of decimal_to_binar: decode a base-10-written binary number.

    Example: 101 -> 5. Expects an integer composed only of 0/1 digits.
    """
    if n in (0, 1):
        return n
    digits = str(n)
    return int(digits[-1]) + 2 * binar_to_decimal(int(digits[:-1]))
def XOR(A, B):
    """Return the bitwise XOR of two non-negative integers.

    Keeps the original bit-string approach (equivalent to the built-in
    ``A ^ B`` for non-negative inputs): render both operands in binary,
    zero-pad to equal width, compare bit by bit.

    Bug fix: the original decoded the result string through the hand-rolled
    binar_to_decimal(), which crashes with ``int('')`` whenever the result
    is a single bit (e.g. XOR(1, 1) or XOR(0, 0)). Decoding with
    ``int(c, 2)`` handles every width correctly.
    """
    a = bin(A)[2:]
    b = bin(B)[2:]
    # Zero-pad the shorter operand so the strings align bit-for-bit.
    width = max(len(a), len(b))
    a = a.zfill(width)
    b = b.zfill(width)
    c = ''.join('0' if bit_a == bit_b else '1' for bit_a, bit_b in zip(a, b))
    return int(c, 2)
def decipher(text, key):
    """Decrypt a sequence of byte values with a repeating XOR key.

    text -- iterable of integer byte values (the ciphertext)
    key  -- non-empty sequence of integer byte values, cycled over the text.
            Generalized from the original hard-coded 3-byte assumption
            (``key[i % 3]``) to any key length; behavior is unchanged for
            the 3-byte keys this script uses.

    Returns the decrypted text as a string.
    """
    plain_text = ""
    key_length = len(key)
    for i in range(len(text)):
        plain_text += chr(XOR(text[i], key[i % key_length]))
    return plain_text
#read data
# Parse comma-separated integers from data.txt into list_name. Double quotes
# are skipped character by character; ',' and '\n' both terminate a number.
number_file = open("data.txt", "r")
raw_data = number_file.read()
number_file.close()
list_name = []
word = ""
for char in raw_data:
    if char == ',':
        list_name.append(int(word))
        word = ""
    elif char == '\n':
        # NOTE(review): `word` is not reset here, so this assumes the newline
        # is the last delimiter in the file -- verify the data format.
        list_name.append(int(word))
    elif char != '"':
        word += char
#Solution
def solution(input_list):
    """Brute-force the 3-letter lowercase XOR key (Project Euler 59 style).

    Tries every key 'aaa'..'zzz', scores each decryption against a reference
    letter-frequency table, keeps the best (lowest) score, and returns the
    sum of the decrypted byte values for the winning key.
    """
    result = 0
    length = len(input_list)
    # Reference frequencies indexed a..z. NOTE(review): the distribution looks
    # unusual for plain English text ('e' at index 4 is only 2.799 while 't'
    # is 15.978) -- possibly word-initial letter frequencies; confirm source.
    normal_frequency = [11.682, 4.434, 5.238, 3.174, 2.799, 4.027, 1.642, 4.200, 7.294, 0.511, 0.456, 2.415, 3.826, 2.284, 7.631, 4.319, 0.222, 2.826, 6.686, 15.978, 1.183, 0.824, 5.497, 0.045, 0.763, 0.045]
    score = np.infty  # lower == closer to the reference distribution
    for a in range(97, 123):  # 97..122 == ord('a')..ord('z')
        for b in range(97, 123):
            for c in range(97, 123):
                key = [a, b, c]
                new_text = [0 for i in range(length)]
                for i in range(len(new_text)):
                    new_text[i] = XOR(input_list[i], key[i%3])
                # Count A-Z/a-z occurrences separately per key position (case-folded).
                letter_frequency = [[0 for j in range(26)] for i in range(3)]
                for i in range(len(new_text)):
                    if 65 <= new_text[i] and new_text[i] <= 90:  # 'A'..'Z'
                        letter_frequency[i%3][new_text[i] - 65] += 1
                    elif 97 <= new_text[i] and new_text[i] <= 122:  # 'a'..'z'
                        letter_frequency[i%3][new_text[i] - 97] += 1
                new_score = 0
                for i in range(3):
                    for j in range(26):
                        # NOTE(review): this normalizes counts to fractions while the
                        # table above appears percent-scaled -- verify the comparison.
                        letter_frequency[i][j] = letter_frequency[i][j]/(length//3)
                        new_score += abs(letter_frequency[i][j] - normal_frequency[j])
                if new_score < score:
                    score = new_score
                    result = sum(new_text)
    return result
#Test & Result
# Time the solver and write a small report file "Solution 59.txt".
fichier = open("Solution "+str(problem_number)+".txt", "w")
string = ""
begin_problem = time.time()
problem_value = solution(list_name)
end_problem = time.time()
problem_time = end_problem - begin_problem
string += "RESULT PROBLEM #"+str(problem_number)+"\n\n"
string += "Output: "+str(problem_value)+"\n"
string += "Computation time: "+str(problem_time)+" sec\n"
string += "\n\n\nCurrent date & time: " + time.strftime("%c")
fichier.write(string)
fichier.close()
| [
"francois.de-salivet-de-fouchecour@polytechnique.edu"
] | francois.de-salivet-de-fouchecour@polytechnique.edu |
85f5b6db2f09ac1feae49616d3363ce62a7724e1 | bab70d19d523e616ebaa1f74c114e88fd5d2ad83 | /CAhw2/RunMeForTest.py | af2bd375ebc9796f9d89d82792c8c2ae4b9716d8 | [] | no_license | FuHsinyu/Cellular-Automata-model | 4ef3351735aee8a4d268f67d3d04b36809bb33d2 | 2a0bdf505fd0b793caee5b1b708fb9d591532ebb | refs/heads/master | 2021-05-06T19:49:16.148410 | 2018-06-13T01:55:14 | 2018-06-13T01:55:14 | 112,194,748 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,204 | py | from pycx_gui import GUI
from myModel import CAmodel
import matplotlib.pyplot as plt
import numpy as np
import random
#change digit in line 18 and run to get a FAST TESTING RESULT
initRow = [0] * 100
for i in range(100):
initRow[i] = random.randrange(2)
r = 1 #range
systemSize = r * 2 + 1
k = 2 #base
rule = 0 #decimal rule initilized
cycleLenList = [] #final cycle length recording list
cycleReachedBool = False #booleanB
resultDict = dict() #hash results with respect to rules
while rule < 6: #By changing these for FAST TEST
model = CAmodel(r, k, rule, initRow, systemSize)
#def __init__(self, range, base, rule, initRow, systemSize):
runTime = 0 #run times
while runTime < 10e4:
currentRow = tuple(model.currentRow)
if currentRow not in resultDict:
resultDict[currentRow] = runTime
#print(resultDict)
else:
cycleLenList.append(runTime - resultDict[currentRow])
cycleReachedBool = True
break
runTime += 1
model.step()
if not cycleReachedBool:
cycleLenList.append(-1)
rule += 1 #
cycleReachedBool = False
resultDict.clear()
model.draw(cycleLenList) | [
"247047.niuitmo.ru"
] | 247047.niuitmo.ru |
ed3cea97ae571dfe1f0a45dba14fc43b93212a84 | fb21a8f1fc02f5cee6f0a759e336561726d3b184 | /experiments/lstm-notcut/run.py | 74e6c8e8a8f1be2abab441657d41651360c17bd5 | [] | no_license | yamaguchi-milkcocholate/GoogleBrain-VentilatorPressurePrediction | 76632353ff25a0c9ad8db51ef1f4b728954537b5 | 1996bb81f5b6880a20b8e39c681fecef0bc8a201 | refs/heads/main | 2023-09-05T17:25:46.980274 | 2021-11-24T04:40:50 | 2021-11-24T04:40:50 | 410,795,933 | 0 | 0 | null | 2021-11-04T01:28:27 | 2021-09-27T08:06:55 | Python | UTF-8 | Python | false | false | 6,082 | py | from random import seed
import numpy as np
import pandas as pd
import json
import os
import sys
import gc
import shutil
from pprint import pprint
from pathlib import Path
from typing import *
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
from sklearn.metrics import mean_absolute_error as mae
from sklearn.preprocessing import RobustScaler, PowerTransformer, MinMaxScaler
from sklearn.model_selection import KFold
import sys
print(str(Path(__file__).resolve().parent.parent.parent))
sys.path.append(str(Path(__file__).resolve().parent.parent.parent))
from src.utils import (
seed_every_thing,
fetch_data,
Config,
plot_metric,
reduce_tf_gpu_memory,
reduce_mem_usage,
fetch_custom_data,
CustomL1Loss
)
def build_model(config: Config, n_features) -> keras.models.Sequential:
model = keras.models.Sequential([keras.layers.Input(shape=(config.cut, n_features))])
for n_unit in config.n_units:
model.add(
keras.layers.Bidirectional(
keras.layers.LSTM(
n_unit,
return_sequences=True,
)
)
)
for n_unit in config.n_dense_units:
model.add(keras.layers.Dense(n_unit, activation="selu"))
model.add(keras.layers.Dense(1))
model.compile(
optimizer=keras.optimizers.Adam(learning_rate=config.lr),
loss='mae')
return model
def main(config: Dict[str, Any]):
config = Config().update(config)
seed_every_thing(seed=config.seed)
reduce_tf_gpu_memory(gpu_id=config.gpu_id)
basedir = Path(__file__).resolve().parent
datadir = basedir / ".." / ".." / "data"
logdir = basedir / ".." / ".." / "logs" / config.dirname
cachedir = basedir / ".." / ".." / "cache"
os.makedirs(logdir, exist_ok=True)
config.to_json(logdir / "config.json")
_, test_df, submission_df = fetch_custom_data(datadir=datadir, n_splits=config.n_splits)
test_df["count"] = (np.arange(test_df.shape[0]) % 80).astype(int)
test_preds_idx = test_df["count"] < config.cut
test_df = test_df[test_preds_idx].reset_index(drop=True)
test_df["pressure"] = 0
train_df = reduce_mem_usage(pd.read_csv(cachedir / f"train-10fold-debug{config.debug}.csv"))
test_df = reduce_mem_usage(pd.read_csv(cachedir / f"test-10fold-debug{config.debug}.csv"))
kfolds = train_df.iloc[0::config.cut]['kfold'].values
features = list(train_df.drop(["kfold", "pressure"], axis=1).columns)
pprint(features)
print(len(features))
cont_features = [f for f in features if ("RC_" not in f) and ("R_" not in f) and ("C_" not in f) and ("u_out" not in f)]
pprint(cont_features)
RS = RobustScaler()
train_df[cont_features] = RS.fit_transform(train_df[cont_features])
test_df[cont_features] = RS.transform(test_df[cont_features])
train_data, test_data = train_df[features].values, test_df[features].values
train_data = train_data.reshape(-1, config.cut, train_data.shape[-1])
targets = train_df[["pressure"]].to_numpy().reshape(-1, config.cut)
test_data = test_data.reshape(-1, config.cut, test_data.shape[-1])
with tf.device(f"/GPU:{config.gpu_id}"):
valid_preds = np.empty_like(targets)
test_preds = []
for fold in range(config.n_splits):
train_idx, test_idx = (kfolds != fold), (kfolds == fold)
print("-" * 15, ">", f"Fold {fold+1}", "<", "-" * 15)
savedir = logdir / f"fold{fold}"
os.makedirs(savedir, exist_ok=True)
X_train, X_valid = train_data[train_idx], train_data[test_idx]
y_train, y_valid = targets[train_idx], targets[test_idx]
model = build_model(config=config, n_features=len(features))
# es = EarlyStopping(
# monitor="val_loss",
# patience=config.es_patience,
# verbose=1,
# mode="min",
# restore_best_weights=True,
# )
customL1 = CustomL1Loss(
X_valid=X_valid,
y_valid=y_valid,
u_outs=X_valid[:, :, features.index("u_out")],
filepath=savedir / "weights_custom_best.h5"
)
check_point = ModelCheckpoint(
filepath=savedir / "weights_best.h5",
monitor="val_loss",
verbose=1,
save_best_only=True,
mode="min",
save_weights_only=True,
)
schedular = ReduceLROnPlateau(
mode="min", **config.schedular
)
history = model.fit(
X_train,
y_train,
validation_data=(X_valid, y_valid),
epochs=config.epochs,
batch_size=config.batch_size,
callbacks=[check_point, schedular, customL1]
)
model.save_weights(savedir / "weights_final.h5")
model.load_weights(savedir / "weights_custom_best.h5")
pd.DataFrame(history.history).to_csv(savedir / "log.csv")
plot_metric(filepath=savedir / "log.csv", metric="loss")
valid_preds[test_idx, :] = model.predict(X_valid).squeeze()
test_preds.append(model.predict(test_data).squeeze().reshape(-1, 1).squeeze())
del model, X_train, X_valid, y_train, y_valid
keras.backend.clear_session()
gc.collect()
pd.DataFrame(valid_preds).to_csv(logdir / "valid_preds.csv")
if not config.debug:
submission_df.loc[test_preds_idx, "pressure"] = np.median(test_preds, axis=0)
submission_df.to_csv(logdir / "submission.csv", index=False)
shutil.copyfile(Path(__file__), logdir / "script.py")
if __name__ == "__main__":
cnf_file = sys.argv[1]
cfg_file_path = Path(__file__).resolve().parent / cnf_file
with open(cfg_file_path, "rb") as f:
config = json.load(f)
main(config=config)
| [
"zuuuubo.tetsu@outlook.jp"
] | zuuuubo.tetsu@outlook.jp |
1bcb03882cd8aba3be9ee674c15f8f2ca62224bf | 42170b78e5277d96a33b8d796e3075a378723aa8 | /dataxHWSp2021/Practice Homework/Numpy_Pandas/student/tests/q1d.py | dca0b114bbcac2ff4f73cc5d33860855e83c8837 | [
"Apache-2.0"
] | permissive | 6shun/datax | e3d4d32295c26a6e62c6cd1ae2cabdd9b2f1addf | f912d22c838b511d3ada4ecfa3548afd80437b74 | refs/heads/main | 2023-03-03T09:51:35.255111 | 2022-01-21T16:13:34 | 2022-01-21T16:13:34 | 338,253,155 | 1 | 0 | null | 2021-02-12T07:42:14 | 2021-02-12T07:42:14 | null | UTF-8 | Python | false | false | 392 | py | test = { 'name': 'q1d',
'points': 1,
'suites': [ { 'cases': [ { 'code': '>>> isinstance(d, float)\nTrue',
'hidden': False,
'locked': False}],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'}]}
| [
"52470060+Mahan-Tajrobehkar@users.noreply.github.com"
] | 52470060+Mahan-Tajrobehkar@users.noreply.github.com |
9663d2aacfb226c81bf9757446cc9fe4df27d7ce | a732353686f6b22561edc6905e0243c2a2038667 | /szamlazz/models.py | 2ccb14534612b5d25db06aac0c8d415cc442cf5f | [
"MIT"
] | permissive | freemanPy/szamlazz.py | 34fac9bf94df6f628119d27908e43a834e4c1371 | 2822d1c6ea19178131fc38c283d68ef350dcfde9 | refs/heads/master | 2023-08-12T04:07:37.381429 | 2021-09-12T21:18:18 | 2021-09-12T21:18:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,854 | py | import base64
import logging
from pathlib import Path
from requests.models import Response
from typing import NamedTuple, Tuple
from urllib.parse import unquote
# noinspection PyPep8Naming
import xml.etree.ElementTree as ET
__all__ = ["Header", "Merchant", "Buyer", "Item", "Disbursement", "SzamlazzResponse", "PdfDataMissingError", "EmailDetails", ] # "WayBill"
logger = logging.getLogger(__name__)
class PdfDataMissingError(Exception):
    """Raised when a response holds no PDF data (see SzamlazzResponse.get_pdf_base64)."""
    pass
class Header(NamedTuple):
    """<fejlec>

    Invoice header fields; each attribute maps to the szamlazz.hu XML tag
    shown in its trailing comment. Everything defaults to an "empty" value
    so callers only need to set the fields relevant to their request.
    """
    creating_date: str = ""  # <keltDatum>2020-01-20</keltDatum>
    payment_date: str = ""  # <teljesitesDatum>2020-01-20</teljesitesDatum>
    due_date: str = ""  # <fizetesiHataridoDatum>2020-01-20</fizetesiHataridoDatum>
    payment_type: str = "Átutalás"  # <fizmod>Átutalás</fizmod>
    currency: str = "HUF"  # <penznem>HUF</penznem>
    invoice_language: str = "hu"  # <szamlaNyelve>hu</szamlaNyelve>  // can be: de, en, it, hu, fr, ro, sk, hr
    invoice_comment: str = ""  # <megjegyzes>Invoice comment</megjegyzes>
    name_of_bank: str = "MNB"  # <arfolyamBank>MNB</arfolyamBank>
    exchange_rate: float = 0.0  # <arfolyam>0.0</arfolyam>
    order_number: str = ""  # <rendelesSzam></rendelesSzam>
    pro_forma_number_ref: str = ""  # <dijbekeroSzamlaszam></dijbekeroSzamlaszam>
    deposit_invoice: bool = False  # <elolegszamla>false</elolegszamla>
    invoice_after_deposit_invoice: bool = False  # <vegszamla>false</vegszamla>
    correction_invoice: bool = False  # <helyesbitoszamla>false</helyesbitoszamla>
    number_of_corrected_invoice: str = ""  # <helyesbitettSzamlaszam></helyesbitettSzamlaszam>
    proforma_invoice: bool = False  # <dijbekero>false</dijbekero>
    invoice_prefix: str = ""  # <szamlaszamElotag></szamlaszamElotag>
    invoice_number: str = ""  # <szamlaszam>E-TST-2011-1</szamlaszam>  // needed for reverse_invoice|storno only
    invoice_template: str = ""  # <!-- Codomain: 'SzlaMost' | 'SzlaAlap' | 'SzlaNoEnv' | 'Szla8cm' | 'SzlaTomb' | 'SzlaFuvarlevelesAlap' -->
class Merchant(NamedTuple):
    """<elado>

    Seller-side fields: bank details plus the reply-to address, subject and
    body of the notification e-mail. Tags shown in the trailing comments.
    """
    bank_name: str = ""  # <bank>BB</bank>
    bank_account_number: str = ""  # <bankszamlaszam>11111111-22222222-33333333</bankszamlaszam>
    reply_email_address: str = ""  # <emailReplyto> </emailReplyto>
    email_subject: str = ""  # <emailTargy>Invoice notification</emailTargy>
    email_text: str = ""  # <emailSzoveg>mail text</emailSzoveg>
class Buyer(NamedTuple):
    """<vevo>

    Buyer identity, tax numbers, billing and delivery address, and contact
    fields; each attribute maps to the XML tag in its trailing comment.
    """
    name: str = ""  # <nev>Kovacs Bt.</nev>
    zip_code: str = ""  # <irsz>2030</irsz>
    city: str = ""  # <telepules>Érd</telepules>
    address: str = ""  # <cim>Tárnoki út 23.</cim>
    email: str = ""  # <email>buyer@example.com</email>
    send_email: bool = False  # <sendEmail>false</sendEmail>
    tax_number: str = ""  # <adoszam>12345678-1-42</adoszam>
    tax_number_eu: str = ""  # <adoszamEU>HU55555555</adoszamEU>  // needed for reverse_invoice|storno only
    delivery_name: str = ""  # <postazasiNev>Kovács Bt. mailing name</postazasiNev>
    delivery_zip: str = ""  # <postazasiIrsz>2040</postazasiIrsz>
    delivery_city: str = ""  # <postazasiTelepules>Budaörs</postazasiTelepules>
    delivery_address: str = ""  # <postazasiCim>Szivárvány utca 8.</postazasiCim>
    identification: str = ""  # <azonosito>1234</azonosito>
    phone_number: str = ""  # <telefonszam>Tel:+3630-555-55-55, Fax:+3623-555-555</telefonszam>
    comment: str = ""  # <megjegyzes>Call extension 214 from the reception</megjegyzes>
# class WayBill(NamedTuple):
# """<fuvarlevel>"""
# <!-- waybill/confinement note, you do not need this: omit the entire tag -->
# uticel: str = "" #
# futarSzolgalat: str = "" #
class ItemLedger(NamedTuple):
    # language=XML
    """
    <sequence>
        <element name="gazdasagiEsem" type="string" maxOccurs="1" minOccurs="0"></element>
        <element name="gazdasagiEsemAfa" type="string" maxOccurs="1" minOccurs="0"></element>
        <element name="arbevetelFokonyviSzam" type="string" maxOccurs="1" minOccurs="0"></element>
        <element name="afaFokonyviSzam" type="string" maxOccurs="1" minOccurs="0"></element>
        <element name="elszDatumTol" type="date" maxOccurs="1" minOccurs="0"></element>
        <element name="elszDatumIg" type="date" maxOccurs="1" minOccurs="0"></element>
    </sequence>
    """
    # Field-to-tag mapping presumably follows the schema order above -- verify:
    economic_event: str = ""  # <gazdasagiesemeny></gazdasagiesemeny>
    economic_event_tax: str = ""  # <gazdasagiesemenyafa></gazdasagiesemenyafa>
    sales_ledger_number: str = ""  # presumably <arbevetelFokonyviSzam>
    vat_ledger_number: str = ""  # presumably <afaFokonyviSzam>
    settlement_date_from: str = ""  # presumably <elszDatumTol>
    settlement_date_to: str = ""  # presumably <elszDatumIg>
class Item(NamedTuple):
    """One invoice line item; attributes map to the XML tags in the trailing comments."""
    name: str = ""  # <megnevezes>Elado izé</megnevezes>
    identifier: str = ""  # <azonosito>ASD-123</azonosito>
    quantity: str = ""  # <mennyiseg>1.0</mennyiseg>
    quantity_unit: str = ""  # <mennyisegiEgyseg>db</mennyisegiEgyseg>
    unit_price: str = ""  # <nettoEgysegar>10000</nettoEgysegar>
    vat_rate: str = ""  # <afakulcs>27</afakulcs>
    margin_tax_base: float = ""  # <arresAfaAlap>10.25</arresAfaAlap>  # NOTE(review): default "" contradicts the float annotation -- confirm intended type
    net_price: str = ""  # <nettoErtek>10000.0</nettoErtek>
    vat_amount: str = ""  # <afaErtek>2700.0</afaErtek>
    gross_amount: str = ""  # <bruttoErtek>12700.0</bruttoErtek>
    comment_for_item: str = ""  # <megjegyzes>lorem ipsum</megjegyzes>
    item_ledger: ItemLedger = ""  # <element name="tetelFokonyv" type="tns:tetelFokonyvTipus" maxOccurs="1" minOccurs="0"></element>  # NOTE(review): default "" is not an ItemLedger -- confirm
class Disbursement(NamedTuple):
    """A dated payment record: date, title, amount, and an optional description."""
    date: str
    title: str
    amount: float
    description: str = ""
class EmailDetails(NamedTuple):
    """Details of an outgoing e-mail: recipients, reply-to, subject and body."""
    addresses: str
    reply_to_address: str
    subject: str
    body_text: str = ""
class SzamlazzResponse:
    """Wrapper around the HTTP response returned by the szamlazz.hu API.

    Parses either an XML payload (Content-Type: application/octet-stream)
    or a raw PDF body, and exposes the `szlahu_*` response headers
    (invoice number, totals, error code/message, ...) as attributes.
    """

    def __init__(self,
                 response: Response,
                 xml_namespace: str,
                 ):
        """Extract PDF data, error info and invoice details from `response`.

        :param response: raw requests Response from the API call
        :param xml_namespace: namespace prefix used when looking up XML tags
        """
        self.xml_namespace = xml_namespace
        self.__response = response
        self.__action_success: bool = False

        content_type = response.headers.get("Content-Type")
        if content_type == "application/octet-stream":
            # Parse XML and map into class members
            root = ET.fromstring(self.__response.text)
            self.__pdf: str = self.__get_tag_text(root, "pdf")  # Base64 PDF from the <pdf> tag (may be None)
            self.__pdf_bytes: bytes = b""
            self.__action_success: bool = True if (self.__get_tag_text(root, "sikeres") == "true") else False  # <sikeres> flag
        else:
            # Non-XML responses carry the PDF bytes directly in the body.
            self.__pdf_bytes: bytes = response.content
            self.__pdf: str = base64.b64encode(self.__pdf_bytes).decode("ascii")

        # Error Handling
        self.error_code: str = response.headers.get("szlahu_error_code")
        self.error_message: str = response.headers.get("szlahu_error")
        if self.error_message:
            self.error_message = unquote(self.error_message)  # header value arrives URL-encoded
        self.http_request_success: str = "false" if self.error_code else "true"

        # Extract Details
        self.invoice_number: str = response.headers.get("szlahu_szamlaszam")
        self.invoice_net_price: str = response.headers.get("szlahu_nettovegosszeg")
        self.invoice_gross_price: str = response.headers.get("szlahu_bruttovegosszeg")
        self.receivables: str = response.headers.get("szlahu_kintlevoseg")
        self.buyer_account_url: str = response.headers.get("szlahu_vevoifiokurl")
        if self.buyer_account_url:
            self.buyer_account_url = unquote(response.headers.get("szlahu_vevoifiokurl"))
        self.payment_method: str = response.headers.get("szlahu_fizetesmod")

        self.__has_errors = self.error_code or self.error_message
        if self.has_errors:
            logger.error(f"Error Code: {self.error_code}")
            logger.error(f"Error Message: {self.error_message}")

    @property
    def action_success(self) -> bool:
        """True when the XML payload reported <sikeres> == "true"."""
        return self.__action_success

    @property
    def has_errors(self):
        """Truthy when an error code or error message header was present."""
        return self.__has_errors

    @property
    def ok(self):
        """
        Shortcut to the original response's attribute with the same name
        """
        return self.__response.ok

    @property
    def response(self) -> Response:
        """
        Original HTTP Response object returned by the requests package
        :return: requests.models.Response
        """
        return self.__response

    @property
    def text(self) -> str:
        """
        Shortcut to the original response's attribute with the same name
        """
        return self.__response.text

    def get_pdf_base64(self) -> str:
        """
        Get PDF from response in Base64 format
        :return: PDF (in Base64 format)
        :rtype: str
        :raises PdfDataMissingError: when neither Base64 nor raw PDF data exists
        """
        if (not self.__pdf) and (not self.__pdf_bytes):
            raise PdfDataMissingError("No PDF was returned. Check the value of szamlaLetoltes|invoice_download")
        return self.__pdf

    def get_pdf_bytes(self) -> bytes:
        """Return the PDF as raw bytes, decoding the Base64 form if needed."""
        pdf_base64 = self.get_pdf_base64()
        return base64.b64decode(pdf_base64) if pdf_base64 else self.__pdf_bytes

    def write_pdf_to_disk(self, pdf_output_path: Path):
        """Write the PDF bytes to `pdf_output_path`; its parent dir must exist."""
        if not pdf_output_path.parent.exists():
            raise FileNotFoundError(f"Output file's parent folder is missing: {pdf_output_path.parent.as_posix()}")
        data = self.get_pdf_bytes()
        with open(pdf_output_path, "wb+") as f:
            f.write(data)

    def print_errors(self) -> Tuple[str, str]:
        """
        Prints the returned error_code and error_message
        :return: Tuple[error_code, error_message]
        """
        if self.has_errors:
            print("error_code:", self.error_code)
            print("error_message:", self.error_message)
        return self.error_code, self.error_message

    def __get_tag_text(self, root: ET.Element, tag_name):
        """Return the text of `{namespace}tag_name` under `root`, or None if absent."""
        tag = root.find(f"{self.xml_namespace}{tag_name}")
        return tag.text if tag is not None else None
| [
"kristof.daja@semcon.com"
] | kristof.daja@semcon.com |
ed0a4b5efada1939d7898d3beec9436407d38b31 | 7c208711405aa6bd99106b94697028734e3fb1f9 | /app/campaign_rec.py | f3bbb988956ae76c790a81ac4f90a42ced4f46be | [] | no_license | tb16/fundraising-campaign | 8e280dcec7820c6b65ac7ce0b7a4edc68cde735d | a091d67fb3e314d081b6c8d3ec3a447bf134d498 | refs/heads/master | 2021-01-11T18:17:27.329103 | 2016-10-18T19:32:35 | 2016-10-18T19:32:35 | 69,335,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,454 | py | import pandas as pd
import numpy as np
import requests
import bs4
from string import punctuation
import cPickle as pickle
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
from tokenizer import tokenize
'''
campaign recommendation using cosine similarity of vectorised stories.
'''
# Module-level dataset shared by the functions below (similar_campaign reads it).
df = pd.read_csv('../data/featured_data1.csv')
def bag_of_words(df):
    '''
    Applies a TF-IDF vectorizer to the stories in the dataframe.

    df -- pandas DataFrame with a `story` column of raw text.
    Returns (vectorizer, sparse): the fitted TfidfVectorizer instance and
    the document-term sparse matrix for df.story.
    '''
    vectorizer = TfidfVectorizer(max_features = 4000, decode_error='ignore', max_df = 0.90, min_df= 2, stop_words = 'english', tokenizer = tokenize)
    # fit_transform both learns the vocabulary/idf and builds the matrix, so
    # the separate vectorizer.fit(df.story) call the original made was
    # redundant work (fit_transform refits from scratch anyway).
    sparse = vectorizer.fit_transform(df.story)
    return vectorizer, sparse
def pickle_vec(vectorizer, sparse):
    '''
    Pickle the vectorizer instance and sparse matrix under ../data/.

    Uses `with` context managers so the file handles are closed even when
    pickle.dump raises (the original open/close pairs leaked on error).
    '''
    with open('../data/vectorizer.pkl', 'w') as vectorizer_file:
        pickle.dump(vectorizer, vectorizer_file)
    with open('../data/sparse_mat.pkl', 'w') as sparse_file:
        pickle.dump(sparse, sparse_file)
def get_success_index(df):
    '''
    Return, as a numpy array, the index labels of rows whose `percentage`
    is at least 0.5 (campaigns counted as successful).
    '''
    success_mask = df.percentage >= 0.5
    success_labels = df.index[success_mask].tolist()
    return np.array(success_labels)
def download(url, *a, **kw):
    '''
    Download `url` with requests and return the parsed BeautifulSoup tree.
    Extra positional/keyword arguments are forwarded to requests.get.
    '''
    _user_agent = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 '
                   '(KHTML, like Gecko) Chrome/53.0.2785.116 Safari/537.36')
    # Inject a browser-like User-Agent into the caller's headers. Note this
    # overwrites any User-Agent the caller set inside kw['headers'].
    kw.setdefault('headers', {})['User-Agent'] = _user_agent
    return bs4.BeautifulSoup(requests.get(url, *a, **kw).text, 'html.parser')
def search_url(title):
    '''
    Search gofundme.com for a campaign title.

    Returns the first matching campaign link (scheme-prefixed), or the
    literal string 'link not found' when no search tile yields a usable link.
    '''
    # Renamed the local from `search_url` so it no longer shadows this function.
    base_url = 'https://www.gofundme.com/mvc.php?'
    soup = download(base_url, params={'term' : title, 'route': 'search'})
    for tile in soup.select('.search_tile'):
        # A tile without a '.name' element raises IndexError; an element
        # without an href raises KeyError. Skip only those tiles instead of
        # the original bare `except:` that silently hid every error.
        try:
            return 'https:'+tile.select('.name')[0]['href']
        except (IndexError, KeyError):
            continue
    return 'link not found'
def similar_campaign(vector, vectorizer, sparse_mat):
    '''
    Finds the similar success story to the given campaign. returns top 3 campaigns
    and keywords. similarity from cosine similarity with tfidf vectors. top words
    from tfidf values of a story.

    NOTE(review): reads the module-level `df` and uses Python 2 syntax
    (`print`, list-returning `map`); verify before running under Python 3.
    '''
    feature_names = np.array(vectorizer.get_feature_names())
    # linear_kernel on tf-idf rows == cosine similarity (rows are normalized).
    similarity = linear_kernel(vector, sparse_mat)
    top_indices_story = np.argsort(similarity.flatten())[-1::-1]  # most similar first
    success_indices = []
    for top_index in top_indices_story:
        if df.percentage[top_index] >= 0.5:  # keep only "successful" campaigns
            success_indices.append(top_index)
    keywords = []
    for idx in success_indices[:3]:
        # Top-10 tf-idf terms of the story, highest weight first.
        keywords_indices = np.argsort(sparse_mat[idx].toarray()).flatten()[-1:-11:-1]
        keywords.append(' '.join(feature_names[keywords_indices]))
    print success_indices[:3]
    # NOTE(review): output_df is a slice of the global df; the assignments
    # below may trigger pandas' SettingWithCopy behavior -- verify.
    output_df = df.iloc[success_indices[:3]]
    output_df['keywords'] = keywords
    output_df['url'] = map(search_url, output_df.title)  # one network lookup per row
    output_df.reset_index(inplace = True)
    return output_df[['category', 'days','title', 'story', 'friends','shares', 'goal', 'percentage', 'keywords', 'url']]
if __name__ == '__main__':
    # Vectorize the campaign stories and persist the artifacts for later use.
    # df = df[df['percentage'] >= 0.50]
    # df.to_csv('../data/featured_data_success.csv', index = False)
    vectorizer, sparse = bag_of_words(df)
    pickle_vec(vectorizer, sparse)
| [
"Komal@Teks-MacBook-Pro.local"
] | Komal@Teks-MacBook-Pro.local |
9e954532ac48853decc0acb672da57bb9369029b | 94aa3e2f78a8cc9a5b32af6a78197e79e876af3d | /Ex09_4_uses_only.py | e2e7285eefc034e3f5338572a40ccb07a565c9ac | [] | no_license | ishwarjindal/Think-Python | 7c41e190a86280d99b6e5bd1d2dcdcf84394bb92 | 9e997f87189357ad12b9893e2da3960843b0b853 | refs/heads/master | 2020-05-19T13:31:14.417642 | 2019-10-06T03:44:27 | 2019-10-06T03:44:27 | 185,041,968 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 994 | py | #Author : Ishwar Jindal
#Created On : 26-May-2019 05:19 PM IST
#Purpose : Find words that uses only the specificed letters
def has_no_char(word, char_to_be_missing):
for char in word:
if char == char_to_be_missing:
return False
return True
def uses_only(word, letters_to_use):
for letter in word:
if has_no_char(letters_to_use, letter) == False:
continue
else:
return False
return True
print("main started")
total = 0
matched = 0
letters_to_use = input("Enter the superset of letters to be used in word : ")
fin = open("words.txt")
for line in fin:
total = total + 1
word = line.strip()
if uses_only(word, letters_to_use):
matched = matched + 1
print(str.format("{0} does have all letters in {1}", word, letters_to_use))
print(str.format("{0} words out of {1} i.e. {2}% have all their letters in {3}", matched, total, round(matched*100/total, 2), letters_to_use))
print("main ended")
| [
"ishwarjindal@MacBook-Pro-4.local"
] | ishwarjindal@MacBook-Pro-4.local |
0649ed61513fbe6a09949d36e72df66a81bef44a | ec1eb0dd3ceeff03ab1e20c80259c5868a1a5004 | /project2/rl_federated_nas/glace_search_svhn.py | 556cb4600ca063cf21cae147f2417b3d5384845e | [] | no_license | dixiyao/CS385 | 601a7aebaa9a7644be472fbdaa002c7ca76a5816 | 405a5632dde1fef96ccb301c0994d783776c7108 | refs/heads/main | 2023-06-02T01:10:12.892223 | 2021-06-20T09:30:17 | 2021-06-20T09:30:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,604 | py | import os
import sys
import time
import glob
import numpy as np
import torch
import utils
import logging
import argparse
import torch.nn as nn
import torch.utils
import torch.nn.functional as F
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
import random
import copy
from torch.autograd import Variable
from model_search import Network
from model_search_local import MaskedNetwork
from architect import Architect
from federated import sample_mask, client_update, fuse_weight_gradient, init_gradient, client_weight_param, extract_index
from data_distribution import _data_transforms_cifar10, even_split, none_iid_split
from noniid_svhn import client_data
# Command-line configuration for the federated architecture search run.
parser = argparse.ArgumentParser("cifar")
# --- data / optimization hyper-parameters ---
parser.add_argument('--data', type=str, default='../data', help='location of the data corpus')
parser.add_argument('--client_batch_size', type=int, default=256, help='batch size')
parser.add_argument('--learning_rate', type=float, default=0.025, help='init learning rate')
parser.add_argument('--learning_rate_min', type=float, default=0.001, help='min learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=3e-4, help='weight decay')
parser.add_argument('--report_freq', type=float, default=1000, help='report frequency')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--epochs', type=int, default=10000, help='num of training epochs')
# --- supernet architecture ---
parser.add_argument('--init_channels', type=int, default=16, help='num of init channels')
parser.add_argument('--layers', type=int, default=8, help='total number of layers')
parser.add_argument('--model_path', type=str, default='saved_models', help='path to save the model')
parser.add_argument('--cutout', action='store_true', default=False, help='use cutout')
parser.add_argument('--cutout_length', type=int, default=16, help='cutout length')
parser.add_argument('--drop_path_prob', type=float, default=0.3, help='drop path probability')
parser.add_argument('--save', type=str, default='EXP', help='experiment name')
parser.add_argument('--seed', type=int, default=2, help='random seed')
parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping')
parser.add_argument('--train_portion', type=float, default=0.9, help='portion of training data')
parser.add_argument('--unrolled', action='store_true', default=False, help='use one-step unrolled validation loss')
# --- architecture (alpha) optimizer settings ---
parser.add_argument('--arch_learning_rate', type=float, default=3e-3, help='learning rate for arch encoding')
parser.add_argument('--arch_weight_decay', type=float, default=1e-3, help='weight decay for arch encoding')
parser.add_argument('--arch_baseline_decay', type=float, default=0.99, help='weight decay for reward baseline')
# --- federated-learning settings ---
parser.add_argument('--client', type=int, default=10, help='number of clients')
parser.add_argument('--glace_epoch', type=int, default=10000, help='number of epoch for freezing alpha')
parser.add_argument('--non_iid', action='store_true', default=False, help='use non iid dataset')
parser.add_argument('--fed_non_iid', action='store_true', default=False, help='use non iid distribution in FedNAS(CVPR20)')
parser.add_argument('--fed_selection', default=None, help='prepared distribution')
args = parser.parse_args()
# Create a timestamped experiment directory and snapshot all *.py scripts
# into it for reproducibility.
args.save = 'search-{}-{}'.format(args.save, time.strftime("%Y%m%d-%H%M%S"))
utils.create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))

# Log simultaneously to stdout and to <save_dir>/log.txt with the same format.
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
                    format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)

# Number of target classes (SVHN digits, same count as CIFAR-10).
CIFAR_CLASSES = 10
def main():
    """Run federated neural-architecture search: each round samples a masked
    sub-network per client, trains locally, fuses gradients into the global
    supernet, and periodically logs the architecture parameters (alphas)."""
    # A GPU is mandatory for this run.
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    # Seed numpy and torch (CPU + CUDA) for reproducibility.
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled=True
    torch.cuda.manual_seed(args.seed)

    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    criterion = nn.CrossEntropyLoss()
    # criterion = criterion.cuda()
    # Global supernet whose weights/alphas are updated from fused client gradients.
    global_model = Network(args.init_channels, CIFAR_CLASSES, args.layers, criterion)
    # global_model = global_model.cuda()
    # logging.info("param size = %fMB", utils.count_parameters_in_MB(global_model))
    global_optimizer = torch.optim.SGD(
        global_model.parameters(),
        args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.weight_decay)

    #train_transform, valid_transform = _data_transforms_cifar10()
    #dataset = dset.SVHN(root='../data', download=True, transform=train_transform)
    '''
    testds=[]
    for (img, label) in dataset:
        if label in [0,2,3,5,9]:
            testds.append((img,label))
    random.shuffle(testds)
    dataset=copy.deepcopy(testds)
    '''
    # Build one DataLoader per client.
    train_queues = []
    if args.fed_non_iid:
        # FedNAS-style non-iid partition prepared by noniid_svhn.client_data.
        train_queues=client_data(args.data,args.client,args.client_batch_size)
    else:
        # NOTE(review): `dataset` is undefined on this path — its construction
        # above is commented out, so these branches raise NameError; confirm
        # whether only --fed_non_iid is supported in this variant.
        if args.non_iid:
            user_split = none_iid_split(dataset, num_user=args.client)
        else:
            user_split = even_split(dataset, args.client)
        for i in range(args.client):
            train_data = user_split[i]
            num_train = len(train_data)
            indices = list(range(num_train))
            train_queue = torch.utils.data.DataLoader(
                train_data, batch_size=args.client_batch_size,
                sampler=torch.utils.data.sampler.SubsetRandomSampler(indices),
                pin_memory=True, num_workers=2)
            train_queues.append(train_queue)
    # valid_data = user_split[-1]
    # num_train = len(valid_data)
    # indices = list(range(num_train))
    # valid_queue = torch.utils.data.DataLoader(
    #     valid_data, batch_size=32,
    #     sampler=torch.utils.data.sampler.SubsetRandomSampler(indices),
    #     pin_memory=True, num_workers=2)

    # Cosine-anneal the global learning rate over all epochs.
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        global_optimizer, int(args.epochs), eta_min=args.learning_rate_min)
    global_architect = Architect(global_model, args)
    init_gradient(global_model)

    global_accuracy = []
    client_accuracy = []
    total_loss = []
    for epoch in range(args.epochs):
        # NOTE(review): scheduler.step() before optimizer.step() is the old
        # (pre-1.1) PyTorch ordering — confirm against the pinned torch version.
        scheduler.step()
        lr = scheduler.get_lr()[0]
        logging.info('epoch %d lr %e', epoch, lr)
        # genotype = global_model.genotype()
        # logging.info('genotype = %s', genotype)
        #
        # Sample one masked sub-network per client from the current alphas.
        client_models = []
        epoch_acc = []
        epoch_loss = []
        epoch_index_normal = []
        epoch_index_reduce = []
        for client_idx in range(args.client):
            mask_normal = sample_mask(global_model.alphas_normal)
            mask_reduce = sample_mask(global_model.alphas_reduce)
            index_normal = extract_index(mask_normal)
            index_reduce = extract_index(mask_reduce)
            client_model = MaskedNetwork(args.init_channels, CIFAR_CLASSES, args.layers, criterion, mask_normal, mask_reduce)
            client_models.append(client_model)
            epoch_index_normal.append(index_normal)
            epoch_index_reduce.append(index_reduce)
        # copy weight of global model to client models
        # alphas in client models are actually gates, and equal to 1 forever
        client_weight_param(global_model, client_models)
        # Local training round on each client's private data.
        for client_idx in range(args.client):
            client_model = client_models[client_idx]
            client_models[client_idx], acc, loss = client_update(train_queues[client_idx], client_model, criterion)
            epoch_acc.append(acc)
            epoch_loss.append(loss)
        avg_acc = float(torch.mean(torch.Tensor(epoch_acc)))
        avg_loss = float(torch.mean(torch.Tensor(epoch_loss)))
        logging.info("client accuracy: " + str(epoch_acc))
        logging.info("client loss: " + str(epoch_loss))
        logging.info("client accuracy: "+str(avg_acc)+" , loss: "+str(avg_loss))
        client_accuracy.append(avg_acc)
        total_loss.append(avg_loss)
        # Aggregate client gradients into the global supernet, then apply them.
        fuse_weight_gradient(global_model,client_models)
        global_optimizer.step()
        global_optimizer.zero_grad()
        # if epoch > args.glace_epoch:
        #     global_architect.step(epoch_acc,epoch_index_normal,epoch_index_reduce)
        # Periodic reporting: dump alpha distributions and checkpoint weights.
        if (epoch+1) % args.report_freq == 0:
            # valid_acc, valid_obj = infer(valid_queue,global_model,criterion)
            # logging.info('valid_acc %f', valid_acc)
            # global_accuracy.append(valid_acc)
            logging.info("alphas normal")
            logging.info(F.softmax(global_model.alphas_normal, dim=-1))
            logging.info("alphas reduce")
            logging.info(F.softmax(global_model.alphas_reduce, dim=-1))
            utils.save(global_model, os.path.join(args.save, 'weights_epoch'+str(epoch)+'.pt'))

    # Final summary of the whole search run.
    logging.info("*** final log ***")
    logging.info("loss")
    logging.info(total_loss)
    logging.info("client accuracy")
    logging.info(client_accuracy)
    logging.info("global accuracy")
    logging.info(global_accuracy)
def infer(valid_queue, model, criterion):
    """Evaluate *model* on *valid_queue*.

    Moves the model to GPU for evaluation and back to CPU afterwards.
    Returns (average top-1 accuracy, average loss).
    """
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    model = model.cuda()
    model.eval()
    # torch.no_grad() replaces the removed Variable(..., volatile=True) API.
    with torch.no_grad():
        for step, (input, target) in enumerate(valid_queue):
            input = input.cuda()
            # `async=True` is a SyntaxError since Python 3.7 (`async` became a
            # keyword); the PyTorch replacement is `non_blocking=True`.
            target = target.cuda(non_blocking=True)

            logits = model(input)
            loss = criterion(logits, target)

            prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
            n = input.size(0)
            objs.update(loss.data.item(), n)
            top1.update(prec1.data.item(), n)
            top5.update(prec5.data.item(), n)

            # if step % args.report_freq == 0:
            #     logging.info('valid %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
    model = model.cpu()
    return top1.avg, objs.avg
# Script entry point: only run the search when executed directly.
if __name__ == '__main__':
    main()
| [
"noreply@github.com"
] | noreply@github.com |
2804da30f453e1a5fd2133a3e59538c1b210397e | 8b37b0378f1b423629633c7cc8e22becca640641 | /runs/deep_forest_ranger_outpost.py | dbfba7bec08ca23410551657f4d2fcd1b330c2b2 | [] | no_license | destor/zombii | 11b7617a1205b04da10f351b834151651d1e150b | f9e658c128e6fc8c4551237143d2c1c1a5ccea9f | refs/heads/master | 2021-09-23T08:56:06.863946 | 2018-09-21T04:39:30 | 2018-09-21T04:39:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | # vim: syn=python
from align import *
# Route definition consumed by the run framework: an ordered list of step
# dicts describing a trip to the Deep Forest Ranger Outpost and back.
# NOTE(review): field semantics below are inferred from this literal alone —
# confirm against the framework that reads FILE.
FILE = [
    {
        # Announcement shown at the starting landmark ("9w").
        'announce': '9w',
    },
    {
        # Semicolon-separated movement commands walked to reach the outpost.
        'path': '6 w;sw;7 w;sw;5 w;4 sw;6 w;sw;w;sw;enter',
        'name': '__announce__',
        'announce': 'Deep Forest Ranger Outpost: Grizmogh',
        'summary': True,
        'skip': 4,
    },
    {
        # Target NPC at this stop, with alignment constant from `align`.
        'target': 'commander',
        'alignment': SLIGHTLY_GOOD,
        'announce': 'Grizmogh 4.5m',
        'out': 'out',
        'in': 'enter',
        'warnings': "Uses 'hurl axes' randomly",
        'skip': 2,
    },
    {
        'announce': 'Deep Forest Ranger Outpost',
    },
    {
        # Return path back to the "9w" landmark.
        'path': 'out;ne;e;ne;6 e;4 ne;5 e;ne;7 e;ne;6 e',
        'name': '__announce__',
        'announce': '9w',
    },
    {
        'name': 'Unknown',
    },
]
| [
"alexander@schrepfer.us"
] | alexander@schrepfer.us |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.