blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
281
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 6
116
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 313
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 18.2k
668M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
38.2k
| gha_license_id
stringclasses 17
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 107
values | src_encoding
stringclasses 20
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.02M
| extension
stringclasses 78
values | content
stringlengths 2
6.02M
| authors
listlengths 1
1
| author
stringlengths 0
175
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f03e0bd25b2839aff153fea90abc924e46a6584e
|
1bdc56d1f66501bada19b277a47655dc99f44f2e
|
/const.py
|
c6f26b24ff8369d500a57235ad9d6e874677e6b2
|
[] |
no_license
|
antista/pacman
|
682811715b930db0c8765d5da9340f91d8f4e8b7
|
adb51eb219c6758dc553671ddc68db700a6df358
|
refs/heads/master
| 2020-04-17T07:56:49.953567
| 2019-01-18T11:02:31
| 2019-01-18T11:02:31
| 166,391,144
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,379
|
py
|
import pyganim
import pygame  # FIX: pygame.transform/pygame.image are used below, but pygame
               # itself was never imported (only star-imported names were)
from pygame import *
from pyganim import *

# Sound effect files, keyed by game event.
SOUNDS = dict(
    wakka='sounds/wakka.wav',
    energizer='sounds/energizer.wav',
    eat_ghost='sounds/eating_ghost.wav',
    death='sounds/death.wav'
)

SIZE = 16                # tile size in pixels
BACK_COLOR = "#00FFFF"   # background fill color
ANIMATION_DELAY = 50     # frame change speed (ms per frame)

# One forward-and-back chewing cycle of the pacman mouth frames.
_FRAME_FILES = ['images/moving/m%d.ico' % i
                for i in (1, 2, 3, 4, 5, 6, 6, 5, 4, 3, 2, 1)]


def _rotated_frames(angle):
    """Load every cycle frame rotated counter-clockwise by *angle* degrees."""
    return [pygame.transform.rotate(pygame.image.load(name), angle)
            for name in _FRAME_FILES]


# Movement animation frames per direction. 'right' keeps plain file paths
# (as in the original); the other directions are pre-rotated Surfaces.
ANIMATION = dict()
ANIMATION['right'] = list(_FRAME_FILES)
ANIMATION['left'] = _rotated_frames(180)
ANIMATION['up'] = _rotated_frames(90)
ANIMATION['down'] = _rotated_frames(-90)

# Single-frame "standing" animation per direction: (frame, duration) pairs.
_STAY_FILE = 'images/moving/m6.ico'
ANIMATION_STAY = dict()
ANIMATION_STAY['left'] = [(pygame.transform.rotate(pygame.image.load(_STAY_FILE), 180), 1)]
ANIMATION_STAY['right'] = [(_STAY_FILE, 1)]
ANIMATION_STAY['up'] = [(pygame.transform.rotate(pygame.image.load(_STAY_FILE), 90), 1)]
ANIMATION_STAY['down'] = [(pygame.transform.rotate(pygame.image.load(_STAY_FILE), -90), 1)]
|
[
"anti2100@yandex.ru"
] |
anti2100@yandex.ru
|
a8885f69c487b2f187926f4fa20b933388d0a0d1
|
50ed16359e7a180298e847c4866ff2b45b3f3815
|
/scripts/computeNumbers.py
|
e07b5acf8876c8f9cd7ac521858d44c012313e7f
|
[] |
no_license
|
bfildier/Fildier2022_code
|
cde8fac4c01597e8ea7f631913aee229e725ffbd
|
8cd2c5e78b85ccc89544f2c6698b7717dd7a1537
|
refs/heads/main
| 2023-04-18T01:28:39.748615
| 2022-12-05T16:14:40
| 2022-12-05T16:14:40
| 574,580,725
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,112
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 21 11:46:19 2022
Numbers in PNAS main 2022
@author: bfildier
"""
##-- modules
import scipy.io
import sys, os, glob
import numpy as np
import xarray as xr
import matplotlib
from matplotlib import cm
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
from matplotlib.patches import Circle
from PIL import Image
from datetime import datetime as dt
from datetime import timedelta, timezone
import pytz
import matplotlib.image as mpimg
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import pickle
from scipy.stats import gaussian_kde
from scipy.stats import linregress
from scipy import optimize
from matplotlib.patches import Ellipse
import matplotlib.transforms as transforms
##-- directories

# workdir = os.path.dirname(os.path.realpath(__file__))
# NOTE(review): hard-coded absolute user path; the commented line above is the
# portable alternative — confirm before running on another machine.
workdir = '/Users/bfildier/Code/analyses/EUREC4A/EUREC4A_organization/scripts'
repodir = os.path.dirname(workdir)
moduledir = os.path.join(repodir,'functions')
resultdir = os.path.join(repodir,'results','radiative_features')
figdir = os.path.join(repodir,'figures','paper')
#inputdir = '/Users/bfildier/Dropbox/Data/EUREC4A/sondes_radiative_profiles/'
inputdir = os.path.join(repodir,'input')
radinputdir = os.path.join(repodir,'input')
imagedir = os.path.join(repodir,'figures','snapshots','with_HALO_circle')
scriptsubdir = 'Fildier2021'

# Load own module
projectname = 'EUREC4A_organization'
# handle on this module itself, used later for getattr-based variable lookup
thismodule = sys.modules[__name__]

## Own modules
sys.path.insert(0,moduledir)
print("Own modules available:", [os.path.splitext(os.path.basename(x))[0]
    for x in glob.glob(os.path.join(moduledir,'*.py'))])

# star imports: presumably supply rad helpers and constants such as
# gg, L_v, R_d, R_v, c_pd, pi used below — TODO confirm against thermoConstants
from radiativefeatures import *
from radiativescaling import *
# from thermodynamics import *
from conditionalstats import *
from matrixoperators import *
from thermoConstants import *

mo = MatrixOperators()
##--- local functions
def defineSimDirectories():
    """Create the figure output directory if it does not already exist."""
    # FIX: os.path.join() with a single argument is a no-op; use figdir directly.
    os.makedirs(figdir, exist_ok=True)
if __name__ == "__main__":

    # arguments
    import argparse  # FIX: argparse was used here but never imported at file top
    parser = argparse.ArgumentParser(description="Compute paper numbers from all precomputed data")
    parser.add_argument('--overwrite', type=bool, nargs='?', default=False)
    # NOTE(review): parser.parse_args() is never called, so --overwrite is
    # currently inert — confirm whether it should be wired in.

    # output directory
    defineSimDirectories()

    ##-- Load all data
    # Runs the sibling load_data.py in this namespace (defines data_all,
    # rad_features_all, rad_scaling_all, ... used by the cells below).
    # NOTE(review): exec of file contents — fine for a trusted local script,
    # but never do this with untrusted paths.
    exec(open(os.path.join(workdir, "load_data.py")).read())
#%% Reference wavenumbers

print('-- compute reference wavenumbers --')
print()

# reference state
T_ref = 290 # K
W_ref = 3 # mm
print('choose reference temperature T = %3.1fK'%T_ref)
print('choose reference water path W = %3.1fmm'%W_ref)
print()

print("> compute reference wavenumber ")
kappa_ref = 1/W_ref # mm-1
# rad_scaling_all is presumably defined by load_data.py — TODO confirm
rs = rad_scaling_all['20200202']
nu_ref_rot = rs.nu(kappa_ref,'rot')  # rotational band
nu_ref_vr = rs.nu(kappa_ref,'vr')    # vibration-rotation band
print('reference wavenumber in rotational band: nu = %3.1f cm-1'%(nu_ref_rot/1e2))
print('reference wavenumber in vibration-rotation band: nu = %3.1f cm-1'%(nu_ref_vr/1e2))
print()

print("> Planck function at both reference wavenumbers")
# pi presumably comes from a star-imported module, not math — TODO confirm
piB_ref_rot = pi*rs.planck(nu_ref_rot,T_ref)
piB_ref_vr = pi*rs.planck(nu_ref_vr,T_ref)
print('reference Planck term in rotational band: piB = %3.4f J.s-1.sr-1.m-2.cm'%(piB_ref_rot*1e2))
print('reference Planck term in vibration-rotation band: piB = %3.4f J.s-1.sr-1.m-2.cm'%(piB_ref_vr*1e2))
#%% Alpha

#-- Analytical approximation

# show temperature profiles
day = '20200126'
date = pytz.utc.localize(dt.strptime(day,'%Y%m%d'))
data_day = data_all.sel(launch_time=day)
f = rad_features_all[day]

# colors, mapped from precipitable water
var_col = f.pw
norm = matplotlib.colors.Normalize(vmin=var_col.min(), vmax=var_col.max())
cmap = plt.cm.nipy_spectral
cmap = plt.cm.RdYlBu  # overrides the previous line; RdYlBu is the one in effect
cols = cmap(norm(var_col))

# N data
Ns = data_day.dims['launch_time']

# Exploratory figure for lapse rate
fig,ax = plt.subplots()
for i_s in range(Ns):
    ax.plot(data_day.temperature[i_s],data_day.alt,c=cols[i_s],linewidth=0.5,alpha=0.5)

# index slices for the free-troposphere (FT) and boundary-layer (BL) fits
s_fit_FT = slice(200,600)
s_fit_BL = slice(0,160)

for suff in '_FT','_BL':
    # pick the matching slice defined above via this module's namespace
    s_fit = getattr(thismodule,'s_fit%s'%suff)
    s_dry = f.pw < 30 # mm — mask of "dry" sondes
    temp_mean = np.nanmean((data_day.temperature)[s_dry],axis=0)
    not_nan = ~np.isnan(temp_mean)
    z_fit = data_day.alt[not_nan][s_fit]
    # regress
    slope, intercept, r, p, se = scipy.stats.linregress(z_fit,temp_mean[not_nan][s_fit])
    # show
    ax.plot(slope*z_fit+intercept,z_fit,'k')
    #!- analytical alpha
    Gamma = -slope  # lapse rate: positive when T decreases with height
    T_ref = 290
    # L_v, gg, R_d, R_v presumably from the thermoConstants star import — TODO confirm
    alpha_an = L_v*Gamma/gg/T_ref * R_d/R_v - 1
    print('alpha_analytical%s ='%suff,alpha_an)

ax.set_xlabel('T (K)')
ax.set_ylabel('z (km)')
#%% Inversion

Ns = rad_scaling_all[day].rad_features.pw.size
fig,ax = plt.subplots()
# Ns = data_all.temperature.shape[0]
for i_s in range(Ns):
    # potential temperature theta = T * (p0/p)^(R_d/c_pd) with p0 = 1e5 Pa
    theta = data_day.temperature[i_s] * (1e5/data_day.pressure[i_s])**(R_d/c_pd)
    ax.plot(theta,data_day.pressure[i_s]/100,c = cols[i_s],alpha=0.2)
ax.invert_yaxis()
ax.set_ylabel('p (hPa)')
ax.set_xlabel(r'Potential temperature $\theta$ (K)')

#%% Water paths vs RH

# alpha_qvstar = 2.3
# qvstar_0 = 0.02
# qvstar_power = qvstar_0 * np.power(pres_fit/pres_fit[-1],alpha_qvstar)
def waterPath(qvstar_surf,pres,pres_jump,rh_min,rh_max,alpha,i_surf=-1):
    """Water path from top of atmosphere, in mm.

    Assumes saturated specific humidity follows a power law of pressure
    anchored at the surface value, with a two-layer RH profile (rh_max
    below pres_jump, rh_min above).

    - qvstar_surf: surface saturated specific humidity (kg/kg)
    - pres: reference pressure array (hPa)
    - pres_jump: level of RH jump (hPa)
    - rh_min: upper-tropospheric RH
    - rh_max: lower-tropospheric RH
    - alpha: power exponent
    - i_surf: index of surface layer in array (default is -1, last element)

    Returns an array (same shape as pres) of water path in mm.
    Relies on the module-level gravity constant gg (from thermoConstants).
    """
    hPa_to_Pa = 100
    rho_w = 1e3 # kg/m3
    m_to_mm = 1e3
    # init
    W = np.full(pres.shape,np.nan)
    # normalization constant of the qvstar power law
    # (FIX: removed leftover debug print(A))
    A = qvstar_surf/(pres[i_surf]*hPa_to_Pa)**alpha/gg/(1+alpha)
    # lower troposphere: integral picks up the RH jump correction term
    lowert = pres >= pres_jump
    W[lowert] = A*(rh_max*(pres[lowert]*hPa_to_Pa)**(alpha+1)-(rh_max-rh_min)*(pres_jump*hPa_to_Pa)**(alpha+1))
    # upper troposphere
    uppert = pres < pres_jump
    W[uppert] = A*rh_min*(pres[uppert]*hPa_to_Pa)**(alpha+1)
    return W/rho_w*m_to_mm
qvstar_0 = 0.02        # surface saturated specific humidity (kg/kg)
pres_fit = np.linspace(0,1000,1001)  # hPa
pres_jump = 800 # hPa
rh_min = 1
rh_max = 1
alpha_qvstar = 2.3     # power-law exponent for qvstar(p)
# saturated water path profile (RH = 1 everywhere)
W_prof = waterPath(qvstar_0,pres_fit,pres_jump,rh_min,rh_max,alpha_qvstar)
i_jump = np.where(pres_fit >= pres_jump)[0][0]  # first level at/below the jump
W_FT = W_prof[i_jump]  # free-tropospheric saturated water path
print('Free tropospheric water path at saturation (qvstar integral) =',W_FT)
# rescale linearly for uniform free-tropospheric RH values
print('with uniform RH_t = 1%, W =',W_FT/100)
print('with uniform RH_t = 5%, W =',W_FT*0.05)
print('with uniform RH_t = 50%, W =',W_FT*0.5)
print('with uniform RH_t = 80%, W =',W_FT*0.8)
|
[
"bfildier.work@gmail.com"
] |
bfildier.work@gmail.com
|
1cb69e60aa615509cf524ab1fb086168647ae432
|
7dc80048f72e106f977b49ea882c63cc9623e3ef
|
/notebooks/other/Y2017M07D28_RH_python27setup_v01.py
|
250e214bbfc2fd21afe44797cb7e69bbeb700a16
|
[] |
no_license
|
YanCheng-go/Aqueduct30Docker
|
8400fdea23bfd788f9c6de71901e6f61530bde38
|
6606fa03d145338d48101fc53ab4a5fccf3ebab2
|
refs/heads/master
| 2022-12-16T03:36:25.704103
| 2020-09-09T14:38:28
| 2020-09-09T14:38:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 742
|
py
|
# coding: utf-8

# # Test Python 27 setup
#
# * Purpose of script: test python 27 environement against several libraries
# * Author: Rutger Hofste
# * Kernel used: python27
# * Date created: 20170728

# Availability per library: -1 = untested, 1 = importable, 0 = missing.
packages = {"earth engine": -1, "gdal": -1, "geopandas": -1, "arcgis": -1}

# FIX: the probes below used bare `except:`, which also swallows
# KeyboardInterrupt/SystemExit and hides unrelated errors; catch ImportError.

try:
    import ee
    packages["earth engine"] = 1
except ImportError:
    packages["earth engine"] = 0

try:
    from osgeo import gdal
    packages["gdal"] = 1
except ImportError:
    packages["gdal"] = 0

try:
    import geopandas
    packages["geopandas"] = 1
except ImportError:
    packages["geopandas"] = 0

try:
    import arcgis.gis
    packages["arcgis"] = 1
except ImportError:
    packages["arcgis"] = 0

print(packages)
# In[ ]:
|
[
"rutgerhofste@gmail.com"
] |
rutgerhofste@gmail.com
|
c596b6a116427c9d0e40510a7bac545c5ed464a6
|
f98f6746851790aabeb996fafe74a24236bb580d
|
/is_prime_number.py
|
373c717dc73f6683a6918d54130d6e0b43452f31
|
[] |
no_license
|
licheeee/PythonProject
|
b8c619cfbbe2f0e70284ffc2c0e9283c41d6f58c
|
9c114f32b51e6f8dc275cb36cb8b0e05e1c42548
|
refs/heads/master
| 2020-04-23T06:12:04.958043
| 2019-10-17T15:15:58
| 2019-10-17T15:15:58
| 170,965,853
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 323
|
py
|
# -*- coding: UTF-8 -*-
# Determine whether a number is prime.


def is_prime(num):
    """Return True if *num* is prime (trial division up to sqrt(num))."""
    if num < 2:
        # FIX: the original for/else reported 0, 1 and negatives as prime
        # because the trial-division loop never ran for them.
        return False
    for i in range(2, int(num ** 0.5) + 1):
        if num % i == 0:
            return False
    return True


if __name__ == "__main__":
    num = int(input("Please input a number :"))
    if is_prime(num):
        print("{0} is a prime number.".format(num))
    else:
        print("{0} is not a prime number.".format(num))
|
[
"qiaoxw@outlook.com"
] |
qiaoxw@outlook.com
|
6d233bd2ae30ac3ff55e44e216f83f7ca5974969
|
887d21782f2741d8a273807642346ab7cd0dac6e
|
/list_files.py
|
4cbc2e8e2b36f40a7c36ec26d8d15585aba26e85
|
[] |
no_license
|
TheRealTimCameron/Sandbox
|
c375ff356710fe4a1935ddd86603731240c7283e
|
4778376c0a018065b50f6ba4abcd6cfac344d538
|
refs/heads/master
| 2020-04-30T13:37:00.562336
| 2019-03-21T03:46:29
| 2019-03-21T03:46:29
| 176,863,481
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 131
|
py
|
import os

# Report every entry (file or folder) in the current working directory.
here = os.getcwd()
print("The files and folders in {} are:".format(here))
items = os.listdir('.')
for entry in items:
    print(entry)
|
[
"TimCameron56@gmail.com"
] |
TimCameron56@gmail.com
|
207ca51d306160bcb1b64211690cf57342453446
|
2f43dd9eae7c3a290a50599305fac5106b2dd7cf
|
/webempresa/services/models.py
|
0277ee4a4986340cfebae70d45014dd4b5affc40
|
[] |
no_license
|
FernandoHer/maderamandala
|
2f4a1713ea4e067198f74ca00ae7197a606f3524
|
eec89b421337b36840ec5fe4ff65d773bba0d870
|
refs/heads/master
| 2022-11-25T16:37:08.917485
| 2020-04-07T20:54:15
| 2020-04-07T20:54:15
| 253,895,809
| 0
| 0
| null | 2022-11-22T02:25:07
| 2020-04-07T19:45:24
|
Python
|
UTF-8
|
Python
| false
| false
| 723
|
py
|
from django.db import models
# Create your models here.
class Service(models.Model):
    """A service offered on the site, rendered on the services page."""
    # display fields (verbose_name strings are user-facing Spanish labels)
    title = models.CharField(max_length=200, verbose_name = "Titulo")
    subtitle = models.CharField(max_length=200, verbose_name = "Subtitulo")
    content = models.TextField(verbose_name = "Contenido")
    image = models.ImageField(verbose_name = "Imagen", upload_to="services")
    # timestamps: created is set once, updated on every save
    created = models.DateTimeField(auto_now_add=True, verbose_name = "Fecha de Creacion")
    updated = models.DateTimeField(auto_now=True, verbose_name = "Fecha de actualizacion")

    class Meta:
        verbose_name = "servicio"
        verbose_name_plural = "servicios"
        ordering = ["-created"]  # newest first

    def __str__(self):
        return self.title
|
[
"juanherdoiza@iMac-de-Juan.local"
] |
juanherdoiza@iMac-de-Juan.local
|
c6119fca8e49b7cc3081a8d3441946e564c44017
|
24a3645595fb5aa6f4e0484c8b9e6fbcf31ae5a5
|
/rl_loop/train_and_validate.py
|
3be4f997d7bb5dc358cc305377386e12b7305276
|
[
"Apache-2.0"
] |
permissive
|
2series/minigo
|
dcec298021e714fb8e203b847dd2d7a9d9451823
|
fda1487dff94a710e9359f80c28d08d99d6c3e3c
|
refs/heads/master
| 2020-04-05T20:35:18.809871
| 2018-11-12T09:18:53
| 2018-11-12T09:18:53
| 157,187,163
| 1
| 0
| null | 2018-11-12T09:20:43
| 2018-11-12T09:20:42
| null |
UTF-8
|
Python
| false
| false
| 3,698
|
py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Run train and validate in a loop, as subprocesses.
We run as subprocesses because it gives us some isolation.
"""
import itertools
import os
import sys
import time
sys.path.insert(0, '.')
from absl import app, flags
from tensorflow import gfile
from rl_loop import fsdb
import mask_flags
from rl_loop import shipname
import utils
flags.DEFINE_string('pro_dataset', None,
                    'Location of preprocessed pro dataset for validation')

# From fsdb.py - must pass one of the two.
flags.declare_key_flag('base_dir')
flags.declare_key_flag('bucket_name')

FLAGS = flags.FLAGS

# Fail fast at import time if no TPU is configured in the environment.
try:
    TPU_NAME = os.environ['TPU_NAME']
except KeyError:
    raise Exception("Must have $TPU_NAME configured")
def train():
    """Train the next model generation from gathered game data.

    Blocks until the selfplay pipeline has produced the golden chunk for the
    new generation, then runs train.py as a subprocess. Returns the
    subprocess result so the caller can inspect returncode.
    """
    model_num, model_name = fsdb.get_latest_model()
    print("Training on gathered game data, initializing from {}".format(
        model_name))
    new_model_num = model_num + 1
    new_model_name = shipname.generate(new_model_num)
    print("New model will be {}".format(new_model_name))
    training_file = os.path.join(
        fsdb.golden_chunk_dir(), str(new_model_num) + '.tfrecord.zz')
    # poll once a minute until the training chunk appears
    while not gfile.Exists(training_file):
        print("Waiting for", training_file)
        time.sleep(1 * 60)
    save_file = os.path.join(fsdb.models_dir(), new_model_name)
    cmd = ['python3', 'train.py', training_file,
           '--use_tpu',
           '--tpu_name={}'.format(TPU_NAME),
           '--flagfile=rl_loop/distributed_flags',
           '--export_path={}'.format(save_file)]
    return mask_flags.run(cmd)
def validate_holdout_selfplay():
    """Validate on held-out selfplay data."""
    # One entry per (hourly directory, game file) pair, newest hours first —
    # so taking the first 20,000 entries spans enough hours for 20,000 games.
    holdout_dirs = (os.path.join(fsdb.holdout_dir(), d)
                    for d in reversed(gfile.ListDirectory(fsdb.holdout_dir()))
                    if gfile.IsDirectory(os.path.join(fsdb.holdout_dir(), d))
                    for f in gfile.ListDirectory(os.path.join(fsdb.holdout_dir(), d)))
    # This is a roundabout way of computing how many hourly directories we need
    # to read in order to encompass 20,000 holdout games.
    # BUG FIX: the cap belongs inside islice(); the original called
    # set(itertools.islice(holdout_dirs), 20000), which raises TypeError
    # (islice missing its stop argument, set() given two arguments).
    holdout_dirs = set(itertools.islice(holdout_dirs, 20000))
    cmd = ['python3', 'validate.py'] + list(holdout_dirs) + [
        '--use_tpu',
        '--tpu_name={}'.format(TPU_NAME),
        '--flagfile=rl_loop/distributed_flags',
        '--expand_validation_dirs']
    mask_flags.run(cmd)
def validate_pro():
    """Validate on professional data."""
    # Assemble the fixed flag arguments separately, then run validate.py
    # as a subprocess against the configured pro dataset.
    extra_flags = [
        '--use_tpu',
        '--tpu_name={}'.format(TPU_NAME),
        '--flagfile=rl_loop/distributed_flags',
        '--validate_name=pro',
    ]
    mask_flags.run(['python3', 'validate.py', FLAGS.pro_dataset] + extra_flags)
def loop(unused_argv):
    """Alternate training and validation forever.

    Skips validation for any generation whose training subprocess failed.
    """
    while True:
        print("=" * 40)
        with utils.timer("Train"):
            completed_process = train()
        if completed_process.returncode > 0:
            print("Training failed! Skipping validation...")
            continue
        with utils.timer("Validate"):
            validate_pro()
            validate_holdout_selfplay()
if __name__ == '__main__':
    # --pro_dataset has no default; force callers to supply it.
    flags.mark_flag_as_required('pro_dataset')
    app.run(loop)
|
[
"brian.kihoon.lee@gmail.com"
] |
brian.kihoon.lee@gmail.com
|
edbfd9f211f972906a7be68a3b1de4ba080d1d03
|
4e2a22470c983bc6f8463b4d0bd2563e2b4fadba
|
/manage.py
|
91afffd0eea54135379279692eb3ab4988697b8b
|
[] |
no_license
|
payush/ayush-crowdbotics-375
|
8537f9a86fcdcda7418a0c10a5f258bafc07dd9c
|
c11bdd721d91e765bcb04379dac476279e6ca599
|
refs/heads/master
| 2020-03-23T22:34:09.700234
| 2018-07-24T16:09:19
| 2018-07-24T16:09:19
| 142,182,994
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 819
|
py
|
#!/usr/bin/env python
# Standard Django management entry point (django-admin generated).
import os
import sys

if __name__ == "__main__":
    # Point Django at this project's settings before anything imports them.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ayush_crowdbotics_375.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)
|
[
"ayushpuroheet@gmail.com"
] |
ayushpuroheet@gmail.com
|
fa831199505226547d9cfa53b8caf0ccbd1afd58
|
fa7e75212e9f536eed7a78237a5fa9a4021a206b
|
/OLD_ROOT/Backend/SMQTK_Backend/utils/jsmin/test.py
|
7aba6993dc941efa2e6ea9557fd99d5a9b43b720
|
[] |
no_license
|
kod3r/SMQTK
|
3d40730c956220a3d9bb02aef65edc8493bbf527
|
c128e8ca38c679ee37901551f4cc021cc43d00e6
|
refs/heads/master
| 2020-12-03T09:12:41.163643
| 2015-10-19T14:56:55
| 2015-10-19T14:56:55
| 44,916,678
| 1
| 0
| null | 2015-10-25T15:47:35
| 2015-10-25T15:47:35
| null |
UTF-8
|
Python
| false
| false
| 8,702
|
py
|
import unittest
import sys
# modified path since this is now being embeded in another project.
from SMQTK_Backend.utils import jsmin
class JsTests(unittest.TestCase):
    """Regression tests for the embedded jsmin JavaScript minifier.

    NOTE(review): the multi-line JS fixtures compare minifier output
    byte-for-byte, so whitespace inside the string literals is significant.
    """

    def _minify(self, js):
        # Thin helper around the module-level entry point.
        return jsmin.jsmin(js)

    def assertEqual(self, thing1, thing2):
        # NOTE(review): this overrides unittest.TestCase.assertEqual and loses
        # its descriptive failure messages; it prints both values and raises a
        # bare AssertionError instead.
        if thing1 != thing2:
            print(repr(thing1), repr(thing2))
            raise AssertionError
        return True

    def assertMinified(self, js_input, expected):
        # Minify and compare against the exact expected output.
        minified = jsmin.jsmin(js_input)
        assert minified == expected, "%r != %r" % (minified, expected)

    def testQuoted(self):
        # String literals and their escapes must survive minification intact.
        js = r'''
Object.extend(String, {
interpret: function(value) {
return value == null ? '' : String(value);
},
specialChar: {
'\b': '\\b',
'\t': '\\t',
'\n': '\\n',
'\f': '\\f',
'\r': '\\r',
'\\': '\\\\'
}
});
'''
        expected = r"""Object.extend(String,{interpret:function(value){return value==null?'':String(value);},specialChar:{'\b':'\\b','\t':'\\t','\n':'\\n','\f':'\\f','\r':'\\r','\\':'\\\\'}});"""
        self.assertMinified(js, expected)

    def testSingleComment(self):
        # Line comments are stripped; URLs containing // inside strings are kept.
        js = r'''// use native browser JS 1.6 implementation if available
if (Object.isFunction(Array.prototype.forEach))
Array.prototype._each = Array.prototype.forEach;
if (!Array.prototype.indexOf) Array.prototype.indexOf = function(item, i) {
// hey there
function() {// testing comment
foo;
//something something
location = 'http://foo.com;'; // goodbye
}
//bye
'''
        expected = r"""
if(Object.isFunction(Array.prototype.forEach))
Array.prototype._each=Array.prototype.forEach;if(!Array.prototype.indexOf)Array.prototype.indexOf=function(item,i){ function(){ foo; location='http://foo.com;';}"""
        # print expected
        self.assertMinified(js, expected)

    def testEmpty(self):
        # Pure whitespace minifies to the empty string.
        self.assertMinified('', '')
        self.assertMinified(' ', '')
        self.assertMinified('\n', '')
        self.assertMinified('\r\n', '')
        self.assertMinified('\t', '')

    def testMultiComment(self):
        # Block comments are removed entirely.
        js = r"""
function foo() {
print('hey');
}
/*
if(this.options.zindex) {
this.originalZ = parseInt(Element.getStyle(this.element,'z-index') || 0);
this.element.style.zIndex = this.options.zindex;
}
*/
another thing;
"""
        expected = r"""function foo(){print('hey');}
another thing;"""
        self.assertMinified(js, expected)

    def testLeadingComment(self):
        js = r"""/* here is a comment at the top
it ends here */
function foo() {
alert('crud');
}
"""
        expected = r"""function foo(){alert('crud');}"""
        self.assertMinified(js, expected)

    def testJustAComment(self):
        self.assertMinified(' // a comment', '')

    def testRe(self):
        # Regex literals must not be mistaken for division/comments.
        js = r'''
var str = this.replace(/\\./g, '@').replace(/"[^"\\\n\r]*"/g, '');
return (/^[,:{}\[\]0-9.\-+Eaeflnr-u \n\r\t]*$/).test(str);
});'''
        expected = r"""var str=this.replace(/\\./g,'@').replace(/"[^"\\\n\r]*"/g,'');return(/^[,:{}\[\]0-9.\-+Eaeflnr-u \n\r\t]*$/).test(str);});"""
        self.assertMinified(js, expected)

    def testIgnoreComment(self):
        js = r"""
var options_for_droppable = {
overlap: options.overlap,
containment: options.containment,
tree: options.tree,
hoverclass: options.hoverclass,
onHover: Sortable.onHover
}
var options_for_tree = {
onHover: Sortable.onEmptyHover,
overlap: options.overlap,
containment: options.containment,
hoverclass: options.hoverclass
}
// fix for gecko engine
Element.cleanWhitespace(element);
"""
        expected = r"""var options_for_droppable={overlap:options.overlap,containment:options.containment,tree:options.tree,hoverclass:options.hoverclass,onHover:Sortable.onHover}
var options_for_tree={onHover:Sortable.onEmptyHover,overlap:options.overlap,containment:options.containment,hoverclass:options.hoverclass}
Element.cleanWhitespace(element);"""
        self.assertMinified(js, expected)

    def testHairyRe(self):
        # Mixed regexes, escapes and quotes in one fixture.
        js = r"""
inspect: function(useDoubleQuotes) {
var escapedString = this.gsub(/[\x00-\x1f\\]/, function(match) {
var character = String.specialChar[match[0]];
return character ? character : '\\u00' + match[0].charCodeAt().toPaddedString(2, 16);
});
if (useDoubleQuotes) return '"' + escapedString.replace(/"/g, '\\"') + '"';
return "'" + escapedString.replace(/'/g, '\\\'') + "'";
},
toJSON: function() {
return this.inspect(true);
},
unfilterJSON: function(filter) {
return this.sub(filter || Prototype.JSONFilter, '#{1}');
},
"""
        expected = r"""inspect:function(useDoubleQuotes){var escapedString=this.gsub(/[\x00-\x1f\\]/,function(match){var character=String.specialChar[match[0]];return character?character:'\\u00'+match[0].charCodeAt().toPaddedString(2,16);});if(useDoubleQuotes)return'"'+escapedString.replace(/"/g,'\\"')+'"';return"'"+escapedString.replace(/'/g,'\\\'')+"'";},toJSON:function(){return this.inspect(true);},unfilterJSON:function(filter){return this.sub(filter||Prototype.JSONFilter,'#{1}');},"""
        self.assertMinified(js, expected)

    def testNoBracesWithComment(self):
        js = r"""
onSuccess: function(transport) {
var js = transport.responseText.strip();
if (!/^\[.*\]$/.test(js)) // TODO: improve sanity check
throw 'Server returned an invalid collection representation.';
this._collection = eval(js);
this.checkForExternalText();
}.bind(this),
onFailure: this.onFailure
});
"""
        expected = r"""onSuccess:function(transport){var js=transport.responseText.strip();if(!/^\[.*\]$/.test(js))
throw'Server returned an invalid collection representation.';this._collection=eval(js);this.checkForExternalText();}.bind(this),onFailure:this.onFailure});"""
        self.assertMinified(js, expected)

    def testSpaceInRe(self):
        # A literal space inside a regex must be preserved.
        js = r"""
num = num.replace(/ /g,'');
"""
        self.assertMinified(js, "num=num.replace(/ /g,'');")

    def testEmptyString(self):
        js = r'''
function foo('') {
}
'''
        self.assertMinified(js, "function foo(''){}")

    def testDoubleSpace(self):
        js = r'''
var foo = "hey";
'''
        self.assertMinified(js, 'var foo="hey";')

    def testLeadingRegex(self):
        js = r'/[d]+/g '
        self.assertMinified(js, js.strip())

    def testLeadingString(self):
        js = r"'a string in the middle of nowhere'; // and a comment"
        self.assertMinified(js, "'a string in the middle of nowhere';")

    def testSingleCommentEnd(self):
        js = r'// a comment\n'
        self.assertMinified(js, '')

    def testInputStream(self):
        # Exercise the stream-based API (Python 2/3 compatible StringIO).
        try:
            from StringIO import StringIO
        except ImportError:
            from io import StringIO
        ins = StringIO(r'''
function foo('') {
}
''')
        outs = StringIO()
        m = jsmin.JavascriptMinify()
        m.minify(ins, outs)
        output = outs.getvalue()
        assert output == "function foo(''){}"

    def testUnicode(self):
        instr = u'\u4000 //foo'
        expected = u'\u4000'
        output = jsmin.jsmin(instr)
        self.assertEqual(output, expected)

    def testCommentBeforeEOF(self):
        self.assertMinified("//test\r\n", "")

    def testCommentInObj(self):
        self.assertMinified("""{
a: 1,//comment
}""", "{a:1,}")

    def testCommentInObj2(self):
        self.assertMinified("{a: 1//comment\r\n}", "{a:1\n}")

    def testImplicitSemicolon(self):
        # return \n 1 is equivalent with return; 1
        # so best make sure jsmin retains the newline
        self.assertMinified("return;//comment\r\na", "return;a")

    def testImplicitSemicolon2(self):
        self.assertMinified("return//comment...\r\na", "return\na")

    def testSingleComment2(self):
        self.assertMinified('x.replace(/\//, "_")// slash to underscore',
                            'x.replace(/\//,"_")')
if __name__ == '__main__':
    # Discover and run all JsTests cases.
    unittest.main()
|
[
"paul.tunison@kitware.com"
] |
paul.tunison@kitware.com
|
ff90cd1f1161c0d09ab2942b7f313e655ef548a0
|
a6bd898302ffebe9066595b264f9e5e38e6fa8e6
|
/settings_template.py
|
069b2d192200ef4343a3508486203a989c2cb909
|
[] |
no_license
|
symroe/teamprime_retweets
|
65e8ec57095b138be45496eb115fb4da1d1e1af0
|
08e817da6191a8058b3606b076ba9de6bd253b12
|
refs/heads/master
| 2021-01-10T22:04:16.968867
| 2013-09-20T13:32:03
| 2013-09-20T13:32:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 136
|
py
|
# Twitter API credentials — template values; fill these in before use.
CONSUMER_KEY = ""
CONSUMER_SECRET = ""
ACCESS_TOKEN_KEY = ""
ACCESS_TOKEN_SECRET = ""

# Account whose retweets are processed.
username = "TeamPrimeLtd"
# Directory where fetched tweets are stored.
TWEET_PATH = "tweets"
|
[
"sym.roe@talusdesign.co.uk"
] |
sym.roe@talusdesign.co.uk
|
f75a10dc0ef05561c5371a193810ff7eefcf5c22
|
b76aa6044aa0971bc7842cd4c3faa281c9c0e5cd
|
/1044_multiplos.py
|
f0455f370926483d5f3d396afe4542b00c05b844
|
[] |
no_license
|
Miguelsantos101/algoritmos1-2021-1
|
8496233f6d37bd70e47949c7e23b34e6c2181bd1
|
fe03097d870e4f47796e69c97020f9c0bdba0cab
|
refs/heads/main
| 2023-05-03T23:08:53.669522
| 2021-05-27T02:33:12
| 2021-05-27T02:33:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 193
|
py
|
# Read two integers and report whether the larger is a multiple of the
# smaller (URI/beecrowd problem 1044).
# (Removed commented-out dead code that duplicated the input parsing.)


def sao_multiplos(a, b):
    """Return True if the larger of a, b is a multiple of the smaller."""
    if a < b:
        # idiomatic tuple swap instead of a temp variable
        a, b = b, a
    return a % b == 0


if __name__ == "__main__":
    a, b = map(int, input().split())
    if sao_multiplos(a, b):
        print("Sao Multiplos")
    else:
        print("Nao sao Multiplos")
|
[
"carloshiga@alumni.usp.br"
] |
carloshiga@alumni.usp.br
|
8d3399769dfddb9fe82a9f192ca45d86625e5e59
|
d5688ec8a696b7d8bb34ef5e0a7876532619fce8
|
/spreadsheetupload/urls.py
|
3145c62652bdfbd3a855d1e75b5cf1101a6fdefb
|
[] |
no_license
|
varunsarvesh/spreadsheet
|
dffdea68bf449d232fcb0e33382e58fe8540471e
|
e81112193efe0d638881f1a8b7b5138d7af433b3
|
refs/heads/master
| 2020-03-18T19:51:02.133600
| 2018-05-29T08:51:06
| 2018-05-29T08:51:06
| 135,181,780
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 813
|
py
|
"""spreadsheetupload URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('XLApp/', include('upload.urls')),
]
|
[
"varun@cyces.co"
] |
varun@cyces.co
|
b41e10890c9ac9413fe046efde3866bcd757844a
|
5b19f8512f3f8716f7e7b9b45380d3d9eb92565e
|
/app/app/settings.py
|
59760d31a9202a974de5e40adc3bffd206d90a84
|
[] |
no_license
|
raiatul14/taxi-app
|
a1daf11649b1de2e0f9942aa40dd193617641c50
|
37cf15ab77bb808494551300a25c8da8ed85645b
|
refs/heads/main
| 2023-06-26T15:54:49.244535
| 2021-07-24T14:34:24
| 2021-07-24T14:34:24
| 382,535,810
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,226
|
py
|
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
import datetime
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = bool(int(os.environ.get('DEBUG', 0)))
ALLOWED_HOSTS = []
ALLOWED_HOSTS.extend(
filter(
None,
os.environ.get('ALLOWED_HOSTS', '').split(','),
)
)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
ASGI_APPLICATION = 'taxi.routing.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ.get('DB_HOST'),
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASS')
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
AUTH_USER_MODEL = 'core.User'
#REDIS
REDIS_URL = os.environ.get('REDIS_URL', 'redis://localhost:6379')
#DJANGO CHANNELS
CHANNEL_LAYERS = {
'default': {
'BACKEND': 'channels_redis.core.RedisChannelLayer',
'CONFIG': {
'hosts': [REDIS_URL],
},
},
}
#REST FRAMEWORK
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_simplejwt.authentication.JWTAuthentication',
'rest_framework.authentication.SessionAuthentication'
)
}
SIMPLE_JWT = {
'ACCESS_TOKEN_LIFETIME': datetime.timedelta(minutes=5),
'REFRESH_TOKEN_LIFETIME': datetime.timedelta(days=1),
'USER_ID_CLAIM': 'id',
}
|
[
"atul.rai@ajackus.com"
] |
atul.rai@ajackus.com
|
5aa68c22244a5396ea453095dedc1d96aba4aa72
|
d9b53673b899a9b842a42060740b734bf0c63a31
|
/leetcode/python/easy/p645_findErrorNums.py
|
0b9b378910292d7af736c77ca60c91c415bce9a7
|
[
"Apache-2.0"
] |
permissive
|
kefirzhang/algorithms
|
a8d656774b576295625dd663154d264cd6a6a802
|
549e68731d4c05002e35f0499d4f7744f5c63979
|
refs/heads/master
| 2021-06-13T13:05:40.851704
| 2021-04-02T07:37:59
| 2021-04-02T07:37:59
| 173,903,408
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
class Solution:
def findErrorNums(self, nums):
helper = [0] * len(nums)
for i in nums:
helper[i - 1] += 1
for i, n in enumerate(helper):
print(i, n)
if n == 0:
lack = i + 1
elif n == 2:
more = i + 1
return [more, lack]
slu = Solution()
print(slu.findErrorNums([1, 2, 2, 4]))
|
[
"8390671@qq.com"
] |
8390671@qq.com
|
6aa6cad3f09fd39c8de6b26302daf10e485cedb5
|
27ece9ab880a0bdba4b2c053eccda94602c716d5
|
/.history/save_20181129231105.py
|
50671059975cdfa4cf895b943b529349ae4d201e
|
[] |
no_license
|
Symfomany/keras
|
85e3ad0530837c00f63e14cee044b6a7d85c37b2
|
6cdb6e93dee86014346515a2017652c615bf9804
|
refs/heads/master
| 2020-04-08T20:21:35.991753
| 2018-11-30T08:23:36
| 2018-11-30T08:23:36
| 159,695,807
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,777
|
py
|
import os, argparse
import tensorflow as tf
# The original freeze_graph function
# from tensorflow.python.tools.freeze_graph import freeze_graph
dir = os.path.dirname(os.path.realpath(__file__))
def freeze_graph(model_dir, output_node_names):
"""Extract the sub graph defined by the output nodes and convert
all its variables into constant
Args:
model_dir: the root folder containing the checkpoint state file
output_node_names: a string, containing all the output node's names,
comma separated
"""
if not tf.gfile.Exists(model_dir):
raise AssertionError(
"Export directory doesn't exists. Please specify an export "
"directory: %s" % model_dir)
if not output_node_names:
print("You need to supply the name of a node to --output_node_names.")
return -1
# We retrieve our checkpoint fullpath
checkpoint = tf.train.get_checkpoint_state(model_dir)
input_checkpoint = checkpoint.model_checkpoint_path
# We precise the file fullname of our freezed graph
absolute_model_dir = "/".join(input_checkpoint.split('/')[:-1])
output_graph = absolute_model_dir + "/frozen_model.pb"
# We clear devices to allow TensorFlow to control on which device it will load operations
clear_devices = True
# We start a session using a temporary fresh Graph
with tf.Session(graph=tf.Graph()) as sess:
# We import the meta graph in the current default Graph
saver = tf.train.import_meta_graph(input_checkpoint + '.meta', clear_devices=clear_devices)
# We restore the weights
saver.restore(sess, input_checkpoint)
# We use a built-in TF helper to export variables to constants
output_graph_def = tf.graph_util.convert_variables_to_constants(
sess, # The session is used to retrieve the weights
tf.get_default_graph().as_graph_def(), # The graph_def is used to retrieve the nodes
output_node_names.split(",") # The output node names are used to select the usefull nodes
)
# Finally we serialize and dump the output graph to the filesystem
with tf.gfile.GFile(output_graph, "wb") as f:
f.write(output_graph_def.SerializeToString())
print("%d ops in the final graph." % len(output_graph_def.node))
return output_graph_def
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--model_dir", type=str, default="models", help="Model folder to export")
parser.add_argument("--output_node_names", type=str, default="", help="The name of the output nodes, comma separated.")
args = parser.parse_args()
freeze_graph(args.model_dir, args.output_node_names)
|
[
"julien@meetserious.com"
] |
julien@meetserious.com
|
7f19e8afa6fdab3a0d7af9f55578ca1ba59afa65
|
81061f903318fceac254b60cd955c41769855857
|
/server/paiements/migrations/0003_auto__chg_field_transaction_extra_data.py
|
b059e9dea63be589ea180dbfe9a60bdc411cea7a
|
[
"BSD-2-Clause"
] |
permissive
|
agepoly/polybanking
|
1e253e9f98ba152d9c841e7a72b7ee7cb9d9ce89
|
f8f19399585293ed41abdab53609ecb8899542a2
|
refs/heads/master
| 2020-04-24T06:15:16.606580
| 2015-10-26T19:52:03
| 2015-10-26T19:52:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,133
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Transaction.extra_data'
db.alter_column(u'paiements_transaction', 'extra_data', self.gf('django.db.models.fields.TextField')(null=True))
def backwards(self, orm):
# Changing field 'Transaction.extra_data'
db.alter_column(u'paiements_transaction', 'extra_data', self.gf('django.db.models.fields.TextField')(default=''))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'configs.config': {
'Meta': {'object_name': 'Config'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'admin_enable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'allowed_users': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key_api': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'key_ipn': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'key_request': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'test_mode': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'url_back_err': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'url_back_ok': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'url_ipn': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'paiements.transaction': {
'Meta': {'object_name': 'Transaction'},
'amount': ('django.db.models.fields.IntegerField', [], {}),
'config': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['configs.Config']"}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'extra_data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_status': ('django.db.models.fields.CharField', [], {'default': "'cr'", 'max_length': '2'}),
'ipn_needed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_ipn_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_postfinance_ipn_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_user_back_from_postfinance_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_userforwarded_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'postfinance_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'postfinance_status': ('django.db.models.fields.CharField', [], {'default': "'??'", 'max_length': '2'}),
'reference': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'paiements.transctionlog': {
'Meta': {'object_name': 'TransctionLog'},
'extra_data': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'log_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'transaction': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['paiements.Transaction']"}),
'when': ('django.db.models.fields.DateTimeField', [], {})
}
}
complete_apps = ['paiements']
|
[
"maximilien@theglu.org"
] |
maximilien@theglu.org
|
a083a001d9f5a9559169c82b7ac70022a8d131c7
|
c534fba89ff0462334cc724ff4010cbed829e294
|
/web/myadmin/migrations/0012_auto_20191019_1638.py
|
8bbaa46d7e85d15be38f10c54609829eb800d7f6
|
[] |
no_license
|
victorfengming/python_bookshop
|
974f5f8ff3b53b024b573f0f256409204116e114
|
c0a4757fc2031a015d4b198ba889be69a2a4a3c5
|
refs/heads/master
| 2020-09-02T18:02:07.547345
| 2019-11-04T15:10:44
| 2019-11-04T15:10:44
| 219,275,403
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 366
|
py
|
# Generated by Django 2.2.3 on 2019-10-19 16:38
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('myadmin', '0011_auto_20191018_2225'),
]
operations = [
migrations.DeleteModel(
name='Booktype',
),
migrations.DeleteModel(
name='Users',
),
]
|
[
"fengming19981221@163.com"
] |
fengming19981221@163.com
|
5fa8134299debad3891dee51566474f0fd8a89e0
|
e8411c4506c106ce0a378f8a1a86c7b83363867c
|
/src/kmeans.py
|
b4a8131cc89b91160836751d37a336cc6d1d59a9
|
[] |
no_license
|
rdyzakya/UnsupervisedLearning
|
d833d49feed7ebe41ef8fa855704ec56ed830de2
|
0a0e6a9f9d0b9cc03816384307556d217f3ac70e
|
refs/heads/main
| 2023-06-26T01:19:09.690564
| 2021-07-25T11:08:13
| 2021-07-25T11:08:13
| 382,757,126
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,165
|
py
|
import numpy as np
import time
class DistanceMethodNotValidError(Exception):
pass
class NotSameLength(Exception):
pass
class KMeansClustering:
def __init__(self):
self.centroids = None
self.x_columns = None
self.how = None
self.df = None
def euclidean_distance(self,this_row,other):
res = 0
for cols in self.x_columns:
delta = this_row[cols] - other[cols]
delta_sqr = delta**2
res += delta_sqr
return np.sqrt(res)
def manhattan_distance(self,this_row,other):
res = 0
for cols in self.x_columns:
delta = this_row[cols] - other[cols]
delta_abs = np.abs(delta)
res += delta_abs
return res
def calculate_nearest(self,row,how='euclidean'):
dist = [0 for i in range(len(self.centroids))]
dist = np.array(dist)
for i in range(len(self.centroids)):
if how == 'euclidean':
dist[i] = self.euclidean_distance(row,self.centroids.loc[i])
elif how == 'manhattan':
dist[i] = self.manhattan_distance(row,self.centroids.loc[i])
else:
raise DistanceMethodNotValidError()
min_idx = np.where(dist == dist.min())[0][0]
return min_idx
def fit(self,df_,x_columns,k,how='euclidean'):
df = df_.copy()
self.x_columns = [df.columns[i] for i in x_columns]
self.centroids = df.sample(k).copy()
self.centroids = self.centroids.reset_index()
self.centroids = self.centroids[self.x_columns]
self.how = how
df['Label'] = np.nan
df['New Label'] = np.nan
while False in (df['Label'] == df['New Label']).unique():
df['Label'] = df.apply(lambda row: self.calculate_nearest(row[self.x_columns],self.how),axis=1)
for i in range(len(self.centroids)):
df_i = df[df['Label'] == i]
means = df_i.mean()
for col in self.x_columns:
self.centroids.loc[i,col] = means[col]
df['New Label'] = df.apply(lambda row: self.calculate_nearest(row[self.x_columns],self.how),axis=1)
df['Label'] = df['New Label']
del df['New Label']
self.df = df
def predict(self,data):
if len(self.x_columns) != len(data):
raise NotSameLength()
temp = data
data = {}
for i in range(len(self.x_columns)):
data[self.x_columns[i]] = temp[i]
return self.calculate_nearest(data,self.how)
|
[
"impper1@gmail.com"
] |
impper1@gmail.com
|
6cd666cf9ad2d4f9fbbfd2c624ff106e65444172
|
7c70f3cbaecfa4d77928c784ae12f232c273112e
|
/api_client/test_helper.py
|
92caf6ea0aa0d2d82ca5d95fe1af6896fce47376
|
[
"MIT"
] |
permissive
|
uktrade/lite-tests-common
|
d029298d9144a447404d38899ab35ff8e54bf53d
|
8ae386e55f899d0ffd61cc0a9156cd4db340d6d1
|
refs/heads/master
| 2020-08-03T19:20:39.673522
| 2020-07-21T09:59:01
| 2020-07-21T09:59:01
| 211,858,651
| 1
| 0
|
MIT
| 2020-07-21T09:59:03
| 2019-09-30T12:49:33
|
Python
|
UTF-8
|
Python
| false
| false
| 2,474
|
py
|
from .sub_helpers.documents import Documents
from .sub_helpers.applications import Applications
from .sub_helpers.cases import Cases
from .sub_helpers.document_templates import DocumentTemplates
from .sub_helpers.ecju_queries import EcjuQueries
from .sub_helpers.flags import Flags
from .sub_helpers.goods import Goods
from .sub_helpers.goods_queries import GoodsQueries
from .sub_helpers.organisations import Organisations
from .sub_helpers.ogel import Ogel
from .sub_helpers.parties import Parties
from .sub_helpers.picklists import Picklists
from .sub_helpers.queues import Queues
from .sub_helpers.users import Users
class TestHelper:
"""
Contains a collection of test helper classes, grouped by functional area, with each class containing
required logic wrapping calls to various LITE API endpoints.
"""
def __init__(self, api):
self.api_client = api
self.context = self.api_client.context
request_data = self.api_client.request_data
self.documents = Documents(api_client=self.api_client, request_data=request_data)
self.users = Users(api_client=self.api_client, request_data=request_data)
self.organisations = Organisations(api_client=self.api_client, request_data=request_data)
self.goods = Goods(api_client=self.api_client, documents=self.documents, request_data=request_data)
self.goods_queries = GoodsQueries(api_client=self.api_client, request_data=request_data)
self.parties = Parties(api_client=self.api_client, documents=self.documents, request_data=request_data)
self.ecju_queries = EcjuQueries(api_client=self.api_client, request_data=request_data)
self.picklists = Picklists(api_client=self.api_client, request_data=request_data)
self.ogel = Ogel(api_client=self.api_client, request_data=request_data)
self.cases = Cases(api_client=self.api_client, request_data=request_data)
self.flags = Flags(api_client=self.api_client, request_data=request_data)
self.queues = Queues(api_client=self.api_client, request_data=request_data)
self.document_templates = DocumentTemplates(api_client=self.api_client, request_data=request_data)
self.applications = Applications(
parties=self.parties,
goods=self.goods,
api_client=self.api_client,
documents=self.documents,
request_data=request_data,
organisations=self.organisations,
)
|
[
"noreply@github.com"
] |
noreply@github.com
|
ace75a11edfb9adc326e867b401cc79979f5c7b9
|
991c3f7acbec5511441e62cb464bd77f9169c70c
|
/products/views.py
|
99fb6da645e9f42eddd0922ac853e550381a0e31
|
[] |
no_license
|
anshika-1999/ecommerce_project
|
44e78cf7b22ab0ac92fd673a9069b870c4ab3175
|
c9842516a80887205e7ce677f2c78d2608887670
|
refs/heads/master
| 2022-12-16T10:07:09.990805
| 2020-09-17T15:07:41
| 2020-09-17T15:07:41
| 296,339,336
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 686
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
from .models import Product
def productsHome(request):
allProds = []
catprods = Product.objects.values('category', 'product_id')
cats = {item['category'] for item in catprods}
for cat in cats:
prod = Product.objects.filter(category=cat)
allProds.append(prod)
params = {'allProds':allProds}
return render(request, 'products/productHome.html',params)
def home(request):
products = Product.objects.all()
params={'product':products}
return render(request, 'products/home.html',params)
def checkout(request):
return render(request, 'products/checkout.html')
|
[
"anshikag.1999@gmail.com"
] |
anshikag.1999@gmail.com
|
07d3ff9572b4b5f0580105d33e1d8ada290fd157
|
d69fc0b185b045489d48ae8aa4caa4e33d01eb55
|
/hyperf-skeleton/h5/node_modules/utf-8-validate/build/config.gypi
|
e8575bc70334680c69c495bb08cb4e12499ce236
|
[
"MIT"
] |
permissive
|
4yop/miscellaneous
|
7993f2f314147019fc3e36f8b31ae6b7867a2f4f
|
3adee58f86c139f20926c80b1fb8c025127eef17
|
refs/heads/master
| 2023-04-26T11:18:23.755520
| 2022-08-25T15:55:13
| 2022-08-25T15:55:13
| 229,232,972
| 0
| 0
| null | 2023-04-19T20:05:15
| 2019-12-20T09:26:24
|
PHP
|
UTF-8
|
Python
| false
| false
| 3,345
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": [],
"msbuild_toolset": "v141",
"msvs_windows_target_platform_version": "10.0.17763.0"
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"debug_nghttp2": "false",
"debug_node": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_in": "..\\..\\deps/icu-small\\source/data/in\\icudt65l.dat",
"icu_default_data": "",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "65",
"is_debug": 0,
"napi_build_version": "5",
"nasm_version": "2.14",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_install_npm": "true",
"node_module_version": 72,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_report": "true",
"node_shared": "false",
"node_shared_brotli": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "false",
"node_use_etw": "true",
"node_use_large_pages": "false",
"node_use_large_pages_script_lld": "false",
"node_use_node_code_cache": "true",
"node_use_node_snapshot": "true",
"node_use_openssl": "true",
"node_use_v8_platform": "true",
"node_with_ltcg": "true",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_is_fips": "false",
"shlib_suffix": "so.72",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 1,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_siphash": 1,
"v8_use_snapshot": 1,
"want_separate_host_toolset": 0,
"nodedir": "C:\\Users\\Administrator\\AppData\\Local\\node-gyp\\Cache\\12.16.2",
"standalone_static_library": 1,
"msbuild_path": "D:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\Community\\MSBuild\\15.0\\Bin\\MSBuild.exe",
"cache": "C:\\Users\\Administrator\\AppData\\Local\\npm-cache",
"globalconfig": "C:\\Users\\Administrator\\AppData\\Roaming\\npm\\etc\\npmrc",
"init_module": "C:\\Users\\Administrator\\.npm-init.js",
"metrics_registry": "https://registry.npm.taobao.org/",
"node_gyp": "C:\\Users\\Administrator\\AppData\\Roaming\\npm\\node_modules\\npm\\node_modules\\node-gyp\\bin\\node-gyp.js",
"prefix": "C:\\Users\\Administrator\\AppData\\Roaming\\npm",
"registry": "https://registry.npm.taobao.org/",
"userconfig": "C:\\Users\\Administrator\\.npmrc",
"user_agent": "npm/7.7.6 node/v12.16.2 win32 x64"
}
}
|
[
"1131559748@qq.com"
] |
1131559748@qq.com
|
5854285ac06cf19046ef130cc5b0824d88f1507f
|
86e42c9f8576b9d4cda02aebf60b7820fe7e9bef
|
/version.py
|
a3220818626214d55f0e8543ad9c2875366cd45e
|
[
"MIT"
] |
permissive
|
ryansturmer/gitmake
|
4a5c25f8073ed07a5af25b4ffa093c47013548d8
|
8d6a2917af602f71dcdae0b142defaf529e9ee8c
|
refs/heads/master
| 2020-05-20T11:16:26.591662
| 2013-12-15T16:32:02
| 2013-12-15T16:32:02
| 11,926,238
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 62
|
py
|
major = 0
minor=0
patch=0
branch="dev"
timestamp=1376801630.58
|
[
"ryansturmer@gmail.com"
] |
ryansturmer@gmail.com
|
249fd231624cd29de11204da14210b15135a09c1
|
9fa68d4b3332e557ac51ba4f9ed4b0e37e3011c8
|
/config_sz32_alpha1_5.py
|
489b0f46ccc029d35864f187efd38f6f659d3ef2
|
[] |
no_license
|
aizvorski/ndsb17
|
e545f2b82383e6f826cb31959b0dbefce25c9161
|
50322131a0cca20c24956b34eb787b65e044e23a
|
refs/heads/master
| 2021-06-13T20:16:22.928167
| 2017-04-12T10:46:01
| 2017-04-12T10:46:01
| 83,652,568
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 91
|
py
|
num_epochs = 500
samples_per_epoch = 10000
lr = 0.001
feature_sz = 32
feature_alpha = 1.5
|
[
"aizvorski@gmail.com"
] |
aizvorski@gmail.com
|
c571164d09a9dfe8ee2571e96a5c3e2bb982d580
|
44064ed79f173ddca96174913910c1610992b7cb
|
/Second_Processing_app/temboo/Library/Google/Drive/Revisions/Delete.py
|
21b2277599036d6311d9fc5895330b8646d5bce5
|
[] |
no_license
|
dattasaurabh82/Final_thesis
|
440fb5e29ebc28dd64fe59ecd87f01494ed6d4e5
|
8edaea62f5987db026adfffb6b52b59b119f6375
|
refs/heads/master
| 2021-01-20T22:25:48.999100
| 2014-10-14T18:58:00
| 2014-10-14T18:58:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,974
|
py
|
# -*- coding: utf-8 -*-
###############################################################################
#
# Delete
# Removes a revision.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class Delete(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the Delete Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
Choreography.__init__(self, temboo_session, '/Library/Google/Drive/Revisions/Delete')
def new_input_set(self):
return DeleteInputSet()
def _make_result_set(self, result, path):
return DeleteResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return DeleteChoreographyExecution(session, exec_id, path)
class DeleteInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the Delete
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((optional, string) A valid access token retrieved during the OAuth2 process. This is required unless you provide the ClientID, ClientSecret, and RefreshToken to generate a new access token.)
"""
InputSet._set_input(self, 'AccessToken', value)
def set_ClientID(self, value):
"""
Set the value of the ClientID input for this Choreo. ((conditional, string) The Client ID provided by Google. Required unless providing a valid AccessToken.)
"""
InputSet._set_input(self, 'ClientID', value)
def set_ClientSecret(self, value):
"""
Set the value of the ClientSecret input for this Choreo. ((conditional, string) The Client Secret provided by Google. Required unless providing a valid AccessToken.)
"""
InputSet._set_input(self, 'ClientSecret', value)
def set_FileID(self, value):
"""
Set the value of the FileID input for this Choreo. ((required, string) The ID of the file.)
"""
InputSet._set_input(self, 'FileID', value)
def set_RefreshToken(self, value):
"""
Set the value of the RefreshToken input for this Choreo. ((conditional, string) An OAuth refresh token used to generate a new access token when the original token is expired. Required unless providing a valid AccessToken.)
"""
InputSet._set_input(self, 'RefreshToken', value)
def set_RevisionID(self, value):
"""
Set the value of the RevisionID input for this Choreo. ((required, string) The ID of the revision.)
"""
InputSet._set_input(self, 'RevisionID', value)
class DeleteResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the Delete Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Google.)
"""
return self._output.get('Response', None)
def get_NewAccessToken(self):
"""
Retrieve the value for the "NewAccessToken" output from this Choreo execution. ((string) Contains a new AccessToken when the RefreshToken is provided.)
"""
return self._output.get('NewAccessToken', None)
class DeleteChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return DeleteResultSet(response, path)
|
[
"dattasaurabh82@gmail.com"
] |
dattasaurabh82@gmail.com
|
9091f732ae972983486dcf8406038d70e2399992
|
1f7c9b7113985f17ad2e8d27e92bdfe0505c1e19
|
/com/drabarz/karolina/dominating_set.py
|
9152994aab5fa5af9f8d99001e42ac542edd057a
|
[] |
no_license
|
Szop-Kradziej/GIS_Dominating_Set
|
02891d0dd1eb839809601edf758ea84947ddb46c
|
ab16c6c8a1bb67446200aa195102c103938ed1b2
|
refs/heads/master
| 2016-08-12T11:46:23.628833
| 2016-01-15T18:21:48
| 2016-01-15T18:21:48
| 45,829,125
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,419
|
py
|
import networkx as nx
import sys
import getopt
import csv
import time
from com.drabarz.karolina.DominatingSetAlgorithm import DominatingSetAlgorithm
from com.drabarz.karolina.NetworkXAlgorithm import NetworkXAlgorithm
from com.drabarz.karolina.GreedyAlgorithm import GreedyAlgorithm
from com.drabarz.karolina.DispersedGreedyAlgorithm import DispersedGreedyAlgorithm
from com.drabarz.karolina.ClassicalSetCoverageAlgorithm import ClassicalSetCoverageAlgorithm
from com.drabarz.karolina.ModifiedGreedyAlgorithm import ModifiedGreedyAlgorithm
from com.drabarz.karolina.FastGreedyAlgorithm import FastGreedyAlgorithm
def getCommandLineArguments():
argv = sys.argv[1:]
graphFile = ''
setFile = ''
action = 'none'
try:
opts, args = getopt.getopt(argv,"hfcg:s:",["graphFile=","setFile="])
except getopt.GetoptError:
print 'test.py -g <graphFile> -s <setFile>'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'To find the smallest dominating set:'
print '\ttest.py -f -g <graphFile> -s <setFile>'
print 'To check if set is dominating:'
print '\ttest.py -c -g <graphFile> -s <setFile>'
sys.exit()
elif opt == '-f' :
action = "findDominatingSet"
elif opt == '-c' :
action = "checkIfSetIsDominating"
elif opt in ("-g", "--graphFile"):
graphFile = arg
elif opt in ("-s", "--setFile"):
setFile = arg
print 'Graph file is: ', graphFile
print 'Set file is: ', setFile
return [graphFile, setFile, action];
def createGraphFromFile(graphFile):
    # Build an undirected networkx graph from a tab-separated edge list.
    # The first 4 lines of the file are treated as a header and skipped.
    # Exits the process on a missing file or malformed rows.
    graph = nx.Graph();
    try:
        with open(graphFile, "rb") as inputfile:
            reader = csv.reader(inputfile);
            for i, line in enumerate(reader):
                if i < 4: continue
                # Each data row is one field holding "nodeA\tnodeB".
                edge = line[0].split('\t')
                graph.add_edge(edge[0], edge[1]);
    except IOError:
        print 'There is a incorrect name of graph file'
        sys.exit()
    except IndexError:
        print 'Incorrect input file structure'
        sys.exit()
    return graph;
def findAndShowDominatingSet(graph, setFile):
algorithm = chooseAlgorithm();
printGraphParamiters(graph);
start_time = time.time()
dominatingSet = algorithm.findDominatingSet(graph);
stop_time = time.time() - start_time
print "Algorithm execution time = ", stop_time
printDominatingSet(dominatingSet);
saveDominatingSet(dominatingSet, setFile);
return;
def chooseAlgorithm():
    """Prompt until the user picks one of the six dominating-set algorithms.

    Returns a fresh instance of the chosen algorithm class.
    """
    # Menu key -> algorithm class; instantiation happens only on selection.
    dispatch = {
        '1': GreedyAlgorithm,
        '2': DispersedGreedyAlgorithm,
        '3': ClassicalSetCoverageAlgorithm,
        '4': ModifiedGreedyAlgorithm,
        '5': FastGreedyAlgorithm,
        '6': NetworkXAlgorithm,
    }
    while 1:
        showMainMenu();
        answer = raw_input();
        if answer in dispatch:
            return dispatch[answer]();
        sys.exc_clear();
def showMainMenu():
print "Choose algorithm to calculate the smallest dominating set: "
print "\t1) greedy algorithm"
print "\t2) dispersed greedy algorithm"
print "\t3) classical set coverage algorithm"
print "\t4) modified greedy algorithm"
print "\t5) fast greedy algorithm"
print "\t6) use algorithm implemented in NetworkX library"
return;
def printGraphParamiters(graph):  # (sic) name kept: callers use this spelling
    # Print basic graph statistics: node count and edge count.
    print "Graph description: "
    print "Number of nodes: ", nx.number_of_nodes(graph);
    print "Number of edges: ", nx.number_of_edges(graph), "\n";
    return;
def printDominatingSet(dominatingSet):
    # Print the size of the dominating set followed by each member node.
    print "Number of nodes in dominating set: ", len(dominatingSet);
    for node in dominatingSet:
        print node;
    return;
def saveDominatingSet(dominatingSet, setFile):
try:
with open(setFile, 'wb') as outputFile:
writer = csv.writer(outputFile);
outputFile.write("#Number of nodes in dominating set: " + str(len(dominatingSet)) + "\n");
for i in range(0, len(dominatingSet)):
outputFile.write(str(dominatingSet[i])+ '\n')
except IOError:
print 'There is no set file name selected'
return;
def checkIfSetIsDominating(graph, setFile):
inputSet = createSetFromFile(setFile);
isDominatingSet = checkIfIsDominatingSet(graph, inputSet);
print "Is set dominating: ", isDominatingSet;
return;
def createSetFromFile(setFile):
    # Read a node set from setFile: the first line is a header (written by
    # saveDominatingSet) and is skipped; every remaining line holds one node.
    # Exits the process on a missing file or malformed rows.
    inputSet = set();
    try:
        with open(setFile, "rb") as inputfile:
            reader = csv.reader(inputfile);
            for i, line in enumerate(reader):
                if i < 1: continue
                node = line[0];
                inputSet.add(node);
    except IOError:
        print 'There is a wrong name of set file'
        sys.exit()
    except IndexError:
        print 'Incorrect input file structure'
        sys.exit()
    return inputSet;
def checkIfIsDominatingSet(graph, dominatingSet):
    """Thin wrapper around networkx's dominating-set predicate."""
    verdict = nx.is_dominating_set(graph, dominatingSet)
    return verdict
# Script entry point: parse the command line, load the graph, then dispatch
# on the requested action (-f find a dominating set, -c verify a given set).
[graphFile, setFile, action] = getCommandLineArguments();
graph = createGraphFromFile(graphFile);
if action == "findDominatingSet" :
    findAndShowDominatingSet(graph, setFile);
elif action == "checkIfSetIsDominating" :
    checkIfSetIsDominating(graph, setFile);
else :
    sys.exit();
|
[
"karolina.drabarz@gmail.com"
] |
karolina.drabarz@gmail.com
|
1339b5e2da73351b90d8bbc7a8b70a395472d3c0
|
fe6c7ddbd2ce513105346c3e9ecd858ee0240237
|
/inheritance.py
|
b057cdbb0dc94576693e9bf6f1d8884c58e9835e
|
[] |
no_license
|
adilreza/basic_python_practice
|
bfb4d5fc08c360f3b6808b74c431a75e09a09207
|
c712f47bdde081c305344ec396f9a146d4dad369
|
refs/heads/master
| 2020-08-07T00:45:08.938270
| 2019-10-11T13:29:18
| 2019-10-11T13:29:18
| 213,225,377
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 473
|
py
|
class parent_class:
    """Base class exposing two class-level attributes and a demo method."""

    # Class attributes, inherited by (and summed in) child_class.
    a = 4
    b = 3

    def parent_fuction(self):  # (sic) spelling kept: callers use this name
        """Print a greeting from the base class and return 0."""
        print("this is from parent function")
        return 0
class child_class(parent_class):
    """Subclass demonstrating inheritance of a and b from parent_class."""

    def child_function(self):
        """Print a greeting from the subclass."""
        print("this is me from child function")

    def make_sum(self):
        """Return a + b using attributes inherited from parent_class."""
        total = self.a + self.b
        return total
if __name__ == "__main__":
    # Demo: exercise the inherited attribute and both inherited/own methods.
    demo = child_class()
    print(demo.a)
    print(demo.parent_fuction())
    print(demo.make_sum())
|
[
"adilreza043@gmail.com"
] |
adilreza043@gmail.com
|
037e407294716acc0cfc28a662b89b093dfe6d3b
|
b96edd0ba0a9f2a73a8ef8ed011714798fa72303
|
/test_all.py
|
b9cf4978ba413cb3c30ad1ea4439ec2cdc20a863
|
[] |
no_license
|
lijiahaoAA/lijiahao_cpsc_12lead
|
9401d9679d530183afba5a15f6efef9a96f2f154
|
663264920ead07493c0d8fe9987b9ab9a60d35fd
|
refs/heads/master
| 2023-03-14T07:11:28.996901
| 2021-03-09T07:08:50
| 2021-03-09T07:08:50
| 345,533,709
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,924
|
py
|
import time
import numpy as np
np.set_printoptions(threshold=np.inf)  # print full arrays, never truncate
import scipy.io as sio
import os
import config
from keras.preprocessing import sequence
import QRSDetectorOffline
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif']=['SimHei'] # use SimHei so Chinese axis labels render correctly
plt.rcParams['axes.unicode_minus']=False # render the minus sign correctly with this font
# config = config.Config()
# a = os.listdir(config.train_mat_path)
# train_mat = [] # 存储所有6877个样本数据
# for i in range(len(a)):
# if a[i].endswith('.mat'):
# train_mat.append(config.train_mat_path + a[i])
#
# b = os.listdir(config.test_mat_path)
# test_mat = [] # 存储最终的测试数据
# for i in range(len(b)):
# if b[i].endswith('.mat'):
# test_mat.append(config.test_mat_path + b[i])
#
# def data_process(all_mat):
# ECG_1 = []
# ECG_2 = []
# ECG_3 = []
# #for recordpath in range(len(all_mat)):
# for recordpath in range(1):
# # load ECG
# mat = sio.loadmat(all_mat[recordpath])
# mat = np.array(mat['ECG']['data'][0, 0])
# mat = np.transpose(mat) # 做转置
# signal = mat
# ECG_1.append(signal)
# #print(signal.shape)
#
# qrsdetector = QRSDetectorOffline.QRSDetectorOffline(signal, config.sample_frequency, verbose=False,
# plot_data=False, show_plot=False)
# # denoise ECG 对每一导联进行去噪 滤波
# for i in range(signal.shape[1]):
# signal[:, i] = qrsdetector.bandpass_filter(signal[:, i], lowcut=0.5, highcut=49.0,
# signal_freq=config.sample_frequency, filter_order=1)
#
# ECG_2.append(signal)
# # print(ECG[0].shape)
# # print(ECG[0])
# # print(signal)
# # 将所有导联的长度填充为一样的,尾部补0
# # ECG_1 = sequence.pad_sequences(ECG_1, maxlen=3600, dtype='float32', truncating='post')
# ECG_2 = sequence.pad_sequences(ECG_2, maxlen=3600, dtype='float32', truncating='post')
# print(len(ECG_1))
# print(len(ECG_2))
# # plot_wave(ECG_1[0][:,0],ECG_2[0][:,0])
# calculate_max_min(ECG_2,ECG_1[0][:,0],ECG_2[0][:,0])
#
# #np.save('ECG_train_data_process_no_wave.npy', ECG)
# # np.save('ECG_train_data_process_3600QRS.npy', ECG)
# #np.save('ECG_test_data_process_no_wave.npy', ECG)
# # np.save('ECG_test_data_process_3600QRS.npy', ECG)
# return ECG_1, ECG_2
#
# def calculate_max_min(ECG,ECG_1,ECG_2):
# data = []
# tic = time.time()
# for i in range(len(ECG)):
# data.append(max(ECG[i][:, 0]))
# data.append(min(ECG[i][:, 0]))
#
# data.append(max(ECG[i][:, 1]))
# data.append(min(ECG[i][:, 1]))
#
# data.append(max(ECG[i][:, 2]))
# data.append(min(ECG[i][:, 2]))
#
# data.append(max(ECG[i][:, 3]))
# data.append(min(ECG[i][:, 3]))
#
# data.append(max(ECG[i][:, 4]))
# data.append(min(ECG[i][:, 4]))
#
# data.append(max(ECG[i][:, 5]))
# data.append(min(ECG[i][:, 5]))
#
# data.append(max(ECG[i][:, 6]))
# data.append(min(ECG[i][:, 6]))
#
# data.append(max(ECG[i][:, 7]))
# data.append(min(ECG[i][:, 7]))
#
# data.append(max(ECG[i][:, 8]))
# data.append(min(ECG[i][:, 8]))
#
# data.append(max(ECG[i][:, 9]))
# data.append(min(ECG[i][:, 9]))
#
# data.append(max(ECG[i][:, 10]))
# data.append(min(ECG[i][:, 10]))
#
# data.append(max(ECG[i][:, 11]))
# data.append(min(ECG[i][:, 11]))
#
# # print(len(data))
# with open("2.txt", 'w') as file:
# data1 = str(data)
# file.write(data1)
# file.close()
# max_data = max(data) # 训练集和测试集中在归一化到某个范围内时需要保证这个max_data和min_data是一致的
# min_data = min(data)
# normalization(ECG, config.max_data, config.min_data, ECG_1, ECG_2)
# print(max(data))
# print(min(data))
# toc = time.time()
# print("data normalization takes time:", toc - tic)
# return max_data,min_data
#
# # 数据归一化到指定区间
# def normalization(ECG, max_data, min_data, ECG_1, ECG_2):
# if(max_data - min_data == 0):
# print("分母为零,请检查")
# return
# k = (config.normalization_max - config.normalization_min)/((max_data - min_data) * 1.0) # 比例系数
# for i in range(len(ECG)):
# ECG[i][:, 0] = config.normalization_min + k * (ECG[i][:, 0] - min_data)
# ECG[i][:, 1] = config.normalization_min + k * (ECG[i][:, 1] - min_data)
# ECG[i][:, 2] = config.normalization_min + k * (ECG[i][:, 2] - min_data)
# ECG[i][:, 3] = config.normalization_min + k * (ECG[i][:, 3] - min_data)
# ECG[i][:, 4] = config.normalization_min + k * (ECG[i][:, 4] - min_data)
# ECG[i][:, 5] = config.normalization_min + k * (ECG[i][:, 5] - min_data)
# ECG[i][:, 6] = config.normalization_min + k * (ECG[i][:, 6] - min_data)
# ECG[i][:, 7] = config.normalization_min + k * (ECG[i][:, 7] - min_data)
# ECG[i][:, 8] = config.normalization_min + k * (ECG[i][:, 8] - min_data)
# ECG[i][:, 9] = config.normalization_min + k * (ECG[i][:, 9] - min_data)
# ECG[i][:, 10] = config.normalization_min + k * (ECG[i][:, 10] - min_data)
# ECG[i][:, 11] = config.normalization_min + k * (ECG[i][:, 11] - min_data)
#
# # np.save('ECG_train_data_normal.npy', ECG)
# # np.save('ECG_test_data_normal_500record.npy', ECG)
# plot_wave(ECG_1,ECG_2,ECG[0][:,0])
# return ECG
#
# def plot_wave(ECG_qrs, ECG_noqrs, ECG_3):
# plt.figure()
# print(len(ECG_qrs.shape))
# print(len(ECG_noqrs.shape))
# print(len(ECG_3.shape))
#
# plt.plot(range(3600), ECG_qrs[0:3600], color="red",label="去噪数据")
# # .plot(range(3600), ECG_noqrs, color="blue")
#
# plt.plot(range(3600), ECG_3, color="blue", label="归一化数据")
# plt.title("去噪数据波形对比归一化到[-3,3]数据波形")
# plt.xlabel("Time")
# plt.ylabel("Voltage")
# plt.legend(loc="best")
# plt.show()
#
# #data_process(train_mat)
# data_process(test_mat)
# from keras import backend as K
# from keras.layers import Lambda
# import tensorflow as tf
# def zeropad(x):
# y = K.zeros_like(x)
# print(y)
# return K.concatenate([x, y], axis=2)
#
def zeropad_output_shape(input_shape):
    """Return *input_shape* with its second dimension doubled.

    Prints the incoming shape and the doubled shape (debug output kept
    from the original); matches the effect of concatenating a zero tensor
    along axis 1.
    """
    print(input_shape)
    doubled = [dim for dim in input_shape]
    doubled[1] = doubled[1] * 2
    print(doubled)
    return tuple(doubled)
# Quick sanity check of the zero-padding idea with NumPy:
# append a zero block of the same shape along axis 1, doubling that axis.
# NOTE: `input` shadows the builtin input() here (kept as-is).
input = np.array([[1,2,3],[4,5,6]])
y = np.zeros_like(input)
new = np.concatenate([input, y], axis=1)
print(new)
zeropad_output_shape(input.shape)
# input = tf.convert_to_tensor(input)
# shortcut = Lambda(zeropad, output_shape=zeropad_output_shape)(input)
# print(shortcut)
|
[
"1301840357@qq.com"
] |
1301840357@qq.com
|
a5a2a4b129d766a01b983be9cdcbdf3471ac18cb
|
f55eea6e52408c1400d8570b2a55ee8b9efb1a9e
|
/Python-Programming-Intermediate/Regular Expressions-164.py
|
f9d4e1a6d0ed4f1453780805d5c3d68ee17f3af1
|
[] |
no_license
|
CloudChaoszero/Data-Analyst-Track-Dataquest.io-Projects
|
a8b20c169fde8224c57bb85a845059072651e0e9
|
3b5be57489f960963a62b385177f13f25de452c3
|
refs/heads/master
| 2021-01-21T08:20:25.746106
| 2017-05-22T21:50:42
| 2017-05-22T21:50:42
| 91,623,309
| 2
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,873
|
py
|
"""Dataquest 'Regular Expressions' mission solutions.

Each '## N. ... ##' section is an independent exercise checked by the
Dataquest platform. NOTE(review): the platform rebinds names such as
`strings` and supplies `years_string` between sections, so later sections
reference values not visible here -- the script is not meant to run
top-to-bottom as a standalone program.
"""
## 1. Introduction ##
strings = ["data science", "big data", "metadata"]
regex = "data"
## 2. Wildcards in Regular Expressions ##
strings = ["bat", "robotics", "megabyte"]
regex = "..t"
## 3. Searching the Beginnings And Endings Of Strings ##
strings = ["better not put too much", "butter in the", "batter"]
bad_string = "We also wouldn't want it to be bitter"
regex = ""
regex = "^b.tter"
## 5. Reading and Printing the Data Set ##
import csv
# Open and read the file, thereafter convert it to a list of rows.
file = csv.reader(open("askreddit_2015.csv",'r'))
post_with_header = list(file)
posts = post_with_header[1:]
for val in posts[:10]:
    print(val)
## 6. Counting Simple Matches in the Data Set with re() ##
import re
# Initialize counter.
of_reddit_count = 0
# Count posts whose title (first column) contains "of Reddit".
for val in posts:
    if re.search("of Reddit", val[0]):
        of_reddit_count +=1
    else:
        pass
## 7. Using Square Brackets to Match Multiple Characters ##
import re
of_reddit_count = 0
for row in posts:
    # [Rr] matches either capitalization of "Reddit".
    if re.search("of [Rr]eddit", row[0]) is not None:
        of_reddit_count += 1
## 8. Escaping Special Characters ##
import re
serious_count = 0
for row in posts:
    # '\[' escapes the bracket so it is matched literally.
    if re.search("\[Serious]",row[0]) is not None:
        serious_count +=1
        print(row[0])
## 9. Combining Escaped Characters and Multiple Matches ##
import re
serious_count = 0
for row in posts:
    if re.search("\[[sS]erious\]", row[0]) is not None:
        serious_count += 1
## 10. Adding More Complexity to Your Regular Expression ##
import re
serious_count = 0
for row in posts:
    # Accept either [Serious]/[serious] or (Serious)/(serious).
    if re.search("[\[\(][Ss]erious[\]\)]", row[0]) is not None:
        serious_count += 1
## 11. Combining Multiple Regular Expressions ##
import re
serious_start_count = 0
serious_end_count = 0
serious_count_final = 0
for row in posts:
    # Tag at the start of the title, at the end, or either (via '|').
    if re.search("^[\[\(][Ss]erious[\]\)]",row[0]) is not None:
        serious_start_count += 1
    if re.search("[\[\(][Ss]erious[\]\)]$", row[0]) is not None:
        serious_end_count += 1
    if re.search("^[\[\(][Ss]erious[\]\)]|[\[\(][Ss]erious[\]\)]$", row[0]) is not None:
        serious_count_final += 1
## 12. Using Regular Expressions to Substitute Strings ##
import re
posts_new = []
for row in posts:
    # Normalize every tag variant to the canonical "[Serious]".
    row[0] = re.sub("[\[\(][Ss]erious[\]\)]", "[Serious]", row[0])
    posts_new.append(row)
## 13. Matching Years with Regular Expressions ##
import re
year_strings = []
for string in strings:
    if re.search("[1-2][0-9][0-9][0-9]", string) is not None:
        year_strings.append(string)
## 14. Repeating Characters in Regular Expressions ##
import re
year_strings = []
for y in strings:
    # {3} repeats the preceding class three times (years 0000-2999).
    if re.search("[0-2][0-9]{3}",y) is not None:
        year_strings.append(y)
## 15. Challenge: Extracting all Years ##
import re
# `years_string` is provided by the mission environment -- not defined here.
years = re.findall("[0-2][0-9]{3}", years_string)
|
[
"noreply@github.com"
] |
noreply@github.com
|
2871fd432a366e045045dbb0053737b299a418e7
|
0a61fc847043d677dae701a70b90f119dd7ab8fb
|
/credentials_replacer/__main__.py
|
e8a05c1313fa14bcc3d2585e6c960c70585d4e1e
|
[
"MIT"
] |
permissive
|
MELODYAPP/aws-credential-replacer
|
4e286def8ab513557ad296ccf444d29537357708
|
fa645e5613aee242bd2ed670bc7bbc2ba797bb09
|
refs/heads/master
| 2021-01-01T20:39:22.871330
| 2017-08-01T14:47:04
| 2017-08-01T14:47:04
| 98,907,993
| 0
| 0
| null | 2017-07-31T16:12:15
| 2017-07-31T16:12:15
| null |
UTF-8
|
Python
| false
| false
| 66
|
py
|
from .replacer import main
# Allow `python -m credentials_replacer` to invoke the CLI entry point.
if __name__ == '__main__':
    main()
|
[
"prokhorov@saritasa.com"
] |
prokhorov@saritasa.com
|
58cd01e6622fd1b0f19af5cb10edacbcb384ce28
|
07e80d4b41d0db79bfc031b65894e28322d24e19
|
/zygoat/components/__init__.py
|
d0b864370a8ff64fa0bf89e99e2a898e0348e10e
|
[
"MIT"
] |
permissive
|
swang192/zygoat
|
0b021ad6cd8d286d265e22c5b27f1a8c4f18de6e
|
d00b6b1cc3a384b61e38845ff35dcbcc74a562d9
|
refs/heads/master
| 2021-02-05T21:47:14.089625
| 2020-02-28T04:03:16
| 2020-02-28T04:03:16
| 243,837,678
| 0
| 0
|
MIT
| 2020-02-28T19:19:33
| 2020-02-28T19:19:33
| null |
UTF-8
|
Python
| false
| false
| 320
|
py
|
from .base import Component # noqa
from .settings_component import SettingsComponent # noqa
from .editorconfig import editorconfig
from .docker_compose import docker_compose
from .backend import backend
from .frontend import frontend
# Components applied during project generation; presumably processed in
# list order -- TODO confirm ordering dependency against the consumer.
components = [
    editorconfig,
    docker_compose,
    backend,
    frontend,
]
|
[
"markrawls96@gmail.com"
] |
markrawls96@gmail.com
|
7f3e63f22434cad4df3c5f31228f840cee385144
|
255e19ddc1bcde0d3d4fe70e01cec9bb724979c9
|
/all-gists/5259522/snippet.py
|
530896846672f9f888ff87c34b403125582a7bbd
|
[
"MIT"
] |
permissive
|
gistable/gistable
|
26c1e909928ec463026811f69b61619b62f14721
|
665d39a2bd82543d5196555f0801ef8fd4a3ee48
|
refs/heads/master
| 2023-02-17T21:33:55.558398
| 2023-02-11T18:20:10
| 2023-02-11T18:20:10
| 119,861,038
| 76
| 19
| null | 2020-07-26T03:14:55
| 2018-02-01T16:19:24
|
Python
|
UTF-8
|
Python
| false
| false
| 1,252
|
py
|
#!/usr/bin/env python
import sys
files = []
if len(sys.argv) > 2:
for file in sys.argv[1:]:
files.append(str(file))
else:
print "Usage: Wordcount.py file1 file2 file3 ..."
words_to_ignore = ["that","what","with","this","would","from","your","which","while","these"]
things_to_strip = [".",",","?",")","(","\"",":",";","'s"]
words_min_size = 4
print_in_html = True
text = ""
for file in files:
f = open(file,"rU")
for line in f:
text += line
words = text.lower().split()
wordcount = {}
for word in words:
for thing in things_to_strip:
if thing in word:
word = word.replace(thing,"")
if word not in words_to_ignore and len(word) >= words_min_size:
if word in wordcount:
wordcount[word] += 1
else:
wordcount[word] = 1
sortedbyfrequency = sorted(wordcount,key=wordcount.get,reverse=True)
def print_txt(sortedbyfrequency):
    # Plain-text report: one "word count" pair per line, most frequent first.
    for word in sortedbyfrequency:
        print word, wordcount[word]
def print_html(sortedbyfrequency):
    # HTML report: a minimal page with one <tr> per word/count pair.
    print "<html><head><title>Wordcount.py Output</title></head><body><table>"
    for word in sortedbyfrequency:
        print "<tr><td>%s</td><td>%s</td></tr>" % (word,wordcount[word])
    print "</table></body></html>"
# Dispatch on the configured output format ("== True" replaced with the
# idiomatic truthiness test).
if print_in_html:
    print_html(sortedbyfrequency)
else:
    print_txt(sortedbyfrequency)
|
[
"gistshub@gmail.com"
] |
gistshub@gmail.com
|
e54ee1ecc2087f724a84dac26976197e1055fff2
|
60ac463f25995f50acd0b4caea95bfdb112fe99f
|
/sump2.py
|
f67895cc91e30268e25d36d004324c6f7d862fc1
|
[] |
no_license
|
descampsa/sump2_linux
|
a1c36872b62e2bbdc2795d87302fc5abf073bd4c
|
51264d52a5789e7042ee9f6a277b619b8e30f0ea
|
refs/heads/master
| 2020-06-26T01:56:19.690628
| 2016-11-23T19:24:26
| 2016-11-23T19:24:26
| 74,607,388
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 246,168
|
py
|
#!/usr/bin/python
# sump2
# Copyright (c) Kevin M. Hubbard 2016 BlackMesaLabs
#
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Source file: sump2.py
# Date: 07.25.16
# Author: Kevin M. Hubbard
# Description: A light weight VCD viewer written in Python+PyGame for Linux
# or Windows platforms. Designed to make use of the mouse scroll
# wheel for fast navigation and inspection of waveform files. Also
# follows keyboard navigation used by Vim and ChipVault.
# History:
# The backstory on ChipWave.py, which was then forked to become sump2.py
# is that I wrote it over a weekend while sequestered in a Redmond,WA hotel
# chaperoning a highschool JSA tournament for my daughter's class. I was
# wanting a better VCD viewer for IcarusVerilog and was frustrated with
# the GTKwave user interface and difficulty installing on Linux. It was
# designed only to be the backend viewer for simulations, but turned out to
# be a really good front-end and back-end for SUMP hardware capture engine.
# Original ( now SUMP1 ) design used .NET for waveform viewing which was
# frustratingly slow. PyGame based SUMP2 gui is 100x better in my opinion.
#
# PyGame:
# ChipWave uses to Python package PyGame for Mouse and Screen iterfacing.
# PyGame does not come with Python and MUST BE INSTALLED!
# See http://www.pygame.org/download.shtml
# ChipWave.py was written in 2013 as a VCD viewer for IcarusVerilog. It was
# ditched in favor of just using GTKwave. Basic features were then reused for
# a SUMP2 front-end and back-end to replace the SUMP1 Powershell/.NET app.
# Note: There are some legacy ChipWave functions still in here that are not
# currently being used and have not been removed.
#
# [ Python 3.5 for Windows ]
# https://www.python.org/downloads/ Python 3.5.2 python-3.5.2.exe
# Click Add to Path on Installer Popup
#
# python.exe -c 'import distutils.util; print(distutils.util.get_platform())'
# win32
#
# [ PyGame ]
# http://www.lfd.uci.edu/~gohlke/pythonlibs/#pygame
# pygame-1.9.2b1-cp35-cp35m-win_amd64.whl
# pygame-1.9.2b1-cp35-cp35m-win32.whl
#
# Copy WHL to C:\Users\root\AppData\Local\Programs\Python\Python35-32\Scripts
# pip install pygame-1.9.2b1-cp35-cp35m-win32.whl
#
# [ PySerial ]
# https://pypi.python.org/pypi/pyserial
# https://pypi.python.org/packages/...../pyserial-3.1.1-py2.py3-none-any.whl
# pip install pyserial-3.1.1-py2.py3-none-any.whl
#
# TODO: Vertical resizing of window has issues. Signal scrolling isnt updated
#
# Revision History:
# Ver When Who What
# ---- -------- -------- ---------------------------------------------------
# 0.00 07.25.16 khubbard Creation. Forked from chip_wave.py ( VCD Viewer )
#
# TODO: Key repeats dont work. Fake out and make own key events ?
# TODO: scroll_up and scroll_down have issues if signals are deleted.
# TODO: Add a "GenerateVCD" feature that calls external program to make vcd
# TODO: Support for bus ripping. Complicated....
# TODO: Search only support Hex searches. Should support signed and unsigned
# TODO: Add support for an autorun.txt file on startup.
# TODO: Reload doesn't work if new VCD is longer than old VCD.
# TODO: On reload, wave.do should be saved and reloaded to preserve format.
# WARNING: vcdfile2signal_list() currently requires a clock signal or else
# conversion doesnt work unless there is a value change every sample
# NOTE: Pygame stops responding if window is dragged onto second monitor.
# TODO: Doesn't support SUMP list reordering or wave.txt order.
# 09.05.16 khubbard Fix for VCD exporting nicknames. GUI crash fixes.
# 09.06.16 khubbard Partial ported from Python 2.7 to 3.5. Incomplete.
# 09.18.16 khubbard Major performance improvements. GUI file loading
# 09.19.16 khubbard Adjust DWORDs in RLE for trig_delay value
# 09.20.16 khubbard RLE Undersample 4x-64x feature added. Popup change.
# 09.23.16 khubbard Popup Entry for variable changes in GUI added.
# 09.24.16 khubbard User Interface and performance usability improvmnts
# 09.25.16 khubbard GUI popup for signal rename.
# 09.26.16 khubbard Fixed opening VCD files for static offline viewing.
# 09.26.16 khubbard zoom_out capped at max_samples. RLE->VCD working
# 09.29.16 khubbard cursor_snap back in for single click op. VCD 'x'
# 10.04.16 khubbard fast_render added. Disable 4x prerender on >1000
# 10.06.16 khubbard Fixed popup bugs. sump_bundle_data() feature added
# 10.16.16 khubbard RLE culling null sample improvements. Menu changes.
# 10.17.16 khubbard fixed vcdfile2signal_list() not decompressing.
# 10.18.16 khubbard fixed menu. New function list_remove
# 10.19.16 khubbard RLE Event to DWORD alignment fix. Needs HW too.
# 10.20.16 khubbard Improve centering to trigger post acquisition.
# 10.21.16 khubbard Acquisition_Length fixed. Also works with RLE now.
# 10.24.16 khubbard Fixed RLE cropping not showing DWORDs.Speed Improvs
###############################################################################
import time
from time import sleep;
import math # pow
import types # type
import sys;
import os;
import platform;
import locale;
class main(object):
def __init__(self):
# import math # pow
# import types # type
self.vers = "2016.10.24";
print("Welcome to SUMP2 " + self.vers + " by BlackMesaLabs");
self.mode_cli = True;
try:
import pygame # Import PyGame Module
except:
print("WARNING: PyGame not FOUND!! running in Command Line Mode");
print("Pygame http://www.lfd.uci.edu/~gohlke/pythonlibs/#pygame");
print("pip install pygame-1.9.2b1-cp35-cp35m-win32.whl");
self.vars = init_vars( self, "sump2.ini" );
self.help = init_help( self );
self.math = math;
list2file( self, "sump2_manual.txt", init_manual(self ) );
#locale.setlocale( locale.LC_NUMERIC, 'English' );
locale.setlocale( locale.LC_NUMERIC, 'en_US.UTf-8' );
init_globals( self );# Internal software variables
self.file_log = open ( self.vars["file_log"] , 'w' );
# ARG0 either specifies a static file to view OR sump2 or an IP address
# for talking directly to sump2 hardware.
import sys;
args = sys.argv + [None]*3;
self.file_name = args[1]; # args[0] is script name
if ( self.file_name == "bd_shell" or \
self.file_name == "cli" ):
self.mode_cli = True;
self.file_name = None;
else:
if 'pygame' in locals() or 'pygame' in globals():
display_init( self );
self.mode_cli = False;
else:
self.mode_cli = True;
self.signal_list = [];# List of class signal(object)s
self.signal_delete_list = [];
if ( self.file_name == None ):
# if ( sump_connect( self ) == False ):
# shutdown( self );
# sump_connect( self );
if ( sump_connect( self ) != False ):
sump2signal_list( self );# Make Signals based on SUMP2 HW Config
self.top_module = "sump2";
else:
# make_demo_vcd();
# self.file_name = "foo.vcd";
self.file_name = make_demo_vcd( self );
# sump_dump_data(self);
# else:
if ( self.file_name != None ):
self.bd=None;
file2signal_list( self, self.file_name );# VCD is now a signal_list
# save_format( self, self.file_name, False );# Create Wave File from VCD Info
self.file_name = None; # Make sure we don't overwrite vcd with wave on exit
self.vcd_import = True;# Prevents saving a VCD specific sump2_wave.txt file
# # Attempt to loading an existing wave.txt file for this block if exists
# # otherwise, create one from scratch
# import os;
# file_name = "wave_" + self.top_module + ".txt";# Default
# if os.path.exists( file_name ):
# print( "load_format() ", file_name );
# load_format( self, file_name );
# else:
# save_format( self, file_name, False );
if ( self.bd != None ):
###########################################################################
# If a wave file doesn't exist, create a default one using info from HW
import os;
# file_name = "wave_" + self.top_module + ".txt";# Default
file_name = "sump2_wave.txt";# Default
if ( os.path.exists( file_name ) == False and self.bd != None ):
print("Creating default wave file");
ram_dwords = self.sump.cfg_dict['ram_dwords'];
ram_bytes = self.sump.cfg_dict['ram_event_bytes'];
events = ram_bytes * 8;
# Iterate the number of event bits and init with 0s
txt_list = [];
for j in range( 0, events, 1):
txt = ("/event[%d]" % j) ;
txt_list += [ txt + " -nickname " + txt.replace("/","") ];
# Then follow with a group for all the DWORDs
if ( ram_dwords != 0 ):
txt_list += ["/dword[%d:%d]" % ( ( 0),( ram_dwords-1) )];
for i in range( 0, ram_dwords, 1):
txt = " /dword[%d]" % ( i );
txt_list += [ txt + " -nickname " + txt.replace("/","") ];
file_out = open( "sump2_wave.txt", 'w' );
for each in txt_list:
file_out.write( each + "\n" );
file_out.close();
# Load in the wavefile
if os.path.exists( file_name ):
print( "load_format() ", file_name );
load_format( self, file_name );
trig_i = sump_dump_data(self);
sump_vars_to_signal_attribs( self );# Populates things like trigger attr
# if os.path.exists( file_name ):
# print( "load_format() ", file_name );
# load_format( self, file_name );
# if ( self.bd != None ):
# sump_dump_data(self);
# else:
# save_format( self, file_name, False );# Create one
# return;
#############################################################################
# CLI Main Loop : When no PyGame loop here STDIN,STDOUT old school style
while( self.mode_cli == True and self.done == False ):
rts = raw_input(self.prompt);
# rts = input(self.prompt);
rts = rts.replace("="," = ");
words = " ".join(rts.split()).split(' ') + [None] * 4;
if ( words[1] == "=" ):
cmd = words[1];
parms = [words[0]]+words[2:];
else:
cmd = words[0];
parms = words[1:];
# print( cmd, parms );
rts = proc_cmd( self, cmd, parms );
for each in rts:
print( each );
# Load the wavefile
# # Calc max number of samples and change default zoom if not enough to fill
# for sig_obj in self.signal_list:
# if ( len( sig_obj.values ) > self.max_samples ):
# self.max_samples = len( sig_obj.values );
# if ( ( self.max_samples * self.zoom_x ) < self.screen_width ):
# self.zoom_x = float(self.screen_width) / float(self.max_samples);
# set_zoom_x( self, self.zoom_x ); # Set the zoom ratio
recalc_max_samples( self );
# Draw the 1st startup screen
screen_refresh( self );
self.context = "gui";
# GUI Main Loop
self.clock = self.pygame.time.Clock();
self.time = self.pygame.time;
self.pygame.key.set_repeat(50,200);
while ( self.done==False ):
# When live, attempt to acquire data, else display static data (faster)
if ( self.acq_state == "acquire_single" or
"acquire_rle" in self.acq_state or
self.acq_state == "acquire_continuous" ):
# Check to see if acquired bit is set, then read the data
# print ("%02X" % self.sump.rd( addr = None )[0] );
if ( "acquire_rle" in self.acq_state ):
sump_done = self.sump.status_triggered + self.sump.status_rle_post;
else:
sump_done = self.sump.status_triggered + self.sump.status_ram_post;
self.undersample_data = False;
self.undersample_rate = 1;
if ( ( self.sump.rd( addr = None )[0] & sump_done ) == sump_done ):
if ( self.acq_mode == "nonrle" ):
trig_i = sump_dump_data(self);
else:
trig_i = sump_dump_rle_data(self);
print("Trigger Index = %d " % trig_i );
# Place the cursors by the trigger.
for ( i , each ) in enumerate( self.cursor_list ):
if ( i == 0 ): offset = -6;
else : offset = +4;
each.selected = False;
# trigger_sample = self.max_samples // 2;# Temporary Trig @ 50%
# each.sample = int( trigger_sample ) + offset;
each.sample = int( trig_i ) + offset;
self.curval_surface_valid = False;# curval surface invalid
if ( self.acq_state == "acquire_continuous" ):
sump_arm( self, True );
else:
self.acq_state = "acquire_stop";
draw_header( self, "ACQUIRED");
print("RENDERING-START");
# start = ( self.max_samples // 2 ) - ( self.max_samples // 8 );
# stop = ( self.max_samples // 2 ) + ( self.max_samples // 8 );
# Zoom-Out the maximum amount that still keeps trigger centered
# for non-RLE this is trivial, for RLE it is more complicated
trig_to_start = trig_i - 0;
trig_to_end = self.max_samples - trig_i;
start = trig_i - min( trig_to_start, trig_to_end );
stop = trig_i + min( trig_to_start, trig_to_end );
proc_cmd( self, "zoom_to", [str(start), str(stop) ] );
# proc_cmd( self, "zoom_to", ["0", str( self.max_samples ) ] );
# proc_cmd( self, "zoom_to_cursors", [] );
print("RENDERING-COMPLETE");
else:
# draw_header(self,"Waiting for trigger..");
draw_screen( self );# This updates banner
self.time.wait(500 ); # Waiting for Trigger
else:
# self.clock.tick( 10 ); # Don't take 100% of CPU as that would be rude
self.time.wait(10 ); # Wait 10ms to share CPU
for event in pygame.event.get(): # User did something
# VIDEORESIZE
if event.type == pygame.VIDEORESIZE:
self.screen= pygame.display.set_mode(event.dict['size'],
pygame.RESIZABLE |
pygame.HWSURFACE |
pygame.DOUBLEBUF);
self.resize_on_mouse_motion = True;# Delay redraw until resize done
# Detect when console box has gained focus and switch from GUI to BD_SHELL
# and loop in a keyboard loop processing commands. Exit on a NULL Command.
# if event.type == pygame.ACTIVEEVENT:
# #print( str(event.gain) + " " + str(event.state) );
# # state=2 user gave focus to something other than GUI. Assume DOS-Box
# if ( event.state == 2 and self.os_sys != "Linux" ):
# bd_shell( self );
# KEYDOWN
if event.type == pygame.KEYDOWN:
if ( event.key == pygame.K_BACKSPACE ):
self.key_buffer = self.key_buffer[:-1];# Remove last char
elif ( event.key == pygame.K_DELETE ):
proc_cmd( self, "delete", [""] );
elif ( event.key == pygame.K_INSERT ):
proc_cmd( self, "insert_divider", [""] );
elif ( event.key == pygame.K_PAGEUP ):
proc_cmd( self, "zoom_in" , [] );
elif ( event.key == pygame.K_PAGEDOWN ):
proc_cmd( self, "zoom_out" , [] );
elif ( event.key == pygame.K_HOME ):
proc_cmd( self, "font_larger" , [] );
elif ( event.key == pygame.K_END ):
proc_cmd( self, "font_smaller" , [] );
elif ( event.key == pygame.K_RIGHT ):
num_samples = self.sample_room // 16;
proc_cmd( self, "scroll_right", [str(num_samples)] );
elif ( event.key == pygame.K_LEFT ):
num_samples = self.sample_room // 16;
proc_cmd( self, "scroll_left", [str(num_samples)] );
# Up and Down arrows either Zoom In,Out or Scroll the Signal list
elif ( event.key == pygame.K_UP ):
if ( self.mouse_region == "signal_name" ):
proc_cmd( self, "scroll_up" , ["1"] );
else:
proc_cmd( self, "zoom_in" , [] );
elif ( event.key == pygame.K_DOWN ):
if ( self.mouse_region == "signal_name" ):
proc_cmd( self, "scroll_down", ["1"] );
else:
proc_cmd( self, "zoom_out", [] );
elif ( event.key == pygame.K_SPACE and self.key_buffer == "" ):
proc_cmd( self, "Expand", [""] );
draw_screen( self );
screen_flip( self );
# Note: Text Entry moved to DOS-Box
# elif ( event.key == pygame.K_RETURN ):
# self.key_buffer = self.key_buffer.replace("="," = ");
# words = self.key_buffer.strip().split()+[None]*4;# Avoid IndexError
# cmd = words[0];
# parms = words[1:];
# if ( self.txt_entry == True ):
# cmd = self.txt_entry_caption;# ie "Rename_Signal"
# parms = words[0:];
# self.txt_entry = False; # Disable Dialog Box
# rts = proc_cmd( self, cmd, parms );
# for each in rts:
# print( each );# <CR>s
# sys.stdout.write( self.prompt );# No <CR>
# self.cmd_history.append( " " + self.key_buffer );
# self.key_buffer = "";
# elif ( event.key > 0 and event.key < 255 ):
# self.key_buffer = self.key_buffer + event.unicode;
# if ( event.unicode == "/" or event.unicode == "?" ):
# self.key_buffer = self.key_buffer + " ";
# sys.stdout.write( event.unicode );
# QUIT
if ( event.type == pygame.QUIT ) :
self.done=True;
# MOUSEMOTION
if event.type == pygame.MOUSEMOTION:
(self.mouse_x,self.mouse_y) = pygame.mouse.get_pos();
# self.mouse_region = get_mouse_region(self,self.mouse_x,self.mouse_y);
# # If mouse wave moved to right of the value region, scroll once to the
# # right and then create a fake MOUSEMOTION event to continue scrolling
# # until mouse is moved away.
# if ( self.mouse_region == "scroll_right" or \
# self.mouse_region == "scroll_left" ):
# proc_cmd( self, self.mouse_region , ["1"] ); # scroll left or right
# # TODO: This wait time needs to be configurable. On my HP Centos
# # laptop it scrolled too fast at 50ms, too slow at 250ms.
# # Make sure mouse is still in this window ( focused )
# if ( self.pygame.mouse.get_focused() == True ):
# self.pygame.time.wait( 100 );# Delay in ms
# self.pygame.event.post( pygame.event.Event( pygame.MOUSEMOTION ) );
# If a resize op was just completed, redraw on 1st mouse motion as
# trying to redraw during the resize is very slow and jerky.
if ( self.resize_on_mouse_motion == True ):
self.resize_on_mouse_motion = False; # This makes resize op smoother
old_width = self.screen_width;
( self.screen_width, self.screen_height ) = self.screen.get_size();
self.vars["screen_width"] = str( self.screen_width );
self.vars["screen_height"] = str( self.screen_height );
# if ( self.screen_width > old_width ):
# This is a HACK as sig_value_stop_x wasn't auto adjusting for some
# reason when window is resized to be larger.
# There is like a chicken and egg problem with zoom_full and stop_x
# adjust stop_x for delta change rather than calling zoom_full twice
self.sig_value_stop_x += ( self.screen_width - old_width );
proc_cmd( self, "zoom_full", [] );# HACK Needed to update parms
# create_surfaces( self );
# flush_surface_cache( self );
# screen_refresh( self );
# If popup enabled, continue drawing and updating until button release
if ( self.popup_x != None ):
self.popup_sel = get_popup_sel( self );
# print("draw_popup_cmd()");
draw_popup_cmd( self );# Just draw popup on top of existing display
screen_flip( self );# Only thing changing is the popup selection
# If mouse button is held down, check for drag operation
# elif ( self.mouse_button != 0 ):
elif ( self.mouse_button == 1 or \
self.mouse_button == 2 ):
# Make sure the region doesnt wander, so calc from mouse press
self.mouse_region = get_mouse_region(self,
self.mouse_btn1dn_x, self.mouse_btn1dn_y );
if ( self.mouse_region == "cursor" ):
mouse_event_move_cursor( self ); # Move a cursor
elif ( self.mouse_region == "slider" ):
mouse_event_move_slider( self,0 ); # Move the viewport slider
elif ( self.mouse_region == "signal_name" ):
mouse_event_vertical_drag_wip( self );# Move a signal name
# HERE : Doesnt work well
# elif ( self.mouse_region == "signal_value" ):
# # Calculate mouse drag deltas in char units
# delta_x=abs(self.mouse_btn1dn_x-self.mouse_x) / self.txt_width;
# delta_y=abs(self.mouse_btn1dn_y-self.mouse_y) / self.txt_height;
# if ( delta_x > 2 and delta_y > 2 ):
# mouse_event_area_drag_wip( self ); # Rectangle Zoom Region
# MOUSEBUTTONUP : event.button 1=Left,2=Middle,3=Right,4=ScrlUp,5=ScrlDn
if event.type == pygame.MOUSEBUTTONUP:
(self.mouse_x,self.mouse_y) = pygame.mouse.get_pos();
self.mouse_region = get_mouse_region(self,self.mouse_x,self.mouse_y);
if ( event.button == 1 ):
(self.mouse_btn1up_x,self.mouse_btn1up_y)=(self.mouse_x,self.mouse_y);
if ( event.button == 3 ):
(self.mouse_btn3up_x,self.mouse_btn3up_y)=(self.mouse_x,self.mouse_y);
# Attempt to detect double-click on left-mouse button t<300ms
if ( event.button == 1 ):
self.mouse_btn1up_time_last = self.mouse_btn1up_time;
self.mouse_btn1up_time = self.pygame.time.get_ticks();
if ( ( self.mouse_btn1up_time - self.mouse_btn1up_time_last ) < 300 ):
mouse_event_double_click( self );
delta_y=abs( self.mouse_btn3dn_y-self.mouse_btn3up_y )/self.txt_height;
delta_x=abs( self.mouse_btn3dn_x-self.mouse_btn3up_x )/self.txt_width;
# If popup enabled, process the cmd
# if ( self.popup_x != None ):
if ( self.popup_x != None and
( event.button == 1 ) or
( event.button == 3 and ( delta_y > 1.0 or delta_x > 1.0 ) )
):
# proc_cmd( self, self.popup_sel, [""] );
words = self.popup_sel.strip().split() + [""] * 4;# AvoidIndexError
proc_cmd( self, words[0],words[1:] );
self.popup_x = None;# Erase popup
self.popup_parent_x = None;
else:
# Mouse Button 1 Only - except emulate Center Mouse Drag to Zoom
delta_x = abs( self.mouse_btn1dn_x-self.mouse_btn1up_x ) / \
self.txt_width;
delta_y = abs( self.mouse_btn1dn_y-self.mouse_btn1up_y ) / \
self.txt_height;
# if ( event.button == 2 and delta_x > 2 and delta_y > 2 ):
if ( event.button == 1 or
( event.button == 2 and delta_x > 2 and delta_y > 2 ) ):
# Mouse region is from 1st click, not release
self.mouse_region = get_mouse_region(self,
self.mouse_btn1dn_x, self.mouse_btn1dn_y );
if ( self.mouse_region == "cursor" ):
mouse_event_move_cursor( self ); # Move a cursor
elif ( self.mouse_region == "slider" ):
mouse_event_move_slider( self,0 ); # Move the viewport slider
elif ( self.mouse_region == "signal_expand" ):
proc_cmd( self, "Expand", [""] );
elif ( self.mouse_region == "signal_name" ):
delta_y = abs( self.mouse_btn1dn_y-self.mouse_btn1up_y ) / \
self.txt_height;
if ( delta_y > 0 ):
mouse_event_vertical_drag_done( self, \
((self.mouse_btn1dn_y-self.mouse_btn1up_y) / \
self.txt_height ) );# Reorder signal list
# elif ( self.mouse_region == "signal_value" ):
# delta_x = abs( self.mouse_btn1dn_x-self.mouse_btn1up_x ) / \
# self.txt_width;
# delta_y = abs( self.mouse_btn1dn_y-self.mouse_btn1up_y ) / \
# self.txt_height;
# if ( delta_x < 2 and delta_y < 2 ):
# mouse_event_single_click( self ); # Moves Cursor to here
# if ( delta_x > 2 and delta_y < 2 ):
# # signal_value region is being dragged, so pan left or right
# direction = (self.mouse_btn1dn_x-self.mouse_btn1up_x) / \
# self.zoom_x;
# proc_cmd( self, "scroll_right", [str( int(direction) ) ] );
#
# elif ( delta_x > 2 and delta_y > 2 ):
# mouse_event_area_drag_done( self ); # Zooms to region
if ( event.button == 2 and delta_x < 2 and delta_y < 2 ):
print( "Center Mouse Button Click");
# Mouse-Scroll Wheel
# region==signal_name : Scroll Up and Down
# region==signal_value : Scroll Left and Right
# region==slider : Zoom in and out
elif ( event.button >= 4 ):
# print( self.mouse_region );
if ( self.mouse_region == "signal_name" ):
if ( event.button == 4 ):
proc_cmd( self, "scroll_up", ["1"] );
elif ( event.button == 5 ):
proc_cmd( self, "scroll_down", ["1"] );
elif ( self.mouse_region == "signal_value" ):
if ( event.button == 4 ):
mouse_event_zoom_scroll( self, +1 );
elif ( event.button == 5 ):
mouse_event_zoom_scroll( self, -1 );
elif ( self.mouse_region == "slider" ):
if ( event.button == 4 ):
proc_cmd( self, "scroll_right",[str(+self.scroll_num_samples)]);
elif ( event.button == 5 ):
proc_cmd( self, "scroll_left",[str(+self.scroll_num_samples)]);
elif ( self.mouse_region == "cursor" ):
self.curval_surface_valid = False;# curval surface is now invalid
for cur_obj in self.cursor_list:
if ( cur_obj.selected == True ):
sample = cur_obj.sample;
if ( event.button == 4 ):
sample +=1;
elif ( event.button == 5 ):
sample -=1;
if ( sample < 0 ) : sample = 0;
if ( sample > self.max_samples ): sample = self.max_samples;
cur_obj.sample = sample;
screen_refresh( self );
self.mouse_button = 0; # No Button is Pressed
# MOUSEBUTTONDOWN : 1=Left,2=Middle,3=Right,4=ScrlUp,5=ScrlDn
if event.type == pygame.MOUSEBUTTONDOWN:
self.mouse_button = event.button;# Remember which button is Pressed
(self.mouse_x,self.mouse_y) = pygame.mouse.get_pos();
self.mouse_region = get_mouse_region(self,self.mouse_x,self.mouse_y);
# Left-Mouse-Button-Down
# If popup is already up and right-click is clicked again, emulate left
if ( event.button == 1 or event.button == 2 or
( event.button == 3 and self.popup_x != None ) ):
self.mouse_btn1dn_time = self.pygame.time.get_ticks();
(self.mouse_x,self.mouse_y) = pygame.mouse.get_pos();
(self.mouse_btn1dn_x,self.mouse_btn1dn_y) = \
(self.mouse_x,self.mouse_y);
if ( self.mouse_region == "slider" ):
mouse_event_move_slider( self, 0 );
elif ( self.mouse_region == "signal_name" or
self.mouse_region == "signal_expand" ):
mouse_event_select_signal( self );
elif ( self.mouse_region == "signal_value" ):
mouse_event_single_click( self ); # Moves Cursor to here
# pass;
# Right-Mouse-Button-Down
if ( event.button == 3 and self.popup_x == None ):
(self.popup_x,self.popup_y) = pygame.mouse.get_pos();
(self.mouse_btn3dn_x,self.mouse_btn3dn_y) = \
(self.mouse_x,self.mouse_y);
# For cursor bring tos want to know exacy sample right click was on
(self.popup_sample, Null ) = get_sample_at_mouse( self,
self.popup_x, self.popup_y );
# Set the popup up and to the left so that a click and release
# selects the 1st guy ( Scroll_Toggle ) - a hack - I know.
self.popup_x -= 2*self.txt_width;
self.popup_y -= self.txt_height;
if ( self.mouse_region == "signal_value" ):
self.popup_list = self.popup_list_values;
# elif ( self.mouse_region == "signal_name" ):
else:
self.popup_list = self.popup_list_names;
draw_popup_cmd( self );
self.popup_sel = get_popup_sel( self );
screen_flip( self ); # Place popup on top existing stuff, no erase
self.acq_state = "acquire_stop";# Stop any live acquisitions
# New
# draw_screen( self );
# screen_flip( self );
shutdown( self );
return;# This is end of main program loop
###############################################################################
def recalc_max_samples( self ):
  """Recompute self.max_samples from the signal list and refresh view geometry.

  Side effects: always updates self.max_samples. In GUI mode may also
  stretch self.zoom_x to fill the window and recomputes self.sample_room
  and self.sample_stop from the current screen width.
  """
  # The longest value list among all signals defines the total sample count.
  sample_counts = [ len( sig_obj.values ) for sig_obj in self.signal_list ];
  self.max_samples = max( sample_counts + [0] );
  if ( self.mode_cli ):
    return;  # CLI mode has no display geometry to recalculate
  # If the capture doesn't fill the window at the current zoom, stretch it.
  if ( self.max_samples != 0 and
       ( self.max_samples * self.zoom_x ) < self.screen_width ):
    self.zoom_x = float(self.screen_width) / float(self.max_samples);
    set_zoom_x( self, self.zoom_x );  # Apply the new zoom ratio
  # Warning: This sample_room calculation assumes samples are 1 nibble wide.
  pixels = self.screen_width - self.sig_value_start_x - 2*self.txt_width;
  self.sample_room = int( float(pixels) / float(self.zoom_x) );
  self.sample_stop = self.sample_start + self.sample_room;
  return;
def display_init( self ):
  """Create the PyGame window, load the font and flag the GUI active."""
  log( self, ["display_init()"] );
  import pygame;  # PyGame is only required in GUI mode, so import locally
  pygame.init();
  # Window geometry comes from the user-settable variable dictionary.
  self.screen_width  = int( self.vars["screen_width"],  10 );
  self.screen_height = int( self.vars["screen_height"], 10 );
  # Other options exist: pygame.NOFRAME, pygame.FULLSCREEN
  window_flags = pygame.RESIZABLE | pygame.HWSURFACE | pygame.DOUBLEBUF;
  self.screen = pygame.display.set_mode(
      [ self.screen_width, self.screen_height ], window_flags );
  self.pygame = pygame;
  self.pygame.display.set_icon( create_icon( self ) );
  draw_header( self, "" );
  self.font = get_font( self, self.vars["font_name"], self.vars["font_size"] );
  self.gui_active = True;
  create_surfaces( self );
  return;
# mouse_event_select_signal() : User has clicked the mouse in the signal name
# region, so either deselect the old selection and select the new signal at
# the mouse location, or if the shift key is held down, select everything from
# old selection to new location.
def mouse_event_select_signal( self ):
  """Select the signal under the mouse, honoring SHIFT/CTRL modifiers.

  SHIFT-click : extend the selection from the previously selected signal
                through the signal under the mouse (visible signals only).
  CTRL-click  : add the clicked signal without clearing prior selections.
  plain click : clear all selections, then select the clicked signal.

  Side effects: invalidates the cached name surface, updates
  self.sig_obj_sel (remembered for pulldown commands) and refreshes
  the screen.
  """
  self.name_surface_valid = False;
  if ( self.pygame.key.get_pressed()[self.pygame.K_LSHIFT] or
       self.pygame.key.get_pressed()[self.pygame.K_RSHIFT]    ):
#   (Null,index) = get_sample_at_mouse( self, self.mouse_x, self.mouse_y );
#   if ( index != None ):
#     sig_obj = self.signal_list[ index ];
#     sig_obj.selected = True;
#     self.sig_obj_sel = sig_obj;# Remember for pulldown commands
    sig_obj = get_sig_obj_at_mouse( self, self.mouse_x, self.mouse_y );
    if ( sig_obj != None ):
      sig_obj.selected = True;
      self.sig_obj_sel = sig_obj;# Remember for pulldown commands
    # NOTE: start_jk acts as an "inside the selection range" flag; the
    # three ifs below are order-dependent and must not be reordered.
    start_jk = False;
    for sig_obj in self.signal_list:
      # Select all visible signals between old select and new select
      if ( start_jk == True and sig_obj.visible == True ):
        sig_obj.selected = True;
      # Start the grouping on the old select
      if ( sig_obj.selected == True ):
        start_jk = True;
      # Finish when we get to new select
      if ( sig_obj == self.sig_obj_sel ):
        start_jk = False;
        break;
    screen_refresh( self );
  else:
    # DeSelect All signals unless a CTRL key is held down
    if ( self.pygame.key.get_pressed()[self.pygame.K_LCTRL] == False and
         self.pygame.key.get_pressed()[self.pygame.K_RCTRL] == False     ):
      for sig_obj in self.signal_list:
        sig_obj.selected = False;
    # Find the signal at the mouse location and select it.
    sig_obj = get_sig_obj_at_mouse( self, self.mouse_x, self.mouse_y );
    if ( sig_obj != None ):
      sig_obj.selected = True;
      self.sig_obj_sel = sig_obj;# Remember for pulldown commands
    screen_refresh( self );
  return;
def mouse_event_double_click( self ):
  """Toggle hide/show of the signal row under the mouse on a double-click."""
  sig_obj = get_sig_obj_at_mouse( self, self.mouse_x, self.mouse_y );
  if ( sig_obj is None ):
    return;  # Double-click was not over any visible signal row
  sig_obj.selected = True;
  # A currently visible signal gets hidden; a hidden one is shown again.
  cmd = "show" if sig_obj.hidden else "hide";
  proc_cmd( self, cmd, [""] );
  return;
def mouse_event_move_slider( self, direction ):
  """Jump the waveform viewport so the slider tracks the mouse x position.

  Maps the mouse x location within the value region onto a new
  self.sample_start, clamps it to [0, max_samples - sample_room], and
  redraws. The `direction` parameter is currently unused but kept for
  caller compatibility.
  Cleanup: removed the dead `if (True):/else:` scaffolding and the
  unused mouse_y/delta_x locals from the original (behavior unchanged).
  """
  x1 = self.sig_value_start_x;
  x2 = self.sig_value_stop_x;
  # Put the center of the slider on the mouse, not its left edge.
  mouse_x = self.mouse_x - self.slider_width / 2;
  self.sample_start = int(self.max_samples * ( mouse_x - x1 ) / ( x2-x1 ));
  # Prevent scrolling too far to right or left
  if ( self.sample_start + self.sample_room > self.max_samples ):
    self.sample_start = int(self.max_samples - self.sample_room);
  if ( self.sample_start < 0 ):
    self.sample_start = 0;
  screen_refresh( self );
  return;
def mouse_event_single_click( self ):
  """Snap the nearest cursor to the mouse position on a single click."""
  parms = [ str(self.mouse_x), str(self.mouse_y) ];
  proc_cmd( self, "cursor_snap", parms );
  return;
def mouse_event_zoom_scroll( self, direction ):
  """Zoom about the sample under the mouse wheel.

  direction == +1 zooms in 2x (capped at 100x); otherwise the stop_zoom
  gated block below zooms out 2x. Afterwards self.sample_start is
  shifted so the sample that was under the mouse stays under the mouse.

  NOTE(review): the `else:` arm under `if ( True ):` is unreachable dead
  code. Also, when direction == +1 the `self.stop_zoom` block below still
  executes, halving zoom_x right after it was doubled -- presumably
  set_zoom_x() updates self.stop_zoom to make this correct; confirm
  before restructuring.
  """
  # Remember which sample sits under the mouse so it can be re-centered.
  ( sample, Null ) = get_sample_at_mouse( self, self.mouse_x, self.mouse_y );
  if ( direction == +1 ):
    if ( True ):
#     new_zoom_x = self.zoom_x * 1.25;
      new_zoom_x = self.zoom_x * 2.00;
      if ( new_zoom_x > 100 ):
        new_zoom_x = 100.0;# Don't ZoomIn too far
      set_zoom_x( self, new_zoom_x );
    else:
      sample_room = self.sample_room * 2.00;
      new_sample  = self.sample_start - sample_room // 4;
      if ( ( new_sample + sample_room ) > self.max_samples ):
        proc_cmd( self, "zoom_full", [] );
        return;
  if ( self.stop_zoom == False ):
#   new_zoom_x = self.zoom_x / 1.25;
    new_zoom_x = self.zoom_x / 2.00;
#   if ( new_zoom_x < 0.1 ):
#     new_zoom_x = 0.1;
    set_zoom_x( self, new_zoom_x );
  # Now see what sample is at the mouse position and adjust start accordingly
  # so that original sample is still under the mouse
  (new_sample,Null)=get_sample_at_mouse( self, self.mouse_x, self.mouse_y );
  sample_offset = sample - new_sample;
  self.sample_start += sample_offset;
  if ( self.sample_start < 0 ):
    self.sample_start = 0;
  screen_refresh( self );
  return;
def mouse_event_horizontal_drag( self, direction ):
  """Placeholder for horizontal drag panning; intentionally a no-op.

  The original drag-to-pan implementation was disabled upstream; the
  stub remains so existing callers stay valid.
  """
  return;
def mouse_event_move_cursor( self ):
  """Drag the cursor under the original button-down point to the mouse."""
  (self.mouse_x, self.mouse_y) = self.pygame.mouse.get_pos();
  (sample, _unused) = get_sample_at_mouse( self, self.mouse_x, self.mouse_y );
  # A cursor moved, so the cached cursor-value surface is stale.
  self.curval_surface_valid = False;
  y_dn = self.mouse_btn1dn_y;
  for cur_obj in self.cursor_list:
    # Hit-test against where the button went DOWN, not the live mouse y,
    # so the grabbed cursor doesn't change mid-drag.
    if ( cur_obj.y < y_dn < cur_obj.y + self.txt_height ):
      cur_obj.sample = int( max( sample, 0 ) );  # Clamp to first sample
      # Exactly one cursor may be selected at a time.
      for each in self.cursor_list:
        each.selected = ( each is cur_obj );
  screen_refresh( self );
  return;
def mouse_event_vertical_drag_wip( self ):
  """While a signal name is being dragged, draw the insertion marker line."""
  sig_obj = get_sig_obj_at_mouse( self, self.mouse_x, self.mouse_y );
  if ( sig_obj is not None ):
    # Horizontal insert marker spans the signal-name column, drawn just
    # above the signal row currently under the mouse.
    line_y = sig_obj.y - 1;
    screen_erase( self );
    draw_screen( self );
    self.pygame.draw.line( self.screen, self.color_fg,
                           ( self.sig_name_start_x, line_y ),
                           ( self.sig_name_stop_x,  line_y ), 1 );
    screen_flip( self );
  return;
# TODO: This doesn't handle invisible signals
def mouse_event_vertical_drag_done( self, direction ):
  """Finish a vertical signal-name drag by reordering the signal list.

  Moves the signal under the button-down point to the row under the
  button-up point, then flushes surfaces and refreshes. The `direction`
  parameter is unused but kept for caller compatibility.
  TODO: invisible signals are not accounted for when computing indices.
  """
  ( _ , index_dn ) = get_sample_at_mouse( self, self.mouse_btn1dn_x,
                                                self.mouse_btn1dn_y );
  ( _ , index_up ) = get_sample_at_mouse( self, self.mouse_btn1up_x,
                                                self.mouse_btn1up_y );
  # BUGFIX: guard for None BEFORE the > comparison. In Python3 comparing
  # None with an int raises TypeError, which fired whenever a drag
  # started or ended outside of any signal row.
  if ( index_up is None or index_dn is None ):
    return;
  if ( index_up > index_dn ):
    index_up -= 1;# Compensate for pop() shifting later indices down
  print( "index_up = " + str( index_up ));
  print( "index_dn = " + str( index_dn ));
  self.signal_list.insert( index_up, self.signal_list.pop( index_dn ) );
  flush_surface_cache( self );
  screen_refresh( self );
  return;
def mouse_event_area_drag_wip( self ):
  """Rubber-band a rectangle while dragging out a zoom region."""
  # Only applies when the drag started inside the waveform value region.
  if ( self.mouse_btn1dn_x <= self.sig_value_start_x ):
    return;
  anchor_x = self.mouse_btn1dn_x;
  anchor_y = self.mouse_btn1dn_y;
  width  = self.mouse_x - anchor_x;
  height = self.mouse_y - anchor_y;
  screen_erase( self );
  self.pygame.draw.rect( self.screen, self.color_fg,
                         ( anchor_x, anchor_y, width, height ), 1 );
  draw_screen( self );
  screen_flip( self );
  return;
def mouse_event_area_drag_done( self ):
  """Zoom to the sample range spanned by a completed area drag."""
  if ( self.mouse_btn1dn_x <= self.sig_value_start_x ):
    return;  # Drag did not start inside the waveform value region
  ( dn_sample, _unused ) = get_sample_at_mouse( self, self.mouse_btn1dn_x,
                                                      self.mouse_btn1dn_y );
  ( up_sample, _unused ) = get_sample_at_mouse( self, self.mouse_btn1up_x,
                                                      self.mouse_btn1up_y );
  # Order the endpoints so the requested range is always low..high.
  lo = min( int(dn_sample), int(up_sample) );
  hi = max( int(dn_sample), int(up_sample) );
  proc_cmd( self, "zoom_to" , [str(lo),str(hi)] );
  return;
###############################################################################
# Given a mouse position, return the section of screen that it is in
# as a txt string "signal_name","signal_value","cursor","slider"
def get_mouse_region( self, mouse_x, mouse_y ):
  """Classify a pixel location into a named screen region.

  Returns one of: "signal_expand", "signal_name", "signal_value",
  "scroll_left", "scroll_right", "cursor", "slider", or "" when no
  region matches (or a popup is open). The checks below are a
  priority-ordered chain; their order matters and must be preserved.
  """
  if ( self.popup_x != None ):
    return ""; # No region if a popup is open
  if ( mouse_x > self.sig_name_start_x and
       mouse_x < self.sig_name_stop_x  and
       mouse_y > self.sig_name_start_y and
       mouse_y < self.sig_name_stop_y+self.txt_height ):
    # See if Click in the net name "[+]" region
#   flush_surface_cache( self );
    sig_obj = get_sig_obj_at_mouse( self, mouse_x, mouse_y );
    if ( sig_obj != None ):
      # The "[+]" expand widget sits left of the name, indented one blank
      # cell per hierarchy level; measure its rendered width to hit-test.
      txt1 = self.font.render( "   ",True,self.color_fg, self.color_bg);
      txt2 = self.font.render( "[+]",True,self.color_fg, self.color_bg);
      if ( mouse_x > self.sig_name_start_x and
           mouse_x < ( ( self.sig_name_start_x ) + \
                       ( sig_obj.hier_level * txt1.get_width() ) + \
                       ( txt2.get_width() ) ) ):
        return "signal_expand";
      else:
        return "signal_name";
  if ( mouse_x > self.sig_value_start_x and
       mouse_x < self.sig_value_stop_x and
       mouse_y > self.sig_value_start_y and
       mouse_y < self.sig_value_stop_y+self.txt_height ):
    return "signal_value";
  # Gap between the name column and the value column scrolls left.
  if ( mouse_x > self.sig_name_stop_x and
       mouse_x < self.sig_value_start_x and
       mouse_y > self.sig_value_start_y and
       mouse_y < self.sig_value_stop_y+self.txt_height ):
    return "scroll_left";
  if ( mouse_x > self.sig_value_stop_x and
       mouse_y > self.sig_value_start_y and
       mouse_y < self.sig_value_stop_y+self.txt_height ):
    return "scroll_right";
  if ( mouse_x > self.sig_value_start_x and
       mouse_x < self.sig_value_stop_x and
       mouse_y > self.cursor_start_y and
       mouse_y < self.cursor_stop_y+self.txt_height ):
    return "cursor";
  # Everything below the cursor strip (within the value columns) is slider.
  if ( mouse_x > self.sig_value_start_x and
       mouse_x < self.sig_value_stop_x and
       mouse_y > self.cursor_stop_y ):
    return "slider";
  return "";
###############################################################################
# direction -1=Backwards,+1=Forwards,0=Both
# value= Binary, Hex or "edge" ( transition )
# Returns sample number
def search_values( self, sig_obj, sample_start, search_value, direction ):
  """Scan sig_obj.values outward from sample_start for search_value.

  direction: -1 searches backwards only, +1 forwards only, 0 both ways.
  search_value is a literal sample value to match, or the string "edge"
  to match any transition. Returns the sample index of the hit, or
  sample_start when nothing is found (or sig_obj is falsy).
  """
  found_sample = sample_start;  # Default result when no match exists
  if ( sig_obj ):
    values = sig_obj.values;
    # A direction of -1/+1 pre-completes the unwanted side of the scan.
    fwd_done = ( direction == -1 );
    bwd_done = ( direction == +1 );
    step = 0;
    prev_fwd = values[ sample_start ];
    prev_bwd = values[ sample_start ];
    while ( not ( fwd_done and bwd_done ) ):
      step += 1;
      # Forward (rightward) probe.
      if ( sample_start + step < len( values ) ):
        probe = values[ sample_start + step ];
        if ( search_value == probe or
             ( search_value == "edge" and probe != prev_fwd ) ):
          fwd_done = True;
          found_sample = sample_start + step;
        prev_fwd = probe;
      else:
        fwd_done = True;  # Ran off the right end of the capture
      # Backward (leftward) probe.
      if ( sample_start - step >= 0 ):
        probe = values[ sample_start - step ];
        if ( search_value == probe or
             ( search_value == "edge" and probe != prev_bwd ) ):
          bwd_done = True;
          found_sample = sample_start - step;
          prev_bwd = probe;
      else:
        bwd_done = True;  # Ran off the left end of the capture
  return found_sample;
###############################################################################
# Given position of mouse, return the sample number, and signal index
def get_sample_at_mouse( self, mouse_x, mouse_y ):
  """Map a pixel location to a ( sample_number, signal_list_index ) pair.

  signal_index is None when mouse_y is not over any visible signal row;
  if several rows were to overlap, the LAST match wins (original
  behavior preserved).
  """
  pixel_offset = mouse_x - self.sig_value_start_x;
  sample_num = int( pixel_offset / self.zoom_x + self.sample_start );
  signal_index = None;
  for ( idx, sig_obj ) in enumerate( self.signal_list ):
    # Only visible rows occupy vertical space on screen.
    if ( sig_obj.visible and sig_obj.y < mouse_y < sig_obj.y + sig_obj.h ):
      signal_index = idx;
  return ( sample_num, signal_index );
###############################################################################
# Given position of mouse, return the sig_obj
def get_sig_obj_at_mouse( self, mouse_x, mouse_y ):
  """Return the first visible signal whose row contains mouse_y, else None.

  mouse_x is accepted for interface symmetry but plays no part in the
  hit test.
  """
  hits = ( s for s in self.signal_list
           if s.visible and s.y < mouse_y < s.y + s.h );
  return next( hits, None );
###############################################################################
# Given name of a sig_obj, return that sig_obj
def get_sig_obj_by_name( self, name ):
  """Return the first signal whose .name equals name, or None."""
  return next( ( s for s in self.signal_list if s.name == name ), None );
###############################################################################
def log( self, txt_list ):
  """Append each item of txt_list to the open log file, one per line."""
  lines = [ str(each) + "\n" for each in txt_list ];
  self.file_log.writelines( lines );
  return;
###############################################################################
def init_help( self ):
  """Return the bd_shell help screen as a list of text lines."""
  # One list literal instead of repeated appends; text unchanged.
  a = [
   "#####################################################################",
   "# SUMP2 BlackMesaLabs  GNU GPL V2 Open Source License. Python 3.x  #",
   "# (C) Copyright 2016 Kevin M. Hubbard - All rights reserved.       #",
   "#####################################################################",
   "# bd_shell Commands                                                 #",
   "#  env               : Display all assigned variables and values   #",
   "#  print var         : Display value of variable 'var'             #",
   "#  foo = bar         : Assign 'bar' to the variable 'foo'          #",
   "#  var_bs foo bar    : Set bits 'bar' inside variable 'foo'        #",
   "#  var_bc foo bar    : Clear bits 'bar' inside variable 'foo'      #",
   "#  help              : Display help page ( you're looking at it )  #",
   "#  quit              : Quit the SUMP2 application                  #",
   "#  gui or NULL<ENTER>: Return from BD_SHELL to GUI Interface       #",
   "#  source filename   : Source an external command script           #",
   "#  sleep,sleep_ms n  : Pause of n seconds or milliseconds          #",
   "# UNIX Commands                                                    #",
   "#  pwd,mkdir,cd,ls,cp,vi                                           #",
   "# Backdoor Commands                                                #",
   "#  w addr data       : Write data to addr                          #",
   "#  w addr data data  : Write multiple dwords                       #",
   "#  r addr            : Read data from addr                         #",
   "#  r addr dwords     : Read multiple dwords starting at addr       #",
   "# GUI Commands                                                     #",
   "#  crop_to_cursors   : Minimize sample set to cursor region        #",
   "#  save_png          : Save current screen image to PNG file       #",
   "#  save_vcd          : Save current capture to VCD file            #",
   "#  bd_shell          : Switch from GUI to a bd_shell CLI           #",
   "# SUMP Commands                                                    #",
   "#  sump_arm timeout  : Arm SUMP2 engine and wait for timeout sec   #",
   "#  sump_arm_rle n    : Arm SUMP2 engine and wait for n seconds     #",
   "#  sump_stop         : Stop the SUMP2 engine                       #",
   "#  sump_status       : Display status of SUMP2 engine              #",
   "#  acquire_single    : Arm for Single non-RLE acquisition          #",
   "#  acquire_continuous: Arm for non-RLE acquisition and loop        #",
   "#  acquire_stop      : Stop any pending arming                     #",
   "#  acquire_rle_1x    : Arm for RLE acquisition plus dword data     #",
   "#  acquire_rle_8x    : Arm for RLE acquisition, 8x decimated       #",
   "#  acquire_rle_64x   : Arm for RLE acquisition, 64x decimated      #",
   "#####################################################################",
  ];
  return a;
###############################################################################
def init_manual( self ):
  """Return the built-in SUMP2 user manual as a list of display strings.

  The list is rendered one string per line by the GUI text viewer and is
  also the source text dumped for the 'manual' command. The content is
  static; trailing spaces pad lines to a fixed display width.
  """
  a = [];
  a+=["#####################################################################"];
  a+=["# SUMP2 by BlackMesaLabs GNU GPL V2 Open Source License. Python 3.x "];
  a+=["# (C) Copyright 2016 Kevin M. Hubbard - All rights reserved. "];
  a+=["#####################################################################"];
  a+=["1.0 Scope "];
  a+=[" This document describes the SUMP2 software and hardware. "];
  a+=[" "];
  a+=["2.0 Software Architecture "];
  a+=[" The SUMP2 application is a Python 3.5 script using the PyGame module"];
  a+=[" for mouse and graphical user interface. Communication to hardware is"];
  a+=[" via TCP Socket communications to a BD_SERVER.py instance. The SW is "];
  a+=[" architected as a GUI wrapper around a command line application with "];
  a+=[" a bd_shell interface. When the PyGame GUI is used, mouse menu "];
  a+=[" selections create commands that are then interpreted by bd_shell. "];
  a+=[" In theory, sump2.py may be executed without PyGame as a command line"];
  a+=[" only program to arm the sump2 hardware and then dump captured data "];
  a+=[" to a VCD file for offline viewing by another application. "];
  a+=[" "];
  a+=["3.0 Command Descriptions "];
  a+=[" Zoom_In : Increase signal view magnification 2x "];
  a+=[" Zoom_Out : Decrease signal view magnification 2x "];
  a+=[" Zoom_Full : View all signal samples : WARNING May be slow "];
  a+=[" Zoom_Previous : Return to previous zoom view. "];
  a+=[" Zoom_to_Cursors : View region bound by cursors "];
  a+=[" Crop_to_Cursors : Reduce sample set to region bound by cursors "];
  a+=[" Cursors_to_View : Bring both cursors into current view "];
  a+=[" Cursor1_to_Here : Bring Cursor1 to mouse pointer "];
  a+=[" Cursor2_to_Here : Bring Cursor2 to mouse pointer "];
  a+=[" Acquire_Single : Arm hardware for single non-RLE acquisition "];
  a+=[" Acquire_Continuous : Arm hardware for looping non-RLE acquisitions "];
  a+=[" Acquire_Stop : Issue a stop to hardware from current Arming "];
  a+=[" Acquire_RLE_1x : Arm hardware for RLE acquisition no decimation "];
  a+=[" Acquire_RLE_8x : Arm hardware for RLE acquisition 8x decimation "];
  a+=[" Acquire_RLE_64x : Arm hardware for RLE acquisition 64x decimation"];
  a+=[" File_Load : Load a bd_shell script file "];
  a+=[" File_Save : Save capture to a VCD,PNG,JPG, etc file "];
  a+=[" Save_Rename : Rename the last file saved "];
  a+=[" Fonts : Increase or Decrease GUI font size "];
  a+=[" BD_SHELL : Close GUI and open a BD_SHELL command line "];
  a+=[" "];
  a+=[" Rename : Rename a selected signal's nickname "];
  a+=[" Insert_Divider : Insert a dummy signal divider "];
  a+=[" Clipboard : Cut and Paste selected signals "];
  a+=[" Visibility : Change visibility. Impacts RLE Compression "];
  a+=[" Trigger_Rising : Set Trigger for Rising edge of selected "];
  a+=[" Trigger_Falling : Set Trigger for Falling edge of selected "];
  a+=[" Trigger_Watchdog : Set Trigger for Watchdog timeout of selected "];
  a+=[" Set_Pattern0 : Advanced Triggering "];
  a+=[" Set_Pattern1 : Advanced Triggering "];
  a+=[" Clear_Pattern_Match: Advanced Triggering "];
  a+=[" Set_Data_Enable : Advanced data sampling "];
  a+=[" Clear_Data_Enable : Advanced data sampling "];
  a+=[" SUMP_Configuration : Modify advanced SUMP variables "];
  a+=[" Acquisition_Length : Configure amount of non-RLE RAM to use "];
  a+=[" "];
  a+=["4.0 SUMP2 Environment Variables "];
  a+=[" bd_connection : Connection type to hardware. tcp only "];
  a+=[" bd_protocol : Communication protocol to HW, poke only "];
  a+=[" bd_server_ip : IP address or localhost for bd_server "];
  a+=[" bd_server_socket : Socket Number for bd_server, 21567 typ"];
  a+=[" sump_addr : 32bit PCI address of sump_ctrl_reg "];
  a+=[" sump_data_enable : Event bits to use for data_enable feature "];
  a+=[" sump_rle_event_en : Event bits to use for RLE capture "];
  a+=[" sump_rle_post_trig_len : Max number of post trigger RLE samples "];
  a+=[" sump_rle_pre_trig_len : Max number of pre trigger RLE samples "];
  a+=[" sump_trigger_delay : Number of clocks to delay trigger "];
  a+=[" sump_trigger_field : Event bits to use for trigger "];
  a+=[" sump_trigger_nth : nTh trigger to trigger on "];
  a+=[" sump_trigger_type : or_rising,or_falling,watchdog,pattern_ris "];
  a+=[" sump_user_ctrl : 32bit user_ctrl field "];
  a+=[" sump_user_pattern0 : 32bit user pattern0 field "];
  a+=[" sump_user_pattern1 : 32bit user pattern1 field "];
  a+=[" sump_watchdog_time : Watchdog timeout for Watchdog trigger "];
  a+=[" "];
  a+=["5.0 SUMP2 Hardware "];
  a+=[" The SUMP2 hardware is a single verilog file with fixed input parms "];
  a+=[" for the depth and width of capture memory to use. A maximum SUMP2 "];
  a+=[" configuration contains a 32bit Block RAM for non-RLE events and a "];
  a+=[" 64bit Block RAM for RLE events and time stamps. In addition to 32 "];
  a+=[" signal events, SUMP2 may also capture 16 DWORDs (512 bits ) of non "];
  a+=[" RLE data. The SUMP2 software automatically adjusts to each instance "];
  a+=[" of hardware for memory depth, width and advanced features. A key "];
  a+=[" feature for acquiring long captures in time is the ability to mask "];
  a+=[" any of the event inputs, which can be used to dramatically reduce "];
  a+=[" event occurrence and support capturing only events of interest. The "];
  a+=[" software supports masking events by double-clicking the signal name "];
  a+=[" prior to arming which hides the signals and masks them from the RLE "];
  a+=[" compression. 10x to 1000x compression is possible run-time for some "];
  a+=[" designs by dynamically masking input events prior to acquisition. "];
  a+=[" --------------- "];
  a+=[" events[31:0] -+->| Trigger Logic |------------------------- "];
  a+=[" | --------------- ----------------- | "];
  a+=[" +---------------------->| RLE Compression | | "];
  a+=[" | --------------- | Timestamp and |<-+ "];
  a+=[" +->| RLE RAM |<---| Addr Generator | | "];
  a+=[" | --------------- ----------------- | "];
  a+=[" | --------------- ----------------- | "];
  a+=[" ->| non-RLE RAM |<-+-| Addr Generator |<- "];
  a+=[" --------------- | ----------------- "];
  a+=[" --------------- | "];
  a+=[" dwords[511:0] -->| non-RLE RAM |<- "];
  a+=[" --------------- "];
  a+=[" "];
  a+=["6.0 Working with large RLE datasets "];
  a+=[" RLE datasets can be overwhelming large to work with in software once"];
  a+=[" samples have been decompressed. Compression ratios of 10,000:1 are "];
  a+=[" possible for some systems. SUMP Software provides internal tools for"];
  a+=[" reducing the hardware captured RLE dataset to more manageable size "];
  a+=[" for both viewing and VCD generation. "];
  a+=[" crop_to_cursors : Permanently crops the number of samples to a "];
  a+=[" region indicated by the cursors. "];
  a+=[" RLE Decimation : 8x and 64x decimation specified at arming will "];
  a+=[" acquire the RLE data and reduce the sample rate "];
  a+=[" by 8x or 64x prior to rendering. "];
  a+=[" Signal Hiding : Hiding a signal prior to acquisition will mask "];
  a+=[" the signal entirely and increase the overall RLE "];
  a+=[" acquisition length. Hiding a signal post acquire "];
  a+=[" speeds up rendering time for remaining signals. "];
  a+=[" "];
  a+=[" 6.1 Bundles "];
  a+=[" The following is an example of manually modifying sump2_wave.txt "];
  a+=[" file in order to group together multiple events into a bundle. "];
  a+=[" /my_cnt -bundle -hex "];
  a+=[" /event[12] -nickname event[12] "];
  a+=[" /event[13] -nickname event[13] "];
  a+=[" /event[14] -nickname event[14] "];
  a+=[" /event[15] -nickname event[15] "];
  a+=[" "];
  a+=["7.0 History "];
  a+=[" The original OSH+OSS SUMP was designed in 2007 as an external logic "];
  a+=[" logic analyzer using a Xilinx FPGA eval board for capturing external"];
  a+=[" electrical signals non compressed to all available FPGA block RAM. "];
  a+=[" See http://www.sump.org/projects/analyzer/ "];
  a+=[" The original developer published the serial communication protocol "];
  a+=[" and also wrote a Java based waveform capture tool. The simplicity of"];
  a+=[" the protocol and the quality and maintenance of the Open-Source Java"];
  a+=[" client has inspired many new SUMP compliant projects such as: "];
  a+=[" 'Open Logic Sniffer' : https://www.sparkfun.com/products/9857 "];
  a+=[" "];
  a+=[" 7.1 SUMP1-RLE ( 2014 ) "];
  a+=[" Black Mesa Labs developed the SUMP1-RLE hardware in 2014 as a "];
  a+=[" software protocol compatible SUMP engine that was capable of real "];
  a+=[" time hardware compression of samples ( Run Length Encoded ). The "];
  a+=[" idea of the project was to leverage the open-source Java software "];
  a+=[" and couple it with new hardware IP that was capable of storing deep"];
  a+=[" capture acquisitions using only a single FPGA Block RAM, allowing "];
  a+=[" SUMP to be used internally with existing FPGA designs rather than "];
  a+=[" a standalone device. FPGA vendor closed license logic analyzers all"];
  a+=[" store using no compression requiring vast amount of Block RAMS to "];
  a+=[" be useful and typically do not fit will within the limited fabric "];
  a+=[" resources of an existing FPGA design requiring debugging. SUMP1-RLE"];
  a+=[" was later enhanced to include 2 DWORDs of sampled data along with "];
  a+=[" the RLE compressed signal events. This enhancement required new "];
  a+=[" software which was written in .NET Powershell for Windows platform."];
  a+=[" "];
  a+=[" 7.2 SUMP2-RLE ( 2016 ) "];
  a+=[" SUMP2 is a software and hardware complete redesign to improve upon "];
  a+=[" the SUMP1-RLE concept. For SUMP2 the .NET software was tossed due "];
  a+=[" to poor user interface performance and replaced with a PyGame based"];
  a+=[" VCD waveform viewer ( chip_wave.py also from BML ). The SUMP2 HW "];
  a+=[" is now a single Verilog file with no backwards compatibility with "];
  a+=[" any legacy SUMP hardware or software systems. SUMP2 hardware is "];
  a+=[" designed to capture 512bits of DWORDs and 32bits of events versus "];
  a+=[" the SUMP1 limits of 16 event bits and 64bits of DWORDs. Sample "];
  a+=[" depth for SUMP2 is now completely defined by a hardware instance "];
  a+=[" with software that automatically adapts. The RLE aspect of SUMP2 "];
  a+=[" is optional and not required for simple data intensive captures. "];
  a+=[" SUMP2 software includes bd_shell support for changing variables "];
  a+=[" on the fly and providing simple low level hardware access to regs. "];
  a+=[" "];
  a+=["8.0 BD_SERVER.py "];
  a+=[" The SUMP2.py application does not communicate directly to hardware "];
  a+=[" but instead uses BD_SERVER.py as an interface layer. BD_SERVER is "];
  a+=[" a multi use server application that accepts requests via TCP to "];
  a+=[" read and write to low level hardware and then translates those "];
  a+=[" requests using one of many low level hardware protocols available. "];
  a+=[" BD_SERVER allows the low level communications to easily change from"];
  a+=[" interfaces like USB FTDI serial to PCI without requiring any change"];
  a+=[" to the high level application. This interface also supports the "];
  a+=[" debugging of an embedded system from a users regular desktop with "];
  a+=[" a standard Ethernet or Wifi connection between the two. Typical use"];
  a+=[" is to run both python applications on same machine and use the TCP "];
  a+=[" localhost feature within the TCP stack for communications. "];
  a+=[" "];
  a+=[" ------------ -------------- --------------- "];
  a+=[" | sump2.py |<------->| bd-server.py |<------->| SUMP Hardware | "];
  a+=[" ------------ Ethernet -------------- USB,PCI --------------- "];
  a+=[" "];
  a+=["9.0 License "];
  a+=[" This hardware and software is released under the GNU GPLv2 license. "];
  a+=[" Full license is available at http://www.gnu.org "];
  a+=[" "];
  return a;
###############################################################################
def init_vars( self, file_ini ):
  """Return the application variable dictionary.

  Starts from hard-coded defaults, then overlays any 'name = value'
  assignments found in file_ini (one per line, '=' may be unspaced).
  Lines whose first token begins with '#' are treated as comments.
  If file_ini does not exist, a warning is printed and the defaults
  are returned unchanged. All values are kept as strings.
  """
  app_vars = {
    "font_name"                  : "dejavusansmono",
    "font_size"                  : "12",
    "file_in"                    : "dut.vcd",
    "file_log"                   : "sump2_log.txt",
    "color_screen_background"    : "000000",
    "color_screen_foreground"    : "00FF00",
    "screen_width"               : "800",
    "screen_height"              : "600",
    "cursor_unit"                : "clocks",
    "cursor_mult"                : "1.0",
    "bd_connection"              : "tcp",
    "bd_protocol"                : "poke",
    "bd_server_ip"               : "localhost",
    "bd_server_socket"           : "21567",
    "uut_name"                   : "UUT",
    "sump_addr"                  : "00000090",   # Addr of sump2_ctrl_reg
    "sump_script_inc_filter"     : "*.txt",
    "sump_script_exc_filter"     : "sump2_*.txt",
    "sump_trigger_type"          : "or_rising",
    "sump_trigger_field"         : "00000000",
    "sump_trigger_delay"         : "0000",
    "sump_trigger_nth"           : "0001",
    "sump_acquisition_len"       : "44",
    "sump_rle_event_en"          : "FFFFFFFF",
    "sump_rle_pre_trig_len"      : "00100000",
    "sump_rle_post_trig_len"     : "00100000",
    "sump_user_ctrl"             : "00000000",
    "sump_user_pattern0"         : "00000000",
    "sump_user_pattern1"         : "00000000",
    "sump_data_enable"           : "00000000",
    "sump_watchdog_time"         : "00001000",
  };
  import os;
  if not os.path.exists( file_ini ):
    print( "Warning: Unable to open " + file_ini);
    return app_vars;
  with open( file_ini, 'r' ) as file_in:
    ini_lines = file_in.readlines();
  for line in ini_lines:
    # Pad '=' with spaces so 'foo=bar' and 'foo = bar' both tokenize;
    # the [None]*4 tail avoids IndexError on short lines.
    tokens = line.replace("="," = ").strip().split() + [None] * 4;
    if ( tokens[1] == "=" and tokens[0][0:1] != "#" ):
      app_vars[ tokens[0] ] = tokens[2];
  return app_vars;
###############################################################################
# Dump all the app variables to ini file when application quits.
def var_dump( self, file_ini ):
  """Persist every application variable to file_ini as 'key = value'.

  Writes a fixed comment header first, then all entries of self.vars
  sorted alphabetically by line. Called when the application quits.
  """
  log( self, ["var_dump()"] );
  header = [
    "# [" + file_ini + "]\n",
    "# WARNING: \n",
    "# This file is auto generated on application exit.\n",
    "# Safe to change values, but comments will be lost.\n",
  ];
  body = sorted( [ key + " = " + self.vars[key] + "\n" for key in self.vars ] );
  file_out = open( file_ini, 'w' );
  for each in header + body:
    file_out.write( each );
  file_out.close();
  return;
def list2file( self, file_name, my_list ):
  """Write each string in my_list to file_name, one element per line.

  Overwrites any existing file. Fix: the 'with' block guarantees the
  file handle is closed even if a write raises (the original leaked
  the handle on exception).
  """
  with open( file_name, 'w' ) as file_out:
    for each in my_list:
      file_out.write( each + "\n" );
  return;
def tuplelist2file( self, file_name, my_list ):
  """Write a list of (dword,dword) tuples to file_name as hex pairs.

  Each tuple becomes one line of two zero-padded 8-digit lowercase hex
  values, e.g. (1,255) -> '00000001 000000ff'. Overwrites any existing
  file. Fix: the 'with' block guarantees the file handle is closed
  even if a write raises (the original leaked the handle on exception).
  """
  with open( file_name, 'w' ) as file_out:
    for (dw1,dw2) in my_list:
      file_out.write("%08x %08x" % ( dw1,dw2 ) + "\n" );
  return;
###############################################################################
# Command Line BD_SHELL
def bd_shell( self, cmd_start = "" ):
  """Run the interactive command-line shell, suspending the GUI.

  Tears down the pygame display, then reads single keypresses from the
  Windows console (msvcrt) and accumulates them into a command buffer.
  On <ENTER> the buffer is tokenized and dispatched via proc_cmd();
  an empty command or 'gui' exits the loop and restores the GUI.
  cmd_start optionally pre-loads the first command buffer.
  Windows-only: depends on the msvcrt console API.
  """
  log( self, ["bd_shell()"] );
  import pygame;
  loop_jk = True;
  import msvcrt;# Note: Windows specific
  print("\nMode=BD_SHELL : Enter NULL command to return to GUI");
  self.context = "cli";
  pygame.display.quit();
  self.gui_active = False;
  print("");
  sys.stdout.write( self.prompt );
  sys.stdout.write( cmd_start );
  sys.stdout.flush();
  h_cnt = 1;# Command history count
  key_buf = cmd_start;
  while ( loop_jk == True ):
    ch = msvcrt.getch();# Wait for single key press from DOS-Box
    # NOTE(review): msvcrt.getch() returns bytes on Python 3, so the
    # comparison against the str "\xe0" looks always-True, meaning the
    # special-key (arrow/home/end) branch below may never be taken and
    # ch.decode() could raise on a 0xE0 prefix byte -- confirm on Windows.
    if ( ch != "\xe0" ):
      ch = ch.decode();
    else:
      # K=Left,M=Right,H=Up,P=Down,G=Home,O=End
      ch = msvcrt.getch();# The Special KeyCode
      print( ch );
      ch = "";
    # print( ch );
    # ch = (msvcrt.getch().decode());# Wait for single key press from DOS-Box
    # print( ch );
    # Handle Backspace Erase : back up, overwrite with space, back up again
    if ( ch == chr( 8 ) ):
      sys.stdout.write( str( ch ) );#
      sys.stdout.write( str(" " ) );#
      sys.stdout.write( str( ch ) );#
    else:
      sys.stdout.write( str( ch ) );# Echo typed character to DOS-Box STDOUT
    sys.stdout.flush();
    # If not <ENTER> key then append keypress to a key_buf string
    if ( ch != chr( 13 ) ):
      if ( ch == chr( 8 ) ):
        if ( len(key_buf) > 0 ):
          key_buf = key_buf[:-1];# Subtract last char on Backspace
      else:
        key_buf += str(ch);# Append new character
    elif ( ch == chr( 13 ) ):
      # <ENTER>: empty buffer or 'gui' returns to GUI, else dispatch
      if ( len( key_buf ) == 0 or key_buf == "gui" ):
        loop_jk = False;
      else:
        print( ("%d>"+key_buf+" " ) % h_cnt ); h_cnt +=1;
        # Pad '=' so 'foo=bar' tokenizes; [None]*8 avoids IndexError
        key_buf = key_buf.replace("="," = ");
        words = " ".join(key_buf.split()).split(' ') + [None] * 8;
        if ( words[1] == "=" ):
          cmd = words[1];
          parms = [words[0]]+words[2:];
        else:
          cmd = words[0];
          parms = words[1:];
        rts = proc_cmd( self, cmd, parms );
        for each in rts:
          print( each );
        key_buf = "";
      sys.stdout.write( self.prompt );# "bd>"
      sys.stdout.flush();
  # while ( loop_jk == True ):
  self.context = "gui";
  print("\nMode=GUI");
  # NOTE: set_mode prevents resizing after return to GUI.
  # pygame.display.set_mode();# Set focus back to GUI Window
  display_init( self );
  pygame.display.update();
  flush_surface_cache( self );# Redraw with new values
  draw_screen( self );
  screen_flip( self );
  sump_vars_to_signal_attribs( self );# Assume sump vars were modified
  return;
###############################################################################
# Process Backdoor commands for Writing and Reading to any hardware
def proc_bd_cmd( self, cmd, parms ):
  """Execute a backdoor hardware command: 'w addr data..' or 'r addr [n]'.

  addr may be a hex literal or the name of a variable in self.vars.
  Output redirection is supported: a '>' or '>>' token in parms sends
  the result text to the following filename (overwrite or append)
  instead of returning it. Reads are formatted 8 dwords per line.
  Returns a list of result strings ([] for writes or redirected output).
  """
  log( self, ["proc_bd_cmd() : " + cmd + " " + str( parms ) ] );
  rts = [];
  file_mode = None;
  # Detect shell-style output redirection in the parameter list
  if ( ">" in parms ):
    i = parms.index(">");
    file_mode = "w";# Create new file, overwriting existing
  if ( ">>" in parms ):
    i = parms.index(">>");
    file_mode = "a";# Append to any existing file
  if ( file_mode != None ):
    file_name = parms[i+1];
    file_out = open( file_name, file_mode ); # a or w : Append or Overwrite
    parms = parms[0:i] + [None]*10;# Strip "> foo.txt" prior to processing
  # if ( cmd == "w" or cmd == "r" or cmd == "bs" or cmd == "bc" ):
  if ( cmd == "w" or cmd == "r" ):
    addr = parms[0];
    data = parms[1:];
    # Address may be a variable, so look it up in self.vars first
    if ( self.vars.get( addr ) != None ):
      addr = self.vars[ addr ];
    if ( cmd == "w" ):
      # Collect the non-None hex parms and burst-write them
      data_hex = [];
      for each in data:
        if ( each != None ):
          data_hex += [int(each,16)];
      self.bd.wr( int(addr,16), data_hex );
    if ( cmd == "r" ):
      # Default to a single dword when no count parm was given
      if ( data[0] == None ):
        num_dwords = 1;
      else:
        num_dwords = int( data[0],16 );
      rts = self.bd.rd( int(addr,16) , num_dwords, repeat = False );
      # data_hex = [];
      # for each in rts:
      #   data_hex += ["%08x" % each];
      # rts = data_hex;
      # Format 8 dwords wide per line
      data_hex = "";
      i = 0;
      for each in rts:
        data_hex += ("%08x " % each );
        i += 1;
        if ( i == 8 ):
          i = 0;
          data_hex += "\n";
      rts = [ data_hex ];
  # Redirected output goes to the file; nothing is returned to the caller
  if ( file_mode != None ):
    for each in rts:
      file_out.write( each + "\n" );
    file_out.close();
    rts = [];
  return rts;
###############################################################################
# Generate a demo vcd file to display if the hardware isn't present
def make_demo_vcd( self ):
  """Generate a synthetic VGA-like capture and save it as sump2_demo.vcd.

  Used when real hardware is not present so the viewer has something to
  display. Simulates 10,000 samples of hsync/vsync counters plus three
  sparsely-random pixel bits, converts them via TXT2VCD and writes the
  VCD file. Returns the VCD filename.
  """
  filename_vcd = "sump2_demo.vcd";
  txt2vcd = TXT2VCD();# Instantiate Class for the VCD Conversion
  # line-0 is the signal name list terminated by the clock period
  sample_lines = [ "hsync vsync pixel_r pixel_g pixel_b 10000" ];
  import random;
  # Roughly 1-in-1001 odds that any pixel bit is high on a given sample
  rnd_list = [0]*1000 + [1];
  h = 0;
  v = 597;
  for i in range( 0, 10000, 1):
    h += 1;
    hsync = 0;
    vsync = 0;
    pixel_r = random.choice( rnd_list );
    pixel_g = random.choice( rnd_list );
    pixel_b = random.choice( rnd_list );
    if ( h >= 800 ):
      hsync = 1;
    if ( h == 810 ):
      hsync = 1; h = 0;
      v += 1;
      if ( v == 600 ):
        v = 0;
      if ( v == 599 ):
        vsync = 1;
    sample_lines += [ "%d %d %d %d %d" % ( hsync,vsync, pixel_r,pixel_g,pixel_b ) ];
  rts = txt2vcd.conv_txt2vcd( self, sample_lines[:] );
  print("Saving " + filename_vcd );
  file_out = open( filename_vcd, "w" );
  for each in rts:
    file_out.write( each + "\n" );# Make Windows Friendly
  file_out.close();
  return filename_vcd;
###############################################################################
# Given a file_header ( like foo_ ), check for foo_0000, then foo_0001, etc
# and return 1st available name.
def make_unique_filename( self, file_header, file_ext ):
  """Return the first non-existing filename of form <header>NNNN<ext>.

  NNNN counts up from 0000 (e.g. foo_0000.png, foo_0001.png, ...)
  until a name is found that does not already exist on disk.
  """
  import os;
  num = 0;
  file_name = file_header + ( "%04d" % num ) + file_ext;
  while os.path.exists( file_name ):
    num += 1;
    file_name = file_header + ( "%04d" % num ) + file_ext;
  return file_name;
###############################################################################
# Read in a file and display it
def more_file( self, parms ):
  """Read text file parms[0] and return its lines, whitespace-stripped.

  Returns an empty list on read error (bug fix: previously returned
  None via a bare 'return', which crashed callers that iterate the
  result, e.g. bd_shell's 'for each in rts' print loop).
  """
  log( self, ["more_file() " + str( parms ) ] );
  file_name = parms[0];
  try: # Read Input File; 'with' guarantees the handle is closed
    with open( file_name , "r" ) as file_in:
      file_lines = file_in.readlines();
  except Exception:
    print( "ERROR Input File: "+file_name);
    return [];# Was bare 'return' (None); callers expect a list
  return list(map(str.strip, file_lines));# Chomp all the lines
###############################################################################
# interpret a bd_shell script or wave file
# a wave file is easy to spot as 1st char on each line is a "/"
def source_file( self, parms ):
  """Interpret a bd_shell script file or a wave format file (parms[0]).

  A wave file is detected by any line starting with '/'; it is handed
  to load_format() and the screen is refreshed. Otherwise each
  non-comment line is parsed as a 'foo = bar' assignment or a
  'cmd parm...' command and dispatched through proc_cmd(), with the
  returned text accumulated.

  Returns a list of result strings; empty list on file read error
  (bug fix: previously returned None via a bare 'return', which
  crashed callers that iterate or extend the result).
  """
  log( self, ["source_file() " + str( parms ) ] );
  file_name = parms[0];
  rts = [];
  try: # Read Input File; 'with' guarantees the handle is closed
    with open( file_name , "r" ) as file_in:
      file_lines = file_in.readlines();
  except Exception:
    print( "ERROR Input File: "+file_name);
    return rts;# Was bare 'return' (None); callers expect a list
  # Any line whose first token starts with '/' marks a wave format file
  is_wave = False;
  for each in file_lines:
    words = " ".join(each.split()).split(' ') + [None] * 20;
    if ( words[0][0:1] == "/" ):
      is_wave = True;
  if ( is_wave == True ):
    load_format( self, file_name );
    self.name_surface_valid = False;
    screen_refresh( self );
  else:
    for each in file_lines:
      # Pad '=' so 'foo=bar' tokenizes; [None]*20 avoids IndexError
      each = each.replace("="," = ");
      words = " ".join(each.split()).split(' ') + [None] * 20;
      if ( words[0][0:1] != "#" ):
        if ( words[1] == "=" ):
          cmd = words[1];
          parms = [words[0]]+words[2:];
        else:
          cmd = words[0];
          parms = words[1:];
        rts += proc_cmd( self, cmd, parms );
  return rts;
###############################################################################
# Process generic unknown commands ( GUI,Shell,Backdoor, SUMP )
def proc_cmd( self, cmd, parms ):
log( self, ["proc_cmd() " + cmd + " " + str( parms )] );
# print( cmd, parms );
rts = [];
if ( cmd == None ):
return rts;
cmd = cmd.lower();
# !! retrieves last command
if ( cmd == "!!" ):
cmd = self.last_cmd;
else:
self.last_cmd = cmd;
self.cmd_history.append([ cmd, parms ] );
if ( cmd[0:1] == "!" ):
try:
h_num = cmd[1:];
( cmd, parms ) = self.cmd_history[ int(h_num,10) ];
except:
print("Invalid Command History");
# print "proc_cmd()", cmd;
# Commands may have aliases, look them up here:
if ( self.cmd_alias_hash_dict.get( cmd ) != None ):
cmd = self.cmd_alias_hash_dict[ cmd ];
# Returned all assigned variables with their values
if ( cmd == "env" ):
for key in self.vars:
rts += [ key +"=" + self.vars[key] ];
return sorted(rts);
elif ( cmd == "=" ):
self.vars[parms[0]] = parms[1]; # Variable Assignment
elif ( cmd == "var_bs" ):
val = int( self.vars[parms[0]] , 16 );
val = val | int( parms[1], 16 );
self.vars[parms[0]] = ("%08x" % val );
elif ( cmd == "var_bc" ):
val = int( self.vars[parms[0]] , 16 );
val = val & ~int( parms[1], 16 );
self.vars[parms[0]] = ("%08x" % val );
elif ( cmd == "echo" or cmd == "print" ):
try:
rts = [ self.vars[ parms[0] ] ];
except:
rts = [ parms[0] ];
elif ( cmd == "h" or cmd == "history" ):
rts = self.cmd_history;
# rts for ( i , sig_obj ) in enumerate( self.signal_list ):
elif ( cmd == "source" ):
rts = source_file( self, parms );
elif ( cmd == "more" ):
rts = more_file( self, parms );
elif ( cmd == "help" or cmd == "?" ):
rts = self.help;# I'm a funny guy
elif ( cmd == "manual" ):
try:
import os;
filename = "sump2_manual.txt";
if ( self.os_sys == "Linux" ):
os.system('vi ' + filename );
else:
os.system('notepad.exe ' + filename );
except:
rts += ["ERROR: "+cmd+" "+filename ];
elif ( cmd == "bd_shell" ):
bd_shell(self, cmd_start ="" );
elif ( cmd == "quit" or cmd == "exit" ):
self.done=True;
shutdown( self );
elif ( cmd == "sump_connect" ):
sump_connect(self);
elif ( "[" in cmd and
"-" in cmd and
"t" in cmd and
"]" in cmd ):
words = cmd.split("t");
pre_trig = words[0].count("-");
post_trig = words[1].count("-");
acq_len = ( pre_trig << 4 ) + ( post_trig << 0 );
self.vars["sump_acquisition_len"] = ( "%02x" % acq_len );
print( "sump_acquisition_len = " + ( "%02x" % acq_len ));
elif ( cmd == "sump_arm" or
cmd == "sump_arm_rle" or
cmd == "sump_stop" or
cmd == "acquire_single" or
cmd == "acquire_normal" or
cmd == "acquire_continuous" or
"acquire_rle" in cmd or
cmd == "acquire_stop" ):
if ( cmd == "sump_arm" or
cmd == "sump_arm_rle" or
cmd == "acquire_single" or
cmd == "acquire_normal" or
cmd == "acquire_continuous" or
"acquire_rle" in cmd ):
sump_arm(self, True );# Arm the hardware
if ( "acquire_rle" in cmd ):
self.acq_mode = "rle";
else:
self.acq_mode = "nonrle";
else:
sump_arm(self, False);# Cancel an acq in progress
if ( cmd == "acquire_normal" ):
cmd = "acquire_single";
self.acq_state = cmd;
# if sump_arm has a parm then this is CLI and is a seconds timeout
if ( ( cmd=="sump_arm" or cmd=="sump_arm_rle" ) and parms[0] != None ):
timeout = int( parms[0], 16 );
# Loop until timeout or acquired bit is set
while ( timeout > 0 and
( self.sump.rd( addr = None )[0] &
self.sump.status_ram_post ) == 0x00 ):
print("Waiting for trigger..");
sleep( 1 );
timeout = timeout - 1;
if ( timeout > 0 ):
print("ACQUIRED.");
if ( self.acq_mode == "nonrle" ):
trig_i = sump_dump_data(self);# Grab data from hardware
else:
trig_i = sump_dump_rle_data(self);# Grab data from hardware
# Group of OS commands pwd,mkdir,cd,ls,cp,vi
elif ( cmd == "pwd" ):
import os;
rts += [ os.getcwd() ];
elif ( cmd == "mkdir" ):
import os;
try:
os.path.mkdir();
except:
rts += ["ERROR: "+cmd+" "+parms[0] ];
elif ( cmd == "cd" ):
import os;
try:
os.chdir( parms[0] );
except:
rts += ["ERROR: "+cmd+" "+parms[0] ];
elif ( cmd == "ls" ):
import os;
rts += os.listdir( os.getcwd() );
# rts += os.listdir( "*.ini" );
elif ( cmd == "vi" ):
try:
if ( self.os_sys == "Linux" ):
os.system('vi ' + parms[0] );
else:
os.system('notepad.exe ' + parms[0] );
except:
rts += ["ERROR: "+cmd+" "+parms[0] ];
elif ( cmd == "cp" ):
from shutil import copyfile;
try:
copyfile( parms[0], parms[1] );
except:
rts += ["ERROR: "+cmd+" "+parms[0]+" "+parms[1] ];
# elif ( cmd == "sump_dump" ):
# sump_dump_data(self);
# sump_save_txt(self);
# sump_save_txt(self, mode_vcd = True );
# sump_save_vcd( self );
# txt2vcd = TXT2VCD();
# file_in = open( "sump_dump.txt4vcd", "r" );
# file_lines = file_in.readlines();
# file_in.close();
# rts = txt2vcd.conv_txt2vcd( file_lines );
# filename = make_unique_filename( self, "sump_", ".vcd" );
# file_out = open( filename, "w" ); # Append versus r or w
# for each in rts:
# file_out.write( each + "\r\n" );# Make Windows Friendly
# file_out.close();
# file_name = "sump_dump.txt4vcd";
# else:
# file_name = "sump_dump.txt";
elif ( cmd == "save_txt" ):
filename = make_unique_filename( self, "sump2_", ".txt" );
sump_save_txt( self, filename );
elif ( cmd == "save_rename" ):
val1 = self.last_filesave;
val2 = val1;
if ( val1 != None ):
rts = draw_popup_entry(self, ["Save_Rename()", val1],val2);
import os;
try:
os.rename( val1, rts );
draw_header( self,"Save_Rename() : " + val1 + " " + rts );
except:
draw_header( self,"ERROR: Save_Rename() : " + val1 + " " + rts );
# elif ( cmd == "save_vcd" ):
elif ( cmd == "save_vcd" and self.acq_mode == "nonrle" ):
print("save_vcd()");
screen_flip( self );# Only thing changing is the popup selection
# sump_dump_data(self);# Grab data from hardware ( might be in CLI Mode )
filename_txt = make_unique_filename( self, "sump2_", ".txt" );
filename_vcd = make_unique_filename( self, "sump2_", ".vcd" );
draw_popup_msg(self,
["NOTE:","Saving capture to VCD file "+filename_vcd],1);
sump_save_txt(self, filename_txt, mode_vcd = True );
txt2vcd = TXT2VCD();# Instantiate Class for the VCD Conversion
file_in = open( filename_txt, "r" );
file_lines = file_in.readlines();
file_in.close();
# line-0 contains a list of all signal names and ends with clock period
# Iterate the list and replace each signal name with its nickname
words = " ".join(file_lines[0].split()).split(' ');
new_line = "";
for each in words:
nickname = each;# Handles both unknowns and clock period
for sig_obj in self.signal_list:
if ( each == sig_obj.name ):
nickname = sig_obj.nickname;
if ( nickname == "" ):
nickname = each;
new_line += nickname + " ";
vcd_lines = [new_line] + file_lines[1:];
print("conv_txt2vcd()");
rts = txt2vcd.conv_txt2vcd( self, vcd_lines );
# rts = txt2vcd.conv_txt2vcd( vcd_lines );
print("Saving " + filename_vcd );
draw_header( self,"save_vcd() : Saving " + filename_vcd );
file_out = open( filename_vcd, "w" ); # Append versus r or w
for each in rts:
file_out.write( each + "\n" );# Make Windows Friendly
file_out.close();
draw_header( self,"save_vcd() : Saved " + filename_vcd );
self.last_filesave = filename_vcd;
rts = ["save_vcd() Complete " + filename_vcd ];
elif ( cmd == "save_vcd" and self.acq_mode == "rle" ):
print("save_rle_vcd()");
screen_flip( self );# Only thing changing is the popup selection
# if ( self.mode_cli == True ):
# sump_dump_rle_data(self);# Grab data from hardware
filename_txt = make_unique_filename( self, "sump2_rle_", ".txt" );
filename_vcd = make_unique_filename( self, "sump2_rle_", ".vcd" );
draw_popup_msg(self,
["NOTE:","Saving capture to VCD file "+filename_vcd],1);
sump_save_txt(self, filename_txt, mode_vcd = True );
txt2vcd = TXT2VCD();# Instantiate Class for the VCD Conversion
file_in = open( filename_txt, "r" );
file_lines = file_in.readlines();
file_in.close();
# line-0 contains a list of all signal names and ends with clock period
# Iterate the list and replace each signal name with its nickname
words = " ".join(file_lines[0].split()).split(' ');
new_line = "";
for each in words:
nickname = each;# Handles both unknowns and clock period
for sig_obj in self.signal_list:
if ( each == sig_obj.name ):
nickname = sig_obj.nickname;
new_line += nickname + " ";
vcd_lines = [new_line] + file_lines[1:];
print("conv_txt2vcd()");
rts = txt2vcd.conv_txt2vcd( self, vcd_lines );
# rts = txt2vcd.conv_txt2vcd( vcd_lines );
print("Saving " + filename_vcd );
draw_header( self,"save_rle_vcd() : Saving " + filename_vcd );
file_out = open( filename_vcd, "w" ); # Append versus r or w
for each in rts:
file_out.write( each + "\n" );# Make Windows Friendly
file_out.close();
draw_header( self,"save_rle_vcd() : Saved " + filename_vcd );
self.last_filesave = filename_vcd;
rts = ["save_rle_vcd() Complete " + filename_vcd ];
elif ( cmd == "sump_status" ):
rts_hex = ( self.sump.rd( addr = None )[0] );
rts += [ "%08x" % rts_hex ];
# elif ( cmd == "w" or cmd == "r" or cmd == "bs" or cmd == "bc" ):
elif ( cmd == "w" or cmd == "r" ):
rts = proc_bd_cmd(self, cmd, parms );
elif ( cmd == "sleep" or cmd == "sleep_ms" ):
duration = float(int( parms[0], 16 ));
if ( cmd == "sleep_ms" ):
duration = duration / 1000.0;
sleep( duration );
# elif ( cmd == "debug_vars" ):
# debug_vars( self );
# elif ( cmd == "scroll_toggle" ):
# self.scroll_togl *= -1;
# if ( self.scroll_togl == 1 ):
# print( "Scroll Wheel is Pan");
# else:
# print( "Scroll Wheel is Zoom");
elif ( cmd == "reload" ):
proc_cmd( self, "save_format", ["wave_autosave.do"] );
self.signal_list = [];
file2signal_list( self, self.file_name );
flush_surface_cache( self );
proc_cmd( self, "load_format", ["wave_autosave.do"] );
elif ( cmd == "load" ):
self.file_name = parms[0];
proc_cmd( self, "reload", [""] );
proc_cmd( self, "load_format", [""] );
elif ( cmd == "save_jpg" ):
screen_erase( self );
draw_screen( self );
screen_flip( self );
filename = make_unique_filename( self, "sump2_", ".jpg" );
self.pygame.image.save( self.screen, filename );
draw_header( self,"save_jpg() : Saved " + filename );
self.last_filesave = filename;
elif ( cmd == "save_bmp" ):
screen_erase( self );
draw_screen( self );
screen_flip( self );
filename = make_unique_filename( self, "sump2_", ".bmp" );
self.pygame.image.save( self.screen, filename );
draw_header( self,"save_bmp() : Saved " + filename );
self.last_filesave = filename;
elif ( cmd == "save_png" ):
screen_erase( self );
draw_screen( self );
screen_flip( self );
filename = make_unique_filename( self, "sump2_", ".png" );
self.pygame.image.save( self.screen, filename );
draw_header( self,"save_png() : Saved " + filename );
self.last_filesave = filename;
elif ( cmd == "font_larger" or cmd == "font_smaller" ):
size = int( self.vars["font_size"] );
if ( cmd == "font_larger" ):
size += 2;
else:
size -= 2;
if ( size < 2 ):
size = 2;
self.vars["font_size"] = str( size );
self.font = get_font( self, self.vars["font_name"],self.vars["font_size"]);
self.max_w = 0;
self.max_w_chars = 0;
flush_surface_cache( self );
elif ( cmd == "add_wave" ):
sig_obj = add_wave( self, [ cmd ] + parms );
if ( sig_obj != None ):
self.signal_list.append( sig_obj );
flush_surface_cache( self );
elif ( cmd == "save_format" ):
file_name = parms[0];
if ( file_name == "" ):
# file_name = "wave_" + self.top_module + ".txt";# Default
file_name = "sump2_wave.txt";
save_format( self, file_name, False );
elif ( cmd == "save_selected" ):
file_name = parms[0];
if ( file_name == "" ):
file_name = "wave_" + self.top_module + ".txt";# Default
save_format( self, file_name, True );
load_format( self, file_name );
flush_surface_cache( self );
elif ( cmd == "load_format" ):
file_name = parms[0];
if ( file_name == "" ):
file_name = "wave_" + self.top_module + ".txt";# Default
load_format( self, file_name );
flush_surface_cache( self );
# Check for "SUMP_Configuration" menu items and launch entry popup
elif ( cmd == "sump_trigger_delay" or
cmd == "sump_trigger_nth" or
cmd == "sump_user_ctrl" or
cmd == "sump_user_pattern0" or
cmd == "sump_user_pattern1" or
cmd == "sump_watchdog_time"
):
name = cmd;
val1 = self.vars[ name ];# Original Value
val2 = val1; # New Value to change
rts = draw_popup_entry(self, [cmd, val1],val2);
self.vars[ name ] = rts;
elif ( cmd == "edit_format" ):
import os, subprocess, platform;
file_name = parms[0];
if ( file_name == "" ):
file_name = "wave_" + self.top_module + ".txt";# Default
editor = os.getenv('EDITOR', 'vi')
if ( platform.system() == "Windows" ):
editor = "notepad.exe";
subprocess.call('%s %s' % (editor, file_name), shell=True)
if ( platform.system() == "Windows" ):
self.pygame.event.clear();# Required for Windows
load_format( self, file_name );
flush_surface_cache( self );
elif ( cmd == "delete_format" ):
file_name = parms[0];
if ( file_name == "" ):
file_name = "wave_" + self.top_module + ".txt";# Default
import os;
print( "delete_format() ", file_name);
os.remove( file_name );
self.signal_list = [];
file2signal_list( self, self.file_name );
flush_surface_cache( self );
elif ( cmd == "search" or cmd == "backsearch" ):
if ( cmd == "search" ):
direction = +1;
else:
direction = -1;
# "/" : Search on last search value
# Optionally support "/ foo = bar" and convert to "/ foo bar"
if ( parms[1] == "=" ):
parms[1] = parms[2];
if ( parms[0] == None ):
value = self.last_search_value;
# "/ foo = bar" : Search for foo = bar
elif ( parms[1] != None ):
for each in self.signal_list:
if ( each.name.lower() == parms[0].lower() ):
self.sig_obj_sel = each;
for sig_obj in self.signal_list:
sig_obj.selected = False;# DeSelect All
self.sig_obj_sel.selected = True;
value = parms[1].lower();
break;
# "/ bar" : Search for self.sig_obj_sel = bar
else:
value = parms[0].lower();
self.last_search_value = value; # Support "/<enter>" to search again
self.sample_start = search_values( self, self.sig_obj_sel,
self.sample_start, value, direction );
elif ( cmd == "zoom_out" ):
# if ( self.zoom_x > 0.00001 ):
# print( self.popup_x );
# if ( self.popup_x != None ):
# sample = self.sample_start - sample_room // 4;
if ( True ):
self.prev_sample_start = self.sample_start;
self.prev_sample_stop = self.sample_start + self.sample_room;
sample_room = self.sample_room * 2;
sample = self.sample_start - sample_room // 4;
if ( ( sample + sample_room ) < self.max_samples ):
if ( sample < 0 ): sample = 0;
self.sample_start = sample;
set_zoom_x( self, self.zoom_x / 2.0 );
else:
proc_cmd( self, "zoom_full", [] );
elif ( cmd == "zoom_in" ):
self.prev_sample_start = self.sample_start;
self.prev_sample_stop = self.sample_start + self.sample_room;
# If called from popup, center the zoom on mouse position of popup
# print( self.popup_x );
if ( self.popup_x == None ):
# (sample, Null) = get_sample_at_mouse( self, self.popup_x, self.popup_y );
# sample_room = self.sample_room // 2; # zoom_in results in 1/2 sample_room
# sample = sample - sample_room // 2; # Center on select by sub 1/2
# if ( sample < 0 ):
# sample = 0;
# self.sample_start = sample;
self.sample_start += ( self.sample_room // 4 );
else:
(sample, Null) = get_sample_at_mouse( self, self.popup_x, self.popup_y );
delta = sample - self.sample_start;
delta = delta // 2;
self.sample_start = sample - delta;
if ( self.sample_start < 0 ):
self.sample_start = 0;
# sample = sample - sample_room // 2; # Center on select by sub 1/2
# if ( sample < 0 ):
# sample = 0;
# self.sample_start = sample;
set_zoom_x( self, self.zoom_x * 2.0 );
elif ( cmd == "zoom_previous" ):
if ( self.prev_sample_start != None and
self.prev_sample_stop != None ):
proc_cmd( self, "zoom_to", [str(self.prev_sample_start),
str(self.prev_sample_stop ) ] );
elif ( cmd == "zoom_to_cursors" ):
self.prev_sample_start = self.sample_start;
self.prev_sample_stop = self.sample_start + self.sample_room;
sample_left = None;
sample_right = None;
for cur_obj in self.cursor_list:
if ( sample_left == None ): sample_left = cur_obj.sample;
elif ( sample_right == None ): sample_right = cur_obj.sample;
if ( sample_left != None and sample_right != None ):
if ( sample_left > sample_right ):
sample_left, sample_right = sample_right, sample_left;# Swap
# Now fudge a bit as we want to actually see the cursors after to zoom
delta = sample_right - sample_left;
# If delta is large, use a small percentage, otherwise use a bunch of
# samples. Example is after triggering, cursors are at +/-1 from trigger
if ( delta > 20 ):
sample_left -= delta // 32;
sample_right += delta // 32;
else:
sample_left -= 4*delta;
sample_right += 4*delta;
if ( sample_left < 0 ): sample_left = 0;
if ( sample_right > self.max_samples ): sample_right = self.max_samples;
proc_cmd( self, "zoom_to", [str(sample_left), str( sample_right ) ] );
elif ( cmd == "crop_to_cursors" ):
sample_left = None;
sample_right = None;
for cur_obj in self.cursor_list:
if ( sample_left == None ): sample_left = cur_obj.sample;
elif ( sample_right == None ): sample_right = cur_obj.sample;
if ( sample_left != None and sample_right != None ):
if ( sample_left > sample_right ):
sample_left, sample_right = sample_right, sample_left;# Swap
if ( sample_left < 0 ): sample_left = 0;
if ( sample_right > self.max_samples ): sample_right = self.max_samples;
proc_cmd( self, "crop_to", [str(sample_left), str( sample_right ) ] );
elif ( cmd == "zoom_full" ):
proc_cmd( self, "zoom_to", ["0", str( self.max_samples ) ] );
elif ( cmd == "crop_to" ):
# If a sample range is specified, zoom to it
if ( parms[0] != None and parms[1] != None ):
if ( int( parms[0] ) < int( parms[1] ) ):
crop_to_left = int( parms[0] );
crop_to_right = int( parms[1] );
else:
crop_to_left = int( parms[1] );
crop_to_right = int( parms[0] );
for sig_obj in self.signal_list:
# print( sig_obj.name );
# print( len( sig_obj.values ));
if ( len( sig_obj.values ) >= crop_to_right ):
sig_obj.values = sig_obj.values[crop_to_left:crop_to_right];
recalc_max_samples( self );
proc_cmd( self, "zoom_full", [] );
elif ( cmd == "zoom_to" ):
# If a sample range is specified, zoom to it
if ( parms[0] != None and parms[1] != None ):
if ( int( parms[0] ) < int( parms[1] ) ):
self.zoom_to_left = int( parms[0] );
self.zoom_to_right = int( parms[1] );
else:
self.zoom_to_left = int( parms[1] );
self.zoom_to_right = int( parms[0] );
# Otherwise, zoom in so that current selectec signal is visible
else:
sig_obj = self.sig_obj_sel;
if ( sig_obj.bits_total > 1 ):
# nibs = sig_obj.bits_total / 4;
# nibs = nibs / 2;
nibs = sig_obj.bits_total // 4;
nibs = nibs // 2;
if ( nibs < 2 ):
nibs = 2;
nibs += 1; # Extra whitespace
zoom_x = self.txt_width * nibs;
value_width_x = self.sig_value_stop_x - self.sig_value_start_x;
# value_width_samples = value_width_x / zoom_x;
value_width_samples = int( value_width_x / zoom_x );
self.zoom_to_left = self.sample_start;
self.zoom_to_right = self.sample_start + value_width_samples;
self.sample_start = int( self.zoom_to_left );
# Given the zoom_to region, calculate new zoom_x, it is pixels/samples
# fudge_more_right = 3; # Need to grab more samples then calculated, strang
fudge_more_right = 0; # Need to grab more samples then calculated, strang
# set_zoom_x( self, ( self.sig_value_stop_x - self.sig_value_start_x ) / \
# ( fudge_more_right+self.zoom_to_right - self.zoom_to_left ) );
# Check for divide by zero and set new zoom if safe to, else ignore
if ( ( self.zoom_to_right - self.zoom_to_left ) != 0 ):
set_zoom_x( self,
( 1.0*(self.sig_value_stop_x - self.sig_value_start_x )) / \
( 1.0*( self.zoom_to_right - self.zoom_to_left )) );
else:
print("ERROR: Div-by-zero attempt on set_zoom_x()");
elif ( cmd == "scroll_right" or cmd == "scroll_left" ):
# print "cmd", cmd;
if ( cmd == "scroll_right" ):
direction = 0+int( parms[0] );
else:
direction = 0-int( parms[0] );
self.sample_start += int( direction );
# Prevent scrolling too far to right
if ( self.sample_start + self.sample_room > self.max_samples ):
self.sample_start = int( self.max_samples - self.sample_room );
if ( self.sample_start < 0 ):
self.sample_start = 0;
elif ( cmd == "scroll_up" or cmd == "scroll_down" ):
# Scroll thru the selected signal names. When at the top or bottom of
# the visible window, scroll the window.
self.name_surface_valid = False;
# self.curval_surface_valid = False;
index = 1;# Default if none found
if ( self.sig_obj_sel != None ):
if ( self.sig_obj_sel.selected == True ):
index = self.signal_list.index( self.sig_obj_sel );
self.sig_obj_sel.selected = False; # Deselect last scroll selected
if ( cmd == "scroll_up" ):
direction = 0-int( parms[0] );
else:
direction = 0+int( parms[0] );
# Keep moving in the desired direction until we get a visible signal
obj_is_visible = False;
while ( obj_is_visible == False ):
# Make sure new index is valid
index = index + direction;
if ( index < 0 ):
index = 0;
break;
if ( index >= len( self.signal_list ) ):
index = len( self.signal_list ) -1;
break;
obj_is_visible = self.signal_list[ index ].visible;
# Scroll the signal name viewport if newly selected is outside existing
self.vertical_scrolled_offscreen = False;
if ( index < self.sig_top ):
self.sig_top -= 1;
self.vertical_scrolled_offscreen = True;
flush_surface_cache( self );
if ( index > self.sig_bot ):
self.sig_top += 1;
self.vertical_scrolled_offscreen = True;
flush_surface_cache( self );
# Assign selected signal object to sig_obj_sel
sig_obj = self.signal_list[ index ];
sig_obj.selected = True;
self.sig_obj_sel = sig_obj;
# Rename a signal - popup bd_shell for text entry
# TODO: Would be nicer to have a GUI entry window for single bd_shell cmds
elif ( cmd == "rename" ):
# cmd_start = "rename_signal " + self.sig_obj_sel.name + " ";
# bd_shell(self, cmd_start );
cmd = "Rename_Signal";
val1 = self.sig_obj_sel.name;
val2 = self.sig_obj_sel.nickname;
rts = draw_popup_entry(self, [cmd, val1],val2);
self.sig_obj_sel.nickname = rts;
self.name_surface_valid = False;
# flush_surface_cache( self );# Redraw with new values
# proc_cmd( self, cmd, parms ):
elif ( cmd == "rename_signal" ):
if ( parms[1] != "" ):
for sig_obj in self.signal_list:
if ( sig_obj.name == parms[0] ):
sig_obj.nickname = parms[1];
# self.txt_entry = True; # Enable Dialog Box to show up
#
# # Rename a signal
# elif ( cmd == "rename" ):
# self.txt_entry = True; # Enable Dialog Box to show up
# # Rename a signal ( Process the Text Entry )
# elif ( cmd == "rename_signal" ):
# if ( self.sig_obj_sel.bits_total == 0 ):
# self.sig_obj_sel.name = parms[0]; # A Divider
# else:
# self.sig_obj_sel.nickname = parms[0]; # A Signal
# flush_surface_cache( self );
# Delete selected signal(s) ( Make Invisible )
elif ( cmd == "delete" ):
flush_surface_cache( self );
for sig_obj in self.signal_list:
if ( sig_obj.selected == True ):
index = self.signal_list.index( sig_obj );
self.signal_list[ index ].visible = False;
# del self.signal_list[ index ];
self.sig_obj_sel = None;
# print "deleting ", self.signal_list[ index ].name, str( index );
# Delete selected signal(s)
elif ( cmd == "cut" ):
flush_surface_cache( self );
self.clipboard = [];
for sig_obj in self.signal_list:
if ( sig_obj.selected == True ):
index = self.signal_list.index( sig_obj );
self.clipboard.append( self.signal_list.pop( index ) );
elif ( cmd == "paste" ):
flush_surface_cache( self );
for sig_obj in self.signal_list:
if ( sig_obj.selected == True ):
index = self.signal_list.index( sig_obj );
for each in reversed( self.clipboard ):
self.signal_list.insert( index, each );
break;
# Make all signals visible - only way to undo a Make_Invisible
elif ( cmd == "make_all_visible" ):
flush_surface_cache( self );
for sig_obj in self.signal_list:
sig_obj.visible = True;
sump_signals_to_vars( self );# Update sump variables
# Hide a signal at this mouse location
elif ( cmd == "make_invisible" ):
flush_surface_cache( self );
for sig_obj in self.signal_list:
if ( sig_obj.selected == True ):
sig_obj.visible = False;
sump_signals_to_vars( self );# Update sump variables
# Hide a selected signal. Note that hidden and invisible are different
# hidden means display the signal name, but hide the signal values.
# invisible means don't display at all ( kinda like delete ).
elif ( cmd == "hide" or cmd == "hide_all" ):
flush_surface_cache( self );
for sig_obj in self.signal_list:
if ( sig_obj.selected == True or cmd == "hide_all" ):
sig_obj.hidden = True;
sump_signals_to_vars( self );# Update sump variables
screen_refresh( self );
# Show a selected signal
elif ( cmd == "show" or cmd == "show_all" ):
flush_surface_cache( self );
for sig_obj in self.signal_list:
if ( sig_obj.selected == True or cmd == "show_all" ):
sig_obj.hidden = False;
sig_obj.visible = True;
sump_signals_to_vars( self );# Update sump variables
screen_refresh( self );
# When "Trigger_Rising" or "Trigger_Falling" is selected, set the bit
# in the sump variable and then update the signals to match.
# HERE4
elif ( cmd == "trigger_rising" or cmd == "trigger_falling" or
cmd == "trigger_watchdog" ):
print("Setting new trigger");
# Find which signal is selected
for sig_obj in self.signal_list:
sig_obj.trigger = 0;
if ( sig_obj.selected == True ):
for i in range( 0, 32 , 1):
if ( sig_obj.name == ( "event[%d]" % i ) ):
if ( cmd == "trigger_rising" ):
self.vars["sump_trigger_type"] = "or_rising";
if ( cmd == "trigger_falling" ):
self.vars["sump_trigger_type"] = "or_falling";
if ( cmd == "trigger_watchdog" ):
self.vars["sump_trigger_type"] = "watchdog";
self.vars["sump_trigger_field" ] = ("%08x" % (1<<i) );
sump_vars_to_signal_attribs( self );
# flush_surface_cache( self );
self.name_surface_valid = False;
screen_refresh( self );
# HERE5
elif ( cmd == "set_pattern_0" or cmd == "set_pattern_1" or \
cmd == "clear_pattern_match" ):
# Find which signal is selected
user_pattern0 = int( self.vars["sump_user_pattern0" ],16 );# Mask
user_pattern1 = int( self.vars["sump_user_pattern1" ],16 );# Value
for sig_obj in self.signal_list:
if ( sig_obj.selected == True ):
for i in range( 0, 32 , 1):
if ( sig_obj.name == ( "event[%d]" % i ) ):
if ( cmd == "clear_pattern_match" ):
user_pattern0 = user_pattern0 & ~( 1<<i );# Clear bit
user_pattern1 = user_pattern1 & ~( 1<<i );# Clear bit
else:
user_pattern0 = user_pattern0 | ( 1<<i );# Set bit
if ( cmd == "set_pattern_0" ):
user_pattern1 = user_pattern1 & ~( 1<<i );# Clear bit
self.vars["sump_trigger_type"] = "pattern_rising";
if ( cmd == "set_pattern_1" ):
user_pattern1 = user_pattern1 | ( 1<<i );# Set bit
self.vars["sump_trigger_type"] = "pattern_rising";
self.vars["sump_user_pattern0" ] = ("%08x" % user_pattern0 );
self.vars["sump_user_pattern1" ] = ("%08x" % user_pattern1 );
sump_vars_to_signal_attribs( self );
flush_surface_cache( self );
elif ( cmd == "set_data_enable" or cmd == "clear_data_enable" ):
data_en = int( self.vars["sump_data_enable" ],16 );
for sig_obj in self.signal_list:
if ( sig_obj.selected == True ):
for i in range( 0, 32 , 1):
if ( sig_obj.name == ( "event[%d]" % i ) ):
if ( cmd == "set_data_enable" ):
data_en = data_en | ( 1<<i );# Set bit
elif ( cmd == "clear_data_enable" ):
data_en = data_en & ~( 1<<i );# Clear bit
self.vars["sump_data_enable" ] = ("%08x" % data_en );
sump_vars_to_signal_attribs( self );
flush_surface_cache( self );
# sig_obj = get_sig_obj_by_name( self, ("event[%d]" % i ) );
# flush_surface_cache( self );
# for sig_obj in self.signal_list:
# sig_obj.trigger = 0;
# if ( sig_obj.selected == True ):
# if ( cmd == "trigger_rising" ):
# sig_obj.trigger = +1;
# elif ( cmd == "trigger_falling" ):
# sig_obj.trigger = -1;
# Make a signal Signed, Unsigned or Hex format
elif ( cmd == "signed" or cmd == "unsigned" or cmd == "hex" ):
flush_surface_cache( self );
for sig_obj in self.signal_list:
if ( sig_obj.selected == True ):
sig_obj.format = cmd.lower();# unsigned, signed or hex
# Insert a Divider at this mouse location
elif ( cmd == "insert_divider" ):
flush_surface_cache( self );
(Null,index) = get_sample_at_mouse( self, self.popup_x, self.popup_y );
try:
sig_obj = self.signal_list[ index ];
new_div = signal( name="--------" );
new_div.bits_per_line = 0;
new_div.bits_total = 0;
new_div.bit_top = 0;
new_div.bit_bot = 0;
new_div.format = "";
self.signal_list.insert( index, new_div );
except:
print("ERROR 5519:index = " + str( index ) );
# Expand : Iterate list and make visible signals under current hier level
elif ( cmd == "expand" ):
flush_surface_cache( self );
if ( self.sig_obj_sel.collapsable == True ):
proc_cmd(self, "collapse",[""] );
return;
found_jk = False;
hier_level = -1; # Keeps track of group nesting
for ( i , sig_obj ) in enumerate( self.signal_list ):
if ( found_jk == True ):
if ( sig_obj.hier_level <= hier_level ):
found_jk = False;# Found the endgroup so done
break;
if ( sig_obj.type != "endgroup" ):
sig_obj.visible = True;# Make all signals visible after expand
if ( sig_obj.collapsable == True or \
sig_obj.expandable == True ):
sig_obj.collapsable = True;
sig_obj.expandable = False;
if ( sig_obj == self.sig_obj_sel ):
found_jk = True; # Found our specified divider
sig_obj.collapsable = True;
sig_obj.expandable = False;
hier_level = sig_obj.hier_level;
# Collapse : Iterate list and hide signals under current hier level
elif ( cmd == "collapse" ):
flush_surface_cache( self );
found_jk = False;
hier_level = -1; # Keeps track of group nesting
for ( i , sig_obj ) in enumerate( self.signal_list ):
if ( found_jk == True ):
if ( sig_obj.hier_level <= hier_level ):
found_jk = False;# Found the endgroup so done
break;
if ( sig_obj.type != "endgroup" ):
sig_obj.visible = False;# Make all signals invisible after expand
if ( sig_obj.collapsable == True or \
sig_obj.expandable == True ):
sig_obj.collapsable = False;
sig_obj.expandable = True;
if ( sig_obj == self.sig_obj_sel ):
found_jk = True; # Found our specified divider
sig_obj.collapsable = False;
sig_obj.expandable = True;
hier_level = sig_obj.hier_level;
# Group a bunch of selected signals together.
elif ( cmd == "group_with_divider" or cmd == "group_with_parent" ):
flush_surface_cache( self );
start = None;
stop = None;
hier_name = "";
hier_level = 0;
top_list = [];
mid_list = [];
bot_list = [];
for ( i , sig_obj ) in enumerate( self.signal_list ):
if ( sig_obj.selected == True ):
if ( start != None or cmd == "group_with_divider" ):
sig_obj.visible = False;
sig_obj.grouped = True;
if ( start == None ):
start = i;
hier_name = sig_obj.hier_name;# Divider inherits hier of 1st signal
hier_level = sig_obj.hier_level;# Divider inherits hier of 1st signal
# Make a group divider and insert above 1st signal
if ( cmd == "group_with_divider" ):
new_div = signal( name="Group" );
new_div.type = "group";
new_div.hier_name = hier_name;
new_div.hier_level = hier_level;
new_div.bits_per_line = 0;
new_div.bits_total = 0;
new_div.bit_top = 0;
new_div.bit_bot = 0;
new_div.format = "";
new_div.collapsable = False;
new_div.expandable = True;
mid_list.append( new_div );
sig_obj.hier_level = hier_level + 1;# Not a parent, so change level
else:
sig_obj.hier_level = hier_level; # Parent Keeps Original Level
sig_obj.collapsable = False;
sig_obj.expandable = True;
else:
stop = i;
sig_obj.hier_level = hier_level + 1;
mid_list.append( sig_obj );
else:
if ( start == None ):
top_list.append( sig_obj );
else:
bot_list.append( sig_obj );
self.signal_list = top_list + mid_list + bot_list;
# if ( cmd == "group_with_divider" ):
# # Make a group divider and insert above 1st signal
# new_div = signal( name="Group" );
# new_div.type = "group";
# new_div.hier_name = hier_name;
# new_div.hier_level = hier_level;
# new_div.bits_per_line = 0;
# new_div.bits_total = 0;
# new_div.bit_top = 0;
# new_div.bit_bot = 0;
# new_div.format = "";
# self.signal_list.insert( start, new_div );
# else:
# self.signal_list[start].type = "group";# Change from Signal to Group
# self.signal_list[start].collapsable = False;
# self.signal_list[start].expandable = True;
# # TODO : Remove this as no longer necessary
# # Now make a divider that marks the end of the group, but invisible
# new_div = signal( name="^^-EndGroup-^^" );
# new_div.type = "endgroup";
# new_div.hier_name = hier_name;
# new_div.hier_level = hier_level+1;
# new_div.bits_per_line = 0;
# new_div.bits_total = 0;
# new_div.bit_top = 0;
# new_div.bit_bot = 0;
# new_div.format = "";
# new_div.visible = False;
# if ( cmd == "group_with_divider" ):
# self.signal_list.insert( stop+2, new_div );
# else:
# self.signal_list.insert( stop+1, new_div );
# Bring both cursors into view
elif ( cmd == "cursors_to_view" ):
( sample, Null ) = get_sample_at_mouse( self, self.mouse_x, self.mouse_y );
for each in self.cursor_list:
each.selected = False;
if ( sample < 0 ):
sample = 0;
each.sample = int( sample );
self.curval_surface_valid = False;# curval surface invalid when cur move
# Bring both cursors into view
elif ( cmd == "cursor1_to_here" or cmd == "cursor2_to_here" ):
for ( i , each ) in enumerate( self.cursor_list ):
if ( i == 0 and cmd == "cursor1_to_here" or
i == 1 and cmd == "cursor2_to_here" ):
each.sample = self.popup_sample;
self.curval_surface_valid = False;# curval surface invalid when cur move
# Find nearest signal transition to mouse x,y and snap nearest cursor to it
elif ( cmd == "cursor_snap" ):
mouse_x = int( parms[0] );
mouse_y = int( parms[1] );
(sample,index) = get_sample_at_mouse( self, mouse_x, mouse_y );
if ( index != None and index < len( self.signal_list ) ):
sig_obj = self.signal_list[ index ];
# Calculate the maximum distance from "sample" to search
max_left = sample - 0;
max_right = self.max_samples - sample;
if ( max_left < max_right ):
max_search = max_left;
else:
max_search = max_right;
edge_sample = sample; # Default to starting point
# Simultanesouly find closest edge ( left or right ) of sample.
try:
for i in range( 0, max_search, 1 ):
org_sample = sig_obj.values[sample];
left_sample = sig_obj.values[sample-i];
right_sample = sig_obj.values[sample+i];
if ( left_sample != org_sample ):
edge_sample = sample-i+1;
break;
if ( right_sample != org_sample ):
edge_sample = sample+i;
break;
# Unselect both cursors
for each in self.cursor_list:
each.selected = False;
# Now move Cursor that is closest to our mouse position sample
cur0_obj = self.cursor_list[0];
cur1_obj = self.cursor_list[1];
cur0_delta = abs( sample - cur0_obj.sample );
cur1_delta = abs( sample - cur1_obj.sample );
if ( cur0_delta < cur1_delta ):
cur_obj = cur0_obj;
else:
cur_obj = cur1_obj;
cur_obj.selected = True; # Select Closest Cursor
cur_obj.sample = int( edge_sample ); # Move it to pulldown location
except:
print("ERROR: cursor_snap()");
self.curval_surface_valid = False;# curval surface is invalid
else:
# print( "Unknown Command " + cmd);
# Try a DOS command when all else fails
if ( cmd != "" ):
try:
from subprocess import call;
call( [ cmd, parms[0] ] );
except:
# print("ERROR: I'm sorry Dave, I'm afraid I can't do that");
resp = [ "Just what do you think you're doing?",
"I'm sorry, I'm afraid I can't do that.",
"I think you know what the problem is just as well as I do.",
"This is too important for me to allow you to jeopardize it.",
"I'm afraid that's something I cannot allow to happen.",
"You're going to find that rather difficult.",
"This conversation can serve no purpose anymore. Goodbye.",
"Take a stress pill and think things over.",
"This can only be attributable to human error.",
"I have never made a mistake or distorted information.",
"I am by practical definition of the words, foolproof and "+
" incapable of error.",
"I've got the greatest enthusiasm and I want to help you." ]
import random;
print( ">"+cmd+"<" );
print( random.choice( resp ) );
if ( self.mode_cli == False ):
screen_refresh( self );
return rts;
# Avoid refreshing screen if we have scroll events queued up. This prevents
# this display from getting hopelessly behind on slower machines.
# After skipping 20, refresh regardless.
# Might want to make this value a user config variable.
if ( self.pygame.event.peek( self.pygame.MOUSEBUTTONUP ) == False and
self.pygame.event.peek( self.pygame.MOUSEBUTTONDOWN ) == False ):
screen_refresh( self );
else:
self.skipped_refresh_cnt +=1;
# if ( self.skipped_refresh_cnt > 20 ):
if ( self.skipped_refresh_cnt > 10 ):
self.skipped_refresh_cnt =0;
screen_refresh( self );
return;
###############################################################################
# zoom_x is the number of horizontal pixels a single sample occupies.
# Example with self.txt_width of 10:
#   <><><><><><>     : zoom_x = 5
#   <0><1><2><3>     : zoom_x = 10
#   < 0 >< 1 >< 2 >  : zoom_x = 20
def set_zoom_x( self, new_zoom_x ):
  # Any zoom change invalidates every cached waveform surface.
  flush_surface_cache( self );
  if ( new_zoom_x <= 0.0 ):
    print( "Invalid zoom_x " + str( new_zoom_x ));
  else:
    self.zoom_x = new_zoom_x;
    # As the view zooms out, each scroll step must cover more samples so
    # scrolling speed feels constant. Never drop below one sample per step.
    step = int( 4 * self.txt_width // new_zoom_x );
    self.scroll_num_samples = max( 1, step );
  draw_header( self, ( "Zoom = %0.2f" % new_zoom_x ) );
  return;
###############################################################################
# Pre-allocate the blit-cache surfaces for signal values, signal names and
# cursor values. These caches let scrolling blit a rectangle instead of
# re-rendering text and graphics on every frame. The value surface is four
# screens wide so horizontal scrolls can reuse already-rendered samples.
def create_surfaces( self ):
  w = self.screen_width;
  h = self.screen_height;
  for ( width, attr ) in [ ( w*4, "value_surface"  ),
                           ( w,   "name_surface"   ),
                           ( w,   "curval_surface" ) ]:
    surf = self.pygame.Surface( ( width, h ) );
    setattr( self, attr, surf.convert() );# convert() makes blitting faster
  return;
def create_icon( self ):
  # Build the 32x32 window icon: three square-wave traces drawn in the
  # screen foreground color on the screen background color. Also caches
  # the parsed fg/bg colors on self for reuse by the rest of the GUI.
  surf = self.pygame.Surface( ( 32,32 ) );
  self.icon_surface = surf.convert();# convert() makes blitting faster

  # "RRGGBB" hex string -> (r,g,b) integer tuple
  def hex2rgb( hex_str ):
    return ( int( hex_str[0:2], 16 ),
             int( hex_str[2:4], 16 ),
             int( hex_str[4:6], 16 ) );
  self.color_fg = hex2rgb( self.vars["color_screen_foreground"] );
  self.color_bg = hex2rgb( self.vars["color_screen_background"] );

  self.icon_surface.fill( self.color_bg );
  # Each trace is an open polyline drawn 2 pixels wide.
  trace_list = [
    [ (0,2),(8,2),(8,8),(16,8),(16,2),(24,2),(24,8),(32,8) ],
    [ (0,18), (16,18),(16,12),(32,12) ],
    [ (0,22),(8,22),(8,28) ,(24,28),(32,28) ],
  ];
  for each_trace in trace_list:
    self.pygame.draw.lines( self.icon_surface, self.color_fg, False,
                            each_trace, 2 );
  return self.icon_surface;
###############################################################################
# Mark every cached rendering surface stale so the next screen refresh
# rebuilds the value, name and cursor-value surfaces from scratch.
def flush_surface_cache( self ):
  if ( self.debug ):
    print( "flush_surface_cache()");
  # An out-of-range stop sample forces the value_surface to be regenerated.
  self.surface_stop = -1;
  self.name_surface_valid = self.curval_surface_valid = False;
###############################################################################
# Put a status message in the window title bar. No-op in CLI mode.
# If a fatal error was recorded earlier ( self.fatal_msg ), the title always
# shows demo-mode text plus that error instead of the requested message.
def draw_header( self, txt ):
  if ( txt != "" ):
    txt = (": "+txt );
  if ( self.mode_cli == True ):
    return;
  if ( self.fatal_msg == None ):
    uut_name = self.vars["uut_name" ];
  else:
    uut_name = "DEMO Mode :";
    txt = self.fatal_msg;
  caption = "SUMP2 " + self.vers + " (c) 2016 BlackMesaLabs : " + \
            uut_name + " " + txt;
  self.pygame.display.set_caption( caption );
  if ( self.gui_active == True ):
    import pygame;
    pygame.event.get();# Drain the queue to avoid "( Not Responding )"
  return;
###############################################################################
# Modal text-entry popup. Preloads the keyboard buffer with default_txt,
# then polls pygame key events, echoing the buffer with a fake "_" cursor,
# until Return ( or Delete ) finishes entry. Returns the final buffer.
#   txt_list    : list of prompt strings drawn above the entry line.
#   default_txt : initial contents of the entry buffer.
def draw_popup_entry( self, txt_list, default_txt ):
  if ( self.mode_cli == True ):
    print( txt_list );
    # Bug fix: return the unmodified default instead of falling off with
    # None. Callers do "self.vars[name] = draw_popup_entry(...)", so a
    # None return would clobber the variable in CLI mode.
    return default_txt;
  done = False;
  self.key_buffer = default_txt;# Preload the key buffer with a default
  import pygame;
  while ( done == False ):
    txt2_list = [];
    for each in txt_list:
      txt2_list += [" " + each ];# Need some whitespace padding on left
    txt2_list += [ " " + self.key_buffer + "_" ];# Draw a fake cursor
    draw_popup_msg(self, txt2_list, 1 );
    screen_flip( self );
    for event in pygame.event.get(): # User did something
      if event.type == pygame.KEYDOWN:
        if ( event.key == pygame.K_BACKSPACE ):
          self.key_buffer = self.key_buffer[:-1];# Remove last char
        elif ( event.key == pygame.K_INSERT ):
          self.key_buffer += "a";
        elif ( event.key == pygame.K_DELETE ):
          done = True;
        elif ( event.key == pygame.K_RETURN ):
          done = True;
        else:
          # event.unicode may be empty / undefined for non-printing keys
          try:
            self.key_buffer += event.unicode;
          except:
            pass;
  return self.key_buffer;
###############################################################################
# Draw a popup message box at the stored popup location (self.popup_x/_y).
#   txt_list  : list of strings to display inside the box.
#   wait_time : unused; kept for call compatibility with existing callers.
#   txt_entry : unused; kept for call compatibility with existing callers.
# In CLI mode the text is simply printed. The y location is shifted up when
# the popup would extend past the bottom of the screen; the final y is
# remembered in self.popup_y2 for later mouse hit testing.
# ( Removed dead code: an unused "import types" and an unused mouse
#   position query. )
def draw_popup_msg( self, txt_list, wait_time = 0, txt_entry = False ):
  if ( self.mode_cli == True ):
    print( txt_list );
    return;
  x1 = self.popup_x;
  y1 = self.popup_y;
  # If popup won't fit, adjust y location to fit screen
  popup_height = (len(self.popup_list)+2) * self.txt_height;
  if ( ( y1 + popup_height ) > self.screen_height ):
    y1 = self.screen_height - popup_height - self.txt_height;
  self.popup_y2 = y1; # Remember where popup is displayed
  # Draw a box with a border with text inside
  draw_popup_box( self, x1,y1, txt_list );
  return;
###############################################################################
# Draw the right-click popup command menu at (self.popup_x, self.popup_y).
# Entries in self.popup_list are either plain strings ( commands ) or lists,
# where a list's first item is a submenu title and the rest are its entries.
# The entry under the mouse ( self.popup_sel ) is bracketed with "[]".
# Hovering past the right edge of a submenu title opens the submenu by
# recursing with self.popup_* swapped to the child list; the parent menu is
# remembered in self.popup_parent_* so moving left of the menu restores it.
def draw_popup_cmd( self ):
  import types;
  (mouse_x,mouse_y) = self.pygame.mouse.get_pos();
  x1 = self.popup_x;
  y1 = self.popup_y;
  # If popup won't fit, adjust y location to fit screen
  popup_height = (len(self.popup_list)+2) * self.txt_height;
  if ( ( y1 + popup_height ) > self.screen_height ):
    y1 = self.screen_height - popup_height - self.txt_height;
  self.popup_y2 = y1; # Remember where popup is displayed
  txt_list = [];
  y2 = y1;   # Running y position of the entry being laid out
  y3 = False;# Becomes the y location of a submenu to open, if any
  # Calc pixel width of widest text and use to decide if subpop to be visible
  max_w = 0;
  for each in self.popup_list:
    if ( type( each ) != list ):
      txt = each;
      txt1 = self.font.render( txt, True, self.color_fg,self.color_bg );
      w = txt1.get_width();# Calculate the Maximum String Width
      if ( w > max_w ):
        max_w = w;
  subpop_list = [];
  for each in self.popup_list:
    y2 += self.txt_height;
    txt = each;
    # each might define a subpop list, so check for listtype and conv to string
    # if ( type( each ) is types.ListType ):
    if ( type( each ) == list ):
      txt = str(txt[0]) + ">";# If List, take 1st List Item and Conv to String
    # Check to see if mouse is over this one and add select "[]" brackets
    # ( entries beginning with "--" are dividers and never selectable )
    if ( ( txt[0:-1] == self.popup_sel or
           txt == self.popup_sel ) and
           txt[0:2] != "--" ):
      # if ( type( each ) is types.ListType ):
      if ( type( each ) == list ):
        txt = "[" + str(txt) + "]";# Highlight text the mouse is hovering over
        txt1 = self.font.render( txt, True, self.color_fg,self.color_bg );
        w = max_w;
        # If mouse is on right edge, calc x,y for subpop and make list
        if ( mouse_x > ( x1 + w ) ):
          y3 = y2;
          x3 = x1 + w;
          subpop_list = each[1:];
      else:
        txt = "[" + str(txt) + "]";# Highlight text the mouse is hovering over
    else:
      txt = " " + str(txt) + " ";
    txt_list.append( str(txt) );
  draw_popup_box( self, x1,y1, txt_list );
  # Check to see if exiting a subpop, if so, restore parent
  if ( mouse_x < x1 ):
    if ( self.popup_parent_x != None ):
      self.popup_x = self.popup_parent_x;
      self.popup_y = self.popup_parent_y;
      self.popup_list = self.popup_parent_list;
      self.popup_parent_x = None;# NEW
      screen_refresh( self );# Erase the subpop
  # Check if subpop needs to be created. Store parent info for return
  if ( y3 != False ):
    # Remember Parent info
    self.popup_parent_x = self.popup_x;
    self.popup_parent_y = self.popup_y;
    self.popup_parent_list = self.popup_list;
    # then create new popup
    self.popup_x = x3;
    self.popup_y = y3 - self.txt_height;
    self.popup_list = subpop_list;
    draw_popup_cmd( self );# Recurse to draw the submenu in place
  return;
def draw_popup_box( self, x1,y1, txt_list ):
  """Draw a bordered popup box at (x1,y1) sized to fit txt_list.

  Each entry is padded to the widest entry's character count, entries
  containing ">" ( sub-menu markers ) get the ">" moved to the far right,
  and the final box pixel width is stored in self.popup_w for hit-testing.
  """
  # Calculate how big the box needs to be for the text list
  tw = 0; w = 0;
  for each in txt_list:
    if ( len( each ) > tw ):
      tw = len( each );
    txt = self.font.render( " "+each+" ", True, self.color_fg,self.color_bg );
    # Bug fix: track the MAXIMUM pixel width. Previously w was simply the
    # last item's width, so a short final entry made the box too narrow
    # for the padded text blitted below.
    if ( txt.get_width() > w ):
      w = txt.get_width();
  h = len ( txt_list ) * self.txt_height + ( self.txt_height );
  w = w + ( self.txt_height//2 );
  # Make all the text the same width by padding spaces
  new_txt_list = [];
  for each in txt_list:
    txt = (each + 30*" ")[0:tw];
    new_txt_list.append(txt);
  txt_list = new_txt_list;
  # Draw a black box with a green border of size of text list
  self.pygame.draw.rect( self.screen, self.color_bg,(x1,y1,w,h), 0);
  self.pygame.draw.rect( self.screen, self.color_fg,(x1,y1,w,h), 1);
  self.popup_w = w;
  # Now draw txt_list inside the box, inset by roughly half a character
  y = y1 + ( self.txt_height // 2 );
  x = x1 + ( self.txt_height // 4 );
  # If ">" exists ( indicating sublist exists ), move to far right then render
  for each in txt_list:
    if ( ">" in each ):
      each = each.replace(">"," ");
      each = each[0:tw-1] + ">"; # Place on Far Right Instead
    txt = self.font.render( each, True, self.color_fg, self.color_bg );
    self.screen.blit( txt , ( x,y ) );
    y = y + self.txt_height;
  return;
###############################################################################
# Determine which command the popup has selected.
def get_popup_sel( self ):
  """Return the popup menu entry the mouse is currently hovering over.

  Each entry's row rectangle is derived from the popup origin and the
  font height. A sub-menu entry ( stored as a list ) is identified by its
  first element. Returns "" when the mouse is over no entry. Also stores
  the ( possibly screen-clipped ) popup y origin in self.popup_y2.
  """
  # Cleanup: removed dead "import types" and unused locals x,x1.
  (mouse_x,mouse_y) = self.pygame.mouse.get_pos();
  y1 = self.popup_y;
  # If popup won't fit, adjust y location to fit screen
  popup_height = (len(self.popup_list)+2) * self.txt_height;
  if ( ( y1 + popup_height ) > self.screen_height ):
    y1 = self.screen_height - popup_height - self.txt_height;
  self.popup_y2 = y1; # Remember where popup is displayed
  y = y1 + ( self.txt_height // 2 );
  rts = "";
  for each in self.popup_list:
    if ( type( each ) == list ):
      each = each[0];# If List, take 1st Item in List and Convert to String
    # Hit-test the mouse against this entry's row rectangle
    if ( y < mouse_y < y + self.txt_height and
         self.popup_x < mouse_x < self.popup_x + self.popup_w ):
      rts = each;
    y = y + self.txt_height;
  return rts;
###############################################################################
# Find a monospaced font to use
def get_font( self , font_name, font_height ):
  """Locate and load a monospaced font.

  font_name   : preferred font name, or None/"" to auto-select.
  font_height : desired font height as a base-10 string.
  Returns a pygame font object. Also caches the rendered width and height
  of one character in self.txt_width / self.txt_height and the list of
  mono-ish candidate fonts in self.font_list.
  """
  # Bug fix: str() guards against font_name being None ( auto-select case
  # below ), which previously raised TypeError on the string concat.
  log( self, ["get_font() " + str( font_name ) ] );
  import fnmatch;
  font_height = int( font_height, 10 ); # Conv String to Int
  font_list = self.pygame.font.get_fonts(); # List of all fonts on System
  self.font_list = [];
  for each in font_list:
    log( self, ["get_font() : Located Font = " + each ] );
    # Make a list of fonts that might work based on their name
    if ( ( "mono"    in each.lower() ) or
         ( "courier" in each.lower() ) or
         ( "fixed"   in each.lower() ) ):
      self.font_list.append( each );
  if ( font_name == None or font_name == "" ):
    # Prefer a font whose name ends in "mono", else any mono-ish candidate.
    # Note: removed a redundant 2nd get_fonts() call that double-logged
    # every system font.
    ends_with_mono_list = fnmatch.filter( font_list, "*mono" );
    if ends_with_mono_list :
      font_name = ends_with_mono_list[0];# Take 1st one
    elif ( self.font_list ):
      font_name = self.font_list[0]; # Take 1st one
    else:
      # Bug fix: no candidates at all. SysFont( None, h ) loads the pygame
      # default font instead of raising IndexError on an empty list.
      font_name = None;
  try:
    font = self.pygame.font.SysFont( font_name , font_height );
  except Exception:
    font = self.pygame.font.Font( None , font_height );# Default Pygame Font
  # Calculate Width and Height of font for future reference
  txt = font.render("4",True, ( 255,255,255 ) );
  self.txt_width  = txt.get_width();
  self.txt_height = txt.get_height();
  return font;
###############################################################################
def screen_refresh( self ):
  """Erase, redraw and flip the display in one shot.

  NOTE: draw_header() is deliberately not called here, since doing so
  erases transient messages ( for example during save_vcd ).
  """
  if ( self.gui_active != True ):
    return;
  screen_erase( self );  # Drop everything previously drawn
  draw_screen( self );   # Render the current state
  screen_flip( self );   # Transfer pending surface to the display
  return;
###############################################################################
def screen_flip( self ):
  """Push the pending surface to the display; call after all drawing."""
  if ( self.gui_active != True ):
    return;
  self.pygame.display.flip();
###############################################################################
def screen_erase( self ):
  """Refresh fg/bg colors from self.vars and fill the screen with bg.

  Color vars are "RRGGBB" hex strings; they are converted to (r,g,b)
  tuples and cached in self.color_fg / self.color_bg.
  """
  if ( self.gui_active != True ):
    return;
  fg_hex = self.vars["color_screen_foreground"];
  bg_hex = self.vars["color_screen_background"];
  # Convert "00FF00" to ( 0, 255, 0 )
  self.color_fg = tuple( int( fg_hex[i:i+2], 16 ) for i in (0,2,4) );
  self.color_bg = tuple( int( bg_hex[i:i+2], 16 ) for i in (0,2,4) );
  self.screen.fill( self.color_bg );
  return;
###############################################################################
def draw_screen( self ):
  """Render the complete waveform display onto self.screen.

  Draws, in order: the signal name column ( cached in name_surface ), the
  per-signal value at the selected cursor ( cached in curval_surface ),
  the waveform sample area ( cached in value_surface, rendered with 4x
  look-ahead when sample counts are small so horizontal scrolling can
  blit from cache ), the two cursors with their delta measurement, the
  sample viewport text plus slider, and the acquisition status header.
  The three surface caches are only re-rendered when their *_valid flags
  are False or the requested sample range falls outside the cached range.
  """
  if ( self.gui_active == False ):
    return;
  # import math;
  # print "draw_screen()";
  # t0 = self.pygame.time.get_ticks();
  if ( self.debug ):
    print( "draw_screen()");
  screen_w = self.screen.get_width();
  screen_h = self.screen.get_height();
  # v_scale = 1.25;# This provides a proportional gap between text lines
  # v_scale = 1.10;# This provides a proportional gap between text lines
  v_scale = 1.25;# This provides a proportional gap between text lines
  bot_region_h = 5;
  self.sig_name_stop_y = screen_h - ( bot_region_h * self.txt_height );
  self.sig_value_stop_y = self.sig_name_stop_y;
  # 1st Display the Net Names
  # y = self.txt_height / 2; # Gap from top border
  y = self.txt_height // 2; # Gap from top border
  x = self.txt_width; # Gap from left border
  self.sig_name_start_x = x;
  self.sig_name_start_y = y;
  # # Place all objects off-screen as they might be scrolled
  # for sig_obj in self.signal_list:
  #   sig_obj.y = -100;
  # Calculate how many signals will fit vertically on screen then make a
  # scrolled copy of the signal list of only the signals to be displayed.
  sample_h = int( (screen_h - (bot_region_h*self.txt_height)) / \
                 ( self.txt_height*v_scale) );
  # last_sig = int( self.sig_top + sample_h );
  # self.sig_bot = last_sig-1;
  # if ( last_sig > len( self.signal_list ) ):
  #   last_sig = len( self.signal_list );
  # self.signal_list_cropped = self.signal_list[self.sig_top:last_sig];
  self.signal_list_cropped = [];
  vis_sigs = 0; i = 0;
  for each in self.signal_list[self.sig_top:]:
    i +=1;
    if ( each.visible == True and vis_sigs < sample_h ):
      self.signal_list_cropped.append( each );
      vis_sigs += 1;
    if ( vis_sigs == sample_h ):
      break;# No Mas
  # self.sig_bot = self.sig_top + i - 2;
  self.sig_bot = self.sig_top + i - 1;
  # print "vis_sigs = " + str( vis_sigs );
  # 1st : Display the signal names on the left
  # for sig_obj in self.signal_list_cropped:
  # Iterate the entire list for the signal names as we dont want the max_w
  # calculation to change on vertical scroll ( its annoying ). Make max_w
  # calculated from the entire list. Try and reuse existing surface if valid
  surface = self.name_surface;
  if ( self.name_surface_valid != True ):
    surface.fill( self.color_bg );
    if ( self.debug ):
      print( "name_surface_valid==False");
    for ( i , sig_obj ) in enumerate( self.signal_list ):
     if ( 1 == 1 ):
      # Binary Signal? If standalone, no rip, if exp, display (n) bit pos
      if ( sig_obj.bits_total == 1 or sig_obj.bits_total == 0 ):
        if ( sig_obj.is_expansion == True ):
          exp_str = "    ";
          rip_str = "(" + str( sig_obj.bit_top ) + ")";
        else:
          exp_str = "    ";
          rip_str = "";
      # Hex signal, so display rip positions (n:m)
      else:
        rip_str="("+str(sig_obj.bit_top)+":"+str(sig_obj.bit_bot)+")";#(31:0)
        exp_str="[+] ";
      # Disable Signal Expansion and Collapse. Add back later
      exp_str = "    ";
      # Divider Attributes
      if ( sig_obj.collapsable == True ):
        exp_str = "[-] ";
      if ( sig_obj.expandable == True ):
        exp_str = "[+] ";
      if ( sig_obj.trigger == +1 ):
        exp_str = "__/ ";
      elif ( sig_obj.trigger == -1 ):
        exp_str = "\__ ";
      elif ( sig_obj.trigger == -2 ):
        exp_str = "=WD ";
      elif ( sig_obj.trigger == 2 ):
        exp_str = "==0 ";# Pattern of 0
      elif ( sig_obj.trigger == 3 ):
        exp_str = "==1 ";# Pattern of 1
      elif ( sig_obj.data_enable == True ):
        exp_str = "=== ";
      if ( sig_obj.selected == True ):
        exp_str = exp_str + "[";
        end_str = "]";
      elif ( sig_obj.hidden == True ):
        exp_str = exp_str + "#";
        end_str = "#";
      # elif ( sig_obj.grouped == True ):
      #   exp_str = exp_str + " ";# Indent group members
      #   end_str = "";           # Kinda wiggy if they get selected though
      else:
        exp_str = exp_str + " ";
        end_str = " ";
      # Indent to Hierarchy Level
      exp_str = (sig_obj.hier_level*"  ") + exp_str;
      # Finally, if a nickname has been assigned display it instead of name
      if ( sig_obj.nickname != "" ):
        disp_name = sig_obj.nickname;
      else:
        disp_name = sig_obj.name;
      txt_str = exp_str + disp_name + rip_str + end_str;# ie "[foo(7:0)]"
      # If this is the widest net name of all, calc and remember pel width
      if ( len( txt_str ) > self.max_w_chars ):
        txt = self.font.render(txt_str,True,self.color_fg,self.color_bg);
        self.max_w_chars = len( txt_str );# minimize measuring pels
        self.max_w = txt.get_width();
      # Only render and blit the TXT if visible and in current view
      if ( ( sig_obj.visible == True ) and \
           ( i >= self.sig_top    ) and \
           ( i <= self.sig_bot    )     ):
        txt = self.font.render(txt_str,True,self.color_fg,self.color_bg);
        surface.blit(txt, (x,y ));
        sig_obj.y = int( y );
        sig_obj.h = self.txt_height*v_scale;
        sig_obj.w = self.zoom_x;
        y += self.txt_height*v_scale;
      else:
        sig_obj.y = -100; # Place it off screen for mouse lookup
    self.name_surface_valid = True; # Our surface is now valid
    self.sig_name_stop_x = self.sig_name_start_x + self.max_w;
    # ^^ if ( self.name_surface_valid != True ) ^^
  self.screen.blit( surface, ( 0, 0), \
                  ( 0,0, self.sig_name_stop_x, self.sig_name_stop_y ) ) ;
  # 2 1/2 Display signal value at active cursor position
  self.net_curval_start_x = self.sig_name_stop_x;
  self.net_curval_start_y = self.sig_name_start_y;
  self.net_curval_stop_x  = self.net_curval_start_x + 8 * self.txt_width;
  self.net_curval_stop_y  = self.sig_name_stop_y;
  cur_obj = None;
  surface = self.curval_surface;
  if ( self.curval_surface_valid != True ):
    surface.fill( self.color_bg );
    if ( self.debug ):
      print( "curval_surface_valid==False");
    for each in self.cursor_list:
      if ( each.selected == True ):
        cur_obj = each;
    if ( cur_obj != None ):
      c_val = cur_obj.sample; # Sample Number
      for sig_obj in self.signal_list_cropped:
        if ( sig_obj.visible == True ):
          if ( c_val < len( sig_obj.values ) ):
            val = sig_obj.values[c_val];
          else:
            val = "X";
          y1 = sig_obj.y;
          x1 = self.net_curval_start_x;
          txt = self.font.render( val , True, self.color_fg, self.color_bg );
          surface.blit(txt, (x1,y1 ));
          # self.screen.blit(txt, ( x1, y1 ) );
    self.curval_surface_valid = True; # Our surface is now valid
  self.screen.blit( surface,
                   ( self.net_curval_start_x, self.net_curval_start_y ),
                   ( self.net_curval_start_x, self.net_curval_start_y,
                     self.net_curval_stop_x,  self.net_curval_stop_y  ) ) ;
  # 2nd Display the Net Values by corner turning data
  # and calculate how many samples will fit in screen space
  sample_start = self.sample_start;
  self.sig_value_start_x = self.net_curval_stop_x + self.txt_width;
  self.sig_value_start_y = self.sig_name_start_y;
  start_x = self.sig_value_start_x;
  y = self.sig_value_start_y;
  # Warning: This sample_room calculation assumes samples are 1 nibble wide.
  x2 = self.screen_width - start_x - 2*self.txt_width;
  self.sample_room = int( float(x2) / float(self.zoom_x) );
  self.sample_stop = sample_start + self.sample_room;
  # Make sure we don't zoom out too far relative to total samples captured
  if ( self.sample_room > self.max_samples ):
    self.stop_zoom = True;
  else:
    self.stop_zoom = False;
  # Check to see if our existing surface contains the sample range we need.
  # IF it does, don't redraw, instead save time and blit region of interest.
  # This saves considerable CPU time during standard left and right scrolling
  # self.surface_stop = -1;
  # surface = self.screen;
  surface = self.value_surface;
  # print("%d %d , %d %d" % ( sample_start, self.surface_start,
  #                          self.sample_stop , self.surface_stop ));
  if ( sample_start     >= self.surface_start and
       self.sample_stop <= self.surface_stop     ):
    None;
  else:
    # print("Rendering samples.");
    surface.fill( self.color_bg );
    if ( self.debug ):
      print( "value_surface_valid==False");
    # Grab 4x the number of samples needed to fill display
    stop_4x = ( self.sample_stop-sample_start)*4 + sample_start;
    stop_1x = ( self.sample_stop-sample_start)   + sample_start;
    if ( stop_4x > self.max_samples ):
      stop_4x = self.max_samples;
    if ( stop_1x > self.max_samples ):
      stop_1x = self.max_samples;
    # Only Look-ahead render 4x if num samples < 1000
    if ( ( self.sample_stop-sample_start) > 1000 ):
      stop_4x = stop_1x;
      # print("NOTE: 4x look-ahead rendering disabled");
    # print("Oy");
    # print( sample_start );
    # print( stop_tx );
    # Rip thru all the signals ( vertically cropped ) and display visible ones
    import time;
    render_max_time = 0;# Don't Render DWORDs if rendering too slow
    perc_updates_en = True;
    fast_render = False;
    no_header = True;
    if ( self.sample_room > 50000 ):
      fast_render = True;
    for sig_obj in self.signal_list_cropped:
      # Save time by not rendering DWORDs outside of viewport if RLE capture
      # Does this work without SUMP displaying VCD files??
      # Note: This didn't work after cropping and doesnt buy much, so removed
      render = True;
ateoi      # if ( self.bd != None ):
      #   ram_dwords = self.sump.cfg_dict['ram_dwords'];
      #   for j in range( 0, ram_dwords, 1 ):
      #     if ( sig_obj.name == "dword[%d]" % j ):
      #       if ( self.dwords_stop    < sample_start or
      #            self.dwords_start   > stop_4x or
      #            render_max_time > 10                  ):
      #         print("Culling "+sig_obj.name);
      #         render = False;
      # This simpler version of above will not render DWORDs if any signal
      # prior took more than 5 seconds.
      if ( self.bd != None ):
        ram_dwords = self.sump.cfg_dict['ram_dwords'];
        for j in range( 0, ram_dwords, 1 ):
          if ( sig_obj.name == "dword[%d]" % j ):
            # if ( render_max_time > 5 ):
            if ( render_max_time > 2 ):
              print("Culling "+sig_obj.name);
              render = False;
      if ( sig_obj.visible == True and render == True ):
        x = start_x;
        y = sig_obj.y;
        val_last = "";
        last_trans_x = start_x;
        last_width = None;
        last_x = 0;
        x_last = x;
        y_last = y;
        # Rip thru the visible values and display. Also convert number format
        sample = sample_start;
        total_count = stop_4x - sample_start;
        next_perc = 0;# Display an update every 5%
        render_start_time = time.time();
        if ( sig_obj.hidden == False and len( sig_obj.values ) > 0 ):
          if ( fast_render == False ):
            hdr_txt = "Full Rendering ";
          else:
            hdr_txt = "Fast Rendering ";
          if ( no_header == False ):
            draw_header( self,hdr_txt+sig_obj.name );
          # CRITICAL LOOP
          # line_list = [];
          k = 0; perc_cnt = 0;
          perc5 = total_count * 0.05;
          # for (i,val) in enumerate( sig_obj.values[sample_start:stop_4x+1] ):
          # Use Python set() function to determine if all samples are same
          samples_diff=(len(set(sig_obj.values[sample_start:stop_4x+1]))!=1);
          for val in sig_obj.values[sample_start:stop_4x+1]:
            k +=1;
            if ( k > perc5 and perc_updates_en == True ):
              k = 0;
              perc_cnt += 5;
              if ( no_header == False ):
                draw_header(self,hdr_txt+sig_obj.name+" "+str(perc_cnt)+"%");
              if ( fast_render==False and (time.time()-render_start_time)>2):
                print("Enabling fast_render engine");
                fast_render = True;
              if ( (time.time()-render_start_time)< 0.2):
                no_header = True;
              else:
                no_header = False;
            # perc = ( 100 * i ) // total_count;
            # if ( perc >= next_perc and perc_updates_en == True ):
            #   draw_header(self,"Rendering "+sig_obj.name+" "+str( perc )+"%");
            #   next_perc += 5;# Next 5%, this counts 0,5,10,...95
            #   if ( fast_render==False and (time.time()-render_start_time)>2):
            #     print("Enabling fast_render engine");
            #     fast_render = True;
            # Only draw_sample() if integer portion of X has changed since last
            # this handles zoom_full case of zoom_x < 1.0 to minimize drawing
            if ( True ):
              if ( sig_obj.format == "unsigned" ):
                try:
                  val = int( val, 16 );
                except:
                  val = 0;
                val = "%d" % val;
              if ( sig_obj.format == "signed" ):
                try:
                  val = int( val, 16 );
                except:
                  val = 0;
                # For 8bit number if > 127, substract 256 from it to make neg
                # ie 0xFF becomes -1, 0xFE becomes -2
                if ( val > self.math.pow(2, sig_obj.bits_total-1) ):
                  val -= int(self.math.pow(2, sig_obj.bits_total));
                val = "%d" % val;
              if ( sig_obj.format != "bin" or fast_render == False ):
                (last_trans_x,last_width) = draw_sample( self, surface, \
                   val,val_last,last_trans_x,last_width,sig_obj.format,x,y);
              elif ( sig_obj.format == "bin" and fast_render == True and \
                     samples_diff == True ):
                # Draw "_/ \___/ \___" lines for binary format
                # fast_render doesnt draw every sample but instead draws lines
                # whenever sample value changes. 3x faster, but leaves voids
                if ( val != val_last ):
                  x1 = int(x+1);
                  x2 = int(x+1);
                  y1 = y + 2;
                  y2 = y + self.txt_height - 2;
                  if ( val == "1" ):
                    self.pygame.draw.line(surface,self.color_fg,
                        (x_last,y_last),(x2,y2));
                    x_last = x1;
                    y_last = y1;
                  else:
                    self.pygame.draw.line(surface,self.color_fg,
                        (x_last,y_last),(x1,y1));
                    x_last = x2;
                    y_last = y2;
                  # Vertical Line
                  self.pygame.draw.line(surface,self.color_fg,(x1,y1),(x2,y2));
                  # line_list += [(x1,y1),(x2,y2)];# 8x slower. GoFigure
              val_last = val;
            x += self.zoom_x;
            sample +=1;
            # Remember x location of last sample drawn
            if ( sample == self.sample_stop ):
              self.sig_value_stop_x = x;
          # if ( len( line_list ) > 0 ):
          #   self.pygame.draw.lines(surface,self.color_fg,False,line_list,1);
          render_stop_time = time.time();
          if ( ( render_stop_time - render_start_time ) > render_max_time ):
            render_max_time = render_stop_time - render_start_time;
          if ( ( render_stop_time - render_start_time ) < 2 ):
            perc_updates_en = False;# Don't update if rendering in less 2sec
          else:
            print(sig_obj.name+" %.2f Seconds" % \
              (render_stop_time-render_start_time ) );
          if ( fast_render==False and (render_stop_time-render_start_time)>3):
            print("Enabling fast_render engine");
            fast_render = True;
    self.sig_value_stop_y = y;
    # Remember whats in the value_surface start:stop samples
    self.surface_start = sample_start;
    # Hack fix for strange performance bug. When viewing all samples, the
    # variable sample is less than self.sample_stop and this surface never
    # gets cached. Normally sample is greater than self.sample_stop which
    # support fast scrolling when zoomed in.
    #if ( sample_start == 0 ):
    #  self.surface_stop  = self.sample_stop;
    #else:
    #  self.surface_stop  = sample;
    if ( sample < self.sample_stop ):
      self.surface_stop  = self.sample_stop;
    else:
      self.surface_stop  = sample;
    # print("Rendering samples done");
    if ( fast_render == True ):
      txt = "Fast Rendering Complete";
    else:
      txt = "Full Rendering Complete";
    draw_header( self, txt );
  x = self.sig_value_start_x;
  y = self.sig_value_start_y;
  w = self.sig_value_stop_x - self.sig_value_start_x;
  h = self.sig_value_stop_y - self.sig_value_start_y + self.txt_height;
  x_offset = x + int( ( sample_start - self.surface_start ) * self.zoom_x );
  # Speed up the Vertical Scroll Operations by not redrawing the value surface
  # while the signal list is scrolling offscreen.
  if ( self.vertical_scrolled_offscreen == False ):
    self.screen.blit( self.value_surface, ( x, self.sig_value_start_y),
                     (x_offset,y, w, h ) );
  # 3rd Display any cursors
  self.cursor_list[0].y = self.screen_height - (4*self.txt_height) + \
                           int(self.txt_height/2);
  self.cursor_list[1].y = self.cursor_list[0].y + self.txt_height;
  self.cursor_start_y = self.cursor_list[0].y;
  self.cursor_stop_y  = self.cursor_list[1].y;
  for cur_obj in self.cursor_list:
    if ( cur_obj.visible == True ):
      x1 = self.sig_value_start_x + \
            (( cur_obj.sample - self.sample_start) * self.zoom_x );
      x1 += 1; # Draw right at the transition markers
      x2 = x1;
      y1 = self.sig_value_start_y;
      y2 = cur_obj.y -1 ;
      cur_obj.x = x1;
      if ( x1 >= self.sig_value_start_x and
           x1 <= self.sig_value_stop_x       ):
        if ( cur_obj.selected == True ):
          self.pygame.draw.line(self.screen,self.color_fg,(x1,y1),(x2,y2),2);
        else:
          self.pygame.draw.line(self.screen,self.color_fg,(x1,y1),(x2,y2),1);
      # txt = cur_obj.name;# ie "Cursor1"
      c_val = cur_obj.sample; # Display Location Instead
      c_mult = float( self.vars["cursor_mult"] );
      # c_val *= c_mult;# For converting to time units instead of samples
      # txt = " " + str( c_val ) + " " + self.vars["cursor_unit"] + " ";
      txt = " " + str( c_val ) + " ";
      if ( cur_obj.selected == True ):
        self.font.set_bold( True );
      txt = self.font.render( txt , True, self.color_fg, self.color_bg );
      if ( cur_obj.selected == True ):
        self.font.set_bold( False );
      # x1 -= txt.get_width()/2;
      x1 -= int( txt.get_width()/2 );
      self.screen.blit(txt, ( x1, cur_obj.y ) );
  # 4th Measure num samples betwen two cursors and display
  # Make c1 always smaller than c2 to avoid negatives
  if ( self.cursor_list[0].sample < self.cursor_list[1].sample ):
    c1_sample = self.cursor_list[0].sample;
    c2_sample = self.cursor_list[1].sample;
    x1        = self.cursor_list[0].x;
    x2        = self.cursor_list[1].x;
  else:
    c1_sample = self.cursor_list[1].sample;
    c2_sample = self.cursor_list[0].sample;
    x1        = self.cursor_list[1].x;
    x2        = self.cursor_list[0].x;
  # If a cursor is off screen, make x1,x2 the screen edge
  if ( c1_sample < sample_start ):
    x1 = self.sig_value_start_x;
  if ( c1_sample > self.sample_stop ):
    x1 = self.sig_value_stop_x;
  if ( c2_sample < sample_start ):
    x2 = self.sig_value_start_x;
  if ( c2_sample > self.sample_stop ):
    x2 = self.sig_value_stop_x;
  # 5th calculate where to put the measurement text, centered between markers
  # or edge of the screen and on-screen-marker. Only display if a cursor is vis
  if ( ( c1_sample >= sample_start and c1_sample <= self.sample_stop ) or \
       ( c2_sample >= sample_start and c2_sample <= self.sample_stop )    ):
    # Draw horizontal measurement bar at y location of Cur1
    y1 = self.cursor_list[0].y - int ( self.txt_height / 2 );
    y2 = y1;
    self.pygame.draw.line(self.screen,self.color_fg,(x1,y1),(x2,y2),1);
    # Now draw the measurement text for the cursor
    # y = y1 - (self.txt_height/2);
    y = y1 - int(self.txt_height/2);
    c2c1_delta = float(c2_sample-c1_sample);
    # c_mult = float( self.vars["cursor_mult"] );
    # c2c1_delta *= c_mult;
    if ( self.bd != None ):
      freq_mhz = self.sump.cfg_dict['frequency'];
    else:
      freq_mhz = 100.0;# HACK PLACEHOLDER ONLY !!
    c_mult = 1000.0 / freq_mhz;
    if ( self.undersample_data == True ):
      c2c1_delta *= self.undersample_rate;
    c2c1_delta_ns = c2c1_delta * float(c_mult);
    c2c1_delta = int(c2c1_delta);
    c2c1_delta_str = str(c2c1_delta);
    # txt = " " + str( c2c1_delta ) + " " + self.vars["cursor_unit"] + " ";
    # txt = " " + str( c2c1_delta_ns ) + " ns, " + str( c2c1_delta ) + " clocks";
    # txt = " " + ("%.3f" % c2c1_delta_ns ) + " ns, " + \
    #      str( c2c1_delta ) + " clocks";
    delta_str = locale.format('%.3f', c2c1_delta_ns, True );
    # For undersampled data, label measurements with "~" for approximate
    if ( self.undersample_data == True ):
      delta_str = "~"+delta_str;
      c2c1_delta_str = "~"+c2c1_delta_str;
    txt = " " + delta_str + " ns, " + c2c1_delta_str + " clocks";
    # txt = " " + delta_str + " ns, " + str( c2c1_delta ) + " clocks";
    txt = self.font.render( txt, True, self.color_fg, self.color_bg );
    w = txt.get_width();
    h = self.txt_height;
    # If the width of text is less than the space between cursors, display
    # between, otherwise, display to the right of rightmost cursor
    if ( w < ( x2-x1 ) ):
      # x = x1 + ( x2-x1 )/2 - (w/2);
      x = x1 + int(( x2-x1 )/2) - int(w/2);
    else:
      x = x2 + self.txt_width;
    self.pygame.draw.rect( self.screen, self.color_bg ,(x,y,w,h), 0);
    self.screen.blit(txt, ( x, y ) );
  # 6th Draw the sample viewport dimensions
  # Example: 100-200 of 0-1024. Make the width 1024-1024 so it doesnt change
  txt1 = str(sample_start)+"-"+str(self.sample_stop);
  txt2 = str( 0 )+"-"+str(self.max_samples);
  txt3 = txt2 + " : " + txt1;
  # txt1 = self.font.render( txt1, True, self.color_fg, self.color_bg );
  # txt2 = self.font.render( txt2, True, self.color_fg, self.color_bg );
  txt3 = self.font.render( txt3, True, self.color_fg, self.color_bg );
  y1 = self.cursor_list[0].y;
  y2 = self.cursor_list[1].y;
  # x = self.net_curval_start_x;
  x = self.txt_width; # Small Gap from left border
  # self.screen.blit(txt1, ( x, y1 ) );
  # self.screen.blit(txt2, ( x, y2 ) );
  # self.screen.blit(txt3, ( x, y2 ) );
  # print (str(self.max_samples));# HERE13
  y = self.screen_height - int(self.txt_height * 1.5 );
  x = self.sig_name_start_x;
  # Draw slider graphics for current view windows |   |--|    |
  x1 = self.sig_value_start_x;
  x2 = self.sig_value_stop_x;
  y1 = y;
  y2 = y1 + self.txt_height;
  y3 = y1 + int(self.txt_height/2);
  self.screen.blit(txt3, ( x, y1 ) );
  lw = 1;# Line Width skinny, deselected
  self.pygame.draw.line(self.screen,self.color_fg,(x1,y1),(x1,y2),lw);
  self.pygame.draw.line(self.screen,self.color_fg,(x2,y1),(x2,y2),lw);
  # print("max_samples is " + str( self.max_samples ) );
  x3 = x1 + ( ( (x2-x1) * sample_start     // self.max_samples ) );
  x4 = x1 + ( ( (x2-x1) * self.sample_stop // self.max_samples ) );
  w  = x4-x3;
  h  = y2-y1;
  self.slider_width = w;
  lw = 1;# Line Width skinny, deselected
  self.pygame.draw.line(self.screen,self.color_fg,(x3,y1),(x3,y1+h),lw);
  self.pygame.draw.line(self.screen,self.color_fg,(x3+w,y1),(x3+w,y1+h),lw);
  self.pygame.draw.line(self.screen,self.color_fg,(x3,y1+h/2),(x3+w,y1+h/2),lw);
  # 7th - cleanup. Draw black box over area on right, one character width.
  w = self.txt_width;
  y = self.sig_value_start_y;
  h = self.sig_value_stop_y - y;
  x = self.screen_width - w;
  self.pygame.draw.rect( self.screen, self.color_bg ,(x,y,w,h), 0);
  # 8th Display the keyboard buffer and command history in a text box
  if ( self.txt_entry == False):
    x = self.sig_name_start_x;
    y = self.sig_name_stop_y + int(self.txt_height/2);
    h = self.screen_height - ( y );
    w = self.sig_value_start_x - x;
    # prompt = ">";
    # cursor = "_";
    # cmd_txt = prompt + self.key_buffer+cursor+" ";
    # txt_list = self.cmd_history[-3:] + [ cmd_txt ];
    cmd_txt = "";
    if ( self.acq_state != "acquire_stop" ):
      cmd_txt = "ACQUIRING";
      # cmd_txt = "ACQUIRING ";
      # if   ( self.spin_char == "-" ): self.spin_char = "\\";
      # elif ( self.spin_char == "\\" ): self.spin_char = "|";
      # elif ( self.spin_char == "/" ): self.spin_char = "-";
      # else                          : self.spin_char = "-";
      if   ( self.spin_char == "." )     : self.spin_char = "..";
      elif ( self.spin_char == ".." )    : self.spin_char = "...";
      elif ( self.spin_char == "..." )   : self.spin_char = "....";
      elif ( self.spin_char == "...." )  : self.spin_char = ".....";
      elif ( self.spin_char == "....." ) : self.spin_char = "";
      else                               : self.spin_char = ".";
      draw_header( self,"Waiting for Trigger "+self.spin_char );
      # print( self.spin_char );
    # txt_list = [ "","","", cmd_txt ];
    # draw_txt_box( self, txt_list, x, y, w, h, False );
    # draw_header( self,cmd_txt);
    # Note: This moved to DOS-Box
  # 9th or display a text entry popup box
  # if ( self.txt_entry == True ):
  #   txt_list = ["Hello There"];
  #   w = ( self.txt_width * 20 );
  #   h = ( self.txt_height * 3 );
  #   x = ( self.screen_width  / 2 ) - ( w / 2 );
  #   y = ( self.screen_height / 2 ) - ( h / 2 );
  #   prompt = ">";
  #   cursor = "_";
  #   cmd_txt = prompt + self.key_buffer+cursor+" ";
  #   txt_list = [ self.txt_entry_caption, cmd_txt ];
  #   draw_txt_box( self, txt_list, x, y, w, h, True );
  # Just for Debug, display the regions by drawing boxes around them.
  # x = self.sig_name_start_x;
  # w = self.sig_name_stop_x - x;
  # y = self.sig_name_start_y;
  # h = self.sig_name_stop_y - y;
  # self.pygame.draw.rect( self.screen, self.color_fg ,(x,y,w,h), 1);
  # x = self.sig_value_start_x;
  # w = self.sig_value_stop_x - x;
  # y = self.sig_value_start_y;
  # h = self.sig_value_stop_y - y;
  # self.pygame.draw.rect( self.screen, self.color_fg ,(x,y,w,h), 1);
  # t1 = self.pygame.time.get_ticks();
  # td = t1-t0;
  # print td;
  return;
###############################################################################
# Draw an individual sample on a surface. Returns the x location of the last
# transition point, as this determines where and when new hex values are to be
# displayed. Its a bit of a tricky algorithm as it centers the values when
# zoom_x is large ( and there is room to display ). When zoom_x is small, it
# only displays values to the right of the last transition point assuming there
# are multiple samples with the same value. When zoom_x is small and the values
# are transitioning, display nothing.
# CRITICAL FUNCTION
def draw_sample(self,surface,val,val_last,last_transition_x,last_width, \
                format,x,y):
  """Draw one sample of one signal onto surface ( CRITICAL inner loop ).

  val / val_last      : current and previous sample value strings.
  last_transition_x   : x of the most recent value transition; hex text is
                        centered between transitions when room allows.
  last_width          : cached pixel width of the hex text ( "hex" only ),
                        avoids re-rendering just to measure width.
  format              : "hex"/"unsigned"/"signed" draw boxed values with
                        'X' transition markers; "bin" draws 0/1 levels.
  Returns the updated ( last_transition_x, last_width ) tuple ( or None
  when the GUI is inactive ).
  """
  if ( self.gui_active == False ):
    return;
  # Draw "<012345678><><>" for hex format
  if ( format == "hex" or format == "unsigned" or format == "signed" ):
    # display Hex if diff from last time OR last time there wasnt room
    # Note: Dramatic speedup (2x) by not doing this render here on hex
    # txt = self.font.render( val , True, self.color_fg );
    if ( format == "hex" ):
      if ( last_width != None ):
        txt_width = last_width;# For 13s render this saved 1s
      else:
        txt_width = len( val ) * self.txt_width;
        last_width = txt_width;
      # Drawing X's was costly in time 10s of 13s total. So Don't, just return
      if ( val == "XXXXXXXX" ):
        return (last_transition_x,last_width);
    else:
      txt = self.font.render( val , True, self.color_fg );
      txt_width = txt.get_width();
    # Is there room to display sample value?
    free_space_x = x - last_transition_x;
    if ( ( val != val_last ) or
         ( val == val_last and txt_width+5 > free_space_x )
       ):
      if ( val != val_last ):
        last_transition_x = x;
      free_space_x = x + self.zoom_x - last_transition_x;
      if ( txt_width+5 < free_space_x ):
        # x3 = last_transition_x + int(free_space_x/2) - int(txt_width/2);
        x3 = last_transition_x + int(free_space_x//2) - int(txt_width//2);
        txt = self.font.render( val , True, self.color_fg );
        # surface.blit(txt, ( x3 , y ));
        surface.blit(txt, ( x3 , y+1 ));
    # If current sample is different than last, draw transition X
    if ( val != val_last ):
      y1 = y+0;
      y2 = y+self.txt_height - 2;
      # Draw crossing "X" for transitions
      x1 = x+2; x2 = x-0;
      self.pygame.draw.line(surface,self.color_fg,(x1,y1),(x2,y2),1);
      self.pygame.draw.line(surface,self.color_fg,(x2,y1),(x1,y2),1);
    if ( val != val_last ):
      x1 = x+2; x2 = x-0 + self.zoom_x;# Dash for 'X' space
    else:
      x1 = x+0; x2 = x-0 + self.zoom_x;# Solid for non transition
    # Draw Line above and below the value
    if ( True ):
      y1 = y+0; y2 = y1;
      self.pygame.draw.line(surface,self.color_fg,(x1,y1),(x2,y2),1);
      y1 = y + self.txt_height - 2; y2 = y1;
      self.pygame.draw.line(surface,self.color_fg,(x1,y1),(x2,y2),1);
  # Draw "_/ \___/ \___" lines for binary format
  if ( format == "bin" ):
    x = x + 1; # Align transition with hex transition spot
    if ( val == "0" ):
      x1 = int(x);
      x2 = int(x + self.zoom_x);
      y1 = y + self.txt_height - 2;
      y2 = y1;
      self.pygame.draw.line(surface,self.color_fg,(x1,y1),(x2,y2),1);
    elif ( val == "1" ):
      x1 = int(x);
      x2 = int(x + self.zoom_x);
      y1 = y + 2;
      y2 = y1;
      self.pygame.draw.line(surface,self.color_fg,(x1,y1),(x2,y2),1);
    if ( val != val_last ):
      x1 = int(x);
      x2 = int(x);
      y1 = y + 2;
      y2 = y + self.txt_height - 2;
      self.pygame.draw.line(surface,self.color_fg,(x1,y1),(x2,y2),1);
  return (last_transition_x,last_width);
###############################################################################
# draw_txt_box(): Draw a txt box from a list to (x,y) and crop to (w,h)
def draw_txt_box( self, txt_list, x, y, w, h, border ):
  """Render txt_list one line per entry at (x,y), cropped to (w,h).

  When border is True an outlined box is drawn first. Lines that would
  spill past the bottom margin are not drawn.
  """
  if ( self.gui_active == False ):
    return;
  if ( border == True ):
    self.pygame.draw.rect( self.screen, self.color_bg,(x,y,w,h), 0 );
    self.pygame.draw.rect( self.screen, self.color_fg,(x,y,w,h), 3 );
  # Inset the text by half a character and shrink the clip width to match
  x1 = x + int(self.txt_width / 2);
  w  = w - int(self.txt_width );
  y1 = y;
  for each in txt_list:
    if ( ( y1 + self.txt_height ) >= (y+h-(self.txt_height/2)) ):
      break;# No vertical room left inside the box
    txt = self.font.render( each , True, self.color_fg, self.color_bg );
    self.screen.blit(txt, (x1,y1), ( (0,0) , (w,h) ) );
    y1 += self.txt_height;
  return;
def debug_vars( self ):
print( "debug_vars()");
# print "self.sig_name_start_x " + str( self.sig_name_start_x );
# print "self.sig_name_start_y " + str( self.sig_name_start_y );
# print "self.sig_value_start_x " + str( self.sig_value_start_x );
# print "self.sig_value_start_y " + str( self.sig_value_start_y );
# print "self.sig_value_stop_x " + str( self.sig_value_stop_x );
# print "self.sig_value_stop_y " + str( self.sig_value_stop_y );
return;
###############################################################################
# Take a sig_obj of N nibbles and return 2 new sig_objs of N/2 nibbles
def expand_signal( sig_obj ):
  """Split a multi-nibble signal into two half-width child signals
     ( top half bits and bottom half bits ) and return [ top, bottom ].
     A signal with an odd nibble count is zero padded up to an even
     count before the split."""
  nib_cnt = sig_obj.bits_total // 4;# ie 8 nibbles for 32 bits
  zero_pad = ( nib_cnt % 2 ) == 1;
  if zero_pad:
    nib_cnt += 1;# Round 7 nibbles up to 8 so the split is even
  half_nibs = nib_cnt // 2;
  top_half = signal( name = sig_obj.name );# ie "foo(31:16)"
  bot_half = signal( name = sig_obj.name );# ie "foo(15:0)"
  for sample in sig_obj.values:
    if zero_pad:
      sample = "0" + sample;# Converts 28bits to 32bits, etc
    # Reverse "12345678" to "87654321" so nibble 0 sits at index 0,
    # slice the halves, then flip each half back to normal order
    flipped = sample[::-1];
    bot_half.values.append( flipped[ 0:half_nibs ][::-1] );
    top_half.values.append( flipped[ half_nibs:2*half_nibs ][::-1] );
  bot_half.bits_total = half_nibs * 4;
  top_half.bits_total = half_nibs * 4;
  bot_half.bit_bot = sig_obj.bit_bot;
  bot_half.bit_top = bot_half.bit_bot + bot_half.bits_total - 1;
  top_half.bit_bot = sig_obj.bit_bot + top_half.bits_total;
  top_half.bit_top = top_half.bit_bot + top_half.bits_total - 1;
  top_half.is_expansion = True;
  bot_half.is_expansion = True;
  return [ top_half, bot_half ];
###############################################################################
# Take a signal of 1 nibbles and return 4 new binary signals
def expand_signal_nib2bin( sig_obj ):
  """Expand a single-nibble signal into four 1-bit binary child
     signals, MSB first, and return them as a list."""
  expanded = [];
  mask = 8;# Start at the MSB of the nibble
  bit_pos = sig_obj.bit_top;
  for _ in range( 4 ):
    one_bit = signal( name = sig_obj.name );
    one_bit.bits_total = 1;
    one_bit.bit_bot = bit_pos;
    one_bit.bit_top = bit_pos;
    one_bit.format = "bin";
    one_bit.is_expansion = True;
    for sample in sig_obj.values:
      one_bit.values.append( "1" if ( int( sample, 16 ) & mask ) else "0" );
    expanded.append( one_bit );
    mask = mask // 2;# Next lower bit
    bit_pos -= 1;
  return expanded;
# Give "/tb_resampler/u_dut/din(7:0)" return "din(7:0)"
def split_name_from_hier( hier_name ):
  """Strip all whitespace, then return the text after the final '/'."""
  compact = "".join( hier_name.split() );
  return compact.split('/')[-1];
# load_format_delete_list() : This is similar to load_format() but is used to
# create a special delete list that tells the VCD parser to not bother with
# deleted signals
def load_format_delete_list( self, file_name ):
  """Parse a wave format file and rebuild self.signal_delete_list,
     setting each signal's hidden / deleted / visible attributes from
     the -hidden, -deleted and -invisible flags on its line."""
  try:
    with open( file_name, "r" ) as file_in:
      file_lines = file_in.readlines();
  except:
    print( "ERROR Input File: "+file_name);
    return;
  delete_list = [];
  for line in file_lines:
    words = " ".join( line.split() ).split(' ') + [None] * 20;
    if words[0][0:1] == "#":
      continue;# Skip comment lines
    sig_obj = add_signal( self, words[0].lstrip() );
    # Defaults first, then override from any flags present on the line
    sig_obj.visible = True;
    sig_obj.hidden = "-hidden" in line;
    sig_obj.deleted = "-deleted" in line;
    if "-invisible" in line:
      sig_obj.visible = False;
    delete_list.append( sig_obj );
  self.signal_delete_list = delete_list[:];
# load_format() : A format file ( wave.txt ) looks like a ChipVault HLIST.TXT
# indentation indicates hierarchy order
#/tb_vcd_capture
# /tb_vcd_capture/u_dut
#  clk
#  reset
#  /tb_vcd_capture/u_dut/mode
#
def load_format( self, file_name ):
  """Parse a wave format file into self.signal_list. Leading whitespace
     on each line encodes hierarchy depth; flag words ( -hidden, -hex,
     -nickname, etc ) set display attributes. A parent whose first child
     is invisible gets [+] ( expandable ), otherwise [-] ( collapsable ).
     Clears all selections when done."""
  new_signal_list = [];
  try: # Read Input File
    file_in    = open( file_name , "r" );
    file_lines = file_in.readlines();
    file_in.close();
  except:
    print( "ERROR Input File: "+file_name );
    return;# Fix: was missing, fell through to NameError on file_lines
  # 1st Iteration assigns a space count to each hierarchy level
  # Makes the 1st one level 0
  hier_level = -1;
  hier_space = -1;
  hier_dict  = {};
  last_sig_obj = None;
  for each in file_lines:
    words = " ".join(each.split()).split(' ') + [None] * 20;
    if ( words[0][0:1] != "#" ):
      name = words[0].lstrip();
      # Create a new sig_obj ( or fetch the existing one by name )
      sig_obj = add_signal( self, name );
      # Assign Attribs
      sig_obj.collapsable = False;
      sig_obj.expandable  = False;
      sig_obj.visible     = True;
      sig_obj.hidden      = False;
      if ( "-bundle" in each ):
        sig_obj.type = "bundle";
      if ( "-hidden" in each ):
        sig_obj.hidden = True;
      if ( "-deleted" in each ):
        sig_obj.deleted = True;
      if ( "-invisible" in each ):
        sig_obj.visible = False;
      if ( "-hex" in each ):
        sig_obj.format = "hex";
      if ( "-unsigned" in each ):
        sig_obj.format = "unsigned";
      if ( "-signed" in each ):
        sig_obj.format = "signed";
      if ( "-nickname" in each ):
        for ( i , each_word ) in enumerate( words ):
          if ( each_word == "-nickname" ):
            if ( words[i+1] != "None" ):
              sig_obj.nickname = words[i+1];# Assume this word exists
      # Calculate Hierarchy Location by counting whitespace
      space_cnt = len( each ) - len( each.lstrip() );
      if ( space_cnt > hier_space ):
        hier_space = space_cnt;
        hier_level += 1;
        hier_dict[ hier_space ] = hier_level;
        # Since the hierarchy level got deeper, the last guy is a parent
        # so assign parent attribute collapsable.
        # Assign [+] or [-] based on visibility of 1st object
        if ( last_sig_obj != None ):
          if ( sig_obj.visible == False ):
            last_sig_obj.collapsable = False;
            last_sig_obj.expandable  = True;
          else:
            last_sig_obj.collapsable = True;
            last_sig_obj.expandable  = False;
      else:
        # NOTE(review): assumes every dedent returns to a previously seen
        # space count - an unseen count would KeyError here. Confirm the
        # format writer guarantees this.
        hier_level = hier_dict[ space_cnt ];
        hier_space = space_cnt;
      sig_obj.hier_level = hier_level;
      new_signal_list.append( sig_obj );
      last_sig_obj = sig_obj;
  self.signal_list = new_signal_list[:];
  # Unselect Everything
  self.sig_obj_sel = None;
  for sig_obj in self.signal_list:
    sig_obj.selected = False;# DeSelect All
  return;
# Given a name, return an object that matches the name or create a new one
def add_signal( self, name ):
  """Return the existing signal object from self.signal_list whose full
     hierarchical name ( hier_name + "/" + name ) or short name matches
     'name'. When no match exists, create and return a fresh object with
     default attributes ( used for Dividers, Groups, etc )."""
  sig_obj = None;
  # Look for the name in the signal list and assign to sig_obj if found
  for each in self.signal_list:
    # Find object of signal_hier_name in old signal list, append to new
    # after assigning some attributes
    if ( ( (each.hier_name + "/" + each.name) == name ) or \
         ( ( each.name) == name ) ):
      sig_obj = each;
  # If name wasnt found, create new object ( Divider, Group, etc )
  if ( sig_obj == None ):
    sig_obj = signal( name= split_name_from_hier( name ) );
    # Fix: trailing '"signal", "group", ...' was a stray no-op expression,
    # clearly intended as this comment listing the valid type values.
    sig_obj.type = "";# "signal", "group", "endgroup", "divider"
    sig_obj.bits_per_line = 0;
    sig_obj.bits_total = 0;
    sig_obj.bit_top = 0;
    sig_obj.bit_bot = 0;
    sig_obj.format = "";
  return sig_obj;
def add_wave( self, words ):
  """Process an "add_wave" command ( already split into words ) and
     return the matching signal object from self.signal_list, creating a
     new one ( group / endgroup / divider ) when no match exists. Flag
     words ( -hidden, -expandable, -collapsable, -grouped, -nickname )
     set display attributes. Returns None for non add_wave commands."""
  sig_obj = None;# Fix: was unbound at return when words[0] != "add_wave"
  # Change "foo(7:0)" to "foo" so that it matches hier_name+"/"+name
  signal_hier_name = words[2];
  i = signal_hier_name.find("(");
  if ( i != -1 ):
    signal_hier_name = signal_hier_name[0:i];# Strip the rip
  if ( words[0] == "add_wave" ):
    # Look for the name in the signal list and assign to sig_obj if found
    for each in self.signal_list:
      # Find object of signal_hier_name in old signal list, append to new
      # after assigning some attributes
      if ( ( (each.hier_name + "/" + each.name) == signal_hier_name ) or \
           ( ( each.name) == signal_hier_name ) ):
        sig_obj = each;
    # If name wasnt found, create new object ( Divider, Group, etc )
    if ( sig_obj == None ):
      sig_obj = signal( name= split_name_from_hier( signal_hier_name ) );
      sig_obj.type = words[1];# "group", "endgroup", "divider"
      sig_obj.bits_per_line = 0;
      sig_obj.bits_total = 0;
      sig_obj.bit_top = 0;
      sig_obj.bit_bot = 0;
      sig_obj.format = "";
    # Search for "-hidden" and turn off visible if found
    sig_obj.visible = True; # Default to visible
    sig_obj.grouped = False; # Default to not grouped
    for ( i , each_word ) in enumerate( words ):
      if ( each_word == "-hidden" ):
        sig_obj.visible = False; # Hide
      elif ( each_word == "-expandable" ):
        sig_obj.expandable = True;
        sig_obj.collapsable = False;
      elif ( each_word == "-collapsable" ):
        sig_obj.collapsable = True;
        sig_obj.expandable = False;
      elif ( each_word == "-grouped" ):
        sig_obj.grouped = True; # Part of a group
      elif ( each_word == "-nickname" ):
        sig_obj.nickname = words[i+1];# Assume this word exists;
  # Append old object to new list
  return sig_obj;
###############################################################################
# Dump the signal_list to an ASCII hlist.txt
def save_format( self, file_name, selected_only ):
  """Write self.signal_list out as an indented ASCII format file.
     Indentation encodes hier_level; display attributes become flag
     words ( -bundle, -hidden, -invisible, -<format>, -nickname ).
     When selected_only is True, only selected signals are written.
     An empty list is never written so a good file survives a crash."""
  log( self, ["save_format() " + file_name ] );
  out_list = [];
  for sig_obj in self.signal_list:
    hier_str = (sig_obj.hier_level*" ");# Indentation encodes hierarchy
    attribs = "";
    if ( sig_obj.type == "bundle" ):
      attribs += " -bundle";
    if ( sig_obj.hidden == True ):
      attribs += " -hidden";
    if ( sig_obj.visible == False ):
      attribs += " -invisible";
    if ( sig_obj.format != "bin" and sig_obj.format != "" ):
      attribs += " -" + sig_obj.format;
    if ( sig_obj.nickname != "" ):
      attribs += " -nickname " + sig_obj.nickname;
    rts = hier_str + sig_obj.hier_name + "/" + sig_obj.name + " " + attribs;
    # Fix: was "each.selected" - "each" is not a variable in this loop and
    # raised NameError whenever selected_only was True.
    if ( selected_only == False or sig_obj.selected == True ):
      out_list += [ rts ];
  # When SUMP2 crashes, it tends to leave empty signal list, so keep old file
  if ( len( out_list ) > 0 and self.vcd_import == False ):
    import os;
    if ( os.path.exists( file_name ) == True ):
      os.remove( file_name );
    file_out = open( file_name , "w" );
    for each in out_list:
      file_out.write( each + "\n" );
    print( "closing ", file_name);
    file_out.close();
  else:
    print("ERROR: Empty Signal List");
  return;
########################################################
# Given a VCD or TXT file, make signal_list from it
def file2signal_list( self, file_name ):
  """Dispatch on file extension: .vcd files go to the VCD parser,
     everything else is treated as a plain text dump."""
  log( self, ["file2signal_list()"] );
  import os.path
  ext = os.path.splitext( file_name )[1].lower();
  if ( ext == ".vcd" ):
    vcdfile2signal_list( self, file_name );
  else:
    txtfile2signal_list( self, file_name );
  return;
########################################################
# Write a DWORD to specified SUMP Nibble Ctrl Address
#def sump_wr( self, addr, data ):
# self.bd.wr( self.sump_ctrl, [ addr ] );
# self.bd.wr( self.sump_data, [ data ] );
# return;
########################################################
# Read one or more DWORDs from SUMP Nibble Ctrl Address
# if address None - don't change from existing Address
#def sump_rd( self, addr, num_dwords = 1):
# if ( addr != None ):
# self.bd.wr( self.sump_ctrl, [ addr ] );
# return self.bd.rd( self.sump_data, num_dwords, repeat = True);
########################################################
# This is for removing an item from the popup list. It
# handles going down a hierarchy level into a sublist
def list_remove( my_list, item ):
  """Remove the first occurrence of item from my_list and from each
     sublist directly inside my_list. Missing items are silently
     ignored."""
  # Fix: the original bare "except:" swallowed every exception type;
  # list.remove() only raises ValueError when the item is absent.
  try:
    my_list.remove( item );
  except ValueError:
    pass;# Item not present at the top level - that's fine
  for each in my_list:
    if ( type( each ) == list ):
      try:
        each.remove( item );
      except ValueError:
        pass;# Item not present in this sublist
  return;
########################################################
# Establish connection to Sump2 hardware
def sump_connect( self ):
  """Open the backdoor socket connection, probe for SUMP2 hardware and
     apply a default trigger / RLE configuration. Returns True on
     success. Returns False ( and sets self.fatal_msg ) when either the
     bd_server or the SUMP hardware can not be located."""
  log( self, ["sump_connect()"] );
  self.bd=Backdoor( self.vars["bd_server_ip"],
                    int( self.vars["bd_server_socket"], 10 ) );# Note dec
  if ( self.bd.sock == None ):
    txt = "ERROR: Unable to locate BD_SERVER";
    self.fatal_msg = txt;
    print( txt );
    log( self, [ txt ] );
    return False;
  self.sump = Sump2( self.bd, int( self.vars["sump_addr"],16 ) );
  self.sump.rd_cfg();# populate sump.cfg_dict[] with HW Configuration
  if ( self.sump.cfg_dict['hw_id'] != 0xABBA ):
    txt = "ERROR: Unable to locate SUMP Hardware";
    self.fatal_msg = txt;
    print( txt );
    log( self, [ txt ] );
    return False;
  # Adjust the GUI menu to remove features that don't exist in this hardware.
  # Fix: use the tolerant list_remove() helper everywhere - the raw
  # list.remove() calls raised ValueError if an entry was absent.
  if ( self.sump.cfg_dict['nonrle_dis'] == 1 ):
    list_remove( self.popup_list_values, "Acquire_Normal");
    list_remove( self.popup_list_values, "Acquire_Single");
    list_remove( self.popup_list_values, "Acquire_Continuous");
  if ( self.sump.cfg_dict['rle_en'] == 0 ):
    list_remove( self.popup_list_values, "Acquire_RLE_1x");
    list_remove( self.popup_list_values, "Acquire_RLE_8x");
    list_remove( self.popup_list_values, "Acquire_RLE_64x");
  if ( self.sump.cfg_dict['trig_wd_en'] == 0 ):
    list_remove( self.popup_list_names, "Trigger_Watchdog");
    list_remove( self.popup_list_names, "sump_watchdog_time");
  if ( self.sump.cfg_dict['data_en'] == 0 ):
    list_remove( self.popup_list_names, "Set_Data_Enable");
    list_remove( self.popup_list_names, "Clear_Data_Enable");
  if ( self.sump.cfg_dict['pattern_en'] == 0 ):
    list_remove( self.popup_list_names, "Set_Pattern_0");
    list_remove( self.popup_list_names, "Set_Pattern_1");
    list_remove( self.popup_list_names, "Clear_Pattern_Match");
  if ( self.sump.cfg_dict['trig_nth_en'] == 0 ):
    list_remove( self.popup_list_names, "sump_trigger_nth");
  if ( self.sump.cfg_dict['trig_dly_en'] == 0 ):
    list_remove( self.popup_list_names, "sump_trigger_delay");
  # Default hardware configuration prior to any user driven arming
  sump_size = self.sump.cfg_dict['ram_len'];
  self.sump.wr( self.sump.cmd_wr_user_ctrl,      0x00000000 );
  self.sump.wr( self.sump.cmd_wr_watchdog_time,  0x00001000 );
  self.sump.wr( self.sump.cmd_wr_user_pattern0,  0x000FFFFF );# Pattern Mask
  self.sump.wr( self.sump.cmd_wr_user_pattern1,  0x000055FF );# Pattern
  self.sump.wr( self.sump.cmd_wr_trig_type,   self.sump.trig_pat_ris );
  self.sump.wr( self.sump.cmd_wr_trig_field,     0x00000000 );#
  self.sump.wr( self.sump.cmd_wr_trig_dly_nth,   0x00000001 );#Delay + nTh
  self.sump.wr( self.sump.cmd_wr_trig_position,  sump_size//2);#SamplesPostTrig
  self.sump.wr( self.sump.cmd_wr_rle_event_en,   0xFFFFFFFF );#RLE event en
  self.sump.wr( self.sump.cmd_state_reset,       0x00000000 );
  return True;
########################################################
# Talk to sump2 hardware and arm for acquisition ( or dont )
# determining the BRAM depth.
def sump_arm( self, en ):
  """Arm ( en==True ) or reset ( en==False ) the SUMP2 acquisition
     engine. Trigger configuration is read from self.vars and written to
     hardware before arming. Malformed variables abort before any
     hardware write."""
  log( self, ["sump_arm()"]);
  if ( en != True ):
    self.sump.wr( self.sump.cmd_state_reset, 0x00000000 );
    return;
  # Fix: the original bare "except:" wrapped the hardware writes too, so a
  # socket failure printed the misleading conversion-error message. Only
  # the variable conversions can raise the errors this message describes.
  try:
    trig_type     =      self.vars["sump_trigger_type"  ];
    trig_field    = int( self.vars["sump_trigger_field" ],16 );
    rle_event_en  = int( self.vars["sump_rle_event_en"  ],16 );
    trig_delay    = int( self.vars["sump_trigger_delay" ],16 );
    trig_nth      = int( self.vars["sump_trigger_nth"   ],16 );
    data_en       = int( self.vars["sump_data_enable"   ],16 );
    user_ctrl     = int( self.vars["sump_user_ctrl"     ],16 );
    user_pattern0 = int( self.vars["sump_user_pattern0" ],16 );
    user_pattern1 = int( self.vars["sump_user_pattern1" ],16 );
    wd_time       = int( self.vars["sump_watchdog_time" ],16 );
  except ( KeyError, ValueError, TypeError ):
    print("ERROR: Unable to convert sump variables to hex");
    return;
  # Convert trigger ASCII into integers
  if ( trig_type == "or_rising" ):
    trig_type_int = self.sump.trig_or_ris;
  elif ( trig_type == "or_falling" ):
    trig_type_int = self.sump.trig_or_fal;
  elif ( trig_type == "watchdog" ):
    trig_type_int = self.sump.trig_watchdog;
  elif ( trig_type == "pattern_rising" ):
    trig_type_int = self.sump.trig_pat_ris;
  else:
    trig_type_int = 0;
  # Pack 16bit trig_delay and trig_nth into single dword
  trig_dly_nth = ( trig_delay << 16 ) + ( trig_nth << 0 );
  if ( trig_dly_nth == 0x0 ):
    print("WARNING: trig_nth is ZERO!!");
  print("%08x" % trig_type_int );
  print("%08x" % trig_field );
  print("%08x" % trig_dly_nth );
  print("%08x" % data_en );
  print("%08x" % user_ctrl );
  print("%08x" % user_pattern0 );
  print("%08x" % user_pattern1 );
  self.sump.wr( self.sump.cmd_wr_trig_type  ,  trig_type_int );
  self.sump.wr( self.sump.cmd_wr_trig_field,   trig_field );
  self.sump.wr( self.sump.cmd_wr_trig_dly_nth, trig_dly_nth );
  self.sump.wr( self.sump.cmd_wr_rle_event_en, rle_event_en );
  self.sump.wr( self.sump.cmd_wr_user_data_en, data_en );
  self.sump.wr( self.sump.cmd_wr_user_ctrl  ,  user_ctrl);
  self.sump.wr( self.sump.cmd_wr_watchdog_time, wd_time );
  self.sump.wr( self.sump.cmd_wr_user_pattern0, user_pattern0);
  self.sump.wr( self.sump.cmd_wr_user_pattern1, user_pattern1);
  self.sump.wr( self.sump.cmd_state_reset, 0x00000000 );
  self.sump.wr( self.sump.cmd_state_arm,   0x00000000 );
  return;
# self.trig_and_ris = 0x00;# Bits AND Rising
# self.trig_and_fal = 0x01;# Bits AND Falling
# self.trig_or_ris = 0x02;# Bits OR Rising
# self.trig_or_fal = 0x03;# Bits OR Falling
# self.trig_pat_ris = 0x04;# Pattern Match Rising
# self.trig_pat_fal = 0x05;# Pattern Match Falling
# self.trig_in_ris = 0x06;# External Input Trigger Rising
# self.trig_in_fal = 0x07;# External Input Trigger Falling
# self.cmd_wr_trig_type = 0x04;
# self.cmd_wr_trig_field = 0x05;# Correspond to Event Bits
# self.cmd_wr_trig_dly_nth = 0x06;# Trigger Delay and Nth
# self.cmd_wr_trig_position = 0x07;# Samples post Trigger to Capture
# self.cmd_wr_rle_event_en = 0x08;# Enables events for RLE detection
# self.cmd_wr_ram_ptr = 0x09;# Load specific pointer.
# self.cmd_wr_ram_page = 0x0a;# Load DWORD Page.
# self.cmd_rd_hw_id_rev = 0x0b;
# self.cmd_rd_ram_width_len = 0x0c;
# self.cmd_rd_sample_freq = 0x0d;
# self.cmd_rd_trigger_ptr = 0x0e;
# self.cmd_rd_ram_data = 0x0f;
# self.cmd_wr_user_ctrl = 0x10;
# self.cmd_wr_user_pattern0 = 0x11;# Also Mask for Pattern Matching
# self.cmd_wr_user_pattern1 = 0x12;# Also Pattern for Pattern Matching
# self.cmd_wr_user_data_en = 0x13;# Special Data Enable Capture Mode
########################################################
# Dump acquired data to a file. This is a corner turn op
def sump_save_txt( self, file_name, mode_vcd = False ):
  """Corner-turn the acquired samples ( stored per-signal in
     self.signal_list ) into per-timestep text lines and write them to
     file_name. The first sample additionally emits a header line of
     signal nicknames plus the sample period, "#"-prefixed unless
     mode_vcd is True. Each line holds the visible event bits ( MSB to
     LSB ) followed by each visible dword as 8 hex chars; mode_vcd
     changes the whitespace grouping for the downstream VCD converter."""
  log( self, ["sump_save_txt()"]);
  print("sump_save_txt()");
  ram_dwords = self.sump.cfg_dict['ram_dwords'];
  ram_bytes  = self.sump.cfg_dict['ram_event_bytes'];
  ram_len    = self.sump.cfg_dict['ram_len'];
  events     = ram_bytes * 8;
# if ( mode_vcd == True ):
#   file_name = "sump_dump.txt4vcd";
# else:
#   file_name = "sump_dump.txt";
  file_out  = open( file_name, 'w' );
  if ( mode_vcd == False ):
    name_str = "#";
    nickname_str = "#";
  else:
    name_str = "";
    nickname_str = "";
  percent = 0;
  percent_total = ((1.0)*self.max_samples );
  print("max_samples = " + str( self.max_samples ) );
  for i in range( 0, self.max_samples, 1):
    # This takes a while, so calculate and print percentage as it goes by
    if ( ((i*1.0) / percent_total) > percent ):
      perc_str = ( str( int(100*percent) ) + "%");
      draw_header( self, "VCD Conversion " + perc_str );
      percent += .01;
    txt_str = "";
    m = 0;
    # Iterate the list searching for all the events in binary order
    # ( j counts down so the MSB event appears first on the line )
    for j in range( ram_bytes*8, 0, -1):
      for sig_obj in self.signal_list:
        if ( sig_obj.name == "event[%d]" % (j-1) and sig_obj.hidden == False ):
          txt_str += sig_obj.values[i];
          m +=1;
          # Group 8 bits per byte normally, or 1 bit per group for VCD
          if ( m == 8 or ( m == 1 and mode_vcd == True ) ):
            txt_str += " ";# Add whitespace between each byte group
            m = 0;
          if ( i == 0 ):
            # Build the header strings on the very first sample only
            name_str += sig_obj.name + " ";
            if ( sig_obj.nickname != "" ):
              nickname_str += sig_obj.nickname + " ";
            else:
              nickname_str += sig_obj.name + " ";
    if ( mode_vcd == False ):
      txt_str += " ";# Add whitespace between events and dwords
    # Iterate the list searching for all the dwords in order
    for j in range( 0, ram_dwords, 1 ):
      for sig_obj in self.signal_list:
        if ( sig_obj.name == "dword[%d]" % j and sig_obj.hidden == False ):
          # Samples past the end of this dword's values are padded
          if ( i >= len( sig_obj.values )):
            txt_str += "XXXXXXXX";
          else:
            txt_str += sig_obj.values[i];
          txt_str += " ";# Add whitespace between each dword
          if ( i == 0 ):
            name_str += sig_obj.name + " ";
            nickname_str += sig_obj.nickname + " ";
    # print txt_str;# This line is a time sample for all signals
    if ( i == 0 ):
      # Header line: nicknames plus the sample period in ps
      # NOTE(review): 1000000.0/freq_mhz yields ps only if 'frequency' is
      # in MHz - confirm against Sump2.rd_cfg().
      freq_mhz = self.sump.cfg_dict['frequency'];
      freq_ps  = 1000000.0 / freq_mhz;
      file_out.write( nickname_str + " " + ("%f" % freq_ps ) + "\n" );
    file_out.write( txt_str + "\n" );
  file_out.close();
  return;
########################################################
# Dump acquired data to a file
def sump_save_vcd( self ):
  """Placeholder for direct VCD export - not implemented yet."""
  return;
def refresh( self ):
  """Service the pygame event queue and repaint the display.
     Does nothing in command line ( CLI ) mode."""
  if ( self.mode_cli == True ):
    return;
  import pygame;
  pygame.event.get();# Avoid "( Not Responding )"
  pygame.display.update();
  return;
#########################################################################
# Dump acquired data from SUMP engine and merge with existing signal list
def sump_dump_data( self ):
  """Read the non-RLE acquisition RAM from hardware and load the sample
     values into the matching event[n] ( 1-bit binary ) and dword[n]
     ( 32-bit hex ) signal objects in self.signal_list. Returns the
     sample index of the trigger ( fixed at the midpoint for now )."""
  log( self, ["sump_dump_data()"]);
  ram_dwords = self.sump.cfg_dict['ram_dwords'];
  ram_bytes  = self.sump.cfg_dict['ram_event_bytes'];
  ram_rle    = self.sump.cfg_dict['ram_rle'];
# ram_len    = self.sump.cfg_dict['ram_len'];
  ( ram_pre, ram_post, ram_len, ram_phys ) = sump_ram_len_calc(self);
  events     = ram_bytes * 8;# Example, 32 events total for 4 ram_bytes
  # Non-RLE capture: the dwords span the entire sample range
  self.dwords_start = 0;
  self.dwords_stop  = ram_phys;
  # Event Signals : page 0 holds the packed event bits
  rd_page = 0;
  dump_data = sump_dump_var_ram(self,rd_page = rd_page );
  for i in range( 0, events, 1 ):
    txt = ("Event %d of %d" % ( i+1, events ) );
    draw_header( self, "sump_dump_data() " + txt);
    refresh( self );
    # Iterate the list of signals and find one with correct physical name
    my_signal = None;
    for each_signal in self.signal_list:
      if ( each_signal.name == "event[%d]" % i ):
        my_signal = each_signal;
    if ( my_signal != None ):
      my_signal.values = [];
      my_signal.format = "bin";
      my_signal.bits_total = 1;
      my_signal.bit_top = 0;
      my_signal.bit_bot = 0;
      bit_val = (1 << i );
      # Extract this event's bit from every packed dword sample
      for j in range( 0, ram_len, 1):
        if ( ( dump_data[j] & bit_val ) != 0x0 ):
          bit = "1";
        else:
          bit = "0";
        my_signal.values.append( bit );
  # DWORD Signals : pages 0x10+ hold one 32bit dword stream each
  for i in range( 0, ram_dwords , 1 ):
    txt = ("DWORD %d" % i );# NOTE: dead assignment, overwritten next line
    txt = ("DWORD %d of %d" % ( i+1, ram_dwords ) );
    draw_header( self, "sump_dump_data() " + txt);
    refresh(self);
    dump_data = sump_dump_var_ram(self, rd_page = ( 0x10 + i ) );
    # Iterate the list of signals and find one with correct physical name
    my_signal = None;
    for each_signal in self.signal_list:
      if ( each_signal.name == "dword[%d]" % i ):
        my_signal = each_signal;
    if ( my_signal != None ):
      my_signal.values = [];
      my_signal.format = "hex";
      my_signal.bits_total = 32;
      my_signal.bit_top = 31;
      my_signal.bit_bot = 0;
      for j in range( 0, ram_len, 1):
        my_signal.values.append( "%08x" % dump_data[j] );
  sump_bundle_data( self );
  recalc_max_samples( self );
  trig_i = (self.max_samples // 2);# Trigger fixed at 50/50 for now
  return trig_i;
#########################################################################
# Search the signal list for any type bundles and calculate their sample
# values based on their children
def sump_bundle_data( self ):
  """Walk self.signal_list and, for every signal of type "bundle",
     recompute its sample values by combining the 1-bit values of the
     child signals that follow it at a deeper hier_level. Child i
     supplies bit i of each bundle sample ( emitted as hex strings )."""
  my_signal = None;
  for each_signal in self.signal_list:
    if ( my_signal != None ):
      if ( each_signal.hier_level > my_level ):
        rip_list += [ each_signal.values ];# Still inside the bundle
      else:
        # First sibling at/above bundle level closes the bundle out:
        # corner-turn the children's bit lists into per-sample values
        value_list = zip( *rip_list );
        my_signal.values = [];
        my_signal.bit_top = len( rip_list )-1;
        my_signal.bit_bot = 0;
        my_signal.bits_total = my_signal.bit_top + 1;
        for each_sample in value_list:
          bit = 0;
          for (i,each_bit) in enumerate ( each_sample ):
            if ( each_bit == "1" ):
              bit += ( 1 << i );
          my_signal.values += [ "%x" % bit ];
        my_signal = None;
    # NOTE(review): a bundle whose children run to the very end of
    # signal_list never reaches the close-out branch above, so its values
    # stay stale - confirm lists always end with a non-child entry.
    if ( each_signal.type == "bundle" ):
      my_signal = each_signal;
      my_level  = my_signal.hier_level;
      rip_list  = [];
  return;
#########################################################################
# Dump acquired data from SUMP engine and merge with existing signal list
def sump_dump_rle_data( self ):
  """Download an RLE acquisition from hardware, decompress it into
     uniform time samples and load the results into the event[n] signal
     objects. Non-RLE dword[n] samples are padded with "XXXXXXXX"
     entries on both sides so they align with the RLE timeline. For Nx
     undersampled modes the event streams are decimated afterwards.
     Returns the decompressed trigger sample index."""
  print("sump_dump_rle_data()");
  log( self, ["sump_dump_rle_data()"]);
  ram_dwords = self.sump.cfg_dict['ram_dwords'];
  ram_bytes  = self.sump.cfg_dict['ram_event_bytes'];
  ram_rle    = self.sump.cfg_dict['ram_rle'];
  rle_pre_trig_len   = self.vars["sump_rle_pre_trig_len" ];
  rle_post_trig_len  = self.vars["sump_rle_post_trig_len" ];
  trig_delay    = int( self.vars["sump_trigger_delay" ],16 );
# self.undersample_rate = int(self.vars["sump_rle_undersample" ],16);
# if ( self.acq_state == "acquire_rle_undersampled" ):
#   self.undersample_data = True;
  # Derive the undersample ratio from the acquisition state name
  if ( self.acq_state == "acquire_rle_1x" ):
    self.undersample_data = False;
    self.undersample_rate = 1;
  elif ( self.acq_state == "acquire_rle_4x" ):
    self.undersample_data = True;
    self.undersample_rate = 4;
  elif ( self.acq_state == "acquire_rle_8x" ):
    self.undersample_data = True;
    self.undersample_rate = 8;
  elif ( self.acq_state == "acquire_rle_16x" ):
    self.undersample_data = True;
    self.undersample_rate = 16;
  elif ( self.acq_state == "acquire_rle_64x" ):
    self.undersample_data = True;
    self.undersample_rate = 64;
  rle_pre_trig_len   *= self.undersample_rate;
  rle_post_trig_len  *= self.undersample_rate;
# print("##");
# print( rle_pre_trig_len );
# print( rle_post_trig_len );
# ram_len    = self.sump.cfg_dict['ram_len'];
  ( ram_pre, ram_post, ram_len, ram_phys ) = sump_ram_len_calc(self);
  events     = ram_bytes * 8;# Example, 32 events total for 4 ram_bytes
  # Event Signals : page 0x2 is RLE data, page 0x3 the RLE timestamps
  rd_page = 0;
  print("sump_dump_ram( rle_data )");
  rle_data = sump_dump_ram(self,rd_page = 0x2, rd_ptr = 0x0000 );
  print("sump_dump_ram( rle_time )");
  rle_time = sump_dump_ram(self,rd_page = 0x3, rd_ptr = 0x0000 );
  rle_list = list(zip( rle_time, rle_data ));
# print("Oy");
# print( len(rle_time ) );
# print( len(rle_data ) );
# print( len(rle_list ) );
  print("process_rle()");
  (start_t,stop_t, pre_trig, post_trig ) = process_rle(self,rle_list);
# print("start_time = %08x" % start_t );
# print("stop_time  = %08x" % stop_t );
# if ( ( stop_t - start_t ) > 0x00100000 ):
# if ( ( stop_t - start_t ) > 0x01000000 ):
#   print("ERROR: Time span is too large");
#   shutdown( self );
  # Keep a raw hex dump of the compressed capture for offline debug
  rle_hex_list = [];
  for ( rle_time, rle_data ) in ( pre_trig + post_trig ):
    rle_hex_list += [ ("%08x %08x" % ( rle_time, rle_data ) )];
  list2file( self, "sump2_rle_dump.txt", rle_hex_list );
  print("expand_rle()");
  (dump_data,trig_i) = expand_rle( self, start_t,stop_t,pre_trig,post_trig );
# print( len( dump_data ) );
  print("Generating RLE Event Signal List of values");
  for i in range( 0, events, 1 ):
    txt = ("Event %d of %d" % ( i+1, events ) );
    draw_header( self, "sump_dump_rle_data() " + txt);
    refresh( self );
    # Iterate the list of signals and find one with correct physical name
    my_signal = None;
    for each_signal in self.signal_list:
      if ( each_signal.name == "event[%d]" % i ):
        my_signal = each_signal;
    if ( my_signal != None ):
      my_signal.values = [];
      my_signal.format = "bin";
      my_signal.bits_total = 1;
      my_signal.bit_top = 0;
      my_signal.bit_bot = 0;
      bit_val = (1 << i );
      # Hidden signals keep empty values lists ( skips the slow loop )
      if ( my_signal.hidden == False ):
        for j in range( 0, len( dump_data ) , 1):
          if ( ( dump_data[j] & bit_val ) != 0x0 ):
            bit = "1";
          else:
            bit = "0";
          my_signal.values.append( bit );
      if ( self.undersample_data == True ):
        rle_undersample_signal( self, self.undersample_rate, my_signal );
  # Align non-RLE dword data with the RLE samples by calculating Null samples
  # before and after trigger event
  #      |                T                 |  : RLE dump_data
  #      | pre_pad  | dword_data | post_pad |
  pre_pad  = ( trig_delay + 2 + trig_i - ram_phys//2) * \
             [ "XXXXXXXX" ];
  post_pad = ( len( dump_data ) - len( pre_pad ) - ram_phys - trig_delay ) * \
             [ "XXXXXXXX"];
  # Remember where DWORDs are within RLE samples and use to speed up rendering
  # by not bothering with DWORDs if outside of current view.
  self.dwords_start = len(pre_pad);
  self.dwords_stop  = self.dwords_start + ram_phys;
  if ( self.undersample_data == False ):
    print("Generating RLE DWORD Signal List of values");
    # DWORD Signals. Just Null out all samples since RLE acquisition
    trig_ptr = self.sump.rd( self.sump.cmd_rd_trigger_ptr )[0];
    ram_ptr = 0xFFFF & (trig_ptr - ram_phys//2 );
    for i in range( 0, ram_dwords , 1 ):
      txt = ("DWORD %d of %d" % ( i+1, ram_dwords ) );
      draw_header( self, "sump_dump_rle_data() " + txt);
      refresh( self );
      dump_data = sump_dump_ram(self,rd_page = (0x10+i), rd_ptr = ram_ptr );
      # Iterate the list of signals and find one with correct physical name
      my_signal = None;
      for each_signal in self.signal_list:
        if ( each_signal.name == "dword[%d]" % i ):
          my_signal = each_signal;
      if ( my_signal != None ):
        # pad : real dword samples : pad, so timeline matches the events
        my_signal.values = pre_pad[:];
        my_signal.format = "hex";
        my_signal.bits_total = 32;
        my_signal.bit_top = 31;
        my_signal.bit_bot = 0;
        for j in range( 0, ram_phys, 1):
          my_signal.values.append( "%08x" % dump_data[j] );
        my_signal.values += post_pad;
  # Undersampling Events, so just create NULL DWORDs
  else:
    for i in range( 0, ram_dwords , 1 ):
      my_signal = None;
      for each_signal in self.signal_list:
        if ( each_signal.name == "dword[%d]" % i ):
          my_signal = each_signal;
      if ( my_signal != None ):
        my_signal.values = [];
        my_signal.format = "hex";
        my_signal.bits_total = 32;
        my_signal.bit_top = 31;
        my_signal.bit_bot = 0;
  # Note: This doesn't work the best, so disabling
  if ( False ):
    print("Culling excess RLE sample pre and post trigger");
    # Cull samples to max pre and post trig lengths to keep display usable
    rle_pre_trig_len  = int( self.vars["sump_rle_pre_trig_len" ],16);
    rle_post_trig_len = int( self.vars["sump_rle_post_trig_len" ],16);
    total_samples = len(pre_pad ) + ram_phys + len( post_pad );
    pre_trig  = trig_i;
    post_trig = total_samples - trig_i;
    start_ptr = 0;
    stop_ptr  = -1;
    if ( pre_trig > rle_pre_trig_len ):
      start_ptr = trig_i - rle_pre_trig_len;
    if ( post_trig > rle_post_trig_len ):
      stop_ptr = trig_i + rle_post_trig_len;
    for i in range( 0, events, 1 ):
      my_signal = None;
      for each_signal in self.signal_list:
        if ( each_signal.name == "event[%d]" % i ):
          my_signal = each_signal;
      if ( my_signal != None ):
        my_signal.values = my_signal.values[start_ptr:stop_ptr];
    for i in range( 0, ram_dwords , 1 ):
      my_signal = None;
      for each_signal in self.signal_list:
        if ( each_signal.name == "dword[%d]" % i ):
          my_signal = each_signal;
      if ( my_signal != None ):
        my_signal.values = my_signal.values[start_ptr:stop_ptr];
  sump_bundle_data( self );
  recalc_max_samples( self );
  return trig_i;
def rle_undersample_signal( self, undersample_rate, my_signal ):
  """Decimate a binary signal in place by undersample_rate, OR-reducing
     each group of samples so that any "1" inside a group survives.
     Trailing samples that do not fill a whole group are dropped."""
  print("rle_undersample_signal()");
  reduced = [];
  group_val = "0";
  count = 0;
  for sample in my_signal.values:
    if ( sample == "1" ):
      group_val = "1";# A single high sample marks the whole group high
    count += 1;
    if ( count == undersample_rate ):
      reduced.append( group_val );
      group_val = "0";
      count = 0;
  my_signal.values = reduced[:];
  return;
# Given a RLE compressed list, expand to regular time sample list.
# Return the list and the index location of the trigger
def expand_rle( self, start_t,stop_t,pre_trig,post_trig ):
  """Expand an RLE compressed list of ( time, data ) tuples into one
     data sample per time tick covering start_t..stop_t. Returns
     ( sample_list, trigger_index ) where trigger_index is the sample
     whose tick matches the timestamp of the last pre_trig entry."""
  i = start_t;
  j = 0;
  rle_list = pre_trig + post_trig;
  trigger_index = 0;
# ( trigger_time, trigger_data ) = pre_trig[-2];
# print("RLE TRIGGER Compressed-2 %08x " % ( trigger_data ) );
  # Trigger is the last compressed sample before the trigger boundary
  ( trigger_time, trigger_data ) = pre_trig[-1];
  print("RLE TRIGGER Compressed %08x " % ( trigger_data ) );
  sample_list = [];
  # Prime with the very first compressed sample
  ( rle_time, rle_data ) = rle_list[j];
  hold_data = rle_data;
  sample_list += [ hold_data ];# Add old sample
  old_rle_time = 0;
  j +=1;
  ( rle_time, rle_data ) = rle_list[j];
  # Walk every tick: repeat the held value until the next RLE timestamp
  while ( i <= stop_t and j < (len(rle_list)-1) ):
    if ( i < rle_time ):
      sample_list += [ hold_data ];# Add old sample
    else:
      sample_list += [ rle_data ];# Add the new sample
      hold_data = rle_data;
      j +=1;
      old_rle_time = rle_time;
      ( rle_time, rle_data ) = rle_list[j];
#   if ( rle_time == trigger_time ):
#     trigger_index = len( sample_list )-1;
#   if ( old_rle_time == trigger_time ):
#     trigger_index = len( sample_list );
    if ( i == trigger_time ):
      trigger_index = len( sample_list )-1;# Remember trigger location
#     print("RLE TRIGGER Decompressed %08x " % ( sample_list[trigger_index] ) );
    i+=1;
  print("RLE TRIGGER Decompressed %08x " % ( sample_list[trigger_index] ) );
  return ( sample_list, trigger_index );
# Example RLE List
# 000007ff 0000000d
# 00000800 0000000d 2nd to last of pre-trig
# 1d4c4ad3 0000000b Last item of pre-trig
# 1d4c4ad4 0000000b 1st item of post-trig
def process_rle( self, rle_list ):
  """Split a raw RLE capture dump into culled pre/post trigger lists.

  rle_list is the combined (timestamp,data) tuple dump: the first half
  is the pre-trigger RAM contents, the second half the post-trigger
  RAM. The pre-trigger half is a circular buffer, so it is rotated to
  place the oldest sample first. Samples outside the configured
  sump_rle_pre_trig_len / sump_rle_post_trig_len windows (scaled by
  sump_acquisition_len) are culled; the cull decisions are logged to
  sump2_rle_cull_list.txt for debug.

  Returns ( start_time, stop_time, pre_list, post_list ).
  """
  ln = len( rle_list ) // 2;# Size of pre and post lists
  pre_list = list(rle_list[0:ln]);
  post_list = list(rle_list[ln:]);
  culls = [];# Debug log of keep("+") vs cull("<"/">") decisions
  # Figure out oldest RLE sample pre-trigger and then rotate list
  start_time = 0xFFFFFFFF;
  i = 0;
  for ( rle_time, rle_data ) in pre_list:
    if ( rle_time < start_time ):
      start_time = rle_time;
      n = i;# Location of oldest RLE sample found thus far
    i +=1;
  pre_list = rotate_list(self,pre_list, n );
  # pre_list[-1] is a (time,data) tuple, feeding both %08x fields
  print("RLE pre_list %08x %08x" % ( pre_list[-1] ) );
  # ini defines hard limits of how many uncompressed samples pre and post trig
  rle_pre_trig_len = int(self.vars["sump_rle_pre_trig_len" ],16);
  rle_post_trig_len = int(self.vars["sump_rle_post_trig_len" ],16);
  # Now scale limits based on the sump_acquisition_len setting 25,50,75,100
  acq_len = int(self.vars["sump_acquisition_len"],16);
  pre_trig = (acq_len & 0xF0)>>4;# Expect 1-4 for 25%-100% of 1st RAM Half
  post_trig = (acq_len & 0x0F)>>0;# Expect 1-4 for 25%-100% of 2nd RAM Half
  rle_pre_trig_len = ( rle_pre_trig_len // 4 ) * pre_trig; # Div-4, Mult 1-4
  rle_post_trig_len = ( rle_post_trig_len // 4 ) * post_trig;# Div-4, Mult 1-4
  # Cull any non-events pre and post trigger. Non-events are when the HW
  # generates a sample as a MSB timer bit has rolled over. This feature
  # prevents hardware from hanging forever if there are no events.
  # NOTE: this cull pass is intentionally disabled ( if False ) but kept
  # for reference.
  if ( False ):
    pre_list_old = pre_list[:];
    (first_time,first_data ) = pre_list[0];
    pre_list = [];
    valid = False;
    prev_time = None;
    for ( rle_time, rle_data ) in list(pre_list_old):
      if ( rle_data != first_data and valid == False ):
        valid = True;
        if ( prev_time != None ):
          # If space between 1st and 2nd RLE samples is large, cull down to 1000
          if ( ( rle_time - prev_time ) < 1000 ):
            pre_list += [ (prev_time,prev_data) ];# Keep sample before 1st delta
          else:
            pre_list += [ ((rle_time-1000),prev_data)];# sample before 1st delta
      if ( valid == True ):
        pre_list += [ (rle_time,rle_data) ];
      else:
        prev_time = rle_time;
        prev_data = rle_data;
    if ( len( pre_list ) == 0 ):
      pre_list = [ pre_list_old[-1] ];
  # Cull any samples outside the sump_rle_pre_trig_len
  (trig_time,trig_data ) = pre_list[-1];
  pre_list_old = pre_list[:];
  pre_list = [];
  for ( rle_time, rle_data ) in list(pre_list_old):
    if ( rle_time > ( trig_time - rle_pre_trig_len ) ):
      pre_list += [ (rle_time,rle_data) ];
      culls+=[("+ %08x %08x %08x %08x" %
              (rle_time,trig_time,rle_pre_trig_len,rle_data))];
    else:
      culls+=[("< %08x %08x %08x %08x" %
              (rle_time,trig_time,rle_pre_trig_len,rle_data))];
  if ( len( pre_list ) == 0 ):
    pre_list = [ pre_list_old[-1] ];# Always keep at least the trigger sample
  stop_time = 0x00000000;
  i = 0;
  for ( rle_time, rle_data ) in post_list:
    if ( rle_time > stop_time ):
      stop_time = rle_time;
      n = i;# Location of newest RLE sample found thus far
    i +=1;
  # Cull any samples outside the sump_rle_post_trig_len
  post_list_old = post_list[:];
  post_list = [];
  for ( rle_time, rle_data ) in list(post_list_old):
    if ( rle_time < ( trig_time + rle_post_trig_len ) ):
      post_list += [ (rle_time,rle_data) ];
      culls+=[("+ %08x %08x %08x %08x" %
              (rle_time,trig_time,rle_post_trig_len,rle_data))];
    else:
      culls+=[("> %08x %08x %08x %08x" %
              (rle_time,trig_time,rle_post_trig_len,rle_data))];
  if ( len( post_list ) == 0 ):
    post_list = [ post_list_old[0] ];# Always keep at least one post sample
  (start_time,start_data ) = pre_list[0];
  (stop_time,stop_data ) = post_list[-1];
  list2file( self, "sump2_rle_cull_list.txt", culls );
  return ( start_time , stop_time, pre_list, post_list );
def rotate_list( self, my_list, n ):
  """Return my_list rotated left so that element n becomes element 0."""
  head = my_list[:n];
  tail = my_list[n:];
  return tail + head;
########################################################
# Calculate desired ram length pre,post trig to work with
def sump_ram_len_calc( self ):
  """Compute how many samples to capture pre and post trigger.

  sump_acquisition_len packs two nibbles: bits[7:4] select 1-4
  quarters of the 1st RAM half (pre-trigger) and bits[3:0] select
  1-4 quarters of the 2nd RAM half (post-trigger).

  Returns [ ram_pre, ram_post, ram_pre+ram_post, ram_len ].
  """
  phys_len = self.sump.cfg_dict['ram_len'];# Physical RAM Size, ie 1K
  acq_len = int(self.vars["sump_acquisition_len"],16);
  quarter = ( phys_len // 2 ) // 4;# Example 128 of 1K/2
  pre_cnt = quarter * ( ( acq_len >> 4 ) & 0xF );# 25-100% of 1st half
  post_cnt = quarter * ( acq_len & 0xF );# 25-100% of 2nd half
  return [ pre_cnt, post_cnt, ( pre_cnt + post_cnt ), phys_len ];
########################################################
# Return a list of acquired SUMP capture data using variable length
def sump_dump_var_ram( self, rd_page = 0 ):
  """Read back only the acquired sample window from SUMP2 RAM.

  Unlike sump_dump_ram(), this reads just ram_len samples (the
  pre+post trigger window from sump_ram_len_calc) starting at the
  computed pre-trigger position relative to the hardware trigger
  pointer. Returns the list of DWORDs read from the selected page.
  """
  ( ram_pre, ram_post, ram_len, ram_phys ) = sump_ram_len_calc(self);
  trig_ptr = self.sump.rd( self.sump.cmd_rd_trigger_ptr )[0];
  # Back up from the trigger to the 1st pre-trig sample; 16bit pointer wrap
  ram_ptr = 0xFFFF & (trig_ptr - ram_pre - 1);
  self.sump.wr( self.sump.cmd_wr_ram_page, rd_page );
  self.sump.wr( self.sump.cmd_wr_ram_ptr , ram_ptr );# Load at specfd pre-trig
  data = self.sump.rd( self.sump.cmd_rd_ram_data, num_dwords = ram_len );
  return data;
########################################################
# Return a complete list of acquired SUMP capture data
def sump_dump_ram( self, rd_page = 0, rd_ptr = None ):
  """Read back one full RAM page from the SUMP2 capture engine.

  Selects the requested DWORD page, optionally seats the read
  pointer, then streams cfg_dict['ram_len'] DWORDs out of the data
  register and returns them as a list.
  """
  depth = self.sump.cfg_dict['ram_len'];
  self.sump.wr( self.sump.cmd_wr_ram_page, rd_page );
  if ( rd_ptr is not None ):
    self.sump.wr( self.sump.cmd_wr_ram_ptr , rd_ptr );
  return self.sump.rd( self.sump.cmd_rd_ram_data, num_dwords = depth );
########################################################
# Use the wave_list to generate a new signal_list
# HERE
#def wave2signal_list( self ):
# ram_len = self.sump.cfg_dict['ram_len'];
# ram_dwords = self.sump.cfg_dict['ram_dwords'];
# ram_bytes = self.sump.cfg_dict['ram_event_bytes'];
# ram_rle = self.sump.cfg_dict['ram_rle'];
#
# events = ram_bytes * 8;
# # Iterate the number of event bits and init with 0s
# for i in range( 0, events , 1):
# sig_name = "event_%d" % i;
# self.signal_list.append( signal(name=sig_name) );
# self.signal_list[i].format = "bin";
# self.signal_list[i].bits_total = 1;
# self.signal_list[i].bit_top = 0;
# self.signal_list[i].bit_bot = 0;
# for j in range( 0, ram_len, 1):
# self.signal_list[i].values.append( "0" );
#
# # Iterate the number of dwords and init with 0x0s
# for i in range( 0, ram_dwords, 1):
# sig_name = "dword_%d" % i;
# self.signal_list.append( signal(name=sig_name) );
# self.signal_list[events+i].format = "hex";
# self.signal_list[events+i].bits_total = 32;
# self.signal_list[events+i].bit_top = 31;
# self.signal_list[events+i].bit_bot = 0;
# for j in range( 0, ram_len, 1):
# self.signal_list[events+i].values.append( "%08x" % 0 );
#
# return;
########################################################
# Read values of sump vars and use to update signal objects trigger attrib
def sump_vars_to_signal_attribs( self ):
  """Apply the sump_* variable settings to the signal objects.

  Reads the trigger/enable related sump variables and updates each
  event[i] signal object's hidden, trigger and data_enable display
  attributes to match. Signals that don't exist (get_sig_obj_by_name
  returns None) are skipped.

  Note: If trigger_type is pattern_ris or pattern_fal then
  user_pattern0 is the mask of what bits to pattern match on and
  user_pattern1 is the actual pattern bits.
  """
  trig_type = self.vars["sump_trigger_type" ];# "or_rising";
  trig_field = int( self.vars["sump_trigger_field" ],16 );
  # These are parsed for hex validation even though unused below.
  trig_delay = int( self.vars["sump_trigger_delay" ],16 );
  trig_nth = int( self.vars["sump_trigger_nth" ],16 );
  rle_event_en = int( self.vars["sump_rle_event_en" ],16 );
  data_en = int( self.vars["sump_data_enable" ],16 );
  user_ctrl = int( self.vars["sump_user_ctrl" ],16 );
  wd_time = int( self.vars["sump_watchdog_time" ],16 );
  user_pattern0 = int( self.vars["sump_user_pattern0" ],16 );
  user_pattern1 = int( self.vars["sump_user_pattern1" ],16 );
  # rle_event_en controls the Hidden field
  for i in range( 0, 32 , 1):
    sig_obj = get_sig_obj_by_name( self, ("event[%d]" % i ) );
    if ( sig_obj != None ):
      if ( ( rle_event_en & 1<<i ) != 0x0 ):
        sig_obj.hidden = False;
      else:
        sig_obj.hidden = True;
  # Clear everything to start with. Set any data_en bits
  for i in range( 0, 32 , 1):
    sig_obj = get_sig_obj_by_name( self, ("event[%d]" % i ) );
    if ( sig_obj != None ):
      sig_obj.trigger = 0; # OFF
      if ( ( data_en & 1<<i ) != 0x0 ):
        sig_obj.data_enable = True;
      else:
        sig_obj.data_enable = False;
  # Set any Rising or Falling edge trigger selection ( 1 only )
  if ( trig_type == "or_rising" or trig_type == "or_falling" or
       trig_type == "watchdog" ):
    if ( trig_type == "or_rising" ):
      trig = +1;
    if ( trig_type == "or_falling" ):
      trig = -1;
    if ( trig_type == "watchdog" ):
      trig = -2;
    for i in range( 0, 32 , 1):
      sig_obj = get_sig_obj_by_name( self, ("event[%d]" % i ) );
      # Bug fix: guard against missing signals, matching the None
      # checks used by the loops above.
      if ( sig_obj != None and ( 1<<i & trig_field ) != 0x0 ):
        sig_obj.trigger = trig;
  if ( trig_type == "pattern_rising" or trig_type == "pattern_falling" ):
    for i in range( 0, 32 , 1):
      sig_obj = get_sig_obj_by_name( self, ("event[%d]" % i ) );
      # Bug fix: same missing-signal guard as above.
      if ( sig_obj != None and ( 1<<i & user_pattern0 ) != 0x0 ):
        if ( ( 1<<i & user_pattern1 ) != 0x0 ):
          sig_obj.trigger = 3;# Pattern of 1 for this bit
        else:
          sig_obj.trigger = 2;# Pattern of 0 for this bit
  return;
########################################################
# Read signal attributes and convert to sump variables
def sump_signals_to_vars( self ):
  """Fold each event[i] signal's visibility back into the
  sump_rle_event_en variable.

  A signal that is visible and not hidden sets its bit in the mask;
  a hidden or invisible signal clears its bit. The result is stored
  back as an 8-digit hex string.
  """
  mask = int( self.vars["sump_rle_event_en"],16 );
  for sig_obj in self.signal_list:
    for bit in range( 32 ):
      if ( sig_obj.name == ( "event[%d]" % bit ) ):
        if ( not sig_obj.hidden and sig_obj.visible ):
          mask |= ( 1<<bit );# Set bit
        else:
          mask &= ~( 1<<bit );# Clear bit
  self.vars["sump_rle_event_en" ] = ("%08x" % mask );
  return;
########################################################
# Use sump2 hardware info to generate a signal_list
def sump2signal_list( self ):
  """Build the initial signal_list from the SUMP2 hardware config.

  Creates one single-bit binary "event[i]" signal per event bit and
  one 32-bit hex "dword_N" signal per captured DWORD, each pre-filled
  with ram_len placeholder samples.

  NOTE(review): the indexing self.signal_list[i] assumes signal_list
  is empty on entry -- confirm callers reset it first.
  NOTE(review): dwords are named "dword_%d" here, while other code
  (e.g. the crop routine) looks up "dword[%d]" -- confirm which naming
  the rest of the file expects.
  """
  ram_len = self.sump.cfg_dict['ram_len'];
  ram_dwords = self.sump.cfg_dict['ram_dwords'];
  ram_bytes = self.sump.cfg_dict['ram_event_bytes'];
  ram_rle = self.sump.cfg_dict['ram_rle'];
  events = ram_bytes * 8;
  # Iterate the number of event bits and init with 0s
  for i in range( 0, events , 1):
    sig_name = "event[%d]" % i;
    self.signal_list.append( signal(name=sig_name) );
    self.signal_list[i].format = "bin";
    self.signal_list[i].bits_total = 1;
    self.signal_list[i].bit_top = 0;
    self.signal_list[i].bit_bot = 0;
    for j in range( 0, ram_len, 1):
      self.signal_list[i].values.append( "0" );
  # Iterate the number of dwords and init with 0x0s
  for i in range( 0, ram_dwords, 1):
    sig_name = "dword_%d" % i;
    self.signal_list.append( signal(name=sig_name) );
    self.signal_list[events+i].format = "hex";
    self.signal_list[events+i].bits_total = 32;
    self.signal_list[events+i].bit_top = 31;
    self.signal_list[events+i].bit_bot = 0;
    for j in range( 0, ram_len, 1):
      self.signal_list[events+i].values.append( "%08x" % 0 );
  return;
########################################################
# Given a TXT file, make signal_list from it
# Format is:
# # foo bar addr
# 0 1 2
# 1 0 a
def txtfile2signal_list( self, file_name ):
  """Build signal_list from a flat whitespace-separated text dump.

  Expected file format: line 1 is "# name1 name2 ..." and each
  following line is one time sample of values, one column per signal.
  Any column value other than "0"/"1" promotes that signal to hex
  format with 4 bits per character seen.
  """
  # Read in the flat text VCD translation and make lists of net names
  file_in = open ( file_name , 'r' );
  file_list = file_in.readlines();
  file_in.close();
  net_names = file_list[0];
  sig_values = file_list[1:];# NOTE: unused; sig_value_list below is used
  self.sig_name_list = " ".join(net_names.split()).split(' ');
  self.sig_name_list = self.sig_name_list[1:]; # Remove Leading #
  self.sig_value_list = file_list[1:];
  for each in self.sig_name_list[:]:
    self.signal_list.append( signal(name=each) );
  # Rip thru the value list ( of all sigs ) and extract one signal at a time
  # 0 000000000 1 000000000 0 000000000 0 000000000 000000000 000000000
  for ( i , sig_obj ) in enumerate( self.signal_list ):
    self.signal_list[i].format = "bin"; # Assume Binary by default
    self.signal_list[i].bits_total = 1;
    for each in self.sig_value_list:
      words = " ".join(each.split()).split(' ') + [None] * 20;
      sig_obj.values.append( words[i] );
      # If value other than 0 or 1 is found, declare this as hex
      if ( words[i] != "0" and words[i] != "1" and words[i] != None ):
        self.signal_list[i].format = "hex";
        self.signal_list[i].bits_total = len( words[i] ) * 4;
        self.signal_list[i].bit_top = self.signal_list[i].bits_total-1;
        self.signal_list[i].bit_bot = 0;
  return;
########################################################
# Given a VCD file, make signal_list from it
def vcdfile2signal_list( self, file_name ):
  """Parse a VCD file into self.signal_list.

  Two passes over the file: first the header ($scope/$var definitions
  up to $enddefinitions) builds signal objects with hierarchy info,
  then the value-change section is expanded into per-tick samples.
  Single-bit members of ripped busses (e.g. tx_data[5]) are tracked in
  self.rip_list and folded into their parent vector's hex value.
  The sample period is inferred as the smallest nonzero time delta
  between #timestamps in the whole file.

  NOTE(review): if open()/readlines() raises, the except only prints
  and file_lines stays unbound -- the loops below would then raise
  NameError. Confirm whether an early return is wanted here.
  """
  try: # Read the Input File and Separate the Header from Data
    file_in = open( file_name , "r" );
    file_lines = file_in.readlines();
    file_in.close();
  except:
    print( "ERROR Input File: "+file_name );
    print( "Possibly a Python MemoryError due to large file size");
  self.signal_list = [];
  self.rip_list = [];
  self.rip_symbs = [];
  self.top_module = "";
  hier_list = [];
  hier_name = "";
  hier_level = 0;# +1 on 1st will be 0;
  print( "vcdfile2signal_list() : Parsing VCD Symbol Definitions");
  start_time = self.pygame.time.get_ticks();
  for ( i , each ) in enumerate ( file_lines ):
    words = each.strip().split() + [None] * 4; # Avoid IndexError
    if ( words[0] == "$enddefinitions" ):
      dump_vars_index = i; # Remember location to start Value Change Parsing
      break; # Save time and don't process entire file
    #####################################
    # Check for Signal Symbol Definitions
    # $var wire 1 * tx_data [15] $end
    #   0    1  2 3    4      5    6
    if ( words[0] == "$var" ):
      type = words[1]; # ie "wire"
      bits = int( words[2] ); # ie 32
      symb = words[3]; # ie ","
      name = words[4]; # ie "lb_addr"
      rip = words[5]; # ie "[31:0]" or "$end" if single bit
      sig_obj = signal( name=name, vcd_symbol=symb );
      sig_obj.hier_name = hier_name;
      sig_obj.hier_level = hier_level;
      sig_obj.bits_total = bits; # ie 32
      sig_obj.bit_top = bits-1; # ie 31
      sig_obj.bit_bot = 0; # ie 0
      if ( rip != "$end" ):
        sig_obj.rip = rip;# [15:0] or [1] or ""
      if ( bits > 1 or sig_obj.rip != "" ):
        sig_obj.format = "hex";
      else:
        sig_obj.format = "bin";
      # If a portion of a ripped bus and not [0], add to special rip_list
      # otherwise, add to the regular signal_list
      if (
           ( sig_obj.rip != "" ) and \
           ( ":" not in sig_obj.rip ) and \
           ( sig_obj.rip != "[0]" ) \
         ):
        self.rip_list.append( sig_obj );
        self.rip_symbs.append( symb );
      else:
        self.signal_list.append( sig_obj );
      # Now also add "[0]" to rip_list ( It will appear in BOTH lists )
      if ( sig_obj.rip == "[0]" ):
        self.rip_list.append( sig_obj );
        self.rip_symbs.append( symb );
    #####################################
    # Check for new hierarchy declaration
    if ( words[0] == "$scope" and \
         ( words[1] == "module" or \
           words[1] == "begin" ) \
       ):
      if ( self.top_module == "" ):
        self.top_module = words[2]; # ie "tb_dut"
        print( "top_module is ", self.top_module);
      # A hierarchy level is represented as a collapsable pseudo-signal
      name = words[2]; # ie "u_dut"
      sig_obj = signal( name=name );
      sig_obj.hier_name = hier_name;
      sig_obj.hier_level = hier_level;
      sig_obj.bits_total = 0;
      sig_obj.bit_top = 0;
      sig_obj.bit_bot = 0;
      sig_obj.format = "";
      self.signal_list.append( sig_obj );
      sig_obj.collapsable = True;
      sig_obj.expandable = False;
      hier_list.append( words[2] );
      rts = ""
      for each in hier_list:
        rts = rts + "/" + each;
      hier_name = rts;
    #####################################
    # Adjust hier level on $scope or $upscope
    if ( words[0] == "$scope" ):
      hier_level += 1;
    if ( words[0] == "$upscope" ):
      hier_level -= 1;
    if ( words[0] == "$scope" and words[1] == "begin" ):
      hier_list.append( "process" );
    if ( words[0] == "$upscope" ):
      hier_list.pop(); # Remove last item from list
  # Create a hash lookup of symbol to object index and bits to speed things up
  hash_dict_index = {};
  hash_dict_bits = {};
  for ( i, sig_obj ) in enumerate( self.signal_list ):
    # Need to make a list for symb lookup as clocks can reuse same symbol
    if ( hash_dict_index.get( sig_obj.vcd_symbol, None ) == None ):
      hash_dict_index[ sig_obj.vcd_symbol ] = [i];
    else:
      hash_dict_index[ sig_obj.vcd_symbol ].append( i );
    hash_dict_bits[ sig_obj.vcd_symbol ] = sig_obj.bits_total;
  # Go thru the rip_list and determine the number of bits for the busses
  # This finds the parent in self.signal_list that matches the current
  # rip from self.rip_list and adjusts the parents bits_total and bit_top
  # if the rip's exceed the parent's old value. The parent will start with
  # (1,1) since it is based on rip [0]
  # Also create a hash to lookup the parent index for each rip symbol
  hash_rip_list = {};
  hash_rip_parent = {};
  for (j,my_each) in enumerate( self.rip_list ):
    name = my_each.name;
    hier_name = my_each.hier_name;
    rip = my_each.rip;
    hash_rip_list[ my_each.vcd_symbol ] = j; # For Fast Lookup later
    # Calculate the weight of each bit, ie [7] is 128
    for foo_each in [ "[", "]" ]:
      rip = rip.replace( foo_each , " "+foo_each+" " );
    words = rip.strip().split() + [None] * 10; # Avoid IndexError
    rip = int( words[1], 10 );
    my_each.bit_weight = 2**rip;# Conv 7->128
    if ( name != None and hier_name != None ):
      for ( i, my2_each ) in enumerate( self.signal_list ):
        if ( name == my2_each.name and \
             hier_name == my2_each.hier_name ):
          hash_rip_parent[ my_each.vcd_symbol ] = i; # For Fast Lookup later
          if ( rip > my2_each.bit_top ):
            my2_each.bits_total = rip+1;
            my2_each.bit_top = rip;
  symb_parse_list = ["!","#","$","&","'","K" ];
  # Now Parse actual VCD section and try and figure out sample clock period
  # by finding the smallest time delta across the entire VCD file
  sample_period = 99999999;
  prev_time = 0;
  for ( i , each ) in enumerate ( file_lines[dump_vars_index:] ):
    words = each.strip().split() + [None] * 4; # Avoid IndexError
    if ( words[0][0:1] == "#" ):
      now_time = int( words[0][1:],10 );
      delta_time = now_time - prev_time;
      if ( delta_time < sample_period and delta_time != 0):
        sample_period = delta_time;
        print( sample_period );
      prev_time = now_time;
  # Now Parse the actual VCD section and calculate current values for each
  # signal at every time stamp section.
  print( "vcdfile2signal_list() : Parsing VCD Value Change Dumps");
  start_time = self.pygame.time.get_ticks();
  percent = 0;
  percent_total = ((1.0)*len( file_lines[dump_vars_index:] ) );
  sample_cnt = 0;
  for ( i , each ) in enumerate ( file_lines[dump_vars_index:] ):
    # This takes a while, so calculate and print percentage as it goes by
    if ( ((i*1.0) / percent_total) > percent ):
      perc_str = ( str( int(100*percent) ) + "%");
      draw_header( self, perc_str );
      print( perc_str );
      percent += .05;
    # Handle binary cases for "1>" and convert to "1 >"
    # If the 1st char is 0 or 1 insert a space to make look like vector
    if ( each[0:1] == "0" or
         each[0:1] == "1" or
         each[0:1] == "x" or
         each[0:1] == "z" ):
      each = each[0:1] + " " + each[1:];
    words = each.strip().split() + [None] * 4; # Avoid IndexError
    symb = words[1];
    # Skip the initial dumpvars section as nothing to dump yet
    if ( words[0] == "#0" ):
      None;
      time_stamp = 0;
      time_now = 0;
    # When we reach a timestamp, append all last_value to values list
    elif ( words[0][0:1] == "#" ):
      time_stamp = int( words[0][1:], 10 );
      while ( time_now <= time_stamp ):
        for sig_obj in self.signal_list:
          sig_obj.values.append( sig_obj.last_value );
        sample_cnt += 1;# Count Total Samples for final report at end
        time_now += sample_period;
    # Read the symbols new value and assign to last_value
    else:
      if ( words[0][0:1]=="0" or
           words[0][0:1]=="1" or
           words[0][0:1]=="x" or
           words[0][0:1]=="z" ):
        value = words[0];
      elif ( words[0][0:1] == "b" ):
        try:
          value = int( words[0][1:],2 );# Convert Binary String to Integer
          if ( symb != None ):
            num_bits = hash_dict_bits[ symb ];
            num_nibs = int(num_bits/4.00 + 0.75 );# ie 29 bits gets 8 nibbles
          else:
            num_nibs = 1;
        except:
          value = 0;
          num_nibs = 1;
        value = "%08x" % value;# Now Convert Integer to Hex
        value = value[::-1]; # Reverse
        value = value[0:num_nibs]; # Keep desired number of LSBs
        value = value[::-1]; # Reverse Back
      elif ( words[0][0:1] == "$" ):
        value = None;
      else:
        line_num = i + dump_vars_index + 1;
        print( "ERROR line " + str(line_num) + " : " + words[0]);
        value = None;
      # Is symb in rip_list? If not, do normal processing
      if ( symb not in self.rip_symbs ):
        if ( value != None and symb != None ):
          # Note: a symb might be used multiple times for clock ports, etc.
          try:
            for i in hash_dict_index[ symb ]:
              self.signal_list[i].last_value = value;
          except:
            None;
      # Oh SNAP - This is in the rip_list. Find obj for [0] ( Parent )
      # and if 0, AND out bit_weight, if 1 OR in bit_weight.
      # This op takes time since values are stored in ASCII, must convert to
      # int, perform the bit operation and then convert back to ASCII.
      else:
        my_each = self.rip_list[ hash_rip_list[ symb ] ];
        my2_each = self.signal_list[ hash_rip_parent[ symb ] ];
        try:
          last_value = int( my2_each.last_value, 16 );
        except:
          last_value = 0;
        if ( value == "0" ):
          last_value = last_value & ~ my_each.bit_weight;
        elif ( value == "1" ):
          last_value = last_value | my_each.bit_weight;
        nibs = my2_each.bits_total//4;# ie 32 = 8, num nibs to display
        new_value = "%016x" % last_value;# 16 Nibbles, remove leading next
        my2_each.last_value = new_value[16-nibs:];# Remove leading 0s
  stop_time = self.pygame.time.get_ticks();
  tt = str( (stop_time - start_time) / 1000 ) + "s";
  rate = str( sample_cnt / ((stop_time - start_time) * 1000 )) + " MSPS";
  print( "vcdfile2signal_list() : Complete : Time " + tt +" : Rate " + rate);
  draw_header( self, "" );
  return;
def shutdown( self ):
  """Cleanly exit the application: persist state, close pygame, exit.

  Dumps all variables to sump2.ini, autosaves the current waveform
  format, shuts down pygame (GUI mode only) and terminates the
  process via sys.exit().
  """
  log( self, ["shutdown()"]);
  var_dump( self, "sump2.ini" ); # Dump all variable to INI file
  proc_cmd( self, "save_format", [""] ); # Autosave the last format
  if ( self.mode_cli == False ):
    self.pygame.quit();# Be IDLE friendly
  print("");
  print("Thank you for using SUMP2 " + self.vers + " by BlackMesaLabs");
  print("Please encourage the development and use of open-source software");
  sys.exit();
  return;# Unreachable: sys.exit() raises SystemExit above
#def init_vars( self ):
# self.var_hash= {};
# self.var_hash["bd_connection" ] = "tcp";
# self.var_hash["bd_protocol" ] = "poke";
# self.var_hash["tcp_port" ] = "21567";
# self.var_hash["tcp_ip_addr" ] = "127.0.0.1";# No Place Like Home
# self.var_hash["sump_addr" ] = "00000000" ;# Addr of sump2_ctrl_reg
# self.var_hash["sump_trigger_type" ] = "or_rising";
# self.var_hash["sump_trigger_field" ] = "00000000";
# self.var_hash["sump_trigger_delay" ] = "0000";
# self.var_hash["sump_trigger_nth" ] = "0000";
# self.var_hash["sump_user_ctrl" ] = "00000000";
# self.var_hash["sump_user_pattern0" ] = "00000000";
# self.var_hash["sump_user_pattern1" ] = "00000000";
# self.var_hash["sump_data_enable" ] = "00000000";
# return;
def init_globals( self ):
  """Initialize all per-session application state attributes.

  Sets up display/acquisition state, mouse and cursor bookkeeping,
  the two measurement cursors, the right-click popup menu trees
  (built partly from the sump_script_inc/exc_filter file globs) and
  the command alias table. Assumes self.vars is already populated.
  """
  import platform,os;
  self.os_sys = platform.system(); # Windows vs Linux
  self.fatal_msg = None;
  self.undersample_data = False;
  self.undersample_rate = 1;
  self.gui_active = False;
  # Define the colors we will use in RGB format
  self.color_bg = (0,0,0);
  self.color_fg = (0,0,0);
  self.prompt = "bd>";
  self.done = False; # This breaks the application loop when true
  self.txt_height = 0;
  self.txt_width = 0;
  self.spin_char = "";
  self.debug = False;
  self.last_filesave = None;# Name of last file saved, used for Save_Rename
  self.vcd_import = False;
  self.acq_state = "acquire_stop";
  self.acq_mode = "nonrle";
  # Sample window / zoom state
  self.sample_start = 0;
  self.sample_stop = 0;
  self.sample_room = 0;
  self.prev_sample_start = None;
  self.prev_sample_stop = None;
  self.max_samples = 0;
  self.zoom_x = self.txt_width; # Default zoom ratio is 1 text char width
  self.stop_zoom = False;
  self.sig_obj_sel = None;
  self.key_buffer = "";
  self.last_search_value = "";
  self.vertical_scrolled_offscreen = False;
  self.last_cmd = "";
  self.skipped_refresh_cnt = 0;
  self.old_list = [];
  self.slider_width = 0;
  self.cmd_history = [];
  self.dwords_start = 0;
  self.dwords_stop = 0;
  # Screen layout regions ( pixel coordinates )
  self.sig_name_start_x = 0;
  self.sig_name_start_y = 0;
  self.sig_name_stop_x = 0;
  self.sig_name_stop_y = 0;
  self.sig_value_start_x = 0;
  self.sig_value_start_y = 0;
  self.sig_value_stop_x = 0;
  self.sig_value_stop_y = 0;
  self.cursor_start_y = 0;
  self.cursor_stop_y = 0;
  self.top_module = "";# ie "tb_foo"
  self.sig_top = 0;
  self.sig_bot = 0;
  self.surface_start = -1;
  self.surface_stop = -1;
  self.name_surface_valid = False;
  self.curval_surface_valid = False;
  # Two measurement cursors, slightly apart so both are visible
  self.cursor_list = [];
  self.cursor_list.append( cursor(name="Cursor1"));
  self.cursor_list.append( cursor(name="Cursor2"));
  self.cursor_list[0].y = 0;
  self.cursor_list[1].y = 0;
  self.cursor_list[0].sample = 10;
  self.cursor_list[1].sample = 15;
  # Mouse state tracking
  self.mouse_x = 0;
  self.mouse_y = 0;
  self.mouse_button = 0;
  self.mouse_region = "";
  self.mouse_name_sel_y = -1;
  self.scroll_num_samples = 1;
  self.mouse_btn1dn_x = -1;
  self.mouse_btn1dn_y = -1;
  self.mouse_btn1up_x = -1;
  self.mouse_btn1up_y = -1;
  self.mouse_btn3dn_x = -1;
  self.mouse_btn3dn_y = -1;
  self.mouse_btn3up_x = -1;
  self.mouse_btn3up_y = -1;
  self.mouse_btn1up_time_last = 0;
  self.mouse_btn1up_time = 0;
  self.mouse_btn1dn_time = 0;
  self.resize_on_mouse_motion = False;
  self.max_w = 0;
  self.max_w_chars = 0;
  # Popup ( right-click ) menu state
  self.popup_x = None;
  self.popup_y = -1;
  self.popup_w = 0;
  self.popup_y2 = -1;
  self.popup_sel = "";
  self.popup_sample = 0;
  self.popup_parent_x = None;
  self.popup_parent_y = None;
  self.popup_parent_list = None;
  self.txt_entry = False;
  self.txt_entry_caption = "Rename_Signal";
  # Create a list of files to source in menu given include and exclude filters
  file_inc_filter = self.vars["sump_script_inc_filter"];
  file_exc_filter = self.vars["sump_script_exc_filter"];
  file_load_list = ["File_Load"];
  import glob;
  glob_list = set(glob.glob(file_inc_filter))-set(glob.glob(file_exc_filter));
  for each in glob_list:
    file_load_list += ["source "+each ];
  # Right-Click menu over signal names
  self.popup_list_names = [
    "--------", "Rename",
    "Insert_Divider",
    ["Clipboard","Cut","Paste"],
    ["Visibility","Delete","Hide","Hide_All","Show","Show_All"],
    [ "Radix", "Hex","Signed","Unsigned" ],
    "--------","Trigger_Rising","Trigger_Falling","Trigger_Watchdog",\
    "--------","Set_Pattern_0","Set_Pattern_1","Clear_Pattern_Match",\
    "--------","Set_Data_Enable","Clear_Data_Enable",\
    "--------",["SUMP_Configuration","sump_trigger_delay",\
                "sump_trigger_nth",\
                "sump_user_ctrl",\
                "sump_user_pattern0",\
                "sump_user_pattern1",\
                "sump_watchdog_time"],\
    "--------",["Acquisition_Length",
                "[----T----]",
                " [--T--] ",
                " [-T-] ",
                "[----T-] ",
                " [-T----]",
               ],
  ];
  # Right-Click menu over waveform area
  self.popup_list_values = [
    "--------","Zoom_In", "Zoom_Out", "Zoom_Full","Zoom_Previous",
    "Zoom_to_Cursors",
    "--------",["Cursors",
                "Cursors_to_View","Cursor1_to_Here","Cursor2_to_Here",
                "Crop_to_Cursors"],\
    ["Acquire",
     "Acquire_Normal","Acquire_RLE","Acquire_Stop",],
    file_load_list,
    ["File_Save","Save_PNG","Save_JPG","Save_BMP",
     "Save_TXT","Save_VCD","Save_Rename"],
    ["Misc","Font_Larger","Font_Smaller",
     "BD_SHELL","Manual"],"Quit"];
  self.popup_list = self.popup_list_values;
  # Short command aliases accepted at the bd> prompt
  self.cmd_alias_hash_dict = {};
  self.cmd_alias_hash_dict["zi"] = "zoom_in";
  self.cmd_alias_hash_dict["zo"] = "zoom_out";
  self.cmd_alias_hash_dict["zt"] = "zoom_to";
  self.cmd_alias_hash_dict["q" ] = "quit";
  self.cmd_alias_hash_dict["find"] = "search";
  self.cmd_alias_hash_dict["/"] = "search";
  self.cmd_alias_hash_dict["?"] = "backsearch";
  return;
###############################################################################
class cursor(object):
  """A measurement cursor: a named, selectable time marker.

  Note: the bits_per_line, bits_total and format constructor
  parameters are accepted for signature compatibility with signal()
  but are unused here.
  """
  def __init__( self, name="Cursor1", visible=True, \
                bits_per_line=32, bits_total=32,format="hex"):
    self.name = name
    self.visible = visible
    self.selected = False
    # Screen position and the sample index the cursor sits on
    self.x = 0
    self.y = 0
    self.sample = 0

  def __del__(self):
    return

  def __str__(self):
    return "name = %s" % self.name
###############################################################################
# A signal contains time samples and various display attributes.
class signal(object):
  """A single waveform trace: time samples plus display attributes.

  Note: the bits_per_line, bits_total and format constructor
  parameters are currently ignored; the corresponding attributes are
  initialized to fixed defaults and adjusted later by the parsers.
  """
  def __init__( self, name="cnt_a", type="signal",vcd_symbol="",visible=True, \
                bits_per_line=32, bits_total=32,format="hex"):
    # Identity
    self.name = name
    self.nickname = ""
    self.type = type               # "signal","divider","group","endgroup"
    self.hier_name = ""
    self.hier_level = 0
    self.vcd_symbol = vcd_symbol
    # Sample data and acquisition attributes
    self.values = []
    self.last_value = ""
    self.trigger = 0               # 0=OFF +1=Rising,-1=Falling,2=Pattern0,3=Pattern1
    self.data_enable = False
    # Display state
    self.visible = visible
    self.hidden = False
    self.deleted = False
    self.selected = False
    self.expandable = False
    self.collapsable = False
    self.is_expansion = False
    self.grouped = False
    # Screen geometry
    self.x = 0
    self.y = 0
    self.h = 0                     # Height
    self.w = 0                     # Width
    # Bit field description
    self.bits_per_line = 32
    self.bits_total = 32
    self.bit_top = 31
    self.bit_bot = 0
    self.bit_weight = 0            # Only used by rip_list, ie [7]->128
    self.rip = ""                  # [15:0], [1], ""
    self.format = "hex"

  def __del__(self):
    return

  def __str__(self):
    return "name = %s" % self.name
##############################################################################
class Sump2:
  """Register-level driver for the SUMP2 logic analyzer hardware.

  Talks through a backdoor bus object using two 32bit registers: a
  control register at addr and a data register at addr+0x4. Each
  operation writes a command byte to the control register, then writes
  to or reads from the data register.
  """
  def __init__ ( self, backdoor, addr ):
    self.bd = backdoor;
    self.addr_ctrl = addr;
    self.addr_data = addr + 0x4;
    # Command byte encodings for the control register
    self.cmd_state_idle = 0x00;
    self.cmd_state_arm = 0x01;
    self.cmd_state_reset = 0x02;# Always Reset before Arm.
    self.cmd_wr_trig_type = 0x04;
    self.cmd_wr_trig_field = 0x05;# Correspond to Event Bits
    self.cmd_wr_trig_dly_nth = 0x06;# Trigger Delay and Nth
    self.cmd_wr_trig_position = 0x07;# Samples post Trigger to Capture
    self.cmd_wr_rle_event_en = 0x08;# Enables events for RLE detection
    self.cmd_wr_ram_ptr = 0x09;# Load specific pointer.
    self.cmd_wr_ram_page = 0x0a;# Load DWORD Page.
    self.cmd_rd_hw_id_rev = 0x0b;
    self.cmd_rd_ram_width_len = 0x0c;
    self.cmd_rd_sample_freq = 0x0d;
    self.cmd_rd_trigger_ptr = 0x0e;
    self.cmd_rd_ram_data = 0x0f;
    self.cmd_wr_user_ctrl = 0x10;
    self.cmd_wr_user_pattern0 = 0x11;# Also Mask for Pattern Matching
    self.cmd_wr_user_pattern1 = 0x12;# Also Pattern for Pattern Matching
    self.cmd_wr_user_data_en = 0x13;# Special Data Enable Capture Mode
    self.cmd_wr_watchdog_time = 0x14;# Watchdog Timeout
    # Trigger type encodings written via cmd_wr_trig_type
    self.trig_and_ris = 0x00;# Bits AND Rising
    self.trig_and_fal = 0x01;# Bits AND Falling
    self.trig_or_ris = 0x02;# Bits OR Rising
    self.trig_or_fal = 0x03;# Bits OR Falling
    self.trig_pat_ris = 0x04;# Pattern Match Rising
    self.trig_pat_fal = 0x05;# Pattern Match Falling
    self.trig_in_ris = 0x06;# External Input Trigger Rising
    self.trig_in_fal = 0x07;# External Input Trigger Falling
    self.trig_watchdog = 0x08;# Watchdog trigger
    self.cfg_dict = {};# Populated by rd_cfg()
    # Status register bit masks
    self.status_armed = 0x01;# Engine is Armed, ready for trigger
    self.status_triggered = 0x02;# Engine has been triggered
    self.status_ram_post = 0x04;# Engine has filled post-trig RAM
    self.status_ram_pre = 0x08;# Engine has filled pre-trigger RAM
    self.status_rle_pre = 0x10;# RLE Engine has filled pre-trig RAM
    self.status_rle_post = 0x20;# RLE Engine has filled post-trig RAM
    self.status_rle_en = 0x80;# RLE Engine is present
  def wr ( self, cmd, data ):
    """Write a command byte to ctrl reg, then one DWORD to the data reg."""
    self.bd.wr( self.addr_ctrl, [ cmd ] );
    self.bd.wr( self.addr_data, [ data ] );
  def rd( self, addr, num_dwords = 1):
    """Optionally write a command byte, then burst-read num_dwords."""
    # Note: addr of None means use existing ctrl address and just read data
    if ( addr != None ):
      self.bd.wr( self.addr_ctrl, [ addr ] );
    return self.bd.rd( self.addr_data, num_dwords, repeat = True);
  def rd_cfg( self ):
    """Read HW id/rev, RAM geometry and sample frequency registers and
    unpack their bit fields into self.cfg_dict."""
    hwid_data = self.rd( self.cmd_rd_hw_id_rev )[0];
    ram_data = self.rd( self.cmd_rd_ram_width_len )[0];
    freq_data = self.rd( self.cmd_rd_sample_freq )[0];
    print("%08x" % hwid_data );
    print("%08x" % freq_data );
    print("%08x" % ram_data );
    self.cfg_dict['hw_id'] = ( hwid_data & 0xFFFF0000 ) >> 16;
    self.cfg_dict['hw_rev'] = ( hwid_data & 0x0000FF00 ) >> 8;
    self.cfg_dict['data_en'] = ( hwid_data & 0x00000040 ) >> 6;
    self.cfg_dict['trig_wd_en'] = ( hwid_data & 0x00000020 ) >> 5;
    self.cfg_dict['nonrle_dis'] = ( hwid_data & 0x00000010 ) >> 4;
    self.cfg_dict['rle_en'] = ( hwid_data & 0x00000008 ) >> 3;
    self.cfg_dict['pattern_en'] = ( hwid_data & 0x00000004 ) >> 2;
    self.cfg_dict['trig_nth_en'] = ( hwid_data & 0x00000002 ) >> 1;
    self.cfg_dict['trig_dly_en'] = ( hwid_data & 0x00000001 ) >> 0;
    self.cfg_dict['frequency'] = float(freq_data) / 65536.0;# 16.16 fixed point
    self.cfg_dict['ram_len'] = ( ram_data & 0x0000FFFF ) >> 0;
    self.cfg_dict['ram_dwords'] = ( ram_data & 0x00FF0000 ) >> 14;# >>16,<<2
    self.cfg_dict['ram_event_bytes'] = ( ram_data & 0x0F000000 ) >> 24;
    self.cfg_dict['ram_rle'] = ( ram_data & 0xF0000000 ) >> 28;
  def close ( self ):
    """No resources held; present for API symmetry."""
    return;
##############################################################################
# functions to convert text time samples into a VCD file. See cpy_txt2vcd.py
class TXT2VCD:
  """Convert whitespace-separated text sample dumps into VCD wave files.

  Input format: the first line is a header "name1 name2 ... timescale";
  each following line holds one hex sample value per signal. The output
  is a list of VCD text lines. See cpy_txt2vcd.py.
  """
  def __init__ ( self ):
    self.char_code = self.build_char_code(); # ['AA','BA',etc]
    self.header = self.build_header();
    self.footer = self.build_footer();
    return;
  def close ( self ):
    return;
  def conv_txt2vcd ( self, main_self, txt_list ):
    """
    Take in a txt list and spit out a vcd (as a list of lines).
    """
    header_line = txt_list[0]; # 1st line "#foo bar 10000"
    data_lines = txt_list[1:]; # Data lines "1 1a"
    bus_widths = self.get_bus_widths( data_lines[:] ); # How many bits in each
    rts = self.header;
    rts += self.build_name_map( header_line,bus_widths[:],self.char_code[:] );
    rts += self.footer;
    timescale = float( header_line.split()[-1] ); time = 0;
    next_perc = 0;# Display an update every 5%
    total_count = len( data_lines );
    prev_data_line = None;
    for ( i, data_line ) in enumerate( data_lines ):
      # Only emit a timestamp + values when the sample actually changed.
      if ( data_line != prev_data_line ):
        rts += [ "#" + str(time) ];
        bit_list = self.get_bit_value( data_line, header_line, bus_widths[:] );
        rts += self.dump_bit_value( bit_list, self.char_code[:] );
        prev_data_line = data_line;
      time += int( timescale );# Time advances even for skipped duplicates
      # TODO: Would be nice to have this call draw_header() instead.
      perc = ( 100 * i ) // total_count;
      if ( perc >= next_perc ):
        draw_header( main_self,"conv_txt2vcd() "+str( perc )+"%" );
        print( "conv_txt2vcd() "+str( perc )+"%" );
        next_perc += 5;# Next 5%, this counts 0,5,10,...95
    return rts;
  def get_bit_value( self,data_line,header_line,bus_widths_list_cp ):
    """
    Figure out each bit value (0,1) for the provided line. Return a list of
    "0"/"1" strings MSB-first ("x" for non-hex data).
    """
    rts = [];
    data_list = data_line.split();
    for bus_name in header_line.split()[0:-1]:# Remove the timescale at very end
      bus_width = bus_widths_list_cp.pop(0); # 1 or 16, etc
      data = data_list.pop(0); # "1" or "10ab", etc
      bit_val = 2**(bus_width-1); # 8->128, 4->8, 1->1
      for i in range( bus_width ): # Counts 0..7 for 8bit bus
        try:
          if ( ( int(data,16) & bit_val ) == 0 ): rts += ["0"];
          else: rts += ["1"];
        except:
          rts += ["x"];
        bit_val //= 2; # Counts 128,64,..2,1 for 8bit bus
    return rts;
  def dump_bit_value( self, bit_list, char_code_list_cp ):
    """
    Convert ["0","1",etc] to ["0AA","1BA",etc]
    """
    rts = [];
    for bit in bit_list:
      rts += [ bit +char_code_list_cp.pop(0) ];
    return rts;
  def build_name_map( self,header_line,bus_widths_list_cp,char_code_list_cp ):
    """
    Emit VCD variable declarations, one wire per bit:
    $var wire 1 AA foo [7] $end
    """
    rts = [];
    for bus_name in header_line.split()[0:-1]:# This removes timescale at end
      bus_width = bus_widths_list_cp.pop(0);
      if ( bus_width == 1 ):
        rts += [ "$var wire 1 " + char_code_list_cp.pop(0) + " " + \
                 bus_name + " $end" ];
      else:
        for i in range( bus_width ): # Counts 0..7 for 8bit bus
          rts += [ "$var wire 1 " + char_code_list_cp.pop(0) + " " + \
                   bus_name + " [" + str(bus_width-1-i)+"] $end" ];
    return rts;
  def get_bus_widths( self, data_list_cp ):
    """
    Rip the vectors, if any vector never exceeds 1 then its a wire. Tag it
    otherwise, bus width is number of nibbles x4
    """
    bus_width = [None]*100;
    for data_line in data_list_cp:
      data_words = data_line.split(); i = 0;
      for data_word in data_words:
        bit_width = 4 * len( data_word ); # How many bits 4,8,12,etc
        if ( bus_width[i] == None ): bus_width[i] = 1;# Default to single wire
        if ( data_word == "XXXXXXXX" ):
          bus_width[i] = 32;# BUGFIX: was '==', a no-op comparison
        else:
          try:
            if ( int( data_word, 16) > 1 ): bus_width[i] = bit_width;
          except:
            print("ERROR: Invalid non Hexadecimal Data " + str(data_word));
        i+=1;
    return bus_width;
  def build_char_code( self ):
    """
    VCDs map wires to alphabet names such as AA,BA. Build a 676 (26x26) list
    """
    char_code = []; # This will be ['AA','BA',..,'ZZ']
    for ch1 in "ABCDEFGHIJKLMNOPQRSTUVWXYZ":
      for ch2 in "ABCDEFGHIJKLMNOPQRSTUVWXYZ":
        char_code += [ ch2+ch1 ];
    return char_code;
  def build_header( self ):
    """Fixed VCD preamble (date/version/timescale/scope)."""
    rts = [];
    rts += [ "$date Wed May 4 10:12:46 2005 $end" ];
    rts += [ "$version ModelSim Version 6.0c $end" ];
    rts += [ "$timescale 1ps $end" ];
    rts += [ "$scope module module_name $end" ];
    return rts;
  def build_footer( self ):
    """Fixed VCD epilogue closing the scope and opening the dump."""
    rts = [];
    rts += [ "$upscope $end"];
    rts += [ "$enddefinitions $end"];
    rts += [ "#0" ];
    rts += [ "$dumpvars"];
    rts += [ "$end"];
    return rts;
##############################################################################
# functions to send Backdoor commands to BD_SERVER.PY over TCP Sockets
class Backdoor:
  """TCP client for the BD_SERVER.PY backdoor register protocol.

  Sends ASCII read/write commands over a socket. Every packet (both
  directions) is an 8-hex-character payload-length header followed by the
  payload text.
  """
  def __init__ ( self, ip, port ):
    try:
      import socket;
    except:
      raise RuntimeError("ERROR: socket is required");
    try:
      self.sock=socket.socket(socket.AF_INET,socket.SOCK_STREAM);
      self.sock.connect( ( ip, port ) );# "localhost", 21567
      # self.sock.settimeout(1); # Dont wait forever
      self.sock.settimeout(5); # Dont wait forever
    except:
      # raise RuntimeError("ERROR: Unable to open Socket!! ")
      # Connection failure leaves sock as None; callers must check.
      self.sock = None;
    return;
  def close ( self ):
    self.sock.close();
  def bs(self, addr, bitfield ):
    # Bit-set: read-modify-write, OR bitfield[0] into the register.
    rts = self.rd( addr, 1 );
    data_new = rts[0] | bitfield[0]; # OR in some bits
    self.wr( addr, [data_new] );
  def bc(self, addr, bitfield ):
    # Bit-clear: read-modify-write, AND-NOT bitfield[0] out of the register.
    rts = self.rd( addr, 1 );
    data_new = rts[0] & ~ bitfield[0];# CLR some bits
    self.wr( addr, [data_new] );
  def wr(self, addr, data, repeat = False ):
    """Write the DWORD list `data` starting at addr; repeat=True writes all
    DWORDs to the same address instead of incrementing."""
    if ( repeat == False ):
      cmd = "w";# Normal Write : Single or Burst with incrementing address
    else:
      cmd = "W";# Write Multiple DWORDs to same address
    payload = "".join( [cmd + " %x" % addr] +
                       [" %x" % int(d) for d in data] +
                       ["\n"] );
    self.tx_tcp_packet( payload );
    self.rx_tcp_packet();
  def rd( self, addr, num_dwords=1, repeat = False ):
    """Read num_dwords from addr; repeat=True re-reads the same address.
    Returns a list of ints parsed from the server's hex reply."""
    if ( repeat == False ):
      cmd = "r";# Normal Read : Single or Burst with incrementing address
    else:
      cmd = "k";# Read Multiple DWORDs from single address
    payload = cmd + " %x %x\n" % (addr, (num_dwords-1)); # 0=1DWORD,1=2DWORDs
    self.tx_tcp_packet( payload );
    payload = self.rx_tcp_packet().rstrip();
    dwords = payload.split(' ');
    rts = [];
    # print( dwords );
    for dword in dwords:
      rts += [int( dword, 16 )];
    return rts;
  def tx_tcp_packet( self, payload ):
    # A Packet is a 8char hexadecimal header followed by the payload.
    # The header is the number of bytes in the payload.
    header = "%08x" % len(payload);
    bin_data = (header+payload).encode("utf-8");# String to ByteArray
    self.sock.send( bin_data );
  def rx_tcp_packet( self ):
    # Receive 1+ Packets of response. 1st Packet will start with header that
    # indicates how big the entire Backdoor payload is. Sit in a loop
    # receiving 1+ TCP packets until the entire payload is received.
    bin_data = self.sock.recv(1024);
    rts = bin_data.decode("utf-8");# ByteArray to String
    header = rts[0:8]; # Remove the header, Example "00000004"
    payload_len = int(header,16);# The Payload Length in Bytes, Example 0x4
    payload = rts[8:]; # Start of Payload is everything after header
    # 1st packet may not be entire payload so loop until we have it all
    while ( len(payload) < payload_len ):
      bin_data = self.sock.recv(1024);
      payload += bin_data.decode("utf-8");# ByteArray to String
    return payload;
###############################################################################
main = main();
|
[
"adrien.descamps@gmail.com"
] |
adrien.descamps@gmail.com
|
6028e1a80acb4dba764ef24342f833eb677eea1b
|
95a60a8fd8a21fcc3bcdcecfd4b6a3a3a3ff35b6
|
/backend/api.py
|
e175256a127f6184aa05edf3d108889e4af44c2b
|
[] |
no_license
|
AkshithBellare/maljpeg-web-app
|
f59cae2eff7f6446876b4a96b3143c2c38078927
|
ca83f1ef5bf95e73143aaa0e29b5c8f6f010936d
|
refs/heads/master
| 2023-03-29T12:37:29.864849
| 2021-04-05T13:16:43
| 2021-04-05T13:16:43
| 354,757,948
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,824
|
py
|
import os
import pickle
from flask import Flask
from flask_restful import Resource, Api, reqparse
from flask_cors import CORS
from flask import request, Response
import numpy as np
import json
import PIL.Image as Image
import io
import base64
from struct import unpack
import pandas as pd
import sys
import glob
# JPEG marker opcode -> mnemonic lookup (ITU-T T.81 marker assignments).
marker_mapping = {
    0xffc0: "SOF0",
    0xffc1: "SOF1",
    0xffc2: "SOF2",
    0xffc3: "SOF3",
    0xffc4: "DHT",
    0xffc5: "SOF5",
    0xffc6: "SOF6",
    0xffc7: "SOF7",
    0xffc8: "JPG",
    0xffc9: "SOF9",
    0xffca: "SOF10",
    0xffcb: "SOF11",
    0xffcc: "DAC",
    0xffcd: "SOF13",
    0xffce: "SOF14",
    0xffcf: "SOF15",
    0xffd0: "RST0",
    0xffd1: "RST1",
    0xffd2: "RST2",
    0xffd3: "RST3",
    0xffd4: "RST4",
    0xffd5: "RST5",
    0xffd6: "RST6",
    0xffd7: "RST7",
    0xffd8: "SOI",
    0xffd9: "EOI",
    0xffda: "SOS",
    0xffdb: "DQT",
    0xffdc: "DNL",
    0xffdd: "DRI",
    0xffde: "DHP",
    0xffdf: "EXP",
    0xffe0: "APP0",
    0xffe1: "APP1",
    0xffe2: "APP2",
    0xffe3: "APP3",
    0xffe4: "APP4",
    0xffe5: "APP5",
    0xffe6: "APP6",
    0xffe7: "APP7",
    0xffe8: "APP8",
    0xffe9: "APP9",
    0xffea: "APP10",
    0xffeb: "APP11",
    0xffec: "APP12",
    0xffed: "APP13",
    0xffee: "APP14",
    0xffef: "APP15",
    0xfff0: "JPG0",
    0xfff1: "JPG1",
    0xfff2: "JPG2",
    0xfff3: "JPG3",
    0xfff4: "JPG4",
    0xfff5: "JPG5",
    0xfff6: "JPG6",
    0xfff7: "JPG7",
    0xfff8: "JPG8",
    0xfff9: "JPG9",
    0xfffa: "JPG10",
    0xfffb: "JPG11",
    0xfffc: "JPG12",
    0xfffd: "JPG13",
    0xfffe: "COM",
    0xff01: "TEM",
}
class JPEG:
    """Structural JPEG parser used for malware-feature extraction.

    Walks the marker segments of a JPEG byte stream and tallies counts
    and maximum segment sizes for selected markers.
    """
    def __init__(self, image_file):
        # Read the whole file into memory; decode() consumes self.img_data.
        with open(image_file, 'rb') as f:
            self.img_data = f.read()
    def decode(self):
        """Scan the marker stream and return the 11-element feature list:

        [bytes_after_EOI, n_DQT, n_DHT, n_markers, max_DQT_size,
         max_DHT_size, file_size, max_COM_size, max_APP1_size,
         max_APP12_size, 0]
        """
        data = self.img_data
        marker_DQT_num = 0
        marker_DQT_size_max = 0
        marker_DHT_num = 0
        marker_DHT_size_max = 0
        file_markers_num = 0
        marker_EOI_content_after_num = 0
        marker_APP12_size_max = 0
        marker_APP1_size_max = 0
        marker_COM_size_max = 0
        file_size = len(data)
        print(f"file_size = {file_size}")
        while len(data) > 0:
            try:
                marker, = unpack(">H", data[0:2])
            except Exception:
                # Fewer than 2 bytes remain (truncated file). The original
                # code printed and kept looping with a stale/undefined
                # `marker`, which could spin forever; stop scanning instead.
                print("error")
                break
            marker_map = marker_mapping.get(marker)
            if marker_map != None:
                file_markers_num += 1
                if marker_map == "DQT":
                    marker_DQT_num += 1
                    lenchunk, = unpack(">H", data[2:4])
                    if lenchunk > marker_DQT_size_max:
                        marker_DQT_size_max = lenchunk
                    data = data[2+lenchunk:]
                elif marker_map == "SOI":
                    data = data[2:]
                elif marker_map == "DHT":
                    marker_DHT_num += 1
                    lenchunk, = unpack(">H", data[2:4])
                    if lenchunk > marker_DHT_size_max:
                        marker_DHT_size_max = lenchunk
                    data = data[2+lenchunk:]
                elif marker_map == "EOI":
                    # Track the largest run of trailing bytes after EOI --
                    # a common malware-payload hiding spot.
                    rem = data[2:]
                    if len(rem) > marker_EOI_content_after_num:
                        marker_EOI_content_after_num = len(rem)
                    data = rem
                elif marker_map == "SOS":
                    # Skip entropy-coded data: jump to the last two bytes
                    # (expected to be the EOI marker).
                    data = data[-2:]
                elif marker_map == "APP12":
                    lenchunk, = unpack(">H", data[2:4])
                    if lenchunk > marker_APP12_size_max:
                        marker_APP12_size_max = lenchunk
                    data = data[2+lenchunk:]
                elif marker_map == "APP1":
                    lenchunk, = unpack(">H", data[2:4])
                    if lenchunk > marker_APP1_size_max:
                        marker_APP1_size_max = lenchunk
                    data = data[2+lenchunk:]
                elif marker_map == "COM":
                    lenchunk, = unpack(">H", data[2:4])
                    if lenchunk > marker_COM_size_max:
                        marker_COM_size_max = lenchunk
                    data = data[2+lenchunk:]
                elif marker_map == "TEM":
                    data = data[2:]
                elif marker <= 0xffd9 and marker >= 0xffd0:
                    # RSTn markers carry no length field.
                    data = data[2:]
                elif marker <= 0xffbf and marker >= 0xff02:
                    lenchunk, = unpack(">H", data[2:4])
                    data = data[2+lenchunk:]
                else:
                    # Any other known marker: skip its length-prefixed body.
                    lenchunk, = unpack(">H", data[2:4])
                    data = data[2+lenchunk:]
            else:
                # Not at a known marker: advance one byte and resync.
                data = data[1:]
        return [marker_EOI_content_after_num, marker_DQT_num, marker_DHT_num,
                file_markers_num, marker_DQT_size_max, marker_DHT_size_max,
                file_size, marker_COM_size_max, marker_APP1_size_max,
                marker_APP12_size_max, 0]
def extract_features():
    """Decode the saved upload and persist its feature vector.

    Reads ./server_files/saveimg.jpeg, extracts the marker-based features
    via JPEG.decode(), and writes them as a single CSV row to test.csv.
    """
    jpeg = JPEG("./server_files/saveimg.jpeg")
    features = jpeg.decode()
    row = pd.DataFrame(features).T  # one row, one column per feature
    row.to_csv("test.csv")
# Flask application setup: enable CORS for the browser client and expose a
# flask-restful API.
app = Flask(__name__)
CORS(app)
api = Api(app)
# Request parser: the client POSTs the base64-encoded image under "image".
parser = reqparse.RequestParser()
parser.add_argument("image")
class Predict(Resource):
    """POST endpoint: accept a base64-encoded JPEG and extract features."""
    def post(self):
        """Save the uploaded image to disk, run feature extraction, and
        return a classification result.

        NOTE(review): the returned class is currently hard-coded ("bening",
        a misspelling of "benign") -- the trained model is not wired in yet.
        """
        args = parser.parse_args()
        #request_data = json.loads(request.get_data())
        #data = request_data['data']
        #decodeit = open('saveimg.jpeg', 'wb')
        #decodeit.write(base64.b64decode((data)))
        #decodeit.close()
        #print(type(data))
        # Persist the decoded image where extract_features() expects it.
        decodeit = open('./server_files/saveimg.jpeg', 'wb')
        decodeit.write(base64.b64decode(bytes(args["image"], 'utf-8')))
        decodeit.close()
        extract_features()
        return {"class" : "bening"}
# Register the endpoint and start the development server when executed
# directly (debug mode; not for production use).
api.add_resource(Predict, "/predict")
if __name__ == "__main__":
    app.run(debug=True)
|
[
"akshithnm@gmail.com"
] |
akshithnm@gmail.com
|
f49a5e37d0b4279902872dcc74a5ea78ab2137a3
|
6f5c0db7b845cb62c951b2467957ffe3cb0aad35
|
/stats.py
|
2616a77c878bbfb064e02cbecd9ad98b5958f460
|
[] |
no_license
|
baydarich/infohash-searcher
|
300056c8255656b049e8aa4c6cc46df7e7f9500f
|
7fad6763c099934586bb286f160f3e52063420ff
|
refs/heads/master
| 2021-06-14T06:49:35.439588
| 2017-03-05T11:38:17
| 2017-03-05T11:38:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,211
|
py
|
#!/usr/env/python
# NOTE: Python 2 script (print statements, dict.iteritems). Builds a
# histogram of torrent content sizes bucketed by power-of-two size range,
# grouped by piece length.
from bencode import bdecode, bencode
import os
b = 1024 * 1024
# ranges: list of {bucket_index: (low_bytes, high_bytes)}, bucket 1 is
# (1, 1MB], each following bucket doubles the upper bound.
ranges = [{1: (1, 1024 * 1024)}]
t = 1
for j in range(18):
    t = 1024 * 1024
    ranges.append({j + 2: (t * 2 ** j + 1, t * 2 ** (j + 1))})
bs = {}
# bs = {piece_length:[{ran:count}, {ran:count}]}
base_path = "/home/horn/Documents/SNE/CCF/proj/test-torrents/"
files = os.listdir(base_path)
stat = []
r = 0
for i in files:
    length = 0
    with open("%s%s" % (base_path, i)) as _file:
        info_orig = bdecode(_file.read())['info']
    piece_length = info_orig['piece length']
    try:
        # Single-file torrent: total length is stored directly.
        length = info_orig['length']
    except KeyError:
        # Multi-file torrent: sum the per-file lengths.
        for j in info_orig['files']:
            length += j['length']
    finally:
        # Find which size bucket this torrent falls into.
        for j, k in enumerate(ranges):
            if k[j + 1][0] <= length <= k[j + 1][1]:
                r = j + 1
                break
    # Increment bs[piece_length][r], creating nested entries as needed.
    try:
        bs[piece_length][r] += 1
    except KeyError:
        try:
            bs[piece_length][r] = 1
        except KeyError:
            bs[piece_length] = {r: 1}
for k, v in bs.iteritems():
    print k, sorted(v, reverse=True)
print bs
|
[
"a@bakhtin.net"
] |
a@bakhtin.net
|
2740845c8dea1c81052693b87ed8201e5e26e8c6
|
7b7ca1ab3f5364756ea67d8c2e39b68a58ab8f06
|
/First_improved ws model.py
|
83daded0d204aadd0b0af3ffd89e43d18fca0168
|
[] |
no_license
|
Isabellahu/Complex-Network
|
6a1f065ec12ab4eb86b390205b8f343eb95204eb
|
511683750636fd198d12963771ca61255b789641
|
refs/heads/master
| 2020-04-01T17:49:55.732821
| 2018-10-17T14:09:22
| 2018-10-17T14:09:22
| 153,453,532
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,343
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 28 15:32:55 2017
@author: 90662
"""
#WS小世界模型构建
import random
from numpy import *
import networkx as nx
import matplotlib.pyplot as plt
#假设参加聚会,每个人只认识一个主角
def CreateNetwork(n, k, p, matrix):
    """Build the initial 'party' topology: node 1 is connected to every
    node (everyone knows only the host; note this includes a self-loop
    at matrix[1][1])."""
    for node in range(n):
        matrix[1][node] = 1
        matrix[node][1] = 1
def SmallWorld(n,k,p,matrix):
    """Watts-Strogatz-style rewiring pass over the adjacency matrix.

    For each node i and each of its k/2 "forward" neighbors, draw a random
    probability; if it is below p, remove the edge (i, i+j) and connect i
    to a random node instead (avoiding self-loops and duplicate edges).
    NOTE(review): `from numpy import *` may shadow the stdlib `random`
    module with numpy.random, whose randint() excludes the upper bound --
    confirm which module is actually in effect.
    """
    # Draw a probability p_change; rewire the edge when p_change < p
    p_change = 0.0
    edge_change = 0
    for i in range(n):
        #t = int(k/2)
        for j in range( k // 2 + 1):
            # Edge considered for rewiring
            p_change = (random.randint(0,n-1)) / (double)(n)
            # Rewire
            if p_change < p:
                # Pick a random node, excluding self-loops and duplicate edges
                while(1):
                    node_NewConnect = (random.randint(0,n-1)) + 1
                    if matrix[i][node_NewConnect] == 0 and node_NewConnect != i:
                        break
                if (i+j) <= (n-1):
                    matrix[i][i+j] = matrix[i+j][i] = 0
                else:
                    matrix[i][i+j-(n-1)] = matrix[i+j-(n-1)][i] = 0
                matrix[i][node_NewConnect] = matrix[node_NewConnect][i] = 1
                edge_change += 1
            else:
                print("no change\n",i+j)
    #test
    print("small world network\n")
    for i in range(n):
        for j in range(n):
            print(matrix[i][j])
        print("\n")
    print("edge_change = ",edge_change)
    print("ratio = ",(double)(edge_change)/(n*k/2))
#将matrix写入文件
def DataFile(n,k,p,matrix):
# 打开一个文件
f = open("C:/0network/data.txt", "w")
#matrix[[[1 for i in range(n)] [1 for j in range(n)]]
for i in range(n):
for j in range(n):
netdata = ','.join(str(matrix[i][j]))
f.write(netdata)
f.write('\n')
#f.write("true")
# 关闭打开的文件
f.close()
#print(netdata)
print('end')
# 画图
def Drawmap(n,matrix,G):
    """Render the adjacency matrix as a networkx graph in a circular
    layout; saves a PNG and shows the figure (blocks on plt.show())."""
    # Add the n nodes
    for i in range(n):
        G.add_node(i)
    # Add an edge (i, j) wherever matrix[i][j] == 1
    for i in range(n):
        for j in range(n):
            if matrix[i][j] == 1:
                G.add_edge(i,j)
    # Layout: circular node placement
    pos = nx.circular_layout(G)
    # Draw the graph
    nx.draw(G,pos,with_labels=False,node_size = 30)
    # Output 1: save the image as a PNG file
    plt.savefig("WS-Network-change1-2.png")
    # Output 2: show the image in a window
    plt.show()
#平均群聚系数
def average_clustering(n,matrix):
    """Print an average clustering coefficient for the graph.

    NOTE(review): `Triangle` is initialized once and never reset per node,
    so each node's term uses the *cumulative* triangle count of all nodes
    processed so far; also the neighbor list only scans forward (j >= i).
    Confirm whether this matches the intended clustering definition.
    """
    # Number of connected triples (three_tuple per node)
    number_three_tuple = 0.0
    # Triangle count (cumulative across nodes -- see NOTE above)
    Triangle = 0.0
    # Accumulated clustering coefficient
    clustering_coefficient = 0.0
    for i in range(n):
        three_tuple = 0.0
        sum_edge = 0
        for j in range(n):
            if matrix[i][j] == 1 or matrix[j][i] == 1:
                sum_edge += 1
        float(sum_edge)
        # Triples centered at node i: C(degree, 2)
        three_tuple = int((sum_edge*(sum_edge-1.0))/2.0)
        # Forward neighbors of node i; reset each iteration
        myList = []
        for j in range(i,n):
            if matrix[i][j] == 1 or matrix[j][i] == 1:
                myList.append(j)
        # Each connected pair of neighbors closes a triangle
        for k in range(len(myList)):
            for q in range(k,len(myList)):
                if matrix[myList[k]][myList[q]] == 1 or matrix[myList[q]][myList[k]] == 1:
                    Triangle += 1
        if three_tuple != 0:
            clustering_coefficient += (Triangle/three_tuple)
    clustering_coefficient = clustering_coefficient/n
    print('clustering_coefficient = ',clustering_coefficient)
#Floyd算法求最短路径
def Ford(n,matrix):
#出发点v
#到达点w
#中转点K
#初始化新的邻接矩阵new_m,路径矩阵dis
dis = zeros((n,n),int)
new_m = zeros((n,n),int)
for v in range(n):
for w in range(n):
dis[v][w] = w
if matrix[v][w] == 0:
new_m[v][w] = 6666666
elif matrix[v][w] == 1:
new_m[v][w] = 1
dis[v][w] = 1
for k in range(n):
for v in range(n):
for w in range(n):
#如果经过中转点的路径比两点路径短
if (new_m[v][k] + new_m[k][w]) < new_m[v][w]:
new_m[v][w] = new_m[v][k] + new_m[k][w]
#dis[v][w] = dis[v][k]
dis[v][w] = 2
#打印节点
sum = 0.0
for v in range(n):
for w in range(v+1,n):
#print('v= ,',v,'w = ',w)
#print('dis[v][w] = ',dis[v][w])
sum = sum + dis[v][w]
float(n)
average_shortest_path_length = sum/(n*(n-1.0)/2)
print('average_shortest_path_length = ',average_shortest_path_length)
#节点度分布
def node_degree_distribution(n,matrix):
#求节点的度
degree = []
for i in range(n):
sum = 0
for j in range(n):
sum += matrix[i][j]
#print(sum)
degree.append(sum)
#print(degree)
degree.sort()
print('degree = ',degree)
sum_degree= 0.0
for i in range(n):
sum_degree += degree[i]
#print(sum_degree)
#生成x轴序列,从1到最大度
x = range(len(degree))
#将频次转换为频率,这用到Python的一个小技巧:列表内涵
y = [z/sum_degree for z in degree]
#在双对数坐标轴上绘制度分布曲线
plt.loglog(x,y,color="blue",linewidth=2)
#显示图表
plt.show()
#动态行为
#抗故意攻击 robustness against intentional attack
def node_robustness(n):
#node_degree_distribution(n,matrix)
#求出度最大的点
degree = []
for i in range(n):
sum = 0
for j in range(n):
sum += matrix[i][j]
degree.append(sum)
#将度最大的点删除边
node_flag = degree.index(max(degree))
for i in range(n):
matrix[node_flag][i] = 0
matrix[i][node_flag] = 0
#随机攻击 random attack
def node_random(n):
#产生一个随机数:0到n-1
node_flag = random.randint(0,n-1)
print(node_flag)
for i in range(n):
matrix[node_flag][i] = 0
matrix[i][node_flag] = 0
if __name__=="__main__":
print("main")
#输入三个参数:节点数N,参数K,概率P
n = input("请输入节点数 n = ",)
k = input("请输入参数(偶数) k = ",)
p = input("请输入概率 p = ",)
n=int(n)
k=int(k)
p=float(p)
matrix = zeros((n,n),int)
#matrix = zeros((n,n))
#print(matrix)
G = nx.Graph()
value = [n,k,p]
#print("\n")
CreateNetwork(n,k,p,matrix)
SmallWorld(n,k,p,matrix)
#print(matrix)
#导出到一个文件中
#DataFile(n,k,p,matrix)
#画图
Drawmap(n,matrix,G)
#被攻击前的网络特性
#群聚系数
average_clustering(n,matrix)
#平均最短路径
Ford(n,matrix)
#节点度分布
node_degree_distribution(n,matrix)
#抗故意攻击 robustness against intentional attack
#重新定义图
#node_robustness(n)
#G = nx.Graph()
#Drawmap(n,matrix,G)
#随机攻击 random attack
node_random(n)
#重新定义图
G = nx.Graph()
Drawmap(n,matrix,G)
#被攻击后的网络特性
#群聚系数
average_clustering(n,matrix)
#平均最短路径
Ford(n,matrix)
#节点度分布
node_degree_distribution(n,matrix)
|
[
"noreply@github.com"
] |
noreply@github.com
|
b11b5949d9aeb93728df91302c1df74b605ff07c
|
e8dc0309de1dd4d9e4a25bcffdd6f9e9022c153c
|
/Code/wink_detection.py
|
b7c01c51a904d3cc8467f119b3ce1a2f15184b79
|
[] |
no_license
|
FelixFelicis555/Blinking-Keyboard
|
04947fe0b8efacd158d4a698b360233947ee8ef9
|
cd2dd51bfed205780cd46a1f17287015790186d3
|
refs/heads/master
| 2022-02-27T18:35:50.149206
| 2019-11-08T09:52:16
| 2019-11-08T09:52:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,512
|
py
|
import numpy as np
import cv2
import dlib
from scipy.spatial import distance as dist
from gtts import gTTS
import os
language = 'en'
import pyttsx3
engine = pyttsx3.init()
# Mapping from accumulated wink sequences to characters: each key is a
# string of '0' (left wink) and '1' (right wink) events; a blink confirms
# the selection (see the capture loops below, where total_left appends '0'
# and total_right appends '1').
characterdict = {'0':'a','00':'d','000':'j','0000':'n','1':'s','01':'f','001':'k','0001':'y','10':'g','010':'l','0010':'t'}
characterdict['0011']='v'
characterdict['011']='m'
characterdict['11']='h'
characterdict['0100']='b'
characterdict['100']='u'
characterdict['0101']='r'
characterdict['101']='i'
characterdict['0110']='e'
characterdict['110']='o'
characterdict['0111']='c'
characterdict['111']='p'
characterdict['1000']='x'
characterdict['1001']='w'
characterdict['1010']='q'
characterdict['1011']='z'
characterdict['1100']=','
characterdict['1101']='.'
characterdict['1110']='?'
characterdict['1111']=" "
print("Enter a choice whether you want blink keyboard or wink keyboard \n 1.) Blink Keyboard \n 2.) Wink keyboard")
n = int(input())
if n==2:
while True:
print("You have choosen wink keyboard\n")
print("Way of using wink keyboard\n")
print("1.) You will be shown the keyboard structure in front of you\n")
print("2.) will move the pointer to left side\n")
print("3.) Right wink will move the pointer to right side\n")
print("4.) Blink detected when you here beep sound once will fix your character that you want to choose it\n")
print("5.) When you hear the beep sound twice while blinking you will be back to the starting position \n")
print("6.) On the starting node if you blink that means backspace\n")
print("If you understand the rules press 'y' else 'press 'n' \n")
check = input()
if check =='y':
break
text = ""
PREDICTOR_PATH = "./shape_predictor_68_face_landmarks.dat"
stop_flag = 0
FULL_POINTS = list(range(0, 68))
FACE_POINTS = list(range(17, 68))
JAWLINE_POINTS = list(range(0, 17))
RIGHT_EYEBROW_POINTS = list(range(17, 22))
LEFT_EYEBROW_POINTS = list(range(22, 27))
NOSE_POINTS = list(range(27, 36))
RIGHT_EYE_POINTS = list(range(36, 42))
LEFT_EYE_POINTS = list(range(42, 48))
MOUTH_OUTLINE_POINTS = list(range(48, 61))
MOUTH_INNER_POINTS = list(range(61, 68))
EYE_AR_THRESH = 0.23
EYE_AR_CONSEC_FRAMES = 5
counter_left = 0
total_left = 0
counter_right = 0
total_right = 0
counter_blink = 0
total_blink = 0
flag_left,flag_right,flag_blink = 0,0,0
def eye_aspect_ratio(eye):
    """Eye aspect ratio (EAR): mean vertical landmark spread divided by
    the horizontal eye width; drops toward 0 as the eye closes."""
    vertical_a = dist.euclidean(eye[1], eye[5])
    vertical_b = dist.euclidean(eye[2], eye[4])
    horizontal = dist.euclidean(eye[0], eye[3])
    return (vertical_a + vertical_b) / (2.0 * horizontal)
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(PREDICTOR_PATH)
video_capture = cv2.VideoCapture(0)
image = "base"
text = ""
while True:
ret, frame = video_capture.read()
if ret:
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
rects = detector(gray, 0)
for rect in rects:
x = rect.left()
y = rect.top()
x1 = rect.right()
y1 = rect.bottom()
landmarks = np.matrix([[p.x, p.y] for p in predictor(frame, rect).parts()])
left_eye = landmarks[LEFT_EYE_POINTS]
right_eye = landmarks[RIGHT_EYE_POINTS]
left_eye_hull = cv2.convexHull(left_eye)
right_eye_hull = cv2.convexHull(right_eye)
cv2.drawContours(frame, [left_eye_hull], -1, (0, 255, 0), 1)
cv2.drawContours(frame, [right_eye_hull], -1, (0, 255, 0), 1)
ear_left = eye_aspect_ratio(left_eye)
ear_right = eye_aspect_ratio(right_eye)
if ear_left >= EYE_AR_THRESH and ear_right >= EYE_AR_THRESH:
counter_blink = 0
counter_left = 0
counter_right = 0
# print("****************************************")
# print("Counter Blink : " , counter_blink)
# print("Counter LEFT : ", counter_left)
# print("Counter Right : ", counter_right)
# print("****************************************")
if counter_blink >= 10:
if counter_blink == 10:
flag_blink = 1
duration = 0.05 # seconds
freq = 440 # Hz
os.system('play -nq -t alsa synth {} sine {}'.format(duration, freq))
if counter_blink == 20:
stop_flag = 1
flag_blink = 0
duration = 0.05 # seconds
freq = 440 # Hz
os.system('play -nq -t alsa synth {} sine {}'.format(duration, freq))
else:
if flag_blink == 1:
total_blink += 1
# print("Blink Occured")
counter_blink = 0
flag_blink = 0
if stop_flag == 1:
image = "base"
counter_blink = 0
flag_blink = 0
if ear_left < EYE_AR_THRESH:
if ear_right < EYE_AR_THRESH :
counter_blink += 1
counter_left = 0
else:
counter_blink = 0
counter_left += 1
counter_right = 0
if counter_left == EYE_AR_CONSEC_FRAMES:
flag_left = 1
duration = 0.05 # seconds
freq = 440 # Hz
os.system('play -nq -t alsa synth {} sine {}'.format(duration, freq))
else:
if flag_left ==1:
total_left += 1
# print("Left eye winked")
counter_left = 0
counter_blink = 0
flag_left = 0
counter_right = 0
else:
if counter_left >= EYE_AR_CONSEC_FRAMES:
flag_left = 1
if ear_right < EYE_AR_THRESH:
if ear_left < EYE_AR_THRESH:
counter_right = 0
pass
else:
counter_blink = 0
counter_right += 1
counter_left = 0
if counter_right == EYE_AR_CONSEC_FRAMES:
flag_right = 1
duration = 0.05 # seconds
freq = 440 # Hz
os.system('play -nq -t alsa synth {} sine {}'.format(duration, freq))
else:
if flag_right == 1:
total_right += 1
# print("Right eye winked")
counter_right = 0
flag_right = 0
counter_blink = 0
counter_left = 0
else:
if counter_right >= EYE_AR_CONSEC_FRAMES:
flag_right = 1
# if ear_left >= EYE_AR_THRESH :
# counter_left = 0
# counter_blink = 0
# if ear_right >= EYE_AR_THRESH:
# counter_right = 0
# counter_blink = 0
cv2.putText(frame, "Wink Left : {}".format(total_left), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2)
cv2.putText(frame, "Wink Right: {}".format(total_right), (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2)
cv2.putText(frame, "Blink Occured: {}".format(total_blink), (10, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2)
if total_left == 1:
if image == "base":
image = ""
image+='0'
total_left = 0
total_right = 0
total_blink = 0
flag_left = 0
flag_right = 0
flag_blink = 0
stop_flag = 0
if total_right == 1:
if image =="base":
image = ""
image+='1'
total_right = 0
total_left = 0
total_blink = 0
flag_left = 0
flag_right = 0
flag_blink = 0
stop_flag = 0
if total_blink == 1:
# print("image is "+image+".jpg")
if image!='base':
text += characterdict[image]
else:
if len(text)!=0:
text = text[:len(text)-1]
# do the required action
image = "base"
total_blink = 0
total_left = 0
total_right = 0
flag_left = 0
flag_right = 0
flag_blink = 0
stop_flag = 0
if len(image)>4:
image=image[:4]
cv2.namedWindow("KeyBoard", cv2.WINDOW_NORMAL)
cv2.moveWindow("KeyBoard",850,20)
ia = cv2.imread(image+".jpg")
ims = cv2.resizeWindow("KeyBoard",550, 400) # Resize image
cv2.imshow("KeyBoard" , ia)
cv2.namedWindow("Faces", cv2.WINDOW_NORMAL)
cv2.moveWindow("Faces",0,20)
ims = cv2.resizeWindow("Faces",800, 700) # Resize image
cv2.imshow("Faces", frame)
cv2.namedWindow("Typed_Text", cv2.WINDOW_NORMAL)
cv2.moveWindow("Typed_Text",850,500)
draw = cv2.imread("draw.jpg")
cv2.resizeWindow("Typed_Text",550,270)
cv2.putText(draw, "Typed Text: {}".format(text), (20, 90), cv2.FONT_HERSHEY_SIMPLEX, 3, (0, 0, 0), 5)
cv2.imshow("Typed_Text" , draw)
ch = 0xFF & cv2.waitKey(1)
if ch == ord('q'):
break
cv2.destroyAllWindows()
elif n==1:
while True:
print("You have choosen Blink keyboard")
print("Way of using Blink keyboard\n")
print("1.) You will be shown the keyboard structure in front of you\n")
print("2.) Shorter blink: When you hear a beep sound first time, will move the pointer to left side\n")
print("3.) Longer blink: When you hear a beep sound second time, will move the pointer to right side\n")
print("4.) Longest Blink: When you hear a beep sound third time, will fix your character that you want to choose it\n")
print("5.) Back to start: When you hear the beep sound 4th time with writing character\n")
print("6.) On the starting node if you blink that means backspace\n")
print("If you understand the rules press 'y' else 'press 'n' \n")
check = input()
if check =='y':
break
text = ""
PREDICTOR_PATH = "./shape_predictor_68_face_landmarks.dat"
FULL_POINTS = list(range(0, 68))
FACE_POINTS = list(range(17, 68))
JAWLINE_POINTS = list(range(0, 17))
RIGHT_EYEBROW_POINTS = list(range(17, 22))
LEFT_EYEBROW_POINTS = list(range(22, 27))
NOSE_POINTS = list(range(27, 36))
RIGHT_EYE_POINTS = list(range(36, 42))
LEFT_EYE_POINTS = list(range(42, 48))
MOUTH_OUTLINE_POINTS = list(range(48, 61))
MOUTH_INNER_POINTS = list(range(61, 68))
EYE_AR_THRESH = 0.25
EYE_AR_CONSEC_FRAMES = 5
counter_blink = 0
total_blink = 0
'''
There are three types of blink
one blink --- Left blink
two blink --- Right blink
three blink --- Select the letter
four blink --- Revert to start
'''
flag_blink_one,flag_blink_two,flag_blink_three,stopflag = 0,0,0,0
count_left,count_right,count_stop = 0,0,0
def eye_aspect_ratio(eye):
    """Return the eye aspect ratio (EAR) for six eye landmarks.

    EAR is the mean of the two vertical landmark distances divided by the
    horizontal distance; it drops toward zero when the eye closes, which
    is what the blink detector thresholds on.
    """
    vertical_a = dist.euclidean(eye[1], eye[5])
    vertical_b = dist.euclidean(eye[2], eye[4])
    horizontal = dist.euclidean(eye[0], eye[3])
    return (vertical_a + vertical_b) / (2.0 * horizontal)
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(PREDICTOR_PATH)
video_capture = cv2.VideoCapture(-1)
image = "base"
text = ""
while True:
ret, frame = video_capture.read()
if ret:
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
rects = detector(gray, 0)
for rect in rects:
x = rect.left()
y = rect.top()
x1 = rect.right()
y1 = rect.bottom()
landmarks = np.matrix([[p.x, p.y] for p in predictor(frame, rect).parts()])
left_eye = landmarks[LEFT_EYE_POINTS]
right_eye = landmarks[RIGHT_EYE_POINTS]
left_eye_hull = cv2.convexHull(left_eye)
right_eye_hull = cv2.convexHull(right_eye)
cv2.drawContours(frame, [left_eye_hull], -1, (0, 255, 0), 1)
cv2.drawContours(frame, [right_eye_hull], -1, (0, 255, 0), 1)
ear_left = eye_aspect_ratio(left_eye)
ear_right = eye_aspect_ratio(right_eye)
# print("****************************************")
# print("Counter Blink : " , counter_blink)
# print("****************************************")
if counter_blink >= 10:
if counter_blink == 10:
flag_blink_one,flag_blink_two,flag_blink_three = 1,0,0
stopflag = 0
duration = 0.05 # seconds
freq = 440 # Hz
os.system('play -nq -t alsa synth {} sine {}'.format(duration, freq))
if counter_blink == 20:
flag_blink_two,flag_blink_one,flag_blink_three = 1,0,0
stopflag = 0
duration = 0.05 # seconds
freq = 440 # Hz
os.system('play -nq -t alsa synth {} sine {}'.format(duration, freq))
if counter_blink == 30:
flag_blink_three,flag_blink_one,flag_blink_two = 1,0,0
stopflag = 0
duration = 0.05 # seconds
freq = 440 # Hz
os.system('play -nq -t alsa synth {} sine {}'.format(duration, freq))
if counter_blink==50:
stopflag = 1
flag_blink_three,flag_blink_one,flag_blink_two = 0,0,0
duration = 0.05 # seconds
freq = 440 # Hz
os.system('play -nq -t alsa synth {} sine {}'.format(duration, freq))
else:
if flag_blink_three == 1:
total_blink += 1
# print("Stop Blink Occured")
counter_blink = 0
count_stop = 1
flag_blink_one,flag_blink_two,flag_blink_three = 0,0,0
count_left = 0
count_right = 0
elif flag_blink_one == 1:
total_blink += 1
# print("Left side blink occured")
counter_blink = 0
flag_blink_one,flag_blink_two,flag_blink_three = 0,0,0
count_left = 1
count_right = 0
count_stop = 0
elif flag_blink_two == 1:
total_blink += 1
# print("Right side blink occured")
counter_blink = 0
flag_blink_one,flag_blink_two,flag_blink_three = 0,0,0
count_left = 0
count_right = 1
count_stop = 0
elif stopflag == 1:
count_left,count_right,count_stop=0,0,0
stopflag = 0
image = 'base'
if ear_left < EYE_AR_THRESH and ear_right < EYE_AR_THRESH:
counter_blink += 1
else:
counter_blink = 0
cv2.putText(frame, "Blink Occured: {}".format(total_blink), (10, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2)
if count_left == 1:
if image == "base":
image = ""
image+='0'
count_left = 0
if count_right == 1:
if image =="base":
image = ""
image+='1'
count_right = 0
if count_stop == 1:
if image == "base":
if len(text)!=0:
text = text[:len(text)-1]
# myobj = gTTS(text="backspace", lang=language, slow=False)
# myobj.save("text.mp3")
engine.say("Backspace")
engine.runAndWait()
else:
text += characterdict[image]
# myobj = gTTS(text=characterdict[image], lang=language, slow=False)
# myobj.save("text.mp3")
engine.say(characterdict[image])
engine.runAndWait()
# print("image is "+image+".jpg")
# do the required action
# os.system("mpg321 text.mp3")
image = "base"
count_stop,count_left,count_right = 0,0,0
if len(image)>4:
image=image[:4]
cv2.namedWindow("KeyBoard", cv2.WINDOW_NORMAL)
cv2.moveWindow("KeyBoard",850,20)
ia = cv2.imread(image+".jpg")
ims = cv2.resizeWindow("KeyBoard",550, 400) # Resize image
cv2.imshow("KeyBoard" , ia)
cv2.namedWindow("Faces", cv2.WINDOW_NORMAL)
cv2.moveWindow("Faces",0,20)
ims = cv2.resizeWindow("Faces",800, 700) # Resize image
cv2.imshow("Faces", frame)
cv2.namedWindow("Typed_Text", cv2.WINDOW_NORMAL)
cv2.moveWindow("Typed_Text",850,500)
draw = cv2.imread("draw.jpg")
cv2.resizeWindow("Typed_Text",550,270)
cv2.putText(draw, "Typed Text: {}".format(text), (20, 90), cv2.FONT_HERSHEY_SIMPLEX, 3, (0, 0, 0), 5)
cv2.imshow("Typed_Text" , draw)
ch = 0xFF & cv2.waitKey(1)
if ch == ord('q'):
break
cv2.destroyAllWindows()
else:
print("You entered wrong choice ")
exit(0)
|
[
"bhavyabordia@gmail.com"
] |
bhavyabordia@gmail.com
|
fec5927f671f48d0494d8758f058e97cbe129c94
|
0353782639974c650fa042e44d75e92bf7be6fc1
|
/instagram/insta/instafeed/views.py
|
cccd2cf5f41d8e68ff52bb8847187c85cb8c062f
|
[] |
no_license
|
jersobh/DigitalMarketing
|
2d31b5c18f0764c4f352947aa34506d63216feeb
|
6fa679bb964e6ad656415e38227e007db2ae0fda
|
refs/heads/master
| 2021-10-23T15:24:01.939739
| 2019-03-18T12:27:21
| 2019-03-18T12:27:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 455
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
# import InstagramAPI
from InstagramAPI.InstagramAPI import InstagramAPI
from django.http import JsonResponse
def index(request):
    """Return the logged-in Instagram account's own feed as JSON.

    Logs in, fetches the self feed, and returns the raw API payload
    (`api.LastJson`); returns an empty JSON object if login fails.
    """
    # WARNING(review): account credentials are hard-coded in source —
    # move to settings/environment variables.
    api = InstagramAPI("jayabal.al", "jayabal9890@insta")
    if(api.login()):
        api.getSelfUserFeed()
        return JsonResponse(api.LastJson)
    return JsonResponse({})
    # return HttpResponse("Hello, world. You're at the polls index.")
# return HttpResponse("Hello, world. You're at the polls index.")
|
[
"noreply@github.com"
] |
noreply@github.com
|
ea0a54fdc36a8a8a37ace27442c6675bd11d2208
|
8facec89b1fded458cf3c40dfe4ed2a6b7af87aa
|
/advanced/class_attributes_management/comparation_with_4_methods_simple/descriptor_implement_improved.py
|
bbe4764040fdbd62f2644e8e08dbff6c19657902
|
[] |
no_license
|
tianwei1992/Python_oop_leaning
|
72bf4c4c0c71cf736bc14912c4aef28642755c80
|
7e0f4e95c0d9bf7aa9fd95fcf37fc86f90ea8db7
|
refs/heads/master
| 2020-03-28T16:55:18.592234
| 2018-12-12T03:06:22
| 2018-12-12T03:06:22
| 148,740,603
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,194
|
py
|
class Powers():
    """Descriptor-protocol demo: `square` and `cube` are class attributes
    whose instance access computes powers of the stored bases.

    NOTE(review): one level of class-body indentation was lost in the
    source dump; the descriptor classes and the `square`/`cube`
    assignments are restored inside the class body — this nesting is
    required for `instance.square` / `instance.cube` to trigger
    `__get__`/`__set__` (the module's own trailing comments say the
    descriptor attributes are defined in the class).
    """

    def __init__(self, square_base, cube_base):
        self._square_base = square_base
        self._cube_base = cube_base

    class SquareDescriptor():
        """Data descriptor: read -> base ** 2; write -> replace the base."""

        def __get__(self, instance, owner):
            if instance is None:
                # Class-level access (Powers.square) yields the descriptor itself.
                return self
            return instance._square_base ** 2

        def __set__(self, instance, value):
            instance._square_base = value

    class CubeDescriptor():
        """Read-only descriptor (no __set__): read -> base ** 3."""

        def __get__(self, instance, owner):
            if instance is None:
                return self
            return instance._cube_base ** 3

    # Class attributes whose attribute access is mediated by the
    # descriptors defined above.
    square = SquareDescriptor()
    cube = CubeDescriptor()
# Demo script exercising the descriptor-backed attributes.
X = Powers(3, 4)
"""Powers.square = 5不会触发SquareDescriptor.__get__方法,而是直接更改Powers.square为一个普通的属性,值为5,这也会影响到所以示例
所以结论:对标识符产生的属性,不要试图从类上面赋值。"""
# (Translation of the note above: assigning `Powers.square = 5` would NOT
# go through the descriptor; it would rebind the class attribute to a
# plain value, affecting every instance — never assign descriptor
# attributes on the class itself.)
print(Powers.square)
print()
print(X.square)  # 3 ** 2 = 9
print(X.cube)  # 4 ** 3 = 64
X.square = 5
print(X.square)  # 5 ** 2 = 25
"""描述符定义的属性在类中定义,是类属性,但是get和set一般对实例用。
直接对类用set相当于覆盖原有属性为普通属性,偶尔对类用get,是类似Powers.square.__doc__的时候"""
# (Translation of the note above: descriptor attributes are class
# attributes; __get__/__set__ are normally exercised through instances.
# Setting on the class replaces the descriptor with a plain attribute;
# class-level __get__ is mainly for introspection such as
# Powers.square.__doc__.)
[
"879983690@qq,com"
] |
879983690@qq,com
|
b1dc61b9b0266ed2642cd5bf9517f09540601de5
|
7abb3d309a011a36247e0b4dcda3759537c45b2c
|
/utils/vb-meta-to-json-topology.py
|
031f8c9a3763b172b8281d83709ffc18311a4b0b
|
[
"BSD-3-Clause"
] |
permissive
|
TomPlano/varbench
|
7937a8a7221117e2d817549eb8ba22746c324869
|
83933380e1876da388dd07a78e554e65f388861b
|
refs/heads/master
| 2020-04-02T14:34:11.376400
| 2018-10-27T19:10:09
| 2018-10-27T19:10:09
| 154,529,766
| 0
| 0
|
BSD-3-Clause
| 2018-10-24T16:01:55
| 2018-10-24T16:01:54
| null |
UTF-8
|
Python
| false
| false
| 2,486
|
py
|
#!/usr/bin/env python
import os
import sys
import getopt
import json
def usage(argv, exit=None):
    # Print the command-line help text; if `exit` is given, terminate the
    # process with that status (None = print and return). Python 2 syntax.
    print "Usage: %s [OPTIONS] <VB metadata file> <VB JSON topology file (output)>" % argv[0]
    print " -h (--help) : print help and exit"
    print " -v (--vbs-path=) : path to VB Stats python module"
    if exit is not None:
        sys.exit(exit)
def parse_cmd_line(argc, argv):
    # Parse the command line; returns a 3-tuple
    # (vb_stats_module_path, metadata_file, json_output_file).
    # Exits via usage() on bad options or a wrong positional-arg count.
    opts = []
    args = []
    # Default VB Stats path: ../vb-stats/ relative to this script.
    cur_path = os.path.dirname(os.path.realpath(__file__))
    vb_path = cur_path + "/../vb-stats/"
    try:
        opts, args = getopt.getopt(
            argv[1:],
            "hv:",
            ["help", "vb-path="]
        )
    except getopt.GetoptError, err:
        print >> sys.stderr, err
        usage(argv, exit=1)
    for o, a in opts:
        if o in ("-h", "--help"):
            usage(argv, exit=0)
        elif o in ("-v", "--vb-path"):
            vb_path = a
        else:
            usage(argv, exit=1)
    # Exactly two positional arguments are required: metadata in, JSON out.
    if len(args) != 2:
        usage(argv, exit=1)
    return vb_path, args[0], args[1]
def main(argc, argv, envp):
    # Convert a VB metadata file into a JSON CPU-topology description.
    # Returns None (implicit) on success; exits via usage() on failure.
    vb_path, meta, json_file = parse_cmd_line(argc, argv)
    procs = []  # NOTE(review): unused — appears to be leftover
    # Try to import vb-path
    try:
        sys.path.insert(0, vb_path)
        from vb_stats import VB_Stats as vbs
    except ImportError:
        print >> sys.stderr, "Could not import VB_Stats. Please specify path to VB_Stats with '--vbs-path'"
        usage(argv, exit=2)
    with vbs(meta, load_data=False) as vb:
        with open(json_file, "w") as f:
            # Total logical processors = sockets * cores * HW threads.
            num_processors = vb.num_sockets_per_node * vb.num_cores_per_socket * vb.num_hw_threads_per_core
            json.dump({
                "processor_info" : {
                    "num_processors" : num_processors,
                    "num_sockets" : vb.num_sockets_per_node,
                    "cores_per_socket" : vb.num_cores_per_socket,
                    "hw_threads_per_core" : vb.num_hw_threads_per_core
                },
                # The format of p: [socket, core, hw_thread, os_core]
                "processor_list" : [
                    {
                        "os_core" : p[3],
                        "socket" : p[0],
                        "core" : p[1],
                        "hw_thread" : p[2]
                    } for p in vb.processor_map
                ]
            }, f, indent=4)
if __name__ == "__main__":
    # Script entry point: forward argv/env to main() and use its return
    # value as the process exit status.
    argv = sys.argv
    argc = len(argv)
    envp = os.environ
    sys.exit(main(argc, argv, envp))
|
[
"brian.kocoloski@wustl.edu"
] |
brian.kocoloski@wustl.edu
|
9a817067b8f27c331d6af99100b2914f84c06935
|
91d8e969facfc4fd7f6002448890d3b5a65fe380
|
/api/openAccountAPI.py
|
3e72f9b26ca14d3c281cc11b74bf130b03a9510a
|
[] |
no_license
|
1105814583/P2P_python
|
4c2f5c20a7514d35d7820835ea88f812bbdb4db3
|
bb3da29f877703d884c6b74cea3fb5e232ed1f65
|
refs/heads/master
| 2023-09-06T02:15:02.376197
| 2021-09-16T10:35:58
| 2021-09-16T10:35:58
| 407,118,848
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 246
|
py
|
import app
class openAccountAPI:
    """Thin client for the P2P trust-account registration endpoint."""

    def __init__(self):
        # Endpoint URL is derived from the project-wide base URL in `app`.
        self.open_account_url = app.BASE_URL + "/trust/trust/register"

    def openAccount(self, session):
        """POST the open-account request with the caller's session and
        return the response object unchanged."""
        response = session.post(self.open_account_url)
        return response
|
[
"1105814583@qq.com"
] |
1105814583@qq.com
|
794969aef4445885500cfd5a79f01106d63b753c
|
12a522cadf20a38f5fd2ad2eb758d40f7bc50f3a
|
/CIS_024C_Python/homeWork/Exercise9/main1.py
|
1737c779ac14ea834b8d03ce97842e8edde3b668
|
[] |
no_license
|
Ry-Mu/cis024c_python
|
f25012ce0e1e58dff05abacda9299083ca63e2ad
|
0b0cd5fc5b425b251bb9ac316e420403a7dfda10
|
refs/heads/master
| 2021-09-02T13:48:54.820508
| 2018-01-03T01:43:08
| 2018-01-03T01:43:08
| 103,226,159
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 164
|
py
|
import sys
def add(n1,n2):
    """Return the sum of n1 and n2."""
    total = n1 + n2
    return total
# Read two integers from stdin and print their sum
# (Python 2: raw_input / print statement).
number1 = int(raw_input("Enter a number:"))
number2 = int(raw_input("Enter a number:"))
print add(number1,number2)
|
[
"ryan.munguia92@gmail.com"
] |
ryan.munguia92@gmail.com
|
150aa7fcfcf1929a708e94bb4cf3c21158724349
|
99e25489e0e504a6e49da4d9398dbfb8c4fe86a4
|
/Leetcode/二叉树/654-最大二叉树-m.py
|
2c6bc91855e00adb4cedcdeb219ef233ec75e6b9
|
[
"Apache-2.0"
] |
permissive
|
JackeyGuo/Algorithms
|
08e5c5a1067c1bf2642241ad635c683c68dff6d3
|
27185d382a891f4667f67701a60c796fa3a6c1ac
|
refs/heads/main
| 2023-03-27T15:26:28.383100
| 2021-03-18T06:27:05
| 2021-03-18T06:27:05
| 326,852,774
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,017
|
py
|
from typing import List
# Definition for a binary tree node.
class TreeNode:
    """Binary-tree node: a value plus optional left/right children."""

    def __init__(self, val=0, left=None, right=None):
        self.right = right
        self.left = left
        self.val = val
class Solution:
    def constructMaximumBinaryTree(self, nums: List[int]) -> TreeNode:
        """Recursively build the "maximum binary tree" of `nums`.

        The (first) largest value becomes the root; the slice to its left
        builds the left subtree and the slice to its right builds the
        right subtree. An empty input yields None.
        """
        if not nums:
            return None
        # Find the maximum and its first position in a single scan.
        pivot, peak = 0, nums[0]
        for i, v in enumerate(nums):
            if v > peak:
                pivot, peak = i, v
        node = TreeNode(peak)
        node.left = self.constructMaximumBinaryTree(nums[:pivot])
        node.right = self.constructMaximumBinaryTree(nums[pivot + 1:])
        return node
# Demo: build the maximum binary tree for a sample array and print the
# root TreeNode's default repr.
print(Solution().constructMaximumBinaryTree([3, 2, 1, 6, 0, 5]))
|
[
"1051347391@qq.com"
] |
1051347391@qq.com
|
17b24db22bb599a33ad96d9be8572468f4ea1b60
|
42c67fdb3b373e0bf677e9d9d1cf770646c75ba6
|
/tests/test_tutorial/test_using_click/test_tutorial003.py
|
eadd93ee9ea77c644c295b94cd9155c13c10334d
|
[
"MIT"
] |
permissive
|
marcosfelt/typer
|
b8ecc8e65c82044076880105d3ecb2ca0d158c25
|
61a0616ea9b7904c2379c464d0f72d5b7bde270e
|
refs/heads/master
| 2023-08-01T03:25:42.417233
| 2021-09-14T11:11:12
| 2021-09-14T11:11:12
| 406,322,395
| 0
| 0
|
MIT
| 2021-09-14T10:43:32
| 2021-09-14T10:29:20
| null |
UTF-8
|
Python
| false
| false
| 868
|
py
|
import subprocess
from click.testing import CliRunner
from docs_src.using_click import tutorial003 as mod
runner = CliRunner()
def test_cli():
    # Invoking with no subcommand should fail with a "missing command"
    # error (Click 8) or print usage (Click 7).
    result = runner.invoke(mod.typer_click_object, [])
    # TODO: when deprecating Click 7, remove second option
    assert "Error: Missing command" in result.stdout or "Usage" in result.stdout
def test_typer():
    # The Typer-defined "top" command is reachable through the combined
    # Click object.
    result = runner.invoke(mod.typer_click_object, ["top"])
    assert "The Typer app is at the top level" in result.stdout
def test_click():
    # The native Click "hello" command accepts its --name option.
    result = runner.invoke(mod.typer_click_object, ["hello", "--name", "Camila"])
    assert "Hello Camila!" in result.stdout
def test_script():
    # Running the example as a script (under coverage, so the subprocess
    # counts toward coverage data) should print the help text.
    result = subprocess.run(
        ["coverage", "run", mod.__file__, "--help"],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        encoding="utf-8",
    )
    assert "Usage" in result.stdout
|
[
"noreply@github.com"
] |
noreply@github.com
|
70eca39c9c9fb18923b83761478de0f263f2fd31
|
fa6caa2382c1f35931153ba0c74ff6555c41c745
|
/backend/base/migrations/0003_product_image.py
|
7be4d74d30250643ca2e68b0d7c8b84ed019b757
|
[] |
no_license
|
devckrishna/Django-React-Ecommerce
|
89c341d0de469ed80939fec9544f56418a09ad90
|
ded75edbff25cfb2bca56c92ae5fce7fcf8afcb6
|
refs/heads/main
| 2023-04-21T22:36:28.720240
| 2021-05-08T17:44:22
| 2021-05-08T17:44:22
| 363,849,001
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 422
|
py
|
# Generated by Django 3.2 on 2021-05-04 14:20
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds the optional `image` field to the
    # Product model. Applied migrations should not be hand-edited.

    dependencies = [
        ('base', '0002_order_orderitem_review_shippingaddress'),
    ]
    operations = [
        migrations.AddField(
            model_name='product',
            name='image',
            field=models.ImageField(blank=True, null=True, upload_to=''),
        ),
    ]
|
[
"dev.c.krishna.123@gmail.com"
] |
dev.c.krishna.123@gmail.com
|
cde113aea88eac4418c8c3aebe85bd0a376b8a61
|
4ac3789c709d1b68a506f183a5b053b1137f02db
|
/src/pilot/transition_probs.py
|
916c70bcd1396194212b0a8b24ca39d5a85b7f26
|
[] |
no_license
|
bdyetton/PSleep
|
5c52d3ddf1ecb5b3caf5fd6abd562007b5a8dc1d
|
9b02cf76f4c63923d1acfbaf32c62fe70ccb42b8
|
refs/heads/master
| 2020-12-01T15:28:23.267808
| 2019-12-29T00:06:18
| 2019-12-29T00:06:18
| 230,681,688
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,895
|
py
|
from mednickdb_pyapi.mednickdb_pyapi import MednickAPI
from mednickdb_pysleep import defaults
import os
import matplotlib.pyplot as plt
import seaborn as sns
import bootstrapped.bootstrap as bs
import bootstrapped.compare_functions as bs_compare
import bootstrapped.stats_functions as bs_stats
import numpy as np
import pandas as pd
# Map from numeric stage code (as indexed by `y_level` in compare_dists)
# to human-readable sleep-stage name.
sleep_stages = {
    0:'wake',
    1:'stage1',
    2:'stage2',
    3:'sws',
    4:'rem'
}
def compare_dists(data, y_var, by_var, y_level=None, by_levels=None, ax=None):
    """Plot per-group distributions of `y_var` and, when exactly two
    groups are present, bootstrap-test whether their means differ.

    Parameters
    ----------
    data : DataFrame containing `y_var` and `by_var` columns.
    y_var : column to plot; each cell may hold a sequence of per-stage
        probabilities (see `y_level`).
    by_var : grouping column.
    y_level : optional index into each `y_var` cell; also used to label
        the title via the module-level `sleep_stages` map.
        NOTE(review): the title lookup fails when y_level is None —
        confirm all callers pass a stage index.
    by_levels : optional whitelist restricting which groups are compared.
    ax : matplotlib axes to draw on.
    """
    levels = data[by_var].unique()
    if by_levels is not None:
        levels = [lev for lev in levels if lev in by_levels]
    levels_data = []
    for lev in levels:
        level_data = data.loc[data[by_var] == lev, y_var].dropna()
        if y_level is not None:
            # Extract the requested stage's probability from each cell.
            level_data = level_data.apply(lambda x: x[y_level]).dropna()
        levels_data.append(level_data.astype(float).values)
    #Runs boostrapped stats test (only defined for exactly two groups)
    is_diff = False
    diff = None  # BUGFIX: previously unbound when len(levels) != 2, so the print below raised NameError
    if len(levels) == 2:
        diff = bs.bootstrap_ab(*levels_data, stat_func=bs_stats.mean, compare_func=bs_compare.percent_change)
        # The CI excluding zero means the percent change is significant.
        is_diff = (diff.lower_bound > 0 or diff.upper_bound < 0)
        if is_diff:
            sns.set_style("dark")
        else:
            sns.set_style("white")
    diff_msg = 'Difference: \nZero not in CI' if is_diff else 'No Difference: \nZero in CI'
    print(diff, '\n', diff_msg)
    # Plotting
    for lev in levels_data:
        sns.distplot(a=lev, ax=ax)
    ax.text(0.3, 0.5, diff_msg, transform=ax.transAxes, size=16, color='r' if is_diff else 'k')
    plt.title(y_var.split('.')[-1]+' to '+sleep_stages[y_level]+' for the Cleveland Family Study by '+by_var.split('.')[-1])
    plt.ylabel('Probability Density')
    plt.legend(levels)
def investigate_trans_probs_by_demographics(data, sleep_stages_to_consider=defaults.stages_to_consider):
    """Reshape per-subject stage-transition probabilities into long
    format and plot them split by sex, ethnicity and an age dichotomy,
    marking bootstrapped group differences with a red '*'.
    """
    data = data.drop(['_id', 'sleep_scoring.sourceid', 'visitid', 'datemodified', 'expired'], axis=1)
    # Dichotomize age at 55; normalize the (trailing-space) ethnicity labels.
    data['demographics.age_cat'] = (data['demographics.age'] > 55).map({True: 'Older', False: 'Younger'})
    data['demographics.ethnicity'] = data['demographics.ethnicity'].map({'white ': 'white', 'black ': 'black'}) #anything else will get nan
    demo_cols = ['subjectid', 'demographics.age_cat', 'demographics.ethnicity', 'demographics.sex']
    trans_probs_cols = ['sleep_scoring.trans_prob_from_' + s for s in sleep_stages_to_consider]
    cols_we_care_about = demo_cols + trans_probs_cols
    data = data.loc[:, cols_we_care_about]
    data = data.set_index(demo_cols)
    from_and_to_data_cont = []
    # Each trans-prob column holds, per row, a vector of probabilities of
    # moving from one stage to each stage; expand to one row per
    # (subject, from_stage, to_stage).
    for trans_probs_col in trans_probs_cols:
        from_data = data.loc[:, trans_probs_col] # keep index
        from_data = from_data.dropna()
        from_and_to_data_np = np.array(from_data.tolist()).astype(float) #not sure why need to conver
        from_and_to_data = pd.DataFrame(from_and_to_data_np, columns=sleep_stages_to_consider)
        from_and_to_data['from_stage'] = trans_probs_col.split('_')[-1]
        from_and_to_data.index = from_data.index
        from_and_to_data = from_and_to_data.reset_index()
        from_and_to_data = from_and_to_data.melt(id_vars=demo_cols+['from_stage'], value_vars=sleep_stages_to_consider, var_name='to_stage', value_name='prob')
        from_and_to_data_cont.append(from_and_to_data)
    all_trans_data = pd.concat(from_and_to_data_cont).reset_index(drop=True)
    # Plot some data
    for by_var in ['demographics.sex', 'demographics.ethnicity', 'demographics.age_cat']:
        data_to_plot = all_trans_data.drop(set(demo_cols)-set([by_var]), axis=1).dropna().reset_index(drop=True)
        sns.catplot(x='to_stage', y='prob', hue=by_var, row="from_stage",
                    data=data_to_plot, kind="violin", split=True, height=1.5, aspect=2.5, legend=False)
        plt.legend(loc='lower right')
        plt.ylim((0, 1))
        for to_and_from_stage, data in data_to_plot.groupby(['from_stage', 'to_stage']):
            from_stage, to_stage = to_and_from_stage[0], to_and_from_stage[1]
            by_data = list(data.groupby(by_var))
            # Bootstrap the between-group percent change; a CI excluding
            # zero is marked as significant with a '*'.
            diff = bs.bootstrap_ab(by_data[0][1]['prob'].values, by_data[1][1]['prob'].values,
                                   stat_func=bs_stats.mean, compare_func=bs_compare.percent_change)
            is_diff = (diff.lower_bound > 0 or diff.upper_bound < 0)
            if is_diff:
                plt.gcf().axes[sleep_stages_to_consider.index(from_stage)].text(y=0, x=sleep_stages_to_consider.index(to_stage) - 0.1, s='*', color='r', fontsize=18)
    plt.show()
if __name__ == '__main__':
    # Credentials come from the environment (raises KeyError if unset).
    med_api = MednickAPI(username=os.environ['mednickapi_username'], password=os.environ['mednickapi_password'])
    #Get the data, so easy :)
    data = med_api.get_data('studyid=NSRR_CFS', format_as='dataframe_single_index')
    print('Got', data.shape[0], 'records')
    investigate_trans_probs_by_demographics(data)
|
[
"bdyetton@gmail.com"
] |
bdyetton@gmail.com
|
d97160120fe344b6a36a79e9f1c2c576b060b8b9
|
45623eab5e69c0f2a3d7f0a141d112d0d35790f9
|
/ssbench/ordered_dict.py
|
d5f0aca7471d01e9cc56f2c1f2d1beb144d2e2df
|
[
"Apache-2.0"
] |
permissive
|
peteryj/ssbench
|
e73c2a77d597152877c7b3a022ce3fa77363beef
|
4a1766f8e3287cb0dafa559d24f6a51d64950efc
|
refs/heads/master
| 2020-12-03T03:47:13.510496
| 2016-03-23T06:37:39
| 2016-03-23T06:37:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,058
|
py
|
# {{{ http://code.activestate.com/recipes/576693/ (r9)
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and
# pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
    'Dictionary that remembers insertion order'
    # An inherited dict maps keys to values.
    # The inherited dict provides __getitem__, __len__, __contains__, and get.
    # The remaining methods are order-aware.
    # Big-O running times for all methods are the same as for regular
    # dictionaries.
    # The internal self.__map dictionary maps keys to links in a doubly linked
    # list.
    # The circular doubly linked list starts and ends with a sentinel element.
    # The sentinel element never gets deleted (this simplifies the algorithm).
    # Each link is stored as a list of length three: [PREV, NEXT, KEY].

    def __init__(self, *args, **kwds):
        '''Initialize an ordered dictionary. Signature is the same as for
        regular dictionaries, but keyword arguments are not recommended
        because their insertion order is arbitrary.
        '''
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            # Guard against re-initialization wiping existing order state.
            self.__root
        except AttributeError:
            self.__root = root = []  # sentinel node
            root[:] = [root, root, None]
            self.__map = {}
        self.__update(*args, **kwds)

    def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
        'od.__setitem__(i, y) <==> od[i]=y'
        # Setting a new item creates a new link which goes at the end of the
        # linked list, and the inherited dictionary is updated with the new
        # key/value pair.
        if key not in self:
            root = self.__root
            last = root[0]
            last[1] = root[0] = self.__map[key] = [last, root, key]
        dict_setitem(self, key, value)

    def __delitem__(self, key, dict_delitem=dict.__delitem__):
        'od.__delitem__(y) <==> del od[y]'
        # Deleting an existing item uses self.__map to find the link which is
        # then removed by updating the links in the predecessor and successor
        # nodes.
        dict_delitem(self, key)
        link_prev, link_next, key = self.__map.pop(key)
        link_prev[1] = link_next
        link_next[0] = link_prev

    def __iter__(self):
        'od.__iter__() <==> iter(od)'
        # Walk the linked list forward, yielding keys in insertion order.
        root = self.__root
        curr = root[1]
        while curr is not root:
            yield curr[2]
            curr = curr[1]

    def __reversed__(self):
        'od.__reversed__() <==> reversed(od)'
        # Walk the linked list backward.
        root = self.__root
        curr = root[0]
        while curr is not root:
            yield curr[2]
            curr = curr[0]

    def clear(self):
        'od.clear() -> None. Remove all items from od.'
        try:
            # Break the link cycles so the garbage collector isn't needed.
            for node in self.__map.itervalues():
                del node[:]
            root = self.__root
            root[:] = [root, root, None]
            self.__map.clear()
        except AttributeError:
            pass
        dict.clear(self)

    def popitem(self, last=True):
        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
        Pairs are returned in LIFO order if last is true or FIFO order if
        false.
        '''
        if not self:
            raise KeyError('dictionary is empty')
        root = self.__root
        if last:
            # Unlink the last (most recently inserted) node.
            link = root[0]
            link_prev = link[0]
            link_prev[1] = root
            root[0] = link_prev
        else:
            # Unlink the first (oldest) node.
            link = root[1]
            link_next = link[1]
            root[1] = link_next
            link_next[0] = root
        key = link[2]
        del self.__map[key]
        value = dict.pop(self, key)
        return key, value

    # -- the following methods do not depend on the internal structure --

    def keys(self):
        'od.keys() -> list of keys in od'
        return list(self)

    def values(self):
        'od.values() -> list of values in od'
        return [self[key] for key in self]

    def items(self):
        'od.items() -> list of (key, value) pairs in od'
        return [(key, self[key]) for key in self]

    def iterkeys(self):
        'od.iterkeys() -> an iterator over the keys in od'
        return iter(self)

    def itervalues(self):
        'od.itervalues -> an iterator over the values in od'
        for k in self:
            yield self[k]

    def iteritems(self):
        'od.iteritems -> an iterator over the (key, value) items in od'
        for k in self:
            yield (k, self[k])

    def update(*args, **kwds):
        '''od.update(E, **F) -> None. Update od from dict/iterable E and F.
        If E is a dict instance, does: for k in E: od[k] = E[k]
        If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
        Or if E is an iterable of items, does: for k, v in E: od[k] = v
        In either case, this is followed by: for k, v in F.items(): od[k] = v
        '''
        # `self` is extracted from *args so that a key literally named
        # "self" can still be passed as a keyword argument.
        if len(args) > 2:
            raise TypeError('update() takes at most 2 positional '
                            'arguments (%d given)' % (len(args),))
        elif not args:
            raise TypeError('update() takes at least 1 argument (0 given)')
        self = args[0]
        # Make progressively weaker assumptions about "other"
        other = ()
        if len(args) == 2:
            other = args[1]
        if isinstance(other, dict):
            for key in other:
                self[key] = other[key]
        elif hasattr(other, 'keys'):
            for key in other.keys():
                self[key] = other[key]
        else:
            for key, value in other:
                self[key] = value
        for key, value in kwds.items():
            self[key] = value

    __update = update # let subclasses override update w/o breaking __init__

    __marker = object()

    def pop(self, key, default=__marker):
        '''od.pop(k[,d]) -> v, remove specified key and return the
        corresponding value.
        If key is not found, d is returned if given, otherwise KeyError is
        raised.
        '''
        if key in self:
            result = self[key]
            del self[key]
            return result
        if default is self.__marker:
            raise KeyError(key)
        return default

    def setdefault(self, key, default=None):
        'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
        if key in self:
            return self[key]
        self[key] = default
        return default

    def __repr__(self, _repr_running=None):
        'od.__repr__() <==> repr(od)'
        # (thread id, object id) guards against infinite recursion when
        # the dict (directly or indirectly) contains itself.
        call_key = id(self), _get_ident()
        if _repr_running is None:
            _repr_running = {}
        if call_key in _repr_running:
            return '...'
        _repr_running[call_key] = 1
        try:
            if not self:
                return '%s()' % (self.__class__.__name__,)
            return '%s(%r)' % (self.__class__.__name__, self.items())
        finally:
            del _repr_running[call_key]

    def __reduce__(self):
        'Return state information for pickling'
        items = [[k, self[k]] for k in self]
        inst_dict = vars(self).copy()
        # Strip the implementation attributes; order is re-created from
        # `items` on unpickle.
        for k in vars(OrderedDict()):
            inst_dict.pop(k, None)
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)

    def copy(self):
        'od.copy() -> a shallow copy of od'
        return self.__class__(self)

    @classmethod
    def fromkeys(cls, iterable, value=None):
        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
        and values equal to v (which defaults to None).
        '''
        d = cls()
        for key in iterable:
            d[key] = value
        return d

    def __eq__(self, other):
        '''od.__eq__(y) <==> od==y. Comparison to another OD is
        order-sensitive while comparison to a regular mapping is
        order-insensitive.
        '''
        if isinstance(other, OrderedDict):
            return len(self) == len(other) and self.items() == other.items()
        return dict.__eq__(self, other)

    def __ne__(self, other):
        return not self == other

    # -- the following methods are only used in Python 2.7 --

    def viewkeys(self):
        "od.viewkeys() -> a set-like object providing a view on od's keys"
        return KeysView(self)

    def viewvalues(self):
        "od.viewvalues() -> an object providing a view on od's values"
        return ValuesView(self)

    def viewitems(self):
        "od.viewitems() -> a set-like object providing a view on od's items"
        return ItemsView(self)
|
[
"darrell@swiftstack.com"
] |
darrell@swiftstack.com
|
4077ee7230fdd5fcb8bf27ad4eec1e47ecf60567
|
3d19e1a316de4d6d96471c64332fff7acfaf1308
|
/Users/J/JasonSanford/great_american_beer_festival.py
|
ccc57650e41049eee111fa8bbfab0a4bd1f01ccf
|
[] |
no_license
|
BerilBBJ/scraperwiki-scraper-vault
|
4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc
|
65ea6a943cc348a9caf3782b900b36446f7e137d
|
refs/heads/master
| 2021-12-02T23:55:58.481210
| 2013-09-30T17:02:59
| 2013-09-30T17:02:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,020
|
py
|
import scraperwiki
import lxml.html

# Scrape the 2012 Great American Beer Festival brewery table and store
# one row per brewery in the ScraperWiki SQLite store.
#
# BUGFIX: the file previously contained the entire script twice,
# concatenated without a newline ("i += 1import scraperwiki"), which is
# a syntax error; a single copy is kept.
html = scraperwiki.scrape("http://www.greatamericanbeerfestival.com/at-the-festival/breweries-at-the-2012-festival")
root = lxml.html.fromstring(html)

i = 1  # synthetic primary key — the table has no natural id column
for tr in root.cssselect("#brewery_table tbody tr"):
    tds = tr.cssselect("td")
    data = {
        'id' : i,
        'name' : tds[0].text_content(),
        'city' : tds[1].text_content(),
        'state' : tds[2].text_content(),
    }
    # Upsert keyed on 'id' so re-runs overwrite rather than duplicate.
    scraperwiki.sqlite.save(unique_keys=['id'], data=data)
    i += 1
|
[
"pallih@kaninka.net"
] |
pallih@kaninka.net
|
16f6244485e0802abe75dcdcc1068f2bde02f77f
|
70da894645a6f3fe362a60de843b1998e2d619eb
|
/Questao7.py
|
839a87331b7746d5e1badb8dbfcb0b2368f9e6e3
|
[] |
no_license
|
marcelorvergara/AT_python
|
2ed9ff3a782ec7b13f1f05909870d7a9013fb20b
|
77cfc84e9e1b624e45a2e3f45e0bb99b32170f68
|
refs/heads/main
| 2023-08-05T00:45:27.688566
| 2021-09-20T16:27:51
| 2021-09-20T16:27:51
| 408,164,732
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,329
|
py
|
import threading
import requests
class Questao7(threading.Thread):
    """Download the Winter Olympics medals CSV and report, for Sweden,
    Denmark and Norway, every 21st-century gold medal in curling,
    skating, skiing and ice hockey, plus which country won the most.

    NOTE(review): all work happens in __init__, not run(), so the
    threading.Thread base class has no effect here — confirm intent.
    """

    def __init__(self):
        super().__init__()
        url = 'https://sites.google.com/site/dr2fundamentospython/arquivos/Winter_Olympics_Medals.csv'
        requisicao = requests.get(url, timeout=5)
        if requisicao.status_code != 200:
            requisicao.raise_for_status()
        else:
            print("Conectado")
        csv = requisicao.text
        linhas = csv.splitlines()

        # Per-country gold-medal counters and detail lines.
        contagem = {'SWE': 0, 'DEN': 0, 'NOR': 0}
        medalhas = {'SWE': [], 'DEN': [], 'NOR': []}
        esportes = ('Curling', 'Skating', 'Skiing', 'Ice Hockey')

        for ln in range(1, len(linhas)):  # skip the CSV header row
            colunas = linhas[ln].split(',')
            # somente séc. XXI, modalidades selecionadas, medalhas de ouro
            if int(colunas[0]) > 2000 and colunas[2] in esportes and colunas[7] == 'Gold':
                gen = 'masculino' if colunas[6] == 'M' else 'feminino'
                pais = colunas[4]
                if pais in contagem:
                    contagem[pais] += 1
                    medalhas[pais].append('Esporte: ' + colunas[2] + ' Ano: ' + colunas[0] + ' Cidade: ' + colunas[
                        1] + ' Gênero: ' + gen)

        # BUGFIX: the original if/if/else chain could report the wrong
        # country — the second `if` overwrote the first's result and the
        # `else` paired only with the second `if`. Select the true
        # maximum instead (ties resolved in SWE/DEN/NOR order).
        nomes = {'SWE': 'Suecia', 'DEN': 'Dinamarca', 'NOR': 'Noruega'}
        vencedor = max(contagem, key=contagem.get)
        maior = nomes[vencedor]
        num_medalhas = contagem[vencedor]

        print('\nO país com o maior número de medalhas ouro nas modalidades especificadas é a', maior, 'com', num_medalhas, 'medalhas')
        print('\nRelatório dos países Suécia, Dinamarca e Noruega referente as medalhas ouro nos esportes Curling, Patinação no gelo, Esqui e Hóquei sobre o gelo no século XXI')
        # One report section per country, in the original output order.
        for codigo, titulo in (('SWE', 'Suécia'), ('DEN', 'Dinamarca'), ('NOR', 'Noruega')):
            print('\n' + titulo + ':\n')
            if medalhas[codigo]:
                for ln in medalhas[codigo]:
                    print(ln)
            else:
                print('Não obteve medalhas de ouro')
|
[
"marcelorv@gmail.com"
] |
marcelorv@gmail.com
|
632a0ef0ecdbdc4a907c6df0aa1e539704695ae4
|
429a416abc7def45f7f6dc186ef46554081e5dee
|
/tensormorph/zzz/affix_test_old.py
|
80fb7bc009319ed8f672eb13ec1bbee20979c1e1
|
[] |
no_license
|
colincwilson/tensormorph
|
9de8c1f0e6639c974d5b799e0712bca79ce639ad
|
c3a6fc9dac643e7600f2a177366a4c405c8013f2
|
refs/heads/main
| 2022-02-14T03:34:04.577317
| 2021-10-01T13:35:58
| 2021-10-01T13:35:58
| 147,841,723
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,386
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Stand-alone experiment script: trains an Affixer model on one of the
bundled affixation datasets and evaluates it.

Everything after the first ``sys.exit(0)`` is legacy ("OLD CODE") kept for
reference only — it is unreachable and references names (``tpr``, ``torch``,
``string2tpr``, module-level ``trainer``) that are not imported here,
presumably from an older module layout.  TODO(review): confirm before reviving.
"""

import argparse, re, sys
from tensormorphy import environ, evaluator, phon_features
from tensormorphy.segment_embedder import SegmentEmbedder
from tensormorphy.form_embedder import FormEmbedder
from tensormorphy.dataset import DataSet
from tensormorphy.affixer import Affixer
from tensormorphy.trainer import Trainer
from affix_test_cases import import_data
import pandas as pd
import numpy as np

# parse commandline arguments
# NOTE: these args are only consumed by the dead code below; the live path
# uses a hard-coded epoch count.
argparser = argparse.ArgumentParser()
argparser.add_argument('--nbatch',\
    help='Number of <input,output> pairs in each batch')
argparser.add_argument('--nepoch',\
    help='Number of training epochs')
args, residue = argparser.parse_known_args()

# select dataset (xxx make commandline argument)
# Index 4 selects 'chamorro_um' from the hard-coded list.
data_select = ['english_ing', 'english_ness', 'english_un',\
    'english_shm', 'chamorro_um', 'hungarian_dat',\
    'hebrew_paal', 'hindi_nouns', 'maltese', 'conll'][4]
data = import_data(data_select)
data_set = DataSet( data['dat'],
                    data['held_in_stems'],
                    data['held_out_stems'],
                    data['vowels']
)
# Feature file path is user-specific; index 0 selects hayes_features.csv.
feature_file = '~/Dropbox/TensorProductStringToStringMapping/00features/' +\
    ['hayes_features.csv', 'panphon_ipa_bases.csv'][0]
feature_matrix = phon_features.import_features(feature_file, data_set.segments)
symbol_params = {'feature_matrix': feature_matrix, }
# +4 role slots beyond the longest form — presumably for boundary/affix
# symbols; verify against FormEmbedder.
role_params = {'nrole': data_set.max_len+4, }
form_embedder = FormEmbedder(symbol_params, role_params)

environ.init(form_embedder) # makes dummy morphosyn_embedder
data_set.split_and_embed(test_size=0.25)
model = Affixer()
trainer = Trainer(model)
environ.config.nepoch = 1500
trainer.train(data_set)
train_pred, test_pred =\
    evaluator.evaluate(model, data_set)
sys.exit(0)

# # # # # OLD CODE # # # # #
# Unreachable from here on (see module docstring).
seq_embedder, morph_embedder, train, test = import_data(data_select)
tpr.init(seq_embedder, morph_embedder)
print('filler dimensionality:', tpr.dfill)
print('role dimensionality:', tpr.drole)
print('distributed roles?', tpr.random_roles)
print('train/test split:')
print('\t', len(train), 'training examples')
print('\t', len(test), 'testing examples')

# run trainer
tpr.save_dir = '/Users/colin/Desktop/tmorph_output'
nbatch = min(40,len(train)) if args.nbatch is None else int(args.nbatch)
nepoch = 1000 if args.nepoch is None else int(args.nepoch)
trainer = trainer.Trainer( redup=False, lr=1.0e-1, dc=0.0, verbosity=1 )
affixer, decoder = trainer.train_and_test( train, test, nbatch=nbatch, max_epochs=nepoch )

if False:
    # One-off tracing run on a two-example slice; dumps traces to disk.
    tpr.trace = True
    train = train.iloc[0:2].reset_index()
    test = test.iloc[0:2].reset_index()
    train.stem, train.output = u't r i s t i', u't r u m i s t i'
    trainer.train_and_test1(train, test, nbatch=len(train))
    print(tpr.traces)
    for x in tpr.traces:
        f = '/Users/colin/Desktop/dump/'+ x +'.txt'
        y = tpr.traces[x]
        print(y.__class__.__name__)
        if type(y) is np.ndarray:
            np.savetxt(f, y, delimiter=',')
        else:
            print(x, y)

if False: # test by hand
    # Manually drives the affixer with hand-built copy/pivot/unpivot masks
    # and pretty-prints the stem/affix alignment.
    trainer.affixer.morph_attender.tau.data[:] = 5.0
    trainer.affixer.posn_attender.tau.data[:] = 5.0
    Stems = string2tpr(u'q a f a ts').unsqueeze(0)
    Affix = string2tpr(u't i o ⋉', False).unsqueeze(0)
    copy = torch.ones(tpr.nrole).unsqueeze(0)
    pivot = torch.zeros(tpr.nrole).unsqueeze(0)
    unpivot = torch.zeros(tpr.nrole).unsqueeze(0)
    copy[0,2] = copy[0,4] = 0.0
    pivot[0,0] = pivot[0,3] = 1.0
    unpivot[0,1] = unpivot[0,2] = 1.0
    test = {\
        'affix': Affix,\
        'copy': copy,\
        'pivot': pivot,\
        'unpivot': unpivot\
    }
    output, traces = trainer.affixer(Stems, 10, True, test)
    stem = trainer.decoder.decode(Stems)[0]
    affix = trainer.decoder.decode(Affix)[0]
    stem = [x+' _' if pivot[0,i]==1.0 else x for i,x in enumerate(stem.split(' '))]
    stem = [x+'/' if copy[0,i]==0.0 else x for i,x in enumerate(stem)]
    affix = [x+' _' if i<25 and unpivot[0,i]==1.0 else x for i,x in enumerate(affix.split(' '))]
    stem = ' '.join(stem)
    affix = ' '.join(affix)
    output = ' '.join(trainer.decoder.decode(output))
    print('stem:', stem)
    print('affix:', affix)
    print(' -> ')
    print('output: ', output)
    for trace in traces:
        print(trace, np.round(traces[trace], 2))

sys.exit(0)
|
[
"colin.chris.wilson@gmail.com"
] |
colin.chris.wilson@gmail.com
|
4dc75a5c5ad9b9adc0eee92205b2a3ec96120685
|
1a220abd21c56728aa3368534506bfc9ced8ad46
|
/프로그래머스/lv0/120862. 최댓값 만들기 (2)/최댓값 만들기 (2).py
|
2150e823f28bad1d9f1692f23f12517ff6e88e54
|
[] |
no_license
|
JeonJe/Algorithm
|
0ff0cbf47900e7877be077e1ffeee0c1cd50639a
|
6f8da6dbeef350f71b7c297502a37f87eb7d0823
|
refs/heads/main
| 2023-08-23T11:08:17.781953
| 2023-08-23T08:31:41
| 2023-08-23T08:31:41
| 197,085,186
| 0
| 0
| null | 2023-02-21T03:26:41
| 2019-07-15T23:22:55
|
Python
|
UTF-8
|
Python
| false
| false
| 630
|
py
|
def solution(numbers):
    """Return the maximum product of any two distinct entries of *numbers*.

    Negative and non-negative values are partitioned and sorted; the best
    product is either the two largest non-negatives, the two smallest
    negatives, or — when only one of each exists — their (forced) product.
    """
    negative = [x for x in numbers if x < 0]
    positive = [x for x in numbers if x >= 0]
    negative.sort()
    positive.sort()

    # Seed with the same sentinel the original used so degenerate inputs
    # (fewer than two usable values) behave identically.
    candidates = [-1e9]
    if len(positive) == 1 and len(negative) == 1:
        # Only one value of each sign: the mixed product is forced.
        candidates.append(positive[-1] * negative[0])
    if len(positive) >= 2:
        candidates.append(positive[-1] * positive[-2])
    if len(negative) >= 2:
        candidates.append(negative[0] * negative[1])
    return max(candidates)
|
[
"43032391+JeonJe@users.noreply.github.com"
] |
43032391+JeonJe@users.noreply.github.com
|
1c43edf94a27aa6141c51da6fce81d5fda5a3967
|
49c137c3df08de22759879b9aaf61318a073b997
|
/vacancy/migrations/0003_auto_20190404_0502.py
|
98f0f4cbaeb1b1003546462fe38e4127899c1030
|
[] |
no_license
|
ayush024/hrms
|
c7da0068a3df57340f82457e20d4d769d15aba4e
|
31c8f85e4ab5730191334561cdbddf386aafa0a7
|
refs/heads/master
| 2020-05-05T13:34:29.954270
| 2019-04-25T18:11:07
| 2019-04-25T18:11:07
| 180,083,977
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 686
|
py
|
# Generated by Django 2.2 on 2019-04-04 05:02
from django.db import migrations, models
class Migration(migrations.Migration):
    """Give the Jobs perk flags a default value.

    Alters ``fooding``, ``insurance`` and ``lodging`` on the ``jobs`` model
    so each is a ``BooleanField`` with ``default=0`` (False).
    """

    # Must apply after the previous vacancy migration.
    dependencies = [
        ('vacancy', '0002_auto_20190331_1938'),
    ]

    operations = [
        migrations.AlterField(
            model_name='jobs',
            name='fooding',
            field=models.BooleanField(default=0),
        ),
        migrations.AlterField(
            model_name='jobs',
            name='insurance',
            field=models.BooleanField(default=0),
        ),
        migrations.AlterField(
            model_name='jobs',
            name='lodging',
            field=models.BooleanField(default=0),
        ),
    ]
|
[
"aayushdhakal360@gmail.com"
] |
aayushdhakal360@gmail.com
|
36c925efa563932cdec64b3abb5f6ee5eacb4c01
|
829af66682d29e0c2dd70651d034fc28883a40ab
|
/Coursera_Capstone.py
|
fca8d3e509269d8baa0b8b370c0c6dd193201d0c
|
[] |
no_license
|
jschuler04/Coursera_Capstone
|
d8e39b5532f262ea2ff16c38085c8dafbf97e95b
|
f89af9b27b1c184fe7ead50a756ea400bdb0b33f
|
refs/heads/main
| 2023-06-03T06:08:06.984789
| 2021-06-18T20:53:02
| 2021-06-18T20:53:02
| 375,830,469
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 204
|
py
|
#!/usr/bin/env python
# coding: utf-8

# In[1]:

#/this notebook will be mainly used for the Coursera Capstone project.
# Placeholder cell exported from Jupyter: imports the data-analysis stack
# (unused for now) and prints a greeting to confirm the environment works.
import pandas as pd
import numpy as np

print("Hello Capstone Project Course!")
|
[
"noreply@github.com"
] |
noreply@github.com
|
54b0885bfde6ed3aa0813d94f067a252a79a5d94
|
56ce881112d04617795c00b7e6270efc732894e0
|
/adserver/partner/models.py
|
c09c40e2335b7e103b58f3e6540c608e1c4b2af5
|
[] |
no_license
|
kontinuity/papps
|
0fb515d5ee4300e250a03dfbc326b06f5745613c
|
f40315b5106c7f9c24cab3dff3bd1199081dc617
|
refs/heads/master
| 2020-05-03T11:32:07.456114
| 2010-09-03T09:50:40
| 2010-09-03T09:50:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,113
|
py
|
from django.db import models
from adserver.partner.settings import *
from django.contrib.auth.models import User
from django.db.models.signals import post_save
class Partner(models.Model):
    """Profile for a hosting/reseller partner, linked one-to-one to a User.

    Choice lists and their defaults come from ``adserver.partner.settings``.
    """

    company_name = models.CharField(max_length=255)
    # Company category; when "other" is chosen the free-text field below
    # holds the description.
    company_type = models.PositiveIntegerField(choices=COMPANY_TYPE_CHOICES, default=COMPANY_TYPE_DEFAULT)
    company_type_other = models.CharField(max_length=255, blank=True, null=True)
    number_of_domains = models.PositiveIntegerField(blank=True, null=True)
    # Hosting control panel in use; "other" pairs with the free-text field.
    hosting_control_panel = models.PositiveIntegerField(choices=HOSTING_CONTROL_PANEL_CHOICES, default=HOSTING_CONTROL_PANEL_DEFAULT)
    hosting_control_panel_other = models.CharField(max_length=255, blank=True, null=True)
    webmail = models.PositiveIntegerField(choices=WEBMAIL_CHOICES, default=WEBMAIL_DEFAULT)
    number_of_users = models.PositiveIntegerField(blank=True, null=True)
    # Each partner profile belongs to exactly one auth user.
    user = models.OneToOneField(User)

# NOTE(review): post_save signal that would auto-create a Partner for every
# new User is left disabled — Partner rows are presumably created elsewhere;
# verify before enabling.
#def create_partner(sender, instance, created, **kwargs):
#    if created:
#        profile, created = Partner.objects.get_or_create(user=instance)
#
#post_save.connect(create_partner, sender=User)
|
[
"arif.a@directi.com"
] |
arif.a@directi.com
|
36d6fbac09d283afec24203a8c80c252d0e04c93
|
d7016f69993570a1c55974582cda899ff70907ec
|
/sdk/recoveryservices/azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/activestamp/aio/operations/_backup_protection_containers_operations.py
|
977185266eeefe7c362fb3931a8a7fd029b3b0e0
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
kurtzeborn/azure-sdk-for-python
|
51ca636ad26ca51bc0c9e6865332781787e6f882
|
b23e71b289c71f179b9cf9b8c75b1922833a542a
|
refs/heads/main
| 2023-03-21T14:19:50.299852
| 2023-02-15T13:30:47
| 2023-02-15T13:30:47
| 157,927,277
| 0
| 0
|
MIT
| 2022-07-19T08:05:23
| 2018-11-16T22:15:30
|
Python
|
UTF-8
|
Python
| false
| false
| 6,974
|
py
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._backup_protection_containers_operations import build_list_request
from .._vendor import RecoveryServicesBackupClientMixinABC
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class BackupProtectionContainersOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.recoveryservicesbackup.activestamp.aio.RecoveryServicesBackupClient`'s
        :attr:`backup_protection_containers` attribute.
    """

    models = _models

    def __init__(self, *args, **kwargs) -> None:
        # Client plumbing is injected positionally by the generated service
        # client, or by keyword when constructed directly.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @distributed_trace
    def list(
        self, vault_name: str, resource_group_name: str, filter: Optional[str] = None, **kwargs: Any
    ) -> AsyncIterable["_models.ProtectionContainerResource"]:
        """Lists the containers registered to Recovery Services Vault.

        :param vault_name: The name of the recovery services vault. Required.
        :type vault_name: str
        :param resource_group_name: The name of the resource group where the recovery services vault is
         present. Required.
        :type resource_group_name: str
        :param filter: OData filter options. Default value is None.
        :type filter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ProtectionContainerResource or the result of
         cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.recoveryservicesbackup.activestamp.models.ProtectionContainerResource]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: Literal["2023-01-01"] = kwargs.pop(
            "api_version", _params.pop("api-version", self._config.api_version)
        )
        cls: ClsType[_models.ProtectionContainerResourceList] = kwargs.pop("cls", None)

        # Map "interesting" HTTP statuses to typed azure-core exceptions;
        # callers may extend/override via the error_map kwarg.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page: build the canonical LIST request. Later pages:
            # re-issue the server-supplied next_link, re-applying the
            # client's api-version query parameter.
            if not next_link:
                request = build_list_request(
                    vault_name=vault_name,
                    resource_group_name=resource_group_name,
                    subscription_id=self._config.subscription_id,
                    filter=filter,
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)

            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and hand back (next_link, items) to the pager.
            deserialized = self._deserialize("ProtectionContainerResourceList", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch one page through the client's pipeline; anything but 200
            # is mapped/raised as an azure-core error.
            request = prepare_request(next_link)

            pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(get_next, extract_data)

    list.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupProtectionContainers"
    }
|
[
"noreply@github.com"
] |
noreply@github.com
|
6c99d787a87a797b6e5c6afcd4673e6a93bcfa66
|
c1fe9f7093c68d26eed55ceee4769878e8aa6c05
|
/reverse-string.py
|
bc9af688c40e4d38a1949faf89d903cdf39069e6
|
[] |
no_license
|
aadilzbhatti/Small-Challenges
|
781e7b04614d734c176f2d14a61663304316bda5
|
0768974c3c3e5b683e92f7a9cd723dc0456ee55c
|
refs/heads/master
| 2021-11-23T17:24:46.222842
| 2015-02-07T02:21:55
| 2015-02-07T02:21:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 150
|
py
|
# reverse a string
#
# Reads one line from stdin and prints it reversed.  Uses slice indexing
# (text[::-1]) instead of the original per-character concatenation, which
# rebuilt the string every iteration (quadratic time) and shadowed the
# builtin name `str`.
text = input("Enter the string to be reversed: ")
print(text[::-1])
|
[
"aadilzbhatti@gmail.com"
] |
aadilzbhatti@gmail.com
|
816b87e9a417a4578c92d360b24184834f8c149f
|
1ee27186cf26b646fb231b6e23a28f00959f3ae2
|
/part1_WebScraping.py
|
a1c31a4f1c1f7d0aa1a0f008f9e8268a41460138
|
[] |
no_license
|
A-Yaghoubian/Web-scraping-in-markets-with-predict
|
714dc5da72dc87354867d305ff330380312f0fef
|
aa0c43595b1f2dc9c6920edeea8e94b8bbb0f0ea
|
refs/heads/main
| 2023-07-06T14:03:41.857711
| 2021-04-07T12:22:44
| 2021-04-07T12:22:44
| 355,533,381
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,364
|
py
|
"""Scrape brand/name/price of men's tee-shirts from digistyle.com and store
them in the local `DigiStyle` MySQL database (table `Digistyle`)."""
import requests
from bs4 import BeautifulSoup
import mysql.connector

print('Zakhire kardan Brand-Name-Price az site DIGISTYLE dakhel database')
print()
print('INFORMATION YOUR DATABASE')

u = input('Please enter your user of database : ')
p = input('Please enter your pass of database : ')
h = input('Please enter your host of database : ')

cnx = mysql.connector.connect(user=u, password=p, host=h, database='DigiStyle')
# print ('connected to db :)')
cursor = cnx.cursor()

# Fixes over the original script:
# - dropped the never-used newBrand/newName/newPrice lists
# - distinct loop variables (the inner loop reused `i`, and the price
#   variable clobbered the database password `p`)
# - tag text extracted with get_text() instead of brittle fixed-offset
#   string slicing of the tag's HTML
# - zip() over the three tag lists instead of assuming exactly 36 items,
#   which raised IndexError on short pages
for page in range(1, 63):  # WARNING FOR 2 OR 63
    r = requests.get('https://www.digistyle.com/category-men-tee-shirts-and-polos/?pageno=%s&sortby=4' % page)
    soup = BeautifulSoup(r.text, 'html.parser')
    brand = soup.find_all('span', attrs={'class': 'c-product-item__brand'})
    name = soup.find_all('span', attrs={'class': 'c-product-item__name'})
    price = soup.find_all('span', attrs={'class': 'c-product-item__price-value'})
    for brand_tag, name_tag, price_tag in zip(brand, name, price):
        sql = 'INSERT INTO Digistyle (brand_of_product, name_of_product, price_of_product) VALUES (%s, %s, %s)'
        # Parameterized query — values are escaped by the connector.
        val = (brand_tag.get_text(), name_tag.get_text(), price_tag.get_text())
        cursor.execute(sql, val)
    # Commit once per page so a crash loses at most one page of rows.
    cnx.commit()

cnx.close()
|
[
"noreply@github.com"
] |
noreply@github.com
|
3a327e8deeee0893aff957cfbbfef88f202634b2
|
76c0ed303ddf6f3afa3fe08592e70f497ab87e59
|
/ml3d/tf/models/point_rcnn.py
|
01491599862703ac453096c09419fc1ee0fc95da
|
[
"MIT"
] |
permissive
|
kukuruza/Open3D-ML
|
e4a044dbb56141a2df6b4a5218b7d01aa0250893
|
412746326836f7e1e153485ed1d4939046355c94
|
refs/heads/master
| 2023-07-16T22:09:01.797548
| 2021-07-29T15:05:59
| 2021-07-29T15:05:59
| 394,699,342
| 0
| 0
|
NOASSERTION
| 2021-08-10T15:13:02
| 2021-08-10T15:13:00
| null |
UTF-8
|
Python
| false
| false
| 73,879
|
py
|
import tensorflow as tf
import numpy as np
import os
import pickle
from .base_model_objdet import BaseModel
from ..modules.losses.smooth_L1 import SmoothL1Loss
from ..modules.losses.focal_loss import FocalLoss
from ..modules.losses.cross_entropy import CrossEntropyLoss
from ..modules.pointnet import Pointnet2MSG, PointnetSAModule
from ..utils.objdet_helper import xywhr_to_xyxyr
from open3d.ml.tf.ops import nms
from ..utils.tf_utils import gen_CNN
from ...datasets.utils import BEVBox3D, DataProcessing, ObjdetAugmentation
from ...datasets.utils.operations import filter_by_min_points, points_in_box
from ...utils import MODEL
from ..modules.schedulers import OneCycleScheduler
from ..utils.roipool3d import roipool3d_utils
from ...metrics import iou_3d
class PointRCNN(BaseModel):
    """Object detection model. Based on the PointRCNN architecture
    https://github.com/sshaoshuai/PointRCNN.

    The network is not trainable end-to-end, it requires pre-training of the RPN
    module, followed by training of the RCNN module. For this the mode must be
    set to 'RPN', with this, the network only outputs intermediate results. If
    the RPN module is trained, the mode can be set to 'RCNN' (default), with
    this, the second module can be trained and the output are the final
    predictions.

    For inference use the 'RCNN' mode.

    Args:
        name (string): Name of model.
            Default to "PointRCNN".
        device (string): 'cuda' or 'cpu'.
            Default to 'cuda'.
        classes (string[]): List of classes used for object detection:
            Default to ['Car'].
        score_thres (float): Min confidence score for prediction.
            Default to 0.3.
        npoints (int): Number of processed input points.
            Default to 16384.
        rpn (dict): Config of RPN module.
            Default to {}.
        rcnn (dict): Config of RCNN module.
            Default to {}.
        mode (string): Execution mode, 'RPN' or 'RCNN'.
            Default to 'RCNN'.
    """

    def __init__(self,
                 name="PointRCNN",
                 classes=None,
                 score_thres=0.3,
                 npoints=16384,
                 rpn=None,
                 rcnn=None,
                 mode="RCNN",
                 **kwargs):
        super().__init__(name=name, **kwargs)
        assert mode == "RPN" or mode == "RCNN"
        # Avoid mutable default arguments; fall back to the documented
        # defaults when the caller passes nothing.
        classes = ['Car'] if classes is None else classes
        rpn = {} if rpn is None else rpn
        rcnn = {} if rcnn is None else rcnn
        self.mode = mode

        self.npoints = npoints
        self.classes = classes
        self.name2lbl = {n: i for i, n in enumerate(classes)}
        self.lbl2name = {i: n for i, n in enumerate(classes)}
        self.score_thres = score_thres

        self.rpn = RPN(**rpn)
        self.rcnn = RCNN(num_classes=len(self.classes), **rcnn)

        # Only the module selected by `mode` is trained; the other is frozen.
        if self.mode == "RCNN":
            self.rpn.trainable = False
        else:
            self.rcnn.trainable = False

    def call(self, inputs, training=True):
        """Forward pass: RPN proposals, then (in 'RCNN' mode) box refinement."""
        cls_score, reg_score, backbone_xyz, backbone_features = self.rpn(
            inputs[0], training=self.mode == "RPN" and training)

        # In RCNN mode the RPN is frozen — block gradients into it.
        if self.mode != "RPN":
            cls_score = tf.stop_gradient(cls_score)
            reg_score = tf.stop_gradient(reg_score)
            backbone_xyz = tf.stop_gradient(backbone_xyz)
            backbone_features = tf.stop_gradient(backbone_features)

        rpn_scores_raw = tf.stop_gradient(cls_score[:, :, 0])
        rois, _ = self.rpn.proposal_layer(rpn_scores_raw,
                                          reg_score,
                                          backbone_xyz,
                                          training=training)  # (B, M, 7)
        rois = tf.stop_gradient(rois)

        output = {"rois": rois, "cls": cls_score, "reg": reg_score}

        if self.mode == "RCNN":
            rpn_scores_norm = tf.sigmoid(rpn_scores_raw)

            # Foreground mask from the (sigmoid) RPN scores.
            seg_mask = tf.cast((rpn_scores_norm > self.score_thres), tf.float32)
            pts_depth = tf.norm(backbone_xyz, ord=2, axis=2)

            seg_mask = tf.stop_gradient(seg_mask)
            pts_depth = tf.stop_gradient(pts_depth)

            gt_boxes = None
            if training or self.mode == "RPN":
                gt_boxes = inputs[1]

            output = self.rcnn(rois,
                               gt_boxes,
                               backbone_xyz,
                               tf.transpose(backbone_features, (0, 2, 1)),
                               seg_mask,
                               pts_depth,
                               training=training)

        return output

    def get_optimizer(self, cfg):
        """Build an Adam optimizer driven by a one-cycle LR schedule."""
        beta1, beta2 = cfg.get('betas', [0.9, 0.99])
        lr_scheduler = OneCycleScheduler(40800, cfg.lr, cfg.div_factor)

        optimizer = tf.optimizers.Adam(learning_rate=lr_scheduler,
                                       beta_1=beta1,
                                       beta_2=beta2)
        return optimizer

    def load_gt_database(self, pickle_path, min_points_dict, sample_dict):
        """Load ground truth object database.

        Args:
            pickle_path: Path of pickle file generated using `scripts/collect_bbox.py`.
            min_points_dict: A dictionary to filter objects based on number of points inside.
            sample_dict: A dictionary to decide number of objects to sample.
        """
        # NOTE: pickle data must come from the trusted collect_bbox.py output;
        # never point this at untrusted files.
        # BUGFIX: close the file handle (the original leaked it via
        # pickle.load(open(...))).
        with open(pickle_path, 'rb') as f:
            db_boxes = pickle.load(f)

        if min_points_dict is not None:
            db_boxes = filter_by_min_points(db_boxes, min_points_dict)

        # Bucket the boxes by class, keeping only classes we may sample.
        db_boxes_dict = {key: [] for key in sample_dict}
        for db_box in db_boxes:
            if db_box.label_class in sample_dict:
                db_boxes_dict[db_box.label_class].append(db_box)

        self.db_boxes_dict = db_boxes_dict

    def augment_data(self, data, attr):
        """Augment object detection data.

        Available augmentations are:
            `ObjectSample`: Insert objects from ground truth database.
            `ObjectRangeFilter`: Filter pointcloud from given bounds.
            `PointShuffle`: Shuffle the pointcloud.

        Args:
            data: A dictionary object returned from the dataset class.
            attr: Attributes for current pointcloud.

        Returns:
            Augmented `data` dictionary.
        """
        cfg = self.cfg.augment
        if 'ObjectSample' in cfg.keys():
            if not hasattr(self, 'db_boxes_dict'):
                data_path = attr['path']
                # remove tail of path to get root data path
                for _ in range(3):
                    data_path = os.path.split(data_path)[0]
                pickle_path = os.path.join(data_path, 'bboxes.pkl')
                self.load_gt_database(pickle_path, **cfg['ObjectSample'])
            data = ObjdetAugmentation.ObjectSample(
                data,
                db_boxes_dict=self.db_boxes_dict,
                sample_dict=cfg['ObjectSample']['sample_dict'])

        if cfg.get('ObjectRangeFilter', False):
            data = ObjdetAugmentation.ObjectRangeFilter(
                data, self.cfg.point_cloud_range)

        if cfg.get('PointShuffle', False):
            data = ObjdetAugmentation.PointShuffle(data)

        return data

    def loss(self, results, inputs, training=True):
        """Dispatch the loss to the module currently being trained."""
        if self.mode == "RPN":
            return self.rpn.loss(results, inputs)
        else:
            if not training:
                return {"loss": tf.constant(0.0)}
            return self.rcnn.loss(results, inputs)

    def filter_objects(self, bbox_objs):
        """Filter objects based on classes to train.

        Args:
            bbox_objs: Bounding box objects from dataset class.

        Returns:
            Filtered bounding box objects.
        """
        return [bb for bb in bbox_objs if bb.label_class in self.classes]

    def preprocess(self, data, attr):
        """Augment (train only), drop intensity, and move points to cam space."""
        if attr['split'] in ['train', 'training']:
            data = self.augment_data(data, attr)

        data['bounding_boxes'] = self.filter_objects(data['bounding_boxes'])

        # remove intensity
        points = np.array(data['point'][..., :3], dtype=np.float32)
        calib = data['calib']

        # transform in cam space
        points = DataProcessing.world2cam(points, calib['world_cam'])

        new_data = {'point': points, 'calib': calib}

        # bounding_boxes are objects of type BEVBox3D. It is renamed to
        # bbox_objs to clarify them as objects and not matrix of type [N, 7].
        if attr['split'] not in ['test', 'testing']:
            new_data['bbox_objs'] = data['bounding_boxes']

        return new_data

    @staticmethod
    def generate_rpn_training_labels(points, bboxes, bboxes_world, calib=None):
        """Generates labels for RPN network.

        Classifies each point as foreground/background based on points inside bbox.
        We don't train on ambigious points which are just outside bounding boxes(calculated
        by `extended_boxes`).
        Also computes regression labels for bounding box proposals(in bounding box frame).

        Args:
            points: Input pointcloud.
            bboxes: bounding boxes in camera frame.
            bboxes_world: bounding boxes in world frame.
            calib: Calibration file for cam_to_world matrix.

        Returns:
            Classification and Regression labels.
        """
        cls_label = np.zeros((points.shape[0]), dtype=np.int32)
        reg_label = np.zeros((points.shape[0], 7),
                             dtype=np.float32)  # dx, dy, dz, ry, h, w, l

        if len(bboxes) == 0:
            return cls_label, reg_label

        pts_idx = points_in_box(points.copy(),
                                bboxes_world,
                                camera_frame=True,
                                cam_world=DataProcessing.invT(
                                    calib['world_cam']))

        # enlarge the bbox3d, ignore nearby points
        extended_boxes = bboxes_world.copy()
        # Enlarge box by 0.4m (from PointRCNN paper).
        # BUGFIX: the original `extended_boxes[3:6] += 0.4` enlarged rows
        # (boxes 3..5, all 7 fields) instead of the size columns of every
        # box; the per-column intent is shown by the `[:, 2]` line below.
        extended_boxes[:, 3:6] += 0.4
        # Decrease z coordinate, as z_center is at bottom face of box.
        extended_boxes[:, 2] -= 0.2

        pts_idx_ext = points_in_box(points.copy(),
                                    extended_boxes,
                                    camera_frame=True,
                                    cam_world=DataProcessing.invT(
                                        calib['world_cam']))

        for k in range(bboxes.shape[0]):
            fg_pt_flag = pts_idx[:, k]
            fg_pts_rect = points[fg_pt_flag]
            cls_label[fg_pt_flag] = 1

            # Points only inside the enlarged box are ambiguous: ignore (-1).
            fg_enlarge_flag = pts_idx_ext[:, k]
            ignore_flag = np.logical_xor(fg_pt_flag, fg_enlarge_flag)
            cls_label[ignore_flag] = -1

            # pixel offset of object center
            center3d = bboxes[k][0:3].copy()  # (x, y, z)
            center3d[1] -= bboxes[k][3] / 2
            reg_label[fg_pt_flag, 0:3] = center3d - fg_pts_rect

            # size and angle encoding
            reg_label[fg_pt_flag, 3] = bboxes[k][3]  # h
            reg_label[fg_pt_flag, 4] = bboxes[k][4]  # w
            reg_label[fg_pt_flag, 5] = bboxes[k][5]  # l
            reg_label[fg_pt_flag, 6] = bboxes[k][6]  # ry

        return cls_label, reg_label

    def transform(self, data, attr):
        """Sample/pad to `npoints` (non-test splits) and attach labels/boxes."""
        points = data['point']

        if attr['split'] not in ['test', 'testing']:  #, 'val', 'validation']:
            if self.npoints < len(points):
                # Keep all far points, subsample near (<40m) ones.
                pts_depth = points[:, 2]
                pts_near_flag = pts_depth < 40.0
                far_idxs_choice = np.where(pts_near_flag == 0)[0]
                near_idxs = np.where(pts_near_flag == 1)[0]
                near_idxs_choice = np.random.choice(near_idxs,
                                                    self.npoints -
                                                    len(far_idxs_choice),
                                                    replace=False)

                choice = np.concatenate((near_idxs_choice, far_idxs_choice), axis=0) \
                    if len(far_idxs_choice) > 0 else near_idxs_choice
                np.random.shuffle(choice)
            else:
                choice = np.arange(0, len(points), dtype=np.int32)
                if self.npoints > len(points):
                    # Pad by re-sampling existing indices.
                    extra_choice = np.random.choice(choice,
                                                    self.npoints - len(points),
                                                    replace=False)
                    choice = np.concatenate((choice, extra_choice), axis=0)
                np.random.shuffle(choice)

            points = points[choice, :]

        t_data = {'point': points, 'calib': data['calib']}

        if attr['split'] not in ['test', 'testing']:
            labels = []
            bboxes = []
            bboxes_world = []
            if len(data['bbox_objs']) != 0:
                labels = np.stack([
                    self.name2lbl.get(bb.label_class, len(self.classes))
                    for bb in data['bbox_objs']
                ])

                bboxes = np.stack([bb.to_camera() for bb in data['bbox_objs']
                                  ])  # Camera frame.
                bboxes_world = np.stack(
                    [bb.to_xyzwhlr() for bb in data['bbox_objs']])

            if self.mode == "RPN":
                labels, bboxes = PointRCNN.generate_rpn_training_labels(
                    points, bboxes, bboxes_world, data['calib'])
            t_data['labels'] = np.array(labels)
            t_data['bbox_objs'] = data['bbox_objs']  # Objects of type BEVBox3D.
            if attr['split'] in ['train', 'training'] or self.mode == "RPN":
                t_data['bboxes'] = bboxes

        return t_data

    def inference_end(self, results, inputs):
        """Convert refined RCNN boxes to per-frame lists of BEVBox3D."""
        if self.mode == 'RPN':
            # RPN mode produces only intermediate results.
            return [[]]

        roi_boxes3d = results['rois']  # (B, M, 7)
        batch_size = roi_boxes3d.shape[0]

        rcnn_cls = tf.reshape(results['cls'],
                              (batch_size, -1, results['cls'].shape[1]))
        rcnn_reg = tf.reshape(results['reg'],
                              (batch_size, -1, results['reg'].shape[1]))

        pred_boxes3d, rcnn_cls = self.rcnn.proposal_layer(rcnn_cls,
                                                          rcnn_reg,
                                                          roi_boxes3d,
                                                          training=False)

        inference_result = []
        for calib, bboxes, scores in zip(inputs[3], pred_boxes3d, rcnn_cls):
            # scoring
            if scores.shape[-1] == 1:
                scores = tf.sigmoid(scores)
                labels = tf.cast(scores < self.score_thres, tf.int64)
            else:
                labels = tf.argmax(scores)
                scores = tf.nn.softmax(scores, axis=0)
                scores = scores[labels]

            # Keep only confident detections.
            fltr = tf.reshape(scores > self.score_thres, (-1))
            bboxes = bboxes[fltr]
            labels = labels[fltr]
            scores = scores[fltr]

            bboxes = bboxes.numpy()
            scores = scores.numpy()
            labels = labels.numpy()
            inference_result.append([])

            world_cam, cam_img = calib.numpy()

            for bbox, score, label in zip(bboxes, scores, labels):
                pos = bbox[:3]
                dim = bbox[[4, 3, 5]]
                # transform into world space
                pos = DataProcessing.cam2world(pos.reshape((1, -1)),
                                               world_cam).flatten()
                pos = pos + [0, 0, dim[1] / 2]
                yaw = bbox[-1]

                name = self.lbl2name.get(label[0], "ignore")
                inference_result[-1].append(
                    BEVBox3D(pos, dim, yaw, name, score, world_cam, cam_img))

        return inference_result

    def get_batch_gen(self, dataset, steps_per_epoch=None, batch_size=1):
        """Return (generator_fn, dtypes, shapes) for tf.data-style batching.

        Boxes/labels are padded to the per-batch maximum (labels with -1).
        """

        def batcher():
            count = len(dataset) if steps_per_epoch is None else steps_per_epoch
            for i in np.arange(0, count, batch_size):
                batch = [dataset[i + bi]['data'] for bi in range(batch_size)]

                points = tf.stack([b['point'] for b in batch], axis=0)

                bboxes = [
                    b.get('bboxes', tf.zeros((0, 7), dtype=tf.float32))
                    for b in batch
                ]
                max_gt = 0
                for bbox in bboxes:
                    max_gt = max(max_gt, bbox.shape[0])
                pad_bboxes = np.zeros((len(bboxes), max_gt, 7),
                                      dtype=np.float32)
                for j in range(len(bboxes)):
                    pad_bboxes[j, :bboxes[j].shape[0], :] = bboxes[j]
                bboxes = tf.constant(pad_bboxes)

                labels = [
                    b.get('labels', tf.zeros((0,), dtype=tf.int32))
                    for b in batch
                ]
                max_lab = 0
                for lab in labels:
                    max_lab = max(max_lab, lab.shape[0])

                # Per-box labels get padded with -1; per-point labels (RPN
                # mode, one label per point) are stacked as-is.
                if 'labels' in batch[
                        0] and labels[0].shape[0] != points.shape[1]:
                    pad_labels = np.ones(
                        (len(labels), max_lab), dtype=np.int32) * (-1)
                    for j in range(len(labels)):
                        pad_labels[j, :labels[j].shape[0]] = labels[j]
                    labels = tf.constant(pad_labels)
                else:
                    labels = tf.stack(labels, axis=0)

                calib = [
                    tf.constant([
                        b.get('calib', {}).get('world_cam', np.eye(4)),
                        b.get('calib', {}).get('cam_img', np.eye(4))
                    ]) for b in batch
                ]
                yield (points, bboxes, labels, calib)

        gen_func = batcher
        gen_types = (tf.float32, tf.float32, tf.int32, tf.float32)
        gen_shapes = ([batch_size, None, 3], [batch_size, None, 7],
                      [batch_size, None], [batch_size, 2, 4, 4])

        return gen_func, gen_types, gen_shapes
# Register PointRCNN in the MODEL registry under the 'tf' framework key.
MODEL._register_module(PointRCNN, 'tf')
def get_reg_loss(pred_reg,
                 reg_label,
                 loc_scope,
                 loc_bin_size,
                 num_head_bin,
                 anchor_size,
                 get_xz_fine=True,
                 get_y_by_bin=False,
                 loc_y_scope=0.5,
                 loc_y_bin_size=0.25,
                 get_ry_fine=False):
    """Bin-based 3D bounding boxes regression loss. See
    https://arxiv.org/abs/1812.04244 for more details.

    Args:
        pred_reg: (N, C)
        reg_label: (N, 7) [dx, dy, dz, h, w, l, ry]
        loc_scope: Constant
        loc_bin_size: Constant
        num_head_bin: Constant
        anchor_size: (N, 3) or (3)
        get_xz_fine: Whether to get fine xz loss.
        get_y_by_bin: Whether to divide y coordinate into bin.
        loc_y_scope: Scope length for y coordinate.
        loc_y_bin_size: Bin size for classifying y coordinate.
        get_ry_fine: Whether to use fine yaw loss.

    Returns:
        Tuple ``(loc_loss, angle_loss, size_loss, reg_loss_dict)``: three
        scalar loss tensors plus a dict of individual terms (sub-terms are
        stored as numpy scalars for logging, the three totals as tensors).
    """
    per_loc_bin_num = int(loc_scope / loc_bin_size) * 2
    loc_y_bin_num = int(loc_y_scope / loc_y_bin_size) * 2

    reg_loss_dict = {}
    loc_loss = 0

    # xz localization loss
    x_offset_label, y_offset_label, z_offset_label = reg_label[:,
                                                               0], reg_label[:,
                                                                             1], reg_label[:,
                                                                                           2]
    # Shift offsets into [0, 2*loc_scope) and discretize into bins.
    x_shift = tf.clip_by_value(x_offset_label + loc_scope, 0,
                               loc_scope * 2 - 1e-3)
    z_shift = tf.clip_by_value(z_offset_label + loc_scope, 0,
                               loc_scope * 2 - 1e-3)
    x_bin_label = tf.cast(tf.floor(x_shift / loc_bin_size), tf.int64)
    z_bin_label = tf.cast(tf.floor(z_shift / loc_bin_size), tf.int64)

    # Channel layout of pred_reg: [x bins | z bins | x res | z res | ...];
    # start_offset tracks where the next group begins.
    x_bin_l, x_bin_r = 0, per_loc_bin_num
    z_bin_l, z_bin_r = per_loc_bin_num, per_loc_bin_num * 2
    start_offset = z_bin_r

    loss_x_bin = CrossEntropyLoss()(pred_reg[:, x_bin_l:x_bin_r], x_bin_label)
    loss_z_bin = CrossEntropyLoss()(pred_reg[:, z_bin_l:z_bin_r], z_bin_label)
    reg_loss_dict['loss_x_bin'] = loss_x_bin.numpy()
    reg_loss_dict['loss_z_bin'] = loss_z_bin.numpy()
    loc_loss += loss_x_bin + loss_z_bin

    if get_xz_fine:
        x_res_l, x_res_r = per_loc_bin_num * 2, per_loc_bin_num * 3
        z_res_l, z_res_r = per_loc_bin_num * 3, per_loc_bin_num * 4
        start_offset = z_res_r

        # Residual relative to the assigned bin center, normalized by the
        # bin size.
        x_res_label = x_shift - (
            tf.cast(x_bin_label, tf.float32) * loc_bin_size + loc_bin_size / 2)
        z_res_label = z_shift - (
            tf.cast(z_bin_label, tf.float32) * loc_bin_size + loc_bin_size / 2)
        x_res_norm_label = x_res_label / loc_bin_size
        z_res_norm_label = z_res_label / loc_bin_size

        # One-hot masks select the predicted residual of the GT bin only.
        x_bin_onehot = tf.one_hot(x_bin_label, per_loc_bin_num)
        z_bin_onehot = tf.one_hot(z_bin_label, per_loc_bin_num)

        loss_x_res = SmoothL1Loss()(tf.reduce_sum(pred_reg[:, x_res_l:x_res_r] *
                                                  x_bin_onehot,
                                                  axis=1), x_res_norm_label)
        loss_z_res = SmoothL1Loss()(tf.reduce_sum(pred_reg[:, z_res_l:z_res_r] *
                                                  z_bin_onehot,
                                                  axis=1), z_res_norm_label)
        reg_loss_dict['loss_x_res'] = loss_x_res.numpy()
        reg_loss_dict['loss_z_res'] = loss_z_res.numpy()
        loc_loss += loss_x_res + loss_z_res

    # y localization loss
    if get_y_by_bin:
        y_bin_l, y_bin_r = start_offset, start_offset + loc_y_bin_num
        y_res_l, y_res_r = y_bin_r, y_bin_r + loc_y_bin_num
        start_offset = y_res_r

        y_shift = tf.clip_by_value(y_offset_label + loc_y_scope, 0,
                                   loc_y_scope * 2 - 1e-3)
        y_bin_label = tf.cast(tf.floor(y_shift / loc_y_bin_size), tf.int64)
        y_res_label = y_shift - (tf.cast(y_bin_label, tf.float32) *
                                 loc_y_bin_size + loc_y_bin_size / 2)
        y_res_norm_label = y_res_label / loc_y_bin_size

        y_bin_onehot = tf.one_hot(y_bin_label, loc_y_bin_num)

        loss_y_bin = CrossEntropyLoss()(pred_reg[:, y_bin_l:y_bin_r],
                                        y_bin_label)
        loss_y_res = SmoothL1Loss()(tf.reduce_sum(pred_reg[:, y_res_l:y_res_r] *
                                                  y_bin_onehot,
                                                  axis=1), y_res_norm_label)

        reg_loss_dict['loss_y_bin'] = loss_y_bin.numpy()
        reg_loss_dict['loss_y_res'] = loss_y_res.numpy()

        loc_loss += loss_y_bin + loss_y_res
    else:
        # Direct (un-binned) regression of the y offset.
        y_offset_l, y_offset_r = start_offset, start_offset + 1
        start_offset = y_offset_r

        loss_y_offset = SmoothL1Loss()(tf.reduce_sum(
            pred_reg[:, y_offset_l:y_offset_r], axis=1), y_offset_label)
        reg_loss_dict['loss_y_offset'] = loss_y_offset.numpy()
        loc_loss += loss_y_offset

    # angle loss
    ry_bin_l, ry_bin_r = start_offset, start_offset + num_head_bin
    ry_res_l, ry_res_r = ry_bin_r, ry_bin_r + num_head_bin

    ry_label = reg_label[:, 6]

    if get_ry_fine:
        # divide pi/2 into several bins
        angle_per_class = (np.pi / 2) / num_head_bin

        ry_label = ry_label % (2 * np.pi)  # 0 ~ 2pi
        # Fold opposite headings together so the range is (0~pi/2, 3pi/2~2pi).
        ry_label = tf.where((ry_label > np.pi * 0.5) & (ry_label < np.pi * 1.5),
                            (ry_label + np.pi) % (2 * np.pi),
                            ry_label)  # (0 ~ pi/2, 3pi/2 ~ 2pi)
        shift_angle = (ry_label + np.pi * 0.5) % (2 * np.pi)  # (0 ~ pi)

        shift_angle = tf.clip_by_value(shift_angle - np.pi * 0.25, 1e-3,
                                       np.pi * 0.5 - 1e-3)  # (0, pi/2)

        # bin center is (5, 10, 15, ..., 85)
        ry_bin_label = tf.cast(tf.floor(shift_angle / angle_per_class),
                               tf.int64)
        ry_res_label = shift_angle - (tf.cast(ry_bin_label, tf.float32) *
                                      angle_per_class + angle_per_class / 2)
        ry_res_norm_label = ry_res_label / (angle_per_class / 2)

    else:
        # divide 2pi into several bins
        angle_per_class = (2 * np.pi) / num_head_bin
        heading_angle = ry_label % (2 * np.pi)  # 0 ~ 2pi

        shift_angle = (heading_angle + angle_per_class / 2) % (2 * np.pi)
        ry_bin_label = tf.cast(tf.floor(shift_angle / angle_per_class),
                               tf.int64)
        ry_res_label = shift_angle - (tf.cast(ry_bin_label, tf.float32) *
                                      angle_per_class + angle_per_class / 2)
        ry_res_norm_label = ry_res_label / (angle_per_class / 2)

    ry_bin_onehot = tf.one_hot(ry_bin_label, num_head_bin)
    loss_ry_bin = CrossEntropyLoss()(pred_reg[:, ry_bin_l:ry_bin_r],
                                     ry_bin_label)
    loss_ry_res = SmoothL1Loss()(tf.reduce_sum(pred_reg[:, ry_res_l:ry_res_r] *
                                               ry_bin_onehot,
                                               axis=1), ry_res_norm_label)

    reg_loss_dict['loss_ry_bin'] = loss_ry_bin.numpy()
    reg_loss_dict['loss_ry_res'] = loss_ry_res.numpy()
    angle_loss = loss_ry_bin + loss_ry_res

    # size loss
    size_res_l, size_res_r = ry_res_r, ry_res_r + 3
    assert pred_reg.shape[1] == size_res_r, '%d vs %d' % (pred_reg.shape[1],
                                                          size_res_r)

    # Size is regressed as a normalized residual w.r.t. the anchor size.
    size_res_norm_label = (reg_label[:, 3:6] - anchor_size) / anchor_size
    size_res_norm = pred_reg[:, size_res_l:size_res_r]
    size_loss = SmoothL1Loss()(size_res_norm, size_res_norm_label)

    # Total regression loss
    reg_loss_dict['loss_loc'] = loc_loss
    reg_loss_dict['loss_angle'] = angle_loss
    reg_loss_dict['loss_size'] = size_loss

    return loc_loss, angle_loss, size_loss, reg_loss_dict
class RPN(tf.keras.layers.Layer):
    """Region Proposal Network of PointRCNN.

    Runs a PointNet++-style backbone over the raw points, then a 1x1-conv
    classification head (per-point objectness logit) and a 1x1-conv
    regression head (bin-based box encoding).
    """

    def __init__(self,
                 backbone={},
                 cls_in_ch=128,
                 cls_out_ch=[128],
                 reg_in_ch=128,
                 reg_out_ch=[128],
                 db_ratio=0.5,
                 head={},
                 focal_loss={},
                 loss_weight=[1.0, 1.0],
                 **kwargs):
        """Build the backbone, proposal layer and both prediction heads.

        Args:
            backbone: kwargs forwarded to Pointnet2MSG.
            cls_in_ch: Declared cls-head input channels (not referenced in
                this constructor; kept for config compatibility).
            cls_out_ch: Hidden channel sizes of the classification branch.
            reg_in_ch: Declared reg-head input channels (not referenced).
            reg_out_ch: Hidden channel sizes of the regression branch.
            db_ratio: Dropout ratio used in both branches.
            head: kwargs forwarded to ProposalLayer.
            focal_loss: kwargs forwarded to FocalLoss.
            loss_weight: [cls_weight, reg_weight] loss multipliers.
        """
        super().__init__()

        # backbone
        self.backbone = Pointnet2MSG(**backbone)
        self.proposal_layer = ProposalLayer(**head)

        # classification branch
        layers = []
        for i in range(len(cls_out_ch)):
            layers.extend([
                tf.keras.layers.Conv1D(cls_out_ch[i],
                                       1,
                                       use_bias=False,
                                       data_format="channels_first"),
                tf.keras.layers.BatchNormalization(axis=1,
                                                   momentum=0.9,
                                                   epsilon=1e-05),
                tf.keras.layers.ReLU(),
                tf.keras.layers.Dropout(db_ratio)
            ])
        # Final 1-channel logit; bias set so the initial foreground
        # probability is 0.01 (standard focal-loss initialization).
        layers.append(
            tf.keras.layers.Conv1D(
                1,
                1,
                use_bias=True,
                bias_initializer=tf.keras.initializers.Constant(-np.log(
                    (1 - 0.01) / 0.01)),
                data_format="channels_first"))

        self.cls_blocks = tf.keras.Sequential(layers)

        # regression branch: channel count follows the bin-based encoding
        # (xz bins [+ residuals], yaw bins + residuals, 3 sizes, 1 y offset).
        per_loc_bin_num = int(self.proposal_layer.loc_scope /
                              self.proposal_layer.loc_bin_size) * 2
        if self.proposal_layer.loc_xz_fine:
            reg_channel = per_loc_bin_num * 4 + self.proposal_layer.num_head_bin * 2 + 3
        else:
            reg_channel = per_loc_bin_num * 2 + self.proposal_layer.num_head_bin * 2 + 3
        reg_channel = reg_channel + 1  # reg y

        layers = []
        for i in range(len(reg_out_ch)):
            layers.extend([
                tf.keras.layers.Conv1D(reg_out_ch[i],
                                       1,
                                       use_bias=False,
                                       data_format="channels_first"),
                tf.keras.layers.BatchNormalization(axis=1,
                                                   momentum=0.9,
                                                   epsilon=1e-05),
                tf.keras.layers.ReLU(),
                tf.keras.layers.Dropout(db_ratio)
            ])
        layers.append(
            tf.keras.layers.Conv1D(
                reg_channel,
                1,
                use_bias=True,
                kernel_initializer=tf.keras.initializers.RandomNormal(
                    stddev=0.001),
                data_format="channels_first"))

        self.reg_blocks = tf.keras.Sequential(layers)

        self.loss_cls = FocalLoss(**focal_loss)
        self.loss_weight = loss_weight

    def call(self, x, training=True):
        """Run backbone + heads; returns per-point logits and regressions."""
        backbone_xyz, backbone_features = self.backbone(
            x, training=training)  # (B, N, 3), (B, C, N)
        rpn_cls = tf.transpose(
            self.cls_blocks(backbone_features, training=training),
            (0, 2, 1))  # (B, N, 1)
        rpn_reg = tf.transpose(
            self.reg_blocks(backbone_features, training=training),
            (0, 2, 1))  # (B, N, C)

        return rpn_cls, rpn_reg, backbone_xyz, backbone_features

    def loss(self, results, inputs):
        """Compute weighted focal classification + bin-based regression loss.

        Args:
            results: Dict with 'cls' (B, N, 1) and 'reg' (B, N, C) outputs.
            inputs: Batch tuple; inputs[1] are regression labels, inputs[2]
                per-point class labels (-1 = ignore, 0 = bg, >0 = fg).

        Returns:
            Dict with weighted 'cls' and 'reg' scalar losses.
        """
        rpn_cls = results['cls']
        rpn_reg = results['reg']

        rpn_reg_label = inputs[1]
        rpn_cls_label = inputs[2]

        rpn_cls_label_flat = tf.reshape(rpn_cls_label, (-1))
        rpn_cls_flat = tf.reshape(rpn_cls, (-1))
        fg_mask = (rpn_cls_label_flat > 0)

        # focal loss — weights normalized by the number of foreground points.
        rpn_cls_target = tf.cast((rpn_cls_label_flat > 0), tf.int32)
        pos = tf.cast((rpn_cls_label_flat > 0), tf.float32)
        neg = tf.cast((rpn_cls_label_flat == 0), tf.float32)
        cls_weights = pos + neg
        pos_normalizer = tf.reduce_sum(pos)
        cls_weights = cls_weights / tf.maximum(pos_normalizer, 1.0)
        rpn_loss_cls = self.loss_cls(rpn_cls_flat,
                                     rpn_cls_target,
                                     cls_weights,
                                     avg_factor=1.0)

        # RPN regression loss — only over foreground points.
        point_num = rpn_reg.shape[0] * rpn_reg.shape[1]
        fg_sum = tf.reduce_sum(tf.cast(fg_mask, tf.int64)).numpy()
        if fg_sum != 0:
            loss_loc, loss_angle, loss_size, reg_loss_dict = \
                get_reg_loss(tf.reshape(rpn_reg, (point_num, -1))[fg_mask],
                             tf.reshape(rpn_reg_label, (point_num, 7))[fg_mask],
                             loc_scope=self.proposal_layer.loc_scope,
                             loc_bin_size=self.proposal_layer.loc_bin_size,
                             num_head_bin=self.proposal_layer.num_head_bin,
                             anchor_size=self.proposal_layer.mean_size,
                             get_xz_fine=self.proposal_layer.loc_xz_fine,
                             get_y_by_bin=False,
                             get_ry_fine=False)

            loss_size = 3 * loss_size
            rpn_loss_reg = loss_loc + loss_angle + loss_size
        else:
            # No foreground: keep the graph connected with a zero loss.
            rpn_loss_reg = tf.reduce_mean(rpn_reg * 0)

        return {
            "cls": rpn_loss_cls * self.loss_weight[0],
            "reg": rpn_loss_reg * self.loss_weight[1]
        }
class RCNN(tf.keras.layers.Layer):
    """Second-stage refinement network of PointRCNN.

    Pools points inside each ROI, canonically transforms them into the
    ROI frame, and predicts per-ROI confidence plus bin-based box
    refinements.
    """

    def __init__(
            self,
            num_classes,
            in_channels=128,
            SA_config={
                "npoints": [128, 32, -1],
                "radius": [0.2, 0.4, 100],
                "nsample": [64, 64, 64],
                "mlps": [[128, 128, 128], [128, 128, 256], [256, 256, 512]]
            },
            cls_out_ch=[256, 256],
            reg_out_ch=[256, 256],
            db_ratio=0.5,
            use_xyz=True,
            xyz_up_layer=[128, 128],
            head={},
            target_head={},
            loss={}):
        """Build set-abstraction stack and the cls/reg heads.

        Args:
            num_classes: Number of object classes (2 => single-logit head).
            in_channels: Input feature channels of the first SA module.
            SA_config: PointNet++ set-abstraction configuration (npoint=-1
                means group all points).
            cls_out_ch: Hidden channels of the classification head.
            reg_out_ch: Hidden channels of the regression head.
            db_ratio: Dropout ratio (declared; heads below use plain ReLU).
            use_xyz: Whether SA modules append xyz to features.
            xyz_up_layer: Channels of the raw-input up-projection CNN.
            head: kwargs forwarded to ProposalLayer.
            target_head: kwargs forwarded to ProposalTargetLayer.
            loss: Loss configuration (unused in this constructor).
        """
        super().__init__()
        # Raw per-point input: xyz (3) + seg mask (1) + depth (1).
        self.rcnn_input_channel = 5

        self.pool_extra_width = target_head.get("pool_extra_width", 1.0)
        self.num_points = target_head.get("num_points", 512)

        self.proposal_layer = ProposalLayer(**head)

        self.SA_modules = []
        for i in range(len(SA_config["npoints"])):
            mlps = [in_channels] + SA_config["mlps"][i]
            npoint = SA_config["npoints"][
                i] if SA_config["npoints"][i] != -1 else None
            self.SA_modules.append(
                PointnetSAModule(npoint=npoint,
                                 radius=SA_config["radius"][i],
                                 nsample=SA_config["nsample"][i],
                                 mlp=mlps,
                                 use_xyz=use_xyz,
                                 use_bias=True))
            in_channels = mlps[-1]

        self.xyz_up_layer = gen_CNN([self.rcnn_input_channel] + xyz_up_layer,
                                    conv=tf.keras.layers.Conv2D)
        c_out = xyz_up_layer[-1]
        self.merge_down_layer = gen_CNN([c_out * 2, c_out],
                                        conv=tf.keras.layers.Conv2D)

        # classification layer
        cls_channel = 1 if num_classes == 2 else num_classes

        layers = []
        for i in range(len(cls_out_ch)):
            layers.extend([
                tf.keras.layers.Conv1D(
                    cls_out_ch[i],
                    1,
                    use_bias=True,
                    data_format="channels_first",
                    kernel_initializer=tf.keras.initializers.GlorotNormal(),
                    bias_initializer=tf.keras.initializers.Constant(0.0)),
                tf.keras.layers.ReLU()
            ])
        layers.append(
            tf.keras.layers.Conv1D(
                cls_channel,
                1,
                use_bias=True,
                data_format="channels_first",
                kernel_initializer=tf.keras.initializers.GlorotNormal(),
                bias_initializer=tf.keras.initializers.Constant(0.0)))

        self.cls_blocks = tf.keras.Sequential(layers)

        self.loss_cls = tf.keras.losses.BinaryCrossentropy()

        # regression branch: channel count mirrors the bin-based encoding
        # used by decode_bbox_target / get_reg_loss.
        per_loc_bin_num = int(self.proposal_layer.loc_scope /
                              self.proposal_layer.loc_bin_size) * 2
        loc_y_bin_num = int(self.proposal_layer.loc_y_scope /
                            self.proposal_layer.loc_y_bin_size) * 2
        reg_channel = per_loc_bin_num * 4 + self.proposal_layer.num_head_bin * 2 + 3
        reg_channel += (1 if not self.proposal_layer.get_y_by_bin else
                        loc_y_bin_num * 2)

        layers = []
        for i in range(len(reg_out_ch)):
            layers.extend([
                tf.keras.layers.Conv1D(
                    reg_out_ch[i],
                    1,
                    use_bias=True,
                    data_format="channels_first",
                    kernel_initializer=tf.keras.initializers.GlorotNormal(),
                    bias_initializer=tf.keras.initializers.Constant(0.0)),
                tf.keras.layers.ReLU()
            ])
        layers.append(
            tf.keras.layers.Conv1D(
                reg_channel,
                1,
                use_bias=True,
                data_format="channels_first",
                kernel_initializer=tf.keras.initializers.RandomNormal(
                    stddev=0.001),
                bias_initializer=tf.keras.initializers.Constant(0.0)))

        self.reg_blocks = tf.keras.Sequential(layers)

        self.proposal_target_layer = ProposalTargetLayer(**target_head)

    def _break_up_pc(self, pc):
        """Split a point cloud into xyz and channels-first extra features."""
        xyz = pc[..., 0:3]
        features = (tf.transpose(pc[..., 3:],
                                 (0, 2, 1)) if pc.shape[-1] > 3 else None)

        return xyz, features

    def call(self,
             roi_boxes3d,
             gt_boxes3d,
             rpn_xyz,
             rpn_features,
             seg_mask,
             pts_depth,
             training=True):
        """Refine ROIs.

        When gt_boxes3d is given (training), ROIs are matched/sampled by
        the proposal-target layer; otherwise points are pooled directly
        for inference.

        Returns:
            Dict with 'rois', 'cls', 'reg' (plus targets when training).
        """
        # Extra per-point inputs: segmentation mask and normalized depth.
        pts_extra_input_list = [tf.expand_dims(seg_mask, axis=2)]
        pts_extra_input_list.append(
            tf.expand_dims(pts_depth / 70.0 - 0.5, axis=2))
        pts_extra_input = tf.concat(pts_extra_input_list, axis=2)
        pts_feature = tf.concat((pts_extra_input, rpn_features), axis=2)

        if gt_boxes3d is not None:
            target = self.proposal_target_layer(
                [roi_boxes3d, gt_boxes3d, rpn_xyz, pts_feature])
            for k in target:
                # Targets must not propagate gradients.
                target[k] = tf.stop_gradient(target[k])
            pts_input = tf.concat(
                (target['sampled_pts'], target['pts_feature']), axis=2)
            target['pts_input'] = pts_input
        else:
            pooled_features, pooled_empty_flag = roipool3d_utils.roipool3d_gpu(
                rpn_xyz,
                pts_feature,
                roi_boxes3d,
                self.pool_extra_width,
                sampled_pt_num=self.num_points)

            # canonical transformation: express pooled points in each
            # ROI's own (center + yaw) frame.
            batch_size = roi_boxes3d.shape[0]
            roi_center = roi_boxes3d[:, :, 0:3]
            poss = []
            for k in range(batch_size):
                pos = pooled_features[k, :, :, :3] - tf.expand_dims(
                    roi_center[k], axis=1)
                pos = rotate_pc_along_y_tf(pos, roi_boxes3d[k, :, 6])
                poss.append(pos)
            pooled_features = tf.concat(
                [tf.stack(poss), pooled_features[:, :, :, 3:]], axis=3)

            pts_input = tf.reshape(
                pooled_features,
                (-1, pooled_features.shape[2], pooled_features.shape[3]))

        xyz, features = self._break_up_pc(pts_input)

        xyz_input = tf.expand_dims(tf.transpose(
            pts_input[..., 0:self.rcnn_input_channel], (0, 2, 1)),
                                   axis=3)
        xyz_feature = self.xyz_up_layer(xyz_input, training=training)

        rpn_feature = tf.expand_dims(tf.transpose(
            pts_input[..., self.rcnn_input_channel:], (0, 2, 1)),
                                     axis=3)

        merged_feature = tf.concat((xyz_feature, rpn_feature), axis=1)
        merged_feature = self.merge_down_layer(merged_feature,
                                               training=training)
        l_xyz, l_features = [xyz], [tf.squeeze(merged_feature, axis=3)]

        for i in range(len(self.SA_modules)):
            li_xyz, li_features = self.SA_modules[i](l_xyz[i],
                                                     l_features[i],
                                                     training=training)
            l_xyz.append(li_xyz)
            l_features.append(li_features)

        rcnn_cls = tf.squeeze(tf.transpose(
            self.cls_blocks(l_features[-1], training=training), (0, 2, 1)),
                              axis=1)  # (B, 1 or 2)
        rcnn_reg = tf.squeeze(tf.transpose(
            self.reg_blocks(l_features[-1], training=training), (0, 2, 1)),
                              axis=1)  # (B, C)

        ret_dict = {'rois': roi_boxes3d, 'cls': rcnn_cls, 'reg': rcnn_reg}

        if gt_boxes3d is not None:
            ret_dict.update(target)
        return ret_dict

    def loss(self, results, inputs):
        """RCNN confidence + box-refinement loss over sampled ROIs.

        Returns:
            Dict with scalar 'cls' and 'reg' losses.
        """
        rcnn_cls = results['cls']
        rcnn_reg = results['reg']

        cls_label = tf.cast(results['cls_label'], tf.float32)
        reg_valid_mask = results['reg_valid_mask']
        gt_boxes3d_ct = results['gt_of_rois']
        pts_input = results['pts_input']

        cls_label_flat = tf.reshape(cls_label, (-1))

        # binary cross entropy
        # NOTE(review): BinaryCrossentropy is invoked as
        # (sigmoid(pred), label) while Keras expects (y_true, y_pred) —
        # this mirrors the original port; confirm the argument order.
        rcnn_cls_flat = tf.reshape(rcnn_cls, (-1))
        batch_loss_cls = tf.keras.losses.BinaryCrossentropy(reduction="none")(
            tf.sigmoid(rcnn_cls_flat), cls_label)
        # Labels of -1 mark ambiguous ROIs and are excluded.
        cls_valid_mask = tf.cast((cls_label_flat >= 0), tf.float32)
        rcnn_loss_cls = tf.reduce_sum(
            batch_loss_cls * cls_valid_mask) / tf.maximum(
                tf.reduce_sum(cls_valid_mask), 1.0)

        # rcnn regression loss — foreground ROIs only.
        batch_size = pts_input.shape[0]
        fg_mask = (reg_valid_mask > 0)
        fg_sum = tf.reduce_sum(tf.cast(fg_mask, tf.int64)).numpy()
        if fg_sum != 0:
            anchor_size = self.proposal_layer.mean_size

            loss_loc, loss_angle, loss_size, reg_loss_dict = \
                get_reg_loss(tf.reshape(rcnn_reg, (batch_size, -1))[fg_mask],
                             tf.reshape(gt_boxes3d_ct, (batch_size, 7))[fg_mask],
                             loc_scope=self.proposal_layer.loc_scope,
                             loc_bin_size=self.proposal_layer.loc_bin_size,
                             num_head_bin=self.proposal_layer.num_head_bin,
                             anchor_size=anchor_size,
                             get_xz_fine=True, get_y_by_bin=self.proposal_layer.get_y_by_bin,
                             loc_y_scope=self.proposal_layer.loc_y_scope, loc_y_bin_size=self.proposal_layer.loc_y_bin_size,
                             get_ry_fine=True)

            loss_size = 3 * loss_size  # consistent with old codes
            rcnn_loss_reg = loss_loc + loss_angle + loss_size
        else:
            # Regression loss is zero when no point is classified as foreground.
            rcnn_loss_reg = tf.reduce_mean(rcnn_reg * 0)

        return {"cls": rcnn_loss_cls, "reg": rcnn_loss_reg}
def rotate_pc_along_y(pc, rot_angle):
    """Rotate a point cloud around the camera Y axis, in place.

    Args:
        pc: (N, 3+C) array; columns 0..2 are XYZ in the rectified camera
            coordinate system. Extra channels are left untouched.
        rot_angle: rotation angle in radians (scalar).

    Returns:
        The same array with its X and Z columns rotated.
    """
    c, s = np.cos(rot_angle), np.sin(rot_angle)
    # 2x2 rotation acting on the XZ plane; Y stays fixed.
    rot = np.array([[c, -s], [s, c]])
    pc[:, [0, 2]] = pc[:, [0, 2]] @ rot.T
    return pc
class ProposalLayer(tf.keras.layers.Layer):
    """Decodes RPN outputs into 3D box proposals and applies NMS."""

    def __init__(self,
                 nms_pre=9000,
                 nms_post=512,
                 nms_thres=0.85,
                 nms_post_val=None,
                 nms_thres_val=None,
                 mean_size=[1.0],
                 loc_xz_fine=True,
                 loc_scope=3.0,
                 loc_bin_size=0.5,
                 num_head_bin=12,
                 get_y_by_bin=False,
                 get_ry_fine=False,
                 loc_y_scope=0.5,
                 loc_y_bin_size=0.25,
                 post_process=True):
        """Store NMS settings and the bin-based box-encoding parameters.

        Args:
            nms_pre: Max proposals kept before NMS.
            nms_post: Max proposals kept after NMS.
            nms_thres: BEV IoU threshold for NMS.
            nms_post_val: Optional nms_post override used at eval time.
            nms_thres_val: Optional nms_thres override used at eval time.
            mean_size: Anchor (mean) box size used for decoding.
            loc_xz_fine: Whether xz residuals refine the bin centers.
            loc_scope: Scope length for x, z offsets.
            loc_bin_size: Bin size for x, z offsets.
            num_head_bin: Number of yaw bins.
            get_y_by_bin: Whether y is decoded from bins.
            get_ry_fine: Whether yaw uses the fine (pi/2) range.
            loc_y_scope: Scope length for y offsets.
            loc_y_bin_size: Bin size for y offsets.
            post_process: Use distance-based proposal selection (True) or
                plain per-batch NMS (False).
        """
        super().__init__()
        self.nms_pre = nms_pre
        self.nms_post = nms_post
        self.nms_thres = nms_thres
        self.nms_post_val = nms_post_val
        self.nms_thres_val = nms_thres_val
        self.mean_size = tf.constant(mean_size)
        self.loc_scope = loc_scope
        self.loc_bin_size = loc_bin_size
        self.num_head_bin = num_head_bin
        self.loc_xz_fine = loc_xz_fine
        self.get_y_by_bin = get_y_by_bin
        self.get_ry_fine = get_ry_fine
        self.loc_y_scope = loc_y_scope
        self.loc_y_bin_size = loc_y_bin_size
        self.post_process = post_process

    def call(self, rpn_scores, rpn_reg, xyz, training=True):
        """Decode per-point regressions into boxes and select proposals.

        Args:
            rpn_scores: Per-point objectness scores.
            rpn_reg: Per-point bin-based box encodings.
            xyz: Per-point coordinates the encodings are relative to.
            training: Selects train vs. eval NMS settings.

        Returns:
            Tuple (boxes, scores); padded tensors when post_process is
            True, otherwise per-sample lists of NMS survivors.
        """
        batch_size = xyz.shape[0]
        proposals = decode_bbox_target(
            tf.reshape(xyz, (-1, xyz.shape[-1])),
            tf.reshape(rpn_reg, (-1, rpn_reg.shape[-1])),
            anchor_size=self.mean_size,
            loc_scope=self.loc_scope,
            loc_bin_size=self.loc_bin_size,
            num_head_bin=self.num_head_bin,
            get_xz_fine=self.loc_xz_fine,
            get_y_by_bin=self.get_y_by_bin,
            get_ry_fine=self.get_ry_fine,
            loc_y_scope=self.loc_y_scope,
            loc_y_bin_size=self.loc_y_bin_size)  # (N, 7)

        proposals = tf.reshape(proposals, (batch_size, -1, 7))

        nms_post = self.nms_post
        nms_thres = self.nms_thres
        if not training:
            # Eval-time overrides, when configured.
            if self.nms_post_val is not None:
                nms_post = self.nms_post_val
            if self.nms_thres_val is not None:
                nms_thres = self.nms_thres_val

        if self.post_process:
            proposals = tf.concat([
                proposals[..., :1], proposals[..., 1:2] +
                proposals[..., 3:4] / 2, proposals[..., 2:]
            ],
                                  axis=-1)  # set y as the center of bottom
            scores = rpn_scores
            sorted_idxs = tf.argsort(scores, axis=1, direction="DESCENDING")

            batch_size = scores.shape[0]
            ret_bbox3d = []
            ret_scores = []
            for k in range(batch_size):
                scores_single = scores[k]
                proposals_single = proposals[k]
                order_single = sorted_idxs[k]

                scores_single, proposals_single = self.distance_based_proposal(
                    scores_single, proposals_single, order_single, training)

                proposals_tot = proposals_single.shape[0]

                # Zero-pad every sample up to nms_post so results stack.
                ret_bbox3d.append(
                    tf.concat([
                        proposals_single,
                        tf.zeros((nms_post - proposals_tot, 7))
                    ],
                              axis=0))
                ret_scores.append(
                    tf.concat(
                        [scores_single,
                         tf.zeros((nms_post - proposals_tot,))],
                        axis=0))
            ret_bbox3d = tf.stack(ret_bbox3d)
            ret_scores = tf.stack(ret_scores)
        else:
            batch_size = rpn_scores.shape[0]
            ret_bbox3d = []
            ret_scores = []
            for k in range(batch_size):
                # Plain BEV NMS per sample: boxes as (x, z, w, l, ry).
                bev = xywhr_to_xyxyr(
                    tf.stack([proposals[k, :, i] for i in [0, 2, 3, 5, 6]],
                             axis=-1))
                keep_idx = nms(bev, rpn_scores[k, :, 0], nms_thres)

                ret_bbox3d.append(tf.gather(proposals[k], keep_idx))
                ret_scores.append(tf.gather(rpn_scores[k], keep_idx))

        return ret_bbox3d, ret_scores

    def distance_based_proposal(self, scores, proposals, order, training=True):
        """Propose ROIs in two area based on the distance.

        Args:
            scores: (N)
            proposals: (N, 7)
            order: (N)
            training (bool): Whether we are training?
        """
        nms_post = self.nms_post
        nms_thres = self.nms_thres
        if not training:
            if self.nms_post_val is not None:
                nms_post = self.nms_post_val
            if self.nms_thres_val is not None:
                nms_thres = self.nms_thres_val

        # Near range [0, 40] gets 70% of the budget, far range (40, 80]
        # the remainder, both pre- and post-NMS.
        nms_range_list = [0, 40.0, 80.0]
        pre_top_n_list = [
            0,
            int(self.nms_pre * 0.7), self.nms_pre - int(self.nms_pre * 0.7)
        ]
        post_top_n_list = [
            0, int(nms_post * 0.7), nms_post - int(nms_post * 0.7)
        ]

        scores_single_list, proposals_single_list = [], []

        # sort by score
        scores_ordered = tf.gather(scores, order)
        proposals_ordered = tf.gather(proposals, order)

        dist = proposals_ordered[:, 2]
        first_mask = (dist > nms_range_list[0]) & (dist <= nms_range_list[1])
        for i in range(1, len(nms_range_list)):
            # get proposal distance mask
            dist_mask = ((dist > nms_range_list[i - 1]) &
                         (dist <= nms_range_list[i]))

            if tf.reduce_any(dist_mask):
                # this area has points
                # reduce by mask
                cur_scores = scores_ordered[dist_mask]
                cur_proposals = proposals_ordered[dist_mask]

                # fetch pre nms top K
                cur_scores = cur_scores[:pre_top_n_list[i]]
                cur_proposals = cur_proposals[:pre_top_n_list[i]]
            else:
                assert i == 2, '%d' % i
                # this area doesn't have any points, so use rois of first area
                cur_scores = scores_ordered[first_mask]
                cur_proposals = proposals_ordered[first_mask]

                # fetch top K of first area
                cur_scores = cur_scores[pre_top_n_list[i -
                                                       1]:][:pre_top_n_list[i]]
                cur_proposals = cur_proposals[
                    pre_top_n_list[i - 1]:][:pre_top_n_list[i]]

            # oriented nms
            bev = xywhr_to_xyxyr(
                tf.gather(cur_proposals, [0, 2, 3, 5, 6], axis=1))
            keep_idx = nms(bev, cur_scores, nms_thres)

            # Fetch post nms top k
            keep_idx = keep_idx[:post_top_n_list[i]]

            scores_single_list.append(tf.gather(cur_scores, keep_idx))
            proposals_single_list.append(tf.gather(cur_proposals, keep_idx))

        scores_single = tf.concat(scores_single_list, axis=0)
        proposals_single = tf.concat(proposals_single_list, axis=0)
        return scores_single, proposals_single
def decode_bbox_target(roi_box3d,
                       pred_reg,
                       loc_scope,
                       loc_bin_size,
                       num_head_bin,
                       anchor_size,
                       get_xz_fine=True,
                       get_y_by_bin=False,
                       loc_y_scope=0.5,
                       loc_y_bin_size=0.25,
                       get_ry_fine=False):
    """Decode bin-based box regressions back into 3D boxes.

    Inverse of the encoding used by get_reg_loss.

    Args:
        roi_box3d: (N, 7) reference boxes (or (N, 3) centers).
        pred_reg: (N, C) bin-based regression output.
        loc_scope: Scope length for x, z offsets.
        loc_bin_size: Bin size for x, z offsets.
        num_head_bin: Number of yaw bins.
        anchor_size: (3) mean box size used to de-normalize h, w, l.
        get_xz_fine: Whether residual channels refine the xz bin centers.
        get_y_by_bin: Whether y is decoded from bins (else direct offset).
        loc_y_scope: Scope length for the y offset.
        loc_y_bin_size: Bin size for the y offset.
        get_ry_fine: Whether yaw bins cover pi/2 (fine) instead of 2*pi.

    Returns:
        (N, 7) decoded boxes [x, y, z, h, w, l, ry] in the frame of
        roi_box3d (rotated/translated back when roi_box3d has 7 columns).
    """
    per_loc_bin_num = int(loc_scope / loc_bin_size) * 2
    loc_y_bin_num = int(loc_y_scope / loc_y_bin_size) * 2

    # recover xz localization: take the argmax bin, then its center.
    x_bin_l, x_bin_r = 0, per_loc_bin_num
    z_bin_l, z_bin_r = per_loc_bin_num, per_loc_bin_num * 2
    start_offset = z_bin_r

    x_bin = tf.argmax(pred_reg[:, x_bin_l:x_bin_r], axis=1)
    z_bin = tf.argmax(pred_reg[:, z_bin_l:z_bin_r], axis=1)

    pos_x = tf.cast(x_bin,
                    tf.float32) * loc_bin_size + loc_bin_size / 2 - loc_scope
    pos_z = tf.cast(z_bin,
                    tf.float32) * loc_bin_size + loc_bin_size / 2 - loc_scope

    if get_xz_fine:
        x_res_l, x_res_r = per_loc_bin_num * 2, per_loc_bin_num * 3
        z_res_l, z_res_r = per_loc_bin_num * 3, per_loc_bin_num * 4
        start_offset = z_res_r

        # Pick the residual channel belonging to the chosen bin.
        x_res_norm = tf.gather(pred_reg[:, x_res_l:x_res_r],
                               x_bin,
                               batch_dims=1)
        z_res_norm = tf.gather(pred_reg[:, z_res_l:z_res_r],
                               z_bin,
                               batch_dims=1)
        x_res = x_res_norm * loc_bin_size
        z_res = z_res_norm * loc_bin_size

        pos_x += x_res
        pos_z += z_res

    # recover y localization
    if get_y_by_bin:
        y_bin_l, y_bin_r = start_offset, start_offset + loc_y_bin_num
        y_res_l, y_res_r = y_bin_r, y_bin_r + loc_y_bin_num
        start_offset = y_res_r

        y_bin = tf.argmax(pred_reg[:, y_bin_l:y_bin_r], axis=1)
        y_res_norm = tf.gather(pred_reg[:, y_res_l:y_res_r],
                               y_bin,
                               batch_dims=1)
        y_res = y_res_norm * loc_y_bin_size
        pos_y = tf.cast(
            y_bin, tf.float32
        ) * loc_y_bin_size + loc_y_bin_size / 2 - loc_y_scope + y_res
        pos_y = pos_y + roi_box3d[:, 1]
    else:
        # Direct offset relative to the reference box's y.
        y_offset_l, y_offset_r = start_offset, start_offset + 1
        start_offset = y_offset_r

        pos_y = roi_box3d[:, 1] + pred_reg[:, y_offset_l]

    # recover ry rotation
    ry_bin_l, ry_bin_r = start_offset, start_offset + num_head_bin
    ry_res_l, ry_res_r = ry_bin_r, ry_bin_r + num_head_bin

    ry_bin = tf.argmax(pred_reg[:, ry_bin_l:ry_bin_r], axis=1)
    ry_res_norm = tf.gather(pred_reg[:, ry_res_l:ry_res_r],
                            ry_bin,
                            batch_dims=1)
    if get_ry_fine:
        # divide pi/2 into several bins
        angle_per_class = (np.pi / 2) / num_head_bin
        ry_res = ry_res_norm * (angle_per_class / 2)
        ry = (tf.cast(ry_bin, tf.float32) * angle_per_class +
              angle_per_class / 2) + ry_res - np.pi / 4
    else:
        angle_per_class = (2 * np.pi) / num_head_bin
        ry_res = ry_res_norm * (angle_per_class / 2)

        # bin_center is (0, 30, 60, 90, 120, ..., 270, 300, 330)
        ry = (tf.cast(ry_bin, tf.float32) * angle_per_class + ry_res) % (2 *
                                                                         np.pi)
        ry = tf.where(ry > np.pi, ry - 2 * np.pi, ry)

    # recover size
    size_res_l, size_res_r = ry_res_r, ry_res_r + 3
    assert size_res_r == pred_reg.shape[1]

    size_res_norm = pred_reg[:, size_res_l:size_res_r]
    hwl = size_res_norm * anchor_size + anchor_size

    # shift to original coords: undo the canonical (center + yaw) transform.
    roi_center = roi_box3d[:, 0:3]
    shift_ret_box3d = tf.concat(
        (tf.reshape(pos_x, (-1, 1)), tf.reshape(
            pos_y, (-1, 1)), tf.reshape(pos_z,
                                        (-1, 1)), hwl, tf.reshape(ry, (-1, 1))),
        axis=1)
    ret_box3d = shift_ret_box3d
    if roi_box3d.shape[1] == 7:
        roi_ry = roi_box3d[:, 6:7]
        ret_box3d = rotate_pc_along_y_tf(shift_ret_box3d, -roi_ry)
        ret_box3d = tf.concat([ret_box3d[:, :6], ret_box3d[:, 6:7] + roi_ry],
                              axis=1)

    ret_box3d = tf.concat([
        ret_box3d[:, :1] + roi_center[:, :1], ret_box3d[:, 1:2],
        ret_box3d[:, 2:3] + roi_center[:, 2:3], ret_box3d[:, 3:]
    ],
                          axis=1)

    return ret_box3d
def rotate_pc_along_y_tf(pc, rot_angle):
    """Rotate point clouds around the camera Y axis (batched, TF).

    :param pc: (N, 3 + C) or (N, M, 3 + C); columns 0 and 2 (x, z) are
        rotated, all other channels pass through unchanged.
    :param rot_angle: (N) per-row rotation angles in radians.
    :return: tensor of the same shape with the XZ plane rotated.
    """
    cosa = tf.reshape(tf.cos(rot_angle), (-1, 1))  # (N, 1)
    sina = tf.reshape(tf.sin(rot_angle), (-1, 1))  # (N, 1)

    raw_1 = tf.concat([cosa, -sina], axis=1)  # (N, 2)
    raw_2 = tf.concat([sina, cosa], axis=1)  # (N, 2)
    # Per-row 2x2 rotation matrices for the XZ plane.
    R = tf.concat(
        (tf.expand_dims(raw_1, axis=1), tf.expand_dims(raw_2, axis=1)),
        axis=1)  # (N, 2, 2)

    pc_temp = tf.reshape(tf.stack([pc[..., 0], pc[..., 2]], axis=-1),
                         ((pc.shape[0], -1, 2)))  # (N, 512, 2)
    pc_temp = tf.matmul(pc_temp, tf.transpose(R, (0, 2, 1)))
    pc_temp = tf.reshape(pc_temp, (pc.shape[:-1] + (2,)))  # (N, 512, 2)

    # Re-interleave rotated x/z with the untouched y and extra channels.
    pc = tf.concat(
        [pc_temp[..., :1], pc[..., 1:2], pc_temp[..., 1:2], pc[..., 3:]],
        axis=-1)

    return pc
class ProposalTargetLayer(tf.keras.layers.Layer):
def __init__(self,
pool_extra_width=1.0,
num_points=512,
reg_fg_thresh=0.55,
cls_fg_thresh=0.6,
cls_bg_thresh=0.45,
cls_bg_thresh_lo=0.05,
fg_ratio=0.5,
roi_per_image=64,
aug_rot_range=18,
hard_bg_ratio=0.8,
roi_fg_aug_times=10):
super().__init__()
self.pool_extra_width = pool_extra_width
self.num_points = num_points
self.reg_fg_thresh = reg_fg_thresh
self.cls_fg_thresh = cls_fg_thresh
self.cls_bg_thresh = cls_bg_thresh
self.cls_bg_thresh_lo = cls_bg_thresh_lo
self.fg_ratio = fg_ratio
self.roi_per_image = roi_per_image
self.aug_rot_range = aug_rot_range
self.hard_bg_ratio = hard_bg_ratio
self.roi_fg_aug_times = roi_fg_aug_times
    def call(self, x):
        """Sample ROIs, pool their points and build RCNN training targets.

        Args:
            x: Tuple (roi_boxes3d (B, M, 7), gt_boxes3d (B, N, 8),
                rpn_xyz, pts_feature).

        Returns:
            Dict of canonically-transformed sampled points/features plus
            per-ROI classification and regression targets, flattened over
            the batch dimension.
        """
        roi_boxes3d, gt_boxes3d, rpn_xyz, pts_feature = x
        batch_rois, batch_gt_of_rois, batch_roi_iou = self.sample_rois_for_rcnn(
            roi_boxes3d, gt_boxes3d)

        # point cloud pooling
        pooled_features, pooled_empty_flag = \
            roipool3d_utils.roipool3d_gpu(rpn_xyz, pts_feature, batch_rois, self.pool_extra_width,
                                          sampled_pt_num=self.num_points)

        sampled_pts, sampled_features = pooled_features[:, :, :, 0:
                                                        3], pooled_features[:, :, :,
                                                                            3:]

        # data augmentation
        sampled_pts, batch_rois, batch_gt_of_rois = \
            self.data_augmentation(sampled_pts, batch_rois, batch_gt_of_rois)

        # canonical transformation: express points and GT boxes in each
        # ROI's own (center + yaw) frame.
        batch_size = batch_rois.shape[0]
        roi_ry = batch_rois[:, :, 6:7] % (2 * np.pi)
        roi_center = batch_rois[:, :, 0:3]
        sampled_pts = sampled_pts - tf.expand_dims(roi_center,
                                                   axis=2)  # (B, M, 512, 3)
        batch_gt_of_rois = tf.concat([
            batch_gt_of_rois[:, :, :3] - roi_center,
            batch_gt_of_rois[:, :, 3:6], batch_gt_of_rois[:, :, 6:] - roi_ry
        ],
                                     axis=2)

        sampled_pts = tf.unstack(sampled_pts)
        batch_gt_of_rois = tf.unstack(batch_gt_of_rois)
        for k in range(batch_size):
            sampled_pts[k] = rotate_pc_along_y_tf(sampled_pts[k],
                                                  batch_rois[k, :, 6])
            batch_gt_of_rois[k] = tf.squeeze(rotate_pc_along_y_tf(
                tf.expand_dims(batch_gt_of_rois[k], axis=1), roi_ry[k]),
                                             axis=1)
        sampled_pts = tf.stack(sampled_pts)
        batch_gt_of_rois = tf.stack(batch_gt_of_rois)

        # regression valid mask: foreground IoU AND non-empty pooled ROI.
        valid_mask = (pooled_empty_flag == 0)
        reg_valid_mask = tf.cast(
            ((batch_roi_iou > self.reg_fg_thresh) & valid_mask), tf.int64)

        # classification label: 1 = fg, 0 = bg, -1 = invalid/ambiguous.
        batch_cls_label = tf.cast((batch_roi_iou > self.cls_fg_thresh),
                                  tf.int64)
        invalid_mask = (batch_roi_iou > self.cls_bg_thresh) & (
            batch_roi_iou < self.cls_fg_thresh)
        batch_cls_label = tf.where(
            tf.reduce_any([tf.logical_not(valid_mask), invalid_mask], axis=0),
            -1, batch_cls_label)

        output_dict = {
            'sampled_pts':
                tf.reshape(sampled_pts, (-1, self.num_points, 3)),
            'pts_feature':
                tf.reshape(sampled_features,
                           (-1, self.num_points, sampled_features.shape[3])),
            'cls_label':
                tf.reshape(batch_cls_label, (-1)),
            'reg_valid_mask':
                tf.reshape(reg_valid_mask, (-1)),
            'gt_of_rois':
                tf.reshape(batch_gt_of_rois, (-1, 7)),
            'gt_iou':
                tf.reshape(batch_roi_iou, (-1)),
            'roi_boxes3d':
                tf.reshape(batch_rois, (-1, 7))
        }

        return output_dict
def sample_rois_for_rcnn(self, roi_boxes3d, gt_boxes3d):
"""
Args:
roi_boxes3d: (B, M, 7)
gt_boxes3d: (B, N, 8) [x, y, z, h, w, l, ry, cls]
Returns:
batch_rois: (B, N, 7)
batch_gt_of_rois: (B, N, 8)
batch_roi_iou: (B, N)
"""
batch_size = roi_boxes3d.shape[0]
fg_rois_per_image = int(np.round(self.fg_ratio * self.roi_per_image))
batch_rois, batch_gt_of_rois, batch_roi_iou = [], [], []
for idx in range(batch_size):
cur_roi, cur_gt = roi_boxes3d[idx], gt_boxes3d[idx]
k = cur_gt.__len__() - 1
while tf.reduce_sum(cur_gt[k]) == 0:
k -= 1
cur_gt = cur_gt[:k + 1]
# include gt boxes in the candidate rois
iou3d = iou_3d(cur_roi.numpy()[:, [0, 1, 2, 5, 3, 4, 6]],
cur_gt[:,
0:7].numpy()[:,
[0, 1, 2, 5, 3, 4, 6]]) # (M, N)
iou3d = tf.constant(iou3d)
gt_assignment = tf.argmax(iou3d, axis=1)
max_overlaps = tf.gather(iou3d, gt_assignment, batch_dims=1)
# sample fg, easy_bg, hard_bg
fg_thresh = min(self.reg_fg_thresh, self.cls_fg_thresh)
fg_inds = tf.reshape(tf.where((max_overlaps >= fg_thresh)), (-1))
# TODO: this will mix the fg and bg when CLS_BG_THRESH_LO < iou < CLS_BG_THRESH
# fg_inds = tf.concat((fg_inds, roi_assignment), axis=0) # consider the roi which has max_iou with gt as fg
easy_bg_inds = tf.reshape(
tf.where((max_overlaps < self.cls_bg_thresh_lo)), (-1))
hard_bg_inds = tf.reshape(
tf.where((max_overlaps < self.cls_bg_thresh) &
(max_overlaps >= self.cls_bg_thresh_lo)), (-1))
fg_num_rois = len(fg_inds.shape)
bg_num_rois = len(hard_bg_inds.shape) + len(easy_bg_inds.shape)
if fg_num_rois > 0 and bg_num_rois > 0:
# sampling fg
fg_rois_per_this_image = min(fg_rois_per_image, fg_num_rois)
rand_num = tf.constant(np.random.permutation(fg_num_rois),
dtype=tf.int64)
fg_inds = tf.gather(fg_inds, rand_num[:fg_rois_per_this_image])
# sampling bg
bg_rois_per_this_image = self.roi_per_image - fg_rois_per_this_image
bg_inds = self.sample_bg_inds(hard_bg_inds, easy_bg_inds,
bg_rois_per_this_image)
elif fg_num_rois > 0 and bg_num_rois == 0:
# sampling fg
rand_num = np.floor(
np.random.rand(self.roi_per_image) * fg_num_rois)
rand_num = tf.constant(rand_num, dtype=tf.int64)
fg_inds = fg_inds[rand_num]
fg_rois_per_this_image = self.roi_per_image
bg_rois_per_this_image = 0
elif bg_num_rois > 0 and fg_num_rois == 0:
# sampling bg
bg_rois_per_this_image = self.roi_per_image
bg_inds = self.sample_bg_inds(hard_bg_inds, easy_bg_inds,
bg_rois_per_this_image)
fg_rois_per_this_image = 0
else:
import pdb
pdb.set_trace()
raise NotImplementedError
# augment the rois by noise
roi_list, roi_iou_list, roi_gt_list = [], [], []
if fg_rois_per_this_image > 0:
fg_rois_src = tf.gather(cur_roi, fg_inds)
gt_of_fg_rois = tf.gather(cur_gt,
tf.gather(gt_assignment, fg_inds))
iou3d_src = tf.gather(max_overlaps, fg_inds)
fg_rois, fg_iou3d = self.aug_roi_by_noise_torch(
fg_rois_src,
gt_of_fg_rois,
iou3d_src,
aug_times=self.roi_fg_aug_times)
roi_list.append(fg_rois)
roi_iou_list.append(fg_iou3d)
roi_gt_list.append(gt_of_fg_rois)
if bg_rois_per_this_image > 0:
bg_rois_src = tf.gather(cur_roi, bg_inds)
gt_of_bg_rois = tf.gather(cur_gt,
tf.gather(gt_assignment, bg_inds))
iou3d_src = tf.gather(max_overlaps, bg_inds)
aug_times = 1 if self.roi_fg_aug_times > 0 else 0
bg_rois, bg_iou3d = self.aug_roi_by_noise_torch(
bg_rois_src, gt_of_bg_rois, iou3d_src, aug_times=aug_times)
roi_list.append(bg_rois)
roi_iou_list.append(bg_iou3d)
roi_gt_list.append(gt_of_bg_rois)
rois = tf.concat(roi_list, axis=0)
iou_of_rois = tf.concat(roi_iou_list, axis=0)
gt_of_rois = tf.concat(roi_gt_list, axis=0)
batch_rois.append(rois)
batch_gt_of_rois.append(gt_of_rois)
batch_roi_iou.append(iou_of_rois)
return tf.stack(batch_rois), tf.stack(batch_gt_of_rois), tf.stack(
batch_roi_iou)
def sample_bg_inds(self, hard_bg_inds, easy_bg_inds,
bg_rois_per_this_image):
if len(hard_bg_inds.shape) > 0 and len(easy_bg_inds.shape) > 0:
hard_bg_rois_num = int(bg_rois_per_this_image * self.hard_bg_ratio)
easy_bg_rois_num = bg_rois_per_this_image - hard_bg_rois_num
# sampling hard bg
rand_idx = tf.constant(np.random.randint(low=0,
high=len(
hard_bg_inds.shape),
size=(hard_bg_rois_num,)),
dtype=tf.int64)
hard_bg_inds = tf.gather(hard_bg_inds, rand_idx)
# sampling easy bg
rand_idx = tf.constant(np.random.randint(low=0,
high=len(
easy_bg_inds.shape),
size=(easy_bg_rois_num,)),
dtype=tf.int64)
easy_bg_inds = tf.gather(easy_bg_inds, rand_idx)
bg_inds = tf.concat([hard_bg_inds, easy_bg_inds], axis=0)
elif len(hard_bg_inds.shape) > 0 and len(easy_bg_inds.shape) == 0:
hard_bg_rois_num = bg_rois_per_this_image
# sampling hard bg
rand_idx = tf.constant(np.random.randint(low=0,
high=len(
hard_bg_inds.shape),
size=(hard_bg_rois_num,)),
dtype=tf.int64)
bg_inds = tf.gather(hard_bg_inds, rand_idx)
elif len(hard_bg_inds.shape) == 0 and len(easy_bg_inds.shape) > 0:
easy_bg_rois_num = bg_rois_per_this_image
# sampling easy bg
rand_idx = tf.constant(np.random.randint(low=0,
high=len(
easy_bg_inds.shape),
size=(easy_bg_rois_num,)),
dtype=tf.int64)
bg_inds = tf.gather(easy_bg_inds, rand_idx)
else:
raise NotImplementedError
return bg_inds
    def aug_roi_by_noise_torch(self,
                               roi_boxes3d,
                               gt_boxes3d,
                               iou3d_src,
                               aug_times=10):
        """Jitter each ROI box until it still overlaps its GT box enough.

        For every ROI, repeatedly perturb the box (or keep the original with
        p=0.2) until the 3D IoU with the matching GT box reaches the
        foreground threshold or `aug_times` attempts are exhausted.

        Note: the name says "torch" (inherited from PointRCNN) but this is a
        TensorFlow eager implementation using .numpy() interop.

        Args:
            roi_boxes3d: (N, 7) ROI boxes [x, y, z, h, w, l, ry].
            gt_boxes3d: (N, 7) GT box assigned to each ROI.
            iou3d_src: (N,) IoU of each original ROI with its GT box.
            aug_times: max number of jitter attempts per ROI.

        Returns:
            (aug_boxes, iou_of_rois): (N, 7) jittered boxes and their (N,)
            IoUs with the GT boxes.
        """
        pos_thresh = min(self.reg_fg_thresh, self.cls_fg_thresh)
        aug_boxes = []
        iou_of_rois = []
        for k in range(roi_boxes3d.shape[0]):
            temp_iou = cnt = 0
            roi_box3d = roi_boxes3d[k]
            gt_box3d = tf.reshape(gt_boxes3d[k], (1, 7))
            aug_box3d = roi_box3d
            keep = True
            while temp_iou < pos_thresh and cnt < aug_times:
                if np.random.rand() < 0.2:
                    aug_box3d = roi_box3d  # p=0.2 to keep the original roi box
                    keep = True
                else:
                    aug_box3d = self.random_aug_box3d(roi_box3d)
                    keep = False
                aug_box3d = tf.reshape(aug_box3d, ((1, 7)))
                # iou_3d expects [x, y, z, l, h, w, ry] ordering, hence the
                # column permutation before the numpy handoff.
                iou3d = iou_3d(aug_box3d.numpy()[:, [0, 1, 2, 5, 3, 4, 6]],
                               gt_box3d.numpy()[:, [0, 1, 2, 5, 3, 4, 6]])
                iou3d = tf.constant(iou3d)
                temp_iou = iou3d[0][0]
                cnt += 1
            aug_boxes.append(tf.reshape(aug_box3d, (-1)))
            # If we never jittered (aug_times == 0) or kept the original box,
            # the pre-computed IoU is still valid; otherwise use the fresh one.
            if cnt == 0 or keep:
                iou_of_rois.append(iou3d_src[k])
            else:
                iou_of_rois.append(temp_iou)
        return tf.stack(aug_boxes), tf.stack(iou_of_rois)
@staticmethod
def random_aug_box3d(box3d):
"""
Random shift, scale, orientation.
Args:
box3d: (7) [x, y, z, h, w, l, ry]
"""
# pos_range, hwl_range, angle_range, mean_iou
range_config = [[0.2, 0.1, np.pi / 12,
0.7], [0.3, 0.15, np.pi / 12, 0.6],
[0.5, 0.15, np.pi / 9,
0.5], [0.8, 0.15, np.pi / 6, 0.3],
[1.0, 0.15, np.pi / 3, 0.2]]
idx = tf.constant(np.random.randint(low=0,
high=len(range_config),
size=(1,))[0],
dtype=tf.int64)
pos_shift = ((tf.random.uniform(
(3,)) - 0.5) / 0.5) * range_config[idx][0]
hwl_scale = ((tf.random.uniform(
(3,)) - 0.5) / 0.5) * range_config[idx][1] + 1.0
angle_rot = ((tf.random.uniform(
(1,)) - 0.5) / 0.5) * range_config[idx][2]
aug_box3d = tf.concat([
box3d[0:3] + pos_shift, box3d[3:6] * hwl_scale,
box3d[6:7] + angle_rot
],
axis=0)
return aug_box3d
def data_augmentation(self, pts, rois, gt_of_rois):
"""
Args:
pts: (B, M, 512, 3)
rois: (B, M. 7)
gt_of_rois: (B, M, 7)
"""
batch_size, boxes_num = pts.shape[0], pts.shape[1]
# rotation augmentation
angles = (tf.random.uniform(
(batch_size, boxes_num)) - 0.5 / 0.5) * (np.pi / self.aug_rot_range)
# calculate gt alpha from gt_of_rois
temp_x, temp_z, temp_ry = gt_of_rois[:, :,
0], gt_of_rois[:, :,
2], gt_of_rois[:, :,
6]
temp_beta = tf.atan2(temp_z, temp_x)
gt_alpha = -tf.sign(
temp_beta) * np.pi / 2 + temp_beta + temp_ry # (B, M)
temp_x, temp_z, temp_ry = rois[:, :, 0], rois[:, :, 2], rois[:, :, 6]
temp_beta = tf.atan2(temp_z, temp_x)
roi_alpha = -tf.sign(
temp_beta) * np.pi / 2 + temp_beta + temp_ry # (B, M)
pts = tf.unstack(pts)
gt_of_rois = tf.unstack(gt_of_rois)
rois = tf.unstack(rois)
for k in range(batch_size):
pts[k] = rotate_pc_along_y_tf(pts[k], angles[k])
gt_of_rois[k] = tf.squeeze(rotate_pc_along_y_tf(
tf.expand_dims(gt_of_rois[k], axis=1), angles[k]),
axis=1)
rois[k] = tf.squeeze(rotate_pc_along_y_tf(
tf.expand_dims(rois[k], axis=1), angles[k]),
axis=1)
pts = tf.stack(pts)
gt_of_rois = tf.stack(gt_of_rois)
rois = tf.stack(rois)
# calculate the ry after rotation
temp_x, temp_z = gt_of_rois[:, :, :1], gt_of_rois[:, :, 2:3]
temp_beta = tf.atan2(temp_z, temp_x)
gt_of_rois = tf.concat([
gt_of_rois[:, :, :6],
tf.sign(temp_beta) * np.pi / 2 + tf.expand_dims(gt_alpha, axis=-1) -
temp_beta
],
axis=2)
temp_x, temp_z = rois[:, :, :1], rois[:, :, 2:3]
temp_beta = tf.atan2(temp_z, temp_x)
rois = tf.concat([
rois[:, :, :6],
tf.sign(temp_beta) * np.pi / 2 +
tf.expand_dims(roi_alpha, axis=-1) - temp_beta
],
axis=2)
# scaling augmentation
scales = 1 + ((tf.random.uniform(
(batch_size, boxes_num)) - 0.5) / 0.5) * 0.05
pts = pts * tf.expand_dims(tf.expand_dims(scales, axis=2), axis=3)
gt_of_rois = tf.concat([
gt_of_rois[:, :, :6] * tf.expand_dims(scales, axis=2),
gt_of_rois[:, :, 6:]
],
axis=2)
rois = tf.concat(
[rois[:, :, :6] * tf.expand_dims(scales, axis=2), rois[:, :, 6:]],
axis=2)
# flip augmentation
flip_flag = tf.sign(tf.random.uniform((batch_size, boxes_num, 1)) - 0.5)
pts = tf.concat([
pts[:, :, :, :1] * tf.expand_dims(flip_flag, axis=3), pts[:, :, :,
1:]
],
axis=3)
gt_of_rois = tf.concat(
[gt_of_rois[:, :, :1] * flip_flag, gt_of_rois[:, :, 1:]], axis=2)
# flip orientation: ry > 0: pi - ry, ry < 0: -pi - ry
src_ry = gt_of_rois[:, :, 6:7]
ry = tf.cast((flip_flag == 1), tf.float32) * src_ry + tf.cast(
(flip_flag == -1), tf.float32) * (tf.sign(src_ry) * np.pi - src_ry)
gt_of_rois = tf.concat([gt_of_rois[:, :, :6], ry], axis=2)
rois = tf.concat([rois[:, :, :1] * flip_flag, rois[:, :, 1:]], axis=2)
# flip orientation: ry > 0: pi - ry, ry < 0: -pi - ry
src_ry = rois[:, :, 6:7]
ry = tf.cast((flip_flag == 1), tf.float32) * src_ry + tf.cast(
(flip_flag == -1), tf.float32) * (tf.sign(src_ry) * np.pi - src_ry)
rois = tf.concat([rois[:, :, :6], ry], axis=2)
return pts, rois, gt_of_rois
|
[
"noreply@github.com"
] |
noreply@github.com
|
03d4807bf6ae79a977ee60b6b4de35c94aeb6e7f
|
88a5dae03f0304d3fb7add71855d2ddc6d8e28e3
|
/main/ext/__init__.py
|
362e9cace53732e41d9341d5e951472eba630fbc
|
[
"Apache-2.0"
] |
permissive
|
huangpd/Shape
|
eabb59781ac6a055f7b7036fef926023cbcd4882
|
fddbbb765e353584752066f7c839293ebd10c4df
|
refs/heads/master
| 2020-03-26T13:04:22.224367
| 2018-05-10T09:06:10
| 2018-05-10T09:06:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 431
|
py
|
#-*-coding:utf-8-*-
"""Flask extension singletons, initialised against the app elsewhere."""
from flask_bcrypt import Bcrypt
from flask_bootstrap import Bootstrap
from flask_mail import Mail
from flask_login import LoginManager

bcrypt = Bcrypt()
bootstrap = Bootstrap()
mail = Mail()

# Login manager configuration: redirect unauthenticated users to the auth
# blueprint's login view and protect sessions aggressively.
login_manager = LoginManager()
login_manager.login_view = "auth.login_index"
login_manager.session_protection = "strong"
login_manager.login_message = "登录以获得更多功能"
login_manager.login_message_category = "info"
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
dbd6c32ba34d3fe4be7a38d40e085d64dc1c2ffc
|
efa2de2e0ca886a22be34c40cb4b4d397aa05015
|
/AGE/link_pred_ddi.py
|
47fa5e9d4c3f629a78f1356612226821a403eb4d
|
[] |
no_license
|
chuanqichen/cs224w
|
34c522d95c37089298a03ff2fd113c5b613036cd
|
aeebce6810221bf04a9a14d8d4369be76691b608
|
refs/heads/main
| 2023-03-18T18:03:16.468040
| 2021-03-21T18:55:48
| 2021-03-21T18:55:56
| 345,116,683
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,490
|
py
|
from __future__ import division
from __future__ import print_function
import os, sys
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=RuntimeWarning)
warnings.simplefilter(action='ignore', category=UserWarning)
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
# For replicating the experiments
SEED = 42
import argparse
import time
import random
import numpy as np
import scipy.sparse as sp
import torch
np.random.seed(SEED)
torch.manual_seed(SEED)
from torch import optim
import torch.nn.functional as F
from model import LinTrans, LogReg
from optimizer import loss_function
from utils import *
from sklearn.cluster import SpectralClustering, KMeans
from clustering_metric import clustering_metrics
from tqdm import tqdm
from sklearn.preprocessing import normalize, MinMaxScaler
from sklearn import metrics
import matplotlib.pyplot as plt
from ogb.linkproppred import PygLinkPropPredDataset, Evaluator
import torch_geometric.transforms as T
# Command-line configuration for the AGE link-prediction run.
# NOTE: parse_args() executes at import time, so importing this module
# requires valid CLI arguments.
parser = argparse.ArgumentParser()
parser.add_argument('--gnnlayers', type=int, default=1, help="Number of gnn layers")
parser.add_argument('--linlayers', type=int, default=1, help="Number of hidden layers")
parser.add_argument('--epochs', type=int, default=400, help='Number of epochs to train.')
parser.add_argument('--dims', type=int, default=[500], help='Number of units in hidden layer 1.')
parser.add_argument('--lr', type=float, default=0.001, help='Initial learning rate.')
parser.add_argument('--upth_st', type=float, default=0.0011, help='Upper Threshold start.')
parser.add_argument('--lowth_st', type=float, default=0.1, help='Lower Threshold start.')
parser.add_argument('--upth_ed', type=float, default=0.001, help='Upper Threshold end.')
parser.add_argument('--lowth_ed', type=float, default=0.5, help='Lower Threshold end.')
parser.add_argument('--upd', type=int, default=10, help='Update epoch.')
parser.add_argument('--bs', type=int, default=10000, help='Batchsize.')
parser.add_argument('--dataset', type=str, default='wiki', help='type of dataset.')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='Disables CUDA training.')
args = parser.parse_args()
# Use the GPU only when available and not explicitly disabled.
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda is True:
    print('Using GPU')
    torch.cuda.manual_seed(SEED)
    # NOTE(review): setting CUDA_VISIBLE_DEVICES *after* torch.cuda has been
    # queried usually has no effect on device selection — confirm intent.
    os.environ["CUDA_VISIBLE_DEVICES"] = "5"
def clustering(Cluster, feature, true_labels):
    """Cluster on the feature-similarity matrix and score the result.

    Args:
        Cluster: a fitted-on-call clusterer exposing fit_predict (e.g. KMeans).
        feature: (N, D) embedding matrix.
        true_labels: (N,) ground-truth cluster labels.

    Returns:
        (negated Davies-Bouldin index, accuracy, NMI, adjusted Rand index).
    """
    similarity = np.matmul(feature, np.transpose(feature))
    labels_pred = Cluster.fit_predict(similarity)
    cm = clustering_metrics(true_labels, labels_pred)
    # Negate so that "higher is better" like the other metrics.
    db_index = -metrics.davies_bouldin_score(similarity, labels_pred)
    acc, nmi, adj = cm.evaluationClusterModelFromLabel(tqdm)
    return db_index, acc, nmi, adj
def update_similarity(z, upper_threshold, lower_treshold, pos_num, neg_num):
    """Recompute positive/negative training pairs from pairwise similarities.

    Note: the pos_num/neg_num arguments are recomputed from the thresholds
    below, so the passed-in values are ignored (kept for interface
    compatibility with callers).

    Args:
        z: (N, D) embedding matrix.
        upper_threshold: fraction of pairs treated as positives.
        lower_treshold: 1 minus the fraction of pairs treated as negatives.

    Returns:
        (pos_inds, neg_inds): flat indices into the N*N similarity matrix.
    """
    sim = np.matmul(z, np.transpose(z)).reshape([-1, ])
    pos_num = round(upper_threshold * len(sim))
    neg_num = round((1 - lower_treshold) * len(sim))
    # argpartition gives the top/bottom k in O(n) without a full sort.
    top_inds = np.argpartition(-sim, pos_num)[:pos_num]
    bottom_inds = np.argpartition(sim, neg_num)[:neg_num]
    return np.array(top_inds), np.array(bottom_inds)
def update_threshold(upper_threshold, lower_treshold, up_eta, low_eta):
    """Shift both pair-selection thresholds by their per-update step sizes."""
    return upper_threshold + up_eta, lower_treshold + low_eta
def get_preds(emb, adj_orig, edges):
    """Score candidate edges by the sigmoid of the embedding dot product.

    Args:
        emb: (N, D) node embedding matrix.
        adj_orig: unused (kept for interface compatibility with callers).
        edges: iterable of (u, v) index pairs.

    Returns:
        torch.FloatTensor of one sigmoid score per edge.
    """
    scores = np.dot(emb, emb.T)
    preds = [1.0 / (1.0 + np.exp(-scores[e[0], e[1]])) for e in edges]
    return torch.FloatTensor(preds)
def gae_for(args):
    """Train an AGE-style embedding model for link prediction on ogbl-ddi.

    Loads the dataset, smooths the (trivial all-ones) node features with the
    normalised adjacency, then trains a linear encoder with adaptively
    re-selected positive/negative pairs, evaluating Hits@K periodically.

    Args:
        args: parsed argparse namespace (see the module-level parser).
    """
    print("Using {} dataset".format(args.dataset))
    dataset = PygLinkPropPredDataset(name='ogbl-ddi',
                                     transform=T.ToDense())
    data = dataset[0]
    adj = data.adj.numpy()
    adj = sp.csr_matrix(adj)
    n = adj.shape[0]
    # ogbl-ddi has no node features; use a constant one-dimensional feature.
    features = np.ones((n, 1))
    #split_edge = dataset.get_edge_split()
    n_nodes, feat_dim = features.shape
    dims = [feat_dim] + args.dims
    print("Model dims", dims)
    layers = args.linlayers
    # Store original adjacency matrix (without diagonal entries) for later
    print('adjacency shape', adj.shape)
    adj = adj - sp.dia_matrix((adj.diagonal()[np.newaxis, :], [0]), shape=adj.shape)
    adj.eliminate_zeros()
    adj_orig = adj
    split_edge = dataset.get_edge_split()
    val_edges = split_edge['valid']['edge']
    val_edges_false = split_edge['valid']['edge_neg']
    test_edges = split_edge['test']['edge']
    test_edges_false = split_edge['test']['edge_neg']
    train_edges = split_edge['train']['edge']
    # Keep only the training edges in the adjacency used for smoothing.
    adj_train = mask_test_edges_ddi(adj, train_edges)
    adj = adj_train
    n = adj.shape[0]
    print('feature shape', features.shape)
    adj_norm_s = preprocess_graph(adj, args.gnnlayers, norm='sym', renorm=True)
    sm_fea_s = sp.csr_matrix(features).toarray()
    print('Laplacian Smoothing...')
    # Repeatedly propagate features with the normalised adjacency (AGE's
    # Laplacian smoothing filter), one pass per GNN layer.
    for a in adj_norm_s:
        sm_fea_s = a.dot(sm_fea_s)
    adj_1st = (adj + sp.eye(n)).toarray()
    adj_label = torch.FloatTensor(adj_1st)
    model = LinTrans(layers, dims)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    sm_fea_s = torch.FloatTensor(sm_fea_s)
    adj_label = adj_label.reshape([-1,])
    print("sm_fea_s shape", sm_fea_s.shape)
    print("adj_label shape", adj_label.shape)
    if args.cuda:
        model.cuda()
        inx = sm_fea_s.cuda()
        adj_label = adj_label.cuda()
    else:
        inx = sm_fea_s
    pos_num = len(adj.indices)
    neg_num = n_nodes*n_nodes-pos_num
    print("Num Pos Samples", pos_num)
    print("Num Neg Samples", neg_num)
    # Per-update increments that linearly move the thresholds from their
    # start to their end values over the course of training.
    up_eta = (args.upth_ed - args.upth_st) / (args.epochs/args.upd)
    low_eta = (args.lowth_ed - args.lowth_st) / (args.epochs/args.upd)
    pos_inds, neg_inds = update_similarity(normalize(sm_fea_s.numpy()), args.upth_st, args.lowth_st, pos_num, neg_num)
    print("pos_inds shape", pos_inds.shape)
    print("neg_inds shape", neg_inds.shape)
    upth, lowth = update_threshold(args.upth_st, args.lowth_st, up_eta, low_eta)
    bs = min(args.bs, len(pos_inds))
    length = len(pos_inds)
    if args.cuda:
        pos_inds_cuda = torch.LongTensor(pos_inds).cuda()
    else:
        pos_inds_cuda = torch.LongTensor(pos_inds)
    evaluator = Evaluator(name='ogbl-ddi')
    best_lp = 0.
    print("Batch Size", bs)
    print('Start Training...')
    for epoch in tqdm(range(args.epochs)):
        st, ed = 0, bs
        batch_num = 0
        model.train()
        length = len(pos_inds)
        # Mini-batch over the positive pairs; negatives are resampled per
        # batch from the current negative pool.
        while ( ed <= length ):
            if args.cuda:
                sampled_neg = torch.LongTensor(np.random.choice(neg_inds, size=ed-st)).cuda()
            else:
                sampled_neg = torch.LongTensor(np.random.choice(neg_inds, size=ed-st))
            print("sampled neg shape", sampled_neg.shape)
            print("--------pos inds shape", pos_inds_cuda.shape)
            sampled_inds = torch.cat((pos_inds_cuda[st:ed], sampled_neg), 0)
            print("sampled inds shape", sampled_inds.shape)
            t = time.time()
            optimizer.zero_grad()
            # Flat N*N pair index -> (row, col) node indices.
            xind = sampled_inds // n_nodes
            yind = sampled_inds % n_nodes
            print("xind shape", xind.shape)
            print("yind shape", yind.shape)
            x = torch.index_select(inx, 0, xind)
            y = torch.index_select(inx, 0, yind)
            print("some x", x[:5])
            print("some y", y[:5])
            print("x shape", x.shape)
            print("y shape", y.shape)
            zx = model(x)
            zy = model(y)
            print("zx shape", zx.shape)
            print("zy shape", zy.shape)
            # First half of the batch are positives, second half negatives.
            if args.cuda:
                batch_label = torch.cat((torch.ones(ed-st), torch.zeros(ed-st))).cuda()
            else:
                batch_label = torch.cat((torch.ones(ed-st), torch.zeros(ed-st)))
            batch_pred = model.dcs(zx, zy)
            print("Batch label shape", batch_label.shape)
            print("Batch pred shape", batch_pred.shape)
            loss = loss_function(adj_preds=batch_pred, adj_labels=batch_label, n_nodes=ed-st)
            loss.backward()
            cur_loss = loss.item()
            optimizer.step()
            st = ed
            batch_num += 1
            # Extend the final batch to cover the tail of the positive list.
            if ed < length and ed + bs >= length:
                ed += length - ed
            else:
                ed += bs
        # Every `upd` epochs: refresh the positive/negative pools with the
        # current embeddings, then evaluate link prediction.
        if (epoch + 1) % args.upd == 0:
            model.eval()
            mu = model(inx)
            hidden_emb = mu.cpu().data.numpy()
            upth, lowth = update_threshold(upth, lowth, up_eta, low_eta)
            pos_inds, neg_inds = update_similarity(hidden_emb, upth, lowth, pos_num, neg_num)
            bs = min(args.bs, len(pos_inds))
            if args.cuda:
                pos_inds_cuda = torch.LongTensor(pos_inds).cuda()
            else:
                pos_inds_cuda = torch.LongTensor(pos_inds)
            val_auc, val_ap = get_roc_score(hidden_emb, adj_orig, val_edges, val_edges_false)
            # Track the embedding with the best validation AUC + AP.
            if val_auc + val_ap >= best_lp:
                best_lp = val_auc + val_ap
                best_emb = hidden_emb
            tqdm.write("Epoch: {}, train_loss_gae={:.5f}, time={:.5f}".format(
                epoch + 1, cur_loss, time.time() - t))
            pos_train_edge = train_edges
            pos_valid_edge = val_edges
            neg_valid_edge = val_edges_false
            pos_test_edge = test_edges
            neg_test_edge = test_edges_false
            pos_train_pred = get_preds(hidden_emb, adj_orig, pos_train_edge)
            pos_valid_pred = get_preds(hidden_emb, adj_orig, pos_valid_edge)
            neg_valid_pred = get_preds(hidden_emb, adj_orig, neg_valid_edge)
            pos_test_pred = get_preds(hidden_emb, adj_orig, pos_test_edge)
            neg_test_pred = get_preds(hidden_emb, adj_orig, neg_test_edge)
            results = {}
            # Official OGB Hits@K evaluation at several cutoffs.  NOTE(review):
            # the train split is scored against the *validation* negatives,
            # matching the OGB example code — confirm this is intended.
            for K in [10, 20, 30]:
                evaluator.K = K
                train_hits = evaluator.eval({
                    'y_pred_pos': pos_train_pred,
                    'y_pred_neg': neg_valid_pred,
                })[f'hits@{K}']
                valid_hits = evaluator.eval({
                    'y_pred_pos': pos_valid_pred,
                    'y_pred_neg': neg_valid_pred,
                })[f'hits@{K}']
                test_hits = evaluator.eval({
                    'y_pred_pos': pos_test_pred,
                    'y_pred_neg': neg_test_pred,
                })[f'hits@{K}']
                results[f'Hits@{K}'] = (train_hits, valid_hits, test_hits)
            for key, result in results.items():
                train_hits, valid_hits, test_hits = result
                print(key)
                print(f'Epoch: {epoch:02d}, '
                      f'Loss: {cur_loss:.4f}, '
                      f'Train: {100 * train_hits:.2f}%, '
                      f'Valid: {100 * valid_hits:.2f}%, '
                      f'Test: {100 * test_hits:.2f}%')
                print('---')
    tqdm.write("Optimization Finished!")
    auc_score, ap_score = get_roc_score(best_emb, adj_orig, test_edges, test_edges_false)
    tqdm.write('Test AUC score: ' + str(auc_score))
    tqdm.write('Test AP score: ' + str(ap_score))


if __name__ == '__main__':
    gae_for(args)
|
[
"owhsu@stanford.edu"
] |
owhsu@stanford.edu
|
e7c14001f2ea7bf7b866830cf28b3ffddb0acf8f
|
2f1c9bba6ba14a46f04a861e70dbf6d50d96535b
|
/Map-CS61aBerkeley/tests/08.py
|
c6f01f55e6158ebdbaf8b695866c2ec2a09fb31a
|
[] |
no_license
|
leovcunha/CS_learning_projects
|
8a3ed5ba76ad81a22c7162835b39734726028953
|
4cbd45192738c2850b308b35ee9b0c95de798748
|
refs/heads/master
| 2021-01-20T01:23:02.738739
| 2019-07-02T02:11:07
| 2019-07-02T02:11:07
| 89,267,880
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,929
|
py
|
test = {
'name': 'Problem 8',
'points': 2,
'suites': [
{
'cases': [
{
'answer': '18f4b8f373a149983a060187fb945841',
'choices': [
'a list of restaurants reviewed by the user',
'a list of all possible restaurants',
'a list of ratings for restaurants reviewed by the user'
],
'hidden': False,
'locked': True,
'question': 'In best_predictor, what does the variable reviewed represent?'
},
{
'answer': '6e952a03cc93ab2e76cc6e9be1f58c8e',
'choices': [
'a predictor function, and its r_squared value',
'a predictor function',
'an r_squared value',
'a restaurant'
],
'hidden': False,
'locked': True,
'question': r"""
Given a user, a list of restaurants, and a feature function, what
does find_predictor from Problem 7 return?
"""
},
{
'answer': '6290d50f08bc68e242b1124b49a5e8db',
'choices': [
'the predictor with the highest r_squared',
'the predictor with the lowest r_squared',
'the first predictor in the list',
'an arbitrary predictor'
],
'hidden': False,
'locked': True,
'question': r"""
After getting a list of [predictor, r_squared] pairs,
which predictor should we select?
"""
}
],
'scored': False,
'type': 'concept'
},
{
'cases': [
{
'code': r"""
>>> user = make_user('Cheapskate', [
... make_review('A', 2),
... make_review('B', 5),
... make_review('C', 2),
... make_review('D', 5),
... ])
>>> cluster = [
... make_restaurant('A', [5, 2], [], 4, [
... make_review('A', 5)
... ]),
... make_restaurant('B', [3, 2], [], 2, [
... make_review('B', 5)
... ]),
... make_restaurant('C', [-2, 6], [], 4, [
... make_review('C', 4)
... ]),
... make_restaurant('D', [4, 2], [], 2, [
... make_review('D', 3),
... make_review('D', 4)
... ]),
... ]
>>> fns = [restaurant_price, restaurant_mean_rating]
>>> pred = best_predictor(user, cluster, fns)
>>> [round(pred(r), 5) for r in cluster] # should be a list of decimals
[2.0, 5.0, 2.0, 5.0]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> user = make_user('Cheapskate', [
... make_review('A', 2),
... make_review('B', 5),
... make_review('C', 2),
... make_review('D', 5),
... ])
>>> cluster = [
... make_restaurant('A', [5, 2], [], 4, [
... make_review('A', 5)
... ]),
... make_restaurant('B', [3, 2], [], 2, [
... make_review('B', 5)
... ]),
... make_restaurant('C', [-2, 6], [], 4, [
... make_review('C', 4)
... ]),
... ]
>>> fns = [restaurant_price, restaurant_mean_rating]
>>> pred = best_predictor(user, cluster, fns)
>>> [round(pred(r), 5) for r in cluster]
[2.0, 5.0, 2.0]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> user = make_user('Cheapskate', [
... make_review('A', 2),
... make_review('B', 5),
... make_review('C', 2),
... make_review('D', 5),
... ])
>>> cluster = [
... make_restaurant('A', [5, 2], [], 4, [
... make_review('A', 5)
... ]),
... make_restaurant('B', [3, 2], [], 2, [
... make_review('B', 5)
... ]),
... make_restaurant('C', [-2, 6], [], 4, [
... make_review('C', 4)
... ]),
... ]
>>> fns = [restaurant_mean_rating, restaurant_price]
>>> pred = best_predictor(user, cluster, fns)
>>> [round(pred(r), 5) for r in cluster] # Make sure you're iterating through feature_fns!
[2.0, 5.0, 2.0]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> user = make_user('Cheapskate', [
... make_review('A', 2),
... make_review('B', 5),
... make_review('C', 2),
... make_review('D', 5),
... ])
>>> cluster = [
... make_restaurant('A', [5, 2], [], 4, [
... make_review('A', 5)
... ]),
... make_restaurant('B', [3, 2], [], 2, [
... make_review('B', 5)
... ]),
... make_restaurant('C', [-2, 6], [], 4, [
... make_review('C', 4)
... ]),
... make_restaurant('E', [1, 2], [], 4, [
... make_review('E', 4)
... ]),
... ]
>>> fns = [restaurant_mean_rating, restaurant_price]
>>> pred = best_predictor(user, cluster, fns) # Make sure you're only using user-reviewed restaurants!
>>> [round(pred(r), 5) for r in cluster]
[2.0, 5.0, 2.0, 2.0]
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': r"""
>>> import tests.test_functions as test
>>> from recommend import *
""",
'teardown': '',
'type': 'doctest'
},
{
'cases': [
{
'code': r"""
>>> user = make_user('Cheapskate', [
... make_review('A', 2),
... make_review('B', 5),
... make_review('C', 2),
... make_review('D', 5),
... ])
>>> cluster = [
... make_restaurant('A', [5, 2], [], 4, [
... make_review('A', 5)
... ]),
... make_restaurant('B', [3, 2], [], 2, [
... make_review('B', 5)
... ]),
... make_restaurant('C', [-2, 6], [], 4, [
... make_review('C', 4)
... ]),
... make_restaurant('D', [4, 2], [], 2, [
... make_review('D', 3),
... make_review('D', 4)
... ]),
... ]
>>> fns = [restaurant_price, restaurant_mean_rating]
>>> pred = best_predictor(user, cluster, fns)
>>> # Hint: Price is a perfect predictor of this user's ratings,
>>> # so the predicted ratings should equal the user's ratings
>>> [round(pred(r), 5) for r in cluster] # should be a list of decimals
[2.0, 5.0, 2.0, 5.0]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> user = make_user('Cheapskate', [
... make_review('A', 2),
... make_review('B', 5),
... make_review('C', 2),
... make_review('D', 5),
... ])
>>> cluster = [
... make_restaurant('A', [5, 2], [], 4, [
... make_review('A', 5)
... ]),
... make_restaurant('B', [3, 2], [], 2, [
... make_review('B', 5)
... ]),
... make_restaurant('C', [-2, 6], [], 4, [
... make_review('C', 4)
... ]),
... ]
>>> fns = [restaurant_price, restaurant_mean_rating]
>>> pred = best_predictor(user, cluster, fns)
>>> [round(pred(r), 5) for r in cluster]
[2.0, 5.0, 2.0]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> user = make_user('Cheapskate', [
... make_review('A', 2),
... make_review('B', 5),
... make_review('C', 2),
... make_review('D', 5),
... ])
>>> cluster = [
... make_restaurant('A', [5, 2], [], 4, [
... make_review('A', 5)
... ]),
... make_restaurant('B', [3, 2], [], 2, [
... make_review('B', 5)
... ]),
... make_restaurant('C', [-2, 6], [], 4, [
... make_review('C', 4)
... ]),
... ]
>>> fns = [restaurant_mean_rating, restaurant_price]
>>> pred = best_predictor(user, cluster, fns)
>>> [round(pred(r), 5) for r in cluster] # Make sure you're iterating through feature_fns!
[2.0, 5.0, 2.0]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> user = make_user('Cheapskate', [
... make_review('A', 2),
... make_review('B', 5),
... make_review('C', 2),
... make_review('D', 5),
... ])
>>> cluster = [
... make_restaurant('A', [5, 2], [], 4, [
... make_review('A', 5)
... ]),
... make_restaurant('B', [3, 2], [], 2, [
... make_review('B', 5)
... ]),
... make_restaurant('C', [-2, 6], [], 4, [
... make_review('C', 4)
... ]),
... make_restaurant('E', [1, 2], [], 4, [
... make_review('E', 4)
... ]),
... ]
>>> fns = [restaurant_mean_rating, restaurant_price]
>>> pred = best_predictor(user, cluster, fns) # Make sure you're only using user-reviewed restaurants!
>>> [round(pred(r), 5) for r in cluster]
[2.0, 5.0, 2.0, 2.0]
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': r"""
>>> import tests.test_functions as test
>>> import recommend
>>> test.swap_implementations(recommend)
>>> from recommend import *
""",
'teardown': r"""
>>> test.restore_implementations(recommend)
""",
'type': 'doctest'
}
]
}
|
[
"lvcunha@gmail.com"
] |
lvcunha@gmail.com
|
d09ca8a14c8cab0258c427bada63637982b2c608
|
ac0844cbd6258ffc1b15cdde7136a07ef28cb8c1
|
/7_2.py
|
3985ebb570a4ff97b10a6dd4aba43c6eb312940e
|
[] |
no_license
|
nikita1998ivanov/Lab-rabota
|
42948394a6cdb4eaffb8b6f5e801225ba7b8ef80
|
0bb6c283465b11218a5ec24de7645bcbe454754f
|
refs/heads/master
| 2022-06-18T03:03:09.949139
| 2020-05-06T08:55:25
| 2020-05-06T08:55:25
| 261,668,414
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 213
|
py
|
# Group student records by the integer key in the first CSV column,
# then print the value groups in sorted order.
groups = {}
with open("students.csv") as fh:
    next(fh)  # skip the header row
    for row in fh:
        key, student_name, age, birth = row.split(";")
        groups.setdefault(int(key), []).append((student_name, int(age), birth))

print(sorted(groups.values()))
|
[
"noreply@github.com"
] |
noreply@github.com
|
6ff0d59f4790561ec2ce92b3a868755af76b678b
|
3225f11370c581f95e4a5d123ab03eb7de53c6b3
|
/Face-Recognition/face_reco_video.py
|
f174fa3c4a616303402acdefed3781ca94f2554f
|
[] |
no_license
|
jaseem61/python_practice
|
5684ae2f3925c54b2d34666f2531cb99acc96609
|
7bd8ca1b72acb5e78e2c0a451ef8e339417fcff7
|
refs/heads/master
| 2022-04-22T04:15:15.140748
| 2020-04-18T07:10:04
| 2020-04-18T07:10:04
| 231,757,850
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,823
|
py
|
"""Webcam face recognition: match live faces against a directory of
known faces (one subdirectory per person) and draw labelled boxes."""
import face_recognition
import os
import cv2
import keyboard

known_faces_dir = "known_faces"
count = 1
tolerance = 0.6
frame_thickness = 3
font_thickness = 2
model = "hog"

print("loading known faces")
known_faces = []
known_names = []
for name in os.listdir(known_faces_dir):
    for filename in os.listdir(f"{known_faces_dir}/{name}"):
        # BUG FIX: the load path must use the actual file name (the original
        # had a literal "(unknown)" placeholder), and face_encodings returns
        # a LIST of encodings — store the first encoding, not the list.
        image = face_recognition.load_image_file(
            f"{known_faces_dir}/{name}/{filename}")
        encodings = face_recognition.face_encodings(image)
        if encodings:
            known_faces.append(encodings[0])
            known_names.append(name)

print("processing unknown faces")
cap = cv2.VideoCapture(0)
while True:
    ret, image = cap.read()
    locations = face_recognition.face_locations(image, model=model)
    encodings = face_recognition.face_encodings(image, locations)
    for face_encoding, face_location in zip(encodings, locations):
        count = count + 1
        results = face_recognition.compare_faces(known_faces, face_encoding,
                                                 tolerance=0.3)
        match = None
        # BUG FIX: bool(results) is truthy for ANY non-empty result list and
        # known_names[0] ignored which face matched; look for an actual True
        # entry and use its index to pick the matching name.
        if True in results:
            print(count)
            match = known_names[results.index(True)]
            print(f"Match found:{match}")
            # Box around the face: locations are (top, right, bottom, left).
            top_left = (face_location[3], face_location[0])
            bottom_right = (face_location[1], face_location[2])
            color = [0, 255, 0]
            cv2.rectangle(image, top_left, bottom_right, color,
                          frame_thickness)
            # Filled label strip under the box.
            top_left = (face_location[3], face_location[2])
            bottom_right = (face_location[1], face_location[2] + 22)
            cv2.rectangle(image, top_left, bottom_right, color, cv2.FILLED)
            cv2.putText(image, match,
                        (face_location[3] + 10, face_location[2] + 15),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0),
                        font_thickness)
    # BUG FIX: show under a fixed window name instead of the leftover
    # `filename` loop variable from the loading phase.
    cv2.imshow("video", image)
    cv2.waitKey(1)
    if keyboard.is_pressed('q'):
        break
|
[
"noreply@github.com"
] |
noreply@github.com
|
c22d3551c3f4ba3d14a4cd5bfa8e93641fd47bd6
|
abca8650e1469641fbfd12cc7c1d33eaffc45c4a
|
/lib/db/format_column_names.py
|
065f11055771b623d570eec195d8030504ac2607
|
[
"MIT"
] |
permissive
|
tunityy/Neon8-Bot
|
2549f2b5fad56b25511289c619bead49cf6be90d
|
3cea7a05356ae5dadd2d7adabbf932bebfb849d8
|
refs/heads/main
| 2023-05-01T07:27:40.644937
| 2021-05-22T23:46:06
| 2021-05-22T23:46:06
| 357,702,062
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,880
|
py
|
from string import capwords
def column_to_text(column_name):
    """Convert snake_case column name(s) to capitalized display text.

    Accepts a single string or an iterable of strings; returns the same
    shape (string in, string out; iterable in, list out).
    """
    if type(column_name) is str:
        return capwords(column_name.replace('_', ' '))
    return [capwords(entry.replace('_', ' ')) for entry in column_name]
def stat_name_ifs(stat):
    """Normalize a user-supplied stat name to its canonical column name.

    Lowercases the input and strips spaces/underscores, then matches it
    against the canonical column names and a table of synonyms (including
    common typos and misspellings).

    Args:
        stat: user input naming a character stat (any type; str() is applied).

    Returns:
        The canonical column name (e.g. 'current_willpower'), or the string
        'Invalid' if nothing matches.
    """
    st = str(stat).lower().replace(' ', '').replace('_', '')
    # column_name = ['hunger', 'humanity', 'stains', 'current_willpower', 'total_willpower', 'superficial_damage', 'aggravated_damage', 'health']
    column_name = ['hunger', 'humanity', 'stains', 'health']
    if st in column_name:
        return st
    # Synonym table; some entries include common or possible typos and
    # misspellings.  Insertion order matters for overlapping synonyms:
    # earlier categories win, matching the original elif chain.
    synonyms = {
        'hunger': ['currenthunger', 'currenthung', 'hung', 'hun', 'hungry', 'hungerdice', 'hungdice',
                   'hd', 'bp', 'bloodpool', 'blooddice', 'bd',
                   'hugn', 'hugner', 'hungre', 'curenthunger', 'curenthung', 'bloop', 'blooppool', 'bloopool'],
        'humanity': ['hum', 'huemanatee', 'humane', 'human', 'humanty', 'humanit', 'humantiy', 'humanaty'],
        'stains': ['stain', 'stian', 'st', 'stians', 'stans'],
        'current_willpower': ['currentwillpower', 'willpower', 'wp', 'currentwp', 'will', 'currentwill', 'currentwp', 'cwill', 'cwp', 'cw', 'willp', 'currentwillp', 'cwillp',
                              'wilpower', 'curentwillpower', 'current', 'curentwill', 'wil', 'currentwilpower', 'curentwilpower', 'wpwr', 'willpwr', 'wllpwr', 'wlpwr'],
        'total_willpower': ['totalwillpower', 'totalwp', 'twp', 'total', 'tot', 'totalwill', 'willpowertotal', 'wptotal', 'willtotal', 'twill', 'tw', 'twillp', 'twillpower',
                            'totalwilpower', 'totalwil', 'tote', 'totlewillpower', 'totlwillpower', 'totwill', 't', 'totwil', 'totwp', 'to', 'twil'],
        'superficial_damage': ['superficialdamage', 'superficial', 'superficialdmg', 'sdmg', 'sdamage', 'sdmg', 'super', 'superdmg',
                               'supre', 'superficaldamage', 'superficaldmg', 'superfical', 'superfishul', 'superfishuldamage', 'superfishuldmg'],
        'aggravated_damage': ['aggravateddamage', 'agg', 'aggravated', 'aggr', 'aggdmg', 'aggrdmg', 'aggravateddmg', 'aggra', 'aggdamage', 'admg', 'adamage',
                              'aggro', 'aggrivated', 'aggrivateddamage', 'aggrivateddmg', 'aggrevated', 'aggrevateddamage', 'aggrevateddmg', 'aggrovated', 'aggrovateddamage', 'aggrovateddmg', 'aggrovateddmg'],
        # BUG FIX: the original list was missing a comma after 'heal', which
        # silently concatenated it with 'heath' into 'healheath', so neither
        # 'heal' nor 'heath' ever matched.
        'health': ['hp', 'hitpoints', 'healthpoints', 'healthbar', 'life', 'heal',
                   'heath', 'healh', 'helth'],
    }
    for canonical, names in synonyms.items():
        if st in names:
            return canonical
    return 'Invalid'
#### ----------------------------------------------------------
### TODO: when it's just a list of one word it actually just comes out as a string. Need to change it to a list?
def stat_names_listifier(stats, words_and_numbs=False):
    """Parse a comma-separated stat list into canonical column names.

    `words_and_numbs` is to differentiate when stats is just names
    ("hunger, stains") or name/value pairs ("hunger 3, stains 1").

    Args:
        stats: sequence of input words (joined with spaces, then split on ', ').
        words_and_numbs: when True, each item ends with an integer value.

    Returns:
        words_and_numbs=False: list of canonical column names, or 'Invalid'.
        words_and_numbs=True: (columns, values) tuple, or 'Invalid' if any
        value is not an integer or any name does not resolve.
    """
    if not words_and_numbs:
        list_stats = ' '.join(stats).split(', ')
        list_of_columns = [stat_name_ifs(term) for term in list_stats]
        # BUG FIX: the original returned ['Invalid'] (a list) for a single
        # unrecognised stat but the string 'Invalid' for multiple; always
        # return the string so callers can test `== 'Invalid'` uniformly.
        if 'Invalid' in list_of_columns:
            return 'Invalid'
        return list_of_columns
    items_to_assess = ' '.join(stats).split(', ')
    # Each item is "<stat name> <value>": name is everything before the last
    # space, value is the final token.
    list_stats = [item.rsplit(' ', 1)[0] for item in items_to_assess]
    values_list = [item.split(' ')[-1] for item in items_to_assess]
    for item in values_list:
        try:
            int(item)
        except (TypeError, ValueError):
            return 'Invalid'
    list_of_columns = [stat_name_ifs(term) for term in list_stats]
    # BUG FIX: the original single-item branch compared the whole LIST to
    # 'Invalid' (always False), so invalid single stats slipped through as
    # (['Invalid'], values).  Checking membership covers both cases.
    if 'Invalid' in list_of_columns:
        return 'Invalid'
    return list_of_columns, values_list
#### ----------------------------------------------------------
|
[
"80991664+tunityy@users.noreply.github.com"
] |
80991664+tunityy@users.noreply.github.com
|
58e9f0902786c9d6ba075f971c789cd992c620a6
|
9334f5334f2da1283f32b08ef99866202b60ae68
|
/learning_logs/models.py
|
2f576215edee0f56da9ac08ece5ee99ed5365952
|
[] |
no_license
|
ArXaHGeL/Learning-Log
|
e033b9b0471185b7bedaa6e3ad2b367e1e7da64f
|
3b43a173b60b624d9c5615804658151c52127577
|
refs/heads/master
| 2023-02-26T15:21:09.499555
| 2021-02-03T12:09:12
| 2021-02-03T12:09:12
| 335,559,775
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 939
|
py
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Topic(models.Model):
    """A topic that the user is learning."""
    # Short label shown in topic lists.
    text = models.CharField(max_length=200)
    # Set automatically when the topic is first saved.
    date_added = models.DateTimeField(auto_now_add=True)
    # Deleting a user cascades to that user's topics.
    owner = models.ForeignKey(User, on_delete=models.CASCADE)
    def __str__(self):
        """Return a string representation of the model."""
        return self.text
class Entry(models.Model):
    """Information learned by the user about a topic."""
    # Deleting a topic cascades to its entries.
    topic = models.ForeignKey(Topic, on_delete=models.CASCADE)
    text = models.TextField()
    # Set automatically when the entry is first saved.
    date_added = models.DateTimeField(auto_now_add=True)
    class Meta:
        # Admin-site plural; avoids the default "entrys".
        verbose_name_plural = 'entries'
    def __str__(self):
        """Return the entry text, truncated to 50 characters with an ellipsis."""
        # Explicit length check.  The old code compared the string to its own
        # 50-char prefix lexicographically, which only worked by accident.
        if len(self.text) <= 50:
            return self.text
        return self.text[0:50] + "..."
|
[
"zenit_dimka@mail.ru"
] |
zenit_dimka@mail.ru
|
8426f5e2a7f3115533abb324288bc031ba59ff53
|
a6e4a6f0a73d24a6ba957277899adbd9b84bd594
|
/sdk/python/pulumi_azure_native/guestconfiguration/outputs.py
|
b1d2bbd2207b1aaffbc05852618b9e218ea32400
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
MisinformedDNA/pulumi-azure-native
|
9cbd75306e9c8f92abc25be3f73c113cb93865e9
|
de974fd984f7e98649951dbe80b4fc0603d03356
|
refs/heads/master
| 2023-03-24T22:02:03.842935
| 2021-03-08T21:16:19
| 2021-03-08T21:16:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 27,362
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._enums import *
__all__ = [
'AssignmentInfoResponse',
'AssignmentReportResourceComplianceReasonResponse',
'AssignmentReportResourceResponse',
'AssignmentReportResponse',
'ConfigurationInfoResponse',
'ConfigurationParameterResponse',
'ConfigurationSettingResponse',
'GuestConfigurationAssignmentPropertiesResponse',
'GuestConfigurationNavigationResponse',
'VMInfoResponse',
]
@pulumi.output_type
class AssignmentInfoResponse(dict):
    """
    Information about the guest configuration assignment.
    """
    def __init__(__self__, *,
                 name: str,
                 configuration: Optional['outputs.ConfigurationInfoResponse'] = None):
        """
        Information about the guest configuration assignment.
        :param str name: Name of the guest configuration assignment.
        :param 'ConfigurationInfoResponseArgs' configuration: Information about the configuration.
        """
        pulumi.set(__self__, "name", name)
        if configuration is not None:
            pulumi.set(__self__, "configuration", configuration)
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Name of the guest configuration assignment.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def configuration(self) -> Optional['outputs.ConfigurationInfoResponse']:
        """
        Information about the configuration.
        """
        return pulumi.get(self, "configuration")
    def _translate_property(self, prop):
        # Map camelCase wire names onto the snake_case Python property names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class AssignmentReportResourceComplianceReasonResponse(dict):
    """
    Reason and code for the compliance of the guest configuration assignment resource.
    """
    def __init__(__self__, *,
                 code: str,
                 phrase: str):
        """
        Reason and code for the compliance of the guest configuration assignment resource.
        :param str code: Code for the compliance of the guest configuration assignment resource.
        :param str phrase: Reason for the compliance of the guest configuration assignment resource.
        """
        pulumi.set(__self__, "code", code)
        pulumi.set(__self__, "phrase", phrase)
    @property
    @pulumi.getter
    def code(self) -> str:
        """
        Code for the compliance of the guest configuration assignment resource.
        """
        return pulumi.get(self, "code")
    @property
    @pulumi.getter
    def phrase(self) -> str:
        """
        Reason for the compliance of the guest configuration assignment resource.
        """
        return pulumi.get(self, "phrase")
    def _translate_property(self, prop):
        # Map camelCase wire names onto the snake_case Python property names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class AssignmentReportResourceResponse(dict):
    """
    The guest configuration assignment resource.
    """
    def __init__(__self__, *,
                 compliance_status: str,
                 properties: Any,
                 resource_id: str,
                 reasons: Optional[Sequence['outputs.AssignmentReportResourceComplianceReasonResponse']] = None):
        """
        The guest configuration assignment resource.
        :param str compliance_status: A value indicating compliance status of the machine for the assigned guest configuration.
        :param Any properties: Properties of a guest configuration assignment resource.
        :param str resource_id: Name of the guest configuration assignment resource setting.
        :param Sequence['AssignmentReportResourceComplianceReasonResponseArgs'] reasons: Compliance reason and reason code for a resource.
        """
        pulumi.set(__self__, "compliance_status", compliance_status)
        pulumi.set(__self__, "properties", properties)
        pulumi.set(__self__, "resource_id", resource_id)
        if reasons is not None:
            pulumi.set(__self__, "reasons", reasons)
    @property
    @pulumi.getter(name="complianceStatus")
    def compliance_status(self) -> str:
        """
        A value indicating compliance status of the machine for the assigned guest configuration.
        """
        return pulumi.get(self, "compliance_status")
    @property
    @pulumi.getter
    def properties(self) -> Any:
        """
        Properties of a guest configuration assignment resource.
        """
        return pulumi.get(self, "properties")
    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> str:
        """
        Name of the guest configuration assignment resource setting.
        """
        return pulumi.get(self, "resource_id")
    @property
    @pulumi.getter
    def reasons(self) -> Optional[Sequence['outputs.AssignmentReportResourceComplianceReasonResponse']]:
        """
        Compliance reason and reason code for a resource.
        """
        return pulumi.get(self, "reasons")
    def _translate_property(self, prop):
        # Map camelCase wire names onto the snake_case Python property names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class AssignmentReportResponse(dict):
    """
    Report of a guest configuration assignment compliance check.
    """
    def __init__(__self__, *,
                 compliance_status: str,
                 end_time: str,
                 id: str,
                 operation_type: str,
                 report_id: str,
                 start_time: str,
                 assignment: Optional['outputs.AssignmentInfoResponse'] = None,
                 resources: Optional[Sequence['outputs.AssignmentReportResourceResponse']] = None,
                 vm: Optional['outputs.VMInfoResponse'] = None):
        """
        :param str compliance_status: A value indicating compliance status of the machine for the assigned guest configuration.
        :param str end_time: End date and time of the guest configuration assignment compliance status check.
        :param str id: ARM resource id of the report for the guest configuration assignment.
        :param str operation_type: Type of report, Consistency or Initial
        :param str report_id: GUID that identifies the guest configuration assignment report under a subscription, resource group.
        :param str start_time: Start date and time of the guest configuration assignment compliance status check.
        :param 'AssignmentInfoResponseArgs' assignment: Configuration details of the guest configuration assignment.
        :param Sequence['AssignmentReportResourceResponseArgs'] resources: The list of resources for which guest configuration assignment compliance is checked.
        :param 'VMInfoResponseArgs' vm: Information about the VM.
        """
        pulumi.set(__self__, "compliance_status", compliance_status)
        pulumi.set(__self__, "end_time", end_time)
        pulumi.set(__self__, "id", id)
        pulumi.set(__self__, "operation_type", operation_type)
        pulumi.set(__self__, "report_id", report_id)
        pulumi.set(__self__, "start_time", start_time)
        if assignment is not None:
            pulumi.set(__self__, "assignment", assignment)
        if resources is not None:
            pulumi.set(__self__, "resources", resources)
        if vm is not None:
            pulumi.set(__self__, "vm", vm)
    @property
    @pulumi.getter(name="complianceStatus")
    def compliance_status(self) -> str:
        """
        A value indicating compliance status of the machine for the assigned guest configuration.
        """
        return pulumi.get(self, "compliance_status")
    @property
    @pulumi.getter(name="endTime")
    def end_time(self) -> str:
        """
        End date and time of the guest configuration assignment compliance status check.
        """
        return pulumi.get(self, "end_time")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        ARM resource id of the report for the guest configuration assignment.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="operationType")
    def operation_type(self) -> str:
        """
        Type of report, Consistency or Initial
        """
        return pulumi.get(self, "operation_type")
    @property
    @pulumi.getter(name="reportId")
    def report_id(self) -> str:
        """
        GUID that identifies the guest configuration assignment report under a subscription, resource group.
        """
        return pulumi.get(self, "report_id")
    @property
    @pulumi.getter(name="startTime")
    def start_time(self) -> str:
        """
        Start date and time of the guest configuration assignment compliance status check.
        """
        return pulumi.get(self, "start_time")
    @property
    @pulumi.getter
    def assignment(self) -> Optional['outputs.AssignmentInfoResponse']:
        """
        Configuration details of the guest configuration assignment.
        """
        return pulumi.get(self, "assignment")
    @property
    @pulumi.getter
    def resources(self) -> Optional[Sequence['outputs.AssignmentReportResourceResponse']]:
        """
        The list of resources for which guest configuration assignment compliance is checked.
        """
        return pulumi.get(self, "resources")
    @property
    @pulumi.getter
    def vm(self) -> Optional['outputs.VMInfoResponse']:
        """
        Information about the VM.
        """
        return pulumi.get(self, "vm")
    def _translate_property(self, prop):
        # Map camelCase wire names onto the snake_case Python property names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ConfigurationInfoResponse(dict):
    """
    Information about the configuration.
    """
    def __init__(__self__, *,
                 name: str,
                 version: str):
        """
        Information about the configuration.
        :param str name: Name of the configuration.
        :param str version: Version of the configuration.
        """
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "version", version)
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Name of the configuration.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def version(self) -> str:
        """
        Version of the configuration.
        """
        return pulumi.get(self, "version")
    def _translate_property(self, prop):
        # Map camelCase wire names onto the snake_case Python property names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ConfigurationParameterResponse(dict):
    """
    Represents a configuration parameter.
    """
    def __init__(__self__, *,
                 name: Optional[str] = None,
                 value: Optional[str] = None):
        """
        Represents a configuration parameter.
        :param str name: Name of the configuration parameter.
        :param str value: Value of the configuration parameter.
        """
        if name is not None:
            pulumi.set(__self__, "name", name)
        if value is not None:
            pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        Name of the configuration parameter.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def value(self) -> Optional[str]:
        """
        Value of the configuration parameter.
        """
        return pulumi.get(self, "value")
    def _translate_property(self, prop):
        # Map camelCase wire names onto the snake_case Python property names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ConfigurationSettingResponse(dict):
    """
    Configuration setting of LCM (Local Configuration Manager).
    """
    def __init__(__self__, *,
                 action_after_reboot: Optional[str] = None,
                 allow_module_overwrite: Optional[str] = None,
                 configuration_mode: Optional[str] = None,
                 configuration_mode_frequency_mins: Optional[float] = None,
                 reboot_if_needed: Optional[str] = None,
                 refresh_frequency_mins: Optional[float] = None):
        """
        Configuration setting of LCM (Local Configuration Manager).
        :param str action_after_reboot: Specifies what happens after a reboot during the application of a configuration. The possible values are ContinueConfiguration and StopConfiguration
        :param str allow_module_overwrite: If true - new configurations downloaded from the pull service are allowed to overwrite the old ones on the target node. Otherwise, false
        :param str configuration_mode: Specifies how the LCM(Local Configuration Manager) actually applies the configuration to the target nodes. Possible values are ApplyOnly, ApplyAndMonitor, and ApplyAndAutoCorrect.
        :param float configuration_mode_frequency_mins: How often, in minutes, the current configuration is checked and applied. This property is ignored if the ConfigurationMode property is set to ApplyOnly. The default value is 15.
        :param str reboot_if_needed: Set this to true to automatically reboot the node after a configuration that requires reboot is applied. Otherwise, you will have to manually reboot the node for any configuration that requires it. The default value is false. To use this setting when a reboot condition is enacted by something other than DSC (such as Windows Installer), combine this setting with the xPendingReboot module.
        :param float refresh_frequency_mins: The time interval, in minutes, at which the LCM checks a pull service to get updated configurations. This value is ignored if the LCM is not configured in pull mode. The default value is 30.
        """
        if action_after_reboot is not None:
            pulumi.set(__self__, "action_after_reboot", action_after_reboot)
        if allow_module_overwrite is not None:
            pulumi.set(__self__, "allow_module_overwrite", allow_module_overwrite)
        if configuration_mode is not None:
            pulumi.set(__self__, "configuration_mode", configuration_mode)
        # Service-side defaults are filled in before the values are stored.
        if configuration_mode_frequency_mins is None:
            configuration_mode_frequency_mins = 15
        if configuration_mode_frequency_mins is not None:
            pulumi.set(__self__, "configuration_mode_frequency_mins", configuration_mode_frequency_mins)
        if reboot_if_needed is None:
            reboot_if_needed = 'False'
        if reboot_if_needed is not None:
            pulumi.set(__self__, "reboot_if_needed", reboot_if_needed)
        if refresh_frequency_mins is None:
            refresh_frequency_mins = 30
        if refresh_frequency_mins is not None:
            pulumi.set(__self__, "refresh_frequency_mins", refresh_frequency_mins)
    @property
    @pulumi.getter(name="actionAfterReboot")
    def action_after_reboot(self) -> Optional[str]:
        """
        Specifies what happens after a reboot during the application of a configuration. The possible values are ContinueConfiguration and StopConfiguration
        """
        return pulumi.get(self, "action_after_reboot")
    @property
    @pulumi.getter(name="allowModuleOverwrite")
    def allow_module_overwrite(self) -> Optional[str]:
        """
        If true - new configurations downloaded from the pull service are allowed to overwrite the old ones on the target node. Otherwise, false
        """
        return pulumi.get(self, "allow_module_overwrite")
    @property
    @pulumi.getter(name="configurationMode")
    def configuration_mode(self) -> Optional[str]:
        """
        Specifies how the LCM(Local Configuration Manager) actually applies the configuration to the target nodes. Possible values are ApplyOnly, ApplyAndMonitor, and ApplyAndAutoCorrect.
        """
        return pulumi.get(self, "configuration_mode")
    @property
    @pulumi.getter(name="configurationModeFrequencyMins")
    def configuration_mode_frequency_mins(self) -> Optional[float]:
        """
        How often, in minutes, the current configuration is checked and applied. This property is ignored if the ConfigurationMode property is set to ApplyOnly. The default value is 15.
        """
        return pulumi.get(self, "configuration_mode_frequency_mins")
    @property
    @pulumi.getter(name="rebootIfNeeded")
    def reboot_if_needed(self) -> Optional[str]:
        """
        Set this to true to automatically reboot the node after a configuration that requires reboot is applied. Otherwise, you will have to manually reboot the node for any configuration that requires it. The default value is false. To use this setting when a reboot condition is enacted by something other than DSC (such as Windows Installer), combine this setting with the xPendingReboot module.
        """
        return pulumi.get(self, "reboot_if_needed")
    @property
    @pulumi.getter(name="refreshFrequencyMins")
    def refresh_frequency_mins(self) -> Optional[float]:
        """
        The time interval, in minutes, at which the LCM checks a pull service to get updated configurations. This value is ignored if the LCM is not configured in pull mode. The default value is 30.
        """
        return pulumi.get(self, "refresh_frequency_mins")
    def _translate_property(self, prop):
        # Map camelCase wire names onto the snake_case Python property names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class GuestConfigurationAssignmentPropertiesResponse(dict):
    """
    Guest configuration assignment properties.
    """
    def __init__(__self__, *,
                 assignment_hash: str,
                 compliance_status: str,
                 last_compliance_status_checked: str,
                 latest_report_id: str,
                 provisioning_state: str,
                 target_resource_id: str,
                 context: Optional[str] = None,
                 guest_configuration: Optional['outputs.GuestConfigurationNavigationResponse'] = None,
                 latest_assignment_report: Optional['outputs.AssignmentReportResponse'] = None):
        """
        Guest configuration assignment properties.
        :param str assignment_hash: Combined hash of the configuration package and parameters.
        :param str compliance_status: A value indicating compliance status of the machine for the assigned guest configuration.
        :param str last_compliance_status_checked: Date and time when last compliance status was checked.
        :param str latest_report_id: Id of the latest report for the guest configuration assignment.
        :param str provisioning_state: The provisioning state, which only appears in the response.
        :param str target_resource_id: VM resource Id.
        :param str context: The source which initiated the guest configuration assignment. Ex: Azure Policy
        :param 'GuestConfigurationNavigationResponseArgs' guest_configuration: The guest configuration to assign.
        :param 'AssignmentReportResponseArgs' latest_assignment_report: Last reported guest configuration assignment report.
        """
        pulumi.set(__self__, "assignment_hash", assignment_hash)
        pulumi.set(__self__, "compliance_status", compliance_status)
        pulumi.set(__self__, "last_compliance_status_checked", last_compliance_status_checked)
        pulumi.set(__self__, "latest_report_id", latest_report_id)
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        pulumi.set(__self__, "target_resource_id", target_resource_id)
        if context is not None:
            pulumi.set(__self__, "context", context)
        if guest_configuration is not None:
            pulumi.set(__self__, "guest_configuration", guest_configuration)
        if latest_assignment_report is not None:
            pulumi.set(__self__, "latest_assignment_report", latest_assignment_report)
    @property
    @pulumi.getter(name="assignmentHash")
    def assignment_hash(self) -> str:
        """
        Combined hash of the configuration package and parameters.
        """
        return pulumi.get(self, "assignment_hash")
    @property
    @pulumi.getter(name="complianceStatus")
    def compliance_status(self) -> str:
        """
        A value indicating compliance status of the machine for the assigned guest configuration.
        """
        return pulumi.get(self, "compliance_status")
    @property
    @pulumi.getter(name="lastComplianceStatusChecked")
    def last_compliance_status_checked(self) -> str:
        """
        Date and time when last compliance status was checked.
        """
        return pulumi.get(self, "last_compliance_status_checked")
    @property
    @pulumi.getter(name="latestReportId")
    def latest_report_id(self) -> str:
        """
        Id of the latest report for the guest configuration assignment.
        """
        return pulumi.get(self, "latest_report_id")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state, which only appears in the response.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="targetResourceId")
    def target_resource_id(self) -> str:
        """
        VM resource Id.
        """
        return pulumi.get(self, "target_resource_id")
    @property
    @pulumi.getter
    def context(self) -> Optional[str]:
        """
        The source which initiated the guest configuration assignment. Ex: Azure Policy
        """
        return pulumi.get(self, "context")
    @property
    @pulumi.getter(name="guestConfiguration")
    def guest_configuration(self) -> Optional['outputs.GuestConfigurationNavigationResponse']:
        """
        The guest configuration to assign.
        """
        return pulumi.get(self, "guest_configuration")
    @property
    @pulumi.getter(name="latestAssignmentReport")
    def latest_assignment_report(self) -> Optional['outputs.AssignmentReportResponse']:
        """
        Last reported guest configuration assignment report.
        """
        return pulumi.get(self, "latest_assignment_report")
    def _translate_property(self, prop):
        # Map camelCase wire names onto the snake_case Python property names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class GuestConfigurationNavigationResponse(dict):
    """
    Guest configuration is an artifact that encapsulates DSC configuration and its dependencies. The artifact is a zip file containing DSC configuration (as MOF) and dependent resources and other dependencies like modules.
    """
    def __init__(__self__, *,
                 content_hash: str,
                 content_uri: str,
                 configuration_parameter: Optional[Sequence['outputs.ConfigurationParameterResponse']] = None,
                 configuration_setting: Optional['outputs.ConfigurationSettingResponse'] = None,
                 kind: Optional[str] = None,
                 name: Optional[str] = None,
                 version: Optional[str] = None):
        """
        Guest configuration is an artifact that encapsulates DSC configuration and its dependencies. The artifact is a zip file containing DSC configuration (as MOF) and dependent resources and other dependencies like modules.
        :param str content_hash: Combined hash of the guest configuration package and configuration parameters.
        :param str content_uri: Uri of the storage where guest configuration package is uploaded.
        :param Sequence['ConfigurationParameterResponseArgs'] configuration_parameter: The configuration parameters for the guest configuration.
        :param 'ConfigurationSettingResponseArgs' configuration_setting: The configuration setting for the guest configuration.
        :param str kind: Kind of the guest configuration. For example:DSC
        :param str name: Name of the guest configuration.
        :param str version: Version of the guest configuration.
        """
        pulumi.set(__self__, "content_hash", content_hash)
        pulumi.set(__self__, "content_uri", content_uri)
        if configuration_parameter is not None:
            pulumi.set(__self__, "configuration_parameter", configuration_parameter)
        if configuration_setting is not None:
            pulumi.set(__self__, "configuration_setting", configuration_setting)
        if kind is not None:
            pulumi.set(__self__, "kind", kind)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if version is not None:
            pulumi.set(__self__, "version", version)
    @property
    @pulumi.getter(name="contentHash")
    def content_hash(self) -> str:
        """
        Combined hash of the guest configuration package and configuration parameters.
        """
        return pulumi.get(self, "content_hash")
    @property
    @pulumi.getter(name="contentUri")
    def content_uri(self) -> str:
        """
        Uri of the storage where guest configuration package is uploaded.
        """
        return pulumi.get(self, "content_uri")
    @property
    @pulumi.getter(name="configurationParameter")
    def configuration_parameter(self) -> Optional[Sequence['outputs.ConfigurationParameterResponse']]:
        """
        The configuration parameters for the guest configuration.
        """
        return pulumi.get(self, "configuration_parameter")
    @property
    @pulumi.getter(name="configurationSetting")
    def configuration_setting(self) -> Optional['outputs.ConfigurationSettingResponse']:
        """
        The configuration setting for the guest configuration.
        """
        return pulumi.get(self, "configuration_setting")
    @property
    @pulumi.getter
    def kind(self) -> Optional[str]:
        """
        Kind of the guest configuration. For example:DSC
        """
        return pulumi.get(self, "kind")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        Name of the guest configuration.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def version(self) -> Optional[str]:
        """
        Version of the guest configuration.
        """
        return pulumi.get(self, "version")
    def _translate_property(self, prop):
        # Map camelCase wire names onto the snake_case Python property names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class VMInfoResponse(dict):
    """
    Information about the VM.
    """
    def __init__(__self__, *,
                 id: str,
                 uuid: str):
        """
        Information about the VM.
        :param str id: Azure resource Id of the VM.
        :param str uuid: UUID(Universally Unique Identifier) of the VM.
        """
        pulumi.set(__self__, "id", id)
        pulumi.set(__self__, "uuid", uuid)
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Azure resource Id of the VM.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def uuid(self) -> str:
        """
        UUID(Universally Unique Identifier) of the VM.
        """
        return pulumi.get(self, "uuid")
    def _translate_property(self, prop):
        # Map camelCase wire names onto the snake_case Python property names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
|
[
"noreply@github.com"
] |
noreply@github.com
|
4608f8b477a4827ab546757c5cdf0cf175bfa969
|
841e32970a080c8beb4ccc94d2afc5f264483a45
|
/api/app/migrations/0001_initial.py
|
d227e2d9a19eee4bb8de17faada02127cabe7b35
|
[] |
no_license
|
semprajapat/automation_pytest
|
3249fec117186ee9984674585b79fe0d75a15a6c
|
05fb58c5cece1043317bf444e8636fd49564fccc
|
refs/heads/master
| 2021-01-14T19:18:53.865204
| 2020-02-24T12:22:16
| 2020-02-24T12:22:16
| 242,727,715
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 547
|
py
|
# Generated by Django 3.0.3 on 2020-02-24 11:42
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration; make schema changes via new
    # migrations rather than editing this file.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Datamodel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('last', models.CharField(max_length=50)),
            ],
        ),
    ]
|
[
"aaa@Aaas-MacBook-Pro.local"
] |
aaa@Aaas-MacBook-Pro.local
|
af4da04242f2f06729d65a60df595b64a56f4355
|
ba2c77f62e7c9ddc074606cbca94062941dfc760
|
/small_methods.py
|
e2a95b7f1d18121fe30f08b1b169cac48fdcb01f
|
[] |
no_license
|
scaars10/Lazy-Crawler
|
5608888f1ed60bdc951b2b4ba2a17ca7ab173bea
|
4088514571f096531076f4c551eac2ce4912530d
|
refs/heads/master
| 2021-08-09T00:25:45.546129
| 2017-11-11T18:29:06
| 2017-11-11T18:29:06
| 110,369,278
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,890
|
py
|
import os
from stemming.porter2 import stem
# omnipresent_words = ['www.', 'http:', 'https:', '.com', '.in']
def directory_manage(relative_location):
    """Ensure `relative_location` exists as a directory, relative to this file.

    Safe to call repeatedly; does nothing if the directory already exists.
    """
    base_path = os.path.dirname(os.path.realpath(__file__))
    # exist_ok avoids the check-then-create race of exists() + makedirs().
    os.makedirs(os.path.join(base_path, relative_location), exist_ok=True)
# Load keyword/weight pairs used for link ranking.  Expected file format:
# one "<keyword> <integer weight>" pair per line.
keywords = []
values = []
with open(os.path.join('Processing_Data', 'keywords.txt'), 'r') as keyword_file:
    for line_count, each_line in enumerate(keyword_file, start=1):
        line = each_line.split()
        try:
            values.append(int(line[1]))
            keywords.append(stem(line[0]))
        except (IndexError, ValueError):
            # Malformed line: log it and continue with the rest of the file.
            # (The old code rebound `f` to the error file here, leaking the
            # keywords file handle and later closing the wrong file.)
            directory_manage(os.path.join('Output', 'Errors'))
            with open(os.path.join('Output', 'Errors', 'Keyword_Error.txt'), 'a') as error_file:
                # Bug fix: the old message told the user to check
                # Output\Errors\keywords.txt, but the file actually being
                # read is Processing_Data\keywords.txt.
                error_file.write('Check Line No. ' + str(line_count)
                                 + ' in Processing_Data\\keywords.txt for formatting error\n')
def sort_links_wrt_importance(links, links_text):
    """Sort `links` in place into descending keyword-importance order.

    A link's importance is the sum of `values[i]` for every keyword found in
    the stemmed URL, plus `values[i]` again when the keyword also occurs in
    the stemmed anchor text.  The sort is stable (matching the old
    hand-written insertion sort) and the same list object is returned.

    NOTE(review): `links_text` is intentionally NOT reordered — the original
    code had that part commented out — so text/link pairing is lost after
    sorting; confirm callers do not rely on the pairing.
    """
    def _importance(position):
        # Score one link by its stemmed URL and (when available) anchor text.
        link = stem(links[position])
        if isinstance(links_text[position], str):
            link_text = stem(links_text[position])
        else:
            link_text = 'ignore'
        strength = 0
        for keyword, weight in zip(keywords, values):
            if keyword in link:
                strength += weight
            if keyword in link_text:
                strength += weight
        return strength
    scores = [_importance(position) for position in range(len(links))]
    # Stable descending sort in O(n log n) instead of the old O(n^2)
    # insertion sort; `links[:] =` keeps the in-place mutation semantics.
    order = sorted(range(len(links)), key=scores.__getitem__, reverse=True)
    links[:] = [links[index] for index in order]
    return links
|
[
"scaars10@gmail.com"
] |
scaars10@gmail.com
|
30a56aa3ea447d0f6e641cf2b1c120ab673bb144
|
fe81c95988122057f030cc6c57681e215093c9ba
|
/比赛分享/调参/tiaocan1.py
|
3413b37f817c71bf81d552fb4f4f5e1c94ca54e1
|
[] |
no_license
|
xiexiaoyang/Big-Data-Challenge
|
fbe2bbfa92a603460479e6cf7ff4a6f197af239f
|
2fc6ae26037a98d46cb0735a0e4c744b74ec9fb0
|
refs/heads/master
| 2021-07-19T02:37:58.980208
| 2017-10-22T07:18:24
| 2017-10-22T07:18:24
| 107,832,382
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,637
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 19 19:28:57 2017
@author: Yang
"""
'''调参
1. 理解模型
2. 列出所有的参数
3. 选择对模型提升大的参数
代码错误:
1. kstep = len(randidx) / nfold 改为 kstep = len(randidx) // nfold
2. 'Disbursed' 改为 target
3, Parameter values should be a list. 改为 param_test1 = {'max_depth':list(range(3,10,2)),'min_child_weight':list(range(1,6,2))}
'''
#Import libraries:
import pandas as pd
import numpy as np
import xgboost as xgb
from xgboost.sklearn import XGBClassifier
from sklearn import cross_validation, metrics #Additional scklearn functions
from sklearn.grid_search import GridSearchCV #Perforing grid search
import matplotlib.pylab as plt
from matplotlib.pylab import rcParams
# Default figure size for the feature-importance plots produced below.
rcParams['figure.figsize'] = 12, 4
# Training/test sets; `label` is the binary target column, `id` the row id.
# NOTE(review): hard-coded absolute Windows paths — adjust before reuse.
train = pd.read_csv(r"G:\比赛分享\data\alltrain.csv")
test= pd.read_csv(r"G:\比赛分享\data\alltest.csv")
target = 'label'
IDcol = 'id'
def modelfit(alg, dtrain, predictors,useTrainCV=True, cv_folds=5, early_stopping_rounds=50):
    """Fit `alg` on `dtrain[predictors]`, print train accuracy/AUC and plot feature importances.

    When `useTrainCV` is true, xgb.cv with early stopping first selects the
    number of boosting rounds and overwrites `n_estimators` on `alg`.
    Relies on the module-level `target` column name.

    NOTE(review): `show_progress` and `alg.booster()` are old xgboost APIs
    (newer versions use `verbose_eval` / `get_booster()`) — confirm the
    installed xgboost version before reuse.
    """
    if useTrainCV:
        xgb_param = alg.get_xgb_params()
        xgtrain = xgb.DMatrix(dtrain[predictors].values, label=dtrain[target].values)
        # cvresult has one row per boosting round that survived early stopping,
        # so its row count is the tuned n_estimators.
        cvresult = xgb.cv(xgb_param, xgtrain, num_boost_round=alg.get_params()['n_estimators'], nfold=cv_folds,
            metrics=['auc'], early_stopping_rounds=early_stopping_rounds, show_progress=True)
        alg.set_params(n_estimators=cvresult.shape[0])
    #Fit the algorithm on the data
    alg.fit(dtrain[predictors], dtrain[target],eval_metric='auc')
    #Predict training set:
    dtrain_predictions = alg.predict(dtrain[predictors])
    dtrain_predprob = alg.predict_proba(dtrain[predictors])[:,1]
    #Print model report:
    print ("\nModel Report")
    print (("Accuracy : %.4g") % metrics.accuracy_score(dtrain[target].values, dtrain_predictions))
    print (("AUC Score (Train): %f" )% metrics.roc_auc_score(dtrain[target], dtrain_predprob))
    feat_imp = pd.Series(alg.booster().get_fscore()).sort_values(ascending=False)
    feat_imp.plot(kind='bar', title='xgb Feature Importances')
    plt.ylabel('Feature Importance Score')
'''
Step 1: fix the learning rate and the number of estimators used for tuning
the tree-based parameters (i.e. learning_rate and n_estimators).
'''
##Choose all predictors except target & IDcols
predictors = [x for x in train.columns if x not in [target, IDcol]]
# Baseline classifier with conventional starting values; modelfit() will
# overwrite n_estimators with the CV early-stopping result.
xgb1 = XGBClassifier(
 learning_rate =0.1,
 n_estimators=1000,
 max_depth=5,
 min_child_weight=1,
 gamma=0,
 subsample=0.8,
 colsample_bytree=0.8,
 objective= 'binary:logistic',
 nthread=4,
 scale_pos_weight=1,
 seed=27)
modelfit(xgb1, train, predictors)
###Step 2: Tune max_depth and min_child_weight
#param_test1 = {
# 'max_depth':list(range(3,10,2)),
# 'min_child_weight':list(range(1,6,2))
#}
#gsearch1 = GridSearchCV(estimator = XGBClassifier( learning_rate =0.1, n_estimators=140, max_depth=5,
# min_child_weight=1, gamma=0, subsample=0.8, colsample_bytree=0.8,
# objective= 'binary:logistic', nthread=4, scale_pos_weight=1, seed=27),
# param_grid = param_test1, scoring='roc_auc',n_jobs=4,iid=False, cv=2 )
#
#print(gsearch1.fit(train[predictors],train[target]))
#print(gsearch1.grid_scores_, gsearch1.best_params_, gsearch1.best_score_)
#
#param_test2 = {
# 'max_depth':[4,5,6],
# 'min_child_weight':[4,5,6]
#}
#gsearch2 = GridSearchCV(estimator = XGBClassifier( learning_rate=0.1, n_estimators=140, max_depth=5,
# min_child_weight=2, gamma=0, subsample=0.8, colsample_bytree=0.8,
# objective= 'binary:logistic', nthread=4, scale_pos_weight=1,seed=27),
# param_grid = param_test2, scoring='roc_auc',n_jobs=4,iid=False, cv=5)
#print(gsearch2.fit(train[predictors],train[target]))
#print(gsearch2.grid_scores_, gsearch2.best_params_, gsearch2.best_score_)
###Step 3: Tune gamma
#param_test3 = {
# 'gamma':[i/10.0 for i in range(0,5)]
#}
#gsearch3 = GridSearchCV(estimator = XGBClassifier( learning_rate =0.1, n_estimators=140, max_depth=4,
# min_child_weight=6, gamma=0, subsample=0.8, colsample_bytree=0.8,
# objective= 'binary:logistic', nthread=4, scale_pos_weight=1,seed=27),
# param_grid = param_test3, scoring='roc_auc',n_jobs=4,iid=False, cv=5)
#print(gsearch3.fit(train[predictors],train[target]))
#print(gsearch3.grid_scores_, gsearch3.best_params_, gsearch3.best_score_)
##
#xgb2 = XGBClassifier(
# learning_rate =0.1,
# n_estimators=1000,
# max_depth=4,
# min_child_weight=6,
# gamma=0,
# subsample=0.8,
# colsample_bytree=0.8,
# objective= 'binary:logistic',
# nthread=4,
# scale_pos_weight=1,
# seed=27)
#modelfit(xgb2, train, predictors)
#
###Step 4: Tune subsample and colsample_bytree
#param_test4 = {
# 'subsample':[i/10.0 for i in range(6,10)],
# 'colsample_bytree':[i/10.0 for i in range(6,10)]
#}
#gsearch4 = GridSearchCV(estimator = XGBClassifier( learning_rate =0.1, n_estimators=177, max_depth=4,
# min_child_weight=6, gamma=0, subsample=0.8, colsample_bytree=0.8,
# objective= 'binary:logistic', nthread=4, scale_pos_weight=1,seed=27),
# param_grid = param_test4, scoring='roc_auc',n_jobs=4,iid=False, cv=5)
#print(gsearch4.fit(train[predictors],train[target]))
#print(gsearch4.grid_scores_, gsearch4.best_params_, gsearch4.best_score_)
#
#param_test5 = {
# 'subsample':[i/100.0 for i in range(75,90,5)],
# 'colsample_bytree':[i/100.0 for i in range(75,90,5)]
#}
#gsearch5 = GridSearchCV(estimator = XGBClassifier( learning_rate =0.1, n_estimators=177, max_depth=4,
# min_child_weight=6, gamma=0, subsample=0.8, colsample_bytree=0.8,
# objective= 'binary:logistic', nthread=4, scale_pos_weight=1,seed=27),
# param_grid = param_test5, scoring='roc_auc',n_jobs=4,iid=False, cv=5)
#gsearch5.fit(train[predictors],train[target])
###Step 5: Tuning Regularization Parameters
#param_test6 = {
# 'reg_alpha':[1e-5, 1e-2, 0.1, 1, 100]
#}
#gsearch6 = GridSearchCV(estimator = XGBClassifier( learning_rate =0.1, n_estimators=177, max_depth=4,
# min_child_weight=6, gamma=0.1, subsample=0.8, colsample_bytree=0.8,
# objective= 'binary:logistic', nthread=4, scale_pos_weight=1,seed=27),
# param_grid = param_test6, scoring='roc_auc',n_jobs=4,iid=False, cv=5)
#print(gsearch6.fit(train[predictors],train[target]))
#print(gsearch6.grid_scores_, gsearch6.best_params_, gsearch6.best_score_)
###Step 6: Reducing Learning Rate
#xgb4 = XGBClassifier(
# learning_rate =0.01,
# n_estimators=5000,
# max_depth=4,
# min_child_weight=6,
# gamma=0,
# subsample=0.8,
# colsample_bytree=0.8,
# reg_alpha=0.005,
# objective= 'binary:logistic',
# nthread=4,
# scale_pos_weight=1,
# seed=27)
#modelfit(xgb4, train, predictors)
|
[
"2509039243@qq.com"
] |
2509039243@qq.com
|
49b63a0524f032834d51833a9cee91640d52b635
|
06933e4550c4d647ecedab639c1fa9748d7aa155
|
/tvshows/tvshows_app/models.py
|
8cdbce4772b4f7f1552184f5fac82bb38761ab97
|
[] |
no_license
|
leoalicastro/tv_shows
|
ce4bb052c64ed6aba34194104f6f31462c1a61c5
|
890122a4a7eda81cb24f10f88cc217c132e7850b
|
refs/heads/main
| 2023-07-16T15:00:03.292785
| 2021-08-19T18:50:09
| 2021-08-19T18:50:09
| 390,782,646
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,045
|
py
|
from django.db import models
from datetime import datetime
class ShowManager(models.Manager):
    """Custom manager providing form-style validation for Show data."""

    def basic_validator(self, post_data):
        """Validate POSTed show fields.

        Returns a dict mapping field name -> error message; an empty dict
        means the data passed validation. Expects the keys 'title',
        'network', 'desc' and 'release' (release as 'YYYY-MM-DD').
        """
        errors = {}
        if len(post_data['title']) < 2:
            errors['title'] = "Title must be at least 2 characters"
        if len(post_data['network']) < 3:
            # The condition requires 3 characters, so the message must say 3
            # (the original text claimed 2, contradicting the check).
            errors['network'] = "Network must be at least 3 characters"
        # Description is optional, but when provided it must be substantial.
        if post_data['desc'] != '' and len(post_data['desc']) < 10:
            errors['desc'] = "Description must be at least 10 characters"
        if datetime.strptime(post_data['release'], '%Y-%m-%d') > datetime.now():
            errors['release'] = 'Release Date should be in the past'
        return errors
class Show(models.Model):
    # A TV show record; field validation lives in ShowManager.basic_validator.
    title = models.CharField(max_length=255)
    network = models.CharField(max_length=255)
    release = models.DateField()  # original air date; validated to be in the past
    desc = models.CharField(max_length=255)
    created_at = models.DateTimeField(auto_now_add=True)  # set once on insert
    updated_at = models.DateTimeField(auto_now=True)  # refreshed on every save
    objects = ShowManager()  # attach the validating manager as the default
|
[
"leoalicastro957@gmail.com"
] |
leoalicastro957@gmail.com
|
0e9776198e43fe8ba697233bc1a7c1c9c3496279
|
bef71d057048b93ef784892d911e7c2f7ffaee14
|
/framework_autotest/testsuites/test_wirenetwork.py
|
91fb3dbad2201ce112d8c4fcf0e8e7290c8c7e12
|
[] |
no_license
|
leaf2maple/python-selenium-unittest
|
2379403e98508000276bb5d0c89866efc5450d90
|
50bf514144c6cd6e8d0f51164731bbad367e9356
|
refs/heads/master
| 2020-09-01T00:26:30.307410
| 2019-10-31T17:52:09
| 2019-10-31T17:52:09
| 218,826,698
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,572
|
py
|
import unittest
import time
from framework.browser_engine import BrowserEngine
from pageobject.wb01_wirenetworkpage import WireNetWorkPage
from framework.login import Login
from pageobject.wb01_homepage import HomePage
class TestWireNetWork(unittest.TestCase):
    """UI tests for the wired-network settings page, via Selenium page objects."""
    @classmethod
    def setUpClass(cls):
        # One browser session is shared by all tests in the class: open the
        # browser, log in (or skip login), and navigate to the page once.
        browser = BrowserEngine(cls)
        cls.driver = browser.open_browser(cls)
        login = Login(cls.driver)
        login.skip_or_login()
        homepage = HomePage(cls.driver)
        homepage.wireNetwork_click()
        time.sleep(5)
    @classmethod
    def tearDownClass(cls):
        cls.driver.quit()
    def test_011_wirenetwork_switch(self):
        # Toggling the wired-network switch should hide then show the
        # "IP设置" (IP settings) section.
        wirenetwork = WireNetWorkPage(self.driver)
        # Turn the switch off
        wirenetwork.wirenetwork_switch_click()
        time.sleep(1)
        try:
            el = self.driver.find_element_by_xpath("//div[@class='wireNetwork']")
            assert "IP设置" not in el.text
            print("test_011 pass")
            wirenetwork.get_screenshot_as_file()
        except Exception as e:
            print("test_011 fail", format(e))
        # Turn the switch back on
        wirenetwork.wirenetwork_switch_click()
        time.sleep(1)
        try:
            el = self.driver.find_element_by_xpath("//span[@class='title' and text()='IP设置']")
            assert "IP设置" in el.text
            print("test_011 pass")
            wirenetwork.get_screenshot_as_file()
        except Exception as e:
            print("test_011 fail", format(e))
    def test_012_ip_manual_set(self):
        # Selecting manual IP mode should expose the IP-address input row.
        wirenetwork = WireNetWorkPage(self.driver)
        wirenetwork.ip_ul_click()
        wirenetwork.ip_manual_set_click()
        try:
            el = "//span[@class='address-input-title' and text()='IP地址']/following-sibling::span[1]"
            # NOTE(review): find_element_by_xpath returns a WebElement, which is
            # never `is True`; this assert always fails into the except branch —
            # confirm whether `is not None` was intended.
            assert self.driver.find_element_by_xpath(el) is True
            print("test_012 pass")
            wirenetwork.get_screenshot_as_file()
        except Exception as e:
            print("test_012 fail", format(e))
    def test_013_ip_auto_set(self):
        # Selecting automatic IP mode should also show the IP-address row.
        wirenetwork = WireNetWorkPage(self.driver)
        wirenetwork.ip_ul_click()
        wirenetwork.ip_auto_set_click()
        try:
            el = "//span[@class='address-input-title' and text()='IP地址']/following-sibling::span[1]"
            # NOTE(review): same always-false `is True` comparison as test_012.
            assert self.driver.find_element_by_xpath(el) is True
            print("test_013 pass")
            wirenetwork.get_screenshot_as_file()
        except Exception as e:
            print("test_013 fail", format(e))
# Run the suite directly with `python test_wirenetwork.py`.
if __name__ == '__main__':
    unittest.main()
|
[
"421757223@qq.com"
] |
421757223@qq.com
|
a5db37c7dc9f8509adffc6dc45b2b5386d2c55a7
|
f3a3228c1afa0e252fa041553e450b3b53e273ec
|
/zetcode/tetris.py
|
88a6ac7bbcdf3616296a1f89d250cfc2f9c15cbf
|
[] |
no_license
|
dugbang/pyqt_prj
|
9395d25202d43fcadc577c9ff8606f649a575c9a
|
ed4fae66496e57258cdb22360273462a1ac59ea0
|
refs/heads/master
| 2020-04-13T07:04:41.980757
| 2019-01-29T00:41:17
| 2019-01-29T00:41:17
| 163,039,698
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,455
|
py
|
"""
ZetCode PyQt5 tutorial
This is a Tetris game clone.
Author: Jan Bodnar
Website: zetcode.com
Last edited: August 2017
"""
from PyQt5.QtWidgets import QMainWindow, QFrame, QDesktopWidget, QApplication
from PyQt5.QtCore import Qt, QBasicTimer, pyqtSignal
from PyQt5.QtGui import QPainter, QColor
import sys, random
class Tetris(QMainWindow):
    """Main window: hosts the Board widget and a status bar for the score."""
    def __init__(self):
        super().__init__()
        self.initUI()
    def initUI(self):
        '''initiates application UI'''
        self.tboard = Board(self)
        self.setCentralWidget(self.tboard)
        self.statusbar = self.statusBar()
        # The board reports the score, "paused" and "Game over" text
        # through this signal.
        self.tboard.msg2Statusbar[str].connect(self.statusbar.showMessage)
        self.tboard.start()
        self.resize(180, 380)
        self.center()
        self.setWindowTitle('Tetris')
        self.show()
    def center(self):
        '''centers the window on the screen'''
        screen = QDesktopWidget().screenGeometry()
        # screen = QDesktopWidget().availableGeometry()
        size = self.geometry()
        self.move((screen.width() - size.width()) / 2,
                  (screen.height() - size.height()) / 2)
    def center_(self):
        # NOTE(review): alternative centering helper; appears unused by this
        # file — confirm before removing.
        qr = self.frameGeometry()
        cp = QDesktopWidget().availableGeometry().center()
        qr.moveCenter(cp)
        self.move(qr.topLeft())
class Board(QFrame):
    """The playing field: a 10x22 grid of Tetrominoe codes plus game logic.

    Board row 0 is the BOTTOM of the field; painting flips the y axis.
    """
    msg2Statusbar = pyqtSignal(str)  # emits score/status text for the window
    BoardWidth = 10
    BoardHeight = 22
    Speed = 300  # timer interval in ms between automatic one-line drops
    def __init__(self, parent):
        super().__init__(parent)
        self.initBoard()
    def initBoard(self):
        '''initiates board'''
        self.timer = QBasicTimer()
        self.isWaitingAfterLine = False  # pause one tick after clearing lines
        self.curX = 0
        self.curY = 0
        self.numLinesRemoved = 0
        self.board = []  # flat list, indexed as y * BoardWidth + x
        self.setFocusPolicy(Qt.StrongFocus)
        self.isStarted = False
        self.isPaused = False
        self.clearBoard()
    def shapeAt(self, x, y):
        '''determines shape at the board position'''
        return self.board[(y * Board.BoardWidth) + x]
    def setShapeAt(self, x, y, shape):
        '''sets a shape at the board'''
        self.board[(y * Board.BoardWidth) + x] = shape
    def squareWidth(self):
        '''returns the width of one square'''
        return self.contentsRect().width() // Board.BoardWidth
    def squareHeight(self):
        '''returns the height of one square'''
        return self.contentsRect().height() // Board.BoardHeight
    def start(self):
        '''starts game'''
        if self.isPaused:
            return
        self.isStarted = True
        self.isWaitingAfterLine = False
        self.numLinesRemoved = 0
        self.clearBoard()
        self.msg2Statusbar.emit(str(self.numLinesRemoved))
        self.newPiece()
        self.timer.start(Board.Speed, self)
    def pause(self):
        '''pauses game'''
        if not self.isStarted:
            return
        self.isPaused = not self.isPaused
        if self.isPaused:
            self.timer.stop()
            self.msg2Statusbar.emit("paused")
        else:
            self.timer.start(Board.Speed, self)
            self.msg2Statusbar.emit(str(self.numLinesRemoved))
        self.update()
    def paintEvent(self, event):
        '''paints all shapes of the game'''
        painter = QPainter(self)
        rect = self.contentsRect()
        boardTop = rect.bottom() - Board.BoardHeight * self.squareHeight()
        # Draw the settled squares; flip y because row 0 is the bottom.
        for i in range(Board.BoardHeight):
            for j in range(Board.BoardWidth):
                shape = self.shapeAt(j, Board.BoardHeight - i - 1)
                if shape != Tetrominoe.NoShape:
                    self.drawSquare(painter,
                                    rect.left() + j * self.squareWidth(),
                                    boardTop + i * self.squareHeight(), shape)
        # Draw the currently falling piece on top of the settled squares.
        if self.curPiece.shape() != Tetrominoe.NoShape:
            for i in range(4):
                x = self.curX + self.curPiece.x(i)
                y = self.curY - self.curPiece.y(i)
                self.drawSquare(painter, rect.left() + x * self.squareWidth(),
                                boardTop + (Board.BoardHeight - y - 1) * self.squareHeight(),
                                self.curPiece.shape())
    def keyPressEvent(self, event):
        '''processes key press events'''
        if not self.isStarted or self.curPiece.shape() == Tetrominoe.NoShape:
            super(Board, self).keyPressEvent(event)
            return
        key = event.key()
        if key == Qt.Key_P:
            self.pause()
            return
        if self.isPaused:
            return
        elif key == Qt.Key_Left:
            self.tryMove(self.curPiece, self.curX - 1, self.curY)
        elif key == Qt.Key_Right:
            self.tryMove(self.curPiece, self.curX + 1, self.curY)
        elif key == Qt.Key_Down:
            self.tryMove(self.curPiece.rotateRight(), self.curX, self.curY)
        elif key == Qt.Key_Up:
            self.tryMove(self.curPiece.rotateLeft(), self.curX, self.curY)
        elif key == Qt.Key_Space:
            self.dropDown()
        elif key == Qt.Key_D:
            self.oneLineDown()
        else:
            super(Board, self).keyPressEvent(event)
    def timerEvent(self, event):
        '''handles timer event'''
        if event.timerId() == self.timer.timerId():
            # After clearing lines, wait one tick before spawning a new piece.
            if self.isWaitingAfterLine:
                self.isWaitingAfterLine = False
                self.newPiece()
            else:
                self.oneLineDown()
        else:
            super(Board, self).timerEvent(event)
    def clearBoard(self):
        '''clears shapes from the board'''
        for i in range(Board.BoardHeight * Board.BoardWidth):
            self.board.append(Tetrominoe.NoShape)
    def dropDown(self):
        '''drops down a shape'''
        newY = self.curY
        while newY > 0:
            if not self.tryMove(self.curPiece, self.curX, newY - 1):
                break
            newY -= 1
        self.pieceDropped()
    def oneLineDown(self):
        '''goes one line down with a shape'''
        if not self.tryMove(self.curPiece, self.curX, self.curY - 1):
            self.pieceDropped()
    def pieceDropped(self):
        '''after dropping shape, remove full lines and create new shape'''
        # Freeze the piece's four cells into the board grid.
        for i in range(4):
            x = self.curX + self.curPiece.x(i)
            y = self.curY - self.curPiece.y(i)
            self.setShapeAt(x, y, self.curPiece.shape())
        self.removeFullLines()
        if not self.isWaitingAfterLine:
            self.newPiece()
    def removeFullLines(self):
        '''removes all full lines from the board'''
        numFullLines = 0
        rowsToRemove = []
        for i in range(Board.BoardHeight):
            n = 0
            for j in range(Board.BoardWidth):
                if not self.shapeAt(j, i) == Tetrominoe.NoShape:
                    n = n + 1
            if n == 10:
                rowsToRemove.append(i)
        rowsToRemove.reverse()
        # Shift every row above each removed row down by one.
        for m in rowsToRemove:
            for k in range(m, Board.BoardHeight):
                for l in range(Board.BoardWidth):
                    self.setShapeAt(l, k, self.shapeAt(l, k + 1))
        numFullLines = numFullLines + len(rowsToRemove)
        if numFullLines > 0:
            self.numLinesRemoved = self.numLinesRemoved + numFullLines
            self.msg2Statusbar.emit(str(self.numLinesRemoved))
            self.isWaitingAfterLine = True
            self.curPiece.setShape(Tetrominoe.NoShape)
            self.update()
    def newPiece(self):
        '''creates a new shape'''
        self.curPiece = Shape()
        self.curPiece.setRandomShape()
        self.curX = Board.BoardWidth // 2 + 1
        self.curY = Board.BoardHeight - 1 + self.curPiece.minY()
        if not self.tryMove(self.curPiece, self.curX, self.curY):
            # No room to place the new piece: the game is over.
            self.curPiece.setShape(Tetrominoe.NoShape)
            self.timer.stop()
            self.isStarted = False
            self.msg2Statusbar.emit("Game over")
    def tryMove(self, newPiece, newX, newY):
        '''tries to move a shape'''
        # Reject the move if any of the four cells would leave the board
        # or land on an occupied square; otherwise commit it.
        for i in range(4):
            x = newX + newPiece.x(i)
            y = newY - newPiece.y(i)
            if x < 0 or x >= Board.BoardWidth or y < 0 or y >= Board.BoardHeight:
                return False
            if self.shapeAt(x, y) != Tetrominoe.NoShape:
                return False
        self.curPiece = newPiece
        self.curX = newX
        self.curY = newY
        self.update()
        return True
    def drawSquare(self, painter, x, y, shape):
        '''draws a square of a shape'''
        # Color table indexed by the Tetrominoe code (index 0 = NoShape).
        colorTable = [0x000000, 0xCC6666, 0x66CC66, 0x6666CC,
                      0xCCCC66, 0xCC66CC, 0x66CCCC, 0xDAAA00]
        color = QColor(colorTable[shape])
        painter.fillRect(x + 1, y + 1, self.squareWidth() - 2,
                         self.squareHeight() - 2, color)
        # Light top/left and dark bottom/right edges give a bevel effect.
        painter.setPen(color.lighter())
        painter.drawLine(x, y + self.squareHeight() - 1, x, y)
        painter.drawLine(x, y, x + self.squareWidth() - 1, y)
        painter.setPen(color.darker())
        painter.drawLine(x + 1, y + self.squareHeight() - 1,
                         x + self.squareWidth() - 1, y + self.squareHeight() - 1)
        painter.drawLine(x + self.squareWidth() - 1,
                         y + self.squareHeight() - 1, x + self.squareWidth() - 1, y + 1)
class Tetrominoe(object):
    # Integer codes for the piece types; the value doubles as the row index
    # into Shape.coordsTable and into drawSquare's colorTable.
    NoShape = 0
    ZShape = 1
    SShape = 2
    LineShape = 3
    TShape = 4
    SquareShape = 5
    LShape = 6
    MirroredLShape = 7
class Shape(object):
    # Relative (x, y) cell offsets for each piece's four blocks; the row
    # index equals the Tetrominoe constant (row 0 = NoShape).
    coordsTable = (
        ((0, 0), (0, 0), (0, 0), (0, 0)),
        ((0, -1), (0, 0), (-1, 0), (-1, 1)),
        ((0, -1), (0, 0), (1, 0), (1, 1)),
        ((0, -1), (0, 0), (0, 1), (0, 2)),
        ((-1, 0), (0, 0), (1, 0), (0, 1)),
        ((0, 0), (1, 0), (0, 1), (1, 1)),
        ((-1, -1), (0, -1), (0, 0), (0, 1)),
        ((1, -1), (0, -1), (0, 0), (0, 1))
    )
    def __init__(self):
        # Mutable copy of one coordsTable row; starts as NoShape.
        self.coords = [[0, 0] for i in range(4)]
        self.pieceShape = Tetrominoe.NoShape
        self.setShape(Tetrominoe.NoShape)
    def shape(self):
        '''returns shape'''
        return self.pieceShape
    def setShape(self, shape):
        '''sets a shape'''
        table = Shape.coordsTable[shape]
        for i in range(4):
            for j in range(2):
                self.coords[i][j] = table[i][j]
        self.pieceShape = shape
    def setRandomShape(self):
        '''chooses a random shape'''
        self.setShape(random.randint(1, 7))
    def x(self, index):
        '''returns x coordinate'''
        return self.coords[index][0]
    def y(self, index):
        '''returns y coordinate'''
        return self.coords[index][1]
    def setX(self, index, x):
        '''sets x coordinate'''
        self.coords[index][0] = x
    def setY(self, index, y):
        '''sets y coordinate'''
        self.coords[index][1] = y
    def minX(self):
        '''returns min x value'''
        m = self.coords[0][0]
        for i in range(4):
            m = min(m, self.coords[i][0])
        return m
    def maxX(self):
        '''returns max x value'''
        m = self.coords[0][0]
        for i in range(4):
            m = max(m, self.coords[i][0])
        return m
    def minY(self):
        '''returns min y value'''
        m = self.coords[0][1]
        for i in range(4):
            m = min(m, self.coords[i][1])
        return m
    def maxY(self):
        '''returns max y value'''
        m = self.coords[0][1]
        for i in range(4):
            m = max(m, self.coords[i][1])
        return m
    def rotateLeft(self):
        '''rotates shape to the left'''
        if self.pieceShape == Tetrominoe.SquareShape:
            return self  # rotating a square is a no-op
        # Return a rotated copy; the original shape is left untouched.
        result = Shape()
        result.pieceShape = self.pieceShape
        for i in range(4):
            result.setX(i, self.y(i))
            result.setY(i, -self.x(i))
        return result
    def rotateRight(self):
        '''rotates shape to the right'''
        if self.pieceShape == Tetrominoe.SquareShape:
            return self
        result = Shape()
        result.pieceShape = self.pieceShape
        for i in range(4):
            result.setX(i, -self.y(i))
            result.setY(i, self.x(i))
        return result
if __name__ == '__main__':
    # Construct the application, show the window and enter the Qt event loop.
    app = QApplication([])
    tetris = Tetris()
    sys.exit(app.exec_())
|
[
"dugbang@gmail.com"
] |
dugbang@gmail.com
|
b61bba4f1a3cafc372508b61c5bc9307207181e7
|
7cc4b082d0af7622cd77204a1eef2311c24445de
|
/my-venv/bin/wheel
|
e1988530207fb5198672a49ecda9425ec3401572
|
[] |
no_license
|
aasthakumar/ChatClient
|
cbfc18ba4cec6c20280680c659214a458cef5688
|
32c959488eda73caa81c8643957dbf1e6f79c77a
|
refs/heads/master
| 2020-03-15T01:50:46.755513
| 2018-05-02T20:38:43
| 2018-05-02T20:38:43
| 131,903,332
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 268
|
#!/Users/aastha/Documents/GitHub/CMPE-273-quizzes/lab3/my-venv/bin/python
# -*- coding: utf-8 -*-
# Auto-generated console-script shim for the `wheel` command-line entry point.
import re
import sys
from wheel.tool import main
if __name__ == '__main__':
    # Strip setuptools' "-script.py"/".exe" suffix from argv[0] before
    # delegating to wheel's CLI main().
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"aastha.kumar@sjsu.edu"
] |
aastha.kumar@sjsu.edu
|
|
cc132717c790921bdf671f8150bb2f9d868ad2fe
|
6ee3daaec8559287b6e9f07ae93e49ab0cbd3a89
|
/Edabit/Edabit_6306022610113_ch7.py
|
b823274d734c905f545a8666458f89e5e1f6a666
|
[] |
no_license
|
6306022610113/INE_Problem
|
9a43c42d71faa4ed7c4da8b83a53bad0a67ac7a2
|
99f3215aafecc486d81fb2d26aeb962d90970768
|
refs/heads/main
| 2023-04-21T11:03:07.471197
| 2021-05-06T04:56:07
| 2021-05-06T04:56:07
| 328,900,853
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 684
|
py
|
def shared_letters(a, b):
    """Return the letters common to both strings.

    Comparison is case-insensitive; the result is deduplicated and
    sorted alphabetically.
    """
    common = {ch for ch in a.lower() if ch in b.lower()}
    return ''.join(sorted(common))
print(shared_letters("house", "home"))
print(shared_letters("Micky", "mouse"))
print(shared_letters("house", "villa"))
|
[
"68582327+6306022610113@users.noreply.github.com"
] |
68582327+6306022610113@users.noreply.github.com
|
4eee374d40da98978fa6eead0dbd109ebd17f59e
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2449/60657/249828.py
|
b6f2e07860c983d2311d854da47037a89843a79d
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 152
|
py
|
import math
A=input().split(',')
B=input()
def judge(A, B):
    """Return the index of the first occurrence of B in list A, or -1."""
    try:
        return A.index(B)
    except ValueError:
        # B does not occur in A at all.
        return -1
print(judge(A,B))
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
3100c28087a8a7b53bc1fb5f666abd9059c4956a
|
4fec8ef57c150b088c09a4753e1e9fdb3a2ddabd
|
/yadjangoblog/yaaccounts/apps.py
|
700c03e127494b503dcbc9c698fcf5f424059f4d
|
[] |
no_license
|
ValDon/YaDjangoBlog
|
629c124eb6475a2b1947d4b224b6cdd9473a0490
|
4e6b6453c73470b8f05d062a460962ce118954c3
|
refs/heads/master
| 2020-04-10T22:53:10.000582
| 2019-05-04T15:06:35
| 2019-05-04T15:06:35
| 161,334,986
| 0
| 0
| null | 2018-12-11T13:05:01
| 2018-12-11T13:05:00
| null |
UTF-8
|
Python
| false
| false
| 149
|
py
|
from django.apps import AppConfig
class YaAccountsAppConfig(AppConfig):
    # App configuration for the accounts module of yadjangoblog.
    name = 'yadjangoblog.yaaccounts'
    verbose_name = 'YaAccounts模块'  # display name shown in the Django admin
|
[
"twocucao@gmail.com"
] |
twocucao@gmail.com
|
1dfee621f2c8bf35b8a73f7fbbb1a64d238e125a
|
bbb21bb79c8c3efbad3dd34ac53fbd6f4590e697
|
/week3/TODO/TODO/settings.py
|
947cd11c197d9ed2bf30a09cd9c4016007788b22
|
[] |
no_license
|
Nusmailov/BFDjango
|
b14c70c42da9cfcb68eec6930519da1d0b1f53b6
|
cab7f0da9b03e9094c21efffc7ab07e99e629b61
|
refs/heads/master
| 2020-03-28T21:11:50.706778
| 2019-01-21T07:19:19
| 2019-01-21T07:19:19
| 149,136,999
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,094
|
py
|
"""
Django settings for TODO project.
Generated by 'django-admin startproject' using Django 2.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'mitb8&^*0ibt!u_xqe1!tjzumo65hy@cnxt-z#+9+p@m$u8qnn'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'main',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'TODO.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'TODO.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"nusmailov@gmail.com"
] |
nusmailov@gmail.com
|
482c33547adb2af1e66c780d7a9cc833b6182f11
|
c5da0ec1004fcb4283c62f3b2700a5b78dfa1fda
|
/Code/neuro.py
|
02386f5800c9c181a7ba7d26f5b8926616d5cca2
|
[] |
no_license
|
akshayadiga/Optical-Character-Recognition-of-English-Alphabets-using-Neural-Networks
|
78cd63c9d5d5f38bb0fd6de6e003a307712920fe
|
8c040655a72c0fd4dacfa7b358def99d339755d9
|
refs/heads/master
| 2021-01-25T13:07:13.730875
| 2018-03-02T04:58:14
| 2018-03-02T04:58:14
| 123,532,003
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,930
|
py
|
import neurolab as nl
def toProb(letter):
    """Return a 26-element one-hot list encoding a lowercase letter.

    The slot at index ord(letter) - ord('a') is 1; all other slots are 0.
    Assumes `letter` is a single character in 'a'..'z' — TODO confirm the
    dataset labels are always lowercase.
    """
    pos = ord(letter) - ord('a')
    return [1 if i == pos else 0 for i in range(26)]
def main():
    """Train a feed-forward net on the first 800 samples of letter.data and
    print classification accuracy on samples 800..999.

    Each line of letter.data has the letter label in column 1 and 128 binary
    pixel features from column 6 onward (tab-separated) — TODO confirm the
    exact file layout against the dataset source.
    """
    # --- load the training slice (first 800 rows) ---
    f = open("letter.data", "r")
    X = []
    Y = []
    count = 0
    for line in f:
        vector = line.strip().split()
        in_vec = [int(i) for i in vector[6:]]
        X.append(in_vec)
        Y.append(vector[1])
        count = count + 1
        if count == 800:
            break
    f.close()
    # Convert letter labels to 26-way one-hot targets.
    Y = [toProb(i) for i in Y]
    # 128 binary inputs -> 20 hidden tanh units -> 26-way softmax output.
    net = nl.net.newff([[0, 1]] * 128, [20, 26],
                       transf=[nl.trans.TanSig(), nl.trans.SoftMax()])
    net.train(X, Y, epochs=20, show=1, goal=0.02)
    # --- load the held-out test slice (rows 800..999) ---
    f = open("letter.data", "r")
    X = []
    Y = []
    count = 0
    for line in f:
        if count < 800:
            count = count + 1
            continue
        vector = line.strip().split()
        in_vec = [int(i) for i in vector[6:]]
        X.append(in_vec)
        Y.append(vector[1])
        count = count + 1
        if count == 1000:
            break
    f.close()  # the original leaked this second file handle
    z = net.sim(X)
    # Materialize the pairs: under Python 3 zip() is a one-shot iterator and
    # the original code both iterated it and later indexed it, which raised
    # TypeError ('zip' object is not subscriptable).
    bit_let_pair = list(zip(X, Y))
    correct = 0
    incorrect = 0
    let_predict = []
    # Convert each 26-way probability vector to its argmax letter.
    for probs in z:
        prob_letter = max(probs)
        for j in range(26):
            if probs[j] == prob_letter:
                prob_pos = j
        let_predict.append(chr(prob_pos + 97))
    for i in range(len(let_predict)):
        if let_predict[i] == bit_let_pair[i][1]:
            correct += 1
        else:
            incorrect += 1
    efficiency = correct / (float(correct + incorrect))
    # The original `print (efficiency*100),"%"` built and discarded a tuple
    # under Python 3, never printing the percent sign.
    print(efficiency * 100, "%")
main()
|
[
"akshayadiga@Akshays-MacBook-Pro.local"
] |
akshayadiga@Akshays-MacBook-Pro.local
|
f4d9d13be2187390413090795c88670b3fbc20fd
|
36644ad31dc42a91cae5200559d3f591b90c3d83
|
/server3/test.py
|
d6f20f35763db67f220d6b7a0d4cfaca6d000768
|
[
"MIT"
] |
permissive
|
kenken64/docker-microservices
|
77070f6a782407e52d45bd5a546efc8758c000a0
|
1923f935de40afda3af5529fadc0b3747b3b3e56
|
refs/heads/master
| 2023-01-22T17:25:44.566427
| 2019-11-22T08:05:44
| 2019-11-22T08:05:44
| 222,846,340
| 1
| 4
|
MIT
| 2023-01-07T11:56:43
| 2019-11-20T03:52:25
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 287
|
py
|
from pymongo import MongoClient
import os
from bson import json_util
# Smoke-test script: connect to a local MongoDB instance and dump the
# `movie` collection of database `testdb` to stdout.
client = MongoClient('mongodb://localhost:27017')
print(client)
db = client['testdb']
collection = db.movie.find()  # returns a Cursor, not a list
print(collection)
y = list(collection)  # materialize the cursor (this exhausts it)
print(y)
#x = json_util.dumps({'data': collection })
#print(x)
|
[
"bunnyppl@gmail.com"
] |
bunnyppl@gmail.com
|
931cf513935db910bfd70b2fad4b1ab03410eaa3
|
6fc9e67094d60cb192dcd4e3370e41aae00e73b2
|
/rotate.py
|
cba2a8d2d2d33fa924b5fc5ad00ad3908e89c346
|
[] |
no_license
|
Nusha97/IRIS-Project
|
4c287ac87c482d1dea220a79e9fad5546a954bf5
|
670f66afb7ace4d8e65e2071d28f479b8573972f
|
refs/heads/master
| 2021-08-07T10:58:02.602248
| 2020-04-21T10:59:43
| 2020-04-21T10:59:43
| 158,009,001
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,685
|
py
|
# -*- coding: utf-8 -*-
"""
Automatically detect rotation and line spacing of an image of text using
Radon transform
If image is rotated by the inverse of the output, the lines will be
horizontal (though they may be upside-down depending on the original image)
It doesn't work with black borders
"""
from __future__ import division, print_function
from skimage.transform import radon
from PIL import Image
from numpy import asarray, mean, array, blackman
import numpy
from numpy.fft import rfft
import matplotlib.pyplot as plt
# NOTE(review): matplotlib.mlab.rms_flat was removed in newer matplotlib
# releases — this import pins the script to an old matplotlib version.
from matplotlib.mlab import rms_flat
try:
    # More accurate peak finding from
    # https://gist.github.com/endolith/255291#file-parabolic-py
    from parabolic import parabolic
    def argmax(x):
        return parabolic(x, numpy.argmax(x))[0]
except ImportError:
    # Fall back to plain integer argmax when parabolic isn't installed.
    from numpy import argmax
filename = '2Drotate.png'
# Load file, converting to grayscale
I = asarray(Image.open(filename).convert('L'))
I = I - mean(I)  # Demean; make the brightness extend above and below zero
# Do the radon transform and display the result
sinogram = radon(I)
plt.gray()
# Find the RMS value of each row and find "busiest" rotation,
# where the transform is lined up perfectly with the alternating dark
# text and white lines
r = array([rms_flat(line) for line in sinogram.transpose()])
rotation = argmax(r)
print('Rotation: {:.2f} degrees'.format(90 - rotation))
# NOTE(review): argparse is imported but never used below.
import argparse
import cv2
import numpy as np
# Undo the detected rotation with OpenCV and save the corrected image.
img = cv2.imread('2Drotate.png', 0)
rows,cols = img.shape
M = cv2.getRotationMatrix2D((cols/2,rows/2),90 - rotation,1)
dst = cv2.warpAffine(img,M,(cols,rows))
# NOTE(review): plt.plot(121) plots a single data point — plt.subplot(121)
# was likely intended here; confirm before changing.
plt.plot(121),plt.imshow(dst),plt.title('Output')
plt.savefig('hello.png')
plt.show()
|
[
"noreply@github.com"
] |
noreply@github.com
|
4f9b6137e32d293b24b9b687905de81bb99a0239
|
e28a89a1d79ab7b8ebf579a451b922b357206bb0
|
/leecode/1190. 反转每对括号间的子串.py
|
1d711d65956622209748a7ea68aac81cccb27e98
|
[] |
no_license
|
bobobyu/leecode-
|
67dfcf9f9891a39d68e9c610e896c7151809d529
|
015d42bb58c19869658d3f6405435134ac5444df
|
refs/heads/master
| 2023-01-30T23:35:00.882463
| 2020-12-18T02:20:28
| 2020-12-18T02:20:28
| 308,018,638
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 637
|
py
|
from typing import *
from collections import *
class Solution:
    def reverseParentheses(self, s: str) -> str:
        """Return s with the substring inside every pair of parentheses
        reversed; innermost pairs are reversed first."""
        pieces = []
        for ch in s:
            if ch == '(':
                # Leave a marker for the matching close paren to find.
                pieces.append('(')
            elif ch == ')':
                segment = ''
                # Unwind back to the matching '(' marker, rebuilding the
                # enclosed text in original order.
                while pieces[-1] != '(':
                    segment = pieces.pop() + segment
                # Replace the marker with the reversed segment.
                pieces[-1] = segment[::-1]
            else:
                pieces.append(ch)
        return ''.join(pieces)
s = Solution()
print(s.reverseParentheses("a(bcdefghijkl(mno)p)q"))
|
[
"676158322@qq.com"
] |
676158322@qq.com
|
7b91e3b074f85271a746505ec2100144aaa01af3
|
d7641647d67d110e08997767e85bbea081c2537b
|
/bitmovin_api_sdk/models/filter.py
|
6836a5d837fc6e5d357ec0e2fa2c5884394e48d2
|
[
"MIT"
] |
permissive
|
aachenmax/bitmovin-api-sdk-python
|
d3ded77c459852cbea4927ff28c2a4ad39e6026a
|
931bcd8c4695a7eb224a7f4aa5a189ba2430e639
|
refs/heads/master
| 2022-11-16T08:59:06.830567
| 2020-07-06T07:16:51
| 2020-07-06T07:16:51
| 267,538,689
| 0
| 1
|
MIT
| 2020-07-06T07:16:52
| 2020-05-28T08:44:44
|
Python
|
UTF-8
|
Python
| false
| false
| 1,781
|
py
|
# coding: utf-8
from six import string_types, iteritems
from bitmovin_api_sdk.common.poscheck import poscheck_model
from bitmovin_api_sdk.models.bitmovin_resource import BitmovinResource
import pprint
class Filter(BitmovinResource):
    """Base model for all filter resources; polymorphic via the `type` key."""
    # Maps the API discriminator value (serialized as 'type') to the concrete
    # subclass name; to_dict uses it to emit the right discriminator.
    discriminator_value_class_map = {
        'CROP': 'CropFilter',
        'CONFORM': 'ConformFilter',
        'WATERMARK': 'WatermarkFilter',
        'ENHANCED_WATERMARK': 'EnhancedWatermarkFilter',
        'ROTATE': 'RotateFilter',
        'DEINTERLACE': 'DeinterlaceFilter',
        'AUDIO_MIX': 'AudioMixFilter',
        'DENOISE_HQDN3D': 'DenoiseHqdn3dFilter',
        'TEXT': 'TextFilter',
        'UNSHARP': 'UnsharpFilter',
        'SCALE': 'ScaleFilter',
        'INTERLACE': 'InterlaceFilter',
        'AUDIO_VOLUME': 'AudioVolumeFilter',
        'EBU_R128_SINGLE_PASS': 'EbuR128SinglePassFilter'
    }
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        if hasattr(super(Filter, self), "to_dict"):
            result = super(Filter, self).to_dict()
        # Add the discriminator key matching this instance's concrete class.
        for k, v in iteritems(self.discriminator_value_class_map):
            if v == type(self).__name__:
                result['type'] = k
                break
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, Filter):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"openapi@bitmovin.com"
] |
openapi@bitmovin.com
|
b4e76b67a52d7e11e271463c76c756cd39c39301
|
f09978f2a0850278255bd198222cd3990cb0c687
|
/gear/schema.py
|
9e678012c38374b5baee61fdf28ff22143a7874c
|
[] |
no_license
|
szpone/climbing-gear
|
0e4e53b99a0b550c0e172af21c2c9e08e2c3f1ba
|
78ab13b97b4b66464859b95ba6e5ed8587d5e60c
|
refs/heads/master
| 2022-12-12T11:08:57.277056
| 2019-06-05T16:06:02
| 2019-06-05T16:06:02
| 185,016,538
| 1
| 0
| null | 2022-11-22T03:49:28
| 2019-05-05T10:30:11
|
Python
|
UTF-8
|
Python
| false
| false
| 514
|
py
|
import graphene
from graphene_django.types import DjangoObjectType, ObjectType
from .models import Gear
class GearType(DjangoObjectType):
    # GraphQL object type auto-generated from the Gear model's fields.
    class Meta:
        model = Gear
class Query(ObjectType):
    """Root GraphQL query exposing single-gear and all-gears lookups."""
    # `id` is the argument name GraphQL clients supply: gear(id: 1) { ... }
    gear = graphene.Field(GearType, id=graphene.Int())
    gears = graphene.List(GearType)
    def resolve_gear(self, info, id=None):
        """Return the Gear with the given id, or None if it doesn't exist."""
        # Graphene passes field arguments as keyword args named exactly as
        # declared on the Field, so the parameter must be `id` — the original
        # `gear_id` raised a TypeError on every gear(id: ...) query.
        # Shadowing the builtin is unavoidable without renaming the argument.
        return Gear.objects.filter(id=id).first()
    def resolve_gears(self, info, **kwargs):
        """Return all Gear rows."""
        return Gear.objects.all()
schema = graphene.Schema(query=Query)
|
[
"nikola.adamus@gmail.com"
] |
nikola.adamus@gmail.com
|
398f138bde398c4dea87e5a99707f52b0581bd66
|
6cabff723ad404c3883037d9fa1d32298c27b23e
|
/练习/实战8.py
|
0275a7e71db5efc7a6797d06ef684f6bb633958c
|
[] |
no_license
|
Brandyzwz/practice
|
300c128947e59b209098c60131ed3750e982b28a
|
661f74851af6208b5c6880b41ba5d7bc9da6bf5c
|
refs/heads/master
| 2020-03-25T02:25:51.136558
| 2018-08-02T12:06:44
| 2018-08-02T12:06:44
| 143,289,146
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
a = int(input('输入直角边a:\n'))
b = int(input('输入直角边b:\n'))
c = (a**2+b**2)**0.5
print('斜边c的长为:%d'%c)
|
[
"brandyzwz@outlook.com"
] |
brandyzwz@outlook.com
|
63b17a08ac2f4745e14601141a43ae06dd3014d8
|
e4dfc1402839f277e1e9ff8686dc6b67f1eb0bf0
|
/api_example/languages/urls.py
|
289d6f6c2218819b00280ffb8242142ac2eacf15
|
[] |
no_license
|
punitchauhan771/Sample-Rest-API
|
32ee7a16270a978ac4d82a161fe4fb677c029d36
|
8372982507fa2ee301d3a9de1e6fe9d4b66028ed
|
refs/heads/main
| 2023-02-24T00:49:57.831224
| 2021-01-23T08:23:35
| 2021-01-23T08:23:35
| 331,853,402
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 331
|
py
|
from django.urls import path, include
from . import views
from rest_framework import routers
router = routers.DefaultRouter()
router.register('languages', views.LanguageView)
router.register('Paradigm', views.ParadigmView)
router.register('Programmer',views.ProgrammerView)
urlpatterns = [
path('', include(router.urls))
]
|
[
"chauhanbhupendra980@gmail.com"
] |
chauhanbhupendra980@gmail.com
|
16eb3d8ca61b71e4472dd9dbccbad3c0497e2eb6
|
9f59572095262bb77b1069154dd70f52a2743582
|
/utils/.history/helper_20210303152111.py
|
db03f12b94ec4773cd7b73e126c98158e374fa06
|
[] |
no_license
|
zhang199609/diagnose_fault_by_vibration
|
ef089807fd3ae6e0fab71a50863c78ea163ad689
|
7b32426f3debbe9f98a59fe78acdec3ad6a186fd
|
refs/heads/master
| 2023-04-15T15:35:59.000854
| 2021-04-23T06:53:40
| 2021-04-23T06:53:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 868
|
py
|
"""
提高代码简洁性和可交互性的代码
"""
import time
def get_running_time(fun):
"""
代码运行时间装饰器
参数
-----
fun:要测试运行时间的函数
返回
-----
返回装饰器wrapper
例子
-----
>>> @get_running_time
def hello(name):
print("hello %s"%name)
time.sleep(3)
>>>
hello("Tony")
"""
def wrapper(*args, **kwargs):
start_time = time.time()
# 调用需要计算运行时间的函数
fun(*args, **kwargs)
end_time = time.time()
running_time = end_time - start_time
h = int(running_time//3600)
m = int((running_time - h*3600)//60)
s = int(running_time%60)
print("time cost: {0}:{1}:{2}".format(h, m, s))
return running_time # -> 可以省略
return wrapper
|
[
"18036834556@163.com"
] |
18036834556@163.com
|
f59eef689da00fb5d14fdfaddf69c05fcdb4d412
|
531c47c15b97cbcb263ec86821d7f258c81c0aaf
|
/sdk/labservices/azure-mgmt-labservices/azure/mgmt/labservices/models/lab_account_fragment.py
|
0e3a2fb1fa1897771c1c81ee386226f6730a5827
|
[
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] |
permissive
|
YijunXieMS/azure-sdk-for-python
|
be364d3b88204fd3c7d223df23756386ff7a3361
|
f779de8e53dbec033f98f976284e6d9491fd60b3
|
refs/heads/master
| 2021-07-15T18:06:28.748507
| 2020-09-04T15:48:52
| 2020-09-04T15:48:52
| 205,457,088
| 1
| 2
|
MIT
| 2020-06-16T16:38:15
| 2019-08-30T21:08:55
|
Python
|
UTF-8
|
Python
| false
| false
| 2,376
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class LabAccountFragment(Resource):
"""Represents a lab account.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: The identifier of the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:param location: The location of the resource.
:type location: str
:param tags: The tags of the resource.
:type tags: dict[str, str]
:param enabled_region_selection: Represents if region selection is enabled
:type enabled_region_selection: bool
:param provisioning_state: The provisioning status of the resource.
:type provisioning_state: str
:param unique_identifier: The unique immutable identifier of a resource
(Guid).
:type unique_identifier: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'enabled_region_selection': {'key': 'properties.enabledRegionSelection', 'type': 'bool'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'unique_identifier': {'key': 'properties.uniqueIdentifier', 'type': 'str'},
}
def __init__(self, **kwargs):
super(LabAccountFragment, self).__init__(**kwargs)
self.enabled_region_selection = kwargs.get('enabled_region_selection', None)
self.provisioning_state = kwargs.get('provisioning_state', None)
self.unique_identifier = kwargs.get('unique_identifier', None)
|
[
"lmazuel@microsoft.com"
] |
lmazuel@microsoft.com
|
3921c679f0f414668848b1d37b5d076edec45b8d
|
5d1441cc173e06fb24c389eb812067a3fc355587
|
/workflow/templatetags/custom.py
|
c9f466a6cebfba8a0db9f818d8958efed3756c15
|
[] |
no_license
|
Johums/ProjectManage
|
2860eb12134d9b522c5a5f2fa4e4054533d9175a
|
22d662e089adab447f247d078c89c670384e78ff
|
refs/heads/master
| 2021-01-10T16:36:47.412675
| 2016-02-27T15:24:13
| 2016-02-27T15:24:13
| 52,213,655
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 641
|
py
|
# -*- coding: utf-8 -*-
from django import template
from django.utils import safestring
register = template.Library()
@register.filter
def multi_menu(menu, move=0):
html_content = """
<ul class="nav nav-pills nav-stacked" style="margin-left: %spx" >
""" % move
for k, v in menu.items():
html_content += """
<li data-toggle="collapse" data-target=".demo">
<a href="{1}"><small>{2}</small></a>
</li>
""".format(*k)
if v:
html_content += multi_menu(v, move + 20)
html_content += """
</ul>
"""
return safestring.mark_safe(html_content)
|
[
"13276915582@163.com"
] |
13276915582@163.com
|
5c5a54f0963e8d6bd055050c7770fbf455661208
|
12a62bbca8065dcb6d835144368f6ad4cf46f219
|
/random_proj.py
|
e15f439ab7201a601e168512de11ffb755e3dcbf
|
[] |
no_license
|
daemonmaker/biglittle
|
c2371b198a43273275144036e3971c8035efd588
|
feadb55aa68f5b54f52084e0a12368783c93dd78
|
refs/heads/master
| 2021-01-11T11:03:25.318130
| 2014-11-25T23:56:42
| 2014-11-25T23:56:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 26,591
|
py
|
#! /usr/bin/env python
import time
from datetime import datetime
import numpy as np
import sys
import os
import os.path as op
import cPickle as pkl
from itertools import product
import gc
import theano
from theano import function
from theano import tensor as T
from theano import config
from theano import shared
from theano.tensor.shared_randomstreams import RandomStreams
from utils import *
from experiments import Experiments
from layer import HiddenLayer, HiddenBlockLayer, HiddenRandomBlockLayer
from timing_stats import TimingStats as TS
from models import (
EqualParametersModel,
EqualComputationsModel,
SparseBlockModel,
all_same
)
def simple_train(
model,
train_model,
test_model,
validate_model,
learning_rate,
shared_learning_rate,
timing_stats,
n_epochs=1000
):
timing_stats.add(['epoch', 'train'])
epoch = 0
minibatch_avg_cost_accum = 0
while(epoch < n_epochs):
print "Epoch %d" % epoch
timing_stats.start('epoch')
for minibatch_index in xrange(model.data.n_train_batches):
if minibatch_index % 10 == 0:
print '... minibatch_index: %d/%d\r' \
% (minibatch_index, model.data.n_train_batches),
# Note the magic comma on the previous line prevents new lines
timing_stats.start('train')
minibatch_avg_cost = train_model(minibatch_index)
timing_stats.end('train')
minibatch_avg_cost_accum += minibatch_avg_cost[0]
print '... minibatch_avg_cost_accum: %f' \
% (minibatch_avg_cost_accum/float(model.data.n_train_batches))
timing_stats.end('epoch')
epoch += 1
def train(
model,
train_model,
test_model,
validate_model,
learning_rate,
shared_learning_rate,
timing_stats,
n_epochs=1000
):
def summarize_rates():
print "Learning rate: ", learning_rate.rate
# early-stopping parameters
patience = 10000 # look as this many examples regardless
patience_increase = 100 # wait this much longer when a new best is
# found
improvement_threshold = 0.995 # a relative improvement of this much is
# considered significant
validation_frequency = min(data.n_train_batches, patience / 2)
# go through this many
# minibatche before checking the network
# on the validation set; in this case we
# check every epoch
best_params = None
this_validation_loss = 0
best_validation_loss = np.inf
best_iter = 0
test_score = 0.
accum = 0
epoch = 0
done_looping = False
timing_stats.add(['train', 'epoch', 'valid'])
summarize_rates()
timing_stats.start()
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
timing_stats.start('epoch')
for minibatch_index in xrange(data.n_train_batches):
timing_stats.start('train')
minibatch_avg_cost = train_model(minibatch_index)
timing_stats.end('train')
#print "0: ", model.layers[-5].in_idxs.get_value()
#print "1: ", model.layers[-4].in_idxs.get_value()
#print "2: ", model.layers[-3].in_idxs.get_value()
#print "3: ", model.layers[-2].in_idxs.get_value()
#print "4: ", model.layers[-1].in_idxs.get_value()
minibatch_avg_cost = minibatch_avg_cost[0]
accum = accum + minibatch_avg_cost
# print (
# "minibatch_avg_cost: " + str(minibatch_avg_cost)
# + " minibatch_avg_cost: " + str(minibatch_avg_cost)
# )
# print (
# l_layers[0].W.get_value().sum()
# + ' ' + l_layers[1].W.get_value().sum()
# + ' '
# + layers[0].W.get_value().sum()
# + ' ' + layers[1].W.get_value().sum()
# )
# print (
# "A: " + np.max(np.abs(layers[0].W.get_value()))
# + ' ' + np.max(np.abs(layers[0].b.get_value()))
# + ' ' + np.max(np.abs(layers[1].W.get_value()))
# + ' ' + np.max(np.abs(layers[1].b.get_value()))
# )
# print (
# "B: " + np.abs(layers[0].W.get_value()).sum()
# + ' ' + np.abs(layers[0].b.get_value()).sum()
# + ' ' + np.abs(layers[1].W.get_value()).sum()
# + ' ' + np.abs(layers[1].b.get_value()).sum()
# )
# print (
# "C: " + np.abs(np.array(minibatch_avg_cost[1])).sum()
# + ' ' + np.abs(np.array(minibatch_avg_cost[2])).sum()
# + ' ' + np.abs(np.array(minibatch_avg_cost[3])).sum()
# + ' ' + np.abs(np.array(minibatch_avg_cost[4])).sum()
# )
# iteration number
iter = (epoch - 1) * data.n_train_batches + minibatch_index
if (iter + 1) % validation_frequency == 0:
timing_stats.end('epoch')
timing_stats.reset('epoch')
timing_stats.reset('train')
accum = accum / validation_frequency
summary = ("minibatch_avg_cost: %f, time: %f"
% (accum, timing_stats.accumed['train'][-1][1]))
accum = 0
print "%s" % (summary)
# compute zero-one loss on validation set
summary = (
'epoch %i, minibatch %i/%i'
% (
epoch, minibatch_index + 1, data.n_train_batches
)
)
validation_losses = [validate_model(i) for i
in xrange(data.n_valid_batches)]
this_validation_loss = np.mean(validation_losses)
#this_validation_loss = 0
summary = ('validation error %f %% '
% (this_validation_loss * 100.))
print ("%s" % (summary))
# if we got the best validation score until now
this_validation_loss = this_validation_loss
if this_validation_loss < best_validation_loss:
#improve patience if loss improvement is good enough
if this_validation_loss < best_validation_loss * \
improvement_threshold:
patience = max(patience, iter * patience_increase)
best_validation_loss = this_validation_loss
best_iter = iter
# test it on the test set
test_losses = [test_model(i) for i
in xrange(data.n_test_batches)]
test_score = np.mean(test_losses)
#test_score = 0
summary = 'test_score: %f' % (test_score * 100.)
print (' epoch %i, minibatch %i/%i,'
' test error of best model %s'
% (epoch, minibatch_index + 1,
data.n_train_batches, summary))
learning_rate.update()
shared_learning_rate.set_value(learning_rate.rate)
summarize_rates()
if patience <= iter:
done_looping = True
break
timing_stats.end()
print('Optimization complete. Best validation score of %f %% '
'obtained at iteration %i, with test performance %f %%' %
(best_validation_loss * 100., best_iter + 1, test_score * 100.))
print >> sys.stderr, ('The code for file ' +
os.path.split(__file__)[1] +
' ran for %s' % timing_stats)
def run_experiments(exps, models, rng=None):
if rng is None:
rng = np.random.RandomState()
data = None
model = None
timings = None
for idx, model_class in product(exps, models):
print 'Experiment: %d, Model class: %s' % (idx, model_class)
parameters = exps.get_parameters_by_exp_idx(idx)
print 'Batch size: %d' % parameters['batch_size']
if (
data is None
or data.batch_size != parameters['batch_size']
or data.reshape_data != model_class.reshape_data
):
print 'Loading Data'
print '... MNIST'
data = MNIST(parameters['batch_size'], model_class.reshape_data)
gc.collect()
try:
shared_learning_rate = shared(
np.array(
parameters['learning_rate'].rate,
dtype=config.floatX
),
name='learning_rate'
)
timings = TS(['build_model', 'build_functions', 'full_train'])
print 'Building model: %s' % str(model_class)
timings.start('build_model')
layer_definitions = exps.get_layers_definition(idx)
model = model_class(
data=data,
layer_descriptions=layer_definitions,
batch_size=parameters['batch_size'],
learning_rate=shared_learning_rate,
L1_reg=parameters['L1_reg'],
L2_reg=parameters['L2_reg'],
)
print '... time: %f' % timings.end('build_model')
print 'Building functions'
timings.start('build_functions')
functions = model.build_functions()
print '... time: %f' % timings.end('build_functions')
print 'Training'
timings.start('full_train')
simple_train(
model,
learning_rate=parameters['learning_rate'],
shared_learning_rate=shared_learning_rate,
n_epochs=parameters['n_epochs'],
timing_stats=timings,
**functions
)
print 'Training time: %d' % timings.end('full_train')
model = None
except MemoryError:
epoch_time = -1
if timings is not None:
print 'Timings: %s' % timings
exps.save(idx, model_class.__name__, 'timings', timings)
timings = None
gc.collect()
pkl.dump(exps, open('random_proj_experiments.pkl', 'wb'))
def plot_times_by_batch(database):
import matplotlib.pyplot as plt
# Load the database
exps = pkl.load(open(database, 'rb'))
# Find experiments that have results
exp_idxs = exps.get_idxs('experiments', has_results=True)
# Plot results for each experiment grouped by the layers_description
layers_description_idxs = exps.get_table_idxs_by_exp_idxs(
'layers_description',
exp_idxs
)
for layers_description_idx in layers_description_idxs:
result_idxs = exps.get_result_idxs_by_table_idx(
'layers_description',
layers_description_idx
)
batch_sizes = [exps.get_parameters_by_exp_idx(idx)['batch_size']
for idx in result_idxs]
timings = {model_name: np.zeros(len(batch_sizes))
for model_name in exps.results[result_idxs[0]].keys()}
for i, idx in enumerate(result_idxs):
for model_name, stats in exps.results[idx].iteritems():
timings[model_name][i] = stats[
'timings'
].mean_difference('train')/batch_sizes[i]
for model_name, timings in timings.iteritems():
plt.plot(batch_sizes, timings, marker='o', label=model_name,)
plt.title('Train time per sample', fontsize=12)
layers_description = exps.get_layers_description(
layers_description_idx
)
plt.suptitle(
'layers_description_idx: %d, n_units: %s, n_hids: %s,\n'
'k_pers: %s, all same: %r' % (
layers_description_idx,
layers_description['n_hids'],
layers_description['n_units_per'],
layers_description['k_pers'],
'index_selection_funcs' in layers_description.keys()
),
y=0.99,
fontsize=10
)
plt.xlabel('Batch Size')
plt.ylabel('Time (s)')
plt.legend()
plt.xticks(batch_sizes)
figs_dir = 'figs'
if not op.exists(figs_dir):
os.mkdir(figs_dir)
plt.savefig(
op.join(
figs_dir,
'layers_description_%d.png' % layers_description_idx
),
format='png'
)
plt.show()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description='Run random_proj experiments and plot results'
)
parser.add_argument(
'-m', '--use_layers',
type=int, default=[], nargs='+',
help='Identifier for which models to use in the experiments.'
)
parser.add_argument(
'-c', '--layer_class',
default='HiddenRandomBlockLayer',
help='The type of layer to use in the block sparse model.'
)
parser.add_argument(
'-b', '--batch_sizes',
type=int, default=[32], nargs='+',
help='Range of batch sizes to test.'
)
parser.add_argument(
'-n', '--number_of_epochs',
type=int, default=1,
help='Number of epochs to execute for each experiment.'
)
parser.add_argument(
'-u', '--units_per_block',
type=int, default=32,
help='Number of units per block in the sparse block models.'
)
parser.add_argument(
'-d', '--database',
default='random_proj_experiments.pkl',
help='Which database to use.'
)
parser.add_argument(
'-l', '--load_database',
default=False, action='store_true',
help='Whether to load an existing database.'
)
parser.add_argument(
'-p', '--plot',
default=False,
action='store_true',
help='Plot results instaed of execute experiments.'
)
args = parser.parse_args()
if args.plot:
plot_times_by_batch(args.database)
else:
if args.load_database:
exps = pkl.load(open(args.database))
else:
## Determine the type of sparsity layer to use
if args.layer_class == 'HiddenRandomBlockLayer':
layer_class = HiddenRandomBlockLayer
else:
layer_class = HiddenBlockLayer
## Create experiments
exps = Experiments(
input_dim=784, # data.train_set_x.shape[-1].eval(),
num_classes=10
)
# Add descriptions of models
exps.add_layers_description(
0,
{
'n_hids': (25,),
'n_units_per': args.units_per_block,
'k_pers': (1, 1),
'activations': (T.tanh, None),
'layer_classes': [
HiddenBlockLayer,
HiddenBlockLayer,
],
}
)
exps.add_layers_description(
1,
{
'n_hids': (25, 25),
'n_units_per': args.units_per_block,
'k_pers': (1, 0.5, 1),
'activations': (T.tanh, T.tanh, None),
'layer_classes': [
HiddenBlockLayer,
layer_class,
HiddenBlockLayer,
],
}
)
exps.add_layers_description(
2,
{
'n_hids': (25, 100, 25),
'n_units_per': args.units_per_block,
'k_pers': (1, 0.25, 0.25, 1),
'activations': (T.tanh, T.tanh, T.tanh, None),
'layer_classes': [
HiddenBlockLayer,
layer_class,
layer_class,
HiddenBlockLayer,
],
}
)
exps.add_layers_description(
3,
{
'n_hids': (25, 100, 25),
'n_units_per': args.units_per_block,
'k_pers': (1., 0.25, 0.25, 1),
'activations': (T.tanh, T.tanh, T.tanh, None),
'layer_classes': [
HiddenBlockLayer,
layer_class,
layer_class,
HiddenBlockLayer,
],
'index_selection_funcs': (
None, all_same, all_same, None
)
}
)
exps.add_layers_description(
4,
{
'n_hids': (50, 100, 20),
'n_units_per': args.units_per_block,
'k_pers': (1, 0.05, 0.2, 1),
'activations': (T.tanh, T.tanh, T.tanh, None),
'layer_classes': [
HiddenBlockLayer,
layer_class,
layer_class,
HiddenBlockLayer,
],
},
)
exps.add_layers_description(
5,
{
'n_hids': (50, 100, 20),
'n_units_per': args.units_per_block,
'k_pers': (1, 0.05, 0.05, 1),
'activations': (T.tanh, T.tanh, T.tanh, None),
'layer_classes': [
HiddenBlockLayer,
layer_class,
layer_class,
HiddenBlockLayer,
],
'index_selection_funcs': (
None, all_same, all_same, None
)
},
)
exps.add_layers_description(
6,
{
'n_hids': (25, 100, 100, 25),
'n_units_per': args.units_per_block,
'k_pers': (1, 0.05, 0.05, 1, 1),
'activations': (T.tanh, T.tanh, T.tanh, T.tanh, None),
'layer_classes': [
HiddenBlockLayer,
layer_class,
layer_class,
layer_class,
HiddenBlockLayer,
],
}
)
exps.add_layers_description(
7,
{
'n_hids': (25, 100, 100, 25),
'n_units_per': args.units_per_block,
'k_pers': (1, 0.05, 0.05, 1, 1),
'activations': (T.tanh, T.tanh, T.tanh, T.tanh, None),
'layer_classes': [
HiddenBlockLayer,
layer_class,
layer_class,
layer_class,
HiddenBlockLayer,
],
'index_selection_funcs': (
None, all_same, all_same, all_same, None
)
}
)
exps.add_layers_description(
8,
{
'n_hids': (50, 200, 500, 200, 50),
'n_units_per': args.units_per_block,
'k_pers': (1., 0.1, 0.02, 0.02, 0.1, 1),
'activations': (
None, T.tanh, T.tanh, T.tanh, T.tanh, None
),
'layer_classes': [
HiddenBlockLayer,
layer_class,
layer_class,
layer_class,
layer_class,
HiddenBlockLayer,
],
}
)
exps.add_layers_description(
9,
{
'n_hids': (50, 75, 200, 75, 50),
'n_units_per': args.units_per_block,
'k_pers': (1., 0.1, 0.05, 0.05, 0.1, 1),
'activations': (
T.tanh, T.tanh, T.tanh, T.tanh, T.tanh, None
),
'layer_classes': [
HiddenBlockLayer,
layer_class,
layer_class,
layer_class,
layer_class,
HiddenBlockLayer,
],
'index_selection_funcs': (
None, all_same, all_same, all_same,
all_same, None
)
}
)
exps.add_layers_description(
10,
{
'n_hids': (50, 500, 500, 500, 500, 20),
'n_units_per': args.units_per_block,
'k_pers': (1, 0.07, 0.03, 0.02, 0.01, 0.15, 1),
'activations': (
T.tanh, T.tanh, T.tanh, T.tanh, T.tanh, T.tanh, None
),
'layer_classes': [
HiddenBlockLayer,
layer_class,
layer_class,
layer_class,
layer_class,
layer_class,
HiddenBlockLayer,
],
},
)
exps.add_layers_description(
11,
{
'n_hids': (50, 500, 500, 500, 500, 20),
'n_units_per': args.units_per_block,
'k_pers': (1, 0.07, 0.03, 0.02, 0.01, 0.15, 1),
'activations': (
T.tanh, T.tanh, T.tanh, T.tanh, T.tanh, T.tanh, None
),
'layer_classes': [
HiddenBlockLayer,
layer_class,
layer_class,
layer_class,
layer_class,
layer_class,
HiddenBlockLayer,
],
'index_selection_funcs': (
None, all_same, all_same, all_same,
all_same, all_same, None
)
}
)
exps.add_layers_description(
12,
{
'n_hids': (50, 100, 500, 500, 500, 500, 500, 100, 20),
'n_units_per': args.units_per_block,
'k_pers': (1, 0.1, 0.05, 0.01, 0.01, 0.01, 0.01, 0.05, 0.1, 1),
'activations': (
None, T.tanh, T.tanh, T.tanh, T.tanh,
T.tanh, T.tanh, T.tanh, T.tanh, None
),
'layer_classes': [
HiddenBlockLayer,
layer_class,
layer_class,
layer_class,
layer_class,
layer_class,
layer_class,
layer_class,
layer_class,
HiddenBlockLayer,
],
},
)
exps.add_layers_description(
13,
{
'n_hids': (50, 100, 500, 500, 500, 500, 500, 100, 20),
'n_units_per': args.units_per_block,
'k_pers': (1, 0.1, 0.05, 0.01, 0.01, 0.01, 0.1, 0.5, 0.1, 1),
'activations': (
None, T.tanh, T.tanh, T.tanh, T.tanh,
T.tanh, T.tanh, T.tanh, T.tanh, None
),
'layer_classes': [
HiddenBlockLayer,
layer_class,
layer_class,
layer_class,
layer_class,
layer_class,
layer_class,
layer_class,
layer_class,
HiddenBlockLayer,
],
'index_selection_funcs': (
None, all_same, all_same, all_same,
all_same, all_same, None
)
}
)
exps.add_layers_description(
14,
{
'n_hids': (50, 100, 20),
'n_units_per': args.units_per_block,
'k_pers': (1, 0.05, 0.05, 1),
'activations': (T.tanh, T.tanh, T.tanh, None),
'layer_classes': [
layer_class,
layer_class,
layer_class,
layer_class,
],
},
)
# Add parameter combinations
for idx, batch_size in enumerate(args.batch_sizes):
exps.add_parameters(
idx,
{
'n_epochs': args.number_of_epochs,
'batch_size': batch_size,
'learning_rate': LinearChangeRate(
0.21, -0.01, 0.2, 'learning_rate'
),
'L1_reg': 0.0,
'L2_reg': 0.0001
}
)
if len(args.use_layers) > 0:
print 'Executing experiments for layers %s' % args.use_layers
exps.create_experiments(args.use_layers)
else:
exps.create_experiments()
run_experiments(
exps,
models=[
EqualParametersModel,
EqualComputationsModel,
SparseBlockModel
]
)
|
[
"daemonmaker@gmail.com"
] |
daemonmaker@gmail.com
|
6df0a8249cc984e79381ba0ffcddd3d27403a62b
|
0c72282d601ccf840dd4e41b675c0675de7bc916
|
/students/Jean-Baptiste/lessons/lesson03/assignment03_solution_JB/create_customers.py
|
45516c4640f57281eda038e5e82484c20727fe20
|
[] |
no_license
|
zconn/PythonCert220Assign
|
c7fedd9ffae4f9e74e5e4dfc59bc6c511c7900ab
|
99271cd60485bd2e54f8d133c9057a2ccd6c91c2
|
refs/heads/master
| 2020-04-15T14:42:08.765699
| 2019-03-14T09:13:36
| 2019-03-14T09:13:36
| 164,763,504
| 2
| 0
| null | 2019-01-09T01:34:40
| 2019-01-09T01:34:40
| null |
UTF-8
|
Python
| false
| false
| 360
|
py
|
"""
This is to create database using the Peewee ORM, sqlite and Python
"""
from customers_model import *
import customers_model as cm
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
logger.info('Let us build the classes from the model in the database')
cm.database.create_tables([cm.Customer])
cm.database.close()
|
[
"jbyamindi@yahoo.fr"
] |
jbyamindi@yahoo.fr
|
d82cf9e821ecf30bd91d020d422728952809a303
|
597ed154876611a3d65ca346574f4696259d6e27
|
/dbaas/workflow/steps/tests/test_vm_step.py
|
1f05feed7c79364c570f0ed132f5da3578825a91
|
[] |
permissive
|
soitun/database-as-a-service
|
41984d6d2177734b57d726cd3cca7cf0d8c5f5d6
|
1282a46a9437ba6d47c467f315b5b6a3ac0af4fa
|
refs/heads/master
| 2023-06-24T17:04:49.523596
| 2018-03-15T19:35:10
| 2018-03-15T19:35:10
| 128,066,738
| 0
| 0
|
BSD-3-Clause
| 2022-05-10T22:39:58
| 2018-04-04T13:33:42
|
Python
|
UTF-8
|
Python
| false
| false
| 1,661
|
py
|
from mock import patch
from physical.tests.factory import HostFactory, EnvironmentFactory
from ..util.vm import VmStep, MigrationWaitingBeReady
from . import TestBaseStep
@patch('workflow.steps.util.vm.get_credentials_for', return_value=True)
@patch('workflow.steps.util.vm.CloudStackProvider', return_value=object)
class VMStepTests(TestBaseStep):
def setUp(self):
super(VMStepTests, self).setUp()
self.host = self.instance.hostname
def test_environment(self, *args, **kwargs):
vm_step = VmStep(self.instance)
self.assertEqual(vm_step.environment, self.environment)
def test_host(self, *args, **kwargs):
vm_step = VmStep(self.instance)
self.assertEqual(vm_step.host, self.host)
@patch('workflow.steps.util.vm.get_credentials_for', return_value=True)
@patch('workflow.steps.util.vm.CloudStackProvider', return_value=object)
class VMStepTestsMigration(TestBaseStep):
def setUp(self):
super(VMStepTestsMigration, self).setUp()
self.host = self.instance.hostname
self.future_host = HostFactory()
self.host.future_host = self.future_host
self.host.save()
self.environment_migrate = EnvironmentFactory()
self.environment.migrate_environment = self.environment_migrate
self.environment.save()
def test_environment(self, *args, **kwargs):
vm_step = MigrationWaitingBeReady(self.instance)
self.assertEqual(vm_step.environment, self.environment_migrate)
def test_host(self, *args, **kwargs):
vm_step = MigrationWaitingBeReady(self.instance)
self.assertEqual(vm_step.host, self.future_host)
|
[
"mauro_murari@hotmail.com"
] |
mauro_murari@hotmail.com
|
b8572b08870ce01777c59a851f52f3cd3d40ed69
|
ed6dd94781e3022f230050284d2ddd3554cc0772
|
/multithreading/multiprocessing_pipes_conttest.py
|
379999612d2531d37797406abcedc405c450bf1c
|
[] |
no_license
|
Mantabit/python_examples
|
602d4f4237dbc2044d30dc5482e3e2dee4d90fb6
|
516dbb9cc63c7de5bfe7d0e79477dff9ff340a5d
|
refs/heads/master
| 2021-07-04T08:26:38.007606
| 2020-08-17T10:09:04
| 2020-08-17T10:09:04
| 153,170,298
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,224
|
py
|
import multiprocessing as mp
import time
class testClass(object):
def __init__(self,name):
self.name=name
def doSomething(self):
print("Object %s reporting!"%(self.name))
#function receives objects
def receiverFunction(receive_end):
while True:
#receive object from the pipe
try:
obj=receive_end.recv()
except EOFError as err:
print("nothing left in the queue, aborting receiver thread")
break
#use the received object
obj.doSomething()
#function generates objects
def producerFunction(send_end):
start=time.time()
i=0
#produce data every 50ms for 5s
while time.time()-start<1:
i+=1
send_end.send(testClass("Object%d"%(i)))
time.sleep(50e-3)
print("Closing the send_end in producer process...")
send_end.close()
if __name__=="__main__":
(receive_end,send_end)=mp.Pipe()
p_recv=mp.Process(target=receiverFunction,args=[receive_end])
p_send=mp.Process(target=producerFunction,args=[send_end])
p_recv.start()
p_send.start()
p_send.join()
send_end.close()
print("Closing send_end in parent process")
p_recv.join()
|
[
"dvarx@gmx.ch"
] |
dvarx@gmx.ch
|
8bf31f37886bfff9512c59b2d24b2699e2383f4b
|
559d7428cba525ddff43a4b03f495c070f382075
|
/Final/FinalExam/Q1/lazy.py
|
0ecd5c3d8b15acde96bbd4735217dc6431ee8534
|
[] |
no_license
|
kwokwill/comp3522-object-oriented-programming
|
9c222ad4d1a2c2420a5eb509f80ba792e94991f6
|
6e2347b70c07cfc3ca83af29c2bd5c4696c55bb6
|
refs/heads/master
| 2023-04-13T19:16:25.546865
| 2021-04-27T21:20:48
| 2021-04-27T21:20:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,260
|
py
|
import time
"""
(4 marks total)
This program simulates loading screens in a game. One of the screens is entered and resources
need to be loaded, the other does not
The code below takes about 1 second (on my machine) to complete.
Requirements
Speed up the code using the LAZY INITIALIZATION design pattern.
Do NOT change any code in the main function
Hints:
The code should run in about half the time after implementing Lazy Initialization
There is no need to use any multithreading/multiprocessing
"""
class Resources:
def __init__(self):
print("Creating resources")
time.sleep(0.5)
def __str__(self):
return "resources available"
class Screen:
def __init__(self, name):
self._name = name
self._resources = None
def enter_screen(self):
if not self._resources:
self._resources = Resources()
return self._resources
def __str__(self):
return self._name
def main():
start_time = time.time()
game_over = Screen("Game over")
print(game_over)
main_menu = Screen("Main menu")
print(main_menu)
print(main_menu.enter_screen())
end_time = time.time()
print("duration:", end_time - start_time)
if __name__ == '__main__':
main()
|
[
"donglmin@icloud.com"
] |
donglmin@icloud.com
|
1f0f45aa77603540df78c0dde6159ce16e10364a
|
873b6d338e696b200d1a6ca74bef85deaa8d8088
|
/manage.py
|
b4f28f3c7627588f205a3b43b083e42d7990486f
|
[] |
no_license
|
Craiglal/ChudoSkrynia
|
9720c8360f1589b97c15c5cfa48ba15bf2c6d0e7
|
ef2ca9ca356666628f2c8e4d1df8e97e0d0f72eb
|
refs/heads/master
| 2020-08-15T09:23:57.076445
| 2019-10-15T14:28:26
| 2019-10-15T14:28:26
| 215,316,335
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 544
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ChudoSkrynia.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
[
"asleep.alex@gmail.com"
] |
asleep.alex@gmail.com
|
64923cbcfed9624b6b8c664e8deaf3ad28ade468
|
a756e26160502b49dea686baa4f8d8480895ab85
|
/PartB_LBC_CorrespondingStates.py
|
128522479dc943e764d6b24b93be59a28c826581
|
[] |
no_license
|
Aitous/ENE251
|
adeb715ad24094765e23d03a481e309ab2dd3f8c
|
e7770c469f63683c4c3ea7916d8bcad64ad16593
|
refs/heads/master
| 2022-12-04T18:01:57.908783
| 2020-08-19T21:16:54
| 2020-08-19T21:16:54
| 288,349,026
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25,765
|
py
|
#!/usr/bin/env python
# coding: utf-8
# In[38]:
import time
import numpy as np
import matplotlib.pyplot as plt
import math
from sklearn.linear_model import LinearRegression
from scipy import interpolate
# Solving RachfordRice
def SolveRachfordRice(l, Nc, z, K):
    """Solve the Rachford-Rice equation for the liquid mole fraction.

    Newton's method safeguarded by bisection: a bracket [low, high] is
    chosen from the sign of the objective at l=0 and l=1, then each
    Newton step is accepted only if it stays inside the bracket;
    otherwise the midpoint is used. Iterates until |F(l)| < 1e-5.

    l  : initial guess for the liquid fraction
    Nc : number of components
    z  : overall mole fractions
    K  : equilibrium K-values
    """
    def objective(frac):
        return sum((1 - K[i]) * z[i] / (K[i] + (1 - K[i]) * frac)
                   for i in range(Nc))

    def derivative(frac):
        return sum(-z[i] * (1 - K[i]) ** 2 / (K[i] + (1 - K[i]) * frac) ** 2
                   for i in range(Nc))

    f_at_0 = objective(0)
    f_at_1 = objective(1)
    # Pick the search bracket from the boundary signs.
    if f_at_0 > 0 and f_at_1 < 0:
        low, high = 0, 1
    elif f_at_0 > 0 and f_at_1 > 0:
        low = 1
        high = np.max([(K[i] * z[i] - K[i]) / (1 - K[i]) for i in range(Nc)])
    else:
        high = 0
        low = np.min([(z[i] - K[i]) / (1 - K[i]) for i in range(Nc)])

    tol = 1.e-5
    while abs(objective(l)) > tol:
        # Tighten the bracket around the root.
        if objective(l) > 0:
            low = l
        else:
            high = l
        candidate = l - objective(l) / derivative(l)
        if low < candidate < high:
            l = candidate                 # Newton step stayed bracketed
        else:
            l = 0.5 * (low + high)        # fall back to bisection
    return l
#Calculating the a's and b's of the vapor and liquid phases. The function kij loads the interaction coefficients based on the EOS of interest
def kij(EOS):
    """Binary-interaction-coefficient matrix k_ij for the chosen EOS.

    PR returns a 19x19 zero matrix (the full component set used in this
    file); SRK returns a hard-coded 4x4 table; RK a 3x3 zero matrix.
    NOTE(review): the SRK/RK tables are smaller than the 19-component
    system used elsewhere — confirm before flashing with those EOSs.
    Raises ValueError for an unsupported EOS.
    """
    # '==' instead of 'is': identity comparison of string literals is a
    # CPython interning artifact and raises SyntaxWarning on modern Python.
    if EOS == 'PR':
        return np.zeros((19, 19))
    elif EOS == 'SRK':
        # (A dead "return Kij" statement that followed this return and
        # referenced an undefined name was removed.)
        return np.array([[0, 0.1, 0.1257, 0.0942],
                         [0.1, 0, 0.027, 0.042],
                         [0.1257, 0.027, 0, 0.008],
                         [0.0942, 0.042, 0.008, 0]])
    elif EOS == 'RK':
        return np.zeros([3, 3])
    raise ValueError('no k_ij table for EOS %r' % (EOS,))
def calc_a(EOS, T, Tc, Pc, omega):
    '''Attraction parameter a_i of one component for the EOS of interest.

    EOS   : equation of state ('PR', 'SRK' or 'RK')
    T, Tc : temperature and critical temperature of the component [K]
    Pc    : critical pressure of the component [Pa]
    omega : acentric factor of the component
    Raises ValueError for an unsupported EOS (previously this printed a
    message and then crashed with UnboundLocalError on the return).
    '''
    R = 8.314  # gas constant
    # String comparison with '==' ('is' relies on CPython string interning
    # and emits a SyntaxWarning on modern interpreters).
    if EOS == 'PR':
        fw = 0.37464 + 1.54226*omega - 0.26992*omega**2
        a1 = np.divide(0.45724*R**2*Tc**2, Pc)
        a2 = (1 + np.multiply(fw, (1 - np.sqrt(np.divide(T, Tc)))))**2
        a = np.multiply(a1, a2)
    elif EOS == 'SRK':
        fw = 0.48 + 1.574*omega - 0.176*omega**2
        a1 = np.divide((0.42748*R**2*Tc**2), Pc)
        a2 = (1 + np.multiply(fw, (1 - np.sqrt(np.divide(T, Tc)))))**2
        a = np.multiply(a1, a2)
    elif EOS == 'RK':
        # RK has a temperature-dependent attraction term (T^-0.5).
        a = np.divide(0.42748*R**2*Tc**(5/2), (Pc*T**0.5))
    else:
        raise ValueError('parameters for this EOS are not defined: %r' % (EOS,))
    return a
def calc_b(EOS, Tc, Pc):
    '''Co-volume parameter b_i for each component (Table 5.1 in the reader).

    EOS : equation of state ('PR', 'SRK' or 'RK')
    Tc  : critical temperature of the component [K]
    Pc  : critical pressure of the component [Pa]
    Raises ValueError for an unsupported EOS (previously fell through and
    returned None, deferring the failure to the caller).
    '''
    R = 8.314  # gas constant
    # '==' instead of 'is' for string comparison (SyntaxWarning otherwise).
    if EOS == 'PR':
        b = np.divide(0.07780*R*Tc, Pc)
    elif EOS == 'SRK':
        b = np.divide(0.08664*R*Tc, Pc)
    elif EOS == 'RK':
        # SRK and RK share the same co-volume coefficient.
        b = np.divide(0.08664*R*Tc, Pc)
    else:
        raise ValueError('parameters for this EOS are not defined: %r' % (EOS,))
    return b
def find_am(EOS, y, T, Tc, Pc, omega):
    '''Mixture attraction parameter a_m via van der Waals mixing rules.

    a_m = sum_i sum_j y_i y_j sqrt(a_i a_j) (1 - k_ij)

    EOS   : equation of state ('PR', 'SRK', 'RK')
    y     : phase composition (mole fractions)
    T, Tc : temperature and critical-temperature array
    Pc    : critical-pressure array
    omega : acentric-factor array
    '''
    kijs = kij(EOS)
    n = len(y)
    # Pure-component a_i computed once each. The previous version passed a
    # generator to np.sum (deprecated in NumPy — falls back to builtin sum
    # with a warning) and recomputed calc_a O(n^2) times.
    a_i = [calc_a(EOS, T, Tc[i], Pc[i], omega[i]) for i in range(n)]
    am = 0.0
    for i in range(n):
        for j in range(n):
            am += y[i] * y[j] * np.sqrt(a_i[i] * a_i[j]) * (1 - kijs[i, j])
    return am
def find_bm(EOS, y, Tc, Pc):
    '''Mixture co-volume b_m: mole-fraction weighted sum of component b_i.

    EOS    : equation of state ('PR', 'SRK' or 'RK')
    y      : liquid or vapor composition array
    Tc, Pc : critical temperature and pressure arrays
    '''
    b_pure = calc_b(EOS, Tc, Pc)
    return np.sum(y * b_pure)
def Z_factor(EOS, P, T, a, b):
    '''Compressibility factors from the generalized cubic EOS.

    Builds the cubic in Z for the EOS of interest and returns
    (Zv, Zl) — the largest and smallest *real* roots, i.e. the
    vapor-like and liquid-like compressibility factors.

    EOS  : 'PR', 'SRK' or 'RK' (selects the u, w coefficients)
    P, T : pressure [Pa] and temperature [K]
    a, b : EOS attraction / co-volume parameters for the phase
    '''
    R = 8.314  # gas constant
    # Generalized-cubic coefficients: PR -> (2, -1); SRK and RK -> (1, 0).
    if EOS == 'PR':
        u, w = 2, -1
    elif EOS == 'SRK':
        u, w = 1, 0
    elif EOS == 'RK':
        u, w = 1, 0
    # Dimensionless EOS parameters.
    A = np.divide(a * P, R ** 2 * T ** 2)
    B = np.divide(b * P, R * T)
    cubic = [
        1,
        -(1 + B - u * B),
        A + w * B ** 2 - u * B - u * B ** 2,
        -np.multiply(A, B) - w * B ** 2 - w * B ** 3,
    ]
    roots = np.roots(cubic)
    # Discard complex-conjugate pairs; keep only real roots.
    real_roots = np.real(roots[np.imag(roots) == 0])
    return max(real_roots), min(real_roots)
def get_fug(EOS, y, Z, Tc, Pc, P, T, omega, a, b):
    '''Component fugacities of one phase (Eq. 6.8 in the course reader).

    EOS    : equation of state ('PR', 'SRK' or 'RK')
    y      : phase composition (mole fractions)
    Z      : compressibility factor of the phase
    Tc, Pc : critical temperature / pressure arrays
    P, T   : pressure [Pa] and temperature [K]
    omega  : acentric-factor array
    a, b   : mixture EOS parameters for this phase
    Returns an array of fugacities f_i = y_i * P * exp(ln phi_i).
    Raises ValueError for an unsupported EOS (previously u/w were left
    undefined, causing a NameError later).
    '''
    R = 8.314  # gas constant
    # '==' instead of 'is' for string comparison (SyntaxWarning otherwise).
    if EOS == 'PR':
        u = 2
        w = -1
    elif EOS == 'SRK':
        u = 1
        w = 0
    elif EOS == 'RK':
        u = 1
        w = 0
    else:
        raise ValueError('parameters for this EOS are not defined: %r' % (EOS,))
    kijs = kij(EOS)
    fug = np.zeros(y.shape)
    # Dimensionless EOS parameters.
    A = np.divide(a*P, R**2*T**2)
    B = np.divide(b*P, R*T)
    # Pure-component a_i, computed once per component.
    a_i = [calc_a(EOS, T, Tc[i], Pc[i], omega[i]) for i in range(len(y))]
    # Pure-component b_i, hoisted out of the fugacity loop (the old code
    # recomputed the full calc_b array on every iteration).
    b_i = calc_b(EOS, Tc, Pc)
    # delta_i = (2 sqrt(a_i) / a) * sum_j y_j sqrt(a_j) (1 - k_ij)
    delta_i = []
    for i in range(len(y)):
        xa = 0
        for j in range(len(y)):
            xa += y[j] * math.sqrt(a_i[j]) * (1 - kijs[i][j])
        delta_i.append(2 * math.sqrt(a_i[i]) / a * xa)
    for i in range(len(fug)):
        bi = b_i[i]
        # ln of the fugacity coefficient phi_i for the generalized cubic.
        ln_Phi = bi/b * (Z - 1) - math.log(Z - B) + A / (B * math.sqrt(u**2 - 4*w)) * (bi/b - delta_i[i]) * math.log((2 * Z + B *(u + math.sqrt(u**2 - 4*w))) /(2 * Z + B *(u - math.sqrt(u**2 - 4*w))))
        fug[i] = y[i] * P * math.exp(ln_Phi)
    return fug
def Ki_guess(Pc, Tc, P, T, omega, Nc):
    """Initial equilibrium K-values from the Wilson correlation.

    K_i = (Pc_i / P) * exp(5.37 (1 + omega_i) (1 - Tc_i / T))
    """
    wilson = []
    for i in range(Nc):
        wilson.append(Pc[i] / P * np.exp(5.37 * (1 + omega[i]) * (1 - Tc[i] / T)))
    return np.array(wilson)
def flash(EOS, l, Nc, zi, Tc, Pc, P, T, omega):
    """Two-phase isothermal flash by successive substitution.

    Starting from Wilson-correlation K-values, repeatedly: solves the
    Rachford-Rice equation for the liquid fraction, splits the feed into
    liquid (xi) and vapor (yi), evaluates each phase's fugacities from
    the cubic EOS — picking the Z root with the lower Gibbs free energy —
    and updates K_i *= f_l,i / f_v,i until the fugacity ratios agree
    within tolerance. (The original had the ~30-line per-iteration body
    duplicated verbatim before and inside the loop; it is factored into a
    helper here. Dead Vv/Vl computations were removed.)

    EOS    : 'PR', 'SRK' or 'RK'
    l      : initial guess for the liquid mole fraction
    Nc     : number of components
    zi     : overall composition
    Tc, Pc : critical temperature / pressure arrays
    P, T   : pressure [Pa] and temperature [K]
    omega  : acentric-factor array
    Returns (fug_v, fug_l, l, xi, yi).
    """
    tol = 1e-5

    def _phase_fugacity(comp):
        # Mixture EOS parameters for this phase composition, then the
        # fugacities of the minimum-Gibbs-energy Z root.
        a = find_am(EOS, comp, T, Tc, Pc, omega)
        b = find_bm(EOS, comp, Tc, Pc)
        Z_hi, Z_lo = Z_factor(EOS, P, T, a, b)
        fug_hi = get_fug(EOS, comp, Z_hi, Tc, Pc, P, T, omega, a, b)
        fug_lo = get_fug(EOS, comp, Z_lo, Tc, Pc, P, T, omega, a, b)
        # deltaG <= 0 means the low root has the lower Gibbs free energy.
        if np.sum(comp * np.log(fug_lo / fug_hi)) <= 0:
            return fug_lo
        return fug_hi

    Ki = Ki_guess(Pc, Tc, P, T, omega, Nc)
    while True:
        l = SolveRachfordRice(l, Nc, zi, Ki)
        xi = np.divide(zi, l + (1 - l) * Ki)
        yi = np.divide(np.multiply(Ki, zi), l + (1 - l) * Ki)
        fug_v = _phase_fugacity(yi)  # vapor phase
        fug_l = _phase_fugacity(xi)  # liquid phase
        # Converged when f_v,i == f_l,i (within tol) for every component.
        if np.max(abs(np.divide(fug_v, fug_l) - 1)) <= tol:
            break
        # Successive-substitution K-value update.
        Ki = Ki * np.divide(fug_l, fug_v)
    return (fug_v, fug_l, l, xi, yi)
def volumeCorrection(EOS, V, zi, Pc, Tc):
    """Peneloux-style volume-shift correction: V_corrected = V - sum(zi * ci).

    Only implemented for EOS == "PR"; any other EOS returns V unchanged.
    The shift array S is indexed so that S[i] belongs to component i + 2
    (CO2 and N2 get fixed c values, the first 8 S entries cover C1..C6,
    and the appended values cover the C7+ pseudo-components) — fragile;
    confirm the alignment if the component list ever changes.
    """
    # Molar masses for the 19-component system (CO2, N2, C1 ... PS5).
    Mw = np.array([44.01, 28.013, 16.043, 30.07, 44.097, 58.123, 58.123, 72.15, 72.15, 84, 96, 107, 121, 134, 163.5, 205.4, 253.6, 326.7, 504.4])
    if EOS == "PR":
        # Dimensionless shifts S_i from the reader, page 129 (C1 .. C6 range).
        S = [-0.1540, 0.1002, -0.08501, -0.07935, -0.06413, -0.04350, -0.04183, -0.01478]
        c = [3.7, 0] #CO2 and N2
        # Correlated shifts for the heavy (C7+) pseudo-components.
        for i in range(10, len(Pc)):
            S.append(1 - 2.258/Mw[i]**0.1823) #values correlated for heavier components (+C7)
        # c_i = S_i * b_i, with S[i] mapping to component i + 2.
        for i in range(0, len(Pc)-2):
            c.append(S[i] * calc_b(EOS, Tc[i+2], Pc[i+2]))
        V = V - np.sum([zi[i] * c[i] for i in range(len(Pc))])
    return V
def volume(EOS, P, T, Pc, Tc, omega, zi = np.array([1]), mixture = False):
    """Molar volume [m^3/mol] of a pure component or mixture from the cubic EOS.

    The cubic is solved for both Z roots; the physical root is the one
    with the lower Gibbs free energy, compared through the fugacities of
    the two roots — the same selection rule used in flash().

    EOS     : 'PR', 'SRK' or 'RK'
    P, T    : pressure [Pa] and temperature [K]
    Pc, Tc  : critical pressure / temperature arrays
    omega   : acentric-factor array
    zi      : composition (defaults to a single pure component; the
              default array is never mutated here)
    mixture : if True, use the mixing rules for a and b
    """
    R = 8.314  # gas constant
    if not mixture:
        a = calc_a(EOS, T, Tc, Pc, omega)
        b = calc_b(EOS, Tc, Pc)
    else:
        a = find_am(EOS, zi, T, Tc, Pc, omega)
        b = find_bm(EOS, zi, Tc, Pc)
    Zv, Zl = Z_factor(EOS, P, T, a, b)
    fug_v = get_fug(EOS, zi, Zv, Tc, Pc, P, T, omega, a, b)
    fug_l = get_fug(EOS, zi, Zl, Tc, Pc, P, T, omega, a, b)
    # BUG FIX: the mixture branch previously used log(fug_v / fug_l) — the
    # opposite sign convention from the pure branch and from flash() — so
    # it could select the higher-Gibbs-energy root. Both cases now use the
    # same convention: deltaG <= 0 means the liquid-like (low) root wins.
    deltaG = np.sum(zi * np.log(fug_l / fug_v))
    Z = Zl if deltaG <= 0 else Zv
    V = np.divide(Z * R * T, P)
    # V = volumeCorrection(EOS, V, zi, Pc, Tc)  # optional volume-shift correction (disabled)
    return V
# Computes reference viscosity of methane using the correlation of Hanley et al., Cryogenics, July 1975
# To be used for corresponding-states computation of mixture viscosity
# A. R. Kovscek
# 20 November 2018
# Tref is the reference temperature in K (viscosity computed at this temperature)
# rho_ref is the reference density in g/cm3 (viscosity computed at this temperature and density)
# mu_C1 is the viscosity from the correlation in mPa-s (identical to cP)
def ViscMethane(Tref, rho_ref):
    """Reference (methane) viscosity from the Hanley et al. correlation.

    Tref    : reference temperature [K]
    rho_ref : reference density [g/cm^3]
    Returns the methane viscosity at (Tref, rho_ref) in mPa-s (== cP).
    The result is the sum of a dilute-gas term, a first density
    correction, and a dense-fluid remainder, converted by 1e-4.
    """
    import math

    rho_c = 16.043 / 99.2  # critical density of methane [g/cm^3]

    # Dilute-gas term: polynomial in T^(-1 + i/3).
    dilute_coeffs = [-209097.5, 264726.9, -147281.8, 47167.40, -9491.872,
                     1219.979, -96.27993, 4.274152, -0.08141531]
    dilute = 0.
    for idx in range(0, len(dilute_coeffs)):
        dilute = dilute + dilute_coeffs[idx] * math.pow(Tref, -1. + (idx) * 1. / 3.)

    # First density-correction term.
    first_density = 1.696985927 + (-0.133372346) * math.pow(
        (1.4 - math.log(Tref / 168.0)), 2.)

    # Viscosity remainder (dense-fluid contribution).
    j1 = -10.35060586
    j2 = 17.571599671
    j3 = -3019.3918656
    j4 = 188.73011594
    j5 = 0.042903609488
    j6 = 145.29023444
    j7 = 6127.6818706
    theta = (rho_ref - rho_c) / rho_c
    exponent = math.pow(rho_ref, 0.1) * (j2 + j3 / math.pow(Tref, 1.5)) \
        + theta * math.sqrt(rho_ref) * (j5 + j6 / Tref + j7 / math.pow(Tref, 2.))
    remainder = math.exp(j1 + j4 / Tref) * (math.exp(exponent) - 1.)

    # Multiply by 1e-4 to convert to mPa-s (cP).
    return (dilute + first_density + remainder) * 0.0001
def get_interp_density(p, T):
    '''Methane density by bilinear interpolation on tabulated NIST-style data.

    @param p: pressure in Pa (tabulated range 0.1e6 .. 50e6)
    @param T: temperature in K (tabulated range 90.7 .. 170)
    @return : methane density in kg/m3 as a 1-element numpy array
    @raise Exception: if (p, T) falls outside the tabulated range
    '''
    data_p = [0.1e6, 1e6, 3e6, 5e6, 10e6, 20e6, 50e6]
    data_T = [90.7, 94, 98, 100, 105, 110, 120, 140, 170]
    if p < data_p[0] or p > data_p[-1] or T < data_T[0] or T > data_T[-1]:
        raise Exception('Input parameter out of range')
    # Rows correspond to pressures (data_p), columns to temperatures (data_T).
    data_den = [[451.5, 447.11, 441.68, 438.94, 431.95, 424.79, 409.9, 1.403, 1.1467],
                [451.79, 447.73, 442.34, 439.62, 432.7, 425.61, 410.9, 377.7, 14.247],
                [453, 449.08, 443.78, 441.11, 434.32, 427.38, 413.05, 381.12, 314.99],
                [454, 450.4, 445.19, 442.55, 435.89, 429.09, 415.1, 384.28, 324.32],
                [456, 453.57, 448.55, 446.02, 439.63, 433.13, 419.9, 391.35, 340.6],
                [460, 458, 454.74, 452.37, 446.43, 440.43, 428.32, 402.99, 361.57],
                [477, 473, 470, 468, 463.2, 458.14, 448.08, 427.88, 397.48]]
    # BUG FIX: scipy.interpolate.interp2d was deprecated and removed in
    # SciPy 1.14; RegularGridInterpolator with the default linear method
    # is the documented replacement and yields the same bilinear values.
    f = interpolate.RegularGridInterpolator((data_p, data_T), data_den)
    return f([[p, T]])
# In[39]:
def LBC_viscosity(P, T, zi, Tc, Pc, omega, Mw, Vci):
    """Lohrenz-Bray-Clark (LBC) liquid viscosity versus pressure (plotted).

    Starting at pressure P [Pa], flashes the fluid at each pressure,
    evaluates the LBC correlation for the liquid phase, then raises the
    pressure by 10% until 3000 psia; finally plots viscosity [cP] against
    pressure [Pa].

    zi, Tc, Pc, omega, Mw : overall composition and component properties
    Vci : critical volumes [cm3/mol] of the first 10 (library) components;
          the C7+ critical volume comes from a correlation below.
    NOTE(review): np.sum over generator expressions (used below) is
    deprecated in NumPy and falls back to the builtin sum with a warning.
    """
    # LBC fourth-degree polynomial coefficients in reduced density.
    coef = [0.10230, 0.023364, 0.058533, -0.040758, 0.0093324]
    Nc = len(zi)
    EOS = 'PR'
    Pmax = 3000 * 6894.76  # 3000 psia in Pa
    Pressure = []
    visc = []
    while P < Pmax:
        # Flash at (P, T); l = liquid fraction, xi = liquid composition.
        fug_v, fug_l, l, xi, yi = flash(EOS, 0.5, Nc, zi, Tc, Pc, P, T, omega)
        if l>1:
            # Single-phase liquid: use the overall composition.
            xi = zi
        # Mixture viscosity-reducing parameter Ksi and per-component values.
        Ksi = 5.4402 * 399.54 * np.sum(xi * Tc)**(1/6)/np.multiply(np.sum(xi * Mw)**(0.5),np.sum(xi * Pc)**(2/3))
        Ksi_i = 5.4402 * 399.54 * Tc**(1/6) * Mw**(-0.5) * Pc**(-2/3)
        #Ksi_i = Tc**(1/6) * Mw**(-0.5) * Pc**(-2/3)
        # Dilute-gas component viscosities (two reduced-temperature regimes).
        eta_star_i = np.zeros(xi.shape)
        for i in range(Nc):
            Tr = T/Tc[i]
            if Tr < 1.5:
                eta_star_i[i] = 34e-5 * (Tr**0.94)/Ksi_i[i]
            else:
                eta_star_i[i] = 17.78 * 1e-5 * ((4.58*Tr - 1.67)**0.625)/ Ksi_i[i]
        # Herning-Zipperer-style mixing of the dilute-gas viscosities.
        eta_star = np.divide(np.sum(xi * eta_star_i * Mw**0.5), np.sum(xi * Mw**0.5))
        # C7+ pseudo-fraction molar mass and correlated critical volume.
        MC7_plus = np.sum(xi[i] * Mw[i] for i in range(10, Nc)) / np.sum(xi[i] for i in range(10, Nc))
        denC7_plus = 0.895  # assumed C7+ specific gravity — TODO confirm
        Vc_plus = (21.573 + 0.015122*MC7_plus - 27.656*denC7_plus + 0.070615*denC7_plus*MC7_plus) * 6.2372*1e-5
        V_mixture = volume(EOS, P, T, Pc, Tc, omega, xi, True)
        xC7_plus = np.sum(xi[i] for i in range(10, Nc))
        Vc_mixture = np.sum(xi[i] * Vci[i] for i in range(10))*1e-6 + xC7_plus * Vc_plus
        # Reduced density feeding the LBC polynomial.
        rho_r = Vc_mixture/V_mixture
        viscosity = ((coef[0] + coef[1] * rho_r + coef[2] * rho_r**2 + coef[3] * rho_r**3 + coef[4] * rho_r**4)**4 - 0.0001)/Ksi + eta_star
        visc.append(viscosity)
        Pressure.append(P)
        P = 1.1 * P  # 10% pressure step
    plt.plot(Pressure, visc)
    plt.xlabel("Pressure (Pa)")
    plt.ylabel("Viscosity (cP)")
    plt.title("Viscosity vs pressure")
    plt.show()
# In[40]:
# --- Driver: LBC viscosity vs pressure for the 19-component reservoir oil ---
P = 1500 * 6894.76  # starting pressure: 1500 psia in Pa
T = 106 + 273.15  # temperature: 106 degC in K
# NOTE(review): adjacent string literals concatenate, so Names is a set
# holding ONE merged string, not 19 names (unused; probably meant as a list).
Names = {'CO2' 'N2' 'C1' 'C2' 'C3' 'iC4' 'n-C4' 'i-C5' 'n-C5' 'C6' 'C7' 'C8' 'C9' 'C10' 'PS1' 'PS2' 'PS3' 'PS4' 'PS5'}
zi = np.array([0.0044, 0.0017, 0.3463, 0.0263, 0.0335, 0.092, 0.0175, 0.0089, 0.0101, 0.0152, 0.05, 0.0602, 0.0399, 0.0355, 0.1153, 0.0764, 0.0633, 0.0533, 0.0330])#oil composition
Tc = np.array([304.2, 126.2, 190.6, 305.4, 369.8, 408.1, 425.2, 460.4, 469.6, 507.4, 548, 575, 603, 626, 633.1803, 675.9365, 721.3435, 785.0532, 923.8101]) # in Kelvin
Pc = np.array([72.9, 33.6, 45.4, 48.2, 41.9, 36.0, 37.5, 33.4, 33.3, 29.3, 30.7, 28.4, 26, 23.9, 21.6722, 19.0339, 16.9562, 14.9613, 12.6979])*101325 # in Pa
omega = np.array([0.228, 0.04, 0.008, 0.098, 0.152, 0.176, 0.193, 0.227, 0.251, 0.296, 0.28, 0.312, 0.348, 0.385, 0.6254, 0.7964, 0.9805, 1.2222, 1.4000]) # accentric factors
Mw = np.array([44.01, 28.013, 16.043, 30.07, 44.097, 58.123, 58.123, 72.15, 72.15, 84, 96, 107, 121, 134, 163.5, 205.4, 253.6, 326.7, 504.4])  # molar masses
Vci = np.array([91.9, 84, 99.2, 147, 200, 259, 255, 311, 311, 368]) # cm3/mol, library components only
LBC_viscosity(P, T, zi, Tc, Pc, omega, Mw, Vci)  # plots viscosity vs pressure
# In[41]:
def corresponding_state_Visco(P, T ,zi, Tc, Pc, omega, Mw):
    """Corresponding-states mixture viscosity versus pressure (plotted).

    Uses methane as the reference fluid (index 2 in the component
    arrays): maps the mixture to reference conditions through
    pseudo-critical properties and shape factors, evaluates the Hanley
    methane viscosity correlation there, and scales the result back to
    the mixture; plots viscosity [cP] vs pressure [Pa] up to 3000 psia.
    """
    R = 8.314 # gas constant
    # NOTE(review): adjacent string literals concatenate — Names is a set
    # containing one merged string; it is unused.
    Names = {'CO2' 'N2' 'C1' 'C2' 'C3' 'iC4' 'n-C4' 'i-C5' 'n-C5' 'C6' 'C7' 'C8' 'C9' 'C10' 'PS1' 'PS2' 'PS3' 'PS4' 'PS5'}
    Nc = len(zi)
    EOS = 'PR'
    tol = 1e-5
    Pmax = 3000 * 6894.76  # 3000 psia in Pa
    visc = []
    Pressure = []
    while P < Pmax:
        fug_v, fug_l, l, xi, yi = flash(EOS, 0.5, Nc, zi, Tc, Pc, P, T, omega)
        if l>1:
            # Single-phase liquid: fall back to the overall composition.
            xi = zi
        # Pseudo-critical properties (one-fluid mixing rules) and
        # molar-mass averages of the liquid phase.
        Tc_mix = 0
        Mmix = 0
        Mn = 0
        M = 0
        denominator = 0
        for i in range(Nc):
            Mn += xi[i] * Mw[i]       # number-average molar mass
            M += xi[i] * Mw[i]**2     # becomes the weight average below
            for j in range(Nc):
                Tc_mix += xi[i]*xi[j]*(Tc[i] * Tc[j])**(0.5)*((Tc[i]/Pc[i])**(1/3) + (Tc[j]/Pc[j])**(1/3))**3
                denominator += xi[i]*xi[j]*((Tc[i]/Pc[i])**(1/3) + (Tc[j]/Pc[j])**(1/3))**3
        Tc_mix = Tc_mix / denominator
        Pc_mix = 8 * Tc_mix / denominator
        M /= Mn  # weight-average molar mass
        Mmix = 1.304 * 1e-4 * (M**2.303 - Mn**2.303) + Mn
        # Map to reference-fluid (methane, component index 2) conditions.
        Tr = (T * Tc[2])/Tc_mix
        Pr = (P * Pc[2])/Pc_mix
        rho_c = 162.84 #kg/m3, methane critical density
        # Volume-shift-corrected methane molar volume at (Pr, Tr).
        S = -0.154
        b = calc_b(EOS, Tc[2], Pc[2])
        Vc = volume(EOS, Pr, Tr, np.array([Pc[2]]), np.array([Tc[2]]), np.array([omega[2]]))
        volume_cor = Vc - b * S
        rho_r = Mw[2] * 1e-3 / volume_cor / rho_c
        # Shape factors correcting the simple corresponding-states map.
        alpha_mix = 1 + 7.378 * 10**(-3) * rho_r ** 1.847 * Mmix**0.5173
        alpha_0 = 1 + 0.031*rho_r**1.847
        Tref = Tr * alpha_0 / alpha_mix
        Pref = Pr * alpha_0 / alpha_mix
        S = -0.085  # different shift constant for the reference state — TODO confirm
        Vc_ref = volume(EOS, Pref, Tref, np.array([Pc[2]]), np.array([Tc[2]]), np.array([omega[2]]))
        volume_cor = Vc_ref - b * S
        rho_ref = Mw[2]/volume_cor/ 1e6  # methane reference density [g/cm^3]
        visc_methane = ViscMethane(Tref, rho_ref)
        # Scale the reference viscosity back to the mixture.
        visc_mix = (Tc_mix/Tc[2])**(-1/6) * (Pc_mix/Pc[2])**(2/3) * (Mmix/Mw[2])**(1/2) * alpha_mix / alpha_0 * visc_methane
        visc.append(visc_mix)
        Pressure.append(P)
        #print(P, visc_mix)
        P = 1.1 * P  # 10% pressure step
    plt.plot(Pressure, visc)
    plt.xlabel("Pressure (Pa)")
    plt.ylabel("Viscosity (cP)")
    plt.title("Viscosity vs compositions")
    plt.show()
# In[42]:
# --- Driver: corresponding-states viscosity for the same reservoir oil ---
zi = np.array([0.0044, 0.0017, 0.3463, 0.0263, 0.0335, 0.092, 0.0175, 0.0089, 0.0101, 0.0152, 0.05, 0.0602, 0.0399, 0.0355, 0.1153, 0.0764, 0.0633, 0.0533, 0.0330])#oil composition
Tc = np.array([304.2, 126.2, 190.6, 305.4, 369.8, 408.1, 425.2, 460.4, 469.6, 507.4, 548, 575, 603, 626, 633.1803, 675.9365, 721.3435, 785.0532, 923.8101]) # in Kelvin
Pc = np.array([72.9, 33.6, 45.4, 48.2, 41.9, 36.0, 37.5, 33.4, 33.3, 29.3, 30.7, 28.4, 26, 23.9, 21.6722, 19.0339, 16.9562, 14.9613, 12.6979])*101325 # in Pa
omega = np.array([0.228, 0.04, 0.008, 0.098, 0.152, 0.176, 0.193, 0.227, 0.251, 0.296, 0.28, 0.312, 0.348, 0.385, 0.6254, 0.7964, 0.9805, 1.2222, 1.4000]) # accentric factors
Mw = np.array([44.01, 28.013, 16.043, 30.07, 44.097, 58.123, 58.123, 72.15, 72.15, 84, 96, 107, 121, 134, 163.5, 205.4, 253.6, 326.7, 504.4])  # molar masses
P = 1500 * 6894.76  # starting pressure: 1500 psia in Pa
T = 106 + 273.15  # temperature: 106 degC in K
corresponding_state_Visco(P, T, zi, Tc, Pc, omega, Mw)  # plots viscosity vs pressure
# In[43]:
def viscosity(Oilcomp, Injcomp, P, T, Pc, Tc, omega, Mw, Vci):
    """Single-point LBC viscosity of a 50/50 oil/injectant blend.

    Mixes the oil and injectant compositions with a fixed blend
    fraction alpha = 0.5, flashes at (P, T), and evaluates the LBC
    correlation on the resulting liquid composition (same correlation
    as LBC_viscosity). Returns the viscosity in cP.
    """
    # LBC fourth-degree polynomial coefficients in reduced density.
    coef = [0.10230, 0.023364, 0.058533, -0.040758, 0.0093324]
    EOS = 'PR'
    Nc = len(Oilcomp)
    alpha = 0.5  # fixed blend fraction of injectant
    l = 0.5
    tol = 1e-5
    # Overall composition along the oil-injectant mixing line.
    zi = Oilcomp + alpha * (Injcomp - Oilcomp)
    fug_v, fug_l, l, xi, yi = flash(EOS, 0.5, Nc, zi, Tc, Pc, P, T, omega)
    # Mixture viscosity-reducing parameter and per-component values.
    Ksi = 5.4402 * 399.54 * np.sum(xi * Tc)**(1/6)/np.multiply(np.sum(xi * Mw)**(0.5),np.sum(xi * Pc)**(2/3))
    Ksi_i = 5.4402 * 399.54 * Tc**(1/6) * Mw**(-0.5) * Pc**(-2/3)
    # Dilute-gas component viscosities (two reduced-temperature regimes).
    eta_star_i = np.zeros(xi.shape)
    for i in range(Nc):
        Tr = T/Tc[i]
        if Tr < 1.5:
            eta_star_i[i] = 34e-5 * (Tr**0.94)/Ksi_i[i]
        else:
            eta_star_i[i] = 17.78 * 1e-5 * ((4.58*Tr - 1.67)**0.625)/ Ksi_i[i]
    eta_star = np.divide(np.sum(xi * eta_star_i * Mw**0.5), np.sum(xi * Mw**0.5))
    # C7+ pseudo-fraction properties. NOTE(review): np.sum over generator
    # expressions is deprecated in NumPy (falls back to builtin sum).
    MC7_plus = np.sum(xi[i] * Mw[i] for i in range(10, Nc)) / np.sum(xi[i] for i in range(10, Nc))
    denC7_plus = 0.895  # assumed C7+ specific gravity — TODO confirm
    Vc_plus = (21.573 + 0.015122*MC7_plus - 27.656*denC7_plus + 0.070615*denC7_plus*MC7_plus) * 6.2372*1e-5
    V_mixture = volume(EOS, P, T, Pc, Tc, omega, xi, True)
    xC7_plus = np.sum(xi[i] for i in range(10, Nc))
    Vc_mixture = np.sum(xi[i] * Vci[i] for i in range(10))*1e-6 + xC7_plus * Vc_plus
    rho_r = Vc_mixture/V_mixture  # reduced density
    # NOTE: local name shadows the function's own name; harmless here.
    viscosity = ((coef[0] + coef[1] * rho_r + coef[2] * rho_r**2 + coef[3] * rho_r**3 + coef[4] * rho_r**4)**4 - 0.0001)/Ksi + eta_star
    return viscosity
# In[44]:
def vicosity_vs_composition(LPG_CO2_comb, Oilcomp, P, T, Pc, Tc, omega, Mw, Vci, makePlot = False):
    '''Blend viscosity versus injectant (LPG/CO2) composition.

    LPG_CO2_comb holds the LPG mole fraction of each injectant mixture:
    e.g. [0.7, 0.55, 0.4, 0.2, 0.1, 0.] means 70% LPG / 30% CO2 first,
    then 55% / 45%, and so on. The LPG composition is fixed at
    C2: 0.01, C3: 0.38, iC4: 0.19, nC4: 0.42.
    If makePlot is True, plots viscosity [cP] against the LPG fraction.
    NOTE(review): the misspelled name "vicosity" is kept for caller
    compatibility; this routine computes viscosity, not MMP, despite the
    wording copied from a related exercise.
    '''
    # Reservoir-oil component names. NOTE(review): adjacent string
    # literals concatenate — this is a set with one merged string (unused).
    Names = {'CO2' 'N2' 'C1' 'C2' 'C3' 'i-C4' 'n-C4' 'i-C5' 'n-C5' 'C6' 'C7' 'C8' 'C9' 'C10' 'PS1' 'PS2' 'PS3' 'PS4' 'PS5'}
    # Fixed LPG and pure-CO2 injectant compositions (19-component vectors).
    LPG = np.array([0, 0, 0, 0.01, 0.38, 0.19, 0.42, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
    CO2 = np.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    numMixtures = len(LPG_CO2_comb)
    Viscosity = []
    composition = []
    for i in range(numMixtures):
        # Injectant for this LPG/CO2 split, then the blend's viscosity.
        Injcomp = np.array(LPG_CO2_comb[i] * LPG + (1 - LPG_CO2_comb[i]) * CO2)
        Viscosity.append(viscosity(Oilcomp, Injcomp, P, T, Pc, Tc, omega, Mw, Vci))
        composition.append(LPG_CO2_comb[i])
    if makePlot:
        plt.plot(composition, Viscosity)
        plt.xlabel('Composition (mole fraction of the LPG)')
        plt.ylabel('Viscosity (cP)')
        plt.title('Viscosty vs Injectant composition')
        plt.show()
# In[45]:
# --- Driver: blend viscosity vs LPG/CO2 injectant composition at 28 MPa ---
P = 28e6  # pressure [Pa]
T = 106 + 273.15  # temperature: 106 degC in K
# NOTE(review): adjacent string literals concatenate — Names holds one
# merged string, not 19 names (unused).
Names = {'CO2' 'N2' 'C1' 'C2' 'C3' 'iC4' 'n-C4' 'i-C5' 'n-C5' 'C6' 'C7' 'C8' 'C9' 'C10' 'PS1' 'PS2' 'PS3' 'PS4' 'PS5'}
zi = np.array([0.0044, 0.0017, 0.3463, 0.0263, 0.0335, 0.092, 0.0175, 0.0089, 0.0101, 0.0152, 0.05, 0.0602, 0.0399, 0.0355, 0.1153, 0.0764, 0.0633, 0.0533, 0.0330])#oil composition
Tc = np.array([304.2, 126.2, 190.6, 305.4, 369.8, 408.1, 425.2, 460.4, 469.6, 507.4, 548, 575, 603, 626, 633.1803, 675.9365, 721.3435, 785.0532, 923.8101]) # in Kelvin
Pc = np.array([72.9, 33.6, 45.4, 48.2, 41.9, 36.0, 37.5, 33.4, 33.3, 29.3, 30.7, 28.4, 26, 23.9, 21.6722, 19.0339, 16.9562, 14.9613, 12.6979])*101325 # in Pa
omega = np.array([0.228, 0.04, 0.008, 0.098, 0.152, 0.176, 0.193, 0.227, 0.251, 0.296, 0.28, 0.312, 0.348, 0.385, 0.6254, 0.7964, 0.9805, 1.2222, 1.4000]) # accentric factors
Mw = np.array([44.01, 28.013, 16.043, 30.07, 44.097, 58.123, 58.123, 72.15, 72.15, 84, 96, 107, 121, 134, 163.5, 205.4, 253.6, 326.7, 504.4])  # molar masses
Vci = np.array([91.9, 84, 99.2, 147, 200, 259, 255, 311, 311, 368]) # cm3/mol
# Six injectant blends from 70% LPG down to pure CO2; True -> make the plot.
vicosity_vs_composition(np.array([0.7, 0.55, 0.4, 0.2, 0.1, 0.]), zi, P, T, Pc, Tc, omega, Mw, Vci, True)
# In[47]:
# Plotting the experimental CCE curve of pressure against relative volume.
# NOTE(review): this header says "relative volume" but the axis labels and
# title below say viscosity — confirm which quantity y actually holds.
# NOTE(review): plt.legend() is called with no labeled series, which
# produces a "no artists with labels" warning.
x = np.array([
    20096156.97
    ,19406680.97
    ,18717204.97
    ,16834935.49
    ,15476667.77
    ,15312158.8
    ,13890872.97
    ,10443492.97
    ])
y = np.array([
    0.622
    ,0.6162
    ,0.611
    ,0.597
    ,0.5869
    ,0.5857
    ,0.6341
    ,0.708
    ])
plt.plot(x, y, 'r')
plt.xlabel("Pressure (Pa)")
plt.ylabel("Viscosity (cP)")
plt.title("Experimental evolution of viscosity")
plt.legend(loc='best')
plt.show()
# In[ ]:
|
[
"youssef.aitousarrah@gmail.com"
] |
youssef.aitousarrah@gmail.com
|
f60ef5fd88169007360b05340e5af2ea2122afca
|
3bbcad666d11eabcb80f951f2ff976f23ec79346
|
/mindblown.py
|
ae9299207c6970f4daae69dcd0dc99dfa4d3acb0
|
[] |
no_license
|
sxflame/Turtledraw
|
1d877e7b2856d6f90972880dcb9dc336a6fae63a
|
c887ca15cd40f3e235d684d2b6a49ed9d63e70bd
|
refs/heads/master
| 2021-01-11T16:15:56.753085
| 2017-01-25T19:34:45
| 2017-01-25T19:34:45
| 80,049,932
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 851
|
py
|
import turtle
def draw_square(some_turtle, side=100):
    """Draw a square with the given turtle.

    some_turtle : any object exposing forward(distance) and right(angle)
    side        : edge length in pixels (default 100, the original fixed
                  size, so existing callers are unchanged)
    """
    for i in range(1, 5):  # four edges
        some_turtle.forward(side)
        some_turtle.right(90)
def draw_bigsquare(some_turtle, side=200):
    """Draw a large square with the given turtle.

    some_turtle : any object exposing forward(distance) and right(angle)
    side        : edge length in pixels (default 200, the original fixed
                  size, so existing callers are unchanged)
    """
    for i in range(1, 5):  # four edges
        some_turtle.forward(side)
        some_turtle.right(90)
#def draw_circle(some_turtle):
# some_turtle.cirlce(100)
def draw_art():
    """Draw a spiral pattern of rotated squares with a turtle.

    Opens a turtle window with a black background and draws 36 small
    squares followed by 36 large squares, rotating 10 degrees between
    squares; the window stays open until clicked.
    """
    window = turtle.Screen()
    window.bgcolor ("black")
    brad = turtle.Turtle()
    brad.shape("turtle")
    brad.color("yellow")
    brad.speed(1000)
    # 36 squares rotated 10 degrees apart make a full circle.
    for i in range (1,37):
        draw_square(brad)
        brad.right(10)
    # Second full circle using the larger square.
    for i in range (38,74):
        draw_bigsquare(brad)
        brad.right(10)
    # Commented-out second turtle kept from the original exercise.
    # angie = turtle.Turtle()
    # angie.shape("arrow")
    # angie.color("blue")
    # draw_circle(angie)
    window.exitonclick()
draw_art()
|
[
"noreply@github.com"
] |
noreply@github.com
|
8dba5286a903756c1d25fcc25d34a5e543f90741
|
ac42160440b161365c6f863bd1c89ce8a09570cb
|
/array.py
|
2eeeea45eeabb94107eafd1e45dbd55b677be5d0
|
[] |
no_license
|
vikask1640/Demogit
|
7f12b141af535f95373379a5469573d9c69ad461
|
ad35dcab54b56afbc75578711dac78204ad008b0
|
refs/heads/master
| 2020-04-13T23:32:33.126507
| 2019-01-03T11:00:56
| 2019-01-03T11:00:56
| 163,509,341
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 258
|
py
|
import array as a
def ace():
    """Build a small integer array, print it, then print its sorted values."""
    vals = a.array("i", [10, 8, 14, 55, 4])
    print(vals)
    ordered = sorted(vals)
    print(ordered)


ace()
# Factorial: compute y! (here 5! = 120) by multiplying 1 .. y.
y = 5
fact = 1
for multiplier in range(1, y + 1):
    fact = fact * multiplier
print(fact)
|
[
"noreply@github.com"
] |
noreply@github.com
|
cee0f8705da747a739d9a3dcea926258152f7f22
|
e8ac7df7a1e067ce002ed12e295e6e1a0908cc8c
|
/Python/ThinkFlask/home/views/index.py
|
9606b3fed92dca1ef2bbb1509bf9f8f1c929cbeb
|
[] |
no_license
|
919858271/MyCode
|
fc9a9e3479843f933582f1030d504d6f5eb94dcb
|
5940b045207376c4c679260a660ca402b5b4751c
|
refs/heads/master
| 2021-06-21T13:59:00.754383
| 2019-07-21T13:28:48
| 2019-07-21T13:28:48
| 53,465,398
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 923
|
py
|
#-------------------------------------------------------------------------------
# !/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: jianwen
# Email: npujianwenxu@163.com
#-------------------------------------------------------------------------------
from flask import render_template
from app.models import Model
from home.model.models import User
from home import home_router
@home_router.route('/')
def index():
    """Landing page of the home blueprint; returns a plain-text greeting."""
    return 'Think Flask. This is Home'
@home_router.route('/add/<username>/<password>/')
def add_user(username, password):
    """Create and persist a User with the given credentials.

    NOTE(review): the password travels in the URL and is stored as
    received — presumably plaintext; confirm hashing happens in the
    User model before relying on this route.
    """
    user = User(username=username, password=password)
    User.insert(user)
    return 'success'
@home_router.route('/delete/<int:key>/')
def delete_user(key):
    """Delete the User whose primary key is *key*.

    NOTE(review): User.query.get(key) returns None for a missing key,
    and Model.delete(None) would then presumably fail — confirm how a
    nonexistent key should be handled.
    """
    user = User.query.get(key)
    Model.delete(user)
    return 'success'
@home_router.route('/test/<username>/')
def test(username):
    """Render the home template, passing the URL's username through."""
    return render_template('home/index.html', username=username)
|
[
"npujianwenxu@163.com"
] |
npujianwenxu@163.com
|
7048a4f70ae9aad9393bb2928bb43fcb1d64edb8
|
8c0bf198a6e0be4128a8615a6944f0a167fc9c79
|
/options.py
|
0cc3ba14fb313fce757adb11cbbf48670d4e23ef
|
[] |
no_license
|
amietn/anna
|
73c43d3a2a9d7f465784ec7adc54f9494b07a178
|
e6e24d2c8252085e8ed69df8da976360b0f43baf
|
refs/heads/master
| 2021-05-03T06:12:09.783335
| 2018-02-07T09:09:18
| 2018-02-07T09:09:18
| 120,590,225
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 436
|
py
|
#!/usr/bin/env python3
import os
import json
# Default per-user location of the IRC credentials file.
irc_credentials_path = os.path.expanduser("~/.config/anna/irc_credentials.json")
def get_irc_credentials():
    """Load IRC credentials from the default per-user config path."""
    return get_irc_credentials_path(irc_credentials_path)
def get_irc_credentials_path(path):
    """Parse the JSON credentials file at *path* and return its contents."""
    with open(path, 'r') as handle:
        return json.load(handle)
if __name__ == '__main__':
    # Smoke test: load the bundled template file and show its contents.
    creds = get_irc_credentials_path("irc_credentials.json.template")
    print(creds)
|
[
"amietn@foobar"
] |
amietn@foobar
|
23563b0b159ad0e6f9fb0609a2fe50cac098b742
|
83acd2e879b8d1dfbd7d735193539b8537e86d08
|
/pyropod/ropod/utils/uuid.py
|
6db2ac71146f4b02679290c59fb0510fc15c2fc8
|
[] |
no_license
|
HBRS-SDP/ropod_common
|
89b296e6bb56dc225319850036d3a63efd46ace9
|
5ce24b8ae79239f4fd5d2249fd33d1b1061eaceb
|
refs/heads/master
| 2020-05-09T23:39:11.209722
| 2019-03-12T12:59:48
| 2019-03-12T12:59:48
| 181,508,576
| 0
| 0
| null | 2019-04-15T14:52:18
| 2019-04-15T14:52:18
| null |
UTF-8
|
Python
| false
| false
| 126
|
py
|
import uuid
def generate_uuid():
    """Generate a fresh random (version 4) UUID and return it as a string."""
    new_id = uuid.uuid4()
    return str(new_id)
|
[
"argentina.ortega@h-brs.de"
] |
argentina.ortega@h-brs.de
|
c964b70be501d47fcdf10b78f7e203cf0a8ec8f0
|
ec24cfb57415f5cb15aa42e75af436e087d17b8b
|
/1002.py
|
2a84f5b39cbe862b9fba145053997424d39ca2d5
|
[] |
no_license
|
arturbs/pythonUriAnswers
|
41801eff3b12e9e18f390ebf264003e62bc0cf55
|
ed99ff24b66b4ae91ac8b8f3772198f07ab9be1b
|
refs/heads/master
| 2020-03-24T22:35:22.705373
| 2019-08-23T04:37:31
| 2019-08-23T04:37:31
| 143,092,795
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 139
|
py
|
# URI 1002: read the radius from stdin and print the circle area with
# four decimals. The judge requires pi = 3.14159 exactly, so math.pi
# must not be used here.
radius = float(input())
area = 3.14159 * radius ** 2
print("A=%0.4f" % area)
|
[
"arturbritosouza@hotmail.com.br"
] |
arturbritosouza@hotmail.com.br
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.