Column schema (dtype and observed range of values):

- blob_id: string, length 40
- directory_id: string, length 40
- path: string, length 2 to 616
- content_id: string, length 40
- detected_licenses: list, length 0 to 69
- license_type: string, 2 classes
- repo_name: string, length 5 to 118
- snapshot_id: string, length 40
- revision_id: string, length 40
- branch_name: string, length 4 to 63
- visit_date: timestamp[us]
- revision_date: timestamp[us]
- committer_date: timestamp[us]
- github_id: int64, 2.91k to 686M, nullable
- star_events_count: int64, 0 to 209k
- fork_events_count: int64, 0 to 110k
- gha_license_id: string, 23 classes
- gha_event_created_at: timestamp[us]
- gha_created_at: timestamp[us]
- gha_language: string, 213 classes
- src_encoding: string, 30 classes
- language: string, 1 class
- is_vendor: bool, 2 classes
- is_generated: bool, 2 classes
- length_bytes: int64, 2 to 10.3M
- extension: string, 246 classes
- content: string, length 2 to 10.3M
- authors: list, length 1
- author_id: string, length 0 to 212

| blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author_id |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
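Each row below is one source file. A minimal sketch of consuming such rows, assuming each is exposed as a Python dict keyed by the schema above; the function name and star threshold are illustrative, not part of the dataset:

    def permissive_python_files(records, min_stars=100):
        # Yield (repo, path, source) for permissively licensed,
        # human-written Python files; `records` is any iterable of row dicts.
        for row in records:
            if (row["license_type"] == "permissive"
                    and row["language"] == "Python"
                    and not row["is_generated"]
                    and not row["is_vendor"]
                    and row["star_events_count"] >= min_stars):
                yield row["repo_name"], row["path"], row["content"]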
73e8ef99e53d2aaeeb1ea03125982542a67cbd7b
|
0c027ff203fc3d109f927639642c28f5ec37632b
|
/robot/video.py
|
bcace3cd4968c0ab69d7bcf4ed13020a075bde16
|
[] |
no_license
|
mcbianconi/reddit_scrapper
|
5200705473116205770da9d1a6723e225891a4c2
|
447d38eee0190214be108483619d28493cfacfb5
|
refs/heads/master
| 2022-02-19T06:02:45.739192
| 2019-09-21T14:57:45
| 2019-09-21T14:57:45
| 207,712,785
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,047
|
py
|
import os
import shutil
import subprocess

from robot.config import (IMG_HEIGHT, IMG_WIDTH, OUTPUT_DIR,
                          RESIZED_VIDEO_SUFFIX)

VIDEO_FILE_NAME = "video_input_list"
AUDIO_FILE_NAME = "audio_input_list"


def make_files(comment_list):
    submission_folder = os.path.join(
        OUTPUT_DIR, comment_list[0].submission.fullname)
    video_input_list = os.path.join(submission_folder, VIDEO_FILE_NAME)
    audio_input_list = os.path.join(submission_folder, AUDIO_FILE_NAME)
    comment_list_file_path = os.path.join(submission_folder, "comment_list")
    video_file = open(video_input_list, "a+")
    audio_file = open(audio_input_list, "a+")
    comment_list_file = open(comment_list_file_path, "a+")
    video_file.write(
        f"file '{submission_folder}/{comment_list[0].submission.fullname}.png'\n")
    audio_file.write(
        f"file '{submission_folder}/{comment_list[0].submission.fullname}.mp3'\n")
    for c in comment_list:
        video_file.write(f"file '{submission_folder}/{c.fullname}.png'\n")
        audio_file.write(f"file '{submission_folder}/{c.fullname}.mp3'\n")
        comment_list_file.write(f"{c.fullname}\n")
    video_file.close()
    audio_file.close()
    comment_list_file.close()


def create_submission_video(submission):
    submission_folder = os.path.join(OUTPUT_DIR, submission.fullname)
    video_file = os.path.join(submission_folder, VIDEO_FILE_NAME)
    audio_file = os.path.join(submission_folder, AUDIO_FILE_NAME)
    video_title = submission.title.replace(" ", "_")
    output_video = os.path.join(submission_folder, video_title + ".mp4")
    # subprocess.call() with a list treats each element as a single argv
    # token, so every flag and value must be its own list element.
    cmnd = [
        "ffmpeg", "-r", "24", "-f", "concat", "-safe", "0", "-i", video_file,
        "-f", "concat", "-safe", "0", "-i", audio_file,
        "-c:a", "aac", "-pix_fmt", "yuv420p", "-crf", "23", "-r", "24",
        "-shortest", "-y", output_video]
    p = subprocess.call(cmnd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)


def scale_video(input_path: str):
    output = input_path + RESIZED_VIDEO_SUFFIX
    cmnd = [
        "ffmpeg", "-i", input_path, "-vf",
        f"scale={IMG_WIDTH}:{IMG_HEIGHT}",
        "-max_muxing_queue_size", "9999",
        "-y",
        output]
    subprocess.call(cmnd)
    return output


def concat_videos(video, edition_path):
    shutil.copy(video, edition_path)
    edit_list = (
        os.path.join(edition_path, "intro.mp4"),
        os.path.join(edition_path, video),
        os.path.join(edition_path, "outro.mp4"),
    )
    list_file = os.path.join(edition_path, "tmp_edit_file.txt")
    with open(list_file, "a+") as file:
        for partial_video in filter(
                lambda video: os.path.exists(video),
                edit_list):
            # edit_list entries are already joined with edition_path.
            resized_video = scale_video(partial_video)
            file.write(f"file '{resized_video}'\n")
    cmnd = [
        "ffmpeg",
        "-safe", "0", "-f", "concat", "-i", list_file,
        "-c", "copy", video + "_FINAL.mp4",
    ]
    process = subprocess.call(cmnd)
    os.remove(list_file)
    os.remove(os.path.join(edition_path, video))
    return process
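# For reference, the lists written by make_files() use the plain-text
# format of ffmpeg's concat demuxer, one directive per line. Paths below
# are illustrative; "t3_*"/"t1_*" are the Reddit fullname prefixes of a
# submission and its comments:
#
#     file '<OUTPUT_DIR>/t3_abc123/t3_abc123.png'
#     file '<OUTPUT_DIR>/t3_abc123/t1_def456.png'
#     file '<OUTPUT_DIR>/t3_abc123/t1_ghi789.png'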
|
[
"murillo.bianconi@gmail.com"
] |
murillo.bianconi@gmail.com
|
ec4f8c6ba276514b62e7887ae58142eafdba83b9
|
ade85b6086c4775b673b94fb39dd2cab6965e48d
|
/loudblog_just_for_git_purposes/imi_to_cgnet_recordings/IMI_to_CGNet.py
|
eadd7edd1dfbdc8245981777c45c07e8f436b15f
|
[
"MIT"
] |
permissive
|
rulebreakerdude/MobileSatyagraha_Backend_aws
|
dde4c0d6294269275609c2631281859d93440a96
|
af8658e5ab4c1593f56d38196c1b872655d70977
|
refs/heads/master
| 2022-12-21T22:46:56.628832
| 2020-04-19T03:17:07
| 2020-04-19T03:17:07
| 155,894,774
| 2
| 3
|
MIT
| 2022-12-08T04:51:04
| 2018-11-02T16:43:48
|
Python
|
UTF-8
|
Python
| false
| false
| 1,861
|
py
|
import ftplib
import subprocess
import datetime  # needed for datetime.datetime.now() below
from db_repo import *
import smtplib
#from string import Template
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
from mutagen.mp3 import MP3

mydb = database_flaskr()
MY_ADDRESS = 'cgnetmail2019@gmail.com'
PASSWORD = 'QWERTYCGTECH123'
s = smtplib.SMTP_SSL(host='smtp.gmail.com', port=465)
s.ehlo()
s.set_debuglevel(1)
s.login(MY_ADDRESS, PASSWORD)
email = 'cgnetswaratest@gmail.com'
session = ftplib.FTP('59.162.167.59', 'Cgnet', 'mdy8YtLIzxf2')
session.cwd("/Recordings")
unsyncedFiles = mydb.getCGSwaraUnsyncedNumberData()
for row in unsyncedFiles:
    print row
    ref_id = row[0]
    phoneNumber = row[1]
    filetocopy = ref_id + '.wav'
    localfile = open('temp.wav', 'wb')
    try:
        session.retrbinary('RETR ' + filetocopy, localfile.write)
        localfile.close()
        subprocess.call(['lame', 'temp.wav', '%s.mp3' % (ref_id)])
        localmp3 = open('%s.mp3' % (ref_id), 'rb')
        time = '{:%Y%m%d%H%M%S}'.format(datetime.datetime.now())
        length = str(MP3(ref_id + '.mp3').info.length)
        subject = "Swara-Main|app|" + length + "|DRAFT|" + phoneNumber + "|unk|" + time + "|PUBLIC"
        message = "Recording sent from IMI cloud IVR"
        msg = MIMEMultipart()
        msg['From'] = MY_ADDRESS
        msg['To'] = email
        msg['Subject'] = subject
        msg.attach(MIMEText(message, 'plain'))
        part = MIMEBase('application', 'octet-stream')
        part.set_payload(localmp3.read())
        encoders.encode_base64(part)
        part.add_header('Content-Disposition', "attachment; filename= %s.mp3" % (ref_id))
        msg.attach(part)
        s.sendmail(msg['From'], msg['To'], msg.as_string())
        subprocess.call(['rm', '%s.mp3' % (ref_id)])
        mydb.setCGSwaraSyncedNumberData(ref_id)
    except ftplib.error_perm:
        print filetocopy + ' not present'
session.quit()
|
[
"rulebreakerdude@gmail.com"
] |
rulebreakerdude@gmail.com
|
5599bacd9c46473647fe8b163053d8f159092557
|
7515ed0821b925d08b31c4acb7efc2e0c0f8c8d6
|
/copla_search_reply_bot.py
|
d1d2e453e9448e447a968ff155733c7c1e99d614
|
[] |
no_license
|
himanshu-irl/covidplasma_bot
|
271895e107d83cbd1fecfd5eef083b96f7de80d7
|
a32f604fc0d0dbf2ae209ff93b174dfeed7a29b2
|
refs/heads/main
| 2023-09-01T23:17:43.774502
| 2021-11-19T21:22:50
| 2021-11-19T21:22:50
| 360,315,645
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,375
|
py
|
# -*- coding: utf-8 -*-
"""
@author:
    Verma, Himanshu
"""
#If account is locked
#https://twitter.com/account/access
import tweepy
import time
import random
import datetime as dtm
import logging
#ud-modules
from covidplasma_bot.input import keys, paths, tweet_parameter as param
from covidplasma_bot.replier import tweet_replier as tr
from covidplasma_bot.helper import file_handler as fh, telegram_poster as tp

#Twitter API keys
CONSUMER_KEY = keys.CONSUMER_KEY
CONSUMER_SECRET = keys.CONSUMER_SECRET
ACCESS_KEY = keys.ACCESS_KEY
ACCESS_SECRET = keys.ACCESS_SECRET

#Telegram: TwitterNotify bot API keys
tgram_token = keys.tgram_token
tgram_success_chatid = keys.success_chatid  #TwitterNotify Bot chat
tgram_error_chatid = keys.error_chatid  # Twitter Bot Notifications channel

date_since = (dtm.datetime.now() - dtm.timedelta(days=1)).strftime('%Y-%m-%d')

#Setting up paths
LOG_FILE_NAME = paths.replier_log_file
#FILE_NAME = 'last_seen_id.txt'
MENTION_FILE_NAME = paths.mentions_file
REPLIER_FILE_NAME = paths.replier_file
#----------------------------------------#
while True:
    #deleting log file
    print('deleting log file...')
    fh.del_log(LOG_FILE_NAME)
    logging.basicConfig(handlers=[logging.FileHandler(filename=LOG_FILE_NAME,
                                                      encoding='utf-8',
                                                      mode='a+')],
                        level=logging.DEBUG,
                        format='%(asctime)s %(message)s')
    logger = logging.getLogger(name='copla-search-reply-bot')
    try:
        tr.reply_to_tweets(CONSUMER_KEY,
                           CONSUMER_SECRET,
                           ACCESS_KEY,
                           ACCESS_SECRET,
                           tgram_token,
                           tgram_success_chatid,
                           logger,
                           REPLIER_FILE_NAME,
                           MENTION_FILE_NAME,
                           search_for=param.search_for,
                           date_since=date_since,
                           rand_sleep=6)
    except tweepy.TweepError as e:
        print(e)
        tp.send_message(tgram_token, tgram_error_chatid, str('COVID PLASMA BOT REPLIER ERROR: ' + str(e.args[0][0]['code']) + ' - ' + str(e.args[0][0]['message'])))
        logger.info(e)
    time.sleep(random.randint(300, 600))
|
[
"himanshuverma1516@gmail.com"
] |
himanshuverma1516@gmail.com
|
5bd77a1c3d522e951f3792bdc26ad0123792bc50
|
b3a3ff13bae2f2597cf1e4f1ca74845ff34cbd08
|
/apps/goods/migrations/0013_images_classify_two.py
|
c649add75a59df8c6eca2e9e6ea86ca7ba2a9d79
|
[] |
no_license
|
mkcc581130/MkOnline
|
b6593c973f958105dfdee92b3063231e9d7e6b97
|
b39cc48a05abe7340cbd8b59f77f21dc1b12bb23
|
refs/heads/master
| 2020-04-06T21:04:07.391589
| 2018-11-16T09:11:16
| 2018-11-16T09:11:16
| 157,791,503
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 605
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-08-31 15:22
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('goods', '0012_auto_20170831_1513'),
    ]

    operations = [
        migrations.AddField(
            model_name='images',
            name='classify_two',
            # verbose_name is '二级分类' ("second-level category")
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='goods.ClassifyTwo', verbose_name='\u4e8c\u7ea7\u5206\u7c7b'),
        ),
    ]
|
[
"575151723@qq.com"
] |
575151723@qq.com
|
bbafe5cb079e010686f81c3f97e3da672521a8e1
|
eaedf6de025b1d04fddaae2c80556ec9791d15a6
|
/website/contactkeep/models.py
|
435c315da64642a75b39bd47d4f9322ac882e742
|
[] |
no_license
|
sherlock6147/contact-keeper
|
54ed02565c806b8e5d8a49eb9e9a9b2f77ec8256
|
38c0516aa62cbcc5ff89f307944ae98640bd96c4
|
refs/heads/master
| 2023-04-26T18:43:59.764527
| 2021-05-23T16:31:41
| 2021-05-23T16:31:41
| 365,991,962
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,702
|
py
|
from django.db import models
from django.db.models.query_utils import select_related_descend
from django.utils import timezone


# Create your models here.
class Event(models.Model):
    name = models.CharField("Event Name", max_length=150)
    start_date = models.DateField("start date")
    end_date = models.DateField("end date")
    current = models.BooleanField("Current Event", default=False)

    def __str__(self):
        return self.name


class Website(models.Model):
    event = models.ForeignKey(Event, on_delete=models.CASCADE)
    url = models.CharField("Link for website", max_length=250)
    name = models.CharField("Name for website", max_length=250)
    web_cache = models.CharField("Cache of website content", max_length=100000, default='')
    last_visit = models.DateTimeField("last visited on", auto_now=True)
    created_on = models.DateTimeField("Created on", auto_created=True, default=timezone.now)

    def __str__(self):
        return self.name


class Contact(models.Model):
    name = models.CharField("Name", max_length=150)
    last_save = models.DateTimeField("Last saved on", auto_now=True)
    event = models.ForeignKey(Event, on_delete=models.CASCADE)
    website = models.ForeignKey(Website, on_delete=models.CASCADE)

    def __str__(self):
        return self.name


class PhoneNumber(models.Model):
    phoneNumber = models.CharField("Phone No.", max_length=20)
    contact = models.ForeignKey(Contact, on_delete=models.CASCADE)

    def __str__(self):
        return self.phoneNumber


class Email(models.Model):
    email = models.CharField("Email", max_length=100)
    contact = models.ForeignKey(Contact, on_delete=models.CASCADE)

    def __str__(self):
        return self.email
|
[
"tooshort9541@gmail.com"
] |
tooshort9541@gmail.com
|
c8ca91810328f7cdb73e19a3734869b5ba021ea3
|
e5521f8544c63da113859eb9356828a0e7edd652
|
/blog/migrations/0016_auto_20171222_1657.py
|
5c89db310681999cb3c8a55df918c7eab6886577
|
[] |
no_license
|
E0han/photography-web
|
c906967faa91efeae640e3759d46f41610410547
|
ba8fcaa0bc45918ea06ede0e38703d3453cba6dc
|
refs/heads/master
| 2021-09-02T09:40:29.267390
| 2018-01-01T14:43:00
| 2018-01-01T14:43:00
| 112,300,685
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 838
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-22 16:57
from __future__ import unicode_literals

from django.db import migrations, models
import django.utils.timezone


class Migration(migrations.Migration):

    dependencies = [
        ('blog', '0015_auto_20171222_0916'),
    ]

    operations = [
        migrations.AddField(
            model_name='introductions',
            name='author',
            # verbose_name '作者' means "author"
            field=models.CharField(default=django.utils.timezone.now, max_length=256, verbose_name='作者'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='introductions',
            name='location',
            # verbose_name '举办地点' means "venue"
            field=models.CharField(default=django.utils.timezone.now, max_length=258, verbose_name='举办地点'),
            preserve_default=False,
        ),
    ]
|
[
"ethan@YAMdeMacBook-Air.local"
] |
ethan@YAMdeMacBook-Air.local
|
fb62ce0017de691177ebd7cac0b9e7d400b3b23d
|
88c453eebfd506926560787d2e964132cc6150a6
|
/accounts/admin.py
|
33bb6ab7d054bef7ca02fbe4ed6ed014b8f859bb
|
[] |
no_license
|
ashish2/PySite
|
ecbde977a5195adefe7fe9065a909b87a95c46e1
|
a1513090568de3a0eade4d0f7df3d67d1992cbe2
|
refs/heads/master
| 2018-09-27T17:56:57.045095
| 2018-08-19T15:26:54
| 2018-08-19T15:26:54
| 12,662,322
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 510
|
py
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User

from models import UserProfile


class ProfileInline(admin.StackedInline):
    model = UserProfile
    fk_name = 'user'
    max_num = 1


class CustomUserAdmin(UserAdmin):
    inlines = [ProfileInline, ]


class UserProfileAdmin(admin.ModelAdmin):
    model = UserProfile


admin.site.unregister(User)
admin.site.register(User, CustomUserAdmin)
admin.site.register(UserProfile, UserProfileAdmin)
|
[
"vickyojha2@yahoo.com"
] |
vickyojha2@yahoo.com
|
9622f9ca09bff14c0d6cf590fd551ff732c036e1
|
6bf9e20aa0521fa9a153ddf754fe558e701b077e
|
/Flask/main.py
|
a889af48f0b4f2e818c3633c76d0f68e09cc1d4a
|
[] |
no_license
|
Santhosh-A-K/Flask
|
0b04baaf08c4d393727a695b8aa383e23ecf0c2f
|
33eb7b45f69d3c50969e7617ebfa29bc0cca50b3
|
refs/heads/master
| 2022-08-26T05:40:11.289824
| 2020-03-12T03:34:22
| 2020-03-12T03:34:22
| 244,713,189
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,156
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar  3 22:26:46 2020

@author: santhosh
"""
import logging
logging.basicConfig(filename='app.log', filemode='a', level=logging.DEBUG)
from flask import Flask, redirect, url_for, request, render_template
from mongoConnection import mongoConnect

app = Flask(__name__)


@app.route('/')
def index():
    return render_template('index.html', check=False)


@app.route('/connect/mongo/', methods=['POST', 'GET'])
def connectToMongo():
    logging.info('For this you must change the level and add a handler.')
    if request.method == 'POST':
        host = request.form['host']
        port = request.form['port']
        dbName = request.form['dbName']
    elif request.method == 'GET':
        host = request.args.get('host')
        port = request.args.get('port')
        dbName = request.args.get('dbName')
    con = mongoConnect(host, int(port), dbName)
    print(con)
    if con is None:
        return render_template('index.html', check=True)
    else:
        return render_template('columns.html')


if __name__ == '__main__':
    app.run(port=5000, debug=True, use_reloader=False)
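# Illustrative request against the route above once the server is running
# (host/port/dbName values assumed):
#
#     curl 'http://localhost:5000/connect/mongo/?host=localhost&port=27017&dbName=test'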
|
[
"noreply@github.com"
] |
Santhosh-A-K.noreply@github.com
|
1b548a161e569b32fa70ab178b597b67048e8363
|
a6edb3c29d06abf46f657963fcb8716eb370aabe
|
/wiki/urls.py
|
709aff20082fdbe0934395e004bdbfd35faff7f0
|
[
"MIT"
] |
permissive
|
ellojess/makewiki
|
5ecd2b05e11906f2e7ce8ee4160620f8c925c95d
|
353352ef71f395c0246b3757006bbafcc9bffa6d
|
refs/heads/master
| 2023-05-01T19:25:49.618312
| 2020-02-21T19:57:57
| 2020-02-21T19:57:57
| 238,797,725
| 0
| 0
|
MIT
| 2023-04-21T20:46:34
| 2020-02-06T22:27:23
|
Python
|
UTF-8
|
Python
| false
| false
| 319
|
py
|
from django.urls import path
from wiki.views import PageListView, PageDetailView, NewPageView

urlpatterns = [
    path('', PageListView.as_view(), name='wiki-list-page'),
    path('new-page/', NewPageView.as_view(), name='wiki-new-page'),
    path('<str:slug>/', PageDetailView.as_view(), name='wiki-details-page'),
]
|
[
"jtjessicatrinh@gmail.com"
] |
jtjessicatrinh@gmail.com
|
bc03d3213f757b64b9f4f8f296e4c26d3787b6b0
|
cb39dedfab9ce07fa0dd7fc0efed625f71a23cda
|
/passbucket/users/migrations/0001_initial.py
|
1424e01acc7c376b72b97d47d0578beca0e9f5e9
|
[] |
no_license
|
neotrons/passbucket
|
25fb5fcccab6d6a2fe181cf0a98fe7dabad37d68
|
8845cfd2fdad04df0a68c62f9f5a1b0eeb60d585
|
refs/heads/master
| 2020-03-30T20:14:09.332629
| 2019-05-09T22:17:58
| 2019-05-09T22:17:58
| 151,579,212
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,868
|
py
|
# Generated by Django 2.0.4 on 2018-10-04 23:39

import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        ('auth', '0009_alter_user_last_name_max_length'),
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'abstract': False,
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
    ]
|
[
"jcramireztello@gmail.com"
] |
jcramireztello@gmail.com
|
c327fdbb3cdba0853e6eed11b3727dd273c92dd2
|
aadfb150c0b662c9cb7ec763bddfdb3e3a7333b2
|
/Mono_Encrypt.py
|
ecae4b47cd1fb51a4fae7a108d82cc0855566bb0
|
[] |
no_license
|
xue-yuan/Classic_Crypto
|
7edbf40831f08e67b9303a7bf89e08ea3ca6edcc
|
88c475ca7cd4f055842b890a081c11de00c23539
|
refs/heads/master
| 2022-03-30T07:49:42.711728
| 2019-12-21T04:27:24
| 2019-12-21T04:27:24
| 183,240,416
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 627
|
py
|
def monoCipherEncrypt(plain, key):
    keymap = {}
    cipher = []
    for i, e in enumerate(key):
        keymap[chr(i + 97)] = e
    for i in plain:
        if i < 'a' or i > 'z':
            cipher.append(i)
            continue
        cipher.append(keymap.get(i))
    return cipher


while 1:
    text = input()
    text = list(text.lower().split())
    plain = "".join(" " + i for i in text[:-1]).replace(' ', '', 1)
    key = text[-1]
    print("".join(monoCipherEncrypt(plain, key)))

#keepgoingnevergiveup qwertyuiopasdfghjklzxcvbnm
#atthugofuftctkuoctxh qwertyuiopasdfghjklzxcvbnm
|
[
"g00se.9527@gmail.com"
] |
g00se.9527@gmail.com
|
fe14c1c48dec9e54e0e8fb37472870b424569383
|
87ba51b22a7f42c24e3c5364bccf460390f79655
|
/rain.py
|
af3fb913db82fc77c3a5f42e8ea1f0bc3e066faa
|
[] |
no_license
|
cjturne6/weatherstation
|
4406f779dd0395be2ca8cd8445d8df1b9be6fc70
|
007d6473a9341e290b51344f5814804402f9bebf
|
refs/heads/main
| 2023-04-10T10:30:54.355120
| 2021-04-25T14:24:03
| 2021-04-25T14:24:03
| 360,980,685
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 287
|
py
|
#!/usr/bin/env python3
import RPi.GPIO as GPIO

BUTTON_GPIO = 26

if __name__ == '__main__':
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(BUTTON_GPIO, GPIO.IN, pull_up_down=GPIO.PUD_UP)
    while True:
        GPIO.wait_for_edge(BUTTON_GPIO, GPIO.FALLING)
        print("Button pressed!")
|
[
"turner.collin@gmail.com"
] |
turner.collin@gmail.com
|
095895835303bf63e55c209087016bcd47a53900
|
c6de42be3b8d3952ac4f970a410eb5ee7afbd580
|
/firstProgram_oct.py
|
e64f5038b01ccc5a540402d3f4949fed17ed6d9c
|
[] |
no_license
|
kaiyaprovost/algobio_scripts_python
|
f0ac802b3a92ad514f69745984089ae69634c1e3
|
d273974127d6651621a3bb854036436d6e42444d
|
refs/heads/master
| 2021-01-21T13:53:15.826050
| 2016-05-06T21:54:28
| 2016-05-06T21:54:28
| 51,173,508
| 0
| 0
| null | 2016-05-06T21:54:28
| 2016-02-05T20:57:01
|
Python
|
UTF-8
|
Python
| false
| false
| 136
|
py
|
import turtle

teddy = turtle.Turtle()
for i in range(8):
    teddy.forward(100)
    teddy.right(360 / 8)
turtle.exitonclick()
|
[
"klp2143@columbia.edu"
] |
klp2143@columbia.edu
|
49bb51e9b948d124b3a1f776df7bbddf90d813b7
|
dade24dd3f1144878a87e7b27eef3b90b9ebdfb4
|
/api-clients/python/test/test_inline_response_200_5.py
|
f0521f14c0bb2aaccb6e75fb36ab3117f75ac0b6
|
[] |
no_license
|
lucaslmmanoel/otreeba-api-clients
|
f076144ff013071628cca179592b9713fba1b094
|
355e0635b4ffc651df7eb7cf2b23ea62d39280c1
|
refs/heads/master
| 2021-07-12T18:32:25.169618
| 2017-10-13T20:30:31
| 2017-10-13T20:30:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,289
|
py
|
# coding: utf-8

"""
    Otreeba Open Cannabis API

    This is an open, canonical database of cannabis seed companies, strains, brands, products, retailers, and studies from [Otreeba](https://otreeba.com). It is written on the OpenAPI Specification AKA Swagger. You can find out more about the Open API Initiative at [https://www.openapis.org/](https://www.openapis.org), or more info on Swagger at [http://swagger.io/](http://swagger.io/).

    OpenAPI spec version: 1.0.0
    Contact: api@otreeba.com
    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

from __future__ import absolute_import

import os
import sys
import unittest

import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.inline_response_200_5 import InlineResponse2005


class TestInlineResponse2005(unittest.TestCase):
    """ InlineResponse2005 unit test stubs """

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testInlineResponse2005(self):
        """
        Test InlineResponse2005
        """
        # FIXME: construct object with mandatory attributes with example values
        #model = swagger_client.models.inline_response_200_5.InlineResponse2005()
        pass


if __name__ == '__main__':
    unittest.main()
|
[
"david@randomdrake.com"
] |
david@randomdrake.com
|
8d06548df5f6398354e80696bdcd4de55ab84d3a
|
f44e4485385296f4d1de2032c64c76de37ec5007
|
/pyatv/mrp/protobuf/DeviceInfoMessage_pb2.py
|
f18237f3e98c6570af7c0facc2de477cda9de067
|
[
"MIT"
] |
permissive
|
kdschlosser/pyatv
|
370d0a35e39623b8e8e6a087c675ec47aa50fb16
|
fa32dab9ad3c4adffdc944ed78427f6c724074f5
|
refs/heads/master
| 2022-06-20T06:58:13.608441
| 2020-05-11T04:57:55
| 2020-05-11T06:22:23
| 264,143,600
| 1
| 0
|
MIT
| 2020-05-15T08:48:06
| 2020-05-15T08:48:05
| null |
UTF-8
|
Python
| false
| true
| 17,207
|
py
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pyatv/mrp/protobuf/DeviceInfoMessage.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from pyatv.mrp.protobuf import ProtocolMessage_pb2 as pyatv_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2
from pyatv.mrp.protobuf import Common_pb2 as pyatv_dot_mrp_dot_protobuf_dot_Common__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='pyatv/mrp/protobuf/DeviceInfoMessage.proto',
package='',
syntax='proto2',
serialized_options=None,
serialized_pb=b'\n*pyatv/mrp/protobuf/DeviceInfoMessage.proto\x1a(pyatv/mrp/protobuf/ProtocolMessage.proto\x1a\x1fpyatv/mrp/protobuf/Common.proto\"\x98\x07\n\x11\x44\x65viceInfoMessage\x12\x18\n\x10uniqueIdentifier\x18\x01 \x02(\t\x12\x0c\n\x04name\x18\x02 \x02(\t\x12\x1a\n\x12localizedModelName\x18\x03 \x01(\t\x12\x1a\n\x12systemBuildVersion\x18\x04 \x02(\t\x12#\n\x1b\x61pplicationBundleIdentifier\x18\x05 \x02(\t\x12 \n\x18\x61pplicationBundleVersion\x18\x06 \x01(\t\x12\x17\n\x0fprotocolVersion\x18\x07 \x02(\x05\x12 \n\x18lastSupportedMessageType\x18\x08 \x01(\r\x12\x1d\n\x15supportsSystemPairing\x18\t \x01(\x08\x12\x15\n\rallowsPairing\x18\n \x01(\x08\x12\x11\n\tconnected\x18\x0b \x01(\x08\x12\x1e\n\x16systemMediaApplication\x18\x0c \x01(\t\x12\x13\n\x0bsupportsACL\x18\r \x01(\x08\x12\x1b\n\x13supportsSharedQueue\x18\x0e \x01(\x08\x12\x1e\n\x16supportsExtendedMotion\x18\x0f \x01(\x08\x12\x18\n\x10\x62luetoothAddress\x18\x10 \x01(\x0c\x12\x1a\n\x12sharedQueueVersion\x18\x11 \x01(\r\x12\x11\n\tdeviceUID\x18\x13 \x01(\t\x12\x1d\n\x15managedConfigDeviceID\x18\x14 \x01(\t\x12&\n\x0b\x64\x65viceClass\x18\x15 \x01(\x0e\x32\x11.DeviceClass.Enum\x12\x1a\n\x12logicalDeviceCount\x18\x16 \x01(\r\x12\x1a\n\x12tightlySyncedGroup\x18\x17 \x01(\x08\x12\x1a\n\x12isProxyGroupPlayer\x18\x18 \x01(\x08\x12\x14\n\x0ctightSyncUID\x18\x19 \x01(\t\x12\x10\n\x08groupUID\x18\x1a \x01(\t\x12\x11\n\tgroupName\x18\x1b \x01(\t\x12*\n\x0egroupedDevices\x18\x1c \x03(\x0b\x32\x12.DeviceInfoMessage\x12\x15\n\risGroupLeader\x18\x1d \x01(\x08\x12\x17\n\x0fisAirplayActive\x18\x1e \x01(\x08\x12 \n\x18systemPodcastApplication\x18\x1f \x01(\t\x12\x1c\n\x14\x65nderDefaultGroupUID\x18 \x01(\t\x12\x18\n\x10\x61irplayReceivers\x18! \x03(\t\x12\x11\n\tlinkAgent\x18\" \x01(\t:?\n\x11\x64\x65viceInfoMessage\x12\x10.ProtocolMessage\x18\x14 \x01(\x0b\x32\x12.DeviceInfoMessage'
,
dependencies=[pyatv_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2.DESCRIPTOR,pyatv_dot_mrp_dot_protobuf_dot_Common__pb2.DESCRIPTOR,])
DEVICEINFOMESSAGE_FIELD_NUMBER = 20
deviceInfoMessage = _descriptor.FieldDescriptor(
name='deviceInfoMessage', full_name='deviceInfoMessage', index=0,
number=20, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR)
_DEVICEINFOMESSAGE = _descriptor.Descriptor(
name='DeviceInfoMessage',
full_name='DeviceInfoMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='uniqueIdentifier', full_name='DeviceInfoMessage.uniqueIdentifier', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='DeviceInfoMessage.name', index=1,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='localizedModelName', full_name='DeviceInfoMessage.localizedModelName', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='systemBuildVersion', full_name='DeviceInfoMessage.systemBuildVersion', index=3,
number=4, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='applicationBundleIdentifier', full_name='DeviceInfoMessage.applicationBundleIdentifier', index=4,
number=5, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='applicationBundleVersion', full_name='DeviceInfoMessage.applicationBundleVersion', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='protocolVersion', full_name='DeviceInfoMessage.protocolVersion', index=6,
number=7, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='lastSupportedMessageType', full_name='DeviceInfoMessage.lastSupportedMessageType', index=7,
number=8, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='supportsSystemPairing', full_name='DeviceInfoMessage.supportsSystemPairing', index=8,
number=9, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='allowsPairing', full_name='DeviceInfoMessage.allowsPairing', index=9,
number=10, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='connected', full_name='DeviceInfoMessage.connected', index=10,
number=11, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='systemMediaApplication', full_name='DeviceInfoMessage.systemMediaApplication', index=11,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='supportsACL', full_name='DeviceInfoMessage.supportsACL', index=12,
number=13, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='supportsSharedQueue', full_name='DeviceInfoMessage.supportsSharedQueue', index=13,
number=14, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='supportsExtendedMotion', full_name='DeviceInfoMessage.supportsExtendedMotion', index=14,
number=15, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bluetoothAddress', full_name='DeviceInfoMessage.bluetoothAddress', index=15,
number=16, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sharedQueueVersion', full_name='DeviceInfoMessage.sharedQueueVersion', index=16,
number=17, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='deviceUID', full_name='DeviceInfoMessage.deviceUID', index=17,
number=19, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='managedConfigDeviceID', full_name='DeviceInfoMessage.managedConfigDeviceID', index=18,
number=20, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='deviceClass', full_name='DeviceInfoMessage.deviceClass', index=19,
number=21, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='logicalDeviceCount', full_name='DeviceInfoMessage.logicalDeviceCount', index=20,
number=22, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tightlySyncedGroup', full_name='DeviceInfoMessage.tightlySyncedGroup', index=21,
number=23, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='isProxyGroupPlayer', full_name='DeviceInfoMessage.isProxyGroupPlayer', index=22,
number=24, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tightSyncUID', full_name='DeviceInfoMessage.tightSyncUID', index=23,
number=25, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='groupUID', full_name='DeviceInfoMessage.groupUID', index=24,
number=26, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='groupName', full_name='DeviceInfoMessage.groupName', index=25,
number=27, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='groupedDevices', full_name='DeviceInfoMessage.groupedDevices', index=26,
number=28, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='isGroupLeader', full_name='DeviceInfoMessage.isGroupLeader', index=27,
number=29, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='isAirplayActive', full_name='DeviceInfoMessage.isAirplayActive', index=28,
number=30, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='systemPodcastApplication', full_name='DeviceInfoMessage.systemPodcastApplication', index=29,
number=31, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enderDefaultGroupUID', full_name='DeviceInfoMessage.enderDefaultGroupUID', index=30,
number=32, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='airplayReceivers', full_name='DeviceInfoMessage.airplayReceivers', index=31,
number=33, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='linkAgent', full_name='DeviceInfoMessage.linkAgent', index=32,
number=34, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=122,
serialized_end=1042,
)
_DEVICEINFOMESSAGE.fields_by_name['deviceClass'].enum_type = pyatv_dot_mrp_dot_protobuf_dot_Common__pb2._DEVICECLASS_ENUM
_DEVICEINFOMESSAGE.fields_by_name['groupedDevices'].message_type = _DEVICEINFOMESSAGE
DESCRIPTOR.message_types_by_name['DeviceInfoMessage'] = _DEVICEINFOMESSAGE
DESCRIPTOR.extensions_by_name['deviceInfoMessage'] = deviceInfoMessage
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DeviceInfoMessage = _reflection.GeneratedProtocolMessageType('DeviceInfoMessage', (_message.Message,), {
'DESCRIPTOR' : _DEVICEINFOMESSAGE,
'__module__' : 'pyatv.mrp.protobuf.DeviceInfoMessage_pb2'
# @@protoc_insertion_point(class_scope:DeviceInfoMessage)
})
_sym_db.RegisterMessage(DeviceInfoMessage)
deviceInfoMessage.message_type = _DEVICEINFOMESSAGE
pyatv_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2.ProtocolMessage.RegisterExtension(deviceInfoMessage)
# @@protoc_insertion_point(module_scope)
|
[
"pierre.staahl@gmail.com"
] |
pierre.staahl@gmail.com
|
ba0a3db81be17cc3f350086bd34cecc5073b54ac
|
935911dbf5c7ec43e3525ed000fc60d8efff0d9e
|
/find_anagram/anagram_checker.py
|
743a9041dcb73c7980fc1f063753b47521b81c8f
|
[
"MIT"
] |
permissive
|
mattsgit/anagram_finder
|
653fdfe2fd0400a16acb7744fca2826fd12915d3
|
da5352d0881563522c3710f3ca880f512dd02748
|
refs/heads/master
| 2021-08-19T19:39:42.657063
| 2017-11-27T08:28:56
| 2017-11-27T08:28:56
| 111,863,461
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,275
|
py
|
import sys


class AnagramCheck:
    def __init__(self):
        pass

    def is_anagram(self, a, b):
        # remove all non alphanum chars in string
        new_a = "".join([x for x in a.lower() if x.isalnum()])
        new_b = "".join([x for x in b.lower() if x.isalnum()])
        if len(new_a) != len(new_b):
            return False
        a_dict = {}
        b_dict = {}
        for char in new_a:
            a_dict[char] = a_dict.get(char, 0) + 1
        for char in new_b:
            b_dict[char] = b_dict.get(char, 0) + 1
        return b_dict == a_dict


class AnagramTester:
    def __init__(self):
        pass

    def test_is_anagram_with_file(self, filename):
        results = {}
        anagram_checker = AnagramCheck()
        with open(filename) as f:
            content = f.readlines()
        for line in content:
            line_list = line.strip().split('","')
            results[anagram_checker.is_anagram(line_list[0][1:], line_list[1][:-1])] = results.get(
                anagram_checker.is_anagram(line_list[0][1:], line_list[1][:-1]), 0) + 1
        return results


def main():
    if len(sys.argv) == 3:
        checker = AnagramCheck()
        print checker.is_anagram(sys.argv[1], sys.argv[2])


if __name__ == '__main__':
    main()
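# Illustrative Python 2 CLI usage:
#
#     $ python anagram_checker.py "Dormitory" "Dirty room!!"
#     True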
|
[
"matthewradams22@gmail.com"
] |
matthewradams22@gmail.com
|
83b3e4af3c41afb8fa6ab9c832651bb33e8752e4
|
6f56e9496eca4ce758bfd9b3f9d8180929851e2b
|
/cmp_dirs.py
|
3098ae8ea3df252c4cc9cc817d781cb30e10bbf1
|
[] |
no_license
|
yoramzarai/py_tools
|
b231fc3ad3d41876359eafa0143aef6cc26880b7
|
0d5386f9034bdcea423137433c50af9e04ae73bf
|
refs/heads/master
| 2020-04-13T15:01:18.962746
| 2018-12-27T10:08:24
| 2018-12-27T10:08:24
| 163,278,950
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,139
|
py
|
#!/opt/local/bin/python3
'''Compares the content in two directories'''
import argparse
import os
from termcolor import colored
import filecmp as fcmp


# Command-line parsing information
def parse_in_args():
    ''' Defines input arguments '''
    # place description
    parser = argparse.ArgumentParser(description='Compares the content of two directories.', add_help=False)
    # required arguments
    group = parser.add_argument_group('Required arguments')
    group.add_argument('-l', dest='dirl', help='First (left) directory', type=str, metavar='<dir 1>', required=True)
    group.add_argument('-r', dest='dirr', help='Second (right) directory', type=str, metavar='<dir 2>', required=True)
    # optional arguments
    group_opt = parser.add_argument_group('Optional arguments')
    group_opt.add_argument('-f', dest='chk_f', help='File list to compare. Default is all files.', type=str, nargs='+',
                           metavar=('fn1', 'fn2'), default=list())
    group_opt.add_argument('-e', dest='ign_f', help='File list to ignore. Default is none.', type=str, nargs='+',
                           metavar=('fn1', 'fn2'), default=list())
    group_opt.add_argument('-D', help='Enables debug prints.', action='store_true')
    group_opt.add_argument('-h', '--help', help='Displays usage information and exits.', action='help')
    return parser.parse_args(), parser.print_help


def print_base_cmp_dirs(var, name, colr):
    print(colored('\n\n{} {}', colr).format(len(var), name))
    for s in var: print(colored(s, colr), end=' ')


def print_cmp_dirs(args):
    cmp = fcmp.dircmp(args.dirl, args.dirr, args.ign_f)
    #print(cmp.report())
    # same files
    print_base_cmp_dirs(cmp.same_files, 'identical files:', 'green')
    # different version
    print_base_cmp_dirs(cmp.diff_files, 'with different versions:', 'red')
    # could not compare
    print_base_cmp_dirs(cmp.funny_files, 'could not compare:', 'white')
    # files and directories only in args.dirl
    print_base_cmp_dirs(cmp.left_only, 'are only in {}:'.format(args.dirl), 'magenta')
    # files and directories only in args.dirr
    print_base_cmp_dirs(cmp.right_only, 'are only in {}:'.format(args.dirr), 'cyan')
    print()


# Main function
# =============
def main():
    ''' Main body '''
    args, _ = parse_in_args()  # parse, validate and return input arguments
    if args.D: print('Comparing {} with {}...'.format(args.dirl, args.dirr))
    args.ign_f += ['.DS_Store']
    if args.chk_f:
        match, mismatch, errors = fcmp.cmpfiles(args.dirl, args.dirr, args.chk_f)
        if match: print_base_cmp_dirs(match, 'identical files:', 'green')
        if mismatch: print_base_cmp_dirs(mismatch, 'with different versions:', 'yellow')
        if errors: print_base_cmp_dirs(errors, 'missing files (in one or both directories):', 'red')
        print()
    else:
        print_cmp_dirs(args)


# =======================================================================================================================
if __name__ == "__main__":
    main()
else:
    print(__name__, 'has been imported.')
|
[
"noreply@github.com"
] |
yoramzarai.noreply@github.com
|
551479db553cc38ade33fc01b941a3b7ba4c94bb
|
06a0e768da8fae660652a7de477e13f8be9a0708
|
/QuantumDiscreteFourierTransform/classic_dft_example.py
|
84030076af85b3b0efc62d3bf5992731bc7bcec9
|
[
"MIT"
] |
permissive
|
ettoremessina/quantum-experiments
|
d5dc1e94cc32a3de2f3f40a25208cfb2b9667301
|
0a06dd5bdde1c3831625147f96348a3d8dfb0533
|
refs/heads/master
| 2022-11-05T06:40:58.822685
| 2022-10-30T20:41:10
| 2022-10-30T20:41:10
| 218,624,888
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,800
|
py
|
import numpy as np
import matplotlib.pyplot as plt

#signal generation
duration = 2.  #in seconds
sampling_rate = 100.  #per second
sampling_period = 1. / sampling_rate
discrete_times = np.arange(0, duration, sampling_period)
num_of_samples = len(discrete_times)

frequency = 5.  #in Hz
amplitude = 1.4  #in Volts, for example
phase = 0.  #in radians
sampled_values = amplitude * np.sin(2 * np.pi * frequency * discrete_times + phase)

frequency = 10.
amplitude = 0.8
phase = np.pi / 2.
sampled_values += amplitude * np.sin(2 * np.pi * frequency * discrete_times + phase)

plt.plot(discrete_times, sampled_values, 'b')
plt.title('Signal')
plt.xlabel('Time')
plt.ylabel('Amplitude')
plt.grid()
plt.show()

#Discrete Fourier Transform using SciPy
import scipy.fft as spf
transformed_signal = np.abs(spf.fft(sampled_values)[0:num_of_samples//2])
normalized_transformed = (2.0 / num_of_samples) * transformed_signal
discrete_frequencies = spf.fftfreq(num_of_samples, sampling_period)[:num_of_samples//2]

plt.plot(discrete_frequencies, normalized_transformed, 'r')
plt.title('DFT by SciPy fft')
plt.xlabel('Frequency')
plt.ylabel('Magnitude')
plt.grid()
plt.show()

#Discrete Fourier Matrix Transform
def DFTByMatrix(signal):
    N = len(signal)
    n = np.arange(N)
    k = n.reshape((N, 1))
    fourier_matrix = np.exp(-2j * np.pi * k * n / N)
    transf_signal = np.dot(fourier_matrix, signal)
    return transf_signal

transformed_signal = np.abs(DFTByMatrix(sampled_values))[0:num_of_samples//2]
normalized_transformed = (2.0 / num_of_samples) * transformed_signal
# Frequency bins in Hz are k / (N * T); dividing the sample times by the
# sampling period yields bare bin indices (off by a factor of the signal
# duration), so reuse the same bins as the SciPy plot above.
discrete_frequencies = spf.fftfreq(num_of_samples, sampling_period)[:num_of_samples//2]

plt.plot(discrete_frequencies, normalized_transformed, 'g')
plt.title('DFT by Matrix')
plt.xlabel('Frequency')
plt.ylabel('Magnitude')
plt.grid()
plt.show()
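# For reference, DFTByMatrix evaluates the textbook DFT directly,
#     X_k = sum_{n=0}^{N-1} x_n * exp(-2j*pi*k*n/N),  k = 0, ..., N-1,
# as an O(N^2) matrix-vector product, versus the O(N log N) FFT used in
# the SciPy section above; both should produce the same spectrum here.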
|
[
"et.messina@gmail.com"
] |
et.messina@gmail.com
|
c05700dbe86d74616c8013fd8d430433417ac148
|
f7f66d1327238f34d0b3b85c1e221616a95aae8c
|
/memex_dossier/web/search_engines.py
|
d91e55479ba651492f015a4e32ee8074f1624841
|
[
"MIT"
] |
permissive
|
biyanisuraj/memex-dossier-open
|
820d5afc8a5cf93afc1364fb2a960ac5ab245217
|
43bab4e42d46ab2cf1890c3c2935658ae9b10a3a
|
refs/heads/master
| 2020-06-07T01:34:51.467907
| 2018-10-09T15:44:58
| 2018-10-09T15:44:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,270
|
py
|
'''memex_dossier.web.search_engines

.. This software is released under an MIT/X11 open source license.
   Copyright 2012-2014 Diffeo, Inc.
'''
from __future__ import absolute_import, division, print_function
from itertools import ifilter, islice
import logging
import random as rand

from memex_dossier.fc import SparseVector, StringCounter
from memex_dossier.web.interface import SearchEngine

logger = logging.getLogger(__name__)


class random(SearchEngine):
    '''Return random results with the same name.

    This finds all content objects that have a matching name and
    returns ``limit`` results at random.

    If there is no ``NAME`` index defined, then this always returns
    no results.
    '''
    def __init__(self, store):
        super(random, self).__init__()
        self.store = store

    def recommendations(self):
        if u'NAME' not in self.store.index_names():
            return {'results': []}
        fc = self.store.get(self.query_content_id)
        if fc is None:
            raise KeyError(self.query_content_id)
        cids = []
        for name in fc.get(u'NAME', {}):
            cids.extend(self.store.index_scan_ids(u'NAME', name))
        predicate = self.create_filter_predicate()
        results = list(ifilter(predicate, self.store.get_many(cids)))
        rand.shuffle(results)
        return {'results': results[0:self.params['limit']]}


class plain_index_scan(SearchEngine):
    '''Return a random sample of an index scan.

    This scans all indexes defined for all values in the query
    corresponding to those indexes.
    '''
    def __init__(self, store):
        super(plain_index_scan, self).__init__()
        self.store = store

    def recommendations(self):
        predicate = self.create_filter_predicate()
        cids = self.streaming_ids(self.query_content_id)
        results = ifilter(predicate,
                          ((cid, self.store.get(cid)) for cid in cids))
        sample = streaming_sample(
            results, self.params['limit'], self.params['limit'] * 10)
        return {'results': sample}

    def get_query_fc(self, content_id):
        query_fc = self.store.get(content_id)
        if query_fc is None:
            logger.info('Could not find FC for "%s"', content_id)
        return query_fc

    def streaming_ids(self, content_id):
        def scan(idx_name, val):
            for cid in self.store.index_scan(idx_name, val):
                if cid not in cids and cid not in blacklist:
                    cids.add(cid)
                    yield cid
        query_fc = self.get_query_fc(content_id)
        if query_fc is None:
            return
        blacklist = set([content_id])
        cids = set()
        logger.info('starting index scan (query content id: %s)', content_id)
        for idx_name in self.store.index_names():
            feat = query_fc.get(idx_name, None)
            if isinstance(feat, unicode):
                logger.info('[Unicode index: %s] scanning for "%s"',
                            idx_name, feat)
                for cid in scan(idx_name, feat):
                    yield cid
            elif isinstance(feat, (SparseVector, StringCounter)):
                for name in feat.iterkeys():
                    logger.info('[StringCounter index: %s] scanning for "%s"',
                                idx_name, name)
                    for cid in scan(idx_name, name):
                        yield cid


def streaming_sample(seq, k, limit=None):
    '''Streaming sample.

    Iterate over seq (once!) keeping k random elements with uniform
    distribution.

    As a special case, if ``k`` is ``None``, then ``list(seq)`` is
    returned.

    :param seq: iterable of things to sample from
    :param k: size of desired sample
    :param limit: stop reading ``seq`` after considering this many
    :return: list of elements from seq, length k (or less if seq is
             short)
    '''
    if k is None:
        return list(seq)
    seq = iter(seq)
    if limit is not None:
        k = min(limit, k)
        limit -= k
    result = list(islice(seq, k))
    # Reservoir sampling (algorithm R): element number count + 1 overall
    # replaces a random slot with probability k / (count + 1), which keeps
    # the sample uniform; `count` starts at k.
    for count, x in enumerate(islice(seq, limit), len(result)):
        if rand.random() < (float(k) / (count + 1)):
            result[rand.randint(0, k - 1)] = x
    return result
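# A minimal usage sketch (values illustrative): draw 5 elements uniformly
# from the first 1000 items of a large stream without materializing it.
#
#     sample = streaming_sample(iter(xrange(10 ** 9)), 5, limit=1000)
#     assert len(sample) == 5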
|
[
"jrf@diffeo.com"
] |
jrf@diffeo.com
|
a0bbf4695e7ad752b16be83f1631a7babdea7f3a
|
c9293ab68d0235a1830a3634a41a5b65b4eb5d6a
|
/Lessons/Section-03/lesson_0081/lesson_0002.py
|
05b0ca33ab0b45e7283839c5a790fda630138775
|
[] |
no_license
|
lipegomes/python-django-udemy-studies
|
4f836497ee10ece7ee5b40af1b636bb1c03deb75
|
938fa6a05f9505b8eaf6e7e6bc1c5e199b670432
|
refs/heads/master
| 2023-01-07T01:22:16.855346
| 2020-11-03T13:49:54
| 2020-11-03T13:49:54
| 283,852,942
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 179
|
py
|
from newcalc import multiply, double_the_list, PI
from sayhai import say_hi
print(multiply([10, 5]))
say_hi()
numbers1 = [5, 4, 8]
print(double_the_list(numbers1))
print(PI)
|
[
"61765381+lipegomes@users.noreply.github.com"
] |
61765381+lipegomes@users.noreply.github.com
|
b6a7c8bb7b0ea4447d1bfa3f5e4bfaf58671e05a
|
770586dc530756e179ae9db9ae8b30ffa9664e7c
|
/dataset/mnist.py
|
1928dda8769e7e0c038a2e28a7dba134ac64c244
|
[
"MIT"
] |
permissive
|
mseif2016/private_objective_perturbation
|
0f4c242aa473b57e309fdaf3b07ccceb06e3ca4e
|
ce0e881f9115c1d07df535261b4b7c6ee650325c
|
refs/heads/master
| 2022-11-14T02:42:35.958656
| 2020-06-30T15:02:08
| 2020-06-30T15:02:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,275
|
py
|
import os
import sys

import numpy as np
from sklearn import preprocessing
from tensorflow.examples.tutorials.mnist import input_data

from utils.utils_preprocessing import convert_to_binary, normalize_rows, format_output

FILENAME_X = 'mnist_processed_x.npy'
FILENAME_Y = 'mnist_processed_y.npy'


def preprocess(cache_location="dataset/data_cache", output_location="dataset/data"):
    np.random.seed(10000019)
    mnist = input_data.read_data_sets(
        os.path.join(cache_location, "MNIST_data"), one_hot=True)
    train_features = np.array(mnist.train.images)
    train_labels = np.array(mnist.train.labels)
    test_features = np.array(mnist.test.images)
    test_labels = np.array(mnist.test.labels)
    features_set = np.vstack((train_features, test_features))
    labels_set = np.vstack((train_labels, test_labels))
    label_width = len(labels_set[0])
    combined_data = np.column_stack([features_set, labels_set])
    np.random.shuffle(combined_data)
    np.save(os.path.join(output_location, FILENAME_X), combined_data[:, :-label_width])
    np.save(os.path.join(output_location, FILENAME_Y), combined_data[:, -label_width:])


if __name__ == "__main__":
    if len(sys.argv) == 3:
        preprocess(sys.argv[1], sys.argv[2])
    else:
        preprocess()
|
[
"giusevtr@gmail.com"
] |
giusevtr@gmail.com
|
a562ea5925bb853287c30692e331db3ad17821e2
|
8c42964a29af1d5a2f4541ab634b54e25a90b9f4
|
/Example2/configuration.py
|
5a64a7d9aada01e4a7e1e383119cbc7d566d617f
|
[] |
no_license
|
lenzip/CMSDataAnalysisSchoolPisa2019ScalarToWW
|
a21dc572ae2e152410a867ae5013703c886e4bbf
|
8cff1dea08887b78a9efc26a142609ba1b7ba296
|
refs/heads/master
| 2020-04-14T21:13:03.028961
| 2019-01-23T16:22:23
| 2019-01-23T16:22:23
| 164,121,564
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 803
|
py
|
# example of configuration file
tag = 'Inclusive'
# used by mkShape to define output directory for root files
outputDir = 'rootFile'
# file with list of variables
variablesFile = 'variables.py'
# file with list of cuts
cutsFile = 'cuts.py'
# file with list of samples
samplesFile = 'samples.py'
# file with list of plots
plotFile = 'plot.py'
# luminosity to normalize to (in 1/fb)
lumi = 35.867
# used by mkPlot to define output directory for plots
# different from "outputDir" to do things more tidy
outputDirPlots = 'plotsInclusive'
# used by mkDatacards to define output directory for datacards
outputDirDatacard = 'datacardsInclusive'
# structure file for datacard
structureFile = 'structure.py'
# nuisances file for mkDatacards and for mkShape
nuisancesFile = 'nuisances.py'
|
[
"piergiulio.lenzi@cern.ch"
] |
piergiulio.lenzi@cern.ch
|
e3e0ff71c09f66324bba160b6a4edccc40d93fff
|
ddc5aa77203bf76cd789c173dffbc382ed8ef004
|
/test/app_test/master.py
|
f1fe1995de473cf239f7fc143c31029ce2d5bca1
|
[] |
no_license
|
phroiland/FinBiotic
|
0b8183ce9f97c3fc4b1f7e20decc3472bffe8800
|
a30ef2e979b230e5424fd25ef7dd1fb49bbd5245
|
refs/heads/master
| 2023-08-18T15:26:15.948262
| 2023-08-15T15:13:23
| 2023-08-15T15:13:23
| 93,895,989
| 2
| 2
| null | 2023-03-01T20:08:37
| 2017-06-09T20:52:02
|
Python
|
UTF-8
|
Python
| false
| false
| 4,105
|
py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon May 29 13:30:38 2017
@author: jonfroiland
"""
import sys
import argparse
import oandapyV20
import oandapyV20.endpoints.positions as openPos
# Data, Price, and Strategy Imports
import settings
import common.config
import common.args
from stream.streamingData import StreamingData
from stream.view import mid_string, heartbeat_to_string, instrument_string
from account.balance import Balance
from strategy.breakout import Breakout
from strategy.spreads import Spreads
from strategy.strategy import Strategy
from pivots.pivotImports import PivotImports
# from view import bid_string, ask_string, price_to_string
from datetime import datetime
import pandas as pd
pd.set_option('display.large_repr', 'truncate')
pd.set_option('display.max_columns', 0)
def main():
print "------ System online -------", datetime.now()
parser = argparse.ArgumentParser()
common.config.add_argument(parser)
parser.add_argument('--instrument', "-i", type=common.args.instrument,
required=True, action="append",
help="Instrument to get prices for")
parser.add_argument('--snapshot', action="store_true", default=True,
help="Request an initial snapshot")
parser.add_argument('--no-snapshot', dest="snapshot", action="store_false",
help="Do not request an initial snapshot")
parser.add_argument('--show-heartbeats', "-s", action='store_true',
default=False, help="display heartbeats")
args = parser.parse_args()
# print sys.argv[2]
account_id = args.config.active_account
api = args.config.create_streaming_context()
account_api = args.config.create_context()
response = api.pricing.stream(account_id, snapshot=args.snapshot,
instruments=",".join(args.instrument))
dfD = PivotImports(sys.argv[2]).daily()
# dfW = p.weekly()
balance = Balance(account_api, account_id).balance()
df = pd.DataFrame([])
for msg_type, msg in response.parts():
if msg_type == "pricing.Heartbeat" and args.show_heartbeats:
print heartbeat_to_string(msg)
if msg_type == "pricing.Price":
sd = StreamingData(datetime.now(), instrument_string(msg),
mid_string(msg), account_api, account_id, 's',
'5min', balance)
df = df.append(sd.df())
sd.resample(df)
print "df:", df.shape[0], "minuteData:", sd.minuteData().shape[0]
# print sd.minuteData(),'\n'
if sd.minuteData().shape[0] < 20:
continue
else:
client = oandapyV20.API(settings.ACCESS_TOKEN)
r = openPos.OpenPositions(accountID=account_id)
client.request(r)
openTrades = []
for i in r.response['positions']:
trades = i['instrument']
openTrades.append(trades)
print 'Open Trades', openTrades
if instrument_string(msg) in openTrades:
continue
else:
try:
b = Breakout(sd.minuteData())
breakout = b.breakout()
# print 'Breakout Units:',breakout
s = Spreads(dfD, mid_string(msg))
pivot, rl1, rl2, rl3, sl1, sl2, sl3 = s.spreads()
rate1, rate2 = s.spreads_out()
strat = Strategy(account_api, account_id,
instrument_string(msg), dfD,
mid_string(msg), breakout, pivot, rl1,
rl2, rl3, sl1, sl2, sl3, rate1, rate2)
strat.res_check()
strat.sup_check()
except Exception as e:
print e
if __name__ == "__main__":
main()
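# Hypothetical invocation (an assumption based on the argparse setup above;
# note that PivotImports reads sys.argv[2] directly, so the instrument name
# must be the second command-line token):
#
#     python2 master.py --instrument EUR_USD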
|
[
"jon.froiland@gmail.com"
] |
jon.froiland@gmail.com
|
4449a9ba1f7077329a5da7221fd2c951aa9a4573
|
ebcea394905df8222c257c8c6c469627a6e48095
|
/PyQt5/object_detection/inputs_test.py
|
cc79131e3a02e54893093a7c803e84b4cb10687c
|
[] |
no_license
|
valiok98/Python-Qt5-Tensorflow
|
2773cfc2a0e569ed53cf3d90066885f17abe8c6a
|
e03ccc2884b687a36fbe47f5ff320837be3e217a
|
refs/heads/master
| 2021-09-17T20:41:01.908602
| 2018-03-31T12:42:25
| 2018-03-31T12:42:25
| 103,644,683
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 24,083
|
py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.tflearn.inputs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import numpy as np
import tensorflow as tf
import sys
sys.path.append("..")
import inputs
from core import preprocessor
from core import standard_fields as fields
from utils import config_util
FLAGS = tf.flags.FLAGS
def _get_configs_for_model(model_name):
"""Returns configurations for model."""
# TODO: Make sure these tests work fine outside google3.
fname = os.path.join(
FLAGS.test_srcdir,
('google3/third_party/tensorflow_models/'
'object_detection/samples/configs/' + model_name + '.config'))
label_map_path = os.path.join(FLAGS.test_srcdir,
('google3/third_party/tensorflow_models/'
'object_detection/data/pet_label_map.pbtxt'))
data_path = os.path.join(FLAGS.test_srcdir,
('google3/third_party/tensorflow_models/'
'object_detection/test_data/pets_examples.record'))
configs = config_util.get_configs_from_pipeline_file(fname)
return config_util.merge_external_params_with_configs(
configs,
train_input_path=data_path,
eval_input_path=data_path,
label_map_path=label_map_path)
class InputsTest(tf.test.TestCase):
def test_faster_rcnn_resnet50_train_input(self):
"""Tests the training input function for FasterRcnnResnet50."""
configs = _get_configs_for_model('faster_rcnn_resnet50_pets')
configs['train_config'].unpad_groundtruth_tensors = True
model_config = configs['model']
model_config.faster_rcnn.num_classes = 37
train_input_fn = inputs.create_train_input_fn(
configs['train_config'], configs['train_input_config'], model_config)
features, labels = train_input_fn()
self.assertAllEqual([None, None, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual([],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[None, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[None, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[None],
labels[fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
def test_faster_rcnn_resnet50_eval_input(self):
"""Tests the eval input function for FasterRcnnResnet50."""
configs = _get_configs_for_model('faster_rcnn_resnet50_pets')
model_config = configs['model']
model_config.faster_rcnn.num_classes = 37
eval_input_fn = inputs.create_eval_input_fn(
configs['eval_config'], configs['eval_input_config'], model_config)
features, labels = eval_input_fn()
self.assertAllEqual([1, None, None, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual(
[1, None, None, 3],
features[fields.InputDataFields.original_image].shape.as_list())
self.assertEqual(tf.uint8,
features[fields.InputDataFields.original_image].dtype)
self.assertAllEqual([1], features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[1, None, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[1, None, model_config.faster_rcnn.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[1, None],
labels[fields.InputDataFields.groundtruth_area].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_area].dtype)
self.assertAllEqual(
[1, None],
labels[fields.InputDataFields.groundtruth_is_crowd].shape.as_list())
self.assertEqual(
tf.bool, labels[fields.InputDataFields.groundtruth_is_crowd].dtype)
self.assertAllEqual(
[1, None],
labels[fields.InputDataFields.groundtruth_difficult].shape.as_list())
self.assertEqual(
tf.int32, labels[fields.InputDataFields.groundtruth_difficult].dtype)
def test_ssd_inceptionV2_train_input(self):
"""Tests the training input function for SSDInceptionV2."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
model_config = configs['model']
model_config.ssd.num_classes = 37
batch_size = configs['train_config'].batch_size
train_input_fn = inputs.create_train_input_fn(
configs['train_config'], configs['train_input_config'], model_config)
features, labels = train_input_fn()
self.assertAllEqual([batch_size, 300, 300, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual([batch_size],
features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[batch_size],
labels[fields.InputDataFields.num_groundtruth_boxes].shape.as_list())
self.assertEqual(tf.int32,
labels[fields.InputDataFields.num_groundtruth_boxes].dtype)
self.assertAllEqual(
[batch_size, 50, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[batch_size, 50, model_config.ssd.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[batch_size, 50],
labels[fields.InputDataFields.groundtruth_weights].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_weights].dtype)
def test_ssd_inceptionV2_eval_input(self):
"""Tests the eval input function for SSDInceptionV2."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
model_config = configs['model']
model_config.ssd.num_classes = 37
eval_input_fn = inputs.create_eval_input_fn(
configs['eval_config'], configs['eval_input_config'], model_config)
features, labels = eval_input_fn()
self.assertAllEqual([1, 300, 300, 3],
features[fields.InputDataFields.image].shape.as_list())
self.assertEqual(tf.float32, features[fields.InputDataFields.image].dtype)
self.assertAllEqual(
[1, None, None, 3],
features[fields.InputDataFields.original_image].shape.as_list())
self.assertEqual(tf.uint8,
features[fields.InputDataFields.original_image].dtype)
self.assertAllEqual([1], features[inputs.HASH_KEY].shape.as_list())
self.assertEqual(tf.int32, features[inputs.HASH_KEY].dtype)
self.assertAllEqual(
[1, None, 4],
labels[fields.InputDataFields.groundtruth_boxes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_boxes].dtype)
self.assertAllEqual(
[1, None, model_config.ssd.num_classes],
labels[fields.InputDataFields.groundtruth_classes].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_classes].dtype)
self.assertAllEqual(
[1, None],
labels[fields.InputDataFields.groundtruth_area].shape.as_list())
self.assertEqual(tf.float32,
labels[fields.InputDataFields.groundtruth_area].dtype)
self.assertAllEqual(
[1, None],
labels[fields.InputDataFields.groundtruth_is_crowd].shape.as_list())
self.assertEqual(
tf.bool, labels[fields.InputDataFields.groundtruth_is_crowd].dtype)
self.assertAllEqual(
[1, None],
labels[fields.InputDataFields.groundtruth_difficult].shape.as_list())
self.assertEqual(
tf.int32, labels[fields.InputDataFields.groundtruth_difficult].dtype)
def test_predict_input(self):
"""Tests the predict input function."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
predict_input_fn = inputs.create_predict_input_fn(
model_config=configs['model'])
serving_input_receiver = predict_input_fn()
image = serving_input_receiver.features[fields.InputDataFields.image]
receiver_tensors = serving_input_receiver.receiver_tensors[
inputs.SERVING_FED_EXAMPLE_KEY]
self.assertEqual([1, 300, 300, 3], image.shape.as_list())
self.assertEqual(tf.float32, image.dtype)
self.assertEqual(tf.string, receiver_tensors.dtype)
def test_error_with_bad_train_config(self):
"""Tests that a TypeError is raised with improper train config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
train_input_fn = inputs.create_train_input_fn(
train_config=configs['eval_config'], # Expecting `TrainConfig`.
train_input_config=configs['train_input_config'],
model_config=configs['model'])
with self.assertRaises(TypeError):
train_input_fn()
def test_error_with_bad_train_input_config(self):
"""Tests that a TypeError is raised with improper train input config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
train_input_fn = inputs.create_train_input_fn(
train_config=configs['train_config'],
train_input_config=configs['model'], # Expecting `InputReader`.
model_config=configs['model'])
with self.assertRaises(TypeError):
train_input_fn()
def test_error_with_bad_train_model_config(self):
"""Tests that a TypeError is raised with improper train model config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
train_input_fn = inputs.create_train_input_fn(
train_config=configs['train_config'],
train_input_config=configs['train_input_config'],
model_config=configs['train_config']) # Expecting `DetectionModel`.
with self.assertRaises(TypeError):
train_input_fn()
def test_error_with_bad_eval_config(self):
"""Tests that a TypeError is raised with improper eval config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
eval_input_fn = inputs.create_eval_input_fn(
eval_config=configs['train_config'], # Expecting `EvalConfig`.
eval_input_config=configs['eval_input_config'],
model_config=configs['model'])
with self.assertRaises(TypeError):
eval_input_fn()
def test_error_with_bad_eval_input_config(self):
"""Tests that a TypeError is raised with improper eval input config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
eval_input_fn = inputs.create_eval_input_fn(
eval_config=configs['eval_config'],
eval_input_config=configs['model'], # Expecting `InputReader`.
model_config=configs['model'])
with self.assertRaises(TypeError):
eval_input_fn()
def test_error_with_bad_eval_model_config(self):
"""Tests that a TypeError is raised with improper eval model config."""
configs = _get_configs_for_model('ssd_inception_v2_pets')
configs['model'].ssd.num_classes = 37
eval_input_fn = inputs.create_eval_input_fn(
eval_config=configs['eval_config'],
eval_input_config=configs['eval_input_config'],
model_config=configs['eval_config']) # Expecting `DetectionModel`.
with self.assertRaises(TypeError):
eval_input_fn()
class DataAugmentationFnTest(tf.test.TestCase):
def test_apply_image_and_box_augmentation(self):
data_augmentation_options = [
(preprocessor.resize_image, {
'new_height': 20,
'new_width': 20,
'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR
}),
(preprocessor.scale_boxes_to_pixel_coordinates, {}),
]
data_augmentation_fn = functools.partial(
inputs.augment_input_data,
data_augmentation_options=data_augmentation_options)
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(10, 10, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1., 1.]], np.float32))
}
augmented_tensor_dict = data_augmentation_fn(tensor_dict=tensor_dict)
with self.test_session() as sess:
augmented_tensor_dict_out = sess.run(augmented_tensor_dict)
self.assertAllEqual(
augmented_tensor_dict_out[fields.InputDataFields.image].shape,
[20, 20, 3]
)
self.assertAllClose(
augmented_tensor_dict_out[fields.InputDataFields.groundtruth_boxes],
[[10, 10, 20, 20]]
)
def test_include_masks_in_data_augmentation(self):
data_augmentation_options = [
(preprocessor.resize_image, {
'new_height': 20,
'new_width': 20,
'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR
})
]
data_augmentation_fn = functools.partial(
inputs.augment_input_data,
data_augmentation_options=data_augmentation_options)
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(10, 10, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_instance_masks:
tf.constant(np.zeros([2, 10, 10], np.uint8))
}
augmented_tensor_dict = data_augmentation_fn(tensor_dict=tensor_dict)
with self.test_session() as sess:
augmented_tensor_dict_out = sess.run(augmented_tensor_dict)
self.assertAllEqual(
augmented_tensor_dict_out[fields.InputDataFields.image].shape,
[20, 20, 3])
self.assertAllEqual(augmented_tensor_dict_out[
fields.InputDataFields.groundtruth_instance_masks].shape, [2, 20, 20])
def test_include_keypoints_in_data_augmentation(self):
data_augmentation_options = [
(preprocessor.resize_image, {
'new_height': 20,
'new_width': 20,
'method': tf.image.ResizeMethod.NEAREST_NEIGHBOR
}),
(preprocessor.scale_boxes_to_pixel_coordinates, {}),
]
data_augmentation_fn = functools.partial(
inputs.augment_input_data,
data_augmentation_options=data_augmentation_options)
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(10, 10, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1., 1.]], np.float32)),
fields.InputDataFields.groundtruth_keypoints:
tf.constant(np.array([[[0.5, 1.0], [0.5, 0.5]]], np.float32))
}
augmented_tensor_dict = data_augmentation_fn(tensor_dict=tensor_dict)
with self.test_session() as sess:
augmented_tensor_dict_out = sess.run(augmented_tensor_dict)
self.assertAllEqual(
augmented_tensor_dict_out[fields.InputDataFields.image].shape,
[20, 20, 3]
)
self.assertAllClose(
augmented_tensor_dict_out[fields.InputDataFields.groundtruth_boxes],
[[10, 10, 20, 20]]
)
self.assertAllClose(
augmented_tensor_dict_out[fields.InputDataFields.groundtruth_keypoints],
[[[10, 20], [10, 10]]]
)
def _fake_model_preprocessor_fn(image):
return (image, tf.expand_dims(tf.shape(image)[1:], axis=0))
def _fake_image_resizer_fn(image, mask):
return (image, mask, tf.shape(image))
class DataTransformationFnTest(tf.test.TestCase):
def test_returns_correct_class_label_encodings(self):
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(4, 4, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[0, 0, 1, 1], [.5, .5, 1, 1]], np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32))
}
num_classes = 3
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes)
with self.test_session() as sess:
transformed_inputs = sess.run(
input_transformation_fn(tensor_dict=tensor_dict))
self.assertAllClose(
transformed_inputs[fields.InputDataFields.groundtruth_classes],
[[0, 0, 1], [1, 0, 0]])
def test_returns_correct_merged_boxes(self):
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(4, 4, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_boxes:
tf.constant(np.array([[.5, .5, 1, 1], [.5, .5, 1, 1]], np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32))
}
num_classes = 3
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes,
merge_multiple_boxes=True)
with self.test_session() as sess:
transformed_inputs = sess.run(
input_transformation_fn(tensor_dict=tensor_dict))
self.assertAllClose(
transformed_inputs[fields.InputDataFields.groundtruth_boxes],
[[.5, .5, 1., 1.]])
self.assertAllClose(
transformed_inputs[fields.InputDataFields.groundtruth_classes],
[[1, 0, 1]])
def test_returns_resized_masks(self):
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np.random.rand(4, 4, 3).astype(np.float32)),
fields.InputDataFields.groundtruth_instance_masks:
tf.constant(np.random.rand(2, 4, 4).astype(np.float32)),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32))
}
def fake_image_resizer_fn(image, masks):
resized_image = tf.image.resize_images(image, [8, 8])
resized_masks = tf.transpose(
tf.image.resize_images(tf.transpose(masks, [1, 2, 0]), [8, 8]),
[2, 0, 1])
return resized_image, resized_masks, tf.shape(resized_image)
num_classes = 3
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=fake_image_resizer_fn,
num_classes=num_classes)
with self.test_session() as sess:
transformed_inputs = sess.run(
input_transformation_fn(tensor_dict=tensor_dict))
self.assertAllEqual(transformed_inputs[
fields.InputDataFields.groundtruth_instance_masks].shape, [2, 8, 8])
def test_applies_model_preprocess_fn_to_image_tensor(self):
np_image = np.random.randint(256, size=(4, 4, 3))
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np_image),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32))
}
def fake_model_preprocessor_fn(image):
return (image / 255., tf.expand_dims(tf.shape(image)[1:], axis=0))
num_classes = 3
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes)
with self.test_session() as sess:
transformed_inputs = sess.run(
input_transformation_fn(tensor_dict=tensor_dict))
self.assertAllClose(transformed_inputs[fields.InputDataFields.image],
np_image / 255.)
self.assertAllClose(transformed_inputs[fields.InputDataFields.
true_image_shape],
[4, 4, 3])
def test_applies_data_augmentation_fn_to_tensor_dict(self):
np_image = np.random.randint(256, size=(4, 4, 3))
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np_image),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32))
}
def add_one_data_augmentation_fn(tensor_dict):
return {key: value + 1 for key, value in tensor_dict.items()}
num_classes = 4
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=_fake_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes,
data_augmentation_fn=add_one_data_augmentation_fn)
with self.test_session() as sess:
augmented_tensor_dict = sess.run(
input_transformation_fn(tensor_dict=tensor_dict))
self.assertAllEqual(augmented_tensor_dict[fields.InputDataFields.image],
np_image + 1)
self.assertAllEqual(
augmented_tensor_dict[fields.InputDataFields.groundtruth_classes],
[[0, 0, 0, 1], [0, 1, 0, 0]])
def test_applies_data_augmentation_fn_before_model_preprocess_fn(self):
np_image = np.random.randint(256, size=(4, 4, 3))
tensor_dict = {
fields.InputDataFields.image:
tf.constant(np_image),
fields.InputDataFields.groundtruth_classes:
tf.constant(np.array([3, 1], np.int32))
}
def mul_two_model_preprocessor_fn(image):
return (image * 2, tf.expand_dims(tf.shape(image)[1:], axis=0))
def add_five_to_image_data_augmentation_fn(tensor_dict):
tensor_dict[fields.InputDataFields.image] += 5
return tensor_dict
num_classes = 4
input_transformation_fn = functools.partial(
inputs.transform_input_data,
model_preprocess_fn=mul_two_model_preprocessor_fn,
image_resizer_fn=_fake_image_resizer_fn,
num_classes=num_classes,
data_augmentation_fn=add_five_to_image_data_augmentation_fn)
with self.test_session() as sess:
augmented_tensor_dict = sess.run(
input_transformation_fn(tensor_dict=tensor_dict))
self.assertAllEqual(augmented_tensor_dict[fields.InputDataFields.image],
(np_image + 5) * 2)
if __name__ == '__main__':
tf.test.main()
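# Run sketch: tf.test.main() forwards to the standard unittest runner, so a
# single case can be selected by name (assuming the TF1-era object_detection
# layout implied by the imports above is on the path):
#
#     python inputs_test.py DataAugmentationFnTest.test_apply_image_and_box_augmentation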
|
[
"valentin1998v@gmail.com"
] |
valentin1998v@gmail.com
|
b5183ff595872d8796cab1c531e0e8ca9453123d
|
0ca0bbb58378d9e73b69ca605a8ea8a82b6617da
|
/src/tag_add.py
|
c81c4cedf93a1f6da82c33a48c734fcdae582776
|
[
"MIT"
] |
permissive
|
tws0002/footage-importer
|
e1f14447ae4489ad300edd92f459f2776e9a0a4d
|
a797b79efa184167ca472369b07d1a029dd86cbd
|
refs/heads/master
| 2020-03-28T17:40:47.444278
| 2018-09-19T07:47:05
| 2018-09-19T07:47:05
| 148,812,988
| 0
| 0
|
MIT
| 2018-09-19T07:47:06
| 2018-09-14T16:10:24
|
Python
|
UTF-8
|
Python
| false
| false
| 438
|
py
|
from PyQt5.QtWidgets import QDialog
from PyQt5 import uic
from PyQt5.QtCore import Qt
import os
ui_path = os.path.join(os.path.dirname(__file__), 'tag_add.ui')
class TagAddInput(QDialog):
def __init__(self):
super().__init__(None, Qt.WindowTitleHint | Qt.WindowCloseButtonHint)
uic.loadUi(ui_path, self)
self.apply_button.clicked.connect(self.accept)
self.cancel_button.clicked.connect(self.close)
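# Usage sketch (hypothetical: apart from apply_button and cancel_button, any
# input widget names depend on what tag_add.ui actually defines):
#
#     from PyQt5.QtWidgets import QApplication
#     app = QApplication([])
#     dialog = TagAddInput()
#     if dialog.exec_() == QDialog.Accepted:
#         pass  # read the entered tag from the dialog's input widget here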
|
[
"mrhchief@gmail.com"
] |
mrhchief@gmail.com
|
55249766cbf2b635521c31509e0f05dbd5aa83cb
|
4310528fa617bf1fd498535858bd02e40eb45834
|
/venv/bin/easy_install
|
73392ec6d584aeeff44e80c7e4d72d15dafafa5d
|
[] |
no_license
|
lgergelyo/DHT11
|
d6f38df38e57b5ac6b2570373a18f6879a859852
|
b531722281f29fdaa954bd89fb4333ec49346362
|
refs/heads/master
| 2020-07-11T08:11:29.105044
| 2019-08-26T14:14:50
| 2019-08-26T14:14:50
| 204,485,805
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 439
|
#!/home/gergely/PycharmProjects/DHT11/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
|
[
"leandrotecnico@terra.com.br"
] |
leandrotecnico@terra.com.br
|
|
44f6551cecf87e0cc64db8a41ab7784033adc958
|
586e60b4bbf80e3da9c1051182a42cb81bb2ea1b
|
/scripts/generate-demo-users.py
|
787052a0fab94bece1059cc3565abb512a20e0bd
|
[
"Apache-2.0"
] |
permissive
|
DD-DeCaF/caffeine-bootstrap
|
daa0cb844fd694b87430451baee664d816e366a7
|
ec65cd5f135f86c7bf2faeb96930637e910c380f
|
refs/heads/master
| 2021-07-09T15:18:56.476754
| 2020-08-18T11:16:37
| 2020-08-18T11:16:37
| 161,489,310
| 1
| 0
|
Apache-2.0
| 2020-08-18T11:16:38
| 2018-12-12T13:03:41
|
Shell
|
UTF-8
|
Python
| false
| false
| 409
|
py
|
from iam.models import User, db
from iam.app import app, init_app
init_app(app, db)
app.app_context().push()
print("Adding user: demo@demo")
user = User(email="demo@demo")
user.set_password("demo")
db.session.add(user)
for i in range(40):
print(f"Adding user: demo{i}@demo (password demo)")
user = User(email=f"demo{i}@demo")
user.set_password("demo")
db.session.add(user)
db.session.commit()
|
[
"ali@kvikshaug.no"
] |
ali@kvikshaug.no
|
3c7c5139a5cd6dd8e33834de89b98fdd8bba4a33
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/length_20200529113854.py
|
76b776e2932e64a11975284ff9a772f9332ca676
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 431
|
py
|
def removeDuplicates(nums):
    # drop adjacent duplicates in place and return the new length
    i = 0
    while i < len(nums) - 1:
        if nums[i] == nums[i + 1]:
            del nums[i]
        else:
            i += 1
    return len(nums)

print(removeDuplicates([1, 1, 2]))  # prints 2, nums becomes [1, 2]
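# A common linear-time alternative, sketched under the assumption that the
# input is sorted (as in the classic "remove duplicates from sorted array"
# problem):
def remove_duplicates_two_pointer(nums):
    write = 0
    for read in range(len(nums)):
        if read == 0 or nums[read] != nums[read - 1]:
            nums[write] = nums[read]
            write += 1
    del nums[write:]  # truncate the leftover tail
    return write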
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
30980eca76f9208b779a5f3c5e0e65affab9eb1c
|
5e4897b32cd19d145cefc4451ced910313cde0bb
|
/sphinxextra/phpdomain.py
|
4380ed3ab6981611f85ff56abfe6880149f92879
|
[] |
no_license
|
Tinkerforge/doc
|
7e87edcf8d8b67d1edce749c4a3106f431a77585
|
19e49bad70fbe644aa9b4af4d64f99aa0cf71d7f
|
refs/heads/master
| 2023-08-20T22:10:37.363910
| 2023-08-17T13:33:28
| 2023-08-17T13:33:28
| 2,262,338
| 6
| 8
| null | 2023-07-24T13:46:27
| 2011-08-24T15:21:34
|
Python
|
UTF-8
|
Python
| false
| false
| 34,922
|
py
|
# -*- coding: utf-8 -*-
"""
sphinx.domains.php
~~~~~~~~~~~~~~~~~~
The PHP language domain.
:copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from copy import deepcopy
from docutils import nodes
from sphinx import addnodes
from sphinx.roles import XRefRole
from sphinx.locale import l_, _
from sphinx.domains import Domain, ObjType
from sphinx.directives import ObjectDescription
from sphinx.util.nodes import make_refnode
from sphinx.util.compat import Directive
from sphinx.util.docfields import TypedField
from sphinxextra.utils import fixup_index_entry
# Olaf: add [\[\]]*, remove \b to allow java arrays, add \. to allow Class1.Class2
#_identifier_re = re.compile(r'\b(~?[a-zA-Z_][a-zA-Z0-9_]*)\b')
_identifier_re = re.compile(r'\$?\b(~?[a-zA-Z_\$][a-zA-Z0-9_\.]*[\[\]]*)')
_whitespace_re = re.compile(r'\s+(?u)')
_string_re = re.compile(r"[LuU8]?('([^'\\]*(?:\\.[^'\\]*)*)'"
r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S)
_visibility_re = re.compile(r'\b(public|private|protected)\b')
_operator_re = re.compile(r'''(?x)
\[\s*\]
| \(\s*\)
| [!<>=/*%+|&^-]=?
| \+\+ | --
| (<<|>>)=? | ~ | && | \| | \|\|
| ->\*? | \,
''')
_id_shortwords = {
'char': 'c',
'signed char': 'c',
'unsigned char': 'C',
'int': 'i',
'signed int': 'i',
'unsigned int': 'U',
'long': 'l',
'signed long': 'l',
'unsigned long': 'L',
'bool': 'b',
'size_t': 's',
'std::string': 'ss',
'std::ostream': 'os',
'std::istream': 'is',
'std::iostream': 'ios',
'std::vector': 'v',
'std::map': 'm',
'operator[]': 'subscript-operator',
'operator()': 'call-operator',
'operator!': 'not-operator',
'operator<': 'lt-operator',
'operator<=': 'lte-operator',
'operator>': 'gt-operator',
'operator>=': 'gte-operator',
'operator=': 'assign-operator',
'operator/': 'div-operator',
'operator*': 'mul-operator',
'operator%': 'mod-operator',
'operator+': 'add-operator',
'operator-': 'sub-operator',
'operator|': 'or-operator',
'operator&': 'and-operator',
'operator^': 'xor-operator',
'operator&&': 'sand-operator',
'operator||': 'sor-operator',
'operator==': 'eq-operator',
'operator!=': 'neq-operator',
'operator<<': 'lshift-operator',
'operator>>': 'rshift-operator',
'operator-=': 'sub-assign-operator',
'operator+=': 'add-assign-operator',
    'operator*=': 'mul-assign-operator',
'operator/=': 'div-assign-operator',
'operator%=': 'mod-assign-operator',
'operator&=': 'and-assign-operator',
'operator|=': 'or-assign-operator',
'operator<<=': 'lshift-assign-operator',
'operator>>=': 'rshift-assign-operator',
'operator^=': 'xor-assign-operator',
'operator,': 'comma-operator',
'operator->': 'pointer-operator',
'operator->*': 'pointer-by-pointer-operator',
'operator~': 'inv-operator',
'operator++': 'inc-operator',
'operator--': 'dec-operator',
'operator new': 'new-operator',
'operator new[]': 'new-array-operator',
'operator delete': 'delete-operator',
'operator delete[]': 'delete-array-operator'
}
class DefinitionError(Exception):
def __init__(self, description):
self.description = description
def __unicode__(self):
return self.description
def __str__(self):
return unicode(self.encode('utf-8'))
class DefExpr(object):
def __unicode__(self):
raise NotImplementedError()
def __eq__(self, other):
if type(self) is not type(other):
return False
try:
for key, value in self.__dict__.iteritems():
                if value != getattr(other, key):
return False
except AttributeError:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def clone(self):
"""Close a definition expression node"""
return deepcopy(self)
def get_id(self):
"""Returns the id for the node"""
return u''
def get_name(self):
"""Returns the name. Returns either `None` or a node with
a name you might call :meth:`split_owner` on.
"""
return None
def split_owner(self):
"""Nodes returned by :meth:`get_name` can split off their
owning parent. This function returns the owner and the
name as a tuple of two items. If a node does not support
it, :exc:`NotImplementedError` is raised.
"""
raise NotImplementedError()
def prefix(self, prefix):
"""Prefixes a name node (a node returned by :meth:`get_name`)."""
raise NotImplementedError()
def __str__(self):
return unicode(self).encode('utf-8')
def __repr__(self):
return '<defexpr %s>' % self
class PrimaryDefExpr(DefExpr):
def get_name(self):
return self
def split_owner(self):
return None, self
def prefix(self, prefix):
if isinstance(prefix, PathDefExpr):
prefix = prefix.clone()
prefix.path.append(self)
return prefix
return PathDefExpr([prefix, self])
class NameDefExpr(PrimaryDefExpr):
def __init__(self, name):
self.name = name
def get_id(self):
name = _id_shortwords.get(self.name)
if name is not None:
return name
return self.name.replace(u' ', u'-')
def __unicode__(self):
return unicode(self.name)
class PathDefExpr(PrimaryDefExpr):
def __init__(self, parts):
self.path = parts
def get_id(self):
rv = u'::'.join(x.get_id() for x in self.path)
return _id_shortwords.get(rv, rv)
def split_owner(self):
if len(self.path) > 1:
return PathDefExpr(self.path[:-1]), self.path[-1]
return None, self
def prefix(self, prefix):
if isinstance(prefix, PathDefExpr):
prefix = prefix.clone()
prefix.path.extend(self.path)
return prefix
return PathDefExpr([prefix] + self.path)
def __unicode__(self):
return u'::'.join(map(unicode, self.path))
class TemplateDefExpr(PrimaryDefExpr):
def __init__(self, typename, args):
self.typename = typename
self.args = args
def split_owner(self):
owner, typename = self.typename.split_owner()
return owner, TemplateDefExpr(typename, self.args)
def get_id(self):
return u'%s:%s:' % (self.typename.get_id(),
u'.'.join(x.get_id() for x in self.args))
def __unicode__(self):
return u'%s<%s>' % (self.typename, u', '.join(map(unicode, self.args)))
class WrappingDefExpr(DefExpr):
def __init__(self, typename):
self.typename = typename
def get_name(self):
return self.typename.get_name()
class ModifierDefExpr(WrappingDefExpr):
def __init__(self, typename, modifiers):
WrappingDefExpr.__init__(self, typename)
self.modifiers = modifiers
def get_id(self):
pieces = [_id_shortwords.get(unicode(x), unicode(x))
for x in self.modifiers]
pieces.append(self.typename.get_id())
return u'-'.join(pieces)
def __unicode__(self):
return u' '.join(map(unicode, list(self.modifiers) + [self.typename]))
class PtrDefExpr(WrappingDefExpr):
def get_id(self):
return self.typename.get_id() + u'P'
def __unicode__(self):
return u'%s*' % self.typename
class RefDefExpr(WrappingDefExpr):
def get_id(self):
return self.typename.get_id() + u'R'
def __unicode__(self):
return u'%s&' % self.typename
class ConstDefExpr(WrappingDefExpr):
def __init__(self, typename, prefix=False):
WrappingDefExpr.__init__(self, typename)
self.prefix = prefix
def get_id(self):
return self.typename.get_id() + u'C'
def __unicode__(self):
return (self.prefix and u'const %s' or u'%s const') % self.typename
class CastOpDefExpr(PrimaryDefExpr):
def __init__(self, typename):
self.typename = typename
def get_id(self):
return u'castto-%s-operator' % self.typename.get_id()
def __unicode__(self):
return u'operator %s' % self.typename
class ArgumentDefExpr(DefExpr):
def __init__(self, type, name, default=None):
self.name = name
self.type = type
self.default = default
def get_name(self):
return self.name.get_name()
def get_id(self):
if self.type is None:
return 'X'
return self.type.get_id()
def __unicode__(self):
return (self.type is not None and u'%s %s' % (self.type, self.name)
or unicode(self.name)) + (self.default is not None and
u'=%s' % self.default or u'')
class NamedDefExpr(DefExpr):
def __init__(self, name, visibility, static):
self.name = name
self.visibility = visibility
self.static = static
def get_name(self):
return self.name.get_name()
def get_modifiers(self):
rv = []
if self.visibility != 'public':
rv.append(self.visibility)
if self.static:
rv.append(u'static')
return rv
class TypeObjDefExpr(NamedDefExpr):
def __init__(self, name, visibility, static, typename):
NamedDefExpr.__init__(self, name, visibility, static)
self.typename = typename
def get_id(self):
if self.typename is None:
return self.name.get_id()
return u'%s__%s' % (self.name.get_id(), self.typename.get_id())
def __unicode__(self):
buf = self.get_modifiers()
if self.typename is None:
buf.append(unicode(self.name))
else:
buf.extend(map(unicode, (self.typename, self.name)))
return u' '.join(buf)
class MemberObjDefExpr(NamedDefExpr):
def __init__(self, name, visibility, static, typename, value):
NamedDefExpr.__init__(self, name, visibility, static)
self.typename = typename
self.value = value
def get_id(self):
return u'%s__%s' % (self.name.get_id(), self.typename.get_id())
def __unicode__(self):
buf = self.get_modifiers()
buf.append(u'%s %s' % (self.typename, self.name))
if self.value is not None:
buf.append(u'= %s' % self.value)
return u' '.join(buf)
class FuncDefExpr(NamedDefExpr):
def __init__(self, name, visibility, static, explicit, rv,
signature, const, pure_virtual):
NamedDefExpr.__init__(self, name, visibility, static)
self.rv = rv
self.signature = signature
self.explicit = explicit
self.const = const
self.pure_virtual = pure_virtual
def get_id(self):
return u'%s%s%s' % (
self.name.get_id(),
self.signature and u'__' +
u'.'.join(x.get_id() for x in self.signature) or u'',
self.const and u'C' or u''
)
def __unicode__(self):
buf = self.get_modifiers()
if self.explicit:
buf.append(u'explicit')
if self.rv is not None:
buf.append(unicode(self.rv))
buf.append(u'%s(%s)' % (self.name, u', '.join(
map(unicode, self.signature))))
if self.const:
buf.append(u'const')
if self.pure_virtual:
buf.append(u'= 0')
return u' '.join(buf)
class ClassDefExpr(NamedDefExpr):
def __init__(self, name, visibility, static):
NamedDefExpr.__init__(self, name, visibility, static)
def get_id(self):
return self.name.get_id()
def __unicode__(self):
buf = self.get_modifiers()
buf.append(unicode(self.name))
return u' '.join(buf)
class DefinitionParser(object):
# mapping of valid type modifiers. if the set is None it means
# the modifier can prefix all types, otherwise only the types
# (actually more keywords) in the set. Also check
# _guess_typename when changing this.
_modifiers = {
'volatile': None,
'register': None,
'mutable': None,
'const': None,
'typename': None,
'unsigned': set(('char', 'int', 'long')),
'signed': set(('char', 'int', 'long')),
'short': set(('int', 'short')),
'long': set(('int', 'long', 'double'))
}
def __init__(self, definition):
self.definition = definition.strip()
self.pos = 0
self.end = len(self.definition)
self.last_match = None
self._previous_state = (0, None)
def fail(self, msg):
raise DefinitionError('Invalid definition: %s [error at %d]\n %s' %
(msg, self.pos, self.definition))
def match(self, regex):
match = regex.match(self.definition, self.pos)
if match is not None:
self._previous_state = (self.pos, self.last_match)
self.pos = match.end()
self.last_match = match
return True
return False
def backout(self):
self.pos, self.last_match = self._previous_state
def skip_string(self, string):
strlen = len(string)
if self.definition[self.pos:self.pos + strlen] == string:
self.pos += strlen
return True
return False
def skip_word(self, word):
return self.match(re.compile(r'\b%s\b' % re.escape(word)))
def skip_ws(self):
return self.match(_whitespace_re)
@property
def eof(self):
return self.pos >= self.end
@property
def current_char(self):
try:
return self.definition[self.pos]
except IndexError:
return 'EOF'
@property
def matched_text(self):
if self.last_match is not None:
return self.last_match.group()
def _parse_operator(self):
self.skip_ws()
# thank god, a regular operator definition
if self.match(_operator_re):
return NameDefExpr('operator' +
_whitespace_re.sub('', self.matched_text))
# new/delete operator?
for allocop in 'new', 'delete':
if not self.skip_word(allocop):
continue
self.skip_ws()
if self.skip_string('['):
self.skip_ws()
if not self.skip_string(']'):
self.fail('expected "]" for ' + allocop)
allocop += '[]'
return NameDefExpr('operator ' + allocop)
# oh well, looks like a cast operator definition.
# In that case, eat another type.
type = self._parse_type()
return CastOpDefExpr(type)
def _parse_name(self):
if not self.match(_identifier_re):
print self.definition, self.pos
self.fail('expected name')
identifier = self.matched_text
# strictly speaking, operators are not regular identifiers
# but because operator is a keyword, it might not be used
# for variable names anyways, so we can safely parse the
# operator here as identifier
if identifier == 'operator':
return self._parse_operator()
return NameDefExpr(identifier)
def _guess_typename(self, path):
if not path:
return [], 'int'
# for the long type, we don't want the int in there
if 'long' in path:
path = [x for x in path if x != 'int']
# remove one long
path.remove('long')
return path, 'long'
if path[-1] in ('int', 'char'):
return path[:-1], path[-1]
return path, 'int'
def _attach_crefptr(self, expr, is_const=False):
if is_const:
expr = ConstDefExpr(expr, prefix=True)
while 1:
self.skip_ws()
if self.skip_word('const'):
expr = ConstDefExpr(expr)
elif self.skip_string('*'):
expr = PtrDefExpr(expr)
elif self.skip_string('&'):
expr = RefDefExpr(expr)
else:
return expr
def _peek_const(self, path):
try:
path.remove('const')
return True
except ValueError:
return False
def _parse_builtin(self, modifier):
path = [modifier]
following = self._modifiers[modifier]
while 1:
self.skip_ws()
if not self.match(_identifier_re):
break
identifier = self.matched_text
if identifier in following:
path.append(identifier)
following = self._modifiers[modifier]
assert following
else:
self.backout()
break
is_const = self._peek_const(path)
modifiers, typename = self._guess_typename(path)
# Olaf: don't use typename (this makes "short int" from "short" etc)
if typename != 'long':
typename = ''
rv = ModifierDefExpr(NameDefExpr(typename), modifiers)
return self._attach_crefptr(rv, is_const)
def _parse_type_expr(self):
typename = self._parse_name()
self.skip_ws()
if not self.skip_string('<'):
return typename
args = []
while 1:
self.skip_ws()
if self.skip_string('>'):
break
if args:
if not self.skip_string(','):
self.fail('"," or ">" in template expected')
self.skip_ws()
args.append(self._parse_type(True))
return TemplateDefExpr(typename, args)
def _parse_type(self, in_template=False):
self.skip_ws()
result = []
modifiers = []
# if there is a leading :: or not, we don't care because we
# treat them exactly the same. Buf *if* there is one, we
# don't have to check for type modifiers
if not self.skip_string('::'):
self.skip_ws()
while self.match(_identifier_re):
modifier = self.matched_text
if modifier in self._modifiers:
following = self._modifiers[modifier]
# if the set is not none, there is a limited set
# of types that might follow. It is technically
# impossible for a template to follow, so what
# we do is go to a different function that just
# eats types
if following is not None:
return self._parse_builtin(modifier)
modifiers.append(modifier)
else:
self.backout()
break
while 1:
self.skip_ws()
if (in_template and self.current_char in ',>') or \
(result and not self.skip_string('::')) or \
self.eof:
break
result.append(self._parse_type_expr())
if not result:
self.fail('expected type')
if len(result) == 1:
rv = result[0]
else:
rv = PathDefExpr(result)
is_const = self._peek_const(modifiers)
if modifiers:
rv = ModifierDefExpr(rv, modifiers)
return self._attach_crefptr(rv, is_const)
def _parse_default_expr(self):
self.skip_ws()
if self.match(_string_re):
return self.matched_text
idx1 = self.definition.find(',', self.pos)
idx2 = self.definition.find(')', self.pos)
if idx1 < 0:
idx = idx2
elif idx2 < 0:
idx = idx1
else:
idx = min(idx1, idx2)
if idx < 0:
self.fail('unexpected end in default expression')
rv = self.definition[self.pos:idx]
self.pos = idx
return rv
def _parse_signature(self):
self.skip_ws()
if not self.skip_string('('):
self.fail('expected parentheses for function')
args = []
while 1:
self.skip_ws()
if self.eof:
self.fail('missing closing parentheses')
if self.skip_string(')'):
break
if args:
if not self.skip_string(','):
self.fail('expected comma between arguments')
self.skip_ws()
argname = self._parse_type()
argtype = default = None
self.skip_ws()
if self.skip_string('='):
self.pos += 1
default = self._parse_default_expr()
elif self.current_char not in ',)':
argtype = argname
argname = self._parse_name()
self.skip_ws()
if self.skip_string('='):
default = self._parse_default_expr()
args.append(ArgumentDefExpr(argtype, argname, default))
self.skip_ws()
const = self.skip_word('const')
if const:
self.skip_ws()
if self.skip_string('='):
self.skip_ws()
if not (self.skip_string('0') or \
self.skip_word('NULL') or \
self.skip_word('nullptr')):
self.fail('pure virtual functions must be defined with '
'either 0, NULL or nullptr, other macros are '
'not allowed')
pure_virtual = True
else:
pure_virtual = False
return args, const, pure_virtual
def _parse_visibility_static(self):
visibility = ''
if self.match(_visibility_re):
visibility = self.matched_text
static = self.skip_word('static')
return visibility, static
def parse_type(self):
return self._parse_type()
def parse_type_object(self):
visibility, static = self._parse_visibility_static()
typename = self._parse_type()
self.skip_ws()
if not self.eof:
name = self._parse_type()
else:
name = typename
typename = None
return TypeObjDefExpr(name, visibility, static, typename)
def parse_member_object(self):
visibility, static = self._parse_visibility_static()
typename = self._parse_type()
name = self._parse_type()
self.skip_ws()
if self.skip_string('='):
value = self.read_rest().strip()
else:
value = None
return MemberObjDefExpr(name, visibility, static, typename, value)
def parse_function(self):
visibility, static = self._parse_visibility_static()
if self.skip_word('explicit'):
explicit = True
self.skip_ws()
else:
explicit = False
rv = self._parse_type()
self.skip_ws()
# some things just don't have return values
if self.current_char == '(':
name = rv
rv = None
else:
name = self._parse_type()
return FuncDefExpr(name, visibility, static, explicit, rv,
*self._parse_signature())
def parse_class(self):
visibility, static = self._parse_visibility_static()
return ClassDefExpr(self._parse_type(), visibility, static)
def read_rest(self):
rv = self.definition[self.pos:]
self.pos = self.end
return rv
def assert_end(self):
self.skip_ws()
if not self.eof:
self.fail('expected end of definition, got %r' %
self.definition[self.pos:])
class PHPObject(ObjectDescription):
"""Description of a PHP language object."""
def attach_name(self, node, name):
owner, name = name.split_owner()
varname = unicode(name)
if owner is not None:
owner = unicode(owner) + '::'
node += addnodes.desc_addname(owner, owner)
node += addnodes.desc_name(varname, varname)
def attach_type(self, node, type):
# XXX: link to c?
text = unicode(type)
pnode = addnodes.pending_xref(
'', refdomain='php', reftype='type',
reftarget=text, modname=None, classname=None)
pnode['php:parent'] = self.env.temp_data.get('php:parent')
pnode += nodes.Text(text)
node += pnode
def attach_modifiers(self, node, obj):
if obj.visibility != 'public':
node += addnodes.desc_annotation(obj.visibility,
obj.visibility)
node += nodes.Text(' ')
if obj.static:
node += addnodes.desc_annotation('static', 'static')
node += nodes.Text(' ')
def add_target_and_index(self, sigobj, sig, signode):
theid = sigobj.get_id()
name = unicode(sigobj.name)
signode['names'].append(theid)
signode['ids'].append(theid)
signode['first'] = (not self.names)
self.state.document.note_explicit_target(signode)
self.env.domaindata['php']['objects'].setdefault(name,
(self.env.docname, self.objtype, theid))
indextext = self.get_index_text(name)
if indextext:
self.indexnode['entries'].append(fixup_index_entry(('single', indextext, name, name, 'foobar')))
def before_content(self):
lastname = self.names and self.names[-1]
if lastname and not self.env.temp_data.get('php:parent'):
assert isinstance(lastname, NamedDefExpr)
self.env.temp_data['php:parent'] = lastname.name
self.parentname_set = True
else:
self.parentname_set = False
def after_content(self):
if self.parentname_set:
self.env.temp_data['php:parent'] = None
def parse_definition(self, parser):
raise NotImplementedError()
def describe_signature(self, signode, arg):
raise NotImplementedError()
def handle_signature(self, sig, signode):
parser = DefinitionParser(sig)
try:
rv = self.parse_definition(parser)
parser.assert_end()
except DefinitionError, e:
self.env.warn(self.env.docname,
e.description, self.lineno)
raise ValueError
self.describe_signature(signode, rv)
parent = self.env.temp_data.get('php:parent')
if parent is not None:
rv = rv.clone()
rv.name = rv.name.prefix(parent)
return rv
class PHPClassObject(PHPObject):
def get_index_text(self, name):
return _('%s (PHP class)') % name
def parse_definition(self, parser):
return parser.parse_class()
def describe_signature(self, signode, cls):
self.attach_modifiers(signode, cls)
signode += addnodes.desc_annotation('class ', 'class ')
self.attach_name(signode, cls.name)
class PHPTypeObject(PHPObject):
def get_index_text(self, name):
if self.objtype == 'type':
return _('%s (PHP type)') % name
return ''
def parse_definition(self, parser):
return parser.parse_type_object()
def describe_signature(self, signode, obj):
self.attach_modifiers(signode, obj)
signode += addnodes.desc_annotation('type ', 'type ')
if obj.typename is not None:
self.attach_type(signode, obj.typename)
signode += nodes.Text(' ')
self.attach_name(signode, obj.name)
class PHPMemberObject(PHPObject):
def get_index_text(self, name):
if self.objtype == 'member':
return _('%s (PHP member)') % name
return ''
def parse_definition(self, parser):
return parser.parse_member_object()
def describe_signature(self, signode, obj):
self.attach_modifiers(signode, obj)
self.attach_type(signode, obj.typename)
signode += nodes.Text(' ')
self.attach_name(signode, obj.name)
if obj.value is not None:
signode += nodes.Text(u' = ' + obj.value)
class PHPFunctionObject(PHPObject):
def attach_function(self, node, func):
owner, name = func.name.split_owner()
if owner is not None:
owner = unicode(owner) + '::'
node += addnodes.desc_addname(owner, owner)
# cast operator is special. in this case the return value
# is reversed.
if isinstance(name, CastOpDefExpr):
node += addnodes.desc_name('operator', 'operator')
node += nodes.Text(u' ')
self.attach_type(node, name.typename)
else:
funcname = unicode(name)
node += addnodes.desc_name(funcname, funcname)
paramlist = addnodes.desc_parameterlist()
for arg in func.signature:
param = addnodes.desc_parameter('', '', noemph=True)
if arg.type is not None:
self.attach_type(param, arg.type)
param += nodes.Text(u' ')
param += nodes.emphasis(unicode(arg.name), unicode(arg.name))
if arg.default is not None:
def_ = u'=' + unicode(arg.default)
param += nodes.emphasis(def_, def_)
paramlist += param
node += paramlist
if func.const:
node += addnodes.desc_addname(' const', ' const')
if func.pure_virtual:
node += addnodes.desc_addname(' = 0', ' = 0')
def get_index_text(self, name):
return _('%s (PHP function)') % name
def parse_definition(self, parser):
return parser.parse_function()
def describe_signature(self, signode, func):
self.attach_modifiers(signode, func)
if func.explicit:
signode += addnodes.desc_annotation('explicit', 'explicit')
signode += nodes.Text(' ')
# return value is None for things with a reverse return value
# such as casting operator definitions or constructors
# and destructors.
if func.rv is not None:
self.attach_type(signode, func.rv)
signode += nodes.Text(u' ')
self.attach_function(signode, func)
class PHPCurrentNamespace(Directive):
"""This directive is just to tell Sphinx that we're documenting
stuff in namespace foo.
"""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {}
def run(self):
env = self.state.document.settings.env
if self.arguments[0].strip() in ('NULL', '0', 'nullptr'):
env.temp_data['php:prefix'] = None
else:
parser = DefinitionParser(self.arguments[0])
try:
prefix = parser.parse_type()
parser.assert_end()
except DefinitionError, e:
self.env.warn(self.env.docname,
e.description, self.lineno)
else:
env.temp_data['php:prefix'] = prefix
return []
class PHPXRefRole(XRefRole):
def process_link(self, env, refnode, has_explicit_title, title, target):
refnode['php:parent'] = env.temp_data.get('php:parent')
if not has_explicit_title:
target = target.lstrip('~') # only has a meaning for the title
# if the first character is a tilde, don't display the module/class
# parts of the contents
if title[:1] == '~':
title = title[1:]
dcolon = title.rfind('::')
if dcolon != -1:
title = title[dcolon + 2:]
return title, target
class PHPDomain(Domain):
"""PHP language domain."""
name = 'php'
label = 'PHP'
object_types = {
'class': ObjType(l_('class'), 'class'),
'function': ObjType(l_('function'), 'func'),
'member': ObjType(l_('member'), 'member'),
'type': ObjType(l_('type'), 'type')
}
directives = {
'class': PHPClassObject,
'function': PHPFunctionObject,
'member': PHPMemberObject,
'type': PHPTypeObject,
'namespace': PHPCurrentNamespace
}
roles = {
'class': PHPXRefRole(),
'func' : PHPXRefRole(fix_parens=True),
'member': PHPXRefRole(),
'type': PHPXRefRole()
}
initial_data = {
'objects': {}, # fullname -> docname, objtype
}
def clear_doc(self, docname):
for fullname, (fn, _, _) in self.data['objects'].items():
if fn == docname:
del self.data['objects'][fullname]
def resolve_xref(self, env, fromdocname, builder,
typ, target, node, contnode):
def _create_refnode(expr):
name = unicode(expr)
if name not in self.data['objects']:
return None
obj = self.data['objects'][name]
if obj[1] not in self.objtypes_for_role(typ):
return None
return make_refnode(builder, fromdocname, obj[0], obj[2],
contnode, name)
parser = DefinitionParser(target)
# XXX: warn?
try:
expr = parser.parse_type().get_name()
parser.skip_ws()
if not parser.eof or expr is None:
return None
except DefinitionError:
return None
parent = node['php:parent']
rv = _create_refnode(expr)
if rv is not None or parent is None:
return rv
parent = parent.get_name()
rv = _create_refnode(expr.prefix(parent))
if rv is not None:
return rv
parent, name = parent.split_owner()
return _create_refnode(expr.prefix(parent))
def get_objects(self):
for refname, (docname, type, theid) in self.data['objects'].iteritems():
yield (refname, refname, type, docname, refname, 1)
def setup(app):
app.add_domain(PHPDomain)
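# Enabling sketch (an assumption: the module is importable as
# sphinxextra.phpdomain, matching the package used in the imports above).
# In conf.py:
#
#     extensions = ['sphinxextra.phpdomain']
#
# after which the directives and roles registered above are available in
# reStructuredText:
#
#     .. php:function:: int add(int a, int b)
#
#     See :php:func:`add`.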
|
[
"matthias@tinkerforge.com"
] |
matthias@tinkerforge.com
|
4dcb1a63e7effceb8e87d2579849844a5dcaecbe
|
d9eb21a408a449918ed431f760b6a61292869de6
|
/Workshops/custom_list/test_custom_list.py
|
ba4bb2591f76ff8946987ea4c1a7891db8355939
|
[] |
no_license
|
zhyordanova/Python-OOP
|
5c73ab851848c969beb50b774b67bc9e4c102610
|
aad42e108b676de119ac99bef632b76ac595d49a
|
refs/heads/main
| 2023-05-27T06:09:23.524422
| 2021-05-06T22:00:18
| 2021-05-06T22:00:18
| 349,583,825
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,477
|
py
|
from unittest import TestCase
from lists.custom_list import ArrayList
class TestArrayList(TestCase):
def setUp(self):
self.al = ArrayList()
def test_append__when_list_is_empty__expect_append_to_the_end(self):
self.al.append(1)
values = list(self.al)
self.assertEqual([1], values)
def test_append__expect_to_return_the_list(self):
result = self.al.append(1)
self.assertEqual(self.al, result)
def test_append__when_list_not_empty__expect_append_to_the_end(self):
self.al.append(1)
self.al.append(2)
self.al.append(3)
values = list(self.al)
self.assertEqual([1, 2, 3], values)
def test_append__1024_values__expect_append_to_the_end(self):
values = [x for x in range(1024)]
[self.al.append(x) for x in values]
list_value = list(self.al)
self.assertEqual(values, list_value)
def test_append__expect_to_increase_size(self):
self.al.append(1)
self.assertEqual(1, self.al.size())
def test_remove__when_index_is_valid__expect_remove_value_and_return_it(self):
self.al.append(1)
self.al.append(2)
self.al.append(333)
self.al.append(4)
result = self.al.remove(2)
self.assertEqual([1, 2, 4], list(self.al))
self.assertEqual(333, result)
def test_remove__when_index_is_invalid__expect_to_raise(self):
self.al.append(1)
self.al.append(2)
self.al.append(3)
self.al.append(4)
with self.assertRaises(IndexError):
self.al.remove(self.al.size())
def test_get__when_index_is_valid__expect_to_return_it(self):
self.al.append(1)
self.al.append(2)
self.al.append(333)
self.al.append(4)
result = self.al.get(2)
self.assertEqual(333, result)
def test_get__when_index_is_invalid__expect_to_raise(self):
self.al.append(1)
self.al.append(2)
self.al.append(3)
self.al.append(4)
with self.assertRaises(IndexError):
self.al.get(self.al.size())
    def test_extend_with_empty_iterable__expect_to_be_same(self):
self.al.append(1)
self.al.extend([])
self.assertEqual([1], list(self.al))
    def test_extend_with_list_iterable__expect_to_append_the_list(self):
self.al.append(1)
self.al.extend([2])
self.assertEqual([1, 2], list(self.al))
    def test_extend_with_generator__expect_to_append_the_list(self):
self.al.append(1)
self.al.extend((x for x in range(1)))
self.assertEqual([1, 0], list(self.al))
def test_extend_when_empty__expect_to_append_to_list(self):
self.al.append(1)
self.al.extend([1])
self.assertEqual([1, 1], list(self.al))
    def test_extend_with_no_iterable__expect_to_raise(self):
self.al.append(1)
with self.assertRaises(ValueError):
self.al.extend(2)
def test_insert__when_index_is_valid__expect_to_place_value_at_index(self):
self.al.append(1)
self.al.append(2)
self.al.append(4)
self.al.append(5)
self.al.append(6)
self.al.append(7)
self.al.append(8)
self.al.append(9)
self.al.insert(2, 333)
self.assertEqual([1, 2, 333, 4, 5, 6, 7, 8, 9], list(self.al))
def test_insert__when_index_is_invalid__expect_to_raise(self):
self.al.append(1)
self.al.append(2)
self.al.append(3)
with self.assertRaises(IndexError):
self.al.insert(self.al.size() + 1, 2)
def test_pop__expect_to_remove_last_element_and_return_it(self):
self.al.append(1)
self.al.append(2)
self.al.append(3)
self.al.append(4)
result = self.al.pop()
self.assertEqual(4, result)
self.assertEqual([1, 2, 3], list(self.al))
def test_pop__when_empty__expect_to_raise(self):
with self.assertRaises(IndexError):
self.al.pop()
def test_clear__expect_to_be_empty(self):
[self.al.append(x) for x in range(15)]
self.al.clear()
self.assertEqual([], list(self.al))
def test_index__when_item_is_present__expect_return_correct_index(self):
[self.al.append(x) for x in range(15)]
index = self.al.index(5)
self.assertEqual(5, index)
def test_index__when_item_is_not_present__expect_raise(self):
[self.al.append(x) for x in range(15)]
with self.assertRaises(ValueError):
self.al.index(17)
def test_count__when_item_is_present_one_time__expected_to_return_1(self):
[self.al.append(x) for x in range(15)]
expected_count = 1
actual_count = self.al.count(5)
self.assertEqual(expected_count, actual_count)
def test_count__when_item_is_present_multiple_times__expected_to_return_correct_count(self):
[self.al.append(x) for x in range(15)]
self.al.append(5)
self.al.insert(3, 5)
self.al.insert(7, 5)
self.al.insert(1, 5)
self.al.insert(9, 5)
expected_count = 6
actual_count = self.al.count(5)
self.assertEqual(expected_count, actual_count)
    def test_count__when_item_is_present_multiple_times_and_once_popped__expected_to_return_correct_count(self):
[self.al.append(x) for x in range(15)]
self.al.insert(3, 5)
self.al.insert(7, 5)
self.al.insert(1, 5)
self.al.insert(9, 5)
self.al.append(5)
self.al.pop()
expected_count = 5
actual_count = self.al.count(5)
self.assertEqual(expected_count, actual_count)
def test_count__when_item_is_not_present__expected_to_return_0(self):
[self.al.append(x) for x in range(15)]
expected_count = 0
actual_count = self.al.count(55)
self.assertEqual(expected_count, actual_count)
def test_reversed__expect_in_reversed_order(self):
[self.al.append(x) for x in range(5)]
expected = [x for x in range(4, -1, -1)]
actual = self.al.reverse()
self.assertEqual(expected, actual)
def test_copy__expect_to_return_another_list_with_same_value(self):
[self.al.append(x) for x in range(5)]
copied_list = self.al.copy()
expected_result = [x for x in range(5)]
actual_result = list(copied_list)
self.assertNotEqual(copied_list, self.al)
self.assertEqual(expected_result, actual_result)
def test_add_first__when_empty__expect_to_add(self):
self.al.add_first(1)
self.assertListEqual([1], list(self.al))
def test_add_first__when_non_empty__expect_to_add(self):
[self.al.append(x) for x in range(5)]
self.al.add_first(1)
self.assertListEqual([1, 0, 1, 2, 3, 4], list(self.al))
def test_dictionize__when_empty__expect_dict(self):
expected = {}
actual = self.al.dictionize()
self.assertEqual(expected, actual)
    def test_dictionize__when_even_elements_count_expect_correct_result(self):
self.al.append(1)
self.al.append(2)
self.al.append(3)
self.al.append(4)
expected = {
1: 2,
3: 4,
}
actual = self.al.dictionize()
self.assertEqual(expected, actual)
    def test_dictionize__when_odd_elements_count_expect_correct_result(self):
self.al.append(1)
self.al.append(2)
self.al.append(3)
self.al.append(4)
self.al.append(5)
expected = {
1: 2,
3: 4,
5: ' ',
}
actual = self.al.dictionize()
self.assertEqual(expected, actual)
def test_move_list_empty__expect_to_move_nothing(self):
self.al.move(1)
self.assertEqual([], list(self.al))
def test_move__when_moving_1_element__expect_to_move_1_element(self):
self.al.append(1)
self.al.append(2)
self.al.append(3)
self.al.append(4)
self.al.move(1)
self.assertEqual([2, 3, 4, 1], list(self.al))
def test_move__when_moving_3_elements__expect_to_move_3_elements(self):
self.al.append(1)
self.al.append(2)
self.al.append(3)
self.al.append(4)
self.al.move(3)
self.assertEqual([4, 1, 2, 3], list(self.al))
def test_move__when_moving_3_values_and_have_2_values__expect_to_move_3_value_from_the_start_to_the_end(self):
self.al.append(1)
self.al.append(2)
self.al.move(3)
self.assertEqual([2, 1], list(self.al))
def test_sum__when_values__expected_to_return_correct_sum(self):
self.al.append(1)
self.al.append('2')
self.al.append(3)
expected = 5
actual = self.al.sum()
self.assertEqual(expected, actual)
def test_sum__when_empty_expected_to_return_0(self):
self.assertEqual(0, self.al.sum())
    def test_overbound__expect_to_return_max_value(self):
values = [x for x in range(15)]
[self.al.append(x) for x in values]
expected = max(values)
actual = self.al.overbound()
self.assertEqual(expected, actual)
def test_underbound__expect_to_return_min_value(self):
values = [x for x in range(15)]
[self.al.append(x) for x in values]
expected = min(values)
actual = self.al.underbound()
self.assertEqual(expected, actual)
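
# --- Hypothetical reference sketch (added; not part of the original file) ---
# A partial ArrayList reconstructed purely from the assertions above, to show
# the behaviour these tests pin down; the real lists/custom_list.py may differ.
class _ArrayListSketch:
    def __init__(self):
        self._data = []

    def __iter__(self):
        return iter(self._data)

    def append(self, value):
        self._data.append(value)
        return self  # the tests assert that append() returns the list itself

    def size(self):
        return len(self._data)

    def move(self, n):
        # rotate the first n elements to the back, wrapping past the end
        if self._data:
            n %= len(self._data)
            self._data = self._data[n:] + self._data[:n]

    def dictionize(self):
        # pair consecutive elements as key/value; odd lengths get ' ' as filler
        items = self._data + [' '] * (len(self._data) % 2)
        return {items[i]: items[i + 1] for i in range(0, len(items), 2)}

    def sum(self):
        # numbers contribute their value, non-numbers their length
        # (this is how 1 + '2' + 3 can total 5 in the sum test above)
        return sum(v if isinstance(v, (int, float)) else len(v) for v in self._data)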
|
[
"zhyordanova88@gmail.com"
] |
zhyordanova88@gmail.com
|
026745467476e61080f1b8483e76fc80ed91ca82
|
8f337d7a1477eb9878bd252f45fadd967ba5dbbe
|
/run_galfit_disk_only.py
|
62c3df5903da86c2f2a4574520757cfb091c1fa8
|
[] |
no_license
|
bpRsh/b1_research_lib
|
bd4c293946329ea96d0fb37d8769aaa83d1ca15d
|
1de77f683b3ba18a1ab142b0fe86114c7a67791a
|
refs/heads/master
| 2021-01-15T19:04:32.177465
| 2020-11-23T19:55:34
| 2020-11-23T19:55:34
| 99,805,200
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,674
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-#
#
# Author : Bhishan Poudel; Physics Graduate Student, Ohio University
# Date : 26-Oct-2016 13:10
# Last update : Dec 15, 2016
# Est time : 3 min for one galaxy one filter.
# Main commands : rm -r imgblock.fits subcomps.fit ; galfit expdisk_devauc.sh
# galfit -o3 galfit.01 && rm -r galfit.01
# ds9 -multiframe imgblock.fits subcomps.fits &
# Imports
from __future__ import division, unicode_literals, print_function
import subprocess
import os
import time
from string import ascii_uppercase
import astropy.io
from astropy.io import fits
from astropy.io.fits import getdata
from astropy.io.fits import getheader
from astropy.io.fits import getval
paramfile = r'expdisk_devauc.sh'
def replace_galfit_param(name, value, object_num=1, fixed=True):
"""Replace input galfit parameter file with new configuration.
Arguments:
name : parameter name, e.g. A-P, 1-10, 'Z'
value: new value for the parameter in string form. e.g. '20.0'
object_num: For A-Z object_num is 1
For objects, object_num starts from 1.
fixed: True means parameter will be fixed (0) during fitting.
NOTE: Keep fixed = False while using this function to vary the parameter.
"""
name, value = str(name), str(value)
with open(paramfile) as f:
gf_file = f.readlines()
# Location of param.
# 3rd column is where one can hold the parameters fixed (0) or allow vary 1
loc = [i for i in range(len(gf_file)) if
gf_file[i].strip().startswith(name + ')')][object_num - 1]
param_str = gf_file[loc]
comment = param_str.find('#')
if name in ascii_uppercase:
fmt = '{}) {} {}'
param_str = fmt.format(name, value, param_str[comment:])
else:
fmt = '{}) {} {} {}'
param_str = fmt.format(name, value, '0' if fixed else '1',
param_str[comment:])
gf_file[loc] = param_str
with open(paramfile, 'w') as f:
f.writelines(gf_file)
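
# Example (illustrative, not from the original script): calling
# replace_galfit_param('3', '22.5', object_num=1, fixed=False) rewrites the
# first line starting with "3)" to "3) 22.5 1 # <original comment>",
# i.e. magnitude 22.5, free to vary during the fit.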
def run_galfit(galaxy, outdir, count):
"""Run galfit on the input galaxy and create model and residual images.
Runs galfit on the given input galaxies and creates model
and residue images in the output directory
galaxy : base name of input galaxy, e.g f606w or f814w
outdir : output directory, e.g. galfit_outputs
count : count number of galaxy, e.g. 0 for f606w_gal0.fits
Needs : galfit_outputs/two_components/bulge/
galfit_outputs/two_components/disk/
galfit_outputs/two_components/residual/
Note: 1. This program will also read the values of mag and rad from the
input fitsfile header, and updates the value in the
galfit paramfile 'sim2.feedme'.
2. it will also create the mask file using ic command.
"""
# galaxy = f606w or f814w
# path = '/Users/poudel/jedisim/simdatabase/colors'
path = '/Users/poudel/jedisim/simdatabase/galaxies'
ingal = path + '/' + galaxy + '_gal' + str(count) + '.fits'
psf = galaxy + '_psf.fits' # psf in the script directory
# get the value of magnitude, radius and mag0 of input galaxy
    try:
        mag = getval(ingal, 'MAG')
    except Exception:
        # fall back to a default magnitude when the header keyword is missing
        mag = 20.0
    try:
        rad = getval(ingal, 'RADIUS')
    except Exception:
        # fall back to a default radius when the header keyword is missing
        rad = 10.0
mag0 = getval(ingal, 'MAG0')
# create galfit paramfile according to the input galaxy
# For A-Z object_num is 1
# fixed=True means it is fixed and not changed
print("\n\n\n")
print('+' * 80)
print('+' * 80)
print('+' * 80)
print('{} {} {}'.format('Current Galaxy : ', ingal, ''))
print('+' * 80)
print('+' * 80)
print('+' * 80)
replace_galfit_param('A', ingal, object_num=1, fixed=False)
replace_galfit_param('D', psf, object_num=1, fixed=False)
replace_galfit_param('J', mag0, object_num=1, fixed=False)
replace_galfit_param('3', mag, object_num=1, fixed=False)
replace_galfit_param('4', rad, object_num=1, fixed=False)
replace_galfit_param('3', mag, object_num=2, fixed=False)
replace_galfit_param('4', rad, object_num=2, fixed=False)
# create mask file according to the input galaxy
cmd = "ic '1 0 %1 0 == ?' " + ingal + " > mask.fits"
subprocess.call(cmd, shell=True)
# For objects, object_num starts from 1
# 1 = expdisk, 2 = devauc
# run galfit
# rm -r imgblock.fits subcomps.fits galfit.01 # removes these files.
# galfit sim.feedme # gives galfit.01, imgblock.fits,if succeed.
# galfit -o3 galfit.01 # runs only when galfit.01 exists
    # we can delete galfit.01 immediately after it is used.
cmd1 = 'rm -r imgblock.fits; galfit ' + paramfile
cmd2 = 'rm -r subcomps.fits; galfit -o3 galfit.01; rm -r galfit.01'
print("\n\n\n")
print('*' * 80)
print('Running: {}'.format(cmd1))
print('*' * 80)
subprocess.call(cmd1, shell=True) # gives galfit.01 if succeed
if os.path.exists('galfit.01'):
print("\n\n\n")
print('!' * 80)
print('Running: {}'.format(cmd2))
print('!' * 80)
subprocess.call(cmd2, shell=True)
# get residual map from imgblock.fits
# residual = outdir + '/residual/' + galaxy + '_res' + str(count) + '.fits'
# get devauc and expdisk models from subcomps.fits
# galaxy = f606w or f814w
# devauc = bulge and expdisk+residual = disk
# devauc = outdir + '/bulge/' + galaxy + '_bulge' + str(count) + '.fits'
expdisk = outdir + galaxy + '_disk' +\
str(count) + '.fits'
    # extract frames from imgblock.fits and subcomps.fits if they exist.
if os.path.isfile('subcomps.fits') and os.path.isfile('imgblock.fits'):
# for imgblock.fits : 0 is empty, 1 is input, 2 is model, 3 is residual
# dat_res, hdr_res = fits.getdata(r'imgblock.fits', ext=3, header=True)
# for subcomps.fits: 0 is input, 1 is expdisk, 2 is devauc etc.
dat_exp, hdr_exp = fits.getdata(r'subcomps.fits', ext=1, header=True)
# dat_dev, hdr_dev = fits.getdata(r'subcomps.fits', ext=2, header=True)
# fits.writeto(expdisk, dat_exp, hdr_exp, clobber=False)
# fits.writeto(residual, dat_res, hdr_res, clobber=True)
# fits.writeto(devauc, dat_dev, hdr_dev, clobber=True)
fits.writeto(expdisk, dat_exp, hdr_exp, clobber=True)
# print('{} {} {}'.format('Output file: ', expdisk, ''))
# print('{} {} {}'.format('Output file: ', residual, ''))
# print('{} {} {}'.format('Output file: ', devauc, ''))
print('{} {} {}'.format('Output file: ', expdisk, ''))
def main():
"""Main program."""
    # output directory (the trailing '/' is required, since run_galfit
    # concatenates it directly with the output file name)
    # process galaxies 280 through 300 in both the f606w and f814w filters
galfit_outdir = 'disk_only_280_301/'
# there are 302 galaxies for each filter
# for count in list(range(101, 303)):
for count in range(280, 301):
run_galfit('f606w', galfit_outdir, count)
run_galfit('f814w', galfit_outdir, count)
if __name__ == '__main__':
# beginning time
program_begin_time = time.time()
begin_ctime = time.ctime()
# run main program
main()
# print the time taken
program_end_time = time.time()
end_ctime = time.ctime()
seconds = program_end_time - program_begin_time
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
print('\nBegin time: ', begin_ctime)
print('End time: ', end_ctime, '\n')
print("Time taken: {0:.0f} days, {1:.0f} hours, \
{2:.0f} minutes, {3:f} seconds.".format(d, h, m, s))
|
[
"bhishantryphysics@gmail.com"
] |
bhishantryphysics@gmail.com
|
b3d9caa16e9c29665a7edb9b7efabb1a3531e91d
|
e6f1e23409bfcba563dcfc9dbf6d19c5c99fc0d5
|
/linear regression.py
|
07283088d68c33d1727f08a7efb0b7811af865cf
|
[] |
no_license
|
AmiraHmd/gdML
|
4378fae056f5ff88cdd1a7d86c68c28f5d16e80d
|
a4e6c3f495d02c2b0c43700843290c89c30f2fc1
|
refs/heads/master
| 2022-10-24T01:13:48.355828
| 2020-06-24T10:12:35
| 2020-06-24T10:12:35
| 272,714,651
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,909
|
py
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
from sklearn.datasets import make_regression
import matplotlib.pyplot as plt
# In[2]:
x,y = make_regression(n_samples=100, n_features=1, noise=10)
plt.scatter(x,y)
# In[3]:
print(x.shape)
y=y.reshape(y.shape[0],1)
print(y.shape)
# In[4]:
# matrix X
X=np.hstack((x,np.ones(x.shape)))
X.shape
# In[5]:
theta=np.random.randn(2,1)
theta
# In[6]:
def model(X, theta):
return X.dot(theta)
# In[7]:
plt.scatter(x,y)
plt.plot(x, model(X, theta))
# In[8]:
def cost_function(X, y, theta):
m=len(y)
return 1/(2*m )* np.sum((model(X,theta)-y)**2)
# In[9]:
cost_function(X, y, theta)
# In[10]:
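# gradient of the cost: grad(theta) = (1/m) * X.T @ (X @ theta - y)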
def grad(X,y, theta):
m=len(y)
return 1/m *X.T.dot(model(X,theta)-y)
# In[22]:
def gradient_descent(X, y, theta, learning_rate, n_iterations):
cost_history=np.zeros(n_iterations)
for i in range(0, n_iterations):
theta=theta-learning_rate*grad(X, y, theta)
cost_history[i]=cost_function(X, y, theta)
return theta, cost_history
# In[23]:
theta_final, cost_history= gradient_descent(X, y, theta, learning_rate=0.01, n_iterations=1000)
# In[18]:
theta_final
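# In[ ]:
# Sanity check (added; not in the original notebook): gradient descent should
# land near the closed-form least-squares solution pinv(X) @ y.
print(np.linalg.pinv(X).dot(y))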
# In[19]:
predictions= model(X, theta_final)
plt.scatter(x,y)
plt.plot(x, predictions, c='r')
# In[24]:
plt.plot(range(1000), cost_history)
# In[27]:
def coef_determination(y, pred):
u=((y-pred)**2).sum()
v=((y-y.mean())**2).sum()
return 1-u/v
# In[28]:
coef_determination(y, predictions)
# In[29]:
from sklearn.linear_model import SGDRegressor
# In[31]:
np.random.seed(0)
x, y= make_regression(n_samples=100, n_features=1, noise=10)
plt.scatter(x,y)
# In[33]:
model=SGDRegressor(max_iter=100, eta0=0.0001)
model.fit(x,y)
# In[35]:
print('coeff R2=' , model.score(x,y))
plt.scatter(x,y)
plt.plot(x, model.predict(x) , c='red', lw=3)
# In[53]:
model=SGDRegressor(max_iter=1000, eta0=0.001)
model.fit(x,y)
# In[55]:
print('coeff R2=' , model.score(x,y))
plt.scatter(x,y)
plt.plot(x, model.predict(x) , c='red', lw=3)
# In[46]:
from sklearn.preprocessing import PolynomialFeatures
# In[47]:
np.random.seed(0)
# In[48]:
# create the dataset
x, y = make_regression(n_samples=100, n_features=1, noise=10)
y = y**2 # y no longer varies linearly with x!
# In[49]:
# add polynomial features to our dataset
poly_features = PolynomialFeatures(degree=2, include_bias=False)
x = poly_features.fit_transform(x)
# In[50]:
plt.scatter(x[:,0], y)
x.shape # shape of x: 100 rows and 2 columns
# In[51]:
# train the model exactly as before; nothing changes!
model = SGDRegressor(max_iter=1000, eta0=0.001)
model.fit(x,y)
print('Coeff R2 =', model.score(x, y))
# In[52]:
plt.scatter(x[:,0], y, marker='o')
plt.scatter(x[:,0], model.predict(x), c='red', marker='+')
# In[ ]:
|
[
"hamadiamira2@gmail.com"
] |
hamadiamira2@gmail.com
|
0d345e0ccd73dd5b9c1f651ef05e860db671f8e1
|
76c5a7c8428387d83c0ac11e907997e12a27f8ef
|
/handler/base.py
|
bf5c39efa6fe59fccfd439a7b6d8d170d2e92587
|
[
"MIT"
] |
permissive
|
northfun/godeyes
|
13fd52ce1030899f8d4f015c8a10b63e23d90447
|
5afffa52701e61514aa9935df2806f1804e4a43f
|
refs/heads/master
| 2020-09-12T22:54:03.929993
| 2019-11-27T15:56:45
| 2019-11-27T15:56:45
| 222,584,998
| 0
| 0
| null | 2019-11-19T01:54:23
| 2019-11-19T01:54:22
| null |
UTF-8
|
Python
| false
| false
| 257
|
py
|
class Base:
instance = None
client = None
    def __new__(cls, *args, **kwargs):
        if cls.instance is None:
            # object.__new__() takes no extra arguments once __new__ is
            # overridden, so don't forward *args/**kwargs to it
            cls.instance = super().__new__(cls)
        return cls.instance
def __init__(self):
pass
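

if __name__ == "__main__":
    # Demo (added for illustration): every instantiation returns the same
    # object, because __new__ caches the first instance on the class.
    a = Base()
    b = Base()
    assert a is b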
|
[
"chenenquan@qutoutiao.net"
] |
chenenquan@qutoutiao.net
|
8097d71b8ebae32d7fdc01e7873b5ee6d6ad0fb4
|
c01ab71f681efdeb9f4e7d52ed083745b6d42590
|
/old/6th sem/cpp/TRIKA/test_modules/testCases.py
|
96b35814c7b3c3e9a1a25b8848bf226225f18b05
|
[] |
no_license
|
anant-pushkar/competetive_programming_codes
|
398a39c85a761c8d242f42f368933239a438ac06
|
127c67d7d4e2cef2d1f25189b6535606f4523af6
|
refs/heads/master
| 2021-01-20T11:57:07.528790
| 2014-11-14T08:29:21
| 2014-11-14T08:29:21
| 23,577,655
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 475
|
py
|
import testTemplate
'''number of test suites'''
nSuites=2
def getTests():
tests = []
suite=testTemplate.testSuite("Sample 1")
testcase = testTemplate.testInstance("4 4\n1 1\n100 55 10 2\n20 10 90 1\n60 20 22 4\n1 30 70 5" , "Y 23" , "")
suite.add(testcase)
tests.append(suite)
suite=testTemplate.testSuite("Sample 2")
testcase = testTemplate.testInstance("2 2\n1 1\n1 55 \n20 10 " , "N" , "")
suite.add(testcase)
tests.append(suite)
return tests
|
[
"anantpushkar009@gmail.com"
] |
anantpushkar009@gmail.com
|
adbbdfada5b469d69539163f64be0df3954710d1
|
00af94d633b29adb849409a264caa49d4702822e
|
/examples/18_rgb_encoding_mobilenet.py
|
ec73d90363b79e1d647bf484a89e5213631038a9
|
[
"MIT"
] |
permissive
|
gromovnik1337/depthai-python
|
bcc0fe5aff3651a698ee86daf07a5a860f3675d4
|
2b17444aba2f94a236222934e1572c4dd06062dc
|
refs/heads/main
| 2023-03-28T00:34:03.525543
| 2021-03-27T15:28:09
| 2021-03-27T15:28:09
| 348,476,293
| 0
| 0
|
MIT
| 2021-03-20T08:20:56
| 2021-03-16T20:01:17
| null |
UTF-8
|
Python
| false
| false
| 3,589
|
py
|
#!/usr/bin/env python3
from pathlib import Path
import sys
import cv2
import depthai as dai
import numpy as np
# Get argument first
mobilenet_path = str((Path(__file__).parent / Path('models/mobilenet.blob')).resolve().absolute())
if len(sys.argv) > 1:
mobilenet_path = sys.argv[1]
pipeline = dai.Pipeline()
cam = pipeline.createColorCamera()
cam.setBoardSocket(dai.CameraBoardSocket.RGB)
cam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
cam.setPreviewSize(300, 300)
cam.setInterleaved(False)
videoEncoder = pipeline.createVideoEncoder()
videoEncoder.setDefaultProfilePreset(1920, 1080, 30, dai.VideoEncoderProperties.Profile.H265_MAIN)
cam.video.link(videoEncoder.input)
detection_nn = pipeline.createNeuralNetwork()
detection_nn.setBlobPath(mobilenet_path)
cam.preview.link(detection_nn.input)
videoOut = pipeline.createXLinkOut()
videoOut.setStreamName('h265')
videoEncoder.bitstream.link(videoOut.input)
xout_rgb = pipeline.createXLinkOut()
xout_rgb.setStreamName("rgb")
cam.preview.link(xout_rgb.input)
xout_nn = pipeline.createXLinkOut()
xout_nn.setStreamName("nn")
detection_nn.out.link(xout_nn.input)
# MobilenetSSD label texts
texts = ["background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow",
"diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]
with dai.Device(pipeline) as device, open('video.h265', 'wb') as videoFile:
device.startPipeline()
queue_size = 8
q_rgb = device.getOutputQueue("rgb", queue_size)
q_nn = device.getOutputQueue("nn", queue_size)
q_rgb_enc = device.getOutputQueue('h265', maxSize=30, blocking=True)
frame = None
bboxes = []
labels = []
confidences = []
def frame_norm(frame, bbox):
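        # bbox arrives as normalized (xmin, ymin, xmax, ymax) in [0, 1]:
        # even indices scale by frame width, odd indices by frame height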
norm_vals = np.full(len(bbox), frame.shape[0])
norm_vals[::2] = frame.shape[1]
return (np.clip(np.array(bbox), 0, 1) * norm_vals).astype(int)
while True:
in_rgb = q_rgb.tryGet()
in_nn = q_nn.tryGet()
while q_rgb_enc.has():
q_rgb_enc.get().getData().tofile(videoFile)
if in_rgb is not None:
# if the data from the rgb camera is available, transform the 1D data into a HxWxC frame
shape = (3, in_rgb.getHeight(), in_rgb.getWidth())
frame = in_rgb.getData().reshape(shape).transpose(1, 2, 0).astype(np.uint8)
frame = np.ascontiguousarray(frame)
if in_nn is not None:
bboxes = np.array(in_nn.getFirstLayerFp16())
bboxes = bboxes.reshape((bboxes.size // 7, 7))
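            # each 7-value row: (image_id, label, confidence, xmin, ymin, xmax, ymax)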
bboxes = bboxes[bboxes[:, 2] > 0.5]
# Cut bboxes and labels
labels = bboxes[:, 1].astype(int)
confidences = bboxes[:, 2]
bboxes = bboxes[:, 3:7]
if frame is not None:
for raw_bbox, label, conf in zip(bboxes, labels, confidences):
bbox = frame_norm(frame, raw_bbox)
cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (255, 0, 0), 2)
cv2.putText(frame, texts[label], (bbox[0] + 10, bbox[1] + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
cv2.putText(frame, f"{int(conf * 100)}%", (bbox[0] + 10, bbox[1] + 40), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
cv2.imshow("rgb", frame)
if cv2.waitKey(1) == ord('q'):
break
print("To view the encoded data, convert the stream file (.h265) into a video file (.mp4) using a command below:")
print("ffmpeg -framerate 30 -i video.h265 -c copy video.mp4")
|
[
"lukpila29@gmail.com"
] |
lukpila29@gmail.com
|
6660b8d4d207c68d1ac096de1ccd5270579c84c5
|
cbe790c67841f82102b54d4e7ff9d9bfbae5435d
|
/GDash/GDash-Min/GDash-Min/urls.py
|
e532f05450bc7971f1c55fab5d0759c621db5488
|
[] |
no_license
|
Lance-Gauthier-CSC/GenomeDashboard-Django
|
69fa35cd026492ca8fd0208a502a55cd7e709a85
|
0d644f83f01d04f444d65e22e0b7a2c1f8362ff0
|
refs/heads/master
| 2022-11-27T10:33:12.331828
| 2020-07-28T11:35:07
| 2020-07-28T11:35:07
| 270,015,615
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 888
|
py
|
"""GDash-Min URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path('', include('master.urls')),
path('dalliance/', include('dalliance.urls')),
path('ngl/', include('ngl.urls')),
path('admin/', admin.site.urls),
]
|
[
"lance.gauthier.csc@gmail.com"
] |
lance.gauthier.csc@gmail.com
|
6b7a698ab1b77b1c68ff89692d00593e418f7d31
|
7f62cf6037d0c6a0e79a0e197519f6404bc0d930
|
/bookBrowser.py
|
6f2ec69f728637bd7fd475914ddc0c60079da309
|
[] |
no_license
|
stpCollabr8nLstn/book-browser
|
d6d25f346320f44169442514d06d25a66270d445
|
099463df504a4df2a3f4e54bcc0d31edab134110
|
refs/heads/master
| 2021-01-09T20:18:50.344789
| 2016-06-30T03:27:57
| 2016-06-30T03:27:57
| 62,276,237
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,331
|
py
|
import flask
import json
app = flask.Flask(__name__)
# returns the index of an id you are searching for in a list
def get_index(general_list, key, value):
general_index = None
for i in range(len(general_list)):
if value == general_list[i][key]:
general_index = i
break
return general_index
def get_books(author_id, book_list):
authors_books = []
id_found = False
for book in book_list:
if author_id in book['authors']:
authors_books.append(book)
id_found = True
if id_found:
return authors_books
else:
return flask.abort(404)
def get_authors(author_list, full_list):
books_authors = []
for author in full_list:
if author['id'] in author_list:
books_authors.append(author)
return books_authors
with open('./data/authors.json', encoding='utf-8') as author_file:
authors = json.loads(author_file.read())
with open('./data/books.json', encoding='utf-8') as book_file:
books = json.loads(book_file.read())
num_books = len(books)
num_authors = len(authors)
num_editions = sum(len(book['editions']) for book in books)
@app.route('/')
def index():
return flask.render_template('index.html', num_authors=num_authors, num_books=num_books, num_editions=num_editions)
@app.route('/authors/')
def show_authors():
return flask.render_template('authors.html', authors=authors)
@app.route('/authors/<author_id>/')
def show_author(author_id):
    # abort with a 404 if the author id is unknown
author_index = get_index(authors, 'id', author_id)
if author_index is None:
flask.abort(404)
authors_books = get_books(author_id, books)
return flask.render_template('author.html', author=authors[author_index], author_id=author_id,
authors_books=authors_books)
@app.route('/books/')
def show_books():
return flask.render_template('books.html', books=books)
@app.route('/books/<book_id>/')
def show_book(book_id):
book_index = get_index(books, 'id', book_id)
if book_index is None:
flask.abort(404)
books_authors = get_authors(books[book_index]['authors'], authors)
for edition in books[book_index]['editions']:
if 'publish_date' not in edition:
edition['publish_date'] = 'Publish Date Unavailable'
return flask.render_template('book.html', books=books, book_id=book_id, book_index=book_index,
books_authors=books_authors)
@app.route('/books/<book_id>/editions/<edition_id>')
def show_edition(book_id, edition_id):
book_index = get_index(books, 'id', book_id)
edition_index = get_index(books[book_index]['editions'], 'id', edition_id)
books_authors = get_authors(books[book_index]['editions'][edition_index]['authors'], authors)
return flask.render_template('edition.html', books=books, book_id=book_id, book_index=book_index,
books_authors=books_authors, edition_index=edition_index)
@app.errorhandler(404)
def not_found(err):
return flask.render_template('404.html', path=flask.request.path), 404
if __name__ == '__main__':
app.run(debug=True)
|
[
"amrios@us.ibm.com"
] |
amrios@us.ibm.com
|
15fc22e8fd23bf75543afca8ce167e6017251fa0
|
fb1e852da0a026fb59c8cb24aeb40e62005501f1
|
/decoding/GAD/fairseq/dataclass/constants.py
|
93bc6d03cb81618c47a58009dc22f7953a106eb3
|
[
"LicenseRef-scancode-unknown-license-reference",
"LGPL-2.1-or-later",
"LicenseRef-scancode-free-unknown",
"Apache-2.0",
"MIT"
] |
permissive
|
microsoft/unilm
|
134aa44867c5ed36222220d3f4fd9616d02db573
|
b60c741f746877293bb85eed6806736fc8fa0ffd
|
refs/heads/master
| 2023-08-31T04:09:05.779071
| 2023-08-29T14:07:57
| 2023-08-29T14:07:57
| 198,350,484
| 15,313
| 2,192
|
MIT
| 2023-08-19T11:33:20
| 2019-07-23T04:15:28
|
Python
|
UTF-8
|
Python
| false
| false
| 1,626
|
py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum, EnumMeta
from typing import List
class StrEnumMeta(EnumMeta):
# this is workaround for submitit pickling leading to instance checks failing in hydra for StrEnum, see
# https://github.com/facebookresearch/hydra/issues/1156
@classmethod
def __instancecheck__(cls, other):
return "enum" in str(type(other))
class StrEnum(Enum, metaclass=StrEnumMeta):
def __str__(self):
return self.value
def __eq__(self, other: str):
return self.value == other
def __repr__(self):
return self.value
def __hash__(self):
return hash(str(self))
def ChoiceEnum(choices: List[str]):
"""return the Enum class used to enforce list of choices"""
return StrEnum("Choices", {k: k for k in choices})
LOG_FORMAT_CHOICES = ChoiceEnum(["json", "none", "simple", "tqdm"])
DDP_BACKEND_CHOICES = ChoiceEnum([
"c10d", # alias for pytorch_ddp
"legacy_ddp",
"no_c10d", # alias for legacy_ddp
"pytorch_ddp",
"slow_mo",
])
DATASET_IMPL_CHOICES = ChoiceEnum(["raw", "lazy", "cached", "mmap", "fasta"])
GENERATION_CONSTRAINTS_CHOICES = ChoiceEnum(["ordered", "unordered"])
GENERATION_DECODING_FORMAT_CHOICES = ChoiceEnum(
["unigram", "ensemble", "vote", "dp", "bs"]
)
ZERO_SHARDING_CHOICES = ChoiceEnum(["none", "os"])
PIPELINE_CHECKPOINT_CHOICES = ChoiceEnum(["always", "never", "except_last"])
PRINT_ALIGNMENT_CHOICES = ChoiceEnum(["hard", "soft"])
|
[
"tage@microsoft.com"
] |
tage@microsoft.com
|
4b79a3eb38897d261f00134058fddc49fa3acc84
|
f8de6503a34902e5f12d1a0ea22be54c342fbc9c
|
/translator/__init__.py
|
ebea33dce6b355697575aa5d6ceb801be7d612e4
|
[] |
no_license
|
ethanhjennings/didnt-even-know-it
|
2e6b14ad598a8e5f66584e7172cc53c0aa0c551e
|
207b737597966b77409f58b13be3a1bf2bef7ee4
|
refs/heads/master
| 2021-01-20T12:44:48.875354
| 2013-09-24T01:27:21
| 2013-09-24T01:27:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 105
|
py
|
from translator.translate import translatePoem
from translator.exceptions import TranslatorSyntaxError
|
[
"ethan@cs.utexas.edu"
] |
ethan@cs.utexas.edu
|
048eb7259bc442fe98c5f467df45c137bb3725cd
|
4fb8939db0fac64cca6e8414b75353cc6f89b89c
|
/venv/Lib/site-packages/jedi/evaluate/compiled/subprocess/__init__.py
|
9cc8704a457e1919212c066ec733eb538301e8b1
|
[
"MIT"
] |
permissive
|
BDubon/Wander
|
2c303e09524d84c455171dfe2926e15f87b3bd3c
|
12c7e5a9c18b0445a01a61fa1e05f2681bf884df
|
refs/heads/master
| 2022-12-11T20:30:40.468109
| 2019-11-15T03:18:17
| 2019-11-15T03:18:17
| 131,355,054
| 1
| 0
|
MIT
| 2022-11-22T04:32:15
| 2018-04-27T23:59:07
|
Python
|
UTF-8
|
Python
| false
| false
| 11,703
|
py
|
"""
Makes it possible to do the compiled analysis in a subprocess. This has two
goals:
1. Making it safer - Segfaults and RuntimeErrors as well as stdout/stderr can
be ignored and dealt with.
2. Make it possible to handle different Python versions as well as virtualenvs.
"""
import os
import sys
import subprocess
import socket
import errno
import weakref
import traceback
from functools import partial
from jedi._compatibility import queue, is_py3, force_unicode, \
pickle_dump, pickle_load, GeneralizedPopen
from jedi.cache import memoize_method
from jedi.evaluate.compiled.subprocess import functions
from jedi.evaluate.compiled.access import DirectObjectAccess, AccessPath, \
SignatureParam
from jedi.api.exceptions import InternalError
_subprocesses = {}
_MAIN_PATH = os.path.join(os.path.dirname(__file__), '__main__.py')
def get_subprocess(executable):
try:
return _subprocesses[executable]
except KeyError:
sub = _subprocesses[executable] = _CompiledSubprocess(executable)
return sub
def _get_function(name):
return getattr(functions, name)
class _EvaluatorProcess(object):
def __init__(self, evaluator):
self._evaluator_weakref = weakref.ref(evaluator)
self._evaluator_id = id(evaluator)
self._handles = {}
def get_or_create_access_handle(self, obj):
id_ = id(obj)
try:
return self.get_access_handle(id_)
except KeyError:
access = DirectObjectAccess(self._evaluator_weakref(), obj)
handle = AccessHandle(self, access, id_)
self.set_access_handle(handle)
return handle
def get_access_handle(self, id_):
return self._handles[id_]
def set_access_handle(self, handle):
self._handles[handle.id] = handle
class EvaluatorSameProcess(_EvaluatorProcess):
"""
Basically just an easy access to functions.py. It has the same API
as EvaluatorSubprocess and does the same thing without using a subprocess.
This is necessary for the Interpreter process.
"""
def __getattr__(self, name):
return partial(_get_function(name), self._evaluator_weakref())
class EvaluatorSubprocess(_EvaluatorProcess):
def __init__(self, evaluator, compiled_subprocess):
super(EvaluatorSubprocess, self).__init__(evaluator)
self._used = False
self._compiled_subprocess = compiled_subprocess
def __getattr__(self, name):
func = _get_function(name)
def wrapper(*args, **kwargs):
self._used = True
result = self._compiled_subprocess.run(
self._evaluator_weakref(),
func,
args=args,
kwargs=kwargs,
)
# IMO it should be possible to create a hook in pickle.load to
# mess with the loaded objects. However it's extremely complicated
# to work around this so just do it with this call. ~ dave
return self._convert_access_handles(result)
return wrapper
def _convert_access_handles(self, obj):
if isinstance(obj, SignatureParam):
return SignatureParam(*self._convert_access_handles(tuple(obj)))
elif isinstance(obj, tuple):
return tuple(self._convert_access_handles(o) for o in obj)
elif isinstance(obj, list):
return [self._convert_access_handles(o) for o in obj]
elif isinstance(obj, AccessHandle):
try:
# Rewrite the access handle to one we're already having.
obj = self.get_access_handle(obj.id)
except KeyError:
obj.add_subprocess(self)
self.set_access_handle(obj)
elif isinstance(obj, AccessPath):
return AccessPath(self._convert_access_handles(obj.accesses))
return obj
def __del__(self):
if self._used:
self._compiled_subprocess.delete_evaluator(self._evaluator_id)
class _CompiledSubprocess(object):
_crashed = False
def __init__(self, executable):
self._executable = executable
self._evaluator_deletion_queue = queue.deque()
@property
@memoize_method
def _process(self):
parso_path = sys.modules['parso'].__file__
args = (
self._executable,
_MAIN_PATH,
os.path.dirname(os.path.dirname(parso_path))
)
return GeneralizedPopen(
args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
)
def run(self, evaluator, function, args=(), kwargs={}):
# Delete old evaluators.
while True:
try:
evaluator_id = self._evaluator_deletion_queue.pop()
except IndexError:
break
else:
self._send(evaluator_id, None)
assert callable(function)
return self._send(id(evaluator), function, args, kwargs)
def get_sys_path(self):
return self._send(None, functions.get_sys_path, (), {})
def kill(self):
self._crashed = True
try:
subprocess = _subprocesses[self._executable]
except KeyError:
# Fine it was already removed from the cache.
pass
else:
# In the `!=` case there is already a new subprocess in place
# and we don't need to do anything here anymore.
if subprocess == self:
del _subprocesses[self._executable]
self._process.kill()
self._process.wait()
def _send(self, evaluator_id, function, args=(), kwargs={}):
if self._crashed:
raise InternalError("The subprocess %s has crashed." % self._executable)
if not is_py3:
# Python 2 compatibility
kwargs = {force_unicode(key): value for key, value in kwargs.items()}
data = evaluator_id, function, args, kwargs
try:
pickle_dump(data, self._process.stdin)
except (socket.error, IOError) as e:
# Once Python2 will be removed we can just use `BrokenPipeError`.
# Also, somehow in windows it returns EINVAL instead of EPIPE if
# the subprocess dies.
if e.errno not in (errno.EPIPE, errno.EINVAL):
# Not a broken pipe
raise
self.kill()
raise InternalError("The subprocess %s was killed. Maybe out of memory?"
% self._executable)
try:
is_exception, traceback, result = pickle_load(self._process.stdout)
except EOFError:
self.kill()
raise InternalError("The subprocess %s has crashed." % self._executable)
if is_exception:
            # Replace the exception's message with the traceback. It's
            # way more informative.
result.args = (traceback,)
raise result
return result
def delete_evaluator(self, evaluator_id):
"""
        Currently we are not deleting evaluators instantly. They only get
        deleted once the subprocess is used again. It would probably be a
        better solution to move all of this into a thread. However, the
        memory usage of a single evaluator shouldn't be that high.
"""
# With an argument - the evaluator gets deleted.
self._evaluator_deletion_queue.append(evaluator_id)
class Listener(object):
def __init__(self):
self._evaluators = {}
# TODO refactor so we don't need to process anymore just handle
# controlling.
self._process = _EvaluatorProcess(Listener)
def _get_evaluator(self, function, evaluator_id):
from jedi.evaluate import Evaluator
try:
evaluator = self._evaluators[evaluator_id]
except KeyError:
from jedi.api.environment import InterpreterEnvironment
evaluator = Evaluator(
# The project is not actually needed. Nothing should need to
# access it.
project=None,
environment=InterpreterEnvironment()
)
self._evaluators[evaluator_id] = evaluator
return evaluator
def _run(self, evaluator_id, function, args, kwargs):
if evaluator_id is None:
return function(*args, **kwargs)
elif function is None:
del self._evaluators[evaluator_id]
else:
evaluator = self._get_evaluator(function, evaluator_id)
# Exchange all handles
args = list(args)
for i, arg in enumerate(args):
if isinstance(arg, AccessHandle):
args[i] = evaluator.compiled_subprocess.get_access_handle(arg.id)
for key, value in kwargs.items():
if isinstance(value, AccessHandle):
kwargs[key] = evaluator.compiled_subprocess.get_access_handle(value.id)
return function(evaluator, *args, **kwargs)
def listen(self):
stdout = sys.stdout
# Mute stdout/stderr. Nobody should actually be able to write to those,
# because stdout is used for IPC and stderr will just be annoying if it
# leaks (on module imports).
sys.stdout = open(os.devnull, 'w')
sys.stderr = open(os.devnull, 'w')
stdin = sys.stdin
if sys.version_info[0] > 2:
stdout = stdout.buffer
stdin = stdin.buffer
while True:
try:
payload = pickle_load(stdin)
except EOFError:
# It looks like the parent process closed. Don't make a big fuss
# here and just exit.
exit(1)
try:
result = False, None, self._run(*payload)
except Exception as e:
result = True, traceback.format_exc(), e
pickle_dump(result, file=stdout)
class AccessHandle(object):
def __init__(self, subprocess, access, id_):
self.access = access
self._subprocess = subprocess
self.id = id_
def add_subprocess(self, subprocess):
self._subprocess = subprocess
def __repr__(self):
try:
detail = self.access
except AttributeError:
detail = '#' + str(self.id)
return '<%s of %s>' % (self.__class__.__name__, detail)
def __getstate__(self):
return self.id
def __setstate__(self, state):
self.id = state
def __getattr__(self, name):
if name in ('id', 'access') or name.startswith('_'):
raise AttributeError("Something went wrong with unpickling")
#if not is_py3: print >> sys.stderr, name
#print('getattr', name, file=sys.stderr)
return partial(self._workaround, force_unicode(name))
def _workaround(self, name, *args, **kwargs):
"""
TODO Currently we're passing slice objects around. This should not
happen. They are also the only unhashable objects that we're passing
around.
"""
if args and isinstance(args[0], slice):
return self._subprocess.get_compiled_method_return(self.id, name, *args, **kwargs)
return self._cached_results(name, *args, **kwargs)
@memoize_method
def _cached_results(self, name, *args, **kwargs):
#if type(self._subprocess) == EvaluatorSubprocess:
#print(name, args, kwargs,
#self._subprocess.get_compiled_method_return(self.id, name, *args, **kwargs)
#)
return self._subprocess.get_compiled_method_return(self.id, name, *args, **kwargs)
|
[
"33406715+BDubon@users.noreply.github.com"
] |
33406715+BDubon@users.noreply.github.com
|
a29864449dba920011f6794ab0dfac0a7a45a45b
|
d5b95e229c5c21ff3c25e828838aed1dc5ca9c1c
|
/prueba.py
|
13bc402c7d03354e6f93fd3460b7881ddc0942d7
|
[] |
no_license
|
jmvazz/ds_desafio1
|
d3c89dbd82cace3eed65976ee0909a89bd1aac33
|
b16f0898f367f6e4e9e828a56d8e901ae32b7db6
|
refs/heads/master
| 2020-03-26T17:01:52.841600
| 2018-09-05T20:13:57
| 2018-09-05T20:13:57
| 145,138,172
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20
|
py
|
def prueba():
pass
|
[
"jm.vazzano@gmail.com"
] |
jm.vazzano@gmail.com
|
3e6ea47accbdc339c75b3939b92eac58902b0157
|
203243793e32405778c18d27a32088c806da8ee1
|
/DataStructuresAndAlgorithms/DataStructures/ArrayQueue.py
|
ec38702b77cf625219811099c8312d9e536bc7f9
|
[] |
no_license
|
jonnysassoon/Projects
|
bf9ff35f71b5583d9377deb41a6aa485eec313ac
|
3a96ee7800dbdf1c08cdb6c4e5534c4db019ee4a
|
refs/heads/master
| 2021-10-15T23:14:38.053566
| 2019-02-06T16:14:53
| 2019-02-06T16:14:53
| 109,350,386
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,301
|
py
|
"""
Author: Jonny Sassoon
Program: Queue
Implementation of FIFO Data Structure
"""
class Empty(Exception):
pass
class ArrayQueue:
INITIAL_CAPACITY = 10
def __init__(self):
self.data = [None] * ArrayQueue.INITIAL_CAPACITY
self.num_of_elems = 0
self.front_ind = 0
def __len__(self):
return self.num_of_elems
def is_empty(self):
return (self.num_of_elems == 0)
def enqueue(self, elem):
if (self.num_of_elems == len(self.data)):
self.resize(2 * len(self.data))
back_ind = (self.front_ind + self.num_of_elems) % len(self.data)
self.data[back_ind] = elem
self.num_of_elems += 1
def dequeue(self):
if (self.is_empty()):
raise Empty("Queue is empty")
value = self.data[self.front_ind]
self.data[self.front_ind] = None
self.front_ind = (self.front_ind + 1) % len(self.data)
self.num_of_elems -= 1
if(self.num_of_elems < len(self.data) // 4):
self.resize(len(self.data) // 2)
return value
def first(self):
if self.is_empty():
raise Empty("Queue is empty")
return self.data[self.front_ind]
def resize(self, new_cap):
old_data = self.data
self.data = [None] * new_cap
old_ind = self.front_ind
for new_ind in range(self.num_of_elems):
self.data[new_ind] = old_data[old_ind]
old_ind = (old_ind + 1) % len(old_data)
self.front_ind = 0
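

if __name__ == '__main__':
    # Quick demonstration (added for illustration): FIFO order survives
    # wrap-around of front_ind and both the grow and shrink resizes.
    q = ArrayQueue()
    for n in range(25):
        q.enqueue(n)
    assert [q.dequeue() for _ in range(25)] == list(range(25))
    print("ArrayQueue demo passed")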
|
[
"jonny.sassoon@nyu.edu"
] |
jonny.sassoon@nyu.edu
|
8b336856ca278bdb4d36904fd1587cee0e315585
|
da6e23ae4623a4c975f37257ab8a22e0bdf0e67e
|
/File-Encrypt/file-encrypt.py
|
c147cd3b3076c4fa7bd467bc153e87c3e31d15c1
|
[] |
no_license
|
unbin/Python
|
e7946d31774fed6bc88618e70be3cac58c650261
|
4e73c0ae1bb1ec934831eaff8efabb79b64adb84
|
refs/heads/master
| 2020-08-03T10:45:43.171974
| 2020-04-04T01:40:29
| 2020-04-04T01:40:29
| 211,724,147
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,189
|
py
|
# File Encrypt.py
# ===============
# XOR Encrypts a file with
# the supplied key.
#
from sys import argv, exit
import sys
import os
DEBUG = True
# DEF
def usage():
print("Usage: " + argv[0] + " <File Path> <Key>")
exit(1)
# Read blocks from file_in, XOR them with the repeating key, write to file_out
def encrypt(file_in, file_out, key):
    key_bytes = key.encode("utf-8")
    offset = 0
    for block in iter(lambda: file_in.read(4096), b""):
        file_out.write(bytes(b ^ key_bytes[(offset + i) % len(key_bytes)]
                             for i, b in enumerate(block)))
        offset += len(block)
# END DEF
# MAIN
# Check arguments
if (len(argv) != 3):
usage()
# Check if file exists
if not os.path.exists(argv[1]):
print("Error: Path file name not found!", file=sys.stderr)
exit(1)
if not os.path.isfile(argv[1]):
print("Error: File must be a normal file.", file=sys.stderr)
exit(1)
file_size = os.path.getsize(argv[1])
if DEBUG:
print("[DEBUG] File Size: {} Bytes".format(file_size))
with open (argv[1], "rb") as file_in:
if DEBUG:
print("[DEBUG] File " + file_in.name + " Opened.")
with open(argv[1] + ".encrypted", 'wb') as file_out:
if DEBUG:
print("[DEBUG] File " + file_out.name + " Opened.")
        encrypt(file_in, file_out, argv[2])
file_out.close()
file_in.close()
# END MAIN
|
[
"unbin1234@gmail.com"
] |
unbin1234@gmail.com
|
30d076a33b413db6d98a89853257711172247372
|
60f067710243089ea5a09c676f8092232904ed40
|
/ltp/task_segmention.py
|
bfd04d9af9f685de08e23778fb8c48e4e00e5b95
|
[] |
no_license
|
liyang-2401/ltp
|
cfc5386fe9cebc78f828431b1c04d8288d450678
|
5d26093f2e2bbec76a892dd25e206d9e7dacc13e
|
refs/heads/master
| 2023-01-22T14:43:16.871839
| 2020-12-04T08:00:23
| 2020-12-04T08:00:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,058
|
py
|
import types
import numpy
import torch
import torch.utils.data
import os
from tqdm import tqdm
from argparse import ArgumentParser
from ltp.data import dataset as datasets
from ltp import optimization
from ltp.data.utils import collate
from seqeval.metrics import f1_score
from ltp.transformer_linear import TransformerLinear as Model
import pytorch_lightning as pl
from pytorch_lightning import Trainer
from transformers import AutoTokenizer
from ltp.utils import TaskInfo, common_train, map2device, convert2npy
os.environ['TOKENIZERS_PARALLELISM'] = 'true'
task_info = TaskInfo(task_name='seg', metric_name='f1')
# CUDA_VISIBLE_DEVICES=0 PYTHONPATH=. python ltp/task_segmention.py --data_dir=data/seg --num_labels=2 --max_epochs=10 --batch_size=16 --gpus=1 --precision=16 --auto_lr_find=lr
def build_dataset(model, data_dir):
dataset = datasets.load_dataset(
datasets.Conllu,
data_dir=data_dir,
cache_dir=data_dir
)
dataset.remove_columns_(["id", "lemma", "upos", "xpos", "feats", "head", "deprel", "deps", "misc"])
tokenizer = AutoTokenizer.from_pretrained(model.hparams.transformer, use_fast=True)
# {'B':1, 'I':0}
def tokenize(examples):
res = tokenizer(
examples['form'],
is_split_into_words=True,
max_length=model.transformer.config.max_position_embeddings,
truncation=True
)
labels = []
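        # label 1 ('B') marks a sub-token that starts a new word, 0 ('I') one
        # that continues it; encoding.words[1:-1] skips the [CLS]/[SEP] slots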
for encoding in res.encodings:
labels.append([])
last_word_idx = -1
for word_idx in encoding.words[1:-1]:
labels[-1].append(int(word_idx != last_word_idx))
last_word_idx = word_idx
res['labels'] = labels
return res
dataset = dataset.map(
lambda examples: tokenize(examples), batched=True,
cache_file_names={
k: d._get_cache_file_path(f"{task_info.task_name}-{k}-tokenized") for k, d in dataset.items()
}
)
dataset.set_format(type='torch', columns=['input_ids', 'token_type_ids', 'attention_mask', 'labels'])
dataset.shuffle(
indices_cache_file_names={
k: d._get_cache_file_path(f"{task_info.task_name}-{k}-shuffled-index-{model.hparams.seed}") for k, d in
dataset.items()
}
)
return dataset, f1_score
def validation_method(metric, loss_tag='val_loss', metric_tag=f'val_{task_info.metric_name}', log=True):
label_mapper = ['I-W', 'B-W']
def step(self: pl.LightningModule, batch, batch_nb):
loss, logits = self(**batch)
mask = batch['attention_mask'][:, 2:] != 1
# acc
labels = batch['labels']
preds = torch.argmax(logits, dim=-1)
labels[mask] = -1
preds[mask] = -1
labels = [[label_mapper[word] for word in sent if word != -1] for sent in labels.detach().cpu().numpy()]
preds = [[label_mapper[word] for word in sent if word != -1] for sent in preds.detach().cpu().numpy()]
return {'loss': loss.item(), 'pred': preds, 'labels': labels}
def epoch_end(self: pl.LightningModule, outputs):
if isinstance(outputs, dict):
outputs = [outputs]
length = len(outputs)
loss = sum([output['loss'] for output in outputs]) / length
preds = sum([output['pred'] for output in outputs], [])
labels = sum([output['labels'] for output in outputs], [])
f1 = metric(preds, labels)
if log:
self.log_dict(
dictionary={loss_tag: loss, metric_tag: f1},
on_step=False, on_epoch=True, prog_bar=True, logger=True
)
else:
return f1
return step, epoch_end
def build_method(model):
dataset, metric = build_dataset(model, model.hparams.data_dir)
def train_dataloader(self):
res = torch.utils.data.DataLoader(
dataset[datasets.Split.TRAIN],
batch_size=self.hparams.batch_size,
collate_fn=collate,
num_workers=self.hparams.num_workers,
pin_memory=True
)
return res
def training_step(self, batch, batch_nb):
loss, logits = self(**batch)
self.log("loss", loss.item())
return loss
def val_dataloader(self):
return torch.utils.data.DataLoader(
dataset[datasets.Split.VALIDATION],
batch_size=self.hparams.batch_size,
collate_fn=collate,
num_workers=self.hparams.num_workers,
pin_memory=True
)
def test_dataloader(self):
return torch.utils.data.DataLoader(
dataset[datasets.Split.TEST],
batch_size=self.hparams.batch_size,
collate_fn=collate,
num_workers=self.hparams.num_workers,
pin_memory=True
)
# AdamW + LR scheduler
def configure_optimizers(self: Model):
num_epoch_steps = (len(dataset[datasets.Split.TRAIN]) + self.hparams.batch_size - 1) // self.hparams.batch_size
num_train_steps = num_epoch_steps * self.hparams.max_epochs
optimizer, scheduler = optimization.create_optimizer(
self,
lr=self.hparams.lr,
num_train_steps=num_train_steps,
weight_decay=self.hparams.weight_decay,
warmup_steps=self.hparams.warmup_steps,
warmup_proportion=self.hparams.warmup_proportion,
layerwise_lr_decay_power=self.hparams.layerwise_lr_decay_power,
n_transformer_layers=self.transformer.config.num_hidden_layers,
lr_scheduler=optimization.get_polynomial_decay_schedule_with_warmup,
lr_scheduler_kwargs={
'lr_end': self.hparams.lr_end,
'power': self.hparams.lr_decay_power
}
)
return [optimizer], [{'scheduler': scheduler, 'interval': 'step'}]
model.configure_optimizers = types.MethodType(configure_optimizers, model)
model.train_dataloader = types.MethodType(train_dataloader, model)
model.training_step = types.MethodType(training_step, model)
# model.training_epoch_end = types.MethodType(training_epoch_end, model)
validation_step, validation_epoch_end = validation_method(
metric, loss_tag='val_loss', metric_tag=f'val_{task_info.metric_name}'
)
model.val_dataloader = types.MethodType(val_dataloader, model)
model.validation_step = types.MethodType(validation_step, model)
model.validation_epoch_end = types.MethodType(validation_epoch_end, model)
test_step, test_epoch_end = validation_method(
metric, loss_tag='test_loss', metric_tag=f'test_{task_info.metric_name}'
)
model.test_dataloader = types.MethodType(test_dataloader, model)
model.test_step = types.MethodType(test_step, model)
model.test_epoch_end = types.MethodType(test_epoch_end, model)
def add_task_specific_args(parent_parser):
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument('--seed', type=int, default=19980524)
parser.add_argument('--batch_size', type=int, default=16)
parser.add_argument('--num_workers', type=int, default=4)
parser.add_argument('--data_dir', type=str, required=True)
parser.add_argument('--build_dataset', action='store_true')
return parser
def build_distill_dataset(args):
model = Model.load_from_checkpoint(
args.resume_from_checkpoint, hparams=args
)
model.eval()
model.freeze()
dataset, metric = build_dataset(model, args.data_dir)
train_dataloader = torch.utils.data.DataLoader(
dataset[datasets.Split.TRAIN],
batch_size=args.batch_size,
collate_fn=collate,
num_workers=args.num_workers
)
output = os.path.join(args.data_dir, 'output.pt')
if torch.cuda.is_available():
model.cuda()
map2cpu = lambda x: map2device(x)
map2cuda = lambda x: map2device(x, model.device)
else:
map2cpu = lambda x: x
map2cuda = lambda x: x
with torch.no_grad():
batchs = []
for batch in tqdm(train_dataloader):
batch = map2cuda(batch)
loss, logits = model(**batch)
batch.update(logits=logits)
batchs.append(map2cpu(batch))
numpy.savez(output, data=convert2npy(batchs))
print("Done")
def main():
parser = ArgumentParser()
# add task level args
parser = add_task_specific_args(parser)
# add model specific args
parser = Model.add_model_specific_args(parser)
parser = optimization.add_optimizer_specific_args(parser)
parser = Trainer.add_argparse_args(parser)
# set task specific args
parser.set_defaults(num_labels=2)
args = parser.parse_args()
if args.build_dataset:
build_distill_dataset(args)
else:
common_train(
args,
metric=f'val_{task_info.metric_name}',
model_class=Model,
build_method=build_method,
task=task_info.task_name
)
if __name__ == '__main__':
main()
|
[
"ylfeng@ir.hit.edu.cn"
] |
ylfeng@ir.hit.edu.cn
|
6878d0f840206e6156c0f635965e8608c7f7bd8e
|
4ddb1cb60794f75b7f72074fee6002f4f7367043
|
/day22.py
|
e682b2491ffcafe21b85d17927c4bd51a18b548b
|
[] |
no_license
|
shamayn/aoc2020
|
25032c84843e5ccb4472bb762ea88ab91b04f249
|
3a81253f0825180615d64dd6dae57a8a1ca9d28c
|
refs/heads/main
| 2023-02-15T08:54:59.214285
| 2021-01-18T02:39:40
| 2021-01-18T02:39:40
| 330,533,882
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,613
|
py
|
from collections import deque
TEST_INPUT = [
"Player 1:",
"9",
"2",
"6",
"3",
"1",
"Player 2:",
"5",
"8",
"4",
"7",
"10",
]
TEST_INFINITE_INPUT = [
"Player 1:",
"43",
"19",
"Player 2:",
"2",
"29",
"14",
]
def playCombat(input):
    # parseDecks returns two plain lists; key them as 1/2 and wrap them in
    # deques so that popleft() works
    deck0, deck1 = parseDecks(input)
    playerdecks = {1: deque(deck0), 2: deque(deck1)}
    while len(playerdecks[1]) > 0 and len(playerdecks[2]) > 0:
cards = [playerdecks[1].popleft(), playerdecks[2].popleft()]
print("pop", cards)
winindex = cards.index(max(cards)) + 1
playerdecks[winindex].append(max(cards))
playerdecks[winindex].append(min(cards))
print(1, list(playerdecks[1]))
print(2, list(playerdecks[2]))
wincards = list(playerdecks[winindex])
# 0 -> len
# n-1 -> 1
score = getScore(wincards)
print("SCORE", score)
return score
def getScore(cards):
    return sum((len(cards) - i) * card for i, card in enumerate(cards))
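# Scoring (for reference): cards[0] (top of deck) is worth len(cards) times its
# value, the bottom card once; the AoC sample deck [3, 2, 10, 6, 8, 5, 9, 4, 7, 1]
# scores 306, which is exactly what testPlayCombat below checks.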
def parseDecks(input):
playerid = 0
playerdeck0 = []
playerdeck1 = []
for line in input:
if line.strip() == "": continue
if line.startswith("Player"):
playerid = int(line[7])
else:
if playerid == 1:
playerdeck0.append(int(line.strip()))
elif playerid == 2:
playerdeck1.append(int(line.strip()))
return (playerdeck0, playerdeck1)
#return playerdecks
# if both players have at least as many cards in their own decks as the number on the card
# they just dealt, the winner of the round is
# determined by recursing into a sub-game of Recursive Combat.
def playRecursiveCombat(input):
(deck0, deck1) = parseDecks(input)
(winner, score) = doPlayRC(deck0, deck1, 1)
return score
def doPlayRC(deck0, deck1, gameid):
winindex = -1
score = 0
past_rounds_0 = []
past_rounds_1 = []
print("Playing Game", gameid)
round = 1
while len(deck0) > 0 and len(deck1) > 0:
print("Begin round", round)
print(0, deck0)
print(1, deck1)
if deck0 in past_rounds_0 and deck1 in past_rounds_1 and \
past_rounds_0.index(deck0) == past_rounds_1.index(deck1):
winindex = 0
score = getScore(deck0)
print("The winner is player 0 by default, score", score)
print("pastrounds")
print(past_rounds_0)
print(past_rounds_1)
return (winindex, score)
past_rounds_0.append(deck0)
past_rounds_1.append(deck1)
cards = [deck0[0], deck1[0]]
deck0 = deck0[1:]
deck1 = deck1[1:]
print("pop", cards)
if len(deck0) >= cards[0] and len(deck1) >= cards[1]:
# move to subgame
newdeck1 = deck0[0:cards[0]]
newdeck2 = deck1[0:cards[1]]
print("starting subgame with", newdeck1, newdeck2)
(winindex, score) = doPlayRC(newdeck1, newdeck2, gameid+1)
else:
winindex = cards.index(max(cards))
if winindex == 1:
deck1.append(cards[1])
deck1.append(cards[0])
score = getScore(deck1)
elif winindex == 0:
deck0.append(cards[0])
deck0.append(cards[1])
score = getScore(deck0)
round += 1
print("Winner of this round, player", winindex)
# print(0, playerdecks[0])
# print(1, playerdecks[1])
print("The winner of game", gameid, "is Player", winindex, "Score", score)
return (winindex, score)
def testPlayCombat():
result = 306
if playCombat(TEST_INPUT) == result:
print("testPlayCombat Pass")
else:
print("testPlayCombat Fail")
def testPlayRecursiveCombat():
result = 291
if playRecursiveCombat(TEST_INPUT) == result:
print("testPlayRecursiveCombat Pass")
else:
print("testPlayRecursiveCombat Fail")
#playRecursiveCombat(TEST_INFINITE_INPUT);
def main():
#testPlayCombat()
# testPlayRecursiveCombat()
f = open('data/day22_input.txt', 'r')
lines = f.readlines()
#playCombat(lines)
playRecursiveCombat(lines)
if __name__ == '__main__':
main()
|
[
"shamayn@gmail.com"
] |
shamayn@gmail.com
|
226980fdf20772f3a2d26e3b993584790ded886b
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/303/usersdata/299/100769/submittedfiles/testes.py
|
b90b88a3a2fbbabb9a6af0cc8e965ec6c94201cb
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,871
|
py
|
from minha_bib import verificar_vitoria
from minha_bib import sorteio
from minha_bib import sorteio2
from minha_bib import maquinainteligente
import time
c=0
tabuleiro=[[1,2,3],[1,2,3],[1,2,3]]
for i in range(0,3,1):
for j in range(0,3,1):
tabuleiro[i][j]=" "
print('---------------------------------------')
print('JOGO DA VELHA')
print('Olá\nSeja Bem Vindo ao jogo da velha!')
# GAME BETWEEN TWO PEOPLE
nome1=str(input('Qual seu nome(ou apelido)? '))
'''nome2=str(input('Qual o nome do segundo jogador? '))'''
s1=str(input('Qual símbolo você deseja utilizar,'+nome1+'?[X/O]'))
if s1=='X':
s2='O'
'''print('Ok, vamos começar,'+nome2+' ficará com "O"')'''
else:
s2='X'
'''print('Ok, vamos começar,'+nome2+'ficará com "X"')'''
print('Esse é o nosso tabuleiro \n',tabuleiro[0][0],'|',tabuleiro[0][1],'|',tabuleiro[0][2],'\n',tabuleiro[1][0],'|',tabuleiro[1][1],'|',tabuleiro[1][2],'\n',tabuleiro[2][0],'|',tabuleiro[2][1],'|',tabuleiro[2][2])
print('Você vai me informar a casa que quer jogar com números.\n E cada um desses números representa as seguintes casas:')
print('00 | 01 | 02\n10 | 11 | 12\n20 | 21 | 22')
print('E aí eu vou lá e substituo a casa pelo seu símbolo, por exemplo:\nO você me informa a seguinte jogada: 22')
print('Eu vou lá e...')
print('',tabuleiro[0][0],'|',tabuleiro[0][1],'|',tabuleiro[0][2],'\n',tabuleiro[1][0],'|',tabuleiro[1][1],'|',tabuleiro[1][2],'\n',tabuleiro[2][0],'|',tabuleiro[2][1],'|',s2)
print('----------------------------------------------')
# START OF THE GAME
inicio=sorteio(0,1)
if inicio==0:
inicio=str('Usuário')
else:
inicio=str('Máquina')
print('O vencedor do sorteio para incio foi '+inicio)
if inicio=='Usuário':
print('Então você começa')
k=0
while k<10:
k+=1
if k%2!=0:
jogada=str(input('Qual a sua jogada '+nome1+'?'))
i=jogada[0]
j=jogada[1]
i=int(i)
j=int(j)
while tabuleiro[i][j]!=" ":
print('Jogada inválida')
jogada=str(input('Qual a sua jogada?'))
i=jogada[0]
j=jogada[1]
i=int(i)
j=int(j)
tabuleiro[i][j]=s1
print('',tabuleiro[0][0],'|',tabuleiro[0][1],'|',tabuleiro[0][2],'\n',tabuleiro[1][0],'|',tabuleiro[1][1],'|',tabuleiro[1][2],'\n',tabuleiro[2][0],'|',tabuleiro[2][1],'|',tabuleiro[2][2])
if verificar_vitoria(tabuleiro)==True:
print('PARABÉNS,VOCÊ VENCEU')
break
elif k%2==0:
print('Minha vez')
time.sleep(1)
x=str(maquinainteligente(tabuleiro))
i=int(x[0])
j=int(x[1])
            # retry random squares until an empty one is found
            while tabuleiro[i][j]!=' ':
                i=int(sorteio2(0,2))
                j=int(sorteio2(0,2))
tabuleiro[i][j]=s2
print('',tabuleiro[0][0],'|',tabuleiro[0][1],'|',tabuleiro[0][2],'\n',tabuleiro[1][0],'|',tabuleiro[1][1],'|',tabuleiro[1][2],'\n',tabuleiro[2][0],'|',tabuleiro[2][1],'|',tabuleiro[2][2])
            if verificar_vitoria(tabuleiro)==True:
                print('Ahh, não foi dessa vez')
                break
        # After the 9th move a full board with no winner is a draw; break here,
        # otherwise the next machine turn would loop forever looking for an
        # empty square.
        if k==9 and verificar_vitoria(tabuleiro)==False:
            print('ihhhh, Deu velha')
            break
elif inicio=='Máquina':
print('Então eu começo')
for k in range(1,10,1):
if k%2!=0:
print('Minha vez')
time.sleep(1)
x=str(maquinainteligente(tabuleiro))
i=int(x[0])
j=int(x[1])
            # retry random squares until an empty one is found
            while tabuleiro[i][j]!=' ':
                i=int(sorteio2(0,2))
                j=int(sorteio2(0,2))
tabuleiro[i][j]=s2
print('',tabuleiro[0][0],'|',tabuleiro[0][1],'|',tabuleiro[0][2],'\n',tabuleiro[1][0],'|',tabuleiro[1][1],'|',tabuleiro[1][2],'\n',tabuleiro[2][0],'|',tabuleiro[2][1],'|',tabuleiro[2][2])
if verificar_vitoria(tabuleiro)==True:
print('Ahh, não foi dessa vez')
break
elif k%2==0:
jogada=str(input('Qual a sua jogada '+nome1+'?'))
i=jogada[0]
j=jogada[1]
i=int(i)
j=int(j)
while tabuleiro[i][j]!=" ":
print('Jogada inválida')
jogada=str(input('Qual a sua jogada?'))
i=jogada[0]
j=jogada[1]
i=int(i)
j=int(j)
tabuleiro[i][j]=s1
print('',tabuleiro[0][0],'|',tabuleiro[0][1],'|',tabuleiro[0][2],'\n',tabuleiro[1][0],'|',tabuleiro[1][1],'|',tabuleiro[1][2],'\n',tabuleiro[2][0],'|',tabuleiro[2][1],'|',tabuleiro[2][2])
if verificar_vitoria(tabuleiro)==True:
print('PARABÉNS,VOCÊ VENCEU')
break
        # k == 9 is odd, so as an 'elif' after the k%2 branches this test was
        # unreachable; check for a draw separately after each round instead.
        if k==9 and verificar_vitoria(tabuleiro)==False:
            print('ihhhh, Deu velha')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
cf36801b7f70c2544b64f279314e448ea314e413
|
231684cab7d5254a1c41b797768989757d6f7359
|
/0x06-python-classes/4-square.py
|
d615b4952f77703977f1cd9a7ae017bf3d843a46
|
[] |
no_license
|
gavazcal/holbertonschool-higher_level_programming
|
c937b224177f0101bcfcc0ee9183c782772ebfe9
|
a7ad9de29acf4c0cc837eaf4b8ab753f3a779fdb
|
refs/heads/master
| 2023-08-10T19:43:06.215169
| 2021-09-22T21:28:58
| 2021-09-22T21:28:58
| 319,359,143
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 639
|
py
|
#!/usr/bin/python3
"""defines a square class"""
class Square:
"""square class"""
def __init__(self, size=0):
"""creates instance size"""
self.__size = size
def area(self):
"""calculates square area"""
return self.__size ** 2
@property
def size(self):
"""size getter"""
return self.__size
    @size.setter
    def size(self, value):
        """size setter; validates before assigning"""
        if not isinstance(value, int):
            raise TypeError("size must be an integer")
        if value < 0:
            raise ValueError("size must be >= 0")
        self.__size = value
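# Minimal usage sketch (hypothetical, not part of the original task file):
# s = Square(3)
# print(s.area())  # 9
# s.size = 5
# print(s.area())  # 25
# s.size = -1      # raises ValueError: size must be >= 0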
|
[
"2392@holbertonschool.com"
] |
2392@holbertonschool.com
|
5f6797cbc576bcd2cab29cfca37c302d8329420f
|
9afc5ffde1488d718ac8dec9acd0c5b2dddf390d
|
/src/mask.py
|
c3597754fb985b12f27837ca9a15aad361ec8b59
|
[] |
no_license
|
pypaut/dobble-player
|
1b0b7255986dd08942014c2ff20b547f23aa4838
|
499a9d21dd61a2a6710cca7f42ae3e6b60b56461
|
refs/heads/master
| 2023-06-10T13:59:42.139290
| 2021-07-05T08:12:54
| 2021-07-05T08:12:54
| 383,062,638
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 844
|
py
|
import cv2 as cv
from src.utils import show_image
def cards_mask(image, debug=False):
"""
Generate 2D mask for card presence on @image
"""
# Threshold
gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
threshold = cv.inRange(gray, 170, 255)
if debug:
show_image("Threshold", threshold)
# Floodfill
tmp = threshold.copy()
    points = [
        (0, 0),                                # Top left (OpenCV origin is top-left)
        (tmp.shape[1] // 2, 0),                # Top middle
        (tmp.shape[1] - 1, tmp.shape[0] - 1),  # Bottom right
    ]
for p in points:
cv.floodFill(tmp, None, p, 255)
if debug:
show_image("Flood fill", tmp)
# Invert floodfilled image
inverted = cv.bitwise_not(tmp)
if debug:
show_image("Inverted", inverted)
# Combine the two images to get the foreground.
return threshold | inverted
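# Hypothetical usage sketch (the image path is illustrative only):
# img = cv.imread("photos/dobble_board.jpg")
# mask = cards_mask(img, debug=True)  # debug=True shows each intermediate image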
|
[
"pypaut@hotmail.fr"
] |
pypaut@hotmail.fr
|
a9b62fc19004fc5b0e65d895afed852291ef136d
|
9742f49edd518a3e053c363d8b09d1899b1b59d3
|
/VOCdevkit/makeTxt.py
|
006730baf3813a8181061b24c6793ea1d4adcb9f
|
[
"MIT"
] |
permissive
|
1306298019/YOLOV4
|
7000a68917481d8974cfd65548e3ad213af250ed
|
5b790f036a94a30ea6337a3eebd83e8ea8023da4
|
refs/heads/main
| 2023-04-22T17:48:46.718475
| 2021-05-12T03:30:24
| 2021-05-12T03:30:24
| 366,579,250
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 787
|
py
|
from sklearn.model_selection import train_test_split
import os
name_path = r'.\data\VOCdevkit2007\VOC2007\JPEGImages'
name_list = os.listdir(name_path)
names = []
for i in name_list:
    # strip the extension to keep only the image name
    names.append(i.split('.')[0])
# `shuffle` expects a bool; the original passed 10, which only worked
# because any non-zero value is truthy.
trainval, test = train_test_split(names, test_size=0.5, shuffle=True)
val, train = train_test_split(trainval, test_size=0.5, shuffle=True)
with open('ImageSets/Main/trainval.txt','w') as fw:
for i in trainval:
fw.write(i+'\n')
with open('ImageSets/Main/test.txt','w') as fw:
for i in test:
fw.write(i+'\n')
with open('ImageSets/Main/val.txt','w') as fw:
for i in val:
fw.write(i+'\n')
with open('ImageSets/Main/train.txt','w') as fw:
for i in train:
fw.write(i+'\n')
print('done!')
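# Equivalent loop-based sketch (same output files, less repetition):
# for split_name, split in (('trainval', trainval), ('test', test),
#                           ('val', val), ('train', train)):
#     with open('ImageSets/Main/%s.txt' % split_name, 'w') as fw:
#         fw.writelines(i + '\n' for i in split)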
|
[
"noreply@github.com"
] |
1306298019.noreply@github.com
|
af47c1a7f6ae89919ffdb24bffc208cb913e6ee9
|
f24543d25294be7802fd78eb58697f0b223a00ae
|
/flask_setup_example/flaskr/__init__.py
|
02c4a17c33f1fad50a3b5093b4e1a3aebcbea0c1
|
[] |
no_license
|
blt1339/udacity_full_stack_web_developer
|
606e5fbff53b8b6ef5f7984c708b2d42fd052101
|
3a7c103a2bffce5b5e9f01c2b7c4f09510be403d
|
refs/heads/main
| 2023-07-08T23:24:05.694066
| 2021-08-03T00:08:21
| 2021-08-03T00:08:21
| 359,160,023
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 487
|
py
|
# Import your dependencies
from flask import Flask, jsonify
# Define the create_app function
def create_app(test_config=None):
    # Create and configure the app.
    # Include the first parameter: here, __name__ is the name of the current Python module.
    app = Flask(__name__)
@app.route('/')
def hello_world():
return jsonify({'message':'Hello, World!'})
@app.route('/smiley')
def smiley():
return ':-)'
# Return the app instance
return app
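# Minimal run sketch (assumes this module lives in a package named `flaskr`):
# from flaskr import create_app
# app = create_app()
# app.run(debug=True)  # then visit http://127.0.0.1:5000/ and /smiley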
|
[
"blt1339@gmail.com"
] |
blt1339@gmail.com
|
a7cd0c227a128b7a39f4db49d9085443bd6a2ca1
|
ef23a265f03c21c192707ebada89a4587c351a5e
|
/client/PeopleCounter.py
|
550eea656eff5c88b43ef874be170643d15907d0
|
[] |
no_license
|
hadasg-vayyar/Walabot-MeetingRoom
|
73cb9c0845a58f13d0c6292467c0f73524d21d78
|
6191f8bbe18fcbb9dbedd1fd76c7f530efbaae80
|
refs/heads/master
| 2021-07-11T18:39:20.769223
| 2017-10-15T08:38:06
| 2017-10-15T08:38:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,861
|
py
|
from __future__ import print_function, division
from datetime import datetime # used to the current time
from math import sin, cos, radians, sqrt # used to calculate MAX_Y_VALUE
import json  # needed for json.dumps below; missing from the original imports
import socket
import WalabotAPI as wlbt
try:
input = raw_input
except NameError:
pass
R_MIN, R_MAX, R_RES = 10, 60, 2 # SetArenaR parameters
THETA_MIN, THETA_MAX, THETA_RES = -10, 10, 10 # SetArenaTheta parameters
PHI_MIN, PHI_MAX, PHI_RES = -10, 10, 2 # SetArenaPhi parameters
THRESHOLD = 15 # SetThreshold parameters
MAX_Y_VALUE = R_MAX * cos(radians(THETA_MAX)) * sin(radians(PHI_MAX))
SENSITIVITY = 0.25 # amount of seconds to wait after a move has been detected
TENDENCY_LOWER_BOUND = 0.1 # tendency below that won't count as entrance/exit
IGNORED_LENGTH = 3 # len in cm to ignore targets in center of arena
ASSUMED_FRAME_RATE = 10
# TODO: Need to be configured to real server's ip and port
SERVER_ADDRESS = "127.0.0.1"
SERVER_PORT = 9999
# TODO: Need to be configured to real room's name
ROOM_NAME = "yellow"
# TODO: Need to be configured to real room's max people.
MAX_PEOPLE = 6
wlbt.Init()
wlbt.SetSettingsFolder()
def getNumOfPeopleInside():
""" Gets the current number of people in the room as input and returns it.
Validate that the number is valid.
Returns:
num Number of people in the room that got as input
"""
num = input('- Enter current number of people in the room: ')
if (not num.isdigit()) or (int(num) < 0):
print('- Invalid input, try again.')
return getNumOfPeopleInside()
return int(num)
def verifyWalabotIsConnected():
""" Check for Walabot connectivity. loop until detect a Walabot.
"""
while True:
try:
wlbt.ConnectAny()
except wlbt.WalabotError as err:
input("- Connect Walabot and press 'Enter'.")
else:
print('- Connection to Walabot established.')
return
def setWalabotSettings():
""" Configure Walabot's profile, arena (r, theta, phi), threshold and
the image filter.
"""
wlbt.SetProfile(wlbt.PROF_TRACKER)
wlbt.SetArenaR(R_MIN, R_MAX, R_RES)
wlbt.SetArenaTheta(THETA_MIN, THETA_MAX, THETA_RES)
wlbt.SetArenaPhi(PHI_MIN, PHI_MAX, PHI_RES)
wlbt.SetThreshold(THRESHOLD)
wlbt.SetDynamicImageFilter(wlbt.FILTER_TYPE_NONE)
print('- Walabot Configured.')
def startAndCalibrateWalabot():
""" Start the Walabot and calibrate it.
"""
wlbt.StartCalibration()
print('- Calibrating...')
while wlbt.GetStatus()[0] == wlbt.STATUS_CALIBRATING:
wlbt.Trigger()
wlbt.Start()
print('- Calibration ended.\n- Ready!')
def getDataList():
""" Detect and record a list of Walabot sensor targets. Stop recording
        and return the data when enough triggers have occurred (according to the
SENSITIVITY) with no detection of targets.
Returns:
dataList: A list of the yPosCm attribute of the detected
sensor targets
"""
while True:
wlbt.Trigger()
targets = wlbt.GetTrackerTargets()
if targets:
targets = [max(targets, key=distance)]
numOfFalseTriggers = 0
triggersToStop = ASSUMED_FRAME_RATE * SENSITIVITY
while numOfFalseTriggers < triggersToStop:
wlbt.Trigger()
newTargets = wlbt.GetTrackerTargets()
if newTargets:
targets.append(max(newTargets, key=distance))
numOfFalseTriggers = 0
else:
numOfFalseTriggers += 1
yList = [
t.yPosCm for t in targets if abs(t.yPosCm) > IGNORED_LENGTH]
if yList:
return yList
def distance(t):
return sqrt(t.xPosCm**2 + t.yPosCm**2 + t.zPosCm**2)
def analizeAndAlert(dataList, numOfPeople):
""" Analize a given dataList and print to the screen one of two results
if occured: an entrance or an exit.
Arguments:
dataList A list of values
numOfPeople The current number of people in the room
returns:
numOfPeople The new number of people in the room
"""
currentTime = datetime.now().strftime('%H:%M:%S')
tendency = getTypeOfMovement(dataList)
if tendency > 0:
result = ': Someone has left!'.ljust(25)
numOfPeople -= 1
elif tendency < 0:
result = ': Someone has entered!'.ljust(25)
numOfPeople += 1
else: # do not count as a valid entrance / exit
result = ': Someone is at the door!'.ljust(25)
numToDisplay = ' Currently '+str(numOfPeople)+' people in the room.'
print(currentTime+result+numToDisplay)
return numOfPeople
def getTypeOfMovement(dataList):
""" Calculate and return the type of movement detected.
        The movement only counts as a movement inside/outside if the tendency
        is above TENDENCY_LOWER_BOUND or we have at least one item from
        both sides of the door header (matching the check in the code below).
Arguments:
dataList A list of values
Returns:
tendency if zero - not count as a valid entrance/exit
if positive - counts as exiting the room
if negative - counts as entering the room
"""
if dataList:
velocity = getVelocity(dataList)
tendency = (velocity * len(dataList)) / (2 * MAX_Y_VALUE)
side1 = any(x > 0 for x in dataList)
side2 = any(x < 0 for x in dataList)
bothSides = side1 and side2
aboveLowerBound = abs(tendency) > TENDENCY_LOWER_BOUND
if bothSides or aboveLowerBound:
return tendency
return 0
def getVelocity(data):
""" Calculate velocity of a given set of values using linear regression.
Arguments:
data An iterator contains values.
Returns:
velocity The estimates slope.
"""
sumY = sumXY = 0
for x, y in enumerate(data):
sumY, sumXY = sumY + y, sumXY + x*y
    if sumXY == 0:  # no values / one value only / all values are 0
return 0
sumX = x * (x+1) / 2 # Gauss's formula - sum of first x natural numbers
sumXX = x * (x+1) * (2*x+1) / 6 # sum of sequence of squares
return (sumXY - sumX*sumY/(x+1)) / (sumXX - sumX**2/(x+1))
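# Sanity-check sketch (hypothetical, not part of the original module):
# getVelocity([0, 1, 2, 3])  # -> 1.0 (unit slope)
# getVelocity([5, 5, 5])     # -> 0.0 (flat line, zero velocity)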
def stopAndDisconnectWalabot():
""" Stops Walabot and disconnect the device.
"""
wlbt.Stop()
wlbt.Disconnect()
def PeopleCounter():
""" Main function. init and configure the Walabot, get the current number
of people from the user, start the main loop of the app.
        The Walabot scans constantly and records sets of data (when people are
near the door header). For each data set, the app calculates the type
of movement recorded and acts accordingly.
"""
verifyWalabotIsConnected()
numOfPeople = getNumOfPeopleInside()
setWalabotSettings()
startAndCalibrateWalabot()
try:
client_socket = socket.socket()
client_socket.connect((SERVER_ADDRESS, SERVER_PORT))
while True:
dataList = getDataList()
numOfPeople = analizeAndAlert(dataList, numOfPeople)
# Run this line in python2.7
# client_socket.send(json.dumps({"room": ROOM_NAME, "number_of_people": numOfPeople}))
# Run this line in python3
client_socket.send(json.dumps({"name": ROOM_NAME, "number_of_people": numOfPeople, "max_people": MAX_PEOPLE}
).encode('UTF-8'))
except socket.error:
print("Server is currently unavailable.")
except KeyboardInterrupt:
pass
finally:
stopAndDisconnectWalabot()
if __name__ == '__main__':
PeopleCounter()
|
[
"noam.hoze@gmail.com"
] |
noam.hoze@gmail.com
|
1b8532d3421a9dd5536b1e0debfc39c16e37a6c3
|
1bccf7d57c7aa8d48b84fff187de4b6ff2599cb6
|
/pandora_common/state_manager/scripts/state_manager/__init__.py
|
6d30fa3d4c6b665f9f74250df0145ce48aae504d
|
[] |
no_license
|
skohlbr/pandora_ros_pkgs
|
733ed34edb5b6d46e59df4acb01288f28ef3b50f
|
eecaf082b47e52582c5f009eefbf46dd692aba4f
|
refs/heads/indigo-devel
| 2021-01-21T18:06:14.967943
| 2015-11-04T15:08:03
| 2015-11-04T15:08:03
| 53,413,573
| 0
| 1
| null | 2016-03-08T13:19:40
| 2016-03-08T13:19:40
| null |
UTF-8
|
Python
| false
| false
| 37
|
py
|
from state_client import StateClient
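# Note: the line above is a Python 2 implicit relative import; under Python 3
# it would need to be "from .state_client import StateClient".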
|
[
"pandora@ee.auth.gr"
] |
pandora@ee.auth.gr
|
c519b85a50ace53148b8cf5fe0eafb56d732355b
|
4a683a1ee801d0ee05d4ac44407be162e70cbc06
|
/creat_key.py
|
45eb43722976f44e24146bfc9a7576408497f0f5
|
[] |
no_license
|
aakash10897/aviation-using-neo4j
|
8b585dfca4166192aa558566b642c38b60ab6974
|
1d475bae042bb2ea2f9a6afa8788bd3d7cc260db
|
refs/heads/master
| 2020-07-01T18:17:06.888416
| 2019-08-23T18:57:47
| 2019-08-23T18:57:47
| 201,252,791
| 0
| 1
| null | 2019-08-14T09:19:08
| 2019-08-08T12:23:18
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 321
|
py
|
import csv

def createKey():
    inputfile = 'data2/routes.csv'
    outputfile = 'data2/routes_key.csv'
    # newline='' stops csv.writer from emitting blank rows on Windows
    with open(inputfile, 'r') as inut, open(outputfile, 'w', newline='') as oput:
        r = csv.reader(inut)
        w = csv.writer(oput)
        counter = 0
        for row in r:
            counter = counter + 1
            w.writerow(row + [str(counter)])
createKey()
|
[
"aakash.10897@gmail.com"
] |
aakash.10897@gmail.com
|
5b86d1ba8124f7ae022306cd7979e8aa97754314
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/HdrqkdT4r9DeKPjCM_15.py
|
b8f9cf3d649052ff9b6b798b8d9e233d02626467
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 400
|
py
|
def is_polygonal(n):
    if n == 1:
        return "0th of all"
    if n <= 3:
        return False
    results = []  # renamed from `list` to avoid shadowing the built-in
    for k in range(3, n):
        i = 1
        current = k + 1
        while current < n:
            i += 1
            current += k * i
        if current == n:
            i = str(i)
            # 11/12/13 take "th"; otherwise the last digit picks st/nd/rd/th
            i += "th" if i[-2:-1] == "1" else {"1": "st", "2": "nd", "3": "rd"}.get(i[-1], "th")
            results.append("{ith} {k}-gonal number".format(ith=i, k=k))
    return results
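# Example sketch (values follow from the loop above, which walks the centered
# k-gonal sequences k+1, 3k+1, 6k+1, ...):
# is_polygonal(10)  # -> ['2nd 3-gonal number', '1st 9-gonal number']
# is_polygonal(2)   # -> False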
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
e3cb93d8809d1c926a15fdd21eca2bccf5ba4ac2
|
f7411485d2603aa8c2841f88bf5bfb2e1930951e
|
/Labs/Lab10/actor.py
|
76ab8c0f4c37c402d0f769ebec6983b93dd48568
|
[] |
no_license
|
Johnspeanut/Computer_science_fundation_course
|
156e03e8cf6fcca4ddcbfaa837b8c55f95083045
|
79a13f3152c7e61d8d6cc10da2213a15c8a364e5
|
refs/heads/master
| 2023-05-13T01:55:10.171165
| 2021-05-31T07:00:31
| 2021-05-31T07:00:31
| 372,412,223
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 437
|
py
|
class Actor:
def __init__(self, lastname, show_collection):
        '''
        Constructor -- creates an Actor object
        Parameters:
            self -- the current actor object
            lastname -- last name of actor, String
            show_collection -- show collection, list
        '''
self.lastname = lastname
self.collection = show_collection
|
[
"pengqiong2015fall@hotmail.com"
] |
pengqiong2015fall@hotmail.com
|
083c357cb60f0627ddb64b67ee8260e13ee2414f
|
67db81532a2ee0281d901b47a8ffdcfbb8a8199d
|
/interface/teacher_interface.py
|
0ad75f4dfd8939002e08f92167972ed6600d2c31
|
[] |
no_license
|
liuqingzheng/courseSelection
|
a829df3c9948d127556b1adcc35068ff05ab92ab
|
a18d636a1899f72171c2e8bd463942d68f1a0b06
|
refs/heads/master
| 2020-03-11T06:19:58.805019
| 2018-05-26T10:42:11
| 2018-05-26T10:42:11
| 129,827,176
| 16
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,431
|
py
|
import os
from conf import setting
from db import models
from lib import common
def check_course(teacher_name):
'''
    View the courses this teacher teaches
:param teacher_name:
:return:
'''
teacher_obj = models.Teacher.get_obj_by_name(teacher_name)
teach_course_list = teacher_obj.get_teach_course()
return teach_course_list
def check_all_course():
base_dir_course = os.path.join(setting.BASE_DB, 'course')
course_list = common.get_all_file(base_dir_course)
return course_list
def choose_course(teacher_name, course_name):
'''
    Bind this course to the teacher
:param teacher_name:
:param course_name:
:return:
'''
teacher_obj = models.Teacher.get_obj_by_name(teacher_name)
teacher_obj.bind_to_course(course_name)
def check_student_by_course(course_name):
'''
    View all students enrolled in the course
:param course_name:
:return:
'''
course_obj = models.Course.get_obj_by_name(course_name)
return course_obj.student_name_list
def change_student_scour(teacher_name, student_name, course_name, score):
'''
    Update a student's score
:param teacher_name:
:param student_name:
:param course_name:
:param score:
:return:
'''
teacher_obj = models.Teacher.get_obj_by_name(teacher_name)
student_obj = models.Student.get_obj_by_name(student_name)
teacher_obj.change_student_score(student_obj, course_name, score)
|
[
"306334678@qq.com"
] |
306334678@qq.com
|
68dcb318fb7daab562bcb76eb37a28ff7014fe59
|
4e7c688741975346b277fdaa113fbe48563d4288
|
/main.py
|
cd381e078413f986c2e4af0f982093ba27f7db2a
|
[] |
no_license
|
nathankong/deep_net_sparse
|
6d2aedfeb6b653d9be808af4ca987620ae415b9d
|
94bfb9e44a33ba8bc9774ad297eef73a1145b08a
|
refs/heads/master
| 2022-03-31T16:07:03.170324
| 2019-12-10T22:27:54
| 2019-12-10T22:27:54
| 225,716,786
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,639
|
py
|
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import pickle
import os
os.environ["TORCH_HOME"] = "/mnt/fs5/nclkong/deep_models/"
import numpy as np
import collections
from functools import partial
import torch
import torch.nn as nn
from torchvision.datasets import ImageFolder
from ModelInfo import get_model_info
from utils import plot_stats, image_loader, load_model, compute_statistics
torch.manual_seed(0)
if torch.cuda.is_available():
    DEVICE = torch.device("cuda:0")
else:
    DEVICE = torch.device("cpu")
# NOTE: this line overrides the check above and forces CPU (debug leftover)
DEVICE = torch.device("cpu")
print "Device:", DEVICE
FIGURE_DIR = "./figures/"
RESULTS_DIR = "./results/"
def main(model_name, images_dir):
# Load model
m_info = get_model_info(model_name)
feature_layer_dict = m_info.get_feature_layer_index_dictionary()
classifier_layer_dict = m_info.get_classifier_layer_index_dictionary()
layers_order = m_info.get_layers()
m = load_model(model_name).to(DEVICE)
# A dictionary that keeps saving the activations as they come
activations = collections.defaultdict(list)
def save_activation(name, mod, inp, out):
#print name, out.cpu().size()
activations[name].append(np.copy(out.cpu().detach().numpy()))
# Get Conv2d/Pooling layer activations
for i, module in enumerate(m.features):
#if type(module)==nn.Conv2d or type(module)==nn.MaxPool2d:
if type(module)==nn.Conv2d or type(module)==nn.ReLU or type(module)==nn.MaxPool2d:
#if type(module)==nn.ReLU or type(module)==nn.MaxPool2d:
name = feature_layer_dict[i]
module.register_forward_hook(partial(save_activation, name))
#print i, module
# Get FC layer activations
for i, module in enumerate(m.classifier):
if type(module)==nn.Linear:
name = classifier_layer_dict[i]
module.register_forward_hook(partial(save_activation, name))
#print i, module
dataset = ImageFolder(images_dir, loader=partial(image_loader, DEVICE))
data_loader = torch.utils.data.DataLoader(dataset, batch_size=10, shuffle=False)
m.eval()
for step, (images,y) in enumerate(data_loader):
        # TODO: DEBUG, use 1/10 of the batches for now, for runtime
        # Note: (step+1) % 1 == 0 is always true, so this logs every batch.
        if (step+1) % 1 == 0:
            print "Batch {}".format(step+1)
_ = m(images)
activations = {name: np.concatenate(outputs, axis=0) for name, outputs in activations.items()}
n_feats_all = list()
n_zero_mean_all = list()
layer_feature_stds = list()
for layer in layers_order:
features = activations[layer]
n_feats, n_zero_mean = compute_statistics(features)
n_feats_all.append(n_feats)
n_zero_mean_all.append(n_zero_mean)
layer_feature_stds.append(features.std(axis=0))
# Save statistics
results = dict()
results["layers"] = layers_order
results["statistics"] = dict()
results["statistics"]["zero_mean_proportion"] = n_zero_mean_all
results["statistics"]["num_features"] = n_feats_all
results["statistics"]["feature_stds"] = layer_feature_stds
results_fname = "{}_stats.pkl".format(model_name)
pickle.dump(results, open(RESULTS_DIR + results_fname, "wb"))
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, default="vgg19")
parser.add_argument('--imagedir', type=str, default="./images/")
args = parser.parse_args()
model_name = args.model.lower()
images_dir = args.imagedir.lower()
print "Model:", model_name
print "Image directory:", images_dir
main(model_name, images_dir)
|
[
"kongnathan@gmail.com"
] |
kongnathan@gmail.com
|
c3abe5035eada595291caa229e664159b4743cb2
|
e9ef3cd143478660d098668a10e67544a42b5878
|
/Lib/corpuscrawler/crawl_thk.py
|
f49f58ce0e90e3a983f847f9a2de5a9de94840a2
|
[
"Apache-2.0"
] |
permissive
|
google/corpuscrawler
|
a5c790c19b26e6397b768ce26cf12bbcb641eb90
|
10adaecf4ed5a7d0557c8e692c186023746eb001
|
refs/heads/master
| 2023-08-26T04:15:59.036883
| 2022-04-20T08:18:11
| 2022-04-20T08:18:11
| 102,909,145
| 119
| 40
|
NOASSERTION
| 2022-04-20T08:18:12
| 2017-09-08T22:21:03
|
Python
|
UTF-8
|
Python
| false
| false
| 809
|
py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function, unicode_literals
from corpuscrawler.util import crawl_bibleis
def crawl(crawler):
out = crawler.get_output(language='thk')
crawl_bibleis(crawler, out, bible='THKBTL')
|
[
"sascha@brawer.ch"
] |
sascha@brawer.ch
|
5acbbf066b8de964068c4efea6f60aa5238e4b13
|
62c7898fe58fdfa18fc6c445d7145fb625cd4412
|
/source/process_spatial_data.py
|
a9afed907e5c8e9e289324dd95a921bda0c65bfc
|
[] |
no_license
|
xziyue/MCCNN-Playground
|
b9fa5cd2c530605f55db0d5bed4b2f99fc787122
|
f807d3164237bcb14fbeab3f7ea55a7073351ef7
|
refs/heads/master
| 2020-12-03T10:57:57.891328
| 2020-01-05T19:12:20
| 2020-01-05T19:12:20
| 231,290,469
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,324
|
py
|
import os
import subprocess as sp
from source.rel_path import rootDir
import re
from shutil import copy2, move
def _recursive_search_target(path, result):
assert os.path.exists(path)
allFiles = os.listdir(path)
for filename in allFiles:
fullname = os.path.join(path, filename)
if os.path.isdir(fullname):
_recursive_search_target(fullname, result)
elif os.path.isfile(fullname):
_, ext = os.path.splitext(fullname)
if ext == '.pdbqt':
result.append(fullname)
def recursive_search_target(path):
ret = []
_recursive_search_target(path, ret)
return ret
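# Equivalent sketch using os.walk (same result, no explicit recursion):
# def recursive_search_target(path):
#     return [os.path.join(r, f)
#             for r, _, fs in os.walk(path)
#             for f in fs if os.path.splitext(f)[1] == '.pdbqt']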
def get_autogrid4_template(id):
rawTemplate = \
r'''npts 15 15 15 # num.grid points in xyz
gridfld $$$$.maps.fld # grid_data_file
spacing 1.0 # spacing(A)
receptor_types A C HD N NA OA SA # receptor atom types
ligand_types A C HD N NA OA SA # ligand atom types
receptor $$$$.pdbqt # macromolecule
gridcenter auto # xyz-coordinates or auto
smooth 0.5 # store minimum energy w/in rad(A)
map $$$$.A.map # atom-specific affinity map
map $$$$.C.map # atom-specific affinity map
map $$$$.HD.map # atom-specific affinity map
map $$$$.N.map # atom-specific affinity map
map $$$$.NA.map # atom-specific affinity map
map $$$$.OA.map # atom-specific affinity map
map $$$$.SA.map # atom-specific affinity map
elecmap $$$$.e.map # electrostatic potential map
dsolvmap $$$$.d.map # desolvation potential map
dielectric -0.1465 # <0, AD4 distance-dep.diel;>0, constant
'''
return re.sub(r'\${4}', id, rawTemplate)
def run_autogrid4(targetFilename, outputPath):
# get target id
head1, _ = os.path.split(targetFilename)
_, head2 = os.path.split(head1)
id = head2
# get template
pgfTemplate = get_autogrid4_template(id)
autogridPath = os.path.join(rootDir, 'autogrid')
autogridExecutable = os.path.join(autogridPath, 'autogrid4')
tempPgfFilename = os.path.join(autogridPath, id + '.pgf')
with open(tempPgfFilename, 'w') as outFile:
outFile.write(pgfTemplate)
# copy data
tempTargetFilename = os.path.join(autogridPath, id + '.pdbqt')
copy2(targetFilename, tempTargetFilename)
# run autogrid4
sp.run([autogridExecutable, '-p', tempPgfFilename], check=True, cwd=autogridPath)
# separate files
relatedFiles = []
uselessFiles = []
for name in os.listdir(autogridPath):
if name.startswith(id):
if name.endswith('.map'):
relatedFiles.append(name)
else:
uselessFiles.append(name)
# move all files to target path
for name in relatedFiles:
move(os.path.join(autogridPath, name), outputPath)
# delete useless files
for name in uselessFiles:
os.remove(os.path.join(autogridPath, name))
if __name__ == '__main__':
# extract all features from the dataset
targets = recursive_search_target(os.path.join(rootDir, 'data', 'spatial_features'))
for target in targets:
run_autogrid4(target, os.path.join(rootDir, 'data', 'grid_data'))
|
[
"xziyue@qq.com"
] |
xziyue@qq.com
|
5eea65897e3ff96532bb28a2c4f116df82eb09cc
|
c3317daf3ded8b4c793e779075632b3d0bc748bb
|
/touch_file.py
|
d287577d3537240b6d4896b9d7e3f2f1929274be
|
[] |
no_license
|
AarashFarahani/watching-directory
|
f68557c92788c9151b09ad8ca1c807df0310ac8e
|
c9b1f72c8f823951ed07f1e48b28052916261009
|
refs/heads/master
| 2020-12-20T03:22:09.520508
| 2020-02-03T08:16:14
| 2020-02-03T08:16:14
| 235,945,526
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,468
|
py
|
import os
import pyinotify
from pathlib import Path
import log_handler
class MyEventHandler(pyinotify.ProcessEvent):
def __init__(self, source, destination):
self.source = source
self.destination = destination
def process_IN_CLOSE_WRITE(self, event):
log_handler.info("%s has been added", event.name)
_touch(self.source, self.destination, event.pathname, event.name)
def _touch(source, destination, file_path, file_name):
dest_path = destination + file_path.replace(source, '')
dest_dir = dest_path.replace(file_name, '')
try:
        if not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
log_handler.info("%s directory has been made", dest_dir)
Path(dest_path).touch()
log_handler.info("%s has been touched", dest_path)
except Exception as e:
print(e)
log_handler.error(e, exc_info=True)
def start_watching(source, destination):
wm = pyinotify.WatchManager()
wm.add_watch(source, pyinotify.ALL_EVENTS, rec=True)
# event handler
eh = MyEventHandler(source, destination)
# notifier
notifier = pyinotify.Notifier(wm, eh)
notifier.loop()
def touch_exist_files(source, destination):
    files = {}  # renamed from `dict` to avoid shadowing the built-in
    for r, d, f in os.walk(source):
        for file in f:
            files[os.path.join(r, file)] = file
    for file_path, file_name in files.items():
        _touch(source, destination, file_path, file_name)
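# Hypothetical usage sketch (the paths are placeholders, not from this repo):
# touch_exist_files('/srv/source', '/srv/mirror')  # mirror what already exists
# start_watching('/srv/source', '/srv/mirror')     # then block in notifier.loop()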
|
[
"noreply@github.com"
] |
AarashFarahani.noreply@github.com
|
d2d771a77a5dabb499ec5f9b80115f57636340f4
|
fd4b76bac768ad313e0ca12cdcbe6918c5dd1233
|
/list/views.py
|
236166ef935d670ab55f1f2283923111129d7431
|
[] |
no_license
|
PsalmsGlobal/ToDoApp
|
ebd95fec8f7ad704448dedd4c2299660101ebbfb
|
ae51b14d6bfc2e64a551d935ba9ad9b34bd3ca27
|
refs/heads/master
| 2023-04-14T15:28:46.501922
| 2021-04-15T14:46:05
| 2021-04-15T14:46:05
| 336,938,454
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,648
|
py
|
from django.shortcuts import render, redirect
from .models import MyList
from .forms import MyListForm
from django.contrib import messages
def home(request):
if request.method == 'POST':
form = MyListForm(request.POST or None)
if form.is_valid():
form.save()
            all_items = MyList.objects.all()  # call the queryset explicitly rather than passing the bound method
messages.success(request, ('Item Has Been Added To List!!'))
return render(request, 'home.html', {'all_items': all_items})
else:
        all_items = MyList.objects.all()
return render(request, 'home.html', {'all_items': all_items})
def about(request):
context = {'first_name': 'Nhicoulous', 'last_name': 'Horford'}
return render(request, 'about.html', context)
def delete(request, List_id):
item = MyList.objects.get(pk=List_id)
item.delete()
messages.success(request, ('Item Has Been Deleted'))
return redirect('home')
def cross_off(request, List_id):
item = MyList.objects.get(pk=List_id)
item.completed = True
item.save()
return redirect('home')
def uncross(request, List_id):
item = MyList.objects.get(pk=List_id)
item.completed = False
item.save()
return redirect('home')
def edit(request, List_id):
if request.method == 'POST':
item = MyList.objects.get(pk=List_id)
form = MyListForm(request.POST or None, instance= item)
if form.is_valid():
form.save()
messages.success(request, ('Item Has Been Edited!'))
return redirect('home')
else:
item = MyList.objects.get(pk=List_id)
return render(request, 'edit.html', {'item': item})
def back(request):
return render(request, 'home.html')
|
[
"noreply@github.com"
] |
PsalmsGlobal.noreply@github.com
|
efc34d950ca51f5ceeb25007b700dbcb9a79a6bc
|
f7e4b3e8010241b31aa9c772a7d6bfec3454dcf2
|
/ipcs.py
|
e34d276efc73cd3d126afcd8f861c9ed76837cc3
|
[] |
no_license
|
MR414N-ID/IPCS
|
74294500cc6d0daf4d38f27d058b39588c319225
|
f233e21c00e0fddce54da2797d8782ea666db3c9
|
refs/heads/master
| 2022-11-23T02:38:22.451130
| 2020-07-24T12:26:38
| 2020-07-24T12:26:38
| 278,318,219
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,111
|
py
|
# Don't change the author credit; respect the creator, this took real effort to make
import LIST
from LIST.id import *
from LIST.it import *
from LIST.jp import *
from LIST.us import *
from LIST.fr import *
from LIST.kr import *
from LIST.de import *
from LIST.tr import *
import requests,re,os
b="\033[0;34m"
g="\033[1;32m"
w="\033[1;37m"
r="\033[1;31m"
y="\033[1;33m"
cyan = "\033[0;36m"
lgray = "\033[0;37m"
dgray = "\033[1;30m"
ir = "\033[0;101m"
reset = "\033[0m"
def main():
os.system('clear')
print("{} ____ ").format(r)
print(" _[]_/____\__n_ ")
print(" |_____.--.__()_|")
print(" |I //# \\\ |")
print("{} |P \\\__// | ").format(w)
print(" |CS '--' | ")
print("{} '--------------'------{}----------------------. ").format(r,w)
print("{} | {}Author : {}MR.414N {} | {}INDO{}N{}{}ESIA | ").format(r,w,r,w,r,ir,reset,w)
print("{} | {}TEAM : {}CYBER CRIMINAL PUBLIC {}| {}082292838634 {} |").format(r,w,w,w,lgray,w)
print("{} '------------------------------------{}-------' ").format(r,w)
print (" {}[ 1 ] {}Italy").format(r,w)
print (" {}[ 2 ] {}Indonesia").format(r,w)
print (" {}[ 3 ] {}Japan").format(r,w)
print (" {}[ 4 ] {}United States").format(r,w)
print (" {}[ 5 ] {}France").format(r,w)
print (" {}[ 6 ] {}Korea").format(r,w)
print (" {}[ 7 ] {}German").format(r,w)
print (" {}[ 8 ] {}Turkey").format(r,w)
print (" {}[ 9 ] {}Exit").format(r,w)
print ""
select = input("\033[1;31m[ \033[1;37mSelect@Number \033[1;31m]\033[1;37m> ")
filtering(select)
def filtering(pilih):
if pilih == 1:
italy()
elif pilih == 2:
indonesia()
elif pilih == 3:
japan()
elif pilih == 4:
unitedstates()
elif pilih == 5:
france()
elif pilih == 6:
korea()
elif pilih == 7:
german()
elif pilih == 8:
turkey()
elif pilih == 9:
print (r+"Exiting ..."+w)
os.sys.exit()
else:
print (r+"Exiting ..."+w)
os.sys.exit()
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
MR414N-ID.noreply@github.com
|
2f8b6c78d6f72f8ff2235dd0556ce46aafda9f3b
|
2a7224df36ea68c5ece24d410d97a9c336baa0a8
|
/dags/utils/db.py
|
98967c56ec1a74279db58cf4865b7e5939817885
|
[
"MIT"
] |
permissive
|
jsmithdataanalytics/house_price_tracker
|
aaa7bc12a45caa4dc1fe26963ad0539264ac8b83
|
a4795db21c25c014f45ff6742c5bb30ad26ded75
|
refs/heads/master
| 2023-05-08T22:24:00.764398
| 2020-04-29T06:28:51
| 2020-04-29T06:28:51
| 257,061,766
| 1
| 0
|
MIT
| 2021-06-02T01:32:12
| 2020-04-19T17:32:27
|
Python
|
UTF-8
|
Python
| false
| false
| 1,859
|
py
|
import sqlite3
from typing import List, Dict, Iterable
from os import environ
db_filename = environ['DATABASE_URL'][10:]
def select_all(table_name: str):
connection = sqlite3.connect(db_filename)
connection.row_factory = sqlite3.Row
with connection:
cursor = connection.execute(f'SELECT * FROM `{table_name}`')
result = [dict(row) for row in cursor.fetchall()]
connection.close()
return result
def insert_or_replace(table_name: str, items: Iterable[Dict]):
InsertOrReplaceBuffer(table_name=table_name, size=500).run(items)
class InsertOrReplaceBuffer:
def __init__(self, table_name: str, size: int):
self.table_name = table_name
self.size = size
self.data: List[Dict] = []
def __len__(self):
return len(self.data)
def __is_full(self):
return len(self) >= self.size
def __append(self, item: Dict):
if self.__is_full():
self.__flush()
self.data.append(item)
    def __flush(self):
        if not self.data:
            # Nothing buffered (e.g. the final flush in run() after an exact
            # multiple of `size` items); self.data[0] below would raise IndexError.
            return
        print(f'Uploading batch of {len(self)} {self.table_name.lower()} to database...')
        column_names = list(self.data[0].keys())
values = [tuple(item[column_name] for column_name in column_names) for item in self.data]
column_names_string = ', '.join([f'`{column_name}`' for column_name in column_names])
placeholders_string = ', '.join(['?'] * len(column_names))
query = f'REPLACE INTO `{self.table_name}` ({column_names_string}) VALUES ({placeholders_string})'
connection = sqlite3.connect(db_filename)
with connection:
connection.executemany(query, values)
connection.close()
print('Done.')
self.data = []
def run(self, iterable: Iterable):
for item in iterable:
self.__append(item)
self.__flush()
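# Hypothetical usage sketch (table and columns are illustrative only):
# insert_or_replace('prices', [{'id': 1, 'price': 250000},
#                              {'id': 2, 'price': 310000}])
# rows = select_all('prices')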
|
[
"jsmithdataanalytics@gmail.com"
] |
jsmithdataanalytics@gmail.com
|
1c00d1285fb51001a7306edf2f7a7c849d622ad2
|
e072275351f45031c2f30235864a471e689280c5
|
/shipping_addresses/forms.py
|
d78428382c70d689f7fec35ac54be09dfc8ef13e
|
[] |
no_license
|
nluiscuadros24/Exportellus
|
764dbca7e0fda0d80b92661801d7a8c2a9f9cd0a
|
9a3b01af51d02c09f89bba2b9b180a484e89d6aa
|
refs/heads/master
| 2022-11-27T01:57:28.461546
| 2020-08-06T05:44:42
| 2020-08-06T05:44:42
| 285,472,305
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,369
|
py
|
from django.forms import ModelForm
from .models import ShippingAddress
class ShippingAddressForm(ModelForm):
class Meta:
model = ShippingAddress
fields = [
'line1', 'line2', 'city', 'state', 'country', 'postal_code', 'reference'
]
labels = {
'line1': 'Calle 1',
'line2': 'Calle 2',
'city': 'Ciudad',
'state': 'Estado',
'country': 'País',
'postal_code': 'Código postal',
'reference': 'Referencias'
}
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Apply the Bootstrap class to every field in one pass instead of
        # repeating the same attrs.update() call per field.
        for field in self.fields.values():
            field.widget.attrs.update({'class': 'form-control'})
        self.fields['postal_code'].widget.attrs.update({'placeholder': '0000'})
|
[
"luiscuadrosa@gmail.com"
] |
luiscuadrosa@gmail.com
|
779a502f8d520ab2b65985fd08af1a4fb8b521d9
|
d17616959f48f6438ed95d62e6f8cfbd17f4451e
|
/KerasRFCN/Utils.py
|
53e0c2d39c7f39fbec30caa95a6b47b878559842
|
[
"MIT"
] |
permissive
|
mitulrm/FaceRFCN
|
8365df0690303502ec44fde5182be8def3141d65
|
5e1fdaf197b3a93c22a82d9476a3f9a1c804e398
|
refs/heads/master
| 2020-05-15T20:35:54.496866
| 2019-08-20T02:41:35
| 2019-08-20T02:41:35
| 182,484,924
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 27,603
|
py
|
"""
Keras RFCN
Copyright (c) 2018
Licensed under the MIT License (see LICENSE for details)
Written by parap1uie-s@github.com
"""
import sys
import os
import math
import random
import numpy as np
import tensorflow as tf
import scipy.misc
import skimage.color
import skimage.io
import urllib.request
import shutil
import keras.backend as K
from keras.callbacks import Callback
############################################################
# Bounding Boxes
############################################################
# def extract_bboxes(mask):
# """Compute bounding boxes.
# mask: [height, width, num_instances]. Mask pixels are either 1 or 0.
# Returns: bbox array [num_instances, (y1, x1, y2, x2)].
# """
# boxes = np.zeros([mask.shape[-1], 4], dtype=np.int32)
# for i in range(mask.shape[-1]):
# m = mask[:, :, i]
# # Bounding box.
# horizontal_indicies = np.where(np.any(m, axis=0))[0]
# vertical_indicies = np.where(np.any(m, axis=1))[0]
# if horizontal_indicies.shape[0]:
# x1, x2 = horizontal_indicies[[0, -1]]
# y1, y2 = vertical_indicies[[0, -1]]
# # x2 and y2 should not be part of the box. Increment by 1.
# x2 += 1
# y2 += 1
# else:
# # No mask for this instance. Might happen due to
# # resizing or cropping. Set bbox to zeros
# x1, x2, y1, y2 = 0, 0, 0, 0
# boxes[i] = np.array([y1, x1, y2, x2])
# return boxes.astype(np.int32)
def compute_iou(box, boxes, box_area, boxes_area):
"""Calculates IoU of the given box with the array of the given boxes.
box: 1D vector [y1, x1, y2, x2]
boxes: [boxes_count, (y1, x1, y2, x2)]
box_area: float. the area of 'box'
boxes_area: array of length boxes_count.
Note: the areas are passed in rather than calculated here for
    efficiency. Calculate once in the caller to avoid duplicate work.
"""
# Calculate intersection areas
y1 = np.maximum(box[0], boxes[:, 0])
y2 = np.minimum(box[2], boxes[:, 2])
x1 = np.maximum(box[1], boxes[:, 1])
x2 = np.minimum(box[3], boxes[:, 3])
intersection = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0)
union = box_area + boxes_area[:] - intersection[:]
iou = intersection / union
return iou
def compute_overlaps(boxes1, boxes2):
"""Computes IoU overlaps between two sets of boxes.
boxes1, boxes2: [N, (y1, x1, y2, x2)].
For better performance, pass the largest set first and the smaller second.
"""
# Areas of anchors and GT boxes
area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
# Compute overlaps to generate matrix [boxes1 count, boxes2 count]
# Each cell contains the IoU value.
overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))
for i in range(overlaps.shape[1]):
box2 = boxes2[i]
overlaps[:, i] = compute_iou(box2, boxes1, area2[i], area1)
return overlaps
def non_max_suppression(boxes, scores, threshold):
"""Performs non-maximum supression and returns indicies of kept boxes.
boxes: [N, (y1, x1, y2, x2)]. Notice that (y2, x2) lays outside the box.
scores: 1-D array of box scores.
threshold: Float. IoU threshold to use for filtering.
"""
assert boxes.shape[0] > 0
if boxes.dtype.kind != "f":
boxes = boxes.astype(np.float32)
# Compute box areas
y1 = boxes[:, 0]
x1 = boxes[:, 1]
y2 = boxes[:, 2]
x2 = boxes[:, 3]
area = (y2 - y1) * (x2 - x1)
    # Get indices of boxes sorted by scores (highest first)
ixs = scores.argsort()[::-1]
pick = []
while len(ixs) > 0:
# Pick top box and add its index to the list
i = ixs[0]
pick.append(i)
# Compute IoU of the picked box with the rest
iou = compute_iou(boxes[i], boxes[ixs[1:]], area[i], area[ixs[1:]])
        # Identify boxes with IoU over the threshold. This
        # returns indices into ixs[1:], so add 1 to get
        # indices into ixs.
        remove_ixs = np.where(iou > threshold)[0] + 1
        # Remove indices of the picked and overlapped boxes.
ixs = np.delete(ixs, remove_ixs)
ixs = np.delete(ixs, 0)
return np.array(pick, dtype=np.int32)
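# Example sketch (hypothetical values): boxes 0 and 1 overlap with IoU 0.81,
# so NMS at threshold 0.5 keeps the higher-scoring one plus the disjoint box.
# boxes = np.array([[0, 0, 10, 10], [1, 1, 10, 10], [20, 20, 30, 30]])
# scores = np.array([0.9, 0.8, 0.7])
# non_max_suppression(boxes, scores, 0.5)  # -> array([0, 2], dtype=int32)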
def apply_box_deltas(boxes, deltas):
"""Applies the given deltas to the given boxes.
boxes: [N, (y1, x1, y2, x2)]. Note that (y2, x2) is outside the box.
deltas: [N, (dy, dx, log(dh), log(dw))]
"""
boxes = boxes.astype(np.float32)
# Convert to y, x, h, w
height = boxes[:, 2] - boxes[:, 0]
width = boxes[:, 3] - boxes[:, 1]
center_y = boxes[:, 0] + 0.5 * height
center_x = boxes[:, 1] + 0.5 * width
# Apply deltas
center_y += deltas[:, 0] * height
center_x += deltas[:, 1] * width
height *= np.exp(deltas[:, 2])
width *= np.exp(deltas[:, 3])
# Convert back to y1, x1, y2, x2
y1 = center_y - 0.5 * height
x1 = center_x - 0.5 * width
y2 = y1 + height
x2 = x1 + width
return np.stack([y1, x1, y2, x2], axis=1)
def box_refinement_graph(box, gt_box):
"""Compute refinement needed to transform box to gt_box.
box and gt_box are [N, (y1, x1, y2, x2)]
"""
box = tf.cast(box, tf.float32)
gt_box = tf.cast(gt_box, tf.float32)
height = box[:, 2] - box[:, 0]
width = box[:, 3] - box[:, 1]
center_y = box[:, 0] + 0.5 * height
center_x = box[:, 1] + 0.5 * width
gt_height = gt_box[:, 2] - gt_box[:, 0]
gt_width = gt_box[:, 3] - gt_box[:, 1]
gt_center_y = gt_box[:, 0] + 0.5 * gt_height
gt_center_x = gt_box[:, 1] + 0.5 * gt_width
dy = (gt_center_y - center_y) / height
dx = (gt_center_x - center_x) / width
dh = tf.log(gt_height / height)
dw = tf.log(gt_width / width)
result = tf.stack([dy, dx, dh, dw], axis=1)
return result
def box_refinement(box, gt_box):
"""Compute refinement needed to transform box to gt_box.
box and gt_box are [N, (y1, x1, y2, x2)]. (y2, x2) is
assumed to be outside the box.
"""
box = box.astype(np.float32)
gt_box = gt_box.astype(np.float32)
height = box[:, 2] - box[:, 0]
width = box[:, 3] - box[:, 1]
center_y = box[:, 0] + 0.5 * height
center_x = box[:, 1] + 0.5 * width
gt_height = gt_box[:, 2] - gt_box[:, 0]
gt_width = gt_box[:, 3] - gt_box[:, 1]
gt_center_y = gt_box[:, 0] + 0.5 * gt_height
gt_center_x = gt_box[:, 1] + 0.5 * gt_width
dy = (gt_center_y - center_y) / height
dx = (gt_center_x - center_x) / width
dh = np.log(gt_height / height)
dw = np.log(gt_width / width)
return np.stack([dy, dx, dh, dw], axis=1)
############################################################
# Dataset
############################################################
class Dataset(object):
"""The base class for dataset classes.
To use it, create a new class that adds functions specific to the dataset
you want to use. For example:
class CatsAndDogsDataset(Dataset):
def load_cats_and_dogs(self):
...
def load_bbox(self, image_id):
...
def image_reference(self, image_id):
...
See COCODataset and ShapesDataset as examples.
"""
def __init__(self, class_map=None):
self._image_ids = []
self.image_info = []
# Background is always the first class
self.class_info = [{"source": "", "id": 0, "name": "BG"}]
self.source_class_ids = {}
def add_class(self, source, class_id, class_name):
assert "." not in source, "Source name cannot contain a dot"
# Does the class exist already?
for info in self.class_info:
if info['source'] == source and info["id"] == class_id:
# source.class_id combination already available, skip
return
# Add the class
self.class_info.append({
"source": source,
"id": class_id,
"name": class_name,
})
def add_image(self, source, image_id, path, **kwargs):
image_info = {
"id": image_id,
"source": source,
"path": path,
}
image_info.update(kwargs)
self.image_info.append(image_info)
def image_reference(self, image_id):
"""Return a link to the image in its source Website or details about
        the image that help with looking it up or debugging it.
Override for your dataset, but pass to this function
if you encounter images not in your dataset.
"""
return ""
def prepare(self, class_map=None):
"""Prepares the Dataset class for use.
TODO: class map is not supported yet. When done, it should handle mapping
classes from different datasets to the same class ID.
"""
def clean_name(name):
"""Returns a shorter version of object names for cleaner display."""
return ",".join(name.split(",")[:1])
# Build (or rebuild) everything else from the info dicts.
self.num_classes = len(self.class_info)
self.class_ids = np.arange(self.num_classes)
self.class_names = [clean_name(c["name"]) for c in self.class_info]
self.num_images = len(self.image_info)
self._image_ids = np.arange(self.num_images)
self.class_from_source_map = {"{}.{}".format(info['source'], info['id']): id
for info, id in zip(self.class_info, self.class_ids)}
# Map sources to class_ids they support
self.sources = list(set([i['source'] for i in self.class_info]))
self.source_class_ids = {}
# Loop over datasets
for source in self.sources:
self.source_class_ids[source] = []
# Find classes that belong to this dataset
for i, info in enumerate(self.class_info):
# Include BG class in all datasets
if i == 0 or source == info['source']:
self.source_class_ids[source].append(i)
def map_source_class_id(self, source_class_id):
"""Takes a source class ID and returns the int class ID assigned to it.
For example:
dataset.map_source_class_id("coco.12") -> 23
"""
return self.class_from_source_map[source_class_id]
def get_source_class_id(self, class_id, source):
"""Map an internal class ID to the corresponding class ID in the source dataset."""
info = self.class_info[class_id]
assert info['source'] == source
return info['id']
def append_data(self, class_info, image_info):
self.external_to_class_id = {}
for i, c in enumerate(self.class_info):
for ds, id in c["map"]:
self.external_to_class_id[ds + str(id)] = i
# Map external image IDs to internal ones.
self.external_to_image_id = {}
for i, info in enumerate(self.image_info):
self.external_to_image_id[info["ds"] + str(info["id"])] = i
@property
def image_ids(self):
return self._image_ids
def source_image_link(self, image_id):
"""Returns the path or URL to the image.
        Override this to return a URL to the image if it's available online for easy
debugging.
"""
return self.image_info[image_id]["path"]
def load_image(self, image_id):
"""Load the specified image and return a [H,W,3] Numpy array.
"""
# Load image
image = skimage.io.imread(self.image_info[image_id]['path'])
# If grayscale. Convert to RGB for consistency.
if image.ndim != 3:
image = skimage.color.gray2rgb(image)
return image
def load_bbox(self, image_id):
"""Load instance bbox for the given image.
        Different datasets use different ways to store bboxes. Override this
        method to load instance bboxes and return them in the form of an
        array of binary bboxes of shape [height, width, instances].
Returns:
bbox: A bool array of shape [height, width, instance count] with
a binary bbox per instance.
class_ids: a 1D array of class IDs of the instance bbox.
"""
# Override this function to load a bbox from your dataset.
# Otherwise, it returns an empty bbox.
bbox = np.empty([0, 0, 0])
class_ids = np.empty([0], np.int32)
return bbox, class_ids
def resize_image(image, min_dim=None, max_dim=None, padding=False):
"""
Resizes an image keeping the aspect ratio.
    min_dim: if provided, resizes the image such that its smaller
        dimension == min_dim
    max_dim: if provided, ensures that the image's longest side doesn't
        exceed this value.
    padding: If true, pads image with zeros so its size is max_dim x max_dim
Returns:
image: the resized image
window: (y1, x1, y2, x2). If max_dim is provided, padding might
be inserted in the returned image. If so, this window is the
coordinates of the image part of the full image (excluding
the padding). The x2, y2 pixels are not included.
scale: The scale factor used to resize the image
padding: Padding added to the image [(top, bottom), (left, right), (0, 0)]
"""
# Default window (y1, x1, y2, x2) and default scale == 1.
h, w = image.shape[:2]
window = (0, 0, h, w)
scale = 1
# Scale?
if min_dim:
# Scale up but not down
scale = max(1, min_dim / min(h, w))
# Does it exceed max dim?
if max_dim:
image_max = max(h, w)
if round(image_max * scale) > max_dim:
scale = max_dim / image_max
# Resize image and mask
if scale != 1:
image = scipy.misc.imresize(
image, (round(h * scale), round(w * scale)))
# Need padding?
if padding:
# Get new height and width
h, w = image.shape[:2]
top_pad = (max_dim - h) // 2
bottom_pad = max_dim - h - top_pad
left_pad = (max_dim - w) // 2
right_pad = max_dim - w - left_pad
padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]
image = np.pad(image, padding, mode='constant', constant_values=0)
window = (top_pad, left_pad, h + top_pad, w + left_pad)
return image, window, scale, padding
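# Worked example (values follow from the code above): a 100x200 image with
# min_dim=256, max_dim=256, padding=True first gets scale=2.56 from min_dim,
# which is clipped to 256/200 = 1.28 by max_dim; the 128x256 result is then
# padded to 256x256 with window=(64, 0, 192, 256).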
def resize_bbox(boxes, scale, padding):
"""Resizes a bbox using the given scale and padding.
Typically, you get the scale and padding from resize_image() to
ensure both, the image and the bbox, are resized consistently.
scale: bbox scaling factor
padding: Padding to add to the bbox in the form
[(top, bottom), (left, right), (0, 0)]
"""
top_pad = padding[0][0]
left_pad = padding[1][0]
resized_boxes = []
for box in boxes:
temp_new_box = box * scale
y1 = temp_new_box[0] + top_pad
x1 = temp_new_box[1] + left_pad
y2 = temp_new_box[2] + top_pad
x2 = temp_new_box[3] + left_pad
resized_boxes.append((y1,x1,y2,x2))
return np.array(resized_boxes)
############################################################
# Anchors
############################################################
def generate_anchors(scales, ratios, shape, feature_stride, anchor_stride):
"""
scales: 1D array of anchor sizes in pixels. Example: [32, 64, 128]
ratios: 1D array of anchor ratios of width/height. Example: [0.5, 1, 2]
shape: [height, width] spatial shape of the feature map over which
to generate anchors.
feature_stride: Stride of the feature map relative to the image in pixels.
anchor_stride: Stride of anchors on the feature map. For example, if the
value is 2 then generate anchors for every other feature map pixel.
"""
# Get all combinations of scales and ratios
scales, ratios = np.meshgrid(np.array(scales), np.array(ratios))
scales = scales.flatten()
ratios = ratios.flatten()
# Enumerate heights and widths from scales and ratios
heights = scales / np.sqrt(ratios)
widths = scales * np.sqrt(ratios)
# Enumerate shifts in feature space
shifts_y = np.arange(0, shape[0], anchor_stride) * feature_stride
shifts_x = np.arange(0, shape[1], anchor_stride) * feature_stride
shifts_x, shifts_y = np.meshgrid(shifts_x, shifts_y)
# Enumerate combinations of shifts, widths, and heights
box_widths, box_centers_x = np.meshgrid(widths, shifts_x)
box_heights, box_centers_y = np.meshgrid(heights, shifts_y)
# Reshape to get a list of (y, x) and a list of (h, w)
box_centers = np.stack(
[box_centers_y, box_centers_x], axis=2).reshape([-1, 2])
box_sizes = np.stack([box_heights, box_widths], axis=2).reshape([-1, 2])
# Convert to corner coordinates (y1, x1, y2, x2)
boxes = np.concatenate([box_centers - 0.5 * box_sizes,
box_centers + 0.5 * box_sizes], axis=1)
return boxes
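# Example sketch: a 2x2 feature map at stride 16 with one scale and one ratio
# yields one 32x32 anchor per feature cell:
# generate_anchors([32], [1], [2, 2], 16, 1).shape  # -> (4, 4)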
def generate_pyramid_anchors(scales, ratios, feature_shapes, feature_strides,
anchor_stride):
"""Generate anchors at different levels of a feature pyramid. Each scale
is associated with a level of the pyramid, but each ratio is used in
all levels of the pyramid.
Returns:
anchors: [N, (y1, x1, y2, x2)]. All generated anchors in one array. Sorted
with the same order of the given scales. So, anchors of scale[0] come
first, then anchors of scale[1], and so on.
"""
# Anchors
# [anchor_count, (y1, x1, y2, x2)]
anchors = []
for i in range(len(scales)):
anchors.append(generate_anchors(scales[i], ratios, feature_shapes[i],
feature_strides[i], anchor_stride))
return np.concatenate(anchors, axis=0)
############################################################
# Miscellaneous
############################################################
def trim_zeros(x):
"""It's common to have tensors larger than the available data and
pad with zeros. This function removes rows that are all zeros.
x: [rows, columns].
"""
assert len(x.shape) == 2
return x[~np.all(x == 0, axis=1)]
def compute_ap(gt_boxes, gt_class_ids,
pred_boxes, pred_class_ids, pred_scores,
iou_threshold=0.5):
"""Compute Average Precision at a set IoU threshold (default 0.5).
Returns:
mAP: Mean Average Precision
precisions: List of precisions at different class score thresholds.
recalls: List of recall values at different class score thresholds.
overlaps: [pred_boxes, gt_boxes] IoU overlaps.
"""
# Trim zero padding and sort predictions by score from high to low
# TODO: cleaner to do zero unpadding upstream
gt_boxes = trim_zeros(gt_boxes)
pred_boxes = trim_zeros(pred_boxes)
pred_scores = pred_scores[:pred_boxes.shape[0]]
indices = np.argsort(pred_scores)[::-1]
pred_boxes = pred_boxes[indices]
pred_class_ids = pred_class_ids[indices]
pred_scores = pred_scores[indices]
# Compute IoU overlaps [pred_boxes, gt_boxes]
overlaps = compute_overlaps(pred_boxes, gt_boxes)
# Loop through ground truth boxes and find matching predictions
match_count = 0
pred_match = np.zeros([pred_boxes.shape[0]])
gt_match = np.zeros([gt_boxes.shape[0]])
for i in range(len(pred_boxes)):
# Find best matching ground truth box
sorted_ixs = np.argsort(overlaps[i])[::-1]
for j in sorted_ixs:
# If ground truth box is already matched, go to next one
if gt_match[j] == 1:
continue
# If we reach IoU smaller than the threshold, end the loop
iou = overlaps[i, j]
if iou < iou_threshold:
break
# Do we have a match?
if pred_class_ids[i] == gt_class_ids[j]:
match_count += 1
gt_match[j] = 1
pred_match[i] = 1
break
# Compute precision and recall at each prediction box step
precisions = np.cumsum(pred_match) / (np.arange(len(pred_match)) + 1)
recalls = np.cumsum(pred_match).astype(np.float32) / len(gt_match)
# Pad with start and end values to simplify the math
precisions = np.concatenate([[0], precisions, [0]])
recalls = np.concatenate([[0], recalls, [1]])
    # Make the precision envelope monotonically non-increasing: the
    # precision value at each recall threshold is the maximum it can be
    # for all following recall thresholds, as specified by the VOC paper.
for i in range(len(precisions) - 2, -1, -1):
precisions[i] = np.maximum(precisions[i], precisions[i + 1])
# Compute mean AP over recall range
indices = np.where(recalls[:-1] != recalls[1:])[0] + 1
mAP = np.sum((recalls[indices] - recalls[indices - 1]) *
precisions[indices])
return mAP, precisions, recalls, overlaps
def compute_recall(pred_boxes, gt_boxes, iou):
"""Compute the recall at the given IoU threshold. It's an indication
of how many GT boxes were found by the given prediction boxes.
pred_boxes: [N, (y1, x1, y2, x2)] in image coordinates
gt_boxes: [N, (y1, x1, y2, x2)] in image coordinates
"""
# Measure overlaps
overlaps = compute_overlaps(pred_boxes, gt_boxes)
iou_max = np.max(overlaps, axis=1)
iou_argmax = np.argmax(overlaps, axis=1)
positive_ids = np.where(iou_max >= iou)[0]
matched_gt_boxes = iou_argmax[positive_ids]
recall = len(set(matched_gt_boxes)) / gt_boxes.shape[0]
return recall, positive_ids
# ## Batch Slicing
# Some custom layers support a batch size of 1 only, and require a lot of work
# to support batches greater than 1. This function slices an input tensor
# across the batch dimension and feeds batches of size 1. Effectively,
# an easy way to support batches > 1 quickly with little code modification.
# In the long run, it's more efficient to modify the code to support large
# batches and getting rid of this function. Consider this a temporary solution
def batch_slice(inputs, graph_fn, batch_size, names=None):
"""Splits inputs into slices and feeds each slice to a copy of the given
computation graph and then combines the results. It allows you to run a
graph on a batch of inputs even if the graph is written to support one
instance only.
inputs: list of tensors. All must have the same first dimension length
graph_fn: A function that returns a TF tensor that's part of a graph.
batch_size: number of slices to divide the data into.
names: If provided, assigns names to the resulting tensors.
"""
if not isinstance(inputs, list):
inputs = [inputs]
outputs = []
for i in range(batch_size):
inputs_slice = [x[i] for x in inputs]
output_slice = graph_fn(*inputs_slice)
if not isinstance(output_slice, (tuple, list)):
output_slice = [output_slice]
outputs.append(output_slice)
# Change outputs from a list of slices where each is
# a list of outputs to a list of outputs and each has
# a list of slices
outputs = list(zip(*outputs))
if names is None:
names = [None] * len(outputs)
result = [tf.stack(o, axis=0, name=n)
for o, n in zip(outputs, names)]
if len(result) == 1:
result = result[0]
return result
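# Illustrative sketch (not in the original source): slice a [2, 3] tensor into
# two [3] slices, double each slice, and stack the results back into [2, 3].
# Assumes the TF 1.x graph-mode style this code base uses.
def _batch_slice_example():
    boxes = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
    doubled = batch_slice([boxes], lambda b: b * 2, batch_size=2)
    return doubled  # a [2, 3] tensor holding the doubled rows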
############################################################
# Data Formatting
############################################################
def compose_image_meta(image_id, image_shape, window, active_class_ids):
"""Takes attributes of an image and puts them in one 1D array. Use
parse_image_meta() to parse the values back.
image_id: An int ID of the image. Useful for debugging.
image_shape: [height, width, channels]
window: (y1, x1, y2, x2) in pixels. The area of the image where the real
image is (excluding the padding)
active_class_ids: List of class_ids available in the dataset from which
the image came. Useful if training on images from multiple datasets
where not all classes are present in all datasets.
"""
meta = np.array(
[image_id] + # size=1
list(image_shape) + # size=3
        list(window) +           # size=4 (y1, x1, y2, x2) in image coordinates
list(active_class_ids) # size=num_classes
)
return meta
# Two functions (for Numpy and TF) to parse image_meta tensors.
def parse_image_meta(meta):
"""Parses an image info Numpy array to its components.
See compose_image_meta() for more details.
"""
image_id = meta[:, 0]
image_shape = meta[:, 1:4]
    window = meta[:, 4:8]   # (y1, x1, y2, x2) window of image in pixels
active_class_ids = meta[:, 8:]
return image_id, image_shape, window, active_class_ids
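# Round-trip sketch (illustration only): compose a meta vector for one image,
# add a batch dimension, and parse it back. Assumes a dataset with 4 classes.
def _image_meta_example():
    meta = compose_image_meta(image_id=7, image_shape=(128, 128, 3),
                              window=(0, 0, 128, 128), active_class_ids=[1, 1, 1, 1])
    batched = np.expand_dims(meta, axis=0)  # parse_image_meta expects [batch, meta length]
    image_id, image_shape, window, active_class_ids = parse_image_meta(batched)
    assert image_id[0] == 7 and tuple(image_shape[0]) == (128, 128, 3)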
def parse_image_meta_graph(meta):
"""Parses a tensor that contains image attributes to its components.
See compose_image_meta() for more details.
meta: [batch, meta length] where meta length depends on NUM_CLASSES
"""
image_id = meta[:, 0]
image_shape = meta[:, 1:4]
window = meta[:, 4:8]
active_class_ids = meta[:, 8:]
return [image_id, image_shape, window, active_class_ids]
def mold_image(images, config):
"""Takes RGB images with 0-255 values and subtraces
the mean pixel and converts it to float. Expects image
colors in RGB order.
"""
return images.astype(np.float32) - config.MEAN_PIXEL
def unmold_image(normalized_images, config):
"""Takes a image normalized with mold() and returns the original."""
return (normalized_images + config.MEAN_PIXEL).astype(np.uint8)
'''
class GradientsCallback(Callback):
def on_batch_end(self, batch, logs):
weights = self.model.trainable_weights # weight tensors
gradients = self.model.optimizer.get_gradients(self.model.total_loss, weights) # gradient tensors
input_tensors = self.model.inputs + self.model.sample_weights + self.model.targets + [K.learning_phase()]
get_gradients = K.function(inputs=input_tensors, outputs=gradients)
inputs = [x, x_off, np.ones(len(x)), y, 0]
grads = get_gradients(inputs)
with open('gradients.txt','w') as f:
f.write('grads')
'''
def log(text, array=None):
"""Prints a text message. And, optionally, if a Numpy array is provided it
prints it's shape, min, and max values.
"""
if array is not None:
text = text.ljust(25)
text += ("shape: {:20} ".format(str(array.shape)))
if array.size:
text += ("min: {:10.5f} max: {:10.5f}".format(array.min(),array.max()))
else:
text += ("min: {:10} max: {:10}".format("",""))
text += " {}".format(array.dtype)
print(text)
def denorm_boxes(boxes, shape):
"""Converts boxes from normalized coordinates to pixel coordinates.
boxes: [N, (y1, x1, y2, x2)] in normalized coordinates
shape: [..., (height, width)] in pixels
Note: In pixel coordinates (y2, x2) is outside the box. But in normalized
coordinates it's inside the box.
Returns:[N, (y1, x1, y2, x2)] in pixel coordinates
"""
h, w = shape
scale = np.array([h - 1, w - 1, h - 1, w - 1])
shift = np.array([0, 0, 1, 1])
return np.around(np.multiply(boxes, scale) + shift).astype(np.int32)
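# Worked example (illustration only): a full-image box in normalized
# coordinates maps to pixel coordinates that cover the whole image.
def _denorm_boxes_example():
    boxes = np.array([[0.0, 0.0, 1.0, 1.0]])
    # scale is [99, 199, 99, 199]; the [0, 0, 1, 1] shift moves (y2, x2) just
    # outside the box, giving [[0, 0, 100, 200]].
    return denorm_boxes(boxes, shape=(100, 200))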
|
[
"mitulmodi15@gmail.com"
] |
mitulmodi15@gmail.com
|
dd8266083726914414d608f3cacd125395994324
|
7ef29543c9e8305f181084cede03d8cec50508f1
|
/docker_vnc_immutable/immutableworkstation3.py
|
61d3ee59bb7a0be59a34ef246f878368298cc05d
|
[
"MIT"
] |
permissive
|
mikadosoftware/workstation
|
6168ba7f8f8357d73e7792a3c65c0daec37222e7
|
9c8b19bc5d6c596843da30f58f1dad6a60c7e989
|
refs/heads/master
| 2023-02-21T03:45:54.209770
| 2023-02-08T08:41:36
| 2023-02-08T08:41:36
| 138,070,951
| 477
| 29
|
MIT
| 2023-02-07T21:53:32
| 2018-06-20T18:28:07
|
Python
|
UTF-8
|
Python
| false
| false
| 13,917
|
py
|
#!/usr/bin/python3
#! -*- coding:utf-8 -*-
"""
ImmutableWorkstation
====================
This is a single entry point for the `immutableworkstation` project.
The project is pretty simple - I want to have a consistent, immutable
workstation on any host machine I am developing on - so I am using a
docker instance on a host machine - the instance is my development
"machine", and it can be rebuilt from consistent templates - this
script helps control all that - it's supposed to be easier to get
started than a bunch of poorly documented shell scripts.
* the starting and stopping of the dev instance.
* the compilation of the docker image
* various config and templates used to build the docker image.
This script does quite a lot, and needs to be installed on
the host machine - do so using
pip3 install docopt
python3 setup.py install
(I will launch it on PyPI soon)
Once this is done, you should be able to run
./immutableworkstation.py
usage
-----
We expect to have a config .ini file. This is for ease of specifying things like
volume mappings.
By default the config file is at `~/immuntableworkstation/config.ini`
[ ] Implement expect-style testing so we can automate testing.
[x] put the home dir into git separate to the rest of the package (i.e. that's the individual part)
[ ] put blog.mikadosoftware onto AWS and run this testing with docker on it.
[ ] migrate rest of the articles there.
[x] create a plain docker instance and just import devstation, see if it works (i.e. clean install)
[ ] run the get github projects into one place
[ ] podman system prune : clean up a lot of cruft in docker areas.
[x] remove privileged access with user-name remapping
[ ] improve using https://github.com/mviereck/x11docker
"""
##### imports #####
import logging, sys
from docopt import docopt
import subprocess
import time
import os
from pprint import pprint as pp
from mikado.core import config
import shutil
import json
import lib_config
import operator
##### Module setup #####
# TODO: split out logging into common module
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
log.addHandler(handler)
DRYRUN = False
PDB = False
OCI_CMD = 'sudo docker'
OCI_CMD = 'podman'  # the podman value overrides docker; comment it out to use docker instead
#: usage definitions
DOCOPT_HELP = """immutableworkstation
Usage:
immutableworkstation.py showconfig [options]
immutableworkstation.py createDockerfile --templatedir=<path> [options]
immutableworkstation.py start tagname [options]
immutableworkstation.py stop tagname [options]
immutableworkstation.py login tagname [options]
immutableworkstation.py buildAnyDocker <path_to_dockerfile> <context_dir> [options]
immutableworkstation.py status
immutableworkstation.py test
immutableworkstation.py (-h | --help )
Options:
-h --help Show this screen
-d --dryrun dryrun
--configfile=<configpath> path 2 config ini file
--tagname=<tagname> Name to tag
--instancename=<instancename>
--username=<username>
--volumearray=<volumearray>
"""
def parse_docopt(argsd):
    '''Split the docopt result into args (<val>), options (--flag) and commands (e.g. start).'''
args = []
options = []
commands = []
    active_commands = []
# we assume only one command at a time?
for k,i in argsd.items():
if k.startswith("--"):
options.append({k:i})
elif k.startswith("<"):
args.append({k:i})
else:
commands.append({k:i})
#
active_commands = [list(d.keys())[0] for d in commands if list(d.values())[0]]
return args, options, commands, active_commands
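# Quick sketch of what parse_docopt produces, for illustration only. The dict
# below is hand-built to look like docopt's output for
# "immutableworkstation.py start tagname --dryrun".
def _parse_docopt_example():
    argsd = {'start': True, 'stop': False, 'tagname': True,
             '--dryrun': True, '<path_to_dockerfile>': None}
    args, options, commands, active_commands = parse_docopt(argsd)
    # args -> [{'<path_to_dockerfile>': None}]
    # options -> [{'--dryrun': True}]
    # active_commands -> ['start', 'tagname'] (only the truthy commands)
    return active_commands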
############### Config
def build_sshcmd():
"""Create the command used to connect to running docker via ssh."""
return "ssh -X {username}@{localhost} -p {ssh_port}".format(**CONFD)
def build_dockerrun(latest=True):
"""create the command used to start docker instance.
tagname of image
name of running instance
"""
_latest = LATEST if latest else NEXT
instance_name = "run_{}_{}".format(CONFD["instance_name"], _latest)
image_name = "{}:{}".format(CONFD["tagname"], _latest)
vols = ""
for hostpath, mountpath in CONFD["volumes"].items():
vols += "-v {}:{} ".format(hostpath, mountpath)
return [
"{} container prune -f".format(OCI_CMD),
"""{OCI_CMD} run -d \
{vols} \
--name {instance_name} \
--device /dev/snd \
-p {ssh_port}:22 \
--privileged \
{tagname}:{_latest}
""".format(
OCI_CMD=OCI_CMD,
vols=vols,
instance_name=instance_name,
ssh_port=CONFD["ssh_port"],
_latest=_latest,
tagname=CONFD["tagname"],
),
]
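# For illustration (the exact values come from CONFD and the LATEST/NEXT
# globals, which live outside this snippet): with tagname=workstation,
# instance_name=devstation and one volume mapping, build_dockerrun(latest=True)
# returns roughly:
#   ["podman container prune -f",
#    "podman run -d -v ~/data:/var/data --name run_devstation_latest ..."]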
def build_docker_build(latest=True):
"""Create command used to (re)build the container.
We store the Dockerfile (as that name)
in dir .next or .latest so that we can
have various templates and assets and so on
in the 'context' directory.
"""
tmpl = "{} build -t {{tagname}}:{{tagtag}} {{pathtodockerfile}} --squash".format(OCI_CMD)
_latest = LATEST if latest else NEXT
pathtodockerfile = os.path.join(CONFD["devstation_config_root"], "." + _latest)
return tmpl.format(
tagname=CONFD["tagname"], tagtag=_latest, pathtodockerfile=pathtodockerfile
)
def build_docker_any_build(path_to_dockerfile, context_dir):
"""Create command used to (re)build the container.
"""
tmpl = "{} build -t {{tagname}}:{{tagtag}} -f {{path_to_dockerfile}} {{context_dir}} --squash".format(OCI_CMD)
return tmpl.format(
tagname='anybuild', tagtag='0.1', path_to_dockerfile=path_to_dockerfile, context_dir=context_dir
)
def read_subprocess(cmd):
"""Run a command and return output """
result = subprocess.run(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
shell=True)
txt = result.stdout
return txt
def run_subprocess(cmd, shell=None):
"""Run the given command in a subprocess."""
if DRYRUN:
telluser(cmd)
else:
log.info(cmd)
subprocess.run(cmd, shell=True)
def spawn_sibling_console():
"""This script is best thought of as a launcher for other shells we
shall be working in. We want to interact with the console, not
this script much.
I have played with fork'ing a child console, then passing `fd`
0,1,2 over to it. But the easiest way seems to be to assume this
is a GUI workstation, and people are using a terminal program
(like Konsole) - so we just spawn konsole and run -e
"""
sshcmd = '{} "{}" &'.format(CONFD["terminal_command"], build_sshcmd())
log.info(sshcmd)
run_subprocess(sshcmd)
def handle_start(args):
"""Perform cmsd needed to start the docker and login
I really need to monitor the success of the underlying
cmds, instead of brute force sleep.
[ ] {milestone} stop using sleep, monitor the subprocess for return values.
"""
# do start up here
cmds = build_dockerrun(args["latest"])
for cmd in cmds:
# TODO get better solution than sleep
run_subprocess(cmd, shell=True)
time.sleep(8) # brute force give docker time to complete its stuff.
time.sleep(10) # As above, but let docker catch up before login
handle_login(args)
############### Config
# This is a 'well-known' location
CONFIGDIR = os.path.join(os.path.expanduser("~"), ".immutableworkstation")
CONFIGLOCATION = os.path.join(
os.path.expanduser("~"), ".immutableworkstation/config.ini"
)
def handle_showconfig(args):
print(args['--configfile'])
#lib_config.show_config(confd=CONFD)
def handle_login(args):
spawn_sibling_console()
def handle_createDockerfile(args):
makeDocker(args['--templatedir'])
def handle_buildDocker(args):
"""Trigger the processes to create new dockerfile and then build image. """
makeDocker(latest=args["latest"])
cmd = build_docker_build(latest=args["latest"])
run_subprocess(cmd)
def parse_volumearray(options):
    '''Convert the --volumearray option into a list of host=container mappings.
    Falls back to a default set of mappings when the option is absent.
    '''
    for opt in options:
        value = opt.get('--volumearray')
        if value:
            return value.split()
    return ['~/data=/var/data',
            '~/projects=/var/projects',
            '~/secrets=/var/secrets:ro',
            '~/Dropbox=/var/Dropbox']
def handle_buildAnyDocker(args):
"""Trigger the processes to create new dockerfile and then build image. """
#import pdb;pdb.set_trace()
cmd = build_docker_any_build(args['<path_to_dockerfile>'], args['<context_dir>'])
run_subprocess(cmd)
def handle_status(args):
"""Show container status. """
cmd = "{} container ls".format(OCI_CMD)
run_subprocess(cmd)
cmd = "{} inspect run_devstation_next".format(OCI_CMD)
txt = read_subprocess(cmd)
jsond = json.loads(txt)
ipaddress = jsond[0]['NetworkSettings']['IPAddress']
print('Use this ip address {}'.format(ipaddress))
def handle_stop(args):
"""Kill the specified instance. """
_latest = LATEST if args["latest"] else NEXT
#: rewrite so this is not in two places
instance_name = "run_{}_{}".format(CONFD["instance_name"], _latest)
cmd = "{} container kill {}".format(OCI_CMD, instance_name)
run_subprocess(cmd)
def hasValidConfig():
"""This is a placeholder for future development on checking curr env. """
has_config_file = os.path.isfile(CONFIGLOCATION)
return all([has_config_file])
def gatherinfo():
questions = {
"username": "What username should be the default (only) on your immutable workstation?"
}
answers = {}
for label, question in questions.items():
answer = input(question)
answers[label] = answer
return answers
def handle_quickstart(args):
"""We have a starter config on github. Pull that down and put in
users homedir, then alter based on questions.
I am spending too long yak shaving on this app, and so will just
print instructions and look to automate it later.
"""
helpmsg = ""
if hasValidConfig():
helpmsg += """You appear to have an existing config in {}.
Please adjust it manually - view docs for help.""".format(
CONFIGLOCATION
)
if not hasValidConfig():
helpmsg += """ In the future this app will walk you through a series of
questions, but for now please can you download and unzip into {} the
starter config stored at {}. You should have a directory layout like::
.immutableworkstation
|
-config.ini
|
-.next/
-.latest/
You should copy these into *your* github repo, and then update the
templates to your needs, as you find a new package to be added to your
workstation, adjust the config needed.
""".format(
CONFIGDIR, STARTER_CONFIG_URL
)
telluser(helpmsg)
def handle_unknown(command, e, args):
telluser(f"Unknown request. We got command: {command} and error: {e}. Full args were {args}")
def makeDocker(templatesdir):
"""Take a .skeleton file, and replace defined markup with
contents of txt files
Based on 'dockerfile.skeleton', replace any instance of
    {{ python }} with the contents of the file `templates/python.template`.
This is an *extremely* simple templating tool. It is *not*
    supposed to have even the complexity of Jinja2. It's supposed to
be really dumb. Lucky I wrote it then :-).
"""
pathtodockerfile = os.path.join(templatesdir, "../Dockerfile")
skeleton = "dockerfile.skeleton"
outputs = ""
with open(os.path.join(templatesdir, skeleton)) as fo:
for line in fo:
if line.find("{{") == 0:
file = line.replace("{{", "").replace("}}", "").strip()
filepath = os.path.join(templatesdir, file + ".template")
txt = open(filepath).read()
outputs += "\n### {}\n{}\n".format(line, txt)
else:
outputs += "{}".format(line)
    with open(pathtodockerfile, "w") as fo:
        fo.write(outputs)
telluser("Written new Dockerfile at {}".format(pathtodockerfile))
def telluser(msg):
""" aggregate print stmts into one place."""
# handle my weird formatting
print(msg)
def build_current_confd(args, options, commands, active_commands):
print("args", args, '----\n')
print("options", options, '----\n')
print("commands", commands, '----\n')
print("active commands", active_commands, '----\n')
volumes = parse_volumearray(options)
import sys; sys.exit()
def run(argsd):
    #: start with quickstart as it may be our only option
#: [ ] make this safer with .get
args, options, commands, active_commands = parse_docopt(argsd)
build_current_confd(args, options, commands, active_commands)
for active_command in active_commands:
try:
# in current module, prepend handle_ to the name of the active command and
# look for that in current module, if it exists, call it
current_module = sys.modules[__name__]
fn = operator.attrgetter('handle_{}'.format(active_command))(current_module)
fn.__call__(argsd)
except Exception as e:
handle_unknown(active_command, e, argsd)
def runtests():
import doctest
doctest.testmod()
teststr = '''
[default]
tagname = workstation
instance_name = devstation
localhost = 127.0.0.1
username = pbrian
ssh_port = 2222
terminal_command = /usr/bin/konsole -e
volume_array: ~/secrets=/var/secrets:ro ~/secrets2=/var/secrets2:ro
'''
def main():
global DRYRUN
args = docopt(DOCOPT_HELP)
if args.get("--dryrun", False):
DRYRUN = True
run(args)
if __name__ == "__main__":
main()
|
[
"paul@mikadosoftware.com"
] |
paul@mikadosoftware.com
|
d18f96dd0867f55e3239dacad3182148cccd426e
|
d02aac5fd9864b2f446c048a48c0370292cdf148
|
/captcha_data.py
|
6d919a12f192429da318e56f35e7c9d435ee5262
|
[] |
no_license
|
sayaadit/captcha_breaker
|
81677018be269d3a82162f48e3bf0ad91bd03270
|
827274a5d8f6054b11242b50d4284039f6550152
|
refs/heads/master
| 2020-05-24T04:00:18.114397
| 2019-05-16T18:49:56
| 2019-05-16T18:49:56
| 187,083,988
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,713
|
py
|
import glob, os
import numpy as np
import cv2
import random
class OCR_data(object):
def __init__(self, num, data_dir, num_classes, batch_size=50, len_code=5, height=60, width=180, resize_height=24,
resize_width=88, num_channels=1):
self.num = num
self.data_dir = data_dir
self.num_classes = num_classes
self.batch_size = batch_size
self.len_code = len_code
self.height = height
self.width = width
self.resize_height = resize_height
self.resize_width = resize_width
self.num_channels = num_channels
self.index_in_epoch = 0
self._imgs = []
self._labels = []
for pathAndFilename in glob.iglob(os.path.join(data_dir, '*.png')):
img, label = self.create_captcha(pathAndFilename)
self._imgs.append(img)
self._labels.append(label)
self._imgs = np.array(self._imgs).reshape((-1, resize_height, resize_width, num_channels)).astype(
np.float32)
self._labels = np.array(self._labels)
def create_captcha(self, pathAndFilename):
img = cv2.imread(pathAndFilename, cv2.IMREAD_GRAYSCALE)
img = cv2.resize(img, (self.resize_width, self.resize_height), interpolation=cv2.INTER_AREA)
filename, ext = os.path.splitext(os.path.basename(pathAndFilename))
label = self.create_label(filename)
return (img, label)
def create_label(self, filename):
label = []
for c in filename:
            ascii_code = ord(c)
            if ascii_code < 58:
                # digits '0'-'9' map to class indices 0-9
                char_value = ascii_code - 48
            else:
                # lowercase letters 'a'-'z' map to class indices 10-35
                char_value = ascii_code - 87
label.append(char_value)
return self.dense_to_one_hot(label, self.num_classes)
def dense_to_one_hot(self, labels_dense, num_classes):
num_labels = len(labels_dense)
        index_offset = np.arange(num_labels) * num_classes
        labels_one_hot = np.zeros((num_labels, num_classes))
        labels_one_hot.flat[index_offset + labels_dense] = 1
labels_one_hot = labels_one_hot.reshape(num_labels * num_classes)
return labels_one_hot
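    # Worked example (illustration only): with num_classes=36, the label
    # [1, 10] ('1' then 'a') becomes a flat vector of length 72 with ones at
    # positions 1 (0*36 + 1) and 46 (1*36 + 10) and zeros elsewhere.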
def next_batch(self, batch_size):
start = self.index_in_epoch
self.index_in_epoch += batch_size
if self.index_in_epoch > self.num:
perm = np.arange(self.num)
np.random.shuffle(perm)
self._imgs = self._imgs[perm]
self._labels = self._labels[perm]
start = 0
self.index_in_epoch = batch_size
assert batch_size <= self.num
end = self.index_in_epoch
return self._imgs[start:end], self._labels[start:end]
|
[
"sayaadit9@gmail.com"
] |
sayaadit9@gmail.com
|
a3a738a23827197ecf696191edae4ca0e4645061
|
3f9aff869095b986e99b50b996b172ea946ee667
|
/python_code.py
|
ffc6055f970c7256040159792f1d5b4980c8ce90
|
[] |
no_license
|
ssmitha21/repo_1
|
a0a822c256b56df2fbfed1da41008aa4ff0db2e5
|
905f11b4ca6948baf54a9bbb708b2692be21c761
|
refs/heads/master
| 2020-11-26T02:03:48.325768
| 2019-12-19T20:33:59
| 2019-12-19T20:33:59
| 228,931,907
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 23
|
py
|
print("Hello Githib!")
|
[
"ssmitha_21@yahoo.com"
] |
ssmitha_21@yahoo.com
|
3d32dcf93fd427ca48693b5eae480d0cd2609d04
|
b295b72d3f9471e1badf2d1568fa4748007403bd
|
/api-examples/impala/create_impala_datasource.py
|
092e3b637a3045ad3fe15126b6422e37d2521d04
|
[
"Apache-2.0"
] |
permissive
|
Zoomdata/zoomdata-tools
|
ed4fa11ebc83697bb0cd75087d718117d1d7e824
|
a411396e29cea5198dce4f389d30fa5ebf86effa
|
refs/heads/master
| 2022-05-02T17:56:39.215640
| 2022-04-05T09:06:58
| 2022-04-05T09:06:58
| 70,515,227
| 3
| 10
|
Apache-2.0
| 2022-04-05T09:06:59
| 2016-10-10T18:07:58
|
Python
|
UTF-8
|
Python
| false
| false
| 1,872
|
py
|
#!/usr/bin/env python
from base_classes.zoomdata_api_base import ZoomdataRequest
from base_classes.zoomdata_api_impala import ImpalaDatasource
zoomdataBaseURL = raw_input("Enter the Zoomdata instance URL (https://<server>:<port>/zoomdata): ")
adminUser = raw_input("Enter the Zoomdata administrator username (typically 'admin'): ")
adminPassword = raw_input("Enter the password for the Zoomdata administrator: ")
connectionID = raw_input("Enter the Zoomdata connection ID to use: ")
connectorTypeInput = raw_input("Is this connector an EDC? (yes or no): ")
if connectorTypeInput.lower() == "yes":
connectorType = "EDC2"
else:
connectorType = "IMPALA"
collectionName = raw_input("Enter the Impala collection name, or custom SQL statement: ")
customSQL = raw_input("Did you enter a custom SQL statement in the previous step? (yes or no): ")
if customSQL.lower() == "yes":
customSQLFlag = "true"
else:
customSQLFlag = "false"
schemaName = raw_input("Enter the Impala schema name that contains the collection: ")
debug = raw_input("Do you want to enable verbose output (debug mode; prints all API request data to the console)? (yes or no): ")
sourceName = raw_input("Finally, enter a name for the new datasource: ")
# Create the Zoomdata server request
zoomdataServerRequest = ZoomdataRequest(zoomdataBaseURL, adminUser, adminPassword)
# Enable verbose output if desired
if debug.lower() == "yes":
zoomdataServerRequest.enableDebug()
# Initialize the source object
source = ImpalaDatasource(sourceName, zoomdataServerRequest, connectionID, collectionName, schemaName, customSQLFlag, connectorType=connectorType)
# Finally, create the source in Zoomdata
source.create()
# Uncomment the line below to delete the datasource after creation (for testing purposes)
#source.delete()
# Return the Zoomdata source id of the newly created source
print "source: "+source.id
|
[
"boyd@zoomdata.com"
] |
boyd@zoomdata.com
|
a4807335c903336469b5249d53ae61d78e699610
|
a21131c2ef7cd2a4a6a27d3fcef132ba3fdc9756
|
/path_context_reader.py
|
c9424ebf391dca42abf666c0be8f79024f41cfd6
|
[
"MIT"
] |
permissive
|
eladn/code2vec
|
3cfb9a14bc3f720720a0cdb933832778dd04d7a7
|
32dabfa21200be35f0e7beeb0dc536edb549f021
|
refs/heads/master
| 2020-04-28T05:31:16.767427
| 2019-09-18T08:53:33
| 2019-09-18T08:53:33
| 175,023,515
| 0
| 1
|
MIT
| 2019-03-11T14:58:18
| 2019-03-11T14:58:17
| null |
UTF-8
|
Python
| false
| false
| 11,627
|
py
|
import tensorflow as tf
from typing import Dict, Tuple, NamedTuple, Union, Optional, Iterable
from config import Config
from vocabularies import Code2VecVocabs
import abc
from functools import reduce
from enum import Enum
class EstimatorAction(Enum):
Train = 'train'
Evaluate = 'evaluate'
Predict = 'predict'
@property
def is_train(self):
return self is EstimatorAction.Train
@property
def is_evaluate(self):
return self is EstimatorAction.Evaluate
@property
def is_predict(self):
return self is EstimatorAction.Predict
@property
def is_evaluate_or_predict(self):
return self.is_evaluate or self.is_predict
class ReaderInputTensors(NamedTuple):
"""
Used mostly for convenient-and-clear access to input parts (by their names).
"""
path_source_token_indices: tf.Tensor
path_indices: tf.Tensor
path_target_token_indices: tf.Tensor
context_valid_mask: tf.Tensor
target_index: Optional[tf.Tensor] = None
target_string: Optional[tf.Tensor] = None
path_source_token_strings: Optional[tf.Tensor] = None
path_strings: Optional[tf.Tensor] = None
path_target_token_strings: Optional[tf.Tensor] = None
class ModelInputTensorsFormer(abc.ABC):
"""
Should be inherited by the model implementation.
An instance of the inherited class is passed by the model to the reader in order to help the reader
to construct the input in the form that the model expects to receive it.
    This class also enables convenient & clear access to input parts by their field names.
    eg: 'tensors.path_indices' instead of 'tensors[1]'.
This allows the input tensors to be passed as pure tuples along the computation graph, while the
python functions that construct the graph can easily (and clearly) access tensors.
"""
@abc.abstractmethod
def to_model_input_form(self, input_tensors: ReaderInputTensors):
...
@abc.abstractmethod
def from_model_input_form(self, input_row) -> ReaderInputTensors:
...
class PathContextReader:
def __init__(self,
vocabs: Code2VecVocabs,
config: Config,
model_input_tensors_former: ModelInputTensorsFormer,
estimator_action: EstimatorAction,
repeat_endlessly: bool = False):
self.vocabs = vocabs
self.config = config
self.model_input_tensors_former = model_input_tensors_former
self.estimator_action = estimator_action
self.repeat_endlessly = repeat_endlessly
self.CONTEXT_PADDING = ','.join([self.vocabs.token_vocab.special_words.PAD,
self.vocabs.path_vocab.special_words.PAD,
self.vocabs.token_vocab.special_words.PAD])
self.csv_record_defaults = [[self.vocabs.target_vocab.special_words.OOV]] + \
([[self.CONTEXT_PADDING]] * self.config.MAX_CONTEXTS)
# initialize the needed lookup tables (if not already initialized).
self.create_needed_vocabs_lookup_tables(self.vocabs)
self._dataset: Optional[tf.data.Dataset] = None
@classmethod
def create_needed_vocabs_lookup_tables(cls, vocabs: Code2VecVocabs):
vocabs.token_vocab.get_word_to_index_lookup_table()
vocabs.path_vocab.get_word_to_index_lookup_table()
vocabs.target_vocab.get_word_to_index_lookup_table()
@tf.function
def process_input_row(self, row_placeholder):
parts = tf.io.decode_csv(
row_placeholder, record_defaults=self.csv_record_defaults, field_delim=' ', use_quote_delim=False)
# Note: we DON'T apply the filter `_filter_input_rows()` here.
tensors = self._map_raw_dataset_row_to_input_tensors(*parts)
# make it batched (first batch axis is going to have dimension 1)
tensors_expanded = ReaderInputTensors(
**{name: None if tensor is None else tf.expand_dims(tensor, axis=0)
for name, tensor in tensors._asdict().items()})
return self.model_input_tensors_former.to_model_input_form(tensors_expanded)
def process_and_iterate_input_from_data_lines(self, input_data_lines: Iterable) -> Iterable:
for data_row in input_data_lines:
processed_row = self.process_input_row(data_row)
yield processed_row
def get_dataset(self, input_data_rows: Optional = None) -> tf.data.Dataset:
if self._dataset is None:
self._dataset = self._create_dataset_pipeline(input_data_rows)
return self._dataset
def _create_dataset_pipeline(self, input_data_rows: Optional = None) -> tf.data.Dataset:
if input_data_rows is None:
assert not self.estimator_action.is_predict
dataset = tf.data.experimental.CsvDataset(
self.config.data_path(is_evaluating=self.estimator_action.is_evaluate),
record_defaults=self.csv_record_defaults, field_delim=' ', use_quote_delim=False,
buffer_size=self.config.CSV_BUFFER_SIZE)
else:
dataset = tf.data.Dataset.from_tensor_slices(input_data_rows)
dataset = dataset.map(
lambda input_line: tf.io.decode_csv(
tf.reshape(tf.cast(input_line, tf.string), ()),
record_defaults=self.csv_record_defaults,
field_delim=' ', use_quote_delim=False))
if self.repeat_endlessly:
dataset = dataset.repeat()
if self.estimator_action.is_train:
if not self.repeat_endlessly and self.config.NUM_TRAIN_EPOCHS > 1:
dataset = dataset.repeat(self.config.NUM_TRAIN_EPOCHS)
dataset = dataset.shuffle(self.config.SHUFFLE_BUFFER_SIZE, reshuffle_each_iteration=True)
dataset = dataset.map(self._map_raw_dataset_row_to_expected_model_input_form,
num_parallel_calls=self.config.READER_NUM_PARALLEL_BATCHES)
batch_size = self.config.batch_size(is_evaluating=self.estimator_action.is_evaluate)
if self.estimator_action.is_predict:
dataset = dataset.batch(1)
else:
dataset = dataset.filter(self._filter_input_rows)
dataset = dataset.batch(batch_size)
dataset = dataset.prefetch(buffer_size=40) # original: tf.contrib.data.AUTOTUNE) -- got OOM err; 10 seems promising.
return dataset
def _filter_input_rows(self, *row_parts) -> tf.bool:
row_parts = self.model_input_tensors_former.from_model_input_form(row_parts)
assert all(tensor.shape == (self.config.MAX_CONTEXTS,) for tensor in
{row_parts.path_source_token_indices, row_parts.path_indices,
row_parts.path_target_token_indices, row_parts.context_valid_mask})
# FIXME: Does "valid" here mean just "no padding" or "neither padding nor OOV"? I assumed just "no padding".
any_word_valid_mask_per_context_part = [
tf.not_equal(tf.reduce_max(row_parts.path_source_token_indices, axis=0),
self.vocabs.token_vocab.word_to_index[self.vocabs.token_vocab.special_words.PAD]),
tf.not_equal(tf.reduce_max(row_parts.path_target_token_indices, axis=0),
self.vocabs.token_vocab.word_to_index[self.vocabs.token_vocab.special_words.PAD]),
tf.not_equal(tf.reduce_max(row_parts.path_indices, axis=0),
self.vocabs.path_vocab.word_to_index[self.vocabs.path_vocab.special_words.PAD])]
any_contexts_is_valid = reduce(tf.logical_or, any_word_valid_mask_per_context_part) # scalar
if self.estimator_action.is_evaluate:
cond = any_contexts_is_valid # scalar
else: # training
word_is_valid = tf.greater(
row_parts.target_index, self.vocabs.target_vocab.word_to_index[self.vocabs.target_vocab.special_words.OOV]) # scalar
cond = tf.logical_and(word_is_valid, any_contexts_is_valid) # scalar
return cond # scalar
def _map_raw_dataset_row_to_expected_model_input_form(self, *row_parts) -> \
Tuple[Union[tf.Tensor, Tuple[tf.Tensor, ...], Dict[str, tf.Tensor]], ...]:
tensors = self._map_raw_dataset_row_to_input_tensors(*row_parts)
return self.model_input_tensors_former.to_model_input_form(tensors)
def _map_raw_dataset_row_to_input_tensors(self, *row_parts) -> ReaderInputTensors:
row_parts = list(row_parts)
target_str = row_parts[0]
target_index = self.vocabs.target_vocab.lookup_index(target_str)
contexts_str = tf.stack(row_parts[1:(self.config.MAX_CONTEXTS + 1)], axis=0)
split_contexts = tf.compat.v1.string_split(contexts_str, sep=',', skip_empty=False)
# dense_split_contexts = tf.sparse_tensor_to_dense(split_contexts, default_value=self.vocabs.token_vocab.special_words.PAD)
sparse_split_contexts = tf.sparse.SparseTensor(
indices=split_contexts.indices, values=split_contexts.values, dense_shape=[self.config.MAX_CONTEXTS, 3])
dense_split_contexts = tf.reshape(
tf.sparse.to_dense(sp_input=sparse_split_contexts, default_value=self.vocabs.token_vocab.special_words.PAD),
shape=[self.config.MAX_CONTEXTS, 3]) # (max_contexts, 3)
path_source_token_strings = tf.squeeze(
tf.slice(dense_split_contexts, begin=[0, 0], size=[self.config.MAX_CONTEXTS, 1]), axis=1) # (max_contexts,)
path_strings = tf.squeeze(
tf.slice(dense_split_contexts, begin=[0, 1], size=[self.config.MAX_CONTEXTS, 1]), axis=1) # (max_contexts,)
path_target_token_strings = tf.squeeze(
tf.slice(dense_split_contexts, begin=[0, 2], size=[self.config.MAX_CONTEXTS, 1]), axis=1) # (max_contexts,)
path_source_token_indices = self.vocabs.token_vocab.lookup_index(path_source_token_strings) # (max_contexts, )
path_indices = self.vocabs.path_vocab.lookup_index(path_strings) # (max_contexts, )
path_target_token_indices = self.vocabs.token_vocab.lookup_index(path_target_token_strings) # (max_contexts, )
# FIXME: Does "valid" here mean just "no padding" or "neither padding nor OOV"? I assumed just "no padding".
valid_word_mask_per_context_part = [
tf.not_equal(path_source_token_indices, self.vocabs.token_vocab.word_to_index[self.vocabs.token_vocab.special_words.PAD]),
tf.not_equal(path_target_token_indices, self.vocabs.token_vocab.word_to_index[self.vocabs.token_vocab.special_words.PAD]),
tf.not_equal(path_indices, self.vocabs.path_vocab.word_to_index[self.vocabs.path_vocab.special_words.PAD])] # [(max_contexts, )]
context_valid_mask = tf.cast(reduce(tf.logical_or, valid_word_mask_per_context_part), dtype=tf.float32) # (max_contexts, )
assert all(tensor.shape == (self.config.MAX_CONTEXTS,) for tensor in
{path_source_token_indices, path_indices, path_target_token_indices, context_valid_mask})
return ReaderInputTensors(
path_source_token_indices=path_source_token_indices,
path_indices=path_indices,
path_target_token_indices=path_target_token_indices,
context_valid_mask=context_valid_mask,
target_index=target_index,
target_string=target_str,
path_source_token_strings=path_source_token_strings,
path_strings=path_strings,
path_target_token_strings=path_target_token_strings
)
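# Small sketch (illustration only) of the validity-mask logic used above in
# _filter_input_rows and _map_raw_dataset_row_to_input_tensors: a context is
# "valid" when at least one of its three parts is not the PAD index.
def _context_valid_mask_example(pad_index=0):
    source = tf.constant([0, 5, 0])  # per-context token indices; 0 plays the PAD role here
    path = tf.constant([0, 2, 0])
    target = tf.constant([0, 7, 0])
    masks = [tf.not_equal(t, pad_index) for t in (source, path, target)]
    return tf.cast(reduce(tf.logical_or, masks), tf.float32)  # -> [0., 1., 0.]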
|
[
"eladnah@gmail.com"
] |
eladnah@gmail.com
|
29a911f8facee7df9b6c2801468794f26212fdd0
|
542323b41107b68e1e4ee910817beca684439497
|
/pjViz/Visual/node.py
|
e8a96e500ca55dae8a3a717a0f50375edbf71438
|
[] |
no_license
|
Twoods01/programVisualization
|
08ef1778353535272db0937233e035f3348b64ac
|
c1f86de13380c71c4b734dc3eb4c785b63ad1c3f
|
refs/heads/master
| 2016-08-03T21:43:13.670679
| 2015-06-11T18:07:17
| 2015-06-11T18:07:17
| 30,376,764
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,422
|
py
|
__author__ = 'twoods0129'
import operator as op
import pyglet
from pjViz.constants import Constants
import pjViz.Utils.vectorMath as vm
import pjViz.Utils.spline as spline
init_radius = 150
node_height = 70
node_width = 114
x_offset = node_width / 2
y_offset = node_height / 2
node_vertices = {}
class Node:
texture = pyglet.image.load('Visual/rr.png').get_texture()
#Construct a new Node given a MethodDeclaration which it represents, and a parent if it has one
def __init__(self, method, parent=None, visible=True):
        #Array of child branch numbers
        self.child_branches = []
        #Array of parent nodes
        self.parents = []
        #Array of child nodes
        self.children = []
if parent is not None:
self.parents.append(parent)
#Hash of number of times this node has been visited by other nodes
# key is the node, value is the number of visits
self.visit_count = {}
#Hash of splines which form the path from this node to its parents
#key is the parent node, value is the Spline
self.splines = {}
#x,y location on screen
self.x = -1
self.y = -1
#Branch and index of this node
self.branch = 0
self.index = 0
#The method which this node represents
self.method = method
self.radius = init_radius
self.visible = visible
def add_parent(self, parent):
self.parents.append(parent)
parent.children.append(self)
#Print for debugging
def write(self):
print(self.method.name)
print(' child branches ' + str(self.child_branches))
        print(' parents ' + str([x.method.name for x in self.parents]))
        print(' children ' + str([x.method.name for x in self.children]))
#Set x,y position of this node
def set_position(self, x, y):
self.x = x
self.y = y
#Draw the node with the given color
def draw(self, color, additional_draw_task=None, texture=True):
if not self.visible:
if additional_draw_task != None:
additional_draw_task()
return
pyglet.gl.glPushMatrix()
pyglet.gl.glTranslatef(self.x, self.y, 0)
#Check if we've drawn a node of this color before, if not create a vertex list for it
if not color in node_vertices:
node_vertices[color] = pyglet.graphics.vertex_list_indexed(4,
[0, 1, 2, 0, 2, 3],
('v3i', (-57, -35, 0,
57, -35, 0,
57, 35, 0,
-57, 35, 0)),
('t2f', (0.0, 0.0,
1.0, 0.0,
1.0, 1.0,
0.0, 1.0)),
('c4B', (color[0], color[1], color[2], 255) * 4))
if texture:
pyglet.gl.glEnable(Node.texture.target)
pyglet.gl.glBindTexture(Node.texture.target, Node.texture.id)
node_vertices[color].draw(pyglet.gl.GL_TRIANGLES)
if additional_draw_task != None:
pyglet.gl.glPopMatrix()
additional_draw_task()
pyglet.gl.glPushMatrix()
pyglet.gl.glTranslatef(self.x, self.y, 0)
#Label it with method name
pyglet.text.Label(self.method.name + "()",
font_name= Constants.font,
font_size=12,
x = 0,
y = 0,
anchor_y = 'center',
anchor_x= 'center').draw()
if texture:
pyglet.gl.glDisable(Node.texture.target)
pyglet.gl.glPopMatrix()
def add_branch(self, branch_num):
self.child_branches.append(branch_num)
#Returns true if this node has been given a location, otherwise false
def placed(self):
return self.x != -1 and self.y != -1
#Returns a vector containing the direction from self to node
def get_direction(self, node):
        return vm.normalize(list(map(op.sub, (node.x, node.y), (self.x, self.y))))
#Given x, y coordinate and current camera position determine if that coordinate is inside the node
def hit(self, x, y):
return x > self.x - x_offset and x < self.x + node_width - x_offset\
and y > self.y - y_offset and y < self.y + node_height - y_offset
#Connect current node to |node| UNUSED
def connect(self, color=[237, 255, 228]):
pyglet.gl.glLineWidth(3)
for p in self.parents:
pyglet.graphics.draw(2, pyglet.gl.GL_LINES,
('v2i', (int(self.x), int(self.y),
int(p.x), int(p.y))),
('c3B', (color[0], color[1], color[2]) * 2))
#Draw an edge from self to node, using a spline
def draw_edge(self, node, color=[255, 255, 255], up=False, control=None):
pyglet.gl.glLineWidth(3)
if not node in self.splines:
self.splines[node] = spline.Spline(self, node, up=up, control=control)
if not self in node.splines:
node.splines[self] = self.splines[node]
self.splines[node].draw(color)
|
[
"twoods0129@gmail.com"
] |
twoods0129@gmail.com
|
132f9d82eb8b31115fe7d76fe9d57fb3439e4fa5
|
cba6b7debfa923fc05e97dce02584f579d4fcba6
|
/gremlin-python/src/main/jython/gremlin_python/process/traversal.py
|
1afaa6c5d8081c3097a421e5f5843ff6fffa6c3e
|
[
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
sri-desai/TinkerPop
|
74367e39051c24815203b1588f4d1464f1a061fa
|
06902258499cf62e82c5661dd21091bfa8b875ae
|
refs/heads/master
| 2021-07-25T22:28:09.089307
| 2017-11-09T03:17:24
| 2017-11-09T03:17:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,013
|
py
|
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
'''
from aenum import Enum
from .. import statics
from ..statics import long
class Traversal(object):
def __init__(self, graph, traversal_strategies, bytecode):
self.graph = graph
self.traversal_strategies = traversal_strategies
self.bytecode = bytecode
self.side_effects = TraversalSideEffects()
self.traversers = None
self.last_traverser = None
def __repr__(self):
return str(self.bytecode)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.bytecode == other.bytecode
else:
return False
def __iter__(self):
return self
def __next__(self):
if self.traversers is None:
self.traversal_strategies.apply_strategies(self)
if self.last_traverser is None:
self.last_traverser = next(self.traversers)
object = self.last_traverser.object
self.last_traverser.bulk = self.last_traverser.bulk - 1
if self.last_traverser.bulk <= 0:
self.last_traverser = None
return object
def toList(self):
return list(iter(self))
def toSet(self):
return set(iter(self))
def iterate(self):
while True:
try: self.nextTraverser()
except StopIteration: return self
def nextTraverser(self):
if self.traversers is None:
self.traversal_strategies.apply_strategies(self)
if self.last_traverser is None:
return next(self.traversers)
else:
temp = self.last_traverser
self.last_traverser = None
return temp
def next(self, amount=None):
if amount is None:
return self.__next__()
else:
count = 0
tempList = []
while count < amount:
count = count + 1
try: temp = self.__next__()
except StopIteration: return tempList
tempList.append(temp)
return tempList
def promise(self, cb=None):
self.traversal_strategies.apply_async_strategies(self)
future_traversal = self.remote_results
future = type(future_traversal)()
def process(f):
try:
traversal = f.result()
except Exception as e:
future.set_exception(e)
else:
self.traversers = iter(traversal.traversers)
self.side_effects = traversal.side_effects
if cb:
try:
result = cb(self)
except Exception as e:
future.set_exception(e)
else:
future.set_result(result)
else:
future.set_result(self)
future_traversal.add_done_callback(process)
return future
Barrier = Enum('Barrier', ' normSack')
statics.add_static('normSack', Barrier.normSack)
Cardinality = Enum('Cardinality', ' list_ set_ single')
statics.add_static('single', Cardinality.single)
statics.add_static('list_', Cardinality.list_)
statics.add_static('set_', Cardinality.set_)
Column = Enum('Column', ' keys values')
statics.add_static('keys', Column.keys)
statics.add_static('values', Column.values)
Direction = Enum('Direction', ' BOTH IN OUT')
statics.add_static('OUT', Direction.OUT)
statics.add_static('IN', Direction.IN)
statics.add_static('BOTH', Direction.BOTH)
GraphSONVersion = Enum('GraphSONVersion', ' V1_0 V2_0 V3_0')
statics.add_static('V1_0', GraphSONVersion.V1_0)
statics.add_static('V2_0', GraphSONVersion.V2_0)
statics.add_static('V3_0', GraphSONVersion.V3_0)
GryoVersion = Enum('GryoVersion', ' V1_0 V3_0')
statics.add_static('V1_0', GryoVersion.V1_0)
statics.add_static('V3_0', GryoVersion.V3_0)
Operator = Enum('Operator', ' addAll and_ assign div max min minus mult or_ sum sumLong')
statics.add_static('sum', Operator.sum)
statics.add_static('minus', Operator.minus)
statics.add_static('mult', Operator.mult)
statics.add_static('div', Operator.div)
statics.add_static('min', Operator.min)
statics.add_static('max', Operator.max)
statics.add_static('assign', Operator.assign)
statics.add_static('and_', Operator.and_)
statics.add_static('or_', Operator.or_)
statics.add_static('addAll', Operator.addAll)
statics.add_static('sumLong', Operator.sumLong)
Order = Enum('Order', ' decr incr keyDecr keyIncr shuffle valueDecr valueIncr')
statics.add_static('incr', Order.incr)
statics.add_static('decr', Order.decr)
statics.add_static('keyIncr', Order.keyIncr)
statics.add_static('valueIncr', Order.valueIncr)
statics.add_static('keyDecr', Order.keyDecr)
statics.add_static('valueDecr', Order.valueDecr)
statics.add_static('shuffle', Order.shuffle)
Pick = Enum('Pick', ' any none')
statics.add_static('any', Pick.any)
statics.add_static('none', Pick.none)
Pop = Enum('Pop', ' all_ first last mixed')
statics.add_static('first', Pop.first)
statics.add_static('last', Pop.last)
statics.add_static('all_', Pop.all_)
statics.add_static('mixed', Pop.mixed)
Scope = Enum('Scope', ' global_ local')
statics.add_static('global_', Scope.global_)
statics.add_static('local', Scope.local)
T = Enum('T', ' id key label value')
statics.add_static('label', T.label)
statics.add_static('id', T.id)
statics.add_static('key', T.key)
statics.add_static('value', T.value)
class P(object):
def __init__(self, operator, value, other=None):
self.operator = operator
self.value = value
self.other = other
@staticmethod
def between(*args):
return P("between", *args)
@staticmethod
def eq(*args):
return P("eq", *args)
@staticmethod
def gt(*args):
return P("gt", *args)
@staticmethod
def gte(*args):
return P("gte", *args)
@staticmethod
def inside(*args):
return P("inside", *args)
@staticmethod
def lt(*args):
return P("lt", *args)
@staticmethod
def lte(*args):
return P("lte", *args)
@staticmethod
def neq(*args):
return P("neq", *args)
@staticmethod
def not_(*args):
return P("not_", *args)
@staticmethod
def outside(*args):
return P("outside", *args)
@staticmethod
def test(*args):
return P("test", *args)
@staticmethod
def within(*args):
return P("within", *args)
@staticmethod
def without(*args):
return P("without", *args)
def and_(self, arg):
return P("and", self, arg)
def or_(self, arg):
return P("or", self, arg)
def __eq__(self, other):
return isinstance(other, self.__class__) and self.operator == other.operator and self.value == other.value and self.other == other.other
def __repr__(self):
return self.operator + "(" + str(self.value) + ")" if self.other is None else self.operator + "(" + str(self.value) + "," + str(self.other) + ")"
def between(*args):
return P.between(*args)
statics.add_static('between',between)
def eq(*args):
return P.eq(*args)
statics.add_static('eq',eq)
def gt(*args):
return P.gt(*args)
statics.add_static('gt',gt)
def gte(*args):
return P.gte(*args)
statics.add_static('gte',gte)
def inside(*args):
return P.inside(*args)
statics.add_static('inside',inside)
def lt(*args):
return P.lt(*args)
statics.add_static('lt',lt)
def lte(*args):
return P.lte(*args)
statics.add_static('lte',lte)
def neq(*args):
return P.neq(*args)
statics.add_static('neq',neq)
def not_(*args):
return P.not_(*args)
statics.add_static('not_',not_)
def outside(*args):
return P.outside(*args)
statics.add_static('outside',outside)
def test(*args):
return P.test(*args)
statics.add_static('test',test)
def within(*args):
return P.within(*args)
statics.add_static('within',within)
def without(*args):
return P.without(*args)
statics.add_static('without',without)
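# Usage sketch (illustration only): predicates compose into nested P objects
# that are serialized later as part of the traversal bytecode.
def _predicate_example():
    p = P.gt(5).and_(P.lt(10))  # P("and", P("gt", 5), P("lt", 10))
    q = within(1, 2)            # the module-level helpers mirror P's static methods
    return p, q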
'''
TRAVERSER
'''
class Traverser(object):
def __init__(self, object, bulk=None):
if bulk is None:
bulk = long(1)
self.object = object
self.bulk = bulk
def __repr__(self):
return str(self.object)
def __eq__(self, other):
return isinstance(other, self.__class__) and self.object == other.object
'''
TRAVERSAL SIDE-EFFECTS
'''
class TraversalSideEffects(object):
def keys(self):
return set()
def get(self, key):
raise KeyError(key)
def __getitem__(self, key):
return self.get(key)
def __repr__(self):
return "sideEffects[size:" + str(len(self.keys())) + "]"
'''
TRAVERSAL STRATEGIES
'''
class TraversalStrategies(object):
global_cache = {}
def __init__(self, traversal_strategies=None):
self.traversal_strategies = traversal_strategies.traversal_strategies if traversal_strategies is not None else []
def add_strategies(self, traversal_strategies):
self.traversal_strategies = self.traversal_strategies + traversal_strategies
def apply_strategies(self, traversal):
for traversal_strategy in self.traversal_strategies:
traversal_strategy.apply(traversal)
def apply_async_strategies(self, traversal):
for traversal_strategy in self.traversal_strategies:
traversal_strategy.apply_async(traversal)
def __repr__(self):
return str(self.traversal_strategies)
class TraversalStrategy(object):
def __init__(self, strategy_name=None, configuration=None):
self.strategy_name = type(self).__name__ if strategy_name is None else strategy_name
self.configuration = {} if configuration is None else configuration
def apply(self, traversal):
return
def apply_async(self, traversal):
return
def __eq__(self, other):
return isinstance(other, self.__class__)
def __hash__(self):
return hash(self.strategy_name)
def __repr__(self):
return self.strategy_name
'''
BYTECODE
'''
class Bytecode(object):
def __init__(self, bytecode=None):
self.source_instructions = []
self.step_instructions = []
self.bindings = {}
if bytecode is not None:
self.source_instructions = list(bytecode.source_instructions)
self.step_instructions = list(bytecode.step_instructions)
def add_source(self, source_name, *args):
instruction = [source_name]
for arg in args:
instruction.append(self.__convertArgument(arg))
self.source_instructions.append(instruction)
def add_step(self, step_name, *args):
instruction = [step_name]
for arg in args:
instruction.append(self.__convertArgument(arg))
self.step_instructions.append(instruction)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.source_instructions == other.source_instructions and self.step_instructions == other.step_instructions
else:
return False
def __convertArgument(self,arg):
if isinstance(arg, Traversal):
self.bindings.update(arg.bytecode.bindings)
return arg.bytecode
elif isinstance(arg, dict):
newDict = {}
for key in arg:
newDict[self.__convertArgument(key)] = self.__convertArgument(arg[key])
return newDict
elif isinstance(arg, list):
newList = []
for item in arg:
newList.append(self.__convertArgument(item))
return newList
elif isinstance(arg, set):
newSet = set()
for item in arg:
newSet.add(self.__convertArgument(item))
return newSet
elif isinstance(arg, tuple) and 2 == len(arg) and isinstance(arg[0], str):
self.bindings[arg[0]] = arg[1]
return Binding(arg[0],self.__convertArgument(arg[1]))
else:
return arg
def __repr__(self):
return (str(self.source_instructions) if len(self.source_instructions) > 0 else "") + \
(str(self.step_instructions) if len(self.step_instructions) > 0 else "")
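# Sketch (illustration only): instructions accumulate as [name, *converted_args]
# lists, and a (str, value) tuple becomes a Binding plus an entry in .bindings.
def _bytecode_example():
    bc = Bytecode()
    bc.add_source("V")
    bc.add_step("has", "name", ("n", "marko"))
    # bc.step_instructions -> [['has', 'name', binding[n=marko]]]
    # bc.bindings -> {'n': 'marko'}
    return bc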
'''
BINDINGS
'''
class Bindings(object):
def of(self,key,value):
if not isinstance(key, str):
raise TypeError("Key must be str")
return (key,value)
class Binding(object):
def __init__(self,key,value):
self.key = key
self.value = value
def __eq__(self, other):
return isinstance(other, self.__class__) and self.key == other.key and self.value == other.value
def __hash__(self):
return hash(self.key) + hash(self.value)
def __repr__(self):
return "binding[" + self.key + "=" + str(self.value) + "]"
|
[
"srinivas.desai491@gmail.com"
] |
srinivas.desai491@gmail.com
|
a2a3823e6435408a754b473b37f7233309d5ef3f
|
4754d6b05b7eb255983f58474164d8690f4d8684
|
/figurines/tests/test_views.py
|
4ad1ab56cb491358a3a1c8c3bb9812ce62ef1085
|
[] |
no_license
|
pythonmentor/benjamin-p13
|
4f629be3cd9b2e8af6934fb69dfca63d6a294346
|
ada744761d3a3c6ecde1aec5db20770960cb2146
|
refs/heads/master
| 2023-01-24T17:10:30.235330
| 2020-11-30T17:29:09
| 2020-11-30T17:29:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,155
|
py
|
from django.test import TestCase
from django.urls import reverse
from figurines.models import Category, DidYouSee, Figurine
from users.models import User
class FigurineTestViews(TestCase):
def setUp(self):
self.user_test = User.objects.create_user(
username="UserTest", password="PaswordOfTheTest&120"
)
category_figurine = Category.objects.create(
name="super heroes"
)
figurine = Figurine.objects.create(
figurine_number="1",
category=category_figurine,
name="batman"
)
figurine.user.add(self.user_test)
return super().setUp()
def test_figurine_add_figurine(self):
self.client.login(username="UserTest", password="PaswordOfTheTest&120")
response = self.client.post(
"/figurines/add_figurine/",
{"figurine_number": "31", "category": "World of Warcraft", "name": "Thrall"},
)
self.assertEqual(response.status_code, 302)
self.assertTemplateUsed('figurines/collection.html')
def test_figurine_collection_user(self):
self.client.login(username="UserTest", password="PaswordOfTheTest&120")
response = self.client.get('/figurines/collection/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed('figurines/collection.html')
def test_figurine_search_with_all_figurines(self):
self.client.login(username="UserTest", password="PaswordOfTheTest&120")
user = User.objects.get(username="UserTest")
response = self.client.get('/figurines/search/?all=all')
user_figurine = user.figurine_set.all()
self.assertQuerysetEqual(
response.context['figurines_list'],
[repr(figurine) for figurine in user_figurine]
)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed('figurines/search.html')
def test_figurine_search_without_all_figurines(self):
self.client.login(username="UserTest", password="PaswordOfTheTest&120")
user = User.objects.get(username="UserTest")
        user.figurine_set.all().delete()
response = self.client.get('/figurines/search/?all=all')
self.assertFalse(response.context['figurines_list'])
self.assertContains(response, 'Pas de résultat.')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed('figurines/search.html')
def test_figurine_search_with_figurines(self):
self.client.login(username="UserTest", password="PaswordOfTheTest&120")
user = User.objects.get(username="UserTest")
response = self.client.get('/figurines/search/?q=batman')
user_figurine = user.figurine_set.filter(name__icontains='batman')
self.assertQuerysetEqual(
response.context['figurines_list'],
[repr(figurine) for figurine in user_figurine]
)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed('figurines/search.html')
    def test_figurine_search_without_matching_figurines(self):
self.client.login(username="UserTest", password="PaswordOfTheTest&120")
user = User.objects.get(username="UserTest")
        user.figurine_set.filter(name__icontains='batman').delete()
response = self.client.get('/figurines/search/?q=batman')
self.assertFalse(response.context['figurines_list'])
self.assertContains(response, 'Pas de résultat.')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed('figurines/search.html')
def test_figurine_did_you_see(self):
self.client.login(username="UserTest", password="PaswordOfTheTest&120")
response = self.client.get("/figurines/did_you_see/")
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed("figurines/did_you_see.html")
def test_create_question(self):
self.client.login(username="UserTest", password="PaswordOfTheTest&120")
response = self.client.post(
"/figurines/create_question",
{
"title": "Je recherche batman",
"text": "Bonjour, je recherche Batman",
"date": "03/07/2020",
},
)
self.assertRedirects(response, '/figurines/did_you_see/')
response = self.client.get('/figurines/did_you_see/')
self.assertContains(response, 'Je recherche batman')
self.assertTemplateUsed('figurines/did_you_see.html')
def test_can_respond_to_question(self):
self.client.login(username="UserTest", password="PaswordOfTheTest&120")
response = self.client.post(
"/figurines/create_question",
{
"title": "Je recherche batman2",
"text": "Bonjour, je recherche Batman2",
"date": "03/07/2020",
},
)
post = DidYouSee.objects.get(title='Je recherche batman2')
response_second_message = self.client.post(
f"/figurines/create_question/{post.id}",
{
"title": "J'ai batman2",
"text": "j'ai batman",
"date": "20/07/2020",
}
)
response_detail = self.client.get(f'/figurines/post_detail/{post.id}/')
self.assertContains(response_detail, "j'ai batman")
self.assertTemplateUsed('figurines/post_detail.html')
def test_post_detail(self):
self.client.force_login(self.user_test)
user = User.objects.get(username="UserTest")
post = DidYouSee(
author=user,
title="Je recherche batman",
text="Bonjour, j'ai trouvé Batman",
)
post.save()
post.parent = post
post.save()
response = self.client.get(
f"/figurines/post_detail/{post.id}"
)
self.assertContains(response, "Je recherche batman")
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed('figurines/post_detail.html')
"""
def test_delete_figurine(self):
self.client.login(username="UserTest", password="PaswordOfTheTest&120")
response = self.client.post('/figurines/collection/?q=logan')
user = User.objects.get(username="UserTest")
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed('figurines/collection.html')
"""
# def test_report_post(self):
# self.client.login(username="UserTest", password="PaswordOfTheTest&120")
# response = self.client.post(
# "/figurines/post_detail/51/",
# {
# "title": "Je recherche batman",
# "text": "Bonjour, j'ai trouvé Batman",
# },
# )
# self.assertEqual(response.status_code, 200)
# self.assertTemplateUsed('figurines/report_post.html')
|
[
"benjamin.rejaud@gmail.com"
] |
benjamin.rejaud@gmail.com
|
2e33e0e16030e96cb4126bce18cbe60adc5220f1
|
4adc5b30bdd5ed6388746f9822d9b0e6f1879a69
|
/geeksforgeeks_ArrayInsertEnd.py
|
0ae2df6e160a0b16f7f498110c8e80577ff5559f
|
[] |
no_license
|
praveengadiyaram369/geeksforgeeks_submissions
|
1a69a609ef27819e60174aad4709a8b11d7b10ab
|
f546faedf048a57e8cee34cb141dd13c377b7ba5
|
refs/heads/master
| 2022-12-27T15:40:39.692379
| 2020-10-03T07:03:39
| 2020-10-03T07:03:39
| 286,435,728
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 108
|
py
|
# _Array insert at end
def insertAtEnd(arr, sizeOfArray, element):
arr.append(element)
return arr
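# Illustrative check (my addition, not part of the submission): list.append
# gives amortized O(1) insertion at the end.
# >>> insertAtEnd([1, 2, 3], 3, 4)
# [1, 2, 3, 4]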
|
[
"praveengadiyaram@gmail.com"
] |
praveengadiyaram@gmail.com
|
0ea64af5b9f481b06e417b65c708cd5668bc733a
|
1f71eac5d7514e24def7e5e231c5ef7487bf9c0a
|
/links/schema_registry.py
|
ba9ea8977c623da1050e5118badb9dd80b1347f3
|
[] |
no_license
|
stefanfoulis/Arkestra
|
b68f212c4c83ab2c66ea98313c5f1291f897e56d
|
bddd11ae98b633b5e7bfaf2fa98ae6f98b039130
|
refs/heads/master
| 2021-01-15T18:45:57.398106
| 2011-03-07T12:52:03
| 2011-03-07T12:52:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,986
|
py
|
from django.utils import simplejson
from django.db.models import get_model
from django.db.models import Q
from django.db.models.query import QuerySet
from django.db.models import ForeignKey
from django.utils.safestring import mark_safe
from django.utils.encoding import smart_str, smart_unicode
import operator
from django.http import HttpResponse, HttpResponseNotFound
from django.contrib.auth.decorators import login_required
from django.contrib.contenttypes.models import ContentTypeManager, ContentType
from django.conf import settings
#WIDGETRY_AVAILABLE = 'widgetry' in settings.INSTALLED_APPS
from widgetry.utils import traverse_object
from widgetry.views import search, WrapperFactory, SearchItemWrapper, call_if_callable
from widgetry import signals as widgetry_signals
class LinkWrapper(SearchItemWrapper):
# gets default identifier, title, description and thumbnail methods
# from SearchItemWrapper
def text(self):
return call_if_callable( getattr( self.obj, 'text', self.title()) )
def short_text(self):
return call_if_callable( getattr( self.obj, 'short_text', self.text()) )
def url(self):
return call_if_callable( getattr( self.obj, 'get_absolute_url', "" ) )
def metatag(self):
return call_if_callable( getattr( self.obj, 'metatag', 'no metatag defined') )
def heading(self):
return call_if_callable( getattr( self.obj, 'heading', 'no heading defined') )
ATTRIBUTES = [
'identifier',
'title',
'description',
'thumbnail_url',
'text',
'short_text',
'url',
'metadata',
'heading',
]
class MyWrapperFactory(WrapperFactory):
pass
wrapper_factory = MyWrapperFactory(LinkWrapper, ATTRIBUTES)
class Registry(object):
def __init__(self):
self.wrappers = dict()
self.content_types = dict()
self.discovered = False
# these signals make sure that whenever a widgetry function is used
# the schemas from links are actually registered
widgetry_signals.search_request.connect(self.discover_links_schemas)
widgetry_signals.get_wrapper.connect(self.discover_links_schemas)
def register(self, klasses, search_fields, **kwargs):
if not isinstance(klasses, list):
klasses = [klasses]
if not search_fields:
raise Exception("link schema registration: search_fields are missing")
for klass in klasses:
wrapper = wrapper_factory.build('%sAutoGenerated' % klass.__name__, search_fields, kwargs)
self.register_wrapper(klass, wrapper)
def register_wrapper(self, klasses, wrapper):
if not isinstance(klasses, list):
klasses = [klasses]
for klass in klasses:
#print u"registering %s to %s" % (klass, wrapper)
self.wrappers[klass] = wrapper
self.content_types[klass] = ContentType.objects.get_for_model(klass)
# also register any links with the search/autocomplete system
            if not search.is_registered(klass):
                # but only if it is not registered yet
                #print u"schema: registering %s for search" % klass
search.register_wrapper(klass, wrapper)
def get_wrapper(self, model_or_string):
self.discover_links_schemas()
#print "get wrapper %s" % model_or_string
if isinstance(model_or_string, str):
app_label, model_name = model_or_string.split('.')
content_type = ContentType.objects.get(app_label=app_label, model=model_name)
model = content_type.model_class()
else:
model = model_or_string
#print "return wrapper for %s" % model
#print self.wrappers
if model in self.wrappers:
wrapper = self.wrappers[model]
else:
wrapper = LinkWrapper
#print " wrapper: %s" % wrapper
return wrapper
def is_registered(self, model):
self.discover_links_schemas()
return model in self.wrappers
def content_type_choices(self):
self.discover_links_schemas()
choices = [('','----')]
#q_obj = None
for model_class, content_type in self.content_types.items():
#new_q = Q(app_label = model_class._meta.app_name, )
choices.append((content_type.pk, u"%s: %s" % (content_type.app_label.replace('_', ' '), content_type.name)))
return choices
def discover_links_schemas(self, *args, **kwargs):
'''
run through all installed apps to find link schema definitions.
This needs to get called rather late, because it needs access to
models and admin
'''
if self.discovered:
return
for app in settings.INSTALLED_APPS:
__import__(app, {}, {}, ['link_schemas'])
self.discovered = True
schema = Registry()
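# Hypothetical registration call (added for illustration; the app and field
# names below are assumptions, not from this codebase):
# from news.models import Article
# schema.register(Article, search_fields=['title', 'summary'])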
|
[
"daniele@apple-juice.co.uk"
] |
daniele@apple-juice.co.uk
|
891cf68c8f2e5a2d7b2c3c9baf3fd45f36ba1c93
|
3e3a835ee885eb9a71fd35ea58acd04361f72f47
|
/python基础/复习.py/石头剪刀布.py
|
df86dfa2ef1429a31cb3268c524f245a54ab4e82
|
[] |
no_license
|
hanfang302/py-
|
dbb259f24e06fbe1a900df53ae6867acb8cb54ea
|
dd3be494ccef5100c0f06ed936f9a540d8ca0995
|
refs/heads/master
| 2020-03-16T01:59:57.002135
| 2018-05-07T12:02:21
| 2018-05-07T12:02:21
| 132,454,341
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 337
|
py
|
player = int(input('Your move: rock (1), scissors (2), paper (3): '))
computer = 2
if ((player == 1 and computer == 2) or
        (player == 2 and computer == 3) or
        (player == 3 and computer == 1)):
    print('The computer loses')
elif player == computer:
    print('Great minds think alike, play another round')
else:
    print('No way, I will fight you to the end')
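# A hedged extension (my addition, not the author's code): the computer's move
# is hard-coded to scissors (2) above; a random opponent could look like this:
# import random
# computer = random.randint(1, 3)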
|
[
"hanfang123@aliyun.com"
] |
hanfang123@aliyun.com
|
92d3f6d6dc1e477f6b89f1665b180bf5ab4360da
|
968913bda3879ef316100410cdb2b01333ac14a8
|
/004_Algorithm_Implementations_In_Python/data_structures/queue/queue_on_list.py
|
898ffac3a9c7c1fda92bb8b75af1826ee7ec17f0
|
[
"MIT"
] |
permissive
|
sm2774us/2021_Interview_Prep
|
02b6a81ee52f3cb14d9e060839a01aadd84e231f
|
c6689411a4334d53c88581a296e57c314b50f46c
|
refs/heads/main
| 2023-03-02T05:30:17.156821
| 2021-01-26T04:31:02
| 2021-01-26T04:31:02
| 332,603,676
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,213
|
py
|
"""Queue represented by a python list"""
class Queue():
def __init__(self):
self.entries = []
self.length = 0
self.front=0
def __str__(self):
printed = '<' + str(self.entries)[1:-1] + '>'
return printed
"""Enqueues {@code item}
@param item
item to enqueue"""
def put(self, item):
self.entries.append(item)
self.length = self.length + 1
"""Dequeues {@code item}
@requirement: |self.length| > 0
@return dequeued
item that was dequeued"""
def get(self):
self.length = self.length - 1
dequeued = self.entries[self.front]
#self.front-=1
#self.entries = self.entries[self.front:]
self.entries = self.entries[1:]
return dequeued
"""Rotates the queue {@code rotation} times
@param rotation
number of times to rotate queue"""
def rotate(self, rotation):
for i in range(rotation):
self.put(self.get())
"""Enqueues {@code item}
@return item at front of self.entries"""
def front(self):
return self.entries[0]
"""Returns the length of this.entries"""
def size(self):
return self.length
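# Quick usage demo (added for illustration; not in the original file):
if __name__ == '__main__':
    q = Queue()
    for item in (1, 2, 3):
        q.put(item)
    q.rotate(1)        # dequeue 1 and enqueue it again -> <2, 3, 1>
    print(q)           # <2, 3, 1>
    print(q.front())   # 2
    print(q.get())     # 2
    print(q.size())    # 2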
|
[
"sm2774us@gmail.com"
] |
sm2774us@gmail.com
|
0e3d3dd2945ef2e53a134724fa8bbc66d98afb65
|
0e96dde7517fbbccffcb93a3c4bd324fefcbed0a
|
/index/migrations/0002_auto_20210708_1042.py
|
064bb0952af48b0b143c1aad29c85a803f9eff67
|
[] |
no_license
|
phoby20/heatmap-analytics
|
b70c5c45674f0f3928f7b3c07954e558490fa263
|
38366d05a3480eee884c50dc1791660985acb659
|
refs/heads/master
| 2023-06-18T22:07:51.562849
| 2021-07-14T05:43:16
| 2021-07-14T05:43:16
| 385,824,130
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 590
|
py
|
# Generated by Django 3.2.3 on 2021-07-08 10:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('index', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='movehistory',
name='id',
field=models.AutoField(primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='pointcount',
name='id',
field=models.AutoField(primary_key=True, serialize=False),
),
]
|
[
"phoby20@hotmail.com"
] |
phoby20@hotmail.com
|
7d8b5b232c28c669043a2e65a6cbec7643049db9
|
3fc53a89ff5d9fc8643255b36887db17085cbdc8
|
/penguinlifelines/settings.py
|
a80156f0db44c2a401a709f068316931ab6842f0
|
[] |
no_license
|
eamonnmag/PenguinLifelines
|
9dd94396b8f706c9d68dacc8aca84583a38a2bae
|
a88ecb8b032479fd74420f72ee602ef79fb27a8a
|
refs/heads/master
| 2021-01-23T07:34:37.058261
| 2014-09-17T09:11:17
| 2014-09-17T09:11:17
| 14,091,069
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,847
|
py
|
# Django settings for penguinlifelines project.
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
PROJECT_ROOT = os.path.join(os.path.abspath(os.path.dirname(__file__)), '')
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'uploader.db', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/London'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media/').replace('\\','/')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/site_media/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'b&#^ir@x%(o_8+676)r^z8rak8@5g##$ag%%%-&rk&ozmcbp%q'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader'
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'penguinlifelines.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'penguinlifelines.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
PROJECT_ROOT + 'multiuploader/',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'grappelli',
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'profiles',
'registration',
'multiuploader',
'sorl.thumbnail',
'django.contrib.flatpages',
'app',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
MULTI_FILE_DELETE_URL = 'multi_delete'
MULTI_IMAGE_URL = 'multi_image'
MULTI_IMAGES_FOLDER = 'multiuploader_images'
|
[
"eamonnmag@gmail.com"
] |
eamonnmag@gmail.com
|
b2493c69029aa36ecfe9427d2df847afc4e33904
|
687c0094ea4f20a7d779f6b50e44e86ee6b9dd51
|
/hadoop/client.py
|
d931c6db8daf62344bfa024c6ce73a44274cb02d
|
[] |
no_license
|
d0r1h/Aut0Infra
|
aa90f65aa292a2924e5ba118dedeb79f016a3ae9
|
ad2f475400ebb97d352d1e68d4c62d565504d5ed
|
refs/heads/master
| 2023-01-28T11:06:17.056688
| 2020-12-10T03:38:46
| 2020-12-10T03:38:46
| 311,212,153
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 38
|
py
|
def client():
print('I am client')
|
[
"59r@protonmail.com"
] |
59r@protonmail.com
|
6ad1ec33ed60cb67164cba8e6c216bf23b7eff14
|
09592939eaf88d46f7d2d760d9587cb9fc22707e
|
/entity/cards/LETLT_083/LETLT_083.py
|
c575c2ef97600aa10d16c30ba708043ebfac001e
|
[
"MIT"
] |
permissive
|
fulln/lushi_script
|
5deb2fb99956988ee4884836443f74277b361939
|
f2c5250f6ce7e3ea2b8d3ba280d999ae8c7beb8b
|
refs/heads/main
| 2023-09-04T16:50:24.696142
| 2021-11-24T03:44:41
| 2021-11-24T03:44:41
| 431,565,901
| 0
| 0
|
MIT
| 2021-11-24T17:04:06
| 2021-11-24T17:04:05
| null |
UTF-8
|
Python
| false
| false
| 470
|
py
|
# -*- coding: utf-8 -*-
from hearthstone.entities import Entity
from entity.spell_entity import SpellEntity
class LETLT_083(SpellEntity):
"""
    Violent Eruption
    Deal 10 damage to an enemy that has already acted this turn. Repeat this damage at the start of the next battle.
"""
def __init__(self, entity: Entity):
super().__init__(entity)
self.damage = 0
self.range = 0
def play(self, game, hero, target):
pass
|
[
"gg48@qq.com"
] |
gg48@qq.com
|
09b392b45aef0ce2b082eaa210be15285a463e0c
|
45015c94a4376a4af66e4134f0552288cd15a2d8
|
/services/authentication_service.py
|
ee9f1e65813dcf31637b0a0974cb9c00e4c7b390
|
[] |
no_license
|
Anubhav722/trello
|
971111af8cbc1f6c344ace200e2741e809e9a1fa
|
600b5410cde7fd2a51720fa4ca7cc2ecfbff322e
|
refs/heads/master
| 2023-07-13T18:24:51.937539
| 2021-08-21T13:22:17
| 2021-08-21T13:22:17
| 398,563,384
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 280
|
py
|
class AuthenticationService:
    def __init__(self, ttl):
        self.ttl = ttl  # keep the configured time-to-live instead of discarding it
        self.tokens = {}  # Map<token_id, user_obj>
def renew_token(self, token_id):
pass
def authenticate_request(self, token_id, timestamp):
pass
    def register_user(self):
pass
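# A minimal sketch (my addition, not the author's implementation) of how the
# stubs above could be filled in, assuming tokens map to (user, expiry) pairs
# and that `timestamp` and `ttl` share the same clock and units; every name
# below is an illustrative assumption.
class TtlAuthenticationService:
    def __init__(self, ttl):
        self.ttl = ttl
        self.tokens = {}  # Map<token_id, (user_obj, expires_at)>

    def register_user(self, token_id, user_obj, timestamp):
        # Issue a token valid for `ttl` time units from `timestamp`.
        self.tokens[token_id] = (user_obj, timestamp + self.ttl)

    def renew_token(self, token_id, timestamp):
        # Extend a still-valid token; expired tokens cannot be renewed.
        entry = self.tokens.get(token_id)
        if entry is not None and timestamp < entry[1]:
            self.tokens[token_id] = (entry[0], timestamp + self.ttl)

    def authenticate_request(self, token_id, timestamp):
        # A request is authenticated iff its token exists and has not expired.
        entry = self.tokens.get(token_id)
        return entry is not None and timestamp < entry[1]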
|
[
"anubhavs286@gmail.com"
] |
anubhavs286@gmail.com
|
52cf3aac7e139b3a4d760b80cc223a9bd88e323d
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03496/s023860422.py
|
3418e271fe6d39c5afd0834fa668eb6252fedf15
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 553
|
py
|
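# Context inferred from the code below (not stated in the source): this looks
# like a "make the array non-decreasing using at most 2n operations" task,
# where each printed pair "x y" means a[y] += a[x].
#  - all elements >= 0: a left-to-right sweep (i, i+1) of n-1 prefix additions;
#  - all elements <= 0: a right-to-left sweep (i, i-1) instead;
#  - mixed signs: first add the dominant extreme (max if |max| >= |min|, else
#    min) to every element (n operations), then do the matching sweep.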
n = int(input())
a = list(map(int,input().split()))
mi = a[0]
mii = 1
ma = a[0]
mai = 1
for i in range(n):
if a[i] > ma:
ma = a[i]
mai = i+1
if a[i] < mi:
mi = a[i]
mii = i+1
if mi >= 0:
print(n-1)
for i in range(1,n):
print(i,i+1)
elif ma <= 0:
print(n-1)
for i in range(n,1,-1):
print(i,i-1)
elif abs(ma) >= abs(mi):
print(n*2-1)
for i in range(n):
print(mai,i+1)
for i in range(1,n):
print(i,i+1)
else:
print(n*2-1)
for i in range(n):
print(mii,i+1)
for i in range(n,1,-1):
print(i,i-1)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
295327d0390c539e66194d54fcb18c86e0801f85
|
a13b01ff29782857a4d14e771da58e10601648af
|
/first_look.py
|
327512e724057a78b67efb887a4dea43171022a2
|
[] |
no_license
|
HappyLantern/DeepLearningWithPython
|
d6c8f4d088982dd95b4f417ecf9cf65621d0b6db
|
2f3cacfed7b0261927c074bc77024edabad83df8
|
refs/heads/master
| 2020-08-14T06:47:25.540771
| 2019-10-14T18:23:40
| 2019-10-14T18:23:40
| 215,116,435
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,066
|
py
|
# Solving the classification problem of the MNIST dataset.
# Digits of 28x28 pixels that belong to 0,..,9
from keras.datasets import mnist
from keras import models
from keras import layers
from keras.utils import to_categorical
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
network = models.Sequential()
network.add(layers.Dense(512, activation='relu', input_shape=(28 * 28,)))
network.add(layers.Dense(10, activation='softmax'))
network.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
train_images = train_images.reshape((60000, 28 * 28))
train_images = train_images.astype('float32') / 255
test_images = test_images.reshape((10000, 28 * 28))
test_images = test_images.astype('float32') / 255
print(train_labels.shape)
train_labels = to_categorical(train_labels)
print(train_labels.shape)
test_labels = to_categorical(test_labels)
network.fit(train_images, train_labels, epochs=5, batch_size=128)
test_loss, test_acc = network.evaluate(test_images, test_labels)
print('test_acc:', test_acc)
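# Follow-up sketch (my addition, not from the source): inspect a few test
# predictions; predict() returns one softmax row per input image.
predictions = network.predict(test_images[:5])
print('predicted digits:', predictions.argmax(axis=1))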
|
[
"Kevinjohansson1995@gmail.com"
] |
Kevinjohansson1995@gmail.com
|
538b671955b4ac1fa9cf8fb82d290212541efada
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/djDJHv3nwWsRM9mtu_15.py
|
d8d00d9f36be6af88a931dc7bc4cd7cb6aa76d74
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 113
|
py
|
def validate_spelling(txt):
return "".join(txt.split(". ")[:-1]).lower() == txt.split(". ")[-1][:-1].lower()
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
6f541be782c903bc9b6347e681942a1e1f3f53a9
|
d7c9f9ff75fabfcd4e42fedfc594ed38e4258481
|
/ExperimentalModels/demo_pySSA.py
|
ad594053ae28f6ada2e5247277675df83f182d8a
|
[
"MIT"
] |
permissive
|
johnabel/circadian_tools
|
b034b78bd716007659b61f257977f39abd3ff6b3
|
d7ec5f798e2215331761857f5138941b764c03a8
|
refs/heads/master
| 2021-03-22T00:12:47.705529
| 2018-05-22T16:00:37
| 2018-05-22T16:00:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,235
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 19 12:19:48 2014
@author: john abel
"""
import numpy as np
import casadi as cs
import pdb
import stochkit_resources as stk
import modelbuilder as mb
import pylab as pl
import circadiantoolbox_raw as ctb
import Bioluminescence as bl
EqCount = 11
ParamCount = 46
modelversion='deg_sync_v9_0_stochkit'
#initial values
y0in = np.array([ 0.09909523, 0.70371313, 0.2269922 , 0.10408456, 0.00490967,
0.86826377, 0.89688085, 0.06720938, 0.42133251, 0.00728958,
0.47681956])
#known period (not necessary for running but useful for analysis sometimes)
period = 23.7000
#omega parameter for stochkit
vol=200
#parameter values
param=[0.2846151688657202 , 0.232000177516616 , 0.088617203761593 , 0.30425468 , 0.210097869 ,
0.4353107703541283 , 1.003506668772474 , 1.088997860405459 , 0.0114281138 , 1.37671691 ,
2.6708076060464903 , 0.034139448 , 2.679624716511808 , 0.769392535473404 , 2.54809178 ,
0.0770156091097623 , 0.305050587159186 , 0.0636139454 , 0.102828479472142 , 0.0021722217886776 ,
3.4119930083042749 , 0.313135234185038 , 0.129134295035583 , 0.086393910969617 , 0.1845394740887122 ,
0.1918543699832282 , 2.93509002 , 0.668784664 , 1.08399453 , 0.368097886 ,
1.1283479292931928 , 0.305037169 , 0.530015145234027 , 0.317905521992663 , 0.3178454269093350 ,
3.1683607 , 0.531341137364938 , 0.807082897 , 0.251529761689481 , 0.1805825385998701 ,
1.418566520274632 , 0.835185094 , 0.376214021 , 0.285090232 , 0.27563398 ,
1.113098655804457 ]
def StochModel():
#==================================================================
# Stochastic Model Setup
#==================================================================
    print('Now converting model to StochKit XML format...')
#Converts concentration to population for initial values
y0in_stoch = (vol*y0in).astype(int)
#collects state and parameter array to be converted to species and parameter objects,
#makes copies of the names so that they are on record
species_array = ['p', 'c1', 'c2', 'vip', 'P', 'C1', 'C2', 'eVIP', 'C1P', 'C2P', 'CREB']
param_array = ['vtpr' , 'vtc1r' , 'vtc2r' , 'knpr' , 'kncr' ,
'vdp' , 'vdc1' , 'vdc2' , 'kdp' , 'kdc' ,
'vdP' , 'kdP' , 'vdC1' , 'vdC2' , 'kdC' ,
'vdC1n' , 'vdC2n' , 'kdCn' , 'vaCP' , 'vdCP' , 'ktlnp',
'vtpp' , 'vtc1p' , 'vtc2p' , 'vtvp' , 'vtvr' ,
'knpp' , 'kncp' , 'knvp' , 'knvr' , 'vdv' ,
'kdv' , 'vdVIP' , 'kdVIP' , 'vgpcr' , 'kgpcr' ,
'vdCREB', 'kdCREB', 'ktlnv' , 'vdpka' , 'vgpka' ,
'kdpka' , 'kgpka' , 'kdc1' , 'kdc2' , 'ktlnc']
#duplicated for names later
state_names=species_array[:]
param_names=param_array[:]
#Names model
SSAmodel = stk.StochKitModel(name=modelversion)
#SSAmodel.units='concentration'
#creates SSAmodel class object
SSA_builder = mb.SSA_builder(species_array,param_array,y0in_stoch,param,SSAmodel,vol)
# REACTIONS
#per mRNA
SSA_builder.SSA_MM('per mRNA activation','vtpp',km=['knpp'],Prod=['p'],Act=['CREB'])
SSA_builder.SSA_MM('per mRNA repression','vtpr',km=['knpr'],Prod=['p'],Rep=['C1P','C2P'])
SSA_builder.SSA_MM('per mRNA degradation','vdp',km=['kdp'],Rct=['p'])
#cry1 mRNA
SSA_builder.SSA_MM('c1 mRNA activation','vtc1p',km=['kncp'],Prod=['c1'],Act=['CREB'])
SSA_builder.SSA_MM('c1 mRNA repression','vtc1r',km=['kncr'],Prod=['c1'],Rep=['C1P','C2P'])
SSA_builder.SSA_MM('c1 mRNA degradation','vdc1',km=['kdc'],Rct=['c1'])
#cry2 mRNA
SSA_builder.SSA_MM('c2 mRNA activation','vtc2p',km=['kncp'],Prod=['c2'],Act=['CREB'])
SSA_builder.SSA_MM('c2 mRNA repression','vtc2r',km=['kncr'],Prod=['c2'],Rep=['C1P','C2P'])
SSA_builder.SSA_MM('c2 mRNA degradation','vdc2',km=['kdc'],Rct=['c2'])
#vip mRNA
SSA_builder.SSA_MM('vip mRNA activation','vtvp',km=['knvp'],Prod=['vip'],Act=['CREB'])
SSA_builder.SSA_MM('vip mRNA repression','vtvr',km=['knvr'],Prod=['vip'],Rep=['C1P','C2P'])
SSA_builder.SSA_MM('vip mRNA degradation','vdv',km=['kdv'],Rct=['vip'])
#CRY1, CRY2, PER, VIP creation and degradation
SSA_builder.SSA_MA_tln('PER translation' ,'P' ,'ktlnp','p')
SSA_builder.SSA_MA_tln('CRY1 translation','C1' ,'ktlnc','c1')
SSA_builder.SSA_MA_tln('CRY2 translation','C2' ,'ktlnc','c2')
SSA_builder.SSA_MA_tln('VIP translation' ,'eVIP','ktlnv','vip')
SSA_builder.SSA_MM('PER degradation','vdP',km=['kdP'],Rct=['P'])
SSA_builder.SSA_MM('C1 degradation','vdC1',km=['kdC'],Rct=['C1'])
SSA_builder.SSA_MM('C2 degradation','vdC2',km=['kdC'],Rct=['C2'])
SSA_builder.SSA_MA_deg('eVIP degradation','eVIP','kdVIP')
#CRY1 CRY2 complexing
SSA_builder.SSA_MA_complex('CRY1-P complex','C1','P','C1P','vaCP','vdCP')
SSA_builder.SSA_MA_complex('CRY2-P complex','C2','P','C2P','vaCP','vdCP')
SSA_builder.SSA_MM('C1P degradation','vdC1n',km=['kdCn'],Rct=['C1P','C2P'])
SSA_builder.SSA_MM('C2P degradation','vdC2n',km=['kdCn'],Rct=['C2P','C1P'])
#VIP/CREB Pathway
SSA_builder.SSA_MM('CREB formation','vgpka',km=['kgpka'],Prod=['CREB'],Act=['eVIP'])
SSA_builder.SSA_MM('CREB degradation','vdCREB',km=['kdCREB'],Rct=['CREB'])
# END REACTIONS
#stringSSAmodel = SSAmodel.serialize()
    #print(stringSSAmodel)
return SSAmodel,state_names,param_names
def main():
#Creates SSA version of model.
SSAmodel,state_names,param_names=StochModel()
#calls and runs stochkit
trajectories = stk.stochkit(SSAmodel,job_id='test',t=75,number_of_trajectories=100,increment=0.1)
#evaluation bit
StochEval = stk.StochEval(trajectories,state_names,param_names,vol)
StochEval.PlotAvg('p',color='blue')
pl.show()
pdb.set_trace()
if __name__ == "__main__":
main()
|
[
"j.h.abel01@gmail.com"
] |
j.h.abel01@gmail.com
|
a4fab23437c5dd0c8fbf1273ea1ba4a8c0b3042a
|
54ba669ca04cf7422134b7183787f86caccb5516
|
/TeslaStationRoute.py
|
e39308e6a37b9d9b4165578ee78ca66bd9826b45
|
[] |
no_license
|
aakashparwani/ConsumerComplaints_VisualizationReport
|
4757968526970330e94453fc45c6d72fa71105fc
|
90604dc2529c42f35f12e6c769dee05df39e1836
|
refs/heads/master
| 2021-01-12T04:13:29.658738
| 2017-06-17T00:23:51
| 2017-06-17T00:23:51
| 77,546,680
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,206
|
py
|
# coding: utf-8
# 1. Find route between two Tesla Supercharging Stations "55 Parsonage Rd. & 150th Ave and 147th St" -- Google Maps API
#
# In order to use the Google Maps - Directions API, you need to create an account with Google and get your API key, go to: https://developers.google.com/maps/documentation/directions/ and then go to "get a key".
# In[12]:
# first import all the important packages
import plotly.plotly as py
py.sign_in("aakashparwani", "ob2ncx7bg1")
from plotly.graph_objs import *
import mapbox
import numpy as np
import requests
import copy
import googlemaps
mapbox_access_token = 'pk.eyJ1IjoiYWFrYXNocGFyd2FuaSIsImEiOiJjaXZzdnN2MHIwM3FwMnlvMXVtdDc1MWh0In0.kyKt29LCvJC8UjEPUvPl4w'
# In[13]:
#now request all the Tesla Supercharging Stations present in USA from the Tesla website.
r = requests.get('https://www.tesla.com/findus?redirect=no#/bounds/49.38,-66.94,25.82,-124.38999999999999?search=supercharger,&name=United%20States')
r_copy = copy.deepcopy(r.text)
supercharger_locations = {}
#look for particular country data.
valid_countries = ['United States','Canada']
# define the parameters that will be used to locate the stations on google maps.
params_for_locations = ['postal_code":"', 'country":"', 'latitude":"', 'longitude":"']
# now traverse the fetched stations data and copy it in supercharger_locations dictionary that will be used in coming steps.
while True:
# add address line to the dictionary
index = r_copy.find('address_line_1":"')
if index == -1:
break
index += len('address_line_1":"')
index_end = index
while r_copy[index_end] != '"':
index_end += 1
address_line_1 = r_copy[index:index_end]
address_line_1 = str(address_line_1)
supercharger_locations[address_line_1] = {}
for param in params_for_locations:
index = r_copy.find(param)
if index == -1:
break
index += len(param)
index_end = index
while r_copy[index_end] != '"':
index_end += 1
supercharger_locations[address_line_1][param[0:-3]] = r_copy[index:index_end]
r_copy = r_copy[index_end:len(r.text)] # slice off the traversed code
#clean out records missing the important parameters (postal code & country) or containing escape debris
all_keys = list(supercharger_locations.keys())
for key in all_keys:
    values = supercharger_locations[key]
    if (not values or any('\\' in str(v) for v in values.values())
            or values.get('postal_code', '') == ''
            or values.get('country') not in valid_countries):
        del supercharger_locations[key]
# In[14]:
#let us check data of start address
for v in supercharger_locations.keys():
if v=='55 Parsonage Rd.':
print (supercharger_locations[v]['latitude'], supercharger_locations[v]['longitude'],
supercharger_locations[v]['postal_code'], supercharger_locations[v]['country'])
# In[15]:
#let us check data of end address
for v in supercharger_locations.keys():
if v=='150th Ave and 147th St':
print (supercharger_locations[v]['latitude'], supercharger_locations[v]['longitude'],
supercharger_locations[v]['postal_code'], supercharger_locations[v]['country'])
# In[16]:
# define function that will take "start address & end address" as input and will draw route between them.
def plot_route_between_tesla_stations(address_start, address_end, zoom=3, endpt_size=6):
start = (supercharger_locations[address_start]['latitude'], supercharger_locations[address_start]['longitude'])
end = (supercharger_locations[address_end]['latitude'], supercharger_locations[address_end]['longitude'])
directions = gmaps.directions(start, end)
steps = []
steps.append(start) # add starting coordinate to trip
for index in range(len(directions[0]['legs'][0]['steps'])):
start_coords = directions[0]['legs'][0]['steps'][index]['start_location']
steps.append((start_coords['lat'], start_coords['lng']))
if index == len(directions[0]['legs'][0]['steps']) - 1:
end_coords = directions[0]['legs'][0]['steps'][index]['end_location']
steps.append((end_coords['lat'], end_coords['lng']))
steps.append(end) # add ending coordinate to trip
data = Data([
Scattermapbox(
lat=[item_x[0] for item_x in steps],
lon=[item_y[1] for item_y in steps],
mode='markers+lines',
marker=Marker(
size=[endpt_size] + [4 for j in range(len(steps) - 2)] + [endpt_size]
),
)
])
layout = Layout(
autosize=True,
hovermode='closest',
mapbox=dict(
accesstoken=mapbox_access_token,
bearing=0,
style='streets',
center=dict(
lat=np.mean([float(step[0]) for step in steps]),
lon=np.mean([float(step[1]) for step in steps]),
),
pitch=0,
zoom=zoom
),
)
fig = dict(data=data, layout=layout)
return fig
# get the google map api key in order to call the Google API.
gmap_api_key = 'AIzaSyDzrUYQwoyb4I0i2bhl3CzALP031n4yLac'
gmaps = googlemaps.Client(gmap_api_key)
# define start address
address_start = '55 Parsonage Rd.'
# define end address
address_end = '150th Ave and 147th St'
zoom=12.2
endpt_size=20
fig = plot_route_between_tesla_stations(address_start, address_end, zoom=9, endpt_size=20)
# plot route between stations
py.iplot(fig, filename='tesla-driving-directions-between-superchargers')
##############################NEED TO WORK: MAP BOX DIRECTION API CODE#################
def plot_route1_between_tesla_stations(address_start, address_end, zoom=3, endpt_size=6):
start = (supercharger_locations[address_start]['latitude'], supercharger_locations[address_start]['longitude'])
end = (supercharger_locations[address_end]['latitude'], supercharger_locations[address_end]['longitude'])
startv = round(float(start[0]), 5)
startv1 = round(float(start[1]), 5)
endv = round(float(end[0]), 5)
endv1 = round(float(end[1]), 5)
points = [{
"type": "Feature",
"properties": {},
"geometry": {
"type": "Point",
"coordinates": [
startv,
startv1]}}, {
"type": "Feature",
"properties": {},
"geometry": {
"type": "Point",
"coordinates": [
endv,
endv1]}}]
directions = mapbox.Directions(access_token=mapbox_access_token).directions(points)
steps = []
steps.append(start) # add starting coordinate to trip
for index in range(len(directions[0]['legs'][0]['steps'])):
start_coords = directions[0]['legs'][0]['steps'][index]['start_location']
steps.append((start_coords['lat'], start_coords['lng']))
if index == len(directions[0]['legs'][0]['steps']) - 1:
end_coords = directions[0]['legs'][0]['steps'][index]['end_location']
steps.append((end_coords['lat'], end_coords['lng']))
steps.append(end) # add ending coordinate to trip
data = Data([
Scattermapbox(
lat=[item_x[0] for item_x in steps],
lon=[item_y[1] for item_y in steps],
mode='markers+lines',
marker=Marker(
size=[endpt_size] + [4 for j in range(len(steps) - 2)] + [endpt_size]
),
)
])
layout = Layout(
autosize=True,
hovermode='closest',
mapbox=dict(
accesstoken=mapbox_access_token,
bearing=0,
style='streets',
center=dict(
lat=np.mean([float(step[0]) for step in steps]),
lon=np.mean([float(step[1]) for step in steps]),
),
pitch=0,
zoom=zoom
),
)
fig = dict(data=data, layout=layout)
return fig
# define start address
address_start = '55 Parsonage Rd.'
# define end address
address_end = '150th Ave and 147th St'
zoom=12.2
endpt_size=20
fig = plot_route1_between_tesla_stations(address_start, address_end, zoom=9, endpt_size=20)
#py.iplot(fig, filename='tesla-driving-directions-between-superchargers_mapbox')
|
[
"noreply@github.com"
] |
aakashparwani.noreply@github.com
|
c5beb6a5ce0818e96720a154c9ed0db1dcb62b79
|
d344a8c6872d6b906e7d32a7ea9b210cd76a7ae5
|
/venv/Scripts/pip3.7-script.py
|
5feb926f753ba8c8ebdc097707b785bc570735df
|
[] |
no_license
|
hardikmaru193/TextUtils
|
9988c155d6fedcf75823a28b5263dc01e35ea03e
|
557356fee3ca8cd1d2ca40dbdbb3f382b8c08e9b
|
refs/heads/master
| 2020-07-17T16:04:53.316466
| 2019-09-03T10:54:50
| 2019-09-03T10:54:50
| 206,050,051
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 419
|
py
|
#!C:\Users\hardi\PycharmProjects\TextUtils\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.7'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.7')()
)
|
[
"hardikmaru.job@gmail.com"
] |
hardikmaru.job@gmail.com
|
83c14b747a531d75e73f4488a2e4d14b5ade425f
|
8569401c096695c5c8b8f3c6c75fb23d2fc3b753
|
/contextual_bandit.py
|
e9a8649c698e7f0e7933b0d6699ae608e6e46b9e
|
[] |
no_license
|
stabbysaur/reinforcement
|
2d3ede1012217789088075340708c08863385a9d
|
ac46a5c9caca2e5f11c4dd67917f0151dca0be71
|
refs/heads/master
| 2020-03-30T21:58:37.044978
| 2018-10-19T00:38:41
| 2018-10-19T00:38:41
| 151,649,120
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,864
|
py
|
"""
2018-10-16
Exercise from Arthur Juliani's RL tutorial (adapted for Pytorch)
Part 1.5: Contextual bandits!
"""
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import pdb
"""
CONTEXT BLOCK:
There are now contexts!!
Each "context" refers to a different bandit.
Each bandit has 4 arms.
The NN needs to learn which arm to pull for each bandit!!
This now has states / actions / rewards but the action taken
does not determine the next state.
Almost at the full RL problem!
Note that this network uses a POLICY GRADIENT approach
(rather than value-based approaches). The network updates
towards the correct action, not the value of an action
in a given state.
"""
class contextual_bandit():
"""taken straight from the blog post :)"""
def __init__(self):
self.state = 0
# List out our bandits. Currently arms 4, 2, and 1 (respectively) are the most optimal.
self.bandits = np.array([[0.2, 0, -0.0, -5], [0.1, -5, 1, 0.25], [-5, 5, 5, 5]])
self.num_bandits = self.bandits.shape[0]
self.num_actions = self.bandits.shape[1]
def getBandit(self):
self.state = np.random.randint(0, len(self.bandits)) # Returns a random state for each episode.
return self.state
def pullArm(self, action):
# Get a random number.
bandit = self.bandits[self.state, action]
result = np.random.randn(1)
if result > bandit:
# return a positive reward.
return 1
else:
# return a negative reward.
return -1
"""set up NN!"""
class SimpleNN(nn.Module):
def __init__(self, n_inputs, n_classes):
super(SimpleNN, self).__init__()
self.fc1 = nn.Linear(n_inputs, n_classes, bias=False)
# nn_init.uniform_(self.fc1.weight, 0.0, 0.1)
self.act1 = nn.Sigmoid()
def forward(self, X):
output = self.fc1(X)
output = self.act1(output)
return output
bandit = contextual_bandit()
agent = SimpleNN(n_inputs=bandit.num_bandits, n_classes=bandit.num_actions)
optimizer = torch.optim.SGD(agent.parameters(), lr=0.05)
episodes = 10000
epsilon = 0.1
rewards = []
for ep in range(episodes):
"""get a bandit!!"""
band_vector = np.zeros(bandit.num_bandits)
band = bandit.getBandit()
band_vector[band] = 1
band_vector = torch.from_numpy(band_vector).float()
"""pass into agent!!"""
actions = agent.forward(band_vector) # this is the current weighting of arms for the given bandit (=state)
if np.random.rand(1) < epsilon:
selected = np.random.randint(0, bandit.num_actions - 1)
else:
selected = torch.argmax(actions).item() # pick the best action in the state
"""get reward from taking an action!!!"""
reward = bandit.pullArm(selected)
"""calculate loss!"""
loss = -torch.log(actions[selected]) * reward # same as the non-contextual bandit
optimizer.zero_grad()
loss.backward()
optimizer.step()
rewards.append(reward)
if ep % 100 == 0:
print("Episode {0}!".format(ep))
print(sum(rewards) / (ep + 1.))
"""check for whether the agent converged to the right arms for each bandit"""
for band in range(bandit.num_bandits):
"""get a bandit!!"""
band_vector = np.zeros(bandit.num_bandits)
band_vector[band] = 1
band_vector = torch.from_numpy(band_vector).float()
"""pass into agent!!"""
actions = agent.forward(band_vector) # this is the current weighting of arms for the given bandit (=state)
print("The agent thinks action " + str(torch.argmax(actions).item() + 1) + " for bandit " + str(band + 1) + " is the most promising....")
if torch.argmax(actions).item() == np.argmin(bandit.bandits[band]):
print("...and it was right!")
else:
print("...and it was wrong!")
|
[
"vicious.narcoleptic@gmail.com"
] |
vicious.narcoleptic@gmail.com
|
d4dc403b25d1c1e99ae62c8c28e7834c02eab079
|
90e52dabd2e5450a46f61896b8b53bf3b6353e18
|
/Python/Python_day2/ex4.py
|
56673ac871a9e25c372b5cc52165ce24bf42b8b8
|
[] |
no_license
|
ATzitz/Main-Github
|
0c20ebb04be6e99c8130462e900a1060cb287fd4
|
2d065c8f6efc2daa8a965cc1b8f52000d4ad1aac
|
refs/heads/master
| 2021-05-03T22:53:18.422192
| 2016-11-29T13:38:39
| 2016-11-29T13:38:39
| 71,696,574
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 414
|
py
|
a=input("Enter 10 digit Number :" )
la=list(x for x in a)
lz=[]
lm=[]
lk=[]
count=0
b,c,d,=0,0,0
for x in la:
if int(x)%3==2:
lz.append(x)
strz = ' '.join(str(e) for e in lz)
if int(x)%3==1:
lm.append(x)
strm = ' '.join(str(e) for e in lm)
if int(x)%3==0:
lk.append(x)
strk = ' '.join(str(e) for e in lk)
print( strm, '\n ',strz, '\n ', strk)
|
[
"a.tzitzeras@gmail.com"
] |
a.tzitzeras@gmail.com
|
b3afdc5ed5a2cd8de578e1fd31eb490f17a5db95
|
2455062787d67535da8be051ac5e361a097cf66f
|
/Producers/BSUB/TrigProd_amumu_a5_dR5/trigger_amumu_producer_cfg_TrigProd_amumu_a5_dR5_499.py
|
14a070c95d6dc5d7822dce37415383786cbf8e82
|
[] |
no_license
|
kmtos/BBA-RecoLevel
|
6e153c08d5ef579a42800f6c11995ee55eb54846
|
367adaa745fbdb43e875e5ce837c613d288738ab
|
refs/heads/master
| 2021-01-10T08:33:45.509687
| 2015-12-04T09:20:14
| 2015-12-04T09:20:14
| 43,355,189
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,360
|
py
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("PAT")
#process.load("BBA/Analyzer/bbaanalyzer_cfi")
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.load('Configuration.EventContent.EventContent_cff')
process.load("Configuration.Geometry.GeometryRecoDB_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.load("PhysicsTools.PatAlgos.producersLayer1.patCandidates_cff")
process.load("PhysicsTools.PatAlgos.selectionLayer1.selectedPatCandidates_cff")
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'MCRUN2_71_V1::All', '')
process.load("Configuration.StandardSequences.MagneticField_cff")
####################
# Message Logger
####################
process.MessageLogger.cerr.FwkReport.reportEvery = cms.untracked.int32(100)
process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )
## switch to unscheduled mode
process.options.allowUnscheduled = cms.untracked.bool(True)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(500)
)
####################
# Input File List
####################
# Input source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('root://eoscms//eos/cms/store/user/ktos/RECO_Step3_amumu_a5/RECO_Step3_amumu_a5_499.root'),
secondaryFileNames = cms.untracked.vstring()
)
############################################################
# Defining matching in DeltaR, sorting by best DeltaR
############################################################
process.mOniaTrigMatch = cms.EDProducer("PATTriggerMatcherDRLessByR",
src = cms.InputTag( 'slimmedMuons' ),
matched = cms.InputTag( 'patTrigger' ), # selections of trigger objects
matchedCuts = cms.string( 'type( "TriggerMuon" ) && path( "HLT_Mu16_TkMu0_dEta18_Onia*")' ), # input does not yet have the 'saveTags' parameter in HLT
maxDPtRel = cms.double( 0.5 ), # no effect here
maxDeltaR = cms.double( 0.3 ), #### selection of matches
maxDeltaEta = cms.double( 0.2 ), # no effect here
resolveAmbiguities = cms.bool( True ),# definition of matcher output
resolveByMatchQuality = cms.bool( True )# definition of matcher output
)
# talk to output module
process.out = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string("file:RECO_Step3_amumu_a5_TrigProd_499.root"),
outputCommands = process.MINIAODSIMEventContent.outputCommands
)
process.out.outputCommands += [ 'drop *_*_*_*',
'keep *_*slimmed*_*_*',
'keep *_pfTausEI_*_*',
'keep *_hpsPFTauProducer_*_*',
'keep *_hltTriggerSummaryAOD_*_*',
'keep *_TriggerResults_*_HLT',
'keep *_patTrigger*_*_*',
'keep *_prunedGenParticles_*_*',
'keep *_mOniaTrigMatch_*_*'
]
################################################################################
# Running the matching and setting the trigger on
################################################################################
from PhysicsTools.PatAlgos.tools.trigTools import *
switchOnTrigger( process ) # This is optional and can be omitted.
switchOnTriggerMatching( process, triggerMatchers = [ 'mOniaTrigMatch'
])
process.outpath = cms.EndPath(process.out)
|
[
"kmtos@ucdavis.edu"
] |
kmtos@ucdavis.edu
|
2b2498877b3efcf756b777b0a07744d9728de1a6
|
fcf3db349562825a7f8d6713a3092cefa03e6d3d
|
/fastdtwtest.py
|
2c3ca15200519570b9937304ee31e692c98e8e91
|
[] |
no_license
|
nhorcher/final_project498rc3
|
12728316d2a8dcb2d9b302c42eaf01f643faf636
|
157dafe297f289bfb45aa2ea604ddb8922499300
|
refs/heads/master
| 2020-03-12T08:53:39.831445
| 2018-05-08T20:21:12
| 2018-05-08T20:21:12
| 130,539,027
| 0
| 0
| null | 2018-05-06T19:41:55
| 2018-04-22T05:45:41
|
Python
|
UTF-8
|
Python
| false
| false
| 1,775
|
py
|
import numpy as np
from scipy.spatial.distance import euclidean
from scipy.interpolate import spline
import scipy.fftpack
from scipy.signal import savgol_filter
from fastdtw import fastdtw
import matplotlib.pyplot as plt
import sys
import os
import pandas as pd
folder = sys.argv[1]
file = 'gyroscope.csv'
path = os.path.join(folder,file)
f = open(path)
d = pd.read_csv(f)
f.close()
t = d['Time']
x = d['X']
y = d['Y']
z = d['Z']
# reset time to start at 0
t = np.array(t)
t = t-t[0]
lin = np.linspace(-2,2,1000)
sinc = -4*np.sinc(lin)
## Spline smoothing
## Toooooo slow
# t_sm = np.array(t)
# z_sm = np.array(z)
# Takes forever to run
# t_smooth = np.linspace(t_sm.min(), t_sm.max(), len(t))
# z_smooth = spline(t, z, t_smooth)
## FFT method, fast and smooth, but loses peaks
w = scipy.fftpack.rfft(z)
f = scipy.fftpack.rfftfreq(len(t), t[1]-t[0])
spectrum = w**2
cutoff_idx = spectrum < (spectrum.max()/5)
w2 = w.copy()
w2[cutoff_idx] = 0
y2 = scipy.fftpack.irfft(w2)
## SavGol Filter
# savgol3 = savgol_filter(z,249,3)
# savgol5 = savgol_filter(z,249,5)
savgol6 = savgol_filter(z,249,6)
# savgol6[abs(savgol6) < 1] = 0
## DTW
# Not really a well developed library. too hard to figure out
# distance, path = fastdtw(z[100:300], sinc, dist=euclidean)
# xpath = [z[100+i[0]] for i in path]
# ypath = [sinc[i[1]] for i in path]
# print(distance)
plt.figure()
plt.plot(z, label='Original')
# plt.plot(z_smooth, label='Splining')
# plt.plot(savgol3, label='SavGol3')
# plt.plot(savgol5, label='SavGol5')
plt.plot(savgol6, label='SavGol6')
# plt.plot(y2, label='FFTMethod')
plt.plot(np.linspace(0,200,1000),sinc, label='match')
# plt.plot(xpath, z[100:300], label='xpath')
# plt.plot(np.linspace(100,len(ypath),300), ypath, label='ypath')
plt.legend()
plt.show()
|
[
"nick.horcher@gmail.com"
] |
nick.horcher@gmail.com
|
bdcfdb621e28558b0c8a4dc4927343f24aa750cc
|
84010059524cbf5229a872aa2b857735e0bbd2b6
|
/locallibrary/locallibrary/locallibrary/urls.py
|
c86f12cdb2e2ff9c8337d579036f3ab3e832557a
|
[] |
no_license
|
sergeykool37/web_site_django
|
976aa61317dc14fa358129af18aaf0b965e14f9a
|
4b340f8c80fb78b2c83c99180bba8619b204b506
|
refs/heads/master
| 2022-10-28T10:59:44.729695
| 2020-06-21T10:54:31
| 2020-06-21T10:54:31
| 273,824,024
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,141
|
py
|
"""locallibrary URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.urls import include
urlpatterns = [
path('admin/', admin.site.urls),
]
urlpatterns += [
path('catalog/', include('catalog.urls')),
]
from django.views.generic import RedirectView
urlpatterns += [
path('', RedirectView.as_view(url='/catalog/', permanent=True)),
]
from django.conf import settings
from django.conf.urls.static import static
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
[
"sergeykool37@gmail.com"
] |
sergeykool37@gmail.com
|
94fbe3b06ebbbbfc7744c597c80accaa9f252602
|
10565593bd79c3a86ee074b7e08dc1cd35885e24
|
/ds_validation_viirs.py
|
8b12b313ac5320b9f20b7cb10edcb2c61d57784d
|
[] |
no_license
|
fangbin08/SMAP
|
528e3995692eef87731f5fc010687dc4fc9dfd60
|
d2b196f9af0ee8158a5380fbe23a2206546eded8
|
refs/heads/master
| 2022-10-18T01:51:45.804054
| 2022-10-07T07:00:29
| 2022-10-07T07:00:29
| 190,057,817
| 11
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 44,764
|
py
|
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
plt.rcParams["font.family"] = "serif"
import h5py
import calendar
import datetime
import glob
import pandas as pd
import rasterio
from scipy import stats
from statsmodels.graphics.tsaplots import plot_acf
import skill_metrics as sm
import cartopy.crs as ccrs
from cartopy.io.shapereader import Reader
from cartopy.feature import ShapelyFeature
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import itertools
# Ignore runtime warning
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
warnings.filterwarnings("ignore", category=RuntimeWarning)
#########################################################################################
# (Function 1) Subset the coordinates table of desired area
def coordtable_subset(lat_input, lon_input, lat_extent_max, lat_extent_min, lon_extent_max, lon_extent_min):
lat_output = lat_input[np.where((lat_input <= lat_extent_max) & (lat_input >= lat_extent_min))]
row_output_ind = np.squeeze(np.array(np.where((lat_input <= lat_extent_max) & (lat_input >= lat_extent_min))))
lon_output = lon_input[np.where((lon_input <= lon_extent_max) & (lon_input >= lon_extent_min))]
col_output_ind = np.squeeze(np.array(np.where((lon_input <= lon_extent_max) & (lon_input >= lon_extent_min))))
return lat_output, row_output_ind, lon_output, col_output_ind
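# Hypothetical usage (added for illustration; the bounding-box values are
# assumptions): subset the global 9 km EASE grid to roughly CONUS.
# lat_sub, row_ind, lon_sub, col_ind = coordtable_subset(
#     lat_world_ease_9km, lon_world_ease_9km, 53.0, 25.0, -67.0, -125.0)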
####################################################################################################################################
# 0. Input variables
# Specify file paths
# Path of current workspace
path_workspace = '/Users/binfang/Documents/SMAP_Project/smap_codes'
# Path of GIS data
path_gis_data = '/Users/binfang/Documents/SMAP_Project/data/gis_data'
# Path of source LTDR NDVI data
path_ltdr = '/Volumes/MyPassport/SMAP_Project/Datasets/LTDR/Ver5'
# Path of Land mask
path_lmask = '/Volumes/MyPassport/SMAP_Project/Datasets/Lmask'
# Path of model data
path_model = '/Volumes/MyPassport/SMAP_Project/Datasets/model_data'
# Path of source MODIS data
path_modis = '/Volumes/MyPassport/SMAP_Project/NewData/MODIS/HDF'
# Path of source output MODIS data
path_modis_op = '/Volumes/MyPassport/SMAP_Project/NewData/MODIS/Output'
# Path of MODIS data for SM downscaling model input
path_modis_model_ip = '/Volumes/MyPassport/SMAP_Project/NewData/MODIS/Model_Input'
# Path of SM model output
path_model_op = '/Volumes/MyPassport/SMAP_Project/Datasets/SMAP_ds/Model_Output'
# Path of downscaled SM
path_smap_sm_ds = '/Volumes/MyPassport/SMAP_Project/Datasets/MODIS/Downscale'
# Path of 9 km SMAP SM
path_smap = '/Volumes/MyPassport/SMAP_Project/Datasets/SMAP'
# Path of ISMN
path_ismn = '/Volumes/MyPassport/SMAP_Project/Datasets/ISMN/Ver_1/processed_data'
# Path of processed data
path_processed = '/Volumes/MyPassport/SMAP_Project/Datasets/processed_data'
# Path of GPM data
path_gpm = '/Volumes/MyPassport/SMAP_Project/Datasets/GPM'
# Path of Results
path_results = '/Users/binfang/Documents/SMAP_Project/results/results_200810'
folder_400m = '/400m/'
folder_1km = '/1km/'
folder_9km = '/9km/'
smap_sm_9km_name = ['smap_sm_9km_am', 'smap_sm_9km_pm']
# Generate a sequence of strings between start and end dates (Year + DOY)
start_date = '2015-04-01'
end_date = '2019-12-31'
year = 2019 - 2015 + 1
start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d').date()
end_date = datetime.datetime.strptime(end_date, '%Y-%m-%d').date()
delta_date = end_date - start_date
date_seq = []
date_seq_doy = []
for i in range(delta_date.days + 1):
date_str = start_date + datetime.timedelta(days=i)
date_seq.append(date_str.strftime('%Y%m%d'))
date_seq_doy.append(str(date_str.timetuple().tm_year) + str(date_str.timetuple().tm_yday).zfill(3))
# Count how many days for a specific year
yearname = np.linspace(2015, 2019, 5, dtype='int')
monthnum = np.linspace(1, 12, 12, dtype='int')
monthname = np.arange(1, 13)
monthname = [str(i).zfill(2) for i in monthname]
daysofyear = []
for idt in range(len(yearname)):
if idt == 0:
f_date = datetime.date(yearname[idt], monthnum[3], 1)
l_date = datetime.date(yearname[idt], monthnum[-1], 31)
delta_1y = l_date - f_date
daysofyear.append(delta_1y.days + 1)
else:
f_date = datetime.date(yearname[idt], monthnum[0], 1)
l_date = datetime.date(yearname[idt], monthnum[-1], 31)
delta_1y = l_date - f_date
daysofyear.append(delta_1y.days + 1)
daysofyear = np.asarray(daysofyear)
# Find the indices of each month in the list of days between 2015 - 2019
nlpyear = 1999 # non-leap year
lpyear = 2000 # leap year
daysofmonth_nlp = np.array([calendar.monthrange(nlpyear, x)[1] for x in range(1, len(monthnum)+1)])
ind_nlp = [np.arange(daysofmonth_nlp[0:x].sum(), daysofmonth_nlp[0:x+1].sum()) for x in range(0, len(monthnum))]
daysofmonth_lp = np.array([calendar.monthrange(lpyear, x)[1] for x in range(1, len(monthnum)+1)])
ind_lp = [np.arange(daysofmonth_lp[0:x].sum(), daysofmonth_lp[0:x+1].sum()) for x in range(0, len(monthnum))]
ind_iflpr = np.array([int(calendar.isleap(yearname[x])) for x in range(len(yearname))]) # Find out leap years
# Generate a sequence of the days of months for all years
daysofmonth_seq = np.array([np.tile(daysofmonth_nlp[x], len(yearname)) for x in range(0, len(monthnum))])
daysofmonth_seq[1, :] = daysofmonth_seq[1, :] + ind_iflpr # Add leap days to February
# daysofmonth_seq_cumsum = np.cumsum(daysofmonth_seq, axis=1)
# ind_init = daysofmonth_seq_cumsum[2, :]
# ind_end = daysofmonth_seq_cumsum[8, :] - 1
# ind_gpm = np.stack((ind_init, ind_end), axis=1)
# ind_gpm[0, :] = ind_gpm[0, :] - 90
daysofmonth_seq_cumsum = np.cumsum(daysofmonth_seq, axis=0)
ind_init = daysofmonth_seq_cumsum[2, :]
ind_end = daysofmonth_seq_cumsum[8, :]
ind_gpm = np.stack((ind_init, ind_end), axis=1)
# Extract the indices of the months between April - September
date_seq_month = np.array([int(date_seq[x][4:6]) for x in range(len(date_seq))])
monthnum_conus = monthnum[3:9]
date_seq_doy_conus_ind = np.where((date_seq_month >= 4) & (date_seq_month <= 9))[0]
date_seq_doy_conus = [date_seq_doy[date_seq_doy_conus_ind[x]] for x in range(len(date_seq_doy_conus_ind))]
# Load in geo-location parameters
os.chdir(path_workspace)
f = h5py.File("ds_parameters.hdf5", "r")
varname_list = ['lat_conus_max', 'lat_conus_min', 'lon_conus_max', 'lon_conus_min', 'cellsize_400m', 'cellsize_9km',
'lat_conus_ease_1km', 'lon_conus_ease_1km', 'lat_conus_ease_9km', 'lon_conus_ease_9km',
'lat_world_ease_9km', 'lon_world_ease_9km', 'lat_conus_ease_400m', 'lon_conus_ease_400m',
'row_conus_ease_9km_ind', 'col_conus_ease_9km_ind', 'lat_world_geo_10km', 'lon_world_geo_10km']
for x in range(len(varname_list)):
var_obj = f[varname_list[x]][()]
exec(varname_list[x] + '= var_obj')
del(var_obj)
f.close()
########################################################################################################################
# 1. Read SM data in CONUS
# 1.1 Load the site lat/lon from Excel files and locate the SMAP 400 m, 1/9 km SM positions by lat/lon of ISMN in-situ data
# Find the indices of the days between April - September
month_list = np.array([int(date_seq[x][4:6]) for x in range(len(date_seq))])
month_list_ind = np.where((month_list >= 4) & (month_list <= 9))[0]
month_list_ind = month_list_ind + 2 #First two columns are lat/lon
ismn_list = sorted(glob.glob(path_ismn + '/[A-Z]*.xlsx'))
coords_all = []
df_table_am_all = []
df_table_pm_all = []
for ife in range(14, len(ismn_list)):
df_table_am = pd.read_excel(ismn_list[ife], index_col=0, sheet_name='AM')
df_table_pm = pd.read_excel(ismn_list[ife], index_col=0, sheet_name='PM')
netname = os.path.basename(ismn_list[ife]).split('_')[1]
netname = [netname] * df_table_am.shape[0]
coords = df_table_am[['lat', 'lon']]
coords_all.append(coords)
df_table_am_value = df_table_am.iloc[:, month_list_ind]
df_table_am_value.insert(0, 'network', netname)
df_table_pm_value = df_table_pm.iloc[:, month_list_ind]
df_table_pm_value.insert(0, 'network', netname)
df_table_am_all.append(df_table_am_value)
df_table_pm_all.append(df_table_pm_value)
del(df_table_am, df_table_pm, df_table_am_value, df_table_pm_value, coords, netname)
print(ife)
df_coords = pd.concat(coords_all)
df_table_am_all = pd.concat(df_table_am_all)
df_table_pm_all = pd.concat(df_table_pm_all)
new_index = [df_coords.index[x].title() for x in range(len(df_coords.index))] # Capitalize each word
df_coords.index = new_index
df_table_am_all.index = new_index
df_table_pm_all.index = new_index
rec_list = ['Smap-Ok', 'Tony_Grove_Rs', 'Bedford_5_Wnw', 'Harrison_20_Sse', 'John_Day_35_Wnw']
rec_post_list = ['SMAP-OK', 'Tony_Grove_RS', 'Bedford_5_WNW', 'Harrison_20_SSE', 'John_Day_35_WNW']
# rec_list_ind = [np.where(df_table_am_all.index == rec_list[x])[0][0] for x in range(len(rec_list))]
for x in range(1, len(rec_list)):
df_table_am_all.rename(index={rec_list[x]: rec_post_list[x]}, inplace=True)
df_table_pm_all.rename(index={rec_list[x]: rec_post_list[x]}, inplace=True)
df_coords.rename(index={rec_list[x]: rec_post_list[x]}, inplace=True)
########################################################################################################################
# 1.2 Extract 400 m, 1 km / 9 km SMAP by lat/lon
# Locate the SM pixel positions
stn_lat_all = np.array(df_coords['lat'])
stn_lon_all = np.array(df_coords['lon'])
stn_row_400m_ind_all = []
stn_col_400m_ind_all = []
stn_row_1km_ind_all = []
stn_col_1km_ind_all = []
stn_row_9km_ind_all = []
stn_col_9km_ind_all = []
for idt in range(len(stn_lat_all)):
stn_row_400m_ind = np.argmin(np.absolute(stn_lat_all[idt] - lat_conus_ease_400m)).item()
stn_col_400m_ind = np.argmin(np.absolute(stn_lon_all[idt] - lon_conus_ease_400m)).item()
stn_row_400m_ind_all.append(stn_row_400m_ind)
stn_col_400m_ind_all.append(stn_col_400m_ind)
stn_row_1km_ind = np.argmin(np.absolute(stn_lat_all[idt] - lat_conus_ease_1km)).item()
stn_col_1km_ind = np.argmin(np.absolute(stn_lon_all[idt] - lon_conus_ease_1km)).item()
stn_row_1km_ind_all.append(stn_row_1km_ind)
stn_col_1km_ind_all.append(stn_col_1km_ind)
stn_row_9km_ind = np.argmin(np.absolute(stn_lat_all[idt] - lat_world_ease_9km)).item()
stn_col_9km_ind = np.argmin(np.absolute(stn_lon_all[idt] - lon_world_ease_9km)).item()
stn_row_9km_ind_all.append(stn_row_9km_ind)
stn_col_9km_ind_all.append(stn_col_9km_ind)
del(stn_row_400m_ind, stn_col_400m_ind, stn_row_1km_ind, stn_col_1km_ind, stn_row_9km_ind, stn_col_9km_ind)
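# Each station is mapped to its nearest grid cell per resolution by minimizing
# the absolute lat/lon difference against the 1-D coordinate vectors. A minimal
# sketch of that lookup as a reusable helper (hypothetical name, not called below):
def nearest_grid_index(stn_lat, stn_lon, grid_lat, grid_lon):
    """Return the (row, col) of the grid cell closest to a station."""
    row = np.argmin(np.absolute(stn_lat - grid_lat)).item()
    col = np.argmin(np.absolute(stn_lon - grid_lon)).item()
    return row, col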
# 1.3 Extract 400 m SMAP SM (2019)
smap_400m_sta_all = []
tif_files_400m_name_ind_all = []
for iyr in [3, 4]: # range(yearname):
os.chdir(path_smap + folder_400m + str(yearname[iyr]))
tif_files = sorted(glob.glob('*.tif'))
# Extract the file name
tif_files_name = [os.path.splitext(tif_files[x])[0].split('_')[-1] for x in range(len(tif_files))]
tif_files_name_1year_ind = [date_seq_doy_conus.index(item) for item in tif_files_name if item in date_seq_doy_conus]
date_seq_doy_conus_1year_ind = [tif_files_name.index(item) for item in tif_files_name if item in date_seq_doy_conus]
tif_files_400m_name_ind_all.append(tif_files_name_1year_ind)
del(tif_files_name, tif_files_name_1year_ind)
smap_400m_sta_1year = []
for idt in range(len(date_seq_doy_conus_1year_ind)):
src_tf = rasterio.open(tif_files[date_seq_doy_conus_1year_ind[idt]]).read()
smap_400m_sta_1day = src_tf[:, stn_row_400m_ind_all, stn_col_400m_ind_all]
smap_400m_sta_1year.append(smap_400m_sta_1day)
del(src_tf, smap_400m_sta_1day)
print(tif_files[date_seq_doy_conus_1year_ind[idt]])
smap_400m_sta_all.append(smap_400m_sta_1year)
del(smap_400m_sta_1year, date_seq_doy_conus_1year_ind)
tif_files_400m_name_ind_all = np.concatenate(tif_files_400m_name_ind_all)
smap_400m_sta_all = np.concatenate(smap_400m_sta_all)
# Fill the extracted SMAP SM into the proper day columns
smap_400m_sta_am = np.empty((df_table_am_all.shape[0], df_table_am_all.shape[1]-1), dtype='float32')
smap_400m_sta_am[:] = np.nan
for idt in range(len(tif_files_400m_name_ind_all)):
smap_400m_sta_am[:, tif_files_400m_name_ind_all[idt]] = smap_400m_sta_all[idt, 0, :]
# 1.4 Extract 1km SMAP SM (2019)
smap_1km_sta_all = []
tif_files_1km_name_ind_all = []
for iyr in [3, 4]: # range(yearname):
os.chdir(path_smap + folder_1km + '/nldas/' + str(yearname[iyr]))
tif_files = sorted(glob.glob('*.tif'))
# Extract the file name
tif_files_name = [os.path.splitext(tif_files[x])[0].split('_')[-1] for x in range(len(tif_files))]
tif_files_name_1year_ind = [date_seq_doy_conus.index(item) for item in tif_files_name if item in date_seq_doy_conus]
date_seq_doy_conus_1year_ind = [tif_files_name.index(item) for item in tif_files_name if item in date_seq_doy_conus]
tif_files_1km_name_ind_all.append(tif_files_name_1year_ind)
del(tif_files_name, tif_files_name_1year_ind)
smap_1km_sta_1year = []
for idt in range(len(date_seq_doy_conus_1year_ind)):
src_tf = rasterio.open(tif_files[date_seq_doy_conus_1year_ind[idt]]).read()
smap_1km_sta_1day = src_tf[:, stn_row_1km_ind_all, stn_col_1km_ind_all]
smap_1km_sta_1year.append(smap_1km_sta_1day)
del(src_tf, smap_1km_sta_1day)
print(tif_files[date_seq_doy_conus_1year_ind[idt]])
smap_1km_sta_all.append(smap_1km_sta_1year)
del(smap_1km_sta_1year, date_seq_doy_conus_1year_ind)
tif_files_1km_name_ind_all = np.concatenate(tif_files_1km_name_ind_all)
smap_1km_sta_all = np.concatenate(smap_1km_sta_all)
# Fill the extracted SMAP SM into the proper day columns
smap_1km_sta_am = np.empty((df_table_am_all.shape[0], df_table_am_all.shape[1]-1), dtype='float32')
smap_1km_sta_am[:] = np.nan
for idt in range(len(tif_files_1km_name_ind_all)):
smap_1km_sta_am[:, tif_files_1km_name_ind_all[idt]] = smap_1km_sta_all[idt, 0, :]
# 1.5 Extract 9km SMAP SM (2019)
smap_9km_sta_am = np.empty((df_table_am_all.shape[0], df_table_am_all.shape[1]-1), dtype='float32')
smap_9km_sta_am[:] = np.nan
for iyr in [3, 4]: #range(len(yearname)):
smap_9km_sta_am_1year = []
for imo in range(3, 9):#range(len(monthname)):
smap_9km_sta_am_1month = []
# Load in SMAP 9km SM data
smap_file_path = path_smap + folder_9km + 'smap_sm_9km_' + str(yearname[iyr]) + monthname[imo] + '.hdf5'
# Check whether the monthly file exists in the directory
if os.path.exists(smap_file_path):
f_smap_9km = h5py.File(smap_file_path, "r")
varname_list_smap = list(f_smap_9km.keys())
smap_9km_sta_am_1month = f_smap_9km[varname_list_smap[0]][()]
smap_9km_sta_am_1month = smap_9km_sta_am_1month[stn_row_9km_ind_all, stn_col_9km_ind_all, :]
print(smap_file_path)
f_smap_9km.close()
else:
pass
smap_9km_sta_am_1year.append(smap_9km_sta_am_1month)
del(smap_9km_sta_am_1month)
smap_9km_sta_am_1year = np.concatenate(smap_9km_sta_am_1year, axis=1)
smap_9km_sta_am[:, iyr*183:(iyr+1)*183] = smap_9km_sta_am_1year
del(smap_9km_sta_am_1year)
# Save variables
var_name_val = ['smap_400m_sta_am', 'smap_1km_sta_am', 'smap_9km_sta_am']
with h5py.File('/Users/binfang/Downloads/Processing/VIIRS/smap_validation_conus_viirs.hdf5', 'w') as f:
for x in var_name_val:
f.create_dataset(x, data=eval(x))
########################################################################################################################
# 2. Scatterplots
# Site ID
# COSMOS: 0, 11, 25, 28, 34, 36, 42, 44
# SCAN: 250, 274, 279, 286, 296, 351, 362, 383
# SOILSCAPE: 860, 861, 870, 872, 896, 897, 904, 908
# USCRN: 918, 926, 961, 991, 1000,1002, 1012, 1016
# Number of sites for each SM network
# COSMOS 52
# iRON 9
# PBO_H2O 140
# RISMA 9
# SCAN 188
# SNOTEL 404
# SOILSCAPE 119
# USCRN 113
# Load in the saved parameters
f_mat = h5py.File('/Users/binfang/Downloads/Processing/VIIRS/smap_validation_conus_viirs.hdf5', 'r')
varname_list = list(f_mat.keys())
for x in range(len(varname_list)):
var_obj = f_mat[varname_list[x]][()]
exec(varname_list[x] + '= var_obj')
del(var_obj)
f_mat.close()
# os.chdir(path_results + '/single')
ismn_sm_am = np.array(df_table_am_all.iloc[:, 1:])
ismn_sm_pm = np.array(df_table_pm_all.iloc[:, 1:])
# 2.1 single plots
# stat_array_allnan = np.empty([3, 6], dtype='float32')
# stat_array_allnan[:] = np.nan
stat_array_400m = []
stat_array_1km = []
stat_array_9km = []
ind_slc_all = []
for ist in range(len(ismn_sm_am)):
x = ismn_sm_am[ist, :].flatten()
y1 = smap_400m_sta_am[ist, :].flatten()
y2 = smap_1km_sta_am[ist, :].flatten()
y3 = smap_9km_sta_am[ist, :].flatten()
ind_nonnan = np.where(~np.isnan(x) & ~np.isnan(y1) & ~np.isnan(y2) & ~np.isnan(y3))[0]
if len(ind_nonnan) > 5:
x = x[ind_nonnan]
y1 = y1[ind_nonnan]
y2 = y2[ind_nonnan]
y3 = y3[ind_nonnan]
slope_1, intercept_1, r_value_1, p_value_1, std_err_1 = stats.linregress(x, y1)
y1_estimated = intercept_1 + slope_1 * x
number_1 = len(y1)
r_sq_1 = r_value_1 ** 2
ubrmse_1 = np.sqrt(np.mean((x - y1_estimated) ** 2))
bias_1 = np.mean(x - y1)
conf_int_1 = std_err_1 * 1.96 # From the Z-value
stdev_1 = np.std(y1)
stat_array_1 = [number_1, r_sq_1, ubrmse_1, stdev_1, bias_1, p_value_1, conf_int_1]
slope_2, intercept_2, r_value_2, p_value_2, std_err_2 = stats.linregress(x, y2)
y2_estimated = intercept_2 + slope_2 * x
number_2 = len(y2)
r_sq_2 = r_value_2 ** 2
ubrmse_2 = np.sqrt(np.mean((x - y2_estimated) ** 2))
bias_2 = np.mean(x - y2)
conf_int_2 = std_err_2 * 1.96 # From the Z-value
stdev_2 = np.std(y2)
stat_array_2 = [number_2, r_sq_2, ubrmse_2, stdev_2, bias_2, p_value_2, conf_int_2]
slope_3, intercept_3, r_value_3, p_value_3, std_err_3 = stats.linregress(x, y3)
y3_estimated = intercept_3 + slope_3 * x
number_3 = len(y3)
r_sq_3 = r_value_3 ** 2
ubrmse_3 = np.sqrt(np.mean((x - y3_estimated) ** 2))
bias_3 = np.mean(x - y3)
conf_int_3 = std_err_3 * 1.96 # From the Z-value
stdev_3 = np.std(y3)
stat_array_3 = [number_3, r_sq_3, ubrmse_3, stdev_3, bias_3, p_value_3, conf_int_3]
if ubrmse_1 - ubrmse_3 < 0:
fig = plt.figure(figsize=(11, 6.5))
fig.subplots_adjust(hspace=0.2, wspace=0.2)
ax = fig.add_subplot(111)
ax.scatter(x, y1, s=20, c='m', marker='s', label='400 m')
ax.scatter(x, y2, s=20, c='b', marker='o', label='1 km')
ax.scatter(x, y3, s=20, c='g', marker='^', label='9 km')
ax.plot(x, intercept_1+slope_1*x, '-', color='m')
ax.plot(x, intercept_2+slope_2*x, '-', color='b')
ax.plot(x, intercept_3+slope_3*x, '-', color='g')
plt.xlim(0, 0.4)
ax.set_xticks(np.arange(0, 0.5, 0.1))
plt.ylim(0, 0.4)
ax.set_yticks(np.arange(0, 0.5, 0.1))
ax.tick_params(axis='both', which='major', labelsize=13)
ax.plot(ax.get_xlim(), ax.get_ylim(), ls="--", c=".3")
plt.grid(linestyle='--')
plt.legend(loc='upper left', prop={'size': 13})
# plt.title(network_name[ist], fontsize=18, fontweight='bold')
# plt.show()
# plt.savefig(path_results + '/validation/single_plots/' + df_table_am_all['network'][ist] + '_' + df_table_am_all.index[ist]
# + '_(' + str(ist) + ')' + '.png')
plt.close(fig)
stat_array_400m.append(stat_array_1)
stat_array_1km.append(stat_array_2)
stat_array_9km.append(stat_array_3)
ind_slc_all.append(ist)
print(ist)
del(stat_array_1, stat_array_2, stat_array_3)
else:
pass
else:
pass
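# The three per-resolution blocks above repeat one recipe. A hedged sketch of
# that recipe as a single helper (hypothetical, not called here): fit in-situ
# vs. SMAP, then report n, R^2, ubRMSE, stdev, bias, p-value and the 95% CI.
def validation_stats(x, y):
    """Validation metrics for one in-situ (x) / SMAP (y) sample pair."""
    slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
    y_estimated = intercept + slope * x
    ubrmse = np.sqrt(np.mean((x - y_estimated) ** 2))
    return [len(y), r_value ** 2, ubrmse, np.std(y), np.mean(x - y),
            p_value, std_err * 1.96]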
stat_array_400m = np.array(stat_array_400m)
stat_array_1km = np.array(stat_array_1km)
stat_array_9km = np.array(stat_array_9km)
columns_validation = ['number', 'r_sq', 'ubrmse', 'stdev', 'bias', 'p_value', 'conf_int']
index_validation = df_coords.index[ind_slc_all]
# index_validation = ['COSMOS', 'SCAN', 'USCRN']
# stat_array_400m = np.concatenate((id, stat_array_400m), axis=1)
# stat_array_1km = np.concatenate((id, stat_array_1km), axis=1)
# stat_array_9km = np.concatenate((id, stat_array_9km), axis=1)
df_stat_400m = pd.DataFrame(stat_array_400m, columns=columns_validation, index=index_validation)
# df_stat_400m = pd.concat([df_table_am_all['network'][ind_slc_all], df_stat_400m], axis=1)
df_stat_1km = pd.DataFrame(stat_array_1km, columns=columns_validation, index=index_validation)
# df_stat_1km = pd.concat([df_table_am_all['network'][ind_slc_all], df_stat_1km], axis=1)
df_stat_9km = pd.DataFrame(stat_array_9km, columns=columns_validation, index=index_validation)
# df_stat_9km = pd.concat([df_table_am_all['network'][ind_slc_all], df_stat_9km], axis=1)
writer_400m = pd.ExcelWriter(path_results + '/validation/stat_400m.xlsx')
writer_1km = pd.ExcelWriter(path_results + '/validation/stat_1km.xlsx')
writer_9km = pd.ExcelWriter(path_results + '/validation/stat_9km.xlsx')
df_stat_400m.to_excel(writer_400m)
df_stat_1km.to_excel(writer_1km)
df_stat_9km.to_excel(writer_9km)
writer_400m.save()
writer_1km.save()
writer_9km.save()
# ubrmse_diff = stat_array_400m[:, 2] - stat_array_9km[:, 2]
# ubrmse_diff_ind = np.where(ubrmse_diff<0)[0]
# ubrmse_good = df_table_am_all['network'][ubrmse_diff_ind]
stn_slc_all = df_table_am_all['network'][ind_slc_all]
stn_slc_all_unique = stn_slc_all.unique()
stn_slc_all_group = [np.where(stn_slc_all == stn_slc_all_unique[x]) for x in range(len(stn_slc_all_unique))]
# 2.2 subplots
# COSMOS: 3, 41
# SCAN: 211, 229, 254, 258, 272, 280, 298, 330, 352, 358
# SNOTEL: 427, 454, 492, 520, 522, 583, 714, 721, 750, 755
# USCRN: 914, 918, 920, 947, 952, 957, 961, 985, 1002, 1016
network_name = ['COSMOS', 'SCAN', 'SNOTEL', 'USCRN']
site_ind = [[3, 9, 23, 36, 41, 44], [211, 229, 254, 258, 272, 280, 298, 330, 352, 358], [427, 454, 492, 520, 522, 583, 714, 721, 750, 755],
[914, 918, 920, 947, 952, 957, 961, 985, 1002, 1016]]
# network_name = list(stn_slc_all_unique)
# site_ind = stn_slc_all_group
for inw in range(1, len(site_ind)):
fig = plt.figure(figsize=(11, 11))
plt.subplots_adjust(left=0.1, right=0.95, bottom=0.08, top=0.92, hspace=0.25, wspace=0.25)
for ist in range(len(site_ind[inw])):
x = ismn_sm_am[site_ind[inw][ist], :].flatten()
x[x == 0] = np.nan
y1 = smap_400m_sta_am[site_ind[inw][ist], :].flatten()
y2 = smap_1km_sta_am[site_ind[inw][ist], :].flatten()
y3 = smap_9km_sta_am[site_ind[inw][ist], :].flatten()
ind_nonnan = np.where(~np.isnan(x) & ~np.isnan(y1) & ~np.isnan(y2) & ~np.isnan(y3))[0]
x = x[ind_nonnan]
y1 = y1[ind_nonnan]
y2 = y2[ind_nonnan]
y3 = y3[ind_nonnan]
slope_1, intercept_1, r_value_1, p_value_1, std_err_1 = stats.linregress(x, y1)
y1_estimated = intercept_1 + slope_1 * x
number_1 = len(y1)
r_sq_1 = r_value_1 ** 2
ubrmse_1 = np.sqrt(np.mean((x - y1_estimated) ** 2))
bias_1 = np.mean(x - y1)
conf_int_1 = std_err_1 * 1.96 # From the Z-value
stat_array_1 = [number_1, r_sq_1, ubrmse_1, bias_1, p_value_1, conf_int_1]
slope_2, intercept_2, r_value_2, p_value_2, std_err_2 = stats.linregress(x, y2)
y2_estimated = intercept_2 + slope_2 * x
number_2 = len(y2)
r_sq_2 = r_value_2 ** 2
ubrmse_2 = np.sqrt(np.mean((x - y2_estimated) ** 2))
bias_2 = np.mean(x - y2)
conf_int_2 = std_err_2 * 1.96 # From the Z-value
stat_array_2 = [number_2, r_sq_2, ubrmse_2, bias_2, p_value_2, conf_int_2]
slope_3, intercept_3, r_value_3, p_value_3, std_err_3 = stats.linregress(x, y3)
y3_estimated = intercept_3 + slope_3 * x
number_3 = len(y3)
r_sq_3 = r_value_3 ** 2
ubrmse_3 = np.sqrt(np.mean((x - y3_estimated) ** 2))
bias_3 = np.mean(x - y3)
conf_int_3 = std_err_3 * 1.96 # From the Z-value
stat_array_3 = [number_3, r_sq_3, ubrmse_3, bias_3, p_value_3, conf_int_3]
ax = fig.add_subplot(len(site_ind[inw])//2, 2, ist+1)
sc1 = ax.scatter(x, y1, s=20, c='m', marker='s', label='400 m')
sc2 = ax.scatter(x, y2, s=20, c='b', marker='o', label='1 km')
sc3 = ax.scatter(x, y3, s=20, c='g', marker='^', label='9 km')
ax.plot(x, intercept_1+slope_1*x, '-', color='m')
ax.plot(x, intercept_2+slope_2*x, '-', color='b')
ax.plot(x, intercept_3+slope_3*x, '-', color='g')
plt.xlim(0, 0.4)
ax.set_xticks(np.arange(0, 0.5, 0.1))
plt.ylim(0, 0.4)
ax.set_yticks(np.arange(0, 0.5, 0.1))
ax.tick_params(axis='both', which='major', labelsize=13)
ax.plot(ax.get_xlim(), ax.get_ylim(), ls="--", c=".3")
plt.grid(linestyle='--')
ax.text(0.01, 0.35, df_table_am_all.index[site_ind[inw][ist]].replace('_', ' '), fontsize=13)
# add all legends together
handles = [sc1] + [sc2] + [sc3]
labels = [l.get_label() for l in handles]
# leg = plt.legend([sc1, sc2, sc3], labels, loc=(-0.6, 3.55), mode="expand", borderaxespad=0, ncol=3, prop={"size": 13})
leg = plt.legend([sc1, sc2, sc3], labels, loc=(-0.6, 6.1), mode="expand", borderaxespad=0, ncol=3, prop={"size": 13})
fig.text(0.52, 0.01, 'In Situ SM ($\mathregular{m^3/m^3)}$', ha='center', fontsize=16, fontweight='bold')
fig.text(0.02, 0.4, 'SMAP SM ($\mathregular{m^3/m^3)}$', rotation='vertical', fontsize=16, fontweight='bold')
plt.suptitle(network_name[inw], fontsize=21, y=0.99, fontweight='bold')
plt.show()
plt.savefig(path_results + '/validation/subplots/' + network_name[inw] + '.png')
plt.close(fig)
########################################################################################################################
# 3. Time-series plots
# 3.1 Locate the corresponding GPM 10 km data by the lat/lon of the in-situ data
# df_slc_coords = pd.read_csv(path_results + '/slc_coords.csv', index_col=0)
# slc_coords = np.array(df_slc_coords.iloc[:, 1:])
stn_row_10km_ind_all = []
stn_col_10km_ind_all = []
for ist in range(df_coords.shape[0]):
stn_row_10km_ind = np.argmin(np.absolute(df_coords.iloc[ist, 0] - lat_world_geo_10km)).item()
stn_col_10km_ind = np.argmin(np.absolute(df_coords.iloc[ist, 1] - lon_world_geo_10km)).item()
stn_row_10km_ind_all.append(stn_row_10km_ind)
stn_col_10km_ind_all.append(stn_col_10km_ind)
del(stn_row_10km_ind, stn_col_10km_ind)
# Extract the GPM data by indices
gpm_precip_ext_all = []
for iyr in [3, 4]:#range(len(yearname)-1):
f_gpm = h5py.File(path_gpm + '/gpm_precip_' + str(yearname[iyr]) + '.hdf5', 'r')
varname_list_gpm = list(f_gpm.keys())
for x in range(len(varname_list_gpm)):
var_obj = f_gpm[varname_list_gpm[x]][()]
exec(varname_list_gpm[x] + '= var_obj')
del (var_obj)
f_gpm.close()
exec('gpm_precip = gpm_precip_10km_' + str(yearname[iyr]))
gpm_precip_ext = gpm_precip[stn_row_10km_ind_all, stn_col_10km_ind_all, :]
gpm_precip_ext_all.append(gpm_precip_ext)
print(iyr)
del(gpm_precip, gpm_precip_ext)
ind_gpm = ind_gpm[-2:, :]
gpm_precip_ext_all = [gpm_precip_ext_all[x][:, ind_gpm[x, 0]:ind_gpm[x, 1]] for x in range(len(gpm_precip_ext_all))]
gpm_precip_ext_all = np.concatenate(gpm_precip_ext_all, axis=1)
# Prepend NaN columns for the earlier years whose GPM data were not extracted above
gpm_precip_ext = np.empty((1034, 549), dtype='float32')
gpm_precip_ext[:] = np.nan
gpm_precip_ext = np.concatenate((gpm_precip_ext, gpm_precip_ext_all), axis=1)
# index = df_slc_sites.index
# columns = df_table_am_all.columns[1:]
# df_gpm_precip_ext = pd.DataFrame(gpm_precip_ext_all, index=index, columns=columns)
# df_gpm_precip_ext.to_csv(path_results + '/gpm_precip_ext.csv', index=True)
# 3.2 Make the time-series plots
# df_gpm_precip = pd.read_csv(path_results + '/gpm_precip_ext.csv', index_col=0)
# gpm_precip_ext = np.array(df_gpm_precip)
# site_ind = [[0, 11, 25, 28, 34, 36, 42, 44], [250, 274, 279, 286, 296, 351, 362, 383],
# [860, 861, 870, 872, 896, 897, 904, 908], [918, 926, 961, 991, 1000, 1002, 1012, 1016]]
# network_name = ['COSMOS', 'SCAN', 'SOILSCAPE', 'USCRN']
network_name = ['COSMOS', 'SCAN', 'SNOTEL', 'USCRN']
site_ind = [[9, 23, 36, 41, 44], [229, 254, 280, 330, 352], [492, 520, 522, 714, 721],
[947, 957, 985, 1002, 1016]]
# Find the indices from df_gpm_precip
# df_gpm_precip_ind = [df_gpm_precip.index.get_loc(df_table_am_all.index[site_ind[y][x]]) for y in range(len(site_ind)) for x in range(len(site_ind[y]))]
# df_gpm_precip_ind = [df_gpm_precip_ind[:8], df_gpm_precip_ind[8:16], df_gpm_precip_ind[16:24], df_gpm_precip_ind[24:]]
for inw in range(len(site_ind)):
fig = plt.figure(figsize=(13, 8))
plt.subplots_adjust(left=0.08, right=0.88, bottom=0.08, top=0.92, hspace=0.35, wspace=0.25)
for ist in range(len(site_ind[inw])):
x = ismn_sm_am[site_ind[inw][ist], 549:]
y1 = smap_400m_sta_am[site_ind[inw][ist], 549:]
y2 = smap_1km_sta_am[site_ind[inw][ist], 549:]
y3 = smap_9km_sta_am[site_ind[inw][ist], 549:]
z = gpm_precip_ext[site_ind[inw][ist], 549:]
# ind_nonnan = np.where(~np.isnan(x) & ~np.isnan(y1) & ~np.isnan(y2))[0]
# x = x[ind_nonnan]
# y1 = y1[ind_nonnan]
# y2 = y2[ind_nonnan]
# z = z[ind_nonnan]
ax = fig.add_subplot(5, 1, ist+1)
lns1 = ax.plot(x, c='k', marker='+', label='In-situ', markersize=3, linestyle='None')
lns2 = ax.plot(y1, c='m', marker='s', label='400 m', markersize=2, linestyle='None')
lns3 = ax.plot(y2, c='b', marker='o', label='1 km', markersize=2, linestyle='None')
lns4 = ax.plot(y3, c='g', marker='^', label='9 km', markersize=2, linestyle='None')
ax.text(310, 0.4, df_table_am_all.index[site_ind[inw][ist]].replace('_', ' '), fontsize=11)
plt.xlim(0, len(x)//2)
ax.set_xticks(np.arange(0, len(x)//2*3, (len(x))//2))
ax.set_xticklabels([])
labels = ['2018', '2019']
mticks = ax.get_xticks()
ax.set_xticks((mticks[:-1] + mticks[1:]) / 2, minor=True)
ax.tick_params(axis='x', which='minor', length=0)
ax.set_xticklabels(labels, minor=True)
plt.ylim(0, 0.5)
ax.set_yticks(np.arange(0, 0.6, 0.2))
ax.tick_params(axis='y', labelsize=10)
ax2 = ax.twinx()
ax2.set_ylim(0, 64)
ax2.invert_yaxis()
lns5 = ax2.bar(np.arange(len(x)), z, width=0.8, color='royalblue', label='Precip')
ax2.tick_params(axis='y', labelsize=10)
# add all legends together
handles = lns1+lns2+lns3+lns4+[lns5]
labels = [l.get_label() for l in handles]
# handles, labels = ax.get_legend_handles_labels()
plt.gca().legend(handles, labels, loc='center left', bbox_to_anchor=(1.04, 6))
fig.text(0.5, 0.01, 'Days', ha='center', fontsize=16, fontweight='bold')
fig.text(0.02, 0.4, 'SM ($\mathregular{m^3/m^3)}$', rotation='vertical', fontsize=16, fontweight='bold')
fig.text(0.96, 0.4, 'GPM Precip (mm/day)', rotation='vertical', fontsize=16, fontweight='bold')
plt.suptitle(network_name[inw], fontsize=19, y=0.97, fontweight='bold')
plt.savefig(path_results + '/validation/subplots/' + network_name[inw] + '_tseries' + '.png')
plt.close(fig)
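# A minimal sketch of the inverted twin-axis trick used above, which hangs the
# precipitation bars downward from the top of each soil-moisture panel
# (hypothetical helper, not called here):
def add_inverted_precip_axis(ax, precip, ymax=64):
    """Overlay precipitation bars that grow downward on a twin y-axis."""
    ax2 = ax.twinx()            # second y-axis sharing the same x-axis
    ax2.set_ylim(0, ymax)
    ax2.invert_yaxis()          # zero at the top, so bars hang from above
    ax2.bar(np.arange(len(precip)), precip, width=0.8, color='royalblue')
    return ax2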
########################################################################################################################
# 4. Make CONUS maps for R^2
df_stats = pd.read_csv(path_results + '/validation/stat_all.csv', index_col=0)
stn_coords_ind = [np.where(df_coords.index == df_stats.index[x])[0][0] for x in range(len(df_stats))]
df_coords_slc = df_coords.iloc[stn_coords_ind]
# df_coords_slc = df_table_am_all.iloc[stn_coords_ind]
stn_lat = [df_coords_slc.iloc[x]['lat'] for x in range(len(df_stats))]
stn_lon = [df_coords_slc.iloc[x]['lon'] for x in range(len(df_stats))]
site_ind = [[3, 9, 23, 36, 41, 44], [211, 229, 254, 258, 272, 280, 298, 330, 352, 358], [427, 454, 492, 520, 522, 583, 714, 721, 750, 755],
[914, 918, 920, 947, 952, 957, 961, 985, 1002, 1016]]
site_ind_flat = list(itertools.chain(*site_ind))
site_ind_name = df_table_am_all.iloc[site_ind_flat]
site_ind_name = site_ind_name['network']
df_stats_slc_ind = [np.where(df_stats.index == site_ind_name.index[x])[0][0] for x in range(len(site_ind_flat))]
df_stats_slc = df_stats.iloc[df_stats_slc_ind]
df_stats_slc_full = pd.concat([site_ind_name, df_stats_slc], axis=1)
# Write to file
writer_stn = pd.ExcelWriter(path_results + '/validation/stat_stn.xlsx')
df_stats_slc_full.to_excel(writer_stn)
writer_stn.save()
# Write coordinates and network to files
# df_coords_full = pd.concat([df_table_am_all['network'].to_frame().reset_index(drop=True, inplace=True),
# df_coords.reset_index(drop=True, inplace=True)], axis=1)
df_coords.iloc[ind_slc_all].to_csv(path_results + '/df_coords.csv', index=True)
df_table_am_all_slc = df_table_am_all.iloc[ind_slc_all]
df_network = df_table_am_all_slc['network'].to_frame()
df_network.to_csv(path_results + '/df_network.csv', index=True)
# 4.1 Make the maps
# Extract state name and center coordinates
shp_records = Reader(path_gis_data + '/cb_2015_us_state_500k/cb_2015_us_state_500k.shp').records()
shp_records = list(shp_records)
state_name = [shp_records[x].attributes['STUSPS'] for x in range(len(shp_records))]
# name_lon = [(shp_records[x].bounds[0] + shp_records[x].bounds[2])/2 for x in range(len(shp_records))]
# name_lat = [(shp_records[x].bounds[1] + shp_records[x].bounds[3])/2 for x in range(len(shp_records))]
shape_conus = ShapelyFeature(Reader(path_gis_data + '/cb_2015_us_state_500k/cb_2015_us_state_500k.shp').geometries(),
ccrs.PlateCarree(), edgecolor='black', facecolor='none')
shape_conus_geometry = list(Reader(path_gis_data + '/cb_2015_us_state_500k/cb_2015_us_state_500k.shp').geometries())
name_coords = [shape_conus_geometry[x].representative_point().coords[:] for x in range(len(shape_conus_geometry))]
c_rsq_400m = df_stats['r_sq_400m'].tolist()
c_rmse_400m = df_stats['ubrmse_400m'].tolist()
c_rsq_1km = df_stats['r_sq_1km'].tolist()
c_rmse_1km = df_stats['ubrmse_1km'].tolist()
c_rsq_9km = df_stats['r_sq_9km'].tolist()
c_rmse_9km = df_stats['ubrmse_9km'].tolist()
# 4.1.1 R^2
fig = plt.figure(figsize=(10, 12), dpi=100, facecolor='w', edgecolor='k')
plt.subplots_adjust(left=0.05, right=0.88, bottom=0.05, top=0.95, hspace=0.1, wspace=0.1)
# 400 m
ax1 = fig.add_subplot(3, 1, 1, projection=ccrs.PlateCarree())
ax1.set_extent([-125, -67, 25, 50], ccrs.PlateCarree())
ax1.add_feature(shape_conus)
sc1 = ax1.scatter(stn_lon, stn_lat, c=c_rsq_400m, s=40, marker='^', edgecolors='k', cmap='jet')
sc1.set_clim(vmin=0, vmax=1)
ax1.text(-123, 27, '400 m', fontsize=16, fontweight='bold')
for x in range(len(shp_records)):
ax1.annotate(s=state_name[x], xy=name_coords[x][0], horizontalalignment='center')
# 1 km
ax2 = fig.add_subplot(3, 1, 2, projection=ccrs.PlateCarree())
ax2.set_extent([-125, -67, 25, 50], ccrs.PlateCarree())
ax2.add_feature(shape_conus)
sc2 = ax2.scatter(stn_lon, stn_lat, c=c_rsq_1km, s=40, marker='^', edgecolors='k', cmap='jet')
sc2.set_clim(vmin=0, vmax=1)
ax2.text(-123, 27, '1 km', fontsize=16, fontweight='bold')
for x in range(len(shp_records)):
ax2.annotate(s=state_name[x], xy=name_coords[x][0], horizontalalignment='center')
# 9 km
ax3 = fig.add_subplot(3, 1, 3, projection=ccrs.PlateCarree())
ax3.set_extent([-125, -67, 25, 50], ccrs.PlateCarree())
ax3.add_feature(shape_conus)
sc3 = ax3.scatter(stn_lon, stn_lat, c=c_rsq_9km, s=40, marker='^', edgecolors='k', cmap='jet')
sc3.set_clim(vmin=0, vmax=1)
ax3.text(-123, 27, '9 km', fontsize=16, fontweight='bold')
for x in range(len(shp_records)):
ax3.annotate(s=state_name[x], xy=name_coords[x][0], horizontalalignment='center')
cbar_ax = fig.add_axes([0.9, 0.2, 0.02, 0.6])
cbar = fig.colorbar(sc3, cax=cbar_ax, extend='both')
cbar.ax.locator_params(nbins=5)
cbar.ax.tick_params(labelsize=14)
plt.suptitle('$\mathregular{R^2}$', fontsize=20, y=0.98, fontweight='bold')
plt.savefig(path_results + '/validation/' + 'r2_map.png')
plt.close(fig)
# 4.1.2 RMSE
fig = plt.figure(figsize=(10, 12), dpi=100, facecolor='w', edgecolor='k')
plt.subplots_adjust(left=0.05, right=0.88, bottom=0.05, top=0.95, hspace=0.1, wspace=0.1)
# 400 m
ax1 = fig.add_subplot(3, 1, 1, projection=ccrs.PlateCarree())
ax1.set_extent([-125, -67, 25, 50], ccrs.PlateCarree())
ax1.add_feature(shape_conus)
sc1 = ax1.scatter(stn_lon, stn_lat, c=c_rmse_400m, s=40, marker='^', edgecolors='k', cmap='jet')
sc1.set_clim(vmin=0, vmax=0.3)
ax1.text(-123, 27, '400 m', fontsize=16, fontweight='bold')
for x in range(len(shp_records)):
ax1.annotate(s=state_name[x], xy=name_coords[x][0], horizontalalignment='center')
# 1 km
ax2 = fig.add_subplot(3, 1, 2, projection=ccrs.PlateCarree())
ax2.set_extent([-125, -67, 25, 50], ccrs.PlateCarree())
ax2.add_feature(shape_conus)
sc2 = ax2.scatter(stn_lon, stn_lat, c=c_rmse_1km, s=40, marker='^', edgecolors='k', cmap='jet')
sc2.set_clim(vmin=0, vmax=0.3)
ax2.text(-123, 27, '1 km', fontsize=16, fontweight='bold')
for x in range(len(shp_records)):
ax2.annotate(s=state_name[x], xy=name_coords[x][0], horizontalalignment='center')
# 9 km
ax3 = fig.add_subplot(3, 1, 3, projection=ccrs.PlateCarree())
ax3.set_extent([-125, -67, 25, 50], ccrs.PlateCarree())
ax3.add_feature(shape_conus)
sc3 = ax3.scatter(stn_lon, stn_lat, c=c_rmse_9km, s=40, marker='^', edgecolors='k', cmap='jet')
sc3.set_clim(vmin=0, vmax=0.3)
ax3.text(-123, 27, '9 km', fontsize=16, fontweight='bold')
for x in range(len(shp_records)):
ax3.annotate(s=state_name[x], xy=name_coords[x][0], horizontalalignment='center')
cbar_ax = fig.add_axes([0.9, 0.2, 0.02, 0.6])
cbar = fig.colorbar(sc3, cax=cbar_ax, extend='both')
cbar.set_label('$\mathregular{(m^3/m^3)}$', fontsize=14)
cbar.ax.locator_params(nbins=6)
cbar.ax.tick_params(labelsize=14)
plt.suptitle('RMSE', fontsize=20, y=0.98, fontweight='bold')
plt.savefig(path_results + '/validation/' + 'rmse_map.png')
plt.close(fig)
# 4.1.3 R^2 and RMSE map
c_rsq_400m_3net = c_rsq_400m[0:88] + c_rsq_400m[255:]
c_rmse_400m_3net = c_rmse_400m[0:88] + c_rmse_400m[255:]
stn_lon_3net = stn_lon[0:88] + stn_lon[255:]
stn_lat_3net = stn_lat[0:88] + stn_lat[255:]
fig = plt.figure(figsize=(10, 8), dpi=150, facecolor='w', edgecolor='k')
plt.subplots_adjust(left=0.05, right=0.88, bottom=0.05, top=0.95, hspace=0.1, wspace=0.1)
# R^2
ax1 = fig.add_subplot(2, 1, 1, projection=ccrs.PlateCarree())
ax1.set_extent([-125, -67, 25, 50], ccrs.PlateCarree())
ax1.add_feature(shape_conus)
sc1 = ax1.scatter(stn_lon_3net, stn_lat_3net, c=c_rsq_400m_3net, s=40, marker='^', edgecolors='k', cmap='jet')
sc1.set_clim(vmin=0, vmax=1)
ax1.text(-123, 27, '$\mathregular{R^2}$', fontsize=16, fontweight='bold')
for x in range(len(shp_records)):
ax1.annotate(s=state_name[x], xy=name_coords[x][0], horizontalalignment='center')
cbar_ax1 = fig.add_axes([0.9, 0.52, 0.015, 0.43])
cbar1 = fig.colorbar(sc1, cax=cbar_ax1, extend='both')
cbar1.ax.locator_params(nbins=5)
cbar1.ax.tick_params(labelsize=12)
# RMSE
ax2 = fig.add_subplot(2, 1, 2, projection=ccrs.PlateCarree())
ax2.set_extent([-125, -67, 25, 50], ccrs.PlateCarree())
ax2.add_feature(shape_conus)
sc2 = ax2.scatter(stn_lon_3net, stn_lat_3net, c=c_rmse_400m_3net, s=40, marker='^', edgecolors='k', cmap='jet')
sc2.set_clim(vmin=0, vmax=0.3)
ax2.text(-123, 27, 'RMSE', fontsize=16, fontweight='bold')
for x in range(len(shp_records)):
ax2.annotate(s=state_name[x], xy=name_coords[x][0], horizontalalignment='center')
cbar_ax2 = fig.add_axes([0.9, 0.05, 0.015, 0.43])
cbar2 = fig.colorbar(sc2, cax=cbar_ax2, extend='both')
cbar2.ax.locator_params(nbins=6)
cbar2.ax.tick_params(labelsize=12)
cbar2.set_label('$\mathregular{(m^3/m^3)}$', fontsize=14)
plt.savefig(path_results + '/validation/' + 'r2_rmse_map.png')
plt.close(fig)
########################################################################################################################
# 5. Taylor diagram
df_stats = pd.read_csv(path_results + '/validation/stat_all.csv', index_col=0)
stn_coords_ind = [np.where(df_coords.index == df_stats.index[x])[0][0] for x in range(len(df_stats))]
stdev_400m = np.array(df_stats['stdev_400m'])
rmse_400m = np.array(df_stats['ubrmse_400m'])
r_400m = np.array(np.sqrt(df_stats['r_sq_400m']))
stdev_1km = np.array(df_stats['stdev_1km'])
rmse_1km = np.array(df_stats['ubrmse_1km'])
r_1km = np.array(np.sqrt(df_stats['r_sq_1km']))
stdev_9km = np.array(df_stats['stdev_9km'])
rmse_9km = np.array(df_stats['ubrmse_9km'])
r_9km = np.array(np.sqrt(df_stats['r_sq_9km']))
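# In Taylor's (2001) geometry these three statistics are tied together by the
# law of cosines, so each (stdev, ubRMSE, r) triplet below plots as one point:
#   ubRMSE^2 = stdev_obs^2 + stdev_mod^2 - 2 * stdev_obs * stdev_mod * r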
# 5.1 Plot together
fig = plt.figure(figsize=(7, 14), dpi=100, facecolor='w', edgecolor='k')
# 400 m
plt.subplots_adjust(left=0.05, right=0.99, bottom=0.05, top=0.9, hspace=0.2, wspace=0.2)
ax1 = fig.add_subplot(3, 1, 1)
sm.taylor_diagram(stdev_400m, rmse_400m, r_400m, markerColor='k', markerSize=10, alpha=0.0, markerLegend='off',
tickRMS=np.arange(0, 0.15, 0.03), colRMS='tab:green', styleRMS=':', widthRMS=1.0, titleRMS='on',
titleRMSDangle=40.0, showlabelsRMS='on', tickSTD=np.arange(0, 0.12, 0.03), axismax=0.12,
colSTD='black', styleSTD='-.', widthSTD=1.0, titleSTD='on',
colCOR='tab:blue', styleCOR='--', widthCOR=1.0, titleCOR='on')
plt.xticks(np.arange(0, 0.15, 0.05))
ax1.text(0.1, 0.12, '400 m', fontsize=16, fontweight='bold')
# 1 km
ax2 = fig.add_subplot(3, 1, 2)
sm.taylor_diagram(stdev_1km, rmse_1km, r_1km, markerColor='k', markerSize=10, alpha=0.0, markerLegend='off',
tickRMS=np.arange(0, 0.15, 0.03), colRMS='tab:green', styleRMS=':', widthRMS=1.0, titleRMS='on',
titleRMSDangle=40.0, showlabelsRMS='on', tickSTD=np.arange(0, 0.12, 0.03), axismax=0.12,
colSTD='black', styleSTD='-.', widthSTD=1.0, titleSTD='on',
colCOR='tab:blue', styleCOR='--', widthCOR=1.0, titleCOR='on')
plt.xticks(np.arange(0, 0.15, 0.05))
ax2.text(0.1, 0.12, '1 km', fontsize=16, fontweight='bold')
# 9 km
ax3 = fig.add_subplot(3, 1, 3)
sm.taylor_diagram(stdev_9km, rmse_9km, r_9km, markerColor='k', markerSize=10, alpha=0.0, markerLegend='off',
tickRMS=np.arange(0, 0.15, 0.03), colRMS='tab:green', styleRMS=':', widthRMS=1.0, titleRMS='on',
titleRMSDangle=40.0, showlabelsRMS='on', tickSTD=np.arange(0, 0.12, 0.03), axismax=0.12,
colSTD='black', styleSTD='-.', widthSTD=1.0, titleSTD='on',
colCOR='tab:blue', styleCOR='--', widthCOR=1.0, titleCOR='on')
plt.xticks(np.arange(0, 0.15, 0.05))
ax3.text(0.1, 0.12, '9 km', fontsize=16, fontweight='bold')
plt.savefig(path_results + '/validation/' + 'td.png')
# 5.2 Plot 400 m
stdev_400m_3net = np.concatenate((stdev_400m[0:88], stdev_400m[255:]))
rmse_400m_3net = np.concatenate((rmse_400m[0:88], rmse_400m[255:]))
r_400m_3net = np.concatenate((r_400m[0:88], r_400m[255:]))
fig = plt.figure(figsize=(5, 5), dpi=200, facecolor='w', edgecolor='k')
# plt.subplots_adjust(left=0.1, right=0.9, bottom=0.1, top=0.9, hspace=0.2, wspace=0.2)
# ax1 = fig.add_subplot(3, 1, 1)
sm.taylor_diagram(stdev_400m_3net, rmse_400m_3net, r_400m_3net, markerColor='k', markerSize=10, alpha=0.0, markerLegend='off',
tickRMS=np.arange(0, 0.15, 0.03), colRMS='tab:green', styleRMS=':', widthRMS=1.0, titleRMS='on',
titleRMSDangle=40.0, showlabelsRMS='on', tickSTD=np.arange(0, 0.12, 0.03), axismax=0.12,
colSTD='black', styleSTD='-.', widthSTD=1.0, titleSTD='on',
colCOR='tab:blue', styleCOR='--', widthCOR=1.0, titleCOR='on')
plt.xticks(np.arange(0, 0.15, 0.05))
# plt.text(0.1, 0.12, '400 m', fontsize=16, fontweight='bold')
plt.savefig(path_results + '/validation/' + 'td_400m.png')
########################################################################################################################
# 6 Classify the stations
df_stat_1km = pd.read_excel(path_results + '/validation/stat_1km.xlsx', index_col=0)
########################################################################################################################
# (end of file; next file: /models.py, from repo xnuray98s/FSND-capstone)
########################################################################################################################
import os
from sqlalchemy import Column, String, Integer, create_engine
from flask_sqlalchemy import SQLAlchemy
import json
database_name = "casting"
database_path_locally = "postgresql://{}:{}@{}/{}".format(
"postgres", "postgres", "localhost:5432", database_name
)
db = SQLAlchemy()
"""
setup_db(app)
binds a flask application and a SQLAlchemy service
"""
# change the path to database_path_locally
def setup_db(
app,
database_path=os.environ["DATABASE_URL"].replace(
"postgres://", "postgresql://"),
):
app.config["SQLALCHEMY_DATABASE_URI"] = database_path
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
db.app = app
db.init_app(app)
db.create_all()
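# A minimal usage sketch (assumes either DATABASE_URL is set in the
# environment, or that database_path_locally is passed explicitly):
#
#   from flask import Flask
#   app = Flask(__name__)
#   setup_db(app)              # or: setup_db(app, database_path_locally)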
def insert_dummy_values_for_test():
movie1 = Movie(
title="I Am Legend",
image="https://images.moviesanywhere.com/56c992f18d66817a14cd68de04a10e57/2838a862-a8a4-4f54-9a22-bc2fba7264a3.jpg",
cast="Will Smith, Alice Braga, Charlie Tahan, Salli Richardson-Whitfield",
plot="Years after a plague kills most of humanity and transforms the rest into monsters, the sole survivor in New York City struggles valiantly to find a cure.",
genres="Drama, Horror, Sci-Fi",
rating="PG-13",
imdb="7.2",
release="2007",
)
movie2 = Movie(
title="Avatar",
image="https://i.pinimg.com/originals/32/f1/1b/32f11b88771756b748a427428565afdd.jpg",
cast="Sam Worthington, Zoe Saldana, Sigourney Weaver, Stephen Lang",
plot="A paraplegic marine dispatched to the moon Pandora on a unique mission becomes torn between following his orders and protecting the world he feels is his home.",
genres="Action, Adventure, Fantasy",
rating="PG-13",
imdb="7.9",
release="2009",
)
actor1 = Actor(
name="Will Smith",
image="https://encrypted-tbn2.gstatic.com/images?q=tbn:ANd9GcQbuF86tSHODHWHJRusio04zBWZHRNgFJdu-jyiWgkIbBC4-tuT",
gender="m",
nationality="American",
dob=1968,
movie="I Am Legend",
)
actor2 = Actor(
name="Sam Worthington",
image="https://encrypted-tbn3.gstatic.com/images?q=tbn:ANd9GcRRyYPpSOn_kpXBtE4wJ50MCIJ9J7bBAq8_swh03mb1kml7lGqF",
gender="m",
nationality="Australian",
dob=1976,
movie="Avatar",
)
db.drop_all()
db.create_all()
movie1.insert()
movie2.insert()
actor1.insert()
actor2.insert()
class Movie(db.Model):
__tablename__ = "movies"
id = Column(Integer, primary_key=True)
title = Column(String)
image = Column(String)
cast = Column(String)
plot = Column(String)
genres = Column(String)
rating = Column(String)
imdb = Column(String)
release = Column(String)
def __init__(self, title, image, cast,
plot, genres, rating, imdb, release):
self.title = title
self.image = image
self.cast = cast
self.plot = plot
self.genres = genres
self.rating = rating
self.imdb = imdb
self.release = release
def insert(self):
db.session.add(self)
db.session.commit()
def update(self):
db.session.commit()
def delete(self):
db.session.delete(self)
db.session.commit()
def format(self):
return {
"id": self.id,
"title": self.title,
"image": self.image,
"cast": self.cast,
"plot": self.plot,
"genres": self.genres,
"rating": self.rating,
"imdb": self.imdb,
"release": self.release,
}
class Actor(db.Model):
__tablename__ = "actors"
id = Column(Integer, primary_key=True)
name = Column(String)
image = Column(String)
gender = Column(String)
nationality = Column(String)
dob = Column(String)
movie = Column(String)
def __init__(self, name, image, gender, nationality, dob, movie):
self.name = name
self.image = image
self.gender = gender
self.nationality = nationality
self.dob = dob
self.movie = movie
def insert(self):
db.session.add(self)
db.session.commit()
def update(self):
db.session.commit()
def delete(self):
db.session.delete(self)
db.session.commit()
def format(self):
return {
"id": self.id,
"name": self.name,
"image": self.image,
"gender": self.gender,
"nationality": self.nationality,
"dob": self.dob,
"movie": self.movie,
}
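# A hedged sketch of the CRUD helpers above (typically requires an active
# Flask application context; values are illustrative only):
#
#   movie = Movie(title="Example", image="", cast="", plot="",
#                 genres="Drama", rating="PG", imdb="7.0", release="2020")
#   movie.insert()             # session.add + commit
#   movie.rating = "PG-13"
#   movie.update()             # commit pending changes
#   payload = movie.format()   # JSON-ready dict for API responses
#   movie.delete()             # session.delete + commit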
########################################################################################################################
# (end of file; next file: /Exps_7_v3/doc3d/I_w_M_to_Wxyz_focus_Z_ok/wiColorJ/pyr_Tcrop255_pad20_jit15/Sob_k15_s001_EroM_Mae_s001/pyr_5s/L5/step10_a.py, from repo KongBOy/kong_model2)
########################################################################################################################
#############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### Add kong_model2 to sys.path
import os
code_exe_path = os.path.realpath(__file__) ### path of the currently executing step10_b.py
code_exe_path_element = code_exe_path.split("\\") ### split the path; used below to find which level kong_model2 sits at
code_dir = "\\".join(code_exe_path_element[:-1])
kong_layer = code_exe_path_element.index("kong_model2") ### find which level kong_model2 is at
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1]) ### locate the kong_model2 dir
import sys ### add kong_model2 to sys.path
sys.path.append(kong_model2_dir)
sys.path.append(code_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" code_dir:", code_dir)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
kong_to_py_layer = len(code_exe_path_element) - 1 - kong_layer ### the -1 converts a length into an index
# print(" kong_to_py_layer:", kong_to_py_layer)
if (kong_to_py_layer == 0): template_dir = ""
elif(kong_to_py_layer == 2): template_dir = code_exe_path_element[kong_layer + 1][0:] ### [7:] used to strip the step1x_ prefix; meaningful names made stripping unnecessary, so it was changed to 0
elif(kong_to_py_layer == 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] ### [5:] used to strip the mask_ prefix, added because a Python module name cannot start with a digit; the automatic ordering turned out acceptable, so it was changed to 0
elif(kong_to_py_layer > 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] + "/" + "/".join(code_exe_path_element[kong_layer + 3: -1])
# print(" template_dir:", template_dir) ### 舉例: template_dir: 7_mask_unet/5_os_book_and_paper_have_dtd_hdr_mix_bg_tv_s04_mae
#############################################################################################################################################################################################################
exp_dir = template_dir
#############################################################################################################################################################################################################
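# A pathlib sketch of the directory discovery above (hypothetical helper, not
# used below), assuming "kong_model2" appears exactly once in the path:
def find_repo_dir(file_path, repo_name="kong_model2"):
    """Locate the repo root by walking the resolved path components."""
    from pathlib import Path
    parts = Path(file_path).resolve().parts
    return str(Path(*parts[:parts.index(repo_name) + 1]))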
from step06_a_datas_obj import *
from step09_5side_L5 import *
from step10_a2_loss_info_obj import *
from step10_b2_exp_builder import Exp_builder
rm_paths = [path for path in sys.path if code_dir in path]
for rm_path in rm_paths: sys.path.remove(rm_path)
rm_moduless = [module for module in sys.modules if "step09" in module]
for rm_module in rm_moduless: del sys.modules[rm_module]
#############################################################################################################################################################################################################
'''
exp_dir is the folder name one level *above* result_dir! Nested exp_dir paths work fine too~
For example, with exp_dir = "6_mask_unet/your_own_name", every result_dir lives under:
6_mask_unet/your_own_name/result_a
6_mask_unet/your_own_name/result_b
6_mask_unet/your_own_name/...
'''
use_db_obj = type8_blender_kong_doc3d_in_I_gt_W_ch_norm_v2
use_loss_obj = [mae_s001_sobel_k15_s001_EroseM_loss_info_builder.set_loss_target("UNet_z").copy(), mae_s001_sobel_k15_s001_EroseM_loss_info_builder.set_loss_target("UNet_y").copy(), mae_s001_sobel_k15_s001_EroseM_loss_info_builder.set_loss_target("UNet_x").copy()] ### the z, y, x order is matched against step07_b_0b_Multi_UNet
#############################################################
### To let result_analyze draw blank figures, build an empty Exp_builder
empty = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_1__2side_1__3side_1_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_1__2side_1__3side_1_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="an empty Exp_builder for result_analyze to draw blank figures")
##################################
### 1side1
##################################
# "1" 3 6 10 15 21 28 36 45 55
# 2side1 OK 1
ch032_1side_1__2side_1__3side_1_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_1__2side_1__3side_1_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_1__2side_1__3side_1_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
##################################
### 1side2
##################################
# "1" 3 6 10 15 21 28 36 45 55
# 2side1 OK 1
ch032_1side_2__2side_1__3side_1_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_1__3side_1_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_2__2side_1__3side_1_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
# 1 "3" 6 10 15 21 28 36 45 55
# 2side2 OK 4
ch032_1side_2__2side_2__3side_1_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_2__3side_1_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_2__2side_2__3side_1_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_2__2side_2__3side_2_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_2__3side_2_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_2__2side_2__3side_2_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_2__2side_2__3side_2_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_2__3side_2_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_2__2side_2__3side_2_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_2__2side_2__3side_2_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_2__3side_2_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_2__2side_2__3side_2_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
##################################
### 1side3
##################################
# "1" 3 6 10 15 21 28 36 45 55
# 2side1 OK 1
ch032_1side_3__2side_1__3side_1_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_1__3side_1_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_1__3side_1_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
# 1 "3" 6 10 15 21 28 36 45 55
# 2side2 OK 4
ch032_1side_3__2side_2__3side_1_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_2__3side_1_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_2__3side_1_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_2__3side_2_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_2__3side_2_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_2__3side_2_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_2__3side_2_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_2__3side_2_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_2__3side_2_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_2__3side_2_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_2__3side_2_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_2__3side_2_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
# 1 3 "6" 10 15 21 28 36 45 55
# 2side3 OK 10
ch032_1side_3__2side_3__3side_1_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_1_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_1_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_2_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_2_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_2_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_2_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_2_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_2_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_2_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_2_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_2_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_3_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_3_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_3_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_3_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_3_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_3_5s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
##################################
### 1side4
##################################
# "1" 3 6 10 15 21 28 36 45 55
# 2side1 OK 1
ch032_1side_4__2side_1__3side_1_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_1__3side_1_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_1__3side_1_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
# 1 "3" 6 10 15 21 28 36 45 55
# 2side2 OK 4
ch032_1side_4__2side_2__3side_1_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_2__3side_1_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_2__3side_1_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_2__3side_2_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_2__3side_2_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_2__3side_2_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_2__3side_2_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_2__3side_2_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_2__3side_2_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_2__3side_2_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_2__3side_2_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_2__3side_2_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
# 1 3 "6" 10 15 21 28 36 45 55
# 2side3 OK 10
ch032_1side_4__2side_3__3side_1_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_1_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_1_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_2_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_2_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_2_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_2_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_2_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_2_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_2_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_2_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_2_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_3_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_3_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_3_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_3_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_3_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_3_5s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
# 1 3 6 "10" 15 21 28 36 45 55
# 2side4 OK 20
ch032_1side_4__2side_4__3side_1_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_1_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_1_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_2_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_2_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_2_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_2_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_2_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_2_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_2_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_2_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_2_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_3_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_3_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_3_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_3_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_3_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_3_5s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_3_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_3_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_3_5s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_4_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_4_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_4_5s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_4_5s4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s4.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
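# ----------------------------------------------------------------------
# Note (an illustrative sketch of ours, not part of the experiment
# definitions): within each "2side k" block in these 1side sections, the
# variable names enumerate every non-increasing tail (3side, 4side,
# 5side) with k >= 3side >= 4side >= 5side >= 1, so a 2side-k block
# holds C(k + 2, 3) experiments (1, 4, 10, 20, 35 for k = 1..5), which
# matches the "OK n" count written in each block comment. The quoted
# entry in the "1 3 6 10 15 21 28 36 45 55" lines appears to be the
# triangular number C(k + 1, 2), i.e. the block size one pyramid level
# earlier (3side/4side pairs only). The helper below merely verifies
# those counts; `pyramid_tail_combos` is a name introduced here for
# illustration only.
from itertools import combinations_with_replacement
from math import comb

def pyramid_tail_combos(two_side):
    """Yield valid (3side, 4side, 5side) tails for a given 2side value."""
    # combinations_with_replacement produces non-decreasing triples;
    # reversing each one gives the non-increasing (3s, 4s, 5s) order
    # used in the variable names in this file.
    for tail in combinations_with_replacement(range(1, two_side + 1), 3):
        yield tuple(reversed(tail))

for _k in range(1, 6):
    assert sum(1 for _ in pyramid_tail_combos(_k)) == comb(_k + 2, 3)  # 1, 4, 10, 20, 35
# ----------------------------------------------------------------------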
##################################
### 1side5
##################################
# "1" 3 6 10 15 21 28 36 45 55
# 2side1 OK 1
ch032_1side_5__2side_1__3side_1_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_1__3side_1_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_1__3side_1_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
# 1 "3" 6 10 15 21 28 36 45 55
# 2side2 OK 4
ch032_1side_5__2side_2__3side_1_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_2__3side_1_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_2__3side_1_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_2__3side_2_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_2__3side_2_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_2__3side_2_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_2__3side_2_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_2__3side_2_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_2__3side_2_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_2__3side_2_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_2__3side_2_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_2__3side_2_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
# 1 3 "6" 10 15 21 28 36 45 55
# 2side3 OK 10
ch032_1side_5__2side_3__3side_1_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_1_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_3__3side_1_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_3__3side_2_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_2_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_3__3side_2_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_3__3side_2_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_2_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_3__3side_2_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_3__3side_2_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_2_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_3__3side_2_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_3__3side_3_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_3_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_3__3side_3_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_3__3side_3_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_3_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_3__3side_3_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_3__3side_3_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_3_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_3__3side_3_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_3__3side_3_4side_3_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_3_4side_3_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_3__3side_3_4side_3_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_3__3side_3_4side_3_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_3_4side_3_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_3__3side_3_4side_3_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_3__3side_3_4side_3_5s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_3_4side_3_5s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_3__3side_3_4side_3_5s3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
# 1 3 6 "10" 15 21 28 36 45 55
# 2side4 OK 20
ch032_1side_5__2side_4__3side_1_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_1_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_1_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_2_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_2_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_2_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_2_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_2_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_2_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_2_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_2_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_2_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_3_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_3_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_3_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_3_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_3_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_3_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_3_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_3_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_3_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_3_4side_3_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_3_4side_3_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_3_4side_3_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_3_4side_3_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_3_4side_3_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_3_4side_3_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_3_4side_3_5s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_3_4side_3_5s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_3_4side_3_5s3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_4_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_4_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_4_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_4_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_4_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_4_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_4_4side_3_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_3_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_4_4side_3_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_4_4side_3_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_3_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_4_4side_3_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_4_4side_3_5s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_3_5s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_4_4side_3_5s3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_4_4side_4_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_4_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_4_4side_4_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_4_4side_4_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_4_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_4_4side_4_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_4_4side_4_5s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_4_5s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_4_4side_4_5s3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_4_4side_4_5s4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_4_5s4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4__3side_4_4side_4_5s4.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
# 1 3 6 10 "15" 21 28 36 45 55
# 2side5 OK 35
ch032_1side_5__2side_5__3side_1_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_1_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_1_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_2_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_2_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_2_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_2_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_2_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_2_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_2_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_2_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_2_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_3_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_3_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_3_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_3_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_3_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_3_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_3_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_3_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_3_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_3_4side_3_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_3_4side_3_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_3_4side_3_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_3_4side_3_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_3_4side_3_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_3_4side_3_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_3_4side_3_5s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_3_4side_3_5s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_3_4side_3_5s3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_4_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_4_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_4_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_4_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_4_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_4_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_4_4side_3_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_3_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_4_4side_3_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_4_4side_3_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_3_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_4_4side_3_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_4_4side_3_5s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_3_5s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_4_4side_3_5s3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_4_4side_4_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_4_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_4_4side_4_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_4_4side_4_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_4_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_4_4side_4_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_4_4side_4_5s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_4_5s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_4_4side_4_5s3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_4_4side_4_5s4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_4_5s4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_4_4side_4_5s4.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_5_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_5_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_5_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_3_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_3_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_5_4side_3_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_3_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_3_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_5_4side_3_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_3_5s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_3_5s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_5_4side_3_5s3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_4_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_4_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_5_4side_4_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_4_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_4_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_5_4side_4_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_4_5s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_4_5s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_5_4side_4_5s3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_4_5s4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_4_5s4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_5_4side_4_5s4.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_5_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_5_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_5_4side_5_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_5_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_5_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_5_4side_5_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_5_5s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_5_5s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_5_4side_5_5s3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_5_5s4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_5_5s4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_5_4side_5_5s4.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_5_5s5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_5_5s5, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5__3side_5_4side_5_5s5.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
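# ----------------------------------------------------------------------
# Sketch (ours, for illustration only): every identifier above follows
# the single template
#   ch032_1side_{a}__2side_{b}__3side_{c}_4side_{d}_5s{e}
# with a >= b >= c >= d >= e >= 1, so a whole 1side block can be listed
# programmatically. For 1side 5 that is 1 + 4 + 10 + 20 + 35 = 70
# names, matching the 70 Exp_builder lines in the block above.
# `pyramid_exp_names` is a hypothetical helper and is not used by the
# definitions themselves.
def pyramid_exp_names(one_side):
    names = []
    for b in range(1, one_side + 1):          # 2side
        for c in range(1, b + 1):             # 3side
            for d in range(1, c + 1):         # 4side
                for e in range(1, d + 1):     # 5side
                    names.append(f"ch032_1side_{one_side}__2side_{b}__"
                                 f"3side_{c}_4side_{d}_5s{e}")
    return names

assert len(pyramid_exp_names(5)) == 70  # size of the 1side5 block
# ----------------------------------------------------------------------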
##################################
### 1side6
##################################
# "1" 3 6 10 15 21 28 36 45 55
# 2side1 OK 1
ch032_1side_6__2side_1__3side_1_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_1__3side_1_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_1__3side_1_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
# 1 "3" 6 10 15 21 28 36 45 55
# 2side2 OK 4
ch032_1side_6__2side_2__3side_1_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_2__3side_1_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_2__3side_1_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_2__3side_2_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_2__3side_2_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_2__3side_2_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_2__3side_2_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_2__3side_2_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_2__3side_2_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_2__3side_2_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_2__3side_2_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_2__3side_2_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
# 1 3 "6" 10 15 21 28 36 45 55
# 2side3 OK 10
ch032_1side_6__2side_3__3side_1_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_3__3side_1_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_3__3side_1_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_3__3side_2_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_3__3side_2_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_3__3side_2_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_3__3side_2_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_3__3side_2_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_3__3side_2_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_3__3side_2_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_3__3side_2_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_3__3side_2_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_3__3side_3_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_3__3side_3_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_3__3side_3_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_3__3side_3_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_3__3side_3_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_3__3side_3_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_3__3side_3_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_3__3side_3_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_3__3side_3_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_3__3side_3_4side_3_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_3__3side_3_4side_3_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_3__3side_3_4side_3_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_3__3side_3_4side_3_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_3__3side_3_4side_3_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_3__3side_3_4side_3_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_3__3side_3_4side_3_5s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_3__3side_3_4side_3_5s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_3__3side_3_4side_3_5s3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
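# ---------------------------------------------------------------------------
# Hypothetical generator sketch (illustration only, never called by the
# pipeline): every 2sideN block in this file enumerates the combinations with
# 2side >= 3side >= 4side >= 5side, so each block holds the tetrahedral
# number C(N + 2, 3) of experiments, e.g. C(5, 3) = 10 for the 2side3 block
# just above.  `models` is an assumed name-to-model lookup for the
# ch032_pyramid_* objects defined earlier in this file; Exp_builder,
# use_db_obj, use_loss_obj, exp_dir, code_exe_path and Range are the same
# module-level names the hand-written lines use.
def build_2side_block(side1, side2, models):
    exps = {}
    for s3 in range(1, side2 + 1):        # 3side runs 1 .. 2side
        for s4 in range(1, s3 + 1):       # 4side runs 1 .. 3side
            for s5 in range(1, s4 + 1):   # 5side ("5s") runs 1 .. 4side
                key   = f"1side_{side1}__2side_{side2}__3side_{s3}_4side_{s4}_5s{s5}"
                model = models[f"ch032_pyramid_{key}"]
                exps[f"ch032_{key}"] = (
                    Exp_builder()
                    .set_basic("train", use_db_obj, model, use_loss_obj,
                               exp_dir=exp_dir, code_exe_path=code_exe_path,
                               describe_end=model.kong_model.model_describe)
                    .set_train_args(epochs=1)
                    .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2,
                                         it_down_step="half", it_down_fq=900)
                    .set_train_in_gt_use_range(use_in_range=Range(0, 1),
                                               use_gt_range=Range(0, 1))
                    .set_result_name(result_name=""))
    return exps
# Example use (hypothetical): build_2side_block(6, 3, models) reproduces the
# ten ch032_1side_6__2side_3__* experiments listed above.
# ---------------------------------------------------------------------------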
# 1 3 6 "10" 15 21 28 36 45 55
# 2side4 OK 20
ch032_1side_6__2side_4__3side_1_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_4__3side_1_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_4__3side_1_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_4__3side_2_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_4__3side_2_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_4__3side_2_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_4__3side_2_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_4__3side_2_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_4__3side_2_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_4__3side_2_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_4__3side_2_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_4__3side_2_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_4__3side_3_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_4__3side_3_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_4__3side_3_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_4__3side_3_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_4__3side_3_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_4__3side_3_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_4__3side_3_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_4__3side_3_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_4__3side_3_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_4__3side_3_4side_3_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_4__3side_3_4side_3_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_4__3side_3_4side_3_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_4__3side_3_4side_3_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_4__3side_3_4side_3_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_4__3side_3_4side_3_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_4__3side_3_4side_3_5s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_4__3side_3_4side_3_5s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_4__3side_3_4side_3_5s3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_4__3side_4_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_4__3side_4_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_4__3side_4_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_4__3side_4_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_4__3side_4_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_4__3side_4_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_4__3side_4_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_4__3side_4_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_4__3side_4_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_4__3side_4_4side_3_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_4__3side_4_4side_3_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_4__3side_4_4side_3_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_4__3side_4_4side_3_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_4__3side_4_4side_3_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_4__3side_4_4side_3_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_4__3side_4_4side_3_5s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_4__3side_4_4side_3_5s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_4__3side_4_4side_3_5s3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_4__3side_4_4side_4_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_4__3side_4_4side_4_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_4__3side_4_4side_4_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_4__3side_4_4side_4_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_4__3side_4_4side_4_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_4__3side_4_4side_4_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_4__3side_4_4side_4_5s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_4__3side_4_4side_4_5s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_4__3side_4_4side_4_5s3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_4__3side_4_4side_4_5s4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_4__3side_4_4side_4_5s4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_4__3side_4_4side_4_5s4.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
# 1 3 6 10 "15" 21 28 36 45 55
# 2side5 OK 35
ch032_1side_6__2side_5__3side_1_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_1_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_5__3side_1_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_2_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_2_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_5__3side_2_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_2_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_2_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_5__3side_2_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_2_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_2_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_5__3side_2_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_3_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_3_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_5__3side_3_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_3_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_3_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_5__3side_3_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_3_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_3_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_5__3side_3_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_3_4side_3_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_3_4side_3_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_5__3side_3_4side_3_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_3_4side_3_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_3_4side_3_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_5__3side_3_4side_3_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_3_4side_3_5s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_3_4side_3_5s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_5__3side_3_4side_3_5s3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_4_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_4_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_5__3side_4_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_4_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_4_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_5__3side_4_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_4_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_4_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_5__3side_4_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_4_4side_3_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_4_4side_3_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_5__3side_4_4side_3_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_4_4side_3_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_4_4side_3_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_5__3side_4_4side_3_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_4_4side_3_5s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_4_4side_3_5s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_5__3side_4_4side_3_5s3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_4_4side_4_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_4_4side_4_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_5__3side_4_4side_4_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_4_4side_4_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_4_4side_4_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_5__3side_4_4side_4_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_4_4side_4_5s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_4_4side_4_5s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_5__3side_4_4side_4_5s3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_4_4side_4_5s4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_4_4side_4_5s4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_5__3side_4_4side_4_5s4.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_5_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_5_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_5__3side_5_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_5_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_5_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_5__3side_5_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_5_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_5_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_5__3side_5_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_5_4side_3_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_5_4side_3_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_5__3side_5_4side_3_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_5_4side_3_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_5_4side_3_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_5__3side_5_4side_3_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_5_4side_3_5s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_5_4side_3_5s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_5__3side_5_4side_3_5s3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_5_4side_4_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_5_4side_4_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_5__3side_5_4side_4_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_5_4side_4_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_5_4side_4_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_5__3side_5_4side_4_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_5_4side_4_5s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_5_4side_4_5s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_5__3side_5_4side_4_5s3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_5_4side_4_5s4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_5_4side_4_5s4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_5__3side_5_4side_4_5s4.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_5_4side_5_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_5_4side_5_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_5__3side_5_4side_5_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_5_4side_5_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_5_4side_5_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_5__3side_5_4side_5_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_5_4side_5_5s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_5_4side_5_5s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_5__3side_5_4side_5_5s3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_5_4side_5_5s4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_5_4side_5_5s4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_5__3side_5_4side_5_5s4.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_5_4side_5_5s5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_5_4side_5_5s5, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_5__3side_5_4side_5_5s5.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
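# Sanity-check sketch (hypothetical, safe to run at import time): the
# "2sideN OK" counts in this file follow the tetrahedral numbers C(N + 2, 3).
from math import comb
assert [comb(n + 2, 3) for n in (3, 4, 5, 6)] == [10, 20, 35, 56]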
# triangular-number ruler: 1 3 6 10 15 "21" 28 36 45 55
# 2side6 OK, 56 exps (tetrahedral count C(6 + 2, 3))
ch032_1side_6__2side_6__3side_1_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_1_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_1_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_2_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_2_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_2_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_2_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_2_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_2_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_2_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_2_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_2_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_3_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_3_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_3_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_3_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_3_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_3_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_3_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_3_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_3_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_3_4side_3_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_3_4side_3_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_3_4side_3_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_3_4side_3_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_3_4side_3_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_3_4side_3_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_3_4side_3_5s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_3_4side_3_5s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_3_4side_3_5s3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_4_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_4_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_4_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_4_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_4_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_4_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_4_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_4_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_4_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_4_4side_3_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_4_4side_3_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_4_4side_3_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_4_4side_3_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_4_4side_3_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_4_4side_3_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_4_4side_3_5s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_4_4side_3_5s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_4_4side_3_5s3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_4_4side_4_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_4_4side_4_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_4_4side_4_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_4_4side_4_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_4_4side_4_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_4_4side_4_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_4_4side_4_5s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_4_4side_4_5s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_4_4side_4_5s3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_4_4side_4_5s4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_4_4side_4_5s4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_4_4side_4_5s4.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_5_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_5_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_5_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_5_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_5_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_5_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_5_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_5_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_5_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_5_4side_3_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_5_4side_3_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_5_4side_3_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_5_4side_3_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_5_4side_3_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_5_4side_3_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_5_4side_3_5s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_5_4side_3_5s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_5_4side_3_5s3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_5_4side_4_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_5_4side_4_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_5_4side_4_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_5_4side_4_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_5_4side_4_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_5_4side_4_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_5_4side_4_5s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_5_4side_4_5s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_5_4side_4_5s3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_5_4side_4_5s4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_5_4side_4_5s4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_5_4side_4_5s4.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_5_4side_5_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_5_4side_5_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_5_4side_5_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_5_4side_5_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_5_4side_5_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_5_4side_5_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_5_4side_5_5s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_5_4side_5_5s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_5_4side_5_5s3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_5_4side_5_5s4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_5_4side_5_5s4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_5_4side_5_5s4.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_5_4side_5_5s5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_5_4side_5_5s5, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_5_4side_5_5s5.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_6_4side_1_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_6_4side_1_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_6_4side_1_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_6_4side_2_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_6_4side_2_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_6_4side_2_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_6_4side_2_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_6_4side_2_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_6_4side_2_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_6_4side_3_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_6_4side_3_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_6_4side_3_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_6_4side_3_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_6_4side_3_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_6_4side_3_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_6_4side_3_5s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_6_4side_3_5s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_6_4side_3_5s3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_6_4side_4_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_6_4side_4_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_6_4side_4_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_6_4side_4_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_6_4side_4_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_6_4side_4_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_6_4side_4_5s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_6_4side_4_5s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_6_4side_4_5s3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_6_4side_4_5s4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_6_4side_4_5s4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_6_4side_4_5s4.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_6_4side_5_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_6_4side_5_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_6_4side_5_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_6_4side_5_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_6_4side_5_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_6_4side_5_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_6_4side_5_5s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_6_4side_5_5s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_6_4side_5_5s3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_6_4side_5_5s4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_6_4side_5_5s4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_6_4side_5_5s4.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_6_4side_5_5s5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_6_4side_5_5s5, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_6_4side_5_5s5.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_6_4side_6_5s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_6_4side_6_5s1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_6_4side_6_5s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_6_4side_6_5s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_6_4side_6_5s2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_6_4side_6_5s2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_6_4side_6_5s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_6_4side_6_5s3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_6_4side_6_5s3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_6_4side_6_5s4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_6_4side_6_5s4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_6_4side_6_5s4.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_6_4side_6_5s5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_6_4side_6_5s5, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_6_4side_6_5s5.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_6_4side_6_5s6 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_6_4side_6_5s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_6__2side_6__3side_6_4side_6_5s6.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
#############################################################
if __name__ == "__main__":
print("build exps cost time:", time.time() - start_time)
if len(sys.argv) < 2:
############################################################################################################
    ### Press F5 directly, or run python step10_b1_exp_obj_load_and_train_and_test.py with nothing after it, so execution does not fall through to the code below that is meant for step10_b_subprocess.py
ch032_1side_1__2side_1__3side_1_4side_1_5s1.build().run()
# print('no argument')
sys.exit()
    ### The code below is for step10_b_subprocess.py; it is equivalent to typing in cmd: python step10_b1_exp_obj_load_and_train_and_test.py "some_exp.build().run()"
eval(sys.argv[1])
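    ### Hedged usage sketch (the exp name is illustrative; any of the Exp_builder objects above works):
    ###   python step10_b1_exp_obj_load_and_train_and_test.py "ch032_1side_6__2side_6__3side_6_4side_6_5s6.build().run()"
    ### eval(sys.argv[1]) above then executes that expression string in this module's namespace.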
|
[
"s89334roy@yahoo.com.tw"
] |
s89334roy@yahoo.com.tw
|
0433f6b760f294fc900f09f2cf37f4c06fc0cd3d
|
d4aae842e09692196df8004d0eac18cfc76b57ed
|
/catch for main loop.py
|
35c648717e44a36775b4d24f81bb600fed422656
|
[] |
no_license
|
Jeffwuzh/Morse-Code-Interpreter-with-Blink-Recognition
|
3ae6d626873f583dbb51f1a89b4200cfacc18344
|
9ee049b1404efe05e9fc789887e444811607f9b1
|
refs/heads/main
| 2023-05-30T09:40:58.835335
| 2021-06-11T12:29:35
| 2021-06-11T12:29:35
| 376,014,612
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,030
|
py
|
# -*- coding: utf-8 -*-
"""Copy of catch.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1XWI48mjxp9aONWMYfVUcqJQ_dvPGcWYm
"""
from google.colab import drive
drive.mount('/content/drive')
!pip install catch22
!pip install -U scikit-learn
!pip install delayed
from scipy.io import wavfile
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
def streaming_classifier_Normal(samplerate, Y):
xtime = np.array(range(0, len(Y)))/int(samplerate*0.5)
window_size = int(samplerate*0.5)
increment = int(window_size/3)
thresh = 150
predicted_labels = [] # stores predicted
lower_interval = 0 # used to increment window
max_time = int(max(xtime) * int(samplerate*0.5))
predicted = []
# initializing signal vector
counter = 0
is_event = []
while (max_time > lower_interval + window_size):
if max_time < lower_interval + window_size + increment:
upper_interval = max_time
else:
upper_interval = lower_interval + window_size
interval = Y[lower_interval:upper_interval]
xinterval = xtime[lower_interval:upper_interval] # gets corresponding time
zerocrossing = (np.diff(np.sign(interval)) != 0).sum()
Mean_value = np.mean(interval)
standarddeviation = round(np.std(interval),3)
abssum = sum(map(abs, interval))/10000
#print(abssum,standarddeviation,counter,lower_interval,upper_interval)
# If it is an event, record it as True and add one to the counter
if abssum > thresh and upper_interval != max_time:
is_event.append(True)
counter = counter + 1
lower_interval = lower_interval + increment
# If the window reaches the end while the counter is greater than 0, an event is still unfinished
elif upper_interval == max_time and counter > 0:
begin_time = lower_interval - increment * counter
end_time = max_time
predicted.append([begin_time,end_time,end_time-begin_time,Y[begin_time:end_time]])
#print(begin_time,end_time)
lower_interval = lower_interval + increment
# If it is not an event, look back at the previous window and close the event if that window was one
else:
is_event.append(False)
if len(is_event) == 1:
lower_interval = lower_interval + increment
elif is_event[-2] == True:
begin_time = lower_interval - increment * counter
end_time = lower_interval - increment + window_size
predicted.append([begin_time,end_time,end_time-begin_time,Y[begin_time:end_time]])
#print(begin_time,end_time,end_time-begin_time)
lower_interval = end_time
else:
lower_interval = lower_interval + increment
counter = 0
df = pd.DataFrame(predicted,columns=['begin','end','Long','Values'])
return df
#return predicted,eventtime
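# Hedged usage sketch (the WAV path is illustrative, not from the original notebook):
# samplerate, Y = wavfile.read("/content/drive/MyDrive/Data3888/blinking/blink_example.wav")
# events = streaming_classifier_Normal(samplerate, Y)
# print(events[['begin', 'end', 'Long']])  # one row per detected blink event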
"""## Noraml Blink test"""
pathlist_1 = ["/content/drive/MyDrive/Data3888/blinking/blink_olli_bot_v1.wav",
"/content/drive/MyDrive/Data3888/blinking/blinking_jack_top_v1.wav",
"/content/drive/MyDrive/Data3888/blinking/blinking_jack_top_v2.wav",
"/content/drive/MyDrive/Data3888/blinking/blinking_ollie_top_v1.wav"]
Normal_blink=[]
for i in pathlist_1:
samplerate, Y = wavfile.read(i)
result = streaming_classifier_Normal(samplerate, Y)
Normal_blink.append(result)
Normal_blink = pd.concat(Normal_blink)
Normal_blink['Type'] = "Normal"
for i in pathlist_1:
samplerate, Y = wavfile.read(i)
xtime = np.array(range(0, len(Y)))/samplerate
plt.figure(figsize=(20,5))
plt.plot(xtime, Y)
result = streaming_classifier_Normal(samplerate, Y)
for i in range(0,len(result)):
begin = int(result.iloc[i].at['begin'])
end = int(result.iloc[i].at['end'])
Y = result.iloc[i].at['Values']
xtime = np.array(range(begin, begin+len(Y)))/samplerate
plt.plot(xtime, Y, color='red')
"""## Long Blink test"""
pathlist_2 = ["/content/drive/MyDrive/Data3888/blinking/longblinkinh_ollie_bot_v2.wav",
"/content/drive/MyDrive/Data3888/blinking/longblink_olli_top_v1.wav",
"/content/drive/MyDrive/Data3888/blinking/longblink_jack_top_v1.wav"]
Long_blink=[]
for i in pathlist_2:
samplerate, Y = wavfile.read(i)
result = streaming_classifier_Normal(samplerate, Y)
Long_blink.append(result)
Long_blink = pd.concat(Long_blink)
Long_blink['Type'] = "Long"
for i in pathlist_2:
samplerate, Y = wavfile.read(i)
xtime = np.array(range(0, len(Y)))/samplerate
plt.figure(figsize=(20,5))
plt.plot(xtime, Y)
result = streaming_classifier_Normal(samplerate, Y)
for i in range(0,len(result)):
begin = int(result.iloc[i].at['begin'])
end = int(result.iloc[i].at['end'])
Y = result.iloc[i].at['Values']
xtime = np.array(range(begin, begin+len(Y)))/samplerate
plt.plot(xtime, Y, color='red')
"""## Double test"""
pathlist_3 = ["/content/drive/MyDrive/Data3888/double_blink/doubleblink_jack_top_v1.wav",
"/content/drive/MyDrive/Data3888/double_blink/doublelink_jack_bot_v1.wav",
"/content/drive/MyDrive/Data3888/double_blink/doublelink_jack_bot_v2.wav",
"/content/drive/MyDrive/Data3888/double_blink/doublelink_jack_top_v2.wav"]
Double_blink=[]
for i in pathlist_3:
samplerate, Y = wavfile.read(i)
result = streaming_classifier_Normal(samplerate, Y)
Double_blink.append(result)
Double_blink = pd.concat(Double_blink)
Double_blink['Type'] = "Double"
for i in pathlist_3:
samplerate, Y = wavfile.read(i)
xtime = np.array(range(0, len(Y)))/samplerate
plt.figure(figsize=(20,7))
plt.plot(xtime, Y)
result = streaming_classifier_Normal(samplerate, Y)
for i in range(0,len(result)):
begin = int(result.iloc[i].at['begin'])
end = int(result.iloc[i].at['end'])
Y = result.iloc[i].at['Values']
xtime = np.array(range(begin, begin+len(Y)))/samplerate
plt.plot(xtime, Y, color='red')
"""# Combine table and Features
## Catch22
"""
EventFrame = pd.concat([Normal_blink,Long_blink,Double_blink], ignore_index=True)
flawed=EventFrame.index[[0,26, 35, 45, 46, 48, 54, 72, 75, 147, 148, 149, 150, 151, 152, 153, 154, 161, 162, 165, 166, 167,168, 169, 170, 171, 172, 174, 175, 176, 177, 178, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 198, 200, 201,202, 203, 204, 206, 213, 214,221, 222, 231, 280]]
EventFrame=EventFrame.drop(flawed)
EventFrame
"""## Peak and Withs"""
import matplotlib.pyplot as plt
from scipy.signal import find_peaks
def peaks(y): #gives the number of peaks
y=np.array(y)
peaks, properties = find_peaks(abs(y),height=1700, width=200)
return sum(np.diff(peaks)>400)+1
def betweenlastpeaks(y): #gives diff in indexes of first and last peak
y=np.array(y)
peaks, properties = find_peaks(abs(y),height=1700, width=200)
if len(peaks)==0:
return 0
return peaks[-1]-peaks[0]
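# Hedged sanity check on synthetic data (not from the original notebook; the two Gaussian
# bumps are made wide/tall enough to pass the height=1700, width=200 thresholds above):
t = np.linspace(0, 1, 10000)
sig = 2000 * np.exp(-((t - 0.3) ** 2) / 0.002) + 2000 * np.exp(-((t - 0.7) ** 2) / 0.002)
print(peaks(sig), betweenlastpeaks(sig))  # expect 2 peaks roughly 4000 samples apart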
col = [
'Type',
'Len_between_peaks',
'Peaks',
'DN_HistogramMode_5',
'DN_HistogramMode_10',
'CO_f1ecac',
'CO_FirstMin_ac',
'CO_HistogramAMI_even_2_5',
'CO_trev_1_num',
'MD_hrv_classic_pnn40',
'SB_BinaryStats_mean_longstretch1',
'SB_TransitionMatrix_3ac_sumdiagcov',
'PD_PeriodicityWang_th0_01',
'CO_Embed2_Dist_tau_d_expfit_meandiff',
'IN_AutoMutualInfoStats_40_gaussian_fmmi',
'FC_LocalSimple_mean1_tauresrat',
'DN_OutlierInclude_p_001_mdrmd',
'DN_OutlierInclude_n_001_mdrmd',
'SP_Summaries_welch_rect_area_5_1',
'SB_BinaryStats_diff_longstretch0',
'SB_MotifThree_quantile_hh',
'SC_FluctAnal_2_rsrangefit_50_1_logi_prop_r1',
'SC_FluctAnal_2_dfa_50_1_2_logi_prop_r1',
'SP_Summaries_welch_rect_centroid',
'FC_LocalSimple_mean3_stderr']
df=pd.DataFrame(columns= col)
from catch22 import catch22_all
for i in range(0,len(EventFrame)):
current_row = EventFrame[i:i+1]
current_type = current_row['Type'].to_string().split()[1]
Y = sum(current_row["Values"]).tolist()
t = catch22_all(Y)
features = t["values"]
features.insert(0,current_type)
features.insert(1,peaks(Y))
features.insert(1,betweenlastpeaks(Y))
df.loc[i]=features
df
"""# Modles Selection"""
from sklearn.model_selection import train_test_split
#from sklearn.datasets import make_classification
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
"""##Random Forest"""
# These features come from a feature-selection algorithm; they are hard-coded here since they might change, and they are only needed to train the model for the main loop
selected_feat=['Len_between_peaks', 'Peaks', 'CO_f1ecac', 'CO_FirstMin_ac',
'SB_TransitionMatrix_3ac_sumdiagcov',
'SP_Summaries_welch_rect_area_5_1',
'SC_FluctAnal_2_rsrangefit_50_1_logi_prop_r1',
'SP_Summaries_welch_rect_centroid']
rfselected=df[selected_feat]
rfselected #note- higher than before!
y= df['Type']
import delayed
from sklearn.ensemble import RandomForestClassifier
RDF = RandomForestClassifier(n_estimators=100)
x_train,x_test,y_train,y_test = train_test_split(rfselected,y,test_size=0.2,random_state=1)
model = RandomForestClassifier(n_estimators=100).fit(x_train, y_train)
print("Training set score: {:.3f}".format(model.score(x_train, y_train)))
print("Test set score: {:.3f}".format(model.score(x_test, y_test)))
"""# Fuction rewrite"""
from scipy.io import wavfile
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import KFold
import catch22
#########
# samplerate and Y can be changed to a streaming file path if needed
# model can be any trained classifier model
#########
def get_features(eventin):
amount=eventin.tolist()
col= ['Len_between_peaks', 'Peaks', 'CO_f1ecac', 'CO_FirstMin_ac',
'SB_TransitionMatrix_3ac_sumdiagcov',
'SP_Summaries_welch_rect_area_5_1',
'SC_FluctAnal_2_rsrangefit_50_1_logi_prop_r1',
'SP_Summaries_welch_rect_centroid']
insidedf=pd.DataFrame(columns=col)
row=[]
row.append(betweenlastpeaks(amount))
row.append(peaks(amount))
row.append(catch22.CO_f1ecac(amount))
row.append(catch22.CO_FirstMin_ac(amount))
row.append(catch22.SB_TransitionMatrix_3ac_sumdiagcov(amount))
row.append(catch22.SP_Summaries_welch_rect_area_5_1(amount))
row.append(catch22.SC_FluctAnal_2_rsrangefit_50_1_logi_prop_r1(amount))
row.append(catch22.SP_Summaries_welch_rect_centroid(amount))
insidedf.loc[0] = row
return insidedf.iloc[0:1]
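# Hedged usage sketch (assumes EventFrame built above still holds the raw event samples):
# one_event = EventFrame.iloc[0]['Values']
# print(get_features(np.array(one_event)))  # a 1-row DataFrame with the 8 selected features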
# 1 for long blink, 0 for blink, -1 for double, -2
def eventlist(eventclassify):
if eventclassify=="Normal":
return 0
if eventclassify=="Double":
return -1
if eventclassify=="Long":
return 1
def streaming_classifier(samplerate,Y,model):
## def streaming_classifier(path,model):
## samplerate, Y = wavfile.read(path)
xtime = np.array(range(0, len(Y)))/int(samplerate*0.5)
window_size = int(samplerate*0.5)
increment = int(window_size/3)
thresh = 150
predicted_labels = [] # stores predicted
lower_interval = 0 # used to increment window
max_time = int(max(xtime) * int(samplerate*0.5))
predicted = []
# initializing signal vector
counter = 0
is_event = []
while (max_time > lower_interval + window_size):
if max_time < lower_interval + window_size + increment:
upper_interval = max_time
else:
upper_interval = lower_interval + window_size
interval = Y[lower_interval:upper_interval]
xinterval = xtime[lower_interval:upper_interval] # gets corresponding time
zerocrossing = (np.diff(np.sign(interval)) != 0).sum()
Mean_value = np.mean(interval)
standarddeviation = round(np.std(interval),3)
abssum = sum(map(abs, interval))/10000
#print(abssum,standarddeviation,counter,lower_interval,upper_interval)
# If it is an event, record it as True and add one to the counter
if abssum > thresh and upper_interval != max_time:
is_event.append(True)
counter = counter + 1
lower_interval = lower_interval + increment
# If the window reaches the end while the counter is greater than 0, an event is still unfinished
elif upper_interval == max_time and counter > 0:
begin_time = lower_interval - increment * counter
end_time = max_time
# Add the event into the list and print the prediction
current_value = Y[begin_time:end_time]
# Predict by model
y_pred=model.predict(get_features(current_value))[0]
predicted.append([begin_time,end_time,end_time-begin_time,Y[begin_time:end_time],y_pred])
print(y_pred)
predicted_labels.append(eventlist(y_pred))
######################################
# Morse code recognition application is added here
######################################
lower_interval = lower_interval + increment
# If it is not an event, look back at the previous window and close the event if that window was one
else:
is_event.append(False)
if len(is_event) == 1:
lower_interval = lower_interval + increment
elif is_event[-2] == True:
begin_time = lower_interval - increment * counter
end_time = lower_interval - increment + window_size
# Add the event into the list and print the prediction
current_value = Y[begin_time:end_time]
# Predict by model
y_pred=model.predict(get_features(current_value))[0]
predicted.append([begin_time,end_time,end_time-begin_time,Y[begin_time:end_time],y_pred])
print(y_pred)
predicted_labels.append(eventlist(y_pred))
#########################################
# Morse code recognition application is added here
#########################################
#print(begin_time,end_time,end_time-begin_time)
lower_interval = end_time
else:
lower_interval = lower_interval + increment
counter = 0
df = pd.DataFrame(predicted,columns=['begin','end','Long','Values',"type"])
return df, predicted_labels
#example of how to run it:
# sample rate (e.g. 10000); Y holds the actual signal values from the SpikerBox
busamplerate, Y = wavfile.read("/content/drive/MyDrive/Data3888/blinking/blinking_jack_top_v1.wav")
# Train the model beforehand; here it is just the Random Forest fitted above
# Output: a (DataFrame - kept since it is useful for later analysis), b (list of 0, 1 and -1 - no 2 yet since unsure whether it is wanted)
a,b=streaming_classifier(busamplerate,Y, model)
print(b)
|
[
"noreply@github.com"
] |
Jeffwuzh.noreply@github.com
|
439a97a92d842d7709709585d9ab7de7ba40a8fc
|
015dd47b2fdf23fd6bab8281686e1fee22057a38
|
/apps/articles/models.py
|
7862afacd7d2e1ed2affe02fa4ca24620d2a0871
|
[] |
no_license
|
Simonskiii/backGround
|
d3b8929ea0db09552dad58e220d8229fbc97636d
|
bf4d396a71ecd001245bad2cf3644cd42fce69e7
|
refs/heads/master
| 2023-03-17T11:20:48.913256
| 2019-11-19T13:11:32
| 2019-11-19T13:11:32
| 222,699,425
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,579
|
py
|
from django.db import models
from datetime import datetime
from ckeditor.fields import RichTextField
from django.contrib.auth import get_user_model
import factory
from faker import Factory
User = get_user_model()
fake = Factory.create()
# Create your models here.
class ArticleCatergory(models.Model):
name = models.CharField(default="", max_length=30, verbose_name="类别名", help_text="类别名")
code = models.CharField(default="", max_length=30, verbose_name="类别code", help_text="类别code")
parent_category = models.ForeignKey("self", null=True, blank=True, verbose_name="父类目级别", help_text="父目录",
related_name="sub_cat", on_delete=models.CASCADE)
is_tab = models.BooleanField(default=False, verbose_name="是否导航", help_text="是否导航")
add_time = models.DateTimeField(default=datetime.now, verbose_name="添加时间")
class Meta:
verbose_name = "商品类别"
verbose_name_plural = verbose_name
def __str__(self):
return self.name
class Article(models.Model):
name = models.CharField(default='', max_length=50, verbose_name='题目', help_text='题目', null=True)
category = models.ForeignKey(ArticleCatergory, verbose_name='文章类别', on_delete=models.CASCADE, default='')
content = RichTextField('文章内容')
click_num = models.IntegerField(default=0, verbose_name='点击数')
fav_num = models.IntegerField(default=0, verbose_name='喜欢数')
aritcle_brief = models.TextField(max_length=500, verbose_name="文章概述", default='')
article_front_image = models.ImageField(upload_to="article/images/", null=True, blank=True, verbose_name="封面图")
is_hot = models.BooleanField(default=False, verbose_name="是否热门")
is_anonymous = models.BooleanField(default=False, verbose_name='是否匿名')
# author = models.ForeignKey(User, verbose_name='作者', on_delete=models.SET_DEFAULT, default='')
author = models.CharField(max_length=20, default='', verbose_name='作者', help_text='作者')
class Meta:
verbose_name = '文章'
verbose_name_plural = verbose_name
def __str__(self):
return self.name
class ArticleShow(models.Model):
name = models.ForeignKey(Article, on_delete=models.SET_DEFAULT, verbose_name='文章名', help_text='文章名', default='')
User = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name='用户', help_text='用户', default='')
class ArticleImage(models.Model):
"""
Product carousel images
"""
articles = models.ForeignKey(Article, verbose_name="文章", related_name="images", on_delete=models.CASCADE)
image = models.ImageField(upload_to="", verbose_name="图片", null=True, blank=True)
add_time = models.DateTimeField(default=datetime.now, verbose_name="添加时间")
class Meta:
verbose_name = '封面图'
verbose_name_plural = verbose_name
def __str__(self):
return self.articles.name
# import random
# class ArticleCatergoryFactory(factory.DjangoModelFactory):
# class Meta:
# model = ArticleCatergory
# name = fake.word()
# li = []
# for i in name.split(" "):
# li.append(i[0])
# code = "".join('1')
# catergory_type = random.randint(1,3)
#
#
# class ArticleFactory(factory.django.DjangoModelFactory):
# class Meta:
# model = Article
# author = fake.name()
# content = fake.text()
# name = fake.word()
# category = factory.SubFactory(ArticleCatergoryFactory)
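# Hedged sketch of a working factory (assumes factory_boy's Django integration; not in the
# original file, where the factories above are left commented out):
# class ArticleFactory(factory.django.DjangoModelFactory):
#     class Meta:
#         model = Article
#     name = factory.LazyFunction(fake.word)
#     author = factory.LazyFunction(fake.name)
#     content = factory.LazyFunction(fake.text)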
|
[
"3017209256@tju.edu.cn"
] |
3017209256@tju.edu.cn
|
f86f1440c1dfce4772c26f8bd9d40aeb6c368956
|
27a066c48096e30e3cf4a795edf6e8387f63728b
|
/mysite/django_vises/runtimes/misc.py
|
dbb4cc342ce1012cbf1a9397f2dea0e09cf202d4
|
[] |
no_license
|
26huitailang/django-tutorial
|
2712317c3f7514743e90fb4135e5fe3fed5def90
|
28a0b04ee3b9ca7e2d6e84e522047c63b0d19c8f
|
refs/heads/master
| 2023-01-07T11:55:37.003245
| 2019-09-04T09:19:50
| 2019-09-04T09:19:50
| 113,199,279
| 1
| 0
| null | 2023-01-03T15:24:01
| 2017-12-05T15:27:52
|
Python
|
UTF-8
|
Python
| false
| false
| 2,885
|
py
|
#!/usr/bin/env python
# coding=utf-8
# import glob
import os
import operator
from django.utils.six import text_type
# copy from rest_framework
# Header encoding (see RFC5987)
HTTP_HEADER_ENCODING = 'iso-8859-1'
def get_request_client_ip_address(request):
"""获取 request 请求来源 ip address, 支持 nginx 使用 X-Real-IP/X-FORWARDED-FOR 传递来源 ip 地址
"""
ip = request.META.get('X-Real-IP') or request.META.get('HTTP_X_FORWARDED_FOR')
if ip:
ip = ip.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
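# Hedged example of the nginx directives this function expects upstream (an assumption,
# not part of this repo):
#   proxy_set_header X-Real-IP $remote_addr;
#   proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;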
def get_authorization_header(request):
"""
Return request's 'Authorization:' header, as a bytestring.
Hide some test client ickyness where the header can be unicode.
"""
auth = request.META.get('HTTP_AUTHORIZATION', b'')
if isinstance(auth, text_type):
# Work around django test client oddness
auth = auth.encode(HTTP_HEADER_ENCODING)
return auth
def get_authorization_token_from_header(request):
"""
Return request's 'Authorization:' token
"""
keyword = 'Token'
auth = get_authorization_header(request).split()
if not auth or auth[0].lower() != keyword.lower().encode():
return None
# if len(auth) == 1:
# msg = _('Invalid token header. No credentials provided.')
# raise exceptions.AuthenticationFailed(msg)
# elif len(auth) > 2:
# msg = _('Invalid token header. Token string should not contain spaces.')
# raise exceptions.AuthenticationFailed(msg)
#
# try:
# token = auth[1].decode()
# except UnicodeError:
# msg = _('Invalid token header. Token string should not contain invalid characters.')
# raise exceptions.AuthenticationFailed(msg)
if len(auth) != 2:
return None
try:
token = auth[1].decode()
except UnicodeError:
return None
return token
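# Hedged usage sketch (illustrative; assumes an "Authorization: Token <value>" header):
# token = get_authorization_token_from_header(request)
# if token is None:
#     ...  # missing or malformed header; reject the request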
def str_to_boolean(text):
"""将字符转为布尔值,if条件可以扩展"""
if text.lower() in ['false']:
return False
elif text.lower() in ['true']:
return True
def sort_dict_list(dict_to_sort: list = None, sort_key='', reverse=False) -> list:
sorted_list = sorted(dict_to_sort, key=operator.itemgetter(sort_key), reverse=reverse)
return sorted_list
def get_local_suite_img_list(suite_path: str = None, format='jpg') -> list:
"""获取本地suite的图片列表"""
if suite_path is None:
return []
# glob cannot match complex paths
# img_file_list = glob.glob('{}/*.{}'.format(suite_path, format))
files_list = os.listdir(suite_path)
img_file_list = list(filter(lambda x: x.endswith(format), files_list))
return img_file_list
def get_local_suite_count(suite_path: str = None) -> int:
"""本地suite图片数量"""
return len(get_local_suite_img_list(suite_path))
|
[
"26huitailang@gmail.com"
] |
26huitailang@gmail.com
|
f1674b141e3d17158f2bb2d03ed0e3b467a50143
|
59c9c2286af3be35a3deb50c1eed0c75a2169bc2
|
/Server/app/api_helper.py
|
764fa7aed31126b49dd7e5d03fa9d77b9c9ea1cb
|
[
"MIT"
] |
permissive
|
chauandvi4/Kid-friendlyThingTranslator
|
e0252bd6363429eed89a2042a15ed91b33a3b165
|
685bd5cb3d1dd3b4c39a35d5f7fc96c45ad1aa75
|
refs/heads/main
| 2023-05-31T01:02:20.926999
| 2021-07-05T02:40:56
| 2021-07-05T02:40:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,916
|
py
|
import decouple
from yolov3.yolo import ImageLabeler
from yolov3.image_converter import string_to_image, to_rgb
from word_definition.memory_dictionary import MemoryDictionary
from word_definition.database_dictionary import DatabaseDictionary
from env_access import env_accessor
from app.model import *
from fastapi import Body
from google.cloud import vision
from google.auth.exceptions import DefaultCredentialsError
import mysql.connector
import cv2
import logging
import time
from enum import Enum
class ApiHelper:
class DbId(Enum):
User = env_accessor.database_user
Dictionary = env_accessor.database_dictionary
def __init__(self):
self._create_conns()
self._create_google_cloud_client()
self._create_yolov3_image_labeler()
self._create_memory_dictionary()
self._create_database_dictionary()
def _create_conns(self):
self._db_conns = {}
for db_id in self.DbId:
db_name = db_id.value
is_success = False
while not is_success:
try:
logging.info("Connecting to %s database...", db_name)
self._db_conns[db_id] = mysql.connector.connect(
host=env_accessor.host_database,
user=env_accessor.username_database,
password=env_accessor.password_database,
database=db_name
)
logging.info("Connected to %s database", db_name)
is_success = True
except mysql.connector.Error as err:
logging.error("Failed to connect to %s database: %s", db_name, str(err))
time_to_retry = 5
logging.info("Retry connecting in %d seconds", time_to_retry)
time.sleep(time_to_retry)
def _create_google_cloud_client(self):
logging.info("Instantiating a Google Cloud Vision Client...")
try:
logging.info("Instantiating a Google Cloud Vision Client by default credentials...")
self._gg_cloud_vision_client = vision.ImageAnnotatorClient()
except DefaultCredentialsError as err:
logging.warning("Failed to instantiate a Google Cloud Vision Client by default credentials: %s", str(err))
try:
logging.info("Instantiating a Google Cloud Vision Client by environment variable...")
self._gg_cloud_vision_client = vision.ImageAnnotatorClient.from_service_account_json(
env_accessor.google_credential
)
except (decouple.UndefinedValueError, FileNotFoundError) as err:
logging.warning("Failed to instantiate a Google Cloud Vision Client: %s", str(err))
self._gg_cloud_vision_client = None
return
logging.info("Google Cloud Vision Client instantiated")
def _create_yolov3_image_labeler(self):
try:
logging.info("Loading YOLO from disk...")
self._yolov3_image_labeler = ImageLabeler(
labels_path=env_accessor.path_yolov3_names,
config_path=env_accessor.path_yolov3_config,
weights_path=env_accessor.path_yolov3_weights
)
logging.info("Loaded YOLO")
except decouple.UndefinedValueError as err:
logging.warning("Failed to load YOLO")
self._yolov3_image_labeler = None
def _create_memory_dictionary(self):
try:
logging.info("Loading a Memory Dictionary...")
self._memory_word_dictionary = MemoryDictionary(env_accessor.path_json_definition_common_word)
logging.info("Loaded a Memory Dictionary")
except MemoryError as err:
logging.warning("Failed to load a Memory Dictionary: %s", str(err))
self._memory_word_dictionary = None
def _create_database_dictionary(self):
try:
logging.info("Loading a Database Dictionary...")
self._database_word_dictionary = DatabaseDictionary(self._db_conns[self.DbId.Dictionary])
logging.info("Loaded a Database Dictionary")
except Exception as e:
logging.error("Failed to load a Database Dictionary: %s", str(e))
def is_user_login_info_exist(self, data: UserLoginSchema) -> bool:
cursor = self._db_conns[self.DbId.User].cursor()
sql = "SELECT email FROM Accounts WHERE email = %s AND passphrase = %s"
param = (data.email, data.password)
cursor.execute(sql, param)
return cursor.fetchone() is not None
def get_user_name(self, email: str) -> str:
cursor = self._db_conns[self.DbId.User].cursor()
sql = "SELECT name FROM Accounts WHERE email = %s"
param = (email,)
cursor.execute(sql, param)
res = cursor.fetchone()
return res[0] if res is not None else ""
def create_user(self, user: UserSignupSchema = Body(...)) -> bool:
conn = self._db_conns[self.DbId.User]
cursor = conn.cursor()
sql = "INSERT INTO Accounts(email, passphrase, name) VALUES (%s, %s, %s)"
param = (user.email, user.password, user.name)
try:
cursor.execute(sql, param)
conn.commit()
return True
except mysql.connector.errors.IntegrityError:
return False
def label_image_by_gg_cloud_vision(self, image_base64: str) -> str:
if self._gg_cloud_vision_client is None:
return ""
image = to_rgb(string_to_image(image_base64))
_, encoded_image = cv2.imencode('.jpg', image)
image = vision.Image(content=encoded_image.tobytes())
# Try using GG Cloud service
try:
# Performs label detection on the image file
response = self._gg_cloud_vision_client.label_detection(image=image)
labels = response.label_annotations
if response.error.message:
return ""
return labels[0].description if labels else ""
except Exception as err:
logging.warning("Failed to use Google Cloud Vision: %s", str(err))
def label_image_by_yolov3(self, image_base64: str) -> str:
if self._yolov3_image_labeler is None:
return ""
image = to_rgb(string_to_image(image_base64))
# Performs label detection on the image file
return self._yolov3_image_labeler.label_image(image)
def define_word_by_memory_dictionary(self, word: str) -> str:
return self._memory_word_dictionary.define(word)
def define_word_by_database_dictionary(self, word: str) -> str:
return self._database_word_dictionary.define(word)
def __del__(self):
for conn in self._db_conns.values():
conn.close()
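# Hedged usage sketch (assumes the env vars read by env_accessor are configured; the
# credentials below are placeholders):
# helper = ApiHelper()
# if helper.is_user_login_info_exist(UserLoginSchema(email="user@example.com", password="pw")):
#     print(helper.get_user_name("user@example.com"))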
|
[
"phu.nguyenpfoem@hcmut.edu.vn"
] |
phu.nguyenpfoem@hcmut.edu.vn
|