| blob_id (stringlengths 40-40) | directory_id (stringlengths 40-40) | path (stringlengths 2-616) | content_id (stringlengths 40-40) | detected_licenses (listlengths 0-69) | license_type (stringclasses, 2 values) | repo_name (stringlengths 5-118) | snapshot_id (stringlengths 40-40) | revision_id (stringlengths 40-40) | branch_name (stringlengths 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, ⌀) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (stringclasses, 23 values) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (stringclasses, 213 values) | src_encoding (stringclasses, 30 values) | language (stringclasses, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 2-10.3M) | extension (stringclasses, 246 values) | content (stringlengths 2-10.3M) | authors (listlengths 1-1) | author_id (stringlengths 0-212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f52217589ed79b1f85cd8ee714acfcff57a119f0
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/kKFuf9hfo2qnu7pBe_0.py
|
dd06b526390459e9a1fd16863570418e3cc2438d
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 154
|
py
|
def is_prime(p, n, l=0, r=None):
    r = len(p) - 1
    while r >= l:
        m = l + (r - l) // 2
        if p[m] == n: return 'yes'
        elif p[m] > n: r = m - 1
        else: l = m + 1
    return 'no'
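# Illustrative usage (added for clarity; not part of the original submission):
# given a sorted list of primes p, is_prime(p, n) binary-searches p for n, e.g.
# is_prime([2, 3, 5, 7, 11], 7) -> 'yes', is_prime([2, 3, 5, 7, 11], 9) -> 'no'.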
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
5c34960b45ed72b6cf3652d2817736cbb1c25c95
|
e823be336bf8a47c32e3e41752ac885063a01c9b
|
/morningproject/morningproject/wsgi.py
|
e523da206c5766ae943afaabedde6a98314ef26d
|
[] |
no_license
|
sinha-sandeep/django-project
|
db5c82f98b26bbe88760e7be5982541a8bedd221
|
95135bad5555436ec5c58e2fddc1875fb59c2f86
|
refs/heads/master
| 2022-12-07T05:02:26.233834
| 2020-09-01T08:25:53
| 2020-09-01T08:25:53
| 291,940,996
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 405
|
py
|
"""
WSGI config for morningproject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "morningproject.settings")
application = get_wsgi_application()
|
[
"sandeepsinha78148@gmail.com"
] |
sandeepsinha78148@gmail.com
|
81d006ac7c02ed9231d643533aadf3160e5a9532
|
89503a61ab7599d4cd8b8f1460cf3e09dceb34b6
|
/users/migrations/0002_auto_20180827_1332.py
|
8d9fb1d3202c3c848a8e1344b6389fc25ffef7cd
|
[] |
no_license
|
AteamVentures/ateam-chat
|
5e362ebb169d1c50daa73424e67fa8b972922608
|
06261f80cbed610b1eb1aba390734737d37780a3
|
refs/heads/master
| 2020-03-24T22:39:58.465030
| 2018-08-30T03:51:42
| 2018-08-30T03:51:42
| 143,096,765
| 0
| 0
| null | 2018-08-30T03:51:44
| 2018-08-01T03:02:28
|
Python
|
UTF-8
|
Python
| false
| false
| 1,931
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-08-27 13:32
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='user',
name='date_joined',
field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='가입일'),
),
migrations.AlterField(
model_name='user',
name='display_name',
field=models.CharField(max_length=20, verbose_name='닉네임'),
),
migrations.AlterField(
model_name='user',
name='email',
field=models.EmailField(db_index=True, max_length=190, verbose_name='이메일'),
),
migrations.AlterField(
model_name='user',
name='name',
field=models.CharField(max_length=50, verbose_name='이름'),
),
migrations.AlterField(
model_name='user',
name='phone',
field=models.CharField(db_index=True, max_length=30, validators=[django.core.validators.RegexValidator('^[0-9]+$', '숫자만 입력 가능합니다.', 'invalid')], verbose_name='연락처'),
),
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(help_text='필수입니다. 영문소문자, 숫자와 밑줄(_)만 입력가능합니다.', max_length=32, unique=True, validators=[django.core.validators.MinLengthValidator(3), django.core.validators.MaxLengthValidator(32), django.core.validators.RegexValidator('^(?!_)[a-zA-Z0-9_]+$', '영문소문자, 숫자와 밑줄(_)만 입력가능합니다.', 'invalid')], verbose_name='username'),
),
]
|
[
"libbom14@gmail.com"
] |
libbom14@gmail.com
|
5889afcef180b5e7624a875a630b53dff05b6f61
|
a5cf4468d2b7c4bd798f60554b8ad0ee430c4799
|
/src/astrogun.py
|
74ff3bc385c2baff78261a5a7637f7864684765a
|
[] |
no_license
|
cyberaa/astrogun
|
5cc2f5e8bf69470cbd410822571b71595da3173f
|
f712f4a2c992d501101951b43853b6e8d14562eb
|
refs/heads/master
| 2020-12-30T17:50:04.907547
| 2014-05-21T21:25:01
| 2014-05-21T21:25:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,999
|
py
|
#!/usr/bin/python
from __future__ import absolute_import, division, print_function, unicode_literals
""" Wavefront obj model loading. Material properties set in mtl file.
Uses the import pi3d method to load *everything*
"""
import sys
sys.path.append('../src')
import pi3d
import time
import asteroids, bullets
import numpy, numpy.linalg
import util
import math
import RPi.GPIO as GPIO
import os.path
import pickle
from settings import *
import RTIMU
import threading
import pygame.mixer
######################################
#### IMUReader
######################################
class IMUReader(threading.Thread):
def __init__(self, imu):
threading.Thread.__init__(self)
self.imu = imu
self.data = (0, 0, 0)
self.running = True;
self.wait_s = imu.IMUGetPollInterval()*1.0/1000.0
def run(self):
while(self.running):
if self.imu.IMURead():
self.data = self.imu.getFusionData()
time.sleep(self.wait_s)
######################################
#### GameLevel
######################################
# Operating modes
MODE_READY = 0
MODE_READY_GO = 1
MODE_GO = 2
MODE_GO_OUT = 3
MODE_PLAY = 4
class GameLevel:
def __init__(self, sprites):
# Instantiate an Asteroid Generator
self.gen = asteroids.AsteroidGenerator(ASTEROIDS, 0.1, None, shader_explosion,
shader_uv_flat)
self.bullet_gen = bullets.BulletGenerator()
self.active_asteroids = {}
self.asteroid_id = 0
self.active_bullets = []
self.hit_asteroids = []
self.azimuth = 0.0
self.incl = 0.0
self.self_hit = -1
self.sprites = sprites
self.fixed_sprites = []
self.lives = INITIAL_LIVES
self.scores = 0
self.scores_changed = True
self.pause = False
self.free_play = False
self.fire_button_state = 1
self.frames = 0
self.mode = [MODE_READY, READY_TIME]
self.ready_text = pi3d.String(font=FONT_BALLS,
string = "READY?",
x = -.3, y = 1, z = 3.9,
sx=0.018, sy=0.018)
self.ready_text.set_shader(shader_uv_flat)
self.go_text = pi3d.String(font=FONT_BALLS,
string = "GO!",
x = -.2, y = 0.8, z = 3.9 + 5*.5,
sx=0.018, sy=0.018)
self.go_text.set_shader(shader_uv_flat)
# Initial sprite location
s = self.sprites['sight']
s.position(*SIGHT_POSITION)
s.scale(*SIGHT_SCALE)
self.fixed_sprites.append(s)
s = sprites['radar_panel']
s.position(*RADAR_PANEL_POSITION)
s.scale(*RADAR_PANEL_SCALE)
self.fixed_sprites.append(s)
self.radar_target = sprites['radar_target']
self.radar_target.scale(*TARGET_SCALE)
self.life_full = sprites['life_full']
self.life_full.scale(*LIFE_BAR_SCALE)
self.life_empty = sprites['life_empty']
self.life_empty.scale(*LIFE_BAR_SCALE)
def create_bullet(self, now):
b = self.bullet_gen.generate(self.azimuth, self.incl, now)
self.active_bullets.append(b)
SOUNDS['shot'].play()
# For all asteroids, check if the bullet hits them
I = b.get_direction()
indx = 0
dest = None
# Scan all the asteroids against incidence with the newly
# created bullet. If more than one asteroid incides with
# the bullet trajectory, pick the closest one
for astid, ast in self.active_asteroids.items():
if (self.check_incidence(ast, I)):
if dest is None:
dest = (astid, ast)
else:
if (ast.distance2() < dest[1].distance2()):
dest = (astid, ast)
b.set_destination(dest)
# Check whether a bullet will hit an asteroid.
# asteroid - An Asteroid class object
# bullet - A unit vector designating the bullet direction
#
# The test is based on a line-sphere intersection test, as described
# in http://en.wikipedia.org/wiki/Line%E2%80%93sphere_intersection
# We are not interested in the full solution of the equation, only whether
# the term under square root is non-negative. Also, the bullets always
# originate at the origin (0,0,0) simplifying the equation further
def check_incidence(self, asteroid, bullet):
c = asteroid.get_position()
r = asteroid.radius
I = bullet
sq = (I.dot(c))**2 - (I.dot(I)*(c.dot(c) - r**2))
return (sq >= 0)
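# Worked check of the formula above (illustrative numbers, not from the original):
# for an asteroid at c = (0, 0, 5) with r = 1 and bullet direction I = (0, 0, 1),
# sq = (I.c)^2 - I.I*(c.c - r^2) = 25 - 1*(25 - 1) = 1 >= 0, so the bullet hits;
# moving the asteroid to c = (5, 0, 5) gives sq = 25 - 1*(50 - 1) = -24 < 0, a miss.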
def play(self, keys):
now = time.time()
start_time = now
imux = 0
imuy = 0
imuz = 0
while DISPLAY.loop_running():
now = time.time()
self.frames += 1
# Self hit effect
if self.self_hit > 0:
DISPLAY.set_background(self.self_hit*1.0/10.0, 0, 0, 1)
if self.self_hit < 10:
self.self_hit += 1
else:
self.self_hit = -1
DISPLAY.set_background(0.0,0,0,1.0)
# (possibly) generate a new asteroid
if not self.pause:
ast = self.gen.generate_asteroid(now)
if ast is not None:
self.active_asteroids[self.asteroid_id] = ast
self.asteroid_id += 1
# Draw all active asteroid
for astid, ast in self.active_asteroids.items():
# Draw the asteroid itself
if not self.pause:
ast.move(now)
dist2_from_origin = ast.distance2()
# Draw the target on the radar view
dist_from_origin = (math.sqrt(dist2_from_origin)/INITIAL_DISTANCE)*TARGET_DIST_SCALE
angle = math.radians(ast.azimuth + self.azimuth + 90)
rtx = dist_from_origin*math.cos(angle)
rty = dist_from_origin*math.sin(angle)
self.radar_target.position(TARGET_CENTER_POSITION[0]+rtx,
TARGET_CENTER_POSITION[1]+rty,
TARGET_CENTER_POSITION[2])
self.radar_target.draw(camera = cam2d)
if dist2_from_origin < SELF_IMPACT_RADIUS2:
# Reached origin, destroy it
self.gen.return_asteroid(self.active_asteroids[astid])
del self.active_asteroids[astid]
self.self_hit = 1
SOUNDS['self_hit'].play()
if not self.free_play:
self.lives -= 1
# Position, rotate and draw the asteroid
ast.draw(camera = cam3d)
# Delete all hit asteroids whose time has passed
for astid in range(len(self.hit_asteroids)):
print (astid)
print(self.hit_asteroids)
ast = self.hit_asteroids[astid]
if ast.hit_time > 8.0:
self.gen.return_asteroid(self.hit_asteroids[astid])
del self.hit_asteroids[astid]
# Draw all hit asteroids
for ast in self.hit_asteroids:
ast.move(now)
if ast.hit_time > 8.0:
self.hit_asteroids[0]
ast.draw(camera = cam3d)
# Draw all active bullets
objindex = 0
for bull in self.active_bullets:
if not self.pause:
bull.move(now)
dest = bull.get_destination()
dist2_from_origin = bull.distance2()
if (dest is not None) and (dest[0] in self.active_asteroids):
ast_distance2 = dest[1].distance2()
if dist2_from_origin > ast_distance2:
# Bullet hit the asteroid
del self.active_asteroids[dest[0]]
dest[1].hit(now)
self.hit_asteroids.append(dest[1])
del self.active_bullets[objindex]
self.scores += 1
self.scores_changed = True
SOUNDS['astro_hit'].play()
elif dist2_from_origin > BULLET_DISTANCE2:
# Reached final distance, destroy it
del self.active_bullets[objindex]
else:
objindex += 1
bull.draw(camera = cam3d)
# Draw Sprites
for s in self.fixed_sprites:
s.draw(camera = cam2d)
# Draw lives
for l in range(0, 5):
if l+1 > self.lives:
s = self.life_empty
else:
s = self.life_full
s.position(LIFE_BAR_POSITION[0],
LIFE_BAR_POSITION[1] + l*LIFE_BAR_STEP,
LIFE_BAR_POSITION[2])
s.draw(camera = cam2d)
# Draw scores
if self.scores_changed:
self.scores_str = pi3d.String(font=FONT_COMPUTER,
string="%03d" % self.scores,
x = SCORE_POSITION[0],
y = SCORE_POSITION[1],
z = SCORE_POSITION[2],
sx=0.01, sy=0.01)
self.scores_str.set_shader(shader_uv_flat)
self.scores_changed = False
self.scores_str.draw(camera = cam2d)
# Draw READY-GO text
if (self.mode[0] == MODE_READY):
self.ready_text.draw(camera = cam2d)
self.mode[1] -= 1
if (self.mode[1] == 0):
self.mode = [MODE_READY_GO, 5]
elif (self.mode[0] == MODE_READY_GO):
self.ready_text.translateZ(.5)
self.ready_text.set_custom_data(17, [self.mode[1]/5.0])
self.ready_text.draw(camera = cam2d)
self.go_text.translateZ(-0.5)
self.go_text.set_custom_data(17, [1.0 - self.mode[1]/5.0])
self.go_text.draw(camera = cam2d)
self.mode[1] -= 1
if (self.mode[1] == 0):
self.mode = [MODE_GO, GO_TIME]
elif (self.mode[0] == MODE_GO):
self.go_text.draw(camera = cam2d)
self.mode[1] -= 1
if (self.mode[1] == 0):
self.mode = [MODE_GO_OUT, 5]
elif (self.mode[0] == MODE_GO_OUT):
self.go_text.translateZ(.5)
self.go_text.set_custom_data(17, [self.mode[1]/5.0])
self.go_text.draw(camera = cam2d)
self.go_text.draw(camera = cam2d)
self.mode[1] -= 1
if (self.mode[1] == 0):
self.mode = [MODE_PLAY, 0]
# Debugging
#debug_str = "az: %f incl: %f" % (self.azimuth, self.incl)
#debug_str_pi = pi3d.String(font=FONT_ARIAL, string=debug_str,
# x = 0, y = 0, z = 5, sx=0.005, sy=0.005)
#debug_str_pi.set_shader(shader_uv_flat)
#debug_str_pi.draw(camera = cam2d)
# Read the IMU angles
imux, imuy, imuz = IMU.data
self.incl = -math.degrees(imuy)
self.azimuth = math.degrees(imuz)
cam_rotate = True
# TEMPORARY CODE
k = keys.read()
cam_rotate = False
if k >-1:
if k == ord('p'):
# Toggle pause
self.pause = not self.pause
elif k == ord('f'):
# Toggle free play mode
self.free_play = not self.free_play
elif k==ord(' '):
self.create_bullet(now)
elif (k == 27):
break
# Check if the trigger button is pressed
fire_button = GPIO.input(BUTTON_FIRE_GPIO[0])
if (fire_button == 1 and self.fire_button_state == 0):
self.create_bullet(now)
pass
self.fire_button_state = fire_button
# Handle camera rotation
if True: #cam_rotate:
cam3d.reset()
cam3d.rotateX(self.incl)
cam3d.rotateY(-self.azimuth)
# If no more lives left, terminate the game
if self.lives == 0:
break
# Calculate average FPS
end_time = time.time()
self.FPS = (1.0*self.frames)/(1.0*(end_time - start_time))
######################################
#### FullScreenImage
######################################
class FullScreenImage(object):
def __init__(self, image_filename):
# Create a sprite from the image file
self.bg = pi3d.ImageSprite(image_filename, shader = shader_uv_flat,
w = 1.6, h = 1)
# Position the opening screen graphics
self.bg.position(0, 0, 4)
self.bg.scale(3.7, 3.7, 1)
def start(self):
while DISPLAY.loop_running():
# Draw the background
self.bg.draw(camera = cam2d)
# Additional drawing
self.draw(camera = cam2d)
# Process input
if not self.process_input():
break
# Default additional draw - nothing
def draw(self, camera):
pass
# Default input processing - always continue
def process_input(self):
return True
######################################
#### OpeningScreen
######################################
class OpeningScreen(FullScreenImage):
def __init__(self):
super(OpeningScreen, self).__init__(BITMAP_DIR + "opening.png")
# Create a text string
self.text = pi3d.String(font=FONT_COMPUTER,
string = "Press the START Button to Begin",
x = 0, y = .5, z = 3.9,
sx=0.005, sy=0.005)
self.text.set_shader(shader_uv_flat)
self.text_ts_delta = 0.1
self.text_ts = 0
def draw(self, camera):
# Set the transparency of the text
self.text_ts += self.text_ts_delta
self.text.set_custom_data(17, [abs(math.sin(self.text_ts))])
self.text.draw(camera = cam2d)
def process_input(self):
# Check if the START button was pressed
b = GPIO.input(BUTTON_START_GPIO)
if (b == 0):
return False
k = KEYS.read()
if k >-1:
return False;
return True
######################################
#### EndingScreen
######################################
class EndingScreen(FullScreenImage):
def __init__(self, image, sound = None, tmax = 8):
super(EndingScreen, self).__init__(BITMAP_DIR + image)
self.sound = sound
self.t_end = time.time() + tmax
def start(self):
# Call the super to create the image
super(EndingScreen, self).start()
# If a sound is defined, play it
if self.sound is not None:
self.sound.play()
def process_input(self):
# Check if a designated number of seconds has passed since
# the screen was created
if time.time() > self.t_end:
return False
# Check if the START button was pressed
b = GPIO.input(BUTTON_START_GPIO)
if (b == 0):
return False
k = KEYS.read()
if k >-1:
return False;
return True
def load_sprites():
sprite_filenames = ['sight', 'radar_panel', 'radar_target', 'life_full', 'life_empty', 'trans']
sprites = {}
sh = shader_uv_flat
for fn in sprite_filenames:
s = pi3d.ImageSprite('../media/bitmaps/' + fn + '.png', shader = sh, w = 1, h = 1)
sprites[fn] = s
return sprites
def setup_io():
GPIO.setmode(GPIO.BCM)
GPIO.setup(BUTTON_START_GPIO, GPIO.IN, GPIO.PUD_UP)
GPIO.setup(BUTTON_FIRE_GPIO[0], GPIO.IN, GPIO.PUD_UP)
GPIO.setup(BUTTON_FIRE_GPIO[1], GPIO.OUT)
GPIO.output(BUTTON_FIRE_GPIO[1], 0)
GPIO.setup(RUMBLE_FIRE_GPIO, GPIO.OUT)
def load_asteroids():
# Check if a pre-loaded database exists
db_filename = os.path.join(VAR_DIR, AST_DB_FILENAME)
start = time.time()
if os.path.exists(db_filename):
# Load the database
ast = pickle.load(file(db_filename, 'rb'))
else:
# Database does not exist. Load the models then save
# the database
ast = []
global_scale = 1.0
for mf in asteroids.models[0:5]:
model_filename = mf[0]
model_scale = mf[1]
model_name = model_filename.split('.')[0] # Remove the .obj extention
m = pi3d.Model(file_string='../media/models/' + model_filename,
name=model_name)
m.scale(model_scale*global_scale,
model_scale*global_scale,
model_scale*global_scale)
ast.append(m)
pickle.dump(ast, file(db_filename, 'wb'))
# Set the shader for all models
for a in ast:
a.set_shader(shader_uv_flat)
end = time.time()
print("Loading time: %f\n" % (end-start))
return ast
def init_imu():
s = RTIMU.Settings("RTIMU")
imu = RTIMU.RTIMU(s)
print("IMU Name: " + imu.IMUName())
if (not imu.IMUInit()):
print("IMU Init Failed");
sys.exit(1)
else:
print("IMU Init Succeeded");
reader = IMUReader(imu)
reader.start()
return reader
def init_sounds():
# Init the mixer
pygame.mixer.init()
# Load sounds
sounds = {
'win': pygame.mixer.Sound(SOUNDS_DIR + '126000__xserra__campeones.wav'),
'shot': pygame.mixer.Sound(SOUNDS_DIR + '156895__halgrimm__a-shot.wav'),
'self_hit': pygame.mixer.Sound(SOUNDS_DIR + '218721__bareform__boom-bang.wav'),
'astro_hit': pygame.mixer.Sound(SOUNDS_DIR + '147584__cactus2003__far-off-boom.wav'),
'lose': pygame.mixer.Sound(SOUNDS_DIR + '178875__rocotilos__you-lose-evil.wav')
}
return sounds
# Setup display and initialise pi3d
DISPLAY = pi3d.Display.create(background=(0.0, 0, 0, 1))
DISPLAY.frames_per_second = 30
# Create Cameras
ASPECT = DISPLAY.width / DISPLAY.height
cam3d = pi3d.Camera((0,0,0), (0,0,-0.1), (1, 1000, 45, ASPECT), is_3d=True)
cam2d = pi3d.Camera(is_3d=True)
# Load shaders
shader_uv_flat = pi3d.Shader('uv_flat')
shader_mat_flat = pi3d.Shader('mat_flat')
shader_explosion = pi3d.Shader("uv_flat_explode")
# Load Fonts
FONT_ARIAL = pi3d.Font("../media/fonts/FreeMonoBoldOblique.ttf", (221,0,170,255))
FONT_COMPUTER = pi3d.Font("../media/fonts/Computerfont.ttf", (0,0,255,255))
FONT_BALLS = pi3d.Font("../media/fonts/BallsoOnTheRampage.ttf", (50,70,120,255))
# Load Sprites
SPRITES = load_sprites()
# Load Asteroid models
ASTEROIDS = load_asteroids()
# Load sounds
SOUNDS = init_sounds()
# Setup I/O
setup_io()
# Initialize the IMU
IMU = init_imu()
# Fetch key presses
KEYS = pi3d.Keyboard()
EndingScreen('you_lost.png', SOUNDS['lose']).start()
EndingScreen('new_high_scores.png').start()
opening = OpeningScreen()
opening.start()
level = GameLevel(SPRITES)
try:
level.play(KEYS)
KEYS.close()
DISPLAY.destroy()
IMU.running = False
except:
#mykeys.close()
DISPLAY.destroy()
IMU.running = False
print(level.gen.asteroid_model_list)
raise
IMU.running = False
|
[
"avishorp@gmail.com"
] |
avishorp@gmail.com
|
2f43182db61abb35c751b6995a55904df4d54bcc
|
6da9f35aba05c777fcccf3b09f798cd8250069fb
|
/rent house balanceamt.py
|
5940e71dd2cae265e42c9de36d3ba05f3590efb1
|
[] |
no_license
|
rvsmegaraj1996/Megaraj
|
02c5a7ad5ab00e4ba1142b41b2021a5838b030f5
|
6d320d20958414347be7eea21a5ce3dc2bd7aedf
|
refs/heads/master
| 2022-12-11T16:18:54.101221
| 2020-09-14T09:58:33
| 2020-09-14T09:58:33
| 295,368,240
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 669
|
py
|
alpha=int(input("tell us the no of floor: "))
beta=int(input("tell us the houses: "))
sum=0
for row1 in range(1,alpha+1):
for row2 in range(1,beta+1):
rent=int(input("bring the rent: "))
if row2==2 or row2==4:
if rent>=8000:
print("thanks for the payment");sum+=8000
print("balanced to be returned: ",(rent-8000))
else:print("need to pay")
elif row2==1 or row2==3:
if rent>=6000:
print("thanks for the payment");sum+=6000
print("balanced to be returned: ",(rent-6000))
else:print("need to pay")
print("total collection: ",sum)
|
[
"rvsmegaraj1996@gmail.com"
] |
rvsmegaraj1996@gmail.com
|
c76af58c2e312372fa843448ac81fff6762ed6be
|
7daae66222485f603abba30a2684264cf40df2a4
|
/pythonproject1/runserver.py
|
7adc90775d56206dfdcf4e6e3232233ec0e215d8
|
[] |
no_license
|
uribracha/schoolproject1
|
4dbd59c899293d4a64e74d0f113f1ac90ed4d5c0
|
d3cea9a0de0fc2fde8b19aa4a7abef30c12dcdf0
|
refs/heads/master
| 2020-12-10T09:09:48.569328
| 2020-01-14T12:07:05
| 2020-01-14T12:07:05
| 233,552,971
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 349
|
py
|
"""
This script runs the pythonproject1 application using a development server.
"""
from os import environ
from pythonproject1 import app
if __name__ == '__main__':
HOST = environ.get('SERVER_HOST', 'localhost')
try:
PORT = int(environ.get('SERVER_PORT', '5555'))
except ValueError:
PORT = 5555
app.run(HOST, PORT)
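# Usage sketch (added for clarity): the environment variables read above override the
# defaults, e.g. `SERVER_HOST=0.0.0.0 SERVER_PORT=8080 python runserver.py`; otherwise
# the development server binds to localhost:5555.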
|
[
"uriluli@gmail.com"
] |
uriluli@gmail.com
|
c7d1b3a586f3641b237a990281f07792be8b5c3e
|
063a95c05876944e45e204557c76f4140f028182
|
/Leetcode_Practice/DS-LinkedList.py
|
75277cffe5cc217f8f76cabf7588015262f044cd
|
[] |
no_license
|
Urvashi-91/Urvashi_Git_Repo
|
5c23ee898d637f245b287548afa9a6b6901edbd2
|
73a7b069746631717bd5739df5ded2d6866b0c8c
|
refs/heads/master
| 2023-06-27T19:10:36.668469
| 2021-07-26T21:02:39
| 2021-07-26T21:02:39
| 324,001,059
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,163
|
py
|
class Node:
def __init__(self, data=None, next=None):
self.data = data
self.next = next
class SLinkedList:
def __init__(self):
self.head = None
def insert_at_begining(self, data):
node = Node(data, self.head)
self.head = node
def insert_at_end(self, data):
if self.head == None:
self.head = Node(data, None)
return
itr = self.head
while itr.next:
itr = itr.next
itr.next = Node(data, None)
def traverse(self):
if self.head == None:
return
itr = self.head
value = ""
while itr:
value += str(itr.data) + '--->'
itr = itr.next
print(value)
def insert_list(self, data_list):
self.head = None
for data in data_list:
self.insert_at_end(data)
def get_length(self):
count = 0
if self.head == None:
return 0
itr = self.head
while itr:
count += 1
itr = itr.next
return count
def remove_at(self, index):
if index < 0 or index >= self.get_length():
raise Exception("Invalid")
if index == 0:
self.head = self.head.next
return
count = 0
itr = self.head
while index-1 != count:
itr = itr.next
count += 1
itr.next = itr.next.next
def insert_at(self, data, index):
if index < 0 or index >= self.get_length():
raise Exception("Invalid")
if index == 0:
self.insert_at_begining(data)
return
count = 0
itr = self.head
while itr:
if count == index-1:
node = Node(data, itr.next)
itr.next = node
break
itr = itr.next
count += 1
if __name__ == '__main__':
ll = SLinkedList()
ll.insert_at_begining(3)
ll.insert_at_begining(5)
ll.insert_at_end(6)
ll.insert_at_end(5)
ll.insert_list(["1","2","3"])
ll.remove_at(2)
ll.insert_at(6,1)
ll.traverse()
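# Expected output of the demo above (added for clarity): insert_list resets the list to
# 1--->2--->3, remove_at(2) drops the "3", and insert_at(6, 1) gives 1--->6--->2--->,
# which is what traverse() prints.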
|
[
"hanu@Urvashis-MacBook-Pro.local"
] |
hanu@Urvashis-MacBook-Pro.local
|
6e08f55c19e49a6774097a3bb41dc89c3be44e8d
|
77bae4adbdea1cc5d8f22e0df0dbe3e20e445d17
|
/dqe/apps.py
|
03411d3aa722db372d79b294a75811cc92dbe512
|
[] |
no_license
|
Gavin188/WGT1
|
d8d015c22edf4613e91db353bea9d7394c1ffaa4
|
ecb28f0172ccbe5f99e71f6b8fb5b96fe256e587
|
refs/heads/master
| 2020-08-29T05:19:03.188790
| 2019-11-20T09:11:36
| 2019-11-20T09:11:36
| 217,935,373
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 81
|
py
|
from django.apps import AppConfig
class DqeConfig(AppConfig):
name = 'dqe'
|
[
"gavin@foxconn.com"
] |
gavin@foxconn.com
|
6bc2b8d39f966cac094db6b5c6c1371919362bf7
|
07acc994db69fc1f8d92d63b5979aa2f4cd687f9
|
/config/wsgi.py
|
d121d21402b0f26a15c725faa9cf686c19d372c9
|
[
"MIT"
] |
permissive
|
pavlovicr/bcs
|
e05d97c87b488a148ae9a4d8fb23b99de31c44da
|
e38a2cf988bf4470dedfb4ca0b02d3c4ba6b80f2
|
refs/heads/master
| 2021-05-08T15:49:59.802370
| 2018-10-31T19:22:31
| 2018-10-31T19:22:31
| 120,127,034
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,938
|
py
|
"""
WSGI config for bcs project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
from django.core.wsgi import get_wsgi_application
# This allows easy placement of apps within the interior
# bcs directory.
app_path = os.path.abspath(os.path.join(
os.path.dirname(os.path.abspath(__file__)), os.pardir))
sys.path.append(os.path.join(app_path, 'bcs'))
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
from raven.contrib.django.raven_compat.middleware.wsgi import Sentry
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
application = Sentry(application)
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
[
"rados.pavlovic@euroinvest.si"
] |
rados.pavlovic@euroinvest.si
|
27e8be43a750682db198f4c4e531ea92710c421d
|
6f9380fae128a8a097cd5a66f6a06f8f0791eea5
|
/intercom_mattermost/asgi.py
|
3b6f5f327b2f5646de58a29da88ba2a08935d33c
|
[] |
no_license
|
LaBayVeTroi/intercom-mattermost
|
1688a4b984b28ae06687901ad2965fe9cda18b37
|
3336841b04f247b874fa2b294c08722de3a494e5
|
refs/heads/master
| 2021-03-11T11:58:27.905597
| 2020-03-11T09:12:35
| 2020-03-11T09:12:35
| 246,526,880
| 0
| 0
| null | 2020-03-11T09:21:36
| 2020-03-11T09:21:35
| null |
UTF-8
|
Python
| false
| false
| 415
|
py
|
"""
ASGI config for intercom_mattermost project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'intercom_mattermost.settings')
application = get_asgi_application()
|
[
"phmtuan313@gmail.com"
] |
phmtuan313@gmail.com
|
3fa3c62d774e21245e74937168de3992ac3770b8
|
ecf85bcf4a8a0c234c0151c9d426755a45caf164
|
/Python Library/EX_pyAesCrypt.py
|
e6c52f3db0a48b7cf88d4b2c9a30b879b676e1d4
|
[] |
no_license
|
dentiny/Python-applications
|
c6351a5842c958f0169997b57c887fbb435b1590
|
700ca488b26bdb1b6456a141486a6f68679d28e3
|
refs/heads/master
| 2021-06-19T17:46:43.073939
| 2021-01-21T05:15:40
| 2021-01-21T05:15:40
| 167,809,062
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 206
|
py
|
import pyAesCrypt
password = "password"
buffer_size = 64 * 1024
pyAesCrypt.encryptFile("file.txt", "file.txt.aes", password, buffer_size)
pyAesCrypt.decryptFile("file.txt.aes", "file.txt", password, buffer_size)
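# Note (added for clarity): pyAesCrypt's encryptFile/decryptFile take
# (input path, output path, password, buffer size), so the round trip above
# writes the decrypted data back over the original file.txt.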
|
[
"noreply@github.com"
] |
dentiny.noreply@github.com
|
e32c1292e66c96cf9ac99ddebf502af97c20107b
|
b7c59c059c2562f98859c10b87e3509dbaee9f53
|
/test/unit/client/test_posts.py
|
858cc8b601e880a72557871b6d35f438c0668528
|
[] |
no_license
|
culturemesh/culturemeshFFB
|
37e21975bff6e7e61dd961068f96cd17186d78da
|
53b4b61d541751d517d5e3a7a077358c4494d132
|
refs/heads/master
| 2022-03-01T23:38:27.329929
| 2019-10-28T07:29:58
| 2019-10-28T07:29:58
| 106,875,697
| 2
| 2
| null | 2019-10-28T07:15:39
| 2017-10-13T21:38:00
|
Python
|
UTF-8
|
Python
| false
| false
| 865
|
py
|
#
# Tests client/posts.py
#
from nose.tools import assert_true, assert_equal
import test.unit.client.client_test_prep
from culturemesh.client import Client
def test_get_post():
"""
Tests we can get a single post.
"""
c = Client(mock=True)
post = c.get_post(4)
print(post)
assert_equal(post['vid_link'], "https://www.lorempixel.com/1016/295")
def test_get_post_replies():
"""
Tests that we can get post replies as expected.
"""
c = Client(mock=True)
posts1 = c.get_post_replies(1, count=5)
posts2 = c.get_post_replies(2, count=5)
print(posts1)
posts3 = c.get_post_replies(1, count=1)
posts4 = c.get_post_replies(1, count=3, max_id=1)
assert_equal(len(posts1), 2)
assert_equal(len(posts2), 0)
assert_equal(len(posts3), 1)
assert_equal(posts3[0]['id'], 2)
assert_equal(len(posts4), 1)
assert_equal(posts4[0]['id'], 1)
|
[
"alanf94@stanford.edu"
] |
alanf94@stanford.edu
|
0c67c809421800dba3f118a0c77c9b2ce41181c6
|
fad5c0cf71ce8f0e7dc4029270a48969f3311dd7
|
/resgrid/nr/codebook1/para.py
|
ad7326863025ef61ddbf3b9c6b700c197ea74160
|
[] |
no_license
|
liuyonggang1/liuyonggang1.github.io
|
1cb93560e6b8e80169b66326610bbd538798e33a
|
1b99f7576195d6169aa67b2b11136c8f0e13ab0c
|
refs/heads/master
| 2023-04-11T05:00:24.871853
| 2023-04-01T13:28:58
| 2023-04-01T13:28:58
| 191,664,945
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,289
|
py
|
from parabase import Parameter, ConfigBase, ParameterDisplay
# import sys # sys._getframe().f_code.co_name = {File}_{Class}_{digit}_{function}_{digit}
#############################################################################
# start add parameter
#############################################################################
t = Parameter("codebookMode")
t.range = [1, 2]
t.spec = '38331: CodebookConfig->codebookType->type1->codebookMode'
t = Parameter('pcsirs')
t.desc = 'P<sub>CSI-RS</sub>'
t.range = [4, 8, 12, 16, 24, 32]
t.spec = '38.214 Table 5.2.2.2.1-2. The number of CSI-RS ports.'
t = Parameter('n1n2')
t.desc = 'N<sub>1</sub>,N<sub>2</sub>'
t.range = [""]
t.spec = '38331: CodebookConfig->codebookType->type1->subType->typeI-SinglePanel->nrOfAntennaPorts->moreThanTwo->n1-n2'
t = ParameterDisplay('o1o2')
t.desc = 'O<sub>1</sub>,O<sub>2</sub>'
t.spec = '38.214 Table 5.2.2.2.1-2'
t = Parameter('nlayers')
t.desc = 'layers'
t.range = [1,2,3,4,5,6,7,8]
t.spec = 'number of layers'
#############################################################################
# end add parameter
#############################################################################
class Config(ConfigBase):
#############################################################################
# start add consistency check
#############################################################################
def pcsirs_change(self, event):
m = self.mvalue
if m == 4:
new_range = ["2,1"]
elif m == 8:
new_range = ["2,2", "4,1"]
elif m == 12:
new_range = ["3,2", "6,1"]
elif m == 16:
new_range = ["4,2", "8,1"]
elif m == 24:
new_range = ["4,3", "6,2", "12,1"]
elif m == 32:
new_range = ["4,4", "8,2", "16,1"]
self.set_range('n1n2', new_range)
self.n1n2_change(None)
if m == 4:
new_range = range(1, 5)
else:
new_range = range(1, 9)
self.set_range('nlayers', new_range)
def n1n2_change(self, event):
m = self.mvalue
new_range = "4,1" if m.endswith(",1") else "4,4"
self.set_range('o1o2', new_range)
|
[
"yonggang.liu@nokia-sbell.com"
] |
yonggang.liu@nokia-sbell.com
|
a7a86059a9d0de0019b07e665dadab10f212e05d
|
ddcaa8f8c330ac79daf8893eb77252910b5fa369
|
/image_classification/setup.py
|
ee288ad8ec06d9f274fce7e89b8ba1010fa001a4
|
[
"Apache-2.0"
] |
permissive
|
waikato-datamining/tensorflow
|
6d396803547232a3ee63d72bf35cd0a2c0dc9713
|
98adc81e7a70093dba18c527ce98cd9e77065d51
|
refs/heads/master
| 2023-04-06T20:20:40.954270
| 2022-09-30T00:27:36
| 2022-09-30T00:27:36
| 132,871,288
| 2
| 5
| null | 2023-03-26T20:18:05
| 2018-05-10T08:26:57
|
Python
|
UTF-8
|
Python
| false
| false
| 1,769
|
py
|
from setuptools import setup, find_namespace_packages
def _read(f) -> bytes:
"""
Reads in the content of the file.
:param f: the file to read
:type f: str
:return: the content
:rtype: bytes
"""
return open(f, 'rb').read()
setup(
name="wai.tfimageclass",
description="Image classification using tensorflow.",
long_description=(
_read('DESCRIPTION.rst') + b'\n' +
_read('CHANGES.rst')).decode('utf-8'),
url="https://github.com/waikato-datamining/tensorflow/tree/master/image_classification",
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Programming Language :: Python :: 3',
],
license='Apache 2.0 License',
package_dir={
'': 'src'
},
packages=find_namespace_packages(where='src'),
namespace_packages=[
"wai",
],
version="0.0.15",
author='Peter Reutemann and TensorFlow Team',
author_email='fracpete@waikato.ac.nz',
install_requires=[
"argparse",
"numpy",
"pillow",
"tensorflow_hub",
"simple-confusion-matrix",
"redis",
"redis-docker-harness",
],
entry_points={
"console_scripts": [
"tfic-retrain=wai.tfimageclass.train.retrain:sys_main",
"tfic-stats=wai.tfimageclass.train.stats:sys_main",
"tfic-export=wai.tfimageclass.train.export:sys_main",
"tfic-labelimage=wai.tfimageclass.predict.label_image:sys_main",
"tfic-label-redis=wai.tfimageclass.predict.label_redis:sys_main",
"tfic-poll=wai.tfimageclass.predict.poll:sys_main",
]
}
)
|
[
"fracpete@gmail.com"
] |
fracpete@gmail.com
|
a07cef6dbffcbf1eb0f07a581c979bed024b4f4a
|
fa8a430bd484d3a96aba27a2abb73226fa5c3920
|
/Main/cnf_min.py
|
80885e4dfb7ee9e38adcd29fff085243e04a2cbd
|
[] |
no_license
|
Shubhankar007/ECEN-699
|
f39aecda647bf61f6b151d41016e5c5f5d3bf52c
|
b738faf36a8fd891b9c7e98b95188d00bd6d2ef3
|
refs/heads/master
| 2021-01-10T15:45:06.091632
| 2016-05-06T21:41:17
| 2016-05-06T21:41:17
| 54,232,604
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,118
|
py
|
#!/usr/bin/python
from pyeda.inter import *
import sys
import ast
#function to split clause list
def split_list(seq, size):
newlist = []
splitsize = 1.0/size*len(seq)
for i in range(size):
newlist.append(seq[int(round(i*splitsize)):int(round((i+1)*splitsize))])
return newlist
#Import CNF file and get clauses in an AST
with open(sys.argv[1], 'r') as fin:
f = parse_cnf(fin.read())
#f = ast2expr(parse_cnf(fin.read()))
#Get CNF Clauses in a list
l=len(f)
f_clause_list =[]
for i in range(1, l):
f_clause_list.append(ast2expr(f[i]))
f_clause_list_str =[]
#Sort the Clauses
# for current in range(len(f_clause_list)):
# f_clause_list_str.append(str(f_clause_list[current]))
# f_clause_list_str.sort()
#Split the clauses into clusters
num_of_clauses = int(l/50)
f_clause_list_split = split_list(f_clause_list, (num_of_clauses + 1))
f_split = []
for current in range(len(f_clause_list_split)):
f_split1 = 1
for current2 in range(len(f_clause_list_split[current])):
f_split1 = And((f_clause_list_split[current][current2]),f_split1)
f_split.append(f_split1)
#APPLY DE MORGAN
#Get NOT(F)
f_bar_split = []
f_bar_2 = []
for current in range(len(f_split)):
f_bar_2.append(((~f_split[current])).to_nnf())
f_bar_split.append(f_bar_2[current].to_dnf())
#print(f_bar)
#Run Espresso on split NOT(F)
f_bar_min_split = []
f_bar_min_split_2 = None
for current in range(len(f_bar_split)):
f_bar_min_split_2 = espresso_exprs(f_bar_split[current])
f_bar_min_split.append(expr(f_bar_min_split_2[0]))
#print(f_bar_min_split)
#Combine NOT(F)
c = 0
for current in range(len(f_bar_min_split)):
c = Or((f_bar_min_split[current]),c)
#f_split.append(f_split1)
f_bar_min = expr(c)
#print(f_bar_min)
#APPLY DE MORGAN
#Get Minimized F
f_n = (~f_bar_min).to_nnf()
f_new = f_n.to_cnf()
litmap, nvars, clauses = f_new.encode_cnf()
f_nd = DimacsCNF(nvars, clauses)
f_min = str(f_nd)
#print(f_min)
#Generate Output file
inputfile = sys.argv[1]
outputfile = inputfile.split(".")[0] + "_out.cnf"
with open(outputfile, "w" ) as file_out:
file_out.write(f_min)
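# Usage sketch (added for clarity): `python cnf_min.py foo.cnf` reads the DIMACS CNF
# file foo.cnf, minimises NOT(F) cluster-by-cluster with Espresso, negates back via
# De Morgan, and writes the result to foo_out.cnf.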
|
[
"shubhankar.007@outlook.com"
] |
shubhankar.007@outlook.com
|
2e6737da85278d0e58fa363fc15db9e421376690
|
e4b31b28ac7e6a98f82fc6ce596242d931eef950
|
/utils/logger.py
|
69b802fd2f868c442434f7eb620f9a7275a95c80
|
[] |
no_license
|
dgl-prc/pfa_extraction
|
6df1295f1f8d1421d6d36ffbd5fdcb9e37bf04c3
|
8b32da42ce4037e8672095f2c8f24eba9f7bca34
|
refs/heads/master
| 2023-04-06T04:33:25.975490
| 2019-12-22T14:55:09
| 2019-12-22T14:55:09
| 183,348,663
| 0
| 0
| null | 2023-03-24T23:18:02
| 2019-04-25T03:22:40
|
Python
|
UTF-8
|
Python
| false
| false
| 423
|
py
|
import sys
import os
class Logger(object):
def __init__(self, filename='default.log', stream=sys.stdout):
path = os.path.dirname(filename)
if path and not os.path.exists(path):
os.makedirs(path)
self.terminal = stream
self.log = open(filename, 'a')
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
pass
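# Typical usage (illustrative sketch, not part of the original module):
#   sys.stdout = Logger('logs/run.log', sys.stdout)
# after which every print() goes both to the terminal and to logs/run.log.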
|
[
"2469039292@qq.com"
] |
2469039292@qq.com
|
cf0077e02d0facb38182876a848e1d77a50cb7dc
|
71460476c5f5ebdca719def124f1a0650861fdab
|
/mint_work/custom/website_support/models/__init__.py
|
acc3edeae75ffcea559ebdda524111b3e095b039
|
[] |
no_license
|
merdhah/dubai_work
|
fc3a70dc0b1db6df19c825a3bf1eef2a373d79c0
|
e24eb12b276a4cd5b47a4bd5470d915179872a4f
|
refs/heads/master
| 2022-01-07T11:22:07.628435
| 2018-10-17T13:37:24
| 2018-10-17T13:37:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 107
|
py
|
# import website_support_ticket, res_partner, website_support_help
import res_partner
import project_case
|
[
"asghar0517@gmail.com"
] |
asghar0517@gmail.com
|
2f41e15819c9b6581a8d03aa451b8f225f44308c
|
fdef109b896eae29c5e7719107eaf60b32384a43
|
/users/migrations/0006_auto_20200115_1950.py
|
5fbb172b852d2751f1b54d9438d42f30b6a82bb0
|
[] |
no_license
|
DimaSapsay/ma_book
|
2cac7aa4d8db099dd628398fe0dcc99b129609d4
|
57572cec27449bf6d88c1e7de16e0e048372eaf6
|
refs/heads/master
| 2020-12-15T09:52:22.669639
| 2020-02-24T22:38:03
| 2020-02-24T22:38:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 591
|
py
|
# Generated by Django 3.0.2 on 2020-01-15 17:50
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('users', '0005_auto_20200115_1948'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
[
"logotip123@yahoo.com"
] |
logotip123@yahoo.com
|
162a02858d11b22fc69525f65f3e65e82933e009
|
960aa77201947f20288612744566a04a4df27095
|
/performance/migrations/0009_auto_20171203_0959.py
|
11ab41402defa641e03852acd254f5b166ac556a
|
[] |
no_license
|
ahmadiga/personal-assistant
|
3fd05b7bf2a4ae4feedebc55f940f1e54c6f7640
|
26703b32ae154a4cc72eb1d88c1d37594ffa2fa4
|
refs/heads/master
| 2021-01-12T09:30:54.704880
| 2018-02-08T19:41:16
| 2018-02-08T19:41:16
| 76,168,300
| 2
| 5
| null | 2018-01-25T14:01:24
| 2016-12-11T10:45:50
|
HTML
|
UTF-8
|
Python
| false
| false
| 595
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-12-03 07:59
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('performance', '0008_auto_20171203_0958'),
]
operations = [
migrations.AlterField(
model_name='performance',
name='year',
field=models.DateTimeField(blank=True, default=datetime.datetime(2017, 12, 3, 7, 59, 53, 307207, tzinfo=utc), null=True),
),
]
|
[
"m.bazadough@sit-mena.com"
] |
m.bazadough@sit-mena.com
|
ce6d49c3500788c0879de6c7ef21e5b063f46026
|
eb6fad2bb7e7a54cf13123ccb1889b5441ca4fb7
|
/segmentation/test_coco.py
|
ccc92c343578e2605c28f5b549422fa002c9afbd
|
[
"MIT"
] |
permissive
|
daydreamer2023/VISTA-Net
|
728bd6721c96c285adf8be0b1d2fcff41e0bfc4a
|
62e54c7ecf0e39d3e26a1bd930ea4b6d1f9b0370
|
refs/heads/main
| 2023-03-05T13:47:08.317135
| 2021-02-15T11:33:09
| 2021-02-15T11:33:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,722
|
py
|
###########################################################################
# Created by: Hang Zhang
# Email: zhang.hang@rutgers.edu
# Copyright (c) 2017
###########################################################################
import os
import torch
import torchvision.transforms as transform
import scipy.io as sio
import encoding.utils as utils
import cv2
import numpy as np
from tqdm import tqdm
from torch.utils import data
from encoding.nn import BatchNorm2d
from encoding.datasets import get_dataset, test_batchify_fn
from encoding.models import get_model, MultiEvalModule
from option import Options
import time
def test(args):
# output folder
outdir = args.save_folder
if not os.path.exists(outdir):
os.makedirs(outdir)
# data transforms
input_transform = transform.Compose([
transform.ToTensor(),
transform.Normalize([.485, .456, .406], [.229, .224, .225])])
# dataset
testset = get_dataset(args.dataset, split=args.split, mode=args.mode,
transform=input_transform)
# dataloader
loader_kwargs = {'num_workers': args.workers, 'pin_memory': True} \
if args.cuda else {}
test_data = data.DataLoader(testset, batch_size=args.test_batch_size,
drop_last=False, shuffle=False,
collate_fn=test_batchify_fn, **loader_kwargs)
# model
if args.model_zoo is not None:
model = get_model(args.model_zoo, pretrained=True)
else:
model = get_model(args.model, dataset=args.dataset,
backbone=args.backbone, dilated=args.dilated,
lateral=args.lateral, attentiongraph=args.attentiongraph, aux=args.aux,
se_loss=args.se_loss, norm_layer=BatchNorm2d,
base_size=args.base_size, crop_size=args.crop_size)
# resuming checkpoint
if args.resume is None or not os.path.isfile(args.resume):
raise RuntimeError("=> no checkpoint found at '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
# strict=False, so that it is compatible with old pytorch saved models
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
# print(model)
scales = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0, 2.25] if args.dataset == 'citys' else \
[0.5, 0.75, 1.0, 1.25, 1.5, 1.75]
if not args.ms:
scales = [1.0]
evaluator = MultiEvalModule(model, testset.num_class, scales=scales, flip=args.ms).cuda()
evaluator.eval()
metric = utils.SegmentationMetric(testset.num_class)
tbar = tqdm(test_data)
for i, (image) in enumerate(tbar):
# colormap_dir = './output_seg_att'
# if not os.path.isdir(colormap_dir):
# os.mkdir(colormap_dir)
# for img in image:
# img = np.transpose(img.numpy(), (1, 2, 0))
# # print(img.shape)
# cv2.imwrite(os.path.join(colormap_dir, str(i).zfill(4) + 'rgb.jpg'), np.uint8(img))
with torch.no_grad():
predicts = evaluator.parallel_forward(image)
###save_attention_map
colormap_dir = './output_seg_att'
if not os.path.isdir(colormap_dir):
os.mkdir(colormap_dir)
# # print(predicts[0].shape)
predict = torch.argmax(torch.squeeze(predicts[0]),dim=0)
cv2.imwrite(os.path.join(colormap_dir, str(i).zfill(4) + '.png'),predict.cpu().numpy())
if __name__ == "__main__":
args = Options().parse()
torch.manual_seed(args.seed)
args.test_batch_size = torch.cuda.device_count()
test(args)
|
[
"641807447@qq.com"
] |
641807447@qq.com
|
19033387259ea46681b3410825fa6ebd05639b21
|
bfbe07f8edd3b5b3c1ac2393b8508ced3845219d
|
/Contests/Google/Codejam21/Round1C/B/B.py
|
ed00fe939fef67b75557b49a61aafbc27db263ff
|
[] |
no_license
|
srinjoyray/Competitive
|
5ee384b446a20aad2cdcea8e08304101a9a92b34
|
d6680510462a915756c4835374d2d6a342960f5f
|
refs/heads/main
| 2023-06-16T16:08:10.787883
| 2021-07-14T14:19:17
| 2021-07-14T14:19:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 976
|
py
|
t = int(input())
for test in range(1, t + 1):
s = "Case #" + str(test) + ":"
print(s, end=" ")
y = int(input())
ys = str(y)
a = []
ans = ""
for i in ys:
ans = ans + i
tmp = ans
for j in range(int(tmp) + 1, 10**26):
tmp += str(j)
a.append(int(tmp))
if len(tmp) > 20:
break
ys = str(int(ys[0]) + 1) + ys[1:]
ans = ""
for i in ys:
ans = ans + i
tmp = ans
for j in range(int(tmp) + 1, 10**26):
tmp += str(j)
a.append(int(tmp))
if len(tmp) > 20:
break
m = 1;
while m <= 10**18:
tmp = str(m)
for j in range(m + 1, 10**26):
tmp += str(j)
a.append(int(tmp))
if len(tmp) > 20:
break
m *= 10
a.sort()
res = 0
for i in a:
if i > y:
res = i
break
print(res)
|
[
"ankurkayal1234@gmail.com"
] |
ankurkayal1234@gmail.com
|
51922f396cb3bd0169ef7c29ddd80a31cd539a3e
|
785ebba58aff36a4d9411b2c1e9fc76f04e18dd1
|
/Artificial Intelligence/Assignments/Assignment 2/.ipynb_checkpoints/game-checkpoint.py
|
d260c23d17fd61b3ed33f0c4969b40e716ebe430
|
[] |
no_license
|
masterashu/Semester-4
|
60b6f2a6941edb7f2fa8b3113df85ed55b836119
|
4ed82c7d84ed848a2cce8ffaa41e027a5f1611eb
|
refs/heads/main
| 2022-12-12T20:33:53.986287
| 2020-09-05T13:39:42
| 2020-09-05T13:39:42
| 293,082,589
| 0
| 0
| null | 2020-09-05T13:39:44
| 2020-09-05T13:35:35
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,381
|
py
|
from abc import ABC, abstractmethod
from minmax import *
from typing import List
class Player:
def __init__(self, name='Player', mark=None, user: bool = False):
self.name = name
self.mark = mark
self.user = user
def __str__(self):
return f'{self.name} - {self.mark}'
class Game(ABC):
def __init__(self, initial_state, all_moves, players: List[Player]):
self.initial_state = initial_state
self.all_moves = all_moves
self.players = players
self.state = None
super(Game, self).__init__()
@abstractmethod
def make_move(self, move) -> bool:
pass
@abstractmethod
def utility(self, state, player):
pass
@abstractmethod
def explore_moves(self, state, player: Player):
pass
@staticmethod
@abstractmethod
def transition(state, move, player=None):
pass
@abstractmethod
def game_end(self, state) -> bool:
pass
@abstractmethod
def winner(self, state):
pass
@staticmethod
@abstractmethod
def get_user_move(player):
pass
@abstractmethod
def heuristic(self, state):
pass
@abstractmethod
def state_repr(self, state):
pass
@abstractmethod
def reset(self):
pass
class GameSolvingAgent:
def __init__(self, game: Game, current_player=None, algo: Algorithm = Algorithm.MinMax):
self.game = game
self.moves = []
self.current_player = current_player or game.players[0]
self.algo = algo
def predict_next_move(self, max_depth=5):
move = search_minmax(self.game, self.algo, max_depth=max_depth)
return move
def make_agent_move(self, move):
self.game.make_move(move)
self.moves.append(move)
# Duplicate Function to distinguish
# user actions from agents without confusion
def make_user_move(self, user_move):
self.moves.append(user_move)
if self.game.make_move(user_move) is False:
raise AssertionError()
class GamePlayingAgent:
def __init__(self, game: Game, algo: Algorithm = Algorithm.MinMax, **kwargs):
self.game = game
self.algo = algo
self.params = kwargs
self.agent = GameSolvingAgent(game, algo=algo)
@property
def game_ended(self):
return self.game.game_end(self.game.state)
@property
def game_state(self):
return self.game.state_repr(self.game.state)
def play(self):
_move = self.agent.predict_next_move(**self.params)
if _move:
self.agent.make_agent_move(_move)
else:
print("No possible Moves")
def request_input(self):
player = self.game.players[1]
# Verify The Player is User
assert player.user
_move = self.game.get_user_move(player)
self.agent.make_user_move(_move)
def user_move(self, pos):
player = self.game.players[1]
move = (player.mark, pos[0], pos[1])
try:
self.agent.make_user_move(move)
except AssertionError:
return False
return True
def print_result(self):
assert self.game_ended
if self.game.winner(self.game.state) == "TIE":
s = "It's a tie."
else:
s = self.game.winner(self.game.state) + ' wins'
|
[
"masterashu@live.in"
] |
masterashu@live.in
|
1a0c2038efd1f65eea978eb7cfed64dabe005087
|
0b6ed51cedd44df54e511a0bdeb28dcfb89d6c58
|
/age_func.py
|
91a82e5e1ecfd637e0aa1f61b3159319c1775946
|
[] |
no_license
|
nansencenter/nextsim-age
|
b2180971058a3f64b049bc6479cff272cdb27dc9
|
51029ce58cf3ef073d540a5564649f79de978c7a
|
refs/heads/master
| 2020-03-31T21:40:09.354760
| 2019-06-18T12:03:03
| 2019-06-18T12:03:03
| 152,589,122
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,772
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import regionmask
import cartopy
import cartopy.crs as ccrs
import pyresample as pr
import scipy.ndimage as ndimage
from pyproj import Proj, transform
def plot_pcolormesh(lons,lats,var,outname,vmin=0,vmax=1,cmap='jet',label='Variable'):
# create the figure panel
fig = plt.figure(figsize=(10,10), facecolor='w')
# create the map using the cartopy NorthPoleStereo
# +proj=stere +a=6378273 +b=6356889.44891 +lat_0=90 +lat_ts=70 +lon_0=-45"
globe = cartopy.crs.Globe(semimajor_axis=6378273, semiminor_axis=6356889.44891)
ax1 = plt.subplot(1,1,1, projection=ccrs.NorthPolarStereo(central_longitude=-45, true_scale_latitude=70, globe=globe))
ax1.set_extent([15, -180, 72, 62], crs=ccrs.PlateCarree())
# add coastlines, gridlines, make sure the projection is maximised inside the plot, and fill in the land with colour
ax1.coastlines(resolution='110m', zorder=3) # zorder=3 makes sure that no other plots overlay the coastlines
ax1.gridlines()
ax1.add_feature(cartopy.feature.LAND, zorder=1,facecolor=cartopy.feature.COLORS['land_alt1'])
# plot sea ice field
pp = plt.pcolormesh(lons,lats,var,vmin=vmin,vmax=vmax, cmap=cmap, transform=ccrs.PlateCarree())
# add the colourbar to the bottom of the plot.
# The first moves the bottom of the map up to 15% of total figure height,
# the second makes the new axes for the colourbar,
# the third makes the colourbar, and the final adds the label
fig.subplots_adjust(bottom=0.15)
cbar_ax = fig.add_axes([0.2, 0.1, 0.625, 0.033])
stp = (vmax-vmin)/10.
cbar = plt.colorbar(pp, cax=cbar_ax, orientation='horizontal', ticks=np.arange(vmin,vmax+stp,stp))
cbar.set_label(label=label,size=14, family='serif')
plt.savefig(outname,bbox_inches='tight')
plt.close()
def plot_contour(lons,lats,data,levels=[.15],colors=['purple'],lw=[1.],labels=['Variable'],outname='test.png'):
# create the figure panel
fig = plt.figure(figsize=(10,10), facecolor='w')
# create the map using the cartopy NorthPoleStereo
# +proj=stere +a=6378273 +b=6356889.44891 +lat_0=90 +lat_ts=70 +lon_0=-45"
globe = cartopy.crs.Globe(semimajor_axis=6378273, semiminor_axis=6356889.44891)
ax1 = plt.subplot(1,1,1, projection=ccrs.NorthPolarStereo(central_longitude=-45, true_scale_latitude=70, globe=globe))
ax1.set_extent([15, -180, 72, 62], crs=ccrs.PlateCarree())
# add coastlines, gridlines, make sure the projection is maximised inside the plot, and fill in the land with colour
ax1.coastlines(resolution='110m', zorder=3) # zorder=3 makes sure that no other plots overlay the coastlines
ax1.gridlines()
ax1.add_feature(cartopy.feature.LAND, zorder=1,facecolor=cartopy.feature.COLORS['land_alt1'])
# plot sea ice field
for i in range(len(levels)):
cs = plt.contour(lons,lats,data[i],levels=[levels[i]], colors=colors[i], linewidths=lw[i], transform=ccrs.PlateCarree())
cs.collections[0].set_label(labels[i])
ax1.legend(loc='upper right')
plt.savefig(outname,bbox_inches='tight')
plt.close()
def plot_contour_bg(lons,lats,bg,data,levels=[.15],colors=['purple'],lw=[1.],ls=['-'],labels=['Variable'],bg_label='Snow_depth',outname='test.png',vmin=0,vmax=1,cmap='jet',cbar=True):
# create the figure panel
fig = plt.figure(figsize=(10,10), facecolor='w')
# create the map using the cartopy NorthPoleStereo
# +proj=stere +a=6378273 +b=6356889.44891 +lat_0=90 +lat_ts=70 +lon_0=-45"
globe = cartopy.crs.Globe(semimajor_axis=6378273, semiminor_axis=6356889.44891)
ax1 = plt.subplot(1,1,1, projection=ccrs.NorthPolarStereo(central_longitude=-45, true_scale_latitude=70, globe=globe))
ax1.set_extent([15, -180, 72, 62], crs=ccrs.PlateCarree())
# add coastlines, gridlines, make sure the projection is maximised inside the plot, and fill in the land with colour
ax1.coastlines(resolution='110m', zorder=3) # zorder=3 makes sure that no other plots overlay the coastlines
ax1.gridlines()
ax1.add_feature(cartopy.feature.LAND, zorder=1,facecolor=cartopy.feature.COLORS['land_alt1'])
#plot 'background'
pp = plt.pcolormesh(lons,lats,bg,vmin=vmin,vmax=vmax, cmap=cmap, transform=ccrs.PlateCarree())
# add the colourbar to the bottom of the plot.
# plot sea ice field
handle_list = []
for i in range(len(levels)):
cs = plt.contour(lons,lats,data[i],levels=[levels[i]], colors=colors[i], linewidths=lw[i], transform=ccrs.PlateCarree())
cs.collections[0].set_label(labels[i])
handle_list.append(cs.collections[0])
ax1.legend(loc='upper right')
if cbar:
fig.subplots_adjust(bottom=0.15)
cbar_ax = fig.add_axes([0.2, 0.1, 0.625, 0.033])
cbar = plt.colorbar(pp, cax=cbar_ax, orientation='horizontal', ticks=np.arange(0,1.1,0.1))
cbar.set_label(label=bg_label,size=14, family='serif')
else:
import matplotlib.patches as mpatches
red_patch = mpatches.Patch(color='darkred', label=bg_label)
handle_list.append(red_patch)
ax1.legend(handles=handle_list, loc='upper right')
plt.savefig(outname,bbox_inches='tight')
plt.close()
def plot_quiver(x,y,u,v,outname,vmin=0,vmax=1,cmap='jet',label='Variable', scale=5):
# create the figure panel
fig = plt.figure(figsize=(10,10), facecolor='w')
# create the map using the cartopy NorthPoleStereo
# +proj=stere +a=6378273 +b=6356889.44891 +lat_0=90 +lat_ts=70 +lon_0=-45"
globe = cartopy.crs.Globe(semimajor_axis=6378273, semiminor_axis=6356889.44891)
ax1 = plt.subplot(1,1,1, projection=ccrs.NorthPolarStereo(central_longitude=-45, true_scale_latitude=70, globe=globe))
ax1.set_extent([15, -180, 72, 62], crs=ccrs.PlateCarree())
# add coastlines, gridlines, make sure the projection is maximised inside the plot, and fill in the land with colour
ax1.coastlines(resolution='110m', zorder=3) # zorder=3 makes sure that no other plots overlay the coastlines
#ax1.gridlines(crs=ccrs.PlateCarree(),xlocs=range(0,370,10),ylocs=range(60,90,5))
ax1.gridlines()
ax1.add_feature(cartopy.feature.LAND,facecolor=cartopy.feature.COLORS['land_alt1'])
# plot sea ice field
speed = np.sqrt(u**2+v**2)
pp = plt.pcolormesh(x,y,speed,vmin=vmin,vmax=vmax, cmap=cmap)
#but our northings and eastings are in the projected grid and not in lat, lon!!!!
ax1.quiver(x, y, u, v, scale=scale)
# add the colourbar to the bottom of the plot.
# The first moves the bottom of the map up to 15% of total figure height,
# the second makes the new axes for the colourbar,
# the third makes the colourbar, and the final adds the label
fig.subplots_adjust(bottom=0.15)
cbar_ax = fig.add_axes([0.2, 0.1, 0.625, 0.033])
stp = (vmax-vmin)/10.
cbar = plt.colorbar(pp, cax=cbar_ax, orientation='horizontal', ticks=np.arange(vmin,vmax+stp,stp))
cbar.set_label(label=label,size=14, family='serif')
plt.savefig(outname,bbox_inches='tight')
plt.close()
def smooth_data(data,lon,lat,coarse_lon,coarse_lat):
#smoothen the data for nicer contours with a lowpass filter
#data = ndimage.gaussian_filter(data, 3) #sigma
#regrid to equally spaced grid in latlon - otherwise there will be problems with cyclic point in contour plots
orig_def = pr.geometry.SwathDefinition(lons=lon, lats=lat)
targ_def = pr.geometry.SwathDefinition(lons=coarse_lon, lats=coarse_lat)
#coarse_def = pr.geometry.SwathDefinition(lons=coarse_lon[::5,::5], lats=coarse_lat[::5,::5])
#data_coarse = pr.kd_tree.resample_nearest(orig_def, data, coarse_def, radius_of_influence=50000, fill_value=0)
##fill all nans with 0 >> closed contours
#data_coarse = np.nan_to_num(data_coarse)
data_smooth = pr.kd_tree.resample_gauss(orig_def, data, targ_def, radius_of_influence=500000, neighbours=10, sigmas=250000, fill_value=0)
#data_smooth = pr.kd_tree.resample_nearest(coarse_def, data_coarse, targ_def, radius_of_influence=500000, fill_value=0)
#wf = lambda r: 1
#data_smooth = pr.kd_tree.resample_custom(coarse_def, data_coarse, targ_def, radius_of_influence=100000, weight_funcs=wf)
data_smooth = np.nan_to_num(data_smooth)
#plot_pcolormesh(lon,lat,myi,'test.png',vmin=0,vmax=1,label='MYI fraction')
#plot_pcolormesh(lon_g[::5],lat_g[::5],myi_coarse,'test1.png',vmin=0,vmax=1,label='MYI fraction')
#plot_pcolormesh(lon_g,lat_g,myi_smooth,'test2.png',vmin=0,vmax=1,label='MYI fraction')
#plot_contour(lon_g,lat_g,myi_smooth,'test3.png',levels=[.1], lw=[10], label='MYI extent')
return(data_smooth)
def regrid_data(data,inlon,inlat,outlon,outlat):
#regrid to equally spaced grid in latlon - otherwise there will be problems with cyclic point in contour plots
orig_def = pr.geometry.SwathDefinition(lons=inlon, lats=inlat)
targ_def = pr.geometry.SwathDefinition(lons=outlon, lats=outlat)
data = pr.kd_tree.resample_nearest(orig_def, data, targ_def, radius_of_influence=50000, fill_value=0)
#fill all nans with 0 >> closed contours
data = np.nan_to_num(data)
return(data)
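# Minimal sketch of how smooth_data/regrid_data might be driven (not part of the original module;
# the synthetic grids below are assumptions made only for illustration):
#   lon2d, lat2d = np.meshgrid(np.linspace(-180, 180, 360), np.linspace(60, 90, 120))
#   lon_c, lat_c = np.meshgrid(np.linspace(-180, 180, 72), np.linspace(60, 90, 24))
#   field = np.random.rand(*lon2d.shape)
#   field_smooth = smooth_data(field, lon2d, lat2d, lon_c, lat_c)
#   field_coarse = regrid_data(field, lon2d, lat2d, lon_c, lat_c)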
def get_poly_mask(lons,lats):
#create a geographical polygon for the Central Arctic (without the narrow band off the CAA)
#https://regionmask.readthedocs.io/en/stable/_static/notebooks/create_own_regions.html
#make two masks - one for W and one for E Arctic
#regionmask does not handle well the circular polygons around the NP
lon360 = np.where(lons<0,360+lons,lons)
#print(lon360)
#i,j coordinates of corner points can be found by exploring display in ncview
#W Arctic
poly1 = []
pt = [360,90];poly1.append(pt)
pt = [360,lats[273,115]];poly1.append(pt)
pt = [lon360[273,115],lats[273,115]];poly1.append(pt)
pt = [lon360[260,128],lats[260,128]];poly1.append(pt)
pt = [lon360[239,136],lats[239,136]];poly1.append(pt)
pt = [lon360[228,145],lats[228,145]];poly1.append(pt)
pt = [lon360[210,148],lats[210,148]];poly1.append(pt)
pt = [lon360[194,147],lats[194,147]];poly1.append(pt)
pt = [lon360[157,156],lats[157,156]];poly1.append(pt)
pt = [lon360[113,174],lats[113,174]];poly1.append(pt)
pt = [lon360[89,157],lats[89,157]];poly1.append(pt)
pt = [lon360[29,123],lats[29,123]];poly1.append(pt)
##more radical (even further away from the CAA coast)
#pt = [lon360[260,132],lats[260,132]];poly1.append(pt)
#pt = [lon360[239,140],lats[239,140]];poly1.append(pt)
#pt = [lon360[228,149],lats[228,149]];poly1.append(pt)
#pt = [lon360[210,152],lats[210,152]];poly1.append(pt)
#pt = [lon360[194,151],lats[194,151]];poly1.append(pt)
#pt = [lon360[157,160],lats[157,160]];poly1.append(pt)
#pt = [lon360[113,178],lats[113,178]];poly1.append(pt)
#pt = [lon360[65,160],lats[65,160]];poly1.append(pt)
#pt = [lon360[24,162],lats[29,162]];poly1.append(pt)
pt = [lon360[3,194],lats[3,194]];poly1.append(pt)
pt = [lon360[3,344],lats[3,344]];poly1.append(pt)
pt = [180,65];poly1.append(pt)
pt = [180,90];poly1.append(pt)
pt = [270,90];poly1.append(pt)
pt = [360,90];poly1.append(pt)
#print(poly1)
#E Arctic
poly2 = []
pt = [0,90];poly2.append(pt)
pt = [90,90];poly2.append(pt)
pt = [180,90];poly2.append(pt)
pt = [180,65];poly2.append(pt)
pt = [lon360[135,386],lats[135,386]];poly2.append(pt)
pt = [lon360[238,390],lats[238,390]];poly2.append(pt)
pt = [lon360[310,344],lats[310,344]];poly2.append(pt)
pt = [lon360[449,301],lats[449,301]];poly2.append(pt)
pt = [lon360[350,122],lats[350,122]];poly2.append(pt)
pt = [0,lats[273,115]];poly2.append(pt)
pt = [0,90];poly2.append(pt)
#print(poly2)
numbers = [0, 1]
names = ['Arctic_west', 'Arctic_east']
abbrevs = ['Aw', 'Ae']
Arctic_mask = regionmask.Regions_cls('Arctic_mask', numbers, names, abbrevs, [poly1, poly2])
##Plot polygons in Mercator projection
#ax=Arctic_mask.plot()
#ax.set_extent([-180, 180, 45, 90], ccrs.PlateCarree())
#plt.show()
#Make raster
mask = Arctic_mask.mask(lons, lats, wrap_lon=True)
#Merge mask
mask = np.where(mask>=0,1,0)
# pcolormesh does not handle NaNs, requires masked array
age_mask = np.ma.masked_invalid(mask)
##Plot mask
#outpath_plots = 'plots/run04/'
#outname = outpath_plots+'age_mask_rest.png'
#plot_pcolormesh(lons,lats,age_mask,outname,cmap='viridis',label='Central Arctic Mask=1')
#exit()
return(age_mask)
def get_dra_mask(lons,lats):
#get 'Data Release Area' mask for the Central Arctic, published by Rothrock et al, 2008
#this is the area for which submarine draft data is available (1979-2000)
#and all Kwok papers use this area to show trend extended by IS and CS-2 data
#regionmask does not handle well the circular polygons around the NP
lon360 = np.where(lons<0,360+lons,lons)
poly1 =[[360.,90.],
[360.,87.],
[345.,87.],
[300.,86.58],
[230.,80.],
[219.,80.],
[219.,70.],
[205.,72.],
[180.,74.],
[180.,90.],
[360.,90.]]
poly2 =[[ 0.,86.],
[ 0.,90.],
[180.,90.],
[180.,74.],
[175.,75.50],
[172.,78.50],
[163.,80.50],
[126.,78.50],
[110.,84.33],
[ 80.,84.42],
[ 57.,85.17],
[ 33.,83.83],
[ 8.,84.08],
[ 0.,86.]]
numbers = [0, 1]
names = ['Arctic_west', 'Arctic_east']
abbrevs = ['Aw', 'Ae']
Arctic_mask = regionmask.Regions_cls('DRA_mask', numbers, names, abbrevs, [poly1,poly2])
#Make raster
mask = Arctic_mask.mask(lons, lats, wrap_lon=True)
#Merge mask
mask = np.where(mask>=0,1,0)
# pcolormesh does not handle NaNs, requires masked array
age_mask = np.ma.masked_invalid(mask)
##Plot mask
#outpath_plots = 'plots/new/'
#outname = outpath_plots+'age_mask_DRA.png'
#plot_pcolormesh(lons,lats,age_mask,outname,cmap='viridis',label='Central Arctic Mask=1')
#exit()
return(age_mask)
def read_sir(sirfile):
#Matlab code
#fid=fopen(filename,'r','ieee-be'); %ieee-be is big endian integer in python: <i2
#head=fread(fid,[256],'short'); % read header %usage:A = fread(fileID,sizeA,precision) %short: signed integers, 16bit, 2 byte (same as int16)
data = np.fromfile(sirfile, dtype='<f')
print(data)
print(data.shape)
print(data[0])
head = data[:256]
#print(head)
with open(sirfile,'rb') as fin:
header = fin.read(256)
print(header)
    hh = np.frombuffer(header, dtype=np.int16)
nhead = hh[40] #number of data blocks
ipol = hh[44] #polarisation (valid options: 0=n/a,1=H,2=V)
idatatype = hh[47] #head(48) = idatatype ! data type code 0,2=i*2,1=i*1,4=f
print(nhead)
print(ipol)
print(idatatype)
exit()
return()
def corr_pearson(x, y):
"""
Compute Pearson correlation.
"""
x_mean = np.mean(x, axis=0)
x_stddev = np.std(x, axis=0)
y_mean = np.mean(y, axis=0)
y_stddev = np.std(y, axis=0)
x1 = (x - x_mean)/x_stddev
y1 = (y - y_mean)/y_stddev
x1y1mult = x1 * y1
x1y1sum = np.sum(x1y1mult, axis=0)
corr = x1y1sum/x.shape[0]
return corr
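# Example (illustrative): for x = np.array([1., 2., 3., 4.]) and y = 2*x + 1,
# corr_pearson(x, y) returns 1.0, since y is a perfect linear function of x.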
def corr_pearson_circ(x, y):
"""
Compute Pearson correlation for circular data (angles).
"""
#calculate means
x_mean = circmean(x,axis=0)
y_mean = circmean(y,axis=0)
#mean angle difference
diff = y_mean - x_mean
diff = np.where(diff>180,diff-360,diff)
diff = np.where(diff<-180,diff+360,diff)
#calculate residuals
resx = np.sin(np.radians(x)-np.radians(x_mean))
resy = np.sin(np.radians(y)-np.radians(y_mean))
#calculate Pearson correlation coefficient
    #this is the original formula from Jammalamadaka, 2001 (Rozman et al, 2011 has an error in the denominator - it sums before it multiplies)
stev = np.sum(resx*resy,axis=0)
imen = np.sqrt(np.sum(resx**2,axis=0)*np.sum(resy**2,axis=0))
corr = stev/imen
return x_mean,y_mean,diff,corr
def circmean(alpha,axis=None):
    #To convert from radians to degrees, multiply by (180/pi)
tod = 180/np.pi
tor = np.pi/180
sa = np.mean(np.sin(alpha*tor),axis)
ca = np.mean(np.cos(alpha*tor),axis)
mean_angle = np.arctan2(sa,ca)*tod
mean_angle = np.where(mean_angle<0,mean_angle+360,mean_angle)
return mean_angle
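# Example (illustrative): circmean(np.array([350., 10.])) gives ~0 (equivalently 360) rather than
# the naive arithmetic mean of 180, which is why corr_pearson_circ works on residual sines of the
# angles instead of the raw angle values.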
def plot_pdf(l1,l2,outname):
fig = plt.figure(figsize=(8,8), facecolor='w')
ax = plt.subplot(1,1,1)
#plot a PDF
bl = np.arange(0.,.41,.01)
#n, bins, patches = plt.hist(slist, bl, normed=True, histtype='step', color='m', alpha=.8, label='neXtSIM', lw = 3)
#n, bins, patches = plt.hist(slist_gauss, bl, normed=True, histtype='step', color='r', alpha=.8, label='neXtSIM', lw = 3)
n, bins, patches = plt.hist(np.clip(l1, bl[0], bl[-1]), bl, normed=True, histtype='step', color='darkred', alpha=.8, label='neXtSIM', lw = 3)
n, bins, patches = plt.hist(np.clip(l2, bl[0], bl[-1]), bl, normed=True, histtype='step', alpha=.8, label='OSI-SAF', lw = 3)
plt.xlim(0,20)
plt.xlabel('Speed (m/s)')
plt.ylabel('Probability')
plt.title('Probability distribution of mean 2-day speed \nfor January 2007-2015')
#plt.text(60, .025, r'$\mu=100,\ \sigma=15$')
#plt.axis([40, 160, 0, 0.03])
plt.legend(loc='upper right',prop={'size':16})
plt.grid(True)
plt.savefig(outname)
plt.close()
#from pynextsim.projection_info import ProjectionInfo
#mm = ProjectionInfo(f)
###def __init__(self,
###ecc = 0.081816153,
###a = 6378.273e3,
###lat_0 = 90.,
###lon_0 = -45.,
###lat_ts = 60.,
###proj='stere',
##print(mm.proj,mm.lat_ts,mm.lat_0,mm.lon_0,mm.a,mm.ecc)
#nextsim_proj = '+proj=%s +lat_ts=%f +lat_0=%f +lon_0=%f +a=%f +e=%f +units=m' %(mm.proj,mm.lat_ts,mm.lat_0,mm.lon_0,mm.a,0.081816153)
#inProj = Proj(nextsim_proj,preserve_units=True)
#outProj = Proj("+init=EPSG:4326") # WGS84 in degrees
#lonc,latc=transform(inProj,outProj,xc,yc)
#Hi @loniitkina, this class can't be initialised in this way.
#To get the default nextsim projection:
#proj=ProjectionInfo()
#You can also get it from
#nbi = NextsimBin(f)
#proj = nbi.mesh_info.projection
#and if you have an mppfile
#proj==ProjectionInfo.init_from_mppfile(mppfile=...)
|
[
"geopolonica@gmail.com"
] |
geopolonica@gmail.com
|
d9bf433949bfe44f549106417d3231148380ab7a
|
f0b3d4c9e6a5f8f4454adedf91db1b80c89401a7
|
/operatory.py
|
b74951f045fd8815a5b53e9c300d275522d85dee
|
[] |
no_license
|
akotwicka/Learning_Python_Udemy
|
5b31656858e8d729cc0274b3b873f9d3852e67b9
|
c3d1c93d914ae1f2d4f497181ac41de39aeb0ce0
|
refs/heads/master
| 2020-06-24T18:28:42.294106
| 2019-08-06T10:45:34
| 2019-08-06T10:45:34
| 199,046,024
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,355
|
py
|
class Cake:
bakery_offer = []
def __init__(self, name, kind, taste, additives, filling):
self.name = name
self.kind = kind
self.taste = taste
self.additives = additives.copy()
self.filling = filling
self.bakery_offer.append(self)
def show_info(self):
print("{}".format(self.name.upper()))
print("Kind: {}".format(self.kind))
print("Taste: {}".format(self.taste))
if len(self.additives) > 0:
print("Additives:")
for a in self.additives:
print("\t\t{}".format(a))
if len(self.filling) > 0:
print("Filling: {}".format(self.filling))
print('-' * 20)
def __str__(self):
return "Kind: {}, Name: {}, Additives: {}".format(self.kind, self.name, self.additives)
def __iadd__(self, other):
if type(other) is str:
self.additives.append(other)
return self
elif type(other) is list:
self.additives.extend(other)
return self
else:
raise Exception('Operation not possible')
cake01 = Cake('Vanilla Cake', 'cake', 'vanilla', ['chocolade', 'nuts'], 'cream')
print(cake01)
cake01 += "almonds"
print(cake01)
cake01 += ['lemon', 'little meringues']
print(cake01)
cake01 += 1
print(cake01)
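# Note: the "cake01 += 1" statement above raises Exception('Operation not possible') because
# __iadd__ only accepts str or list operands, so this final print() is never reached.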
|
[
"a_kotwicka@wp.pl"
] |
a_kotwicka@wp.pl
|
60d29f8f859c00b316824ed6c3fc2e5ca0436598
|
f91c71f5dd3fdef91d7db2c8ebac03b0d4b1d22b
|
/Qt/g2.py
|
240716c1c65c1cd0f1e6d83417622622b4df2e6e
|
[] |
no_license
|
vijayakumar75/py-programs
|
dd7ee9d6160e358b27498fc5e70376e146f485a2
|
a07056ddd400280cdf65b6cc57d0103d3581b54d
|
refs/heads/master
| 2020-05-04T18:45:21.811548
| 2016-08-29T11:56:42
| 2016-08-29T11:56:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,239
|
py
|
import logging
import sys
import traceback
import logging.handlers
class gi:
def __init__(self,message):
self.message=message
def getMessage(self):
return self.message
def setMessage(self,message):
self.message = message
class StreamToLogger(object):
def __init__(self, logger, log_level, std, handler):
self.handler = handler
self.logger = logger
self.log_level = log_level
self.linebuf = ''
self.std = std
self.gi = gi("")
def write(self, buf):
for line in buf.rstrip().splitlines():
self.gi.setMessage(line)
self.logger.log(self.log_level, line.rstrip())
self.std.write(line+"\n")
            self.handler.flush()
self.std.flush()
def flush(self):
self.std.flush()
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s:%(levelname)s:%(name)s:%(message)s',
filename="history.log",
filemode='a'
)
hand = logging.handlers.TimedRotatingFileHandler("bot.log", when="S", interval=20)
#my attempt at handling
stdout_logger = logging.getLogger('STDOUT')
sl = StreamToLogger(stdout_logger, logging.INFO, sys.__stdout__, hand)
stderr_logger = logging.getLogger('STDERR')
sl = StreamToLogger(stderr_logger, logging.ERROR, sys.__stderr__, hand)
for i in range(2):
sl.write("is this working")
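# Note (illustrative, not in the original script): to transparently capture print() and traceback
# output, the instances would normally be assigned back to the standard streams, e.g.
#   sys.stdout = StreamToLogger(stdout_logger, logging.INFO, sys.__stdout__, hand)
#   sys.stderr = StreamToLogger(stderr_logger, logging.ERROR, sys.__stderr__, hand)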
|
[
"girishramnani95@gmail.com"
] |
girishramnani95@gmail.com
|
acaed954e590d163f9df84e081c988bbabd00661
|
1a93478e72c6fb4528006d76d518cf3e1e4b676b
|
/medium_boardgame_bot.py
|
7ff3f910d8a8a4d2692901b11d2a50c40fe9e6cd
|
[] |
no_license
|
iamohcy/medium_boardgame_bot
|
618db0d8426e70d00bea65524f572cf6bff4b909
|
b9209849c3809156a6b8d1120259e27cf1617b84
|
refs/heads/master
| 2022-04-10T19:28:15.182807
| 2020-04-01T10:54:08
| 2020-04-01T10:54:08
| 251,350,354
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 23,184
|
py
|
TOKEN = "953155266:AAF-g0tEk7qMCZwDxheNHQZD3oGMXn5w3G0"
# TODO:
# 1) don't let /begin work multiple times
# 1) solve issue with empty entries, use original word instead
# 2) solve issue with multiple ins
# 3) solve issue with no past words
# 1) Fixed issue with /begin working multiple times
# 2) Fixed issue where original words could be re-used
# 3) Fixed issue where multiple /in commands would screw things up
# 4) Fixed issue where empty or multi-word entries were allowed
# Medium Tele Bot v1.0 Beta is done!
# 1) Added a "/left" command to see which players have yet to enter their words
# 2) Added a "/points" command to see current point tallies
# 3) Added an "/out" command to allow for people to leave the game
# 4) Modified "/help" command to print more useful information
# 5) Added reminder for new players to add the bot at @medium_boardgame_bot
# 6) Game now stops when enough players have left
# 7) **No longer need /enter command, can just type directly in to the bot chat**
# 8) You can no kick idle players using the kick_idle command
# 9) Various bug fixes and QOL improvements
import telegram
from telegram.ext import Updater, MessageHandler, CommandHandler, Filters
# import requests
from word_lib import getWords
import logging
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
def kick_idle(update, context):
chat_data = context.chat_data
chat_id = update.message.chat_id
chat_bot = context.bot
if (update.message.chat_id > 0):
context.bot.send_message(chat_id=update.message.chat_id, text="This command can only be sent in a group channel!", parse_mode=telegram.ParseMode.HTML)
return
if ("gameStarted" not in context.chat_data):
context.bot.send_message(chat_id=update.message.chat_id, text="Type /new to create a new game!", parse_mode=telegram.ParseMode.HTML)
return
if (context.chat_data["gameStarted"]):
chat_id = update.message.chat_id
context.bot.send_message(chat_id=update.message.chat_id, text="Kicking the following idle players...", parse_mode=telegram.ParseMode.HTML)
for player in context.chat_data["playersArray"]:
if (player["inGame"] == True) and (player["entry"] == None):
kickPlayer(player["id"], update, context, True)
if (context.chat_data["gameStarted"] == False):
return
checkForAllEntered(chat_data, chat_id, chat_bot)
else:
context.bot.send_message(chat_id=update.message.chat_id, text="Type /begin to begin the game first!", parse_mode=telegram.ParseMode.HTML)
def kickPlayer(userId, update, context, forced):
chat_data = context.chat_data
chat_id = update.message.chat_id
chat_bot = context.bot
player = chat_data["playersDict"][userId]
player["inGame"] = False
player["entry"] = None
if (not forced):
context.bot.send_message(chat_id=update.message.chat_id, text="Psychic <b>%s</b> has left the game!" % player["name"], parse_mode=telegram.ParseMode.HTML)
else:
context.bot.send_message(chat_id=update.message.chat_id, text="Psychic <b>%s</b> has been booted from the game!" % player["name"], parse_mode=telegram.ParseMode.HTML)
# Stop game if < 2 players
numPlayersStillInGame = 0
for player in chat_data["playersArray"]:
if (player["inGame"] == True):
numPlayersStillInGame += 1
if numPlayersStillInGame < 2:
context.bot.send_message(chat_id=update.message.chat_id, text="Not enough players to continue the game! Stopping game...", parse_mode=telegram.ParseMode.HTML)
stop(update, context)
return
def deregister_user(update, context):
chat_data = context.chat_data
chat_id = update.message.chat_id
chat_bot = context.bot
if (chat_id > 0):
context.bot.send_message(chat_id=update.message.chat_id, text="This command can only be sent in a group channel!", parse_mode=telegram.ParseMode.HTML)
return
if ("gameStarted" not in context.chat_data):
context.bot.send_message(chat_id=update.message.chat_id, text="Type /new to create a new game!", parse_mode=telegram.ParseMode.HTML)
return
userId = update.message.from_user.id
kickPlayer(userId, update, context, False)
checkForAllEntered(chat_data, chat_id, chat_bot)
def register_user(update, context):
if (update.message.chat_id > 0):
context.bot.send_message(chat_id=update.message.chat_id, text="This command can only be sent in a group channel!", parse_mode=telegram.ParseMode.HTML)
return
if ("gameStarted" not in context.chat_data):
context.bot.send_message(chat_id=update.message.chat_id, text="Type /new to create a new game!", parse_mode=telegram.ParseMode.HTML)
return
userId = update.message.from_user.id
name = update.message.from_user.first_name
context.user_data["chat_data"] = context.chat_data
context.user_data["chat_id"] = update.message.chat_id
context.user_data["chat_bot"] = context.bot
if userId not in context.chat_data["playersDict"]:
player = {"id":userId, "name":name, "entry":None, "points": 0, "inGame": True, "isMainPlayer":False}
# TEMP
if name == "Wee Loong":
player["name"] = "To Wee Or Not To Wee That Is The Question"
context.chat_data["playersArray"].append(player)
context.chat_data["playersDict"][userId] = player
context.bot.send_message(chat_id=update.message.chat_id, text="Psychic <b>%s</b> has joined the game!" % player["name"], parse_mode=telegram.ParseMode.HTML)
# Player has joined midway, send them the message
if (context.chat_data["gameStarted"]):
sendWordRequest(player, context.chat_data, context.bot)
else:
player = context.chat_data["playersDict"][userId]
if (player["inGame"] == True):
context.bot.send_message(chat_id=update.message.chat_id, text="Psychic <b>%s</b> is already in the game!" % player["name"], parse_mode=telegram.ParseMode.HTML)
else:
player["inGame"] = True
player["entry"] = None
context.bot.send_message(chat_id=update.message.chat_id, text="Psychic <b>%s</b> has re-joined the game!" % player["name"], parse_mode=telegram.ParseMode.HTML)
sendWordRequest(player, context.chat_data, context.bot)
# else:
# context.bot.send_message(chat_id=update.message.chat_id, text="Game has not yet started!", parse_mode=telegram.ParseMode.HTML)
def players_left(update, context):
if (update.message.chat_id > 0):
context.bot.send_message(chat_id=update.message.chat_id, text="This command can only be sent in a group channel!", parse_mode=telegram.ParseMode.HTML)
return
if ("gameStarted" not in context.chat_data):
context.bot.send_message(chat_id=update.message.chat_id, text="Type /new to create a new game!", parse_mode=telegram.ParseMode.HTML)
return
if (len(context.chat_data["playersArray"]) < 2):
context.bot.send_message(chat_id=update.message.chat_id, text="Waiting for game to begin!", parse_mode=telegram.ParseMode.HTML)
else:
leftText = "Still waiting for: "
for player in context.chat_data["playersArray"]:
if player["inGame"] and (player["entry"] == None):
leftText += "<b>%s</b>, " % player["name"]
leftText = leftText[0:-2]
context.bot.send_message(chat_id=update.message.chat_id, text=leftText, parse_mode=telegram.ParseMode.HTML)
POINTS_ARRAY = [10,5,2]
NON_MAIN_POINTS = 1 # points the non main players get for matching with main players
NUM_ROUNDS = len(POINTS_ARRAY)
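# Illustrative reading of these constants: with POINTS_ARRAY = [10, 5, 2] a pair that matches on
# attempt 1 scores 10 points each, attempt 2 scores 5, attempt 3 scores 2, and after NUM_ROUNDS
# (= 3) failed attempts the round is abandoned; any other player matching a main player earns
# NON_MAIN_POINTS (= 1).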
def points(update, context):
if (update.message.chat_id > 0):
context.bot.send_message(chat_id=update.message.chat_id, text="This command can only be sent in a group channel!", parse_mode=telegram.ParseMode.HTML)
return
if ("gameStarted" not in context.chat_data):
context.bot.send_message(chat_id=update.message.chat_id, text="Type /new to create a new game!", parse_mode=telegram.ParseMode.HTML)
return
printScore(context.chat_data, update.message.chat_id, context.bot)
def printScore(chat_data, chat_id, chat_bot):
# print points
pointsText = "<b>Current points:</b>\n"
for player in chat_data["playersArray"]:
if player["inGame"]:
pointsText += "<b>%s</b>: %d points\n" % (player["name"], player["points"])
else:
pointsText += "<b>%s</b> [out]: %d points\n" % (player["name"], player["points"])
chat_bot.send_message(chat_id=chat_id, text=pointsText, parse_mode=telegram.ParseMode.HTML)
def sendWordRequest(player, chat_data, chat_bot):
player["entry"] == None
chat_bot.send_message(chat_id=player["id"], text="Current words are <b>%s</b> and <b>%s</b>!" % chat_data["words"], parse_mode=telegram.ParseMode.HTML)
chat_bot.send_message(chat_id=player["id"], text="When you are ready, enter your Medium Word (just one) here!", parse_mode=telegram.ParseMode.HTML)
def sendWordRequestToAll(chat_data, chat_id, chat_bot):
for player in chat_data["playersArray"]:
if player["inGame"]:
sendWordRequest(player, chat_data, chat_bot)
def handleNewRound(chat_data, chat_id, chat_bot):
for player in chat_data["playersArray"]:
player["entry"] = None
if (chat_data["subRound"] == 0):
(wordA, wordB) = getWords()
chat_data["words"] = (wordA, wordB)
chat_data["seenWords"] = [wordA.lower(), wordB.lower()]
currentRound = chat_data["currentRound"]
currentSubRound = chat_data["subRound"]
if (currentRound > 0):
printScore(chat_data, chat_id, chat_bot)
# player1Index = currentRound % numPlayers
# player2Index = (currentRound+1) % numPlayers
# chat_data["nextPlayer1Index"] = player2Index;
numPlayers = len(chat_data["playersArray"])
potentialPlayer1Index = chat_data["nextPlayer1Index"]
# Initialize main player to false first
for player in chat_data["playersArray"]:
player["isMainPlayer"] = False
mainPlayers = []
currentIndex = chat_data["nextPlayer1Index"]
while len(mainPlayers) < 2:
potentialPlayer = chat_data["playersArray"][currentIndex]
if potentialPlayer["inGame"]:
potentialPlayer["isMainPlayer"] = True
mainPlayers.append(potentialPlayer)
chat_data["nextPlayer1Index"] = currentIndex # Index of latest player
currentIndex = (currentIndex + 1) % numPlayers
chat_data["player1"] = mainPlayers[0]
chat_data["player2"] = mainPlayers[1]
startText = "<b>Round %d - Attempt %d</b>\n\n" % (currentRound+1, currentSubRound+1)
startText += "Main players: <b>%s</b> and <b>%s</b>\n\n" % (chat_data["player1"]["name"], chat_data["player2"]["name"])
startText += "Let's get psychic! The two words are: <b>%s</b> and <b>%s</b>" % chat_data["words"]
chat_bot.send_message(chat_id=chat_id, text=startText, parse_mode=telegram.ParseMode.HTML)
sendWordRequestToAll(chat_data, chat_id, chat_bot)
def begin(update, context):
if (update.message.chat_id > 0):
context.bot.send_message(chat_id=update.message.chat_id, text="This command can only be sent in a group channel!", parse_mode=telegram.ParseMode.HTML)
return
if ("gameStarted" not in context.chat_data):
context.bot.send_message(chat_id=update.message.chat_id, text="Type /new to create a new game!", parse_mode=telegram.ParseMode.HTML)
return
if (context.chat_data["gameStarted"]):
context.bot.send_message(chat_id=update.message.chat_id, text="Game has already begun!", parse_mode=telegram.ParseMode.HTML)
elif (len(context.chat_data["playersArray"]) < 2):
context.bot.send_message(chat_id=update.message.chat_id, text="You need at least 2 players to begin a game!", parse_mode=telegram.ParseMode.HTML)
else:
context.chat_data["gameStarted"] = True
context.chat_data["currentRound"] = 0
context.chat_data["subRound"] = 0
handleNewRound(context.chat_data, update.message.chat_id, context.bot)
def new_game(update, context):
if (update.message.chat_id > 0):
context.bot.send_message(chat_id=update.message.chat_id, text="This command can only be sent in a group channel!", parse_mode=telegram.ParseMode.HTML)
return
context.chat_data["gameStarted"] = False
context.chat_data["playersArray"] = []
context.chat_data["playersDict"] = {}
context.chat_data["chat_id"] = update.message.chat_id
context.chat_data["currentRound"] = 0
context.chat_data["subRound"] = 0
context.chat_data["seenWords"] = []
context.chat_data["nextPlayer1Index"] = 0
userId = update.message.from_user.id
context.bot.send_message(chat_id=update.message.chat_id, text="New game has begun! Type '/in' to join the game! When everyone has joined, type /begin to begin the first round.", parse_mode=telegram.ParseMode.HTML)
context.bot.send_message(chat_id=update.message.chat_id, text="For completely new players, remember to add the bot by clicking @medium_boardgame_bot before joining the game!", parse_mode=telegram.ParseMode.HTML)
def help(update, context):
message = "Welcome to the Telegram Bot for the Medium Board Game!\n\n"
message += "In the game Medium, players act as psychic mediums, harnessing their powerful extra-sensory abilities to access other players’ thoughts. Together in pairs, they mentally determine the Medium: the word that connects the words on their two cards, and then attempt to say the same word at the same time!\n\n"
message += "For example, if the words are <b>fruit</b> and <b>gravity</b> a Medium Word might be <b>apple</b>. If both parties say the SAME Medium Word, they both get 10 points! Otherwise they fail and get a second attempt, except now the two new words to match are the words they've just given. If they match in the second attempt they get 5 points, and 2 if they match in the third and last attempt.\n\n"
message += "Meanwhile, other players can try to snatch 1 point by matching either of the 2 main players\n\n"
message += "To begin, add this bot at @medium_boardgame_bot and type /new to create a new game!\n\n"
context.bot.send_message(chat_id=update.message.chat_id, text=message, parse_mode=telegram.ParseMode.HTML)
def stop(update, context):
if (update.message.chat_id > 0):
context.bot.send_message(chat_id=update.message.chat_id, text="This command can only be sent in a group channel!", parse_mode=telegram.ParseMode.HTML)
return
if ("gameStarted" not in context.chat_data):
context.bot.send_message(chat_id=update.message.chat_id, text="Type /new to create a new game!", parse_mode=telegram.ParseMode.HTML)
return
pointsText = "Game ended!\n-----------------------\n<b>Current points:</b>\n"
currentMaxPoints = -1
winners = []
for player in context.chat_data["playersArray"]:
name = player["name"]
points = player["points"]
if (points > currentMaxPoints):
winners = [name]
currentMaxPoints = points
elif (points == currentMaxPoints):
winners.append(name)
pointsText += "<b>%s</b>: %d points\n" % (player["name"], player["points"])
pointsText += "\nWinner(s): "
for name in winners:
pointsText += name + ", "
pointsText = pointsText[0:-2]
context.bot.send_message(chat_id=update.message.chat_id, text=pointsText, parse_mode=telegram.ParseMode.HTML)
# Reset data
del context.chat_data["gameStarted"]
del context.chat_data["playersArray"]
del context.chat_data["playersDict"]
del context.chat_data["currentRound"]
del context.chat_data["subRound"]
del context.chat_data["seenWords"]
del context.chat_data["nextPlayer1Index"]
def checkForAllEntered(chat_data, chat_id, chat_bot):
allEntered = True
enteredCount = 0
for player in chat_data["playersArray"]:
if (player["inGame"] == True):
if (player["entry"] == None):
allEntered = False
else:
enteredCount += 1
if (allEntered):
if enteredCount <= 1:
return
chat_bot.send_message(chat_id=chat_data["chat_id"], text="Everyone has entered their words!", parse_mode=telegram.ParseMode.HTML)
currentEntry = None
testPassed = True
entryText = "<b>Main Players:</b>\n"
for player in chat_data["playersArray"]:
if player["inGame"] and player["isMainPlayer"]:
entry = player["entry"]
chat_data["seenWords"].append(entry.lower())
entryText += "Psychic %s entered - <b>%s</b>\n" % (player["name"], entry)
# Check if anyone else matched
if (len(chat_data["playersArray"]) > 2):
found = False
for player in chat_data["playersArray"]:
if player["inGame"] and (not player["isMainPlayer"]):
entry = player["entry"]
if (chat_data["player1"]["inGame"] and entry.lower() == chat_data["player1"]["entry"].lower()) or (chat_data["player2"]["inGame"] and entry.lower() == chat_data["player2"]["entry"].lower()):
# Give player one point for matching one of the main players
player["points"] += NON_MAIN_POINTS
if not found:
found = True
entryText += "\n<b>Other Players:</b>\n"
entryText += "Psychic %s also entered - <b>%s</b>! (+%d points)\n" % (player["name"], entry, NON_MAIN_POINTS)
chat_bot.send_message(chat_id=chat_id, text=entryText, parse_mode=telegram.ParseMode.HTML)
# Main player has left the game!
if not (chat_data["player1"]["inGame"] and chat_data["player2"]["inGame"]):
chat_bot.send_message(chat_id=chat_id, text="One of the main players has temporarily left the game! Moving on to the next round...", parse_mode=telegram.ParseMode.HTML)
chat_data["currentRound"] += 1
chat_data["subRound"] = 0
handleNewRound(chat_data, chat_id, chat_bot)
return
# Calculate if succeeded
succeeded = chat_data["player1"]["entry"].lower() == chat_data["player2"]["entry"].lower()
if succeeded:
numPoints = POINTS_ARRAY[chat_data["subRound"]]
chat_data["player1"]["points"] += numPoints
chat_data["player2"]["points"] += numPoints
chat_bot.send_message(chat_id=chat_id, text="Success! %s and %s get %d points each." % (chat_data["player1"]["name"], chat_data["player2"]["name"], numPoints), parse_mode=telegram.ParseMode.HTML)
chat_data["currentRound"] += 1
chat_data["subRound"] = 0
handleNewRound(chat_data, chat_id, chat_bot)
else:
chat_data["subRound"] += 1
if (chat_data["subRound"] == NUM_ROUNDS):
chat_data["currentRound"] += 1
chat_data["subRound"] = 0
chat_bot.send_message(chat_id=chat_id, text="Oops! Last attempt failed! Moving on to next round...", parse_mode=telegram.ParseMode.HTML)
handleNewRound(chat_data, chat_id, chat_bot)
else:
chat_data["words"] = (chat_data["player1"]["entry"], chat_data["player2"]["entry"])
chat_bot.send_message(chat_id=chat_id, text="Attempt %d failed! Try again with these two new words - <b>%s</b> and <b>%s</b>" % (chat_data["subRound"], chat_data["words"][0], chat_data["words"][1]), parse_mode=telegram.ParseMode.HTML)
for player in chat_data["playersArray"]:
player["entry"] = None
sendWordRequestToAll(chat_data, chat_id, chat_bot)
def test(update, context):
chat_id = update.effective_chat.id
userId = update.message.from_user.id
entry = update.message.text
context.bot.send_message(chat_id=userId, text=entry, parse_mode=telegram.ParseMode.HTML)
def enter(update, context):
chat_id = update.effective_chat.id
userId = update.message.from_user.id
entry = update.message.text
# Guarantees that this is private chat with player, rather than a group chat
if (update.message.chat_id > 0):
if ("chat_data" not in context.user_data):
context.bot.send_message(chat_id=userId, text="Game has not yet started!", parse_mode=telegram.ParseMode.HTML)
return
chat_data = context.user_data["chat_data"]
chat_bot = context.user_data["chat_bot"]
chat_id = context.user_data["chat_id"]
if ("gameStarted" in chat_data) and (chat_data["gameStarted"]):
player = chat_data["playersDict"][userId]
if player["inGame"]:
if entry.strip() == "":
context.bot.send_message(chat_id=userId, text="Empty entry detected, please try again!" % entry, parse_mode=telegram.ParseMode.HTML)
elif len(entry.split()) > 1:
context.bot.send_message(chat_id=userId, text="You can only send <b>one</b> word!" % entry, parse_mode=telegram.ParseMode.HTML)
elif entry.lower() not in chat_data["seenWords"]:
player["entry"] = entry
context.bot.send_message(chat_id=userId, text="Received! - [%s]" % entry, parse_mode=telegram.ParseMode.HTML)
else:
context.bot.send_message(chat_id=userId, text="The word <b>%s</b> has been seen this round already!" % entry, parse_mode=telegram.ParseMode.HTML)
checkForAllEntered(chat_data, chat_id, chat_bot)
else:
context.bot.send_message(chat_id=userId, text="You are currently not in the game! Type /in in the group chat to rejoin the game." % entry, parse_mode=telegram.ParseMode.HTML)
else:
context.bot.send_message(chat_id=userId, text="Game has not yet started!", parse_mode=telegram.ParseMode.HTML)
def main():
updater = Updater(token=TOKEN, use_context=True)
dispatcher = updater.dispatcher
dispatcher.add_handler(CommandHandler('new',new_game))
dispatcher.add_handler(CommandHandler('in',register_user))
dispatcher.add_handler(CommandHandler('out',deregister_user))
dispatcher.add_handler(CommandHandler('begin',begin))
# dispatcher.add_handler(CommandHandler('enter',enter))
# dispatcher.add_handler(CommandHandler('e',enter))
dispatcher.add_handler(CommandHandler('help',help))
dispatcher.add_handler(CommandHandler('stop',stop))
dispatcher.add_handler(CommandHandler('points',points))
dispatcher.add_handler(CommandHandler('left',players_left))
dispatcher.add_handler(CommandHandler('kick_idle',kick_idle))
dispatcher.add_handler(MessageHandler(Filters.text, enter))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
|
[
"iamohcy@gmail.com"
] |
iamohcy@gmail.com
|
21127b91e1a1270f520744db39ed1654c850fbe7
|
f0117325b7a40779965b35ec6cefc8d12353d779
|
/python_exercises/py_part3_ex/lecture_ex/graph.py
|
f52b6765dbabf902157c27570ddac5c742aa6dfe
|
[] |
no_license
|
joshwestbury/Digital_Crafts
|
4188e71ad631439dcb2cca9eea63d29400c37dc0
|
66c06f198d110388781a30c0ecb7902d3a8daf5a
|
refs/heads/master
| 2021-07-24T05:37:02.370341
| 2017-11-04T14:36:39
| 2017-11-04T14:36:39
| 103,189,666
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 368
|
py
|
from matplotlib import pyplot
def f(x):
return 2 * x + 1
def g(x):
return x + 1
for x in range(-3, 5):
print("f({x})={y} \t g({x})={z}".format(x=x, y=f(x), z=g(x)))
f_output = []
g_output =[]
x_list = list(range(-3, 5))
for x in x_list:
f_output.append(f(x))
g_output.append(g(x))
pyplot.plot(x_list, f_output, x_list, g_output)
pyplot.show()
|
[
"joshwestbury@gmail.com"
] |
joshwestbury@gmail.com
|
021499735f89ec69e09a67a4705a4f336386e591
|
c440f2cffdb41da5e940376c621a872f3eb93377
|
/maze_generator/mazegen
|
891aa461549c82b9050c333c361a5defb7b42e4f
|
[] |
no_license
|
stephkno/CS_21_Python
|
06cab501e811c51ba74a9f9dddcc4440a23cbb6d
|
266bc6e34670966c5491d3a7559df27dd08d188c
|
refs/heads/master
| 2023-04-30T02:39:19.868600
| 2023-04-22T01:56:21
| 2023-04-22T01:56:21
| 357,671,629
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 452
|
#!/usr/bin/python3
from lib import maze
import argparse
parser = argparse.ArgumentParser(description="Generate a random maze.")
parser.add_argument('size', help="size of maze on one side", type=int)
parser.add_argument('-r', help="render maze as ascii", action="store_true")
args = parser.parse_args()
maze = maze.Maze()
if(args.size > 0):
maze.generate(args.size)
if(args.r):
print(maze.getMazeRender())
else:
print(maze.toString())
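# Example usage (illustrative, assuming lib.maze is importable from the working directory):
#   python3 mazegen 16 -r   # generate a 16x16 maze and print the ASCII rendering
#   python3 mazegen 16      # print the maze's plain string representation instead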
|
[
"Stephen@Janet.local"
] |
Stephen@Janet.local
|
|
a3f8472743fefea8dc4b08dc4ae24ade165536c6
|
f5fd4bd6271b5a2b8bb0329b6d987bde00764b47
|
/src/cookbook/settings/base.py
|
5601ea9d2fd47db67bb097b44009c623f8661fed
|
[
"MIT"
] |
permissive
|
triump0870/cookbook
|
540151f064df5145e6f382ecb8cf80cb61461848
|
58aedc67d0bcf32a397953ae5389076edc26e1ae
|
refs/heads/master
| 2021-01-09T09:37:24.344609
| 2017-02-21T09:35:26
| 2017-02-21T09:35:26
| 82,543,051
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,541
|
py
|
"""
Django settings for recipes project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from django.core.urlresolvers import reverse_lazy
from os.path import dirname, join, exists
import mongoengine
# Build paths inside the project like this: join(BASE_DIR, "directory")
BASE_DIR = dirname(dirname(dirname(__file__)))
STATICFILES_DIRS = [join(BASE_DIR, 'static')]
MEDIA_ROOT = join(BASE_DIR, 'media')
MEDIA_URL = "/media/"
# Use Django templates using the new Django 1.8 TEMPLATES settings
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
join(BASE_DIR, 'templates'),
# insert more TEMPLATE_DIRS here
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
# Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
# list if you haven't customized them:
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# Use 12factor inspired environment variables or from a file
import environ
env = environ.Env()
# Ideally move env file should be outside the git repo
# i.e. BASE_DIR.parent.parent
env_file = join(dirname(__file__), 'local.env')
if exists(env_file):
environ.Env.read_env(str(env_file))
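# A hypothetical local.env for development could look like this (django-environ reads simple
# KEY=value lines; the values below are placeholders, not real settings):
#   SECRET_KEY=dev-only-secret-key
#   MONGODB_NAME=cookbook
#   MONGODB_DATABASE_HOST=mongodb://localhost:27017/cookbook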
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# Raises ImproperlyConfigured exception if SECRET_KEY not in os.environ
SECRET_KEY = env('SECRET_KEY')
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.auth',
'mongoengine.django.mongo_auth',
'django_admin_bootstrapped',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'crispy_forms',
'easy_thumbnails',
'rest_framework',
'rest_framework_mongoengine',
# 'profiles',
'accounts',
'recipes',
'apis',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'cookbook.urls'
WSGI_APPLICATION = 'cookbook.wsgi.application'
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
# Raises ImproperlyConfigured exception if DATABASE_URL not in
# os.environ
'default': {
"ENGINE": 'django.db.backends.dummy'
},
}
# SESSION_ENGINE = 'mongoengine.django.sessions'
# SESSION_SERIALIZER = 'mongoengine.django.sessions.BSONSerializer'
_MONGODB_DATABASE_HOST = env("MONGODB_DATABASE_HOST")
_MONGODB_NAME = env("MONGODB_NAME")
mongoengine.connect(_MONGODB_NAME, host=_MONGODB_DATABASE_HOST)
AUTHENTICATION_BACKENDS = (
'mongoengine.django.auth.MongoEngineBackend',
)
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_URL = '/static/'
# Crispy Form Theme - Bootstrap 3
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# For Bootstrap 3, change error alert to 'danger'
from django.contrib import messages
MESSAGE_TAGS = {
messages.ERROR: 'danger'
}
# Authentication Settings
AUTH_USER_MODEL = 'mongo_auth.MongoUser'
MONGOENGINE_USER_DOCUMENT = 'mongoengine.django.auth.User'
LOGIN_REDIRECT_URL = reverse_lazy("profiles:show_self")
LOGIN_URL = reverse_lazy("accounts:login")
THUMBNAIL_EXTENSION = 'png' # Or any extn for your thumbnails
|
[
"b4you0870@gmail.com"
] |
b4you0870@gmail.com
|
c5a9dce6468abc8cb2729e8cc8e6805d9b844826
|
aeadf4ba76c940f47420220a998df4b14d59539c
|
/x64/Release/benchmark.py
|
27635f51dde7a8a73843a9a5d4561aa43f1d0edd
|
[] |
no_license
|
acraddoc91/PythonCorrelationsLibrary
|
cb0baaeba89fda05f40209cfa6548a96e9d300d3
|
b284855231d1281c2c5c68bdc99be39e0aec501d
|
refs/heads/master
| 2021-09-10T12:42:06.513308
| 2018-03-26T13:09:18
| 2018-03-26T13:09:18
| 116,185,799
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,084
|
py
|
from corrLib import g2ToFile
import numpy as np
import time
import scipy.io
mat_directory = "C:/Users/Ryd Berg/Google Drive/Rydberg Experiment/Matlab/CorrelationCalculations/"
data_directory = "C:/Users/Ryd Berg/Downloads/"
data_folder = data_directory+"g2_benchmark/"
mat_file = mat_directory+"g2_n22_test_opencl"
benchmark_mat = mat_directory+"g2_benchmark_gpu_cuda"
bin_width = 82.3e-12*12
pulse_spacing = 100e-6
max_pulse_distance = 4
#half_tau_bins = np.array([200,500,1000,2000,4000])
#half_tau_bins = np.array([1,4,10,20,50,100,200,500,1000,2000,4000])
half_tau_bins = np.array([1,100,500,1000,2000,4000,8000,16000,32000,64000,128000,256000])
calc_bins = half_tau_bins * 2 + 1
time_taken = np.zeros(len(half_tau_bins))
for i in range(len(half_tau_bins)):
max_time = half_tau_bins[i] * bin_width
start_time = time.time()
g2ToFile(data_folder,mat_file,max_time,bin_width,pulse_spacing,max_pulse_distance)
time_taken[i] = time.time()-start_time
scipy.io.savemat(benchmark_mat,{'num_bins':calc_bins,'time':time_taken,'device':'Threadripper 1950x','block_size':32})
|
[
"acraddoc@umd.edu"
] |
acraddoc@umd.edu
|
9f1f38050f2114767124aaa270d7ca0007966853
|
d485ac12220d6febfe383bde45d55b3160cdc930
|
/treasury/urls.py
|
ff6d1f7c70a14d46f8715fc9d6fe161a1b7223bb
|
[] |
no_license
|
argon2008-aiti/lcidarkuman
|
03ef2b2c200ca21b57f7b8089976c8b3a1c03612
|
3e54fffdf9605edd87e7bfce134d0c5203dc72a9
|
refs/heads/master
| 2021-01-13T03:09:32.388512
| 2019-05-19T14:16:48
| 2019-05-19T14:16:48
| 77,407,801
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 404
|
py
|
from django.conf.urls import url
from django.contrib.auth import views
from django.views.generic import TemplateView
from views import *
urlpatterns = [
url(r'^offering/all/$', get_all_offerings, name="offering-all"),
url(r'^tithe/all/$', get_all_tithes, name="tithe-all"),
url(r'^tithe/add/$', add_tithe, name="tithe-add"),
url(r'^offering/add/$', add_offering, name="offering-add"),
]
|
[
"yunguta@gmail.com"
] |
yunguta@gmail.com
|
76b81aa534882a53060e39b9e6a9a654146ec745
|
a9edafa96979580729aad2eacc3f70f1eacb3152
|
/src/budy/subscription.py
|
dc3a1ecb08ebda897f5f7af76838b95304e8d460
|
[
"Apache-2.0"
] |
permissive
|
gcandal/budy_api
|
c34b9fa9fb1cf2fff61e4b14e899414dc5bc1049
|
eaac58105d95b6ac993b96e186a5f246e998b982
|
refs/heads/master
| 2021-01-18T01:11:46.190186
| 2016-08-09T14:02:29
| 2016-08-09T14:02:29
| 63,064,209
| 0
| 0
| null | 2016-07-11T12:00:32
| 2016-07-11T12:00:32
| null |
UTF-8
|
Python
| false
| false
| 1,482
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Budy API
# Copyright (c) 2008-2016 Hive Solutions Lda.
#
# This file is part of Hive Budy API.
#
# Hive Budy API is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Budy API is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Budy API. If not, see <http://www.apache.org/licenses/>.
__author__ = "João Magalhães <joamag@hive.pt>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2016 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
class SubscriptionApi(object):
def create_subscription(self, payload):
url = self.base_url + "subscriptions"
contents = self.post(url, data_j = payload, auth = False)
return contents
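# Illustrative usage (SubscriptionApi is a mixin: base_url and post() come from the composing
# API client, so the client name and payload below are assumptions, not part of this module):
#   api = SomeBudyClient(base_url = "https://budy.example.com/api/")
#   subscription = api.create_subscription(dict(email = "user@example.com"))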
|
[
"joamag@gmail.com"
] |
joamag@gmail.com
|
194ea95f9c7318ab3957d1f2ed41dce581ddeac3
|
b7a81ca6378dab90ac0e39678401d7f8842c4fed
|
/quantization/WqAq/IAO/models/util_wqaq.py
|
a66f1e8201fb486c53843577d237e12a79464d0e
|
[
"MIT"
] |
permissive
|
jth19961209/SSD-Pruning-and-quantization
|
62cfdff1c8180495c6bdc4130a1562eeced2a1b4
|
64b84dfa88a1686593addaa9941cc14579e129ee
|
refs/heads/master
| 2023-03-17T03:30:48.020816
| 2021-01-28T14:31:27
| 2021-01-28T14:31:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,922
|
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import distributed
from torch.nn import init
from torch.nn.parameter import Parameter
from torch.autograd import Function
#Quantization and Training of Neural Networks for Efficient Integer-Arithmetic-Only Inference
# ********************* range_trackers (range statistics: track the pre-quantization min/max) *********************
class RangeTracker(nn.Module):
def __init__(self, q_level):
super().__init__()
self.q_level = q_level
def update_range(self, min_val, max_val):
raise NotImplementedError
@torch.no_grad()
def forward(self, input):
        if self.q_level == 'L': # A, min_max_shape=(1, 1, 1, 1), layer-level
min_val = torch.min(input)
max_val = torch.max(input)
        elif self.q_level == 'C': # W, min_max_shape=(N, 1, 1, 1), channel-level (out, in, w, h)
min_val = torch.min(torch.min(torch.min(input, 3, keepdim=True)[0], 2, keepdim=True)[0], 1, keepdim=True)[0]
max_val = torch.max(torch.max(torch.max(input, 3, keepdim=True)[0], 2, keepdim=True)[0], 1, keepdim=True)[0]
self.update_range(min_val, max_val)
class GlobalRangeTracker(RangeTracker): # W, min_max_shape=(N, 1, 1, 1), channel-level, keeps the min/max over this and all previous batches -- (N, C, W, H)
def __init__(self, q_level, out_channels):
super().__init__(q_level)
self.register_buffer('min_val', torch.zeros(out_channels, 1, 1, 1))
self.register_buffer('max_val', torch.zeros(out_channels, 1, 1, 1))
self.register_buffer('first_w', torch.zeros(1))
def update_range(self, min_val, max_val):
temp_minval = self.min_val
temp_maxval = self.max_val
if self.first_w == 0:
self.first_w.add_(1)
self.min_val.add_(min_val)
self.max_val.add_(max_val)
else:
self.min_val.add_(-temp_minval).add_(torch.min(temp_minval, min_val))
self.max_val.add_(-temp_maxval).add_(torch.max(temp_maxval, max_val))
class AveragedRangeTracker(RangeTracker): # A, min_max_shape=(1, 1, 1, 1), layer-level, keeps a running min/max -- (N, C, W, H)
def __init__(self, q_level, momentum=0.1):
super().__init__(q_level)
self.momentum = momentum
self.register_buffer('min_val', torch.zeros(1))
self.register_buffer('max_val', torch.zeros(1))
self.register_buffer('first_a', torch.zeros(1))
def update_range(self, min_val, max_val):
if self.first_a == 0:
self.first_a.add_(1)
self.min_val.add_(min_val)
self.max_val.add_(max_val)
else:
self.min_val.mul_(1 - self.momentum).add_(min_val * self.momentum)
self.max_val.mul_(1 - self.momentum).add_(max_val * self.momentum)
# ********************* quantizers (perform the quantization) *********************
class Round(Function):
@staticmethod
def forward(self, input):
output = torch.round(input)
return output
@staticmethod
def backward(self, grad_output):
grad_input = grad_output.clone()
return grad_input
class Quantizer(nn.Module):
def __init__(self, bits, range_tracker):
super().__init__()
self.bits = bits
self.range_tracker = range_tracker
        self.register_buffer('scale', torch.zeros_like(self.range_tracker.min_val))  # quantization scale factor
        self.register_buffer('zero_point', torch.zeros_like(self.range_tracker.min_val))  # quantization zero point
def update_params(self):
raise NotImplementedError
    # quantize
    def quantize(self, input):
        output = input * self.scale - self.zero_point
        return output
def round(self, input):
output = Round.apply(input)
return output
    # clamp (truncate to the representable range)
def clamp(self, input):
output = torch.clamp(input, self.min_val, self.max_val)
return output
    # dequantize
def dequantize(self, input):
output = (input + self.zero_point) / self.scale
return output
def forward(self, input):
if self.bits == 32:
output = input
elif self.bits == 1:
print('!Binary quantization is not supported !')
assert self.bits != 1
else:
self.range_tracker(input)
self.update_params()
            output = self.quantize(input)    # quantize
            output = self.round(output)
            output = self.clamp(output)      # clamp
            output = self.dequantize(output) # dequantize
return output
class SignedQuantizer(Quantizer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.register_buffer('min_val', torch.tensor(-(1 << (self.bits - 1))))
self.register_buffer('max_val', torch.tensor((1 << (self.bits - 1)) - 1))
class UnsignedQuantizer(Quantizer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.register_buffer('min_val', torch.tensor(0))
self.register_buffer('max_val', torch.tensor((1 << self.bits) - 1))
# symmetric quantization
class SymmetricQuantizer(SignedQuantizer):
    def update_params(self):
        quantized_range = torch.min(torch.abs(self.min_val), torch.abs(self.max_val))  # range after quantization
        float_range = torch.max(torch.abs(self.range_tracker.min_val), torch.abs(self.range_tracker.max_val))  # range before quantization
        self.scale = quantized_range / float_range  # quantization scale factor
        self.zero_point = torch.zeros_like(self.scale)  # quantization zero point
# asymmetric quantization
class AsymmetricQuantizer(UnsignedQuantizer):
    def update_params(self):
        quantized_range = self.max_val - self.min_val  # range after quantization
        float_range = self.range_tracker.max_val - self.range_tracker.min_val  # range before quantization
        self.scale = quantized_range / float_range  # quantization scale factor
        self.zero_point = torch.round(self.range_tracker.min_val * self.scale)  # quantization zero point
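# Worked example (illustrative, 8-bit asymmetric): if the tracked range is [-0.5, 1.5], then
# float_range = 2.0, quantized_range = 255, scale = 127.5 and zero_point = round(-0.5 * 127.5) = -64;
# the real value 0.0 maps to the quantized integer 64 and dequantizes back to (64 + (-64)) / 127.5 = 0.0.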
# ********************* quantized convolution (quantize A and W, then convolve) *********************
class Conv2d_Q(nn.Conv2d):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
a_bits=8,
w_bits=8,
q_type=0,
first_layer=0,
):
super().__init__(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias
)
        # instantiate quantizers (A: layer-level, W: channel-level)
        if q_type == 0:  # symmetric quantization; the zero point is 0
self.activation_quantizer = SymmetricQuantizer(bits=a_bits, range_tracker=AveragedRangeTracker(q_level='L'))
self.weight_quantizer = SymmetricQuantizer(bits=w_bits, range_tracker=GlobalRangeTracker(q_level='C', out_channels=out_channels))
else:
self.activation_quantizer = AsymmetricQuantizer(bits=a_bits, range_tracker=AveragedRangeTracker(q_level='L'))
self.weight_quantizer = AsymmetricQuantizer(bits=w_bits, range_tracker=GlobalRangeTracker(q_level='C', out_channels=out_channels))
self.first_layer = first_layer
def forward(self, input):
        # quantize A and W
if not self.first_layer:
input = self.activation_quantizer(input)
q_input = input
q_weight = self.weight_quantizer(self.weight)
        # quantized convolution
output = F.conv2d(
input=q_input,
weight=q_weight,
bias=self.bias,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=self.groups
)
return output
def reshape_to_activation(input):
return input.reshape(1, -1, 1, 1)
def reshape_to_weight(input):
return input.reshape(-1, 1, 1, 1)
def reshape_to_bias(input):
return input.reshape(-1)
# ********************* BN-folded quantized convolution (fold BN, then quantize A/W and convolve) *********************
class BNFold_Conv2d_Q(Conv2d_Q):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
        bias=False,  # the BN_fold version has no bias by default
eps=1e-5,
        momentum=0.01,  # adjusted from 0.1 to 0.01 to account for the jitter introduced by quantization: lowering the weight of the batch statistics suppresses the jitter to some extent; experimentally quantized training works better, with roughly a 1% accuracy gain
a_bits=8,
w_bits=8,
q_type=0,
first_layer=0,
):
super().__init__(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias
)
self.eps = eps
self.momentum = momentum
self.gamma = Parameter(torch.Tensor(out_channels))
self.beta = Parameter(torch.Tensor(out_channels))
self.register_buffer('running_mean', torch.zeros(out_channels))
self.register_buffer('running_var', torch.ones(out_channels))
self.register_buffer('first_bn', torch.zeros(1))
init.uniform_(self.gamma)
init.zeros_(self.beta)
        # instantiate quantizers (A: layer-level, W: channel-level)
if q_type == 0:
self.activation_quantizer = SymmetricQuantizer(bits=a_bits, range_tracker=AveragedRangeTracker(q_level='L'))
self.weight_quantizer = SymmetricQuantizer(bits=w_bits, range_tracker=GlobalRangeTracker(q_level='C', out_channels=out_channels))
else:
self.activation_quantizer = AsymmetricQuantizer(bits=a_bits, range_tracker=AveragedRangeTracker(q_level='L'))
self.weight_quantizer = AsymmetricQuantizer(bits=w_bits, range_tracker=GlobalRangeTracker(q_level='C', out_channels=out_channels))
self.first_layer = first_layer
def forward(self, input):
        # training mode
        if self.training:
            # first run a plain convolution to get A, so the BN statistics can be computed
output = F.conv2d(
input=input,
weight=self.weight,
bias=self.bias,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=self.groups
)
            # update the BN statistics (batch and running)
dims = [dim for dim in range(4) if dim != 1]
batch_mean = torch.mean(output, dim=dims)
batch_var = torch.var(output, dim=dims)
with torch.no_grad():
if self.first_bn == 0:
self.first_bn.add_(1)
self.running_mean.add_(batch_mean)
self.running_var.add_(batch_var)
else:
self.running_mean.mul_(1 - self.momentum).add_(batch_mean * self.momentum)
self.running_var.mul_(1 - self.momentum).add_(batch_var * self.momentum)
            # BN folding
if self.bias is not None:
bias = reshape_to_bias(self.beta + (self.bias - batch_mean) * (self.gamma / torch.sqrt(batch_var + self.eps)))
else:
                bias = reshape_to_bias(self.beta - batch_mean * (self.gamma / torch.sqrt(batch_var + self.eps)))  # bias folded with batch statistics
            weight = self.weight * reshape_to_weight(self.gamma / torch.sqrt(self.running_var + self.eps))  # weight folded with running statistics
        # inference mode
else:
#print(self.running_mean, self.running_var)
            # BN folding
if self.bias is not None:
bias = reshape_to_bias(self.beta + (self.bias - self.running_mean) * (self.gamma / torch.sqrt(self.running_var + self.eps)))
else:
                bias = reshape_to_bias(self.beta - self.running_mean * (self.gamma / torch.sqrt(self.running_var + self.eps)))  # bias folded with running statistics
            weight = self.weight * reshape_to_weight(self.gamma / torch.sqrt(self.running_var + self.eps))  # weight folded with running statistics
        # quantize A and the BN-folded W
if not self.first_layer:
input = self.activation_quantizer(input)
q_input = input
q_weight = self.weight_quantizer(weight)
        # quantized convolution
        if self.training:  # training mode
output = F.conv2d(
input=q_input,
weight=q_weight,
                bias=self.bias,  # note: no bias is added here (self.bias is None)
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=self.groups
)
            # (here, in training mode, the effect of folding w with the running statistics is converted into folding with the batch statistics) running --> batch
output *= reshape_to_activation(torch.sqrt(self.running_var + self.eps) / torch.sqrt(batch_var + self.eps))
output += reshape_to_activation(bias)
        else:  # inference mode
output = F.conv2d(
input=q_input,
weight=q_weight,
                bias=bias,  # note: bias is added here, performing the full conv+bn
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=self.groups
)
return output
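# BN-folding recap (summary of the forward pass above):
#   w_fold = w * gamma / sqrt(var + eps)
#   b_fold = beta + (b - mean) * gamma / sqrt(var + eps)
# In training mode the bias is folded with the batch statistics while the weight is folded with the
# running variance; multiplying the conv output by sqrt(running_var + eps) / sqrt(batch_var + eps)
# converts that weight folding into an effective batch-variance folding before the bias is added.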
# the variant under the quantization folder avoids the bias being non-zero: D:\yanxue\project\量化
# strategy seen online: for activations the range depends on the input, so only exponential moving averages (EMA) are used to estimate it; at the very start of training the range changes quickly, so activation quantization is switched off entirely and only re-enabled once things stabilise.
|
[
"1067280907@qq.com"
] |
1067280907@qq.com
|
b3c7ff7640bc10a3dad3d39b85ab90664ade33f3
|
e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f
|
/indices/elus.py
|
0c2e2273602092d391eb81d1516dac677e2ff7bc
|
[] |
no_license
|
psdh/WhatsintheVector
|
e8aabacc054a88b4cb25303548980af9a10c12a8
|
a24168d068d9c69dc7a0fd13f606c080ae82e2a6
|
refs/heads/master
| 2021-01-25T10:34:22.651619
| 2015-09-23T11:54:06
| 2015-09-23T11:54:06
| 42,749,205
| 2
| 3
| null | 2015-09-23T11:54:07
| 2015-09-18T22:06:38
|
Python
|
UTF-8
|
Python
| false
| false
| 221
|
py
|
ii = [('MarrFDI.py', 1), ('AubePRP2.py', 1), ('ChalTPW2.py', 1), ('WilkJMC2.py', 1), ('MarrFDI2.py', 1), ('LyelCPG.py', 1), ('WestJIT2.py', 1), ('WheeJPT.py', 1), ('MereHHB3.py', 1), ('MereHHB.py', 1), ('MereHHB2.py', 1)]
|
[
"prabhjyotsingh95@gmail.com"
] |
prabhjyotsingh95@gmail.com
|
15f52cc761a1c7297e3a8a9b8ce9720b374d5920
|
ad9aa194cee160080a3a7047c5dd1585b2b2eb7e
|
/lieu_de_travail/admin.py
|
d7f55e6c15c0911af4df9df924f66887d67e7a72
|
[
"Apache-2.0"
] |
permissive
|
ghassen3699/Site_web_Projet
|
ba2236cd69841f6733a79d86a4f9beac27350618
|
20eca8ded72f4e798862dd5440000afe04892092
|
refs/heads/main
| 2023-08-04T22:33:48.427216
| 2021-09-20T16:50:31
| 2021-09-20T16:50:31
| 389,125,501
| 0
| 0
|
Apache-2.0
| 2021-07-26T14:04:40
| 2021-07-24T14:58:49
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 224
|
py
|
from django.contrib import admin
from . import models
admin.site.register(models.Province)
admin.site.register(models.Region)
admin.site.register(models.Commissariat_De_Police)
admin.site.register(models.Lieu_De_Travail)
|
[
"ghassenkhammessi123@icloud.com"
] |
ghassenkhammessi123@icloud.com
|
595e41579da4f5deb8fcfcbc59c1d14cbd2cae6b
|
4d332c45578246847ef2cdcdeb827ca29ab06090
|
/modules/Bio/Align/Applications/_TCoffee.py
|
d56019d21977b00c9d0b6949d80e39ddf4a7e309
|
[
"MIT"
] |
permissive
|
prateekgupta3991/justforlearn
|
616cc297a2a6119fa959b9337a5e91c77a11ebf7
|
3984c64063b356cf89003e17a914272983b6cf48
|
refs/heads/master
| 2021-03-12T22:09:12.184638
| 2014-01-28T10:37:07
| 2014-01-28T10:37:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 54
|
py
|
/usr/share/pyshared/Bio/Align/Applications/_TCoffee.py
|
[
"prateekgupta.3991@gmail.com"
] |
prateekgupta.3991@gmail.com
|
c53b377de5894d28d1e81d8b4451d37e07002ee0
|
589bec013b65761769a9d3916b22c5d1f4d7cda3
|
/main.py
|
6183dbc149e865bbaadf7b6f83bcbdbb5d8409d1
|
[] |
no_license
|
JejeDurden/TSP-Solver
|
c4330cf07f37e70135288930f5cf77893db17764
|
655d32d654093c35b2e22ebac89e51e4e0dd62d4
|
refs/heads/master
| 2021-05-14T23:51:26.693841
| 2017-09-26T11:59:26
| 2017-09-26T11:59:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,505
|
py
|
#!/usr/bin/python3
import sys
import pandas as pd
from parser import parse_args
from algos import kmeans, nearest_neighbor, two_opt, three_opt
def get_csv():
cities_graph = pd.read_csv("cities.csv", header=None)
return cities_graph
def formatting(l):
result = []
for i in range(len(l)):
result.append(l[i].name)
return result
def get_result(K, cities_graph):
result = []
if K > 1:
for i in range(0, K):
l = []
for index, city in cities_graph.iterrows():
if city["cluster"] == i:
l.append(city)
result.append(formatting(two_opt(three_opt(nearest_neighbor(l)))))
else:
l = []
for index, city in cities_graph.iterrows():
l.append(city)
result.append(formatting(two_opt(three_opt(nearest_neighbor(l)))))
return result
def file_write(result):
f = open("kopt.txt","w")
for i in range(len(result)):
l = len(result[i])
for nb in result[i]:
f.write(str(nb) + "")
l -= 1
if l > 0:
f.write(", ")
f.write("\n")
f.close()
def main(arg):
parse_args(arg)
K = int(arg[0])
cities_graph = get_csv()
cities_graph.columns = ['name', 'x', 'y']
if K > 1:
cities_graph = kmeans(cities_graph, K)
cities_graph["marked"] = False
result = get_result(K, cities_graph)
file_write(result)
if __name__ == "__main__":
main(sys.argv[1:])
|
[
"jdesmare@e6r1p4.vp.42.fr"
] |
jdesmare@e6r1p4.vp.42.fr
|
55fa06e49a994bde4419719cc785bed9c09bc0c4
|
e1ec1f5bb8829080cc8cc8a0bce880cca9528176
|
/ssd/train.py
|
2cfad7596e06c17aac074f2a168d9a900827ba22
|
[] |
no_license
|
danielhr444/SSD_EpiSci
|
26a3bfb22ed38131f02affdc2968546e7354c0de
|
cfef2037e8832ca958ebf009c5dcb39730234cc7
|
refs/heads/master
| 2020-06-24T08:42:08.303757
| 2019-07-26T00:45:44
| 2019-07-26T00:45:44
| 198,918,202
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,375
|
py
|
import time
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
from model import SSD300, MultiBoxLoss
from datasets import PascalVOCDataset
from utils import *
# Data parameters: change as needed
data_folder = './' # folder with data files
keep_difficult = True # use objects considered difficult to detect?
# Model parameters
# Not too many here since the SSD300 has a very specific structure
n_classes = len(label_map) # number of different types of objects
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Learning parameters
checkpoint = None # path to model checkpoint, None if none
batch_size = 8 # batch size
start_epoch = 0 # start at this epoch
epochs = 200 # number of epochs to run without early-stopping
epochs_since_improvement = 0 # number of epochs since there was an improvement in the validation metric
best_loss = 100. # assume a high loss at first
workers = 4 # number of workers for loading data in the DataLoader
print_freq = 200 # print training or validation status every __ batches
lr = 1e-3 # learning rate
momentum = 0.9 # momentum
weight_decay = 5e-4 # weight decay
grad_clip = None  # clip if gradients are exploding, which may happen at larger batch sizes (sometimes at 32) - you will recognize it by a sorting error in the MultiBox loss calculation
cudnn.benchmark = True
def main():
"""
Training and validation.
"""
global epochs_since_improvement, start_epoch, label_map, best_loss, epoch, checkpoint
# Initialize model or load checkpoint
if checkpoint is None:
model = SSD300(n_classes=n_classes)
# Initialize the optimizer, with twice the default learning rate for biases, as in the original Caffe repo
biases = list()
not_biases = list()
for param_name, param in model.named_parameters():
if param.requires_grad:
if param_name.endswith('.bias'):
biases.append(param)
else:
not_biases.append(param)
optimizer = torch.optim.SGD(params=[{'params': biases, 'lr': 2 * lr}, {'params': not_biases}],
lr=lr, momentum=momentum, weight_decay=weight_decay)
else:
checkpoint = torch.load(checkpoint)
start_epoch = checkpoint['epoch'] + 1
epochs_since_improvement = checkpoint['epochs_since_improvement']
best_loss = checkpoint['best_loss']
print('\nLoaded checkpoint from epoch %d. Best loss so far is %.3f.\n' % (start_epoch, best_loss))
model = checkpoint['model']
optimizer = checkpoint['optimizer']
# Move to default device
model = model.to(device)
criterion = MultiBoxLoss(priors_cxcy=model.priors_cxcy).to(device)
# Custom dataloaders
train_dataset = PascalVOCDataset(data_folder,
split='train',
keep_difficult=keep_difficult)
val_dataset = PascalVOCDataset(data_folder,
split='test',
keep_difficult=keep_difficult)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
collate_fn=train_dataset.collate_fn, num_workers=workers,
pin_memory=True) # note that we're passing the collate function here
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size, shuffle=True,
collate_fn=val_dataset.collate_fn, num_workers=workers,
pin_memory=True)
# Epochs
for epoch in range(start_epoch, epochs):
# Paper describes decaying the learning rate at the 80000th, 100000th, 120000th 'iteration', i.e. model update or batch
# The paper uses a batch size of 32, which means there were about 517 iterations in an epoch
# Therefore, to find the epochs to decay at, you could do,
# if epoch in {80000 // 517, 100000 // 517, 120000 // 517}:
# adjust_learning_rate(optimizer, 0.1)
# In practice, I just decayed the learning rate when loss stopped improving for long periods,
# and I would resume from the last best checkpoint with the new learning rate,
# since there's no point in resuming at the most recent and significantly worse checkpoint.
# So, when you're ready to decay the learning rate, just set checkpoint = 'BEST_checkpoint_ssd300.pth.tar' above
# and have adjust_learning_rate(optimizer, 0.1) BEFORE this 'for' loop
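        # Illustrative note (not in the original file): with roughly 517 iterations per epoch,
        # those decay points work out to epochs 80000 // 517 = 154, 100000 // 517 = 193,
        # and 120000 // 517 = 232.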
# One epoch's training
train(train_loader=train_loader,
model=model,
criterion=criterion,
optimizer=optimizer,
epoch=epoch)
# One epoch's validation
val_loss = validate(val_loader=val_loader,
model=model,
criterion=criterion)
# Did validation loss improve?
is_best = val_loss < best_loss
best_loss = min(val_loss, best_loss)
if not is_best:
epochs_since_improvement += 1
print("\nEpochs since last improvement: %d\n" % (epochs_since_improvement,))
else:
epochs_since_improvement = 0
# Save checkpoint
save_checkpoint(epoch, epochs_since_improvement, model, optimizer, val_loss, best_loss, is_best)
def train(train_loader, model, criterion, optimizer, epoch):
"""
One epoch's training.
:param train_loader: DataLoader for training data
:param model: model
:param criterion: MultiBox loss
:param optimizer: optimizer
:param epoch: epoch number
"""
model.train() # training mode enables dropout
batch_time = AverageMeter() # forward prop. + back prop. time
data_time = AverageMeter() # data loading time
losses = AverageMeter() # loss
start = time.time()
# Batches
for i, (images, boxes, labels, _) in enumerate(train_loader):
data_time.update(time.time() - start)
# Move to default device
images = images.to(device) # (batch_size (N), 3, 300, 300)
boxes = [b.to(device) for b in boxes]
labels = [l.to(device) for l in labels]
# Forward prop.
predicted_locs, predicted_scores = model(images) # (N, 8732, 4), (N, 8732, n_classes)
# Loss
loss = criterion(predicted_locs, predicted_scores, boxes, labels) # scalar
# Backward prop.
optimizer.zero_grad()
loss.backward()
# Clip gradients, if necessary. Used to prevent gradient explosion
if grad_clip is not None:
clip_gradient(optimizer, grad_clip)
# Update model
optimizer.step()
losses.update(loss.item(), images.size(0))
batch_time.update(time.time() - start)
start = time.time()
# Print status
if i % print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data Time {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(epoch, i, len(train_loader),
batch_time=batch_time,
data_time=data_time, loss=losses))
del predicted_locs, predicted_scores, images, boxes, labels # free some memory since their histories may be stored
def validate(val_loader, model, criterion):
"""
One epoch's validation.
:param val_loader: DataLoader for validation data
:param model: model
:param criterion: MultiBox loss
:return: average validation loss
"""
model.eval() # eval mode disables dropout
batch_time = AverageMeter()
losses = AverageMeter()
start = time.time()
    # Prohibit gradient computation explicitly because I had some problems with memory
with torch.no_grad():
# Batches
for i, (images, boxes, labels, difficulties) in enumerate(val_loader):
# Move to default device
images = images.to(device) # (N, 3, 300, 300)
boxes = [b.to(device) for b in boxes]
labels = [l.to(device) for l in labels]
# Forward prop.
predicted_locs, predicted_scores = model(images) # (N, 8732, 4), (N, 8732, n_classes)
# Loss
loss = criterion(predicted_locs, predicted_scores, boxes, labels)
losses.update(loss.item(), images.size(0))
batch_time.update(time.time() - start)
start = time.time()
# Print status
if i % print_freq == 0:
print('[{0}/{1}]\t'
'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(i, len(val_loader),
batch_time=batch_time,
loss=losses))
print('\n * LOSS - {loss.avg:.3f}\n'.format(loss=losses))
return losses.avg
if __name__ == '__main__':
main()
|
[
"user@gmail.com"
] |
user@gmail.com
|
fd3b78ffdbb57d84e5a47701783137b83a0f5135
|
ae5964d4a9ce49ef5a3f0847ff17ff7bc09a115e
|
/convert_mass_clubs.py
|
f9b3818a36c2acd88ff98dee299853b1bfe39fed
|
[] |
no_license
|
JT-Green/j-t1000
|
b711e696cde303b5f60cd78d418f069a3b7053b5
|
aaecae907709d4be527f49e2577c48a8f9d2bc13
|
refs/heads/master
| 2021-07-13T11:41:57.045713
| 2017-10-18T21:06:41
| 2017-10-18T21:06:41
| 107,462,415
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,008
|
py
|
import pandas as pd
import numpy as np
FILEPATH = r'G:\Public\National Accounts\WeeklyBestsellerImports\Temp\MassClub.xlsx'
SAVEPATH = r'G:\Public\National Accounts\WeeklyBestsellerImports\~MassClub.xlsx'
INITIAL_COLUMNS = ['\n\nAgency', '\nMaster \nChain Code',
'\nMaster \nChain Name','\n\nChain Code', '\n\nChain Name',
'Saturday \nWeek Ending\nDate','\n\nEAN', '\n\nTitle',
'\nMktg\nCode', '\nProg\nCode','\nBusiness\nChannel',
'\nVendor\nCode', '\nVendor\nName','\nPOS \nUnits',
'\nPOS \nConsumer $', 'Chain \nOn Hand\nUnits**']
DESIRED_COLUMNS = ['\n\nChain Code', '\nPOS \nUnits',
'Chain \nOn Hand\nUnits**']
CLUBS_CODES = ['BJ','CW','SA']
MASS_CODES = ['AQ','BR','DH','GA','GE','HB','JJ','KO','KZ','MJ','NA','RA','SE','SF','SY','TR','TX',"WC","WF",'WM']
def mass_club_conv(chain_code):
if chain_code in CLUBS_CODES:
return "Clubs"
if chain_code in MASS_CODES:
return "Mass"
initial_df = pd.DataFrame(pd.read_excel(FILEPATH,skip_footer=3, index_col=6,
converters={'\n\nChain Code': mass_club_conv}))
initial_df.index.names = ['EAN']
initial_df.index = initial_df.index.map(str)
middle_df = initial_df[DESIRED_COLUMNS]
clubs_df = middle_df[middle_df['\n\nChain Code'] == 'Clubs'].drop('\n\nChain Code',axis=1).groupby('EAN').sum()
clubs_df.columns = ['ClubsSold', 'ClubsOH']
mass_df = middle_df[middle_df['\n\nChain Code'] == 'Mass'].drop('\n\nChain Code',axis=1).groupby('EAN').sum()
mass_df.columns = ['MassSold', 'MassOH']
output_df = pd.DataFrame()
output_df['EAN'] = initial_df.index.values
output_df = output_df.set_index('EAN')
output_df = output_df[~output_df.index.duplicated(keep='first')]
output_df = output_df.join(clubs_df,how='outer').join(mass_df,how='outer')
output_df['ClubsYTD'] = np.NaN
output_df['MassYTD'] = np.NaN
writer = pd.ExcelWriter(SAVEPATH)
output_df.to_excel(writer)
writer.save()
|
[
"31543546+JT-Green@users.noreply.github.com"
] |
31543546+JT-Green@users.noreply.github.com
|
18d9688db1dbb88f9df1a33d3aae010adc4ebbf1
|
564d6a4d305a8ac6a7e01c761831fb2081c02d0f
|
/sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_04_01/aio/operations/_express_route_cross_connection_peerings_operations.py
|
8fbdc1d2ff97d2d24bb4eadb4d0eb5d99fa451a1
|
[
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] |
permissive
|
paultaiton/azure-sdk-for-python
|
69af4d889bac8012b38f5b7e8108707be679b472
|
d435a1a25fd6097454b7fdfbbdefd53e05029160
|
refs/heads/master
| 2023-01-30T16:15:10.647335
| 2020-11-14T01:09:50
| 2020-11-14T01:09:50
| 283,343,691
| 0
| 0
|
MIT
| 2020-07-28T22:43:43
| 2020-07-28T22:43:43
| null |
UTF-8
|
Python
| false
| false
| 21,108
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteCrossConnectionPeeringsOperations:
"""ExpressRouteCrossConnectionPeeringsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
cross_connection_name: str,
**kwargs
) -> AsyncIterable["models.ExpressRouteCrossConnectionPeeringList"]:
"""Gets all peerings in a specified ExpressRouteCrossConnection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cross_connection_name: The name of the ExpressRouteCrossConnection.
:type cross_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRouteCrossConnectionPeeringList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_04_01.models.ExpressRouteCrossConnectionPeeringList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRouteCrossConnectionPeeringList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRouteCrossConnectionPeeringList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
cross_connection_name: str,
peering_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-04-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
cross_connection_name: str,
peering_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified peering from the ExpressRouteCrossConnection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cross_connection_name: The name of the ExpressRouteCrossConnection.
:type cross_connection_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
cross_connection_name=cross_connection_name,
peering_name=peering_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}'} # type: ignore
async def get(
self,
resource_group_name: str,
cross_connection_name: str,
peering_name: str,
**kwargs
) -> "models.ExpressRouteCrossConnectionPeering":
"""Gets the specified peering for the ExpressRouteCrossConnection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cross_connection_name: The name of the ExpressRouteCrossConnection.
:type cross_connection_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCrossConnectionPeering, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_04_01.models.ExpressRouteCrossConnectionPeering
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRouteCrossConnectionPeering"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-04-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCrossConnectionPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
cross_connection_name: str,
peering_name: str,
peering_parameters: "models.ExpressRouteCrossConnectionPeering",
**kwargs
) -> "models.ExpressRouteCrossConnectionPeering":
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRouteCrossConnectionPeering"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(peering_parameters, 'ExpressRouteCrossConnectionPeering')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCrossConnectionPeering', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteCrossConnectionPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
cross_connection_name: str,
peering_name: str,
peering_parameters: "models.ExpressRouteCrossConnectionPeering",
**kwargs
) -> AsyncLROPoller["models.ExpressRouteCrossConnectionPeering"]:
"""Creates or updates a peering in the specified ExpressRouteCrossConnection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cross_connection_name: The name of the ExpressRouteCrossConnection.
:type cross_connection_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param peering_parameters: Parameters supplied to the create or update
ExpressRouteCrossConnection peering operation.
:type peering_parameters: ~azure.mgmt.network.v2018_04_01.models.ExpressRouteCrossConnectionPeering
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ExpressRouteCrossConnectionPeering or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_04_01.models.ExpressRouteCrossConnectionPeering]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRouteCrossConnectionPeering"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
cross_connection_name=cross_connection_name,
peering_name=peering_name,
peering_parameters=peering_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCrossConnectionPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}'} # type: ignore
|
[
"noreply@github.com"
] |
paultaiton.noreply@github.com
|
9a8cc9ae26df01035725f242bf5a9bc7582b02a9
|
8c46543db2ac0645fe3a90e69b9e8e13c3e9d68d
|
/test/functional/rpcnamedargs.py
|
b3a0c04741bada2996a08022bfeeef95f1093ebe
|
[
"MIT"
] |
permissive
|
LISY-Network/LISY-Network
|
4552cc21e201238969633b4d3552cea4a645c189
|
ba0a8ebc8ab79437aca9372a192a687398da5314
|
refs/heads/master
| 2023-01-14T14:12:21.328616
| 2020-10-10T04:33:15
| 2020-10-10T04:33:15
| 302,812,964
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,235
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Copyright (c) 2017 The LISYNetwork Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test using named arguments for RPCs."""
from test_framework.test_framework import LISYNetworkTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class NamedArgumentTest(LISYNetworkTestFramework):
def set_test_params(self):
self.num_nodes = 1
def run_test(self):
node = self.nodes[0]
h = node.help(command='getinfo')
assert(h.startswith('getinfo\n'))
        assert_raises_rpc_error(-8, 'Unknown named parameter', node.help, random='getinfo')
h = node.getblockhash(height=0)
node.getblock(blockhash=h)
assert_equal(node.echo(), [])
assert_equal(node.echo(arg0=0,arg9=9), [0] + [None]*8 + [9])
assert_equal(node.echo(arg1=1), [None, 1])
assert_equal(node.echo(arg9=None), [None]*10)
assert_equal(node.echo(arg0=0,arg3=3,arg9=9), [0] + [None]*2 + [3] + [None]*5 + [9])
if __name__ == '__main__':
NamedArgumentTest().main()
|
[
"lisy_network@163.com"
] |
lisy_network@163.com
|
8f71d5aa302c85c4b833a7caaa7b03ebb0f3aa3d
|
dbf65a728e020d84e730978574ee5bd9cf1af5e5
|
/products/forms.py
|
86f8ef7d82c3ce256e4d284c6cc74ba44e966888
|
[] |
no_license
|
GladkihAnton/Parfum-site
|
b65f35c5ec1ec9516491f0571a3d5e7600ef0de5
|
ecee2ff8b25bfcdd43bf76e46bf0388a5e988f4f
|
refs/heads/master
| 2023-02-16T13:21:27.169826
| 2020-07-19T13:41:38
| 2020-07-19T13:41:38
| 326,384,791
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 115
|
py
|
import csv
from django import forms
class ImportCsv(forms.Form):
file = forms.FileField(required=True)
|
[
"Antoha_1997@mail.ru"
] |
Antoha_1997@mail.ru
|
ecfa02b7fd389f8564b7bcc8227ac2c2335c8780
|
2930c21d382f182cdf4c6fbea16a16a7d95bf6c4
|
/src/train.py
|
25fa4c62a25029dd24f76d60fb9e6179370f0eb0
|
[] |
no_license
|
mylee16/food-classifier-docker
|
4e6a9b8d760ad854622e4fd06b5b6291f582cef0
|
4be4b1c2b6f0f4a6bdf2ce57cdf85cdee4070f23
|
refs/heads/master
| 2023-02-25T17:49:25.053625
| 2021-01-22T14:53:18
| 2021-01-22T14:53:18
| 330,350,555
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,158
|
py
|
import os
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import ReduceLROnPlateau, TensorBoard, EarlyStopping, ModelCheckpoint
from tensorflow.keras.optimizers import Adam
from polyaxon_client.tracking import get_outputs_path
class FoodClassifier:
def __init__(self, data_path):
self.batchsize = 16
self.train_dir = os.path.join(data_path, 'train')
self.test_dir = os.path.join(data_path, 'test')
self.train_datagen = None
self.test_datagen = None
self.train_generator = None
self.validation_generator = None
self.test_generator = None
self.model = None
tf.random.set_seed(16)
def load_data(self):
        # make sure preprocessing matches the preprocessing used by the network:
        # subtract the mean and divide by a value to do scaling
""" Split the data into train, validation, and test"""
self.train_datagen = ImageDataGenerator(
rescale=1./ 255,
shear_range=0.05,
rotation_range=20, # randomly rotate images in the range (degrees, 0 to 180)
zoom_range=[0.9, 1.1], # Randomly zoom image
width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)
height_shift_range=0.1, # randomly shift images vertically (fraction of total height)
horizontal_flip=True, # randomly flip images
brightness_range=[0.8, 1.2],
fill_mode='reflect',
validation_split=0.2)
self.test_datagen = ImageDataGenerator(rescale=1. / 255)
self.train_generator = self.train_datagen.flow_from_directory(
self.train_dir,
target_size=(224, 224),
shuffle=True,
batch_size=self.batchsize,
class_mode='categorical',
subset="training")
self.validation_generator = self.train_datagen.flow_from_directory(
self.train_dir,
target_size=(224, 224),
shuffle=True,
batch_size=self.batchsize,
class_mode='categorical',
subset="validation")
self.test_generator = self.test_datagen.flow_from_directory(
self.test_dir,
target_size=(224, 224),
shuffle=False,
batch_size=1,
class_mode='categorical')
def create_model(self, model_base='MobileNetV2', base_model_trainable=False, dense_activation=128):
"""Create the neural net with pretrained weights"""
if model_base == 'MobileNetV2':
model_base = tf.keras.applications.MobileNetV2(weights='imagenet', include_top=False,
input_shape=(224, 224, 3))
elif model_base == 'EfficientNetB0':
model_base = tf.keras.applications.EfficientNetB0(weights='imagenet', include_top=False,
input_shape=(224, 224, 3))
model_base.trainable = base_model_trainable
x = model_base.output
x = tf.keras.layers.GlobalAveragePooling2D()(x)
x = tf.keras.layers.Dense(dense_activation, activation='relu')(x)
x = tf.keras.layers.Dropout(0.25)(x)
predictions = tf.keras.layers.Dense(12, activation='softmax')(x)
self.model = tf.keras.Model(inputs=model_base.input, outputs=predictions)
def train_model(self):
"""Training the model"""
# callbacks
learning_rate_reduction = ReduceLROnPlateau(monitor='val_accuracy', patience=1, verbose=1,
factor=0.6, min_lr=0.00001)
checkpointer = ModelCheckpoint('checkpoint.h5', monitor='val_loss', verbose=1, save_best_only=True)
early_stopper = EarlyStopping(monitor='val_loss', min_delta=0, patience=3, verbose=1, mode='auto')
optimizer = Adam(learning_rate=0.01)
self.model.compile(loss='categorical_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
history = self.model.fit(self.train_generator,
epochs=20,
shuffle=True,
verbose=1,
validation_data=self.validation_generator,
callbacks=[learning_rate_reduction, checkpointer, early_stopper])
return history
def evaluate_model(self):
"""Evaluating the model"""
model_loss, model_accuracy = self.model.evaluate(self.test_generator)
return model_loss, model_accuracy
def save_model(self, directory):
"""Saving the model"""
os.makedirs(directory)
h5_directory = get_outputs_path() + '/tensorfood.h5'
# json_directory = os.path.join(directory, 'tensorfood.json')
# save .h5
self.model.save(h5_directory)
# save .json
# model_json = self.model.to_json()
# with open(json_directory, "w") as json_file:
# json_file.write(model_json)
|
[
"meng_yong_lee@aiap.sg"
] |
meng_yong_lee@aiap.sg
|
5a12fa462f8d806510b790eeb904d7f5934cdfed
|
afa2ebb439e6592caf42c507a789833b9fbf44b2
|
/pipeline/0x01-apis/3-upcoming.py
|
ccb1825e88e1888b19b9f97a17cc6215828acf01
|
[] |
no_license
|
anaruzz/holbertonschool-machine_learning
|
64c66a0f1d489434dd0946193747ed296760e6c8
|
91300120d38acb6440a6dbb8c408b1193c07de88
|
refs/heads/master
| 2023-07-30T20:09:30.416167
| 2021-09-23T16:22:40
| 2021-09-23T16:22:40
| 279,293,274
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,206
|
py
|
#!/usr/bin/env python3
"""
A script that prints:
the Name of the launch
The date (in local time)
The rocket name
The name (with the locality) of the launchpad
"""
import requests
if __name__ == "__main__":
"""
returns: the Name of the launch
The date (in local time)
The rocket name
The name (with the locality) of the launchpad
"""
url = "https://api.spacexdata.com/v4/"
req1 = requests.get(url + "launches/upcoming")
data = req1.json()
data.sort(key=lambda json: json['date_unix'])
data = data[0]
v_name = data["name"]
v_localtime = data["date_local"]
req2 = requests.get(url + "rockets/" + data["rocket"])
rock_data = req2.json()
v_rock_name = rock_data['name']
req3 = requests.get(url + "launchpads/" + data["launchpad"])
launch_data = req3.json()
v_launch_name = launch_data['name']
v_lauch_local = launch_data['locality']
print("{} ({}) {} - {} ({})".format(v_name,
v_localtime,
v_rock_name,
v_launch_name,
v_lauch_local))
|
[
"laabidigh@gmail.com"
] |
laabidigh@gmail.com
|
b62204921be03f06b20e13e3d0332571e475e7e3
|
81bad22641705683c68ff89f19362ba202891652
|
/examples/inherit_viewer_style.py
|
c4aca965d0bbad1436bc65de90221fa3f5d813fc
|
[
"BSD-3-Clause"
] |
permissive
|
sofroniewn/napari
|
ee2a39a1a1132910db6f2a47994671e8138edb51
|
beaa98efe5cf04ba659086e7a514b2ade05277af
|
refs/heads/main
| 2023-07-12T02:46:41.185932
| 2022-09-14T21:57:15
| 2022-09-14T21:57:15
| 154,751,137
| 2
| 3
|
BSD-3-Clause
| 2023-07-01T10:26:45
| 2018-10-25T23:43:01
|
Python
|
UTF-8
|
Python
| false
| false
| 2,156
|
py
|
"""
Method to get napari style in magicgui based windows
====================================================
Example how to embed magicgui widget in dialog to inherit style
from main napari window.
"""
from typing import Callable
from qtpy.QtWidgets import QDialog, QWidget, QVBoxLayout, QPushButton, QGridLayout, QLabel, QSpinBox
from magicgui import magicgui
import napari
from napari.qt import get_stylesheet
from napari.settings import get_settings
# The magicgui widget shown by selecting the 'Show widget' button of MyWidget
@magicgui
def sample_add(a: int, b: int) -> int:
return a + b
def change_style():
sample_add.native.setStyleSheet(get_stylesheet(get_settings().appearance.theme))
get_settings().appearance.events.theme.connect(change_style)
change_style()
class MyDialog(QDialog):
def __init__(self, parent=None):
super().__init__(parent)
self.first_input = QSpinBox()
self.second_input = QSpinBox()
self.btn = QPushButton('Add')
layout = QGridLayout()
layout.addWidget(QLabel("first input"), 0, 0)
layout.addWidget(self.first_input, 0, 1)
layout.addWidget(QLabel("second input"), 1, 0)
layout.addWidget(self.second_input, 1, 1)
layout.addWidget(self.btn, 2, 0, 1, 2)
self.setLayout(layout)
self.btn.clicked.connect(self.run)
def run(self):
print('run', self.first_input.value() + self.second_input.value())
self.close()
class MyWidget(QWidget):
def __init__(self):
super().__init__()
self.btn1 = QPushButton('Show dialog')
self.btn1.clicked.connect(self.show_dialog)
self.btn2 = QPushButton('Show widget')
self.btn2.clicked.connect(self.show_widget)
self.layout = QVBoxLayout()
self.layout.addWidget(self.btn1)
self.layout.addWidget(self.btn2)
self.setLayout(self.layout)
def show_dialog(self):
dialog = MyDialog(self)
dialog.exec_()
def show_widget(self):
sample_add.show()
viewer = napari.Viewer()
widget = MyWidget()
viewer.window.add_dock_widget(widget, area='right')
napari.run()
|
[
"noreply@github.com"
] |
sofroniewn.noreply@github.com
|
9877facc22b668b02cd961572f0f5c76acb4b1c6
|
7dce2f4754775f4f1bcebbddd5508d062f8a6a90
|
/_AceVision_testcodes/A3239_backside_검출/backsideBarcode.py
|
8c794a636ca88a382e6d38c6cb9177a7cb7416bb
|
[
"MIT"
] |
permissive
|
lyj911111/OpenCV_Project
|
67d6bb35c90b7a8d40c20c4de3715b49d882ade7
|
9acbfbf666188b6ebb7f2ec4500bb3ab3d2994b9
|
refs/heads/master
| 2022-12-07T19:10:01.193459
| 2020-09-17T12:48:13
| 2020-09-17T12:48:13
| 161,764,211
| 0
| 0
|
MIT
| 2022-11-22T03:31:47
| 2018-12-14T09:45:43
|
Python
|
UTF-8
|
Python
| false
| false
| 7,306
|
py
|
'''
Select an ROI of the desired area on the resized view and the
corresponding region of the original image is displayed enlarged.
"i" key : freeze the frame, enter ROI selection mode
Left mouse click-drag : select the ROI
'''
import cv2
import numpy as np
import glob
import re
import pyzbar.pyzbar as pyzbar
col, width, row, height = -1, -1, -1, -1
frame = None
frame2 = None
inputmode = False
rectangle = False
trackWindow = None
roi_hist = None
# When the 'i' key is pressed, freeze the frame and enable mouse-click mode
def onMouse(event, x, y, flags, param):
global col, width, row, height, frame, frame2, inputmode, img
global rectangle, roi_hist, trackWindow
if inputmode:
        # on left mouse click, set the rectangle flag
        if event == cv2.EVENT_LBUTTONDOWN:
            rectangle = True  # so that the mouse-move event below is handled
            col, row = x, y  # remember the coordinates of the left click
            print("left mouse click position", x, y)
        # event fired while the mouse is moving
elif event == cv2.EVENT_MOUSEMOVE:
if rectangle:
                # work on the frozen frame
frame = frame2.copy()
cv2.rectangle(frame, (col, row), (x, y), (0, 255, 0), 2)
cv2.imshow('frame', frame)
elif event == cv2.EVENT_LBUTTONUP:
print("좌표", (col, row), (x, y))
inputmode = False
rectangle = False
cv2.rectangle(frame, (col, row), (x, y), (0, 255, 0), 2)
height, width = abs(row - y), abs(col - x)
trackWindow = (col, row, width, height)
            # enlarge the selected area
displayRate(img, col, row, x, y)
# roi_hist = cv2.calcHist([roi], [0], None, [180], [0, 180])
# cv2.normalize(roi_hist, roi_hist, 0, 255 , cv2.NORM_MINMAX)
return
def decode(im):
# Find barcodes and QR codes
decodedObjects = pyzbar.decode(im)
print(decodedObjects)
# Print results
for obj in decodedObjects:
print('Type : ', obj.type)
print('Data : ', obj.data, '\n')
return decodedObjects
# Display barcode and QR code location
def display(im, decodedObjects):
# Loop over all decoded objects
for decodedObject in decodedObjects:
points = decodedObject.polygon
# If the points do not form a quad, find convex hull
if len(points) > 4:
hull = cv2.convexHull(np.array([point for point in points], dtype=np.float32))
hull = list(map(tuple, np.squeeze(hull)))
else:
hull = points
# Number of points in the convex hull
n = len(hull)
# Draw the convext hull
for j in range(0, n):
cv2.line(im, hull[j], hull[(j + 1) % n], (255, 0, 0), 3)
    # rotate by an angle to make the barcode horizontal
im = rotate_bound(im, -90)
# Display results
cv2.imshow("Results", im)
cv2.waitKey(0)
    cv2.destroyWindow('Results')  # destroy the results window
'''
Function) Enlarge the ROI selected on the resized view within the original image
Original resolution - 4024 : 3036
Resized resolution - 1260 : 960
(x-axis => 1260:4024 = 1:x)
(y-axis => 960:3036 = 1:y)
Scale factors x = 3.1936, y = 3.1625
param
    ori_img : original image
    x1, y1 : ROI start point selected on the resized image
    x2, y2 : ROI end point selected on the resized image
'''
def displayRate(ori_img, x1, y1, x2, y2):
x1 = int(x1 * 3.1936)
x2 = int(x2 * 3.1936)
y1 = int(y1 * 3.1625)
y2 = int(y2 * 3.1625)
    # enlarge the ROI region from the original image
    roi = ori_img[y1:y2, x1:x2]
    # apply contrast enhancement so the barcode becomes sharper
roi = img_Contrast(roi)
roi = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
decodedObjects = decode(roi)
display(roi, decodedObjects)
# print(st_pt)
# print(end_pt)
'''
Function) Given an image and an angle, return the rotated image.
'''
def rotate_bound(image, angle):
# grab the dimensions of the image and then determine the
# center
(h, w) = image.shape[:2]
(cX, cY) = (w // 2, h // 2)
# grab the rotation matrix (applying the negative of the
# angle to rotate clockwise), then grab the sine and cosine
# (i.e., the rotation components of the matrix)
M = cv2.getRotationMatrix2D((cX, cY), -angle, 1.0)
cos = np.abs(M[0, 0])
sin = np.abs(M[0, 1])
# compute the new bounding dimensions of the image
nW = int((h * sin) + (w * cos))
nH = int((h * cos) + (w * sin))
# adjust the rotation matrix to take into account translation
M[0, 2] += (nW / 2) - cX
M[1, 2] += (nH / 2) - cY
# perform the actual rotation and return the image
return cv2.warpAffine(image, M, (nW, nH))
'''
Function) Apply a contrast technique to make the image sharper.
param : color image
return : contrast-enhanced image
'''
def img_Contrast(img):
# -----Converting image to LAB Color model-----------------------------------
lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
# -----Splitting the LAB image to different channels-------------------------
l, a, b = cv2.split(lab)
# -----Applying CLAHE to L-channel-------------------------------------------
clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))
cl = clahe.apply(l)
# -----Merge the CLAHE enhanced L-channel with the a and b channel-----------
limg = cv2.merge((cl, a, b))
# -----Converting image from LAB Color model to RGB model--------------------
final = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)
return final
def main():
global frame2, frame, inputmode, trackWindow, roi_hist, img
    # img = cv2.imread("./img/backside.bmp")  # continuous image acquisition (treated as video frames) - good product
    img = cv2.imread("./img/backside_no.bmp")  # continuous image acquisition (treated as video frames) - defective product
    # set the resolution
    resolution = (1260, 960)
    # base frame
    cv2.namedWindow('frame')
    cv2.setMouseCallback('frame', onMouse, param=(frame, frame2))  # the mouse callback registered on the 'frame' window runs behind the scenes
    while True:
        #img = cv2.imread("./img/backside.bmp")  # continuous image acquisition (treated as video frames) - good product
        img = cv2.imread("./img/backside_no.bmp")  # continuous image acquisition (treated as video frames) - defective product
frame = img.copy()
frame = cv2.resize(frame, resolution)
# print(frame.shape)
print('continue')
cv2.imshow('frame', frame)
k = cv2.waitKey(1)
        if k == 27:  # ESC to quit
break
        # when the 'i' key is pressed, enable input mode and freeze the frame (select the ROI for barcode reading)
if k == ord('i'):
print('Select Area for Camshift and Enter a Key')
inputmode = True
frame2 = frame.copy()
while inputmode:
cv2.imshow('frame', frame)
cv2.waitKey(0)
if __name__ == "__main__":
main()
|
[
"lyj911111@naver.com"
] |
lyj911111@naver.com
|
8f33981389eeed072b029272dddb155b4a643d03
|
957f52df04cb786fd1b53b4be14ab5dfb6a0684d
|
/af_pan_mixer_test.py
|
71ebd7b3a7d5ca8edab15ba0ac9d1454e2b73160
|
[
"BSD-3-Clause"
] |
permissive
|
hvrauhal/panfiltermixer
|
34b857f02e1ed3b62d040161f639c512a380176c
|
47745e075f19c0afe19c633180fdafcad7c01244
|
refs/heads/master
| 2021-01-16T00:27:46.854864
| 2010-11-27T12:46:09
| 2010-11-27T12:46:09
| 1,034,817
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,541
|
py
|
import unittest
from af_pan_mixer import AfPanGenerator
class AfPanTests(unittest.TestCase):
def test_split_single(self):
self.assertEquals([[1]], AfPanGenerator.group_mixer_to_Is_and_Os("1",1))
def test_split_two_inputs(self):
self.assertEquals([[1,2]], AfPanGenerator.group_mixer_to_Is_and_Os("1:2",2))
def test_split_two_inputs_two_outputs(self):
self.assertEquals([[1,2],[3,4]], AfPanGenerator.group_mixer_to_Is_and_Os("1:2:3:4",2))
def test_split_three_inputs(self):
self.assertEquals([[1,2,3]], AfPanGenerator.group_mixer_to_Is_and_Os("1:2:3",3))
def test_split_three_inputs_two_outputs(self):
self.assertEquals([[1,2,3],[4,5,6]], AfPanGenerator.group_mixer_to_Is_and_Os("1:2:3:4:5:6",3))
def test_mixerstring_to_scales_and_cmdline(self):
class MockMixerString():
def get(self):
return "0.1:1.1:2.1:3.1:4.1:5.1"
class MS:
def __init__(self, value):
self.value = value
def __repr__(self):
return str(self.value)
def set(self, value):
self.value = value
afPan = AfPanGenerator(None)
afPan.channelScales=[[MS(0),MS(1),MS(2)],[MS(3),MS(4),MS(5)]]
afPan.mixerString = MockMixerString()
afPan.output_channels = 3
afPan.mixerstring_to_scales_and_cmdline()
self.assertEquals("[[0.1, 1.1, 2.1], [3.1, 4.1, 5.1]]", str(afPan.channelScales))
if __name__ == '__main__':
unittest.main()
|
[
"hvrauhal@gmail.com"
] |
hvrauhal@gmail.com
|
21ef510d70850e9e5388e69c15325c14cb3d9c0c
|
7db77c9e3bbd079e49535c526b4e47fe538c4e73
|
/python/inlPlot.py
|
b7cdf30d7f031629586a74f2c6896022869fdc98
|
[
"BSD-3-Clause"
] |
permissive
|
fermi-lat/calibGenCAL
|
d451a242c793b6ddb10cb1d523205da6a0b0279b
|
9b207d7ba56031f5ecd7aab544e68a6dedc7d776
|
refs/heads/master
| 2022-02-25T16:05:43.694826
| 2019-08-27T17:29:03
| 2019-08-27T17:29:03
| 103,186,940
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,663
|
py
|
"""
Create plots of Cal intNonlin curves
inlXML2TXT <input_xml_file> <output_root_file
where:
<input_xml_file> = input intNonlin GLAST Cal offline calibration file
<output_root_file> = output ROOT file with plots
"""
__facility__ = "Offline"
__abstract__ = "Create plots of Cal intNonlin curves"
__author__ = "Z. Fewtrell"
__date__ = "$Date: 2008/04/21 14:36:57 $"
__version__ = "$Revision: 1.1 $, $Author: fewtrell $"
__release__ = "$Name: $"
__credits__ = "NRL code 7650"
import getopt
import sys
import calCalibXML
import ROOT
import cgc_util
import array
import numarray
import calConstant
if __name__ == '__main__':
# check commandline
try:
(opts,args) = getopt.getopt(sys.argv[1:], "")
except getopt.GetoptError:
        print __doc__
sys.exit(1)
if len(args) != 2:
# should just be the one input file.
print __doc__
sys.exit(1)
# retrieve commandline parms
inName = args[0]
outName = args[1]
# open and read XML IntNonlin file
xmlFile = calCalibXML.calIntNonlinCalibXML(inName)
(lenData, dacData, adcData) = xmlFile.read()
towers = xmlFile.getTowers()
xmlFile.close()
# create output file
rootFile = ROOT.TFile(outName, "RECREATE")
# create summary plots for each ADC range
summaryHists = {}
for rng in range(calConstant.NUM_RNG):
plot_name = "inl_summary_" + calConstant.CRNG[rng]
summaryHists[rng] = ROOT.TH2S(plot_name,
plot_name,
4096,0,4096,
4096,0,4096)
for twr in towers:
print "inlPLot.py processing TEM# " + str(twr)
for offline_lyr in range(8):
# calCalibXML uses 'row' indexing, not layer
online_row = calCalibXML.layerToRow(offline_lyr)
for col in range(12):
for offline_face in range(2):
online_face = calConstant.offline_face_to_online[offline_face]
for rng in range(4):
nPts = lenData[rng][twr,online_row,online_face, col,0]
# hack: some xml files have splines w/ single point of (0,0) since dtd does not allow for missing data
if nPts <= 1:
continue
adcs = array.array('d',adcData[rng][twr,online_row,online_face, col])
dacs = array.array('d',dacData[rng][twr,online_row,online_face, col])
# plot spline method
channel_str = "T%dL%dC%dF%dR%d"%(twr,offline_lyr,col,offline_face,rng)
spline = ROOT.TSpline3(channel_str, dacs, adcs, nPts)
c = ROOT.TCanvas(channel_str, channel_str,-1)
spline.Draw("C")
g = ROOT.TGraph(nPts,
dacs,
adcs)
g.Fit("pol1","Q")
g.Draw("*")
# save plot to file
c.Write()
# write points to summary histogram
summaryHists[rng].FillN(len(adcs),
dacs,
adcs,
array.array('d',[1]*len(adcs)))
for rng in range(calConstant.NUM_RNG):
summaryHists[rng].Write()
rootFile.Close()
|
[
""
] | |
7e7b234c64a77fe9d295516ec00fe25fe309b80e
|
8b3d892bc381bb6bac78955b3d3418550fe31f98
|
/methods/image_dissimilarity/models/dissimilarity_model.py
|
4d85008da095e43a227d22dd4f198e4a9c2debfe
|
[] |
no_license
|
zhuyifan1993/road-anomaly-benchmark
|
843f1285617f268bc30895dc9e3b1843f3a135d1
|
669dee269863401637db767e940c12a7f52715e0
|
refs/heads/master
| 2023-08-29T03:54:45.026856
| 2021-10-18T19:17:23
| 2021-10-18T19:17:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 56,419
|
py
|
import torch.nn as nn
import torch
import torchvision.models
import sys
# sys.path.append("..")
# from image_dissimilarity.models.semantic_encoder import SemanticEncoder, ResNetSemanticEncoder
# from image_dissimilarity.models.vgg_features import VGGFeatures, VGGSPADE
# from image_dissimilarity.models.resnet_features import resnet
# from image_dissimilarity.models.normalization import SPADE, FILM, GuideCorrelation, GuideNormalization
#
from .semantic_encoder import SemanticEncoder
from .vgg_features import VGGFeatures, VGGSPADE
from .normalization import SPADE
class DissimNet(nn.Module):
def __init__(self, architecture='vgg16', semantic=True, pretrained=True, correlation = True, prior = False, spade='',
num_semantic_classes = 19):
super(DissimNet, self).__init__()
#get initialization parameters
self.correlation = correlation
self.spade = spade
self.semantic = semantic
# generate encoders
if self.spade == 'encoder' or self.spade == 'both':
self.vgg_encoder = VGGSPADE(pretrained=pretrained, label_nc=num_semantic_classes)
else:
self.vgg_encoder = VGGFeatures(architecture=architecture, pretrained=pretrained)
if self.semantic:
self.semantic_encoder = SemanticEncoder(architecture=architecture, in_channels=num_semantic_classes)
# layers for decoder
# all the 3x3 convolutions
if correlation:
self.conv1 = nn.Sequential(nn.Conv2d(513, 256, kernel_size=3, padding=1), nn.SELU())
self.conv12 = nn.Sequential(nn.Conv2d(513, 256, kernel_size=3, padding=1), nn.SELU())
self.conv3 = nn.Sequential(nn.Conv2d(385, 128, kernel_size=3, padding=1), nn.SELU())
self.conv5 = nn.Sequential(nn.Conv2d(193, 64, kernel_size=3, padding=1), nn.SELU())
else:
self.conv1 = nn.Sequential(nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.SELU())
self.conv12 = nn.Sequential(nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.SELU())
self.conv3 = nn.Sequential(nn.Conv2d(384, 128, kernel_size=3, padding=1), nn.SELU())
self.conv5 = nn.Sequential(nn.Conv2d(192, 64, kernel_size=3, padding=1), nn.SELU())
if self.spade == 'decoder' or self.spade == 'both':
self.conv2 = SPADEDecoderLayer(nc=256, label_nc=num_semantic_classes)
self.conv13 = SPADEDecoderLayer(nc=256, label_nc=num_semantic_classes)
self.conv4 = SPADEDecoderLayer(nc=128, label_nc=num_semantic_classes)
self.conv6 = SPADEDecoderLayer(nc=64, label_nc=num_semantic_classes)
else:
self.conv2 = nn.Sequential(nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.SELU())
self.conv13 = nn.Sequential(nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.SELU())
self.conv4 = nn.Sequential(nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.SELU())
self.conv6 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.SELU())
# all the tranposed convolutions
self.tconv1 = nn.ConvTranspose2d(256, 256, kernel_size=2, stride=2, padding=0)
self.tconv3 = nn.ConvTranspose2d(256, 256, kernel_size=2, stride=2, padding=0)
self.tconv2 = nn.ConvTranspose2d(128, 128, kernel_size=2, stride=2, padding=0)
# all the other 1x1 convolutions
if self.semantic:
self.conv7 = nn.Conv2d(1280, 512, kernel_size=1, padding=0)
self.conv8 = nn.Conv2d(640, 256, kernel_size=1, padding=0)
self.conv9 = nn.Conv2d(320, 128, kernel_size=1, padding=0)
self.conv10 = nn.Conv2d(160, 64, kernel_size=1, padding=0)
self.conv11 = nn.Conv2d(64, 2, kernel_size=1, padding=0)
else:
self.conv7 = nn.Conv2d(1024, 512, kernel_size=1, padding=0)
self.conv8 = nn.Conv2d(512, 256, kernel_size=1, padding=0)
self.conv9 = nn.Conv2d(256, 128, kernel_size=1, padding=0)
self.conv10 = nn.Conv2d(128, 64, kernel_size=1, padding=0)
self.conv11 = nn.Conv2d(64, 2, kernel_size=1, padding=0)
#self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
def forward(self, original_img, synthesis_img, semantic_img, softmax_out=False):
# get all the image encodings
if self.spade == 'encoder' or self.spade == 'both':
encoding_og = self.vgg_encoder(original_img, semantic_img)
encoding_syn = self.vgg_encoder(synthesis_img, semantic_img)
else:
encoding_og = self.vgg_encoder(original_img)
encoding_syn = self.vgg_encoder(synthesis_img)
if self.semantic:
encoding_sem = self.semantic_encoder(semantic_img)
# concatenate the output of each encoder
layer1_cat = torch.cat((encoding_og[0], encoding_syn[0], encoding_sem[0]), dim=1)
layer2_cat = torch.cat((encoding_og[1], encoding_syn[1], encoding_sem[1]), dim=1)
layer3_cat = torch.cat((encoding_og[2], encoding_syn[2], encoding_sem[2]), dim=1)
layer4_cat = torch.cat((encoding_og[3], encoding_syn[3], encoding_sem[3]), dim=1)
else:
layer1_cat = torch.cat((encoding_og[0], encoding_syn[0]), dim=1)
layer2_cat = torch.cat((encoding_og[1], encoding_syn[1]), dim=1)
layer3_cat = torch.cat((encoding_og[2], encoding_syn[2]), dim=1)
layer4_cat = torch.cat((encoding_og[3], encoding_syn[3]), dim=1)
# use 1x1 convolutions to reduce dimensions of concatenations
layer4_cat = self.conv7(layer4_cat)
layer3_cat = self.conv8(layer3_cat)
layer2_cat = self.conv9(layer2_cat)
layer1_cat = self.conv10(layer1_cat)
if self.correlation:
# get correlation for each layer (multiplication + 1x1 conv)
corr1 = torch.sum(torch.mul(encoding_og[0], encoding_syn[0]), dim=1).unsqueeze(dim=1)
corr2 = torch.sum(torch.mul(encoding_og[1], encoding_syn[1]), dim=1).unsqueeze(dim=1)
corr3 = torch.sum(torch.mul(encoding_og[2], encoding_syn[2]), dim=1).unsqueeze(dim=1)
corr4 = torch.sum(torch.mul(encoding_og[3], encoding_syn[3]), dim=1).unsqueeze(dim=1)
# concatenate correlation layers
layer4_cat = torch.cat((corr4, layer4_cat), dim = 1)
layer3_cat = torch.cat((corr3, layer3_cat), dim = 1)
layer2_cat = torch.cat((corr2, layer2_cat), dim = 1)
layer1_cat = torch.cat((corr1, layer1_cat), dim = 1)
# Run Decoder
x = self.conv1(layer4_cat)
if self.spade == 'decoder' or self.spade == 'both':
x = self.conv2(x, semantic_img)
else:
x = self.conv2(x)
x = self.tconv1(x)
x = torch.cat((x, layer3_cat), dim=1)
x = self.conv12(x)
if self.spade == 'decoder' or self.spade == 'both':
x = self.conv13(x, semantic_img)
else:
x = self.conv13(x)
x = self.tconv3(x)
x = torch.cat((x, layer2_cat), dim=1)
x = self.conv3(x)
if self.spade == 'decoder' or self.spade == 'both':
x = self.conv4(x, semantic_img)
else:
x = self.conv4(x)
x = self.tconv2(x)
x = torch.cat((x, layer1_cat), dim=1)
x = self.conv5(x)
if self.spade == 'decoder' or self.spade == 'both':
x = self.conv6(x, semantic_img)
else:
x = self.conv6(x)
logits = self.conv11(x)
return logits
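# Usage sketch (assumption, not taken from the original project's scripts): DissimNet
# expects 3-channel RGB tensors for the original and synthesized images plus a semantic
# map with `num_semantic_classes` channels; the 256x512 resolution below is arbitrary.
#
# model = DissimNet(architecture='vgg16', semantic=True, pretrained=False)
# rgb = torch.randn(1, 3, 256, 512)
# syn = torch.randn(1, 3, 256, 512)
# sem = torch.randn(1, 19, 256, 512)
# logits = model(rgb, syn, sem)  # 2-channel dissimilarity logits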
class DissimNetPrior(nn.Module):
def __init__(self, architecture='vgg16', semantic=True, pretrained=True, correlation=True, prior=False, spade='',
num_semantic_classes=19):
super(DissimNetPrior, self).__init__()
# get initialization parameters
self.correlation = correlation
self.spade = spade
# self.semantic = False if spade else semantic
self.semantic = semantic
self.prior = prior
# generate encoders
if self.spade == 'encoder' or self.spade == 'both':
self.vgg_encoder = VGGSPADE(pretrained=pretrained, label_nc=num_semantic_classes)
else:
self.vgg_encoder = VGGFeatures(architecture=architecture, pretrained=pretrained)
if self.semantic:
self.semantic_encoder = SemanticEncoder(architecture=architecture, in_channels=num_semantic_classes)
self.prior_encoder = SemanticEncoder(architecture=architecture, in_channels=3, base_feature_size=64)
# layers for decoder
# all the 3x3 convolutions
if correlation:
self.conv1 = nn.Sequential(nn.Conv2d(513, 256, kernel_size=3, padding=1), nn.SELU())
self.conv12 = nn.Sequential(nn.Conv2d(513, 256, kernel_size=3, padding=1), nn.SELU())
self.conv3 = nn.Sequential(nn.Conv2d(385, 128, kernel_size=3, padding=1), nn.SELU())
self.conv5 = nn.Sequential(nn.Conv2d(193, 64, kernel_size=3, padding=1), nn.SELU())
else:
self.conv1 = nn.Sequential(nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.SELU())
self.conv12 = nn.Sequential(nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.SELU())
self.conv3 = nn.Sequential(nn.Conv2d(384, 128, kernel_size=3, padding=1), nn.SELU())
self.conv5 = nn.Sequential(nn.Conv2d(192, 64, kernel_size=3, padding=1), nn.SELU())
if self.spade == 'decoder' or self.spade == 'both':
self.conv2 = SPADEDecoderLayer(nc=256, label_nc=num_semantic_classes)
self.conv13 = SPADEDecoderLayer(nc=256, label_nc=num_semantic_classes)
self.conv4 = SPADEDecoderLayer(nc=128, label_nc=num_semantic_classes)
self.conv6 = SPADEDecoderLayer(nc=64, label_nc=num_semantic_classes)
else:
self.conv2 = nn.Sequential(nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.SELU())
self.conv13 = nn.Sequential(nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.SELU())
self.conv4 = nn.Sequential(nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.SELU())
self.conv6 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.SELU())
        # all the transposed convolutions
self.tconv1 = nn.ConvTranspose2d(256, 256, kernel_size=2, stride=2, padding=0)
self.tconv3 = nn.ConvTranspose2d(256, 256, kernel_size=2, stride=2, padding=0)
self.tconv2 = nn.ConvTranspose2d(128, 128, kernel_size=2, stride=2, padding=0)
# all the other 1x1 convolutions
if self.semantic:
self.conv7 = nn.Conv2d(1280, 512, kernel_size=1, padding=0)
self.conv8 = nn.Conv2d(640, 256, kernel_size=1, padding=0)
self.conv9 = nn.Conv2d(320, 128, kernel_size=1, padding=0)
self.conv10 = nn.Conv2d(160, 64, kernel_size=1, padding=0)
self.conv11 = nn.Conv2d(64, 2, kernel_size=1, padding=0)
else:
self.conv7 = nn.Conv2d(1024, 512, kernel_size=1, padding=0)
self.conv8 = nn.Conv2d(512, 256, kernel_size=1, padding=0)
self.conv9 = nn.Conv2d(256, 128, kernel_size=1, padding=0)
self.conv10 = nn.Conv2d(128, 64, kernel_size=1, padding=0)
self.conv11 = nn.Conv2d(64, 2, kernel_size=1, padding=0)
# self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
def forward(self, original_img, synthesis_img, semantic_img, entropy, mae, distance, softmax_out=False):
# get all the image encodings
prior_img = torch.cat((entropy, mae, distance), dim=1)
if self.spade == 'encoder' or self.spade == 'both':
encoding_og = self.vgg_encoder(original_img, semantic_img)
encoding_syn = self.vgg_encoder(synthesis_img, semantic_img)
else:
encoding_og = self.vgg_encoder(original_img)
encoding_syn = self.vgg_encoder(synthesis_img)
if self.semantic:
encoding_sem = self.semantic_encoder(semantic_img)
# concatenate the output of each encoder
layer1_cat = torch.cat((encoding_og[0], encoding_syn[0], encoding_sem[0]), dim=1)
layer2_cat = torch.cat((encoding_og[1], encoding_syn[1], encoding_sem[1]), dim=1)
layer3_cat = torch.cat((encoding_og[2], encoding_syn[2], encoding_sem[2]), dim=1)
layer4_cat = torch.cat((encoding_og[3], encoding_syn[3], encoding_sem[3]), dim=1)
else:
layer1_cat = torch.cat((encoding_og[0], encoding_syn[0]), dim=1)
layer2_cat = torch.cat((encoding_og[1], encoding_syn[1]), dim=1)
layer3_cat = torch.cat((encoding_og[2], encoding_syn[2]), dim=1)
layer4_cat = torch.cat((encoding_og[3], encoding_syn[3]), dim=1)
# use 1x1 convolutions to reduce dimensions of concatenations
layer4_cat = self.conv7(layer4_cat)
layer3_cat = self.conv8(layer3_cat)
layer2_cat = self.conv9(layer2_cat)
layer1_cat = self.conv10(layer1_cat)
if self.prior:
            encoding_prior = self.prior_encoder(prior_img)
            layer1_cat = torch.mul(layer1_cat, encoding_prior[0])
            layer2_cat = torch.mul(layer2_cat, encoding_prior[1])
            layer3_cat = torch.mul(layer3_cat, encoding_prior[2])
            layer4_cat = torch.mul(layer4_cat, encoding_prior[3])
if self.correlation:
# get correlation for each layer (multiplication + 1x1 conv)
corr1 = torch.sum(torch.mul(encoding_og[0], encoding_syn[0]), dim=1).unsqueeze(dim=1)
corr2 = torch.sum(torch.mul(encoding_og[1], encoding_syn[1]), dim=1).unsqueeze(dim=1)
corr3 = torch.sum(torch.mul(encoding_og[2], encoding_syn[2]), dim=1).unsqueeze(dim=1)
corr4 = torch.sum(torch.mul(encoding_og[3], encoding_syn[3]), dim=1).unsqueeze(dim=1)
# concatenate correlation layers
layer4_cat = torch.cat((corr4, layer4_cat), dim=1)
layer3_cat = torch.cat((corr3, layer3_cat), dim=1)
layer2_cat = torch.cat((corr2, layer2_cat), dim=1)
layer1_cat = torch.cat((corr1, layer1_cat), dim=1)
# Run Decoder
x = self.conv1(layer4_cat)
if self.spade == 'decoder' or self.spade == 'both':
x = self.conv2(x, semantic_img)
else:
x = self.conv2(x)
x = self.tconv1(x)
x = torch.cat((x, layer3_cat), dim=1)
x = self.conv12(x)
if self.spade == 'decoder' or self.spade == 'both':
x = self.conv13(x, semantic_img)
else:
x = self.conv13(x)
x = self.tconv3(x)
x = torch.cat((x, layer2_cat), dim=1)
x = self.conv3(x)
if self.spade == 'decoder' or self.spade == 'both':
x = self.conv4(x, semantic_img)
else:
x = self.conv4(x)
x = self.tconv2(x)
x = torch.cat((x, layer1_cat), dim=1)
x = self.conv5(x)
if self.spade == 'decoder' or self.spade == 'both':
x = self.conv6(x, semantic_img)
else:
x = self.conv6(x)
logits = self.conv11(x)
return logits
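# Usage sketch (assumption, not taken from the original project's scripts): in addition
# to the DissimNet inputs, DissimNetPrior takes entropy, mae and distance maps, assumed
# here to be single-channel; forward() concatenates them into the 3-channel input of
# the prior encoder.
#
# model = DissimNetPrior(prior=True, pretrained=False)
# rgb = torch.randn(1, 3, 256, 512)
# syn = torch.randn(1, 3, 256, 512)
# sem = torch.randn(1, 19, 256, 512)
# ent, mae, dist = (torch.randn(1, 1, 256, 512) for _ in range(3))
# logits = model(rgb, syn, sem, ent, mae, dist)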
# class ResNetDissimNet(nn.Module):
# def __init__(self, architecture='resnet18', semantic=True, pretrained=True, correlation=True, spade='',
# num_semantic_classes = 19):
# super(ResNetDissimNet, self).__init__()
#
# # get initialization parameters
# self.correlation = correlation
# self.spade = spade
# self.semantic = False if spade else semantic
#
# # generate encoders
# if self.spade == 'encoder' or self.spade == 'both':
# raise NotImplementedError()
# #self.encoder = VGGSPADE()
# else:
# self.encoder = resnet(architecture=architecture, pretrained=pretrained)
#
# if self.semantic:
# self.semantic_encoder = ResNetSemanticEncoder()
#
# # layers for decoder
# # all the 3x3 convolutions
# if correlation:
# self.conv1 = nn.Sequential(nn.Conv2d(513, 256, kernel_size=3, padding=1), nn.SELU())
# self.conv12 = nn.Sequential(nn.Conv2d(513, 256, kernel_size=3, padding=1), nn.SELU())
# self.conv3 = nn.Sequential(nn.Conv2d(385, 128, kernel_size=3, padding=1), nn.SELU())
# self.conv5 = nn.Sequential(nn.Conv2d(193, 64, kernel_size=3, padding=1), nn.SELU())
#
# else:
# self.conv1 = nn.Sequential(nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.SELU())
# self.conv12 = nn.Sequential(nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.SELU())
# self.conv3 = nn.Sequential(nn.Conv2d(384, 128, kernel_size=3, padding=1), nn.SELU())
# self.conv5 = nn.Sequential(nn.Conv2d(192, 64, kernel_size=3, padding=1), nn.SELU())
#
# if self.spade == 'decoder' or self.spade == 'both':
# self.conv2 = SPADEDecoderLayer(nc=256, label_nc=num_semantic_classes)
# self.conv13 = SPADEDecoderLayer(nc=256, label_nc=num_semantic_classes)
# self.conv4 = SPADEDecoderLayer(nc=128, label_nc=num_semantic_classes)
# self.conv6 = SPADEDecoderLayer(nc=64, label_nc=num_semantic_classes)
# else:
# self.conv2 = nn.Sequential(nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.SELU())
# self.conv13 = nn.Sequential(nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.SELU())
# self.conv4 = nn.Sequential(nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.SELU())
# self.conv6 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.SELU())
#
# # all the tranposed convolutions
# self.tconv1 = nn.ConvTranspose2d(256, 256, kernel_size=2, stride=2, padding=0)
# self.tconv5 = nn.ConvTranspose2d(256, 256, kernel_size=2, stride=2, padding=0)
# self.tconv2 = nn.ConvTranspose2d(128, 128, kernel_size=2, stride=2, padding=0)
# self.tconv3 = nn.ConvTranspose2d(64, 64, kernel_size=2, stride=2, padding=0)
# self.tconv4 = nn.ConvTranspose2d(64, 32, kernel_size=2, stride=2, padding=0)
#
# # all the other 1x1 convolutions
# if self.semantic:
# self.conv7 = nn.Conv2d(1280, 512, kernel_size=1, padding=0)
# self.conv8 = nn.Conv2d(640, 256, kernel_size=1, padding=0)
# self.conv9 = nn.Conv2d(320, 128, kernel_size=1, padding=0)
# self.conv10 = nn.Conv2d(160, 64, kernel_size=1, padding=0)
# self.conv11 = nn.Conv2d(32, 2, kernel_size=1, padding=0)
# else:
# self.conv7 = nn.Conv2d(1024, 512, kernel_size=1, padding=0)
# self.conv8 = nn.Conv2d(512, 256, kernel_size=1, padding=0)
# self.conv9 = nn.Conv2d(256, 128, kernel_size=1, padding=0)
# self.conv10 = nn.Conv2d(128, 64, kernel_size=1, padding=0)
# self.conv11 = nn.Conv2d(32, 2, kernel_size=1, padding=0)
#
# # self._initialize_weights()
#
# def _initialize_weights(self):
# for m in self.modules():
# if isinstance(m, nn.Conv2d):
# nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
# if m.bias is not None:
# nn.init.constant_(m.bias, 0)
# elif isinstance(m, nn.BatchNorm2d):
# nn.init.constant_(m.weight, 1)
# nn.init.constant_(m.bias, 0)
# elif isinstance(m, nn.Linear):
# nn.init.normal_(m.weight, 0, 0.01)
# nn.init.constant_(m.bias, 0)
#
# def forward(self, original_img, synthesis_img, semantic_img, softmax_out=False):
# # get all the image encodings
# if self.spade == 'encoder' or self.spade == 'both':
# self.encoding_og = self.encoder(original_img, semantic_img)
# self.encoding_syn = self.encoder(synthesis_img, semantic_img)
# else:
# self.encoding_og = self.encoder(original_img)
# self.encoding_syn = self.encoder(synthesis_img)
#
# if self.semantic:
# self.encoding_sem = self.semantic_encoder(semantic_img)
# # concatenate the output of each encoder
# layer1_cat = torch.cat((self.encoding_og[0], self.encoding_syn[0], self.encoding_sem[0]), dim=1)
# layer2_cat = torch.cat((self.encoding_og[1], self.encoding_syn[1], self.encoding_sem[1]), dim=1)
# layer3_cat = torch.cat((self.encoding_og[2], self.encoding_syn[2], self.encoding_sem[2]), dim=1)
# layer4_cat = torch.cat((self.encoding_og[3], self.encoding_syn[3], self.encoding_sem[3]), dim=1)
# else:
# layer1_cat = torch.cat((self.encoding_og[0], self.encoding_syn[0]), dim=1)
# layer2_cat = torch.cat((self.encoding_og[1], self.encoding_syn[1]), dim=1)
# layer3_cat = torch.cat((self.encoding_og[2], self.encoding_syn[2]), dim=1)
# layer4_cat = torch.cat((self.encoding_og[3], self.encoding_syn[3]), dim=1)
#
# # use 1x1 convolutions to reduce dimensions of concatenations
# layer4_cat = self.conv7(layer4_cat)
# layer3_cat = self.conv8(layer3_cat)
# layer2_cat = self.conv9(layer2_cat)
# layer1_cat = self.conv10(layer1_cat)
#
# if self.correlation:
# # get correlation for each layer (multiplication + 1x1 conv)
# corr1 = torch.sum(torch.mul(self.encoding_og[0], self.encoding_syn[0]), dim=1).unsqueeze(dim=1)
# corr2 = torch.sum(torch.mul(self.encoding_og[1], self.encoding_syn[1]), dim=1).unsqueeze(dim=1)
# corr3 = torch.sum(torch.mul(self.encoding_og[2], self.encoding_syn[2]), dim=1).unsqueeze(dim=1)
# corr4 = torch.sum(torch.mul(self.encoding_og[3], self.encoding_syn[3]), dim=1).unsqueeze(dim=1)
#
# # concatenate correlation layers
# layer4_cat = torch.cat((corr4, layer4_cat), dim=1)
# layer3_cat = torch.cat((corr3, layer3_cat), dim=1)
# layer2_cat = torch.cat((corr2, layer2_cat), dim=1)
# layer1_cat = torch.cat((corr1, layer1_cat), dim=1)
#
# # Run Decoder
# x = self.conv1(layer4_cat)
# if self.spade == 'decoder' or self.spade == 'both':
# x = self.conv2(x, semantic_img)
# else:
# x = self.conv2(x)
# x = self.tconv1(x)
#
# x = torch.cat((x, layer3_cat), dim=1)
# x = self.conv12(x)
# if self.spade == 'decoder' or self.spade == 'both':
# x = self.conv13(x, semantic_img)
# else:
# x = self.conv13(x)
# x = self.tconv5(x)
#
# x = torch.cat((x, layer2_cat), dim=1)
# x = self.conv3(x)
# if self.spade == 'decoder' or self.spade == 'both':
# x = self.conv4(x, semantic_img)
# else:
# x = self.conv4(x)
# x = self.tconv2(x)
#
# x = torch.cat((x, layer1_cat), dim=1)
# x = self.conv5(x)
# if self.spade == 'decoder' or self.spade == 'both':
# x = self.conv6(x, semantic_img)
# else:
# x = self.conv6(x)
# x = self.tconv3(x)
# x = self.tconv4(x)
#
# x = self.conv11(x)
#
# self.final_prediction = x
#
# return self.final_prediction
#
# class GuidedDissimNet(nn.Module):
# def __init__(self, architecture='vgg16', semantic=True, pretrained=True, correlation = True, spade=True,
# num_semantic_classes = 19):
# super(GuidedDissimNet, self).__init__()
#
# vgg_pretrained_features = torchvision.models.vgg16_bn(pretrained=pretrained).features
#
# # Encoder
# self.norm_layer_1 = FILM(nc=64, guide_nc=64)
# self.norm_layer_2 = FILM(nc=64, guide_nc=64)
# self.norm_layer_3 = FILM(nc=128, guide_nc=128)
# self.norm_layer_4 = FILM(nc=128, guide_nc=128)
# self.norm_layer_5 = FILM(nc=256, guide_nc=256)
# self.norm_layer_6 = FILM(nc=256, guide_nc=256)
# self.norm_layer_7 = FILM(nc=256, guide_nc=256)
# self.norm_layer_8 = FILM(nc=512, guide_nc=512)
# self.norm_layer_9 = FILM(nc=512, guide_nc=512)
# self.norm_layer_10 = FILM(nc=512, guide_nc=512)
# self.norm_layer_11 = FILM(nc=64, guide_nc=64)
# self.norm_layer_12 = FILM(nc=64, guide_nc=64)
# self.norm_layer_13 = FILM(nc=128, guide_nc=128)
# self.norm_layer_14 = FILM(nc=128, guide_nc=128)
# self.norm_layer_15 = FILM(nc=256, guide_nc=256)
# self.norm_layer_16 = FILM(nc=256, guide_nc=256)
# self.norm_layer_17 = FILM(nc=256, guide_nc=256)
# self.norm_layer_18 = FILM(nc=512, guide_nc=512)
# self.norm_layer_19 = FILM(nc=512, guide_nc=512)
# self.norm_layer_20 = FILM(nc=512, guide_nc=512)
#
# # TODO Reformat to make it more efficient/clean code
# self.slice1 = nn.Sequential()
# self.slice2 = nn.Sequential()
# self.slice3 = nn.Sequential()
# self.slice4 = nn.Sequential()
# self.slice5 = nn.Sequential()
# self.slice6 = nn.Sequential()
# self.slice7 = nn.Sequential()
# self.slice8 = nn.Sequential()
# self.slice9 = nn.Sequential()
# self.slice10 = nn.Sequential()
# self.slice11 = nn.Sequential()
# self.slice12 = nn.Sequential()
# self.slice13 = nn.Sequential()
# self.slice14 = nn.Sequential()
# self.slice15 = nn.Sequential()
# self.slice16 = nn.Sequential()
# self.slice17 = nn.Sequential()
# self.slice18 = nn.Sequential()
# self.slice19 = nn.Sequential()
# self.slice20 = nn.Sequential()
# self.slice21 = nn.Sequential()
# self.slice22 = nn.Sequential()
# self.slice23 = nn.Sequential()
# self.slice24 = nn.Sequential()
# self.slice25 = nn.Sequential()
# self.slice26 = nn.Sequential()
# self.slice27 = nn.Sequential()
# self.slice28 = nn.Sequential()
#
# for x in range(1):
# self.slice1.add_module(str(x), vgg_pretrained_features[x])
# self.slice15.add_module(str(x), vgg_pretrained_features[x])
# for x in range(2, 4):
# self.slice2.add_module(str(x), vgg_pretrained_features[x])
# self.slice16.add_module(str(x), vgg_pretrained_features[x])
# for x in range(5, 6):
# self.slice3.add_module(str(x), vgg_pretrained_features[x])
# self.slice17.add_module(str(x), vgg_pretrained_features[x])
# for x in range(6, 8):
# self.slice4.add_module(str(x), vgg_pretrained_features[x])
# self.slice18.add_module(str(x), vgg_pretrained_features[x])
# for x in range(9, 11):
# self.slice5.add_module(str(x), vgg_pretrained_features[x])
# self.slice19.add_module(str(x), vgg_pretrained_features[x])
# for x in range(12, 13):
# self.slice6.add_module(str(x), vgg_pretrained_features[x])
# self.slice20.add_module(str(x), vgg_pretrained_features[x])
# for x in range(13, 15):
# self.slice7.add_module(str(x), vgg_pretrained_features[x])
# self.slice21.add_module(str(x), vgg_pretrained_features[x])
# for x in range(16, 18):
# self.slice8.add_module(str(x), vgg_pretrained_features[x])
# self.slice22.add_module(str(x), vgg_pretrained_features[x])
# for x in range(19, 21):
# self.slice9.add_module(str(x), vgg_pretrained_features[x])
# self.slice23.add_module(str(x), vgg_pretrained_features[x])
# for x in range(22, 23):
# self.slice10.add_module(str(x), vgg_pretrained_features[x])
# self.slice24.add_module(str(x), vgg_pretrained_features[x])
# for x in range(23, 25):
# self.slice11.add_module(str(x), vgg_pretrained_features[x])
# self.slice25.add_module(str(x), vgg_pretrained_features[x])
# for x in range(26, 28):
# self.slice12.add_module(str(x), vgg_pretrained_features[x])
# self.slice26.add_module(str(x), vgg_pretrained_features[x])
# for x in range(29, 31):
# self.slice13.add_module(str(x), vgg_pretrained_features[x])
# self.slice27.add_module(str(x), vgg_pretrained_features[x])
# for x in range(32, 33):
# self.slice14.add_module(str(x), vgg_pretrained_features[x])
# self.slice28.add_module(str(x), vgg_pretrained_features[x])
#
# # layers for decoder
# # all the 3x3 convolutions
# self.conv1 = nn.Sequential(nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.SELU())
# self.conv12 = nn.Sequential(nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.SELU())
# self.conv3 = nn.Sequential(nn.Conv2d(384, 128, kernel_size=3, padding=1), nn.SELU())
# self.conv5 = nn.Sequential(nn.Conv2d(192, 64, kernel_size=3, padding=1), nn.SELU())
#
# # spade decoder
# self.conv2 = SPADEDecoderLayer(nc=256, label_nc=num_semantic_classes)
# self.conv13 = SPADEDecoderLayer(nc=256, label_nc=num_semantic_classes)
# self.conv4 = SPADEDecoderLayer(nc=128, label_nc=num_semantic_classes)
# self.conv6 = SPADEDecoderLayer(nc=64, label_nc=num_semantic_classes)
#
# # all the tranposed convolutions
# self.tconv1 = nn.ConvTranspose2d(256, 256, kernel_size=2, stride=2, padding=0)
# self.tconv2 = nn.ConvTranspose2d(128, 128, kernel_size=2, stride=2, padding=0)
#
# # all the other 1x1 convolutions
# self.conv7 = nn.Conv2d(1024, 512, kernel_size=1, padding=0)
# self.conv8 = nn.Conv2d(512, 256, kernel_size=1, padding=0)
# self.conv9 = nn.Conv2d(256, 128, kernel_size=1, padding=0)
# self.conv10 = nn.Conv2d(128, 64, kernel_size=1, padding=0)
# self.conv11 = nn.Conv2d(64, 2, kernel_size=1, padding=0)
#
# # self._initialize_weights()
#
# def _initialize_weights(self):
# for m in self.modules():
# if isinstance(m, nn.Conv2d):
# nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
# if m.bias is not None:
# nn.init.constant_(m.bias, 0)
# elif isinstance(m, nn.BatchNorm2d):
# nn.init.constant_(m.weight, 1)
# nn.init.constant_(m.bias, 0)
# elif isinstance(m, nn.Linear):
# nn.init.normal_(m.weight, 0, 0.01)
# nn.init.constant_(m.bias, 0)
#
# def forward(self, original_img, synthesis_img, semantic_img):
# # get all the image encodings
# og_1 = self.slice1(original_img)
# syn_1 = self.slice15(synthesis_img)
#
# og_2 = self.norm_layer_1(og_1, syn_1)
# syn_2 = self.norm_layer_11(og_1, syn_1)
#
# og_1 = self.slice2(og_2)
# syn_1 = self.slice16(syn_2)
#
# layer1_og = self.slice3(self.norm_layer_2(og_1, syn_1))
# layer1_syn = self.slice17(self.norm_layer_12(og_1, syn_1))
#
# og_1 = self.slice4(layer1_og)
# syn_1 = self.slice18(layer1_syn)
#
# og_2 = self.norm_layer_3(og_1, syn_1)
# syn_2 = self.norm_layer_13(og_1, syn_1)
#
# og_1 = self.slice5(og_2)
# syn_1 = self.slice19(syn_2)
#
# layer2_og = self.slice6(self.norm_layer_4(og_1, syn_1))
# layer2_syn = self.slice20(self.norm_layer_14(og_1, syn_1))
#
# og_1 = self.slice7(layer2_og)
# syn_1 = self.slice21(layer2_syn)
#
# og_2 = self.norm_layer_5(og_1, syn_1)
# syn_2 = self.norm_layer_15(og_1, syn_1)
#
# og_1 = self.slice8(og_2)
# syn_1 = self.slice22(syn_2)
#
# og_2 = self.norm_layer_6(og_1, syn_1)
# syn_2 = self.norm_layer_16(og_1, syn_1)
#
# og_1 = self.slice9(og_2)
# syn_1 = self.slice23(syn_2)
#
# layer3_og = self.slice10(self.norm_layer_7(og_1, syn_1))
# layer3_syn = self.slice24(self.norm_layer_17(og_1, syn_1))
#
# og_1 = self.slice11(layer3_og)
# syn_1 = self.slice25(layer3_syn)
#
# og_2 = self.norm_layer_8(og_1, syn_1)
# syn_2 = self.norm_layer_18(og_1, syn_1)
#
# og_1 = self.slice12(og_2)
# syn_1 = self.slice26(syn_2)
#
# og_2 = self.norm_layer_9(og_1, syn_1)
# syn_2 = self.norm_layer_19(og_1, syn_1)
#
# og_1 = self.slice13(og_2)
# syn_1 = self.slice27(syn_2)
#
# layer4_og = self.slice14(self.norm_layer_10(og_1, syn_1))
# layer4_syn = self.slice28(self.norm_layer_20(og_1, syn_1))
#
# # concatenate the output of each encoder
# layer1_cat = torch.cat((layer1_og, layer1_syn), dim=1)
# layer2_cat = torch.cat((layer2_og, layer2_syn), dim=1)
# layer3_cat = torch.cat((layer3_og, layer3_syn), dim=1)
# layer4_cat = torch.cat((layer4_og, layer4_syn), dim=1)
#
# # use 1x1 convolutions to reduce dimensions of concatenations
# layer4_cat = self.conv7(layer4_cat)
# layer3_cat = self.conv8(layer3_cat)
# layer2_cat = self.conv9(layer2_cat)
# layer1_cat = self.conv10(layer1_cat)
#
# # Run Decoder
# x = self.conv1(layer4_cat)
# x = self.conv2(x, semantic_img)
# x = self.tconv1(x)
#
# x = torch.cat((x, layer3_cat), dim=1)
# x = self.conv12(x)
# x = self.conv13(x, semantic_img)
# x = self.tconv1(x)
#
# x = torch.cat((x, layer2_cat), dim=1)
# x = self.conv3(x)
# x = self.conv4(x, semantic_img)
# x = self.tconv2(x)
#
# x = torch.cat((x, layer1_cat), dim=1)
# x = self.conv5(x)
# x = self.conv6(x, semantic_img)
# x = self.conv11(x)
#
# self.final_prediction = x
#
# return self.final_prediction
#
# class CorrelatedDissimNet(nn.Module):
# def __init__(self, architecture='vgg16', semantic=True, pretrained=True, correlation=True, spade=True,
# num_semantic_classes = 19):
# super(CorrelatedDissimNet, self).__init__()
#
# self.spade = spade
#
# # layers for encoder
# self.og_gel1 = GuideEncoderLayer(nc_in=3, nc_out=64)
# self.syn_gel1 = GuideEncoderLayer(nc_in=3, nc_out=64)
#
# self.og_conv1 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
# self.syn_conv1 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
# self.og_gc1 = GuideCorrelation(nc=64, guide_nc=64)
# self.og_gc2 = GuideCorrelation(nc=64, guide_nc=num_semantic_classes)
# self.og_gn1 = GuideNormalization(nc=64)
# self.og_relu1 = nn.ReLU(inplace=True)
# self.syn_gc1 = GuideCorrelation(nc=64, guide_nc=64)
# self.syn_gc2 = GuideCorrelation(nc=64, guide_nc=num_semantic_classes)
# self.syn_gn1 = GuideNormalization(nc=64)
# self.syn_relu1 = nn.ReLU(inplace=True)
# self.og_max1 = nn.MaxPool2d(kernel_size=2, stride=2)
# self.syn_max1 = nn.MaxPool2d(kernel_size=2, stride=2)
#
# self.og_gel2 = GuideEncoderLayer(nc_in=64, nc_out=128)
# self.syn_gel2 = GuideEncoderLayer(nc_in=64, nc_out=128)
#
# self.og_conv2 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
# self.syn_conv2 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
# self.og_gc3 = GuideCorrelation(nc=128, guide_nc=128)
# self.og_gc4 = GuideCorrelation(nc=128, guide_nc=num_semantic_classes)
# self.og_gn2 = GuideNormalization(nc=128)
# self.og_relu2 = nn.ReLU(inplace=True)
# self.syn_gc3 = GuideCorrelation(nc=128, guide_nc=128)
# self.syn_gc4 = GuideCorrelation(nc=128, guide_nc=num_semantic_classes)
# self.syn_gn2 = GuideNormalization(nc=128)
# self.syn_relu2 = nn.ReLU(inplace=True)
# self.og_max2 = nn.MaxPool2d(kernel_size=2, stride=2)
# self.syn_max2 = nn.MaxPool2d(kernel_size=2, stride=2)
#
# self.og_gel3 = GuideEncoderLayer(nc_in=128, nc_out=256)
# self.syn_gel3 = GuideEncoderLayer(nc_in=128, nc_out=256)
# self.og_gel4 = GuideEncoderLayer(nc_in=256, nc_out=256)
# self.syn_gel4 = GuideEncoderLayer(nc_in=256, nc_out=256)
#
# self.og_conv3 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
# self.syn_conv3 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
# self.og_gc5 = GuideCorrelation(nc=256, guide_nc=256)
# self.og_gc6 = GuideCorrelation(nc=256, guide_nc=num_semantic_classes)
# self.og_gn3 = GuideNormalization(nc=256)
# self.og_relu3 = nn.ReLU(inplace=True)
# self.syn_gc5 = GuideCorrelation(nc=256, guide_nc=256)
# self.syn_gc6 = GuideCorrelation(nc=256, guide_nc=num_semantic_classes)
# self.syn_gn3 = GuideNormalization(nc=256)
# self.syn_relu3 = nn.ReLU(inplace=True)
# self.og_max3 = nn.MaxPool2d(kernel_size=2, stride=2)
# self.syn_max3 = nn.MaxPool2d(kernel_size=2, stride=2)
#
# self.og_gel5 = GuideEncoderLayer(nc_in=256, nc_out=512)
# self.syn_gel5 = GuideEncoderLayer(nc_in=256, nc_out=512)
# self.og_gel6 = GuideEncoderLayer(nc_in=512, nc_out=512)
# self.syn_gel6 = GuideEncoderLayer(nc_in=512, nc_out=512)
#
# self.og_conv4 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
# self.syn_conv4 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
# self.og_gc7 = GuideCorrelation(nc=512, guide_nc=512)
# self.og_gc8 = GuideCorrelation(nc=512, guide_nc=num_semantic_classes)
# self.og_gn4 = GuideNormalization(nc=512)
# self.og_relu4 = nn.ReLU(inplace=True)
# self.syn_gc7 = GuideCorrelation(nc=512, guide_nc=512)
# self.syn_gc8 = GuideCorrelation(nc=512, guide_nc=num_semantic_classes)
# self.syn_gn4 = GuideNormalization(nc=512)
# self.syn_relu4 = nn.ReLU(inplace=True)
#
# # layers for decoder
# # all the 3x3 convolutions
# self.conv1 = nn.Sequential(nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.SELU())
# self.conv12 = nn.Sequential(nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.SELU())
# self.conv3 = nn.Sequential(nn.Conv2d(384, 128, kernel_size=3, padding=1), nn.SELU())
# self.conv5 = nn.Sequential(nn.Conv2d(192, 64, kernel_size=3, padding=1), nn.SELU())
#
# # spade decoder
# if self.spade == 'decoder' or self.spade == 'both':
# self.conv2 = SPADEDecoderLayer(nc=256, label_nc=num_semantic_classes)
# self.conv13 = SPADEDecoderLayer(nc=256, label_nc=num_semantic_classes)
# self.conv4 = SPADEDecoderLayer(nc=128, label_nc=num_semantic_classes)
# self.conv6 = SPADEDecoderLayer(nc=64, label_nc=num_semantic_classes)
# else:
# self.conv2 = nn.Sequential(nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.SELU())
# self.conv13 = nn.Sequential(nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.SELU())
# self.conv4 = nn.Sequential(nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.SELU())
# self.conv6 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.SELU())
#
# # all the tranposed convolutions
# self.tconv1 = nn.ConvTranspose2d(256, 256, kernel_size=2, stride=2, padding=0)
# self.tconv3 = nn.ConvTranspose2d(256, 256, kernel_size=2, stride=2, padding=0)
# self.tconv2 = nn.ConvTranspose2d(128, 128, kernel_size=2, stride=2, padding=0)
#
# # all the other 1x1 convolutions
# self.conv7 = nn.Conv2d(1024, 512, kernel_size=1, padding=0)
# self.conv8 = nn.Conv2d(512, 256, kernel_size=1, padding=0)
# self.conv9 = nn.Conv2d(256, 128, kernel_size=1, padding=0)
# self.conv10 = nn.Conv2d(128, 64, kernel_size=1, padding=0)
# self.conv11 = nn.Conv2d(64, 2, kernel_size=1, padding=0)
#
# # self._initialize_weights()
#
# def _initialize_weights(self):
# for m in self.modules():
# if isinstance(m, nn.Conv2d):
# nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
# if m.bias is not None:
# nn.init.constant_(m.bias, 0)
# elif isinstance(m, nn.BatchNorm2d):
# nn.init.constant_(m.weight, 1)
# nn.init.constant_(m.bias, 0)
# elif isinstance(m, nn.Linear):
# nn.init.normal_(m.weight, 0, 0.01)
# nn.init.constant_(m.bias, 0)
#
# def forward(self, original_img, synthesis_img, semantic_img):
# # get all the image encodings
# og = self.og_gel1(original_img)
# syn = self.syn_gel1(synthesis_img)
#
# og = self.og_conv1(og)
# syn = self.syn_conv1(syn)
#
# gamma1, beta1 = self.og_gc1(og, syn)
# gamma2, beta2 = self.og_gc2(og, semantic_img)
#
# gamma3, beta3 = self.syn_gc1(syn, og)
# gamma4, beta4 = self.syn_gc2(syn, semantic_img)
#
# layer1_og = self.og_relu1(self.og_gn1(og, gamma1, beta1, gamma2, beta2))
# layer1_syn = self.syn_relu1(self.syn_gn1(syn, gamma3, beta3, gamma4, beta4))
#
# og = self.og_gel2(self.og_max1(layer1_og))
# syn = self.syn_gel2(self.syn_max1(layer1_syn))
#
# og = self.og_conv2(og)
# syn = self.syn_conv2(syn)
#
# gamma1, beta1 = self.og_gc3(og, syn)
# gamma2, beta2 = self.og_gc4(og, semantic_img)
#
# gamma3, beta3 = self.syn_gc3(syn, og)
# gamma4, beta4 = self.syn_gc4(syn, semantic_img)
#
# layer2_og = self.og_relu2(self.og_gn2(og, gamma1, beta1, gamma2, beta2))
# layer2_syn = self.syn_relu2(self.syn_gn2(syn, gamma3, beta3, gamma4, beta4))
#
# og = self.og_gel3(self.og_max2(layer2_og))
# syn = self.syn_gel3(self.syn_max2(layer2_syn))
# og = self.og_gel4(og)
# syn = self.syn_gel4(syn)
#
# og = self.og_conv3(og)
# syn = self.syn_conv3(syn)
#
# gamma1, beta1 = self.og_gc5(og, syn)
# gamma2, beta2 = self.og_gc6(og, semantic_img)
#
# gamma3, beta3 = self.syn_gc5(syn, og)
# gamma4, beta4 = self.syn_gc6(syn, semantic_img)
#
# layer3_og = self.og_relu3(self.og_gn3(og, gamma1, beta1, gamma2, beta2))
# layer3_syn = self.syn_relu3(self.syn_gn3(syn, gamma3, beta3, gamma4, beta4))
#
# og = self.og_gel5(self.og_max3(layer3_og))
# syn = self.syn_gel5(self.syn_max3(layer3_syn))
# og = self.og_gel6(og)
# syn = self.syn_gel6(syn)
#
# og = self.og_conv4(og)
# syn = self.syn_conv4(syn)
#
# gamma1, beta1 = self.og_gc7(og, syn)
# gamma2, beta2 = self.og_gc8(og, semantic_img)
#
# gamma3, beta3 = self.syn_gc7(syn, og)
# gamma4, beta4 = self.syn_gc8(syn, semantic_img)
#
# layer4_og = self.og_relu4(self.og_gn4(og, gamma1, beta1, gamma2, beta2))
# layer4_syn = self.syn_relu4(self.syn_gn4(syn, gamma3, beta3, gamma4, beta4))
#
# # concatenate the output of each encoder
# layer1_cat = torch.cat((layer1_og, layer1_syn), dim=1)
# layer2_cat = torch.cat((layer2_og, layer2_syn), dim=1)
# layer3_cat = torch.cat((layer3_og, layer3_syn), dim=1)
# layer4_cat = torch.cat((layer4_og, layer4_syn), dim=1)
#
# # use 1x1 convolutions to reduce dimensions of concatenations
# layer4_cat = self.conv7(layer4_cat)
# layer3_cat = self.conv8(layer3_cat)
# layer2_cat = self.conv9(layer2_cat)
# layer1_cat = self.conv10(layer1_cat)
#
# # Run Decoder
# x = self.conv1(layer4_cat)
# if self.spade == 'decoder' or self.spade == 'both':
# x = self.conv2(x, semantic_img)
# else:
# x = self.conv2(x)
# x = self.tconv1(x)
#
# x = torch.cat((x, layer3_cat), dim=1)
# x = self.conv12(x)
# if self.spade == 'decoder' or self.spade == 'both':
# x = self.conv13(x, semantic_img)
# else:
# x = self.conv13(x)
# x = self.tconv3(x)
#
# x = torch.cat((x, layer2_cat), dim=1)
# x = self.conv3(x)
# if self.spade == 'decoder' or self.spade == 'both':
# x = self.conv4(x, semantic_img)
# else:
# x = self.conv4(x)
# x = self.tconv2(x)
#
# x = torch.cat((x, layer1_cat), dim=1)
# x = self.conv5(x)
# if self.spade == 'decoder' or self.spade == 'both':
# x = self.conv6(x, semantic_img)
# else:
# x = self.conv6(x)
# x = self.conv11(x)
#
# self.final_prediction = x
#
# return self.final_prediction
#
# class CorrelatedDissimNetGuide(nn.Module):
# def __init__(self, architecture='vgg16', semantic=True, pretrained=True, correlation=True, spade='decoder',
# num_semantic_classes=19):
# super(CorrelatedDissimNetGuide, self).__init__()
#
# self.spade = spade
#
# # layers for encoder
# self.og_gel1 = GuideEncoderLayer(nc_in=3, nc_out=64)
# self.syn_gel1 = GuideEncoderLayer(nc_in=3, nc_out=64)
#
# self.og_conv1 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
# self.syn_conv1 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
# self.og_gc1 = SPADE(norm_nc=64, label_nc=64)
# self.og_relu1 = nn.ReLU(inplace=True)
# self.syn_gc1 = SPADE(norm_nc=64, label_nc=64)
# self.syn_relu1 = nn.ReLU(inplace=True)
# self.og_max1 = nn.MaxPool2d(kernel_size=2, stride=2)
# self.syn_max1 = nn.MaxPool2d(kernel_size=2, stride=2)
#
# self.og_gel2 = GuideEncoderLayer(nc_in=64, nc_out=128)
# self.syn_gel2 = GuideEncoderLayer(nc_in=64, nc_out=128)
#
# self.og_conv2 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
# self.syn_conv2 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
# self.og_gc2 = SPADE(norm_nc=128, label_nc=128)
# self.og_relu2 = nn.ReLU(inplace=True)
# self.syn_gc2 = SPADE(norm_nc=128, label_nc=128)
# self.syn_relu2 = nn.ReLU(inplace=True)
# self.og_max2 = nn.MaxPool2d(kernel_size=2, stride=2)
# self.syn_max2 = nn.MaxPool2d(kernel_size=2, stride=2)
#
# self.og_gel3 = GuideEncoderLayer(nc_in=128, nc_out=256)
# self.syn_gel3 = GuideEncoderLayer(nc_in=128, nc_out=256)
# self.og_gel4 = GuideEncoderLayer(nc_in=256, nc_out=256)
# self.syn_gel4 = GuideEncoderLayer(nc_in=256, nc_out=256)
#
# self.og_conv3 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
# self.syn_conv3 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
# self.og_gc3 = SPADE(norm_nc=256, label_nc=256)
# self.og_relu3 = nn.ReLU(inplace=True)
# self.syn_gc3 = SPADE(norm_nc=256, label_nc=256)
# self.syn_relu3 = nn.ReLU(inplace=True)
# self.og_max3 = nn.MaxPool2d(kernel_size=2, stride=2)
# self.syn_max3 = nn.MaxPool2d(kernel_size=2, stride=2)
#
# self.og_gel5 = GuideEncoderLayer(nc_in=256, nc_out=512)
# self.syn_gel5 = GuideEncoderLayer(nc_in=256, nc_out=512)
# self.og_gel6 = GuideEncoderLayer(nc_in=512, nc_out=512)
# self.syn_gel6 = GuideEncoderLayer(nc_in=512, nc_out=512)
#
# self.og_conv4 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
# self.syn_conv4 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
# self.og_gc4 = SPADE(norm_nc=512, label_nc=512)
# self.og_relu4 = nn.ReLU(inplace=True)
# self.syn_gc4 = SPADE(norm_nc=512, label_nc=512)
# self.syn_relu4 = nn.ReLU(inplace=True)
#
# # layers for decoder
# # all the 3x3 convolutions
# self.conv1 = nn.Sequential(nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.SELU())
# self.conv12 = nn.Sequential(nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.SELU())
# self.conv3 = nn.Sequential(nn.Conv2d(384, 128, kernel_size=3, padding=1), nn.SELU())
# self.conv5 = nn.Sequential(nn.Conv2d(192, 64, kernel_size=3, padding=1), nn.SELU())
#
# # spade decoder
# if self.spade == 'decoder' or self.spade == 'both':
# self.conv2 = SPADEDecoderLayer(nc=256, label_nc=num_semantic_classes)
# self.conv13 = SPADEDecoderLayer(nc=256, label_nc=num_semantic_classes)
# self.conv4 = SPADEDecoderLayer(nc=128, label_nc=num_semantic_classes)
# self.conv6 = SPADEDecoderLayer(nc=64, label_nc=num_semantic_classes)
# else:
# self.conv2 = nn.Sequential(nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.SELU())
# self.conv13 = nn.Sequential(nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.SELU())
# self.conv4 = nn.Sequential(nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.SELU())
# self.conv6 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.SELU())
#
# # all the tranposed convolutions
# self.tconv1 = nn.ConvTranspose2d(256, 256, kernel_size=2, stride=2, padding=0)
# self.tconv3 = nn.ConvTranspose2d(256, 256, kernel_size=2, stride=2, padding=0)
# self.tconv2 = nn.ConvTranspose2d(128, 128, kernel_size=2, stride=2, padding=0)
#
# # all the other 1x1 convolutions
# self.conv7 = nn.Conv2d(1024, 512, kernel_size=1, padding=0)
# self.conv8 = nn.Conv2d(512, 256, kernel_size=1, padding=0)
# self.conv9 = nn.Conv2d(256, 128, kernel_size=1, padding=0)
# self.conv10 = nn.Conv2d(128, 64, kernel_size=1, padding=0)
# self.conv11 = nn.Conv2d(64, 2, kernel_size=1, padding=0)
#
# # self._initialize_weights()
#
# def _initialize_weights(self):
# for m in self.modules():
# if isinstance(m, nn.Conv2d):
# nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
# if m.bias is not None:
# nn.init.constant_(m.bias, 0)
# elif isinstance(m, nn.BatchNorm2d):
# nn.init.constant_(m.weight, 1)
# nn.init.constant_(m.bias, 0)
# elif isinstance(m, nn.Linear):
# nn.init.normal_(m.weight, 0, 0.01)
# nn.init.constant_(m.bias, 0)
#
# def forward(self, original_img, synthesis_img, semantic_img):
# # get all the image encodings
# og = self.og_gel1(original_img)
# syn = self.syn_gel1(synthesis_img)
#
# og_1 = self.og_conv1(og)
# syn_1 = self.syn_conv1(syn)
#
# og_2 = self.og_gc1(og_1, syn_1)
# syn_2 = self.syn_gc1(syn_1, og_1)
#
# layer1_og = self.og_relu1(og_2)
# layer1_syn = self.syn_relu1(syn_2)
#
# og = self.og_gel2(self.og_max1(layer1_og))
# syn = self.syn_gel2(self.syn_max1(layer1_syn))
#
# og_1 = self.og_conv2(og)
# syn_1 = self.syn_conv2(syn)
#
# og_2 = self.og_gc2(og_1, syn_1)
# syn_2 = self.syn_gc2(syn_1, og_1)
#
# layer2_og = self.og_relu2(og_2)
# layer2_syn = self.syn_relu2(syn_2)
#
# og = self.og_gel3(self.og_max2(layer2_og))
# syn = self.syn_gel3(self.syn_max2(layer2_syn))
# og = self.og_gel4(og)
# syn = self.syn_gel4(syn)
#
# og_1 = self.og_conv3(og)
# syn_1 = self.syn_conv3(syn)
#
# og_2 = self.og_gc3(og_1, syn_1)
# syn_2 = self.syn_gc3(syn_1, og_1)
#
# layer3_og = self.og_relu3(og_2)
# layer3_syn = self.syn_relu3(syn_2)
#
# og = self.og_gel5(self.og_max3(layer3_og))
# syn = self.syn_gel5(self.syn_max3(layer3_syn))
# og = self.og_gel6(og)
# syn = self.syn_gel6(syn)
#
# og_1 = self.og_conv4(og)
# syn_1 = self.syn_conv4(syn)
#
# og_2 = self.og_gc4(og_1, syn_1)
# syn_2 = self.syn_gc4(syn_1, og_1)
#
# layer4_og = self.og_relu4(og_2)
# layer4_syn = self.syn_relu4(syn_2)
#
# # concatenate the output of each encoder
# layer1_cat = torch.cat((layer1_og, layer1_syn), dim=1)
# layer2_cat = torch.cat((layer2_og, layer2_syn), dim=1)
# layer3_cat = torch.cat((layer3_og, layer3_syn), dim=1)
# layer4_cat = torch.cat((layer4_og, layer4_syn), dim=1)
#
# # use 1x1 convolutions to reduce dimensions of concatenations
# layer4_cat = self.conv7(layer4_cat)
# layer3_cat = self.conv8(layer3_cat)
# layer2_cat = self.conv9(layer2_cat)
# layer1_cat = self.conv10(layer1_cat)
#
# # Run Decoder
# x = self.conv1(layer4_cat)
# if self.spade == 'decoder' or self.spade == 'both':
# x = self.conv2(x, semantic_img)
# else:
# x = self.conv2(x)
# x = self.tconv1(x)
#
# x = torch.cat((x, layer3_cat), dim=1)
# x = self.conv12(x)
# if self.spade == 'decoder' or self.spade == 'both':
# x = self.conv13(x, semantic_img)
# else:
# x = self.conv13(x)
# x = self.tconv3(x)
#
# x = torch.cat((x, layer2_cat), dim=1)
# x = self.conv3(x)
# if self.spade == 'decoder' or self.spade == 'both':
# x = self.conv4(x, semantic_img)
# else:
# x = self.conv4(x)
# x = self.tconv2(x)
#
# x = torch.cat((x, layer1_cat), dim=1)
# x = self.conv5(x)
# if self.spade == 'decoder' or self.spade == 'both':
# x = self.conv6(x, semantic_img)
# else:
# x = self.conv6(x)
# x = self.conv11(x)
#
# self.final_prediction = x
#
# return self.final_prediction
class SPADEDecoderLayer(nn.Module):
def __init__(self, nc=256, label_nc=19):
super(SPADEDecoderLayer, self).__init__()
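        # SPADE-conditioned decoder block: SPADE norm -> SELU -> 3x3 conv -> SPADE norm -> SELU,
        # with both normalizations modulated by the semantic map passed to forward().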
# create conv layers
self.norm1 = SPADE(norm_nc=nc, label_nc=label_nc)
self.selu1 = nn.SELU()
self.conv = nn.Conv2d(nc, nc, kernel_size=3, padding=1)
self.norm2 = SPADE(norm_nc=nc, label_nc=label_nc)
self.selu2 = nn.SELU()
def forward(self, x, seg):
out = self.selu2(self.norm2(self.conv(self.selu1(self.norm1(x, seg))), seg))
return out
#
# class GuideEncoderLayer(nn.Module):
# def __init__(self, nc_in=3, nc_out=64):
# super(GuideEncoderLayer, self).__init__()
#
# # create conv layers
# self.conv = nn.Conv2d(nc_in, nc_out, kernel_size=3, padding=1)
# self.norm = nn.BatchNorm2d(nc_out, affine=False)
# self.relu = nn.ReLU(inplace=True)
#
# def forward(self, x):
# x = self.conv(x)
# x = self.norm(x)
# x = self.relu(x)
# return x
#
#
# if __name__ == "__main__":
# from PIL import Image
# import torchvision.models as models
# import torchvision.transforms as transforms
#
# img = Image.open('../../sample_images/zm0002_100000.png')
# diss_model = CorrelatedDissimNet()
# img_transform = transforms.Compose([transforms.ToTensor()])
# img_tensor = img_transform(img)
# outputs = diss_model(img_tensor.unsqueeze(0), img_tensor.unsqueeze(0), img_tensor.unsqueeze(0))
# print(img_tensor[0].data.shape)
# print(outputs.data.shape)
|
[
"chan@math.uni-wuppertal.de"
] |
chan@math.uni-wuppertal.de
|
bbbec17ee2645b5b53b371e2eae122c817a9ed94
|
bad62c2b0dfad33197db55b44efeec0bab405634
|
/sdk/rdbms/azure-mgmt-rdbms/azure/mgmt/rdbms/mysql/models/_my_sql_management_client_enums.py
|
94a9deb6bcd1c36157e380399c3b0727cf44c4d3
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
test-repo-billy/azure-sdk-for-python
|
20c5a2486456e02456de17515704cb064ff19833
|
cece86a8548cb5f575e5419864d631673be0a244
|
refs/heads/master
| 2022-10-25T02:28:39.022559
| 2022-10-18T06:05:46
| 2022-10-18T06:05:46
| 182,325,031
| 0
| 0
|
MIT
| 2019-07-25T22:28:52
| 2019-04-19T20:59:15
|
Python
|
UTF-8
|
Python
| false
| false
| 5,089
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum
from azure.core import CaseInsensitiveEnumMeta
class CreateMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The mode to create a new server.
"""
DEFAULT = "Default"
POINT_IN_TIME_RESTORE = "PointInTimeRestore"
GEO_RESTORE = "GeoRestore"
REPLICA = "Replica"
class GeoRedundantBackup(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Enable Geo-redundant or not for server backup.
"""
ENABLED = "Enabled"
DISABLED = "Disabled"
class IdentityType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The identity type. Set this to 'SystemAssigned' in order to automatically create and assign an
Azure Active Directory principal for the resource.
"""
SYSTEM_ASSIGNED = "SystemAssigned"
class InfrastructureEncryption(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Add a second layer of encryption for your data using new encryption algorithm which gives
additional data protection. Value is optional but if passed in, must be 'Disabled' or
'Enabled'.
"""
#: Default value for single layer of encryption for data at rest.
ENABLED = "Enabled"
#: Additional (2nd) layer of encryption for data at rest.
DISABLED = "Disabled"
class MinimalTlsVersionEnum(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Enforce a minimal Tls version for the server.
"""
TLS1_0 = "TLS1_0"
TLS1_1 = "TLS1_1"
TLS1_2 = "TLS1_2"
TLS_ENFORCEMENT_DISABLED = "TLSEnforcementDisabled"
class OperationOrigin(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The intended executor of the operation.
"""
NOT_SPECIFIED = "NotSpecified"
USER = "user"
SYSTEM = "system"
class PrivateEndpointProvisioningState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""State of the private endpoint connection.
"""
APPROVING = "Approving"
READY = "Ready"
DROPPING = "Dropping"
FAILED = "Failed"
REJECTING = "Rejecting"
class PrivateLinkServiceConnectionStateActionsRequire(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The actions required for private link service connection.
"""
NONE = "None"
class PrivateLinkServiceConnectionStateStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The private link service connection status.
"""
APPROVED = "Approved"
PENDING = "Pending"
REJECTED = "Rejected"
DISCONNECTED = "Disconnected"
class PublicNetworkAccessEnum(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Whether or not public network access is allowed for this server. Value is optional but if
passed in, must be 'Enabled' or 'Disabled'
"""
ENABLED = "Enabled"
DISABLED = "Disabled"
class QueryPerformanceInsightResetDataResultState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Indicates result of the operation.
"""
SUCCEEDED = "Succeeded"
FAILED = "Failed"
class SecurityAlertPolicyName(str, Enum, metaclass=CaseInsensitiveEnumMeta):
DEFAULT = "Default"
class ServerKeyType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The key type like 'AzureKeyVault'.
"""
AZURE_KEY_VAULT = "AzureKeyVault"
class ServerSecurityAlertPolicyState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Specifies the state of the policy, whether it is enabled or disabled.
"""
ENABLED = "Enabled"
DISABLED = "Disabled"
class ServerState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""A state of a server that is visible to user.
"""
READY = "Ready"
DROPPING = "Dropping"
DISABLED = "Disabled"
INACCESSIBLE = "Inaccessible"
class ServerVersion(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The version of a server.
"""
FIVE6 = "5.6"
FIVE7 = "5.7"
EIGHT0 = "8.0"
class SkuTier(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The tier of the particular SKU, e.g. Basic.
"""
BASIC = "Basic"
GENERAL_PURPOSE = "GeneralPurpose"
MEMORY_OPTIMIZED = "MemoryOptimized"
class SslEnforcementEnum(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Enable ssl enforcement or not when connect to server.
"""
ENABLED = "Enabled"
DISABLED = "Disabled"
class StorageAutogrow(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Enable Storage Auto Grow.
"""
ENABLED = "Enabled"
DISABLED = "Disabled"
class VirtualNetworkRuleState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Virtual Network Rule State
"""
INITIALIZING = "Initializing"
IN_PROGRESS = "InProgress"
READY = "Ready"
DELETING = "Deleting"
UNKNOWN = "Unknown"
|
[
"noreply@github.com"
] |
test-repo-billy.noreply@github.com
|
5ae8aea8f0bc145c51db544e8cfe00147bcdb6aa
|
5c208a3a25133342fcfc24aeaf05475cef1a6208
|
/face.py
|
5ae09b96330fa7b95c418f709d6c1e3baecc0661
|
[] |
no_license
|
saeedaghabozorgi/FaceDetection
|
e5e26e26e85008ec8b011f3ee73bb500ba251b36
|
77cc9fcf344c1442c44a49c7528d9faa4a017158
|
refs/heads/master
| 2020-03-23T18:54:03.244065
| 2018-07-26T15:53:57
| 2018-07-26T15:53:57
| 141,939,489
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,961
|
py
|
import os
import re
import sys
import tarfile
import cv2
import numpy as np
from six.moves import urllib
import tensorflow as tf
import face_input
FLAGS = tf.app.flags.FLAGS
# Basic model parameters.
tf.app.flags.DEFINE_integer('batch_size', 64, """Number of images to process in a batch.""")
tf.app.flags.DEFINE_boolean('use_fp16', False, """Train the model using fp16.""")
# Global constants describing the face data set.
IMAGE_SIZE = face_input.IMAGE_SIZE
NUM_CLASSES = face_input.NUM_CLASSES
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.
# NUM_EPOCHS_PER_DECAY = 2.0 # Epochs after which learning rate decays.
DECAY_STEPS = 3000 # after how many batches
LEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1 # Initial learning rate.
# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'
DATA_URL = 'https://ibm.box.com/shared/static/y28pnufdhzkjj3oyc4yytxema4tfxexr.gz'
def _activation_summary(x):
"""Helper to create summaries for activations.
Creates a summary that provides a histogram of activations.
Creates a summary that measures the sparsity of activations.
Args:
x: Tensor
Returns:
nothing
"""
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on tensorboard.
tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
tf.summary.histogram(tensor_name + '/activations', x)
tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def _variable_on_cpu(name, shape, initializer):
"""Helper to create a Variable stored on CPU memory.
Args:
name: name of the variable
shape: list of ints
initializer: initializer for Variable
Returns:
Variable Tensor
"""
with tf.device('/cpu:0'):
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
return var
def _variable_with_weight_decay(name, shape, stddev, wd):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
Returns:
Variable Tensor
"""
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
var = _variable_on_cpu(
name,
shape,
tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
if wd is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
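# Note: the weight decay terms collected in 'losses' above are typically summed into the
# total objective, e.g. tf.add_n(tf.get_collection('losses')); the exact usage is assumed
# here from the standard TensorFlow CIFAR-10 tutorial that this file is based on.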
def distorted_inputs():
"""Construct distorted input for CIFAR training using the Reader ops.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
  features, labels = face_input.read_face2()
  if FLAGS.use_fp16:
    features = tf.cast(features, tf.float16)
    labels = tf.cast(labels, tf.float16)
  return features, labels
def inputs(eval_data):
"""Construct input for CIFAR evaluation using the Reader ops.
Args:
eval_data: bool, indicating if one should use the train or eval data set.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
images, labels = cifar10_input.inputs(eval_data=eval_data,
data_dir=data_dir,
batch_size=FLAGS.batch_size)
if FLAGS.use_fp16:
images = tf.cast(images, tf.float16)
labels = tf.cast(labels, tf.float16)
return images, labels
def inference_color(images):
"""Build the SETI model.
Args:
images: Images returned from distorted_inputs() or inputs().
Returns:
Logits.
"""
# We instantiate all variables using tf.get_variable() instead of
# tf.Variable() in order to share variables across multiple GPU training runs.
# If we only ran this model on a single GPU, we could simplify this function
# by replacing all instances of tf.get_variable() with tf.Variable().
#
# conv1
with tf.variable_scope('conv1') as scope:
kernel = _variable_with_weight_decay('weights',
shape=[5, 5, 3, 64],
stddev=5e-2,
wd=0.0)
conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
pre_activation = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(conv1)
# pool1
pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
padding='SAME', name='pool1')
# norm1
norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm1')
# conv2
with tf.variable_scope('conv2') as scope:
kernel = _variable_with_weight_decay('weights',
shape=[5, 5, 64, 64],
stddev=5e-2,
wd=0.0)
conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
pre_activation = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(conv2)
# norm2
norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2')
# pool2
pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool2')
# local3
with tf.variable_scope('local3') as scope:
# Move everything into depth so we can perform a single matrix multiply.
reshape = tf.reshape(pool2, [FLAGS.batch_size, -1])
dim = reshape.get_shape()[1].value
weights = _variable_with_weight_decay('weights', shape=[dim, 384],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
_activation_summary(local3)
# local4
with tf.variable_scope('local4') as scope:
weights = _variable_with_weight_decay('weights', shape=[384, 192],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)
_activation_summary(local4)
# linear layer(WX + b),
# We don't apply softmax here because
# tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits
# and performs the softmax internally for efficiency.
with tf.variable_scope('softmax_linear') as scope:
weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES],
stddev=1/192.0, wd=0.0)
biases = _variable_on_cpu('biases', [NUM_CLASSES],
tf.constant_initializer(0.0))
softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name)
_activation_summary(softmax_linear)
return softmax_linear
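# inference_color() above keeps the 3-channel CIFAR-10-style architecture; inference()
# below is the 1-channel (grayscale) face variant with smaller filter banks (4/16/32
# feature maps) and two fully connected layers (600 and 200 units).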
def inference(images):
"""Build the SETI model.
Args:
images: Images returned from distorted_inputs() or inputs().
Returns:
Logits.
"""
# We instantiate all variables using tf.get_variable() instead of
# tf.Variable() in order to share variables across multiple GPU training runs.
# If we only ran this model on a single GPU, we could simplify this function
# by replacing all instances of tf.get_variable() with tf.Variable().
#
######## Layer 1
# conv1 - 5x5 convolution
with tf.variable_scope('conv1') as scope:
kernel = _variable_with_weight_decay('weights', shape=[5, 5, 1, 4], stddev=5e-2, wd=0.0)
conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [4], tf.constant_initializer(0.0))
pre_activation = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(conv1)
# pool1
pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1')
# norm1
norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1')
print("layer 1:" + str(norm1.shape))
######## Layer 2
# conv2 - 3x3 convolution
with tf.variable_scope('conv2') as scope:
kernel = _variable_with_weight_decay('weights', shape=[3, 3, 4, 16], stddev=5e-2, wd=0.0)
conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [16], tf.constant_initializer(0.1))
pre_activation = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(conv2)
# norm2
norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2')
# pool2
pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool2')
print("layer 2:" + str(pool2.shape))
######## Layer 3
# conv3 - 3x3 convolution
with tf.variable_scope('conv3') as scope:
kernel = _variable_with_weight_decay('weights', shape=[3, 3, 16, 32], stddev=5e-2, wd=0.0)
conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [32], tf.constant_initializer(0.1))
pre_activation = tf.nn.bias_add(conv, biases)
conv3 = tf.nn.relu(pre_activation, name=scope.name)
_activation_summary(conv3)
# norm3
norm3 = tf.nn.lrn(conv3, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm3')
# pool3
pool3 = tf.nn.max_pool(norm3, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool3')
print("layer 3:" + str(pool3.shape))
#Fully connected layer 1 - 600 units
with tf.variable_scope('fully1') as scope:
# Move everything into depth so we can perform a single matrix multiply.
dim = np.prod(pool3.get_shape()[1:]).value
reshape = tf.reshape(pool3, shape=[-1, dim])
# dim = reshape.get_shape()[1].value
weights = _variable_with_weight_decay('weights', shape=[dim, 600], stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [600], tf.constant_initializer(0.1))
fullyConn1 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
print ("Fully conected 1:" + str(fullyConn1.shape))
_activation_summary(fullyConn1)
#Fully connected layer 2- 200 units
with tf.variable_scope('fully2') as scope:
weights = _variable_with_weight_decay('weights', shape=[600, 200], stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [200], tf.constant_initializer(0.1))
fullyConn2 = tf.nn.relu(tf.matmul(fullyConn1, weights) + biases, name=scope.name)
print ("Fully conected 2:" + str(fullyConn2.shape))
_activation_summary(fullyConn2)
# We don't apply softmax here because
# tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits
# and performs the softmax internally for efficiency.
#Output softmax layer - 2 unit
with tf.variable_scope('softmax_linear') as scope:
weights = _variable_with_weight_decay('weights', [200, NUM_CLASSES], stddev=1/200.0, wd=0.0)
biases = _variable_on_cpu('biases', [NUM_CLASSES], tf.constant_initializer(0.0))
softmax_linear = tf.add(tf.matmul(fullyConn2, weights), biases, name=scope.name)
_activation_summary(softmax_linear)
return softmax_linear
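# Illustrative sketch (not part of the original training script): the pieces defined in this
# module are typically wired together along these lines; `global_step` is assumed to be
# created by the caller.
#   images, labels = distorted_inputs()            # or inputs()
#   logits = inference(images)
#   total_loss = loss(logits, labels)
#   train_op, lr = train(total_loss, global_step)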
def maybe_download_and_extract():
"""Download and extract the tarball from Alex's website."""
dest_directory = FLAGS.data_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
extracted_dir_path = os.path.join(dest_directory, 'SETI-batches-bin')
if not os.path.exists(extracted_dir_path):
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
def loss(logits, labels):
"""Add L2Loss to all the trainable variables.
Add summary for "Loss" and "Loss/avg".
Args:
logits: Logits from inference().
labels: Labels from distorted_inputs or inputs(). 1-D tensor
of shape [batch_size]
Returns:
Loss tensor of type float.
"""
# Calculate the average cross entropy loss across the batch.
labels = tf.cast(labels, tf.int64)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits( labels=labels, logits=logits, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
# The total loss is defined as the cross entropy loss plus all of the weight
# decay terms (L2 loss).
return tf.add_n(tf.get_collection('losses'), name='total_loss')
def _add_loss_summaries(total_loss):
"""Add summaries for losses in CIFAR-10 model.
Generates moving average for all losses and associated summaries for
visualizing the performance of the network.
Args:
total_loss: Total loss from loss().
Returns:
loss_averages_op: op for generating moving averages of losses.
"""
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
losses = tf.get_collection('losses')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
tf.summary.scalar(l.op.name + ' (raw)', l)
tf.summary.scalar(l.op.name, loss_averages.average(l))
return loss_averages_op
def train(total_loss, global_step):
"""Train CIFAR-10 model.
Create an optimizer and apply to all trainable variables. Add moving
average for all trainable variables.
Args:
total_loss: Total loss from loss().
global_step: Integer Variable counting the number of training steps
processed.
Returns:
train_op: op for training.
lr: the decayed learning rate tensor.
"""
# Variables that affect learning rate.
# num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
# decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
decay_steps = DECAY_STEPS
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
global_step,
decay_steps,
LEARNING_RATE_DECAY_FACTOR,
staircase=True)
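# With staircase=True the decayed rate is
#   lr = INITIAL_LEARNING_RATE * LEARNING_RATE_DECAY_FACTOR ** floor(global_step / decay_steps)
# i.e. the rate drops by a factor of LEARNING_RATE_DECAY_FACTOR once every decay_steps steps.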
tf.summary.scalar('learning_rate', lr)
# Generate moving averages of all losses and associated summaries.
loss_averages_op = _add_loss_summaries(total_loss)
# Compute gradients.
with tf.control_dependencies([loss_averages_op]):
opt = tf.train.GradientDescentOptimizer(lr)
grads = opt.compute_gradients(total_loss)
# Apply gradients.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
tf.summary.histogram(var.op.name, var)
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
tf.summary.histogram(var.op.name + '/gradients', grad)
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
train_op = tf.no_op(name='train')
return train_op, lr
"""Download and extract the tarball from Alex's website."""
dest_directory = FLAGS.data_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
extracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin')
if not os.path.exists(extracted_dir_path):
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
|
[
"saeed@ca.ibm.com"
] |
saeed@ca.ibm.com
|
a4d4e2ccc8d69afb1d39cc45e2fb449b5ea67b2e
|
d301c28ca761a6e59681e86cfca8d479dd9ea106
|
/pymdwizard/gui/metainfo.py
|
6758f4931be450ea8b2e2447e9992716c01d82c0
|
[
"CC-BY-4.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
ehbaker/fort-pymdwizard
|
1f184c957d25a1508e6ad128ba600757a323af4e
|
77375030bdd7184279fee7976ed9f92d5c276ef5
|
refs/heads/master
| 2021-06-20T04:33:45.156187
| 2017-07-12T20:15:29
| 2017-07-12T20:15:29
| 94,485,610
| 0
| 0
| null | 2017-06-15T23:09:08
| 2017-06-15T23:09:07
| null |
UTF-8
|
Python
| false
| false
| 6,532
|
py
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
"""
License: Creative Commons Attribution 4.0 International (CC BY 4.0)
http://creativecommons.org/licenses/by/4.0/
PURPOSE
------------------------------------------------------------------------------
Provide a pyqt widget for a Identification Information <idinfo> section
SCRIPT DEPENDENCIES
------------------------------------------------------------------------------
None
U.S. GEOLOGICAL SURVEY DISCLAIMER
------------------------------------------------------------------------------
Any use of trade, product or firm names is for descriptive purposes only and
does not imply endorsement by the U.S. Geological Survey.
Although this information product, for the most part, is in the public domain,
it also contains copyrighted material as noted in the text. Permission to
reproduce copyrighted items for other than personal use must be secured from
the copyright owner.
Although these data have been processed successfully on a computer system at
the U.S. Geological Survey, no warranty, expressed or implied is made
regarding the display or utility of the data on any other system, or for
general or scientific purposes, nor shall the act of distribution constitute
any such warranty. The U.S. Geological Survey shall not be held liable for
improper or incorrect use of the data described and/or contained herein.
Although this program has been used by the U.S. Geological Survey (USGS), no
warranty, expressed or implied, is made by the USGS or the U.S. Government as
to the accuracy and functioning of the program and related program material
nor shall the fact of distribution constitute any such warranty, and no
responsibility is assumed by the USGS in connection therewith.
------------------------------------------------------------------------------
"""
import sys
from lxml import etree
from PyQt5.QtGui import QPainter, QFont, QPalette, QBrush, QColor, QPixmap
from PyQt5.QtWidgets import QMainWindow, QApplication
from PyQt5.QtWidgets import QWidget, QLineEdit, QSizePolicy, QTableView
from PyQt5.QtWidgets import QHBoxLayout, QVBoxLayout
from PyQt5.QtWidgets import QStyleOptionHeader, QHeaderView, QStyle, QSpacerItem
from PyQt5.QtCore import QAbstractItemModel, QModelIndex, QSize, QRect, QPoint, Qt
from pymdwizard.core import utils
from pymdwizard.core import xml_utils
from pymdwizard.gui.wiz_widget import WizardWidget
from pymdwizard.gui.ui_files import UI_metainfo
from pymdwizard.gui.ContactInfo import ContactInfo
from pymdwizard.gui.fgdc_date import FGDCDate
class MetaInfo(WizardWidget):
drag_label = "Metadata Information <metainfo>"
acceptable_tags = ['metainfo', 'cntinfo', 'ptcontact']
ui_class = UI_metainfo.Ui_fgdc_metainfo
def __init__(self, root_widget=None):
super(self.__class__, self).__init__()
self.root_widget = root_widget
def build_ui(self):
self.ui = self.ui_class()
self.ui.setupUi(self)
self.setup_dragdrop(self)
self.contactinfo = ContactInfo(parent=self)
self.metd = FGDCDate(parent=self, fgdc_name='fgdc_metd')
self.ui.help_metd.layout().addWidget(self.metd)
self.ui.fgdc_metc.layout().addWidget(self.contactinfo)
def connect_events(self):
self.ui.fgdc_metstdn.currentTextChanged.connect(self.update_metstdv)
self.ui.fgdc_metstdv.currentIndexChanged.connect(self.update_metstdn)
self.ui.button_use_dataset.clicked.connect(self.pull_datasetcontact)
def update_metstdn(self):
if self.ui.fgdc_metstdv.currentText() == 'FGDC-STD-001-1998':
self.ui.fgdc_metstdn.setCurrentIndex(0)
self.root_widget.switch_schema('fgdc')
elif self.ui.fgdc_metstdv.currentText() == 'FGDC-STD-001.1-1999':
self.ui.fgdc_metstdn.setCurrentIndex(1)
self.root_widget.switch_schema('bdp')
def update_metstdv(self):
if 'biological' in self.ui.fgdc_metstdn.currentText().lower() or \
'bdp' in self.ui.fgdc_metstdn.currentText().lower():
self.ui.fgdc_metstdv.setCurrentIndex(1)
self.root_widget.switch_schema('bdp')
else:
self.ui.fgdc_metstdv.setCurrentIndex(0)
self.root_widget.switch_schema('fgdc')
def pull_datasetcontact(self):
self.contactinfo._from_xml(self.root_widget.idinfo.ptcontac._to_xml())
def _to_xml(self):
# translate the form contents into an FGDC <metainfo> xml node
metainfo_node = xml_utils.xml_node('metainfo')
metd = xml_utils.xml_node('metd', text=self.metd.get_date(),
parent_node=metainfo_node)
metc = xml_utils.xml_node('metc', parent_node=metainfo_node)
cntinfo = self.contactinfo._to_xml()
metc.append(cntinfo)
metstdn = xml_utils.xml_node('metstdn',
text=self.ui.fgdc_metstdn.currentText(),
parent_node=metainfo_node)
metstdv = xml_utils.xml_node('metstdv',
text=self.ui.fgdc_metstdv.currentText(),
parent_node=metainfo_node)
return metainfo_node
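# For reference, _to_xml() above builds a node of this shape (values come from the form):
# <metainfo>
#   <metd>...</metd>
#   <metc><cntinfo>...</cntinfo></metc>
#   <metstdn>...</metstdn>
#   <metstdv>...</metstdv>
# </metainfo>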
def _from_xml(self, xml_metainfo):
if xml_metainfo.tag == 'metainfo':
if xml_metainfo.xpath('metc/cntinfo'):
self.contactinfo._from_xml(xml_metainfo.xpath('metc/cntinfo')[0])
if xml_metainfo.xpath('metstdn'):
standard = xml_utils.get_text_content(xml_metainfo, 'metstdn')
self.ui.fgdc_metstdn.setCurrentText(standard)
# switch wizard content to reflect the standard in this record
if "biological" in standard.lower() \
or 'bdp' in standard.lower():
self.root_widget.switch_schema('bdp')
else:
self.root_widget.switch_schema('fgdc')
metstdv = xml_utils.get_text_content(xml_metainfo, 'metstdv')
self.ui.fgdc_metstdv.setCurrentText(metstdv)
metd = xml_utils.get_text_content(xml_metainfo, 'metd')
self.metd.set_date(metd)
elif xml_metainfo.tag in ['ptcontac', 'cntinfo']:
if xml_metainfo.tag == 'ptcontac':
xml_metainfo = xml_utils.search_xpath(xml_metainfo, 'cntinfo')
self.contactinfo._from_xml(xml_metainfo)
if __name__ == "__main__":
utils.launch_widget(MetaInfo, "MetaInfo testing")
|
[
"talbertc@usgs.gov"
] |
talbertc@usgs.gov
|
2326a0aec4923a378c606ceb9a289d21a37f7006
|
189517b63e0dc8c6e9ee2defd5e5fc98f43425c7
|
/Gaps_meteorology/Scripts/gapswindspeed.py
|
54246545e7cf39ad3ef45e00ca839de26960bac2
|
[] |
no_license
|
Raquel-Araujo/gap_dynamics_BCI50ha
|
bd15ff86b3aefd697de48f2d4782f78b3d801a85
|
15a05f60eb16d9ef215d422dbc2895c09a7910a8
|
refs/heads/main
| 2023-04-06T15:18:26.165602
| 2021-12-16T16:52:18
| 2021-12-16T16:52:18
| 416,399,054
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,809
|
py
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import datetime as dt
from matplotlib.cm import ScalarMappable
import collections
import statsmodels.api as sm
import sys
import statsmodels.formula.api as smf
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from scipy import stats
from scipy.stats import shapiro
from functools import reduce  # reduce() is used below to join the per-threshold wind metric tables
###This code relates gap area to wind speed
gaps = pd.read_csv('../Entrance/gaps_area_days_5years.csv')
meteo = pd.read_csv('../Entrance/tabmeteorology.csv')
######################################################################################
######################################################################################
###Gaps
#Convert date to datetime format
gaps.loc[:,'date1'] = pd.to_datetime(gaps.date, format='%Y/%m/%d')
#Convert integer day counts to timedelta format. Map dt.timedelta onto each value of the column
gaps['dayscor'] = gaps['days'].map(dt.timedelta)
###Vector of image dates
dates = gaps.date
dates.index = dates.index + 1
datestart = pd.Series('2014-10-02')
datesall = datestart.append(dates)
#Convert dateall to datetime format
imgdates = pd.to_datetime(datesall, format='%Y/%m/%d')
print(imgdates)
######################################################################################
######################################################################################
###wind 15 minutes
#Convert datetime15 to datetime format
meteo.loc[:,'datetime1'] = pd.to_datetime(meteo.datetime15, format='%Y/%m/%d %H:%M:%S')
#Select wind until Nov30, 2019 (there is data until Dec 4)
#Last day left open because the cut function uses an open interval at the last date
wind = meteo.loc[meteo.datetime1.between('2014-10-02 00:00:00','2019-11-28 00:00:00'),['datetime1','wsmx']]
#Add periods
wind['per'] = pd.cut(wind.datetime1, imgdates, labels=range(1,len(imgdates)),include_lowest=True)
print(wind)
#Rename
wind15m = wind
######################################################################################
######################################################################################
###wind hour
#Convert datetime15 to datetime format
meteo.loc[:,'datetime1'] = pd.to_datetime(meteo.datetime15, format='%Y/%m/%d %H:%M:%S')
#Select wind until Nov30, 2019 (there is data until Dec 4)
#Last day left open because the cut function uses an open interval at the last date
wind = meteo.loc[meteo.datetime1.between('2014-10-02 00:00:00','2019-11-28 00:00:00'),['datetime1','wsmx']]
print(wind)
#Group by each hour
wind['datehour'] = wind['datetime1'].dt.to_period("H")
wind = pd.pivot_table(data=wind, values='wsmx', index='datehour', aggfunc=np.max).reset_index()
print(wind)
datehourstr = wind.datehour.astype(str)
wind['datehour1'] = pd.to_datetime(datehourstr, format='%Y/%m/%d %H:%M:%S')
# print(wind)
#Add periods
wind['per'] = pd.cut(wind.datehour1, imgdates, labels=range(1,len(imgdates)), include_lowest=True)
print(wind)
#Rename
windhour = wind
######################################################################################
######################################################################################
###wind day
#Convert datetime15 to datetime format
meteo.loc[:,'datetime1'] = pd.to_datetime(meteo.datetime15, format='%Y/%m/%d %H:%M:%S')
#Select wind until Nov30, 2019 (there is data until Dec 4)
#Last day left open because the cut function uses an open interval at the last date
wind = meteo.loc[meteo.datetime1.between('2014-10-02 00:00:00','2019-11-28 00:00:00'),['datetime1','wsmx']]
#Group by each day
wind['date'] = wind['datetime1'].dt.date
wind = pd.pivot_table(data=wind, values='wsmx', index='date', aggfunc=np.max).reset_index()
wind.date = pd.to_datetime(wind.date, format='%Y/%m/%d')
#Add periods
wind['per'] = pd.cut(wind.date, imgdates, labels=range(1,len(imgdates)), include_lowest=True)
# print(wind)
#Rename
windday = wind
listwind = [wind15m, windhour, windday]
listname = ['15m', 'hour', 'day']
listprefix = ['m_p', 'h_p', 'd_p']
i = 0
while i < len(listwind):
######################################################################################
###Input tables
#Filter zero wind
windp = listwind[i].loc[listwind[i].wsmx>0]
# print(windp)
#Calculate percentiles 90.0 until 99.9
#Percentiles calculated on positive wind values
perc = np.percentile(windp.wsmx, np.arange(90.,100., 0.1))
num = np.arange(90.,100., 0.1)
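#Example: num[93] is the 99.3rd percentile threshold (90 + 93 * 0.1), the threshold used
#later when relating gap area to wind speed.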
# print(num)
# print(perc)
# print(len(num))
# print(len(perc))
#To not change the entrance name
entrance = perc
# print(entrance)
######################################################################################
###Create input tables and run the metrics
functions = ['count']
prefix = [listprefix[i]+'{:.1f}'.format(x) for x in num]  # a list so it can be indexed below (map() returns an iterator in Python 3)
# prefix = ['all', 'p']+pref
# print(prefix)
# print(len(prefix))
# print(len(entrance))
coletor = []
j=0
while j < len(entrance):
mask = listwind[i].wsmx > entrance[j]
wind_filter = listwind[i].loc[mask]
pivotwind = pd.pivot_table(data=wind_filter, values='wsmx', index='per', aggfunc=functions)
pivotwind.columns = [prefix[j]+'_wsnumber']
coletor.append(pivotwind)
j+=1
windmetrics = reduce(lambda df1,df2: df1.join(df2), coletor)
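#Illustration: each column is named <prefix>_wsnumber, e.g. 'm_p99.3_wsnumber' holds, for every
#image period 'per', the number of 15-min records whose max wind speed exceeds the 99.3rd percentile.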
# print(windmetrics)
######################################################################################
###Add area information to the wind metrics table - standardized values
#Adjust index to start from 1
gaps.index = np.arange(1, len(gaps) + 1)
windmetrics_gap = windmetrics.join(gaps['area'])
windmetrics_days = windmetrics.join(gaps[['area','days']])
# print(windmetrics_days)
#Divide all columns by number of days
#Exclude the last column days
#Multiply by 30 to express the metrics per month
windstand = (windmetrics_days.iloc[:,:-1].div(windmetrics_days.days, axis=0))*30
# print(windstand)
#Drop long interval
windmetrics_stand = windstand.drop(15)
# print(windmetrics_stand)
#Rename to join at the end
windmetrics_stand.to_csv('../Exit_ws/wind'+listname[i]+'_stand_metrics.csv')
i+=1
# print(windmetrics_stand)
######################################################################################
######################################################################################
###wind analysis - standardized values (per month)
wind15m_stand = pd.read_csv('../Exit_ws/wind15m_stand_metrics.csv').set_index('per')
windhour_stand = pd.read_csv('../Exit_ws/windhour_stand_metrics.csv').set_index('per')
windday_stand = pd.read_csv('../Exit_ws/windday_stand_metrics.csv').set_index('per')
# print(windday_stand)
#Join absolute tables
windstand = wind15m_stand.iloc[:,0:100].join(windhour_stand.iloc[:,0:100]).join(windday_stand).fillna(0)
print(windstand)
###Linear fit
metrics = windstand.columns.values
# print(metrics)
#Create an empty sumario
sumario = pd.DataFrame(columns=['model', 'a', 'b', 'r2', 'r', 'pvalue', 'n>0'])
# print(sumario)
#While to do the fit and create the summary results table
i=0
while i < len(metrics):
x = np.log(windstand.loc[:,metrics[i]]+1)
y = np.log(windstand.area)
x1 = sm.add_constant(x)
model = sm.OLS(y, x1)
results = model.fit()
name = metrics[i]
modell = 'a+bx'
a = results.params[0]
b = results.params[1]
r2 = results.rsquared
r = stats.pearsonr(x, y)
p = results.pvalues[1]
n = len(x[x>0])
sumario.loc[metrics[i],:] = modell, a, b, r2, r[0], p, n
i += 1
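#The loop above fits the log-log linear model log(area) = a + b*log(metric + 1); r is the Pearson
#correlation, r2 the OLS coefficient of determination and pvalue the p-value of the slope b.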
#Choose the highest r values
sumariosort = sumario.sort_values(by='r', ascending=False)
print(sumariosort)
sumario_sel = sumariosort.iloc[1:7,:]
sumario_sel.to_csv('../Exit_ws/summary_highest_r_windstand.csv')
##Residual
x = np.log(windstand.loc[:,sumario_sel.index.values[0]]+1)
y = np.log(windstand.area)
x1 = sm.add_constant(x)
model = sm.OLS(y, x1)
results = model.fit()
modell = 'a+bx'
a = results.params[0]
b = results.params[1]
r2 = results.rsquared
r = stats.pearsonr(x, y)
p = results.pvalues[1]
n = len(x[x>0])
# ypred = a + b*x
ypred = results.predict(x1)
# res = y - ypred
res = results.resid
print(a)
plt.hist(res)
plt.savefig('../Exit_ws/hist_residual.png', dpi=300, bbox_inches='tight')
plt.close()
plt.scatter(ypred,res)
plt.axhline(y=0)
plt.savefig('../Exit_ws/scatter_residual.png', dpi=300, bbox_inches='tight')
plt.close()
#Log scale: p = 0.100399978459 (normal)
stat, p = shapiro(res)
print(stat)
print(p)
#####################################################################################################
#####################################################################################################
#####################################################################################################
#####################################################################################################
#Data for graph of windspeed (m s-1) and percentiles
listwind = [wind15m, windhour, windday]
listwindname = ['wind15m', 'windhour', 'windday']
# #Number of 15 min in 1 hour = 4
# const15min = 4
# constday = 1./24
# print(constday)
coletor = []
i = 0
while i < len(listwind):
#Filter zero wind
windp = listwind[i].loc[listwind[i].wsmx>0]
#I will only need the 1-day value
#Value of windspeed of the p99.3
###The percentile of 99.3 is 11 m/s
pd.Series(np.percentile(windp.wsmx, 99.3)/3.6).to_csv('../Exit_ws/wind_ms'+listwindname[i]+'_equal_p993.csv')
#Calculate percentiles 90.0 until 99.9
#Percentiles calculated on positive wind values
perc = np.percentile(windp.wsmx, np.arange(90.,100., 0.1))
# if i == 0:
# perc = perc*const15min
# elif i == 2:
# perc = perc*constday
coletor.append(perc)
i+=1
print(coletor)
num = np.arange(90.,100., 0.1)
print(num)
#####################################################################################################
#####################################################################################################
#####################################################################################################
####Season data = to color scatter by season
# print(windstand)
# print(gaps1)
gaps1 = gaps.drop(15)
season = pd.read_csv('../Entrance/seasons.csv') #the file is in exit folder
season1 = season.set_index('per')
# print(season1)
windstand = windstand.merge(season1, left_index=True, right_index=True)
gaps1 = gaps1.merge(season1, left_index=True, right_index=True)
# print(gaps1)
#####################################################################################################
#####################################################################################################
#####################################################################################################
#####################################################################################################
###Plot scatter plots, R2 and wind rates together (3 subplots)
##Parameters scatter
constha = (1/500000.)*100.
metrics = sumario_sel.index.values
# x = windstand.loc[:,metrics[0]]
# y = gaps1.areapercmonth
xdry = np.log(windstand.loc[windstand.season==0,metrics[0]]+1)
ydry = gaps1.loc[gaps1.season==0, 'areapercmonth']
xwet = np.log(windstand.loc[windstand.season==1,metrics[0]]+1)
ywet = gaps1.loc[gaps1.season==1, 'areapercmonth']
# print(xdry)
# print(ydry)
a = np.array(sumario_sel.iloc[0, 1])
b = np.array(sumario_sel.iloc[0, 2])
r2 = np.array(sumario_sel.iloc[0, 3])
r = np.array(sumario_sel.iloc[0, 4])
p = np.array(sumario_sel.iloc[0, 5])
xplot = np.array([np.min(x), np.max(x)])
yplot = (a+b*xplot)*constha
print(yplot)
print(xplot)
print(a)
print(b)
#Multiply the coefficients by constha to write the equation on the plot
#I checked this in excel
aha = a*constha
bha = b*constha
print(aha)
print(bha)
plt.rc('font', family='Times New Roman', size=12)
fig, (ax1, ax2, ax3) = plt.subplots(1,3, figsize=(12, 3))
plt.subplots_adjust(wspace=0.3) #wspace=0.25
# ax1.scatter(xwet, ywet,s=30, facecolors='none', edgecolors='royalblue', label='Wet season')
# ax1.scatter(xdry, ydry,s=30, marker='^', facecolors='m', edgecolors='none', label='Dry season', alpha=0.5)
ax1.scatter(xdry, ydry,s=60, marker='^', facecolors='none', edgecolors='k', label='Dry season', alpha=0.5) #marker='^',
ax1.scatter(xwet, ywet,s=20, facecolors='dimgrey', edgecolors='none', label='Wet season', alpha=0.5)
# ax1.scatter(xwet, ywet,s=30, facecolors='royalblue', edgecolors='none', label='Wet season', alpha=0.4)
# ax1.plot(xplot,yplot, color='k', linestyle='--', linewidth=1)
ax1.set_xlabel(r'Log frequency periods with 1-day max windspeed > 99.3$^{th}$ (mo$^{-1}$)', labelpad=10, fontsize=12)
ax1.set_ylabel(r'Canopy disturbance rate (% mo$^{-1}$)', labelpad=10, fontsize=12)
ax1.text(0.65,0.45,'p-value = %.2f' % float(p))
ax1.legend(loc='best', prop={'size': 10})
# ax1.text(0,1.3,'y = %.2f + %.2fx' % (float(aha),float(bha)))
ax1.set_yscale('log')
ax1.set_yticks([0.01, 0.05, 0.1, 0.5, 1.5])
ax1.set_yticklabels(['0.01','0.05','0.1','0.5','1.5'])
# ax1.set_xlim(-0.1, 2.1)
ax1.set_ylim(0.005, 3)
##Parameters R2 graph
#100 is the number of percentiles
listcount15m = np.arange(0,100,1)
listcounthour = 100+listcount15m
listcountday = 100+100+listcount15m
listall = [listcount15m, listcounthour, listcountday]
listcor = ['darkmagenta', 'royalblue', 'k']
listlabel = ['Frequency 15-min', 'Frequency 1-hour', 'Frequency 1-day']
namesxticks = ['90', '91', '92', '93', '94', '95', '96', '97', '98', '99']
num = np.arange(90.,100., 0.1)
#Position of ticks
xtickspos = np.arange(0,100,10)
i = 0
while i < len (listall):
#r values of metrics
y = np.array(sumario.iloc[listall[i],4])
# print(y)
#Entrances
# x = np.arange(1,len(y)+1, 1)
x = num
# print(x)
# ax2.scatter(x,y, color=listcor[i], s=2)
ax2.plot(x,y, color=listcor[i], label=listlabel[i], linewidth=1, zorder=1)
i += 1
ax2.scatter(99.3, 0.21,s=20, edgecolors='r', facecolors='none', zorder=2)
ax2.legend(loc='upper left', prop={'size': 10})
ax2.set_ylabel('r', labelpad=10)
ax2.set_xlabel('Wind speed percentile thresholds', labelpad=10)
# ax2.xaxis.set_tick_params(axis=0, which='minor', bottom=True )
# ax2.set_xticks(xtickspos,minor=True)
# ax2.set_xticks(np.arange(90.,100., 0.1),minor=True)
# ax2.set_xlim([95.5,99.9])
ax1.set_title('(a)', loc='left')
ax2.set_title('(b)', loc='left')
ax3.set_title('(c)', loc='left')
ax2.set_xticks(np.arange(90.,100., 1))
ax2.set_xticklabels(namesxticks, rotation=45)
# ax2.set_ylim(-0.25, 0.25)
#Proportion of ylimites, scale transformation
# ymax = (0.67-(-0.1))/(0.9-(-0.1))
# ax2.axvline(x=99.4, ymin=-0.1, ymax=ymax, ls='--', color='gray', linewidth=0.5, zorder=1)
# ymax3 = (11/12)-0.2
# ymax3 = 0.7
xmax3 = (99.3/99.9)-0.0875
ax3.plot(num, coletor[0]/3.6, color='darkmagenta', linewidth=1, label='Wind 15-min')
ax3.plot(num, coletor[1]/3.6, color='royalblue', linewidth=1, label='Wind 1-hour')
ax3.plot(num, coletor[2]/3.6, color='k', linewidth=1, label='Wind 1-day')
ax3.set_ylabel('Wind speed (m s$^{-1}$)')
ax3.set_xlabel('Wind speed percentile thresholds', labelpad=10)
ax3.set_xticks([90, 91, 92, 93, 94, 95, 96, 97, 98, 99])
ax3.set_xticklabels(namesxticks, rotation=45)
ax3.minorticks_on()
ax3.legend(loc='upper left', prop={'size': 10})
ax3.axvline(x=99.3, ymin=0, ymax=0.785,ls='--', color='r', linewidth=0.5, zorder=2)
ax3.axhline(y=11.01, xmin=0.0001, xmax=xmax3,ls='--', color='r', linewidth=0.5, zorder=2)
# ax2.set_xticks(xtickspos)
# ax2.set_xticklabels(namesxticks)
plt.savefig('../Exit_ws/graphs_scatter_r_wind_stand2.png', dpi=300, bbox_inches='tight')
plt.close()
|
[
"araujo.raquelf@gmail.com"
] |
araujo.raquelf@gmail.com
|
5146d05cbe70510bac2fff4baf6eedb080681893
|
a5a0161cf91378479d1512ebf3000002317789dc
|
/src/pymeica/py_meica_subject.py
|
abebeb2ba33549ef13f3692565d04077405d7554
|
[] |
no_license
|
pappyhammer/bonn_assemblies
|
0de0323b0fc864e3e149ac24cf52d5a22064463e
|
510eef8c9b6f7ea8c86f74554c56f859d59eec28
|
refs/heads/master
| 2023-01-02T19:19:28.231485
| 2020-10-20T15:02:21
| 2020-10-20T15:02:21
| 150,748,917
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 51,136
|
py
|
from cicada.analysis.cicada_analysis_format_wrapper import CicadaAnalysisFormatWrapper
import os
import hdf5storage
import numpy as np
from sortedcontainers import SortedList, SortedDict
import neo
import quantities as pq
from pymeica.utils.spike_trains import create_spike_train_neo_format, spike_trains_threshold_by_firing_rate
import elephant.conversion as elephant_conv
from pymeica.utils.mcad import MCADOutcome
from pymeica.utils.file_utils import find_files
from pymeica.utils.misc import get_unit_label
import yaml
import pandas as pd
class SpikeStructure:
def __init__(self, patient, spike_trains, microwire_labels, cluster_labels, cluster_indices,
spike_nums=None, title=None): # , ordered_indices=None, ordered_spike_data=None):
"""
Args:
patient:
spike_nums:
spike_trains:
microwire_labels:
cluster_labels:
cluster_indices: indicate what is the index of the units among the clusters (indexing starts at 0)
activity_threshold:
title:
ordered_indices:
ordered_spike_data:
"""
self.patient = patient
self.spike_trains = spike_trains
# self.ordered_spike_data = ordered_spike_data
# array of int, representing the channel number actually such as in 'times_pos_CSC2.mat'
self.microwire_labels = np.array(microwire_labels)
# array of int
self.cluster_labels = np.array(cluster_labels)
self.cluster_indices = np.array(cluster_indices)
self.title = title
# cells labels
self.labels = self.get_labels()
# self.ordered_indices = ordered_indices
# self.ordered_labels = None
# self.ordered_spike_trains = None
# if self.ordered_indices is not None:
# self.ordered_spike_trains = list()
# for index in ordered_indices:
# self.ordered_spike_trains.append(self.spike_trains[index])
# self.ordered_labels = []
# # y_ticks_labels_ordered = spike_nums_struct.labels[best_seq]
# for old_cell_index in self.ordered_indices:
# self.ordered_labels.append(self.labels[old_cell_index])
def get_labels(self):
labels = []
# cluster_to_label = {1: "MU ", 2: "SU ", -1: "Artif ", 0: ""}
cluster_to_label = {1: "MU", 2: "SU", -1: "Artif", 0: ""}
# print(f"get_labels self.microwire_labels {self.microwire_labels}")
for i, micro_wire in enumerate(self.microwire_labels):
channel = self.patient.channel_info_by_microwire[micro_wire]
unit_label = get_unit_label(cluster_label=cluster_to_label[self.cluster_labels[i]],
cluster_index=self.cluster_indices[i],
channel_index=micro_wire, region_label=channel)
labels.append(unit_label)
# labels.append(f"{cluster_to_label[self.cluster_labels[i]]}{micro_wire} "
# f"{channel}")
return labels
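# Labels follow the pattern "<SU|MU> <cluster index> <microwire index> <side+region+wire>",
# e.g. 'MU 7 25 LMH2'.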
# def set_order(self, ordered_indices):
# if ordered_indices is None:
# self.ordered_spike_trains = np.copy(self.spike_trains)
# else:
# self.ordered_spike_trains = []
# for index in ordered_indices:
# self.ordered_spike_trains.append(self.spike_trains[index])
# self.ordered_indices = ordered_indices
# self.ordered_labels = []
# for old_cell_index in self.ordered_indices:
# self.ordered_labels.append(self.labels[old_cell_index])
class SleepStage:
def __init__(self, number, start_time, stop_time, sleep_stage, conversion_datetime, conversion_timestamp):
# timestamps are float; they need to be multiplied by 10^3 to get the real value, which represents microseconds
self.start_time = start_time * 1000
self.stop_time = stop_time * 1000
# self.stop_time = stop_time * 1000
# duration is in microseconds
self.duration = self.stop_time - self.start_time
self.duration_sec = self.duration / 1000000
# string describing the stage (like "W", "3", "R")
self.sleep_stage = sleep_stage
self.conversion_datetime = conversion_datetime
self.conversion_timestamp = conversion_timestamp * 1000
self.number = number
# first key is a tuple of int representing first_bin and last_bin
# value is an instance of MCADOutcome, bin_size is available in MCADOutcome
self.mcad_outcomes = SortedDict()
# TODO: See to build an array with int key to get the MCADOutcome from a bin index or timestamps
def add_mcad_outcome(self, mcad_outcome, bins_tuple):
"""
Add an instance of MCADOutcome
:param mcad_outcome:
:param bins_tuple:
:return:
"""
self.mcad_outcomes[bins_tuple] = mcad_outcome
def __str__(self):
result = ""
result += f"num {self.number}, "
result += f"stage {self.sleep_stage}, "
# result += f"start_time {self.start_time}, "
# result += f"stop_time {self.stop_time}, \n"
# result += f"duration (usec) {self.duration}, "
result += f"duration: {self.duration_sec:.1f} sec, {(self.duration / 1000000) / 60:.1f} min\n"
if len(self.mcad_outcomes) == 0:
result += f" No MCAD outcome\n"
else:
for bins_tuple, mcad_outcome in self.mcad_outcomes.items():
first_bin_index = bins_tuple[0]
last_bin_index = bins_tuple[1]
chunk_duration = (last_bin_index - first_bin_index + 1) * mcad_outcome.spike_trains_bin_size
# passing it in sec
chunk_duration /= 1000
result += f"{bins_tuple} {mcad_outcome.side} {mcad_outcome.n_cell_assemblies} cell " \
f"assemblies on {chunk_duration:.2f} sec segment.\n"
if mcad_outcome.n_cell_assemblies > 0:
# cell_assembly is an instance of CellAssembly
for ca_index, cell_assembly in enumerate(mcad_outcome.cell_assemblies):
result += f" CA n° {ca_index}: {cell_assembly.n_units} units, " \
f"{cell_assembly.n_repeats} repeats, " \
f"{cell_assembly.n_invariant_units} RU, " \
f"{cell_assembly.n_responsive_units} IU, " \
f"score: {cell_assembly.probability_score:.4f} \n"
# result += f",\n conversion_datetime {self.conversion_datetime}, "
# result += f"conversion_timestamp {self.conversion_timestamp}, "
return result
#
"""
compatible with cicada.analysis.cicada_analysis_format_wrapper.CicadaAnalysisFormatWrapper
because it implements identifier decorator, as well as load_data and is_data_valid
But we don't import cicada code so this package could also be independent.
"""
class PyMeicaSubject(CicadaAnalysisFormatWrapper):
DATA_FORMAT = "PyMEICA"
WRAPPER_ID = "PyMEICASubject"
def __init__(self, data_ref, load_data=True, verbose=1):
CicadaAnalysisFormatWrapper.__init__(self, data_ref=data_ref, data_format=self.DATA_FORMAT, load_data=load_data)
# self._data_ref = data_ref
# self.load_data_at_init = load_data
# self._data_format = self.DATA_FORMAT
# always load at the start
self._identifier = os.path.basename(data_ref)
self.verbose = verbose
# variables initiated in self.load_data()
# number of units, one Multi-unit count as one unit
self.n_units = 0
# The variable 'spikes' stores the spike shape from all spikes
# measured in this channel.
# This variable contains a matrix with dimension N_spikes x 64.
# Each row corresponds to a single spike and gives 64 voltage values
# of this spike aligned to the maximum.
self.spikes_by_microwire = dict()
# The variable 'cluster_class' provides information about the timing of
# each spike and the cluster that it corresponds to.
# This variable contains a N_spikes x 2 matrix in which the first
# column contains the cluster that the spike belongs to and the
# second column saves the time of the spike.
# replace by the code of the type of unit: SU, MU etc... 1 = MU 2 = SU -1 = Artif.
self.spikes_time_by_microwire = dict()
self.channel_info_by_microwire = None
# list of SleepStage instances (in chronological order)
self.sleep_stages = list()
self.cluster_info = None
self.n_microwires = 0
# self.available_micro_wires = 0
self.nb_sleep_stages = 0
# list of int, corresponding of the int representing the micro_wire such as in files 'times_CSC1'
self.available_micro_wires = list()
# key the stimulus number (int) and as value the string describing the
# stimulus (like "Barack Obama"). Init in load_stimuli_name
self.stimuli_name_dict = dict()
# key is the label (str) representing a unit such as 'MU 7 25 LMH2'
# (SU or MU, cluster_index, microwire_index, Side&Channel),
# value is a list of two int representing the preferred stimulus in the evening and in the morning.
# if -1, means no answer at this moment
# if a label (cell) is not in this dict, it means it is not a responsive units
self.is_responsive_units_dict = dict()
# same as for is_responsive_units_dict but for invariant units
self.is_invariant_units_dict = dict()
if self.load_data_at_init:
self.load_data()
def _load_responsive_and_invariant_units(self, df, invariant_units):
"""
:param df: panda dataframe to explore
:param invariant_units: (bool) if True then it's invariant_units, else it is responsive units
:return:
"""
if invariant_units:
units_dict = self.is_invariant_units_dict = dict()
else:
units_dict = self.is_responsive_units_dict = dict()
df_response = df.loc[(df['Patient'] == int(self.identifier[1:3]))]
if len(df_response) == 0:
return
# print(f"invariant_units {invariant_units}")
for index in df_response.index:
channel = df.loc[df.index[index], 'Channel']
# removing one so it matches the actual indexing
channel -= 1
cluster = df.loc[df.index[index], 'Cluster']
hemisphere = df.loc[df.index[index], 'Hemisphere']
region = df.loc[df.index[index], 'Region']
wire = df.loc[df.index[index], 'Wire']
preferred_stim_num_e = df.loc[df.index[index], 'preferred_stim_num_e']
preferred_stim_num_m = df.loc[df.index[index], 'preferred_stim_num_m']
# print(f"channel {channel}, cluster {cluster}, hemisphere {hemisphere}, region {region}, "
# f"wire {wire}, preferred_stim_num_e {preferred_stim_num_e}, "
# f"preferred_stim_num_m {preferred_stim_num_m} ")
# print(f"self.cluster_info[micro_wire] {len(self.cluster_info)}")
cluster_infos = self.cluster_info[channel][0]
cluster_match_index = cluster_infos[cluster]
# print(f"cluster_infos {cluster_infos}")
cluster_to_label = {1: "MU", 2: "SU", -1: "Artif", 0: ""}
unit_label = get_unit_label(cluster_label=cluster_to_label[cluster_match_index],
cluster_index=cluster,
channel_index=channel,
region_label=f"{hemisphere}{region}{wire}")
units_dict[unit_label] = (preferred_stim_num_e, preferred_stim_num_m)
@staticmethod
def is_data_valid(data_ref):
"""
Check if the data can be an input for this wrapper as data_ref
Args:
data_ref: file or directory
Returns: a boolean
"""
if not os.path.isdir(data_ref):
return False
files_in_dir = [item for item in os.listdir(data_ref)
if os.path.isfile(os.path.join(data_ref, item))]
identifier = os.path.basename(data_ref)
files_to_find = ["cluster_info.mat", f"{identifier}_sleepstages.mat"]
for file_to_find in files_to_find:
if file_to_find not in files_in_dir:
return False
return True
# def get_sleep_stage_epoch(self, sleep_stage_name):
# """
# Return the epoch of a given type of slepe stage.
# :param sleep_stage_name:
# :return: List of list of 2 int represent the timestamps in sec of the beginning and end of each epoch
# """
# epochs = []
#
# for sleep_stage in self.sleep_stages:
# if sleep_stage.sleep_stage != sleep_stage_name:
# continue
# epochs.append(sleep_stage.start_time, sleep_stage.stop_time)
def load_data(self):
CicadaAnalysisFormatWrapper.load_data(self)
if self.verbose:
print(f"Loading data for PyMeicaSubject {self._identifier}")
# number of units, one Multi-unit count as one unit
self.n_units = 0
# Filter the items and only keep files (strip out directories)
files_in_dir = [item for item in os.listdir(self._data_ref)
if os.path.isfile(os.path.join(self._data_ref, item))]
# The variable 'spikes' stores the spike shape from all spikes
# measured in this channel.
# This variable contains a matrix with dimension N_spikes x 64.
# Each row corresponds to a single spike and gives 64 voltage values
# of this spike aligned to the maximum.
self.spikes_by_microwire = dict()
# The variable 'cluster_class' provides information about the timing of
# each spike and the cluster that it corresponds to.
# This variable contains a N_spikes x 2 matrix in which the first
# column contains the cluster that the spike belongs to and the
# second column saves the time of the spike.
original_spikes_cluster_by_microwire = dict()
# replace by the code of the type of unit: SU, MU etc... 1 = MU 2 = SU -1 = Artif.
spikes_cluster_by_microwire = dict()
self.spikes_time_by_microwire = dict()
cluster_correspondance_by_microwire = dict()
cluster_info_file = hdf5storage.loadmat(os.path.join(self._data_ref, "cluster_info.mat"))
label_info = cluster_info_file["label_info"]
# contains either an empty list if no cluster, or a list containing a list containing the type of cluster
# 1 = MU 2 = SU -1 = Artif.
# 0 = Unassigned (is ignored)
self.cluster_info = cluster_info_file['cluster_info'][0, :]
# adding cluster == 0 in index so it can match index in cluster_class
for i, cluster in enumerate(self.cluster_info):
if len(cluster) == 0:
self.cluster_info[i] = [[0]]
else:
new_list = [0]
new_list.extend(self.cluster_info[i][0])
self.cluster_info[i] = [new_list]
self.channel_info_by_microwire = cluster_info_file["cluster_info"][1, :]
self.channel_info_by_microwire = [c[0] for c in self.channel_info_by_microwire]
# print_mat_file_content(cluster_info_file)
sleep_stages_file = hdf5storage.loadmat(os.path.join(self._data_ref, self._identifier + "_sleepstages.mat"),
mat_dtype=True)
conversion_datetime = sleep_stages_file["conversion_datetime"]
conversion_timestamp = sleep_stages_file["conversion_timestamp"]
# The variable 'sleepstages' is a list of length N_sleepstages; each entry contains 3 elements:
# the start time and stop time of the sleep stage and the sleep stage label.
sleep_stages_tmp = sleep_stages_file["sleepstages"][0, :]
self.sleep_stages = []
total_duration = 0
for ss_index, sleep_stage_data in enumerate(sleep_stages_tmp):
# sleep_stage_data = sleep_stage_data[0]
# print(f"{ss_index} sleep_stage_data {sleep_stage_data}")
# The start time of the first stage, might not be the same as the one of the first spike
# recorded for this stage, as the data we have don't start at the beginning of a stage.
ss = SleepStage(number=ss_index, start_time=sleep_stage_data[0][0][0], stop_time=sleep_stage_data[1][0][0],
sleep_stage=sleep_stage_data[2][0], conversion_datetime=conversion_datetime[0],
conversion_timestamp=conversion_timestamp[0][0])
# print(f"ss {ss}")
total_duration += ss.duration
self.sleep_stages.append(ss)
self.nb_sleep_stages = len(self.sleep_stages)
# TODO: See to build a vector that give for any timestamps in the whole recording to which
# Sleepstage it belongs
# print(f"sleepstages[0]: {sleepstages[1]}")
# print(f"conversion_datetime {conversion_datetime}")
# print(f"conversion_timestamp {conversion_timestamp[0][0]}")
# print(f"conversion_timestamp int ? {isinstance(conversion_timestamp[0][0], int)}")
if self.verbose:
print(f"Data total duration (min): {(total_duration / 1000000) / 60}")
# print_mat_file_content(sleep_stages_file)
self.available_micro_wires = []
for file_in_dir in files_in_dir:
if file_in_dir.endswith("yaml") and (not file_in_dir.startswith(".")) and ("stimuli_name" in file_in_dir):
self.load_stimuli_name(stimuli_yaml_file=os.path.join(self._data_ref, file_in_dir))
continue
# times_pos_CSC matched the full night recordings
if file_in_dir.startswith("times_pos_CSC") or file_in_dir.startswith("times_CSC"):
if file_in_dir.startswith("times_pos_CSC"):
# -1 to start by 0, to respect other matrices order
microwire_number = int(file_in_dir[13:-4]) - 1
else:
microwire_number = int(file_in_dir[9:-4]) - 1
self.available_micro_wires.append(microwire_number)
data_file = hdf5storage.loadmat(os.path.join(self._data_ref, file_in_dir))
# print(f"data_file {data_file}")
self.spikes_by_microwire[microwire_number] = data_file['spikes']
cluster_class = data_file['cluster_class']
# .astype(int)
# if value is 0, no cluster
original_spikes_cluster_by_microwire = cluster_class[:, 0].astype(int)
# spikes_cluster_by_microwire[microwire_number] = cluster_class[:, 0].astype(int)
# changing the cluster reference by the cluster type, final values will be
# 1 = MU 2 = SU -1 = Artif.
# 0 = Unassigned (is ignored)
# We want for each microwire to create as many lines of "units" as cluster
go_for_debug_mode = False
if go_for_debug_mode:
print(f"microwire_number {microwire_number}")
print(f"channel {self.channel_info_by_microwire[microwire_number]}")
print(f"self.cluster_info[microwire_number] {self.cluster_info[microwire_number]}")
print(f"np.unique(original_spikes_cluster_by_microwire) "
f"{np.unique(original_spikes_cluster_by_microwire)}")
print(f"original_spikes_cluster_by_microwire "
f"{original_spikes_cluster_by_microwire}")
print("")
# for i, cluster_ref in enumerate(spikes_cluster_by_microwire[microwire_number]):
# if cluster_ref > 0:
# cluster_ref -= 1
# spikes_cluster_by_microwire[microwire_number][i] = \
# self.cluster_info[microwire_number][0][cluster_ref]
# it's matlab indices, so we need to start with zero
# not needed anymore because we add 0
# for i, cluster_ref in enumerate(original_spikes_cluster_by_microwire):
# if cluster_ref > 0:
# original_spikes_cluster_by_microwire[i] -= 1
# rounded to int
# for each microwire, we add a dict with as many key as cluster, and for each key
# we give as a value the spikes for this cluster
self.spikes_time_by_microwire[microwire_number] = dict()
# cluster_infos contains the list of clusters for this microwire.
# original_spikes_cluster_by_microwire is same length as cluster_class[:, 1].astype(int), ie
# nb spikes
cluster_infos = self.cluster_info[microwire_number][0]
for index_cluster, n_cluster in enumerate(cluster_infos):
# keep the spikes of the corresponding cluster
mask = np.zeros(len(cluster_class[:, 1]), dtype="bool")
mask[np.where(original_spikes_cluster_by_microwire == index_cluster)[0]] = True
# timestamps are float; they need to be multiplied by 10^3 to get the real value,
# represented in microseconds
self.spikes_time_by_microwire[microwire_number][index_cluster] = \
(cluster_class[mask, 1] * 1000)
# .astype(int)
# print(f"cluster_class[mask, 1] {cluster_class[mask, 1]}")
# print(f"- cluster_class[mask, 1] {cluster_class[mask, 1][0] - int(cluster_class[mask, 1][0])}")
self.n_units += 1
if microwire_number < 0:
print(f"times_pos_CSC{microwire_number}")
print(f"spikes shape 0: {self.spikes_by_microwire[microwire_number][0, :]}")
# plt.plot(spikes_by_microwire[microwire_number][0, :])
# plt.show()
print(f"spikes cluster: {spikes_cluster_by_microwire[microwire_number]}")
# print(f"spikes time: {self.spikes_time_by_microwire[microwire_number].astype(int)}")
# print_mat_file_content(data_file)
print(f"\n \n")
# 2nd round for responsive and invariant units
for file_in_dir in files_in_dir:
if file_in_dir.endswith("csv") and "responsive_units" in file_in_dir:
# load responsive_units info
responsive_units_file = os.path.join(self._data_ref, file_in_dir)
responsive_units_df = pd.read_csv(responsive_units_file)
self._load_responsive_and_invariant_units(df=responsive_units_df, invariant_units=False)
elif file_in_dir.endswith("csv") and "invariant_units" in file_in_dir:
# load invariant_units info
invariant_units_file = os.path.join(self._data_ref, file_in_dir)
invariant_units_df = pd.read_csv(invariant_units_file)
self._load_responsive_and_invariant_units(df=invariant_units_df, invariant_units=True)
self.n_microwires = len(self.spikes_by_microwire)
self.available_micro_wires = np.array(self.available_micro_wires)
def elapsed_time_from_falling_asleep(self, sleep_stage, from_first_stage_available=False):
"""
Looking at the time of the first sleep sleep_stage (it could start with Wake), return the number
of time that separate it in seconds (could be negative if the stage is the wake one before sleep)
:param sleep_stage: SleepStage instance
:param from_first_stage_available: if True, the time is measured from the first stage recorded.
:return:
"""
for sleep_stage_index in range(len(self.sleep_stages)):
ss = self.sleep_stages[sleep_stage_index]
if not from_first_stage_available:
if ss.sleep_stage == "W":
continue
return (sleep_stage.start_time - ss.start_time) / 1000000
return -1
def load_stimuli_name(self, stimuli_yaml_file):
"""
Load the file containing as key the stimulus number (int) and as value the string describing the
stimulus (like "Barack Obama")
:param stimuli_yaml_file:
:return:
"""
with open(stimuli_yaml_file, 'r') as stream:
self.stimuli_name_dict = yaml.load(stream, Loader=yaml.Loader)
def load_mcad_data(self, data_path, side_to_load=None,
sleep_stage_indices_to_load=None,
macd_comparison_key=MCADOutcome.BEST_SILHOUETTE,
min_repeat=3, update_progress_bar_fct=None,
time_started=None,
total_increment=1):
"""
Explore all directories in data_path (recursively) and load the data issues from Malvache Cell Assemblies
Detection code in yaml file.
:param data_path:
:param macd_comparison_key: indicate how to compare two outcomes for the same spike_trains section
Choice among: MCADOutcome.BEST_SILHOUETTE & MCADOutcome.MAX_N_ASSEMBLIES
:param min_repeat: minimum number of times a cell assembly should repeat to be kept.
:param side_to_load: (str) if None, both side are loaded, otherwise should be 'L' or 'R'
:param sleep_stage_indices_to_load: (list of int) if None, all stages are loaded, otherwise
only the ones listed
:param update_progress_bar_fct: for Cicada progress bar progress (optional), fct that take the initial time,
and the increment at each step of the loading
:return:
"""
if data_path is None:
return
mcad_files = find_files(dir_to_explore=data_path, keywords=["stage"], extensions=("yaml", "yml"))
# for progress bar purpose
n_files = len(mcad_files)
n_mcad_outcomes = 0
increment_value = 0
increment_step_for_files = (total_increment * 0.9) / n_files
# first key: sleep_stage index, 2nd key: tuple of int representing firt and last bin,
# value is a list of dict representing the content of the yaml file
mcad_by_sleep_stage = dict()
for file_index, mcad_file in enumerate(mcad_files):
mcad_file_basename = os.path.basename(mcad_file)
# to avoid loading every file, we first filter based on the file name; update this if the naming scheme
# changes. So far the names contain subject_id, sleep_index, side, bin of the chunk and stage_name
if self.identifier not in mcad_file_basename:
continue
if (side_to_load is not None) and (side_to_load not in mcad_file_basename):
continue
if sleep_stage_indices_to_load is not None:
split_file_name = mcad_file_basename.split()
if split_file_name[4] == "index":
stage_index_from_file = int(mcad_file_basename.split()[5])
else:
stage_index_from_file = int(mcad_file_basename.split()[3])
if stage_index_from_file not in sleep_stage_indices_to_load:
continue
with open(mcad_file, 'r') as stream:
mcad_results_dict = yaml.load(stream, Loader=yaml.Loader)
# first we check if it contains some of the field typical of mcad file
if ("subject_id" not in mcad_results_dict) or ("sleep_stage_index" not in mcad_results_dict):
continue
# then we check that it matches the actual subject_id
if mcad_results_dict["subject_id"] != self.identifier:
continue
sleep_stage_index = mcad_results_dict["sleep_stage_index"]
first_bin_index = mcad_results_dict["first_bin_index"]
last_bin_index = mcad_results_dict["last_bin_index"]
bins_tuple = (first_bin_index, last_bin_index)
side = mcad_results_dict["side"]
if (side_to_load is not None) and (side != side_to_load):
continue
if sleep_stage_indices_to_load is not None:
if sleep_stage_index not in sleep_stage_indices_to_load:
continue
if sleep_stage_index not in mcad_by_sleep_stage:
mcad_by_sleep_stage[sleep_stage_index] = dict()
if bins_tuple not in mcad_by_sleep_stage[sleep_stage_index]:
mcad_by_sleep_stage[sleep_stage_index][bins_tuple] = []
mcad_by_sleep_stage[sleep_stage_index][bins_tuple].append(mcad_results_dict)
n_mcad_outcomes += 1
if update_progress_bar_fct is not None:
increment_value += increment_step_for_files
if increment_value > 1:
update_progress_bar_fct(time_started=time_started,
increment_value=1)
increment_value -= 1
# now we want to keep only one result for each chunk of a given sleep_stage
# and add it to the SleepStage instance
increment_step_for_mcad_outcomes = (total_increment * 0.1) / n_mcad_outcomes
for sleep_stage_index in mcad_by_sleep_stage.keys():
for bins_tuple, mcad_dicts in mcad_by_sleep_stage[sleep_stage_index].items():
best_mcad_outcome = None
for mcad_dict in mcad_dicts:
mcad_outcome = MCADOutcome(mcad_yaml_dict=mcad_dict,
comparison_key=macd_comparison_key,
subject=self)
if update_progress_bar_fct is not None:
increment_value += increment_step_for_mcad_outcomes
if increment_value > 1:
update_progress_bar_fct(time_started=time_started,
increment_value=1)
increment_value -= 1
if best_mcad_outcome is None:
best_mcad_outcome = mcad_outcome
else:
best_mcad_outcome = best_mcad_outcome.best_mcad_outcome(mcad_outcome)
if best_mcad_outcome.n_cell_assemblies == 0:
# if no cell assembly we don't keep it
continue
# if one cell assembly we test that it repeats a minimum of time
if best_mcad_outcome.n_cell_assemblies == 1:
if np.max(best_mcad_outcome.n_repeats_in_each_cell_assembly()) < min_repeat:
continue
sleep_stage = self.sleep_stages[sleep_stage_index]
sleep_stage.add_mcad_outcome(mcad_outcome=best_mcad_outcome,
bins_tuple=best_mcad_outcome.bins_tuple)
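# Minimal usage sketch (paths and stage indices below are placeholders):
# subject = PyMeicaSubject(data_ref="/path/to/subject_dir")
# subject.load_mcad_data(data_path="/path/to/mcad_results", side_to_load='L',
#                        sleep_stage_indices_to_load=[2, 3])
# print(subject.sleep_stages[2])  # the best MCADOutcome per chunk is attached to each SleepStage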
def build_spike_nums(self, sleep_stage_index, side_to_analyse, keeping_only_SU, remove_high_firing_cells,
firing_rate_threshold, spike_trains_binsize):
"""
Build a spike_nums (bin version of spike_trains) from a sleep stage index and side.
:param sleep_stage_index: (int)
:param side_to_analyse: (str) 'L' or 'R'
:param keeping_only_SU: (bool)
:param remove_high_firing_cells: (bool)
:param firing_rate_threshold: (int) in Hz
:param spike_trains_binsize: (int) in ms
:return:
"""
spike_struct = self.construct_spike_structure(sleep_stage_indices=[sleep_stage_index],
channels_starting_by=[side_to_analyse],
keeping_only_SU=keeping_only_SU)
spike_trains = spike_struct.spike_trains
cells_label = spike_struct.labels
binsize = spike_trains_binsize * pq.ms
# first we create a spike_trains in the neo format
spike_trains, t_start, t_stop = create_spike_train_neo_format(spike_trains)
duration_in_sec = (t_stop - t_start) / 1000
if remove_high_firing_cells:
filtered_spike_trains, cells_below_threshold = \
spike_trains_threshold_by_firing_rate(spike_trains=spike_trains,
firing_rate_threshold=firing_rate_threshold,
duration_in_sec=duration_in_sec)
backup_spike_trains = spike_trains
spike_trains = filtered_spike_trains
n_cells_total = len(cells_label)
cells_label_removed = [(index, label) for index, label in enumerate(cells_label) if
index not in cells_below_threshold]
cells_label = [label for index, label in enumerate(cells_label) if index in cells_below_threshold]
n_cells = len(cells_label)
# print(
# f"{n_cells_total - n_cells} cells had firing rate > {firing_rate_threshold} Hz and have been removed.")
# if len(cells_label_removed):
# for index, label in cells_label_removed:
# print(f"{label}, {len(backup_spike_trains[index])}")
n_cells = len(spike_trains)
neo_spike_trains = []
for cell in np.arange(n_cells):
spike_train = spike_trains[cell]
# print(f"n_spikes: {cells_label[cell]}: {len(spike_train)}")
neo_spike_train = neo.SpikeTrain(times=spike_train, units='ms',
t_start=t_start,
t_stop=t_stop)
neo_spike_trains.append(neo_spike_train)
spike_trains_binned = elephant_conv.BinnedSpikeTrain(neo_spike_trains, binsize=binsize)
# transform the binned spike train into array
use_z_score_binned_spike_trains = False
if use_z_score_binned_spike_trains:
data = spike_trains_binned.to_array()
# print(f"data.type() {type(data)}")
# z-score
spike_nums = np.zeros(data.shape, dtype="int8")
for cell, binned_spike_train in enumerate(data):
mean_train = np.mean(binned_spike_train)
print(f"mean_train {mean_train} {np.max(binned_spike_train)}")
binned_spike_train = binned_spike_train - mean_train
n_before = len(np.where(data[cell] > 0)[0])
n = len(np.where(binned_spike_train >= 0)[0])
print(f"{cell}: n_before {n_before} vs {n}")
spike_nums[cell, binned_spike_train >= 0] = 1
else:
spike_nums = spike_trains_binned.to_bool_array().astype("int8")
# A list of lists for each spike train (i.e., rows of the binned matrix),
# that in turn contains for each spike the index into the binned matrix where this spike enters.
spike_bins_indices = spike_trains_binned.spike_indices
return spike_trains, spike_nums, cells_label, spike_bins_indices
def construct_spike_structure(self, sleep_stage_indices=None,
selection_range_time=None,
sleep_stage_selection=None, channels_starting_by=None,
channels_without_number=None, channels_with_number=None,
title=None, keeping_only_SU=False):
"""
        Construct a spike structure (an instance of SpikeStructure) containing the spike trains and the labels
        corresponding to each spike train. There might be big gaps between spikes in case two non-contiguous
        time intervals are included.
:param selection_range_time: (tuple of float) represents two timestamps determining an epoch over which
to select the spikes. If given, It is prioritized over sleep stages selection.
:param sleep_stage_indices: (list of int) list of sleep_stage_indices
:param sleep_stage_selection: (list of str) list of sleep stage according to their identifier. All sleep stages
in this category will be added
        :param channels_starting_by: list of str, if None take them all, otherwise take the channels starting with
        the same name (like "RAH" takes RAH1, RAH2 etc..., and just "R" takes all microwires on the right side)
        :param channels_without_number: list of str, full channel names without numbers
        :param channels_with_number: list of str, full channel names with numbers
        :param keeping_only_SU: if True, multi-units (MU) are excluded and only single units (SU) are kept
:return:
"""
        # TODO: consider adding an option to the spike structure to indicate when there are time gaps
# print(f"construct_spike_structure start for {self.identifier}")
# don't put non-assigned clusters
only_SU_and_MU = True
micro_wire_to_keep = []
if (channels_starting_by is None) and (channels_without_number is None) and (channels_with_number is None):
micro_wire_to_keep = self.available_micro_wires
else:
if channels_starting_by is None:
channels_starting_by = []
if channels_without_number is None:
channels_without_number = []
if channels_with_number is None:
channels_with_number = []
indices, channels = self.select_channels_starting_by(channels_starting_by)
micro_wire_to_keep.extend(indices)
indices, channels = self.select_channels_with_exact_same_name_without_number(channels_without_number)
micro_wire_to_keep.extend(indices)
micro_wire_to_keep.extend(self.select_channels_with_exact_same_name_with_number(channels_with_number))
        # remove redundant microwires and sort them
micro_wire_to_keep = np.unique(micro_wire_to_keep)
# then we check if all the micro_wire data are available
to_del = np.setdiff1d(micro_wire_to_keep, self.available_micro_wires)
if len(to_del) > 0:
for d in to_del:
micro_wire_to_keep = micro_wire_to_keep[micro_wire_to_keep != d]
channels_to_keep = [self.channel_info_by_microwire[micro_wire] for micro_wire in micro_wire_to_keep]
sleep_stages_to_keep = []
if sleep_stage_indices is not None:
for index in sleep_stage_indices:
sleep_stages_to_keep.append(self.sleep_stages[index])
if sleep_stage_selection is not None:
sleep_stages_to_keep.extend(self.selection_sleep_stage_by_stage(sleep_stage_selection))
if len(sleep_stages_to_keep) == 0:
            # In case no stage has been selected, we put all stages in the order they were recorded
sleep_stages_to_keep = self.sleep_stages
# selecting spikes that happen during the time interval of selected sleep stages
        # in order to plot a raster plot, a start time and an end time are needed
# so for each stage selected, we should keep the timestamp of the first spike and the timestamp of the
# last spike
        # first we count how many spike trains there are (how many SU & MU)
nb_units_spike_nums = 0
for mw_index, micro_wire in enumerate(micro_wire_to_keep):
if only_SU_and_MU:
nb_units_to_keep = 0
cluster_infos = self.cluster_info[micro_wire][0]
for unit_cluster, spikes_time in self.spikes_time_by_microwire[micro_wire].items():
cluster = cluster_infos[unit_cluster]
if (cluster < 1) or (cluster > 2):
continue
# 1 == MU, 2 == SU
if keeping_only_SU:
if cluster == 1:
                            # not taking MU into consideration
continue
                    # checking whether there are any spikes
at_least_a_spike = False
if selection_range_time is not None:
start_time = selection_range_time[0]
stop_time = selection_range_time[1]
spikes_time = np.copy(spikes_time)
spikes_time = spikes_time[spikes_time >= start_time]
spikes_time = spikes_time[spikes_time <= stop_time]
if len(spikes_time) > 0:
at_least_a_spike = True
else:
for ss in sleep_stages_to_keep:
start_time = ss.start_time
stop_time = ss.stop_time
spikes_time = np.copy(spikes_time)
spikes_time = spikes_time[spikes_time >= start_time]
spikes_time = spikes_time[spikes_time <= stop_time]
if len(spikes_time) > 0:
at_least_a_spike = True
break
                    # counting it only if there are some spikes during that interval
if at_least_a_spike:
nb_units_to_keep += 1
nb_units_spike_nums += nb_units_to_keep
else:
nb_units_spike_nums += len(self.spikes_time_by_microwire[micro_wire])
spike_trains = [np.zeros(0)] * nb_units_spike_nums
# used to labels the ticks
micro_wire_labels = []
cluster_labels = []
# cluster_indices indicate what is the index of the units among the clusters (indexing starts at 0)
cluster_indices = []
if selection_range_time is not None:
start_time = selection_range_time[0]
stop_time = selection_range_time[1]
time_epochs = [(start_time, stop_time)]
else:
time_epochs = [(ss.start_time, ss.stop_time) for ss in sleep_stages_to_keep]
for time_epoch in time_epochs:
start_time = time_epoch[0]
stop_time = time_epoch[1]
unit_index = 0
for mw_index, micro_wire in enumerate(micro_wire_to_keep):
cluster_infos = self.cluster_info[micro_wire][0]
for unit_cluster_index, spikes_time in self.spikes_time_by_microwire[micro_wire].items():
cluster = cluster_infos[unit_cluster_index]
                    # not taking into consideration artifacts or non-clustered units
if (cluster < 1) or (cluster > 2):
continue
if keeping_only_SU:
if cluster == 1:
                            # not taking MU into consideration
continue
spikes_time = np.copy(spikes_time)
spikes_time = spikes_time[spikes_time >= start_time]
spikes_time = spikes_time[spikes_time <= stop_time]
if len(spikes_time) == 0:
# if no spikes we don't keep it
continue
if len(spike_trains[unit_index]) == 0:
spike_trains[unit_index] = spikes_time
else:
spike_trains[unit_index] = np.concatenate((spike_trains[unit_index], spikes_time))
micro_wire_labels.append(micro_wire)
cluster_labels.append(cluster)
cluster_indices.append(unit_cluster_index)
unit_index += 1
# 1 = MU 2 = SU -1 = Artif.
# 0 = Unassigned (is ignored)
spike_struct = SpikeStructure(patient=self, spike_trains=spike_trains,
microwire_labels=micro_wire_labels,
cluster_labels=cluster_labels,
title=title, cluster_indices=cluster_indices)
# print(f"End of construct_spike_structure for {self.patient_id}")
return spike_struct
def select_channels_starting_by(self, channels_starting_by):
"""
        :param channels_starting_by: list of str; if the list is empty, an empty list is returned, otherwise take
        the channels whose name starts with the given prefix (like "RAH" takes RAH1, RAH2 etc..., and just "R"
        takes all microwires on the right side)
:return:
"""
result_indices = []
result_channels = []
for channel in channels_starting_by:
result_indices.extend([i for i, ch in enumerate(self.channel_info_by_microwire)
if isinstance(ch, str) and ch.startswith(channel)])
result_channels.extend([ch for ch in self.channel_info_by_microwire
if isinstance(ch, str) and ch.startswith(channel)])
return result_indices, result_channels
def select_channels_with_exact_same_name_without_number(self, channels):
"""
Select channels without the precise index, for example select all "A" (all amygdala channels)
:param channels: list of str: full name without numbers
:return:
"""
result_indices = []
result_channels = []
for channel in channels:
result_indices.extend([i for i, ch in enumerate(self.channel_info_by_microwire)
if (ch.startswith(channel) and (len(ch) == (len(channel) + 1)))])
result_channels.extend([ch for ch in self.channel_info_by_microwire
if (ch.startswith(channel) and (len(ch) == (len(channel) + 1)))])
return result_indices, result_channels
def select_channels_with_exact_same_name_with_number(self, channels):
"""
Select channels with the precise index, for example select all "A1" (amygdala channel 1)
:param channels: list of full name with numbers
:return:
"""
result = []
result.extend([i for i, ch in enumerate(self.channel_info_by_microwire)
if ch in channels])
return result
def selection_sleep_stage_by_stage(self, sleep_stage_selection):
"""
:param sleep_stage_selection: list of str
:return:
"""
return [ss for ss in self.sleep_stages if ss.sleep_stage in sleep_stage_selection]
def get_indices_of_sleep_stage(self, sleep_stage_name):
return [i for i, ss in enumerate(self.sleep_stages) if ss.sleep_stage == sleep_stage_name]
def descriptive_stats(self):
"""
Print some descriptive stats about a patient
:return:
"""
for channels_starting_by in [None, "L", "R"]:
n_su = 0
n_mu = 0
micro_wire_to_keep = []
if channels_starting_by is None:
micro_wire_to_keep = self.available_micro_wires
print(f"n units: {len(micro_wire_to_keep)}")
print(f"n invariant units: {len(self.is_invariant_units_dict)}")
print(f"n responsive units: {len(self.is_responsive_units_dict)}")
else:
indices, channels = self.select_channels_starting_by(channels_starting_by)
micro_wire_to_keep.extend(indices)
                # remove redundant microwires and sort them
micro_wire_to_keep = np.unique(micro_wire_to_keep)
# then we check if all the micro_wire data are available
to_del = np.setdiff1d(micro_wire_to_keep, self.available_micro_wires)
if len(to_del) > 0:
for d in to_del:
micro_wire_to_keep = micro_wire_to_keep[micro_wire_to_keep != d]
# print(f"n units in {channels_starting_by}: {len(micro_wire_to_keep)}")
invariant_keys = list(self.is_invariant_units_dict.keys())
responsive_keys = list(self.is_responsive_units_dict.keys())
print(f"n invariant units: {len([k for k in invariant_keys if channels_starting_by in k])}")
print(f"n responsive units: {len([k for k in responsive_keys if channels_starting_by in k])}")
mu_by_area_count = SortedDict()
su_by_area_count = SortedDict()
# A AH EC MH PH PHC
# print(f"self.channel_info_by_microwire {self.channel_info_by_microwire}")
# print(f"self.available_micro_wires {self.available_micro_wires}")
for micro_wire in micro_wire_to_keep:
cluster_infos = self.cluster_info[micro_wire][0]
for unit_cluster, spikes_time in self.spikes_time_by_microwire[micro_wire].items():
cluster = cluster_infos[unit_cluster]
if (cluster < 1) or (cluster > 2):
continue
if cluster == 1:
# == MU
n_mu += 1
counter_dict = mu_by_area_count
else:
n_su += 1
counter_dict = su_by_area_count
channel_name = self.channel_info_by_microwire[micro_wire]
# print(f'channel_name {channel_name}')
unique_channels = ["EC", "AH", "MH", "PHC"]
for channel in unique_channels:
if channel in channel_name:
counter_dict[channel] = counter_dict.get(channel, 0) + 1
if ("A" in channel_name) and ("AH" not in channel_name):
counter_dict["A"] = counter_dict.get("A", 0) + 1
if ("PH" in channel_name) and ("PHC" not in channel_name):
counter_dict["PH"] = counter_dict.get("PH", 0) + 1
if channels_starting_by is None:
print(f"From both side: n_su {n_su}, n_mu {n_mu}")
else:
print(f"For side {channels_starting_by}: n_su {n_su}, n_mu {n_mu}, total {n_su+n_mu}")
print(f"mu_by_area_count: {mu_by_area_count}")
print(f"su_by_area_count: {su_by_area_count}")
print("")
if len(self.stimuli_name_dict) > 0:
print(f"Stimuli content: {self.stimuli_name_dict}")
print(" ")
print("sleep stages: ")
for sleep_stage in self.sleep_stages:
print(sleep_stage)
@property
def identifier(self):
return self._identifier
|
[
"julien.denis3@gmail.com"
] |
julien.denis3@gmail.com
|
8757ce41ff686de72bf880c4579a31a36cf628ba
|
3c7aa6ecb5acf2b82edd284b956b86d1d7fbd29d
|
/test/test_rest_api.py
|
4df1a092fadae98674a8d06dda0350de9febbff8
|
[
"Apache-2.0"
] |
permissive
|
tanhimislam/haystack
|
7ca1991badb61b08cd9ab409cb3128568f353255
|
5cfdabda2c339b16a327e6ff10877ecea00c3038
|
refs/heads/master
| 2023-08-24T11:52:59.058072
| 2021-10-15T08:29:36
| 2021-10-15T08:29:36
| 417,428,309
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,263
|
py
|
import os
from pathlib import Path
import pytest
from fastapi.testclient import TestClient
from haystack import Label
from rest_api.application import app
FEEDBACK={
"id": "123",
"query": "Who made the PDF specification?",
"document": {
"content": "A sample PDF file\n\nHistory and standardization\nFormat (PDF) Adobe Systems made the PDF specification available free of charge in 1993. In the early years PDF was popular mainly in desktop publishing workflows, and competed with a variety of formats such as DjVu, Envoy, Common Ground Digital Paper, Farallon Replica and even Adobe's own PostScript format. PDF was a proprietary format controlled by Adobe until it was released as an open standard on July 1, 2008, and published by the International Organization for Standardization as ISO 32000-1:2008, at which time control of the specification passed to an ISO Committee of volunteer industry experts. In 2008, Adobe published a Public Patent License to ISO 32000-1 granting royalty-free rights for all patents owned by Adobe that are necessary to make, use, sell, and distribute PDF-compliant implementations. PDF 1.7, the sixth edition of the PDF specification that became ISO 32000-1, includes some proprietary technologies defined only by Adobe, such as Adobe XML Forms Architecture (XFA) and JavaScript extension for Acrobat, which are referenced by ISO 32000-1 as normative and indispensable for the full implementation of the ISO 32000-1 specification. These proprietary technologies are not standardized and their specification is published only on Adobes website. Many of them are also not supported by popular third-party implementations of PDF. Column 1",
"content_type": "text",
"score": None,
"id": "fc18c987a8312e72a47fb1524f230bb0",
"meta": {}
},
"answer":
{
"answer": "Adobe Systems",
"type": "extractive",
"context": "A sample PDF file\n\nHistory and standardization\nFormat (PDF) Adobe Systems made the PDF specification available free of charge in 1993. In the early ye",
"offsets_in_context": [{"start": 60, "end": 73}],
"offsets_in_document": [{"start": 60, "end": 73}],
"document_id": "fc18c987a8312e72a47fb1524f230bb0"
},
"is_correct_answer": True,
"is_correct_document": True,
"origin": "user-feedback",
"pipeline_id": "some-123",
}
@pytest.mark.elasticsearch
@pytest.fixture(scope="session")
def client() -> TestClient:
os.environ["PIPELINE_YAML_PATH"] = str((Path(__file__).parent / "samples"/"pipeline"/"test_pipeline.yaml").absolute())
os.environ["INDEXING_PIPELINE_NAME"] = "indexing_text_pipeline"
client = TestClient(app)
yield client
# Clean up
client.post(url="/documents/delete_by_filters", data='{"filters": {}}')
@pytest.mark.elasticsearch
@pytest.fixture(scope="session")
def populated_client(client: TestClient) -> TestClient:
client.post(url="/documents/delete_by_filters", data='{"filters": {}}')
files_to_upload = [
{'files': (Path(__file__).parent / "samples"/"pdf"/"sample_pdf_1.pdf").open('rb')},
{'files': (Path(__file__).parent / "samples"/"pdf"/"sample_pdf_2.pdf").open('rb')}
]
for index, fi in enumerate(files_to_upload):
response = client.post(url="/file-upload", files=fi, data={"meta": f'{{"meta_key": "meta_value", "meta_index": "{index}"}}'})
assert 200 == response.status_code
yield client
client.post(url="/documents/delete_by_filters", data='{"filters": {}}')
def test_get_documents():
os.environ["PIPELINE_YAML_PATH"] = str((Path(__file__).parent / "samples"/"pipeline"/"test_pipeline.yaml").absolute())
os.environ["INDEXING_PIPELINE_NAME"] = "indexing_text_pipeline"
client = TestClient(app)
# Clean up to make sure the docstore is empty
client.post(url="/documents/delete_by_filters", data='{"filters": {}}')
# Upload the files
files_to_upload = [
{'files': (Path(__file__).parent / "samples"/"docs"/"doc_1.txt").open('rb')},
{'files': (Path(__file__).parent / "samples"/"docs"/"doc_2.txt").open('rb')}
]
for index, fi in enumerate(files_to_upload):
response = client.post(url="/file-upload", files=fi, data={"meta": f'{{"meta_key": "meta_value_get"}}'})
assert 200 == response.status_code
# Get the documents
response = client.post(url="/documents/get_by_filters", data='{"filters": {"meta_key": ["meta_value_get"]}}')
assert 200 == response.status_code
response_json = response.json()
# Make sure the right docs are found
assert len(response_json) == 2
names = [doc["meta"]["name"] for doc in response_json]
assert "doc_1.txt" in names
assert "doc_2.txt" in names
meta_keys = [doc["meta"]["meta_key"] for doc in response_json]
assert all("meta_value_get"==meta_key for meta_key in meta_keys)
def test_delete_documents():
os.environ["PIPELINE_YAML_PATH"] = str((Path(__file__).parent / "samples"/"pipeline"/"test_pipeline.yaml").absolute())
os.environ["INDEXING_PIPELINE_NAME"] = "indexing_text_pipeline"
client = TestClient(app)
# Clean up to make sure the docstore is empty
client.post(url="/documents/delete_by_filters", data='{"filters": {}}')
# Upload the files
files_to_upload = [
{'files': (Path(__file__).parent / "samples"/"docs"/"doc_1.txt").open('rb')},
{'files': (Path(__file__).parent / "samples"/"docs"/"doc_2.txt").open('rb')}
]
for index, fi in enumerate(files_to_upload):
response = client.post(url="/file-upload", files=fi, data={"meta": f'{{"meta_key": "meta_value_del", "meta_index": "{index}"}}'})
assert 200 == response.status_code
# Make sure there are two docs
response = client.post(url="/documents/get_by_filters", data='{"filters": {"meta_key": ["meta_value_del"]}}')
assert 200 == response.status_code
response_json = response.json()
assert len(response_json) == 2
# Delete one doc
response = client.post(url="/documents/delete_by_filters", data='{"filters": {"meta_index": ["0"]}}')
assert 200 == response.status_code
# Now there should be only one doc
response = client.post(url="/documents/get_by_filters", data='{"filters": {"meta_key": ["meta_value_del"]}}')
assert 200 == response.status_code
response_json = response.json()
assert len(response_json) == 1
# Make sure the right doc was deleted
response = client.post(url="/documents/get_by_filters", data='{"filters": {"meta_index": ["0"]}}')
assert 200 == response.status_code
response_json = response.json()
assert len(response_json) == 0
response = client.post(url="/documents/get_by_filters", data='{"filters": {"meta_index": ["1"]}}')
assert 200 == response.status_code
response_json = response.json()
assert len(response_json) == 1
def test_file_upload(client: TestClient):
file_to_upload = {'files': (Path(__file__).parent / "samples"/"pdf"/"sample_pdf_1.pdf").open('rb')}
response = client.post(url="/file-upload", files=file_to_upload, data={"meta": '{"meta_key": "meta_value"}'})
assert 200 == response.status_code
client.post(url="/documents/delete_by_filters", data='{"filters": {}}')
def test_query_with_no_filter(populated_client: TestClient):
query_with_no_filter_value = {"query": "Who made the PDF specification?"}
response = populated_client.post(url="/query", json=query_with_no_filter_value)
assert 200 == response.status_code
response_json = response.json()
assert response_json["answers"][0]["answer"] == "Adobe Systems"
def test_query_with_one_filter(populated_client: TestClient):
query_with_filter = {"query": "Who made the PDF specification?", "params": {"filters": {"meta_key": "meta_value"}}}
response = populated_client.post(url="/query", json=query_with_filter)
assert 200 == response.status_code
response_json = response.json()
assert response_json["answers"][0]["answer"] == "Adobe Systems"
def test_query_with_filter_list(populated_client: TestClient):
query_with_filter_list = {
"query": "Who made the PDF specification?",
"params": {"filters": {"meta_key": ["meta_value", "another_value"]}}
}
response = populated_client.post(url="/query", json=query_with_filter_list)
assert 200 == response.status_code
response_json = response.json()
assert response_json["answers"][0]["answer"] == "Adobe Systems"
def test_query_with_invalid_filter(populated_client: TestClient):
query_with_invalid_filter = {
"query": "Who made the PDF specification?", "params": {"filters": {"meta_key": "invalid_value"}}
}
response = populated_client.post(url="/query", json=query_with_invalid_filter)
assert 200 == response.status_code
response_json = response.json()
assert len(response_json["answers"]) == 0
def test_write_feedback(populated_client: TestClient):
response = populated_client.post(url="/feedback", json=FEEDBACK)
assert 200 == response.status_code
def test_get_feedback(client: TestClient):
response = client.post(url="/feedback", json=FEEDBACK)
resp = client.get(url="/feedback")
labels = [Label.from_dict(i) for i in resp.json()]
def test_export_feedback(populated_client: TestClient):
response = populated_client.post(url="/feedback", json=FEEDBACK)
assert 200 == response.status_code
feedback_urls = [
"/export-feedback?full_document_context=true",
"/export-feedback?full_document_context=false&context_size=50",
"/export-feedback?full_document_context=false&context_size=50000",
]
for url in feedback_urls:
response = populated_client.get(url=url, json=FEEDBACK)
response_json = response.json()
context = response_json["data"][0]["paragraphs"][0]["context"]
answer_start = response_json["data"][0]["paragraphs"][0]["qas"][0]["answers"][0]["answer_start"]
answer = response_json["data"][0]["paragraphs"][0]["qas"][0]["answers"][0]["text"]
assert context[answer_start:answer_start+len(answer)] == answer
|
[
"noreply@github.com"
] |
tanhimislam.noreply@github.com
|
77ef79ca9fa00d359c60f5fc9b20d0f93f23c4b1
|
42a4a77e417e59479d8aacb55ca2bc84640b178e
|
/NewsChefAPI/NewsChefAPI/NewsModel/serializers.py
|
6e290fd413ea7e07589717d10ff074124102a106
|
[] |
no_license
|
rahulShrestha89/Midversion-newsChef
|
182e7744c7433716a7cca3f7287d1e7daa7209be
|
a585cfdfd549dc769c776e2796ea2ef983b1d30e
|
refs/heads/master
| 2020-07-20T01:02:02.713270
| 2016-10-27T21:38:40
| 2016-10-27T21:38:40
| 73,749,624
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 882
|
py
|
from rest_framework import serializers
from NewsModel.models import NewsModel
class NewsModelSerializer(serializers.ModelSerializer):
class Meta:
model = NewsModel
fields = ('id','created','firstName','lastName','email','phoneNumber')
def create(self, validated_data):
"""
Create and return User.
"""
return NewsModel.objects.create(**validated_data)
def update(self, instance, validated_data):
"""
Update and return User.
"""
instance.firstName = validated_data.get('firstName',instance.firstName)
instance.lastName = validated_data.get('lastName',instance.lastName)
instance.email = validated_data.get('email',instance.email)
instance.phoneNumber = validated_data.get('phoneNumber',instance.phoneNumber)
instance.save()
return instance
|
[
"PrayushPokharel@Prayushs-MacBook-Pro.local"
] |
PrayushPokharel@Prayushs-MacBook-Pro.local
|
0ec93526b33317ffa6a2f38a2e79cc249998bd4a
|
42474f0f92339992d9e8ded982e0458d01c028fd
|
/inputnumber_and_displaymenu.py
|
dc043deb83e5b17b4accccb6ec067d8239ddaa0d
|
[] |
no_license
|
Frehoni/TestProjekt-1
|
14871f5a7d994cce87202d6b7f51bca75d276cd4
|
ff37a4e96e9234c6c73f5fed0e144e3770afe47c
|
refs/heads/master
| 2020-09-06T04:47:17.608913
| 2019-11-14T07:51:58
| 2019-11-14T07:51:58
| 220,326,705
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 945
|
py
|
import numpy as np
def inputNumber(prompt):
    #inputNumber prompts the user to input a number
    #
    #Usage: num=inputNumber(prompt) Displays prompt and asks the user for a number.
    #Repeats until the user inputs a valid number.
while True:
try:
num = float(input(prompt))
break
except ValueError:
pass
return num
def displayMenu(options):
    #displayMenu displays a menu of options, asks the user to choose a number,
#and returns the number of the menu item chosen.
#
#Usage: choice = displayMenu(options)
#
    #Input options Menu options (list of strings)
#Output choice Chosen option (integer)
for i in range(len(options)):
print("{:d}. {:s}".format(i+1,options[i]))
# Get a valid menu choice
choice = 0
while not(np.any(choice == np.arange(len(options))+1)):
choice = inputNumber("Please choose a menu item: ")
return choice
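# Minimal interactive demo of the two helpers above (illustrative addition; the menu
# options are arbitrary examples).
if __name__ == "__main__":
    options = ["Load data", "Plot data", "Quit"]
    choice = displayMenu(options)
    print("You chose option {:d}: {:s}".format(int(choice), options[int(choice) - 1]))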
|
[
"frehoni@gmail.com"
] |
frehoni@gmail.com
|
9c11636c982ae342d684df2b366e4ec72d64b10e
|
f41c999e9e367bf6d091caa58633c9a277d1d920
|
/loottable.py
|
1779035cfbe4b109b62620fade4b542d870f24a8
|
[] |
no_license
|
tntrobber123/megamanrpg
|
9deff5831626e2797a82c7f256d8bb42099866a2
|
05605618173a15531c38a19ad25300ff10ed83fd
|
refs/heads/master
| 2023-08-20T07:20:42.719557
| 2021-10-21T18:14:26
| 2021-10-21T18:14:26
| 403,707,738
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 870
|
py
|
commonloot = [
    "Small HP ball",
    "Small HP ball",
    "Small HP ball",
    "Large HP ball",
    "Small energy ball",
    "Small energy ball",
    "Small energy ball",
    "Large energy ball",
    "e-TANK",
]
uncommonloot = [
    "Large HP ball",
    "Large HP ball",
    "Large energy ball",
    "Large energy ball",
    "e-TANK",
    "e-TANK",
    "E-TANK",
]
rareloot = [
    "e-TANK",
    "E-TANK",
    "E-TANK",
    "m-TANK",
    "m-TANK",
    "M-TANK",
    "Free screw box",
    "1-UP",
]
weaponloot = []
bossloot = []
|
[
"tntrobber@gmail.com"
] |
tntrobber@gmail.com
|
79577c7f146364cb36e810bab124664c1cc42b1a
|
f2242ee2b26df80c6290d94a388de7a98a7bbc35
|
/measuring_polyphony/settings.py
|
683665cd6311a40d1b794f9aa8e6081e01ef16ad
|
[] |
no_license
|
misingnoglic/measuring_polyphony_django
|
16c2f45addbaa4b643d012fcc34975c0746647e3
|
dba469bd042ebe67a4f56816a8320ccfc042d07d
|
refs/heads/master
| 2021-01-21T07:03:30.376918
| 2017-06-07T17:21:39
| 2017-06-07T17:21:39
| 91,596,436
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,298
|
py
|
"""
Django settings for measuring_polyphony project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
import secrets
SECRET_KEY = secrets.SECRET_KEY
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = secrets.debug
ALLOWED_HOSTS = ['174.138.49.237', '45.55.149.115', '127.0.0.1', 'localhost']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
'viewer',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'measuring_polyphony.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'measuring_polyphony.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'polyphony',
'USER': 'polyphony',
'PASSWORD': secrets.DATABASE_PASSWORD,
'HOST': 'localhost',
'PORT': '',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static/')
MEDIA_ROOT = 'media/'
MEDIA_URL = '/media/'
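# Note: the TEMPLATES setting defined below overrides the TEMPLATES definition earlier in this file.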
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
# insert your TEMPLATE_DIRS here
'templates'
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
# Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
# list if you haven't customized them:
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
|
[
"aboudaie@brandeis.edu"
] |
aboudaie@brandeis.edu
|
7c5ba4db32c119edb7f4051a0cf2d2793e2f4cba
|
f29cf584ec1db1aa4341b1c0ecb7c956a2774ee1
|
/feed/migrations/0001_initial.py
|
fa105ad558f3bf43ff53c8e77590ef7c8e99a267
|
[] |
no_license
|
Ni-c0de-mus/evverest
|
de5512c76ec93ff3c091f70e20d7bffc59afba5d
|
8a31815fd04771f03902cb860c3dc8c8703258fe
|
refs/heads/master
| 2020-08-01T09:23:40.063856
| 2018-05-01T20:54:23
| 2018-05-01T20:54:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,976
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-26 17:17
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='UserComment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comment_date', models.DateTimeField(auto_now_add=True)),
('comment_body', models.TextField(max_length=500)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='usercomment', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-comment_date'],
},
),
migrations.CreateModel(
name='UserPost',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('post_date', models.DateTimeField(auto_now_add=True)),
('title', models.CharField(max_length=150)),
('post_body', models.TextField(max_length=1000)),
('image', models.ImageField(blank=True, upload_to='post_pics')),
('author', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='userpost', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-post_date'],
},
),
migrations.AddField(
model_name='usercomment',
name='post',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='feed.UserPost'),
),
]
|
[
"garrettlove@Garretts-MacBook-Pro.local"
] |
garrettlove@Garretts-MacBook-Pro.local
|
7cf89b64a2f8ed6adff97908bb2655d33b9b695b
|
00190a66d7ab1146cd0e08351544edad695925a5
|
/tests/test_attributes.py
|
ca97ea7bb65838752b0e2a531c147749649331e6
|
[
"MIT"
] |
permissive
|
dynata/python-demandapi-client
|
fc52754b57c38a7ab538e371290c7052fc76c90e
|
0a831f4f0db360be1599b7f4d89e0951b27ecc0b
|
refs/heads/dev
| 2020-12-03T17:10:56.123304
| 2020-09-23T23:48:10
| 2020-09-23T23:48:10
| 231,402,965
| 5
| 1
|
MIT
| 2020-09-25T18:32:08
| 2020-01-02T14:55:24
|
Python
|
UTF-8
|
Python
| false
| false
| 1,014
|
py
|
# encoding: utf-8
from __future__ import unicode_literals, print_function
import json
import unittest
import responses
from dynatademand.api import DemandAPIClient
BASE_HOST = "http://test-url.example"
class TestAttributeEndpoints(unittest.TestCase):
def setUp(self):
self.api = DemandAPIClient(client_id='test', username='testuser', password='testpass', base_host=BASE_HOST)
self.api._access_token = 'Bearer testtoken'
@responses.activate
def test_get_attributes(self):
with open('./tests/test_files/get_attributes.json', 'r') as attributes_file:
attributes_json = json.load(attributes_file)
responses.add(responses.GET, '{}/sample/v1/attributes/no/no'.format(BASE_HOST), json=attributes_json, status=200)
self.api.get_attributes('no', 'no')
self.assertEqual(len(responses.calls), 1)
print('flaws')
print(responses.calls[0].response.json())
self.assertEqual(responses.calls[0].response.json(), attributes_json)
|
[
"bradley@wogsland.org"
] |
bradley@wogsland.org
|
ee0e105494849fc43fe24a4919a2091672543bf1
|
257cd01623a49e5967457bb89ef676f17484480e
|
/components/Dropdown.py
|
2e97b269f33242f73e3b0fa445fc84e2b5fce10d
|
[] |
no_license
|
aleksProsk/HydroOpt2.0
|
5347a4b5eb07d8d1847436f7c079db6a081bfb49
|
8d260d83c61483d4a88f3d81929cac34a75d9288
|
refs/heads/master
| 2020-03-22T07:33:44.178545
| 2018-08-14T13:21:45
| 2018-08-14T13:21:45
| 139,708,093
| 0
| 1
| null | 2018-07-04T11:49:57
| 2018-07-04T10:38:23
|
Python
|
UTF-8
|
Python
| false
| false
| 810
|
py
|
import dash_core_components as dcc
import dash_html_components as html
from components import DashComponent
CDashComponent = DashComponent.CDashComponent
class CDropdown(CDashComponent):
def __init__(self, options = [], placeholder = 'Select', value = '', multi = False, style = {},
name = None, screenName = None):
super().__init__(name, screenName)
self.setDropdown(options, placeholder, value, multi, style)
def getValue(self):
return self.__value
def update(self, value):
self.__value = value
def setDropdown(self, options, placeholder, value, multi, style):
self.__options = options
self.__value = value
super().setDashRendering(html.Div([dcc.Dropdown(
id=str(super().getID()),
options=options,
placeholder=placeholder,
multi=multi,
value=value,
)], style=style))
|
[
"alexandriksasha@mail.ru"
] |
alexandriksasha@mail.ru
|
40403bb17d0bd03eb266d66d5ab2b8f23ec06e68
|
c85fd3e58367abbbf1b8d4362e9b0919b7d48edd
|
/img_cap_server/main.py
|
3ca4a34619d245ec8da353dcc7075a10efe28fd2
|
[
"MIT"
] |
permissive
|
petergerasimov/VoiceCV
|
fc6d54b2a8ade48a2f3ec61245d0d8a245c9df56
|
b03055f953389b1248cfd37059d97c36ba846295
|
refs/heads/master
| 2020-04-29T04:40:00.697161
| 2019-03-17T20:05:36
| 2019-03-17T20:05:36
| 175,854,674
| 0
| 0
| null | 2019-03-15T16:22:49
| 2019-03-15T16:22:48
| null |
UTF-8
|
Python
| false
| false
| 550
|
py
|
import base64
import os
from PIL import Image
from flask import Flask, make_response, request
from img_cap.main import greedy_search_inference
app = Flask(__name__)
@app.route('/sendImage' , methods=['POST'])
def sendImage():
req = request.get_json(force=True)
img = base64.b64decode(req['imgData'])
image = Image.frombytes('RGB', (320, 240), img)
image.save('image.jpg')
res = greedy_search_inference('./image.jpg')
return make_response(res , 200)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=4000)
|
[
"velev.victor@yahoo.com"
] |
velev.victor@yahoo.com
|
1b148a152b106f282e12431b9d2c56106043d75a
|
ae953b8f45cfb36fe31bfa731055ec0324619aac
|
/h20.py
|
27621a27f97200894acd192ba2ca6a87d1befdaf
|
[] |
no_license
|
Abishek0123/alphabet
|
12afdec8a1b188d9ef22f08e345ccbcf8cd181dc
|
b5ef723afd71da768d842742d4d758212180975a
|
refs/heads/master
| 2020-06-17T00:09:50.129191
| 2019-08-09T09:29:36
| 2019-08-09T09:29:36
| 195,739,443
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 58
|
py
|
n=int(input())
for a in range(1,6,1):
print(n*a,end=" ")
|
[
"noreply@github.com"
] |
Abishek0123.noreply@github.com
|
ceb7aa406f8fafc550612fba7a5ad884b66e8fc1
|
f2bbc82e51ad56c94457f9e7def958132606f02d
|
/Westar2/wsgi.py
|
472d1f5352c0eb5faa8b334951e5e6d0bf8e711d
|
[] |
no_license
|
kljopu/Westargram_v1.1
|
d89447f1a8d55ba51cbc8716075e2bf8ebe461bf
|
759f3363107a645eb65d4611b1873cb3ca9c0a03
|
refs/heads/master
| 2022-10-31T07:52:54.108108
| 2020-06-17T05:43:41
| 2020-06-17T05:43:41
| 272,887,913
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 391
|
py
|
"""
WSGI config for Westar2 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Westar2.settings')
application = get_wsgi_application()
|
[
"user@Userui-MacBookPro.local"
] |
user@Userui-MacBookPro.local
|
c36a5c3700af519e773825abb05e0bb108090db1
|
aae0f5c88e07e01a3a23a3d36813144a3d14dc89
|
/polls/migrations/0006_auto_20160216_0027.py
|
a4456ee0b6174bdd064fa1fb5ca92dff390afbb4
|
[] |
no_license
|
jasleenkaur/myproject
|
b00d07d8f9a56775176e456408228d9793141390
|
4d8109b5572d2193832e6e9d7e4ab97cf9582391
|
refs/heads/master
| 2021-01-10T10:36:48.690386
| 2016-02-18T12:30:41
| 2016-02-18T12:30:41
| 51,250,941
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 424
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('polls', '0005_auto_20160214_0955'),
]
operations = [
migrations.AlterField(
model_name='question',
name='pub_date',
field=models.DateTimeField(verbose_name='date published'),
),
]
|
[
"jasleen.7956@gmail.com"
] |
jasleen.7956@gmail.com
|
2c4857574e8ea0feb3f2aa77b1b293c9a54ea578
|
6a6fb158dd1880fbe55e229ccc138cfdf9db8c9c
|
/FileScript.py
|
0ed070814dc210c08070cbcc088282e040143e00
|
[] |
no_license
|
nicolasagudelo/Regresion-Lineal-Series-de-Tiempo
|
8b9dc63ec7f2b3fd7c89d13b0a95b38b6b149f76
|
4a6ceec9cd721b4467beced3b5911939819c995b
|
refs/heads/master
| 2021-05-01T10:19:42.613205
| 2016-10-26T08:12:00
| 2016-10-26T08:12:00
| 70,779,479
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 451
|
py
|
import numpy as np
f = open('data chikun.csv', 'r') #Load the data
np.set_printoptions(suppress=True)
data = []
for line in f.readlines():
line = line.strip()
line = line.split(";")
line = [float(i) for i in line]
data.append(line)
x = np.matrix(data)
x = x.reshape(22,6) #Reshape the data into the desired form
np.savetxt('Chikungunya_6_Lag.csv',x,fmt='%.2i',delimiter=';') #Save the result to a csv
|
[
"nicoalbert95@hotmail.com"
] |
nicoalbert95@hotmail.com
|
f6ea9b4080ad191b6f9358b2fb9f0f1fa9549f4c
|
e5b37046b72cbed5ed3956b4bb60d74623b22920
|
/education_clinic/models/eye.py
|
0560f08fe47e7c9693137358a10e936588867239
|
[] |
no_license
|
asmaaltahe/erp-system
|
edce254245486098d475a0b26eea0086d5e6185e
|
9cab82e61ffea027b4ede7b6c89a8cfa6e4326f8
|
refs/heads/master
| 2023-08-23T19:00:33.648817
| 2021-10-17T08:37:44
| 2021-10-17T08:37:44
| 414,579,678
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,671
|
py
|
import datetime
from odoo import models, fields, api, exceptions,_
class Eyeclinic(models.Model):
_name = 'education.eye'
_description = 'Eye'
_order = "id desc"
patient_id = fields.Char(string='Student Number' )
name = fields.Char(string="full name" )
first = fields.Char(string="First Name" )
second = fields.Char(string="Second Name" )
third = fields.Char(string="Third Name" )
last = fields.Char(string="Last Name" )
gender=fields.Char(string="gender" )
brath_day = fields.Date(string='Date Of Birth' )
date = fields.Char(string='Date', default=lambda self: datetime.datetime.today().strftime('%Y-%m-%d'), readonly=True)
phone = fields.Char(string="Phone" )
email = fields.Char(string="Email" )
nationality = fields.Char(string='Nationality' )
religion = fields.Char(string='Religion' )
program = fields.Char(string='Program' )
address = fields.Char(string="address")
# The End Personal Information
general = fields.Char(string="General Vision")
withoutglss = fields.Char(string="With Out Glasses")
withglasses = fields.Char(string='With Glasses')
color = fields.Char(string='Color Vision')
near = fields.Char(string='Near Vision')
    opthahmologist = fields.Char(string='Ophthalmologist', readonly=True, default=lambda self: self.env.user.name )
assessment = fields.Text(string="Assessment")
    diagonis = fields.Text(string='Diagnosis')
is_assessment = fields.Boolean(string="Is assessment")
def get_student_name(self):
for re in self:
re.name = str(re.first) + " " + str(re.second) + " " + str(re.third) + " " + str(re.last)
|
[
"mis.drive119@gmail.com"
] |
mis.drive119@gmail.com
|
f8a609f051a748c1a1de50f959c0085374b8fee9
|
74d33e66ffadc73456c62b8bb8d6114f53de63df
|
/Py/Python/MultiProcessing.py
|
192548168486ae58c5022e56c4f50cc84c601ba4
|
[] |
no_license
|
Newester/MyCode
|
97fdf3ca5aa1d2ac2fba6b5173bb7fb5034e5a51
|
1a8e04191cdf7154750c066d7172b9437f720b1b
|
refs/heads/master
| 2022-12-23T20:53:51.699570
| 2020-06-06T08:51:47
| 2020-06-06T08:51:47
| 118,646,066
| 0
| 0
| null | 2022-12-16T07:12:41
| 2018-01-23T17:40:16
|
Java
|
UTF-8
|
Python
| false
| false
| 2,911
|
py
|
#!/usr/bin/env python3
# The fork() system call on Unix/Linux operating systems
# Called once, returns twice (a copy of the process is created and runs as the child)
import os
print('Process(%s) start...' % os.getpid())
#Only works on Unix/linux/Mac
'''
pid = os.fork()
if pid == 0:
print('I am child process (%s) and my parent is process (%s)' %(os.getpid(),os.getppid()))
else:
print('I am process (%s) and I created a child process (%s)' % (os.getpid(),pid))
'''
# Cross-platform multiprocessing support
# To create a child process, pass a target function and its arguments to a Process instance, then launch it with start()
from multiprocessing import Process
import os
def run_proc(name):
print('Run child process %s pid(%s)' % (name,os.getpid()))
if __name__ == '__main__':
print('Parent process pid(%s)' % os.getpid())
p = Process(target=run_proc,args=('test',))
print('Child process will start.')
p.start()
p.join()
print('Child process end.')
# Process pool
from multiprocessing import Pool
import os, time, random
def long_time_task(name):
print('Run task %s pid(%s)' % (name,os.getpid()))
start = time.time()
time.sleep(random.random() * 3)
end = time.time()
print('Task %s run %.2f seconds.' % (name,(end - start)))
if __name__ == '__main__':
print('Parent process %s.' % os.getpid())
p = Pool(3)
for i in range(4):
p.apply_async(long_time_task,args=(i,))
print('Waiting for all subprocess done...')
p.close()
p.join()
print('All subprocess done.')
# Child processes: the subprocess module controls a child process's input and output
import subprocess
print('$ nslookup www.python.org')
r = subprocess.call(['nslookup','www.python.org'])
print('Exit code',r)
print('$nslookup')
p = subprocess.Popen(['nslookup'], stdin=subprocess.PIPE,stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, err = p.communicate(b'set q=mx\npython.org\nexit\n')
print(output.decode('gbk'))
print('Exit code:', p.returncode)
# Inter-process communication
# Queue, Pipes
from multiprocessing import Queue, Process
import os, time, random
# Code executed by the writer process:
def write(q):
print('Process to write: %s' % os.getpid())
for value in ['A', 'B', 'C']:
print('Put %s to queue...' % value)
q.put(value)
time.sleep(random.random())
# Code executed by the reader process:
def read(q):
print('Process to read: %s' % os.getpid())
while True:
value = q.get(True)
print('Get %s from queue.' % value)
if __name__=='__main__':
    # The parent process creates the Queue and passes it to each child process:
q = Queue()
pw = Process(target=write, args=(q,))
pr = Process(target=read, args=(q,))
    # Start child process pw (writer):
pw.start()
    # Start child process pr (reader):
pr.start()
    # Wait for pw to finish:
pw.join()
    # pr runs an infinite loop, so it cannot be joined; terminate it forcibly:
pr.terminate()
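# The comment above also mentions Pipes; the short sketch below is an added illustration
# (not part of the original file) of the equivalent Pipe-based inter-process communication.
from multiprocessing import Pipe
def pipe_sender(conn):
    # The child process sends a single message through its end of the pipe.
    conn.send('hello from child process %s' % os.getpid())
    conn.close()
if __name__ == '__main__':
    parent_conn, child_conn = Pipe()
    ps = Process(target=pipe_sender, args=(child_conn,))
    ps.start()
    print('Got from pipe:', parent_conn.recv())
    ps.join()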
|
[
"2544391722@qq.com"
] |
2544391722@qq.com
|
3d4b05bb5fce37b423b34df4733f641403cc6e2e
|
2cb21fe32e3ef508cffb31963a5192b7328d9d67
|
/vehicles.py
|
b170c2dd4ab3b61aaf9977448e7db673c1ed8900
|
[] |
no_license
|
scottherold/python_OOP
|
c540a6e6d7cf7e5ca213cc14179db6cdd21f8c35
|
2553f80b80a64e7365f951ac163d75997eef21bf
|
refs/heads/master
| 2020-04-02T13:07:49.803669
| 2018-10-24T08:46:55
| 2018-10-24T08:46:55
| 154,468,623
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 919
|
py
|
# file vehicles.py
class Vehicle:
def __init__(self, wheels, capacity, make, model):
self.wheels = wheels
self.capacity = capacity
self.make = make
self.model = model
self.mileage = 0
def drive(self,miles):
self.mileage += miles
return self
def reverse(self,miles):
self.mileage -= miles
return self
class Bike(Vehicle):
def vehicle_type(self):
return "Bike"
class Car(Vehicle):
def set_wheels(self):
self.wheels = 4
return self
class Airplane(Vehicle):
def fly(self, miles):
self.mileage += miles
return self
v = Vehicle(4,8,"dodge","minivan")
print(v.make)
b = Bike(2,1,"Schwinn","Paramount")
print(b.vehicle_type())
c = Car(8,5,"Toyota", "Matrix")
c.set_wheels()
print(c.wheels)
a = Airplane(22,853,"Airbus","A380")
a.fly(580)
print(a.mileage)
|
[
"sherold@mail.usf.edu"
] |
sherold@mail.usf.edu
|
a82dbe53c59969b738410246af7fb6665b771e0b
|
3cd2ec8afd70235f1271ee483373e0f131f4bf33
|
/Utils/Compute_FDR.py
|
99d0b15145345dd07672daa05860e6f466ab83aa
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
joeyee/Histogram_Layer
|
74678772210bdcc27f22c154af24c8dbf3c61d83
|
d396fc3e066afded3b208588ffccdeb8d1d52cf5
|
refs/heads/master
| 2023-03-28T17:00:37.421362
| 2021-04-05T18:01:59
| 2021-04-05T18:01:59
| 398,550,467
| 0
| 0
|
MIT
| 2021-08-21T12:19:12
| 2021-08-21T12:19:12
| null |
UTF-8
|
Python
| false
| false
| 1,551
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 4 09:04:58 2020
Function to compute FDR score per class
@author: jpeeples
"""
import numpy as np
def Compute_Fisher_Score(features,labels):
#Get index of labels that correspond to each class
Classes = np.unique(labels)
#Get number of instances of each class for P_i
Instances = np.zeros(len(Classes))
for i in range(0,len(Classes)):
Instances[i] = sum(labels==Classes[i])
P_i = Instances/sum(Instances);
#Compute global mean
global_mean = np.mean(features,axis=0)
#For each class compute intra and inter class variations
scores = np.zeros(len(Classes))
log_scores = np.zeros(len(Classes))
for current_class in range(0,len(Classes)):
data = features[labels==Classes[current_class],:]
#Within-class scatter matrix
        S_w = P_i[current_class]*np.cov(data.T)
#Between-class scatter matrix
        S_b = P_i[current_class]*(np.outer((np.mean(data,axis=0)-global_mean),
                                           (np.mean(data,axis=0)-global_mean).T))
#Compute the score, compute abs of score, only care about magnitude
#compute log of scores if too large
#Using pseudoinverse if singular matrix
try:
scores[current_class] = abs((np.matmul(np.linalg.inv(S_w),S_b)).trace())
except:
scores[current_class] = abs((np.matmul(np.linalg.pinv(S_w),S_b)).trace())
log_scores[current_class] = np.log(scores[current_class])
return scores, log_scores
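# Illustrative usage with synthetic data (not part of the original module); it only
# demonstrates the expected shapes: features is (n_samples, n_features), labels is (n_samples,).
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    features = np.vstack([rng.normal(0, 1, (20, 4)), rng.normal(3, 1, (20, 4))])
    labels = np.array([0] * 20 + [1] * 20)
    scores, log_scores = Compute_Fisher_Score(features, labels)
    print("FDR scores per class:", scores)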
|
[
"jpeeples@ufl.edu"
] |
jpeeples@ufl.edu
|
fc8a6de441a3ea56879dfd96679ebda7be653e4f
|
0529c68d3acf7a68f1f389d6f76d810ba765f0db
|
/run_all_simu.py
|
77d5eb4030e3db62db3cdf18443923bb6159eb14
|
[
"MIT"
] |
permissive
|
prise-3d/LSTM-noise-detection
|
7b015fc16ebfc1c815bc42fcf6e37dacb47b3334
|
42c4a320a127209af3f662c2d8b6232d87ee61d5
|
refs/heads/master
| 2022-05-31T06:35:42.923077
| 2021-10-19T08:43:36
| 2021-10-19T08:43:36
| 246,258,906
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,872
|
py
|
import os
import argparse
command = "python display/display_thresholds_scene_file.py --params \"{0}\" --method {1} --model {2} --selected_zones {3} --scene {4} --thresholds {5} --seq_norm {6} --sequence {7} --save 1 --save_thresholds {8} --label_thresholds {9} --every {10}"
parser = argparse.ArgumentParser(description="Compute simulation for each scenes")
parser.add_argument('--folder', type=str, help="data folder with scenes files", required=True)
parser.add_argument('--method', type=str, help="method name", required=True)
parser.add_argument('--model', type=str, help="model path for simulation", required=True)
parser.add_argument('--params', type=str, help="expected params for model", required=True)
parser.add_argument('--thresholds', type=str, help="thresholds file", required=True)
parser.add_argument('--selected_zones', type=str, help="selected zone file", required=True)
parser.add_argument('--sequence', type=str, help="sequence size of RNN model", required=True)
parser.add_argument('--seqnorm', type=str, help="normalization or not of sequence", required=True)
parser.add_argument('--output', type=str, help="output prediction filename", required=True)
parser.add_argument('--every', type=int, help="every images only", default=1)
args = parser.parse_args()
p_folder = args.folder
p_method = args.method
p_model = args.model
p_params = args.params
p_thresholds = args.thresholds
p_selected_zones = args.selected_zones
p_sequence = args.sequence
p_seqnorm = args.seqnorm
p_output = args.output
p_every = args.every
for scene in sorted(os.listdir(p_folder)):
scene_path = os.path.join(p_folder, scene)
str_command = command.format(p_params, p_method, p_model, p_selected_zones, scene_path, p_thresholds, p_seqnorm, p_sequence, p_output, scene, p_every)
print("Run simulation for {0}".format(scene))
os.system(str_command)
|
[
"contact@jeromebuisine.fr"
] |
contact@jeromebuisine.fr
|
0d524bd3994608fa844316bed3cae69c1dbc939d
|
52b30b68b5c7cd473da41e4e1fcd98a893964d6c
|
/favourites/serializers.py
|
bab8a55144775d981872623379b8faa4e4750933
|
[] |
no_license
|
ymukeshyadavmrj/pharmacyapp
|
57256cc2a3afdcd6e43cb92cf0ffc97bdcb9d018
|
0c1cbd8c6234d18e8f6ce92b973b0aca9201d2e3
|
refs/heads/master
| 2023-06-26T06:13:52.800778
| 2021-07-15T21:26:01
| 2021-07-15T21:26:01
| 385,425,414
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 238
|
py
|
from rest_framework import serializers
from favourites.models import Favourites
class FavouritesSerializer(serializers.ModelSerializer):
class Meta:
model = Favourites
fields = ['id', 'isFavourite','product','user']
|
[
"ymukeshyadavmrj@gmail.com"
] |
ymukeshyadavmrj@gmail.com
|
d10896c3fa02b7175c070a2cdf0078e88c8f80a0
|
d24c86f058d90fe5a3e39e0ced1285dd13869150
|
/color_check/tests/test_colorcheck.py
|
d85aead82818d171d0e90d58e3c164887c1a0385
|
[] |
no_license
|
MarvvanPal/foundations-sample-website
|
62af6729e8c2fd5d2f976ed7a28c597e6a53ea93
|
73d85a65058f0768ce5a9c25a8cdba4946c8f21d
|
refs/heads/main
| 2023-03-30T11:23:27.187983
| 2021-04-07T17:21:17
| 2021-04-07T17:21:17
| 341,224,087
| 1
| 0
| null | 2021-02-22T14:17:14
| 2021-02-22T14:17:14
| null |
UTF-8
|
Python
| false
| false
| 1,406
|
py
|
from color_check.website import app
from color_check.controllers.get_color_code import get_color_code
# test the function we've written to check on the colors themselves
def test_get_color_code():
# this test should pass right now
assert get_color_code("blue") == "#0000ff"
# the following test will fail at the beginning,
# uncomment when you think you are finished!
# assert get_color_code("red") == "#ff0000"
# our very first functional test
# instead of checking if a function() does its job alone, this will check
# the entire response from the flask app, including the http status code.
def test_index():
# create a version of our website that we can use for testing
with app.test_client() as test_client:
# mimic a browser: 'GET /', as if you visit the site
response = test_client.get('/')
# check that the HTTP response is a success
assert response.status_code == 200
# Store the contents of the html response in a local variable.
# This should be a string with the same content as the file index.html
html_content = response.data.decode()
assert "<html>" in html_content
# check that there is a route at "/colors" which accepts a POST request
def test_colors():
with app.test_client() as test_client:
response = test_client.post('/color')
assert response.status_code == 200
|
[
"adam.j.roe@gmail.com"
] |
adam.j.roe@gmail.com
|
089777885452d5c25f586f7d738106fd6df0864c
|
f1b378fdc77f52f5ed8b330fcb1a8081da4d64a0
|
/daniel_liang/chapter05/5.5.py
|
c519946c17b2691b19b3021e71653e63e8d0bbc6
|
[] |
no_license
|
brohum10/python_code
|
91e8120930508bd5509a57fe2eb4a536eee6e129
|
0f9db42afa4687b09973bb546fb880e689dbc882
|
refs/heads/main
| 2023-06-18T03:20:12.890049
| 2021-07-18T14:19:48
| 2021-07-18T14:19:48
| 369,736,929
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 387
|
py
|
kilogram = 1
pound = 20
print("Kilorgams \t Pounds \t | \t Pounds \t Kilorgams")
while (kilogram in range(1, 200)) or (pound in range(20, 520)):
pounds = kilogram * 2.2
kilograms = pound / 2.2
print(str(format(kilogram, "<4.0f")) + str(format(pounds, "20.1f")) + " \t |"+ str(format(pound, "20.0f")) + str(format(kilograms, "20.2f")))
kilogram+=2
pound+=5
|
[
"noreply@github.com"
] |
brohum10.noreply@github.com
|
79f821f20b5a2e88ba8e4aec45d1428319d5f6ea
|
94eb0a5124bc079afd61daf65d8492243a096abe
|
/dynamic_commands/smartOBD/main.py
|
5421b52bbe6785b4b0ad06ab429fadc65f494266
|
[] |
no_license
|
AvraSaslow/3308project
|
33040d89dc12d7977e9321d8c1b7802f0ca6c266
|
e0b35e266b96c076d0d11e84c5bb6658cfb8e371
|
refs/heads/master
| 2020-09-27T02:37:16.408678
| 2019-12-17T05:50:14
| 2019-12-17T05:50:14
| 226,406,466
| 0
| 0
| null | 2019-12-06T20:39:43
| 2019-12-06T20:39:42
| null |
UTF-8
|
Python
| false
| false
| 1,705
|
py
|
"""
.. module:: main
:platform: Unix
:synopsis: Asynchronous connection
.. moduleauthor:: Will Walker
Initialization and interface
Simple command line interface, with choices for asynchronous data and a full data query
"""
import sys,os
sys.path.insert(
0, os.path.realpath(os.path.dirname(__file__)))
import obd
import time
import psycopg2
import datetime
from psycopg2.extensions import AsIs
from psycopg2 import sql
import smartOBD
from smartOBD import asynco
from smartOBD import test_commands
##main function
#
# initialization and interface for smartOBD
# Simple command line interface, with choices for asynchronous data and a full data query
def main():
"""
This function determines which functionality the user would like to use, and calls it
"""
print("Welcome to smartOBD")
print("Choose your action:\n")
print("(0) Async allows smartOBD to give you live data on your vehicle\n")
print("(1) Full Read will store all the data from your car's computer\n")
choose_action = input("Async(0) or Full Read(1): ")
# * make database connection
# host=198.23.146.166 password=Sweden77
## asynchronous
# @param choose_action The action
if(choose_action == '0'):
asynco.getAsync(60)
# x = 0
# while x < 30:
# data = [datetime.datetime.now()]
# asynco.new_speed(x+3, data)
# asynco.new_rpm(1000+x, data)
# asynco.new_temp(150+x, data)
# asynco.new_fuel(35+x, data, dbtable, dbconn, cur)
# x += 1
## full query
elif(choose_action == '1'):
test_commands.fullQuery()
## constructor
if __name__ == "__main__":
main()
|
[
"trco9595@colorado.edu"
] |
trco9595@colorado.edu
|
8111576dd1ebb8394805deb3617806424c4f5c3e
|
8ac1b02c2cdb5d175cf5534e2c532ddb213922c6
|
/ex10/ex10.py
|
61e3029b31da8b9b8b097338f2d0abda0e71462b
|
[] |
no_license
|
gmzyyzgh/python_practice
|
916db0ce52219da4799a7c51245ce4eca7dcabd3
|
483e2275a5723ca5a9fda6940c4af9e6f798b4cb
|
refs/heads/master
| 2020-05-30T12:29:08.710513
| 2019-07-06T13:56:59
| 2019-07-06T13:56:59
| 189,734,873
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 258
|
py
|
tabby_cat = "\tI'm tabbed in."
persian_cat = "I'm split\non a line."
backslash_cat = "I'm \\ a \\cat."
fat_cat = """
I'll do a list:
\t* Cat food
\t* Fishies
\t* Catnip\n\t* Grass
"""
print(tabby_cat)
print(persian_cat)
print(backslash_cat)
print(fat_cat)
|
[
"31633088@qq.com"
] |
31633088@qq.com
|
be510b4e182a65497f390c28e38015bef96593a5
|
f490046b2ed78cc1c68b8dfa44d2647a9e2c87b4
|
/Recursion/kickstart1.py
|
2f93a174c383a8a2b42f50edc22cce9571292cb0
|
[] |
no_license
|
AnkitAvi11/100-Days-of-Code
|
00de02552dbe56b72013efb7df31b3f974d4646d
|
b18a6796f2fe71e9de920de4b8207b919289284d
|
refs/heads/master
| 2023-04-02T01:55:11.614657
| 2021-04-14T14:59:04
| 2021-04-14T14:59:04
| 322,918,001
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 408
|
py
|
if __name__ == '__main__' :
t = int(input())
for _ in range(t) :
n, b = map(int, input().split())
arr = list(map(int, input().split()))
count = 0
i = 0
while b > 0 and i < n :
if b - arr[i] < 0 :
i+=1
else :
b -= arr[i]
count += 1
i+=1
print(count)
|
[
"kumar.ankit383@gmail.com"
] |
kumar.ankit383@gmail.com
|
3895ee0232ce3f7da016b8440e1b31a495eea231
|
4a562f75c0fd44672b806498e18b67690a5baabd
|
/envs/block_pushing/oracles/oriented_push_oracle.py
|
69a7966ff02812845b00a75bd86d756f26befe23
|
[
"MIT"
] |
permissive
|
skandermoalla/bet-reproduction
|
ea8544953c0fd7b3957154f6fd5cafd5b52ebf00
|
e6dbad80aa4f534d880aa564da41e9bcabb9068f
|
refs/heads/main
| 2023-05-25T03:30:05.481404
| 2023-05-08T13:29:02
| 2023-05-15T11:47:04
| 571,006,196
| 0
| 0
|
MIT
| 2023-02-04T11:11:51
| 2022-11-26T21:06:45
|
Python
|
UTF-8
|
Python
| false
| false
| 9,712
|
py
|
# coding=utf-8
# Copyright 2022 The Reach ML Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Oracle for pushing task which orients the block then pushes it."""
import envs.block_pushing.oracles.pushing_info as pushing_info_module
import numpy as np
from tf_agents.policies import py_policy
from tf_agents.trajectories import policy_step
from tf_agents.trajectories import time_step as ts
from tf_agents.typing import types
# Only used for debug visualization.
import pybullet # pylint: disable=unused-import
class OrientedPushOracle(py_policy.PyPolicy):
"""Oracle for pushing task which orients the block then pushes it."""
def __init__(self, env, action_noise_std=0.0):
super(OrientedPushOracle, self).__init__(
env.time_step_spec(), env.action_spec()
)
self._env = env
self._np_random_state = np.random.RandomState(0)
self.phase = "move_to_pre_block"
self._action_noise_std = action_noise_std
def reset(self):
self.phase = "move_to_pre_block"
def get_theta_from_vector(self, vector):
return np.arctan2(vector[1], vector[0])
def theta_to_rotation2d(self, theta):
r = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
return r
def rotate(self, theta, xy_dir_block_to_ee):
rot_2d = self.theta_to_rotation2d(theta)
return rot_2d @ xy_dir_block_to_ee
def _get_action_info(self, time_step, block, target):
xy_block = time_step.observation["%s_translation" % block][:2]
theta_block = time_step.observation["%s_orientation" % block]
xy_target = time_step.observation["%s_translation" % target][:2]
xy_ee = time_step.observation["effector_target_translation"][:2]
xy_block_to_target = xy_target - xy_block
xy_dir_block_to_target = (xy_block_to_target) / np.linalg.norm(
xy_block_to_target
)
theta_to_target = self.get_theta_from_vector(xy_dir_block_to_target)
theta_error = theta_to_target - theta_block
# Block has 4-way symmetry.
while theta_error > np.pi / 4:
theta_error -= np.pi / 2.0
while theta_error < -np.pi / 4:
theta_error += np.pi / 2.0
xy_pre_block = xy_block + -xy_dir_block_to_target * 0.05
xy_nexttoblock = xy_block + -xy_dir_block_to_target * 0.03
xy_touchingblock = xy_block + -xy_dir_block_to_target * 0.01
xy_delta_to_nexttoblock = xy_nexttoblock - xy_ee
xy_delta_to_touchingblock = xy_touchingblock - xy_ee
xy_block_to_ee = xy_ee - xy_block
xy_dir_block_to_ee = xy_block_to_ee / np.linalg.norm(xy_block_to_ee)
theta_threshold_to_orient = 0.2
theta_threshold_flat_enough = 0.03
return pushing_info_module.PushingInfo(
xy_block=xy_block,
xy_ee=xy_ee,
xy_pre_block=xy_pre_block,
xy_delta_to_nexttoblock=xy_delta_to_nexttoblock,
xy_delta_to_touchingblock=xy_delta_to_touchingblock,
xy_dir_block_to_ee=xy_dir_block_to_ee,
theta_threshold_to_orient=theta_threshold_to_orient,
theta_threshold_flat_enough=theta_threshold_flat_enough,
theta_error=theta_error,
)
def _get_move_to_preblock(self, xy_pre_block, xy_ee):
max_step_velocity = 0.3
# Go 5 cm away from the block, on the line between the block and target.
xy_delta_to_preblock = xy_pre_block - xy_ee
diff = np.linalg.norm(xy_delta_to_preblock)
if diff < 0.001:
self.phase = "move_to_block"
xy_delta = xy_delta_to_preblock
return xy_delta, max_step_velocity
def _get_move_to_block(
self, xy_delta_to_nexttoblock, theta_threshold_to_orient, theta_error
):
diff = np.linalg.norm(xy_delta_to_nexttoblock)
if diff < 0.001:
self.phase = "push_block"
# If we need to re-orient, then re-orient.
if theta_error > theta_threshold_to_orient:
self.phase = "orient_block_left"
if theta_error < -theta_threshold_to_orient:
self.phase = "orient_block_right"
# Otherwise, push into the block.
xy_delta = xy_delta_to_nexttoblock
return xy_delta
def _get_push_block(
self, theta_error, theta_threshold_to_orient, xy_delta_to_touchingblock
):
# If need to reorient, go back to move_to_pre_block, move_to_block first.
if theta_error > theta_threshold_to_orient:
self.phase = "move_to_pre_block"
if theta_error < -theta_threshold_to_orient:
self.phase = "move_to_pre_block"
xy_delta = xy_delta_to_touchingblock
return xy_delta
def _get_orient_block_left(
self,
xy_dir_block_to_ee,
orient_circle_diameter,
xy_block,
xy_ee,
theta_error,
theta_threshold_flat_enough,
):
xy_dir_block_to_ee = self.rotate(0.2, xy_dir_block_to_ee)
xy_block_to_ee = xy_dir_block_to_ee * orient_circle_diameter
xy_push_left_spot = xy_block + xy_block_to_ee
xy_delta = xy_push_left_spot - xy_ee
if theta_error < theta_threshold_flat_enough:
self.phase = "move_to_pre_block"
return xy_delta
def _get_orient_block_right(
self,
xy_dir_block_to_ee,
orient_circle_diameter,
xy_block,
xy_ee,
theta_error,
theta_threshold_flat_enough,
):
xy_dir_block_to_ee = self.rotate(-0.2, xy_dir_block_to_ee)
xy_block_to_ee = xy_dir_block_to_ee * orient_circle_diameter
xy_push_left_spot = xy_block + xy_block_to_ee
xy_delta = xy_push_left_spot - xy_ee
if theta_error > -theta_threshold_flat_enough:
self.phase = "move_to_pre_block"
return xy_delta
def _get_action_for_block_target(self, time_step, block="block", target="target"):
# Specifying this as velocity makes it independent of control frequency.
max_step_velocity = 0.35
info = self._get_action_info(time_step, block, target)
if self.phase == "move_to_pre_block":
xy_delta, max_step_velocity = self._get_move_to_preblock(
info.xy_pre_block, info.xy_ee
)
if self.phase == "move_to_block":
xy_delta = self._get_move_to_block(
info.xy_delta_to_nexttoblock,
info.theta_threshold_to_orient,
info.theta_error,
)
if self.phase == "push_block":
xy_delta = self._get_push_block(
info.theta_error,
info.theta_threshold_to_orient,
info.xy_delta_to_touchingblock,
)
orient_circle_diameter = 0.025
if self.phase == "orient_block_left" or self.phase == "orient_block_right":
max_step_velocity = 0.15
if self.phase == "orient_block_left":
xy_delta = self._get_orient_block_left(
info.xy_dir_block_to_ee,
orient_circle_diameter,
info.xy_block,
info.xy_ee,
info.theta_error,
info.theta_threshold_flat_enough,
)
if self.phase == "orient_block_right":
xy_delta = self._get_orient_block_right(
info.xy_dir_block_to_ee,
orient_circle_diameter,
info.xy_block,
info.xy_ee,
info.theta_error,
info.theta_threshold_flat_enough,
)
if self._action_noise_std != 0.0:
xy_delta += self._np_random_state.randn(2) * self._action_noise_std
max_step_distance = max_step_velocity * (1 / self._env.get_control_frequency())
length = np.linalg.norm(xy_delta)
if length > max_step_distance:
xy_direction = xy_delta / length
xy_delta = xy_direction * max_step_distance
return xy_delta
def _action(self, time_step, policy_state):
if time_step.is_first():
self.reset()
xy_delta = self._get_action_for_block_target(
time_step, block="block", target="target"
)
return policy_step.PolicyStep(action=np.asarray(xy_delta, dtype=np.float32))
class OrientedPushNormalizedOracle(py_policy.PyPolicy):
"""Oracle for pushing task which orients the block then pushes it."""
def __init__(self, env):
super(OrientedPushNormalizedOracle, self).__init__(
env.time_step_spec(), env.action_spec()
)
self._oracle = OrientedPushOracle(env)
self._env = env
def reset(self):
self._oracle.reset()
def _action(self, time_step, policy_state):
time_step = time_step._asdict()
time_step["observation"] = self._env.calc_unnormalized_state(
time_step["observation"]
)
step = self._oracle._action(
ts.TimeStep(**time_step), policy_state
) # pylint: disable=protected-access
return policy_step.PolicyStep(
action=self._env.calc_normalized_action(step.action)
)
|
[
"n.m.mahi@gmail.com"
] |
n.m.mahi@gmail.com
|
ecb61c1d870c35f53fa6d719c2b8073e822d2f84
|
11298c7dedcf696e6c15a52aecaec34442a76bf4
|
/app.py
|
7698c05b819377223bedd89cfee720c23016faac
|
[] |
no_license
|
pashutk/pisdup
|
e355065bbff541e47f24295a41fadd817360b3b9
|
60c056434ee59246a242804a964c81beeda0e19c
|
refs/heads/master
| 2022-12-13T13:54:09.600887
| 2020-09-03T22:11:02
| 2020-09-03T22:11:02
| 291,564,504
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,908
|
py
|
# import pyudev
from enum import Enum, auto
import time
from pyee import BaseEventEmitter
from config import Config
from display import Display
from cli import Cli
from scene import Manager as SceneManager
from io_event import IOEvent
from devices_waiting import DevicesWaiting
from scenes import SceneId
from dummy import Dummy
from loading import Loading
class MainAppState(Enum):
INIT = auto()
WAITING_FOR_DEVICES = auto()
class App:
def __init__(self):
self.state = MainAppState.INIT
self.display = Display()
self.ee = BaseEventEmitter()
self.cli = None
if Config.ENABLE_CLI:
self.cli = Cli()
def init(self):
if Config.ENABLE_CLI:
self.cli.run()
def handle_cli_command(self, input_str):
command_up = "up"
command_ok = "ok"
command_down = "down"
if input_str == command_ok or input_str == command_up or input_str == command_down:
self.ee.emit(IOEvent.BUTTON)
if (input_str == command_up):
self.ee.emit(IOEvent.BUTTON_UP)
if (input_str == command_down):
self.ee.emit(IOEvent.BUTTON_DOWN)
if (input_str == command_ok):
self.ee.emit(IOEvent.BUTTON_OK)
if (input_str == "isd"):
self.ee.emit(IOEvent.INSERT_SD)
if (input_str == "ihd"):
self.ee.emit(IOEvent.INSERT_HDD)
if (input_str == "esd"):
self.ee.emit(IOEvent.EJECT_SD)
if (input_str == "ehd"):
self.ee.emit(IOEvent.EJECT_HDD)
def run(self):
self.init()
sm = SceneManager()
sm.register_scene(DevicesWaiting(self.display, self.ee))
# sm.go(SceneId.DEVICES_WAITING)
sm.register_scene(Dummy(self.display))
# sm.go(SceneId.DUMMY)
sm.register_scene(Loading(self.display))
sm.go(SceneId.LOADING)
while (True):
if Config.ENABLE_CLI:
input_str = self.cli.read()
if input_str == 'exit':
sm.destroy()
break
self.handle_cli_command(input_str)
time.sleep(0.01)
# context = pyudev.Context()
# monitor = pyudev.Monitor.from_netlink(context)
# monitor.filter_by('block')
# for device in iter(monitor.poll, None):
# if 'ID_FS_TYPE' in device:
# print('{0} partition {1}, {2}'.format(
# device.action, device.get('ID_FS_LABEL'), device.device_node))
# #
# monitor = pyudev.Monitor.from_netlink(context)
# monitor.filter_by('block')
# def log_event(action, device):
# if 'ID_FS_TYPE' in device:
# with open('filesystems.log', 'a+') as stream:
# print('{0} - {1}'.format(action,
# device.get('ID_FS_LABEL')), file=stream)
# observer = pyudev.MonitorObserver(monitor, log_event)
# observer.start()
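# Hedged entry-point sketch (not in the original file): App is defined above but
# never instantiated here; a typical way to launch it would be:
if __name__ == '__main__':
    App().run()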
|
[
"me@pashutk.ru"
] |
me@pashutk.ru
|
16d6b0f14cd8d7d12e31e939e0dffab0b47a4f01
|
8ffefb1481048fe5450a4f73e45e5cbd77ce99e8
|
/ruoyi-system/src/main/java/com/ruoyi/system/controller/AddComment.py
|
69de4beaab24d939e6baf82f2a4cf3a9ba9b1be0
|
[
"MIT"
] |
permissive
|
apple0518/CS
|
6915d1aed033ee8e5113bc5031fa7832cdd88ce0
|
889d8b26f1d67edee7097d141729c2b0d2b3ee90
|
refs/heads/main
| 2023-05-26T04:16:20.377136
| 2021-06-14T07:35:52
| 2021-06-14T07:35:52
| 365,686,601
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,701
|
py
|
#!python3
# coding: utf-8
import requests, random, os,sys
import time
def refreshCaptcha():
url = "http://c2020502194rsy.scd.wezhan.cn/Common/GenerateCommentCaptcha"
myheaders = {
"Cookie": "yibu_rt_language=zh-CN; ASP.NET_SessionId=zehh3mndik3o5oeynn5pe0nm; __RequestVerificationToken=WgJnwrfswdgo-I4j_F7a6LpoU9HeniDdG0Vbg2rOgwSRWaAXASvV67zRcgLb0WLjpVgPzY0fzPp5-GpyQJZlM7ry63iSujDsOIpsryBdl741; acw_tc=781bad0b16169177065446661e51423afadeceb60e8520165bc7351d1e8a11; SERVERID=9cce0917ca076d8ead327ae4668516bf|1616917784|1616917111",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36"
}
timestamp = format(random.random(), ".16f")
captchaResp = requests.get(url, headers=myheaders, params={"Timestamp": timestamp})
# fetch the captcha image
filename = "{}.jpg".format("captcha")
with open(filename, "wb") as file:
file.write(captchaResp.content)
# save a copy of the image under the data folder
file_jpg = 'data/'+str(time.time()) + '.jpg'
with open(file_jpg, "wb") as file:
file.write(captchaResp.content)
print("Captcha image is : {}".format(os.path.abspath(filename)))
return timestamp
def addComment(captchanum, comment, timestamp):
url = "http://c2020502194rsy.scd.wezhan.cn/Comment/AddComment"
comment_headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
"Cookie": "yibu_rt_language=zh-CN; ASP.NET_SessionId=zehh3mndik3o5oeynn5pe0nm;"
" __RequestVerificationToken=WgJnwrfswdgo-I4j_F7a6LpoU9HeniDdG0Vbg2rOgwSRWaAXASvV67zRcgLb0WLjpVgPzY0fzPp5-GpyQJZlM7ry63iSujDsOIpsryBdl741; "
"acw_tc=781bad0b16169239350794725e513f23f81a57f4a0c5f45e49db4694011377; SERVERID=9cce0917ca076d8ead327ae4668516bf|1616924822|1616924768",
"Referer": "http://c2020502194rsy.scd.wezhan.cn/lyhd",
"Origin": "http://c2020502194rsy.scd.wezhan.cn"
}
comment_payload = {
"CommentText": comment,
"Captcha": captchanum,
"EntityId": 293515,
"EntityType": 1,
"Timestamp": timestamp, #这个值要跟请求验证码时候的Timestamp保持一致
"__RequestVerificationToken": "WDw2cS0TSvXskn8kehzGX_Ixp_J_1fr4Mmb7_ETkCFYlMK5mwCrRXcNwS4lcVgByupVYNJehEIthw_pIntPkwmV2RSuiR5uufTlAt5TxGoo1"
}
resp = requests.post(url, headers=comment_headers, data=comment_payload)
# print("Got response : {}".format(resp.text))
print(resp.json()['IsSuccess'])
capnum = sys.argv[1]
comment = sys.argv[2]
timestamp = sys.argv[3]
addComment(capnum, comment, timestamp)
# capnum = "a723"
# comment = "测试评论"
# timestamp = "0.3147280830624921"
# addComment(capnum, comment, timestamp)
|
[
"2667861645@qq.com"
] |
2667861645@qq.com
|
ae81d7bfbd4cb98a77b9a988fc6541b94449cf0b
|
dfb605df317c8ab653eb8e7363e69876e4081e23
|
/Contraseñas.py
|
ed1fedbb9057b226d9b43a1a97e9279338ce45e2
|
[] |
no_license
|
davalerova/Contrasenas
|
72468a495b35bdcabbb4a2ea874f2639c9348b9e
|
725da2969f3fbd7fcb0f0e8ef8b05b15a2c0cf14
|
refs/heads/master
| 2020-04-22T23:57:40.210245
| 2019-02-23T05:14:17
| 2019-02-23T05:14:17
| 170,760,463
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 768
|
py
|
from typing import List
letras: List[str]="a b c d e f g h i j k l m n o p q r s t u v w x y z".split()
opcion=input("Ingrese 1 para cifrar y 2 para decifrar")
mensaje=input("Ingrese el mensaje")
clave=input("Ingrese la clave")
criptograma=""
auxClave=clave.__len__()
cont=0
for i in mensaje:
if opcion=="1":
criptograma+=letras[(letras.index(i)+(letras.index(clave[cont %auxClave])))%26]
cont+=1
elif opcion=="2":
criptograma += letras[(letras.index(i)+26-(letras.index(clave[cont % auxClave]))) % 26]
cont +=1
if opcion=="1":
print("El mensaje ",mensaje, "cifrado con la clave ",clave, "es ",criptograma)
elif opcion=="2":
print("El criptograma ",mensaje, "descifrado con la clave ",clave, "significa ",criptograma)
|
[
"dvdovni@gmail.com"
] |
dvdovni@gmail.com
|
51500b0dd972b4cb3c6c9ffdf273d82847cbe4ed
|
6b49fdae71d48738b483a7e25f0d9d1a46920cf3
|
/src/versionwidget.py
|
9910a09aa6f5069d8cb30228e68a0c6562f99280
|
[] |
no_license
|
eckamm/rut
|
12d9fe4183db7afb66833555ad74a2ca7f7c11a3
|
f0fe26c5760c9887f2d989e150348bf1b2fc0da9
|
refs/heads/master
| 2020-05-20T05:02:06.068214
| 2014-01-12T18:32:56
| 2014-01-12T18:32:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 414
|
py
|
from common import *
from fonts import Fonts
class VersionWidget:
def __init__(self):
antialias = True
self.render = make_text(Fonts.f15, "Version %s" % (VERSION,), antialias, THECOLORS["white"], TEXT_BACKGROUND)
self.box = self.render.get_rect()
self.box.bottomright = (SCREEN_WIDTH, SCREEN_HEIGHT)
def draw(self, surface):
surface.blit(self.render, self.box)
|
[
"eckamm@gmail.com"
] |
eckamm@gmail.com
|
392393423b9b400c275ce8f6176f681b7c7e261f
|
15ecf3815e9e340fde53ef344a01b696fcd4cea0
|
/docs/conf.py
|
fb30348c7d004bd2899261a9df6a1caebf10ac0b
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
suzuken/dynamic-dynamodb
|
eddc76e21e1a5dba284841c3eeb43ccd206918b6
|
e1089a9330c65a5b3030220deea05c030701f8c2
|
refs/heads/master
| 2021-01-12T19:59:52.732542
| 2014-04-07T07:01:44
| 2014-04-07T07:01:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,252
|
py
|
# -*- coding: utf-8 -*-
#
# Dynamic DynamoDB documentation build configuration file, created by
# sphinx-quickstart on Fri Nov 15 17:42:37 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Dynamic DynamoDB'
copyright = u'2013, Sebastian Dahlgren'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.10'
# The full version, including alpha/beta/rc tags.
release = '1.10.7'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'DynamicDynamoDBdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'DynamicDynamoDB.tex', u'Dynamic DynamoDB Documentation',
u'Sebastian Dahlgren', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'dynamicdynamodb', u'Dynamic DynamoDB Documentation',
[u'Sebastian Dahlgren'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'DynamicDynamoDB', u'Dynamic DynamoDB Documentation',
u'Sebastian Dahlgren', 'DynamicDynamoDB', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
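# Hedged usage note (not part of the original conf.py): the HTML documentation
# is typically built from the directory containing this file with, e.g.,
#   sphinx-build -b html . _build/html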
|
[
"sebastian.dahlgren@gmail.com"
] |
sebastian.dahlgren@gmail.com
|
1cc79a4b56db7f1e0cd4f30f446c7ce9b1d785e3
|
852b214ee5279262c55e47a86b3c4ad62fcaafb1
|
/libs/bin/chardetect
|
afd85eac8e245dfdaad446a96b94b126dce1ad2e
|
[] |
no_license
|
darrenoon/ASXETOScrape
|
f1c3a759e1d633b472c76c7fe6697b1b0f6db838
|
32e819a9149bbd45429aa6025a75351a81ab5c0a
|
refs/heads/master
| 2020-04-09T21:39:37.069129
| 2018-12-15T01:41:33
| 2018-12-15T01:41:33
| 160,608,771
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
#!/home/darrenoon/envs/hello_world/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from chardet.cli.chardetect import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"darrenoon@gmail.com"
] |
darrenoon@gmail.com
|
|
e8c95fbedcbe1eb5059ecb60b7717de86483c562
|
09aff11fe5ccac7548d49db3a92803675117eaf3
|
/BackEnd/Semana3/Dia3/7-operadores-de-identidad.py
|
b9371551d3323699773fcf93e42552ada8165ea9
|
[] |
no_license
|
jorgegarba/CodiGo8
|
4defe235a790ebc248f9278f18ca050fde6c195f
|
f22b2b405ad999e5b960ce5f52936cd4e472af35
|
refs/heads/master
| 2023-01-09T02:14:27.134870
| 2020-03-18T03:15:52
| 2020-03-18T03:15:52
| 211,871,983
| 4
| 1
| null | 2023-01-07T22:11:22
| 2019-09-30T13:54:52
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 221
|
py
|
# Identity operators
# is -> "is the same object"
# is not -> "is not the same object"
# they check whether two names point to the same
# memory address
frutas = ["manzana","pera"]
frutas3 = frutas
print(frutas3 is frutas)
print(frutas is not frutas3)
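# Hedged follow-up example (not in the original file): a shallow copy holds the
# same contents but is a different object, so `is` returns False.
frutas2 = list(frutas)
print(frutas2 == frutas)   # True  - equal contents
print(frutas2 is frutas)   # False - different objects in memory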
|
[
"ederiveroman@gmail.com"
] |
ederiveroman@gmail.com
|
38ae25b8603fcc76935e07a811ba2144eb374b9d
|
c224e403165e0461d90a0a1ec22e8e8a050a376e
|
/Week3/asymmetric_friendships.py
|
451f09deb1df2f7345126f8ac57d94d356090fdf
|
[] |
no_license
|
nasimulhasan/Data_Science
|
14eae70c1983f1a24ec9944d270dd904508fd7e9
|
d2f037ce331d9936256b0eb0bc21f962de0c7f4d
|
refs/heads/master
| 2020-03-18T08:44:02.514118
| 2017-08-27T10:14:52
| 2017-08-27T10:14:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,355
|
py
|
import MapReduce
import sys
"""
Asymmetric friendships example in the Simple Python MapReduce Framework
"""
mr = MapReduce.MapReduce()
# =============================
# Do not modify above this line
def mapper(record):
# key: document identifier
# value: document contents
#print record
key = record[0]
value = record[1]
## print "value"
## print value
## print "Key: " + key + " Value: " + value
## words = value.split()
## for w in words:
mr.emit_intermediate(key, [key, value])
def reducer(key, list_of_values):
# key: word
# value: list of occurrence counts
#print (key, list_of_values)
## print key
## print type(list_of_values)
dct = {}
#mr.emit((list_of_values))
a = [key]
#print a
#print type(a)
## tab = list_of_values
## print "Mapped list: "
## print tab
## print "======================================================="
#print "================"
for i in range(len(list_of_values)):
l1 = list_of_values[i]
l3 = tuple(list(l1))
#print l3
##
mr.emit((l3))
list_of_values[i][1], list_of_values[i][0] = list_of_values[i][0], list_of_values[i][1]
x = list_of_values
if x[i] != l1:
l2 = x[i]
#l1 = tuple(l1)
#print l2
mr.emit((x[i]))
## print "Swapped list: "
## l = list_of_values
## print x
## print "======================================================="
## for i in list_of_values:
## print i
## t = []
## #if tab != list_of_values:
## x = x + tab
## print "Joined list: "
## print x
#print tab
## for i in t:
## print i
## for j in tab:
## for k in list_of_values:
## if j != k:
## tab += list_of_values
## print tab
## for i in list_of_values:
## print type(i)
## print i[0]
#print tab
## for i in range(len(tab)):
## print i
#dct[tab[i]] = tab[i + 1]
#print dct
# Do not modify below this line
# =============================
##if __name__ == '__main__':
## inputdata = open(sys.argv[1])
## mr.execute(inputdata, mapper, reducer)
import json
friends = open("friends.json", "r")
#friends = open("asymmetric_friendships.json", "r")
mr.execute(friends, mapper, reducer)
|
[
"taufeeq525@gmail.com"
] |
taufeeq525@gmail.com
|
664ac5a99f3c33152a268fcf92e42e161423f1a6
|
e5d5e4d0ef2db5ebeab4a6464f672568ad961961
|
/scripts/external_libs/scapy-2.3.1/python2/scapy/layers/vxlan.py
|
78171762a1f43034c9f8b411e0d440efde6f6ca7
|
[
"Apache-2.0",
"GPL-1.0-or-later",
"GPL-2.0-or-later",
"GPL-2.0-only"
] |
permissive
|
elados93/trex-core
|
d7eebc5efbbb6ca0e971d0d1eca79793eb389a3b
|
3a6d63af1ff468f94887a091e3a408a8449cf832
|
refs/heads/master
| 2020-07-03T13:11:03.017040
| 2019-11-25T09:20:04
| 2019-11-25T09:20:04
| 182,962,731
| 1
| 0
|
Apache-2.0
| 2019-05-06T09:05:56
| 2019-04-23T07:49:11
|
C
|
UTF-8
|
Python
| false
| false
| 3,035
|
py
|
#! /usr/bin/env python
# RFC 7348 - Virtual eXtensible Local Area Network (VXLAN):
# A Framework for Overlaying Virtualized Layer 2 Networks over Layer 3 Networks
# http://tools.ietf.org/html/rfc7348
# https://www.ietf.org/id/draft-ietf-nvo3-vxlan-gpe-02.txt
#
# VXLAN Group Policy Option:
# http://tools.ietf.org/html/draft-smith-vxlan-group-policy-00
from scapy.packet import Packet, bind_layers
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, UDP
from scapy.layers.inet6 import IPv6
from scapy.fields import FlagsField, XByteField, ThreeBytesField, \
ConditionalField, ShortField, ByteEnumField, X3BytesField
_GP_FLAGS = ["R", "R", "R", "A", "R", "R", "D", "R"]
class VXLAN(Packet):
name = "VXLAN"
fields_desc = [
FlagsField("flags", 0x8, 8,
['OAM', 'R', 'NextProtocol', 'Instance',
'V1', 'V2', 'R', 'G']),
XByteField("reserved0", 0),
# ConditionalField(
# ShortField("reserved0", 0),
# lambda pkt: pkt.flags.NextProtocol,
# ),
# ConditionalField(
# ByteEnumField('NextProtocol', 0,
# {0: 'NotDefined',
# 1: 'IPv4',
# 2: 'IPv6',
# 3: 'Ethernet',
# 4: 'NSH'}),
# lambda pkt: pkt.flags.NextProtocol,
# ),
# ConditionalField(
# ThreeBytesField("reserved1", 0),
# lambda pkt: (not pkt.flags.G) and (not pkt.flags.NextProtocol),
# ),
ConditionalField(
FlagsField("gpflags", 0, 8, _GP_FLAGS),
lambda pkt: pkt.flags & 1,
),
#ConditionalField(
ShortField("gpid", 0),
#lambda pkt: pkt.flags & 1,
#),
X3BytesField("vni", 0),
XByteField("reserved2", 0),
]
# Use default linux implementation port
overload_fields = {
UDP: {'dport': 8472},
}
def mysummary(self):
if self.flags.G:
return self.sprintf("VXLAN (vni=%VXLAN.vni% gpid=%VXLAN.gpid%)")
else:
return self.sprintf("VXLAN (vni=%VXLAN.vni%)")
bind_layers(UDP, VXLAN, dport=4789) # RFC standard vxlan port
bind_layers(UDP, VXLAN, dport=4790) # RFC standard vxlan-gpe port
bind_layers(UDP, VXLAN, dport=6633) # New IANA assigned port for use with NSH
bind_layers(UDP, VXLAN, dport=8472) # Linux implementation port
bind_layers(UDP, VXLAN, sport=4789)
bind_layers(UDP, VXLAN, sport=4790)
bind_layers(UDP, VXLAN, sport=6633)
bind_layers(UDP, VXLAN, sport=8472)
# By default, set both ports to the RFC standard
bind_layers(UDP, VXLAN, sport=4789, dport=4789)
bind_layers(VXLAN, Ether)
# bind_layers(VXLAN, IP, NextProtocol=1)
# bind_layers(VXLAN, IPv6, NextProtocol=2)
# bind_layers(VXLAN, Ether, flags=4, NextProtocol=0)
# bind_layers(VXLAN, IP, flags=4, NextProtocol=1)
# bind_layers(VXLAN, IPv6, flags=4, NextProtocol=2)
# bind_layers(VXLAN, Ether, flags=4, NextProtocol=3)
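# Hedged usage sketch (not part of the original module): building a
# VXLAN-encapsulated frame with the layer defined above; UDP port 4789 is the
# RFC 7348 standard port and VNI 42 is an arbitrary example value.
if __name__ == '__main__':
    pkt = Ether() / IP(dst="192.0.2.1") / UDP(dport=4789) / \
        VXLAN(vni=42) / Ether() / IP(dst="198.51.100.1")
    pkt.show()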
|
[
"mamonney@cisco.com"
] |
mamonney@cisco.com
|
b62ddc114d6ea1f271bfcbb5b0486aa58b36366d
|
8fd28b248511f42ad8732ca1e574aada33908376
|
/configs/recognition/r2plus1d/r2plus1d_r34_8x8x1_750e_hmdb51_rgb_40percent_vidssl.py
|
9f6ecdb8f30c7078c713e9ad3e2cb3ebe7440308
|
[
"Apache-2.0"
] |
permissive
|
vt-vl-lab/video-data-aug
|
28bd175535cab1444055502389c8f5d7d75e4bd2
|
01667cdbd1b952f2510af3422beeeb76e0d9e15a
|
refs/heads/main
| 2023-09-01T02:36:40.034893
| 2021-07-21T01:31:42
| 2021-07-21T01:31:42
| 352,920,339
| 29
| 6
|
Apache-2.0
| 2021-07-21T01:29:36
| 2021-03-30T08:06:54
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,234
|
py
|
# model settings
model = dict(
type='Recognizer3D',
backbone=dict(
type='ResNet2Plus1d',
depth=34,
pretrained=None,
pretrained2d=False,
norm_eval=False,
conv_cfg=dict(type='Conv2plus1d'),
norm_cfg=dict(type='SyncBN', requires_grad=True, eps=1e-3),
act_cfg=dict(type='ReLU'),
conv1_kernel=(3, 7, 7),
conv1_stride_t=1,
pool1_stride_t=1,
inflate=(1, 1, 1, 1),
spatial_strides=(1, 2, 2, 2),
temporal_strides=(1, 2, 2, 2),
zero_init_residual=False),
cls_head=dict(
type='I3DHead',
num_classes=101,
in_channels=512,
spatial_type='avg',
dropout_ratio=0.5,
init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/hmdb51/rawframes/'
data_root_val = 'data/hmdb51/rawframes/'
split = 1 # official train/test splits. valid numbers: 1, 2, 3
ann_file_train = f'data/hmdb51/videossl_splits/hmdb51_train_40_percent_labeled_split_{split}_rawframes.txt'
ann_file_val = f'data/hmdb51/hmdb51_val_split_{split}_rawframes.txt'
ann_file_test = f'data/hmdb51/hmdb51_val_split_{split}_rawframes.txt'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='RandomResizedCrop'),
dict(type='Resize', scale=(224, 224), keep_ratio=False),
dict(type='Flip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
dict(
type='SampleFrames',
clip_len=8,
frame_interval=8,
num_clips=1,
test_mode=True),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='CenterCrop', crop_size=224),
dict(type='Flip', flip_ratio=0),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
dict(
type='SampleFrames',
clip_len=8,
frame_interval=8,
num_clips=10,
test_mode=True),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='ThreeCrop', crop_size=256),
dict(type='Flip', flip_ratio=0),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs'])
]
data = dict(
videos_per_gpu=16,
workers_per_gpu=4,
train=dict(
type=dataset_type,
ann_file=ann_file_train,
data_prefix=data_root,
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=ann_file_val,
data_prefix=data_root_val,
pipeline=val_pipeline,
test_mode=True),
test=dict(
type=dataset_type,
ann_file=ann_file_val,
data_prefix=data_root_val,
pipeline=test_pipeline,
test_mode=True))
# optimizer
optimizer = dict(
type='SGD', lr=0.2, momentum=0.9,
weight_decay=0.0001) # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='CosineAnnealing', min_lr=0)
total_epochs = 750
checkpoint_config = dict(interval=5)
evaluation = dict(
interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'], topk=(1, 5))
log_config = dict(
interval=20,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook'),
])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/r2plus1d_r34_8x8x1_750e_hmdb51_rgb_40percent_vidssl/'
load_from = None
resume_from = None
workflow = [('train', 1)]
find_unused_parameters = False
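# Hedged usage note (assumption, not part of the original config): mmaction2
# configs like this one are normally launched through the framework's training
# entry point, e.g. `bash tools/dist_train.sh <path to this config> 8` for 8 GPUs.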
|
[
"zouyuliang123@gmail.com"
] |
zouyuliang123@gmail.com
|
cc571ebd67196de901eb9f0656e5b556db4137d6
|
86cbaea221b5404dfdde68c89e2471ba07609bff
|
/anti_code/config/defaults.py
|
82aee2ce9df5304e3c728e9394f219dcc873dc3b
|
[] |
no_license
|
tommyjiang/iccv-2021-anti-spoofing
|
d3481e086f452cac560da1d1a69e7e452d304be4
|
144748c381bdf32040f985f60b76a4fdc2965115
|
refs/heads/main
| 2023-06-04T12:43:52.076534
| 2021-06-24T06:41:02
| 2021-06-24T06:41:02
| 378,965,229
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,007
|
py
|
from yacs.config import CfgNode as CN
# -----------------------------------------------------------------------------
# Convention about Training / Test specific parameters
# -----------------------------------------------------------------------------
# Whenever an argument can be either used for training or for testing, the
# corresponding name will be post-fixed by a _TRAIN for a training parameter,
# or _TEST for a test-specific parameter.
# For example, the number of images during training will be
# IMAGES_PER_BATCH_TRAIN, while the number of images for testing will be
# IMAGES_PER_BATCH_TEST
# -----------------------------------------------------------------------------
# Config definition
# -----------------------------------------------------------------------------
_C = CN()
_C.MODEL = CN()
# Using cuda or cpu for training
_C.MODEL.DEVICE = "cuda"
# ID number of GPU
_C.MODEL.DEVICE_ID = '0,1,2,3,4,5,6,7' #0,1,2,3,4,5,6,7
# Name of backbone
_C.MODEL.NAME = 'resnet50'
_C.MODEL.ARCH = 'b0'
_C.MODEL.ENCODER = 'resnet50'
# Last stride of backbone
_C.MODEL.LAST_STRIDE = 1
# Path to pretrained model of backbone
_C.MODEL.PRETRAIN_PATH = ''
# Use ImageNet pretrained model to initialize backbone or use self trained model to initialize the whole model
# Options: 'imagenet' or 'self'
_C.MODEL.PRETRAIN_CHOICE = 'imagenet'
# If train with BNNeck, options: 'bnneck' or 'no'
_C.MODEL.NECK = 'bnneck'
# If train loss include center loss, options: 'yes' or 'no'. Loss with center loss has different optimizer configuration
_C.MODEL.IF_WITH_CENTER = 'no'
# The loss type of metric loss
# options:['triplet'](without center loss) or ['center','triplet_center'](with center loss)
_C.MODEL.METRIC_LOSS_TYPE = 'triplet'
# For example, if loss type is cross entropy loss + triplet loss + center loss
# the setting should be: _C.MODEL.METRIC_LOSS_TYPE = 'triplet_center' and _C.MODEL.IF_WITH_CENTER = 'yes'
# If train with label smooth, options: 'on', 'off'
_C.MODEL.IF_LABELSMOOTH = 'on'
# -----------------------------------------------------------------------------
# INPUT
# -----------------------------------------------------------------------------
_C.INPUT = CN()
# Size of the image during training
_C.INPUT.SIZE_TRAIN = [333, 333]
_C.INPUT.TARGET_TRAIN = [256,256]
# Size of the image during test
_C.INPUT.SIZE_TEST = [256, 256]
# Random probability for image horizontal flip
_C.INPUT.PROB = 0.5
# Random probability for random erasing
_C.INPUT.RE_PROB = 0.5
# Values to be used for image normalization
_C.INPUT.PIXEL_MEAN = [0.485, 0.456, 0.406]
# Values to be used for image normalization
_C.INPUT.PIXEL_STD = [0.229, 0.224, 0.225]
# Value of padding size
_C.INPUT.PADDING = 10
# -----------------------------------------------------------------------------
# Dataset
# -----------------------------------------------------------------------------
_C.DATASETS = CN()
# List of the dataset names for training, as present in paths_catalog.py
_C.DATASETS.NAMES = ('market1501')
# Root directory where datasets should be used (and downloaded if not found)
_C.DATASETS.ROOT_DIR = ('./data')
# -----------------------------------------------------------------------------
# DataLoader
# -----------------------------------------------------------------------------
_C.DATALOADER = CN()
# Number of data loading threads
_C.DATALOADER.NUM_WORKERS = 8
# Sampler for data loading
_C.DATALOADER.SAMPLER = 'softmax'
# Number of instance for one batch
_C.DATALOADER.NUM_INSTANCE = 16
_C.DATALOADER.TRANSFORMS = 'torch' ## or albu
# ---------------------------------------------------------------------------- #
# Solver
# ---------------------------------------------------------------------------- #
_C.SOLVER = CN()
# Name of optimizer
_C.SOLVER.OPTIMIZER_NAME = "Adam"
# Number of max epoches
_C.SOLVER.MAX_EPOCHS = 50
# Base learning rate
_C.SOLVER.BASE_LR = 3e-4
# Factor of learning bias
_C.SOLVER.BIAS_LR_FACTOR = 2
# Momentum
_C.SOLVER.MOMENTUM = 0.9
# Margin of triplet loss
_C.SOLVER.MARGIN = 0.3
# Margin of cluster loss
_C.SOLVER.CLUSTER_MARGIN = 0.3
# Learning rate of SGD to learn the centers of center loss
_C.SOLVER.CENTER_LR = 0.5
# Balanced weight of center loss
# _C.SOLVER.CENTER_LOSS_WEIGHT = 0.0005
# # Settings of range loss
# _C.SOLVER.RANGE_K = 2
# _C.SOLVER.RANGE_MARGIN = 0.3
# _C.SOLVER.RANGE_ALPHA = 0
# _C.SOLVER.RANGE_BETA = 1
# _C.SOLVER.RANGE_LOSS_WEIGHT = 1
# Settings of weight decay
_C.SOLVER.WEIGHT_DECAY = 0.0005
_C.SOLVER.WEIGHT_DECAY_BIAS = 0.
# decay rate of learning rate
_C.SOLVER.GAMMA = 0.1
# decay step of learning rate
_C.SOLVER.STEPS = (30, 55)
# warm up factor
_C.SOLVER.WARMUP_FACTOR = 1.0 / 3
# iterations of warm up
_C.SOLVER.WARMUP_ITERS = 500
# method of warm up, option: 'constant','linear'
_C.SOLVER.WARMUP_METHOD = "linear"
# epoch number of saving checkpoints
_C.SOLVER.CHECKPOINT_PERIOD = 50
# iteration of display training log
_C.SOLVER.LOG_PERIOD = 100
# epoch number of validation
_C.SOLVER.EVAL_PERIOD = 50
_C.SOLVER.PESUDO_UPDATE_PERIOD = 1
_C.SOLVER.PESUDO_SKIP = 5
# Number of images per batch
# This is global, so if we have 8 GPUs and IMS_PER_BATCH = 16, each GPU will
# see 2 images per batch
_C.SOLVER.IMS_PER_BATCH = 64
# This is global, so if we have 8 GPUs and IMS_PER_BATCH = 16, each GPU will
# see 2 images per batch
_C.TEST = CN()
# Number of images per batch during test
_C.TEST.IMS_PER_BATCH = 128
# If test with re-ranking, options: 'yes','no'
_C.TEST.RE_RANKING = 'no'
# Path to trained model
_C.TEST.WEIGHT = ""
# Which feature of BNNeck to be used for test, before or after BNNneck, options: 'before' or 'after'
_C.TEST.NECK_FEAT = 'after'
# Whether feature is nomalized before test, if yes, it is equivalent to cosine distance
_C.TEST.FEAT_NORM = 'yes'
# ---------------------------------------------------------------------------- #
# Misc options
# ---------------------------------------------------------------------------- #
# Path to checkpoint and saved log of trained model
_C.OUTPUT_DIR = ""
|
[
"tommy_jiang@foxmail.com"
] |
tommy_jiang@foxmail.com
|
4060f4f822289e06e61ca70655568a106382745c
|
594680eb2d243ea0d5c10c1f8bd74cbc180f0165
|
/core_models/view.py
|
ccff4a7b2e6424d56c1cc8447b632cdd95e02a2b
|
[] |
no_license
|
shalevy1/flare
|
d538ce20a221c0c59c251245ccdaa6ab16d59d99
|
18c8ff329e17e24c381d840cdd584abfa528db1e
|
refs/heads/master
| 2022-12-22T08:35:30.163090
| 2020-09-26T19:11:57
| 2020-09-26T19:11:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 621
|
py
|
import peewee as pw
from flare import BaseModel, Registry
import json
class JSONField(pw.TextField):
def db_value(self, value):
return json.dumps(value)
def python_value(self, value):
if value is not None:
return json.loads(value)
class FlrView(BaseModel):
name = pw.CharField()
definition = JSONField()
view_type = pw.CharField(choices=[("list","List"),("form","Form")],default="list")
menu_id = pw.ForeignKeyField(Registry["FlrMenu"], null=True, backref="views")
model = pw.CharField()
sequence = pw.IntegerField(default=1)
FlrView.r()
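# Hedged illustration (not part of the original module): JSONField stores Python
# objects as JSON text in the database and decodes them again when read back.
if __name__ == '__main__':
    field = JSONField()
    stored = field.db_value({"fields": ["name", "sequence"]})
    print(stored)                      # '{"fields": ["name", "sequence"]}'
    print(field.python_value(stored))  # {'fields': ['name', 'sequence']}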
|
[
"yayforme789@gmail.com"
] |
yayforme789@gmail.com
|
6671c788b36f34bfed85317a2c93531493733fab
|
0567fcd808397a7024b5009cc290de1c414eff06
|
/src/633.sum-of-square-numbers.py
|
1809710572c9e6319075ad61abf8525f28204e92
|
[] |
no_license
|
tientheshy/leetcode-solutions
|
d3897035a7fd453b9f47647e95f0f92a03bff4f3
|
218a8a97e3926788bb6320dda889bd379083570a
|
refs/heads/master
| 2023-08-23T17:06:52.538337
| 2021-10-03T01:47:50
| 2021-10-03T01:47:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 446
|
py
|
#
# @lc app=leetcode id=633 lang=python3
#
# [633] Sum of Square Numbers
#
# @lc code=start
# TAGS: Math, Two Pointers, Binary Search
class Solution:
# Time: O(sqrt(c)). Space: O(sqrt(c))
def judgeSquareSum(self, c: int) -> bool:
squares = set(n*n for n in range(int(c**0.5) + 1))
for square in squares:
if square <= c and (c - square) in squares:
return True
return False
# @lc code=end
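# Hedged usage sketch (not part of the submitted solution): 5 = 1^2 + 2^2, so it
# qualifies, while 3 has no such decomposition.
if __name__ == '__main__':
    solution = Solution()
    print(solution.judgeSquareSum(5))  # True
    print(solution.judgeSquareSum(3))  # False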
|
[
"trung.nang.hoang@gmail.com"
] |
trung.nang.hoang@gmail.com
|
fe584bf4c09946e95cd5192c04c0686749124445
|
083ca3df7dba08779976d02d848315f85c45bf75
|
/StrongPasswordChecker.py
|
d78b7b329b3d78aed6ae00e785028b68a397ea7a
|
[] |
no_license
|
jiangshen95/UbuntuLeetCode
|
6427ce4dc8d9f0f6e74475faced1bcaaa9fc9f94
|
fa02b469344cf7c82510249fba9aa59ae0cb4cc0
|
refs/heads/master
| 2021-05-07T02:04:47.215580
| 2020-06-11T02:33:35
| 2020-06-11T02:33:35
| 110,397,909
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,620
|
py
|
class Solution:
def strongPasswordChecker(self, s):
"""
:type s: str
:rtype: int
"""
needLower, needUpper, needDigit = 1, 1, 1
lenCounts = []
count = 1
for i in range(len(s)):
if 'a' <= s[i] <= 'z':
needLower = 0
elif 'A' <= s[i] <= 'Z':
needUpper = 0
elif '0' <= s[i] <= '9':
needDigit = 0
if i > 0 and s[i] == s[i - 1]:
count += 1
else:
if count >= 3:
lenCounts.append(count)
count = 1
if count >= 3:
lenCounts.append(count)
if len(s) < 6:
return max(6 - len(s), needLower + needUpper + needDigit)
else:
over = max(0, len(s) - 20)
step = over
for i in range(len(lenCounts)):
if over > 0 and lenCounts[i] % 3 != 2:
t = lenCounts[i] % 3 + 1
if over - t >= 0:
over -= t
lenCounts[i] -= t
left = 0
for i in range(len(lenCounts)):
if over > 0 and lenCounts[i] >= 3:
t = lenCounts[i] - 2
lenCounts[i] -= over
over -= t
if lenCounts[i] >= 3:
left += lenCounts[i] // 3
return step + max(left, needLower + needUpper + needDigit)
if __name__ == '__main__':
s = input()
solution = Solution()
print(solution.strongPasswordChecker(s))
|
[
"jiangshen95@163.com"
] |
jiangshen95@163.com
|
ca2d4092b151262bd72862c203860802ce9addff
|
45197bf7c0921435035b0fd4b7c83057f75e50f1
|
/app/logger.py
|
1e08509f2233940fabf03c81154f90433f78a099
|
[] |
no_license
|
fiefdx/CallingViewer
|
4307e9376afc1ace2bcbd05e918e05109498c14d
|
6c1d5ff6e098f69cbaab31f61dcdd81b1a81fc06
|
refs/heads/master
| 2020-07-03T10:20:55.385050
| 2018-06-23T04:19:12
| 2018-06-23T04:19:12
| 67,335,981
| 34
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,204
|
py
|
# -*- coding: utf-8 -*-
'''
Created on 2014-05-07
@summary: A custom logging
@author: YangHaitao
'''
import os
import logging
import logging.handlers
CWD = os.path.split(os.path.realpath(__file__))[0]
LEVELS = {'NOSET': logging.NOTSET,
'DEBUG': logging.DEBUG,
'INFO': logging.INFO,
'WARNING': logging.WARNING,
'ERROR': logging.ERROR,
'CRITICAL': logging.CRITICAL}
class ConsoleStreamHandler(logging.StreamHandler):
# color names to indices
color_map = {
'black': 0,
'red': 1,
'green': 2,
'yellow': 3,
'blue': 4,
'magenta': 5,
'cyan': 6,
'white': 7,
}
# levels to (background, foreground, bold/intense)
level_map = {
logging.DEBUG: (None, 'blue', False),
logging.INFO: (None, 'white', False),
logging.WARNING: (None, 'yellow', False),
logging.ERROR: (None, 'red', False),
logging.CRITICAL: ('red', 'white', True),
}
csi = '\x1b['
reset = '\x1b[0m'
def colorize(self, message, record):
"""
Colorize a message for a logging event.
This implementation uses the ``level_map`` class attribute to
map the LogRecord's level to a colour/intensity setting, which is
then applied to the whole message.
:param message: The message to colorize.
:param record: The ``LogRecord`` for the message.
"""
if record.levelno in self.level_map:
bg, fg, bold = self.level_map[record.levelno]
params = []
if bg in self.color_map:
params.append(str(self.color_map[bg] + 40))
if fg in self.color_map:
params.append(str(self.color_map[fg] + 30))
if bold:
params.append('1')
if params:
message = ''.join((self.csi, ';'.join(params), 'm', message, self.reset))
return message
def format(self, record):
"""
Formats a record for output.
This implementation colorizes the message line, but leaves
any traceback uncolorized.
"""
message = logging.StreamHandler.format(self, record)
parts = message.split('\n', 1)
parts[0] = self.colorize(parts[0], record)
message = '\n'.join(parts)
return message
def emit(self, record):
try:
message = self.format(record)
stream = self.stream
if unicode and isinstance(message, unicode):
enc = getattr(stream, 'encoding', 'utf-8')
message = message.encode(enc, 'replace')
stream.write(message)
stream.write(getattr(self, 'terminator', '\n'))
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
def config_logging(logger_name = "",
file_name = "main.log",
log_level = "NOSET",
dir_name = "logs",
day_rotate = False,
when = "D",
interval = 1,
max_size = 50,
backup_count = 5,
console = True):
format_log_string = "%(asctime)s %(name)-12s %(levelname)-8s %(message)s"
format_console_string = "%(name)-12s: %(levelname)-8s %(message)s"
logs_dir = os.path.join(CWD, dir_name)
file_dir = os.path.join(logs_dir, file_name)
# init logs directory
if os.path.exists(logs_dir) and os.path.isdir(logs_dir):
pass
else:
os.makedirs(logs_dir)
# clear all handlers
logging.getLogger(logger_name).handlers = []
# init rotating handler
if day_rotate == True:
rotatingFileHandler = logging.handlers.TimedRotatingFileHandler(filename = file_dir,
when = when,
interval = interval,
backupCount = backup_count)
else:
rotatingFileHandler = logging.handlers.RotatingFileHandler(filename = file_dir,
maxBytes = 1024 * 1024 * max_size,
backupCount = backup_count)
formatter = logging.Formatter(format_log_string)
rotatingFileHandler.setFormatter(formatter)
logging.getLogger(logger_name).addHandler(rotatingFileHandler)
# add a console handler
if console == True:
if os.name == 'nt':
consoleHandler = logging.StreamHandler()
else:
consoleHandler = ConsoleStreamHandler()
# set console log level
consoleHandler.setLevel(LEVELS[log_level.upper()])
formatter = logging.Formatter(format_console_string)
consoleHandler.setFormatter(formatter)
logging.getLogger(logger_name).addHandler(consoleHandler)
# set log level
logger = logging.getLogger(logger_name)
level = LEVELS[log_level.upper()]
logger.setLevel(level)
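# Hedged usage sketch (not part of the original, Python 2-era module): configure
# a rotating file handler plus the coloured console handler, then log as usual.
if __name__ == '__main__':
    config_logging(logger_name="demo", file_name="demo.log", log_level="DEBUG")
    logging.getLogger("demo").info("logger configured")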
|
[
"fiefdx@163.com"
] |
fiefdx@163.com
|
964bd18c9fbcba2039e1c36cbe3a58d254079f78
|
2859786cee0205545cda3af367e031c54778176b
|
/webui.py
|
3b4f567d2e261f0ecd32e6333df8b4bad176af30
|
[] |
no_license
|
indivisible/rpi_switch_control_webui
|
5e784fca630d662c7f2f8045f62c5d408daa57a2
|
68e99a4447549f111918898717dec4af21fcb4e5
|
refs/heads/master
| 2023-07-07T13:43:28.821328
| 2021-08-06T06:13:29
| 2021-08-06T06:13:29
| 393,043,415
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,916
|
py
|
#!/usr/bin/env python3
import logging
import asyncio
import threading
import json
from http.server import HTTPServer, SimpleHTTPRequestHandler
from functools import partial
from pathlib import Path
import websockets
from backend_manager import BackendManager
class SocketConnection:
def __init__(self, socket, backend: BackendManager):
self.socket = socket
self.backend = backend
async def send(self, obj):
return await self.socket.send(json.dumps(obj))
async def send_message(self, severity: str, message):
return await self.send(self.message(severity, message))
def action(self, action: str, **rest):
rest['action'] = action
return rest
def message(self, severity: str, message):
return self.action(
'message', severity=severity, message=message)
def error(self, msg):
return self.message('error', message=msg)
async def handle_message(self, raw: str):
data = json.loads(raw)
action = data.pop('action').replace('-', '_')
try:
handler = getattr(self, f'handle_action_{action}')
except AttributeError:
return self.error(f'Unkown action f{action}')
try:
reply = await handler(**data)
return reply
except Exception as e:
logging.exception(f'Error handling {action}:')
return self.error(f'Error handling {action}: {e}')
async def serve(self):
async for message in self.socket:
try:
reply = await self.handle_message(message)
except Exception:
logging.error(f'Error handling message {message}: ')
reply = self.error('Invalid message')
if reply is not None:
await self.send(reply)
async def handle_action_status(self):
# return self.action('status', ok=(con is not None))
return self.action('status', ok=True)
async def handle_action_run_script(self, text):
if not text:
return self.error('empty script')
else:
try:
self.backend.start_script(text)
return self.message('info', 'script started')
except Exception as e:
logging.exception('Error running script:')
return self.error(f'error running script: {e!r}')
async def handle_action_abort_script(self):
self.backend.abort_script()
return self.message('warning', 'Script aborted')
async def handle_action_restart(self):
Path('restart_app').touch()
return self.error('Restarting app')
async def handle_action_input(self, state):
await self.backend.manual_input(state)
class SocketServer:
def __init__(self, backend):
self.backend = backend
backend.socket_send_message = self.send_message
self.connections = []
async def send_message(self, severity: str, message):
for con in self.connections:
try:
await con.send_message(severity, message)
except Exception:
logging.exception('error sending message to websocket')
async def serve(self, websocket, path):
connection = SocketConnection(websocket, self.backend)
self.connections.append(connection)
try:
await connection.serve()
finally:
self.connections.remove(connection)
async def start_websocket_server(backend):
await backend.start()
server = SocketServer(backend)
await websockets.serve(server.serve, "0.0.0.0", 6789)
logging.debug('started websocket server')
def run_http_server(path):
server_address = ('', 8000)
handler = partial(SimpleHTTPRequestHandler, directory=path)
httpd = HTTPServer(server_address, handler)
logging.debug('starting HTTP server')
httpd.serve_forever()
def main():
import argparse
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('cmd', default=['/bin/cat'], nargs='*')
args = parser.parse_args()
logging.getLogger().setLevel(logging.DEBUG)
logging.getLogger('websockets.protocol').setLevel(logging.WARNING)
logging.getLogger('websockets.server').setLevel(logging.WARNING)
cmd, *cmd_args = args.cmd
backend = BackendManager(cmd, cmd_args)
# http server for static files of the GUI
httpd_thread = threading.Thread(
target=run_http_server,
# the directory where the served files are
args=('html',),
daemon=True)
httpd_thread.start()
# websocket for client controls
loop = asyncio.get_event_loop()
loop.run_until_complete(start_websocket_server(backend))
loop.run_forever()
return 0
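# --- Hypothetical usage sketch (not part of the original file) ---
# A minimal client for manual testing, assuming the server above is reachable
# at ws://localhost:6789 and that requests are JSON objects with an "action"
# key, as expected by SocketConnection.handle_message().
async def example_status_check(uri='ws://localhost:6789'):
    # Ask the backend for its status and return the decoded JSON reply.
    async with websockets.connect(uri) as ws:
        await ws.send(json.dumps({'action': 'status'}))
        return json.loads(await ws.recv())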
if __name__ == '__main__':
import sys
sys.exit(main())
|
[
"islandofcalmness@gmail.com"
] |
islandofcalmness@gmail.com
|
f31e8bdef0f35d1da25ef414dec22eaf8212ec51
|
b27aa0989f06d46da7f650d1ae0048cc394dbbef
|
/FFA_action_pattern_analysis/4representation.py
|
6fcd9d71862c0dd15a06868edb0bf33f5391a18b
|
[] |
no_license
|
sunshineDrizzle/Projects
|
fb97be79d46c39a0d07f07cfe717fb64148c99c4
|
e5eac7aca238a887a7fd1fa556333758fff1505d
|
refs/heads/master
| 2020-03-31T11:59:40.113282
| 2020-03-21T05:27:07
| 2020-03-21T05:27:07
| 152,199,295
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,255
|
py
|
import numpy as np
from scipy.spatial.distance import cdist
from scipy.stats import sem, ttest_ind
from matplotlib import pyplot as plt
def subgroup_mean_representation(mean_maps, maps, group_labels):
labels_uniq = np.unique(group_labels)
representations = np.zeros_like(labels_uniq, np.object)
for i, label in enumerate(labels_uniq):
mean_map = mean_maps[[i]]
sub_maps = np.atleast_2d(maps[group_labels == label])
representations[i] = 1 - cdist(mean_map, sub_maps, 'correlation')[0]
representation_means = []
representation_sems = []
for repre in representations:
representation_means.append(np.mean(repre))
representation_sems.append(sem(repre))
x = np.arange(len(representations))
plt.bar(x, representation_means, yerr=representation_sems,
color='white', edgecolor='black')
plt.title('{}FFA_patterns'.format(hemi[0]))
plt.ylabel('correlation')
plt.xticks(x, labels_uniq)
plt.tight_layout()
plt.show()
def leave_one_out_representation(FFA_patterns, group_labels, metric):
labels_uniq = np.unique(group_labels)
labels_num = len(labels_uniq)
sub_FFA_patterns_list = [np.atleast_2d(FFA_patterns[group_labels == label]) for label in labels_uniq]
X = np.zeros((labels_num, labels_num), np.object)
for row in range(labels_num):
sub_FFA_patterns_mean = np.atleast_2d(np.mean(sub_FFA_patterns_list[row], 0))
for col in range(labels_num):
if row == col:
sub_subjects = list(range(sub_FFA_patterns_list[row].shape[0]))
dists = []
for subject in sub_subjects:
sub_FFA_patterns_leave_out = np.atleast_2d(sub_FFA_patterns_list[row][subject])
sub_subjects_reserve = sub_subjects.copy()
sub_subjects_reserve.remove(subject)
sub_FFA_patterns_reserve = np.atleast_2d(sub_FFA_patterns_list[row][sub_subjects_reserve])
sub_FFA_patterns_reserve_mean = np.atleast_2d(np.mean(sub_FFA_patterns_reserve, 0))
dists.append(cdist(sub_FFA_patterns_reserve_mean, sub_FFA_patterns_leave_out, metric)[0][0])
X[row, col] = np.array(dists)
else:
X[row, col] = cdist(sub_FFA_patterns_mean, sub_FFA_patterns_list[col], metric)[0]
fig, axes = plt.subplots(labels_num)
axes[0].set_title('{}FFA_patterns'.format(hemi[0]))
xlabels = 'mean{} and individual{}'
for row in range(labels_num):
print('row{0}col1 vs. row{0}col2'.format(row + 1), ttest_ind(X[row][0], X[row][1]))
axes[row].violinplot(X[row], showmeans=True)
axes[row].set_ylabel(metric)
axes[row].set_xticks(np.arange(1, labels_num + 1))
axes[row].set_xticklabels([xlabels.format(labels_uniq[row], labels_uniq[col]) for col in range(labels_num)])
plt.tight_layout()
plt.show()
return X
if __name__ == '__main__':
import nibabel as nib
from os.path import join as pjoin
from commontool.io.io import CiftiReader
hemi = 'lh'
brain_structure = {
'lh': 'CIFTI_STRUCTURE_CORTEX_LEFT',
'rh': 'CIFTI_STRUCTURE_CORTEX_RIGHT'
}
project_dir = '/nfs/s2/userhome/chenxiayu/workingdir/study/FFA_clustering/'
analysis_dir = pjoin(project_dir, 's2_25_zscore')
cluster_num_dir = pjoin(analysis_dir, 'HAC_ward_euclidean/50clusters')
acti_dir = pjoin(cluster_num_dir, 'activation')
mean_map_file = pjoin(acti_dir, '{}_mean_maps.nii.gz'.format(hemi))
FFA_label_file = pjoin(project_dir, 'data/HCP_1080/face-avg_s2/label/{}FFA_25.label'.format(hemi[0]))
map_file = pjoin(project_dir,
'data/HCP_1080/face-avg_s2/S1200.1080.FACE-AVG_level2_zstat_hp200_s2_MSMAll.dscalar.nii')
group_labels_file = pjoin(cluster_num_dir, 'group_labels')
mean_maps = nib.load(mean_map_file).get_data()
FFA_vertices = nib.freesurfer.read_label(FFA_label_file)
reader = CiftiReader(map_file)
maps = reader.get_data(brain_structure[hemi], True)
group_labels = np.array(open(group_labels_file).read().split(' '), dtype=np.uint16)
subgroup_mean_representation(mean_maps[:, FFA_vertices], maps[:, FFA_vertices], group_labels)
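def _demo_subgroup_mean_representation():
    # Hypothetical synthetic-data sketch (not part of the original analysis).
    # It only illustrates the expected shapes: mean_maps is (n_groups, n_vertices),
    # maps is (n_subjects, n_vertices), and group_labels gives one label per subject.
    # Note: the plot title in subgroup_mean_representation() reads the module-level
    # `hemi` variable, which is set in the __main__ block above.
    rng = np.random.RandomState(0)
    demo_mean_maps = rng.rand(2, 50)
    demo_maps = rng.rand(20, 50)
    demo_labels = np.repeat([1, 2], 10)
    subgroup_mean_representation(demo_mean_maps, demo_maps, demo_labels)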
|
[
"954830460@qq.com"
] |
954830460@qq.com
|
134bf53ab21d5f345076fc4304195e18fea457bb
|
f77c23ec7f9d8d29b30b3ef00ca7f6f1f08cfbb7
|
/vae/test_vae.py
|
346f1bf8e446e674c686ac54524af19d1e95a258
|
[] |
no_license
|
wangtingc/aevb
|
584ebcc7b70411c2bc0a9a0bc3a3a552fd470378
|
ee024cdea9855c1244e3eddeb9c77965be7107f3
|
refs/heads/master
| 2021-01-19T14:10:35.068618
| 2015-09-19T10:20:15
| 2015-09-19T10:20:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,631
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from itertools import cycle
from matplotlib import cm as cm
import matplotlib.pyplot as plt
import numpy as np
import theano
from vae import GaussianVAE, BernoulliVAE
from utils import load_data
from gaussian_vae import M1_GVAE
def test_vae(
opt='adagrad',
n_iters=1000,
learning_rate=1e-4,
n_mc_samples=1,
scale_init=0.01,
dim_h=100,
dim_z=2,
model='Gaussian'):
##################
# load data
##################
datasets = load_data('../../20150717-/mnist.pkl.gz')
train_set_x, train_set_y = datasets
xs = train_set_x[:10000]
sgd_params = {
'learning_rate' : learning_rate,
'n_iters' : n_iters,
'size_minibatch': 100,
'calc_hist' : 'all',
'n_mod_hist' : 100,
}
adagrad_params = sgd_params
all_params = {
'hyper_params': {
'rng_seed' : 1234,
'dim_z' : dim_z,
'dim_h_generate' : dim_h,
'dim_h_recognize' : dim_h,
'n_mc_samples' : n_mc_samples,
'scale_init' : scale_init
}
}
if opt == 'adagrad':
all_params.update({'adagrad_params': adagrad_params})
elif opt == 'sgd':
all_params.update({'sgd_params': sgd_params})
if model == 'Gaussian':
model = GaussianVAE(**all_params)
# model = M1_GVAE(**all_params)
elif model == 'Bernoulli':
model = BernoulliVAE(**all_params)
model.fit(xs)
zs = model.encode(xs)
xs_recon = model.decode(zs)
err = np.sum(0.5 * (xs - xs_recon) ** 2) / xs.shape[0]
    print('Error: %f' % err)
return datasets, model
def plot_weights(model):
fig, axes = plt.subplots(nrows=10, ncols=10)
fig.subplots_adjust(hspace=.001, wspace=.001)
fig.set_size_inches(10, 10)
w3 = model.model_params_['w2_'].get_value()
nx = np.sqrt(w3.shape[1]).astype(int)
ny = nx
w3 = w3.reshape((w3.shape[0], ny, nx))
for i, ax in enumerate(axes.reshape(-1)):
ax.imshow(w3[i], interpolation='none', cmap=cm.gray)
def plot_manifold(
model, z1s=np.arange(-0.8, 1.2, .2), z2s=np.arange(-0.8, 1.2, .2)):
zs = np.array([[z1, z2] for z2 in z2s
for z1 in z1s]).astype(theano.config.floatX)
xs = model.decode(zs)
nx = np.sqrt(xs.shape[1]).astype(int)
ny = nx
xs = xs.reshape((xs.shape[0], ny, nx))
fig, axes = plt.subplots(nrows=len(z1s), ncols=len(z2s))
fig.subplots_adjust(hspace=.001, wspace=.001)
fig.set_size_inches(10, 10)
for i, ax in enumerate(axes.reshape(-1)):
ax.imshow(xs[i], interpolation='none', cmap=cm.gray)
ax.set_xticklabels([])
ax.set_yticklabels([])
def plot_hiddens(model, xs, cs):
zs = model.encode(xs)
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
markers = ['+', 'o', '^']
plt.figure(figsize=(7, 7))
for c, color, marker in zip(np.unique(cs), cycle(colors), cycle(markers)):
ixs = np.where(cs == c)[0]
plt.scatter(zs[ixs, 0], zs[ixs, 1], c=color, marker=marker, label=c)
plt.legend(loc='best', scatterpoints=1, framealpha=1)
if __name__ == '__main__':
data, model = test_vae(
n_iters=10000,
learning_rate=0.01,
n_mc_samples=1,
scale_init=1.,
dim_h=500,
dim_z=2,
model='Gaussian',
opt='adagrad'
)
hist = np.vstack(model.hist)
plt.plot(hist[:, 0], hist[:, 1])
    plot_manifold(
model, z1s=np.arange(-8., 8., 1.), z2s=np.arange(8., -8., -1.))
plt.show()
# End of Line.
|
[
"makoto.kawano@gmail.com"
] |
makoto.kawano@gmail.com
|
7b4149d14710a240656326a31db2af81cf823dda
|
31defa3ee9a0926b0a1bbdf849915dfe3d838dcf
|
/models/resnet50.py
|
57be2fc16adabd4d0722c404132a825c5ea8eb18
|
[] |
no_license
|
sagecodes/cnn-pytorch-starter
|
38abcec40ae851f7fd4544e8799e731d180593d1
|
689918aaa7bb450d4f3ce0648479d3cb6604a9bc
|
refs/heads/master
| 2020-12-04T19:25:14.594504
| 2020-05-23T20:34:59
| 2020-05-23T20:34:59
| 231,880,668
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 753
|
py
|
import torchvision.models as models
import torch.nn as nn
import torch
import torch.optim as optim
import torchvision.transforms as transforms
import numpy as np
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
from PIL import Image
class Resnet50_pretrained:
def __init__(self, num_classes):
# self.device = device
self.num_classes = num_classes
self.model = models.resnet50(pretrained=True)
self.fc_out = nn.Linear(2048, num_classes, bias=True)
# freeze model params for features
for param in self.model.parameters():
param.requires_grad = False
self.model.fc = self.fc_out
# def forward(self):
# No forward needed imported model
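# --- Hypothetical usage sketch (not part of the original file) ---
# Only the replaced fully connected layer is trainable (everything else was
# frozen in __init__), so the optimizer is given wrapper.model.fc.parameters().
# The helper name and learning rate below are illustrative, not from the repo.
def build_frozen_resnet50(num_classes=10, lr=1e-3):
    wrapper = Resnet50_pretrained(num_classes)
    optimizer = optim.SGD(wrapper.model.fc.parameters(), lr=lr)
    return wrapper.model, optimizer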
|
[
"hello@sageelliott.com"
] |
hello@sageelliott.com
|
0f381d9bfb49b868e112f69fdc6263215ce14091
|
e82b761f53d6a3ae023ee65a219eea38e66946a0
|
/All_In_One/addons/ProceduralBuildingGenerator/UI.py
|
2f5754195f1f884cccbae7780fbab989eecf851e
|
[] |
no_license
|
2434325680/Learnbgame
|
f3a050c28df588cbb3b14e1067a58221252e2e40
|
7b796d30dfd22b7706a93e4419ed913d18d29a44
|
refs/heads/master
| 2023-08-22T23:59:55.711050
| 2021-10-17T07:26:07
| 2021-10-17T07:26:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,426
|
py
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# Procedural building generator
# Copyright (C) 2019 Luka Simic
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://www.gnu.org/licenses/>.
#
# ##### END GPL LICENSE BLOCK #####
from bpy.types import Panel, PropertyGroup
from bpy.props import FloatProperty, BoolProperty, EnumProperty, IntProperty
class PBGPropertyGroup(PropertyGroup):
# TODO: docstring
building_width = FloatProperty(
name="Building width",
default=25.0
)
building_depth = FloatProperty(
name="Building depth",
default=15.0
)
building_chamfer = FloatProperty(
name="Chamfer size",
default=1
)
building_wedge_depth = FloatProperty(
name="Wedge depth",
default=1.5
)
building_wedge_width = FloatProperty(
name="Wedge width",
default=8
)
floor_first_offset = FloatProperty(
name="FIrst floor offset",
default=0.7
)
floor_height = FloatProperty(
name="Floor height",
default=3
)
floor_count = IntProperty(
name="Number of floors",
default=2
)
floor_separator_include = BoolProperty(
name="Separator between floors",
default=True
)
floor_separator_height = FloatProperty(
name="Separator height",
default=0.5
)
floor_separator_width = FloatProperty(
name="Separator width",
default=0.5
)
window_width = FloatProperty(
name="Total window width",
default=1.2
)
distance_window_window = FloatProperty(
name="Distance between windows",
default=2.5
)
generate_pillar = BoolProperty(
name="Generate Pillar",
default=True
)
distance_window_pillar = FloatProperty(
name="Distance Window to Pillar",
default=0.8
)
pillar_width = FloatProperty(
name="Pillar width",
default=0.2
)
pillar_depth = FloatProperty(
name="Pillar depth",
default=0.15
)
pillar_chamfer = FloatProperty(
name="Pillar Chamfer",
default=0.05
)
pillar_offset_height = FloatProperty(
name="Pillar Offset Height",
default=0.7
)
pillar_offset_size = FloatProperty(
name="Pillar Offset Size",
default=0.05
)
pillar_include_floor_separator = BoolProperty(
name="Include floor separator",
default=True
)
pillar_include_first_floor = BoolProperty(
name="Include first floor",
default=True
)
wall_types = [
("FLAT", "FLAT", "", 0),
("ROWS", "ROWS", "", 1)
]
wall_type = EnumProperty(
items=wall_types,
default="ROWS"
)
wall_mortar_size = FloatProperty(
name="Mortar size",
default=0.02
)
wall_section_size = FloatProperty(
name="Brick section size",
default=0.04
)
wall_row_count = IntProperty(
name="Rows per floor",
default=7
)
wall_offset_size = FloatProperty(
name="Wall offset size",
default=0.1
)
wall_offset_type = EnumProperty(
items=wall_types,
default="ROWS"
)
wall_offset_mortar_size = FloatProperty(
name="Offset Mortar size",
default=0.03
)
wall_offset_section_size = FloatProperty(
name="Offset Brick section size",
default=0.06
)
wall_offset_row_count = IntProperty(
name="Offset Rows per floor",
default=3
)
window_height = FloatProperty(
name="Window total height",
default=1.0
)
window_offset = FloatProperty(
name="Window offset",
default=0.5
)
window_under_types = [
("WALL", "WALL", "", 0),
("PILLARS", "PILLARS", "", 1),
("SIMPLE", "SIMPLE", "", 2),
("SINE", "SINE", "", 3),
("CYCLOID", "CYCLOID", "", 4)
]
windows_under_type = EnumProperty(
items=window_under_types,
default="WALL"
)
windows_under_width = FloatProperty(
name="under window offset width",
default=0.1
)
windows_under_height = FloatProperty(
name="Under Window offset height",
default=0.1
)
windows_under_depth = FloatProperty(
name="under Window offset depth",
default=0.05
)
windows_under_inset_depth = FloatProperty(
name="under Window inset depth",
default=0.1
)
windows_under_amplitude = FloatProperty(
name="under Window amplitude",
default=0.05
)
windows_under_period_count = IntProperty(
name="under Window period count",
default=8
)
windows_under_simple_width = FloatProperty(
name="Under window simple width",
default=0.04
)
windows_under_simple_depth = FloatProperty(
name="Under window simple depth",
default=0.03
)
windows_under_pillar_base_diameter = FloatProperty(
name="Under window pillar base diameter",
default=0.08
)
windows_under_pillar_base_height = FloatProperty(
name="Under window pillar base height",
default=0.04
)
windows_under_pillar_min_diameter = FloatProperty(
name="Under window pillar min diameter",
default=0.05
)
windows_under_pillar_max_diameter = FloatProperty(
name="Under window pillar max diameter",
default=0.08
)
door_size = FloatProperty(
name="Door size",
default=2.5
)
# end PBGPropertyGroup
class PBGToolbarGeneralPanel(Panel):
# TODO: docstring
bl_label = "General Settings"
bl_category = "PBG"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
bl_context = "objectmode"
def draw(self, context):
layout = self.layout
properties = context.scene.PBGPropertyGroup
col = layout.column(align=True)
col.label(text="Overall Building Dimensions")
col.prop(properties, "building_width")
col.prop(properties, "building_depth")
col.prop(properties, "building_chamfer")
col.prop(properties, "building_wedge_depth")
col.prop(properties, "building_wedge_width")
col.label(text="Floor and separator layout")
col.prop(properties, "floor_count")
col.prop(properties, "floor_height")
col.prop(properties, "floor_first_offset")
col.prop(properties, "floor_separator_include")
col.prop(properties, "floor_separator_width")
col.prop(properties, "floor_separator_height")
# end draw
# end PBGToolbarPanel
class PBGToolbarLayoutPanel(Panel):
# TODO: docstring
bl_label = "Layout Settings"
bl_category = "PBG"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
bl_context = "objectmode"
def draw(self, context):
layout = self.layout
properties = context.scene.PBGPropertyGroup
col = layout.column(align=True)
col.prop(properties, "distance_window_window")
col.prop(properties, "distance_window_pillar")
# end draw
# end PBGLayoutPanel
class PBGToolbarPillarPanel(Panel):
# TODO: docstring
bl_label = "Pillar Settings"
bl_category = "PBG"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
bl_context = "objectmode"
def draw(self, context):
layout = self.layout
properties = context.scene.PBGPropertyGroup
col = layout.column(align=True)
col.prop(properties, "generate_pillar")
col.prop(properties, "pillar_width")
col.prop(properties, "pillar_depth")
col.prop(properties, "pillar_chamfer")
col.prop(properties, "pillar_offset_height")
col.prop(properties, "pillar_offset_size")
col.prop(properties, "pillar_include_floor_separator")
col.prop(properties, "pillar_include_first_floor")
# end draw
# end PBGPillarPanel
class PBGToolbarWallPanel(Panel):
# TODO: docstring
bl_label = "Wall settings"
bl_category = "PBG"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
bl_context = "objectmode"
def draw(self, context):
layout = self.layout
properties = context.scene.PBGPropertyGroup
col = layout.column(align=True)
col.label(text="Wall settings")
col.prop(properties, "wall_type")
col.prop(properties, "wall_mortar_size")
col.prop(properties, "wall_section_size")
col.prop(properties, "wall_row_count")
col.label(text="First floor offset settings")
col.prop(properties, "wall_offset_size")
col.prop(properties, "wall_offset_type")
col.prop(properties, "wall_offset_mortar_size")
col.prop(properties, "wall_offset_section_size")
col.prop(properties, "wall_offset_row_count")
# end draw
# end PBGToolbarWallPanel
class PBGToolbarWindowPanel(Panel):
bl_label = "Window Settings"
bl_category = "PBG"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
bl_context = "objectmode"
def draw(self, context):
layout = self.layout
properties = context.scene.PBGPropertyGroup
col = layout.column(align=True)
col.label(text="Overall window dimensions")
col.prop(properties, "window_width")
col.prop(properties, "window_height")
col.prop(properties, "window_offset")
col.label(text="Under windows area")
col.prop(properties, "windows_under_type")
col.prop(properties, "windows_under_width")
col.prop(properties, "windows_under_height")
col.prop(properties, "windows_under_depth")
col.prop(properties, "windows_under_inset_depth")
col.label(text="Sine/Cycloid params")
col.prop(properties, "windows_under_amplitude")
col.prop(properties, "windows_under_period_count")
col.label(text="Simple params")
col.prop(properties, "windows_under_simple_width")
col.prop(properties, "windows_under_simple_depth")
col.label(text="Pillar params")
col.prop(properties, "windows_under_pillar_base_diameter")
col.prop(properties, "windows_under_pillar_base_height")
col.prop(properties, "windows_under_pillar_min_diameter")
col.prop(properties, "windows_under_pillar_max_diameter")
# end draw
# end PBGToolbarWindowPanel
class PBGToolbarGeneratePanel(Panel):
# TODO: docstring
bl_label = "Generate"
bl_category = "PBG"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
bl_context = "objectmode"
def draw(self, context):
layout = self.layout
row = layout.row(align=True)
row.operator("pbg.generate_building", text="Generate")
# end draw
# end PBGGeneratePanel
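# --- Hypothetical registration sketch (not part of the original add-on file) ---
# Registration usually lives in the add-on's __init__.py; this only illustrates
# how the property group and panels above could be wired up so that
# context.scene.PBGPropertyGroup (used in every draw() method) resolves.
import bpy
from bpy.props import PointerProperty
_pbg_classes = (
    PBGPropertyGroup, PBGToolbarGeneralPanel, PBGToolbarLayoutPanel,
    PBGToolbarPillarPanel, PBGToolbarWallPanel, PBGToolbarWindowPanel,
    PBGToolbarGeneratePanel)
def register():
    # Register the classes, then attach the property group to the Scene type.
    for cls in _pbg_classes:
        bpy.utils.register_class(cls)
    bpy.types.Scene.PBGPropertyGroup = PointerProperty(type=PBGPropertyGroup)
def unregister():
    del bpy.types.Scene.PBGPropertyGroup
    for cls in reversed(_pbg_classes):
        bpy.utils.unregister_class(cls)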
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
1868f06fc25aca409aa9c40093ccad77e51f6e74
|
82b7d5d153b815cafc3cd1b1c156803d0af0e30e
|
/idaes/unit_models/tests/test_heat_exchanger.py
|
3b27f9235f14f30e7c7037c17fb317ed7d84ec86
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
spevenhe/idaes-pse
|
f045ddfa848bbf2bbdda69599222375adbd97e73
|
cd509f98eb3d60ea468922f793c8b8040f99723c
|
refs/heads/master
| 2020-06-10T20:31:45.265044
| 2019-06-13T20:39:34
| 2019-06-13T20:39:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,895
|
py
|
##############################################################################
# Institute for the Design of Advanced Energy Systems Process Systems
# Engineering Framework (IDAES PSE Framework) Copyright (c) 2018-2019, by the
# software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia
# University Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.txt and LICENSE.txt for full copyright and
# license information, respectively. Both files are also available online
# at the URL "https://github.com/IDAES/idaes-pse".
##############################################################################
"""
Tests for 0D heat exchanger models.
Author: John Eslick
"""
import pytest
from pyomo.environ import ConcreteModel, SolverFactory, value
from idaes.core import FlowsheetBlock
from idaes.unit_models import Heater, HeatExchanger
from idaes.property_models import iapws95_ph
from idaes.property_models.iapws95 import iapws95_available
from idaes.core.util.model_statistics import degrees_of_freedom
prop_available = iapws95_available()
# -----------------------------------------------------------------------------
# See if ipopt is available and set up solver
if SolverFactory('ipopt').available():
solver = SolverFactory('ipopt')
solver.options = {'tol': 1e-6}
else:
solver = None
@pytest.fixture()
def build_heater():
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.properties = iapws95_ph.Iapws95ParameterBlock()
m.fs.heater = Heater(default={"property_package": m.fs.properties})
return m
@pytest.fixture()
def build_heat_exchanger():
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.properties = iapws95_ph.Iapws95ParameterBlock()
m.fs.heat_exchanger = HeatExchanger(default={
"side_1":{"property_package": m.fs.properties},
"side_2":{"property_package": m.fs.properties}})
return m
def test_build_heat_exchanger(build_heat_exchanger):
m = build_heat_exchanger
assert hasattr(m.fs.heat_exchanger, "inlet_1")
assert hasattr(m.fs.heat_exchanger, "outlet_1")
assert hasattr(m.fs.heat_exchanger, "inlet_2")
assert hasattr(m.fs.heat_exchanger, "outlet_2")
m.fs.heat_exchanger.set_scaling_factor_energy(1e-3)
assert(m.fs.heat_exchanger.side_1.scaling_factor_energy == 1e-3)
assert(m.fs.heat_exchanger.side_2.scaling_factor_energy == 1e-3)
@pytest.mark.skipif(not prop_available, reason="IAPWS not available")
@pytest.mark.skipif(solver is None, reason="Solver not available")
def test_initialize_heat_exchanger(build_heat_exchanger):
m = build_heat_exchanger
init_state1 = {
"flow_mol":100,
"pressure":101325,
"enth_mol":4000}
init_state2 = {
"flow_mol":100,
"pressure":101325,
"enth_mol":3500}
m.fs.heat_exchanger.area.fix(1000)
m.fs.heat_exchanger.overall_heat_transfer_coefficient.fix(100)
prop_in_1 = m.fs.heat_exchanger.side_1.properties_in[0]
prop_out_1 = m.fs.heat_exchanger.side_1.properties_out[0]
prop_in_2 = m.fs.heat_exchanger.side_2.properties_in[0]
prop_out_2 = m.fs.heat_exchanger.side_2.properties_out[0]
prop_in_1.flow_mol.fix(100)
prop_in_1.pressure.fix(101325)
prop_in_1.enth_mol.fix(4000)
prop_in_2.flow_mol.fix(100)
prop_in_2.pressure.fix(101325)
prop_in_2.enth_mol.fix(3000)
m.fs.heat_exchanger.heat_duty.value = 10000
m.fs.heat_exchanger.initialize(state_args_1=init_state1,
state_args_2=init_state2,
outlvl=5)
solver.solve(m)
assert degrees_of_freedom(m) == 0
print(value(m.fs.heat_exchanger.delta_temperature[0]))
print(value(m.fs.heat_exchanger.side_1.heat[0]))
print(value(m.fs.heat_exchanger.side_2.heat[0]))
assert abs(value(prop_in_1.temperature) - 326.1667075078748) <= 1e-4
assert abs(value(prop_out_1.temperature) - 313.81921851031814) <= 1e-4
assert abs(value(prop_in_2.temperature) - 312.88896252921734) <= 1e-4
assert abs(value(prop_out_2.temperature) - 325.23704823703537) <= 1e-4
assert abs(value(prop_in_1.phase_frac["Liq"]) - 1) <= 1e-6
assert abs(value(prop_out_1.phase_frac["Liq"]) - 1) <= 1e-6
assert abs(value(prop_in_1.phase_frac["Vap"]) - 0) <= 1e-6
assert abs(value(prop_out_1.phase_frac["Vap"]) - 0) <= 1e-6
def test_build_heater(build_heater):
m = build_heater
assert hasattr(m.fs.heater, "inlet")
assert hasattr(m.fs.heater, "outlet")
assert len(m.fs.heater.inlet.vars) == 3
assert len(m.fs.heater.outlet.vars) == 3
for port in [m.fs.heater.inlet, m.fs.heater.outlet]:
assert hasattr(port, "flow_mol")
assert hasattr(port, "enth_mol")
assert hasattr(port, "pressure")
@pytest.mark.skipif(not prop_available, reason="IAPWS not available")
@pytest.mark.skipif(solver is None, reason="Solver not available")
def test_initialize_heater(build_heater):
m = build_heater
m.fs.heater.inlet.enth_mol.fix(4000)
m.fs.heater.inlet.flow_mol.fix(100)
m.fs.heater.inlet.pressure.fix(101325)
m.fs.heater.heat_duty[0].fix(100*20000)
m.fs.heater.initialize()
prop_in = m.fs.heater.control_volume.properties_in[0]
prop_out = m.fs.heater.control_volume.properties_out[0]
assert abs(value(prop_in.temperature) - 326.1667075078748) <= 1e-4
assert abs(value(prop_out.temperature) - 373.12429584768876) <= 1e-4
assert abs(value(prop_in.phase_frac["Liq"]) - 1) <= 1e-6
assert abs(value(prop_out.phase_frac["Liq"]) - 0.5953218682380845) <= 1e-6
assert abs(value(prop_in.phase_frac["Vap"]) - 0) <= 1e-6
assert abs(value(prop_out.phase_frac["Vap"]) - 0.40467813176191547) <= 1e-6
@pytest.mark.skipif(not prop_available, reason="IAPWS not available")
@pytest.mark.skipif(solver is None, reason="Solver not available")
def test_heater_q1(build_heater):
m = build_heater
m.fs.heater.inlet.enth_mol.fix(4000)
m.fs.heater.inlet.flow_mol.fix(100)
m.fs.heater.inlet.pressure.fix(101325)
m.fs.heater.heat_duty[0].fix(100*20000)
m.fs.heater.initialize()
assert degrees_of_freedom(m) == 0
solver.solve(m)
prop_in = m.fs.heater.control_volume.properties_in[0]
prop_out = m.fs.heater.control_volume.properties_out[0]
assert abs(value(prop_in.temperature) - 326.1667075078748) <= 1e-4
assert abs(value(prop_out.temperature) - 373.12429584768876) <= 1e-4
assert abs(value(prop_in.phase_frac["Liq"]) - 1) <= 1e-6
assert abs(value(prop_out.phase_frac["Liq"]) - 0.5953218682380845) <= 1e-6
assert abs(value(prop_in.phase_frac["Vap"]) - 0) <= 1e-6
assert abs(value(prop_out.phase_frac["Vap"]) - 0.40467813176191547) <= 1e-6
|
[
"KSBeattie@lbl.gov"
] |
KSBeattie@lbl.gov
|