Column schema for the records below (one row per source file; "length" is the min-max string/list length, "classes" is the number of distinct values, ⌀ marks nullable columns):

| Column | Dtype | Range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2-616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0-69 |
| license_type | string | 2 classes |
| repo_name | string | length 5-118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4-63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k-686M, nullable (⌀) |
| star_events_count | int64 | 0-209k |
| fork_events_count | int64 | 0-110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2-10.3M |
| extension | string | 246 classes |
| content | string | length 2-10.3M |
| authors | list | length 1 |
| author_id | string | length 0-212 |
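If this preview comes from a dataset hosted on the Hugging Face Hub (an assumption; the source does not name the dataset), records with this schema can be streamed and inspected roughly as in the sketch below. The dataset ID is a placeholder, not a real identifier; the column names are the ones from the table above.

```python
from datasets import load_dataset

# "org/code-dataset" is a hypothetical placeholder ID. Streaming avoids
# downloading everything up front, since a single `content` cell can be
# up to ~10.3 MB.
ds = load_dataset("org/code-dataset", split="train", streaming=True)

# Peek at a few records using the columns documented in the schema table.
for row in ds.take(3):
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
```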
- blob_id: c18d60c101cf021cb7502e1ab795bc93b85c90b2
- directory_id: d532b85841b459c61d88d380e88dd08d29836d43
- path: /solutions/816_ambiguous_coordinates.py
- content_id: e8f3eaf9f85148b54de335d76a563d0903a82e39
- detected_licenses: ["MIT"]
- license_type: permissive
- repo_name: YiqunPeng/leetcode_pro
- snapshot_id: ad942468df5506de9dc48a4019933f658e2a3121
- revision_id: 4a508a982b125a3a90ea893ae70863df7c99cc70
- branch_name: refs/heads/master
- visit_date: 2022-05-15T09:32:02.699180
- revision_date: 2022-05-14T16:32:17
- committer_date: 2022-05-14T16:32:17
- github_id: 182,453,966
- star_events_count: 0
- fork_events_count: 0
- gha_license_id: null
- gha_event_created_at: null
- gha_created_at: null
- gha_language: null
- src_encoding: UTF-8
- language: Python
- is_vendor: false
- is_generated: false
- length_bytes: 910
- extension: py
- content:
from typing import List  # the List[str] annotation below needs this import

class Solution:
def ambiguousCoordinates(self, S: str) -> List[str]:
"""String.
"""
res = []
for i in range(2, len(S) - 1):
l = self._add_decimal_point(S[1:i])
r = self._add_decimal_point(S[i:-1])
for ll in l:
for rr in r:
res.append(str('(') + ll + str(', ') + rr + str(')'))
return res
def _add_decimal_point(self, s):
if self._is_valid_int(s):
r = [s]
else:
r = []
for i in range(1, len(s)):
if self._is_valid_int(s[:i]) and self._is_valid_decimal(s[i:]):
r.append(s[:i] + '.' + s[i:])
return r
def _is_valid_int(self, n):
if n[0] == '0':
return len(n) == 1
else:
return True
def _is_valid_decimal(self, n):
return n[-1] != '0'
- authors: ["ypeng1@andrew.cmu.edu"]
- author_id: ypeng1@andrew.cmu.edu

- blob_id: 9788da8b4de7ba916379740dfda4c000d6c2d7d2
- directory_id: 1d674f917a8cc927caccad6379b520a76bd86b39
- path: /.c9/metadata/environment/products/models.py
- content_id: dabed12159b671b696d4c9501f818613d245c7aa
- detected_licenses: []
- license_type: no_license
- repo_name: Code-Institute-Submissions/project_shop
- snapshot_id: 612c8d44f58465b7e10ad4eea292860f1111c2c4
- revision_id: 8158199d7d8abc7b81ef8a1b96aae015dc29294c
- branch_name: refs/heads/master
- visit_date: 2021-02-07T13:28:07.713206
- revision_date: 2020-02-29T19:41:21
- committer_date: 2020-02-29T19:41:21
- github_id: null
- star_events_count: 0
- fork_events_count: 0
- gha_license_id: null
- gha_event_created_at: null
- gha_created_at: null
- gha_language: null
- src_encoding: UTF-8
- language: Python
- is_vendor: false
- is_generated: false
- length_bytes: 422
- extension: py
- content:
{"filter":false,"title":"models.py","tooltip":"/products/models.py","undoManager":{"mark":-1,"position":-1,"stack":[]},"ace":{"folds":[],"scrolltop":0,"scrollleft":0,"selection":{"start":{"row":2,"column":0},"end":{"row":2,"column":0},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":0},"timestamp":1579547307064,"hash":"dc4eb8b59d0cc62bbbaeb8a7593bc04481905b61"}
- authors: ["ubuntu@ip-172-31-23-168.ec2.internal"]
- author_id: ubuntu@ip-172-31-23-168.ec2.internal

- blob_id: de5b67bf4bede60f07a29e7e3795d1182bf15531
- directory_id: 1df00826ff83bb8e7a6175b2594c09c78bb76c41
- path: /sim/mujoco_env/base_demonstration_env.py
- content_id: a430f05d7b0afea7f209b173dd208df56cfe9a8b
- detected_licenses: ["MIT"]
- license_type: permissive
- repo_name: SudeepDasari/video_prediction-1
- snapshot_id: 71046223dba00300eed592359b829215f3a0805a
- revision_id: ef0953b514aa1b7a1f5e96fd30aebef01334fb2d
- branch_name: refs/heads/master
- visit_date: 2020-04-02T01:21:15.605474
- revision_date: 2018-12-13T04:19:52
- committer_date: 2018-12-13T04:19:52
- github_id: 153,851,075
- star_events_count: 1
- fork_events_count: 0
- gha_license_id: MIT
- gha_event_created_at: 2018-10-19T23:13:15
- gha_created_at: 2018-10-19T23:13:14
- gha_language: null
- src_encoding: UTF-8
- language: Python
- is_vendor: false
- is_generated: false
- length_bytes: 1,281
- extension: py
- content:
from .base_mujoco_env import BaseMujocoEnv
class BaseDemoEnv(BaseMujocoEnv):
"""
Wrapper class which allows easy creation of demonstration tuned environments w/ Multiple inheritance
- Pros:
* Easy code reuse
        * Test demo environment once and easily apply to new Action Spaces (example in sawyer_sim.vanilla_env)
- Cons:
* Bugs can be hard to deal with. Refer to BaseSawyerMujocoEnv.reset for how this was mitigated
"""
def reset(self):
self._demo_t, self._cur_stage = 0, -1
obs, reset = super().reset()
obs = self.insert_stage(obs)
return obs, reset
def step(self, action):
self._demo_t += 1
return self.insert_stage(super().step(action))
def insert_stage(self, obs_dict):
if 'stage' in obs_dict:
for k in obs_dict:
print('key {}'.format(k))
print('val {}'.format(obs_dict[k]))
assert 'stage' not in obs_dict, "Demonstration Environment sets Stage"
obs_dict['stage'] = self.get_stage()
return obs_dict
def get_stage(self):
raise NotImplementedError
def has_goal(self):
return True
def goal_reached(self):
raise NotImplementedError
- authors: ["sdasari@berkeley.edu"]
- author_id: sdasari@berkeley.edu

- blob_id: 2c2c8903d403e44545c9590c7a95dbd2ab02cb17
- directory_id: aeb427e7e779d030f914662e56a094cf745a8800
- path: /zmanim_bot/zmanim_api/models.py
- content_id: 80867cd3d3b86b22a584417eca1182d1646f5fde
- detected_licenses: []
- license_type: no_license
- repo_name: kakamband/zmanim_bot
- snapshot_id: f1f908cd8f112a25646183a10c9fd1a8c1ab1131
- revision_id: 2446da00d00bcf947905eaae0ccd0c7465bd2ae7
- branch_name: refs/heads/master
- visit_date: 2023-03-10T03:49:43.481971
- revision_date: 2020-11-13T06:51:39
- committer_date: 2020-11-13T06:51:39
- github_id: null
- star_events_count: 0
- fork_events_count: 0
- gha_license_id: null
- gha_event_created_at: null
- gha_created_at: null
- gha_language: null
- src_encoding: UTF-8
- language: Python
- is_vendor: false
- is_generated: false
- length_bytes: 3,149
- extension: py
- content:
from __future__ import annotations
from typing import List, Optional, Tuple
from datetime import datetime, time, date
from pydantic import BaseModel
__all__ = [
'Settings',
'SimpleSettings',
'AsurBeMelachaDay',
'DafYomi',
'RoshChodesh',
'Shabbat',
'YomTov',
'Holiday',
'Fast',
'Zmanim',
'IsraelHolidays'
]
class SimpleSettings(BaseModel):
date_: Optional[date] = None
jewish_date: Optional[str] = None
holiday_name: Optional[str] = None
class Config:
fields = {'date_': 'date'}
class Settings(SimpleSettings):
cl_offset: Optional[int] = None
havdala_opinion: Optional[str] = None
coordinates: Optional[Tuple[float, float]] = None
elevation: Optional[int] = None
fast_name: Optional[str] = None
yomtov_name: Optional[str] = None
class Zmanim(BaseModel):
settings: Settings
alos: Optional[datetime] = None
misheyakir_10_2: Optional[datetime] = None
sunrise: Optional[datetime] = None
sof_zman_shema_ma: Optional[datetime] = None
sof_zman_shema_gra: Optional[datetime] = None
sof_zman_tefila_ma: Optional[datetime] = None
sof_zman_tefila_gra: Optional[datetime] = None
chatzos: Optional[datetime] = None
mincha_gedola: Optional[datetime] = None
mincha_ketana: Optional[datetime] = None
plag_mincha: Optional[datetime] = None
sunset: Optional[datetime] = None
tzeis_5_95_degrees: Optional[datetime] = None
tzeis_8_5_degrees: Optional[datetime] = None
tzeis_42_minutes: Optional[datetime] = None
tzeis_72_minutes: Optional[datetime] = None
chatzot_laila: Optional[datetime] = None
astronomical_hour_ma: Optional[time] = None
astronomical_hour_gra: Optional[time] = None
class AsurBeMelachaDay(BaseModel):
date: Optional[date] = None
candle_lighting: Optional[datetime] = None
havdala: Optional[datetime] = None
class Shabbat(AsurBeMelachaDay):
settings: Settings
torah_part: str = None
late_cl_warning: bool = False
class RoshChodesh(BaseModel):
settings: SimpleSettings
month_name: str
days: List[date]
duration: int
molad: Tuple[datetime, int]
class Config:
json_encoders = {
datetime: lambda d: d.isoformat(timespec='minutes')
}
class DafYomi(BaseModel):
settings: SimpleSettings
masehet: str
daf: int
class Holiday(BaseModel):
settings: SimpleSettings
date: date
class IsraelHolidays(BaseModel):
settings: SimpleSettings
holiday_list: List[Tuple[str, date]]
class YomTov(BaseModel):
settings: Settings
pre_shabbat: Optional[AsurBeMelachaDay] = None
day_1: AsurBeMelachaDay
day_2: Optional[AsurBeMelachaDay] = None
post_shabbat: Optional[AsurBeMelachaDay] = None
hoshana_rabba: Optional[date] = None
pesach_part_2_day_1: Optional[AsurBeMelachaDay] = None
pesach_part_2_day_2: Optional[AsurBeMelachaDay] = None
class Fast(BaseModel):
settings: Settings
moved_fast: Optional[bool] = False
fast_start: Optional[datetime] = None
chatzot: Optional[datetime] = None
havdala: Optional[datetime] = None
- authors: ["benyomin.94@gmail.com"]
- author_id: benyomin.94@gmail.com

- blob_id: fd7e3b565aecf3af65b29e1e7268d81ad6deed71
- directory_id: 234951b11d5b7fdf254cd1e713fde9c6c1c9cb3e
- path: /classes.py
- content_id: b3615ab6afc29b6db08b42d0fb33a986566ccae2
- detected_licenses: []
- license_type: no_license
- repo_name: gordon26827/tronV1
- snapshot_id: 15bd584c01a0d2c57a7e4d1872dcae0a4cb5c835
- revision_id: 09bd80cc1f34044c71f0b68161baf0d81b535b0a
- branch_name: refs/heads/main
- visit_date: 2023-02-22T01:52:09.530204
- revision_date: 2021-01-20T23:33:27
- committer_date: 2021-01-20T23:33:27
- github_id: 331,459,675
- star_events_count: 0
- fork_events_count: 0
- gha_license_id: null
- gha_event_created_at: null
- gha_created_at: null
- gha_language: null
- src_encoding: UTF-8
- language: Python
- is_vendor: false
- is_generated: false
- length_bytes: 6,840
- extension: py
- content:
import pygame
from random import randint
bike_n = pygame.image.load("bike_n.png")
bike_s = pygame.image.load("bike_s.png")
bike_e = pygame.image.load("bike_e.png")
bike_w = pygame.image.load("bike_w.png")
width = 983
height = 598
players = [(217, 155, 0), (42, 42, 247), (109, 41, 255), (240, 240, 240)]
directions = {'N': (0, -1), 'S': (0, 1), 'E': (1, 0), 'W': (-1, 0)}
bikes = {'N': bike_n, 'S': bike_s, 'E': bike_e, 'W': bike_w}
spawn_points = [[20, 20], [width - 20, 20], [20, height - 20], [width - 20, height - 20]]
spawn_directions = ['E', 'W', 'E', 'W']
class Node:
def __init__(self, x, y):
self.status = None
self.x = x
self.y = y
self.width = 20
class Rider:
instances = []
def __init__(self, player_num):
self.__class__.instances.append(self)
self.id = player_num
self.color = players[player_num]
self.lines = [[spawn_points[player_num], spawn_points[player_num]]]
self.recent = [spawn_points[player_num], spawn_points[player_num]]
self.direction = spawn_directions[player_num]
self.desired_length = 0
self.length = 0
self.is_alive = True
self.velocity = 5
def move(self, new_direction, players):
if self.is_alive:
new = directions[new_direction]
prev = self.lines[-1][1]
x, y = prev[0] + (new[0] * self.velocity), prev[1] + (new[1] * self.velocity)
if self.direction == new_direction:
self.lines[-1][1] = [x, y]
else:
self.direction = new_direction
self.lines.append([prev, [x, y]])
self.recent = self.lines[-1]
if not 0 <= x <= width:
sign = 1 if x < 0 else -1
self.lines.append([[x + (width + 1) * sign, y], [x + (width + 1) * sign, y]])
self.recent = self.lines[-1]
elif not 0 <= y <= height:
sign = 1 if y < 0 else -1
self.lines.append([[x, y + (height + 1) * sign], [x, y + (height + 1) * sign]])
self.recent = self.lines[-1]
self.collision_detection(players)
if self.desired_length:
self.shorten()
def collision_detection(self, players):
recent = [self.recent[1], self.lines[-1][1]]
recent_is_vertical = recent[0][0] == recent[1][0]
if recent_is_vertical:
for player in players:
lines = player.lines[:-2] if player.id == self.id else player.lines
for line in lines:
line_is_horizontal = line[0][1] == line[1][1]
if line_is_horizontal:
lines_intersect_x = line[0][0] <= recent[0][0] <= line[1][0] or line[1][0] <= recent[0][0] <= line[0][0]
lines_intersect_y = recent[0][1] <= line[0][1] <= recent[1][1] or recent[1][1] <= line[0][1] <= recent[0][1]
if lines_intersect_x and lines_intersect_y:
self.kill()
return
else:
lines_intersect_x = line[0][0] == recent[0][0]
lines_intersect_y = line[0][1] <= recent[1][1] <= line[1][1] or line[1][1] <= recent[1][1] <= line[0][1]
if lines_intersect_x and lines_intersect_y:
self.kill()
return
else:
for player in players:
lines = player.lines[:-2] if player.id == self.id else player.lines
for line in lines:
line_is_vertical = line[0][0] == line[1][0]
if line_is_vertical:
lines_intersect_y = line[0][1] <= recent[0][1] <= line[1][1] or line[1][1] <= recent[0][1] <= line[0][1]
lines_intersect_x = recent[0][0] <= line[0][0] <= recent[1][0] or recent[1][0] <= line[0][0] <= recent[0][0]
if lines_intersect_y and lines_intersect_x:
self.kill()
return
else:
lines_intersect_y = line[0][1] == recent[0][1]
lines_intersect_x = line[0][0] <= recent[1][0] <= line[1][0] or line[1][0] <= recent[1][0] <= line[0][0]
if lines_intersect_y and lines_intersect_x:
self.kill()
return
self.recent = recent
def shorten(self):
if self.length >= self.desired_length:
last = self.lines[0]
last_is_vertical = last[0][0] == last[1][0]
if last_is_vertical:
last_is_up = last[0][1] > last[1][1]
if last_is_up:
last[0][1] -= self.velocity
else:
last[0][1] += self.velocity
if last[0][1] == last[1][1]:
self.lines.pop(0)
return
else:
last_is_left = last[0][0] > last[1][0]
if last_is_left:
last[0][0] -= self.velocity
else:
last[0][0] += self.velocity
if last[0][0] == last[1][0]:
self.lines.pop(0)
return
self.lines[0] = last
else:
self.length += self.velocity
def kill(self):
self.is_alive = False
self.color = (112, 18, 18)
def get_head(self):
return self.lines[-1][1]
def draw(self, screen):
for line in self.lines:
pygame.draw.lines(screen, [0, 0, 0], False, line, 5)
for line in self.lines:
pygame.draw.lines(screen, self.color, False, line, 3)
if self.is_alive:
head = self.get_head()
screen.blit(bikes[self.direction], (head[0] - 8, head[1] - 8))
class Button:
instances = []
def __init__(self, x, y, width, height, color):
self.__class__.instances.append(self)
self.rect = pygame.Rect(x, y, width, height)
self.color = color
        self.dark = tuple(min(c + 20, 255) for c in color)  # materialise as a tuple so pygame accepts it as a colour
self.selected = False
def is_in(self, pos):
return self.rect.collidepoint(pos)
def draw(self, screen, inside):
color = self.dark if inside else self.color
pygame.draw.rect(screen, color, self.rect)
class UserInput(Button):
def __init__(self):
super().__init__(Button)
self.string = ""
def click(self):
keys = pygame.key.get_pressed()
- authors: ["noreply@github.com"]
- author_id: gordon26827.noreply@github.com

- blob_id: c8085e831df6d6a5f6e73fc2cdf44b01e3d08513
- directory_id: 3a58280e0bd417f306a357bbbc53680c66b1769c
- path: /python/yasl/vendored_tsl2561/__init__.py
- content_id: da809f77a5d2917d997e8d2facb8fe172d019a05
- detected_licenses: ["Apache-2.0"]
- license_type: permissive
- repo_name: chripell/yasl
- snapshot_id: e39974fdc9d2457c3e0fcacb097ed7b07f84292e
- revision_id: 1cc920be58873dc3ae6e62bde1bc949a7fe05f31
- branch_name: refs/heads/master
- visit_date: 2021-11-30T02:53:18.342159
- revision_date: 2021-11-28T18:58:21
- committer_date: 2021-11-28T18:58:21
- github_id: 165,526,356
- star_events_count: 0
- fork_events_count: 0
- gha_license_id: null
- gha_event_created_at: null
- gha_created_at: null
- gha_language: null
- src_encoding: UTF-8
- language: Python
- is_vendor: false
- is_generated: false
- length_bytes: 9,479
- extension: py
- content:
# The MIT License (MIT)
#
# Copyright (c) 2017 Carter Nelson for Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_tsl2561`
====================================================
CircuitPython driver for TSL2561 Light Sensor.
* Author(s): Carter Nelson
Implementation Notes
--------------------
**Hardware:**
* Adafruit `TSL2561 Digital Luminosity/Lux/Light Sensor Breakout
<https://www.adafruit.com/product/439>`_ (Product ID: 439)
* Adafruit `STEMMA - TSL2561 Digital Lux / Light Sensor
<https://www.adafruit.com/product/3611>`_ (Product ID: 3611)
* Adafruit `Flora Lux Sensor - TSL2561 Light Sensor
<https://www.adafruit.com/product/1246>`_ (Product ID: 1246)
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the ESP8622 and M0-based boards:
https://github.com/adafruit/circuitpython/releases
* Adafruit's Bus Device library: https://github.com/adafruit/Adafruit_CircuitPython_BusDevice
"""
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_TSL2561.git"
_DEFAULT_ADDRESS = 0x39
_COMMAND_BIT = 0x80
_WORD_BIT = 0x20
_CONTROL_POWERON = 0x03
_CONTROL_POWEROFF = 0x00
_REGISTER_CONTROL = 0x00
_REGISTER_TIMING = 0x01
_REGISTER_TH_LOW = 0x02
_REGISTER_TH_HIGH = 0x04
_REGISTER_INT_CTRL = 0x06
_REGISTER_CHAN0_LOW = 0x0C
_REGISTER_CHAN1_LOW = 0x0E
_REGISTER_ID = 0x0A
_GAIN_SCALE = (16, 1)
_TIME_SCALE = (1 / 0.034, 1 / 0.252, 1)
_CLIP_THRESHOLD = (4900, 37000, 65000)
class TSL2561:
"""Class which provides interface to TSL2561 light sensor."""
def __init__(self, i2c, address=_DEFAULT_ADDRESS):
self.buf = bytearray(3)
self.i2c = i2c
self.addr = address
async def open(self):
partno, revno = await self.chip_id()
# data sheet says TSL2561 = 0001, reality says 0101
if not partno == 5:
raise RuntimeError(
"Failed to find TSL2561! Part 0x%x Rev 0x%x" % (partno, revno)
)
await self.set_enabled(True)
async def chip_id(self):
"""A tuple containing the part number and the revision number."""
chip_id = await self._read_register(_REGISTER_ID)
partno = (chip_id >> 4) & 0x0F
revno = chip_id & 0x0F
return (partno, revno)
async def enabled(self):
"""The state of the sensor."""
return (await self._read_register(_REGISTER_CONTROL) & 0x03) != 0
async def set_enabled(self, enable):
"""Enable or disable the sensor."""
if enable:
await self._enable()
else:
await self._disable()
async def lux(self):
"""The computed lux value or None when value is not computable."""
return await self._compute_lux()
async def broadband(self):
"""The broadband channel value."""
return await self._read_broadband()
async def infrared(self):
"""The infrared channel value."""
return await self._read_infrared()
async def luminosity(self):
"""The overall luminosity as a tuple containing the broadband
channel and the infrared channel value."""
return (await self.broadband(), await self.infrared())
async def gain(self):
"""The gain. 0:1x, 1:16x."""
return await self._read_register(_REGISTER_TIMING) >> 4 & 0x01
async def set_gain(self, value):
"""Set the gain. 0:1x, 1:16x."""
value &= 0x01
value <<= 4
current = await self._read_register(_REGISTER_TIMING)
cmd = _COMMAND_BIT | _REGISTER_TIMING
data = (current & 0xEF) | value
await self.i2c.write_i2c_block_data(self.addr, cmd, [data])
async def integration_time(self):
"""The integration time. 0:13.7ms, 1:101ms, 2:402ms, or 3:manual"""
current = await self._read_register(_REGISTER_TIMING)
return current & 0x03
async def set_integration_time(self, value):
"""Set the integration time. 0:13.7ms, 1:101ms, 2:402ms, or 3:manual."""
value &= 0x03
current = await self._read_register(_REGISTER_TIMING)
cmd = _COMMAND_BIT | _REGISTER_TIMING
data = (current & 0xFC) | value
await self.i2c.write_i2c_block_data(self.addr, cmd, [data])
async def threshold_low(self):
"""The low light interrupt threshold level."""
low, high = await self._read_register(_REGISTER_TH_LOW, 2)
return high << 8 | low
async def set_threshold_low(self, value):
cmd = _COMMAND_BIT | _WORD_BIT | _REGISTER_TH_LOW
buf = (value & 0xFF, (value >> 8) & 0xFF)
await self.i2c.write_i2c_block_data(self.addr, cmd, buf)
async def threshold_high(self):
"""The upper light interrupt threshold level."""
low, high = await self._read_register(_REGISTER_TH_HIGH, 2)
return high << 8 | low
async def set_threshold_high(self, value):
cmd = _COMMAND_BIT | _WORD_BIT | _REGISTER_TH_HIGH
buf = (value & 0xFF, (value >> 8) & 0xFF)
await self.i2c.write_i2c_block_data(self.addr, cmd, buf)
async def cycles(self):
"""The number of integration cycles for which an out of bounds
value must persist to cause an interrupt."""
value = await self._read_register(_REGISTER_INT_CTRL)
return value & 0x0F
async def set_cycles(self, value):
current = await self._read_register(_REGISTER_INT_CTRL)
cmd = _COMMAND_BIT | _REGISTER_INT_CTRL
data = current | (value & 0x0F)
await self.i2c.write_i2c_block_data(self.addr, cmd, [data])
async def interrupt_mode(self):
"""The interrupt mode selection.
==== =========================
Mode Description
==== =========================
0 Interrupt output disabled
1 Level Interrupt
2 SMBAlert compliant
3 Test Mode
==== =========================
"""
return (await self._read_register(_REGISTER_INT_CTRL) >> 4) & 0x03
async def set_interrupt_mode(self, value):
current = await self._read_register(_REGISTER_INT_CTRL)
cmd = _COMMAND_BIT | _REGISTER_INT_CTRL
data = (current & 0x0F) | ((value & 0x03) << 4)
await self.i2c.write_i2c_block_data(self.addr, cmd, [data])
async def clear_interrupt(self):
"""Clears any pending interrupt."""
cmd = 0xC0
await self.i2c.write_i2c_block_data(self.addr, cmd, [])
async def _compute_lux(self):
"""Based on datasheet for FN package."""
ch0, ch1 = await self.luminosity()
if ch0 == 0:
return None
if ch0 > _CLIP_THRESHOLD[await self.integration_time()]:
return None
if ch1 > _CLIP_THRESHOLD[await self.integration_time()]:
return None
ratio = ch1 / ch0
if 0 <= ratio <= 0.50:
lux = 0.0304 * ch0 - 0.062 * ch0 * ratio ** 1.4
elif ratio <= 0.61:
lux = 0.0224 * ch0 - 0.031 * ch1
elif ratio <= 0.80:
lux = 0.0128 * ch0 - 0.0153 * ch1
elif ratio <= 1.30:
lux = 0.00146 * ch0 - 0.00112 * ch1
else:
lux = 0.0
# Pretty sure the floating point math formula on pg. 23 of datasheet
# is based on 16x gain and 402ms integration time. Need to scale
# result for other settings.
# Scale for gain.
lux *= _GAIN_SCALE[await self.gain()]
# Scale for integration time.
lux *= _TIME_SCALE[await self.integration_time()]
return lux
async def _enable(self):
await self._write_control_register(_CONTROL_POWERON)
async def _disable(self):
await self._write_control_register(_CONTROL_POWEROFF)
async def _read_register(self, reg, count=1):
cmd = _COMMAND_BIT | reg
if count == 2:
cmd |= _WORD_BIT
ret = await self.i2c.read_i2c_block_data(self.addr, cmd, count)
if count == 2:
return ret
return ret[0]
async def _write_control_register(self, reg):
cmd = _COMMAND_BIT | _REGISTER_CONTROL
await self.i2c.write_i2c_block_data(self.addr, cmd, [reg])
async def _read_broadband(self):
low, high = await self._read_register(_REGISTER_CHAN0_LOW, 2)
return high << 8 | low
async def _read_infrared(self):
low, high = await self._read_register(_REGISTER_CHAN1_LOW, 2)
return high << 8 | low
- authors: ["chri@evolware.org"]
- author_id: chri@evolware.org

- blob_id: 25540607bf75631f4df2367961a0213d0954ed22
- directory_id: 0f4a48cf7b1b6e23197e97b9771f25c160290fed
- path: /quarry/forms/main.py
- content_id: a1b6a8efbebc75b2cc33d7375b21f900977b1ee3
- detected_licenses: []
- license_type: no_license
- repo_name: arfamzr/jmg
- snapshot_id: 170bb245c7a67febd4b04842a99f06c33dea30e8
- revision_id: 136e5dd4767f5e738e0fc4358d23a5c0c57b152f
- branch_name: refs/heads/master
- visit_date: 2023-03-13T00:03:23.043873
- revision_date: 2021-02-24T05:42:46
- committer_date: 2021-02-24T05:42:46
- github_id: 341,754,224
- star_events_count: 0
- fork_events_count: 0
- gha_license_id: null
- gha_event_created_at: null
- gha_created_at: null
- gha_language: null
- src_encoding: UTF-8
- language: Python
- is_vendor: false
- is_generated: false
- length_bytes: 7,232
- extension: py
- content:
from crispy_forms.layout import Div
from django import forms
from django.forms import formsets
from django.forms.models import formset_factory, modelformset_factory
from crispy_forms.helper import FormHelper, Layout
from crispy_forms.bootstrap import Div
from account.widgets import XDSoftDatePickerInput
from ..models import (
Data,
MainProductionStatistic,
SideProductionStatistic,
SalesSubmission,
LocalFinalUses,
ExportFinalUses,
LocalOperator,
LocalContractor,
ForeignOperator,
ForeignContractor,
InternalCombustionMachinery,
ElectricMachinery,
DailyExplosive,
EnergySupply,
OperatingRecord,
Royalties,
Other,
)
class DataForm(forms.ModelForm):
class Meta:
model = Data
fields = ['month', 'year']
def __init__(self, *args, **kwargs):
self.manager = kwargs.pop('manager', None)
super().__init__(*args, **kwargs)
def clean(self, added_error=False):
cleaned_data = super().clean()
month = cleaned_data.get('month')
year = cleaned_data.get('year')
if Data.objects.filter(manager=self.manager, month=month, year=year):
raise forms.ValidationError(
'Data untuk bulan dan tahun tersebut telah tersedia!')
return cleaned_data
class MainProductionStatisticForm(forms.ModelForm):
class Meta:
model = MainProductionStatistic
exclude = ['data']
# def __init__(self, *args, **kwargs):
# super().__init__(*args, **kwargs)
# for field_name, field in self.fields.items():
# field.label = ''
class SideProductionStatisticForm(forms.ModelForm):
class Meta:
model = SideProductionStatistic
exclude = ['data']
# def __init__(self, *args, **kwargs):
# super().__init__(*args, **kwargs)
# for field_name, field in self.fields.items():
# field.label = ''
class SalesSubmissionForm(forms.ModelForm):
class Meta:
model = SalesSubmission
exclude = ['data']
# def __init__(self, *args, **kwargs):
# super().__init__(*args, **kwargs)
# for field_name, field in self.fields.items():
# field.label = ''
class SalesSubmissionFormHelper(FormHelper):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# self.form_method = 'post'
self.form_tag = False
self.disable_csrf = True
self.layout = Layout(
Div('submission_size', css_class='col-4'),
Div('amount', css_class='col-4'),
Div('worth', css_class='col-4'),
# Div('total', css_class='col-3'),
)
# self.render_required_fields = True
SalesSubmissionFormSet = modelformset_factory(
model=SalesSubmission, form=SalesSubmissionForm, extra=10)
class LocalFinalUsesForm(forms.ModelForm):
class Meta:
model = LocalFinalUses
exclude = ['data']
widgets = {
'state_other': forms.Textarea(attrs={'rows': 2})
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for field_name, field in self.fields.items():
field.label = ''
class ExportFinalUsesForm(forms.ModelForm):
class Meta:
model = ExportFinalUses
exclude = ['data']
widgets = {
'state_other': forms.Textarea(attrs={'rows': 2})
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for field_name, field in self.fields.items():
field.label = ''
class LocalOperatorForm(forms.ModelForm):
class Meta:
model = LocalOperator
exclude = ['data']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for field_name, field in self.fields.items():
field.label = ''
class LocalContractorForm(forms.ModelForm):
class Meta:
model = LocalContractor
exclude = ['data']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for field_name, field in self.fields.items():
field.label = ''
class ForeignOperatorForm(forms.ModelForm):
class Meta:
model = ForeignOperator
exclude = ['data']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for field_name, field in self.fields.items():
field.label = ''
class ForeignContractorForm(forms.ModelForm):
class Meta:
model = ForeignContractor
exclude = ['data']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for field_name, field in self.fields.items():
field.label = ''
class InternalCombustionMachineryForm(forms.ModelForm):
class Meta:
model = InternalCombustionMachinery
exclude = ['data']
widgets = {
'state_other': forms.Textarea(attrs={'rows': 2})
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for field_name, field in self.fields.items():
field.label = ''
class ElectricMachineryForm(forms.ModelForm):
class Meta:
model = ElectricMachinery
exclude = ['data']
widgets = {
'state_other': forms.Textarea(attrs={'rows': 2})
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for field_name, field in self.fields.items():
field.label = ''
class DailyExplosiveForm(forms.ModelForm):
date = forms.DateField(
input_formats=['%d/%m/%Y'],
widget=XDSoftDatePickerInput(format='%d/%m/%Y'),
)
class Meta:
model = DailyExplosive
exclude = ['data']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for field_name, field in self.fields.items():
field.label = ''
class EnergySupplyForm(forms.ModelForm):
class Meta:
model = EnergySupply
exclude = ['data']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for field_name, field in self.fields.items():
field.label = ''
class OperatingRecordForm(forms.ModelForm):
class Meta:
model = OperatingRecord
exclude = ['data']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for field_name, field in self.fields.items():
field.label = ''
class RoyaltiesForm(forms.ModelForm):
class Meta:
model = Royalties
exclude = ['data']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for field_name, field in self.fields.items():
field.label = ''
class OtherForm(forms.ModelForm):
class Meta:
model = Other
exclude = ['data']
widgets = {
'title': forms.Textarea(attrs={'rows': 1}),
'comment': forms.Textarea(attrs={'rows': 3}),
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for field_name, field in self.fields.items():
field.label = ''
- authors: ["ijat191999@gmail.com"]
- author_id: ijat191999@gmail.com

- blob_id: 8e0d31432bf24af9a1fbe66b548405b05ed44705
- directory_id: 117d3d4dc9c0fc86965c6f2f428be399102b5f06
- path: /application/mainGUI.py
- content_id: ce8ae9a04cead61c0a9dfda7d61127a872aa9d9b
- detected_licenses: []
- license_type: no_license
- repo_name: anakpindahan/cryptography-1
- snapshot_id: bd9bda54112e7ef5b9713ddbf05f7685377def4b
- revision_id: 958ba2a8541ab83d919db32c9dcad79de27bcc26
- branch_name: refs/heads/main
- visit_date: 2023-07-19T10:23:04.509932
- revision_date: 2021-09-06T04:46:26
- committer_date: 2021-09-06T04:46:26
- github_id: 403,188,664
- star_events_count: 0
- fork_events_count: 0
- gha_license_id: null
- gha_event_created_at: null
- gha_created_at: null
- gha_language: null
- src_encoding: UTF-8
- language: Python
- is_vendor: false
- is_generated: false
- length_bytes: 5,653
- extension: py
- content:
from tkinter import *
from tkinter import filedialog as fd
import cryptoCommands
root = Tk()
root.title("Encrypt It")
root.geometry('480x400')
frame = Frame(root)
frame.grid()
def cipher():
plainText = plainTextVar.get()
key = keyVar.get()
mKey = mKeyVar.get()
bKey = bKeyVar.get()
value = cipherVar.get()
ciphered = ''
if(value == 1):
ciphered = cryptoCommands.vigenereEncrypt(plainText, key)
elif(value == 2):
ciphered = cryptoCommands.autoKeyVigenereEncrypt(plainText, key)
elif(value == 3):
ciphered = cryptoCommands.extendedVigenereEncrypt(plainText, key)
elif(value == 4):
ciphered = cryptoCommands.playfairEncrypt(plainText, key)
elif(value == 5):
ciphered = cryptoCommands.affineEncrypt(plainText, mKey, bKey)
showTextVar.set(ciphered)
showText.delete('1.0', END)
showText.insert(END, showTextVar.get())
def decipher():
cipherText = cipherTextVar.get()
key = keyVar.get()
mKey = mKeyVar.get()
bKey = bKeyVar.get()
value = cipherVar.get()
deciphered = ''
if(value == 1):
deciphered = cryptoCommands.vigenereDecrypt(cipherText, key)
elif(value == 2):
deciphered = cryptoCommands.autoKeyVigenereDecrypt(cipherText, key)
elif(value == 3):
deciphered = cryptoCommands.extendedVigenereDecrypt(cipherText, key)
elif(value == 4):
deciphered = cryptoCommands.playfairDecrypt(cipherText, key)
elif(value == 5):
deciphered = cryptoCommands.affineDecrypt(cipherText, mKey, bKey)
showTextVar.set(deciphered)
    showText.delete('1.0', END)
showText.insert(END, showTextVar.get())
def buildKeyInput():
cipherType = cipherVar.get()
if(cipherType == 5):
keyLabel.grid_remove()
keyEntry.grid_remove()
mKeyLabel.grid()
mKeyEntry.grid()
bKeyLabel.grid()
bKeyEntry.grid()
else:
mKeyLabel.grid_remove()
mKeyEntry.grid_remove()
bKeyLabel.grid_remove()
bKeyEntry.grid_remove()
keyLabel.grid()
keyEntry.grid()
def open_file_en():
file = fd.askopenfile(mode = "r")
if file is not None:
plainTextVar.set(file.read())
file.close()
cipher()
def open_file_de():
file = fd.askopenfile(mode = "r")
if file is not None:
cipherTextVar.set(file.read())
file.close()
decipher()
def open_file_en_by():
file = fd.askopenfile(mode = "rb")
if file is not None:
plainTextVar.set(file.read())
file.close()
cipher()
def open_file_de_by():
file = fd.askopenfile(mode = "rb")
if file is not None:
cipherTextVar.set(file.read())
file.close()
decipher()
cipherVar = IntVar()
plainTextVar = StringVar()
keyVar = StringVar()
cipherTextVar = StringVar()
mKeyVar = IntVar()
bKeyVar = IntVar()
showTextVar = StringVar()
normalVigenereButton = Radiobutton(root, text = 'Normal Vigenere', variable = cipherVar, value = 1, command = buildKeyInput)
normalVigenereButton.grid(row = 0, column = 0)
autoKeyVigenereButton = Radiobutton(root, text = 'Autokey Vigenere', variable = cipherVar, value = 2, command = buildKeyInput)
autoKeyVigenereButton.grid(row = 0, column = 1)
extendedVigenereButton = Radiobutton(root, text = 'Extended Vigenere', variable = cipherVar, value = 3, command = buildKeyInput)
extendedVigenereButton.grid(row = 0, column = 2)
playfairButton = Radiobutton(root, text = 'Playfair', variable = cipherVar, value = 4, command = buildKeyInput)
playfairButton.grid(row = 1, column = 0)
affineCipher = Radiobutton(root, text = 'Affine', variable = cipherVar, value = 5, command = buildKeyInput)
affineCipher.grid(row = 1, column = 2)
plainTextLabel = Label(root, text = 'Plain Text')
plainTextEntry = Entry(root, textvariable = plainTextVar)
plainTextLabel.grid(row = 2, column = 0)
plainTextEntry.grid(row = 3, column = 0)
encipherButton = Button(root, text = 'Encipher', command = cipher)
encipherButton.grid(row = 4, column = 0)
cipherTextLabel = Label(root, text = 'Cipher Text')
cipherTextEntry = Entry(root, textvariable = cipherTextVar)
cipherTextLabel.grid(row = 2, column = 2)
cipherTextEntry.grid(row = 3, column = 2)
decipherButton = Button(root, text = 'Decipher', command = decipher)
decipherButton.grid(row = 4, column = 2)
mKeyLabel = Label(root, text = "Key m")
mKeyEntry = Entry(root, textvariable = mKeyVar)
mKeyLabel.grid(row = 6, column = 0)
mKeyEntry.grid(row = 7, column = 0)
bKeyLabel = Label(root, text = "Key b")
bKeyEntry = Entry(root, textvariable = bKeyVar)
bKeyLabel.grid(row = 7, column = 2)
bKeyEntry.grid(row = 8, column = 2)
uploadEncipherButton = Button(root, text = 'Upload file', command = open_file_en)
uploadEncipherButton.grid(row = 5, column = 0)
uploadDecipherButton = Button(root, text = 'Upload file', command = open_file_de)
uploadDecipherButton.grid(row = 5, column = 2)
uploadEncipherByteButton = Button(root, text = 'Upload file as byte', command = open_file_en_by)
uploadEncipherByteButton.grid(row = 6, column = 0)
uploadDecipherByteButton = Button(root, text = 'Upload file as byte', command = open_file_de_by)
uploadDecipherByteButton.grid(row = 6, column = 2)
keyLabel = Label(root, text = "Key")
keyEntry = Entry(root, textvariable = keyVar)
keyLabel.grid(row = 7, column = 1)
keyEntry.grid(row = 8, column = 1)
keyLabel.grid_remove()
keyEntry.grid_remove()
mKeyLabel.grid_remove()
mKeyEntry.grid_remove()
bKeyLabel.grid_remove()
bKeyEntry.grid_remove()
showText = Text(root, height = 20, width = 20)
showText.grid(row = 9, column = 1)
root.mainloop()
- authors: ["akeylanaufal@gmail.com"]
- author_id: akeylanaufal@gmail.com

- blob_id: 1c20fdfddb14b598d6123898144923a14342543e
- directory_id: 6aeedd0cddbb72bcbec956d0911ad980f0de84dd
- path: /keras_text_cnn.py
- content_id: aa55ca2ceb1ffe182d5ebc4f3e1bcd6164c2e54e
- detected_licenses: ["MIT"]
- license_type: permissive
- repo_name: fedelopez77/cnn-sentences-classification
- snapshot_id: 255c5622e26e485f66f2b9457ec74f6694fdc20f
- revision_id: c9cd89f0bdbd45e991742dda38c9e5343dbd5074
- branch_name: refs/heads/master
- visit_date: 2020-03-12T01:04:09.674947
- revision_date: 2018-05-17T07:37:38
- committer_date: 2018-05-17T07:37:38
- github_id: 130,366,287
- star_events_count: 1
- fork_events_count: 0
- gha_license_id: null
- gha_event_created_at: null
- gha_created_at: null
- gha_language: null
- src_encoding: UTF-8
- language: Python
- is_vendor: false
- is_generated: false
- length_bytes: 6,865
- extension: py
- content:
from keras.layers.embeddings import Embedding
from keras.layers.core import Reshape, Dense, Dropout
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.layers.merge import Concatenate
from keras.models import Model
from keras.layers import Input
from keras import regularizers
from keras.callbacks import TensorBoard, EarlyStopping
from tensorflow.contrib import learn
import numpy as np
import load_data
# Load data
print("Loading data...")
x_text, y = load_data.load_data_and_labels("datasets/rt-polarity.pos", "datasets/rt-polarity.neg")
# Taken from http://www.wildml.com/2015/12/implementing-a-cnn-for-text-classification-in-tensorflow/
# Build vocabulary
max_document_length = max([len(x.split(" ")) for x in x_text])
vocab_processor = learn.preprocessing.VocabularyProcessor(max_document_length)
x = np.array(list(vocab_processor.fit_transform(x_text)))
# Randomly shuffle data
np.random.seed(10)
shuffle_indices = np.random.permutation(np.arange(len(y)))
x_shuffled = x[shuffle_indices]
y_shuffled = y[shuffle_indices]
# Split train/test set
dev_sample_index = -1 * int(len(y) * 0.1) # Uses 10% as test (dev)
x_train, x_test = x_shuffled[:dev_sample_index], x_shuffled[dev_sample_index:]
y_train, y_test = y_shuffled[:dev_sample_index], y_shuffled[dev_sample_index:]
del x, y, x_shuffled, y_shuffled
print("Vocabulary Size: {:d}".format(len(vocab_processor.vocabulary_)))
print("Train/test split: {:d}/{:d}".format(len(y_train), len(y_test)))
print("\n\nX-train instance 0: {}".format(x_train[0]))
print("\n\nY-train instance 0: {}".format(y_train[0]))
def get_model(embedding_dim=128, filter_sizes=(3, 4, 5), num_filters=128, dropout=0.2, l1_reg=0.01, l2_reg=0.01):
# Embedding layer
embedding = Embedding(input_dim=len(vocab_processor.vocabulary_),
output_dim=embedding_dim,
input_length=max_document_length,
name="embedding")
input_sentence = Input(shape=(max_document_length,), name="input_sentence")
sentence_vector = embedding(input_sentence)
# expected sentence_vector.shape = (batch_size, max_doc_length, embedding_dim)
# This is necessary because Conv2D expects a 4-D tensor (counting the batch_size)
sentence_vector = Reshape((1, max_document_length, embedding_dim))(sentence_vector)
# 3 Conv2D layers, with num_filters (128) of filters size = (filter_len=[3,4,5], output_dim)
# each filter produces an output of expected shape (max_doc_len - filter_len + 1)
# the input of each Conv2D layer is the same sentence_vector
pool_outputs = []
for filter_len in filter_sizes:
conv_name = "Conv2D_{}".format(filter_len)
conv = Conv2D(filters=num_filters, kernel_size=(filter_len, embedding_dim), strides=(1, 1),
activation='relu', data_format='channels_first', padding='valid',
kernel_regularizer=regularizers.l2(l2_reg), activity_regularizer=regularizers.l1(l1_reg),
name=conv_name)
# expected output shape = (samples?, num_filters, new_rows=max_doc_len - filter_len + 1, new_cols=1)
conv_output = conv(sentence_vector)
max_pool_name = "MaxPool_{}".format(filter_len)
pooling = MaxPooling2D(pool_size=(max_document_length - filter_len + 1, 1), data_format='channels_first',
name=max_pool_name)
# expected output (batch_size, num_filters, pooled_rows=1, pooled_cols=1)
pool_output = pooling(conv_output)
pool_outputs.append(pool_output)
# Concatenate the len(filter_sizes) outputs in only one
concatenated = Concatenate(axis=1)(pool_outputs)
# expected concatenated.shape = (batch_size, num_filters * len(filter_sizes), 1, 1)
feature_vector = Reshape((num_filters * len(filter_sizes),))(concatenated)
# expected feature_vector.shape = (batch_size, num_filters * len(filter_sizes))
feature_vector = Dropout(dropout, seed=123)(feature_vector)
    classes = 2  # positive or negative
final_output = Dense(classes, activation='softmax',
kernel_regularizer=regularizers.l2(l2_reg), activity_regularizer=regularizers.l1(l1_reg),
name="fully_connected")(feature_vector) # 2 because it can be positive or negative
# expected final_output.shape = (batch_size, 2)
model = Model(inputs=input_sentence, outputs=final_output)
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
return model
def grid_search():
best_accuracy = 0.0
best_loss = 0.0
best_embed = 0
best_num_filters = 0
best_dropout = 0
best_l1 = 0
best_l2 = 0
embedding_dims = [300, 400, 500]
nums_of_filters = [300, 400, 500]
dropouts = [0.1, 0.2, 0.3, 0.4]
l1s = [0.0001]
l2s = [0.0001]
for embedding_dim in embedding_dims:
for num_filters in nums_of_filters:
for dropout in dropouts:
for l1 in l1s:
for l2 in l2s:
model = get_model(embedding_dim=embedding_dim, num_filters=num_filters, dropout=dropout,
l1_reg=l1, l2_reg=l2)
# Training
model.fit(x_train, y_train, batch_size=64, epochs=50, verbose=2, validation_split=0.1,
callbacks=[EarlyStopping(monitor='val_loss', patience=3, verbose=1)])
# Evaluation
score = model.evaluate(x_test, y_test, verbose=1)
loss, accuracy = score[0], score[1]
print("-- Partial Result: Accuracy: {}, Loss: {}".format(accuracy, loss))
print("Parameters: embed: {}, num_filters: {}, dropout: {}, l1: {}, l2: {}".format(
embedding_dim, num_filters, dropout, l1, l2))
if accuracy > best_accuracy:
print("-------- NEW BEST RESULT --------")
print("previous acc: {}, new accuracy: {}, Loss: {}".format(best_accuracy, accuracy, loss))
best_accuracy = accuracy
best_loss = loss
best_embed = embedding_dim
best_num_filters = num_filters
best_dropout = dropout
best_l1 = l1
best_l2 = l2
print("FINAL RESULTS: Best accuracy: {}, best loss: {}".format(best_accuracy, best_loss))
print("Best Embedding: {}\nBest num filter: {}\nBest dropout: {}\nBest L1: {}\nBest L2: {}".format(
best_embed, best_num_filters, best_dropout, best_l1, best_l2))
# grid_search()
- authors: ["fedelopez77@gmail.com"]
- author_id: fedelopez77@gmail.com

- blob_id: 4e6b5f54446ffc15fd3f9b680aaad9f785eeef70
- directory_id: 76d29feb7e37d26df792f8584f79b3a36947f314
- path: /Desktop/cms/proTwo/settings.py
- content_id: 5ef67b4a3e0eb53a74bb7b89f4adb2d7f6aed644
- detected_licenses: []
- license_type: no_license
- repo_name: Nihaa21/test
- snapshot_id: 5dfc2bb16c09d913effc6ec9eb3a9bb8261898b2
- revision_id: 8916442dddf2a336ddaa6b8b5d5afcdb573d8112
- branch_name: refs/heads/master
- visit_date: 2023-03-02T06:36:48.872926
- revision_date: 2021-02-03T11:18:41
- committer_date: 2021-02-03T11:18:41
- github_id: 335,598,070
- star_events_count: 0
- fork_events_count: 0
- gha_license_id: null
- gha_event_created_at: null
- gha_created_at: null
- gha_language: null
- src_encoding: UTF-8
- language: Python
- is_vendor: false
- is_generated: false
- length_bytes: 3,175
- extension: py
- content:
from pathlib import Path
import os
import django_heroku
import dj_database_url
from decouple import config
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
TEMPLATE_DIR= os.path.join(BASE_DIR,'templates')
STATIC_DIR = os.path.join(BASE_DIR,'static')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'd*p)4y5v%k^wx^dx2qdrr=ws+vcy4fnf186ap34)3@6nbb2ing'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
APPEND_SLASH=False
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'AppTwo',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware'
]
ROOT_URLCONF = 'proTwo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR,],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'proTwo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
STATIC_DIR,
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
django_heroku.settings(locals())
- authors: ["nihaghali554@gmail.com"]
- author_id: nihaghali554@gmail.com

- blob_id: 596d84deb4fdb9018e23cbe15c900d2818d55eb8
- directory_id: 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
- path: /XrQnBBLaGRkXZuM8n_8.py
- content_id: 113e0e0122b1a48f075923d4f4bd9b5bed92ad1c
- detected_licenses: []
- license_type: no_license
- repo_name: daniel-reich/ubiquitous-fiesta
- snapshot_id: 26e80f0082f8589e51d359ce7953117a3da7d38c
- revision_id: 9af2700dbe59284f5697e612491499841a6c126f
- branch_name: refs/heads/master
- visit_date: 2023-04-05T06:40:37.328213
- revision_date: 2021-04-06T20:17:44
- committer_date: 2021-04-06T20:17:44
- github_id: 355,318,759
- star_events_count: 0
- fork_events_count: 0
- gha_license_id: null
- gha_event_created_at: null
- gha_created_at: null
- gha_language: null
- src_encoding: UTF-8
- language: Python
- is_vendor: false
- is_generated: false
- length_bytes: 77
- extension: py
- content:
index_filter = lambda inx,string: "".join(string[i].lower() for i in inx);
- authors: ["daniel.reich@danielreichs-MacBook-Pro.local"]
- author_id: daniel.reich@danielreichs-MacBook-Pro.local

- blob_id: d363763e565b0aa03eae97cfef6890a850266f45
- directory_id: 63db5fa33cae990bf62a27ca53bbe027abe62df0
- path: /Transforms/GetIngressSource.py
- content_id: 6a0f2a8a992dd8168259690b634220f78f29bdc8
- detected_licenses: []
- license_type: no_license
- repo_name: znb/Elastic-Elephant
- snapshot_id: 92cf5178c1412e85199a4d98737c693cbd8df0cc
- revision_id: 51df26246c903e0ae6cce00b201b0888be456982
- branch_name: refs/heads/master
- visit_date: 2020-04-06T04:31:02.173052
- revision_date: 2017-06-05T14:21:52
- committer_date: 2017-06-05T14:21:52
- github_id: 34,060,453
- star_events_count: 31
- fork_events_count: 6
- gha_license_id: null
- gha_event_created_at: null
- gha_created_at: null
- gha_language: null
- src_encoding: UTF-8
- language: Python
- is_vendor: false
- is_generated: false
- length_bytes: 1,336
- extension: py
- content:
#!/usr/bin/python
# Pull all the ingress sources attached to an ingress rule
from MaltegoTransform import *
import boto.ec2
import sys
from init import load_credentials
creds = load_credentials()
REGION = creds[2]
m = MaltegoTransform()
m.parseArguments(sys.argv)
ingress_src = m.getVar("GroupID")
try:
conn = boto.ec2.connect_to_region(REGION, aws_access_key_id=creds[0], aws_secret_access_key=creds[1])
reservations = conn.get_all_instances()
for i in reservations:
group_nums = len(i.instances[0].groups)
for z in range(group_nums):
group_id = i.instances[0].groups[z].id
if str(group_id) == str(ingress_src):
sec_rules = conn.get_all_security_groups(group_ids=group_id)[0].rules
rule_nums = len(sec_rules)
for g in range(rule_nums):
ent = m.addEntity('matterasmus.AmazonEC2IngressSource', str(conn.get_all_security_groups(group_ids=group_id)[0].rules[g].grants))
ent.addAdditionalFields("Source", "Source", "strict", str(conn.get_all_security_groups(group_ids=group_id)[0].rules[g].grants))
ent.addAdditionalFields("GroupID", "Group ID", "strict", str(group_id))
m.addUIMessage("Completed.")
except Exception as e:
m.addUIMessage(str(e))
m.returnOutput()
- authors: ["matt.erasmus@ticketmaster.co.uk"]
- author_id: matt.erasmus@ticketmaster.co.uk

- blob_id: 6cdb34336e4cac23410e1f13ecbba2c8c37a0f52
- directory_id: 574b2f4137d9606dc347dda42428d7204eb6923a
- path: /MVFF_Version2/mvff_rfcn/symbols/resnet_v1_101_motion_vector_rfcn.py
- content_id: 95296f0098baa2a9f95e1d0270956d84b1b15ae8
- detected_licenses: ["MIT"]
- license_type: permissive
- repo_name: OrdinaryCrazy/mvff-sideversions
- snapshot_id: 09ad6e4522efeb236a4eab1c9f5da98872d71626
- revision_id: 5ebdbf0187145abf53ad8e2c661938134296ae47
- branch_name: refs/heads/master
- visit_date: 2020-07-04T22:46:49.946795
- revision_date: 2019-09-15T03:32:47
- committer_date: 2019-09-15T03:32:47
- github_id: 202,447,086
- star_events_count: 0
- fork_events_count: 0
- gha_license_id: null
- gha_event_created_at: null
- gha_created_at: null
- gha_language: null
- src_encoding: UTF-8
- language: Python
- is_vendor: false
- is_generated: false
- length_bytes: 76,184
- extension: py
- content:
# --------------------------------------------------------
# Rivulet
# Licensed under The MIT License [see LICENSE for details]
# Modified by Boyuan Feng
# --------------------------------------------------------
# --------------------------------------------------------
# Deep Feature Flow
# Copyright (c) 2017 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Yuwen Xiong, Xizhou Zhu
# --------------------------------------------------------
import cPickle
import mxnet as mx
from utils.symbol import Symbol
from operator_py.proposal import *
from operator_py.proposal_target import *
from operator_py.box_annotator_ohem import *
from operator_py.rpn_inv_normalize import *
from operator_py.tile_as import *
class resnet_v1_101_motion_vector_rfcn(Symbol):
def __init__(self):
"""
Use __init__ to define parameter network needs
"""
self.eps = 1e-5
self.use_global_stats = True
self.workspace = 512
self.units = (3, 4, 23, 3) # use for 101
self.filter_list = [256, 512, 1024, 2048]
def get_resnet_v1(self, data):
conv1 = mx.symbol.Convolution(name='conv1', data=data , num_filter=64, pad=(3,3), kernel=(7,7), stride=(2,2), no_bias=True)
bn_conv1 = mx.symbol.BatchNorm(name='bn_conv1', data=conv1 , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale_conv1 = bn_conv1
conv1_relu = mx.symbol.Activation(name='conv1_relu', data=scale_conv1 , act_type='relu')
pool1 = mx.symbol.Pooling(name='pool1', data=conv1_relu , pad=(1,1), kernel=(3,3), stride=(2,2), pool_type='max')
res2a_branch1 = mx.symbol.Convolution(name='res2a_branch1', data=pool1 , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn2a_branch1 = mx.symbol.BatchNorm(name='bn2a_branch1', data=res2a_branch1 , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale2a_branch1 = bn2a_branch1
res2a_branch2a = mx.symbol.Convolution(name='res2a_branch2a', data=pool1 , num_filter=64, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn2a_branch2a = mx.symbol.BatchNorm(name='bn2a_branch2a', data=res2a_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale2a_branch2a = bn2a_branch2a
res2a_branch2a_relu = mx.symbol.Activation(name='res2a_branch2a_relu', data=scale2a_branch2a , act_type='relu')
res2a_branch2b = mx.symbol.Convolution(name='res2a_branch2b', data=res2a_branch2a_relu , num_filter=64, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn2a_branch2b = mx.symbol.BatchNorm(name='bn2a_branch2b', data=res2a_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale2a_branch2b = bn2a_branch2b
res2a_branch2b_relu = mx.symbol.Activation(name='res2a_branch2b_relu', data=scale2a_branch2b , act_type='relu')
res2a_branch2c = mx.symbol.Convolution(name='res2a_branch2c', data=res2a_branch2b_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn2a_branch2c = mx.symbol.BatchNorm(name='bn2a_branch2c', data=res2a_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale2a_branch2c = bn2a_branch2c
res2a = mx.symbol.broadcast_add(name='res2a', *[scale2a_branch1,scale2a_branch2c] )
res2a_relu = mx.symbol.Activation(name='res2a_relu', data=res2a , act_type='relu')
res2b_branch2a = mx.symbol.Convolution(name='res2b_branch2a', data=res2a_relu , num_filter=64, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn2b_branch2a = mx.symbol.BatchNorm(name='bn2b_branch2a', data=res2b_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale2b_branch2a = bn2b_branch2a
res2b_branch2a_relu = mx.symbol.Activation(name='res2b_branch2a_relu', data=scale2b_branch2a , act_type='relu')
res2b_branch2b = mx.symbol.Convolution(name='res2b_branch2b', data=res2b_branch2a_relu , num_filter=64, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn2b_branch2b = mx.symbol.BatchNorm(name='bn2b_branch2b', data=res2b_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale2b_branch2b = bn2b_branch2b
res2b_branch2b_relu = mx.symbol.Activation(name='res2b_branch2b_relu', data=scale2b_branch2b , act_type='relu')
res2b_branch2c = mx.symbol.Convolution(name='res2b_branch2c', data=res2b_branch2b_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn2b_branch2c = mx.symbol.BatchNorm(name='bn2b_branch2c', data=res2b_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale2b_branch2c = bn2b_branch2c
res2b = mx.symbol.broadcast_add(name='res2b', *[res2a_relu,scale2b_branch2c] )
res2b_relu = mx.symbol.Activation(name='res2b_relu', data=res2b , act_type='relu')
res2c_branch2a = mx.symbol.Convolution(name='res2c_branch2a', data=res2b_relu , num_filter=64, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn2c_branch2a = mx.symbol.BatchNorm(name='bn2c_branch2a', data=res2c_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale2c_branch2a = bn2c_branch2a
res2c_branch2a_relu = mx.symbol.Activation(name='res2c_branch2a_relu', data=scale2c_branch2a , act_type='relu')
res2c_branch2b = mx.symbol.Convolution(name='res2c_branch2b', data=res2c_branch2a_relu , num_filter=64, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn2c_branch2b = mx.symbol.BatchNorm(name='bn2c_branch2b', data=res2c_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale2c_branch2b = bn2c_branch2b
res2c_branch2b_relu = mx.symbol.Activation(name='res2c_branch2b_relu', data=scale2c_branch2b , act_type='relu')
res2c_branch2c = mx.symbol.Convolution(name='res2c_branch2c', data=res2c_branch2b_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn2c_branch2c = mx.symbol.BatchNorm(name='bn2c_branch2c', data=res2c_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale2c_branch2c = bn2c_branch2c
res2c = mx.symbol.broadcast_add(name='res2c', *[res2b_relu,scale2c_branch2c] )
res2c_relu = mx.symbol.Activation(name='res2c_relu', data=res2c , act_type='relu')
res3a_branch1 = mx.symbol.Convolution(name='res3a_branch1', data=res2c_relu , num_filter=512, pad=(0,0), kernel=(1,1), stride=(2,2), no_bias=True)
bn3a_branch1 = mx.symbol.BatchNorm(name='bn3a_branch1', data=res3a_branch1 , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale3a_branch1 = bn3a_branch1
res3a_branch2a = mx.symbol.Convolution(name='res3a_branch2a', data=res2c_relu , num_filter=128, pad=(0,0), kernel=(1,1), stride=(2,2), no_bias=True)
bn3a_branch2a = mx.symbol.BatchNorm(name='bn3a_branch2a', data=res3a_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale3a_branch2a = bn3a_branch2a
res3a_branch2a_relu = mx.symbol.Activation(name='res3a_branch2a_relu', data=scale3a_branch2a , act_type='relu')
res3a_branch2b = mx.symbol.Convolution(name='res3a_branch2b', data=res3a_branch2a_relu , num_filter=128, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn3a_branch2b = mx.symbol.BatchNorm(name='bn3a_branch2b', data=res3a_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale3a_branch2b = bn3a_branch2b
res3a_branch2b_relu = mx.symbol.Activation(name='res3a_branch2b_relu', data=scale3a_branch2b , act_type='relu')
res3a_branch2c = mx.symbol.Convolution(name='res3a_branch2c', data=res3a_branch2b_relu , num_filter=512, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn3a_branch2c = mx.symbol.BatchNorm(name='bn3a_branch2c', data=res3a_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale3a_branch2c = bn3a_branch2c
res3a = mx.symbol.broadcast_add(name='res3a', *[scale3a_branch1,scale3a_branch2c] )
res3a_relu = mx.symbol.Activation(name='res3a_relu', data=res3a , act_type='relu')
res3b1_branch2a = mx.symbol.Convolution(name='res3b1_branch2a', data=res3a_relu , num_filter=128, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn3b1_branch2a = mx.symbol.BatchNorm(name='bn3b1_branch2a', data=res3b1_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale3b1_branch2a = bn3b1_branch2a
res3b1_branch2a_relu = mx.symbol.Activation(name='res3b1_branch2a_relu', data=scale3b1_branch2a , act_type='relu')
res3b1_branch2b = mx.symbol.Convolution(name='res3b1_branch2b', data=res3b1_branch2a_relu , num_filter=128, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn3b1_branch2b = mx.symbol.BatchNorm(name='bn3b1_branch2b', data=res3b1_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale3b1_branch2b = bn3b1_branch2b
res3b1_branch2b_relu = mx.symbol.Activation(name='res3b1_branch2b_relu', data=scale3b1_branch2b , act_type='relu')
res3b1_branch2c = mx.symbol.Convolution(name='res3b1_branch2c', data=res3b1_branch2b_relu , num_filter=512, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn3b1_branch2c = mx.symbol.BatchNorm(name='bn3b1_branch2c', data=res3b1_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale3b1_branch2c = bn3b1_branch2c
res3b1 = mx.symbol.broadcast_add(name='res3b1', *[res3a_relu,scale3b1_branch2c] )
res3b1_relu = mx.symbol.Activation(name='res3b1_relu', data=res3b1 , act_type='relu')
res3b2_branch2a = mx.symbol.Convolution(name='res3b2_branch2a', data=res3b1_relu , num_filter=128, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn3b2_branch2a = mx.symbol.BatchNorm(name='bn3b2_branch2a', data=res3b2_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale3b2_branch2a = bn3b2_branch2a
res3b2_branch2a_relu = mx.symbol.Activation(name='res3b2_branch2a_relu', data=scale3b2_branch2a , act_type='relu')
res3b2_branch2b = mx.symbol.Convolution(name='res3b2_branch2b', data=res3b2_branch2a_relu , num_filter=128, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn3b2_branch2b = mx.symbol.BatchNorm(name='bn3b2_branch2b', data=res3b2_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale3b2_branch2b = bn3b2_branch2b
res3b2_branch2b_relu = mx.symbol.Activation(name='res3b2_branch2b_relu', data=scale3b2_branch2b , act_type='relu')
res3b2_branch2c = mx.symbol.Convolution(name='res3b2_branch2c', data=res3b2_branch2b_relu , num_filter=512, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn3b2_branch2c = mx.symbol.BatchNorm(name='bn3b2_branch2c', data=res3b2_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale3b2_branch2c = bn3b2_branch2c
res3b2 = mx.symbol.broadcast_add(name='res3b2', *[res3b1_relu,scale3b2_branch2c] )
res3b2_relu = mx.symbol.Activation(name='res3b2_relu', data=res3b2 , act_type='relu')
res3b3_branch2a = mx.symbol.Convolution(name='res3b3_branch2a', data=res3b2_relu , num_filter=128, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn3b3_branch2a = mx.symbol.BatchNorm(name='bn3b3_branch2a', data=res3b3_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale3b3_branch2a = bn3b3_branch2a
res3b3_branch2a_relu = mx.symbol.Activation(name='res3b3_branch2a_relu', data=scale3b3_branch2a , act_type='relu')
res3b3_branch2b = mx.symbol.Convolution(name='res3b3_branch2b', data=res3b3_branch2a_relu , num_filter=128, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn3b3_branch2b = mx.symbol.BatchNorm(name='bn3b3_branch2b', data=res3b3_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale3b3_branch2b = bn3b3_branch2b
res3b3_branch2b_relu = mx.symbol.Activation(name='res3b3_branch2b_relu', data=scale3b3_branch2b , act_type='relu')
res3b3_branch2c = mx.symbol.Convolution(name='res3b3_branch2c', data=res3b3_branch2b_relu , num_filter=512, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn3b3_branch2c = mx.symbol.BatchNorm(name='bn3b3_branch2c', data=res3b3_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale3b3_branch2c = bn3b3_branch2c
res3b3 = mx.symbol.broadcast_add(name='res3b3', *[res3b2_relu,scale3b3_branch2c] )
res3b3_relu = mx.symbol.Activation(name='res3b3_relu', data=res3b3 , act_type='relu')
res4a_branch1 = mx.symbol.Convolution(name='res4a_branch1', data=res3b3_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(2,2), no_bias=True)
bn4a_branch1 = mx.symbol.BatchNorm(name='bn4a_branch1', data=res4a_branch1 , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4a_branch1 = bn4a_branch1
res4a_branch2a = mx.symbol.Convolution(name='res4a_branch2a', data=res3b3_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(2,2), no_bias=True)
bn4a_branch2a = mx.symbol.BatchNorm(name='bn4a_branch2a', data=res4a_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4a_branch2a = bn4a_branch2a
res4a_branch2a_relu = mx.symbol.Activation(name='res4a_branch2a_relu', data=scale4a_branch2a , act_type='relu')
res4a_branch2b = mx.symbol.Convolution(name='res4a_branch2b', data=res4a_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4a_branch2b = mx.symbol.BatchNorm(name='bn4a_branch2b', data=res4a_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4a_branch2b = bn4a_branch2b
res4a_branch2b_relu = mx.symbol.Activation(name='res4a_branch2b_relu', data=scale4a_branch2b , act_type='relu')
res4a_branch2c = mx.symbol.Convolution(name='res4a_branch2c', data=res4a_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4a_branch2c = mx.symbol.BatchNorm(name='bn4a_branch2c', data=res4a_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4a_branch2c = bn4a_branch2c
res4a = mx.symbol.broadcast_add(name='res4a', *[scale4a_branch1,scale4a_branch2c] )
res4a_relu = mx.symbol.Activation(name='res4a_relu', data=res4a , act_type='relu')
res4b1_branch2a = mx.symbol.Convolution(name='res4b1_branch2a', data=res4a_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b1_branch2a = mx.symbol.BatchNorm(name='bn4b1_branch2a', data=res4b1_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b1_branch2a = bn4b1_branch2a
res4b1_branch2a_relu = mx.symbol.Activation(name='res4b1_branch2a_relu', data=scale4b1_branch2a , act_type='relu')
res4b1_branch2b = mx.symbol.Convolution(name='res4b1_branch2b', data=res4b1_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b1_branch2b = mx.symbol.BatchNorm(name='bn4b1_branch2b', data=res4b1_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b1_branch2b = bn4b1_branch2b
res4b1_branch2b_relu = mx.symbol.Activation(name='res4b1_branch2b_relu', data=scale4b1_branch2b , act_type='relu')
res4b1_branch2c = mx.symbol.Convolution(name='res4b1_branch2c', data=res4b1_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b1_branch2c = mx.symbol.BatchNorm(name='bn4b1_branch2c', data=res4b1_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b1_branch2c = bn4b1_branch2c
res4b1 = mx.symbol.broadcast_add(name='res4b1', *[res4a_relu,scale4b1_branch2c] )
res4b1_relu = mx.symbol.Activation(name='res4b1_relu', data=res4b1 , act_type='relu')
res4b2_branch2a = mx.symbol.Convolution(name='res4b2_branch2a', data=res4b1_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b2_branch2a = mx.symbol.BatchNorm(name='bn4b2_branch2a', data=res4b2_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b2_branch2a = bn4b2_branch2a
res4b2_branch2a_relu = mx.symbol.Activation(name='res4b2_branch2a_relu', data=scale4b2_branch2a , act_type='relu')
res4b2_branch2b = mx.symbol.Convolution(name='res4b2_branch2b', data=res4b2_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b2_branch2b = mx.symbol.BatchNorm(name='bn4b2_branch2b', data=res4b2_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b2_branch2b = bn4b2_branch2b
res4b2_branch2b_relu = mx.symbol.Activation(name='res4b2_branch2b_relu', data=scale4b2_branch2b , act_type='relu')
res4b2_branch2c = mx.symbol.Convolution(name='res4b2_branch2c', data=res4b2_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b2_branch2c = mx.symbol.BatchNorm(name='bn4b2_branch2c', data=res4b2_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b2_branch2c = bn4b2_branch2c
res4b2 = mx.symbol.broadcast_add(name='res4b2', *[res4b1_relu,scale4b2_branch2c] )
res4b2_relu = mx.symbol.Activation(name='res4b2_relu', data=res4b2 , act_type='relu')
res4b3_branch2a = mx.symbol.Convolution(name='res4b3_branch2a', data=res4b2_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b3_branch2a = mx.symbol.BatchNorm(name='bn4b3_branch2a', data=res4b3_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b3_branch2a = bn4b3_branch2a
res4b3_branch2a_relu = mx.symbol.Activation(name='res4b3_branch2a_relu', data=scale4b3_branch2a , act_type='relu')
res4b3_branch2b = mx.symbol.Convolution(name='res4b3_branch2b', data=res4b3_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b3_branch2b = mx.symbol.BatchNorm(name='bn4b3_branch2b', data=res4b3_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b3_branch2b = bn4b3_branch2b
res4b3_branch2b_relu = mx.symbol.Activation(name='res4b3_branch2b_relu', data=scale4b3_branch2b , act_type='relu')
res4b3_branch2c = mx.symbol.Convolution(name='res4b3_branch2c', data=res4b3_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b3_branch2c = mx.symbol.BatchNorm(name='bn4b3_branch2c', data=res4b3_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b3_branch2c = bn4b3_branch2c
res4b3 = mx.symbol.broadcast_add(name='res4b3', *[res4b2_relu,scale4b3_branch2c] )
res4b3_relu = mx.symbol.Activation(name='res4b3_relu', data=res4b3 , act_type='relu')
res4b4_branch2a = mx.symbol.Convolution(name='res4b4_branch2a', data=res4b3_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b4_branch2a = mx.symbol.BatchNorm(name='bn4b4_branch2a', data=res4b4_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b4_branch2a = bn4b4_branch2a
res4b4_branch2a_relu = mx.symbol.Activation(name='res4b4_branch2a_relu', data=scale4b4_branch2a , act_type='relu')
res4b4_branch2b = mx.symbol.Convolution(name='res4b4_branch2b', data=res4b4_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b4_branch2b = mx.symbol.BatchNorm(name='bn4b4_branch2b', data=res4b4_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b4_branch2b = bn4b4_branch2b
res4b4_branch2b_relu = mx.symbol.Activation(name='res4b4_branch2b_relu', data=scale4b4_branch2b , act_type='relu')
res4b4_branch2c = mx.symbol.Convolution(name='res4b4_branch2c', data=res4b4_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b4_branch2c = mx.symbol.BatchNorm(name='bn4b4_branch2c', data=res4b4_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b4_branch2c = bn4b4_branch2c
res4b4 = mx.symbol.broadcast_add(name='res4b4', *[res4b3_relu,scale4b4_branch2c] )
res4b4_relu = mx.symbol.Activation(name='res4b4_relu', data=res4b4 , act_type='relu')
res4b5_branch2a = mx.symbol.Convolution(name='res4b5_branch2a', data=res4b4_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b5_branch2a = mx.symbol.BatchNorm(name='bn4b5_branch2a', data=res4b5_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b5_branch2a = bn4b5_branch2a
res4b5_branch2a_relu = mx.symbol.Activation(name='res4b5_branch2a_relu', data=scale4b5_branch2a , act_type='relu')
res4b5_branch2b = mx.symbol.Convolution(name='res4b5_branch2b', data=res4b5_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b5_branch2b = mx.symbol.BatchNorm(name='bn4b5_branch2b', data=res4b5_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b5_branch2b = bn4b5_branch2b
res4b5_branch2b_relu = mx.symbol.Activation(name='res4b5_branch2b_relu', data=scale4b5_branch2b , act_type='relu')
res4b5_branch2c = mx.symbol.Convolution(name='res4b5_branch2c', data=res4b5_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b5_branch2c = mx.symbol.BatchNorm(name='bn4b5_branch2c', data=res4b5_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b5_branch2c = bn4b5_branch2c
res4b5 = mx.symbol.broadcast_add(name='res4b5', *[res4b4_relu,scale4b5_branch2c] )
res4b5_relu = mx.symbol.Activation(name='res4b5_relu', data=res4b5 , act_type='relu')
res4b6_branch2a = mx.symbol.Convolution(name='res4b6_branch2a', data=res4b5_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b6_branch2a = mx.symbol.BatchNorm(name='bn4b6_branch2a', data=res4b6_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b6_branch2a = bn4b6_branch2a
res4b6_branch2a_relu = mx.symbol.Activation(name='res4b6_branch2a_relu', data=scale4b6_branch2a , act_type='relu')
res4b6_branch2b = mx.symbol.Convolution(name='res4b6_branch2b', data=res4b6_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b6_branch2b = mx.symbol.BatchNorm(name='bn4b6_branch2b', data=res4b6_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b6_branch2b = bn4b6_branch2b
res4b6_branch2b_relu = mx.symbol.Activation(name='res4b6_branch2b_relu', data=scale4b6_branch2b , act_type='relu')
res4b6_branch2c = mx.symbol.Convolution(name='res4b6_branch2c', data=res4b6_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b6_branch2c = mx.symbol.BatchNorm(name='bn4b6_branch2c', data=res4b6_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b6_branch2c = bn4b6_branch2c
res4b6 = mx.symbol.broadcast_add(name='res4b6', *[res4b5_relu,scale4b6_branch2c] )
res4b6_relu = mx.symbol.Activation(name='res4b6_relu', data=res4b6 , act_type='relu')
res4b7_branch2a = mx.symbol.Convolution(name='res4b7_branch2a', data=res4b6_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b7_branch2a = mx.symbol.BatchNorm(name='bn4b7_branch2a', data=res4b7_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b7_branch2a = bn4b7_branch2a
res4b7_branch2a_relu = mx.symbol.Activation(name='res4b7_branch2a_relu', data=scale4b7_branch2a , act_type='relu')
res4b7_branch2b = mx.symbol.Convolution(name='res4b7_branch2b', data=res4b7_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b7_branch2b = mx.symbol.BatchNorm(name='bn4b7_branch2b', data=res4b7_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b7_branch2b = bn4b7_branch2b
res4b7_branch2b_relu = mx.symbol.Activation(name='res4b7_branch2b_relu', data=scale4b7_branch2b , act_type='relu')
res4b7_branch2c = mx.symbol.Convolution(name='res4b7_branch2c', data=res4b7_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b7_branch2c = mx.symbol.BatchNorm(name='bn4b7_branch2c', data=res4b7_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b7_branch2c = bn4b7_branch2c
res4b7 = mx.symbol.broadcast_add(name='res4b7', *[res4b6_relu,scale4b7_branch2c] )
res4b7_relu = mx.symbol.Activation(name='res4b7_relu', data=res4b7 , act_type='relu')
res4b8_branch2a = mx.symbol.Convolution(name='res4b8_branch2a', data=res4b7_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b8_branch2a = mx.symbol.BatchNorm(name='bn4b8_branch2a', data=res4b8_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b8_branch2a = bn4b8_branch2a
res4b8_branch2a_relu = mx.symbol.Activation(name='res4b8_branch2a_relu', data=scale4b8_branch2a , act_type='relu')
res4b8_branch2b = mx.symbol.Convolution(name='res4b8_branch2b', data=res4b8_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b8_branch2b = mx.symbol.BatchNorm(name='bn4b8_branch2b', data=res4b8_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b8_branch2b = bn4b8_branch2b
res4b8_branch2b_relu = mx.symbol.Activation(name='res4b8_branch2b_relu', data=scale4b8_branch2b , act_type='relu')
res4b8_branch2c = mx.symbol.Convolution(name='res4b8_branch2c', data=res4b8_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b8_branch2c = mx.symbol.BatchNorm(name='bn4b8_branch2c', data=res4b8_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b8_branch2c = bn4b8_branch2c
res4b8 = mx.symbol.broadcast_add(name='res4b8', *[res4b7_relu,scale4b8_branch2c] )
res4b8_relu = mx.symbol.Activation(name='res4b8_relu', data=res4b8 , act_type='relu')
res4b9_branch2a = mx.symbol.Convolution(name='res4b9_branch2a', data=res4b8_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b9_branch2a = mx.symbol.BatchNorm(name='bn4b9_branch2a', data=res4b9_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b9_branch2a = bn4b9_branch2a
res4b9_branch2a_relu = mx.symbol.Activation(name='res4b9_branch2a_relu', data=scale4b9_branch2a , act_type='relu')
res4b9_branch2b = mx.symbol.Convolution(name='res4b9_branch2b', data=res4b9_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b9_branch2b = mx.symbol.BatchNorm(name='bn4b9_branch2b', data=res4b9_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b9_branch2b = bn4b9_branch2b
res4b9_branch2b_relu = mx.symbol.Activation(name='res4b9_branch2b_relu', data=scale4b9_branch2b , act_type='relu')
res4b9_branch2c = mx.symbol.Convolution(name='res4b9_branch2c', data=res4b9_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b9_branch2c = mx.symbol.BatchNorm(name='bn4b9_branch2c', data=res4b9_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b9_branch2c = bn4b9_branch2c
res4b9 = mx.symbol.broadcast_add(name='res4b9', *[res4b8_relu,scale4b9_branch2c] )
res4b9_relu = mx.symbol.Activation(name='res4b9_relu', data=res4b9 , act_type='relu')
res4b10_branch2a = mx.symbol.Convolution(name='res4b10_branch2a', data=res4b9_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b10_branch2a = mx.symbol.BatchNorm(name='bn4b10_branch2a', data=res4b10_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b10_branch2a = bn4b10_branch2a
res4b10_branch2a_relu = mx.symbol.Activation(name='res4b10_branch2a_relu', data=scale4b10_branch2a , act_type='relu')
res4b10_branch2b = mx.symbol.Convolution(name='res4b10_branch2b', data=res4b10_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b10_branch2b = mx.symbol.BatchNorm(name='bn4b10_branch2b', data=res4b10_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b10_branch2b = bn4b10_branch2b
res4b10_branch2b_relu = mx.symbol.Activation(name='res4b10_branch2b_relu', data=scale4b10_branch2b , act_type='relu')
res4b10_branch2c = mx.symbol.Convolution(name='res4b10_branch2c', data=res4b10_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b10_branch2c = mx.symbol.BatchNorm(name='bn4b10_branch2c', data=res4b10_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b10_branch2c = bn4b10_branch2c
res4b10 = mx.symbol.broadcast_add(name='res4b10', *[res4b9_relu,scale4b10_branch2c] )
res4b10_relu = mx.symbol.Activation(name='res4b10_relu', data=res4b10 , act_type='relu')
res4b11_branch2a = mx.symbol.Convolution(name='res4b11_branch2a', data=res4b10_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b11_branch2a = mx.symbol.BatchNorm(name='bn4b11_branch2a', data=res4b11_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b11_branch2a = bn4b11_branch2a
res4b11_branch2a_relu = mx.symbol.Activation(name='res4b11_branch2a_relu', data=scale4b11_branch2a , act_type='relu')
res4b11_branch2b = mx.symbol.Convolution(name='res4b11_branch2b', data=res4b11_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b11_branch2b = mx.symbol.BatchNorm(name='bn4b11_branch2b', data=res4b11_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b11_branch2b = bn4b11_branch2b
res4b11_branch2b_relu = mx.symbol.Activation(name='res4b11_branch2b_relu', data=scale4b11_branch2b , act_type='relu')
res4b11_branch2c = mx.symbol.Convolution(name='res4b11_branch2c', data=res4b11_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b11_branch2c = mx.symbol.BatchNorm(name='bn4b11_branch2c', data=res4b11_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b11_branch2c = bn4b11_branch2c
res4b11 = mx.symbol.broadcast_add(name='res4b11', *[res4b10_relu,scale4b11_branch2c] )
res4b11_relu = mx.symbol.Activation(name='res4b11_relu', data=res4b11 , act_type='relu')
res4b12_branch2a = mx.symbol.Convolution(name='res4b12_branch2a', data=res4b11_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b12_branch2a = mx.symbol.BatchNorm(name='bn4b12_branch2a', data=res4b12_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b12_branch2a = bn4b12_branch2a
res4b12_branch2a_relu = mx.symbol.Activation(name='res4b12_branch2a_relu', data=scale4b12_branch2a , act_type='relu')
res4b12_branch2b = mx.symbol.Convolution(name='res4b12_branch2b', data=res4b12_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b12_branch2b = mx.symbol.BatchNorm(name='bn4b12_branch2b', data=res4b12_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b12_branch2b = bn4b12_branch2b
res4b12_branch2b_relu = mx.symbol.Activation(name='res4b12_branch2b_relu', data=scale4b12_branch2b , act_type='relu')
res4b12_branch2c = mx.symbol.Convolution(name='res4b12_branch2c', data=res4b12_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b12_branch2c = mx.symbol.BatchNorm(name='bn4b12_branch2c', data=res4b12_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b12_branch2c = bn4b12_branch2c
res4b12 = mx.symbol.broadcast_add(name='res4b12', *[res4b11_relu,scale4b12_branch2c] )
res4b12_relu = mx.symbol.Activation(name='res4b12_relu', data=res4b12 , act_type='relu')
res4b13_branch2a = mx.symbol.Convolution(name='res4b13_branch2a', data=res4b12_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b13_branch2a = mx.symbol.BatchNorm(name='bn4b13_branch2a', data=res4b13_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b13_branch2a = bn4b13_branch2a
res4b13_branch2a_relu = mx.symbol.Activation(name='res4b13_branch2a_relu', data=scale4b13_branch2a , act_type='relu')
res4b13_branch2b = mx.symbol.Convolution(name='res4b13_branch2b', data=res4b13_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b13_branch2b = mx.symbol.BatchNorm(name='bn4b13_branch2b', data=res4b13_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b13_branch2b = bn4b13_branch2b
res4b13_branch2b_relu = mx.symbol.Activation(name='res4b13_branch2b_relu', data=scale4b13_branch2b , act_type='relu')
res4b13_branch2c = mx.symbol.Convolution(name='res4b13_branch2c', data=res4b13_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b13_branch2c = mx.symbol.BatchNorm(name='bn4b13_branch2c', data=res4b13_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b13_branch2c = bn4b13_branch2c
res4b13 = mx.symbol.broadcast_add(name='res4b13', *[res4b12_relu,scale4b13_branch2c] )
res4b13_relu = mx.symbol.Activation(name='res4b13_relu', data=res4b13 , act_type='relu')
res4b14_branch2a = mx.symbol.Convolution(name='res4b14_branch2a', data=res4b13_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b14_branch2a = mx.symbol.BatchNorm(name='bn4b14_branch2a', data=res4b14_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b14_branch2a = bn4b14_branch2a
res4b14_branch2a_relu = mx.symbol.Activation(name='res4b14_branch2a_relu', data=scale4b14_branch2a , act_type='relu')
res4b14_branch2b = mx.symbol.Convolution(name='res4b14_branch2b', data=res4b14_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b14_branch2b = mx.symbol.BatchNorm(name='bn4b14_branch2b', data=res4b14_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b14_branch2b = bn4b14_branch2b
res4b14_branch2b_relu = mx.symbol.Activation(name='res4b14_branch2b_relu', data=scale4b14_branch2b , act_type='relu')
res4b14_branch2c = mx.symbol.Convolution(name='res4b14_branch2c', data=res4b14_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b14_branch2c = mx.symbol.BatchNorm(name='bn4b14_branch2c', data=res4b14_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b14_branch2c = bn4b14_branch2c
res4b14 = mx.symbol.broadcast_add(name='res4b14', *[res4b13_relu,scale4b14_branch2c] )
res4b14_relu = mx.symbol.Activation(name='res4b14_relu', data=res4b14 , act_type='relu')
res4b15_branch2a = mx.symbol.Convolution(name='res4b15_branch2a', data=res4b14_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b15_branch2a = mx.symbol.BatchNorm(name='bn4b15_branch2a', data=res4b15_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b15_branch2a = bn4b15_branch2a
res4b15_branch2a_relu = mx.symbol.Activation(name='res4b15_branch2a_relu', data=scale4b15_branch2a , act_type='relu')
res4b15_branch2b = mx.symbol.Convolution(name='res4b15_branch2b', data=res4b15_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b15_branch2b = mx.symbol.BatchNorm(name='bn4b15_branch2b', data=res4b15_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b15_branch2b = bn4b15_branch2b
res4b15_branch2b_relu = mx.symbol.Activation(name='res4b15_branch2b_relu', data=scale4b15_branch2b , act_type='relu')
res4b15_branch2c = mx.symbol.Convolution(name='res4b15_branch2c', data=res4b15_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b15_branch2c = mx.symbol.BatchNorm(name='bn4b15_branch2c', data=res4b15_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b15_branch2c = bn4b15_branch2c
res4b15 = mx.symbol.broadcast_add(name='res4b15', *[res4b14_relu,scale4b15_branch2c] )
res4b15_relu = mx.symbol.Activation(name='res4b15_relu', data=res4b15 , act_type='relu')
res4b16_branch2a = mx.symbol.Convolution(name='res4b16_branch2a', data=res4b15_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b16_branch2a = mx.symbol.BatchNorm(name='bn4b16_branch2a', data=res4b16_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b16_branch2a = bn4b16_branch2a
res4b16_branch2a_relu = mx.symbol.Activation(name='res4b16_branch2a_relu', data=scale4b16_branch2a , act_type='relu')
res4b16_branch2b = mx.symbol.Convolution(name='res4b16_branch2b', data=res4b16_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b16_branch2b = mx.symbol.BatchNorm(name='bn4b16_branch2b', data=res4b16_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b16_branch2b = bn4b16_branch2b
res4b16_branch2b_relu = mx.symbol.Activation(name='res4b16_branch2b_relu', data=scale4b16_branch2b , act_type='relu')
res4b16_branch2c = mx.symbol.Convolution(name='res4b16_branch2c', data=res4b16_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b16_branch2c = mx.symbol.BatchNorm(name='bn4b16_branch2c', data=res4b16_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b16_branch2c = bn4b16_branch2c
res4b16 = mx.symbol.broadcast_add(name='res4b16', *[res4b15_relu,scale4b16_branch2c] )
res4b16_relu = mx.symbol.Activation(name='res4b16_relu', data=res4b16 , act_type='relu')
res4b17_branch2a = mx.symbol.Convolution(name='res4b17_branch2a', data=res4b16_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b17_branch2a = mx.symbol.BatchNorm(name='bn4b17_branch2a', data=res4b17_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b17_branch2a = bn4b17_branch2a
res4b17_branch2a_relu = mx.symbol.Activation(name='res4b17_branch2a_relu', data=scale4b17_branch2a , act_type='relu')
res4b17_branch2b = mx.symbol.Convolution(name='res4b17_branch2b', data=res4b17_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b17_branch2b = mx.symbol.BatchNorm(name='bn4b17_branch2b', data=res4b17_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b17_branch2b = bn4b17_branch2b
res4b17_branch2b_relu = mx.symbol.Activation(name='res4b17_branch2b_relu', data=scale4b17_branch2b , act_type='relu')
res4b17_branch2c = mx.symbol.Convolution(name='res4b17_branch2c', data=res4b17_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b17_branch2c = mx.symbol.BatchNorm(name='bn4b17_branch2c', data=res4b17_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b17_branch2c = bn4b17_branch2c
res4b17 = mx.symbol.broadcast_add(name='res4b17', *[res4b16_relu,scale4b17_branch2c] )
res4b17_relu = mx.symbol.Activation(name='res4b17_relu', data=res4b17 , act_type='relu')
res4b18_branch2a = mx.symbol.Convolution(name='res4b18_branch2a', data=res4b17_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b18_branch2a = mx.symbol.BatchNorm(name='bn4b18_branch2a', data=res4b18_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b18_branch2a = bn4b18_branch2a
res4b18_branch2a_relu = mx.symbol.Activation(name='res4b18_branch2a_relu', data=scale4b18_branch2a , act_type='relu')
res4b18_branch2b = mx.symbol.Convolution(name='res4b18_branch2b', data=res4b18_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b18_branch2b = mx.symbol.BatchNorm(name='bn4b18_branch2b', data=res4b18_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b18_branch2b = bn4b18_branch2b
res4b18_branch2b_relu = mx.symbol.Activation(name='res4b18_branch2b_relu', data=scale4b18_branch2b , act_type='relu')
res4b18_branch2c = mx.symbol.Convolution(name='res4b18_branch2c', data=res4b18_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b18_branch2c = mx.symbol.BatchNorm(name='bn4b18_branch2c', data=res4b18_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b18_branch2c = bn4b18_branch2c
res4b18 = mx.symbol.broadcast_add(name='res4b18', *[res4b17_relu,scale4b18_branch2c] )
res4b18_relu = mx.symbol.Activation(name='res4b18_relu', data=res4b18 , act_type='relu')
res4b19_branch2a = mx.symbol.Convolution(name='res4b19_branch2a', data=res4b18_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b19_branch2a = mx.symbol.BatchNorm(name='bn4b19_branch2a', data=res4b19_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b19_branch2a = bn4b19_branch2a
res4b19_branch2a_relu = mx.symbol.Activation(name='res4b19_branch2a_relu', data=scale4b19_branch2a , act_type='relu')
res4b19_branch2b = mx.symbol.Convolution(name='res4b19_branch2b', data=res4b19_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b19_branch2b = mx.symbol.BatchNorm(name='bn4b19_branch2b', data=res4b19_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b19_branch2b = bn4b19_branch2b
res4b19_branch2b_relu = mx.symbol.Activation(name='res4b19_branch2b_relu', data=scale4b19_branch2b , act_type='relu')
res4b19_branch2c = mx.symbol.Convolution(name='res4b19_branch2c', data=res4b19_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b19_branch2c = mx.symbol.BatchNorm(name='bn4b19_branch2c', data=res4b19_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b19_branch2c = bn4b19_branch2c
res4b19 = mx.symbol.broadcast_add(name='res4b19', *[res4b18_relu,scale4b19_branch2c] )
res4b19_relu = mx.symbol.Activation(name='res4b19_relu', data=res4b19 , act_type='relu')
res4b20_branch2a = mx.symbol.Convolution(name='res4b20_branch2a', data=res4b19_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b20_branch2a = mx.symbol.BatchNorm(name='bn4b20_branch2a', data=res4b20_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b20_branch2a = bn4b20_branch2a
res4b20_branch2a_relu = mx.symbol.Activation(name='res4b20_branch2a_relu', data=scale4b20_branch2a , act_type='relu')
res4b20_branch2b = mx.symbol.Convolution(name='res4b20_branch2b', data=res4b20_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b20_branch2b = mx.symbol.BatchNorm(name='bn4b20_branch2b', data=res4b20_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b20_branch2b = bn4b20_branch2b
res4b20_branch2b_relu = mx.symbol.Activation(name='res4b20_branch2b_relu', data=scale4b20_branch2b , act_type='relu')
res4b20_branch2c = mx.symbol.Convolution(name='res4b20_branch2c', data=res4b20_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b20_branch2c = mx.symbol.BatchNorm(name='bn4b20_branch2c', data=res4b20_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b20_branch2c = bn4b20_branch2c
res4b20 = mx.symbol.broadcast_add(name='res4b20', *[res4b19_relu,scale4b20_branch2c] )
res4b20_relu = mx.symbol.Activation(name='res4b20_relu', data=res4b20 , act_type='relu')
res4b21_branch2a = mx.symbol.Convolution(name='res4b21_branch2a', data=res4b20_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b21_branch2a = mx.symbol.BatchNorm(name='bn4b21_branch2a', data=res4b21_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b21_branch2a = bn4b21_branch2a
res4b21_branch2a_relu = mx.symbol.Activation(name='res4b21_branch2a_relu', data=scale4b21_branch2a , act_type='relu')
res4b21_branch2b = mx.symbol.Convolution(name='res4b21_branch2b', data=res4b21_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b21_branch2b = mx.symbol.BatchNorm(name='bn4b21_branch2b', data=res4b21_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b21_branch2b = bn4b21_branch2b
res4b21_branch2b_relu = mx.symbol.Activation(name='res4b21_branch2b_relu', data=scale4b21_branch2b , act_type='relu')
res4b21_branch2c = mx.symbol.Convolution(name='res4b21_branch2c', data=res4b21_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b21_branch2c = mx.symbol.BatchNorm(name='bn4b21_branch2c', data=res4b21_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b21_branch2c = bn4b21_branch2c
res4b21 = mx.symbol.broadcast_add(name='res4b21', *[res4b20_relu,scale4b21_branch2c] )
res4b21_relu = mx.symbol.Activation(name='res4b21_relu', data=res4b21 , act_type='relu')
res4b22_branch2a = mx.symbol.Convolution(name='res4b22_branch2a', data=res4b21_relu , num_filter=256, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b22_branch2a = mx.symbol.BatchNorm(name='bn4b22_branch2a', data=res4b22_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b22_branch2a = bn4b22_branch2a
res4b22_branch2a_relu = mx.symbol.Activation(name='res4b22_branch2a_relu', data=scale4b22_branch2a , act_type='relu')
res4b22_branch2b = mx.symbol.Convolution(name='res4b22_branch2b', data=res4b22_branch2a_relu , num_filter=256, pad=(1,1), kernel=(3,3), stride=(1,1), no_bias=True)
bn4b22_branch2b = mx.symbol.BatchNorm(name='bn4b22_branch2b', data=res4b22_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b22_branch2b = bn4b22_branch2b
res4b22_branch2b_relu = mx.symbol.Activation(name='res4b22_branch2b_relu', data=scale4b22_branch2b , act_type='relu')
res4b22_branch2c = mx.symbol.Convolution(name='res4b22_branch2c', data=res4b22_branch2b_relu , num_filter=1024, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn4b22_branch2c = mx.symbol.BatchNorm(name='bn4b22_branch2c', data=res4b22_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale4b22_branch2c = bn4b22_branch2c
res4b22 = mx.symbol.broadcast_add(name='res4b22', *[res4b21_relu,scale4b22_branch2c] )
res4b22_relu = mx.symbol.Activation(name='res4b22_relu', data=res4b22 , act_type='relu')
res5a_branch1 = mx.symbol.Convolution(name='res5a_branch1', data=res4b22_relu , num_filter=2048, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn5a_branch1 = mx.symbol.BatchNorm(name='bn5a_branch1', data=res5a_branch1 , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale5a_branch1 = bn5a_branch1
res5a_branch2a = mx.symbol.Convolution(name='res5a_branch2a', data=res4b22_relu , num_filter=512, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn5a_branch2a = mx.symbol.BatchNorm(name='bn5a_branch2a', data=res5a_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale5a_branch2a = bn5a_branch2a
res5a_branch2a_relu = mx.symbol.Activation(name='res5a_branch2a_relu', data=scale5a_branch2a , act_type='relu')
res5a_branch2b = mx.symbol.Convolution(name='res5a_branch2b', data=res5a_branch2a_relu , num_filter=512, pad=(2,2), dilate=(2,2), kernel=(3,3), stride=(1,1), no_bias=True)
bn5a_branch2b = mx.symbol.BatchNorm(name='bn5a_branch2b', data=res5a_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale5a_branch2b = bn5a_branch2b
res5a_branch2b_relu = mx.symbol.Activation(name='res5a_branch2b_relu', data=scale5a_branch2b , act_type='relu')
res5a_branch2c = mx.symbol.Convolution(name='res5a_branch2c', data=res5a_branch2b_relu , num_filter=2048, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn5a_branch2c = mx.symbol.BatchNorm(name='bn5a_branch2c', data=res5a_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale5a_branch2c = bn5a_branch2c
res5a = mx.symbol.broadcast_add(name='res5a', *[scale5a_branch1,scale5a_branch2c] )
res5a_relu = mx.symbol.Activation(name='res5a_relu', data=res5a , act_type='relu')
res5b_branch2a = mx.symbol.Convolution(name='res5b_branch2a', data=res5a_relu , num_filter=512, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn5b_branch2a = mx.symbol.BatchNorm(name='bn5b_branch2a', data=res5b_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale5b_branch2a = bn5b_branch2a
res5b_branch2a_relu = mx.symbol.Activation(name='res5b_branch2a_relu', data=scale5b_branch2a , act_type='relu')
res5b_branch2b = mx.symbol.Convolution(name='res5b_branch2b', data=res5b_branch2a_relu , num_filter=512, pad=(2,2), dilate=(2,2), kernel=(3,3), stride=(1,1), no_bias=True)
bn5b_branch2b = mx.symbol.BatchNorm(name='bn5b_branch2b', data=res5b_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale5b_branch2b = bn5b_branch2b
res5b_branch2b_relu = mx.symbol.Activation(name='res5b_branch2b_relu', data=scale5b_branch2b , act_type='relu')
res5b_branch2c = mx.symbol.Convolution(name='res5b_branch2c', data=res5b_branch2b_relu , num_filter=2048, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn5b_branch2c = mx.symbol.BatchNorm(name='bn5b_branch2c', data=res5b_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale5b_branch2c = bn5b_branch2c
res5b = mx.symbol.broadcast_add(name='res5b', *[res5a_relu,scale5b_branch2c] )
res5b_relu = mx.symbol.Activation(name='res5b_relu', data=res5b , act_type='relu')
res5c_branch2a = mx.symbol.Convolution(name='res5c_branch2a', data=res5b_relu , num_filter=512, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn5c_branch2a = mx.symbol.BatchNorm(name='bn5c_branch2a', data=res5c_branch2a , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale5c_branch2a = bn5c_branch2a
res5c_branch2a_relu = mx.symbol.Activation(name='res5c_branch2a_relu', data=scale5c_branch2a , act_type='relu')
res5c_branch2b = mx.symbol.Convolution(name='res5c_branch2b', data=res5c_branch2a_relu , num_filter=512, pad=(2,2), dilate=(2,2), kernel=(3,3), stride=(1,1), no_bias=True)
bn5c_branch2b = mx.symbol.BatchNorm(name='bn5c_branch2b', data=res5c_branch2b , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale5c_branch2b = bn5c_branch2b
res5c_branch2b_relu = mx.symbol.Activation(name='res5c_branch2b_relu', data=scale5c_branch2b , act_type='relu')
res5c_branch2c = mx.symbol.Convolution(name='res5c_branch2c', data=res5c_branch2b_relu , num_filter=2048, pad=(0,0), kernel=(1,1), stride=(1,1), no_bias=True)
bn5c_branch2c = mx.symbol.BatchNorm(name='bn5c_branch2c', data=res5c_branch2c , use_global_stats=self.use_global_stats, eps=self.eps, fix_gamma=False)
scale5c_branch2c = bn5c_branch2c
res5c = mx.symbol.broadcast_add(name='res5c', *[res5b_relu,scale5c_branch2c] )
res5c_relu = mx.symbol.Activation(name='res5c_relu', data=res5c , act_type='relu')
feat_conv_3x3 = mx.sym.Convolution(
data=res5c_relu, kernel=(3, 3), pad=(6, 6), dilate=(6, 6), num_filter=1024, name="feat_conv_3x3")
feat_conv_3x3_relu = mx.sym.Activation(data=feat_conv_3x3, act_type="relu", name="feat_conv_3x3_relu")
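# Descriptive note (added): a dilated 3x3 convolution on top of res5c produces the
# 1024-channel feature map that is shared by the detection heads defined below.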
return feat_conv_3x3_relu
def get_train_symbol(self, cfg):
# config aliases for convenience
num_classes = cfg.dataset.NUM_CLASSES
num_reg_classes = (2 if cfg.CLASS_AGNOSTIC else num_classes)
num_anchors = cfg.network.NUM_ANCHORS
# data = mx.sym.Variable(name="data") # OK
data_ref = mx.sym.Variable(name="data_ref") # OK
# if non-key frame, eq_flag == 0; if key frame, eq_flag == 1
eq_flag = mx.sym.Variable(name="eq_flag") # OK
im_info = mx.sym.Variable(name="im_info") # OK
gt_boxes = mx.sym.Variable(name="gt_boxes") # OK
rpn_label = mx.sym.Variable(name='label') # OK
rpn_bbox_target = mx.sym.Variable(name='bbox_target') # OK
rpn_bbox_weight = mx.sym.Variable(name='bbox_weight') # OK
motion_vector = mx.sym.Variable(name='motion_vector') # TODO
motion_vector_scale = mx.sym.Convolution(name='motion_vector_scale', data=motion_vector , num_filter=2, pad=(0,0), kernel=(1,1), stride=(1,1))
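# Descriptive note (added): a learnable 1x1 convolution that rescales the raw 2-channel
# motion vectors into a 2-channel flow field. Note that the grid generator below currently
# takes the raw motion_vector, so this scaled version only reaches the output group.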
# shared convolutional layers
conv_feat = self.get_resnet_v1(data_ref)
# flow_grid = mx.sym.GridGenerator(data=motion_vector_scale, transform_type='warp', name='flow_grid')
flow_grid = mx.sym.GridGenerator(data=motion_vector, transform_type='warp', name='flow_grid')
warp_conv_feat = mx.sym.BilinearSampler(data=conv_feat, grid=flow_grid, name='warping_feat')
select_conv_feat = mx.sym.take(mx.sym.Concat(*[warp_conv_feat, conv_feat], dim=0), eq_flag)
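# Descriptive note (added): eq_flag selects the feature source along the concatenated batch
# axis: index 0 (non-key frame) picks the warped reference features, index 1 (key frame)
# picks the directly computed ones.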
conv_feats = mx.sym.SliceChannel(select_conv_feat, axis=1, num_outputs=2)
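# Descriptive note (added): the 1024-channel feature map is split into two 512-channel
# halves; conv_feats[0] feeds the RPN head and conv_feats[1] feeds the R-FCN head.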
# RPN layers
rpn_feat = conv_feats[0]
rpn_cls_score = mx.sym.Convolution(
data=rpn_feat, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors, name="rpn_cls_score")
rpn_bbox_pred = mx.sym.Convolution(
data=rpn_feat, kernel=(1, 1), pad=(0, 0), num_filter=4 * num_anchors, name="rpn_bbox_pred")
# prepare rpn data
rpn_cls_score_reshape = mx.sym.Reshape(
data=rpn_cls_score, shape=(0, 2, -1, 0), name="rpn_cls_score_reshape")
# classification
rpn_cls_prob = mx.sym.SoftmaxOutput(data=rpn_cls_score_reshape, label=rpn_label, multi_output=True,
normalization='valid', use_ignore=True, ignore_label=-1, name="rpn_cls_prob")
# bounding box regression
if cfg.network.NORMALIZE_RPN:
rpn_bbox_loss_ = rpn_bbox_weight * mx.sym.smooth_l1(name='rpn_bbox_loss_', scalar=1.0, data=(rpn_bbox_pred - rpn_bbox_target))
rpn_bbox_pred = mx.sym.Custom(
bbox_pred=rpn_bbox_pred, op_type='rpn_inv_normalize', num_anchors=num_anchors,
bbox_mean=cfg.network.ANCHOR_MEANS, bbox_std=cfg.network.ANCHOR_STDS)
else:
rpn_bbox_loss_ = rpn_bbox_weight * mx.sym.smooth_l1(name='rpn_bbox_loss_', scalar=3.0, data=(rpn_bbox_pred - rpn_bbox_target))
rpn_bbox_loss = mx.sym.MakeLoss(name='rpn_bbox_loss', data=rpn_bbox_loss_, grad_scale=1.0 / cfg.TRAIN.RPN_BATCH_SIZE)
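# Descriptive note (added): smooth-L1 RPN regression loss, averaged over the RPN batch via
# grad_scale; the scalar (1.0 vs 3.0) sets the smooth-L1 transition point. When NORMALIZE_RPN
# is set, the custom 'rpn_inv_normalize' op presumably un-normalizes the predictions with
# ANCHOR_MEANS/ANCHOR_STDS before they reach the proposal layer.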
# ROI proposal
rpn_cls_act = mx.sym.SoftmaxActivation(
data=rpn_cls_score_reshape, mode="channel", name="rpn_cls_act")
rpn_cls_act_reshape = mx.sym.Reshape(
data=rpn_cls_act, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_act_reshape')
if cfg.TRAIN.CXX_PROPOSAL:
rois = mx.contrib.sym.Proposal(
cls_prob=rpn_cls_act_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois',
feature_stride=cfg.network.RPN_FEAT_STRIDE, scales=tuple(cfg.network.ANCHOR_SCALES), ratios=tuple(cfg.network.ANCHOR_RATIOS),
rpn_pre_nms_top_n=cfg.TRAIN.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=cfg.TRAIN.RPN_POST_NMS_TOP_N,
threshold=cfg.TRAIN.RPN_NMS_THRESH, rpn_min_size=cfg.TRAIN.RPN_MIN_SIZE)
else:
rois = mx.sym.Custom(
cls_prob=rpn_cls_act_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois',
op_type='proposal', feat_stride=cfg.network.RPN_FEAT_STRIDE,
scales=tuple(cfg.network.ANCHOR_SCALES), ratios=tuple(cfg.network.ANCHOR_RATIOS),
rpn_pre_nms_top_n=cfg.TRAIN.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=cfg.TRAIN.RPN_POST_NMS_TOP_N,
threshold=cfg.TRAIN.RPN_NMS_THRESH, rpn_min_size=cfg.TRAIN.RPN_MIN_SIZE)
# ROI proposal target
gt_boxes_reshape = mx.sym.Reshape(data=gt_boxes, shape=(-1, 5), name='gt_boxes_reshape')
rois, label, bbox_target, bbox_weight = mx.sym.Custom(rois=rois, gt_boxes=gt_boxes_reshape,
op_type='proposal_target',
num_classes=num_reg_classes,
batch_images=cfg.TRAIN.BATCH_IMAGES,
batch_rois=cfg.TRAIN.BATCH_ROIS,
cfg=cPickle.dumps(cfg),
fg_fraction=cfg.TRAIN.FG_FRACTION)
# res5
rfcn_feat = conv_feats[1]
rfcn_cls = mx.sym.Convolution(data=rfcn_feat, kernel=(1, 1), num_filter=7*7*num_classes, name="rfcn_cls")
rfcn_bbox = mx.sym.Convolution(data=rfcn_feat, kernel=(1, 1), num_filter=7*7*4*num_reg_classes, name="rfcn_bbox")
psroipooled_cls_rois = mx.contrib.sym.PSROIPooling(name='psroipooled_cls_rois', data=rfcn_cls, rois=rois, group_size=7,pooled_size=7,
output_dim=num_classes, spatial_scale=0.0625)
psroipooled_loc_rois = mx.contrib.sym.PSROIPooling(name='psroipooled_loc_rois', data=rfcn_bbox, rois=rois, group_size=7,pooled_size=7,
output_dim=8, spatial_scale=0.0625)
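# Descriptive note (added): position-sensitive ROI pooling - each of the 7x7 spatial bins reads
# its own group of channels from the score maps; the global average pooling below collapses the
# 7x7 bins into per-ROI class scores and box offsets. The hard-coded output_dim=8 corresponds to
# 4 coordinates x 2 regression classes, i.e. the class-agnostic setting.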
cls_score = mx.sym.Pooling(name='ave_cls_scors_rois', data=psroipooled_cls_rois, pool_type='avg', global_pool=True, kernel=(7, 7))
bbox_pred = mx.sym.Pooling(name='ave_bbox_pred_rois', data=psroipooled_loc_rois, pool_type='avg', global_pool=True, kernel=(7, 7))
cls_score = mx.sym.Reshape(name='cls_score_reshape', data=cls_score, shape=(-1, num_classes))
bbox_pred = mx.sym.Reshape(name='bbox_pred_reshape', data=bbox_pred, shape=(-1, 4 * num_reg_classes))
bbox_pred_for_train_mAP = mx.sym.Reshape(data=bbox_pred, shape=(cfg.TEST.BATCH_IMAGES, -1, 4 * num_reg_classes), name='bbox_pred_for_train_mAP')
# classification
if cfg.TRAIN.ENABLE_OHEM:
print 'use ohem!'
labels_ohem, bbox_weights_ohem = mx.sym.Custom(op_type='BoxAnnotatorOHEM', num_classes=num_classes,
num_reg_classes=num_reg_classes, roi_per_img=cfg.TRAIN.BATCH_ROIS_OHEM,
cls_score=cls_score, bbox_pred=bbox_pred, labels=label,
bbox_targets=bbox_target, bbox_weights=bbox_weight)
cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=labels_ohem, normalization='valid', use_ignore=True, ignore_label=-1)
bbox_loss_ = bbox_weights_ohem * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target))
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / cfg.TRAIN.BATCH_ROIS_OHEM)
rcnn_label = labels_ohem
else:
cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, normalization='valid')
bbox_loss_ = bbox_weight * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target))
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / cfg.TRAIN.BATCH_ROIS)
rcnn_label = label
# reshape output
rcnn_label = mx.sym.Reshape(data=rcnn_label, shape=(cfg.TRAIN.BATCH_IMAGES, -1), name='label_reshape')
cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TRAIN.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
bbox_loss = mx.sym.Reshape(data=bbox_loss, shape=(cfg.TRAIN.BATCH_IMAGES, -1, 4 * num_reg_classes), name='bbox_loss_reshape')
group = mx.sym.Group([rpn_cls_prob, rpn_bbox_loss, cls_prob, bbox_loss, mx.sym.BlockGrad(rcnn_label),
mx.sym.BlockGrad(bbox_pred), mx.sym.BlockGrad(bbox_pred_for_train_mAP), mx.sym.BlockGrad(rois),
mx.sym.BlockGrad(im_info), mx.sym.BlockGrad(data_ref), motion_vector_scale])
self.sym = group
return group
def get_key_test_symbol(self, cfg):
# config aliases for convenience
num_classes = cfg.dataset.NUM_CLASSES
num_reg_classes = (2 if cfg.CLASS_AGNOSTIC else num_classes)
num_anchors = cfg.network.NUM_ANCHORS
data = mx.sym.Variable(name="data")
im_info = mx.sym.Variable(name="im_info")
data_key = mx.sym.Variable(name="data_key")
# motion_vector = mx.sym.Variable(name='motion_vector')
# feat_key = mx.sym.Variable(name="feat_key")
# shared convolutional layers
conv_feat = self.get_resnet_v1(data)
conv_feats = mx.sym.SliceChannel(conv_feat, axis=1, num_outputs=2)
# RPN
rpn_feat = conv_feats[0]
rpn_cls_score = mx.sym.Convolution(
data=rpn_feat, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors, name="rpn_cls_score")
rpn_bbox_pred = mx.sym.Convolution(
data=rpn_feat, kernel=(1, 1), pad=(0, 0), num_filter=4 * num_anchors, name="rpn_bbox_pred")
if cfg.network.NORMALIZE_RPN:
rpn_bbox_pred = mx.sym.Custom(
bbox_pred=rpn_bbox_pred, op_type='rpn_inv_normalize', num_anchors=num_anchors,
bbox_mean=cfg.network.ANCHOR_MEANS, bbox_std=cfg.network.ANCHOR_STDS)
# ROI Proposal
rpn_cls_score_reshape = mx.sym.Reshape(
data=rpn_cls_score, shape=(0, 2, -1, 0), name="rpn_cls_score_reshape")
rpn_cls_prob = mx.sym.SoftmaxActivation(
data=rpn_cls_score_reshape, mode="channel", name="rpn_cls_prob")
rpn_cls_prob_reshape = mx.sym.Reshape(
data=rpn_cls_prob, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_prob_reshape')
if cfg.TEST.CXX_PROPOSAL:
rois = mx.contrib.sym.Proposal(
cls_prob=rpn_cls_prob_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois',
feature_stride=cfg.network.RPN_FEAT_STRIDE, scales=tuple(cfg.network.ANCHOR_SCALES),
ratios=tuple(cfg.network.ANCHOR_RATIOS),
rpn_pre_nms_top_n=cfg.TEST.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=cfg.TEST.RPN_POST_NMS_TOP_N,
threshold=cfg.TEST.RPN_NMS_THRESH, rpn_min_size=cfg.TEST.RPN_MIN_SIZE)
else:
rois = mx.sym.Custom(
cls_prob=rpn_cls_prob_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois',
op_type='proposal', feat_stride=cfg.network.RPN_FEAT_STRIDE,
scales=tuple(cfg.network.ANCHOR_SCALES), ratios=tuple(cfg.network.ANCHOR_RATIOS),
rpn_pre_nms_top_n=cfg.TEST.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=cfg.TEST.RPN_POST_NMS_TOP_N,
threshold=cfg.TEST.RPN_NMS_THRESH, rpn_min_size=cfg.TEST.RPN_MIN_SIZE)
# res5
rfcn_feat = conv_feats[1]
rfcn_cls = mx.sym.Convolution(data=rfcn_feat, kernel=(1, 1), num_filter=7*7*num_classes, name="rfcn_cls")
rfcn_bbox = mx.sym.Convolution(data=rfcn_feat, kernel=(1, 1), num_filter=7*7*4*num_reg_classes, name="rfcn_bbox")
psroipooled_cls_rois = mx.contrib.sym.PSROIPooling(name='psroipooled_cls_rois', data=rfcn_cls, rois=rois, group_size=7, pooled_size=7,
output_dim=num_classes, spatial_scale=0.0625)
psroipooled_loc_rois = mx.contrib.sym.PSROIPooling(name='psroipooled_loc_rois', data=rfcn_bbox, rois=rois, group_size=7, pooled_size=7,
output_dim=8, spatial_scale=0.0625)
cls_score1 = mx.sym.Pooling(name='ave_cls_scors_rois', data=psroipooled_cls_rois, pool_type='avg', global_pool=True, kernel=(7, 7))
bbox_pred1 = mx.sym.Pooling(name='ave_bbox_pred_rois', data=psroipooled_loc_rois, pool_type='avg', global_pool=True, kernel=(7, 7))
# classification
cls_score = mx.sym.Reshape(name='cls_score_reshape', data=cls_score1, shape=(-1, num_classes))
cls_prob = mx.sym.SoftmaxActivation(name='cls_prob', data=cls_score)
# bounding box regression
bbox_pred2 = mx.sym.Reshape(name='bbox_pred_reshape2', data=bbox_pred1, shape=(-1, 4 * num_reg_classes))
# reshape output
cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TEST.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
bbox_pred = mx.sym.Reshape(data=bbox_pred2, shape=(cfg.TEST.BATCH_IMAGES, -1, 4 * num_reg_classes), name='bbox_pred_reshape')
# group output
group = mx.sym.Group([data_key, bbox_pred, bbox_pred1, bbox_pred2, conv_feat, rois, cls_prob, rpn_cls_score, rpn_bbox_pred, rpn_cls_prob, rfcn_cls, rfcn_bbox, cls_score1])
self.sym = group
return group
def get_cur_test_symbol(self, cfg):
# config aliases for convenience
num_classes = cfg.dataset.NUM_CLASSES
num_reg_classes = (2 if cfg.CLASS_AGNOSTIC else num_classes)
num_anchors = cfg.network.NUM_ANCHORS
im_info = mx.sym.Variable(name="im_info")
motion_vector = mx.sym.Variable(name='motion_vector')
conv_feat = mx.sym.Variable(name="feat_key")
'''
for i in range(9):
motion_vector = mx.symbol.concat(motion_vector, motion_vector, dim=1)
conv_feat = mx.symbol.concat(conv_feat, motion_vector, dim=1)
conv_feat = mx.sym.SliceChannel(conv_feat, axis=1, num_outputs=2)[0]
'''
motion_vector_scale = mx.symbol.Convolution(name='motion_vector_scale', data=motion_vector , num_filter=2, pad=(0,0), kernel=(1,1), stride=(1,1))
'''
motion_vector_scale = mx.symbol.LeakyReLU(name='motion_ReLU1', data=motion_vector_scale , act_type='leaky', slope=0.1)
motion_vector_scale = mx.symbol.Convolution(name='motion_vector_scale2', data=motion_vector_scale , num_filter=16, pad=(1,1), kernel=(3,3), stride=(1,1))
motion_vector_scale = mx.symbol.LeakyReLU(name='motion_ReLU2', data=motion_vector_scale , act_type='leaky', slope=0.1)
motion_vector_scale = mx.symbol.Convolution(name='motion_vector_scale3', data=motion_vector_scale , num_filter=32, pad=(1,1), kernel=(3,3), stride=(1,1))
motion_vector_scale = mx.symbol.LeakyReLU(name='motion_ReLU3', data=motion_vector_scale , act_type='leaky', slope=0.1)
motion_vector_scale = mx.symbol.Convolution(name='motion_vector_scale4', data=motion_vector_scale , num_filter=16, pad=(1,1), kernel=(3,3), stride=(1,1))
motion_vector_scale = mx.symbol.LeakyReLU(name='motion_ReLU4', data=motion_vector_scale , act_type='leaky', slope=0.1)
motion_vector_scale = mx.symbol.Convolution(name='motion_vector_scale5', data=motion_vector_scale , num_filter=2, pad=(1,1), kernel=(3,3), stride=(1,1))
'''
# shared convolutional layers
flow_grid = mx.sym.GridGenerator(data=motion_vector_scale, transform_type='warp', name='flow_grid')
conv_feat = mx.sym.BilinearSampler(data=conv_feat, grid=flow_grid, name='warping_feat')
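# Descriptive note (added): non-key frames skip the backbone entirely; the cached key-frame
# features (feat_key) are warped to the current frame with the rescaled motion vectors as flow.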
conv_feats = mx.sym.SliceChannel(conv_feat, axis=1, num_outputs=2)
# RPN
rpn_feat = conv_feats[0]
rpn_cls_score = mx.sym.Convolution(
data=rpn_feat, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors, name="rpn_cls_score")
rpn_bbox_pred = mx.sym.Convolution(
data=rpn_feat, kernel=(1, 1), pad=(0, 0), num_filter=4 * num_anchors, name="rpn_bbox_pred")
if cfg.network.NORMALIZE_RPN:
rpn_bbox_pred = mx.sym.Custom(
bbox_pred=rpn_bbox_pred, op_type='rpn_inv_normalize', num_anchors=num_anchors,
bbox_mean=cfg.network.ANCHOR_MEANS, bbox_std=cfg.network.ANCHOR_STDS)
# ROI Proposal
rpn_cls_score_reshape = mx.sym.Reshape(
data=rpn_cls_score, shape=(0, 2, -1, 0), name="rpn_cls_score_reshape")
rpn_cls_prob = mx.sym.SoftmaxActivation(
data=rpn_cls_score_reshape, mode="channel", name="rpn_cls_prob")
rpn_cls_prob_reshape = mx.sym.Reshape(
data=rpn_cls_prob, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_prob_reshape')
if cfg.TEST.CXX_PROPOSAL:
rois = mx.contrib.sym.Proposal(
cls_prob=rpn_cls_prob_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois',
feature_stride=cfg.network.RPN_FEAT_STRIDE, scales=tuple(cfg.network.ANCHOR_SCALES),
ratios=tuple(cfg.network.ANCHOR_RATIOS),
rpn_pre_nms_top_n=cfg.TEST.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=cfg.TEST.RPN_POST_NMS_TOP_N,
threshold=cfg.TEST.RPN_NMS_THRESH, rpn_min_size=cfg.TEST.RPN_MIN_SIZE)
else:
rois = mx.sym.Custom(
cls_prob=rpn_cls_prob_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois',
op_type='proposal', feat_stride=cfg.network.RPN_FEAT_STRIDE,
scales=tuple(cfg.network.ANCHOR_SCALES), ratios=tuple(cfg.network.ANCHOR_RATIOS),
rpn_pre_nms_top_n=cfg.TEST.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=cfg.TEST.RPN_POST_NMS_TOP_N,
threshold=cfg.TEST.RPN_NMS_THRESH, rpn_min_size=cfg.TEST.RPN_MIN_SIZE)
# res5
rfcn_feat = conv_feats[1]
rfcn_cls = mx.sym.Convolution(data=rfcn_feat, kernel=(1, 1), num_filter=7*7*num_classes, name="rfcn_cls")
rfcn_bbox = mx.sym.Convolution(data=rfcn_feat, kernel=(1, 1), num_filter=7*7*4*num_reg_classes, name="rfcn_bbox")
psroipooled_cls_rois = mx.contrib.sym.PSROIPooling(name='psroipooled_cls_rois', data=rfcn_cls, rois=rois, group_size=7, pooled_size=7,
output_dim=num_classes, spatial_scale=0.0625)
psroipooled_loc_rois = mx.contrib.sym.PSROIPooling(name='psroipooled_loc_rois', data=rfcn_bbox, rois=rois, group_size=7, pooled_size=7,
output_dim=8, spatial_scale=0.0625)
cls_score1 = mx.sym.Pooling(name='ave_cls_scors_rois', data=psroipooled_cls_rois, pool_type='avg', global_pool=True, kernel=(7, 7))
bbox_pred1 = mx.sym.Pooling(name='ave_bbox_pred_rois', data=psroipooled_loc_rois, pool_type='avg', global_pool=True, kernel=(7, 7))
# classification
cls_score = mx.sym.Reshape(name='cls_score_reshape', data=cls_score1, shape=(-1, num_classes))
cls_prob = mx.sym.SoftmaxActivation(name='cls_prob', data=cls_score)
# bounding box regression
bbox_pred2 = mx.sym.Reshape(name='bbox_pred_reshape2', data=bbox_pred1, shape=(-1, 4 * num_reg_classes))
# bbox_pred2 is the same as in the key-frame test symbol
# reshape output
cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TEST.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
bbox_pred = mx.sym.Reshape(data=bbox_pred2, shape=(cfg.TEST.BATCH_IMAGES, -1, 4 * num_reg_classes), name='bbox_pred_reshape')
        # the bbox_pred_reshape is not the same
# group output
group = mx.sym.Group([rois, cls_prob, bbox_pred, bbox_pred1, bbox_pred2, conv_feat, rpn_cls_score, rpn_bbox_pred, rpn_cls_prob, rfcn_cls, rfcn_bbox, cls_score1])
self.sym = group
return group
def get_batch_test_symbol(self, cfg):
# TODO
return
def init_weight(self, cfg, arg_params, aux_params):
#arg_params['Convolution5_scale_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['Convolution5_scale_weight'])
#arg_params['Convolution5_scale_bias'] = mx.nd.ones(shape=self.arg_shape_dict['Convolution5_scale_bias'])
arg_params['motion_vector_scale_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['motion_vector_scale_weight'])
arg_params['motion_vector_scale_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['motion_vector_scale_bias'])
arg_params['feat_conv_3x3_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['feat_conv_3x3_weight'])
arg_params['feat_conv_3x3_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['feat_conv_3x3_bias'])
arg_params['rpn_cls_score_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['rpn_cls_score_weight'])
arg_params['rpn_cls_score_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['rpn_cls_score_bias'])
arg_params['rpn_bbox_pred_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['rpn_bbox_pred_weight'])
arg_params['rpn_bbox_pred_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['rpn_bbox_pred_bias'])
arg_params['rfcn_cls_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['rfcn_cls_weight'])
arg_params['rfcn_cls_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['rfcn_cls_bias'])
arg_params['rfcn_bbox_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['rfcn_bbox_weight'])
arg_params['rfcn_bbox_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['rfcn_bbox_bias'])
|
[
"zhangjingtun@gmail.com"
] |
zhangjingtun@gmail.com
|
a668d9bb5a7cb156f866167d1f2b3499bdb9adb9
|
1be23b236762d927f5a4ab97a177b57e4f26bac6
|
/interface/zimp/ui/cligame.py
|
f7e1162670b697fb6053085b4c01f88f23bc78d8
|
[] |
no_license
|
rikkimax/zimp-interface
|
74db39183982df5e472cd366f15af5a1bd85d491
|
0701e1ed53581fdd5fa4c564931c4603d1fdd646
|
refs/heads/master
| 2021-01-22T03:30:00.430807
| 2014-03-22T10:03:21
| 2014-03-22T10:03:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 80
|
py
|
from zimp.engine.gamestate import GameState
class CliGame(GameState):
pass
|
[
"alphaglosined@gmail.com"
] |
alphaglosined@gmail.com
|
845297b4a9a26e32a464c24deb23f097735ec0e6
|
699a147205ed3b59cfc05281896121675e1fe8fb
|
/properties/pipelines.py
|
ec6cf90e9b0f8b96a8a3e43958e295d188331081
|
[] |
no_license
|
FSund/finn_car_scraper
|
9e1c04ec667cf912f6ac217b1767a35cdcf8e708
|
9d795f033dae1fe19964b81b1e059771ad474fad
|
refs/heads/master
| 2022-12-03T14:20:15.441552
| 2020-08-16T07:13:29
| 2020-08-16T07:13:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 905
|
py
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import re
import locale
class PropertiesPipeline(object):
"""{"size": "111 m\u00b2", "price": "2\u00a0900\u00a0000 kr"}"""
def process_item(self, item, spider):
        item['size'] = (re.sub('[^0-9,-]', "", item['size'])).split("-")  # keep only digits, commas and hyphens, then split ranges on the hyphen
        item['price'] = (re.sub('[^0-9,-]', "", item['price'])).split("-")  # same cleaning for the price, split ranges on the hyphen
item['size'][0] = int(item['size'][0])
item['price'][0] = int(item['price'][0])
if len(item['size']) > 1:
item['size'][1] = int(item['size'][1])
if len(item['price']) > 1:
item['price'][1] = int(item['price'][1])
return item
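# Illustrative usage sketch (not part of the original project): Scrapy normally
# calls process_item itself, but invoking it directly shows the cleaning step.
# The sample item reuses the values from the class docstring; the spider
# argument is unused by process_item, so None stands in for it here.
if __name__ == "__main__":
    sample = {"size": "111 m\u00b2", "price": "2\u00a0900\u00a0000 kr"}
    cleaned = PropertiesPipeline().process_item(sample, None)
    print(cleaned)  # expected: {'size': [111], 'price': [2900000]}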
|
[
"filip.sund@gmail.com"
] |
filip.sund@gmail.com
|
933ba9394395b4ad2fe19c7f013fd1522c8d357b
|
440f814f122cfec91152f7889f1f72e2865686ce
|
/generate/configure/extension/python/ccevent/npc/ttypes.py
|
582acb07b04c6ebecdbaf7bccaf76b6bd1134483
|
[] |
no_license
|
hackerlank/buzz-server
|
af329efc839634d19686be2fbeb700b6562493b9
|
f76de1d9718b31c95c0627fd728aba89c641eb1c
|
refs/heads/master
| 2020-06-12T11:56:06.469620
| 2015-12-05T08:03:25
| 2015-12-05T08:03:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| true
| 5,043
|
py
|
#
# Autogenerated by Thrift Compiler (0.9.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
import ccevent.ttypes
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class EventNpcCreate:
"""
Attributes:
- id_
- reborn_
- reborn_secs_
"""
thrift_spec = (
None, # 0
(1, TType.I64, 'id_', None, None, ), # 1
(2, TType.BOOL, 'reborn_', None, None, ), # 2
(3, TType.I32, 'reborn_secs_', None, None, ), # 3
)
def __init__(self, id_=None, reborn_=None, reborn_secs_=None,):
self.id_ = id_
self.reborn_ = reborn_
self.reborn_secs_ = reborn_secs_
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.id_ = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.BOOL:
self.reborn_ = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.reborn_secs_ = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('EventNpcCreate')
if self.id_ is not None:
oprot.writeFieldBegin('id_', TType.I64, 1)
oprot.writeI64(self.id_)
oprot.writeFieldEnd()
if self.reborn_ is not None:
oprot.writeFieldBegin('reborn_', TType.BOOL, 2)
oprot.writeBool(self.reborn_)
oprot.writeFieldEnd()
if self.reborn_secs_ is not None:
oprot.writeFieldBegin('reborn_secs_', TType.I32, 3)
oprot.writeI32(self.reborn_secs_)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.id_ is None:
raise TProtocol.TProtocolException(message='Required field id_ is unset!')
if self.reborn_ is None:
raise TProtocol.TProtocolException(message='Required field reborn_ is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class EventNpcDestory:
"""
Attributes:
- id_
"""
thrift_spec = (
None, # 0
(1, TType.I64, 'id_', None, None, ), # 1
)
def __init__(self, id_=None,):
self.id_ = id_
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.id_ = iprot.readI64();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('EventNpcDestory')
if self.id_ is not None:
oprot.writeFieldBegin('id_', TType.I64, 1)
oprot.writeI64(self.id_)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.id_ is None:
raise TProtocol.TProtocolException(message='Required field id_ is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
|
[
"251729465@qq.com"
] |
251729465@qq.com
|
fc9e72242eea798bc9571ab6336db70ebd20e5a0
|
51abb655fa400339fd54d91e660cc92f4d2a8f48
|
/blog/app/forms.py
|
ec1335fa5c958caf69ac7258a283ea73c7cd1b67
|
[] |
no_license
|
4LittleBlips/flask-project
|
b2951ee98e5e8d20b43f571cc6036f2ab300bd09
|
387328312f9449e035df0a88f4056249a1b2951e
|
refs/heads/master
| 2022-12-16T01:59:36.696029
| 2020-09-25T13:52:31
| 2020-09-25T13:52:31
| 298,585,997
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,103
|
py
|
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField, TextAreaField, HiddenField
from wtforms.validators import DataRequired, Email, ValidationError, EqualTo, Length
from app.models import User
class LoginForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
remember_me = BooleanField('Remember Me')
submit = SubmitField('Sign In')
class RegistrationForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
email = StringField('Email', validators=[DataRequired(), Email()])
password = PasswordField('Password', validators=[DataRequired()])
password2 = PasswordField('Repeat Password', validators=[DataRequired(), EqualTo('password')])
submit = SubmitField('Register')
    def validate_username(self, username):  # WTForms automatically calls methods named validate_<fieldname>
        user = User.query.filter_by(username=username.data).first()
        if user is not None:
            raise ValidationError('Please use a different username')
    def validate_email(self, email):
        user_email = User.query.filter_by(email=email.data).first()
        if user_email is not None:
            raise ValidationError('Please use a different email address.')
class EditProfileForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
about_me = TextAreaField('About me', validators=[Length(min=0, max=144)])
    submit = SubmitField('Submit')
class CreatePostForm(FlaskForm):
text = TextAreaField('Post', validators=[Length(min=10, max=250)])
submit = SubmitField('Submit')
class AddCommentForm(FlaskForm):
text = TextAreaField('Comment', validators=[Length(min=5, max=200)])
submit = SubmitField('Submit')
class AddReplyForm(FlaskForm):
text = TextAreaField('Reply', validators=[Length(min=2, max=100)])
submit = SubmitField('Submit')
class EditCommentForm(FlaskForm):
text = TextAreaField('Edit', validators=[Length(min=2, max=100)])
|
[
"samharb2002@hotmail.com"
] |
samharb2002@hotmail.com
|
6b1c9c319e7f9beb81ae9fbc9678d8a80adec444
|
e1a3dc7fb567acc38886ac3b4f5521c5991585cf
|
/sampling_method.py
|
ef4a11d712fe79bcb293fa76785fc1cb2e8788e3
|
[] |
no_license
|
cdmaok/MinQuestion
|
306c3b0def940483154528a0fd153b04b1003808
|
d6fcce7920c27e031bf9877908a047ab5abb745a
|
refs/heads/master
| 2021-01-11T17:58:19.837970
| 2017-04-20T12:20:34
| 2017-04-20T12:20:34
| 79,885,125
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,197
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
import random
import pandas as pd
import os
from collections import Counter
import math
import threading
from sklearn import tree
from sklearn.preprocessing import Imputer
from sklearn import linear_model
from sklearn.model_selection import cross_val_score
from sklearn import svm
import pydotplus
import collections
import merge
import sys
import pandas as pd
import fs
def log_info(df):
size = df.shape[0]
print 'info',df.Class.value_counts()
class EntropyVoter(threading.Thread):
def __init__(self,sampled_df,f_num):
threading.Thread.__init__(self)
self.sampled_df = sampled_df
self.topics = []
self.num = f_num
def run(self):
self.entropy(self.sampled_df)
def entropy(self,sampled_df):
topic_nums = len(sampled_df.irow(0))-1
topic_index = []
for i in range(1,topic_nums):
topic_index.append(i)
#print(topic_index)
choose_new = []
choose_old = []
samples = len(sampled_df)
k = range(self.num)
#log_info(sampled_df)
        # For each group, greedily select the 10 queries with the largest information gain IG(Y;Q) = H(Y) - H(Y|Q)
        # Each iteration picks one query; H(Y|Q) is computed directly and the smallest value wins. When the gains
        # tie, the query with the smallest index is kept by default (to be improved).
for iter in k:
print("-----iter ----",iter+1)
new_index = topic_index[1]
max = 2
for topic_i in topic_index:
#print("----choose topic ---",topic_i)
choose_new = choose_old[:]
choose_new.append(sampled_df.iloc[:,topic_i])
#print(choose_new)
sum_total = 0
for k1, group in sampled_df.groupby(choose_new):
#print("------group-----",k1,len(group))
p_group = len(group)/len(sampled_df)
p = group.Class.value_counts()/len(group)
sum =0
for i in p:
#print("i=",i)
if(i==1 or i==0):
pi = 0
else:
pi = -i*math.log(i)
sum += pi
#print(pi,sum)
sum_total += p_group*sum
#print("----sum_total=",sum_total)
if(sum_total<max):
max = sum_total
new_index = topic_i
#print("-------------------------min----------- ",new_index,max)
k = new_index #k = argmax(info_gian_c_i)
print("----------select--",k,"-------",sampled_df.iloc[:,k].name)
choose_old.append(sampled_df.iloc[:,k])
self.topics.append(k)
topic_index.remove(k)
def getTopic(self):
return self.topics
class EntropyVoterSimple(threading.Thread):
def __init__(self,sampled_df,f_num):
threading.Thread.__init__(self)
self.sampled_df = sampled_df
self.topics = []
self.num = f_num
def run(self):
self.entropy(self.sampled_df)
def entropy(self,sampled_df):
headers = list(sampled_df.columns)
#start = headers.index('user_topic')
start = -1
end = headers.index('Class')
sampled_df = sampled_df.ix[:,start+1:end+1]
#print f_num
topic_nums = len(sampled_df.iloc[0])-1
topic_index = []
for i in range(0,topic_nums):
topic_index.append(i)
#print(topic_index)
#print(sampled_df.iloc[:,1].name)
samples = len(sampled_df)
k = range(1)
#log_info(sampled_df)
#IG(Y;Q)=H(Y)-H(Y|Q) greedy to choose 10 query that has biggest IG
#before : iterate 10 times : each time calculate all ,choose the smallest H(Y|Q)
#2017/01/05 modify iterate one time:greedy to choose 10 query that has biggest IG
for iter in k:
h_i=[]
#print("-----iter ----",iter+1)
count = 0
for topic_i in topic_index:
#print("----choose topic ---",topic_i)
choose_new = []
#choose_new.append(sampled_df.iloc[:,topic_i])
choose_new = sampled_df.iloc[:,topic_i]
#print(choose_new)
sum_total = 0
#choose_new.any()
if(choose_new.any()):
for k1, group in sampled_df.groupby(choose_new):
#print("------group-----",k1,len(group),len(sampled_df))
p_group = len(group)/len(sampled_df)
p = group.Class.value_counts()/len(group)
sum =0
for i in p:
#print("i=",i)
if(i==1 or i==0):
pi = 0
else:
pi = -i*math.log(i)
sum += pi
#print(pi,sum)
sum_total += p_group*sum
#print("----sum_total=",sum_total,'=',p_group,'*',sum)
else:
#count+=1
sum_total = 5
#print sum_total
h_i.append(sum_total)
#print count
#print h_i
t = sorted(range(len(h_i)),key=lambda k:h_i[k],reverse=False)
self.topics = t[:self.num]
test = [h_i[i] for i in t[:self.num]]
#print self.topics
#print test
#print self.topics
def getTopic(self):
return self.topics
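# Illustrative sketch (not part of the original module): a minimal, standalone
# version of the conditional-entropy score H(Y|Q) that the voters above minimize,
# computed for a toy DataFrame with a single binary query column "q".
def _toy_conditional_entropy():
    toy = pd.DataFrame({"q": [0, 0, 1, 1], "Class": ["a", "b", "a", "a"]})
    total = 0.0
    for _, group in toy.groupby("q"):
        p_group = len(group) / len(toy)
        h = 0.0
        for p in group.Class.value_counts() / len(group):
            if 0 < p < 1:
                h += -p * math.log(p)
        total += p_group * h
    return total  # 0.5 * H(1/2, 1/2) + 0.5 * H(1) ~= 0.3466 with natural logs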
def emsemble_sampling(ti,en,probs_file,origin_file,type=0,f_size=10,frac=0.8):
#print f_size
time = range(ti)
all_topic = []
voters = []
fs_method = fs.get_method(type)
print str(fs_method)
#output = open('../mq_result/other_rules/Labor/labor_feature_rank', 'a')
#output.write(str(fs_method)+'\n')
#output.close( )
#do 10 times, according to the attribute probability prediction to sampling each time
for t in time:
#print("----------------------iteration------------------- no.",t+1)
if(en): # 0 sample, 1 resample
sampled_df = merge.get_sample(probs_file,origin_file,t,frac,0)
#sampled_df.to_csv('./test_0.csv')
else:
sampled_df = pd.read_csv(origin_file,index_col=0,dtype={"user_topic":str,"Class":str})
#test
#sampled_df.to_csv('./test_0.csv')
##feature_selection
#fs_method = fs.get_method(t)
#print str(fs_method)
voter = fs_method(sampled_df,f_size)
voters.append(voter)
for v in voters:
v.setDaemon(True)
v.start()
for v in voters:
#v.setDaemon(True)
v.join()
for v in voters:
all_topic += v.getTopic()
#feature rank
#output = open('../mq_result/other_rules/Labor/labor_feature_rank', 'a')
#output.write(str(v.getTopic())+'\n')
#output.close( )
print("-------print feature------")
feature = []
for i in Counter(all_topic).most_common(f_size):#f_size
a = str(i[0]+1)+"+"+str(i[1])+"+"+sampled_df.iloc[:,i[0]].name
print(a)
feature.append(i[0]+1)
print(feature)
#feature rank
#output = open('../mq_result/other_rules/age/old_feature_rank', 'a')
#output.write(str(fs_method)+'\n')
#output.write(str(feature)+'\n')
#output.close( )
return feature
def main():
#all = []
#path = r'./test_simple'
rule = {'Ethnicity':'White','Age':[40,50,70,90,100]}
filename = './white_old'
feature_size = 200
probs_file = filename + '.pro'
origin_file = './user_topic_origin.csv'
origin_fill = filename + '_origin_fill.csv'
goal_file = filename + '_goal.csv'
goal_fill = filename + '_goal_fill.csv'
    origin_file = origin_fill  # use the filled (imputed) versions of the files
    goal_file = goal_fill
    # emsemble_sampling expects (iterations, resample_flag, probs_file, origin_file, ..., f_size=...);
    # the iteration count and resample flag below are assumed example values.
    feature = emsemble_sampling(10, 1, probs_file, origin_file, f_size=feature_size)
    # classifier() and the index i are expected to come from elsewhere; neither is defined in this file.
    classifier(feature, goal_file, i)
if __name__ == '__main__':
main()
|
[
"398144247@qq.com"
] |
398144247@qq.com
|
18d25718ec8d10097b4c9d0a54a03b783d240270
|
f05396eba183ff093143416c95a0ccfc09870089
|
/relay.py
|
dd5c12dea4c164fc54e543f3027d6692a8b97d2a
|
[] |
no_license
|
uabryanblue/vhive-remote-vertical-farming
|
8cbfaac127685c8ada7cf7a177b550d16e1a6151
|
d0466c8d1a70a5ef119accf96f894e94cea78f2e
|
refs/heads/main
| 2023-04-11T23:55:53.133951
| 2021-05-18T17:33:44
| 2021-05-18T17:33:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 678
|
py
|
# set up relays
from gpiozero import OutputDevice
import time
import sys
# devices specified by GPIO number
# gpiozero.OutputDevice(pin, *, active_high=True, initial_value=False, pin_factory=None)
air_pump = OutputDevice(18, False)
nutrient_pump = OutputDevice(23, False)
solenoid = OutputDevice(24, False)
l_board = OutputDevice(25, False)
motors = OutputDevice(4, False)
#function to toggle relay power
def TogglePower(relay):
relay.toggle()
#TogglePower(air_pump)
#time.sleep(1)
#TogglePower(motors)
#time.sleep(1)
#TogglePower(l_board)
#time.sleep(1)
#TogglePower(nutrient_pump)
#TogglePower(solenoid)
#time.sleep(1)
#TogglePower(solenoid)
|
[
"noreply@github.com"
] |
uabryanblue.noreply@github.com
|
86fae73bd34898186a53fb9e90055e6d404664e8
|
efae4a5ad2671d80b80c706cb8abe6521dba6627
|
/clean_md_commodities.py
|
e05fc37872e4469ad0c4cf90781d321ec79b266f
|
[] |
no_license
|
lweeks20/resourcespace-migration
|
0d988eaa4ffdedb434d28e1119ed6597eb43922d
|
526466cde1fd4b96f1d78e589746d57be400db8d
|
refs/heads/master
| 2020-12-22T14:32:37.052616
| 2020-01-23T00:02:12
| 2020-01-23T00:02:12
| 236,824,931
| 0
| 0
| null | 2020-01-28T19:43:07
| 2020-01-28T19:43:06
| null |
UTF-8
|
Python
| false
| false
| 1,090
|
py
|
import pandas as pd
import numpy as np
import sys
np.set_printoptions(threshold=sys.maxsize)
# read in both excel files into pandas dataframes
md_files = pd.read_excel('G:\\datasets\\mining_district\\mining_district_files_12202019.xlsx')
commodities = pd.read_excel('G:\\datasets\\mining_district\\nv_commodities.xlsx')
# take a look at unique values in the mining district files commodities list
comm_list = md_files.commodity.str.cat(sep="; ")
comm_list_arr = comm_list.split("; ")
comm_list_arr = np.unique(comm_list_arr, axis=0)
# identify values that are in the mining district file spreadsheet but NOT in the commodities master list
#first, change lists to sets and make them all lower case for comparison
lower_md_comm = {item.lower() for item in comm_list_arr}
lower_gen_comm = {item.lower() for item in commodities.name}
#find out what values are in the mining district files that do overlap with the commodities master list, and which ones don't
not_in = lower_md_comm.difference(lower_gen_comm)
intersection = lower_md_comm.intersection(lower_gen_comm)
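# Illustrative follow-up (not part of the original script): summarise how the
# mining-district commodity values line up with the master list.
print("values missing from the master list:", sorted(not_in))
print("number of values found in both lists:", len(intersection))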
|
[
"noreply@github.com"
] |
lweeks20.noreply@github.com
|
404f340763feb5d585bb6a02180e22d960c4160b
|
1dd72195bc08460df7e5bb82d3b7bac7a6673f49
|
/api/app/utils/dewpoint.py
|
83e9717eda039cd0cedfaf74f0cdd88dca5fba86
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
bcgov/wps
|
c4347c39cadfad6711502d47776abc8d03895593
|
0ba707b0eddc280240964efa481988df92046e6a
|
refs/heads/main
| 2023-08-19T00:56:39.286460
| 2023-08-16T18:03:06
| 2023-08-16T18:03:06
| 235,861,506
| 35
| 9
|
Apache-2.0
| 2023-09-11T21:35:07
| 2020-01-23T18:42:10
|
Python
|
UTF-8
|
Python
| false
| false
| 777
|
py
|
""" This module contains functions related to dewpoint.
"""
import math
import logging
logger = logging.getLogger(__name__)
def compute_dewpoint(temp, relative_humidity):
""" Computes dewpoint based on code from the legacy system.
See: https://chat.developer.gov.bc.ca/channel/wildfire-wfwx?msg=vzjt28hWCP9J5pZtK
"""
logger.debug("Computing dewpoint for temp: %s and rh: %s", temp, relative_humidity)
if temp is None or relative_humidity is None:
return None
return (temp - (14.55 + 0.114 * temp) *
(1 - (0.01 * relative_humidity)) -
math.pow(((2.5 + 0.007 * temp) *
(1 - (0.01 * relative_humidity))), 3) - (15.9 + 0.117 * temp) *
math.pow((1 - (0.01 * relative_humidity)), 14))
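# Illustrative usage sketch (not part of the original module), showing the
# None-propagation and one typical temperature / relative-humidity pair.
# The expected value in the comment is approximate and only a sanity check.
if __name__ == "__main__":
    print(compute_dewpoint(None, 50))     # -> None
    print(compute_dewpoint(20.0, 50.0))   # roughly 9.3 degrees C for 20 C at 50% RH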
|
[
"noreply@github.com"
] |
bcgov.noreply@github.com
|
444565106d7d6d7c0ba515f706b193908140e983
|
7eae6569ecbdfcd23976191cbce411567afaddff
|
/Command/composite_command.py
|
1b079e16db06f31c58e968b73a05bb99e737a0d4
|
[] |
no_license
|
MihailMihaylov75/DesignPatterns
|
d016386d53ddbde604745b9937c580f0556b3f48
|
496918aeada2b8447e9e1441fc02a4a886262046
|
refs/heads/master
| 2023-01-13T03:27:57.700995
| 2020-11-23T18:00:02
| 2020-11-23T18:00:02
| 293,622,074
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,183
|
py
|
__author__ = 'Mihail Mihaylov'
# Composite Command a.k.a. Macro
# also: Composite design pattern ;)
import unittest
from abc import ABC, abstractmethod
from enum import Enum
class BankAccount:
OVERDRAFT_LIMIT = -500
def __init__(self, balance=0):
self.balance = balance
def deposit(self, amount):
self.balance += amount
print(f'Deposited {amount}, balance = {self.balance}')
def withdraw(self, amount):
if self.balance - amount >= BankAccount.OVERDRAFT_LIMIT:
self.balance -= amount
print(f'Withdrew {amount}, balance = {self.balance}')
return True
return False
def __str__(self):
return f'Balance = {self.balance}'
class Command(ABC):
def __init__(self):
self.success = False
def invoke(self):
pass
def undo(self):
pass
class BankAccountCommand(Command):
def __init__(self, account, action, amount):
super().__init__()
self.amount = amount
self.action = action
self.account = account
class Action(Enum):
DEPOSIT = 0
WITHDRAW = 1
def invoke(self):
if self.action == self.Action.DEPOSIT:
self.account.deposit(self.amount)
self.success = True
elif self.action == self.Action.WITHDRAW:
self.success = self.account.withdraw(self.amount)
def undo(self):
if not self.success:
return
# strictly speaking this is not correct
# (you don't undo a deposit by withdrawing)
# but it works for this demo, so...
if self.action == self.Action.DEPOSIT:
self.account.withdraw(self.amount)
elif self.action == self.Action.WITHDRAW:
self.account.deposit(self.amount)
# try using this before using MoneyTransferCommand!
class CompositeBankAccountCommand(Command, list):
    def __init__(self, items=None):
        super().__init__()
        # avoid the shared mutable default-argument pitfall
        for i in (items or []):
            self.append(i)
def invoke(self):
for x in self:
x.invoke()
def undo(self):
for x in reversed(self):
x.undo()
class MoneyTransferCommand(CompositeBankAccountCommand):
def __init__(self, from_acct, to_acct, amount):
super().__init__([
BankAccountCommand(from_acct,
BankAccountCommand.Action.WITHDRAW,
amount),
BankAccountCommand(to_acct,
BankAccountCommand.Action.DEPOSIT,
amount)])
def invoke(self):
ok = True
for cmd in self:
if ok:
cmd.invoke()
ok = cmd.success
else:
cmd.success = False
self.success = ok
class TestSuite(unittest.TestCase):
def test_composite_deposit(self):
ba = BankAccount()
deposit1 = BankAccountCommand(ba, BankAccountCommand.Action.DEPOSIT, 1000)
deposit2 = BankAccountCommand(ba, BankAccountCommand.Action.DEPOSIT, 1000)
composite = CompositeBankAccountCommand([deposit1, deposit2])
composite.invoke()
print(ba)
composite.undo()
print(ba)
def test_transfer_fail(self):
ba1 = BankAccount(100)
ba2 = BankAccount()
# composite isn't so good because of failure
amount = 1000 # try 1000: no transactions should happen
wc = BankAccountCommand(ba1, BankAccountCommand.Action.WITHDRAW, amount)
dc = BankAccountCommand(ba2, BankAccountCommand.Action.DEPOSIT, amount)
transfer = CompositeBankAccountCommand([wc, dc])
transfer.invoke()
print('ba1:', ba1, 'ba2:', ba2) # end up in incorrect state
transfer.undo()
print('ba1:', ba1, 'ba2:', ba2)
    def test_better_transfer(self):
ba1 = BankAccount(100)
ba2 = BankAccount()
amount = 1000
transfer = MoneyTransferCommand(ba1, ba2, amount)
transfer.invoke()
print('ba1:', ba1, 'ba2:', ba2)
transfer.undo()
print('ba1:', ba1, 'ba2:', ba2)
print(transfer.success)
|
[
"chakala1975@gmail.com"
] |
chakala1975@gmail.com
|
c4a74100a95490a0390254c4f1815bbed38bad99
|
a62d29004fe56d6d580044d1f4dc32a8f32e60de
|
/shared_functions.py
|
a0709d64371bffb20bdcde1a37c39b20889373ff
|
[] |
no_license
|
Lianathanoj/table-tennis-automation
|
19918981f1a358bfbce2e3d12a5ea576aaf301cc
|
f770b0033561fb2b4c1e1acadde5742de33aec15
|
refs/heads/master
| 2022-08-08T04:12:48.522997
| 2022-07-23T21:47:01
| 2022-07-23T21:47:01
| 102,046,862
| 0
| 0
| null | 2022-01-21T05:27:20
| 2017-08-31T21:26:27
|
Python
|
UTF-8
|
Python
| false
| false
| 2,985
|
py
|
from warnings import filterwarnings
filterwarnings("ignore")
import os, sys
from apiclient import errors
from oauth2client.file import Storage
from oauth2client import client
from oauth2client import tools
from re import split
try:
import argparse
flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
flags = None
class HiddenPrints:
def __enter__(self):
self._original_stdout = sys.stdout
sys.stdout = open(os.devnull, 'w')
def __exit__(self, exc_type, exc_val, exc_tb):
sys.stdout = self._original_stdout
def check_permissions(service, folder_id, cache_file_name):
try:
with HiddenPrints():
service.permissions().list(fileId=folder_id).execute()
except errors.HttpError:
print("You don't have permission to access these files.")
remove_file_from_cache(cache_file_name)
def remove_file_from_cache(cache_file_name):
credential_path = get_credential_path(cache_file_name)
print('Removing credentials from {}'.format(credential_path))
os.remove(credential_path)
sys.exit()
def get_credential_path(cache_file_name):
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir, cache_file_name)
return credential_path
def get_credentials(cache_file_name, client_secret_file, scopes, application_name):
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
credential_path = get_credential_path(cache_file_name)
store = Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
with HiddenPrints():
flow = client.flow_from_clientsecrets(client_secret_file, scopes)
flow.user_agent = application_name
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print('Storing credentials to {}'.format(credential_path))
return credentials
def file_name_split(file_name):
name_elements = split(r'[\/\-\s]+', file_name.replace('.xlsx', ''))
return name_elements
def reformat_file_name(file_name, tryout_string='tryout'):
is_tryouts = True if tryout_string in file_name.lower() else False
name_elements = file_name_split(file_name)
if is_tryouts:
date = name_elements[:-1]
else:
date = name_elements
date_short = (date[0], date[1], date[2][:2])
date_long = tuple([int(element) for element in (date[0], date[1], '20' + date[2][:2])])
return date_long, date_short, is_tryouts
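# Illustrative usage sketch (not part of the original module): the file names
# below ("month-day-year [Tryouts].xlsx") are assumed examples chosen to match
# how reformat_file_name slices the parsed date elements.
if __name__ == "__main__":
    print(reformat_file_name("9-12-17 Tryouts.xlsx"))  # ((9, 12, 2017), ('9', '12', '17'), True)
    print(reformat_file_name("10/3/18.xlsx"))          # ((10, 3, 2018), ('10', '3', '18'), False)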
|
[
"jlian@gatech.edu"
] |
jlian@gatech.edu
|
b60102d9bf012dfb2866d9309bb23d0adc4ee66e
|
a379686a0baf3824691edcb897c8ba2e02aebb17
|
/dic.py
|
f7c5dc09d3576082ab278fb895e2bb613552f437
|
[] |
no_license
|
bas87/Hangman
|
e67a71770ccf1d3a568f6adaeddc1cd58d7a57ba
|
1d342a78f1512f797fee5895991bc809d51c1e72
|
refs/heads/master
| 2016-08-11T22:11:23.043257
| 2015-12-13T18:20:09
| 2015-12-13T18:20:09
| 47,745,787
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,060
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os, sys
import random,re
class WordReader:
default_words = u"airplane home school"
def __init__(self, filename, min_length = 5):
self.min_length = min_length
filename = os.path.join(os.path.dirname(os.path.realpath(__file__)), filename)
try:
f = open(filename, "r")
except:
self.words = self.default_words
self.filename = None
return
self.words = f.read()
self.filename = filename
    def SetMinLength(self, min_length):
        self.min_length = min_length
    def Get(self):
        reg = re.compile(r'\s+([a-zA-Z]+)\s+')
n = 30 # maximum number of tries to find a suitable word
while n:
index = int(random.random()*len(self.words))
m = reg.search(self.words[index:])
if m and len(m.groups()[0]) >= self.min_length: break
n = n - 1
if n: return m.groups()[0].lower()
return 'hangman' # last attempt to get some word :-)
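# Illustrative usage sketch (not part of the original module): "words.txt" is an
# assumed file name; if it cannot be opened, WordReader silently falls back to
# its tiny built-in word list (and Get() may then return 'hangman').
if __name__ == "__main__":
    reader = WordReader("words.txt", min_length=5)
    print(reader.Get())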
|
[
"toman@devzone.cz"
] |
toman@devzone.cz
|
fe96d782627bf563ef2b14a3f77efcccdcf594f1
|
92754bb891a128687f3fbc48a312aded752b6bcd
|
/Algorithms/Python3.x/463-Island_Perimeter.py
|
34e7693ea8b1084994a2b09637c307905a7e1e11
|
[] |
no_license
|
daidai21/Leetcode
|
ddecaf0ffbc66604a464c3c9751f35f3abe5e7e5
|
eb726b3411ed11e2bd00fee02dc41b77f35f2632
|
refs/heads/master
| 2023-03-24T21:13:31.128127
| 2023-03-08T16:11:43
| 2023-03-08T16:11:43
| 167,968,602
| 8
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,817
|
py
|
# Runtime: 600 ms, faster than 47.41% of Python3 online submissions for Island Perimeter.
# Memory Usage: 13.8 MB, less than 25.00% of Python3 online submissions for Island Perimeter.
from typing import List
class Solution:
def islandPerimeter(self, grid: List[List[int]]) -> int:
perimeters = 0
len_row, len_col = len(grid), len(grid[0])
for row in range(len_row):
for col in range(len_col):
if grid[row][col] == 0: # water
if row > 0: # up
perimeters += grid[row - 1][col]
if row < len_row - 1: # down
perimeters += grid[row + 1][col]
if col > 0: # left
perimeters += grid[row][col - 1]
if col < len_col - 1: # right
perimeters += grid[row][col + 1]
else: # land
perimeters += row == 0
perimeters += row == len_row - 1
perimeters += col == 0
perimeters += col == len_col - 1
return perimeters
# Runtime: 536 ms, faster than 91.59% of Python3 online submissions for Island Perimeter.
# Memory Usage: 14.3 MB, less than 16.67% of Python3 online submissions for Island Perimeter.
class Solution:
def islandPerimeter(self, grid: List[List[int]]) -> int:
area, connect = 0, 0
for row in range(len(grid)):
for col in range(len(grid[0])):
if grid[row][col] == 1:
area += 1
if row > 0 and grid[row - 1][col]:
connect += 1
if col > 0 and grid[row][col - 1]:
connect += 1
return 4 * area - 2 * connect
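# Illustrative check (not part of the original solutions): both approaches return
# 16 for the classic LeetCode example grid; Solution here refers to the second
# (area/connection counting) implementation, which shadows the first.
if __name__ == "__main__":
    example = [[0, 1, 0, 0], [1, 1, 1, 0], [0, 1, 0, 0], [1, 1, 0, 0]]
    print(Solution().islandPerimeter(example))  # 16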
|
[
"daidai4269@aliyun.com"
] |
daidai4269@aliyun.com
|
1269bac5e53b2f4357476eb85fc432e651d179db
|
6f93efe976e1484fe526a6ba82c0cd277975f74f
|
/code/attack_perception.py
|
64831ea85cc5c588411d6737b2c2e8b9423a320b
|
[] |
no_license
|
JerishDansolBalala/FeatureSpaceAtk
|
2629c8b63de8f25854371f065fc93d98957ce24d
|
fce2aedc511f925e8033c523e9a3a64a9a1abd17
|
refs/heads/master
| 2020-09-09T20:49:52.355462
| 2020-04-21T01:15:52
| 2020-04-21T01:15:52
| 221,565,952
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,013
|
py
|
# Train the Style Transfer Net
from __future__ import print_function
import numpy as np
import sys
np.set_printoptions(threshold=sys.maxsize)
import tensorflow as tf
import os
import settings
task_name = "imperceptability"
data_set = "imagenet" # "imagenet"
model_name = "imagenet_normal"
decoder_name = "imagenet_shallowest"
"""
data_set = "cifar10" # "imagenet"
model_name = "cifar10_adv"
decoder_name = "cifar10"
"""
exec(open('base.py').read())
from style_transfer_net import StyleTransferNet, StyleTransferNet_adv
from utils import get_train_images
from cifar10_class import Model
import cifar10_input
from PIL import Image
from adaptive_instance_norm import normalize
STYLE_LAYERS = settings.config["STYLE_LAYERS"]
# (height, width, color_channels)
TRAINING_IMAGE_SHAPE = settings.config["IMAGE_SHAPE"]
EPOCHS = 4
EPSILON = 1e-5
BATCH_SIZE = settings.config["BATCH_SIZE"]
if data_set=="cifar10":
LEARNING_RATE = 1e-2
LR_DECAY_RATE = 1e-4 #5e-5
DECAY_STEPS = 1.0
adv_weight = 5000
ITER=500
CLIP_NORM_VALUE = 10.0
else:
if decoder_name == "imagenet_shallowest_smooth":
LEARNING_RATE = 1e-3
else:
LEARNING_RATE = 1e-2
LR_DECAY_RATE = 0 # 5e-5
DECAY_STEPS = 1.0
adv_weight = 10
ITER=1000
CLIP_NORM_VALUE = 1000.0
style_weight = 1
if data_set == "cifar10":
raw_cifar = cifar10_input.CIFAR10Data("cifar10_data")
def get_data(sess):
x_batch, y_batch = raw_cifar.eval_data.get_next_batch(
batch_size=BATCH_SIZE, multiple_passes=True)
return x_batch, y_batch
elif data_set == "imagenet":
inet = imagenet(BATCH_SIZE, "val")
def get_data(sess):
x_batch, y_batch = inet.get_next_batch(sess)
return x_batch, y_batch
def save_rgb_img( img, path):
img = img.astype(np.uint8)
#img=np.reshape(img,[28,28])
Image.fromarray(img, mode='RGB').save(path)
def get_scope_var(scope_name):
var_list = tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES, scope=scope_name)
assert (len(var_list) >= 1)
return var_list
encoder_path=ENCODER_WEIGHTS_PATH
#model_save_path= "./transform.ckpt"
debug=True
logging_period=100
if debug:
from datetime import datetime
start_time = datetime.now()
def grad_attack():
sess.run(stn.init_style, feed_dict=fdict)
sess.run(global_step.initializer)
rst_img, rst_loss, rst_acc,rst_mean,rst_sigma = sess.run(
[adv_img, content_loss_y, adv_acc_y_5, stn.meanS, stn.sigmaS], feed_dict=fdict)
_ITER = ITER
flag=True
last_update= [0 for i in range(BATCH_SIZE)]
min_val = [1e10 for i in range(BATCH_SIZE)]
for i in range(_ITER):
#_, acc, aloss, closs, closs1, sigma, mean, sigmaS, meanS = sess.run(
# [train_op, adv_acc, adv_loss, content_loss_y, content_loss, stn.sigmaC, stn.meanC, stn.sigmaS, stn.meanS], feed_dict=fdict)
_, _l2_g = sess.run([train_op, l2_norm_g], feed_dict=fdict)
sess.run(stn.style_bound, feed_dict = fdict)
flag1 = True
_adv_img, acc, aloss, closs, _mean, _sigma = sess.run( [adv_img, adv_acc_y_5, adv_loss, content_loss_y, stn.meanS, stn.sigmaS], feed_dict = fdict)
ups=[]
for j in range(BATCH_SIZE):
if aloss[j]<=min_val[j]*0.95:
min_val[j] = aloss[j]
last_update[j] = i
if acc[j]<rst_acc[j] or (acc[j]==rst_acc[j] and closs[j]<rst_loss[j]):
rst_img[j]=_adv_img[j]
rst_acc[j] = acc[j]
rst_loss[j] = closs[j]
rst_mean[j] = _mean[j]
rst_sigma[j] = _sigma[j]
last_update[j] = i
if i-last_update[j]<=200:
flag1 = False
#ups.append(stn.init_style_rand[j])
#print("\treset %d"%j,end="\t")
#last_update[j] = i
if flag1:
break
if len(ups)>0:
sess.run(ups,feed_dict=fdict)
if i>_ITER:
break
if flag and np.mean(acc)==0:
_ITER=i+500
flag=False
if i%50==0 :
"""for j in range(BATCH_SIZE):
gan_out = sess.run(adv_img, feed_dict=fdict)
save_out = np.concatenate(
(gan_out[j], x_batch[j], np.abs(gan_out[j]-x_batch[j])))
sz = TRAINING_IMAGE_SHAPE[1]
full_path = os.path.join("temp", "%d" % i, "%d.jpg" % j)
os.makedirs(os.path.join("temp", "%d" % i), exist_ok=True)
save_out = np.reshape(save_out, newshape=[sz*3, sz, 3])
save_rgb_img(save_out, path=full_path)"""
#print("sigma", sigma[0])
#print("mean", mean[0])
#print("sigma", sigma)
#print("mean", mean)
#print("sigmaS", sigma)
#print("meanS", mean)
acc=np.mean(acc)
print(i, acc, "advl", aloss, "contentl", closs, "norm", _l2_g)
#if np.sum(acc) == 0 and np.all(np.less_equal(closs,2*128)):
#break
#if i==1:
# exit()
sess.run(stn.asgn, feed_dict={stn.meanS_ph: rst_mean, stn.sigmaS_ph: rst_sigma})
return rst_img
def rand_attack():
for i in range(10):
sess.run(stn.init_style, feed_dict=fdict)
sess.run(global_step.initializer)
for j in range(10):
_, acc, aloss, closs = sess.run(
[train_op, adv_acc, adv_loss, content_loss], feed_dict=fdict)
save_rgb_img(save_out, path=full_path)
sess.run(stn.style_bound, feed_dict = fdict)
print(i,acc,"advl",aloss,"contentl",closs)
if acc < 0.05 and closs < 2000:
break
# get the traing image shape
HEIGHT, WIDTH, CHANNELS = TRAINING_IMAGE_SHAPE
INPUT_SHAPE = (None, HEIGHT, WIDTH, CHANNELS)
def gradient(opt, vars, loss ):
global l2_norm_g
gradients, variables = zip(*opt.compute_gradients(loss,vars))
l2_norm_g = tf.norm(gradients)
g_split = [tf.unstack(g, BATCH_SIZE, axis=0) for g in gradients]
g1_list=[]
g2_list=[]
DIM = settings.config["DECODER_DIM"][-1]
limit = 10/np.sqrt(DIM)
for g1,g2 in zip(g_split[0],g_split[1]):
#(g1, g2), _ = tf.clip_by_global_norm([g1, g2], CLIP_NORM_VALUE)
g1 = tf.clip_by_value(g1,-1/np.sqrt(limit),1/np.sqrt(limit))
g2 = tf.clip_by_value(g2,-1/np.sqrt(limit),1/np.sqrt(limit))
g1_list.append(g1)
g2_list.append(g2)
gradients = [tf.stack(g1_list, axis=0), tf.stack(g2_list, axis=0)]
#gradients, _ = tf.clip_by_global_norm(gradients, 1.0)
opt = opt.apply_gradients(zip(gradients, variables), global_step=global_step)
return opt
def gradient1(opt, vars, loss):
gradients, variables = zip(*opt.compute_gradients(loss, vars))
gradients, _ = tf.clip_by_global_norm(gradients, 1.0)
opt = opt.apply_gradients(
zip(gradients, variables), global_step=global_step)
return opt
# create the graph
tf_config = tf.ConfigProto()
#tf_config.gpu_options.per_process_gpu_memory_fraction=0.5
tf_config.gpu_options.allow_growth = True
with tf.Graph().as_default(), tf.Session(config=tf_config) as sess:
content = tf.placeholder(tf.float32, shape=INPUT_SHAPE, name='content')
style = tf.placeholder(tf.float32, shape=INPUT_SHAPE, name='style')
label = tf.placeholder(tf.int64, shape =None, name="label")
#style = tf.placeholder(tf.float32, shape=INPUT_SHAPE, name='style')
mgt = tf.get_variable(dtype=tf.float32, shape =[], name="magnititude")
mgt_ph = tf.placeholder(tf.float32, shape= [])
mgt_asgn = tf.assign (mgt,mgt_ph)
# create the style transfer net
stn = StyleTransferNet_adv(encoder_path)
# pass content and style to the stn, getting the generated_img
generated_img , generated_img_adv = stn.transform(content, p=mgt)
adv_img=generated_img_adv
img = generated_img
stn_vars = get_scope_var("transform")
# get the target feature maps which is the output of AdaIN
target_features = stn.target_features
# pass the generated_img to the encoder, and use the output compute loss
generated_img_adv = tf.reverse(
generated_img_adv, axis=[-1]) # switch RGB to BGR
adv_img_bgr = generated_img_adv
generated_img_adv = stn.encoder.preprocess(
generated_img_adv) # preprocess image
enc_gen_adv, enc_gen_layers_adv = stn.encoder.encode(generated_img_adv)
generated_img = tf.reverse(
generated_img, axis=[-1]) # switch RGB to BGR
img_bgr = generated_img
generated_img = stn.encoder.preprocess(
generated_img) # preprocess image
enc_gen, enc_gen_layers = stn.encoder.encode(generated_img)
if data_set == "cifar10":
classifier = Model("eval", raw_cifar.train_images)
classifier._build_model(adv_img, label, reuse=False)
adv_loss = - classifier.target_loss
adv_acc = classifier.accuracy
classifier._build_model(img, label, reuse=True)
normal_loss = - classifier.target_loss
norm_acc = classifier.accuracy
elif data_set == "imagenet":
classifier = build_imagenet_model(adv_img_bgr, label, conf=1)
adv_loss = - classifier.target_loss5
adv_acc = classifier.accuracy
adv_acc_y = classifier.acc_y
adv_acc_y_5 = classifier.acc_y_5
content_bgr = tf.reverse(
content, axis=[-1]) # switch RGB to BGR
classifier = build_imagenet_model(content_bgr, label, reuse=True)
normal_loss = - classifier.target_loss5
norm_acc = classifier.accuracy
acc_y = classifier.acc_y
acc_y_5 = classifier.acc_y_5
classifier = build_imagenet_model(img_bgr, label, reuse=True)
decode_acc_y = classifier.acc_y
decode_acc_y_5 = classifier.acc_y_5
# compute the content loss
bar=3000/64/128
content_loss_y = tf.reduce_sum(
tf.reduce_mean(tf.square(enc_gen_adv - target_features), axis=[1, 2]),axis=-1)
content_loss = tf.reduce_sum(tf.reduce_mean(
tf.square(enc_gen_adv - target_features), axis=[1, 2]))
#
#content_loss += tf.reduce_sum(tf.reduce_mean(
# tf.square(enc_gen - stn.norm_features), axis=[1, 2]))
# compute the style loss
style_layer_loss = []
# compute the total loss
# adv_loss * adv_weight
loss = tf.reduce_sum((1-adv_acc_y_5) * content_loss_y)
loss += tf.reduce_sum(adv_loss * BATCH_SIZE * adv_weight)# style_weight * style_loss
l2_embed = normalize(enc_gen)[0] - normalize(stn.norm_features)[0]
l2_embed = tf.reduce_mean(
tf.sqrt(tf.reduce_sum((l2_embed * l2_embed), axis=[1, 2, 3])))
loss=loss
if data_set == "cifar10":
classifier_vars = get_scope_var("model")
decoder_vars = get_scope_var("decoder")
# Training step
global_step = tf.Variable(0, trainable=False)
learning_rate = tf.train.inverse_time_decay(LEARNING_RATE, global_step, DECAY_STEPS, LR_DECAY_RATE)
#tf.train.AdamOptimizer(learning_rate).minimize( # MomentumOptimizer(learning_rate, momentum=0.9) tf.train.GradientDescentOptimizer(learning_rate)
#train_op = tf.train.AdamOptimizer(learning_rate).minimize(
# loss, var_list=stn_vars, global_step=global_step)
##gradient clipping
train_op = gradient(tf.train.AdamOptimizer(learning_rate, beta1= 0.5),vars=stn_vars, loss=loss)
#train_op = tf.train.AdamOptimizer(learning_rate).minimize(
# loss, var_list=stn_vars, global_step=global_step)
sess.run(tf.global_variables_initializer())
if data_set == "cifar10":
classifier_saver = tf.train.Saver(classifier_vars, max_to_keep=1)
classifier_saver.restore(sess, settings.config["hardened_model"])
elif data_set == "imagenet":
restore_parameter(sess)
# saver
saver = tf.train.Saver(decoder_vars, max_to_keep=1)
saver.restore(sess,Decoder_Model)
###### Start Training ######
step = 0
if debug:
elapsed_time = datetime.now() - start_time
start_time = datetime.now()
print('\nElapsed time for preprocessing before actually train the model: %s' % elapsed_time)
print('Now begin to train the model...\n')
uid = 0
data_set = "imagenet" # "imagenet"
model_name = "imagenet_normal_backup"
decoder_name = "imagenet_shallowest_smooth"
base_dir_model_old = base_dir_model
base_dir_model = os.path.join(
"store", data_set, decoder_name, model_name)
def merge_dict(dict_tot, dict1):
for k, v in dict1.items():
if k in dict_tot:
dict_tot[k] = np.concatenate([dict_tot[k], dict1[k]])
else:
dict_tot[k] = dict1[k]
return dict_tot
def get_np_dict():
np_dict = {}
for sf in ["", "_pgd_pgd_linf"]:
for i in range(1, 100):
np_file_path = os.path.join(
base_dir_model, "saved_samples%s%d.npy" % (sf, i)) # "target_attack",
if os.path.exists(np_file_path):
_np_dict = np.load(np_file_path, allow_pickle=True).item()
merge_dict(np_dict, _np_dict)
return np_dict
class np_dictionary():
def __init__(self, attrs, data=None):
self.attrs = attrs
if data is None:
data = {}
for attr in self.attrs:
data[attr] = None
tot_succ = 0
cnt = 50
#for i, (im, file_name) in enumerate(dataset_loader):
np_dict = get_np_dict()
print(np_dict.keys())
np_label_arr_tot = np_dict["label"]
np_benign_image_arr_tot = np_dict["benign_image"]
idx = np_dict["index"]
base_dir_model = base_dir_model_old
report_batch = 2
assert len(np_benign_image_arr_tot) >= 21*8*8
for batch in range(1,8+1):
#x_batch, y_batch = get_data(sess)
x_batch = np_benign_image_arr_tot[21*8*(batch-1):21*8*(batch-1)+8]
y_batch = np_label_arr_tot[21*8*(batch-1):21*8*(batch-1)+8]
fdict = {content: x_batch, label: y_batch}
if batch % report_batch == 1:
np_adv_image = []
np_benign_image = []
np_content_loss = []
np_acc_attack = []
np_acc_attack_5 = []
np_acc = []
np_acc_5 = []
np_decode_acc = []
np_decode_acc_5 = []
np_acc_5 = []
np_label = []
np_mgt = []
np_index = []
start=1.0
end=2.0
divides=40
for j in range(0,25,2):
mgt_val = (start*(divides-j)+end*j)/divides
sess.run(mgt_asgn, feed_dict={mgt_ph:mgt_val})
# run the training step
grad_attack()
step += 1
for i in range(BATCH_SIZE):
gan_out = sess.run(adv_img, feed_dict=fdict)
save_out = np.concatenate(
(gan_out[i], x_batch[i], np.abs(gan_out[i]-x_batch[i])))
sz = TRAINING_IMAGE_SHAPE[1]
full_path = os.path.join(
base_dir_model, "%d" % step, "%d.jpg" % i)
os.makedirs(os.path.join(base_dir_model, "%d" %
step), exist_ok=True)
save_out = np.reshape(save_out, newshape=[sz*3, sz, 3])
save_rgb_img(save_out, path=full_path)
if batch % 1 == 0:
elapsed_time = datetime.now() - start_time
_content_loss, _adv_acc, _adv_loss, _loss, \
= sess.run([ content_loss, adv_acc, adv_loss, loss,], feed_dict=fdict)
_adv_img, _loss_y, _adv_acc_y, _adv_acc_y_5, _acc_y, _acc_y_5, _decode_acc_y, _decode_acc_y_5 = sess.run([
adv_img, content_loss_y, adv_acc_y, adv_acc_y_5, acc_y, acc_y_5, decode_acc_y, decode_acc_y_5], feed_dict=fdict)
#_normal_loss, _normal_acc = sess.run([normal_loss, norm_acc], feed_dict=fdict)
np_adv_image.append(_adv_img)
np_benign_image.append(x_batch)
np_content_loss.append(_loss_y)
np_acc_attack.append(_adv_acc_y)
np_acc_attack_5 .append(_adv_acc_y_5)
np_acc_5 .append(_acc_y_5)
np_acc .append(_acc_y)
np_label.append(y_batch)
np_decode_acc.append(_decode_acc_y)
np_decode_acc_5.append(_decode_acc_y_5)
np_mgt . append(BATCH_SIZE*[mgt_val])
np_index.append([batch*BATCH_SIZE+k for k in range(BATCH_SIZE)])
_adv_loss = np.sum(_adv_loss)
#_normal_loss = np.sum(_normal_loss)
l2_loss = (_adv_img - x_batch) /255
l2_loss = np.sum(l2_loss*l2_loss)/8
li_loss = np.mean( np.amax(np.abs(_adv_img - x_batch) / 255, axis=-1))
l1_loss = np.mean(np.sum(np.abs(_adv_img - x_batch) / 255, axis=-1))
#print(_normal_acc)
print("l2_loss", l2_loss, "li_loss", li_loss, "l1_loss", l1_loss)
print('step: %d, total loss: %.3f, elapsed time: %s' % (step, _loss, elapsed_time))
print('content loss: %.3f' % (_content_loss))
print('adv loss : %.3f, weighted adv loss: %.3f , adv acc %.3f' %
(_adv_loss, adv_weight * _adv_loss, _adv_acc))
print("_acc_y_5", _acc_y_5)
print("_adv_acc_y_5", _adv_acc_y_5)
#print('normal loss : %.3f normal acc: %.3f\n' %
# (_normal_loss, _normal_acc))
if batch % report_batch == 0:
np_adv_image_arr = np.concatenate(np_adv_image)
np_benign_image_arr = np.concatenate(np_benign_image)
np_content_loss_arr = np.concatenate(np_content_loss)
np_acc_attack_arr = np.concatenate(np_acc_attack)
np_acc_attack_5_arr = np.concatenate(np_acc_attack_5)
np_acc_arr = np.concatenate(np_acc)
np_acc_5_arr = np.concatenate(np_acc_5)
np_decode_acc_arr = np.concatenate(np_decode_acc)
np_decode_acc_5_arr = np.concatenate(np_decode_acc_5)
np_label_arr = np.concatenate(np_label)
np_mgt_arr = np.concatenate(np_mgt)
np_index_arr = np.concatenate(np_index)
saved_dict = {"adv_image": np_adv_image_arr,
"benign_image": np_benign_image_arr,
"content_loss": np_content_loss_arr,
"acc_attack": np_acc_attack_arr,
"acc_attack_5": np_acc_attack_5_arr,
"acc": np_acc_arr,
"acc_5": np_acc_5_arr,
"decode_acc": np_decode_acc_arr,
"decode_acc_5": np_decode_acc_5_arr,
"label": np_label_arr,
"magnititude": np_mgt_arr,
"index":np_index_arr}
np.save(os.path.join(base_dir_model, "saved_samples%d.npy" %
(batch//report_batch)), saved_dict)
###### Done Training & Save the model ######
#saver.save(sess, model_save_path)
if debug:
elapsed_time = datetime.now() - start_time
print('Done training! Elapsed time: %s' % elapsed_time)
#print('Model is saved to: %s' % model_save_path)
|
[
"simpleword2014@gmail.com"
] |
simpleword2014@gmail.com
|
2c9f8b16f0b649a13821363189a71d563ab46156
|
a082af407c1e049942ca3b08471b52924dd7ef70
|
/serve.py
|
e19de82da4cba9ebcc45df3eccb33a975a147c28
|
[] |
no_license
|
ctb/meep
|
57760f1a7adbb53050ecbe0ae7c30e7068723e0f
|
2e15284b40a920acdc011a26c16f2adeb5bda653
|
refs/heads/master
| 2021-01-22T18:23:41.696558
| 2012-04-26T18:09:53
| 2012-04-26T18:09:53
| 3,142,298
| 0
| 24
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 349
|
py
|
from meep_example_app import MeepExampleApp, initialize
from wsgiref.simple_server import make_server
initialize()
app = MeepExampleApp()
httpd = make_server('', 8000, app)
print "Serving HTTP on port 8000..."
# Respond to requests until process is killed
httpd.serve_forever()
# Alternative: serve one request, then exit
httpd.handle_request()
|
[
"titus@idyll.org"
] |
titus@idyll.org
|
9eb260f2bffd639a620f45023fb160561e99239c
|
809582fe3345aff92faba9a179115d3c2806027a
|
/Manifest.py
|
c34fed896e2dacfb304938a80f0d17aa4884ec6d
|
[] |
no_license
|
markfickett/gors
|
dd8e24f373563a48d1290e00b0d17409a939429d
|
539ebba280dac29f22fa358400845a99433f48ae
|
refs/heads/master
| 2020-05-13T06:34:28.667209
| 2012-10-12T04:51:46
| 2012-10-12T04:51:46
| 822,721
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 296
|
py
|
import logging
logging.basicConfig(
format='[%(levelname)s %(name)s] %(message)s',
level=logging.INFO)
import enum
import webbrowser, urllib2, xml, email, time, optparse, os
import plistlib
import threading
VERSION = (0, 1)
VERSION_STRING = '.'.join([str(v) for v in VERSION])
|
[
"mark.fickett@gmail.com"
] |
mark.fickett@gmail.com
|
37fb97e45b8603215c2c999f3363dc847d0ee683
|
edcd74f8f65119bdbe737360c2ca33b4a6da160a
|
/python/problem-matrix/special_positions_in_a_binary_matrix.py
|
2c1cb53093621131e77d527d634a8374cfd560c9
|
[] |
no_license
|
hyunjun/practice
|
72e83de6a1d5e04ddcd16526f16110ea2dd00373
|
5376dd48b1cefb4faba9d2ef6a8a497b6b1d6c67
|
refs/heads/master
| 2023-08-31T07:00:37.320351
| 2023-08-17T07:29:24
| 2023-08-17T07:29:24
| 2,704,126
| 3
| 2
| null | 2022-12-14T20:25:07
| 2011-11-03T18:28:44
|
Python
|
UTF-8
|
Python
| false
| false
| 1,249
|
py
|
# https://leetcode.com/problems/special-positions-in-a-binary-matrix
from typing import List
class Solution:
# runtime; 168ms, 100.00%
# memory; 14.2MB, 16.67%
def numSpecial(self, mat: List[List[int]]) -> int:
R, C, cnt = len(mat), len(mat[0]), 0
for r, m in enumerate(mat):
if sum(m) != 1:
continue
for c in range(C):
if mat[r][c] != 1:
continue
cnt += 1 if sum(mat[y][c] for y in range(R)) == 1 else 0
return cnt
s = Solution()
mat1 = [[1,0,0],
[0,0,1],
[1,0,0]]
mat2 = [[1,0,0],
[0,1,0],
[0,0,1]]
mat3 = [[0,0,0,1],
[1,0,0,0],
[0,1,1,0],
[0,0,0,0]]
mat4 = [[0,0,0,0,0],
[1,0,0,0,0],
[0,1,0,0,0],
[0,0,1,0,0],
[0,0,0,1,1]]
mat5 = [[0,0,0,0,0,1,0,0],
[0,0,0,0,1,0,0,1],
[0,0,0,0,1,0,0,0],
[1,0,0,0,1,0,0,0],
[0,0,1,1,0,0,0,0]]
data = [(mat1, 1),
(mat2, 3),
(mat3, 2),
(mat4, 3),
(mat5, 1),
]
for mat, expect in data:
real = s.numSpecial(mat)
for m in mat:
print(m)
print(f'expect {expect} real {real} result {expect == real}')
|
[
"hyunjun.chung@gmail.com"
] |
hyunjun.chung@gmail.com
|
e1c8d82aff4eaedf094895ca6212154433f582dd
|
26d39d416573319c5b477ad70b8dd99bfea1a959
|
/crm/apps.py
|
7598b1416ae2ce7478ddbb1e7b44be33474931f0
|
[] |
no_license
|
Koha90/crm_fg
|
59fcf280a110d39ccde2b379ae07476c16c8c717
|
2d97729e6f315a901ed153da90abc1cd44414aed
|
refs/heads/master
| 2023-02-28T13:55:01.202036
| 2021-02-08T18:35:40
| 2021-02-08T18:35:40
| 335,657,061
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 106
|
py
|
from django.apps import AppConfig
class CrmConfig(AppConfig):
name = 'crm'
verbose_name = 'CRM'
|
[
"aleksey.jake@gmail.com"
] |
aleksey.jake@gmail.com
|
7a508448a497c81e716884a0444d226477e22d83
|
6bb30e928513cda55a54c9d3880512ea9eba5f56
|
/vps/vultr/regions.py
|
7720d46cff610eb8f39f5fce85c02253f99ffd23
|
[
"BSD-3-Clause"
] |
permissive
|
germfue/vps-tools
|
9f41a8c9da522c227d597140ea3fe637143c5887
|
90fa39403be747b6ec73697064a58bab8d575526
|
refs/heads/master
| 2021-06-15T18:14:50.026943
| 2017-03-26T15:45:28
| 2017-03-26T15:45:28
| 80,643,656
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,245
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016, Germán Fuentes Capella <development@fuentescapella.com>
# BSD 3-Clause License
#
# Copyright (c) 2017, Germán Fuentes Capella
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from invoke import task, Collection
from vultr import Vultr
from .query import query
@task(name='list',
help={
'criteria': 'Filter queried data. Example usage: ' +
'"{\'continent\': \'Europe\'}"'
})
def regions_list(ctx, criteria=''):
"""
Retrieve a list of all active regions
Note that just because a region is listed here, does not mean that there is
room for new servers
"""
return query(ctx, lambda x: Vultr(x).regions.list(), criteria)
regions_coll = Collection()
regions_coll.add_task(regions_list)
|
[
"github@gfc.33mail.com"
] |
github@gfc.33mail.com
|
65b3e86687880c869c80396e9b072a135c440d24
|
ef87ad1790aed1a7f2f24a2cdedaf5d815309fef
|
/scripts/main_solo12_demo_estimator.py
|
a7807ada42edd50103e71a1cb90e7439a81bc9c0
|
[
"BSD-2-Clause"
] |
permissive
|
paLeziart/quadruped-reactive-walking
|
4f9092205e31d7231ae094e17cbbe7438acdd055
|
2fd5429afc19a2a33aa8afc0bf16b750ad29c7f0
|
refs/heads/devel
| 2023-05-12T10:56:43.801864
| 2021-06-10T08:26:01
| 2021-06-10T08:26:01
| 322,542,716
| 7
| 4
|
BSD-2-Clause
| 2021-04-20T12:54:20
| 2020-12-18T09:02:42
|
Python
|
UTF-8
|
Python
| false
| false
| 6,901
|
py
|
# coding: utf8
from utils.logger import Logger
import tsid as tsid
import pinocchio as pin
import argparse
import numpy as np
from mpctsid.Estimator import Estimator
from utils.viewerClient import viewerClient, NonBlockingViewerFromRobot
import os
import sys
sys.path.insert(0, './mpctsid')
SIMULATION = True
LOGGING = False
if SIMULATION:
from mpctsid.utils_mpc import PyBulletSimulator
else:
from pynput import keyboard
from solo12 import Solo12
from utils.qualisysClient import QualisysClient
DT = 0.002
key_pressed = False
def on_press(key):
"""Wait for a specific key press on the keyboard
Args:
key (keyboard.Key): the key we want to wait for
"""
global key_pressed
try:
if key == keyboard.Key.enter:
key_pressed = True
# Stop listener
return False
except AttributeError:
print('Unknown key {0} pressed'.format(key))
def put_on_the_floor(device, q_init):
"""Make the robot go to the default initial position and wait for the user
to press the Enter key to start the main control loop
Args:
device (robot wrapper): a wrapper to communicate with the robot
q_init (array): the default position of the robot
"""
global key_pressed
key_pressed = False
Kp_pos = 3.
Kd_pos = 0.01
imax = 3.0
pos = np.zeros(device.nb_motors)
for motor in range(device.nb_motors):
pos[motor] = q_init[device.motorToUrdf[motor]] * \
device.gearRatioSigned[motor]
listener = keyboard.Listener(on_press=on_press)
listener.start()
print("Put the robot on the floor and press Enter")
while not key_pressed:
device.UpdateMeasurment()
for motor in range(device.nb_motors):
ref = Kp_pos*(pos[motor] - device.hardware.GetMotor(motor).GetPosition() -
Kd_pos*device.hardware.GetMotor(motor).GetVelocity())
ref = min(imax, max(-imax, ref))
device.hardware.GetMotor(motor).SetCurrentReference(ref)
device.SendCommand(WaitEndOfCycle=True)
print("Start the motion.")
def mcapi_playback(name_interface):
"""Main function that calibrates the robot, get it into a default waiting position then launch
the main control loop once the user has pressed the Enter key
Args:
name_interface (string): name of the interface that is used to communicate with the robot
"""
if SIMULATION:
device = PyBulletSimulator()
qc = None
else:
device = Solo12(name_interface, dt=DT)
qc = QualisysClient(ip="140.93.16.160", body_id=0)
if LOGGING:
logger = Logger(device, qualisys=qc)
# Number of motors
nb_motors = device.nb_motors
q_viewer = np.array((7 + nb_motors) * [0., ])
# Gepetto-gui
v = viewerClient()
v.display(q_viewer)
# PyBullet GUI
enable_pyb_GUI = True
# Maximum duration of the demonstration
t_max = 300.0
# Default position after calibration
q_init = np.array([0, 0.8, -1.6, 0, 0.8, -1.6, 0, -0.8, 1.6, 0, -0.8, 1.6])
# Create Estimator object
estimator = Estimator(DT, np.int(t_max/DT))
# Set the paths where the urdf and srdf file of the robot are registered
modelPath = "/opt/openrobots/share/example-robot-data/robots"
urdf = modelPath + "/solo_description/robots/solo12.urdf"
vector = pin.StdVec_StdString()
vector.extend(item for item in modelPath)
# Create the robot wrapper from the urdf model (which has no free flyer) and add a free flyer
robot = tsid.RobotWrapper(urdf, vector, pin.JointModelFreeFlyer(), False)
model = robot.model()
    # Creation of the Inverse Dynamics HQP problem using the robot
# accelerations (base + joints) and the contact forces
invdyn = tsid.InverseDynamicsFormulationAccForce("tsid", robot, False)
# Compute the problem data with a solver based on EiQuadProg
invdyn.computeProblemData(0.0, np.hstack(
(np.zeros(7), q_init)), np.zeros(18))
# Initiate communication with the device and calibrate encoders
if SIMULATION:
device.Init(calibrateEncoders=True, q_init=q_init, envID=0,
use_flat_plane=True, enable_pyb_GUI=enable_pyb_GUI, dt=DT)
else:
device.Init(calibrateEncoders=True, q_init=q_init)
# Wait for Enter input before starting the control loop
put_on_the_floor(device, q_init)
# CONTROL LOOP ***************************************************
t = 0.0
k = 0
while ((not device.hardware.IsTimeout()) and (t < t_max)):
device.UpdateMeasurment() # Retrieve data from IMU and Motion capture
# Run estimator with hind left leg touching the ground
estimator.run_filter(k, np.array(
[0, 0, 1, 0]), device, invdyn.data(), model)
# Zero desired torques
tau = np.zeros(12)
# Set desired torques for the actuators
device.SetDesiredJointTorque(tau)
# Call logger
if LOGGING:
logger.sample(device, qualisys=qc, estimator=estimator)
# Send command to the robot
device.SendCommand(WaitEndOfCycle=True)
if ((device.cpt % 100) == 0):
device.Print()
# Gepetto GUI
if k > 0:
pos = np.array(estimator.data.oMf[26].translation).ravel()
q_viewer[0:3] = np.array(
[-pos[0], -pos[1], estimator.FK_h]) # Position
q_viewer[3:7] = estimator.q_FK[3:7, 0] # Orientation
q_viewer[7:] = estimator.q_FK[7:, 0] # Encoders
v.display(q_viewer)
t += DT
k += 1
# ****************************************************************
# Whatever happened we send 0 torques to the motors.
device.SetDesiredJointTorque([0]*nb_motors)
device.SendCommand(WaitEndOfCycle=True)
if device.hardware.IsTimeout():
print("Masterboard timeout detected.")
print("Either the masterboard has been shut down or there has been a connection issue with the cable/wifi.")
# Shut down the interface between the computer and the master board
device.hardware.Stop()
# Save the logs of the Logger object
if LOGGING:
logger.saveAll()
if SIMULATION and enable_pyb_GUI:
# Disconnect the PyBullet server (also close the GUI)
device.Stop()
print("End of script")
quit()
def main():
"""Main function
"""
parser = argparse.ArgumentParser(
description='Playback trajectory to show the extent of solo12 workspace.')
parser.add_argument('-i',
'--interface',
required=True,
help='Name of the interface (use ifconfig in a terminal), for instance "enp1s0"')
mcapi_playback(parser.parse_args().interface)
if __name__ == "__main__":
main()
|
[
"odri@furano.laas.fr"
] |
odri@furano.laas.fr
|
2b87db4e14f1272da1ee35b3bc1503ec082556a5
|
d39d1a13750cfe8d4b117a22ce026a8e2d25c20e
|
/models.py
|
76e3c0d6dfc304c47882ea8aba47cd59ca536975
|
[] |
no_license
|
pocket-j/Movie_ticket_booking_system
|
bd4b441c80f29142bf64e82b29f6fa7df048504b
|
bcaf0447feb1dbf8af4de932e4e5f620c1320c2d
|
refs/heads/main
| 2023-07-12T19:39:24.319482
| 2021-08-19T21:16:03
| 2021-08-19T21:16:03
| 398,071,109
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,654
|
py
|
import sqlite3
class database:
def __init__(self):
self.conn = sqlite3.connect('movies.db')
self.create_database()
self.populate_database()
def __del__(self):
self.conn.commit()
self.conn.close()
def create_database(self):
self.create_table_users()
self.create_table_movies()
self.create_table_booking()
def populate_database(self):
self.insert_movies()
def create_table_users(self):
query = """
CREATE TABLE IF NOT EXISTS user (
id INTEGER PRIMARY KEY AUTOINCREMENT,
username TEXT NOT NULL,
password TEXT
);
"""
self.conn.execute(query)
def create_table_movies(self):
query = """DROP TABLE IF EXISTS Movies"""
self.conn.execute(query)
query = """
CREATE TABLE IF NOT EXISTS Movies (
movie_name TEXT ,
theatre_name TEXT,
location TEXT,
screen TEXT,
showtime TEXT,
available_seats text,
id INTEGER PRIMARY KEY AUTOINCREMENT
);
"""
self.conn.execute(query)
def create_table_booking(self):
query = """
CREATE TABLE IF NOT EXISTS booking (
id INTEGER PRIMARY KEY AUTOINCREMENT,
user_name TEXT,
movie_name TEXT,
theatre_name TEXT,
location_name TEXT,
showtime TEXT,
screen TEXT,
seat_number TEXT
);
"""
self.conn.execute(query)
def insert_movies(self):
query = """
INSERT INTO Movies (movie_name,theatre_name,location,screen, showtime,available_seats)
VALUES
('Shershaah', 'PVR', 'Bangalore', 'A', '13:30', '1,2,3,4,5'),
('Shershaah', 'Inox', 'Bangalore', 'A', '20:00', '1,2,3,4,5'),
('Shershaah', 'Gopalan', 'Bangalore', 'A', '21:30', '1,2,3,4,5'),
('Shershaah', 'ABC', 'Bangalore', 'A', '11:30', '1,2,3,4,5'),
('Mimi', 'Suresh', 'Bangalore', 'C', '21:00', '1,2,3,4,5'),
('Oxygen', 'Inox', 'Bangalore', 'A', '20:30', '11,12,13,14,15'),
('Nizhal', 'PVR', 'Hyderabad', 'C', '9:30', '1,2,3,4,5'),
('Pagglait', 'Inox', 'Hyderabad', 'B', '11:30', '11,12,13,14,15'),
('Master', 'PVR', 'Hyderabad', 'A', '13:30', '1,2,3,4,5'),
('Joji', 'Suresh', 'Bangalore', 'C', '21:00', '1,2,3,4,5'),
('Sherni', 'Inox', 'Bangalore', 'B', '20:30', '11,12,13,14,15'),
('Dia', 'PVR', 'Hyderabad', 'A', '9:30', '1,2,3,4,5'),
('Choked', 'Inox', 'Hyderabad', 'B', '11:30', '11,12,13,14,15');
"""
self.conn.execute(query)
class request:
def __init__(self):
self.conn = sqlite3.connect("movies.db")
self.conn.row_factory = sqlite3.Row
def __del__(self):
self.conn.commit()
self.conn.close()
def getMovieByLocation(self, location):
query = "select movie_name, theatre_name, location,showtime, screen, available_seats from Movies where " \
f"location = '{location}';"
result_set = self.conn.execute(query).fetchall()
result = [{column: row[i]
for i, column in enumerate(result_set[0].keys())}
for row in result_set]
return result
def getTheatreByMovies(self, movie_name):
query = "select movie_name, theatre_name, location, showtime, screen, available_seats" \
" from Movies where " \
f"movie_name = '{movie_name}';"
result_set = self.conn.execute(query).fetchall()
result = [{column: row[i]
for i, column in enumerate(result_set[0].keys())}
for row in result_set]
return result
def createUser(self, username, password):
query = f'insert into user ' \
f'(username, password) ' \
f'values ("{username}","{password}")'
self.conn.execute(query)
def validateUser(self, username, password):
query = "select * from user where " \
f"username = '{username}' and password = '{password}';"
result_set = self.conn.execute(query).fetchall()
if len(result_set) == 0:
return False
return True
def createEntry(self, location, movie_name, theatre, screen, seat, showtime, logged_in_user):
query = "INSERT INTO booking (user_name, movie_name, " \
"theatre_name, location_name, showtime, screen, seat_number)" \
" VALUES " \
f"('{logged_in_user}', '{movie_name}', '{theatre}', " \
f"'{location}', '{showtime}', '{screen}', '{seat}') ;"
self.conn.execute(query)
def updateMovies(self, location, movie_name, theatre, screen, seat, showtime):
query = "update Movies " \
f"set available_seats = '{seat}' where " \
f"location = '{location}' " \
f"and movie_name = '{movie_name}' " \
f"and theatre_name = '{theatre}' " \
f"and screen = '{screen}' " \
f"and showtime = '{showtime}' ;"
self.conn.execute(query)
def bookTicket(self, location, movie_name, theatre, screen, seat, showtime, logged_in_user):
query = "select movie_name, theatre_name, location, showtime, screen, available_seats" \
" from Movies where " \
f"location = '{location}' " \
f"and movie_name = '{movie_name}' " \
f"and theatre_name = '{theatre}' " \
f"and screen = '{screen}' "\
f"and showtime = '{showtime}' ;"
result_set = self.conn.execute(query).fetchall()
if len(result_set) != 1:
return False
s = result_set[0]["available_seats"]
li = s.split(",")
if seat not in li:
return False
self.createEntry(location, movie_name, theatre, screen, seat, showtime, logged_in_user)
li.remove(seat)
s = ','.join(li)
self.updateMovies(location, movie_name, theatre, screen, s, showtime)
return True
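# A minimal usage sketch (not part of the original module): build the schema, then
# book one seat through the request helper. The username and seat values below are
# illustrative only.
if __name__ == '__main__':
    db = database() # creates movies.db, (re)creates the Movies table and seeds it
    del db # database.__del__ commits the seed rows before a second connection reads them
    api = request()
    api.createUser('alice', 'secret')
    if api.validateUser('alice', 'secret'):
        ok = api.bookTicket('Bangalore', 'Shershaah', 'PVR', 'A', '3', '13:30', 'alice')
        print('Booking confirmed' if ok else 'Seat not available')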
|
[
"noreply@github.com"
] |
pocket-j.noreply@github.com
|
4d694cebc77c9de5da773ceb56aa01a9e6ec5e73
|
7950c4faf15ec1dc217391d839ddc21efd174ede
|
/leetcode-cn/0710.0_Random_Pick_with_Blacklist.py
|
3b7bb299f5cb18c059cfea8c345313621ac0f66e
|
[] |
no_license
|
lixiang2017/leetcode
|
f462ecd269c7157aa4f5854f8c1da97ca5375e39
|
f93380721b8383817fe2b0d728deca1321c9ef45
|
refs/heads/master
| 2023-08-25T02:56:58.918792
| 2023-08-22T16:43:36
| 2023-08-22T16:43:36
| 153,090,613
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,892
|
py
|
'''
66 / 68 test cases passed
Status: Time Limit Exceeded
'''
# shared by all three solutions below; the LeetCode runtime provides these implicitly
from typing import List
from random import choice, randint
class Solution:
def __init__(self, n: int, blacklist: List[int]):
black = set(blacklist)
self.white = [i for i in range(n) if i not in black]
def pick(self) -> int:
return choice(self.white)
# Your Solution object will be instantiated and called as such:
# obj = Solution(n, blacklist)
# param_1 = obj.pick()
'''
64 / 68 test cases passed
Status: Time Limit Exceeded
'''
class Solution:
def __init__(self, n: int, blacklist: List[int]):
self.n = n
self.black = set(blacklist)
def pick(self) -> int:
while True:
x = randint(0, self.n - 1)
if x not in self.black:
return x
# Your Solution object will be instantiated and called as such:
# obj = Solution(n, blacklist)
# param_1 = obj.pick()
'''
Approach: split [0, n) into the first n - k indices (whites, k = len(blacklist))
and the last k indices. Every blacklisted value below n - k is remapped through a
hash table to an unused white value >= n - k, so pick() only needs a uniform draw
from [0, n - k).
Runtime: 268 ms, faster than 78.59% of Python3 submissions
Memory: 26.1 MB, less than 24.58% of Python3 submissions
Test cases passed: 68 / 68
'''
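# A small worked illustration of the remapping (example values, not from the original
# submission): with n = 7 and blacklist = [2, 3, 5], white_cnt = 4; the only blacklisted
# value >= 4 is 5, so the unused whites above the cut are 4 and 6, giving b2w = {2: 4, 3: 6}.
# pick() draws x in [0, 4) and returns 0, 1, 4 or 6, i.e. each allowed value uniformly.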
class Solution:
def __init__(self, n: int, blacklist: List[int]):
self.white_cnt = n - len(blacklist)
black = set(b for b in blacklist if b >= self.white_cnt)
self.b2w = dict()
white_idx = self.white_cnt
for b in blacklist:
if b < self.white_cnt:
while white_idx in black:
white_idx += 1
self.b2w[b] = white_idx
white_idx += 1 #!!!!
def pick(self) -> int:
x = randint(0, self.white_cnt - 1)
return self.b2w.get(x, x)
# Your Solution object will be instantiated and called as such:
# obj = Solution(n, blacklist)
# param_1 = obj.pick()
|
[
"laoxing201314@outlook.com"
] |
laoxing201314@outlook.com
|
55d5c6d576016a5fcd1aa217737c43f2d162ca7e
|
bbbf80a7db71031a2e8608b1dc100f7d35e5316a
|
/SHINE_LIB/Evolution/Selector.py
|
100989e0ff4acf9a5ff66ac83126f9da2e7a8fc4
|
[] |
no_license
|
ouaguenouni/Illuminated_Learning
|
82d6404879b332688254ec8af07599507f6e8899
|
99e06f9af773dadf56fe71b4185a5caff6445c43
|
refs/heads/master
| 2023-02-14T13:28:50.293463
| 2021-01-14T11:51:47
| 2021-01-14T11:51:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,182
|
py
|
from .Archive import *
from deap import algorithms
from deap import base
from deap import benchmarks
from deap import creator
from deap import tools
from .grid_management import *
class Selector():
def __init__(self, archive_class, **kwargs):
self.archive = archive_class(k=kwargs["nov_k"], lambda_ = kwargs["nov_lambda"])
self.grid = Grid(kwargs["grid_min_v"],kwargs["grid_max_v"],kwargs["dim_grid"])
def update_with_offspring(self, offspring):
for ind in offspring:
self.grid.add(ind)
self.archive.update_offspring(offspring)
pass
def compute_objectifs(self, population):
pass
def select(self, pq, mu):
return tools.selNSGA2(pq,mu)
def save_stats(self,resdir):
self.grid.dump(resdir)
self.grid.get_stats(resdir, 1000)
class Selector_FITNS(Selector):
def __init__(self, **kwargs):
super().__init__(Novelty_Archive_random, **kwargs)
def compute_objectifs(self, population):
self.archive.apply_novelty_estimation(population)
for i in population:
i.fitness.values = (i.fit, i.novelty)
class Selector_FIT(Selector):
def __init__(self, **kwargs):
super().__init__(Novelty_Archive_random, **kwargs)
def compute_objectifs(self, population):
self.archive.apply_novelty_estimation(population)
for i in population:
i.fitness.values = (i.fit, )
class Selector_NS(Selector):
def __init__(self, **kwargs):
super().__init__(Novelty_Archive_random, **kwargs)
def compute_objectifs(self, population):
self.archive.apply_novelty_estimation(population)
for i in population:
i.fitness.values = (i.novelty, )
class Selector_SHINE(Selector):
def __init__(self, **kwargs):
self.archive = Shine_Archive(600,600,alpha=kwargs["alpha"],beta=kwargs["beta"])
self.grid = Grid(kwargs["grid_min_v"],kwargs["grid_max_v"],kwargs["dim_grid"])
def update_with_offspring(self, offspring):
for ind in offspring:
self.grid.add(ind)
self.archive.update_offspring(offspring)
pass
def compute_objectifs(self, population):
for i in population:
n = self.archive.search(Behaviour_Descriptor(i))
if(len(n.val) > 0):
i.fitness.values = (self.archive.beta / (self.archive.beta*n.level + len(n.val) ),)
else:
i.fitness.values = (-np.inf,)
pass
def select(self, pq, mu):
return tools.selNSGA2(pq,mu)
class Selector_SHINE_DISC(Selector): #M.O
def __init__(self, **kwargs):
self.archive = Shine_Archive(600,600,alpha=kwargs["alpha"],beta=kwargs["beta"])
self.grid = Grid(kwargs["grid_min_v"],kwargs["grid_max_v"],kwargs["dim_grid"])
def update_with_offspring(self, offspring):
for ind in offspring:
self.grid.add(ind)
self.archive.update_offspring(offspring)
pass
def compute_objectifs(self, population):
for i in population:
n = self.archive.search(Behaviour_Descriptor(i))
if(n!= None and len(n.val) > 0):
i.fitness.values = (n.level ,len(n.val) )
else:
i.fitness.values = (np.inf,self.archive.beta,len(n.val))
pass
def select(self, pq, mu):
return tools.selNSGA2(pq,mu)
class Selector_SHINE_COL(Selector):
def __init__(self, **kwargs):
self.archive = Shine_Archive_COL(600,600,alpha=kwargs["alpha"],beta=kwargs["beta"])
self.grid = Grid(kwargs["grid_min_v"],kwargs["grid_max_v"],kwargs["dim_grid"])
def update_with_offspring(self, offspring):
for ind in offspring:
self.grid.add(ind)
self.archive.update_offspring(offspring)
pass
def compute_objectifs(self, population):
for i in population:
n = self.archive.search(Behaviour_Descriptor(i))
if(n!= None and len(n.val) > 0):
i.fitness.values = (n.level ,len(n.val) )
else:
i.fitness.values = (np.inf,self.archive.beta,len(n.val))
pass
def select(self, pq, mu):
return tools.selNSGA2(pq,mu)
class Selector_SHINE_PARETO(Selector):
def __init__(self, **kwargs):
self.archive = Shine_Archive_PARETO(600,600,alpha=kwargs["alpha"],beta=kwargs["beta"])
self.grid = Grid(kwargs["grid_min_v"],kwargs["grid_max_v"],kwargs["dim_grid"])
def update_with_offspring(self, offspring):
for ind in offspring:
self.grid.add(ind)
self.archive.update_offspring(offspring)
pass
def compute_objectifs(self, population):
for i in population:
n = self.archive.search(Behaviour_Descriptor(i))
if(n!= None and len(n.val) > 0):
i.fitness.values = (n.level ,len(n.val) )
else:
i.fitness.values = (np.inf,self.archive.beta,len(n.val))
pass
def select(self, pq, mu):
return tools.selNSGA2(pq,mu)
class Selector_MAPElites_FIT(Selector):
def __init__(self, **kwargs):
self.grid = Grid(kwargs["grid_min_v"],kwargs["grid_max_v"],kwargs["dim_grid"], comparator=self.compare)
def update_with_offspring(self, offspring):
for i in offspring:
self.grid.add(i)
pass
def compute_objectifs(self, population):
for i in population:
i.fitness.values = (i.fit, )
pass
def compare(self,ind1,ind2):
return ind1.fit > ind2.fit
def select(self, pq, mu):
self.update_with_offspring(pq)
inds = sorted(self.grid.content.values(), key = lambda x:(x.fitness.values[0]), reverse=True)[:mu] #Descendant
return inds
class Selector_MAPElites_COL(Selector):
def __init__(self, **kwargs):
self.grid = Grid(kwargs["grid_min_v"],kwargs["grid_max_v"],kwargs["dim_grid"])
def update_with_offspring(self, offspring):
for i in offspring:
self.grid.add(i)
pass
def compute_objectifs(self, population):
for i in population:
i.fitness.values = (i.fit, )
pass
def select(self, pq, mu):
self.update_with_offspring(pq)
inds = sorted(self.grid.content.values(), key = lambda x:(x.fitness.values[0]), reverse=True)[:mu] #Ascendant
return inds
class Selector_MAPElites_POL(Selector):
def __init__(self, **kwargs):
self.grid = Grid_POL(kwargs["grid_min_v"],kwargs["grid_max_v"],kwargs["dim_grid"])
def update_with_offspring(self, offspring):
for i in offspring:
self.grid.add(i)
pass
def compute_objectifs(self, population):
for i in population:
i.fitness.values = (i.fit, )
pass
def select(self, pq, mu):
self.update_with_offspring(pq)
inds = sorted(self.grid.content.values(), key = lambda x:(x.fitness.values[0]), reverse=True)[:mu+1] #Descendant
#print("Selected individuals : ",[(self.grid.get_grid_coord(ind),ind.fit) for ind in inds])
return inds
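# A minimal sketch (hypothetical parameter values) of how these selectors are meant
# to be driven from an evolutionary loop; individuals are assumed to carry .fit,
# .novelty and a behaviour descriptor as expected by the archives above:
#
#   selector = Selector_FIT(nov_k=15, nov_lambda=6,
#                           grid_min_v=[0, 0], grid_max_v=[600, 600], dim_grid=[50, 50])
#   for gen in range(n_generations):
#       offspring = vary(population)                   # user-supplied variation step
#       selector.update_with_offspring(offspring)      # feeds the grid and the archive
#       selector.compute_objectifs(population + offspring)
#       population = selector.select(population + offspring, mu)
#   selector.save_stats(resdir)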
|
[
"ouaguenouni.hachemi@gmail.com"
] |
ouaguenouni.hachemi@gmail.com
|
a06a2166be08f9c98592869160cbf1bc3e7f9086
|
4ce093f592839370ec677f6987d6d758cf3fd862
|
/junk_trainer.py
|
73c6dbb429468de1ff45b189d87c47bdab4a7d5d
|
[] |
no_license
|
KonstantinPakulev/cohortney
|
935c3db916197275dc1be9fa768f24b591251351
|
67973c7b60598572e4c792efca0623e712777ca6
|
refs/heads/main
| 2023-05-01T12:25:21.165347
| 2021-05-14T01:43:04
| 2021-05-14T01:43:04
| 367,213,053
| 0
| 0
| null | 2021-05-14T01:02:15
| 2021-05-14T01:02:15
| null |
UTF-8
|
Python
| false
| false
| 37,248
|
py
|
"""
This file contains trainers that conduct training of the model according to the considered methods
"""
import time
import numpy as np
from src.utils.metrics import purity, info_score
import torch
import math
from sklearn.cluster import KMeans
from src.networks.lstm_pp import LSTMMultiplePointProcesses
class TrainerSingle:
"""
Trainer for single point process model
"""
def __init__(self, model, optimizer, criterion, x, val,
max_epochs=100, batch_size=30, generator_model=None):
"""
input:
model - torch.nn.Module, model to train
optimizer - optimizer to train model
criterion - loss to optimize, takes batch, lambdas, dts
x - torch.Tensor, training data
val - torch.Tensor, validation data
max_epochs - int, number of epochs for sgd training
batch_size - int, batch size
generator_model - torch.nn.Module, true model, that was used for generation or None
model parameters:
the same as inputs
"""
self.N = x.shape[0]
self.model = model
self.optimizer = optimizer
self.criterion = criterion
self.x = x
self.val = val
self.max_epochs = max_epochs
self.batch_size = batch_size
self.generator_model = generator_model
def train_epoch(self):
"""
Conducts one epoch training
input:
None
output:
log_likelihood - list, list of all losses obtained during batch iterations
mse - list, list of mean squared errors between obtained lambdas and lambdas of true model,
if true model is not provided, then the list is empty
val_ll - torch.Tensor, size = (1), log likelihood on validation dataset
val_mse - float, mean squared error between obtained lambdas and lambdas of true model on validation,
if true model is not provided, then None
"""
indices = np.random.permutation(self.N)
self.model.train()
# initializing outputs
log_likelihood = []
mse = []
val_mse = None
# iterations over minibatches
for iteration, start in enumerate(range(0, self.N - self.batch_size, self.batch_size)):
batch_ids = indices[start:start + self.batch_size]
batch = self.x[batch_ids]
# optimization
self.optimizer.zero_grad()
lambdas = self.model(batch)
loss = self.criterion(batch, lambdas, batch[:, 0, 0])
loss.backward()
self.optimizer.step()
# saving results
log_likelihood.append(loss.item())
if self.generator_model:
true_lambdas = self.generator_model(batch)
mse.append(np.var((lambdas.detach().numpy() - true_lambdas.detach().numpy())))
# validation
self.model.eval()
lambdas = self.model(self.val)
val_ll = self.criterion(self.val, lambdas, self.val[:, 0, 0])
if self.generator_model:
true_lambdas = self.generator_model(self.val)
val_mse = np.var((lambdas.detach().numpy() - true_lambdas.detach().numpy()))
return log_likelihood, mse, val_ll, val_mse
def train(self):
"""
Conducts training
input:
None
output:
losses - list, list of all mean log likelihoods obtained during training on each epoch
val_losses - list, list of all log likelihoods obtained during training on each epoch on validation
mses - list, list of all mean squared errors between obtained lambdas and true lambdas on each epoch
val_mses - list, the same but on validation
"""
        if self.generator_model:
            self.generator_model.eval()
# initializing outputs
losses = []
val_losses = []
mses = []
val_mses = []
# iterations over epochs
for epoch in range(self.max_epochs):
ll, mse, val_ll, val_mse = self.train_epoch()
losses.append(np.mean(ll))
val_losses.append(val_ll)
mses.append(np.mean(mse))
val_mses.append(val_mse)
# logs
if len(mse):
print('On epoch {}/{}, ll = {}, mse = {}, val_ll = {}, val_mse = {}'
.format(epoch, self.max_epochs,
np.mean(ll), np.mean(mse), val_ll, val_mse))
else:
print('On epoch {}/{}, ll = {}, val_ll = {}'.format(epoch, self.max_epochs,
np.mean(ll), val_ll))
return losses, val_losses, mses, val_mses
class TrainerClusterwise:
"""
Trainer for multiple point processes clustering
"""
def __init__(self, model, optimizer, device, data, n_clusters, target=None,
epsilon=1e-8, max_epoch=50, max_m_step_epoch=50, weight_decay=1e-5, lr=1e-3, lr_update_tol=25,
lr_update_param=0.5, random_walking_max_epoch=40, true_clusters=5, upper_bound_clusters=10,
min_lr=None, updated_lr=None, batch_size=150, verbose=False, best_model_path=None,
max_computing_size=None, full_purity=True):
"""
inputs:
model - torch.nn.Module, model to train
optimizer - optimizer used for training
device - device, that is used for training
data - torch.Tensor, size = (N, sequence length, number of classes + 1),
partitions of the point processes
n_clusters - int, initial number of different point processes
target - torch.Tensor, size = (N), true labels or None
epsilon - float, used for log-s regularization log(x) -> log(x + epsilon)
max_epoch - int, number of epochs of EM algorithm
max_m_step_epoch - float, number of epochs of neural net training on M-step
lr_update_tol - int, tolerance before updating learning rate
lr_update_param - float, learning rate multiplier
random_walking_max_epoch - int, number of epochs when random walking of the number of clusters
is available
true_clusters - int, true number of clusters
upper_bound_clusters - int, upper bound of the number of clusters
min_lr - float - minimal lr value, when achieved lr is updated to updated_lr and update params set
to default
updated_lr - float, lr after achieving min_lr
batch_size - int, batch size during neural net training
verbose - bool, if True, provides info during training
best_model_path - str, where the best model according to loss should be saved or None
            max_computing_size - int, if not None, then constrains gamma size (one EM-algorithm step)
            full_purity - bool, if True, purity is computed on the whole dataset
parameters:
N - int, number of data points
model - torch.nn.Module, model to train
optimizer - optimizer used for training
device - device used for training
X - torch.Tensor, size = (N, sequence length, number of classes + 1),
partitions of the point processes
target - torch.Tensor, size = (N), true labels or None
n_clusters - int, number of different point processes
max_epoch - int, number of epochs of EM algorithm
lr_update_tol - int, tolerance before updating learning rate
            update_checker - int, checker that is compared to the tolerance, increased by one every time the loss is
            greater than on the previous iteration
lr_update_param - float, learning rate multiplier
random_walking_max_epoch - int, number of epochs when random walking of the number of clusters
is available
true_clusters - int, true number of clusters
upper_bound_clusters - int, upper bound of the number of clusters
min_lr - float - minimal lr value, when achieved lr is updated to updated_lr and update params set
to default
updated_lr - float, lr after achieving min_lr
epsilon - float, used for log-s regularization log(x) -> log(x + epsilon)
prev_loss - float, loss on previous iteration, used for updating update_checker
batch_size - int, batch size during neural net training
pi - torch.Tensor, size = (n_clusters), mixing coefficients, here are fixed and equal 1/n_clusters
gamma - torch.Tensor, size = (n_clusters, number of data points), probabilities p(k|x_n)
best_model_path - str, where the best model according to loss should be saved or None
prev_loss_model - float, loss obtained for the best model
            max_computing_size - int, if not None, then constrains gamma size (one EM-algorithm step)
            full_purity - bool, if True, purity is computed on the whole dataset
"""
self.N = data.shape[0]
self.model = model
self.optimizer = optimizer
self.device = device
if max_computing_size is None:
self.X = data.to(device)
            if target is not None:
self.target = target.to(device)
else:
self.target = None
else:
self.X = data
            if target is not None:
self.target = target
else:
self.target = None
self.n_clusters = n_clusters
self.max_epoch = max_epoch
self.weight_decay = weight_decay
self.lr = lr
self.lr_update_tol = lr_update_tol
self.min_lr = min_lr
self.updated_lr = updated_lr
self.update_checker = -1
self.epsilon = epsilon
self.lr_update_param = lr_update_param
self.prev_loss = 0
self.max_m_step_epoch = max_m_step_epoch
self.batch_size = batch_size
self.pi = (torch.ones(n_clusters) / n_clusters).to(device)
if max_computing_size is None:
self.gamma = torch.zeros(n_clusters, self.N).to(device)
else:
self.gamma = torch.zeros(n_clusters, max_computing_size).to(device)
self.max_computing_size = max_computing_size
self.verbose = verbose
self.best_model_path = best_model_path
self.prev_loss_model = 0
self.full_purity = full_purity
self.start_time = time.time()
self.random_walking_max_epoch = random_walking_max_epoch
self.true_clusters = true_clusters
self.upper_bound_clusters = upper_bound_clusters
def loss(self, partitions, lambdas, gamma):
"""
Computes loss
inputs:
partitions - torch.Tensor, size = (batch_size, seq_len, number of classes + 1)
lambdas - torch.Tensor, size = (batch_size, seq_len, number of classes), model output
gamma - torch.Tensor, size = (n_clusters, batch_size), probabilities p(k|x_n)
outputs:
loss - torch.Tensor, size = (1), sum of output log likelihood weighted with convoluted gamma
and prior distribution log likelihood
"""
# computing poisson parameters
dts = partitions[:, 0, 0].to(self.device)
dts = dts[None, :, None, None].to(self.device)
tmp = lambdas * dts
# preparing partitions
p = partitions[None, :, :, 1:].to(self.device)
        # negative Poisson log-likelihood of every timestamp bin: lambda*dt - p*log(lambda*dt) + log(p!)
        tmp1 = tmp - p * torch.log(tmp + self.epsilon) + torch.lgamma(p + 1)
# computing log likelihoods of data points
tmp2 = torch.sum(tmp1, dim=(2, 3))
# computing loss
tmp3 = gamma.to(self.device) * tmp2
loss = torch.sum(tmp3)
return loss
def compute_gamma(self, lambdas, x=None, size=None, device='cpu'):
"""
Computes gamma
inputs:
lambdas - torch.Tensor, size = (batch_size or N, seq_len, number of classes), model output
x - torch.Tensor, size = (batch_size or N, seq_len, number of classes + 1),
data, that was processed or None
size - tuple, gamma size or None
device - device to compute
outputs:
gamma - torch.Tensor, size = (n_clusters, batch_size or N), probabilities p(k|x_n)
"""
# preparing gamma template
if size is None:
gamma = torch.zeros_like(self.gamma)
else:
gamma = torch.zeros(size)
# preparing delta times and partitions for computing gamma
if x is None:
dts = self.X[:, 0, 0].to(device)
dts = dts[None, :, None, None].to(device)
partitions = self.X[:, :, 1:].to(device)
partitions = partitions[None, :, :, :].to(device)
else:
dts = x[:, 0, 0].to(device)
dts = dts[None, :, None, None].to(device)
partitions = x[:, :, 1:].to(device)
partitions = partitions[None, :, :, :].to(device)
# iterations over clusters
for k in range(self.n_clusters):
# lambdas of current cluster
lambdas_k = lambdas[k, :, :, :]
lambdas_k = lambdas_k[None, :, :, :]
# weighs in sum
w = self.pi / self.pi[k]
w = w[:, None].to(device)
# computing gamma for k-th cluster
tmp_sub = (lambdas.to(device) - lambdas_k.to(device)) * dts.to(device)
tmp = torch.sum(- tmp_sub + partitions * (
torch.log(lambdas.to(device) + self.epsilon) - torch.log(lambdas_k.to(device) + self.epsilon)),
dim=(2, 3))
tmp = 1 / (torch.sum(w * torch.exp(tmp), dim=0))
# resolving nans
tmp[tmp != tmp] = 0
gamma[k, :] = tmp
return gamma
def get_gamma_stats(self):
"""
Obtains gamma (probabilities) stats
inputs:
None
outputs:
stats - dict:
stats['min'] - minimal probability per cluster
stats['max'] - maximal probability per cluster
stats['min_main'] - minimal probability of predicted cluster
stats['max_main'] - maximal probability of predicted cluster
stats['mean_main'] - mean probability of predicted cluster
stats['std_main'] - std of probabilities of predicted cluster
stats['median_main'] - median of probabilities of predicted cluster
"""
stats = dict()
# computing stats
stats['min'] = torch.min(self.gamma, dim=1).values
stats['max'] = torch.max(self.gamma, dim=1).values
stats['min_main'] = torch.min(torch.max(self.gamma, dim=0).values)
stats['max_main'] = torch.max(torch.max(self.gamma, dim=0).values)
stats['mean_main'] = torch.mean(torch.max(self.gamma, dim=0).values)
stats['std_main'] = torch.std(torch.max(self.gamma, dim=0).values)
stats['median_main'] = torch.median(torch.max(self.gamma, dim=0).values)
return stats
def get_model_stats(self):
"""
Obtains model parameters stats
inputs:
None
outputs:
stats - list of dicts:
stats[i]['min'] - minimal value in weighs of i-th parameter
stats[i]['max'] - maximal value in weighs of i-th parameter
stats[i]['mean'] - mean value of weighs of i-th parameter
stats[i]['std'] - std of values of weighs of i-th parameter
stats[i]['median'] - median of values of weighs of i-th parameter
"""
stats = []
# iterations over model parameters
for param in self.model.parameters():
sub_stats = dict()
sub_stats['min'] = torch.min(param.data)
sub_stats['max'] = torch.max(param.data)
sub_stats['mean'] = torch.mean(param.data)
sub_stats['std'] = torch.std(param.data)
sub_stats['median'] = torch.median(param.data)
stats.append(sub_stats)
return stats
@staticmethod
def get_lambda_stats(lambdas):
"""
Obtains lambda stats
inputs:
lambdas - torch.Tensor, size = (batch_size or N, seq_len, number of classes), model output
outputs;
stats - dict:
stats['min'] - minimal value of lambdas in cluster for each type of event
stats['max'] - maximal value of lambdas in cluster for each type of event
stats['mean'] - mean value of lambdas in each cluster for each type of event
stats['std'] - std of values of lambdas in each cluster for each type of event
"""
stats = dict()
# computing stats
stats['min'] = torch.min(lambdas, dim=1).values
stats['min'] = torch.min(stats['min'], dim=1).values
stats['max'] = torch.max(lambdas, dim=1).values
stats['max'] = torch.max(stats['max'], dim=1).values
stats['mean'] = torch.mean(lambdas, dim=(1, 2))
stats['std'] = torch.std(lambdas, dim=(1, 2))
return stats
def e_step(self, ids=None):
"""
Conducts E-step of EM-algorithms, saves the result to self.gamma
inputs:
None
outputs:
None
"""
self.model.eval()
with torch.no_grad():
if ids is None:
lambdas = self.model(self.X)
self.gamma = self.compute_gamma(lambdas)
else:
lambdas = self.model(self.X[ids].to(self.device))
self.gamma = self.compute_gamma(lambdas, x=self.X[ids], size=(self.n_clusters, len(ids)))
def train_epoch(self, big_batch=None):
"""
Conducts one epoch of Neural Net training
inputs:
None
outputs:
log_likelihood - list of losses obtained during iterations over minibatches
"""
# preparing random indices
if self.max_computing_size is None:
indices = np.random.permutation(self.N)
else:
indices = np.random.permutation(self.max_computing_size)
# setting model to training and preparing output template
self.model.train()
log_likelihood = []
# iterations over minibatches
for iteration, start in enumerate(range(0, (self.N if self.max_computing_size is None
else self.max_computing_size) - self.batch_size, self.batch_size)):
# preparing batch
batch_ids = indices[start:start + self.batch_size]
if self.max_computing_size is None:
batch = self.X[batch_ids].to(self.device)
else:
batch = big_batch[batch_ids].to(self.device)
# one step of training
self.optimizer.zero_grad()
lambdas = self.model(batch).to(self.device)
loss = self.loss(batch, lambdas, self.gamma[:, batch_ids])
loss.backward()
self.optimizer.step()
# saving results
log_likelihood.append(loss.item())
if np.mean(log_likelihood) > self.prev_loss:
self.update_checker += 1
if self.update_checker >= self.lr_update_tol:
self.update_checker = 0
lr = 0
for param_group in self.optimizer.param_groups:
param_group['lr'] *= self.lr_update_param
lr = param_group['lr']
if self.min_lr is not None:
if lr < self.min_lr:
param_group['lr'] = self.updated_lr
if self.min_lr is not None:
if lr < self.min_lr:
lr = self.updated_lr
self.lr = lr
# saving previous loss
self.prev_loss = np.mean(log_likelihood)
return log_likelihood
def m_step(self, big_batch=None, ids=None):
"""
Conducts M-step of EM-algorithm
inputs:
None
outputs:
log_likelihood_curve - list of floats, losses, obtained during iterations over M-step epochs
and minibatches
m_step_results - [log_likelihood, purity], mean value of log_likelihood on the last epoch
and purity on the last epoch
cluster_partitions - float, the minimal value of cluster partition
"""
# preparing output template
log_likelihood_curve = []
ll = []
# iterations over M-step epochs
for epoch in range(int(self.max_m_step_epoch)):
# one epoch training
ll = self.train_epoch(big_batch=big_batch)
log_likelihood_curve += ll
# checking for failure
if np.mean(ll) != np.mean(ll):
return None, None, None
# logs
if epoch % 10 == 0 and self.verbose:
print('Loss on sub_epoch {}/{}: {}'.format(epoch + 1,
self.max_m_step_epoch,
np.mean(ll)))
# evaluating model
self.model.eval()
with torch.no_grad():
if (self.max_computing_size is None) or self.full_purity:
lambdas = self.model(self.X.to(self.device))
gamma = self.compute_gamma(lambdas, x=self.X, size=(self.n_clusters, self.N))
loss = self.loss(self.X.to(self.device), lambdas.to(self.device), gamma.to(self.device)).item()
else:
lambdas = self.model(big_batch)
gamma = self.compute_gamma(lambdas, x=big_batch, size=(self.n_clusters, self.max_computing_size))
loss = self.loss(big_batch.to(self.device), lambdas.to(self.device), gamma.to(self.device)).item()
clusters = torch.argmax(gamma, dim=0)
if self.verbose:
print('Cluster partition')
cluster_partition = 2
for i in np.unique(clusters.cpu()):
if self.verbose:
print('Cluster', i, ': ', np.sum((clusters.cpu() == i).cpu().numpy()) / len(clusters),
' with pi = ', self.pi[i])
cluster_partition = min(cluster_partition, np.sum((clusters.cpu() == i).cpu().numpy()) / len(clusters))
            if self.target is not None:
pur = purity(clusters.to('cpu'),
self.target[ids] if (ids is not None) and (not self.full_purity) else self.target.to(
'cpu'))
info = info_score(clusters.to('cpu'),
self.target[ids] if (ids is not None) and (not self.full_purity) else \
self.target.to('cpu'), len(np.unique(self.target.to('cpu'))))
else:
pur = -1
info = -1
return log_likelihood_curve, [loss, pur, info], cluster_partition
def compute_ll(self, big_batch, ids, to_print):
if (self.max_computing_size is None) or self.full_purity:
lambdas = self.model(self.X.to(self.device))
gamma = self.compute_gamma(lambdas, x=self.X, size=(self.n_clusters, self.N))
ll = self.loss(self.X.to(self.device), lambdas.to(self.device), gamma.to(self.device)).item()
else:
lambdas = self.model(big_batch)
gamma = self.compute_gamma(lambdas, x=big_batch, size=(self.n_clusters, self.max_computing_size))
ll = self.loss(big_batch.to(self.device), lambdas.to(self.device), gamma.to(self.device)).item()
clusters = torch.argmax(gamma, dim=0)
if self.verbose:
print('Cluster partition')
cluster_partition = 2
for i in np.unique(clusters.cpu()):
if self.verbose:
print('Cluster', i, ': ', np.sum((clusters.cpu() == i).cpu().numpy()) / len(clusters),
' with pi = ', self.pi[i])
cluster_partition = min(cluster_partition, np.sum((clusters.cpu() == i).cpu().numpy()) / len(clusters))
        if self.target is not None:
pur = purity(clusters.to('cpu'),
self.target[ids] if (ids is not None) and (not self.full_purity) else self.target.to('cpu'))
else:
pur = None
if self.verbose:
print('{} loss = {}, purity = {}'.format(to_print, ll, pur))
return ll
def train(self):
"""
Conducts training
inputs:
None
outputs:
losses - list, list of losses obtained during training
purities - list of [loss, purity, cluster_partition]
cluster_part - the last cluster partition
all_stats - all_stats on every EM-algorithm epoch
"""
self.start_time = time.time()
# preparing output templates
losses = []
purities = []
cluster_part = 0
all_stats = []
# iterations over EM-algorithm epochs
for epoch in range(self.max_epoch):
if self.verbose:
print('Beginning e-step')
# preparing big_batch if needed
if self.max_computing_size is not None:
ids = np.random.permutation(self.N)[:self.max_computing_size]
big_batch = self.X[ids].to(self.device)
else:
ids = None
big_batch = None
# E-step
self.e_step(ids=ids)
# Random model results
if epoch == 0:
if (ids is None) or (not self.full_purity):
clusters = torch.argmax(self.gamma, dim=0)
else:
clusters = torch.argmax(self.compute_gamma(self.model(self.X.to(self.device)), x=self.X,
size=(self.n_clusters, self.N)), dim=0)
if self.verbose:
print('Cluster partition')
for i in np.unique(clusters.cpu()):
print('Cluster', i, ': ', np.sum((clusters.cpu() == i).cpu().numpy()) / len(clusters),
' with pi = ', self.pi[i])
                if self.target is not None:
random_pur = purity(clusters,
self.target[ids] if (ids is not None) and (
not self.full_purity) else self.target)
else:
random_pur = None
if self.verbose:
print('Purity for random model: {}'.format(random_pur))
# saving stats
all_stats.append(dict())
all_stats[-1]['gamma'] = self.get_gamma_stats()
all_stats[-1]['model'] = self.get_model_stats()
if big_batch is not None:
lambdas = self.model(big_batch)
else:
lambdas = self.model(self.X)
all_stats[-1]['lambdas'] = self.get_lambda_stats(lambdas)
# M-step
if self.verbose:
print('Beginning m-step')
for param_group in self.optimizer.param_groups:
lr = param_group['lr']
break
print('lr =', lr)
print('lr_update_param =', self.lr_update_param)
ll, ll_pur, cluster_part = self.m_step(big_batch=big_batch, ids=ids)
# failure check
if ll is None:
return None, None, None, None
# saving results
losses += ll
t = time.time()
time_from_start = t - self.start_time
purities.append(ll_pur[:2] + [cluster_part, self.n_clusters, time_from_start])
if self.verbose:
print('On epoch {}/{} loss = {}, purity = {}, info = {}'.format(epoch + 1, self.max_epoch,
ll_pur[0], ll_pur[1], ll_pur[2]))
print('Time from start = {}'.format(time_from_start))
# saving model
if self.best_model_path and (ll_pur[0] < self.prev_loss_model or epoch == 0):
if self.verbose:
print('Saving model')
torch.save(self.model, self.best_model_path)
self.prev_loss_model = ll_pur[0]
# computing stats
self.model.eval()
with torch.no_grad():
all_stats.append(dict())
all_stats[-1]['gamma'] = self.get_gamma_stats()
all_stats[-1]['model'] = self.get_model_stats()
if big_batch is not None:
lambdas = self.model(big_batch)
else:
lambdas = self.model(self.X)
all_stats[-1]['lambdas'] = self.get_lambda_stats(lambdas)
if epoch > self.random_walking_max_epoch and self.n_clusters > self.true_clusters:
enforce = True
else:
enforce = False
# updating number of clusters
if epoch <= self.random_walking_max_epoch or enforce:
if ((torch.rand(1) > 0.5)[
0] or self.n_clusters == 1) and self.n_clusters < self.upper_bound_clusters and not enforce:
split = True
else:
split = False
torch.save(self.model, 'tmp.pt')
pre_ll = float(self.compute_ll(big_batch, ids, 'Before:'))
if split:
if self.verbose:
print('Splitting')
for cluster in range(self.n_clusters):
self.model.to('cpu')
self.model.eval()
with torch.no_grad():
self.model.split_cluster(cluster, 'cpu')
self.n_clusters += 1
self.model.to(self.device)
self.pi = torch.ones(self.n_clusters) / self.n_clusters
post_ll = float(self.compute_ll(big_batch, ids, 'After splitting {} cluster:'.format(cluster)))
remain_prob = min(1, math.exp(min(- post_ll + pre_ll, math.log(math.e))))
if self.verbose:
print('Remain probability: {}'.format(remain_prob))
if (torch.rand(1) > remain_prob)[0]:
if self.verbose:
print('Loading model')
self.model = torch.load('tmp.pt')
self.n_clusters -= 1
self.pi = torch.ones(self.n_clusters) / self.n_clusters
else:
enforce = False
break
else:
if enforce:
best_loss_enf = 1e+9
if (torch.rand(1) > 0.5)[0]:
merge = True
else:
merge = False
if merge and not enforce:
if self.verbose:
print('Merging')
cluster_0 = int(torch.randint(self.n_clusters, size=(1,))[0])
for cluster_1 in range(self.n_clusters):
if cluster_1 == cluster_0:
continue
self.model.to('cpu')
self.model.eval()
with torch.no_grad():
self.model.merge_clusters(cluster_0, cluster_1, 'cpu')
self.n_clusters -= 1
self.pi = torch.ones(self.n_clusters) / self.n_clusters
self.model.to(self.device)
post_ll = float(self.compute_ll(big_batch, ids,
'After merging {} and {} clusters:'.format(cluster_0,
cluster_1)))
remain_prob = min(1, math.exp(min(- post_ll + pre_ll, math.log(math.e))))
if self.verbose:
print('Remain probability: {}'.format(remain_prob))
if (torch.rand(1) > remain_prob)[0]:
if self.verbose:
print('Loading model')
self.model = torch.load('tmp.pt')
self.n_clusters += 1
self.pi = torch.ones(self.n_clusters) / self.n_clusters
else:
break
else:
if self.verbose:
print('Deleting')
for cluster in range(self.n_clusters):
self.model.to('cpu')
self.model.eval()
with torch.no_grad():
self.model.delete_cluster(cluster, 'cpu')
self.n_clusters -= 1
self.pi = torch.ones(self.n_clusters) / self.n_clusters
self.model.to(self.device)
post_ll = float(
self.compute_ll(big_batch, ids, 'After deleting {} cluster:'.format(cluster)))
remain_prob = min(1, math.exp(min(- post_ll + pre_ll, math.log(math.e))))
if self.verbose:
print('Remain probability: {}'.format(remain_prob))
if (torch.rand(1) > remain_prob)[0]:
if enforce:
if post_ll < best_loss_enf:
if self.verbose:
print('Saving enforced model')
best_loss_enf = post_ll
torch.save(self.model, 'best_tmp.pt')
if self.verbose:
print('Loading model')
self.model = torch.load('tmp.pt')
self.n_clusters += 1
self.pi = torch.ones(self.n_clusters) / self.n_clusters
else:
break
if enforce:
self.model = torch.load('best_tmp.pt')
self.n_clusters -= 1
self.pi = torch.ones(self.n_clusters) / self.n_clusters
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr, weight_decay=self.weight_decay)
if self.max_computing_size is None:
self.gamma = torch.zeros(self.n_clusters, self.N).to(self.device)
else:
self.gamma = torch.zeros(self.n_clusters, self.max_computing_size).to(self.device)
return losses, purities, cluster_part, all_stats
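# A minimal usage sketch (not from the original file). The model is expected to map a
# batch of partitions of shape (batch, seq_len, n_classes + 1) to intensities of shape
# (n_clusters, batch, seq_len, n_classes); LSTMMultiplePointProcesses imported above is
# such a model, but its constructor arguments are not shown here, so model creation is
# left abstract:
#
#   device = 'cuda' if torch.cuda.is_available() else 'cpu'
#   optimizer = torch.optim.Adam(model.parameters(), lr=1e-3, weight_decay=1e-5)
#   trainer = TrainerClusterwise(model, optimizer, device, data, n_clusters=5,
#                                target=labels, max_epoch=50, max_m_step_epoch=50,
#                                batch_size=150, verbose=True)
#   losses, purities, cluster_part, all_stats = trainer.train()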
|
[
"noreply@github.com"
] |
KonstantinPakulev.noreply@github.com
|
fe0d86c917fe8a4b58fe67ae395e3a0c4e53093e
|
2d566fd55879a07940ed04189982932b37754349
|
/portal_inbox/controllers/controllers.py
|
034b80f9f75bc11cfe7812ebec0180660ff7f0d5
|
[] |
no_license
|
ETharwat/fogits
|
e3ab23c1ebc766e2094423552b08b0f986ebf42f
|
e96aa472cc272fe05b6dc1bd768db270ba753666
|
refs/heads/main
| 2023-04-14T05:32:58.513012
| 2021-04-09T23:53:14
| 2021-04-09T23:53:14
| 356,424,738
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,084
|
py
|
# -*- coding: utf-8 -*-
from odoo import http, _, fields
from odoo.http import request
from odoo.addons.portal.controllers.portal import CustomerPortal
class MessagesPortal(CustomerPortal):
def _prepare_portal_layout_values(self):
values = super(MessagesPortal, self)._prepare_portal_layout_values()
current_user = request.env.user.id
current_student = request.env['op.student'].sudo().search([('user_id', '=', current_user)])
inbox = request.env['portal.sent'].sudo().search(
[('student_id', 'in', current_student.id), ('state', '=', 'sent'),('read_by_student', '!=', True)])
values['message_count'] = len(inbox)
return values
class PortalInbox(http.Controller):
@http.route(['/my/incoming'], type='http', auth="user", website=True)
def portal_incoming(self, **kw):
current_user = request.env.user.id
current_student = request.env['op.student'].sudo().search([('user_id', '=', current_user)])
inbox = request.env['portal.sent'].sudo().search([('student_id', 'in', current_student.id),
('state', '=', 'sent')])
values = {
'student': current_student,
'inbox': inbox,
}
return request.render("portal_inbox.portal_incoming_messages", values)
@http.route(['/my/incoming/<int:message_id>'], type='http', auth="public", website=True)
def portal_my_incoming(self, message_id, **kw):
current_student_assignment = request.env['portal.sent'].sudo().search([('id', '=', message_id)])
values = {
'message': current_student_assignment
}
request.env['portal.sent'].sudo().search([('id', '=', message_id)]).write({'read_by_student':True})
return request.render("portal_inbox.portal_incoming_details", values)
class PortalOutgoing(http.Controller):
@http.route(['/my/outgoing'], type='http', auth="user", website=True)
def portal_outgoing(self, **kw):
current_user = request.env.user.id
current_student = request.env['op.student'].sudo().search([('user_id', '=', current_user)])
inbox = request.env['portal.inbox'].sudo().search([('student_id', '=', current_student.id)])
teachers = request.env['op.faculty'].sudo().search([])
values = {
'inbox': inbox,
'teachers':teachers,
}
return request.render("portal_inbox.portal_outgoing_messages", values)
@http.route(['/my/outgoing/<int:message_id>'], type='http', auth="public", website=True)
def portal_my_outgoing(self, message_id, access_token=None, report_type=None, download=False, **kw):
        current_message = request.env['portal.inbox'].sudo().search([('id', '=', message_id)])
values = {
'message': current_message
}
return request.render("portal_inbox.portal_outgoing_details", values)
@http.route(['/NewMessage'], type='http', auth="user", website=True)
def portal_new_outgoing(self, **kw):
current_user = request.env.user.id
current_student = request.env['op.student'].sudo().search([('user_id', '=', current_user)])
teachers = request.env['op.faculty'].sudo().search([])
values = {
'student': current_student,
'teachers':teachers,
}
return request.render("portal_inbox.portal_new_outgoing_messages", values)
@http.route(['/MessageSent'], type='http', auth="user", methods=['POST'], website=True)
def portal_sent_outgoing(self, **kw):
current_user = request.env.user.id
current_student = request.env['op.student'].sudo().search([('user_id', '=', current_user)])
request.env['portal.inbox'].sudo().create({'name':kw['subject'],
'student_id':current_student.id,
'teacher_id':kw['teachers'],
'message':kw['message']})
return request.redirect('/my/outgoing')
|
[
"eslam.saber@peerless-tech.com"
] |
eslam.saber@peerless-tech.com
|
e5ca7c47c8ee2910e8d577565d2345ac0543ef91
|
ab0ececf2fa5dac3996f5e3201e464a2933bbeb0
|
/remainder/asgi.py
|
ff1ab612eff3185a46f015b16c7de7c60685033e
|
[] |
no_license
|
dennisparathanathu/Reminder
|
9546dae608490dcb3a054d4821e10ccb71b4af70
|
9768bcfb3d2ebcc9d58321885fa89b71ba81c6bf
|
refs/heads/master
| 2023-08-01T06:44:05.993453
| 2020-08-02T18:18:37
| 2020-08-02T18:18:37
| 276,581,524
| 0
| 0
| null | 2021-09-22T19:32:49
| 2020-07-02T07:43:13
|
Python
|
UTF-8
|
Python
| false
| false
| 395
|
py
|
"""
ASGI config for remainder project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'remainder.settings')
application = get_asgi_application()
|
[
"dennisparathanathu@gmail.com"
] |
dennisparathanathu@gmail.com
|
96e7f043698752f1e7cf913ff817a44ad45d2be3
|
9255876d33cefedbd7d60fe6528793ba262fc5ca
|
/publicfun/restartapp.py
|
964b2ca965fac969e3e2719bd1202ee23df7fd8f
|
[] |
no_license
|
xuguojun1989/studentautocodeunit1
|
6b041529ee39f9d64abbea6074f310a95c59e5e0
|
bb226026fcbe697a4fa6ff424e86254bb6c8fd65
|
refs/heads/master
| 2022-09-27T03:07:23.478702
| 2020-06-02T07:15:16
| 2020-06-02T07:15:16
| 261,709,242
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 184
|
py
|
#coding=utf-8
from base.base_driver import BaseDriver
class RestartApp:
def restartandroid(self):
self.base_driver=BaseDriver()
self.base_driver.android_driver()
|
[
"422486265@qq.com"
] |
422486265@qq.com
|
c229ef5b98c6ec365c60ef63dcf42918dc310a98
|
78d64ff1782251900c2a001ab45655dafeaef3d6
|
/rom_generator/spriteChangeHarvin.py
|
98a6c0155c37447c623828243aed7b8c191ae236
|
[
"MIT"
] |
permissive
|
ikarth/game-boy-rom-generator
|
c86f0b2c2ca10bab6b8c02bfeff18599c17b92d9
|
29576a4bbe87a0032f80967d4b740059a65ea5c9
|
refs/heads/main
| 2023-06-26T20:25:47.317321
| 2021-08-04T14:32:21
| 2021-08-04T14:32:21
| 273,097,107
| 5
| 0
|
MIT
| 2021-08-04T21:24:27
| 2020-06-17T23:20:33
|
Python
|
UTF-8
|
Python
| false
| false
| 4,856
|
py
|
import argparse
import copy
import random
from generator import makeElement, makeBasicProject, addSpriteSheet, makeBackground, makeScene, makeActor, addSymmetricSceneConnections, makeMusic, reverse_direction, initializeGenerator, writeProjectToDisk, addSceneBackground, makeCol, makeColBorder
from background import getTileList, makeCheckerboardArray, generateBackgroundImageFromTiles, generateBackground, makeBackgroundCollisions
def spriteChangeHarvin():
# Set up a barebones project
project = makeBasicProject()
# Create sprite sheet for the player sprite
player_sprite_sheet = addSpriteSheet(
project, "player.png", "player", "player")
project.settings["playerSpriteSheetId"] = player_sprite_sheet["id"]
# add sprites
a_rock_sprite = addSpriteSheet(project, "rock.png", "rock", "static")
a_dog_sprite = addSpriteSheet(project, "dog.png", "dog", "static")
# Adding actors
actor = makeActor(a_rock_sprite, 9, 8)
rock_script = []
element = makeElement()
element["command"] = "EVENT_PLAYER_SET_SPRITE"
element["args"] = {
"spriteSheetId": "7f5d7c09-6fca-4107-a6fe-cd370e64e667",
"__collapse": True
}
rock_script.append(element)
element = makeElement()
element["command"] = "EVENT_END"
rock_script.append(element)
actor["script"] = rock_script
#dog script
dog_actor = makeActor(a_dog_sprite, 5, 6)
dog_script = []
element = makeElement()
element["command"] = "EVENT_PLAYER_SET_SPRITE"
element["args"] = {
"spriteSheetId": "7f5d7c09-6fca-4107-a6fe-cd370e64e667",
"__collapse": True
}
dog_script.append(element)
element = makeElement()
element["command"] = "EVENT_END"
dog_script.append(element)
dog_actor["script"] = dog_script
# Add a background image
default_bkg = makeBackground("placeholder.png", "placeholder")
project.backgrounds.append(default_bkg)
# Add scenes with some actors
a_scene2 = copy.deepcopy(makeScene(f"Scene", default_bkg))
a_scene2["actors"].append(dog_actor)
scene2_script = []
element = makeElement()
project.scenes.append(copy.deepcopy(a_scene2))
random.seed(1)
num = random.randint(1, 20)
print ("this is num: ")
print (num)
for y in range(num):
a_scene = copy.deepcopy(makeScene(f"Scene", default_bkg))
# makeColBorder(a_scene)
if y%2 == 0:
a_scene["actors"].append(actor)
project.scenes.append(copy.deepcopy(a_scene))
# Adding connections
scene_connections_translations = {"right":0, "left":1, "up":2, "down":3}
scene_connections = [[True, True, True, True] for n in range(num)]
for y in range(num):
for attempts in range(num):
other_scene = random.randint(0, num - 2)
if other_scene >= y:
other_scene += 1
chosen_direction = random.choice(["right", "left", "up", "down"])
if scene_connections[y][scene_connections_translations[chosen_direction]]:
if scene_connections[other_scene][scene_connections_translations[reverse_direction[chosen_direction]]]:
scene_connections[y][scene_connections_translations[chosen_direction]] = False
scene_connections[other_scene][scene_connections_translations[reverse_direction[chosen_direction]]] = False
# addSymmetricSceneConnections(project, project.scenes[y], project.scenes[other_scene], chosen_direction, doorway_sprite)
break
# Get information about the background
bkg_x = default_bkg["imageWidth"]
bkg_y = default_bkg["imageHeight"]
bkg_width = default_bkg["width"]
bkg_height = default_bkg["height"]
# Add some music
project.music.append(makeMusic("template", "template.mod"))
# Set the starting scene
project.settings["startSceneId"] = project.scenes[0]["id"]
return project
# Utilities
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
# Run the generator
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Generate a Game Boy ROM via a GB Studio project file.")
parser.add_argument('--destination', '-d', type=str, help="destination folder name", default="../gbprojects/projects3/")
args = parser.parse_args()
initializeGenerator()
project = spriteChangeHarvin()
writeProjectToDisk(project, output_path = args.destination)
if args.destination == "../gbprojects/projects/":
print(f"{bcolors.WARNING}NOTE: Used default output directory, change with the -d flag{bcolors.ENDC}")
print(f"{bcolors.OKBLUE}See generate.py --help for more options{bcolors.ENDC}")
|
[
"50000691+harvinparknight@users.noreply.github.com"
] |
50000691+harvinparknight@users.noreply.github.com
|
fcaa93806b2495249b10a1d5255db9e391bacfeb
|
3d5673feff1adbfe51e803eb3fa2a3525e602f18
|
/cosmic-core/test/integration/smoke/test_secondary_storage.py
|
f50ff80648c5dc90995aa791402b5a0d9b84bb18
|
[
"Apache-2.0"
] |
permissive
|
maduhu/MissionCriticalCloud-cosmic
|
dcccb933fb93b2e17aa4fc8c9844f70895bac8aa
|
099151372f56499a22744f49b0af05667eb18c11
|
refs/heads/master
| 2020-12-02T17:47:42.128140
| 2017-04-03T14:49:35
| 2017-04-03T14:49:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,271
|
py
|
""" BVT tests for Secondary Storage
"""
# Import Local Modules
from marvin.cloudstackAPI import *
from marvin.cloudstackTestCase import *
from marvin.lib.base import *
from marvin.lib.common import *
from marvin.lib.utils import *
from nose.plugins.attrib import attr
# Import System modules
import time
_multiprocess_shared_ = True
class TestSecStorageServices(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.apiclient = super(TestSecStorageServices, cls).getClsTestClient().getApiClient()
cls._cleanup = []
return
@classmethod
def tearDownClass(cls):
try:
# Cleanup resources used
cleanup_resources(cls.apiclient, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.cleanup = []
# Get Zone and pod
self.zones = []
self.pods = []
for zone in self.config.zones:
cmd = listZones.listZonesCmd()
cmd.name = zone.name
z = self.apiclient.listZones(cmd)
if isinstance(z, list) and len(z) > 0:
self.zones.append(z[0].id)
for pod in zone.pods:
podcmd = listPods.listPodsCmd()
podcmd.zoneid = z[0].id
p = self.apiclient.listPods(podcmd)
if isinstance(p, list) and len(p) > 0:
self.pods.append(p[0].id)
self.domains = []
dcmd = listDomains.listDomainsCmd()
domains = self.apiclient.listDomains(dcmd)
assert isinstance(domains, list) and len(domains) > 0
for domain in domains:
self.domains.append(domain.id)
return
def tearDown(self):
try:
# Clean up, terminate the created templates
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags=["advanced", "advancedns", "smoke", "basic", "eip", "sg"], required_hardware="false")
def test_01_sys_vm_start(self):
"""Test system VM start
"""
# 1. verify listHosts has all 'routing' hosts in UP state
# 2. verify listStoragePools shows all primary storage pools
# in UP state
# 3. verify that secondary storage was added successfully
list_hosts_response = list_hosts(
self.apiclient,
type='Routing',
)
self.assertEqual(
isinstance(list_hosts_response, list),
True,
"Check list response returns a valid list"
)
# ListHosts has all 'routing' hosts in UP state
self.assertNotEqual(
len(list_hosts_response),
0,
"Check list host response"
)
for host in list_hosts_response:
self.assertEqual(
host.state,
'Up',
"Check state of routing hosts is Up or not"
)
# ListStoragePools shows all primary storage pools in UP state
list_storage_response = list_storage_pools(
self.apiclient,
)
self.assertEqual(
isinstance(list_storage_response, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
len(list_storage_response),
0,
"Check list storage pools response"
)
        for primary_storage in list_storage_response:
self.assertEqual(
primary_storage.state,
'Up',
"Check state of primary storage pools is Up or not"
)
for _ in range(2):
list_ssvm_response = list_ssvms(
self.apiclient,
systemvmtype='secondarystoragevm',
)
self.assertEqual(
isinstance(list_ssvm_response, list),
True,
"Check list response returns a valid list"
)
# Verify SSVM response
self.assertNotEqual(
len(list_ssvm_response),
0,
"Check list System VMs response"
)
for ssvm in list_ssvm_response:
if ssvm.state != 'Running':
time.sleep(30)
continue
for ssvm in list_ssvm_response:
self.assertEqual(
ssvm.state,
'Running',
"Check whether state of SSVM is running"
)
return
@attr(tags=["advanced", "advancedns", "smoke", "basic", "eip", "sg"], required_hardware="false")
def test_02_sys_template_ready(self):
"""Test system templates are ready
"""
# Validate the following
# If SSVM is in UP state and running
# 1. wait for listTemplates to show all builtin templates downloaded and
# in Ready state
hypervisors = { }
for zone in self.config.zones:
for pod in zone.pods:
for cluster in pod.clusters:
hypervisors[cluster.hypervisor] = "self"
for zid in self.zones:
for k, v in hypervisors.items():
self.debug("Checking BUILTIN templates in zone: %s" % zid)
list_template_response = list_templates(
self.apiclient,
hypervisor=k,
zoneid=zid,
templatefilter=v,
listall=True,
account='system'
)
self.assertEqual(validateList(list_template_response)[0], PASS, \
"templates list validation failed")
# Ensure all BUILTIN templates are downloaded
templateid = None
for template in list_template_response:
if template.templatetype == "BUILTIN":
templateid = template.id
template_response = list_templates(
self.apiclient,
id=templateid,
zoneid=zid,
templatefilter=v,
listall=True,
account='system'
)
if isinstance(template_response, list):
template = template_response[0]
else:
raise Exception("ListTemplate API returned invalid list")
if template.status == 'Download Complete':
self.debug("Template %s is ready in zone %s" % (template.templatetype, zid))
elif 'Downloaded' not in template.status.split():
self.debug("templates status is %s" % template.status)
self.assertEqual(
template.isready,
True,
"Builtin template is not ready %s in zone %s" % (template.status, zid)
)
|
[
"miguelferreira@me.com"
] |
miguelferreira@me.com
|
07ff18e0d7c3d12f28a5c0e05d924539ab2888f3
|
2b4e133329a5ca1ee205b026a46606b027d3f205
|
/Customer/migrations/0003_rename_watchlist_wishlist.py
|
9994fea4d9934cf1641816d6a6a726a5d2f331c6
|
[] |
no_license
|
wadeeat786486962/bladerscenter.github.io-
|
06884c5ad3e7b874ce761e21ab5c00c9ab74fcfc
|
410d11feb6bc1885e614069a7bc5007521cf982d
|
refs/heads/main
| 2023-06-16T04:24:23.697174
| 2021-07-11T18:05:14
| 2021-07-11T18:05:14
| 384,936,125
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 435
|
py
|
# Generated by Django 3.2.4 on 2021-06-27 16:27
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Vendors', '0004_products_product_quantity'),
('Signup', '0003_customer_model_user_type'),
('Customer', '0002_watchlist'),
]
operations = [
migrations.RenameModel(
old_name='Watchlist',
new_name='Wishlist',
),
]
|
[
"mohibullahsahi419@gmail.com"
] |
mohibullahsahi419@gmail.com
|
a9e51ecf5a370bc04793bb28f5311eb7f237491e
|
aa27f97fabdde7b6a5eeb15cf9459c56688dd2f3
|
/searchSorting/quicksort.py
|
6aa03d38e0bfe5a7a15c5ce975f2031492ddca19
|
[] |
no_license
|
djunh1/practice_algorithms
|
8bbde53359bfc8a9ffe640774910a66d9f810675
|
74c45e9dafdb450046e116ad3853a813a7022d30
|
refs/heads/master
| 2020-09-09T21:21:56.531017
| 2019-11-14T00:12:15
| 2019-11-14T00:12:15
| 221,574,478
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 503
|
py
|
def partition(arr, low, high):
    # Lomuto partition: take the last element as the pivot and move every
    # element smaller than it to the front of the arr[low..high] range.
    i = (low - 1)
    pivot = arr[high]
    for j in range(low, high):
        if arr[j] < pivot:
            i = i + 1
            arr[i], arr[j] = arr[j], arr[i]
    # Put the pivot just after the last smaller element and return its index.
    arr[i + 1], arr[high] = arr[high], arr[i + 1]
    return (i + 1)
def quickSort(arr,low,high):
if low < high:
pi = partition(arr,low,high)
quickSort(arr, low, pi-1)
quickSort(arr, pi+1, high)
arr = [10, 7, 8, 9, 1, 5]
n = len(arr)
quickSort(arr,0,n-1)
print ("Sorted array is:")
for i in range(n):
print ("%d" %arr[i]),
|
[
"djunh1@gmail.com"
] |
djunh1@gmail.com
|
d7bea8f0cb00c61dccf45124c3453b943dda0f78
|
2dfcb9c5cd6994b72fa6ac632c627ea05616d3c8
|
/Python 编程/Chapter 8 函数/function_16_结合使用位置实参和任意数量实参.py
|
2277ca263c6a1d20428ec5e406f23500792b8bef
|
[] |
no_license
|
Summer-Xuan/PythonBasic
|
e41432ebb204475102715895f0bb2c3b50241a98
|
05807b615452bf0a886630bb78840975b8f05b39
|
refs/heads/main
| 2023-05-02T20:53:03.333265
| 2021-06-01T15:53:53
| 2021-06-01T15:53:53
| 332,984,381
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 666
|
py
|
"""
如果要让函数接受不同类型的实参,必须在函数定义中将接纳任意数量实参的形参放在最后。
Python先匹配位置实参和关键字实参,再将余下的实参都收集到最后一个形参中*。
"""
def make_pizza(size, *toppings):
"""
概述要制作的披萨
Python将收到的第一个值存储在形参size中,并将其他的所有值都存储在元组toppings中。
"""
print("\nMaking a " + str(size) + "-inch pizza with the following toppings:")
for topping in toppings:
print('_' + topping)
make_pizza(16, 'pepperoni')
make_pizza(12, 'mushrooms', 'green peppers', 'extra cheese')
|
[
"xuanhsh@inspur.com"
] |
xuanhsh@inspur.com
|
941d44fe8b0574f0b5b9302f6739135ab4da3e96
|
10e5b1b2e42a2ff6ec998ed900071e8b5da2e74e
|
/bit_manipulation/0231_power_of_two/0231_power_of_two.py
|
ae8781e440f8895332fcb70dcf3368fa68b831db
|
[] |
no_license
|
zdyxry/LeetCode
|
1f71092d687316de1901156b74fbc03588f0b0a5
|
b149d1e8a83b0dfc724bd9dc129a1cad407dd91f
|
refs/heads/master
| 2023-01-29T11:59:14.162531
| 2023-01-26T03:20:23
| 2023-01-26T03:20:23
| 178,754,208
| 6
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 275
|
py
|
# -*- coding: utf-8 -*-
class Solution:
def isPowerOfTwo(self, n):
if n < 1:
return False
while n > 1:
if n % 2 == 1:
return False
n >>= 1
return True
print(Solution().isPowerOfTwo(5))
|
[
"zdyxry@gmail.com"
] |
zdyxry@gmail.com
|
8bb122a561283d2bcf817bb5e04625d493544607
|
253477f2b8c771677d6f658b859667447b11c500
|
/django01/autenticacao/forms.py
|
94f1b01fdf53a27a265375d03cddf096e99cc5ba
|
[] |
no_license
|
marianawerneck/DesenvWeb
|
25716c6f7e9548b92b7ca125dc493a821f1d4b0f
|
8cfd84d787a6b9bd23e5ca1dce85a03fb3e9bd33
|
refs/heads/master
| 2022-12-07T19:12:57.391340
| 2020-08-26T01:36:22
| 2020-08-26T01:36:22
| 289,738,399
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,197
|
py
|
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
class AuthenticationFormCustomizado(AuthenticationForm):
error_messages = {
'invalid_login': 'Login inválido',
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['username'].error_messages={'required': 'Campo obrigatório'}
self.fields['username'].widget.attrs.update({'class': 'form-control form-control-sm'})
# <input type="text" name="username" autofocus="" autocapitalize="none" autocomplete="username"
# maxlength="150" required="" id="id_username">
self.fields['password'].error_messages={'required': 'Campo obrigatório'}
self.fields['password'].widget.attrs.update({'class': 'form-control form-control-sm'})
# <input type="password" name="password" autocomplete="current-password" required=""
# id="id_password">
class UsuarioFormCustomizado(UserCreationForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['first_name'].label = 'Nome'
self.fields['first_name'].required = True
self.fields['last_name'].label = 'Sobrenome'
self.fields['last_name'].required = True
self.fields['email'].label = 'Email'
self.fields['email'].required = True
self.fields['email'].error_messages = {'invalid': 'O campo Email é inválido.'}
self.fields['username'].label = 'Usuário'
self.fields['username'].error_messages = {
'invalid': 'Usuário inválido. Use letras, números, @, ., +, -, _',
'unique': 'Usuário já cadastrado.'
}
self.fields['password1'].label = 'Senha'
self.fields['password1'].maxlength = 128
self.fields['password2'].label = 'Confirmação de Senha'
self.fields['password2'].maxlength = 128
for field in self.fields.values():
field.error_messages['required'] = \
'Campo {nome_do_campo} de preenchimento obrigatório'.format(nome_do_campo=field.label)
self.fields['password1'].validators.append(self.validate_password_strength)
class Meta:
model = User
fields = ('first_name', 'last_name', 'email', 'username', 'password1', 'password2')
error_messages = {
'password_mismatch': 'As senhas informadas não conferem.'
}
def clean_email(self):
email = self.cleaned_data.get("email")
usuarios = User.objects.filter(email=email)
if usuarios.exists():
self.add_error('email', 'Email duplicado.')
return email
def validate_password_strength(self, valor):
if len(valor) < 8:
raise ValidationError('A senha deve ter pelo menos 8 caracteres.')
if not any(char.isdigit() for char in valor):
raise ValidationError('A senha deve ter pelo menos 1 dígito.')
if not any(char.isalpha() for char in valor):
raise ValidationError('A senha deve ter pelo menos 1 letra.')
|
[
"mwerneckroque00@gmail.com"
] |
mwerneckroque00@gmail.com
|
304268c55dfbf4b21b4a4b7e0ff53dde5449bcbd
|
6cb1d8f1416af7b7c5c83ab35cb6928ea9955aff
|
/venv/Scripts/pip-script.py
|
eab8e523d3f70c83c893918b55582674ac186b2a
|
[] |
no_license
|
lee-saint/practice-nlp
|
f68ccc3140f725f3edcd7048c324b847583b7f20
|
19003fcd5f55f4f110417a3950a32bb5fba1850c
|
refs/heads/master
| 2020-12-01T20:05:15.014495
| 2020-01-21T09:22:18
| 2020-01-21T09:22:18
| 230,750,152
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
#!D:\dev\python\practice-nlp\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
)
|
[
"plutorian131@gmail.com"
] |
plutorian131@gmail.com
|
94c2a2677d6c1a8fc2daf40364be8f4c3bd522dd
|
7c8c6a09d7ac7941f75c05fc5bc7b8d772175783
|
/orders/migrations/0022_auto_20181009_1407.py
|
f20979b1fdc1abd261e9a1ced9a0d5ec9cfd89ed
|
[] |
no_license
|
firaan1/mypizzajoint
|
ee23e9a830fbe0ddd0ab87ee5e17241dd479b471
|
8cea3a2451a54fdba6f2a189c62dab9327f0ffa5
|
refs/heads/master
| 2020-03-26T04:02:20.211480
| 2018-10-09T21:39:05
| 2018-10-09T21:39:05
| 144,482,936
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,371
|
py
|
# Generated by Django 2.0.3 on 2018-10-09 14:07
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('orders', '0021_auto_20181007_1148'),
]
operations = [
migrations.CreateModel(
name='DeliveryAddress',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('address', models.CharField(max_length=1000)),
('phone_number', models.CharField(blank=True, max_length=17, validators=[django.core.validators.RegexValidator(message="Phone number must be entered in the format: '+999999999'. Up to 15 digits allowed.", regex='^\\+?1?\\d{9,15}$')])),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='address_user', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='placedorder',
name='deliveryaddress',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='delivery_address', to='orders.DeliveryAddress'),
),
]
|
[
"firaan1@gmail.com"
] |
firaan1@gmail.com
|
03d57170acf5fa701a3e80315decbefb2e79b42d
|
c81b633c452616636120daba9ef3fa9a2b2640b3
|
/Class09/example_7_capitals_game.py
|
5dcb895c4194ba612fb00358a37ee89ae83c23b3
|
[] |
no_license
|
formigaVie/SNWD_Works_201711
|
ba3dca2ef4cf74166b8a5c7c804ea01ccc866876
|
16ec6c169a5828cadc515b7612dbfd8638ba7224
|
refs/heads/master
| 2021-09-07T03:41:36.674833
| 2018-02-16T18:58:47
| 2018-02-16T18:58:47
| 110,582,326
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,851
|
py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import random
# define variable name
creator = "FormigaVIE"
current_points = 0
# print welcome to user
print "=" *80
print "Welcome to {} Capital Game" .format(creator)
print "=" *80
# Get the user's name and print a personal greeting
user=raw_input("\nPlease enter your name: ")
print "\n Hello {}, pleasure to have you here at the Capital Game" .format(user.upper())
print "=" *80
points = {user:current_points}
capitals = {"FRANCE":"PARIS",
"ICELAND":"REYKJAVIK",
"DENMARK":"COPENHAGEN",
"LITHUANIA":"VILNIUS",
"CANADA":"OTTAWA",
"AUSTRIA":"VIENNA",
"GERMANY":"BERLIN",
"SUISSE":"BERN"}
while True:
for x in range (1,4):
current_country = random.choice(capitals.keys())
print current_country
y = 3 - x
        # Pick a random country, ask the user for its capital, check whether the answer is correct and print the result
answer=raw_input("Please enter the capital of {}: ".format(current_country))
if answer.upper() == capitals[current_country]:
print "\n Congratulations - You are right - the capital of {} is {}" .format(current_country,capitals[current_country])
points [user] += 1
print "Your actual points are: {}".format(points[user])
elif answer.upper() != capitals[current_country]:
print "Sorry your answer isn't correct - the capital of {} is {}" .format(current_country,capitals[current_country])
print "Only {} tries left" .format(y)
again = raw_input("\nDo you like to calculate one more time (n for exit): ")
if again.lower() == "n":
        print "\nThank you {} for joining us, your points: {}." .format(user.upper(), points[user])
break
# Extension point for further development
|
[
"manfredg30@gmail.com"
] |
manfredg30@gmail.com
|
bf43e8f2a7d70fec6a75358ab9bb44c2ecd154cd
|
8503102336e77f783e0393f3ce0657492322c619
|
/perceptron.py
|
a0894df9f66332b94bffbf916418c54802e667bf
|
[] |
no_license
|
panxiaobai/MachineLearning
|
a9344942eda9d9240afcda0746aba13bb85ca6f9
|
b2c6747e665dfae4d75eccca7c12f17bfad8e006
|
refs/heads/master
| 2020-06-21T08:02:43.928549
| 2019-12-16T13:02:22
| 2019-12-16T13:02:22
| 197,389,531
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 898
|
py
|
import numpy as np
def train(data,label,w,b,lr):
'''
:param data: data.shape=(data_num,data_length)
:param label: label.shape=(data_num,1)
:param w: w.shape=(data_length)
:param b:
:param lr: learning rate
:return:
'''
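    # Update rule implemented below (descriptive note): keep sweeping over the data until
    # every sample satisfies y * (w . x + b) > 0; for each misclassified sample apply
    # w <- w + lr * y * x and b <- b + lr * y (the classic perceptron learning rule).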
flag=True
while(flag):
flag=False
for i in range(data.shape[0]):
x=data[i]
x_=x[:,np.newaxis]
w_ = w[:,np.newaxis]
y_pred=np.dot(w_.T,x_)+b
y=label[i]
if y*y_pred<=0:
w=w+lr*y*x
b=b+lr*y
flag=True
return w,b
def param_init(data):
w=np.zeros((data.shape[1]))
b=0
return w,b
def main():
data=np.array([[3,3],[4,3],[1,1]])
label=np.array([[1],[1],[-1]])
w,b=param_init(data)
w,b=train(data,label,w,b,1)
print(w)
print(b)
if __name__ == '__main__':
main()
|
[
"panyucsu@163.com"
] |
panyucsu@163.com
|
22045d1896e3fb88a25a5ccafc8a837252aaefdd
|
8732232026b0a42eb4912266cc8768f82fd85660
|
/phonevoicecallASR.py
|
f73607d827ac38cbfcfe336e54d6b6f8ee8f0e76
|
[] |
no_license
|
KornbotDevUltimatorKraton/Phonecallwithspeechrecognition
|
b3b1b86ef15e860982abff3d40b5f4ee821e29b5
|
18aee657e98ce06a290122aae2e24786e1a58f23
|
refs/heads/main
| 2023-08-30T12:56:41.157409
| 2021-10-10T14:46:07
| 2021-10-10T14:46:07
| 414,650,942
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,396
|
py
|
#!/usr/bin/env python3
# NOTE: this example requires PyAudio because it uses the Microphone class
import time
import math
import speech_recognition as sr
import itertools
import numpy as np
import serial #Serial communication talking to the GPRS module
from google_speech import*
from googletrans import Translator # Google translate
import os
import sys
import wordninja
import difflib #Finding the similarlity of the matching sequence
#from twilio.rest import TwilioRestClient
import json #Json load the data from the phonecountry code
import serial
from gpiozero import LED
translator = Translator(service_urls=['translate.google.com','translate.google.com',])
lang = 'th'
lang2 = 'en'
sox_effects = ('speed',"1.14")
Activate_word = ["translation","Translation","mode","translate","Translate"] #Activate translate mode concern word need more vocabulary
Direction_translate = ["to","in to"]
Call_active_com = ["Call","to","number"]
Code_active_country = ["to","destination","destinations","Destination"]
Cancel_Call = ["Cancel","call"]
Receive_call_mode = ['Receive', 'phone', 'calls']
mem_country_destination = []
Current_mode = []
Remover_country = []
vibrator = LED(21) #vibrator function
reset = LED(6) #reset function
led = LED(26) #LED light
#List language of translation function
Languages = {
'af': 'Afrikaans',
'sq': 'Albanian',
'am': 'Amharic',
'ar': 'Arabic',
'hy': 'Armenian',
'az': 'Azerbaijani',
'eu': 'Basque',
'be': 'Belarusian',
'bn': 'Bengali',
'bs': 'Bosnian',
'bg': 'Bulgarian',
'ca': 'Catalan',
'ceb': 'Cebuano',
'ny': 'Chichewa',
'zh-cn': 'Chinese',
'zh-tw': 'Chinese (traditional)',
'co': 'Corsican',
'hr': 'Croatian',
'cs': 'Czech',
'da': 'Danish',
'nl': 'Dutch',
'en': 'English',
'eo': 'Esperanto',
'et': 'Estonian',
'tl': 'Filipino',
'fi': 'Finnish',
'fr': 'French',
'fy': 'Frisian',
'gl': 'Galician',
'ka': 'Georgian',
'de': 'German',
'el': 'Greek',
'gu': 'Gujarati',
'ht': 'Haitian creole',
'ha': 'Hausa',
'haw': 'Hawaiian',
'iw': 'Hebrew',
'he': 'Hebrew',
'hi': 'Hindi',
'hmn': 'Hmong',
'hu': 'Hungarian',
'is': 'Icelandic',
'ig': 'Igbo',
'id': 'Indonesian',
'ga': 'Irish',
'it': 'Italian',
'ja': 'Japanese',
'jw': 'Javanese',
'kn': 'Kannada',
'kk': 'Kazakh',
'km': 'Khmer',
'ko': 'Korean',
'ku': 'Kurdish (kurmanji)',
'ky': 'Kyrgyz',
'lo': 'Lao',
'la': 'Latin',
'lv': 'Latvian',
'lt': 'Lithuanian',
'lb': 'Luxembourgish',
'mk': 'Macedonian',
'mg': 'Malagasy',
'ms': 'Malay',
'ml': 'Malayalam',
'mt': 'Maltese',
'mi': 'Maori',
'mr': 'Marathi',
'mn': 'Mongolian',
'my': 'Myanmar (burmese)',
'ne': 'Nepali',
'no': 'Norwegian',
'or': 'Odia',
'ps': 'Pashto',
'fa': 'Persian',
'pl': 'Polish',
'pt': 'Portuguese',
'pa': 'Punjabi',
'ro': 'Romanian',
'ru': 'Russian',
'sm': 'Samoan',
'gd': 'Scots gaelic',
'sr': 'Serbian',
'st': 'Sesotho',
'sn': 'Shona',
'sd': 'Sindhi',
'si': 'Sinhala',
'sk': 'Slovak',
'sl': 'Slovenian',
'so': 'Somali',
'es': 'Spanish',
'su': 'Sundanese',
'sw': 'Swahili',
'sv': 'Swedish',
'tg': 'Tajik',
'ta': 'Tamil',
'te': 'Telugu',
'th': 'Thai',
'tr': 'Turkish',
'uk': 'Ukrainian',
'ur': 'Urdu',
'ug': 'Uyghur',
'uz': 'Uzbek',
'vi': 'Vietnamese',
'cy': 'Welsh',
'xh': 'Xhosa',
'yi': 'Yiddish',
'yo': 'Yoruba',
'zu': 'Zulu'}
file = open("Extracted_code_country.json",'r') #Read the file
codedata = json.load(file) #code data for load json country code and name
# Twilio phone number goes here. Grab one at https://twilio.com/try-twilio
# and use the E.164 format, for example: "+12025551234"
#TWILIO_PHONE_NUMBER = "+12055743990" #Trial number for phone call
# list of one or more phone numbers to dial, in "+19732644210" format
DIAL_NUMBERS = []
# URL location of TwiML instructions for how to handle the phone call
#TWIML_INSTRUCTIONS_URL = \
# "http://static.fullstackpython.com/phone-calls-python.xml"
#Joining number
Joiningnumber = []
# replace the placeholder values with your Account SID and Auth Token
# found on the Twilio Console: https://www.twilio.com/console
#client = TwilioRestClient("AC2700afd0f2277138948384d03c83df73", "243684daa3aa1f20c234f472309d0d9f")
Current_country_code = [] #Store the current country code
try:
sim800l = serial.Serial('/dev/ttyS0',115200)
print("GPRS module found................[OK]")
sim800l.write('AT\r'.encode('UTF-8')) #
Getresponse = sim800l.readline().decode('UTF-8')
print("GPRS command.........",Getresponse)
Getresponse_status = sim800l.readline().decode('UTF-8')
print("GPRS status.........",Getresponse_status)
speech = Speech("GPRS status........."+str(Getresponse_status)+"Smart glasses is now working 100%",'en')
speech.play(sox_effects)
except:
print("Please check the UART connection between the GPRS module")
'''
class SIM800L:
def __init__(self,status,phonenumber,Country_code,reset):
#Phone initial function
self.status = status
self.phonenumber = phonenumber
self.reset = reset
self.Country_code = Country_code
def __str__(self):
return f"Status:{self.status} Phonenumber:{self.phonenumber} Country_code:{self.Country_code} reset:{self.reset}"
def Callmode(self,status,phonenumber): #Getting the phonenumber
if status == "Call_mode":
dials = "ATD"+phonenumber+";\n"
sim800l.write(dials.encode('UTF-8')) #Getting the sim800l call
get_response = sim800l.readline().decode('UTF-8')
return get_response
if status == "Receive_call_mode":
sim800l.write("ATA\n".encode('UTF-8'))
get_response = sim800l.readline().decode('UTF-8')
return get_response
if status == "Hangup_mode":
sim800l.write("ATH\n".encode('UTF-8'))
get_response = sim800l.readline().decode('UTF-8')
return get_response
def Check_battery(self): #getting the batteryvalue
sim800l.write("AT+CBC\n".encode('UTF-8'))
get_response = sim800l.read().decode('UTF-8')
return get_response
def Vibrator(self,state):
if state == "RING":
for vib in range(0,1):
if vib == 0:
vibrator.off()
time.sleep(0.5)
if vib == 1:
vibrator.on()
time.sleep(0.5)
if state !="RING":
vibrator.off()
'''
#Data of preposition word in the list of the dictionary file
Detected_language = ['th','en'] #Detected language
Inner_trans = 'en'
def intersection(lst1, lst2):
lst3 = [value for value in lst1 if value in lst2]
return lst3
def Hangup_call(listwordinput,executelist):
word_intersection = intersection(listwordinput,executelist)
print("Getting the intersection word",word_intersection)
superposition = intersection(word_intersection,executelist)
print("Word superpositioning",superposition)
percent=difflib.SequenceMatcher(None,superposition,executelist)
print(percent.ratio()*100)
if percent.ratio()*100 > 33:
for Dat in range(0,len(executelist)-1):
if Cancel_Call[Dat] in listwordinput:
print("Hangup mode activated..........")
sim800l.write("ATH\n".encode('UTF-8'))
get_responsezero = sim800l.readline().decode('UTF-8')
print(get_responsezero)
speech = Speech("Hangup mode activated",'en')
speech.play(sox_effects)
reset.on()
time.sleep(0.5)
reset.off()
def Receive_call(listwordinput,executelist):
#get_responsezero = sim800l.readline().decode('UTF-8')
#print(get_responsezero)
get_response = sim800l.readline().decode('UTF-8')
print(get_response)
word_intersection = intersection(listwordinput,executelist)
print("Getting the intersection word",word_intersection)
superposition = intersection(word_intersection,executelist)
print("Word superpositioning",superposition)
percent=difflib.SequenceMatcher(None,superposition,executelist)
print(percent.ratio()*100)
if percent.ratio()*100 >= 33:
get_responsezero = sim800l.readline().decode('UTF-8')
print(get_responsezero)
get_response = sim800l.readline().decode('UTF-8')
print(get_response)
if get_response == "RING":
vibrator.on()
for rc in range(0,len(executelist)-1):
if Receive_call_mode[rc] in listwordinput:
print("Receive call activated..........")
sim800l.write("ATA\n".encode('UTF-8'))
vibrator.off()
speech = Speech("Receive call activated",'en')
speech.play(sox_effects)
#Function call back of the speech recognition command
def Call_command(splitword,Call_active_com):
word_intersection = intersection(splitword,Call_active_com)
print("Getting the intersection word",word_intersection)
superposition = intersection(word_intersection,Call_active_com)
print("Word superpositioning",superposition)
percent=difflib.SequenceMatcher(None,superposition,Call_active_com)
print(percent.ratio()*100)
if percent.ratio()*100 >= 66:
if Current_mode !=[]:
Current_mode.clear()
if Current_mode == []:
Current_mode.append("Call mode") #add the current function to activate
for call in range(0,len(Call_active_com)-1):
if Call_active_com[call] in splitword:
print("Detect Phone call mode")
print(splitword)
if Joiningnumber !=[]:
Joiningnumber.clear()
if Joiningnumber ==[]:
try:
for i in range(0,len(Call_active_com)):
splitword.remove(word_intersection[i])
except:
print("System command flaw detected")
print("Phone number extracted:",splitword)
#Joiningnumber.append(splitword)
frontnumber = splitword[0]
numlist = list(frontnumber)
try:
numlist.remove('0')
print(numlist,numlist[0]+numlist[1])
front_pt = numlist[0]+numlist[1]
splitword.remove(splitword[0])
print(numlist,front_pt,splitword)
Insertnumlist = splitword.insert(0,front_pt)
print(' '.join(splitword))
Phone_rearanged = ' '.join(splitword)
DIAL_NUMBERS.clear()
if DIAL_NUMBERS == []:
DIAL_NUMBERS.append(Phone_rearanged)
print(DIAL_NUMBERS)
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#Extract destination code
#Extract_and_Execute(DIAL_NUMBERS,code_active_country)
for country in range(0,len(DIAL_NUMBERS[0].split(" "))-1):
if DIAL_NUMBERS[0].split(" ")[country] in list(codedata):
print(DIAL_NUMBERS[0].split(" ")[country])
extracted_country = DIAL_NUMBERS[0].split(" ")[country]
get_code = codedata.get(extracted_country)
print("Country:"+"\t"+extracted_country+"code:",get_code)
print(DIAL_NUMBERS[0].split(" "))
if mem_country_destination != []:
mem_country_destination.clear()
if mem_country_destination == []:
for re in range(0,len(DIAL_NUMBERS[0].split(" "))):
mem_country_destination.append(str(DIAL_NUMBERS[0].split(" ")[re]))
print("From memory:",mem_country_destination)
#Remover_country.append(str(extracted_country))
#Remover_country.append(str(Code_active_country[0]))
#for rem in range(0,len(Remover_country)-1):
#The problem is code active country list need to find new solution
try:
mem_country_destination.remove(str(extracted_country))
for rrev in range(0,len(Code_active_country)-1):
if Code_active_country[rrev] in mem_country_destination:
mem_country_destination.remove(str(Code_active_country[rrev]))
except:
print("Removing the wrong sorting order")
#mem_country_destination.remove(str(Code_active_country[0]))
#mem_country_destination.remove(str(extracted_country))
#mem_country_destination.remove(str(Code_active_country[0]))
print(mem_country_destination)
Phonenumber = ' '.join(mem_country_destination)
Phonedails = get_code+' '.join(mem_country_destination)
print("Complete phonenumber",Phonedails)
if Current_mode[0] == "Call mode":
speech = Speech(str(Phonedails)+"destination"+str(extracted_country),'en')
speech.play(sox_effects)
if Current_mode[0] != "Call mode":
print("You are now in"+"\t"+str(Current_mode[0]))
dials_num = "ATD"+str(Phonedails)+";\n"
sim800l.write(dials_num.encode('UTF-8'))
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
except:
print("Processing number error")
speech = Speech('Processing number error','en')
speech.play(sox_effects)
def callback(recognizer, audio):
# received audio data, now we'll recognize it using Google Speech Recognition
try:
# for testing purposes, we're just using the default API key
# to use another API key, use `r.recognize_google(audio, key="GOOGLE_SPEECH_RECOGNITION_API_KEY")`
# instead of `r.recognize_google(audio)`
print("Smart glasses Speech Recognition thinks you said " + recognizer.recognize_google(audio,language = 'th'))
'''
translation = translator.translate(recognizer.recognize_google(audio,language = 'th'))
print(translation)
speech = Speech(translation,lang)
speech.play(sox_effects)
'''
if len(Detected_language) >=2:
translations = translator.translate(str(recognizer.recognize_google(audio,language =str(Detected_language[0]))),dest =str(Detected_language[len(Detected_language)-1]))
translations2 = translator.translate(str(recognizer.recognize_google(audio,language =str(Detected_language[0]))),dest = Inner_trans)
#Setting default of the language detected from the function of the language detection activate from the unknown non destination language
if len(Detected_language) <2:
translations = translator.translate(str(recognizer.recognize_google(audio,language =str(Detected_language[0]))),dest =str(Detected_language[0]))
translations2 = translator.translate(str(recognizer.recognize_google(audio,language =str(Detected_language[0]))),dest = Inner_trans)
#for translation in translations:
print(translations.text) # Print out translation
if len(Detected_language) >=2:
speech = Speech(translations.text,Detected_language[1])
if len(Detected_language) <2:
speech = Speech(translations.text,Detected_language[0])
Detected_language.clear()
Detected_language.append('en')
speech = Speech("Not detected destination language now using"+"\t"+str(Languages.get(Detected_language[0])+"\t"+"as default"),'en')
splitword = wordninja.split(str(translations2.text))
print(splitword)
word_intersection = intersection(splitword,Activate_word)
print("Getting the intersection word",word_intersection)
superposition = intersection(word_intersection,Activate_word)
print("Word superpositioning",superposition)
percent=difflib.SequenceMatcher(None,superposition,Activate_word)
print(percent.ratio()*100)
values_languages = list(Languages.values())
key_languages = list(Languages.keys())
if percent.ratio()*100 >= 33:
print("Detect translation mode")
if Current_mode !=[]:
Current_mode.clear()
if Current_mode == []:
Current_mode.append("Translate mode") #add the current function to activate
print(values_languages)
Detected_language.clear()
for lang in range(0,len(splitword)):
if splitword[lang] in values_languages:
Detected_language.append(key_languages[values_languages.index(splitword[lang])]) # Detected language translation on each language detected in the array
print(Detected_language)
if Current_mode !=[]:
if Current_mode[0] == "Translate mode":
speech.play(sox_effects)
if Current_mode[0] != "Translate mode":
print("You now in"+"\t"+str(Current_mode[0]))
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>..
#Phone call function
Call_command(splitword,Call_active_com) #Call mode function
Hangup_call(splitword,Cancel_Call)
Receive_call(splitword,Receive_call_mode)
except sr.UnknownValueError:
print("Smart glasses Speech Recognition could not understand audio")
except sr.RequestError as e:
print("Could not request results from Smart glasses Speech Recognition service; {0}".format(e))
r = sr.Recognizer()
m = sr.Microphone()
with m as source:
r.adjust_for_ambient_noise(source)
stop_listening = r.listen_in_background(m, callback)
for i in itertools.count():time.sleep(0.2)
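# Note (added comment): listen_in_background() returns immediately and calls callback() on a
# background thread for each phrase it hears; the loop above only keeps the main thread alive.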
|
[
"noreply@github.com"
] |
KornbotDevUltimatorKraton.noreply@github.com
|
55bfbab6f1d81a7878499a97f644c7e7976e9b66
|
b252d1f8ec5f68bf5f935c000e0bb011718ea691
|
/virtualenvs/ninetyseven/src/savoy/contrib/bookmarks/importers/.svn/text-base/delicious.py.svn-base
|
f2573c2503f8a5502f3019290bedeaac227ec76a
|
[] |
no_license
|
syncopated/97bottles
|
2ceace7ed6a852bef61796733a08eb878b045152
|
08f4210e3de77c4564fcc8c1a2e9b47a0088249f
|
refs/heads/master
| 2016-08-05T07:48:51.109089
| 2012-12-02T17:38:35
| 2012-12-02T17:38:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,783
|
from savoy.utils.path import append_third_party_path
append_third_party_path()
import time
import datetime
import logging
import urllib
from django.conf import settings
from django.db import transaction
from django.utils.encoding import smart_unicode
from django.template.defaultfilters import slugify
from savoy.contrib.bookmarks.models import Bookmark, DeliciousBookmark
from savoy.utils import importers
#
# Super-mini Delicious API
# Nabbed (and modified) from Jacob's Jellyroll.
#
class DeliciousClient(object):
"""
A super-minimal delicious client :)
"""
lastcall = 0
def __init__(self, username, password, method='v1'):
self.username, self.password = username, password
self.method = method
def __getattr__(self, method):
return DeliciousClient(self.username, self.password, '%s/%s' % (self.method, method))
def __repr__(self):
return "<DeliciousClient: %s>" % self.method
def __call__(self, **params):
# Enforce Yahoo's "no calls quicker than every 1 second" rule
delta = time.time() - DeliciousClient.lastcall
if delta < 2:
time.sleep(2 - delta)
DeliciousClient.lastcall = time.time()
url = ("https://api.del.icio.us/%s?" % self.method) + urllib.urlencode(params)
return importers.getxml(url, username=self.username, password=self.password)
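    # Descriptive note: __getattr__ above chains attribute accesses into the URL path, so
    # DeliciousClient(user, pw).posts.all() requests https://api.del.icio.us/v1/posts/all
    # (the 'v1' prefix comes from the default `method` argument).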
#
# Public API
#
def enabled():
return hasattr(settings, 'DELICIOUS_USERNAME') and hasattr(settings, 'DELICIOUS_PASSWORD')
def update():
delicious = DeliciousClient(settings.DELICIOUS_USERNAME, settings.DELICIOUS_PASSWORD)
_update_bookmarks(delicious)
#
# Private API
#
@transaction.commit_on_success
def _update_bookmarks(delicious):
xml = delicious.posts.all()
for post in xml.getiterator('post'):
info = dict((k, smart_unicode(post.get(k))) for k in post.keys())
_handle_bookmark(info)
def _handle_bookmark(info):
try:
del_bookmark = DeliciousBookmark.objects.get(hash=info['hash'])
bookmark = del_bookmark.bookmark
except:
del_bookmark = DeliciousBookmark(hash=info['hash'])
bookmark = Bookmark (
url = info['href'],
)
offset = 8+settings.UTC_OFFSET
time_difference = datetime.timedelta(hours=offset)
bookmark.title = info['description']
bookmark.description = info.get('extended', '')
bookmark.date_published = importers.parsedate(info['time']) + time_difference
bookmark.slug = slugify(info['description'])
bookmark.tags = info.get('tag', '')
bookmark.save()
del_bookmark.bookmark = bookmark
del_bookmark.save()
if __name__ == '__main__':
update()
|
[
"keith@dkeithrobinson.com"
] |
keith@dkeithrobinson.com
|
|
64a9a28bf53bda9c6982ef7d9822dfabd748ac21
|
d317d92f2ce0f84bc59d3346956cbd6592f87374
|
/src/session/key.py
|
c853cdeb4b712fada814c63993043e86d925cf83
|
[] |
no_license
|
osneven/cryptochat
|
b922a93ab07c38c3cf8668fc373aa8b821da1c5b
|
c4c31063ad30bed7caaeb9c306cc4aa4e0ccc99f
|
refs/heads/master
| 2020-06-27T20:05:26.921264
| 2016-11-26T21:01:57
| 2016-11-26T21:01:57
| 74,518,713
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,257
|
py
|
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import ec, rsa
from cryptography.hazmat.primitives.asymmetric.ec import EllipticCurvePublicKey, EllipticCurvePrivateKey
from cryptography.hazmat.primitives.asymmetric.rsa import RSAPrivateKey, RSAPublicKey
from cryptography.hazmat.primitives.kdf.concatkdf import ConcatKDFHMAC
from utils.exceptions import KeyNotGeneratedError, RemoteKeyNotRecievedError, KeyAlreadyGeneratedError, KeyNotDerivedError
import base64
# A session that holds and manages the cryptography keys needed for communication between the local and remote
class KeySession:
def __init__(self):
self.__rsa_key = self.__RSAKeyHold()
self.__ecdh_key = self.__ECDHKeyHold()
self.__shared_key = self.__SharedKeyHold()
self.reload_session()
# Generates and stores both the private ECDH and RSA keys
def reload_session(self):
# Reset all the keys
self.__rsa_key.reset()
self.__ecdh_key.reset()
self.__shared_key.reset()
# Generate the keys that can be
self.__rsa_key.generate()
self.__ecdh_key.generate()
# Returns the private local keys
def get_private_key_rsa(self): return self.__rsa_key.get_private()
def get_private_key_ecdh(self): return self.__ecdh_key.get_private()
def get_shared(self): return self.__shared_key.get_private()
# Returns the public local keys
def get_public_key_rsa(self): return self.__rsa_key.get_public()
def get_public_key_ecdh(self): return self.__ecdh_key.get_public()
def get_public_key_rsa_bytes(self): return self.__rsa_key.get_public_bytes()
def get_public_key_ecdh_bytes(self): return self.__ecdh_key.get_public_bytes()
# Sets the public remote keys
def set_remote_public_key_ecdh(self, key): # This also generates the shared private key
self.__ecdh_key.set_public_remote(key)
self.__shared_key.generate(self.__ecdh_key.get_private(), self.__ecdh_key.get_public_remote())
def set_remote_public_key_rsa(self, key): self.__rsa_key.set_public_remote(key)
def set_remote_public_key_ecdh_bytes(self, key_bytes):
self.__ecdh_key.set_public_remote_bytes(key_bytes)
self.__shared_key.generate(self.__ecdh_key.get_private(), self.__ecdh_key.get_public_remote())
def set_remote_public_key_rsa_bytes(self, key_bytes): self.__rsa_key.set_public_remote_bytes(key_bytes)
# Returns the public remote keys
def get_remote_public_key_ecdh(self): return self.__ecdh_key.get_public_remote()
def get_remote_public_key_rsa(self): return self.__rsa_key.get_public_remote()
def get_remote_public_key_ecdh_bytes(self): return self.__ecdh_key.get_public_remote_bytes()
def get_remote_public_key_rsa_bytes(self): return self.__rsa_key.get_public_remote_bytes()
# Returns the SHA256 fingerprint of any key in bytes
# key, the bytes to hash
@classmethod
def fingerprint(self, key):
# Hash the key bytes
digest = hashes.Hash(hashes.SHA256(), backend=default_backend())
digest.update(key)
fingerprint = digest.finalize()
return fingerprint
# Super class for all the needed keys in the session
class __KeyHold:
def __init__(self):
self.reset()
# Sets the key as 'not' generated
def reset(self):
self._generated = False
# Generates a key
def generate(self):
if self._generated:
raise KeyAlreadyGeneratedError("The key has already been generated")
self._generated = True
# Returns true if the generation method is called
def is_generated(self):
return self._generated
# Generates and holds the exchanged shared key
class __SharedKeyHold(__KeyHold):
def __init__(self):
super().__init__()
self.__salt = b'\xe6\xb3\xdf\x8e\xbc\x95\x94Qi%)a"o\xde\xcb' # TODO: Load this from a config file
self.__otherinfo = b'Derivation of the exchanged ECDH key.'
self.__derived = False
def reset(self):
super().reset()
# Generates and derives the shared key from the private and public keys
# The key is derived and URL-safe base64 encoded
# private_key, the private elliptic curve key
# public_key, the public elliptic curve key
def generate(self, private_key, public_key):
super().generate()
if not isinstance(private_key, EllipticCurvePrivateKey):
raise TypeError("The private_key must be an instance of EllipticCurvePrivateKey")
if not isinstance(public_key, EllipticCurvePublicKey):
raise TypeError("The public_key must be an instance of EllipticCurvePublicKey")
shared_key = private_key.exchange(ec.ECDH(), public_key)
self.__private_key = self.__encode_key(self.__derive_key(shared_key))
# Derives and returns a key
def __derive_key(self, key):
ckdf = ConcatKDFHMAC(
algorithm=hashes.SHA256(),
length=32,
salt=self.__salt,
otherinfo=self.__otherinfo,
backend=default_backend())
self.__derived = True
return ckdf.derive(key)
# URL-safe base64 encodes the key
def __encode_key(self, key):
if not self.__derived:
raise KeyNotDerivedError("The shared key has not been derived")
return base64.urlsafe_b64encode(key)
# Returns the private shared key
def get_private(self):
if not self._generated:
raise KeyNotGeneratedError("The shared key has not been generated")
return self.__private_key
	# Super class for all the needed asymmetric keys in the session
class __AssymmetricKeyHold(__KeyHold):
def __init__(self):
super().__init__()
		# Sets the remote public key as 'not' received
def reset(self):
super().reset()
self._recieved_remote = False
# Returns the local private key
def get_private(self):
if not self._generated:
raise KeyNotGeneratedError("The assymmetric key has not been generated")
return self._private_key
# Returns the local public key
def get_public(self):
return self.get_private().public_key()
		# Sets the remote public key as 'received'
def set_public_remote(self):
self._recieved_remote = True
# Returns the remote public key
def get_public_remote(self):
if not self._recieved_remote:
raise RemoteKeyNotRecievedError("The remote public key has not been recieved")
return self._remote_public_key
		# Decodes and sets the public remote asymmetric key
# key_bytes, the key to decode
def set_public_remote_bytes(self, key_bytes):
if not isinstance(key_bytes, bytes):
raise TypeError("The encoded_key must be an instance of bytes")
self._recieved_remote = True
self._remote_public_key = serialization.load_der_public_key(key_bytes, default_backend())
		# Encodes and returns the public remote asymmetric key
def get_public_remote_bytes(self):
return self.get_public_remote().public_bytes(
encoding=serialization.Encoding.DER,
format=serialization.PublicFormat.SubjectPublicKeyInfo
)
		# Encodes and returns the public asymmetric key
def get_public_bytes(self):
return self.get_public().public_bytes(
encoding=serialization.Encoding.DER,
format=serialization.PublicFormat.SubjectPublicKeyInfo
)
# Generates and holds the local RSA key pair and remote public key
class __RSAKeyHold(__AssymmetricKeyHold):
def __init__(self):
super().__init__()
# Generates a RSA key pair
def generate(self):
super().generate()
self._private_key = rsa.generate_private_key(
public_exponent=65537,
key_size=4096,
backend=default_backend())
# Sets the remote public RSA key
# key, the key to set
def set_public_remote(self, key):
super().set_public_remote()
if not isinstance(key, RSAPublicKey):
raise TypeError("The public key must be an instance of RSAPublicKey")
self._remote_public_key = key
# Generates and holds the local ECDH key pair and remote public key
class __ECDHKeyHold(__AssymmetricKeyHold):
def __init__(self):
super().__init__()
# Generates a ECDH key pair, using the secp256k1 curve
def generate(self):
super().generate()
self._private_key = ec.generate_private_key(
ec.SECP256K1(),
default_backend())
# Sets the remote public ECDH key
# key, the key to set
def set_public_remote(self, key):
super().set_public_remote()
if not isinstance(key, EllipticCurvePublicKey):
raise TypeError("The public key must be an instance of EllipticCurvePublicKey")
self._remote_public_key = key
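# A minimal exchange sketch (hypothetical usage, assuming both peers run this same module):
#
#   local, remote = KeySession(), KeySession()
#   local.set_remote_public_key_ecdh_bytes(remote.get_public_key_ecdh_bytes())
#   remote.set_remote_public_key_ecdh_bytes(local.get_public_key_ecdh_bytes())
#   assert local.get_shared() == remote.get_shared()  # both sides derive the same shared key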
|
[
"oliver@neven.dk"
] |
oliver@neven.dk
|
e7c21c0f616fcf16cb42efee43650f0ff92bf467
|
31a9d63d2cb4e0fded5347c3dd622befb7dacd5e
|
/app/views.py
|
97f9547d886274b8cb2e85f6d90006a1dc1f22ce
|
[
"Apache-2.0"
] |
permissive
|
xod442/pets-api-dev
|
042f1ff834853d5c1f13f93565dfd8f1fc205252
|
226252effdad38f7921c208c878d3cae66fe39cb
|
refs/heads/main
| 2023-04-08T20:03:32.533677
| 2021-04-13T00:58:05
| 2021-04-13T00:58:05
| 351,601,261
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
from flask import Blueprint
from app.api import AppAPI, AccessAPI
app_app = Blueprint('app_app', __name__)
# An app is a client like a username and password
app_view = AppAPI.as_view('app_api')
app_app.add_url_rule('/apps/', view_func=app_view, methods=['POST',])
access_view = AccessAPI.as_view('access_api')
app_app.add_url_rule('/apps/access_token/', view_func=access_view, methods=['POST',])
|
[
"rick@rickkauffman.com"
] |
rick@rickkauffman.com
|
35179409789ca93e4566a6b1b2dbf6a457f112ba
|
372dac75d63a5fc179f848ed7235f27da928f435
|
/triviaQuiz/decorators.py
|
ab0dc9b5f72747bfe5d4a9ec347d892cbbce975f
|
[] |
no_license
|
Bradenm1/Django-quiz
|
53034aa452a45ded8ece582576426641bee22987
|
58081fd46749e9ca5dea1597f479025c872bccfe
|
refs/heads/master
| 2020-03-21T14:48:00.761396
| 2018-06-26T02:53:44
| 2018-06-26T02:53:44
| 138,676,378
| 0
| 1
| null | 2018-08-01T02:06:07
| 2018-06-26T02:50:17
|
Python
|
UTF-8
|
Python
| false
| false
| 1,528
|
py
|
from django.http import HttpResponseRedirect
from django.urls import reverse
from . import queries
""" Wrapper for caching users information """
def cache_user_information():
def _method_wrapper(f):
def _arguments_wrapper(request, *args, **kwargs):
# Get the tournament the user is on, if any
tournament = queries.ErrorHandling().tournament_exists(kwargs.get('slug'))
            # Create the singleton instance; it is recreated on each call for the given page due to errors
queries.UserSessionCache().getInstance().setUp(user=request.user, tournament=tournament, request=request)
return f(request, *args, **kwargs)
return _arguments_wrapper
    return _method_wrapper
""" Wrapper for redirecting to different pages """
def redirect_on_post_get(get, post):
def _method_wrapper(f):
def _arguments_wrapper(request, *args, **kwargs):
if request.method == 'GET':
return HttpResponseRedirect(reverse(get))
else:
return HttpResponseRedirect(reverse(post))
return f(request, *args, **kwargs)
return _arguments_wrapper
    return _method_wrapper
""" Checks if a user is an admin; if not, raises an error """
def is_admin(f):
def wrapper(*args, **kwargs):
# Check if user is a staff member
if (args[0].user.is_staff):
# Returns if so
return f(*args, **kwargs)
        # Otherwise raise an error
        raise PermissionError
return wrapper
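# Minimal usage sketch (hypothetical views, not part of this module):
#
#   @cache_user_information()
#   def tournament_home(request, slug=None):
#       ...
#
#   @is_admin
#   def delete_question(request, *args, **kwargs):
#       ...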
|
[
"bradenm650@gmail.com"
] |
bradenm650@gmail.com
|
f2a22a8a75c9bb883e81656961f74ba2e06ba952
|
d41b7bee52cf71b3b1b5671f4a13e9e465587f96
|
/Python/076.py
|
0486c8e90e728e32d0bac3188701b6576e996d9a
|
[] |
no_license
|
shramkoartem/Project-Euler
|
8e53b2ffec0fff20b45cc95754097a7fbecaf32c
|
eb79f4b6cda553a05e0188b7e329332ea0282a01
|
refs/heads/master
| 2020-04-09T04:54:08.664240
| 2020-02-05T16:19:42
| 2020-02-05T16:19:42
| 160,042,484
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 268
|
py
|
# / 0 (k>n)
# p(k,n)={ 1 (k=n)
# \ p(k+1,n)+p(k,n-k) (k<n)
def partitions(k, n):
if k > n:
return 0
elif k == n:
return 1
else:
return partitions(k+1, n) + partitions(k, n-k)
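# partitions(1, 100) counts every partition of 100, including the trivial single-term
# partition {100}; the problem asks for sums of at least two positive integers, hence the -1.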
ans = partitions(1,100)-1
|
[
"noreply@github.com"
] |
shramkoartem.noreply@github.com
|
afaa56f6afbce9f9b3c9e475f523692567da978a
|
d35e3d18d7ef89b13e23a2156bc2df48342f02a6
|
/常用函数/05-SimpleHttpServer.py
|
3fbd50e84503522fcec45c40cf6c71484b90ee3c
|
[] |
no_license
|
yingrinsing/python_grammar
|
a03359fac7a930cebfedaef96c5f78de93d584a1
|
53ffd56091e522d7f0051a0fe03a1611eacb84bb
|
refs/heads/master
| 2023-08-30T02:17:33.874598
| 2023-08-28T02:24:01
| 2023-08-28T02:24:01
| 206,771,617
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 683
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on 2017-11-3
@author: laok@ArgusTech
@email: 1306743659@qq.com
@copyright: Apache License, Version 2.0
"""
import SimpleHTTPServer
import sys
import socket, webbrowser
#===============================================================================
#
#===============================================================================
# Set the default port
if len(sys.argv) == 1:
sys.argv.append('80')
# Open the page in the default web browser
print(socket.gethostname())
print(socket.gethostbyname(socket.gethostname()))
url = "http://%s" % socket.gethostbyname(socket.gethostname())
webbrowser.open(url)
# Start the HTTP server
SimpleHTTPServer.test()
|
[
"guying@kuaishou.com"
] |
guying@kuaishou.com
|
7414ddeef6a10ebaef96b3d13637ceea3d975140
|
bfe6c95fa8a2aae3c3998bd59555583fed72900a
|
/minSubsequence.py
|
a6e5cb2f3d82e1cab5da5bed032e54785c87360b
|
[] |
no_license
|
zzz136454872/leetcode
|
f9534016388a1ba010599f4771c08a55748694b2
|
b5ea6c21bff317884bdb3d7e873aa159b8c30215
|
refs/heads/master
| 2023-09-01T17:26:57.624117
| 2023-08-29T03:18:56
| 2023-08-29T03:18:56
| 240,464,565
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 439
|
py
|
from typing import List
class Solution:
def minSubsequence(self, nums: List[int]) -> List[int]:
s = sum(nums) / 2
nums.sort(reverse=True)
t = 0
for i in range(len(nums)):
t += nums[i]
if t > s:
nums = nums[:i + 1]
break
return nums
nums = [4, 3, 10, 9, 8]
nums = [4, 4, 7, 6, 7]
# nums = [6]
print(Solution().minSubsequence(nums))
|
[
"zzz136454872@163.com"
] |
zzz136454872@163.com
|
859127b273296f4937decab8644e9d059d97f173
|
361b159f338b50d4f70cd036be160cca1c173589
|
/Go-Data(Web-Source Code)/delete.py
|
448d65e277b3fbb0a3dbabc723b10014c5d8ed5d
|
[] |
no_license
|
kuberkaul/Go-Data
|
5e99885fcca389bf45b5b3a335584932fdc9e574
|
7d77185e587ac28ded1460f7852ab8525eb78f9a
|
refs/heads/master
| 2016-09-05T19:26:26.003982
| 2013-05-18T00:41:00
| 2013-05-18T00:41:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 836
|
py
|
from google.appengine.api import users
from google.appengine.ext import blobstore
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import blobstore_handlers
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp import util
from uploadfile import BHandler
from dbtables import FileInfo
import os
class DeleteFile(BHandler):
def get(self,fid):
delete_file=FileInfo.get_by_id(long(fid))
db.delete(delete_file)
blobstore.delete([delete_file.blobkey])
# filelist=FileInfo.all()
# filelist=filelist.filter('user =', users.get_current_user())
self.redirect("/list/")
# self.render_template("list.html",{'filelist':filelist,'logout_url':users.create_logout_url('/'),
# })
app = webapp.WSGIApplication([
('/delete/(.*)',DeleteFile),
])
|
[
"kuberkaul1989@gmail.com"
] |
kuberkaul1989@gmail.com
|
6194c5a6a36d1344d0da4e7773249ac6f48be3e6
|
1207ededfd1a64c590cfab8071381029eeb241d8
|
/Assignment8/StartingPoint-QLearningMountainCar.py
|
8487659a3a812455092643ba78788671912230d1
|
[] |
no_license
|
BigEggStudy/UW-CSEP-546-Au18-Machine-Learning
|
32c680b8196ae2ae8ebcdf5574997e0116a59d95
|
68b2b1272b7f9b3552a65003f9c55a1a06a137f0
|
refs/heads/master
| 2020-04-05T01:53:52.917002
| 2018-12-14T01:24:35
| 2018-12-14T01:24:35
| 156,454,911
| 2
| 2
| null | 2018-12-05T22:40:20
| 2018-11-06T22:10:30
|
Python
|
UTF-8
|
Python
| false
| false
| 13,931
|
py
|
import numpy as np
import matplotlib.pyplot as plt
from joblib import Parallel, delayed
import random
import datetime
import gym
env = gym.make('MountainCar-v0')
import QLearning # your implementation goes here...
import Assignment7Support
# discountRate = 0.98 # Controls the discount rate for future rewards -- this is gamma from 13.10
# actionProbabilityBase = 1.8 # This is k from the P(a_i|s) expression from section 13.3.5 and influences how random exploration is
# randomActionRate = 0.01 # Percent of time the next action selected by GetAction is totally random
# learningRateScale = 0.01 # Should be multiplied by visits_n from 13.11.
# trainingIterations = 20000
if __name__=="__main__":
def training_ten(discountRate = 0.98, actionProbabilityBase = 1.8, trainingIterations = 20000, mountainCarBinsPerDimension = 20, randomActionRate = 0.01, learningRateScale = 0.01, use_memory=False, times = 10):
print(f'{times} Attempt for this parameters set')
print(f'discountRate = {discountRate}, actionProbabilityBase = {actionProbabilityBase}, trainingIterations = {trainingIterations}, mountainCarBinsPerDimension = {mountainCarBinsPerDimension}, randomActionRate = {randomActionRate}, learningRateScale = {learningRateScale}')
total_scores = Parallel(n_jobs=6)(delayed(training_one)(discountRate, actionProbabilityBase, trainingIterations, mountainCarBinsPerDimension, False, randomActionRate, learningRateScale) for _ in range(times))
return (sum(total_scores) / float(len(total_scores)), total_scores)
def training_one(discountRate = 0.98, actionProbabilityBase = 1.8, trainingIterations = 20000, mountainCarBinsPerDimension = 20, render = False, randomActionRate = 0.01, learningRateScale = 0.01, use_memory=False):
qlearner = QLearning.QLearning(stateSpaceShape=Assignment7Support.MountainCarStateSpaceShape(mountainCarBinsPerDimension), numActions=env.action_space.n, discountRate=discountRate)
for trialNumber in range(trainingIterations):
observation = env.reset()
reward = 0
qlearner.clear_record()
for i in range(200):
currentState = Assignment7Support.MountainCarObservationToStateSpace(observation, mountainCarBinsPerDimension)
action = qlearner.GetAction(currentState, learningMode=True, randomActionRate=randomActionRate, actionProbabilityBase=actionProbabilityBase)
oldState = Assignment7Support.MountainCarObservationToStateSpace(observation, mountainCarBinsPerDimension)
observation, reward, isDone, info = env.step(action)
newState = Assignment7Support.MountainCarObservationToStateSpace(observation, mountainCarBinsPerDimension)
# learning rate scale
qlearner.ObserveAction(oldState, action, newState, reward, learningRateScale=learningRateScale)
if use_memory:
qlearner.record(oldState, action, newState, reward)
if isDone:
if use_memory:
qlearner.replay(learningRateScale)
# if (trialNumber + 1) % 1000 == 0:
# print(trialNumber + 1, i + 1, np.min(qlearner.q_table), np.mean(qlearner.q_table))
break
n = 20
totalRewards = []
for runNumber in range(n):
observation = env.reset()
totalReward = 0
reward = 0
for i in range(200):
if render:
renderDone = env.render()
currentState = Assignment7Support.MountainCarObservationToStateSpace(observation, mountainCarBinsPerDimension)
observation, reward, isDone, info = env.step(qlearner.GetAction(currentState, learningMode=False))
totalReward += reward
if isDone:
if render:
renderDone = env.render()
# print(runNumber + 1, i + 1, totalReward)
totalRewards.append(totalReward)
break
if render:
env.close()
average_score = sum(totalRewards) / float(len(totalRewards))
print(f'[{datetime.datetime.now()}] The average score of this one attempt is {average_score}')
return average_score
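    # Evaluation note (added comment): after training, the agent is run greedily
    # (learningMode=False) for n = 20 episodes of up to 200 steps, and the score for this
    # parameter set is the mean total reward over those runs.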
def plot_result(x, y, diagram_name, parameter_name, save_time = False, rewrite_x = False):
print('')
print(f'### Plot {diagram_name}.')
if save_time:
print(x)
print(y)
return
fig, ax = plt.subplots()
ax.grid(True)
if rewrite_x:
xi = list(range(len(x)))
plt.plot(xi, y)
plt.xlabel(parameter_name)
plt.xticks(xi, x)
else:
plt.plot(x, y)
plt.xlabel(parameter_name)
plt.ylabel('Score')
plt.title(diagram_name)
print('Close the plot diagram to continue program')
plt.show()
#########################################
best_score = float('-Inf')
best_base = 0
x = []
y = []
print('Tune the Action Probability Base')
for base in [1.1, 1.2, 1.3, 1.4, 1.5, 1.8, 2.7, 5, 7]:
print(f'[{datetime.datetime.now()}] Training with actionProbabilityBase {base}')
score, all_score = training_ten(actionProbabilityBase=base)
x.append(base)
y.append(score)
if score > best_score:
best_score = score
best_base = base
print(f'[{datetime.datetime.now()}] The average score is {score}')
plot_result(x, y, 'Action Probability Base vs Score', 'Action Probability Base', save_time = True, rewrite_x = True)
    print(f'When Action Probability Base is {best_base}, the Q-Learning Agent performs the best')
print(f'The best score is {best_score}')
best_base = 7
#########################################
best_score = float('-Inf')
best_bins = 0
x = []
y = []
print('Tune the Bins per Dimension')
for bins in range(20, 201, 10):
print(f'[{datetime.datetime.now()}] mountainCarBinsPerDimension {bins}')
score, all_score = training_ten(actionProbabilityBase=best_base, mountainCarBinsPerDimension=bins)
x.append(bins)
y.append(score)
if score > best_score:
best_score = score
best_bins = bins
print(f'[{datetime.datetime.now()}] The average score is {score}')
plot_result(x, y, 'Bins per Dimension vs Score', 'Bins per Dimension', save_time = True)
    print(f'When Bins per Dimension is {best_bins}, the Q-Learning Agent performs the best')
print(f'The best score is {best_score}')
best_bins = 90
#########################################
best_score = float('-Inf')
best_discount_rate = 0
x = []
y = []
print('Tune the Discount Rate')
for discount_rate in [1, 0.99, 0.98, 0.97, 0.96, 0.95, 0.9, 0.8, 0.75]:
print(f'[{datetime.datetime.now()}] Training with discountRate {discount_rate}')
score, all_score = training_ten(mountainCarBinsPerDimension=best_bins, actionProbabilityBase=best_base, discountRate=discount_rate)
x.append(discount_rate)
y.append(score)
if score > best_score:
best_score = score
best_discount_rate = discount_rate
print(f'[{datetime.datetime.now()}] The average score is {score}')
plot_result(x, y, 'Discount Rate vs Score', 'Discount Rate', save_time = True, rewrite_x = True)
    print(f'When Discount Rate is {best_discount_rate}, the Q-Learning Agent performs the best')
print(f'The best score is {best_score}')
best_discount_rate = 1
#########################################
best_score = float('-Inf')
best_iteration = 0
x = []
y = []
print('Tune the Training Iterations')
for iteration in [20000, 25000, 30000, 35000, 40000, 50000]:
print(f'[{datetime.datetime.now()}] Training with trainingIterations {iteration}')
score, all_score = training_ten(actionProbabilityBase=best_base, mountainCarBinsPerDimension=best_bins, discountRate=best_discount_rate, trainingIterations=iteration)
x.append(iteration)
y.append(score)
if score > best_score:
best_score = score
best_iteration = iteration
print(f'[{datetime.datetime.now()}] The average score is {score}')
plot_result(x, y, 'Training Iterations vs Score', 'Training Iterations', save_time = False)
print(f'When Training Iterations is {best_iteration}, the Q-Learning Agent performs the best')
print(f'The best score is {best_score}')
best_iteration = 35000
#########################################
print('========== Find a better Parameters Set ==========')
best_score = -101.86999999999999
best_parameters = (7, 50, 1, 30000, 0.01, 0.01)
for iteration in [30000, 35000, 40000]:
for random_action_rate in [0.01, 0.02, 0.03, 0.05]:
for learning_rate_scale in [0.01, 0.02, 0.03, 0.05]:
for bins in range(50, 101, 10):
for base in [5, 7, 11, 13]:
for discount_rate in [1, 0.99, 0.98]:
score, all_score = training_ten(actionProbabilityBase=base, mountainCarBinsPerDimension=bins, discountRate=discount_rate, trainingIterations=iteration, randomActionRate=random_action_rate, learningRateScale=learning_rate_scale)
if score > best_score:
best_score = score
best_parameters = (base, bins, discount_rate, iteration, random_action_rate, learning_rate_scale)
print(f'[{datetime.datetime.now()}] The average score is {score}')
print(f'For Now....')
(base, bins, discount_rate, iteration, random_action_rate, learning_rate_scale) = best_parameters
print(f'With the following parameters, the Q-Learning Agent performs the best')
print(f'discountRate = {discount_rate}, actionProbabilityBase = {base}, trainingIterations = {iteration}, mountainCarBinsPerDimension = {bins}, randomActionRate = {random_action_rate}, learningRateScale = {learning_rate_scale}')
print(f'The best score is {best_score}')
(base, bins, discount_rate, iteration, random_action_rate, learning_rate_scale) = best_parameters
print(f'Overall....')
print(f'With the following parameters, the Q-Learning Agent performs the best')
print(f'discountRate = {discount_rate}, actionProbabilityBase = {base}, trainingIterations = {iteration}, mountainCarBinsPerDimension = {bins}, randomActionRate = {random_action_rate}, learningRateScale = {learning_rate_scale}')
print(f'The best score is {best_score}')
#########################################
print('========== Find a better Parameters Set ==========')
print('========== Add memory for Q Learning ==========')
best_score = float('-inf')
best_parameters = (7, 50, 1, 30000, 0.01, 0.01)
random_action_rate = 0.01
learning_rate_scale = 0.01
for iteration in [30000, 35000]:
for bins in range(50, 91, 10):
for base in [2, 2.7, 5, 7, 11]:
for discount_rate in [1, 0.99, 0.98]:
score, all_score = training_ten(actionProbabilityBase=base, mountainCarBinsPerDimension=bins, discountRate=discount_rate, trainingIterations=iteration, randomActionRate=random_action_rate, learningRateScale=learning_rate_scale, use_memory=True)
if score > best_score:
best_score = score
best_parameters = (base, bins, discount_rate, iteration, random_action_rate, learning_rate_scale)
print(f'[{datetime.datetime.now()}] The average score is {score}')
print(f'For Now....')
(base, bins, discount_rate, iteration, random_action_rate, learning_rate_scale) = best_parameters
print(f'With the following parameters, the Q-Learning Agent performs the best')
print(f'discountRate = {discount_rate}, actionProbabilityBase = {base}, trainingIterations = {iteration}, mountainCarBinsPerDimension = {bins}, randomActionRate = {random_action_rate}, learningRateScale = {learning_rate_scale}')
print(f'The best score is {best_score}')
(base, bins, discount_rate, iteration, random_action_rate, learning_rate_scale) = best_parameters
print(f'Overall....')
print(f'With the following parameters, the Q-Learning Agent performs the best')
print(f'discountRate = {discount_rate}, actionProbabilityBase = {base}, trainingIterations = {iteration}, mountainCarBinsPerDimension = {bins}, randomActionRate = {random_action_rate}, learningRateScale = {learning_rate_scale}')
print(f'The best score is {best_score}')
#########################################
print('========== More Runs on Best Parameters ==========')
score, all_score = training_ten(actionProbabilityBase=5, mountainCarBinsPerDimension=50, discountRate=0.99, trainingIterations=30000, randomActionRate=0.01, learningRateScale=0.01, use_memory=True, times=100)
print(f'[{datetime.datetime.now()}] The average score is {score}')
score, all_score = training_ten(actionProbabilityBase=7, mountainCarBinsPerDimension=50, discountRate=0.99, trainingIterations=30000, randomActionRate=0.01, learningRateScale=0.01, use_memory=True, times=100)
print(f'[{datetime.datetime.now()}] The average score is {score}')
score, all_score = training_ten(actionProbabilityBase=7, mountainCarBinsPerDimension=50, discountRate=1, trainingIterations=30000, randomActionRate=0.01, learningRateScale=0.01, use_memory=False, times=100)
print(f'[{datetime.datetime.now()}] The average score is {score}')
|
[
"cd_bigegg@hotmail.com"
] |
cd_bigegg@hotmail.com
|
68758229821a1941caa7d17367aaa5fe71853b4f
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/CK6mdHahiJBjuiDBi_3.py
|
82944b6300ab8edf0127f1346cd7ee428df32e08
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 133
|
py
|
def can_fit(weights, bags):
    total = sum(weights)
    return total / bags <= 10
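A quick illustrative check of can_fit (the calls and expected results below are my addition, not part of the original submission): the helper answers whether the average weight per bag stays at or below 10.
print(can_fit([2, 7, 1, 7, 1], 3))     # True: 18 total weight / 3 bags = 6.0 per bag
print(can_fit([50, 20, 20, 20], 2))    # False: 110 total weight / 2 bags = 55.0 per bag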
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
ab0ef9f0cccf83301597bd7448262e7b08cd2fe4
|
9d8fe7c15a8118053015973e08daca9402eed01d
|
/binding.gyp
|
1ffebc940f76ca637370b793ec63ea6cbcd98b15
|
[
"MIT"
] |
permissive
|
adriankierman/teddybear
|
cf3aa7bd57c4d6c9b644f5b33fce217ea7dc738b
|
9feb6ce75d88988c2cf79f69f74840f7119d27c0
|
refs/heads/master
| 2021-01-18T04:07:03.628165
| 2014-07-22T18:26:54
| 2014-07-22T18:26:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 104
|
gyp
|
{
'targets': [
{
'target_name': 'teddybear',
'sources': [ 'teddybear.cc' ]
}
]
}
|
[
"amwebdk@gmail.com"
] |
amwebdk@gmail.com
|
45b002d9e8437b28c679132060b3b6c24b6dc881
|
bb893ddb702d744da8ce2b6c4f34ee50869d0e33
|
/Laptop/Laptop_slam/build/hector_slam/hector_mapping/catkin_generated/pkg.develspace.context.pc.py
|
c975d742a73caad3f67a3960adc8f1e8bfb1e9d4
|
[] |
no_license
|
AndreWongZH/Alex-01-01-02
|
4d94ed455556f98a1b8bea60a845a9125a542281
|
8cff777cd5354c4758978f97178fc646e9183b47
|
refs/heads/master
| 2022-04-25T16:44:06.203253
| 2020-04-20T13:53:51
| 2020-04-20T13:53:51
| 246,491,413
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 731
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/cascasm/Desktop/slam/devel/include;/home/cascasm/Desktop/slam/src/hector_slam/hector_mapping/include;/usr/include/eigen3".split(';') if "/home/cascasm/Desktop/slam/devel/include;/home/cascasm/Desktop/slam/src/hector_slam/hector_mapping/include;/usr/include/eigen3" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;nav_msgs;visualization_msgs;tf;message_filters;laser_geometry;tf_conversions;message_runtime".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "hector_mapping"
PROJECT_SPACE_DIR = "/home/cascasm/Desktop/slam/devel"
PROJECT_VERSION = "0.4.0"
|
[
"wongandre98@gmail.com"
] |
wongandre98@gmail.com
|
ef1e75e6df1ee13cca3ab40c9ab43e756ca9fb75
|
ce0108679fc6412e3811023b943b2c105899480b
|
/testing/hashing_timer.py
|
99309230c08ba25d9cfe2e0e582bba247c4e0854
|
[] |
no_license
|
baka-rust/sign
|
88fa95ae19aa8553916863e9ef9ff8f7a86f1891
|
18d3cd7608cf9def98a2321bb1532e1b959cc88b
|
refs/heads/master
| 2020-07-28T01:23:48.719622
| 2016-12-06T00:31:03
| 2016-12-14T18:28:17
| 73,420,672
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 755
|
py
|
#!/usr/bin/env python3
import subprocess, shlex, timeit
from signer import sign_binary
num_iters = 20
for size in range(1, int(1e8), int(1e5)):
code = """static int array [""" + str(size) + """] = {5};
int main() {
return 0;
}
"""
with open("hashing_test_file.c", "w+") as file:
file.write(code)
command = "gcc -o hashing_test_file hashing_test_file.c"
subprocess.check_call(shlex.split(command))
kwargs = {
'password': 'crypto'
}
sign_binary('hashing_test_file', 'cert.pem', 'privatekey.pem', **kwargs)
command = "./hashing_test_file"
time = timeit.timeit("subprocess.check_call({})".format(shlex.split(command)), number=num_iters, setup="import subprocess")
print("{}, {}".format(size, time/num_iters))
|
[
"ejvaughan@gmail.com"
] |
ejvaughan@gmail.com
|
1bc81e62e2c47c24403678d581bfbd2a56a4852b
|
6f14cc7dedd69d640c0a3cc888d9a03d6eb45534
|
/app-venv-pip/example_pkg/main.py
|
791b7fde212b1d68bb29e5027bbaa00ae2248b99
|
[
"MIT"
] |
permissive
|
rosscoleman/python-templates
|
a995ff0d7e6f4c884aba75408e009372150a2a20
|
17e1b9b83f199c1f22eba953401776ef51665c28
|
refs/heads/master
| 2022-12-11T00:15:29.820884
| 2019-10-08T04:04:17
| 2019-10-08T04:05:44
| 211,236,967
| 1
| 0
|
MIT
| 2021-06-02T00:28:15
| 2019-09-27T04:35:21
|
Python
|
UTF-8
|
Python
| false
| false
| 302
|
py
|
import logging
import example_pkg.mylib as mylib
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
def main():
log.info("in main()")
log.info(mylib.person)
y = mylib.get_person()
log.info(y)
if __name__ == '__main__':
log.info("in __main__")
main()
|
[
"rossco85@gmail.com"
] |
rossco85@gmail.com
|
31983bb849cc11d4305e91b71bb2e60d231fe4f8
|
4130d4def7653ce7cf05dc6ad752a88cb1dbc91b
|
/201003629-first-ex.py
|
b3cf70ea051123e81684d6f195471e5cb2dee58e
|
[] |
no_license
|
siriusJinwooChoi/Algorithm_class
|
930b168dd1baa7917b3e60f3fb68a3e8719fa2fe
|
89195f844ed2c28ca1b3e8cced7f689a1d03de2d
|
refs/heads/master
| 2020-07-17T20:08:36.345982
| 2016-09-07T01:51:31
| 2016-09-07T01:51:31
| 66,989,465
| 0
| 0
| null | null | null | null |
UHC
|
Python
| false
| false
| 74
|
py
|
# -*- coding:utf-8 -*-
print '학번은 201003629'
print "안녕은 Hello"
|
[
"indra4812@gmail.com"
] |
indra4812@gmail.com
|
dc4f6e68eddb294df351bb90097058936ffae1e3
|
eba2347c0f8f4805adfd33915ec81da141a1c026
|
/Assignments/Assignment1/A1.py
|
698a394e80e70133a184a0903065568da76fae8a
|
[] |
no_license
|
R-Qu/DD2424-Deep-Learning-in-Data-Science
|
9796967e1cc28d53175773d25c889d0ab3e0a439
|
d64bb7a3e05bbb9d2b50d4dffb2fae79e871a953
|
refs/heads/master
| 2020-05-05T09:21:06.144454
| 2019-06-03T11:31:50
| 2019-06-03T11:31:50
| 179,900,609
| 2
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,219
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# author: RuiQu rqu@kth.se
import pickle
import numpy as np
import matplotlib.pyplot as plt
from math import floor, sqrt
from tqdm import tqdm
#Dataset layout, each batch contains a dictionary with DATA:10000*3072 numpy array 32*32*3(R,G,B), LABELS:10000numbers in range 0-9(10labels).
N = 10000
d = 3072
K = 10
cifar10_labels = ["airplane","automobile","bird","cat","deer","dog","frog","horse","ship","truck"]
def unpickle(file):
with open(file, 'rb') as fo:
dict = pickle.load(fo, encoding='bytes')
return dict
#One-Hot Encoding for categorical variables/nominal
def OneHotEncoding(labels):
one_hot_labels = np.zeros((N, K))
for i in range(len(labels)):
one_hot_labels[i][labels[i]] = 1
return one_hot_labels
def LoadBatch(filename):
data = np.zeros((N, d))
labels = np.zeros((N, 1))
one_hot_labels = np.zeros((N, K))
dict = unpickle(filename)
data = dict[bytes("data", 'utf-8')] / 255.0
labels = np.array(dict[bytes("labels", 'utf-8')])
one_hot_labels = OneHotEncoding(labels)
return data.T, one_hot_labels.T, labels
def LoadDataset():
trainSet = {}
testSet = {}
validationSet = {}
for i in [1, 3, 4, 5]:
t1, t2, t3 = LoadBatch("dataset/data_batch_" + str(i))
if i == 1:
trainSet["data"] = t1
trainSet["one_hot"] = t2
trainSet["labels"] = t3
else:
trainSet["data"] = np.column_stack((trainSet["data"], t1))
trainSet["one_hot"] = np.column_stack((trainSet["one_hot"], t2))
trainSet["labels"] = np.append(trainSet["labels"], t3)
a, b, c = LoadBatch("dataset/data_batch_2")
#hold out the first 1000 samples of data_batch_2 for validation
validationSet["data"], validationSet["one_hot"], validationSet["labels"] = a[:, :1000], b[:, :1000], c[:1000]
trainSet["data"] = np.column_stack((trainSet["data"], a[:, 1000:]))
trainSet["one_hot"] = np.column_stack((trainSet["one_hot"], b[:, 1000:]))
trainSet["labels"] = np.append(trainSet["labels"], c[1000:])
testSet["data"], testSet["one_hot"], testSet["labels"] = LoadBatch("dataset/test_batch")
temp = np.copy(trainSet["data"]).reshape((32, 32, 3, 49000), order='F')
temp = np.flip(temp, 0)
temp = temp.reshape((3072, 49000), order='F')
trainSet["data"] = np.column_stack((trainSet["data"], temp))
trainSet["one_hot"] = np.column_stack((trainSet["one_hot"], trainSet["one_hot"]))
trainSet["labels"] = np.append(trainSet["labels"], trainSet["labels"])
mean = np.mean(trainSet["data"], axis=1)
mean = mean[:, np.newaxis]
trainSet["data"] = trainSet["data"] - mean
validationSet["data"] = validationSet["data"] - mean
testSet["data"] = testSet["data"] - mean
return trainSet, validationSet, testSet
class Classifier():
def __init__(self, learning_rate, lambda_regularization, n_batch, n_epochs, decay_factor, SVM=False):
self.W = np.zeros((K, d))
self.b = np.zeros((K, 1))
self.eta = learning_rate
self.lambda_reg = lambda_regularization
self.n_batch = n_batch
self.n_epochs = n_epochs
self.decay_factor = decay_factor
self.SVM = SVM
np.random.seed(1)
self.initialization()
def initialization(self):
mu = 0
sigma = sqrt(2) / sqrt(d)
self.W = np.random.normal(mu, sigma, (K, d))
self.b = np.random.normal(mu, sigma, (K, 1))
def evaluateClassifier(self, X, W, b):
s = np.dot(W, X) + b
P = self.softmax(s)
assert(P.shape == (K, X.shape[1]))
return P
def softmax(self, x):
softmax = np.exp(x) / sum(np.exp(x))
return softmax
def computeCost(self, X, Y, W, b):
regularization = self.lambda_reg * np.sum(np.square(W))
loss_sum = 0
for i in range(X.shape[1]):
x = np.zeros((d, 1))
y = np.zeros((K, 1))
x = X[:, [i]]
y = Y[:, [i]]
if (self.SVM):
loss_sum += self.svm_loss(x, y, W=W, b=b)
else:
loss_sum += self.cross_entropy(x, y, W=W, b=b)
loss_sum /= X.shape[1]
final = loss_sum + regularization
assert(len(final) == 1)
return final
def cross_entropy(self, x, y, W, b):
l = - np.log(np.dot(y.T, self.evaluateClassifier(x, W=W, b=b)))[0]
return l
def svm_loss(self, x, y, W, b):
s = np.dot(W, x) + b
l = 0
y_int = np.where(y.T[0] == 1)[0][0]
for j in range(K):
if j != y_int:
l += max(0, s[j] - s[y_int] + 1)
return l
def ComputeAccuracy(self, X, Y):
acc = 0
for i in range(X.shape[1]):
P = self.evaluateClassifier(X[:, [i]], self.W, self.b)
label = np.argmax(P)
if label == Y[i]:
acc += 1
acc /= X.shape[1]
return acc
def compute_gradients(self, X, Y, P, W):
G = -(Y - P.T).T
return (np.dot(G,X)) / X.shape[0] + 2 * self.lambda_reg * W, np.mean(G, axis=-1, keepdims=True)
def compute_gradients_SVM(self, X, Y, W, b):
n = X.shape[1]
gradW = np.zeros((K, d))
gradb = np.zeros((K, 1))
for i in range(n):
x = X[:, i]
y_int = np.where(Y[:, [i]].T[0] == 1)[0][0]
s = np.dot(W, X[:, [i]]) + b
for j in range(K):
if j != y_int:
if max(0, s[j] - s[y_int] + 1) != 0:
gradW[j] += x
gradW[y_int] += -x
gradb[j, 0] += 1
gradb[y_int, 0] += -1
gradW /= n
gradW += self.lambda_reg * W
gradb /= n
return gradW, gradb
def shuffle(self, a, b):
assert len(a) == len(b)
p = np.random.permutation(len(a))
return a[p], b[p]
def fit(self, X, Y, validationSet=[]):
n = X.shape[1]
costsTraining = []
costsValidation = []
bestW = np.copy(self.W)
bestb = np.copy(self.b)
bestVal = self.computeCost(
validationSet["data"], validationSet["one_hot"], self.W, self.b)[0]
bestEpoch = 0
for i in tqdm(range(self.n_epochs)):
n_batch = floor(n / self.n_batch)
#Shuffle the order of training data before each epoch
X, Y = self.shuffle(X.T, Y.T)
X = X.T
Y = Y.T
#Decay the learning rate by decay factor 0.9
self.eta = self.decay_factor * self.eta
for j in range(n_batch):
j_start = j * self.n_batch
j_end = (j + 1) * self.n_batch
if j == n_batch - 1:
j_end = n
Xbatch = X[:, j_start:j_end]
Ybatch = Y[:, j_start:j_end]
Pbatch = self.evaluateClassifier(Xbatch, self.W, self.b)
if (self.SVM):
grad_W, grad_b = self.compute_gradients_SVM(
Xbatch, Ybatch, self.W, self.b)
else:
grad_W, grad_b = self.compute_gradients(
Xbatch.T, Ybatch.T, Pbatch, self.W)
self.W -= self.eta * grad_W
self.b -= self.eta * grad_b
val = self.computeCost(
validationSet["data"], validationSet["one_hot"], self.W, self.b)[0]
print("Validation loss: " + str(val))
if val < bestVal:
bestVal = np.copy(val)
bestW = np.copy(self.W)
bestb = np.copy(self.b)
bestEpoch = np.copy(i)
costsTraining.append(self.computeCost(X, Y, self.W, self.b)[0])
costsValidation.append(val)
self.W = np.copy(bestW)
self.b = np.copy(bestb)
print("Best epoch: " + str(bestEpoch))
print("Best cost: " + str(self.computeCost(
validationSet["data"], validationSet["one_hot"], self.W, self.b)[0]))
plt.plot(costsTraining, label="Training cost")
plt.plot(costsValidation, label="Validation cost")
plt.xlabel('Epoch')
plt.ylabel('Cost')
plt.title('Training & Validation Cost')
plt.legend(loc='best')
plt.savefig("training_validation_cost.png")
plt.show()
for i, row in enumerate(self.W):
img = (row - row.min()) / (row.max() - row.min())
plt.subplot(2, 5, i + 1)
img = np.rot90(np.reshape(img, (32, 32, 3), order='F'), k=3)
plt.imshow(img)
plt.axis('off')
plt.title(cifar10_labels[i])
plt.savefig("weights.png")
plt.show()
def main():
print("Loading dataset...")
trainSet, validationSet, testSet = LoadDataset()
print("Dataset loaded!")
#Classifier(learning_rate, lambda_regularization, n_batch, n_epochs, decay_factor)
lambda_regularization = .1
n_epochs = 40
n_batch= 100
eta = 0.01
decay_factor = 0.95
'''
#Exercise1
Exercise_1 = Classifier(eta, lambda_regularization, n_batch, n_epochs, decay_factor)
Exercise_1.fit(trainSet["data"], trainSet["one_hot"], validationSet = validationSet)
print("lambda=" + str(lambda_regularization) + ",", "n_epochs=" + str(n_epochs) + ",", "n_batch=" + str(n_batch) + ",", "eta=" + str(eta) + ",", "decay_factor=" + str(decay_factor))
print("Final accuracy:" + str(Exercise_1.ComputeAccuracy(testSet["data"], testSet["labels"])))
'''
#Exercise2
Exercise2 = Classifier(eta, lambda_regularization, n_batch, n_epochs, decay_factor, SVM = True)
Exercise2.fit(trainSet["data"], trainSet["one_hot"], validationSet = validationSet)
print("lambda=" + str(lambda_regularization) + ",", "n_epochs=" + str(n_epochs) + ",", "n_batch=" + str(n_batch) + ",", "eta=" + str(eta) + ",", "decay_factor=" + str(decay_factor),"SVM loss")
print("Final accuracy:" + str(Exercise2.ComputeAccuracy(testSet["data"], testSet["labels"])))
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
R-Qu.noreply@github.com
|
2ddd2c7f2ca4f090480c6cfd552b181033c0daa5
|
d81e533783d0188a632bcd4457faaeacafef50b5
|
/prob7.py
|
7c6d627096119c3a3838527badb9b3e9638d943f
|
[] |
no_license
|
rcuhljr/MyEuler
|
4f91d797604e40ba970f6308985ef9a7a06c044d
|
5c03be90cf04b4e9f034d994e4e6f237be383aff
|
refs/heads/master
| 2020-04-07T16:30:13.865951
| 2013-03-31T01:54:28
| 2013-03-31T01:54:28
| 3,154,500
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 416
|
py
|
import math
import cProfile
primes = [2]
def pumpPrime(x):
cutoff = math.sqrt(x)
for n in primes:
if x%n == 0:
return
elif n > cutoff:
break
primes.append(x)
def solve(x):
count = 3
while len(primes) < x:
pumpPrime(count)
count += 2
print primes[len(primes)-1]
print cProfile.run('solve(10001)')
#yay code reuse
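A hedged cross-check for the snippet above (my addition, written for Python 3 since the original uses Python 2 print statements): the 10001st prime is 104743, which a simple trial-division sketch in the same style reproduces.
def nth_prime(n):
    found = [2]
    candidate = 3
    while len(found) < n:
        is_prime = True
        for p in found:
            if p * p > candidate:
                break          # no prime up to sqrt(candidate) divides it
            if candidate % p == 0:
                is_prime = False
                break
        if is_prime:
            found.append(candidate)
        candidate += 2
    return found[-1]

print(nth_prime(10001))  # 104743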
|
[
"rcuhl@sep.com"
] |
rcuhl@sep.com
|
d82d741910853f672aa47410d5b6cd3deeb0a5b4
|
e78154abbb8bacf5afccda9da371684cbeabad36
|
/popego/popserver/popserver/lib/app_globals.py
|
9d5e4446853d9c86f57be645c6ee6155079a3b73
|
[
"BSD-3-Clause"
] |
permissive
|
enterstudio/popego
|
1a196fabc374c0f45764e5c74bd7752236424040
|
2d09e793d9d2f297139edb325b8a70ddda9b2705
|
refs/heads/master
| 2021-04-09T16:39:40.781634
| 2016-10-14T16:53:47
| 2016-10-14T16:53:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,978
|
py
|
# -*- coding: utf-8 -*-
__docformat__='restructuredtext'
"""The application's Globals object"""
from pylons import config
import glob, os
from os.path import dirname, exists, join, abspath
class Globals(object):
"""Globals acts as a container for objects available throughout the
life of the application
"""
def __init__(self):
"""One instance of Globals is created during application
initialization and is available during requests via the 'g'
variable
"""
self._initBundles()
self._getRevision()
def _getRevision(self):
""" Intenta recuperar la revision de 'popserver' a partir de un archivo 'REVISION'
en el app root """
rev_file = join(dirname(abspath(__file__)), '..', '..', 'REVISION')
self.revision = open(rev_file).read().strip() if exists(rev_file) else None
def _initBundles(self):
self.stylesheet_bundle_path = None
self.javascript_bundle_path = None
root = dirname(dirname(abspath(__file__)))
mtime_cmp = lambda fname1, fname2: cmp(os.path.getmtime(fname1), os.path.getmtime(fname2))
if config.get('popego.serve_bundled_stylesheets', False):
bundles = glob.glob(os.path.join(root, 'public/css', 'popego_style_[0-9]*.css'))
# if there happens to be more than one bundle, use the newest one (i.e., greatest modification time)
self.stylesheet_bundle_path = '/css/%s' % os.path.basename(sorted(bundles, mtime_cmp)[-1]) if len(bundles) > 0 else None
# if config.get('popego.serve_bundled_javascripts', False):
# bundles = glob.glob(os.path.join(root, 'public/javascripts', 'popego_scripts_[0-9]*.css'))
# # if there happens to be more than one bundle, use the newest one (i.e., greatest modification time)
# self.javascript_bundle_path = '/javascripts/%s' % os.path.basename(sorted(bundles, mtime_cmp)[-1]) if len(bundles) > 0 else None
|
[
"santisiri@gmail.com"
] |
santisiri@gmail.com
|
17333f888ef5aa2338e99e40500099d63f4041b7
|
58f745de327deb81144412b4b4b5ba7b0c48ad3c
|
/tests/test_day2.py
|
2a3204ac7a52406d864fdd8518e2676d65a36d44
|
[
"MIT"
] |
permissive
|
aartitayade/predict
|
b1188aef60804bca81908f7e9adc4828663d7bf4
|
960141f30034649618468912b93afbac4c82e23f
|
refs/heads/master
| 2022-06-06T10:17:28.012370
| 2020-05-03T15:27:01
| 2020-05-03T15:27:01
| 260,945,566
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 188
|
py
|
import pytest
from src.day2_copy import predict_cnt
def test_predict_cnt():
assert predict_cnt(0.229270,0.436957,0.186900)==2239
assert predict_cnt(0.363625,0.805833,0.160446)==2626
|
[
"aartitayade96@gmail.com"
] |
aartitayade96@gmail.com
|
195a23658e0c693a4e1da301ddba5e4eb70f5682
|
6a700571d75e6ac131cc54b0f53937064fcd50fe
|
/CIFAR/cifar/vgg19.py
|
c6773a7d9514b5ef526aed804a7dccb2ad4c9ab3
|
[] |
no_license
|
destinyc/cifar
|
6be9f4e71e2caccc1b5c6cda431c9565ef0cc16e
|
62c8cd15385874b144a96cc9e60750566307d09a
|
refs/heads/master
| 2020-06-17T13:00:47.085306
| 2019-07-09T04:25:37
| 2019-07-09T04:25:37
| 195,932,135
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,738
|
py
|
# -*- coding:utf-8 -*-
import tensorflow as tf
import numpy as np
import time
import os
import sys
import pickle
import random
#from read_data import Read_cifar10
class_num = 10
image_size = 32
img_channels = 3
iterations = 200
batch_size = 250
total_epoch = 164
weight_decay = 0.0003
dropout_rate = 0.5
momentum_rate = 0.9
log_save_path = './vgg_logs'
model_save_path = '../model/'
def load_data_one(file):
with open(file, 'rb') as f:
dict = pickle.load(f, encoding='bytes')
return dict[b'data'], dict[b'labels']
def load_data(files, data_dir, label_count):
data, labels = [], []
for f in files:
data_n, labels_n = load_data_one(data_dir + '/' + f)
labels_n = np.array([[float(i == label) for i in range(label_count)] for label in labels_n])
data.append(data_n.reshape(10000, 3, 32, 32).transpose(0,2,3,1))
labels.append(labels_n)
data = np.concatenate(np.array(data))
labels = np.concatenate(np.array(labels))
return data, labels
def prepare_data():
print("======Loading data======")
data_dir = '../database/cifar10'
image_dim = image_size * image_size * img_channels
label_count = 10
train_files = ['data_batch_%d.bin' % d for d in range(1, 6)]
train_data, train_labels = load_data(train_files, data_dir, label_count)
test_data, test_labels = load_data(['test_batch.bin'], data_dir, label_count)
print("Train data:", np.shape(train_data), np.shape(train_labels))
print("Test data :", np.shape(test_data), np.shape(test_labels))
print("======Load finished======")
print("======Shuffling data======")
indices = np.random.permutation(len(train_data))
train_data = train_data[indices]
train_labels = train_labels[indices]
print("======Prepare Finished======")
return train_data, train_labels, test_data, test_labels
def data_preprocessing(x_train,x_test):
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train[:, :, :, 0] = (x_train[:, :, :, 0] - np.mean(x_train[:, :, :, 0])) / np.std(x_train[:, :, :, 0])
x_train[:, :, :, 1] = (x_train[:, :, :, 1] - np.mean(x_train[:, :, :, 1])) / np.std(x_train[:, :, :, 1])
x_train[:, :, :, 2] = (x_train[:, :, :, 2] - np.mean(x_train[:, :, :, 2])) / np.std(x_train[:, :, :, 2])
x_test[:, :, :, 0] = (x_test[:, :, :, 0] - np.mean(x_test[:, :, :, 0])) / np.std(x_test[:, :, :, 0])
x_test[:, :, :, 1] = (x_test[:, :, :, 1] - np.mean(x_test[:, :, :, 1])) / np.std(x_test[:, :, :, 1])
x_test[:, :, :, 2] = (x_test[:, :, :, 2] - np.mean(x_test[:, :, :, 2])) / np.std(x_test[:, :, :, 2])
return x_train, x_test
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape, dtype=tf.float32)
return tf.Variable(initial)
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool(input, k_size=1, stride=1, name=None):
return tf.nn.max_pool(input, ksize=[1, k_size, k_size, 1], strides=[1, stride, stride, 1],
padding='SAME', name=name)
def batch_norm(input):
return tf.contrib.layers.batch_norm(input, decay=0.9, center=True, scale=True, epsilon=1e-3,
is_training=train_flag, updates_collections=None)
def _random_crop(batch, crop_shape, padding=None):
oshape = np.shape(batch[0])
if padding:
oshape = (oshape[0] + 2*padding, oshape[1] + 2*padding)
new_batch = []
npad = ((padding, padding), (padding, padding), (0, 0))
for i in range(len(batch)):
new_batch.append(batch[i])
if padding:
new_batch[i] = np.lib.pad(batch[i], pad_width=npad,
mode='constant', constant_values=0)
nh = random.randint(0, oshape[0] - crop_shape[0])
nw = random.randint(0, oshape[1] - crop_shape[1])
new_batch[i] = new_batch[i][nh:nh + crop_shape[0],
nw:nw + crop_shape[1]]
return new_batch
def _random_flip_leftright(batch):
for i in range(len(batch)):
if bool(random.getrandbits(1)):
batch[i] = np.fliplr(batch[i])
return batch
def data_augmentation(batch):
batch = _random_flip_leftright(batch)
batch = _random_crop(batch, [32, 32], 4)
return batch
def learning_rate_schedule(epoch_num):
if epoch_num < 81:
return 0.1
elif epoch_num < 121:
return 0.01
else:
return 0.001
def run_testing(sess, ep):
acc = 0.0
loss = 0.0
pre_index = 0
add = 1000
for it in range(10):
batch_x = test_x[pre_index:pre_index+add]
batch_y = test_y[pre_index:pre_index+add]
pre_index = pre_index + add
loss_, acc_ = sess.run([cross_entropy, accuracy],
feed_dict={x: batch_x, y_: batch_y, keep_prob: 1.0, train_flag: False})
loss += loss_ / 10.0
acc += acc_ / 10.0
summary = tf.Summary(value=[tf.Summary.Value(tag="test_loss", simple_value=loss),
tf.Summary.Value(tag="test_accuracy", simple_value=acc)])
return acc, loss, summary
if __name__ == '__main__':
#read = Read_cifar10()
#train_x, train_y, test_x, test_y = read.read_data()
train_x, train_y, test_x, test_y = prepare_data()
train_x, test_x = data_preprocessing(train_x, test_x)
# define placeholder x, y_ , keep_prob, learning_rate
x = tf.placeholder(tf.float32,[None, image_size, image_size, 3])
y_ = tf.placeholder(tf.float32, [None, class_num])
keep_prob = tf.placeholder(tf.float32)
learning_rate = tf.placeholder(tf.float32)
train_flag = tf.placeholder(tf.bool)
# build_network
W_conv1_1 = tf.get_variable('conv1_1', shape=[3, 3, 3, 64], initializer=tf.contrib.keras.initializers.he_normal())
b_conv1_1 = bias_variable([64])
output = tf.nn.relu(batch_norm(conv2d(x, W_conv1_1) + b_conv1_1))
W_conv1_2 = tf.get_variable('conv1_2', shape=[3, 3, 64, 64], initializer=tf.contrib.keras.initializers.he_normal())
b_conv1_2 = bias_variable([64])
output = tf.nn.relu(batch_norm(conv2d(output, W_conv1_2) + b_conv1_2))
output = max_pool(output, 2, 2, "pool1")
W_conv2_1 = tf.get_variable('conv2_1', shape=[3, 3, 64, 128], initializer=tf.contrib.keras.initializers.he_normal())
b_conv2_1 = bias_variable([128])
output = tf.nn.relu(batch_norm(conv2d(output, W_conv2_1) + b_conv2_1))
W_conv2_2 = tf.get_variable('conv2_2', shape=[3, 3, 128, 128], initializer=tf.contrib.keras.initializers.he_normal())
b_conv2_2 = bias_variable([128])
output = tf.nn.relu(batch_norm(conv2d(output, W_conv2_2) + b_conv2_2))
output = max_pool(output, 2, 2, "pool2")
W_conv3_1 = tf.get_variable('conv3_1', shape=[3, 3, 128, 256], initializer=tf.contrib.keras.initializers.he_normal())
b_conv3_1 = bias_variable([256])
output = tf.nn.relu( batch_norm(conv2d(output,W_conv3_1) + b_conv3_1))
W_conv3_2 = tf.get_variable('conv3_2', shape=[3, 3, 256, 256], initializer=tf.contrib.keras.initializers.he_normal())
b_conv3_2 = bias_variable([256])
output = tf.nn.relu(batch_norm(conv2d(output, W_conv3_2) + b_conv3_2))
W_conv3_3 = tf.get_variable('conv3_3', shape=[3, 3, 256, 256], initializer=tf.contrib.keras.initializers.he_normal())
b_conv3_3 = bias_variable([256])
output = tf.nn.relu( batch_norm(conv2d(output, W_conv3_3) + b_conv3_3))
W_conv3_4 = tf.get_variable('conv3_4', shape=[3, 3, 256, 256], initializer=tf.contrib.keras.initializers.he_normal())
b_conv3_4 = bias_variable([256])
output = tf.nn.relu(batch_norm(conv2d(output, W_conv3_4) + b_conv3_4))
output = max_pool(output, 2, 2, "pool3")
W_conv4_1 = tf.get_variable('conv4_1', shape=[3, 3, 256, 512], initializer=tf.contrib.keras.initializers.he_normal())
b_conv4_1 = bias_variable([512])
output = tf.nn.relu(batch_norm(conv2d(output, W_conv4_1) + b_conv4_1))
W_conv4_2 = tf.get_variable('conv4_2', shape=[3, 3, 512, 512], initializer=tf.contrib.keras.initializers.he_normal())
b_conv4_2 = bias_variable([512])
output = tf.nn.relu(batch_norm(conv2d(output, W_conv4_2) + b_conv4_2))
W_conv4_3 = tf.get_variable('conv4_3', shape=[3, 3, 512, 512], initializer=tf.contrib.keras.initializers.he_normal())
b_conv4_3 = bias_variable([512])
output = tf.nn.relu(batch_norm(conv2d(output, W_conv4_3) + b_conv4_3))
W_conv4_4 = tf.get_variable('conv4_4', shape=[3, 3, 512, 512], initializer=tf.contrib.keras.initializers.he_normal())
b_conv4_4 = bias_variable([512])
output = tf.nn.relu(batch_norm(conv2d(output, W_conv4_4) + b_conv4_4))
output = max_pool(output, 2, 2)
W_conv5_1 = tf.get_variable('conv5_1', shape=[3, 3, 512, 512], initializer=tf.contrib.keras.initializers.he_normal())
b_conv5_1 = bias_variable([512])
output = tf.nn.relu(batch_norm(conv2d(output, W_conv5_1) + b_conv5_1))
W_conv5_2 = tf.get_variable('conv5_2', shape=[3, 3, 512, 512], initializer=tf.contrib.keras.initializers.he_normal())
b_conv5_2 = bias_variable([512])
output = tf.nn.relu(batch_norm(conv2d(output, W_conv5_2) + b_conv5_2))
W_conv5_3 = tf.get_variable('conv5_3', shape=[3, 3, 512, 512], initializer=tf.contrib.keras.initializers.he_normal())
b_conv5_3 = bias_variable([512])
output = tf.nn.relu(batch_norm(conv2d(output, W_conv5_3) + b_conv5_3))
W_conv5_4 = tf.get_variable('conv5_4', shape=[3, 3, 512, 512], initializer=tf.contrib.keras.initializers.he_normal())
b_conv5_4 = bias_variable([512])
output = tf.nn.relu(batch_norm(conv2d(output, W_conv5_4) + b_conv5_4))
# output = tf.contrib.layers.flatten(output)
output = tf.reshape(output, [-1, 2*2*512])
W_fc1 = tf.get_variable('fc1', shape=[2048, 4096], initializer=tf.contrib.keras.initializers.he_normal())
b_fc1 = bias_variable([4096])
output = tf.nn.relu(batch_norm(tf.matmul(output, W_fc1) + b_fc1) )
output = tf.nn.dropout(output, keep_prob)
W_fc2 = tf.get_variable('fc7', shape=[4096, 4096], initializer=tf.contrib.keras.initializers.he_normal())
b_fc2 = bias_variable([4096])
output = tf.nn.relu(batch_norm(tf.matmul(output, W_fc2) + b_fc2))
output = tf.nn.dropout(output, keep_prob)
W_fc3 = tf.get_variable('fc3', shape=[4096, 10], initializer=tf.contrib.keras.initializers.he_normal())
b_fc3 = bias_variable([10])
output = tf.nn.relu(batch_norm(tf.matmul(output, W_fc3) + b_fc3))
# output = tf.reshape(output,[-1,10])
# loss function: cross_entropy
# train_step: training operation
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=output))
l2 = tf.add_n([tf.nn.l2_loss(var) for var in tf.trainable_variables()])
train_step = tf.train.MomentumOptimizer(learning_rate, momentum_rate, use_nesterov=True).\
minimize(cross_entropy + l2 * weight_decay)
correct_prediction = tf.equal(tf.argmax(output, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# initial an saver to save model
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
summary_writer = tf.summary.FileWriter(log_save_path,sess.graph)
# epoch = 164
# make sure [batch_size * iterations = data_set_number]
for ep in range(1, total_epoch+1):
lr = learning_rate_schedule(ep)
pre_index = 0
train_acc = 0.0
train_loss = 0.0
start_time = time.time()
print("\n epoch %d/%d:" % (ep, total_epoch))
for it in range(1, iterations+1):
batch_x = train_x[pre_index:pre_index+batch_size]
batch_y = train_y[pre_index:pre_index+batch_size]
batch_x = data_augmentation(batch_x)
_, batch_loss = sess.run([train_step, cross_entropy],
feed_dict={x: batch_x, y_: batch_y, keep_prob: dropout_rate,
learning_rate: lr, train_flag: True})
batch_acc = accuracy.eval(feed_dict={x: batch_x, y_: batch_y, keep_prob: 1.0, train_flag: True})
train_loss += batch_loss
train_acc += batch_acc
pre_index += batch_size
if it == iterations:
train_loss /= iterations
train_acc /= iterations
loss_, acc_ = sess.run([cross_entropy, accuracy],
feed_dict={x: batch_x, y_: batch_y, keep_prob: 1.0, train_flag: True})
train_summary = tf.Summary(value=[tf.Summary.Value(tag="train_loss", simple_value=train_loss),
tf.Summary.Value(tag="train_accuracy", simple_value=train_acc)])
val_acc, val_loss, test_summary = run_testing(sess, ep)
summary_writer.add_summary(train_summary, ep)
summary_writer.add_summary(test_summary, ep)
summary_writer.flush()
print("iteration: %d/%d, cost_time: %ds, train_loss: %.4f, "
"train_acc: %.4f, test_loss: %.4f, test_acc: %.4f"
% (it, iterations, int(time.time()-start_time), train_loss, train_acc, val_loss, val_acc))
else:
print("iteration: %d/%d, train_loss: %.4f, train_acc: %.4f"
% (it, iterations, train_loss / it, train_acc / it), end='\r')
save_path = saver.save(sess, model_save_path)
print("Model saved in file: %s" % save_path)
|
[
"2608572577@qq.com"
] |
2608572577@qq.com
|
16be7162f22c5585d5daaee411524b5ed067d518
|
09c4154c837e83d85ec9ed8d3b7c220f99f82645
|
/PyCrawler/AppleCrawler.py
|
b7c011f67ed62237cad61e9da9d917ff5ca8561c
|
[] |
no_license
|
AI5M/PublicOpinion
|
c8b7eb6f1842cd9558f3cf46cb18e856d56b66d5
|
9157048007cfbbb52b43b353bedc082ba470581f
|
refs/heads/master
| 2020-03-18T07:12:33.280072
| 2018-05-30T15:58:35
| 2018-05-30T15:58:35
| 134,438,968
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,483
|
py
|
import connDB
import config as cfg
import time
import traceback
import logging
import os
import requests as req
from urllib.parse import urljoin
from bs4 import BeautifulSoup as bs
from os import system
system("title AppleCrawler") #set cmd title
if(os.path.exists("./log/apple.log")):
os.remove("./log/apple.log")
def writeLogging(page=0, category="", title="", url=""):
print('something wrong in page',page)
print('title =', title)
traceback.print_exc()
logging.info('something wrong in page'+str(page))
logging.info('category='+category)
logging.info('title='+title)
logging.info('site_url='+url)
logging.info(traceback.format_exc())
#set log
logging.basicConfig(level=logging.INFO,
format='%(message)s',
filename='./log/apple.log') #filemode defaults to 'a'
category_dict = cfg.category_dict #category_id
conn = connDB.MyConnecter() #database connection object
conn.connect() #open the connection
MAXPAGES = 5
while(True):
# list declaration
page = 1
timestamp = time.time()
while(page<=MAXPAGES):
try:
print('page:',page)
website = 'https://tw.appledaily.com/new/realtime/{}'.format(page)
result = req.get(website).text
if(result == '<script>alert("網址不存在 !");location.href="/";</script>'):
break
soup = bs(result,'html.parser')
news_list = soup.select('.rtddt')
for news in news_list: #visit each article page
try:
site_url = news.select('a')[0]['href']
#site_url = urljoin(website,site_url)
category = news.h2.string
category_id = category_dict[category]
result = req.get(site_url).text
soup = bs(result,'html.parser')
title = soup.select('hgroup h1')[0].text.strip()
view = soup.select('.ndArticle_view')
view = view[0].text if len(view) else 0 #use the scraped view count if present, otherwise 0
create_time = soup.select('.ndArticle_creat')[0].text
#parse the scraped time string (format %Y/%m/%d %H:%M) into a time struct
create_time = time.strptime(create_time.replace("出版時間:",""),'%Y/%m/%d %H:%M')
create_time = time.mktime(create_time) #convert the time struct to a Unix timestamp
content = soup.select('.ndArticle_margin p')[0]
if content.style: #strip the style tags
for cont in content.style:
cont.extract()
content = content.text.strip()
data = {'title' : connDB.escape_str(title),
'category_id' : category_id,
'content' : connDB.escape_str(content),
'create_time' : create_time,
'view' : view,
'site_url' : site_url}
conn.insert_replace(table='apple', data=data) #replace to database table
#list append
# print(title)
# print(category_id)
# print(content)
# print(create_time)
# print(view)
# print(site_url)
time.sleep(1)
except:
if(not title):
title = ""
writeLogging(page=page, title=title, url=site_url)
time.sleep(10)
page += 1 #next page
time.sleep(30)
except:
logging.info('**********page wrong**********')
writeLogging(page=page)
logging.info('**********page wrong**********')
time.sleep(10)
print('cost time :',round(time.time()-timestamp,2),'second')
MAXPAGES = 5 #after the first full pass, only crawl the first few pages
# titleList = []
# viewList = []
# createTimeList = []
# contentList = []
# categoryList = []
# urlList=[]
# titleList.append(title)
# viewList.append(view)
# createTimeList.append(create_time)
# contentList.append(content)
# categoryList.append(category)
# urlList.append(site_url)
|
[
"alex856236@gmail.com"
] |
alex856236@gmail.com
|
72af6167e7ff140977ff5a8b7fac552dcbeb92bd
|
1840ec7b62a479c652948d293c2f813a453380c6
|
/blogapp/models.py
|
675117dc25c8cc44e94963787a8c9396f5b7fed9
|
[] |
no_license
|
jervislam0911/Postgram
|
0bb50547e845569b69b7e80f474c3b0f960ee399
|
e00c389d32c231620719985fcd7cc317c777229f
|
refs/heads/master
| 2020-06-23T19:31:34.711455
| 2016-10-14T15:13:29
| 2016-10-14T15:13:29
| 67,747,517
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,105
|
py
|
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.utils import timezone
PRIORITY_CHOICES = (('Python', 'Python'),
('Django', 'Django'),
('GitHub', 'GitHub'),
('Selenium', 'Selenium'))
# Create your models here.
class User(AbstractUser):
follows = models.ManyToManyField('self', related_name='follow_to', symmetrical=False)
class Post(models.Model):
author = models.ForeignKey(User, related_name='posts')
title = models.CharField(max_length=50)
type = models.CharField(max_length=100, choices=PRIORITY_CHOICES, default='Python')
body = models.TextField(blank=True, null=True)
image = models.FileField(blank=True, null=True)
create_date = models.DateTimeField(default=timezone.now)
publish_date = models.DateTimeField(blank=True, null=True)
def publish(self):
self.publish_date = timezone.now()
self.save()
class Photo(models.Model):
post = models.ForeignKey(Post, related_name='photos')
image = models.ImageField(upload_to='%Y/%m/%d')
|
[
"lin.jervis@yahoo.com"
] |
lin.jervis@yahoo.com
|
3f1a8ffa2740fae96cf29c212a01e1a98cc0cfdb
|
dce97c2bad511d0ba01fa52c1949e06a6946ac71
|
/tp2/euler.py
|
f2acb0ddf0d87e095cd571cd3884b6a90348180c
|
[] |
no_license
|
martinstd96/Analisis-Numerico1-7512
|
7d29dcaec75bb3b537551bb384a55f70b09cf4d6
|
1b8ec4f4895cb45c244738f346f9a88866525c2f
|
refs/heads/master
| 2020-07-26T05:12:36.771716
| 2019-11-28T05:32:45
| 2019-11-28T05:32:45
| 208,545,637
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 226
|
py
|
def main():
print( euler(1000,1/999,1,y_prima))
#funcion: the derivative function (here y_prima)
#y0: initial value
def euler(y0,h,x,funcion):
uk = y0
i = 0
while i<x:
uk = uk + h*funcion(uk,i)
i+=h
return uk
def y_prima(y,t):
return 0.7*y
main()
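A small hedged sanity check on the Euler scheme above (my addition, not in the original file): for y' = 0.7*y with y(0) = 1000 the exact solution is y(x) = 1000*exp(0.7*x), so the numerical estimate at x = 1 should approach 1000*exp(0.7) ≈ 2013.75 as the step h shrinks.
import math
approx = euler(1000, 1/999, 1, y_prima)
exact = 1000 * math.exp(0.7)
print(approx, exact, abs(approx - exact))  # the gap shrinks with smaller h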
|
[
"jpdicomo@live.com"
] |
jpdicomo@live.com
|
f503bbdf229263c6210bbe2d35e1d0dbfcec1848
|
e3a939d85f58a167bf4b91add18eb66dfa21fdaa
|
/login/migrations/0016_auto_20170326_2008.py
|
031addde59312877dfd7190a3d20b688b8940cd8
|
[
"Apache-2.0"
] |
permissive
|
anilkumarmeena/Bitora
|
3ca328a00505a0d72abe0c53b8d68dc61dac6aa0
|
c8ccfa6c2369c9da4bd273dddb783f870c09889f
|
refs/heads/master
| 2022-12-11T16:38:46.258490
| 2020-03-21T05:42:27
| 2020-03-21T05:42:27
| 168,194,931
| 0
| 2
|
Apache-2.0
| 2022-12-07T23:54:15
| 2019-01-29T17:14:55
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 625
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-26 14:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('login', '0015_auto_20170326_1250'),
]
operations = [
migrations.AlterField(
model_name='person',
name='cv',
field=models.FileField(blank=True, upload_to=''),
),
migrations.AlterField(
model_name='person',
name='display_pic',
field=models.FileField(blank=True, upload_to=''),
),
]
|
[
"anil98meena@gmail.com"
] |
anil98meena@gmail.com
|
22131b5d7a5a639409e4ac8f18241d4026439be0
|
5e3d8071719a38c878c08e15d5a9bb26db323d7f
|
/test/webapp_skel_test.py
|
b3133bbd1843a1ef54cdd47b0a032f11ad27a11f
|
[] |
no_license
|
apostvav/webapp_skel
|
b8e645f78f9cc92df1196e2d739eb92989a10563
|
00a7962cff05fdf3a6592a9d7211f7077d3f40c2
|
refs/heads/master
| 2023-06-25T16:27:54.936311
| 2023-06-08T20:27:46
| 2023-06-08T20:27:46
| 92,541,907
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,344
|
py
|
from flask import url_for
from flask_testing import TestCase
import webapp_skel
from webapp_skel.models import User, Article
class TestWebapp(TestCase):
def create_app(self):
return webapp_skel.create_app('test')
def setUp(self):
self.db = webapp_skel.db
self.db.create_all()
self.client = self.app.test_client()
testUser = User(username='test', email='test@example.com', password='test')
testArticle = Article(user=testUser, title="My Test", article="My Test Text",
tags="test1,test2")
self.db.session.add(testUser)
self.db.session.add(testArticle)
self.db.session.commit()
self.client.post(url_for('auth.login'),
data = dict(username='test', password='test'))
def tearDown(self):
webapp_skel.db.session.remove()
webapp_skel.db.drop_all()
def test_delete_all_tags(self):
response = self.client.post(
url_for('articles.edit', article_id=1),
data = dict(
title = "My test edited",
article = "My Test Text edited",
tags = ""
),
follow_redirects = True
)
assert response.status_code == 200
article1 = Article.query.first()
assert not article1._tags
|
[
"vitotol@gmail.com"
] |
vitotol@gmail.com
|
6b88cd00ac481c379f97b7407384b3593d72a780
|
fec709734f524f1d9dfcb7cbd826b1a8dcbe081a
|
/calculator.py
|
98baddbf09aeebc8e983418fa09917372e72dd4a
|
[
"MIT"
] |
permissive
|
yaakovlom/tahara_calculator
|
c3116f3547bf34d447d17ecff00e9bf7b2215914
|
2c5794a4795a59806fc84a8848e54cdb20d8658b
|
refs/heads/main
| 2023-07-11T02:22:40.606381
| 2021-08-16T09:30:33
| 2021-08-16T09:30:33
| 395,632,004
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,905
|
py
|
from pyluach import dates, hebrewcal
import os
import sys
class Period:
def __init__(self, date, ona, haflaga=None):
self.date = date
self.ona = ona
self.weekday = date.weekday()
self.haflaga = haflaga
self.seclusion_list = []
@property
def seclusion_list(self):
return self._seclusion_list
@seclusion_list.setter
def seclusion_list(self, seclusion_list):
self._seclusion_list = seclusion_list
def add_seclusion(self, seclusion):
self.seclusion_list.append(seclusion)
@property
def details(self):
self._details = [self.ona, self.haflaga]
return self._details
@details.setter
def details(self, haflaga):
self._haflaga = haflaga
class Seclusion:
def __init__(self, period, name, date, ona):
self.period = period
self.name = name
self.date = date
self.year = date.year
self.month = date.month
self.day = date.day
self.weekday = date.weekday()
self.ona = ona
self.details = [self.name, self.ona, self.period.date.month]
def get_details(self):
return self.details
ona_dict = {0 : "ליל", 1 : "יום"}
weekday_dict = {
1 : "ראשון",
2 : "שני",
3 : "שלישי",
4 : "רביעי",
5 : "חמישי",
6 : "שישי",
7 : "שבת"
}
def read_periods_list_file(file_path:str):
#get txt from the dates file
if os.path.isfile(file_path):
with open(file_path, "r") as f:
date_list = f.readlines()
return date_list
def export_results(file_name, lines):
#export results in a file
try:
with open(file_name, "w") as f:
f.writelines(lines)
except NameError as err:
print(err)
def convert_txt_to_period(date_txt):
#convert date text to period
details = date_txt.split()
if len(details) > 1:
digits_of_date = [int(n) for n in details[0].split("/")]
ona = int(details[1][0])
if ona == 1 or ona == 0:
period = Period(dates.HebrewDate(*digits_of_date[::-1]), ona)
return period
def get_month_len(month:hebrewcal.Month):
#get the length of the month
date = dates.HebrewDate((month + 1).year, (month + 1).month, 1) - 1
month_len = date.day
return month_len
def get_seclusions(period, haflagot_list=None):
#get list of seclusions from a period
date = period.date
year = date.year
month = date.month
day = date.day
month_len = get_month_len(hebrewcal.Month(year, month))
ona_beinonit30 = Seclusion(period, 'עונה בינונית 30', date + 29, period.ona)
veset_hachodesh = Seclusion(period, 'וסת החודש', date + month_len, period.ona)
ona_beinonit31 = Seclusion(period, 'עונה בינונית 31', date + 30, period.ona)
seclusion_list = [ona_beinonit30, veset_hachodesh, ona_beinonit31]
if period.haflaga:
haflaga = Seclusion(period, 'הפלגה', date + period.haflaga - 1, period.ona)
seclusion_list.append(haflaga)
if haflagot_list:
if len(haflagot_list) >= 2:
haflagot_lechumra = []
for h1 in haflagot_list[-1::-1]:
akira = False
for h2 in haflagot_list[-1:haflagot_list.index(h1):-1]:
if h2 > h1:
akira = True
if not akira:
haf = Seclusion(period, str(h1), date + h1 - 1, period.ona)
haflagot_lechumra.append(haf)
seclusion_list.append(haflagot_lechumra)
if not period.ona:
or_zarua = Seclusion(period, 'אור זרוע', ona_beinonit30.date - 1, period.ona + 1)
kartyupleity = Seclusion(period, 'כרתי ופלתי', ona_beinonit30.date, period.ona + 1)
seclusion_list.insert(0, or_zarua)
seclusion_list.insert(2, kartyupleity)
else:
or_zarua = Seclusion(period, 'אור זרוע', ona_beinonit30.date, period.ona - 1)
seclusion_list.insert(0, or_zarua)
return seclusion_list
def main():
#check the dates file
if len(sys.argv) > 1:
file_path = sys.argv[1]
else:
file_path = input("Date data file not found. Please enter the date file path:\n")
for i in range(3):
date_list = read_periods_list_file(file_path)
if date_list:
break
else:
file_path = input("Date data file not found. Please enter the date file path:\n")
if not date_list:
print("Date data file not found.\n")
exit()
#check if export file path is in args
export_file = None
if len(sys.argv) > 2:
export_file = sys.argv[2]
#convert txt to poriods
periods_list = []
for date in date_list:
try:
period = convert_txt_to_period(date)
if period:
periods_list.append(period)
except NameError as err:
print(err)
periods_dates = {period.date: period for period in periods_list}
#get the "haflagot" form periods
seclusion_list = []
for i, period in enumerate(periods_list[1:]):
haflaga = period.date - periods_list[i].date + 1
period.haflaga = int(haflaga)
haflagot_list = [period.haflaga for period in periods_list[1:]]
#get seclusions from periods
for i, period in enumerate(periods_list):
if haflagot_list:
seclusion_list = get_seclusions(period, haflagot_list[:i])
else:
seclusion_list = get_seclusions(period)
period.seclusion_list = seclusion_list
#set results
mid_line = "-" * 25
lines = [f"רשימת הפלגות:\n{haflagot_list}\n{mid_line}\n"]
for period in periods_dates:
lines.append((f"{period.hebrew_date_string()} ב{ona_dict[periods_dates[period].ona]} {weekday_dict[period.weekday()]}:\n"))
for seclusion in periods_dates[period].seclusion_list:
if type(seclusion) != list:
lines.append(f" {seclusion.name} - {seclusion.date.hebrew_date_string()} ב{ona_dict[seclusion.ona]} {weekday_dict[seclusion.weekday]}\n")
else:
lines.append((" הפלגות שלא נעקרו:\n"))
for s in seclusion:
lines.append(f" {s.name} - {s.date.hebrew_date_string()} ב{ona_dict[s.ona]} {weekday_dict[s.weekday]}\n")
lines.append(mid_line + "\n")
#export or print results
if export_file:
export_results(export_file, lines)
else:
print("")
for line in lines:
print(line[:-1])
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
yaakovlom.noreply@github.com
|
ee499d4f8713d4cbcdd106d89b4218153b158201
|
5d4267bf377bea5beb50ee70e7a853b1d5be88d0
|
/Kattis/Euler's Number.py
|
bc0123081a4a390f44cb05fc20176ae7194d6c9c
|
[] |
no_license
|
Penguin-71630/Python-3
|
043b4d7b7525478f87c2404ff0d585d030d50d11
|
fc3acf1a2b7a204282503d581cc61275b39911a4
|
refs/heads/master
| 2022-01-20T04:14:51.005757
| 2019-08-11T08:08:58
| 2019-08-11T08:08:58
| 198,004,811
| 0
| 0
| null | 2019-07-21T04:27:52
| 2019-07-21T02:52:04
|
Python
|
UTF-8
|
Python
| false
| false
| 114
|
py
|
total, deno = 1, 1
for i in range(1, int(input()) + 1):
deno *= i
total += 1 / deno
print(total)
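For context (an illustrative note, not part of the original submission): the loop accumulates the partial sums of the series e = sum over k of 1/k!, so an input of 10 already agrees with math.e to roughly seven decimal places. An equivalent one-liner for n = 10:
import math
print(sum(1 / math.factorial(k) for k in range(11)))  # ≈ 2.7182818, close to math.e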
|
[
"noreply@github.com"
] |
Penguin-71630.noreply@github.com
|
ec6f2dcf95c86f6207dc7e001d328bd4279cd4fc
|
bd7d091565ba63c930351203ed912ff82461d540
|
/dp_72_minDistanceOfEditing.py
|
74d09689a717bd362a7ef359ab99b57ca3824e46
|
[] |
no_license
|
screnary/Algorithm_python
|
6ea3ab571763b5c0a519bdb7eed64dd5b74e8a8f
|
8290ad1c763d9f7c7f7bed63426b4769b34fd2fc
|
refs/heads/master
| 2022-12-07T02:59:42.786259
| 2020-08-25T04:27:45
| 2020-08-25T04:27:45
| 258,499,984
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,028
|
py
|
"""给你两个单词 word1 和 word2,请你计算出将 word1 转换成 word2 所使用的最少操作数 。
你可以对一个单词进行如下三种操作:
插入一个字符
删除一个字符
替换一个字符
"""
class Solution:
def minDistance(self, word1: str, word2: str) -> int:
""" input| word1: str, word2: str
output| int
"""
m = len(word1)
n = len(word2)
dp = [[0] * (n+1) for _ in range(m+1)] # matrix [m+1, n+1]
for i in range(1, m+1):
dp[i][0] = i
for j in range(1, n+1):
dp[0][j] = j
for i in range(1, m+1):
for j in range(1, n+1):
if word1[i-1] == word2[j-1]:
dp[i][j] = dp[i-1][j-1]
else:
dp[i][j] = min(
dp[i][j-1] + 1, # insert
dp[i-1][j] + 1, # delete
dp[i-1][j-1] + 1 # replace
)
return dp[m][n]
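A hedged usage note (the calls below are my addition, not part of the original file); they reproduce the classic LeetCode 72 examples:
s = Solution()
print(s.minDistance("horse", "ros"))            # 3: horse -> rorse -> rose -> ros
print(s.minDistance("intention", "execution"))  # 5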
|
[
"screnary@qq.com"
] |
screnary@qq.com
|
d61f416f47cbbd230617c8716874d45fbadbfd0d
|
e7b199ce6e3ae0ed28d5edb943e8b35469808f7e
|
/manage.py
|
92cadf718f1d58ba755d19c03b7d2dfb37041d57
|
[
"MIT"
] |
permissive
|
dankiki/sweetly
|
4a2b38ea6a6a7c2994fb5d8deb44bfa4da06991d
|
a6cf05a9ff36704c9e753203d79d0ede60431f28
|
refs/heads/master
| 2020-09-08T00:37:15.777517
| 2019-11-11T11:15:07
| 2019-11-11T11:15:07
| 220,958,880
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 627
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sweetly.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"daniel.kislyuk@gmail.com"
] |
daniel.kislyuk@gmail.com
|
ed1e477db5118c0ed1436a59bc6d9d6830bf5c93
|
3b983c1a990341d5e56f848e5b10d7834dd8fea8
|
/game/intro.py
|
9339d531588d51f7e0d43a0673e50face5165496
|
[] |
no_license
|
Crowbeak/LD26
|
5ccd40e7ed5594dbffdbb77d283db04c52271958
|
fac5cc8cdb155044ea296825ff0ebb491129666b
|
refs/heads/master
| 2020-05-18T07:10:06.318489
| 2013-12-22T08:30:43
| 2013-12-22T08:30:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 602
|
py
|
import pyglet
import resources
class Title(pyglet.sprite.Sprite):
def __init__(self, *args, **kwargs):
super(Title, self).__init__(img=resources.crow_logo, x=760, y=40,
*args, **kwargs)
self.game_title = pyglet.text.Label("Poke", font_size = 36,
anchor_y="top", x=40, y=560,
color=(0,0,0,255))
self.credits = pyglet.text.Label("A game by Lena LeRay",
x=40, y=485, color=(0,0,0,255))
self.timer = 200
|
[
"crowbeak@gmail.com"
] |
crowbeak@gmail.com
|
b03e484b92549f6640ea40fb5511f1647646fa81
|
3c000380cbb7e8deb6abf9c6f3e29e8e89784830
|
/venv/Lib/site-packages/cobra/modelimpl/comp/rcvdbyteshist1mo.py
|
c9563a2299d98cd5d31053c3b0fb2951568717e0
|
[] |
no_license
|
bkhoward/aciDOM
|
91b0406f00da7aac413a81c8db2129b4bfc5497b
|
f2674456ecb19cf7299ef0c5a0887560b8b315d0
|
refs/heads/master
| 2023-03-27T23:37:02.836904
| 2021-03-26T22:07:54
| 2021-03-26T22:07:54
| 351,855,399
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,923
|
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class RcvdBytesHist1mo(Mo):
"""
A class that represents historical statistics for received bytes in a 1 month sampling interval. This class updates every day.
"""
meta = StatsClassMeta("cobra.model.comp.RcvdBytesHist1mo", "received bytes")
counter = CounterMeta("usage", CounterCategory.GAUGE, "bytes-per-second", "received rate")
counter._propRefs[PropCategory.IMPLICIT_MIN] = "usageMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "usageMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "usageAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "usageSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "usageThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "usageTr"
meta._counters.append(counter)
meta.moClassName = "compRcvdBytesHist1mo"
meta.rnFormat = "HDcompRcvdBytes1mo-%(index)s"
meta.category = MoCategory.STATS_HISTORY
meta.label = "historical received bytes stats in 1 month"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = True
meta.parentClasses.add("cobra.model.comp.Hv")
meta.parentClasses.add("cobra.model.comp.HpNic")
meta.parentClasses.add("cobra.model.comp.VNic")
meta.parentClasses.add("cobra.model.comp.Vm")
meta.superClasses.add("cobra.model.comp.RcvdBytesHist")
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.stats.Hist")
meta.rnPrefixes = [
('HDcompRcvdBytes1mo-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "index", "index", 5926, PropCategory.REGULAR)
prop.label = "History Index"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("index", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "usageAvg", "usageAvg", 7602, PropCategory.IMPLICIT_AVG)
prop.label = "received rate average value"
prop.isOper = True
prop.isStats = True
meta.props.add("usageAvg", prop)
prop = PropMeta("str", "usageMax", "usageMax", 7601, PropCategory.IMPLICIT_MAX)
prop.label = "received rate maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("usageMax", prop)
prop = PropMeta("str", "usageMin", "usageMin", 7600, PropCategory.IMPLICIT_MIN)
prop.label = "received rate minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("usageMin", prop)
prop = PropMeta("str", "usageSpct", "usageSpct", 7603, PropCategory.IMPLICIT_SUSPECT)
prop.label = "received rate suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("usageSpct", prop)
prop = PropMeta("str", "usageThr", "usageThr", 7604, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "received rate thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("usageThr", prop)
prop = PropMeta("str", "usageTr", "usageTr", 7605, PropCategory.IMPLICIT_TREND)
prop.label = "received rate trend"
prop.isOper = True
prop.isStats = True
meta.props.add("usageTr", prop)
meta.namingProps.append(getattr(meta.props, "index"))
def __init__(self, parentMoOrDn, index, markDirty=True, **creationProps):
namingVals = [index]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
|
[
"bkhoward@live.com"
] |
bkhoward@live.com
|
5fdb8b04700662ee8b20d6fdd178566241f4b87c
|
01db5ec488b5a07c43fde6f6e718632c83b3d100
|
/ecs-fargate-isolated/app.py
|
09169eefd54219ce494f605c5c0cde1ea415f486
|
[] |
no_license
|
littlejo/cdk-examples
|
0bce63d9eb54fef58b748f9f328bc6db4701ab67
|
22106de8373facebaa64f5e46cc20d3bf78fd2aa
|
refs/heads/master
| 2023-07-13T05:53:41.158543
| 2020-12-07T09:03:20
| 2020-12-07T09:03:20
| 261,969,897
| 0
| 0
| null | 2023-06-22T20:42:11
| 2020-05-07T06:36:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,487
|
py
|
#!/usr/bin/env python3
from aws_cdk import core
import json
import os
from environment.environment_stack import EnvironmentStack
from ecr.task_ecr_stack import ECRStack
# Read global configuration file
with open('environment_conf.json') as config_file:
global_conf = json.load(config_file)
app = core.App()
stage = app.node.try_get_context("stage")
if stage is None :
stage = "dev"
print("# Deploy stage [{}]".format(stage))
common_tags = []
common_tags.append( core.CfnTag( key="Project", value=global_conf["global"]["project"]))
common_tags.append( core.CfnTag( key="Stage", value=stage))
env = core.Environment(
account=os.environ.get("CDK_DEPLOY_ACCOUNT", os.environ["CDK_DEFAULT_ACCOUNT"]),
region=os.environ.get("CDK_DEPLOY_REGION", os.environ["CDK_DEFAULT_REGION"])
)
EnvironmentStack(app, f"env-{stage}", tags=common_tags, name_extension=global_conf["global"]["extension"]+stage, stage=stage, conf=global_conf, env=env )
ECRStack(app, f"ecr-{stage}", tags=common_tags, name_extension=global_conf["global"]["extension"]+stage, stage=stage , vpc_name=global_conf[stage]["vpc_name"] , region=os.environ["CDK_DEFAULT_REGION"], env=env, ecs_conf=global_conf[stage]["ecs"]["nginx-1"])
ECRStack(app, f"ecr-2-{stage}", tags=common_tags, name_extension=global_conf["global"]["extension"]+stage, stage=stage , vpc_name=global_conf[stage]["vpc_name"] , region=os.environ["CDK_DEFAULT_REGION"], env=env, ecs_conf=global_conf[stage]["ecs"]["nginx-2"])
app.synth()
|
[
"joseph.ligier@gmail.com"
] |
joseph.ligier@gmail.com
|
39fdeb0804fff370665a33ca7fbced711a6f15d5
|
b2e727332b6f94f0844164bd972a7a48878f5292
|
/uncategorized/minimum-absolute-difference-in-an-array.py
|
dfd39dec01988758aa250eaf654150186c0a5986
|
[] |
no_license
|
mzfr/Competitive-coding
|
496024f940b9103a39b5b9affed70552b31e20b8
|
33074c9b21a240fd285d38d8320e4037defdd3eb
|
refs/heads/master
| 2023-01-01T04:49:59.027502
| 2020-10-23T11:41:23
| 2020-10-23T11:41:23
| 265,234,568
| 2
| 0
| null | 2020-10-01T04:55:32
| 2020-05-19T11:58:34
|
Python
|
UTF-8
|
Python
| false
| false
| 1,013
|
py
|
"""
https://www.hackerrank.com/challenges/minimum-absolute-difference-in-an-array
SOLUTION:
First I thought of using itertools.combinations to get all the pairs and then
find their minimum difference, but that had high complexity and some of the
test cases were failing, so instead I did the following:
1) Sort the given list
2) Assume the minimum difference is between the first two elements
3) Compare every other adjacent difference against that minimum
"""
import math
import os
import random
import re
import sys
def minimumAbsoluteDifference(arr):
arr.sort()
minimum = abs(arr[0]-arr[1])
for i in range(len(arr)-1):
diff = abs(arr[i]-arr[i+1])
if minimum > diff:
minimum = diff
return minimum
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input())
arr = list(map(int, input().rstrip().split()))
result = minimumAbsoluteDifference(arr)
fptr.write(str(result) + '\n')
fptr.close()
|
[
"Mehtab.zafar98@gmail.com"
] |
Mehtab.zafar98@gmail.com
|
059d5a506246d959ae5d4dd9b7edbf7edaa4fcbe
|
0d12b52791f4dbd63e7c4309bd8128430708a686
|
/PCI/PCI_Code/chapter4/searchengine.py
|
f024a0fcdb0967828664c3d2d586a5fe45dc7c05
|
[
"LicenseRef-scancode-oreilly-notice"
] |
permissive
|
linzb-xyz/PCI_code
|
ac889a7fb72df513f42c59f3644f4fdc8c735799
|
02ec6d1a0dd6dda494999a568c499af1bb23ad1c
|
refs/heads/master
| 2020-03-06T22:24:27.297313
| 2018-03-28T07:49:18
| 2018-03-28T07:49:18
| 127,102,476
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,161
|
py
|
import urllib.request
from bs4 import *
from urllib.parse import urljoin
from sqlite3 import dbapi2 as sqlite
import nn
import re
mynet=nn.searchnet('nn.db')
# Create a list of words to ignore
ignorewords={'the':1,'of':1,'to':1,'and':1,'a':1,'in':1,'is':1,'it':1}
class crawler:
# Initialize the crawler with the name of database
def __init__(self,dbname):
self.con=sqlite.connect(dbname)
def __del__(self):
self.con.close()
def dbcommit(self):
self.con.commit()
# Auxilliary function for getting an entry id and adding
# it if it's not present
def getentryid(self,table,field,value,createnew=True):
cur=self.con.execute(
"select rowid from %s where %s='%s'" % (table,field,value))
res=cur.fetchone()
if res==None:
cur=self.con.execute(
"insert into %s (%s) values ('%s')" % (table,field,value))
return cur.lastrowid
else:
return res[0]
# Index an individual page
def addtoindex(self,url,soup):
if self.isindexed(url): return
print('Indexing '+url)
# Get the individual words
text=self.gettextonly(soup) #@@@@@@@@@@@@@@@@@
#print(text)
words=self.separatewords(text) #@@@@@@@@@@@@@@@@@@@@!!!
# Get the URL id
#print(words)
urlid=self.getentryid('urllist','url',url)
# Link each word to this url
for i in range(len(words)):
word=words[i]
if word in ignorewords: continue
wordid=self.getentryid('wordlist','word',word)
self.con.execute("insert into wordlocation(urlid,wordid,location) values (%d,%d,%d)" % (urlid,wordid,i))
# Extract the text from an HTML page (no tags)
def gettextonly(self,soup):
v=soup.string
if v==None:
c=soup.contents
resulttext=''
for t in c:
subtext=self.gettextonly(t)
resulttext+=subtext+'\n'
return resulttext
else:
return v.strip()
# Seperate the words by any non-whitespace character
def separatewords(self,text):
splitter = re.compile('\\W*')
x = splitter.split(text)
return [s.lower() for s in x if s!='']
# Return true if this url is already indexed
def isindexed(self,url):
u = self.con.execute("select rowid from urllist where url='%s'" %url).fetchone()
if u!=None:
v = self.con.execute('select * from wordlocation where urlid=%d' % u[0]).fetchone()
if v!=None: return True
return False
# Add a link between two pages
def addlinkref(self,urlFrom,urlTo,linkText):
        words=self.separatewords(linkText)
fromid=self.getentryid('urllist','url',urlFrom)
toid=self.getentryid('urllist','url',urlTo)
if fromid==toid: return
cur=self.con.execute("insert into link(fromid,toid) values (%d,%d)" % (fromid,toid))
linkid=cur.lastrowid
for word in words:
if word in ignorewords: continue
wordid=self.getentryid('wordlist','word',word)
self.con.execute("insert into linkwords(linkid,wordid) values (%d,%d)" % (linkid,wordid))
# Starting with a list of pages, do a breadth
# first search to the given depth, indexing pages
# as we go
def crawl(self,pages,depth=2):
for i in range(depth):
newpages={}
for page in pages:
try:
c=urllib.request.urlopen(page)
except:
print("Could not open %s" % page)
continue
try:
soup=BeautifulSoup(c.read(),'html5lib')
print(soup.title)
self.addtoindex(page,soup)
links=soup('a')
for link in links:
if ('href' in dict(link.attrs)):
url=urljoin(page,link['href'])
if url.find("'")!=-1: continue
url=url.split('#')[0] # remove location portion
if url[0:4]=='http' and not self.isindexed(url):
newpages[url]=1
linkText=self.gettextonly(link)
self.addlinkref(page,url,linkText)
self.dbcommit()
except:
print("Could not parse page %s" % page)
pages=newpages
# Create the database tables
def createindextables(self):
self.con.execute('create table urllist(url)')
self.con.execute('create table wordlist(word)')
self.con.execute('create table wordlocation(urlid,wordid,location)')
self.con.execute('create table link(fromid integer,toid integer)')
self.con.execute('create table linkwords(wordid,linkid)')
self.con.execute('create index wordidx on wordlist(word)')
self.con.execute('create index urlidx on urllist(url)')
self.con.execute('create index wordurlidx on wordlocation(wordid)')
self.con.execute('create index urltoidx on link(toid)')
self.con.execute('create index urlfromidx on link(fromid)')
self.dbcommit()
def calculatepagerank(self,iterations=20):
# clear out the current page rank tables
self.con.execute('drop table if exists pagerank')
self.con.execute('create table pagerank(urlid primary key,score)')
# initialize every url with a page rank of 1
for (urlid,) in self.con.execute('select rowid from urllist'):
self.con.execute('insert into pagerank(urlid,score) values (%d,1.0)' % urlid)
self.dbcommit()
for i in range(iterations):
print("Iteration %d" % (i))
for (urlid,) in self.con.execute('select rowid from urllist'):
pr=0.15
# Loop through all the pages that link to this one
for (linker,) in self.con.execute(
'select distinct fromid from link where toid=%d' % urlid):
# Get the page rank of the linker
linkingpr=self.con.execute(
'select score from pagerank where urlid=%d' % linker).fetchone()[0]
# Get the total number of links from the linker
linkingcount=self.con.execute(
'select count(*) from link where fromid=%d' % linker).fetchone()[0]
pr+=0.85*(linkingpr/linkingcount)
self.con.execute(
'update pagerank set score=%f where urlid=%d' % (pr,urlid))
self.dbcommit()
class searcher:
def __init__(self,dbname):
self.con=sqlite.connect(dbname)
def __del__(self):
self.con.close()
def getmatchrows(self,q):
# Strings to build the query
fieldlist='w0.urlid'
tablelist=''
clauselist=''
wordids=[]
# Split the words by spaces
words=q.split(' ')
tablenumber=0
for word in words:
# Get the word ID
wordrow=self.con.execute(
"select rowid from wordlist where word='%s'" % word).fetchone()
if wordrow!=None:
wordid=wordrow[0]
wordids.append(wordid)
if tablenumber>0:
tablelist+=','
clauselist+=' and '
clauselist+='w%d.urlid=w%d.urlid and ' % (tablenumber-1,tablenumber)
fieldlist+=',w%d.location' % tablenumber
tablelist+='wordlocation w%d' % tablenumber
clauselist+='w%d.wordid=%d' % (tablenumber,wordid)
tablenumber+=1
# Create the query from the separate parts
fullquery='select %s from %s where %s' % (fieldlist,tablelist,clauselist)
print(fullquery)
cur=self.con.execute(fullquery)
rows=[row for row in cur]
return rows,wordids
def getscoredlist(self,rows,wordids):
totalscores=dict([(row[0],0) for row in rows])
# This is where we'll put our scoring functions
weights=[(1.0,self.locationscore(rows)),
(1.0,self.frequencyscore(rows)),
(1.0,self.pagerankscore(rows)),
(1.0,self.linktextscore(rows,wordids)),
(5.0,self.nnscore(rows,wordids))]
for (weight,scores) in weights:
for url in totalscores:
totalscores[url]+=weight*scores[url]
return totalscores
def geturlname(self,id):
return self.con.execute(
"select url from urllist where rowid=%d" % id).fetchone()[0]
def query(self,q):
rows,wordids=self.getmatchrows(q)
scores=self.getscoredlist(rows,wordids)
rankedscores=[(score,url) for (url,score) in scores.items()]
rankedscores.sort()
rankedscores.reverse()
for (score,urlid) in rankedscores[0:10]:
print('%f\t%s' % (score,self.geturlname(urlid)))
return wordids,[r[1] for r in rankedscores[0:10]]
def normalizescores(self,scores,smallIsBetter=0):
vsmall=0.00001 # Avoid division by zero errors
if smallIsBetter:
minscore=min(scores.values())
return dict([(u,float(minscore)/max(vsmall,l)) for (u,l) in scores.items()])
else:
maxscore=max(scores.values())
if maxscore==0: maxscore=vsmall
return dict([(u,float(c)/maxscore) for (u,c) in scores.items()])
def frequencyscore(self,rows):
counts=dict([(row[0],0) for row in rows])
for row in rows: counts[row[0]]+=1
return self.normalizescores(counts)
def locationscore(self,rows):
locations=dict([(row[0],1000000) for row in rows])
for row in rows:
loc=sum(row[1:])
if loc<locations[row[0]]: locations[row[0]]=loc
return self.normalizescores(locations,smallIsBetter=1)
def distancescore(self,rows):
# If there's only one word, everyone wins!
if len(rows[0])<=2: return dict([(row[0],1.0) for row in rows])
# Initialize the dictionary with large values
mindistance=dict([(row[0],1000000) for row in rows])
for row in rows:
dist=sum([abs(row[i]-row[i-1]) for i in range(2,len(row))])
if dist<mindistance[row[0]]: mindistance[row[0]]=dist
return self.normalizescores(mindistance,smallIsBetter=1)
def inboundlinkscore(self,rows):
uniqueurls=dict([(row[0],1) for row in rows])
inboundcount=dict([(u,self.con.execute('select count(*) from link where toid=%d' % u).fetchone()[0]) for u in uniqueurls])
return self.normalizescores(inboundcount)
def linktextscore(self,rows,wordids):
linkscores=dict([(row[0],0) for row in rows])
for wordid in wordids:
cur=self.con.execute('select link.fromid,link.toid from linkwords,link where wordid=%d and linkwords.linkid=link.rowid' % wordid)
for (fromid,toid) in cur:
if toid in linkscores:
pr=self.con.execute('select score from pagerank where urlid=%d' % fromid).fetchone()[0]
linkscores[toid]+=pr
maxscore=max(linkscores.values())
normalizedscores=dict([(u,float(l)/maxscore) for (u,l) in linkscores.items()])
return normalizedscores
def pagerankscore(self,rows):
pageranks=dict([(row[0],self.con.execute('select score from pagerank where urlid=%d' % row[0]).fetchone()[0]) for row in rows])
maxrank=max(pageranks.values())
normalizedscores=dict([(u,float(l)/maxrank) for (u,l) in pageranks.items()])
return normalizedscores
def nnscore(self,rows,wordids):
# Get unique URL IDs as an ordered list
urlids=[urlid for urlid in dict([(row[0],1) for row in rows])]
nnres=mynet.getresult(wordids,urlids)
scores=dict([(urlids[i],nnres[i]) for i in range(len(urlids))])
return self.normalizescores(scores)
if __name__ == '__main__':
crawler = crawler('searchindex.db')
#crawler.createindextables()
page=['https://news.google.com/news/?ned=us&gl=US&hl=en']
crawler.crawl(page)
c = [row for row in crawler.con.execute('select word from wordlist')]
print(c)
|
[
"756608359@qq.com"
] |
756608359@qq.com
|
6403ebc410b0769ec59ad0a993e9b9d051316a35
|
d4c761daffc30ae0c6478a084498d4d95fa713d8
|
/app/admin.py
|
fbc59d61e386759369b8705a4a0429ef35f8cefd
|
[] |
no_license
|
futter-krot/a
|
351178a38b758a1080532fc743e025ba8c35d867
|
d4a03727fdc4e35bc212866a9227c0f71ba44c1d
|
refs/heads/master
| 2023-06-03T13:41:49.124471
| 2021-06-24T13:55:48
| 2021-06-24T13:55:48
| 356,860,810
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 227
|
py
|
from django.contrib import admin
from app.models import *
# Register your models here.
@admin.register(Post)
class PostAdmin(admin.ModelAdmin):
pass
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
pass
|
[
"futterbeta@gmail.com"
] |
futterbeta@gmail.com
|
601c3e834c1baf3a3c1b10fe80f1252c142e09ec
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/93/usersdata/230/54636/submittedfiles/atividade.py
|
2b4b38d2a1c6b323bd65f982ed00e1f5274b16b5
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 234
|
py
|
# -*- coding: utf-8 -*-
import math
n=int(input('Digite valor de n: '))
x=int(input('Digite valor de x: '))
y=int(input('Digite valor de y: '))
soma=(x**2)+(y**2)
if x>=0 and y>=0 and soma<=1:
print('SIM')
else:
print('NAO')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
4713ad776108a8d64f7f4932b2993264b4344ce6
|
991646dbb3427981ce1b6b246d829751027e6ef6
|
/page/Login_page.py
|
348d610df06f90a9ae813f02e51ee50c89099b97
|
[] |
no_license
|
jiaoyalei/Start_in_batches
|
25f10cc2f44139ac876d6195e487e5e9217fab48
|
7c49f3ec4310bf02c7d8369bf92b933d398d024c
|
refs/heads/master
| 2022-10-28T05:19:17.203897
| 2020-06-12T09:49:39
| 2020-06-12T09:49:39
| 266,676,962
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,686
|
py
|
from common.base import Base
from selenium import webdriver
from common.common_rwcd import Common_Read
import time,os
class LoginPage():
    '''Login class'''
def __init__(self,driver):
        # get the browser driver handle
self.driver = driver
        # path of the Excel data file to operate on, and the specific worksheet
real_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
data_path = os.path.join(os.path.join(real_path,"data"),"cze_data.xls")
sheetName = "login" #工作薄内容为登录的操作步骤数据
        # read the Excel file and instantiate the reader
data = Common_Read(data_path, sheetName)
        # convert the Excel data into a dictionary we can work with
self.data_value = data.dict_data()
        # instantiate the basic browser-operations helper class
self.b = Base(self.driver)
def login(self,username,password):
        '''User login function'''
list_value = {}
for i in range(len(self.data_value)):
list_value["loca%d" %(i+1)] = (self.data_value[i]["type"],self.data_value[i]["value"])
        # call the basic browser event helpers, e.g. send (locate an element and enter data) and click (locate an element and click it)
self.b.send(list_value["loca1"],username)
self.b.send(list_value["loca2"],password)
time.sleep(8)
self.b.click(list_value["loca3"])
# self.driver.get_screenshot_as_file(r"C:\Users\safecode\Desktop\selenium_bug\%s_login_Result.png" %username)
time.sleep(1)
return self.driver
def login_test(self,username,password):
        '''User login function'''
print(username,password)
list_value = {}
for i in range(len(self.data_value)):
list_value["loca%d" %(i+1)] = (self.data_value[i]["type"],self.data_value[i]["value"])
        # call the basic browser event helpers, e.g. send (locate an element and enter data) and click (locate an element and click it)
self.b.send(list_value["loca1"],username)
time.sleep(1.5)
self.b.send(list_value["loca2"],password)
time.sleep(1.5)
self.b.click(list_value["loca3"])
time.sleep(5)
# time.sleep(1)
# loca = ("xpath",".//*[@id='contentMain']/ul/li[7]/span")
# flag = self.b.text_in_element(loca,"设置")
# if flag != False:
# print("用户:%s,登录成功!" %username)
# else:
# print("用户:%s,登录失败!" %username)
# time.sleep(5)
if __name__ == "__main__":
driver = webdriver.Chrome()
driver.maximize_window()
driver.get("https://192.168.235.143/#/login")
c = LoginPage(driver)
c.login("test_j","qaz123456")
|
[
"719521314@qq.com"
] |
719521314@qq.com
|
e55f3d7f6a822bd42e27124ffa5e1f3246a1bf07
|
06a50c3f425423bc054a585e563e92cc7e5db007
|
/EPAM_python_tests/bin/classes/logs_operations.py
|
54cb8a78f234cf32ec26e44302e721306a05a312
|
[] |
no_license
|
eugene-marchenko/pycharm
|
a4ee1729ef84510603902c128adb12ca67ab060c
|
c141873342e3c7a3e9f224e856cf1046ef2197eb
|
refs/heads/master
| 2021-07-06T20:34:48.722600
| 2020-09-12T16:48:46
| 2020-09-12T16:48:46
| 31,311,149
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,566
|
py
|
import os
import pytest
from time import gmtime, strftime
class LogsOperations(object):
"""
This class describes methods to operate with log files
"""
def __init__(self):
self.WORK_DIR = 'logs/'
self.EXTENSION = '.log'
self.timestamp = strftime('%Y%m%d_%H%M%S', gmtime())
def delete_logs(self):
"""
This method deletes all logs from log directory
:return:
"""
if os.path.isdir(self.WORK_DIR):
filelist = [f for f in os.listdir(self.WORK_DIR) if f.endswith(self.EXTENSION)]
current_dir = os.getcwd()
for f in filelist:
os.chdir(self.WORK_DIR)
os.remove(f)
os.chdir(current_dir)
def create_dir_and_log_file(self, class_name):
"""
This method creates new directory(if does not exists) and log file with custom filename
:param class_name:
:return:
"""
        print(os.getcwd())
if not os.path.exists(os.path.dirname(self.WORK_DIR)):
os.makedirs(os.path.dirname(self.WORK_DIR))
filename = self.WORK_DIR + str(self.timestamp) + '_' + class_name + self.EXTENSION
return filename
def test_class(self, filename):
"""
This method runs pytests to test functionality of our main classes
:param filename:
:return:
"""
file_specifier = filename.split('/', 1)[1].split('.', 1)[0]
pytest.main(filename + ' --resultlog=%s' % self.create_dir_and_log_file(file_specifier))
|
[
"3.marchenko@gmail.com"
] |
3.marchenko@gmail.com
|
0bf886a5f0c1b6f91006568d96211ce1aa65d30b
|
7c3f3749f28ce9252963b738003b415fda4d4c53
|
/sources/test.py
|
6f2d6357505a21fa03780341e90d83e7fe2bd95a
|
[
"MIT"
] |
permissive
|
phunxv267/face_recognition
|
d725063771142c8dd05c685624d9ad892cdd79d8
|
67acf435c8ecf4c4e0bf8b641777d27209387330
|
refs/heads/master
| 2020-12-27T11:04:00.809524
| 2020-02-03T04:04:26
| 2020-02-03T04:04:26
| 237,879,637
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,173
|
py
|
import argparse
import cv2
from sources.cv_modules import face_model
parser = argparse.ArgumentParser(description='face model test')
# general
parser.add_argument('--image-size', default='112,112', help='')
parser.add_argument('--model', default='../resources/models/model,0', help='path to load model.')
parser.add_argument('--det', default=0, type=int, help='mtcnn option, 1 means using R+O, 0 means detect from begining')
parser.add_argument('--flip', default=0, type=int, help='whether do lr flip aug')
parser.add_argument('--threshold', default=1.24, type=float, help='ver dist threshold')
args = parser.parse_args()
model = face_model.FaceModel(args)
for i in range(10):
img = cv2.imread('tom-hanks.jpg')
img = model.get_input(img)
f1 = model.get_feature(img)
print(f1)
# gender, age = model.get_ga(img)
# print(gender)
# print(age)
# sys.exit(0)
# img = cv2.imread('/raid5data/dplearn/megaface/facescrubr/112x112/Tom_Hanks/Tom_Hanks_54733.png')
# f2 = model.get_feature(img)
# dist = np.sum(np.square(f1-f2))
# print(dist)
# sim = np.dot(f1, f2.T)
# print(sim)
#diff = np.subtract(source_feature, target_feature)
#dist = np.sum(np.square(diff),1)
|
[
"phunxv267@gmai.com"
] |
phunxv267@gmai.com
|
57d8f24d2896a24eff46d31b11ca4452558f04c1
|
e50f65504f456d3e79549e332e58f416bc6b0871
|
/predictfile.py
|
9ac3e0b7831a1b9c70d372f90b37c76bc412f1d2
|
[] |
no_license
|
haruyasu/animalai
|
a087caceb76e61f8a8aa56ab9b4687196c588011
|
04e2adb5cadd820bc2d7fd10c2658e03003840f5
|
refs/heads/master
| 2020-07-15T06:28:03.715731
| 2019-08-31T05:36:15
| 2019-08-31T05:36:15
| 205,500,436
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,328
|
py
|
import os
from flask import Flask, flash, request, redirect, url_for
from werkzeug.utils import secure_filename
from keras.models import Sequential, load_model
import keras,sys
import numpy as np
from PIL import Image
classes = ["monkey","boar","crow"]
num_classes = len(classes)
image_size = 50
UPLOAD_FOLDER = './uploads'
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'gif'])
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def allowed_file(filename):
return '.' in filename and filename.rsplit('.',1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
if 'file' not in request.files:
flash('ファイルがありません')
return redirect(request.url)
file = request.files['file']
if file.filename == '':
flash('ファイルがありません')
return redirect(request.url)
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
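            # load the trained CNN (animal_cnn_aug.h5) and classify the uploaded image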
model = load_model('./animal_cnn_aug.h5')
image = Image.open(filepath)
image = image.convert('RGB')
image = image.resize((image_size, image_size))
data = np.asarray(image)
X = []
X.append(data)
X = np.array(X)
result = model.predict([X])[0]
predicted = result.argmax()
percentage = int(result[predicted] * 100)
return "ラベル: " + classes[predicted] + ", 確率:"+ str(percentage) + " %"
return '''
<!doctype html>
<html>
<head>
<meta charset="UTF-8">
<title>ファイルをアップロードして判定しよう</title></head>
<body>
<h1>ファイルをアップロードして判定しよう!</h1>
<form method = post enctype = multipart/form-data>
<p><input type=file name=file>
<input type=submit value=Upload>
</form>
</body>
</html>
'''
from flask import send_from_directory
@app.route('/uploads/<filename>')
def uploaded_file(filename):
return send_from_directory(app.config['UPLOAD_FOLDER'], filename)
|
[
"harukun2002@gmail.com"
] |
harukun2002@gmail.com
|
a8a7310d4438b0f180e6da82fd2129f48f1290df
|
4a5f73bad1c81f25600d60e25c44651d849049df
|
/60ACuts/TrueCrossSectionAngleCuts.py
|
a61e7d596d0561d50289b7eec0c5ecc48c52a86c
|
[] |
no_license
|
ElenaGramellini/LArIATTrackinRes
|
b304d652d47a7a5663d35b171d3ca93f195b13a5
|
2db82ccc1fe92a884f0beb27a573c2060f37c146
|
refs/heads/master
| 2020-03-16T04:39:10.213353
| 2018-05-13T03:14:49
| 2018-05-13T03:14:49
| 132,516,119
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,555
|
py
|
from ROOT import *
import os
import math
import argparse
gStyle.SetOptStat(0);
# Truth 100 A
noAngleCutFile100A = TFile.Open("/Volumes/Seagate/Elena/TPC/AngleCut_100A_histo.root")
File_0083_100A = TFile.Open("AngleCut_0.08334_100Ahisto.root")
noAngleCut_Int100A = noAngleCutFile100A .Get("TrueXS/hInteractingKE")
noAngleCut_Inc100A = noAngleCutFile100A .Get("TrueXS/hIncidentKE")
noAngleCut_Int_0079_100A = noAngleCutFile100A .Get("AngleCutTrueXS007/hInteractingKE")
noAngleCut_Inc_0079_100A = noAngleCutFile100A .Get("AngleCutTrueXS007/hIncidentKE")
noAngleCut_Int_0083_100A = File_0083_100A .Get("AngleCutTrueXS083/hInteractingKE")
noAngleCut_Inc_0083_100A = File_0083_100A .Get("AngleCutTrueXS083/hIncidentKE")
noAngleCut_Int_0157_100A = noAngleCutFile100A .Get("AngleCutTrueXS015/hInteractingKE")
noAngleCut_Inc_0157_100A = noAngleCutFile100A .Get("AngleCutTrueXS015/hIncidentKE")
# Truth 60 A
noAngleCutFile = TFile.Open("TruePionGen60A.root")
angleCutFile0157 = TFile.Open("AngleCut_0.15734_60Ahisto.root")
angleCutFile0092 = TFile.Open("AngleCut_0.09248_60Ahisto.root")
angleCutFile0083 = TFile.Open("AngleCut_0.08334_60Ahisto.root")
angleCutFile0079 = TFile.Open("AngleCut_0.07954_60Ahisto.root")
# Get Interacting and Incident plots
interactingName = "AngleCutTrueXS/hInteractingKE"
incidentName = "AngleCutTrueXS/hIncidentKE"
noAngleCut_Int = noAngleCutFile .Get("TrueXS/hInteractingKE")
angleCut_Int_0157 = angleCutFile0157 .Get(interactingName)
angleCut_Int_0092 = angleCutFile0092 .Get(interactingName)
angleCut_Int_0083 = angleCutFile0083 .Get(interactingName)
angleCut_Int_0079 = angleCutFile0079 .Get(interactingName)
noAngleCut_Inc = noAngleCutFile .Get("TrueXS/hIncidentKE")
angleCut_Inc_0157 = angleCutFile0157 .Get(incidentName)
angleCut_Inc_0092 = angleCutFile0092 .Get(incidentName)
angleCut_Inc_0083 = angleCutFile0083 .Get(incidentName)
angleCut_Inc_0079 = angleCutFile0079 .Get(incidentName)
'''
cP = TCanvas("cP" ,"cP" ,200 ,10 ,600 ,600)
noAngleCut_Int100A.Draw("pe")
noAngleCut_Int.Draw("pe")
cP2 = TCanvas("cP2" ,"cP2" ,200 ,10 ,600 ,600)
noAngleCut_Inc100A.Draw("")
noAngleCut_Inc.Draw("same")
cPC = TCanvas("cPC" ,"cPC" ,200 ,10 ,600 ,600)
noAngleCut_Int100A.Divide(noAngleCut_Inc100A)
noAngleCut_Int100A.Scale(101)
noAngleCut_Int.Divide(noAngleCut_Inc)
noAngleCut_Int.Scale(101)
noAngleCut_Int100A.Draw("histo")
noAngleCut_Int.Draw("samehisto")
'''
cPNoCuts = TCanvas("cPNoCuts" ,"cPNoCuts" ,200 ,10 ,600 ,600)
cPNoCuts.SetGrid()
noAngleCut_Int100A.Add(noAngleCut_Int)
noAngleCut_Inc100A.Add(noAngleCut_Inc)
noAngleCut_Int_0079_100A.Add(angleCut_Int_0079)
noAngleCut_Inc_0079_100A.Add(angleCut_Inc_0079)
noAngleCut_Int_0083_100A.Add(angleCut_Int_0083)
noAngleCut_Inc_0083_100A.Add(angleCut_Inc_0083)
noAngleCut_Int_0157_100A.Add(angleCut_Int_0157)
noAngleCut_Inc_0157_100A.Add(angleCut_Inc_0157)
noAngleCut_Int100A.SetLineColor(kGreen-2)
noAngleCut_Int_0079_100A.SetLineColor(kRed)
noAngleCut_Int_0083_100A.SetLineColor(kBlue)
noAngleCut_Int_0157_100A.SetLineColor(kOrange)
noAngleCut_Int100A.Divide(noAngleCut_Inc100A)
noAngleCut_Int100A.Scale(101)
noAngleCut_Int100A.SetTitle("Geant4 (#pi^{-},Ar) True Cross Section; Kinetic Energy [MeV]; (#pi^{-},Ar) True Cross Section [barn]")
noAngleCut_Int100A.GetYaxis().SetTitleOffset(1.3)
noAngleCut_Int100A.Draw("histo][")
noAngleCut_Int_0079_100A.Divide(noAngleCut_Inc_0079_100A)
noAngleCut_Int_0079_100A.Scale(101)
noAngleCut_Int_0079_100A.Draw("histosame][")
noAngleCut_Int_0083_100A.Divide(noAngleCut_Inc_0083_100A)
noAngleCut_Int_0083_100A.Scale(101)
noAngleCut_Int_0083_100A.Draw("histosame][")
noAngleCut_Int_0157_100A.Divide(noAngleCut_Inc_0157_100A)
noAngleCut_Int_0157_100A.Scale(101)
noAngleCut_Int_0157_100A.Draw("histosame][")
for i in xrange(4):
noAngleCut_Int100A.SetBinContent(i,0)
noAngleCut_Int_0079_100A.SetBinContent(i,0)
noAngleCut_Int_0083_100A.SetBinContent(i,0)
noAngleCut_Int_0157_100A.SetBinContent(i,0)
for i in xrange(20,30):
noAngleCut_Int100A.SetBinContent(i,0)
noAngleCut_Int_0079_100A.SetBinContent(i,0)
noAngleCut_Int_0083_100A.SetBinContent(i,0)
noAngleCut_Int_0157_100A.SetBinContent(i,0)
legend = TLegend(.54,.52,.84,.70)
legend.AddEntry(noAngleCut_Int100A ,"All Angles")
legend.AddEntry(noAngleCut_Int_0079_100A,"Angles > 4.5 Deg")
legend.AddEntry(noAngleCut_Int_0083_100A,"Angles > 5.0 Deg")
legend.AddEntry(noAngleCut_Int_0157_100A,"Angles > 9.5 Deg")
legend.Draw("same")
raw_input()
|
[
"elena.gramellini@yale.edu"
] |
elena.gramellini@yale.edu
|
4aabc43f917c892939d2b4b254c5447a66b821fe
|
e763f24cb774ed67dc2248270ef5dc82109892f0
|
/cpv_project/urls.py
|
3cef10633def659d243c0a475d9de987fce5c728
|
[] |
no_license
|
ColinMaudry/cpv-app
|
225bb182e4e5a31dd90084bcc834c1762bf89b7b
|
20813748fe0f20747fe355d688945fd17524dd43
|
refs/heads/main
| 2023-08-15T02:11:01.316246
| 2021-08-27T15:36:40
| 2021-08-27T15:36:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 835
|
py
|
"""cpv_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path('admin/', admin.site.urls),
path('api/', include('api.urls')),
path('', include('app.urls'))
]
|
[
"colin@maudry.com"
] |
colin@maudry.com
|
9164b3dde2d4d475219881f1f098955b70bba669
|
c368b6e15b17d343afd3096627e377c654963b3b
|
/scripts/deploy.py
|
ea8877342b89b39e88756231ced10bb7559ef07e
|
[] |
no_license
|
ymytheresa/CrowdCoin
|
8091baa56e2ec5425fc3921b5e7e7f33d5ab8f55
|
c606636f0c60ccb61bf5739a1e532d2b2dc51b06
|
refs/heads/main
| 2023-04-15T07:59:40.325389
| 2021-04-19T22:02:58
| 2021-04-19T22:02:58
| 351,365,004
| 1
| 0
| null | 2021-04-02T12:59:17
| 2021-03-25T08:42:21
|
Solidity
|
UTF-8
|
Python
| false
| false
| 573
|
py
|
import os
from brownie import *
import json
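# load the deployer account from the environment variable named by wallets.from_key in the brownie config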
dev = accounts.add(os.getenv(config['wallets']['from_key']))
reward = Reward.deploy({'from': dev}, publish_source=True)
crowdcoin = CrowdCoin.deploy({'from': dev}, publish_source=True)
reward.set_coin(crowdcoin.address, {'from': dev})
def main():
print('crowdcoin :', crowdcoin.address)
print('reward :', reward.address)
data = {}
data['CROWDCOIN_ADDRESS'] = crowdcoin.address
data['REWARD_ADDRESS'] = reward.address
with open('address.txt', 'w') as outfile:
json.dump(data, outfile)
|
[
"38038286+ymytheresa@users.noreply.github.com"
] |
38038286+ymytheresa@users.noreply.github.com
|
5f292227a52f3c20c5a645732b51f1e72ff43146
|
35d5e6f5ea13c7a52435e19c503d785ce56db773
|
/sign_data.py
|
00063b13a24896495a172e0a77fc5c10dc985975
|
[] |
no_license
|
wanglinan1220/tcplocust-master
|
a292f2bd56f00927c8f340a8b26c24f2662ff95b
|
c4341aa2ea2a8e8562434040c3279e161cf122ba
|
refs/heads/master
| 2023-05-13T21:54:42.155647
| 2021-06-09T09:52:39
| 2021-06-09T09:52:39
| 361,657,636
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 355
|
py
|
import hashlib
data ={
"useid":123123,
"appid":1934342034809,
"sid":234234234234
}
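# sort the parameters by key, join them as key=value pairs with '&', then MD5 the result to get the signature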
data =sorted(data.items(), key=lambda item:item[0])
for k,j in data:
print(k,j)
data = [("%s"+"="+"%s")% (k,v) for k,v in data ]
data = ("&").join(data)
md5=hashlib.md5()
sign = md5.update(data.encode('UTF-8'))
sign=md5.hexdigest()
print(data)
|
[
"1547118336@qq.com"
] |
1547118336@qq.com
|
95838103359a78288a2b30f87614120db24561bf
|
0f1272e5c93183b3c54a7ae0d840d49f172eaaed
|
/music player code.py
|
3709a3f6bbae0783f84890af99b5e359800e5611
|
[] |
no_license
|
vaneet-hash/Music-Player-using-Python
|
8a36b2802eef9f1936a471e299c0833a98e6463a
|
3a56e7364dba671bb2f8f4642e42897c7e0eb6df
|
refs/heads/main
| 2023-07-20T04:36:23.814326
| 2021-08-29T19:08:22
| 2021-08-29T19:08:22
| 401,120,375
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,100
|
py
|
import os
import pygame
from tkinter import *
from tkinter.filedialog import askdirectory
pygame.mixer.init()
root=Tk()
root.title('Music Player')
root.minsize(350,300)
listbox = Listbox(root)
listbox.pack(fill = BOTH)
list_songs= []
def choose_directory():
directory = askdirectory()
os.chdir(directory)
for files in os.listdir(directory):
if files.endswith(".mp3"):
list_songs.append(files)
choose_directory()
for items in list_songs:
listbox.insert(0,items)
def play():
global list_songs
pygame.mixer.music.load(list_songs[0])
pygame.mixer.music.play()
def pause():
pygame.mixer.music.pause()
def unpause():
pygame.mixer.music.unpause()
index = 0
def nextsong():
global index
index+=1
pygame.mixer.music.load(list_songs[index])
pygame.mixer.music.play()
    # updatelabel()  # disabled: no updatelabel() is defined in this file, so calling it would raise NameError
def prevsong():
global index
index -= 1
pygame.mixer.music.load(list_songs[index])
pygame.mixer.music.play()
def exitbutton():
pygame.mixer.music.stop()
root.destroy()
def volume(val):
volume = int(val)/100
pygame.mixer.music.set_volume(volume)
playbutton= Button(root,text = 'Play',height = 2, width = 6, command = play)
playbutton.pack(side=LEFT)
pausebutton = Button(root,text = 'Pause',height = 2, width = 6,command = pause)
pausebutton.pack(side=LEFT)
unpausebutton = Button(root,text = 'Unpause',height = 2, width = 6,command = unpause)
unpausebutton.pack(side=LEFT)
prevbutton = Button(root,text = 'Previous',height = 2, width = 6,command = prevsong)
prevbutton.pack(side = LEFT)
nextbutton = Button(root,text = 'Next',height = 2, width = 6,command = nextsong)
nextbutton.pack(side= LEFT)
exitbutton = Button(root,text = 'Exit',height = 2, width = 6,command = exitbutton)
exitbutton.pack(anchor = 'e',side = BOTTOM )
scale = Scale(root,from_ = 0, to = 100, orient = HORIZONTAL, command= volume)
scale.set(27)
scale.pack()
root.mainloop()
|
[
"noreply@github.com"
] |
vaneet-hash.noreply@github.com
|
ac919936e4c9da07a55faed852422220a5fd552a
|
c2e783091524ae9d7b09f76325e7e66189c839a1
|
/backend/course/migrations/0001_initial.py
|
d37758e1ee7666d30db7dc78100db5a23e6a94e8
|
[] |
no_license
|
crowdbotics-apps/belled-20745
|
97246b9913fd908c188a2b728f3942bcd7bf1e06
|
7e14d156648abf3a3694cbeba2fe29df4755fbf2
|
refs/heads/master
| 2022-12-21T23:37:45.482269
| 2020-09-26T23:47:40
| 2020-09-26T23:47:40
| 298,918,181
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,323
|
py
|
# Generated by Django 2.2.16 on 2020-09-26 23:46
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="Category",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=256)),
],
),
migrations.CreateModel(
name="Course",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("title", models.CharField(blank=True, max_length=256, null=True)),
("description", models.TextField(blank=True, null=True)),
(
"author",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="course_author",
to=settings.AUTH_USER_MODEL,
),
),
(
"categories",
models.ManyToManyField(
blank=True,
related_name="course_categories",
to="course.Category",
),
),
],
),
migrations.CreateModel(
name="Event",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=256)),
("date", models.DateTimeField()),
(
"user",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="event_user",
to=settings.AUTH_USER_MODEL,
),
),
],
),
migrations.CreateModel(
name="Group",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=256)),
],
),
migrations.CreateModel(
name="SubscriptionType",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=256)),
],
),
migrations.CreateModel(
name="Subscription",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"subscription_type",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="subscription_subscription_type",
to="course.SubscriptionType",
),
),
(
"user",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="subscription_user",
to=settings.AUTH_USER_MODEL,
),
),
],
),
migrations.CreateModel(
name="Recording",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("media", models.URLField()),
("published", models.DateTimeField()),
(
"event",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="recording_event",
to="course.Event",
),
),
(
"user",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="recording_user",
to=settings.AUTH_USER_MODEL,
),
),
],
),
migrations.CreateModel(
name="PaymentMethod",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("primary", models.BooleanField()),
("token", models.CharField(max_length=256)),
(
"user",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="paymentmethod_user",
to=settings.AUTH_USER_MODEL,
),
),
],
),
migrations.CreateModel(
name="Module",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("title", models.CharField(max_length=256)),
("description", models.TextField()),
(
"course",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="module_course",
to="course.Course",
),
),
],
),
migrations.CreateModel(
name="Lesson",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("title", models.CharField(max_length=256)),
("description", models.TextField()),
("media", models.URLField()),
(
"module",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="lesson_module",
to="course.Module",
),
),
],
),
migrations.CreateModel(
name="Enrollment",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"course",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="enrollment_course",
to="course.Course",
),
),
(
"user",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="enrollment_user",
to=settings.AUTH_USER_MODEL,
),
),
],
),
]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
dc7064ae3fc0fb7dbe0e4d9d2d6c5020319510d3
|
94625f2cda3d734f84282bdff77732b99235e4ff
|
/saliency-detection/objectness_saliency.py
|
1f091d8b68b146b1045462542eebb7dfb8469623
|
[] |
no_license
|
Walid-Ahmed/imageProcessing
|
87b407532250df46dbcfd5bc830b1cdd411bef93
|
c55ad8d325b44a9ef2cf5d734bc8e5c89d1f6e15
|
refs/heads/master
| 2020-09-01T20:18:31.973536
| 2020-05-10T01:40:56
| 2020-05-10T01:40:56
| 219,045,463
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,783
|
py
|
# USAGE
# python objectness_saliency.py --model objectness_trained_model --image images/barcelona.jpg
#results are saved to folder results
# import the necessary packages
import numpy as np
import argparse
import cv2
import os
if not os.path.exists('Results'):
os.makedirs('Results')
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-m", "--model", required=True,
help="path to BING objectness saliency model")
ap.add_argument("-i", "--image", required=True,
help="path to input image")
ap.add_argument("-n", "--max-detections", type=int, default=10,
help="maximum # of detections to examine")
args = vars(ap.parse_args())
fileNameInputImage=args["image"]
# load the input image
image = cv2.imread(args["image"])
# initialize OpenCV's objectness saliency detector and set the path
# to the input model files
saliency = cv2.saliency.ObjectnessBING_create()
saliency.setTrainingPath(args["model"])
# compute the bounding box predictions used to indicate saliency
(success, saliencyMap) = saliency.computeSaliency(image)
numDetections = saliencyMap.shape[0]
# loop over the detections
for i in range(0, min(numDetections, args["max_detections"])):
# extract the bounding box coordinates
(startX, startY, endX, endY) = saliencyMap[i].flatten()
print(i)
# randomly generate a color for the object and draw it on the image
output = image.copy()
color = np.random.randint(0, 255, size=(3,))
color = [int(c) for c in color]
cv2.rectangle(output, (startX, startY), (endX, endY), color, 2)
# show the output image
cv2.imshow("Image", output)
fileName=os.path.join("Results","_"+str(i)+os.path.basename(fileNameInputImage))
print("Saving image to file " + fileName)
cv2.imwrite(fileName,output)
cv2.waitKey(0)
|
[
"walidahmed@Walids-MacBook-Air.local"
] |
walidahmed@Walids-MacBook-Air.local
|
6585890167300876eac529d4c55ac82e3476fb3e
|
86872de85ce606df099cf2c7bb69bf2c682489bc
|
/welcome/settings.py
|
9c717a5cf5dfe9e039c8bd6db889ad9a6bc28d09
|
[] |
no_license
|
mathengejoan/Python_project
|
555091a031b94a5bab3bf63001c751cf9f95ccf2
|
c8e82303a855f25911b174c5b9671075283c4ae9
|
refs/heads/master
| 2020-05-20T03:06:42.136944
| 2019-05-08T13:32:15
| 2019-05-08T13:32:15
| 185,346,405
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,222
|
py
|
"""
Django settings for welcome project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '3f7t-b2f%$55pg2%d4$l$36fx04$c(x+ws5yh^7@(5uyi-#!_g'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'new',
'crispy_forms',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'welcome.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'welcome.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
MEDIA_URL = '/media/'
CRISPY_TEMPLATE_PACK = 'bootstrap4'
|
[
"mathengejoan971@gmail.com"
] |
mathengejoan971@gmail.com
|
b2e7d6e13551c158c1121c23199f3b25273d8cbc
|
6863faabb955717cab25dca371f7fcfd99433343
|
/edu_server/server/migrations/0038_auto_20180827_1122.py
|
9b07274c121e221a21c405f04f8010bc9ccff319
|
[] |
no_license
|
kirkutirev/edu-server
|
23342d7c062231c388f767fab6d9b0353faf1f79
|
dae4476300dd9db536b848ec8629d1160c8976ce
|
refs/heads/master
| 2020-07-05T05:05:42.260579
| 2018-08-30T16:57:56
| 2018-08-30T16:57:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 940
|
py
|
# Generated by Django 2.0.7 on 2018-08-27 08:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('server', '0037_auto_20180820_1140'),
]
operations = [
migrations.AddField(
model_name='user',
name='expires_in',
field=models.PositiveIntegerField(default=3600, null=True),
),
migrations.AddField(
model_name='user',
name='refresh_token',
field=models.CharField(default=None, max_length=256, null=True),
),
migrations.AddField(
model_name='user',
name='scope',
field=models.CharField(default=None, max_length=128, null=True),
),
migrations.AddField(
model_name='user',
name='token_type',
field=models.CharField(default=None, max_length=32, null=True),
),
]
|
[
"kutirev-k@mail.ru"
] |
kutirev-k@mail.ru
|