Dataset schema (string/list lengths and integer ranges are the min–max observed in the data; ⌀ marks a nullable column):

| column | type |
|---|---|
| blob_id | string (40 chars) |
| directory_id | string (40 chars) |
| path | string (2–616 chars) |
| content_id | string (40 chars) |
| detected_licenses | list (0–69 items) |
| license_type | string (2 classes) |
| repo_name | string (5–118 chars) |
| snapshot_id | string (40 chars) |
| revision_id | string (40 chars) |
| branch_name | string (4–63 chars) |
| visit_date | timestamp[us] |
| revision_date | timestamp[us] |
| committer_date | timestamp[us] |
| github_id | int64 (2.91k–686M, ⌀) |
| star_events_count | int64 (0–209k) |
| fork_events_count | int64 (0–110k) |
| gha_license_id | string (23 classes) |
| gha_event_created_at | timestamp[us] |
| gha_created_at | timestamp[us] |
| gha_language | string (213 classes) |
| src_encoding | string (30 classes) |
| language | string (1 class) |
| is_vendor | bool (2 classes) |
| is_generated | bool (2 classes) |
| length_bytes | int64 (2–10.3M) |
| extension | string (246 classes) |
| content | string (2–10.3M chars) |
| authors | list (1 item) |
| author_id | string (0–212 chars) |

Each record below lists these fields as a field/value table, followed by the file content.

| field | value |
|---|---|
| blob_id | ca51a71cab24e6fbbf6e65d2e6207782ed25a1b3 |
| directory_id | 9accb7992c7a4b9c7130a039a469f99b3be99182 |
| path | /djangoprojects/moviemodelformproject/moviemodelformproject/settings.py |
| content_id | a81e8047493fabb5a09d112971b4df49b2533a03 |
| detected_licenses | [] |
| license_type | no_license |
| repo_name | djangoprojectsbyrahul/crud-fbv-operations |
| snapshot_id | a76a43aa3842e2a50fa288de11b8a691c6ad741b |
| revision_id | f6954a7b6c50e2d582b23e56ab3f10138c2e4d45 |
| branch_name | refs/heads/master |
| visit_date | 2023-02-05T21:11:12.251578 |
| revision_date | 2020-12-20T13:21:11 |
| committer_date | 2020-12-20T13:21:11 |
| github_id | 304,875,620 |
| star_events_count | 0 |
| fork_events_count | 0 |
| gha_license_id | null |
| gha_event_created_at | null |
| gha_created_at | null |
| gha_language | null |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 3,304 |
| extension | py |
| authors | ["pythondjango9011@gmail.com"] |
| author_id | pythondjango9011@gmail.com |

```python
"""
Django settings for moviemodelformproject project.
Generated by 'django-admin startproject' using Django 1.11.17.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATE_DIR = os.path.join(BASE_DIR,'templates')
STATIC_DIR = os.path.join(BASE_DIR,'static')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'gh#ufe_4y!b4zhe06wufz3$83+j8!1$bx2l7xq#ir=g1+pzfb%'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'testapp',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'moviemodelformproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR,],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'moviemodelformproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
STATIC_DIR,
]
```
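The settings file hard-codes SECRET_KEY while DEBUG is on, which its own inline warnings flag as unsuitable for production. A minimal sketch of the usual remedy, assuming an environment variable named DJANGO_SECRET_KEY (the name is illustrative, not from the repo):

```python
# Sketch: read the secret key from the environment instead of committing it.
# DJANGO_SECRET_KEY is a hypothetical variable name.
import os

SECRET_KEY = os.environ["DJANGO_SECRET_KEY"]  # fail fast if it is unset
```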

| field | value |
|---|---|
| blob_id | 5d89e0a4e0c50d9cc7ef5cf5ea267be2dd1ec2f8 |
| directory_id | 380e3bfa645181f3aef4d81554d6b19391c2cbca |
| path | /pages/migrations/0003_auto__add_index_page_created__add_index_page_path__add_index_page_is_p.py |
| content_id | 68dd591d6da454581f5b67d06d29c2aca08fd1b5 |
| detected_licenses | ["Apache-2.0"] |
| license_type | permissive |
| repo_name | raonyguimaraes/pythondotorg |
| snapshot_id | 819a5bb03069297ae6da1f149fb53150dbc18f32 |
| revision_id | 054d8999ed331f71690a8261d4a1025788f89225 |
| branch_name | refs/heads/master |
| visit_date | 2021-01-24T21:37:12.655528 |
| revision_date | 2014-02-20T01:38:56 |
| committer_date | 2014-02-20T01:38:56 |
| github_id | 17,006,161 |
| star_events_count | 1 |
| fork_events_count | 0 |
| gha_license_id | null |
| gha_event_created_at | null |
| gha_created_at | null |
| gha_language | null |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 6,059 |
| extension | py |
| authors | ["frank@revsys.com"] |
| author_id | frank@revsys.com |

```python
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding index on 'Page', fields ['created']
db.create_index('pages_page', ['created'])
# Adding index on 'Page', fields ['path']
db.create_index('pages_page', ['path'])
# Adding index on 'Page', fields ['is_published']
db.create_index('pages_page', ['is_published'])
def backwards(self, orm):
# Removing index on 'Page', fields ['is_published']
db.delete_index('pages_page', ['is_published'])
# Removing index on 'Page', fields ['path']
db.delete_index('pages_page', ['path'])
# Removing index on 'Page', fields ['created']
db.delete_index('pages_page', ['created'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'to': "orm['auth.Permission']", 'symmetrical': 'False'})
},
'auth.permission': {
'Meta': {'object_name': 'Permission', 'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'contenttypes.contenttype': {
'Meta': {'db_table': "'django_content_type'", 'object_name': 'ContentType', 'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'pages.page': {
'Meta': {'object_name': 'Page', 'ordering': "['title', 'path']"},
'_content_rendered': ('django.db.models.fields.TextField', [], {}),
'content': ('markupfield.fields.MarkupField', [], {'rendered_field': 'True'}),
'content_markup_type': ('django.db.models.fields.CharField', [], {'max_length': '30', 'default': "'restructuredtext'"}),
'created': ('django.db.models.fields.DateTimeField', [], {'blank': 'True', 'db_index': 'True', 'default': 'datetime.datetime.now'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['users.User']", 'null': 'True', 'related_name': "'pages_page_creator'", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_published': ('django.db.models.fields.BooleanField', [], {'db_index': 'True', 'default': 'True'}),
'last_modified_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['users.User']", 'null': 'True', 'related_name': "'pages_page_modified'", 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '500', 'db_index': 'True', 'unique': 'True'}),
'template_name': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'})
},
'users.user': {
'Meta': {'object_name': 'User'},
'_bio_rendered': ('django.db.models.fields.TextField', [], {}),
'bio': ('markupfield.fields.MarkupField', [], {'blank': 'True', 'rendered_field': 'True'}),
'bio_markup_type': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '30', 'default': "'markdown'"}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'blank': 'True', 'max_length': '75'}),
'email_privacy': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'first_name': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '30'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'to': "orm['auth.Group']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '30'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'search_visibility': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'to': "orm['auth.Permission']", 'symmetrical': 'False'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
}
}
complete_apps = ['pages']
```
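This is a South migration; South predates Django's built-in migration framework (Django ≥ 1.7). Under the modern framework the same three index additions would look roughly like the sketch below; the dependency label and field arguments are assumptions, not taken from the repo:

```python
# Sketch of a hypothetical modern equivalent (Django >= 1.7), not from the repo.
import datetime
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [("pages", "0002_auto")]  # hypothetical predecessor

    operations = [
        migrations.AlterField(
            model_name="page",
            name="created",
            field=models.DateTimeField(blank=True, db_index=True,
                                       default=datetime.datetime.now),
        ),
        # ...and the analogous AlterField operations for "path" and "is_published".
    ]
```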

| field | value |
|---|---|
| blob_id | 7e30b98942c140658cd5e87434fec47b02c5299d |
| directory_id | 4815908f705d9064ea78adddf963d8f5873a3f7e |
| path | /toscana_admin/urls.py |
| content_id | d6a786705dc334648c829ba8e225cbd886151fac |
| detected_licenses | [] |
| license_type | no_license |
| repo_name | MartinSyno/toscana |
| snapshot_id | 06a64874813ba5695da18a2a5f53ac019b569550 |
| revision_id | 768ffb4fb7561bd6334cc3d265a8316b4839b572 |
| branch_name | refs/heads/main |
| visit_date | 2023-05-03T19:28:17.525950 |
| revision_date | 2021-05-24T11:58:08 |
| committer_date | 2021-05-24T11:58:08 |
| github_id | 364,274,729 |
| star_events_count | 0 |
| fork_events_count | 0 |
| gha_license_id | null |
| gha_event_created_at | null |
| gha_created_at | null |
| gha_language | null |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 233 |
| extension | py |
| authors | ["mrmartin200113@gmail.com"] |
| author_id | mrmartin200113@gmail.com |

```python
from django.urls import path
from .views import *
app_name = "toscana_admin"
urlpatterns = [
path("messages/", messages_list, name="messages_list"),
path("messages/update/<int:pk>", message_update, name="message_update"),
]
```
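Since the urlconf sets app_name and names both routes, they can be resolved by name. A small usage sketch; the resulting URLs depend on where this module is include()d in the project, which is not shown here:

```python
# Sketch: reversing the named routes above.
from django.urls import reverse

reverse("toscana_admin:messages_list")                     # e.g. ".../messages/"
reverse("toscana_admin:message_update", kwargs={"pk": 7})  # e.g. ".../messages/update/7"
```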

| field | value |
|---|---|
| blob_id | a6515b21c59c208046addca4d4ac650c15b80e8e |
| directory_id | 4d673f5762be3ceff39be9944872a943a3f75422 |
| path | /Companies/Facebook/two_sum.py |
| content_id | 9a453b00fe713307f89c6b7e09ddbae913858af3 |
| detected_licenses | [] |
| license_type | no_license |
| repo_name | leeo1116/Algorithms |
| snapshot_id | f41d525a413249fc81b2b579cbad0f783138fd73 |
| revision_id | 5959400492dc1403d06b884ea81a269671d420de |
| branch_name | refs/heads/master |
| visit_date | 2021-01-22T06:22:52.402124 |
| revision_date | 2018-08-16T02:52:50 |
| committer_date | 2018-08-16T02:52:50 |
| github_id | 31,132,742 |
| star_events_count | 0 |
| fork_events_count | 0 |
| gha_license_id | null |
| gha_event_created_at | null |
| gha_created_at | null |
| gha_language | null |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 230 |
| extension | py |
| authors | ["leeo1116@gmail.com"] |
| author_id | leeo1116@gmail.com |

```python
def two_sum(nums, target):
    """Return indices (i, j) of two entries of nums that sum to target,
    or an empty tuple if no such pair exists. One pass, O(n) time."""
    num_index_dict = {}
    for i, n in enumerate(nums):
        # If the complement of n was seen earlier, we have our pair.
        if num_index_dict.get(target - n, -1) != -1:
            return num_index_dict[target - n], i
        num_index_dict[n] = i
    return ()
```
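A quick check of the helper. The -1 sentinel is safe only because enumerate() never yields negative indices; `if (target - n) in num_index_dict:` would state the membership test more directly:

```python
# Sketch: exercising two_sum.
print(two_sum([2, 7, 11, 15], 9))  # (0, 1) -- 2 + 7 == 9
print(two_sum([3, 2, 4], 6))       # (1, 2) -- 2 + 4 == 6
print(two_sum([1, 2], 7))          # ()     -- no pair sums to 7
```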

| field | value |
|---|---|
| blob_id | f0995bf4aca5cc69f3d2f7719c67d4c7ab4a4fdb |
| directory_id | 26f62fe20d2e06239c9182ed9c3b74ba10c77a26 |
| path | /test.py |
| content_id | 9ababf54166ce7519075dad9593e7f025ae3dccf |
| detected_licenses | [] |
| license_type | no_license |
| repo_name | ja-noble/Maze-PyGame |
| snapshot_id | 4b43aa842fdd30ce1ada7ee7fb72fae53d4391c5 |
| revision_id | 8bc162696962e6528ba1dcea72bd747f5f8b3818 |
| branch_name | refs/heads/master |
| visit_date | 2022-04-17T11:17:41.211052 |
| revision_date | 2020-03-03T02:55:48 |
| committer_date | 2020-03-03T02:55:48 |
| github_id | null |
| star_events_count | 0 |
| fork_events_count | 0 |
| gha_license_id | null |
| gha_event_created_at | null |
| gha_created_at | null |
| gha_language | null |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 14,004 |
| extension | py |
| authors | ["38360284+ja-noble@users.noreply.github.com"] |
| author_id | 38360284+ja-noble@users.noreply.github.com |

```python
import pygame
""" Submit a .py file with a project proposal in the comments including:
What's your project?
I want to create a puzzle game that has the same
dynamics and surface interaction as this game: https://www.pygame.org/project/1313, and I will take
the block pushing elements from this game: https://www.pygame.org/project/1177/2100
Tile based game with walls that can be pushed, and walls that are solid and static.
What is your first major milestone?
Learning surface detection and rectangles with pygame
What do you not know that you will need to learn?
Learning surface detection and pushing mechanics with surfaces
In what ways is your project too ambitious?
I don't think it's too ambitious, after I make the first level it should be good to go for more.
The photoshopping tiles will take up a lot of time as well though.
In what ways is it possibly not ambitious enough?
So far, using pygame has been easier than I thought, but to be fair
I only just set up the foundation of even getting the game to work so far
"""
# import pygame, sys
# pygame.init()
# screen = pygame.display.set_mode([800,600])
# white = [255, 255, 255]
# red = [255, 0, 0]
# screen.fill(white)
# pygame.display.set_caption("My program")
# pygame.display.flip()
# background = input("What color would you like?: ")
# if background == "red":
# screen.fill(red)
# running = True
# while True:
# for i in pygame.event.get():
# if i.type == pygame.QUIT:
# running = False
# pygame.quit()
import os
import random
import pygame
# # Class for the orange dude
# class Player(object):
# def __init__(self):
# self.rect = pygame.Rect(32, 32, 16, 16)
# def move(self, dx, dy):
# # Move each axis separately. Note that this checks for collisions both times.
# if dx != 0:
# self.move_single_axis(dx, 0)
# if dy != 0:
# self.move_single_axis(0, dy)
# def move_single_axis(self, dx, dy):
# # Move the rect
# self.rect.x += dx
# self.rect.y += dy
# # If you collide with a wall, move out based on velocity
# for wall in walls:
# if self.rect.colliderect(wall.rect):
# if dx > 0: # Moving right; Hit the left side of the wall
# self.rect.right = wall.rect.left
# if dx < 0: # Moving left; Hit the right side of the wall
# self.rect.left = wall.rect.right
# if dy > 0: # Moving down; Hit the top side of the wall
# self.rect.bottom = wall.rect.top
# if dy < 0: # Moving up; Hit the bottom side of the wall
# self.rect.top = wall.rect.bottom
# # Nice class to hold a wall rect
# class Wall(object):
# def __init__(self, pos):
# walls.append(self)
# self.rect = pygame.Rect(pos[0], pos[1], 16, 16)
# # Initialise pygame
# os.environ["SDL_VIDEO_CENTERED"] = "1"
# pygame.init()
# # Set up the display
# pygame.display.set_caption("Get to the red square!")
# screen = pygame.display.set_mode((320, 240))
# clock = pygame.time.Clock()
# walls = [] # List to hold the walls
# player = Player() # Create the player
# # Holds the level layout in a list of strings.
# level = [
# "WWWWWWWWWWWWWWWWWWWW",
# "W W",
# "W WWWWWW W",
# "W WWWW W W",
# "W W WWWW W",
# "W WWW WWWW W",
# "W W W W W",
# "W W W WWW WW",
# "W WWW WWW W W W",
# "W W W W W W",
# "WWW W WWWWW W W",
# "W W WW W",
# "W W WWWW WWW W",
# "W W E W W",
# "WWWWWWWWWWWWWWWWWWWW",
# ]
# # Parse the level string above. W = wall, E = exit
# x = y = 0
# for row in level:
# for col in row:
# if col == "W":
# Wall((x, y))
# if col == "E":
# end_rect = pygame.Rect(x, y, 16, 16)
# x += 16
# y += 16
# x = 0
# running = True
# while running:
# clock.tick(60)
# for e in pygame.event.get():
# if e.type == pygame.QUIT:
# running = False
# if e.type == pygame.KEYDOWN and e.key == pygame.K_ESCAPE:
# running = False
# # Move the player if an arrow key is pressed
# key = pygame.key.get_pressed()
# if key[pygame.K_LEFT]:
# player.move(-2, 0)
# if key[pygame.K_RIGHT]:
# player.move(2, 0)
# if key[pygame.K_UP]:
# player.move(0, -2)
# if key[pygame.K_DOWN]:
# player.move(0, 2)
# # Just added this to make it slightly fun ;)
# # if player.rect.colliderect(end_rect):
# # raise SystemExit("You win!")  # original used Python 2 syntax: raise SystemExit, "You win!"
# # Draw the scene
# screen.fill((0, 0, 0))
# for wall in walls:
# pygame.draw.rect(screen, (255, 255, 255), wall.rect)
# pygame.draw.rect(screen, (255, 0, 0), end_rect)
# pygame.draw.rect(screen, (255, 200, 0), player.rect)
# pygame.display.flip()
"""
Sample Python/Pygame Programs
Simpson College Computer Science
http://programarcadegames.com/
http://simpson.edu/computer-science/
From:
http://programarcadegames.com/python_examples/f.php?file=maze_runner.py
Explanation video: http://youtu.be/5-SbFanyUkQ
Part of a series:
http://programarcadegames.com/python_examples/f.php?file=move_with_walls_example.py
http://programarcadegames.com/python_examples/f.php?file=maze_runner.py
http://programarcadegames.com/python_examples/f.php?file=platform_jumper.py
http://programarcadegames.com/python_examples/f.php?file=platform_scroller.py
http://programarcadegames.com/python_examples/f.php?file=platform_moving.py
http://programarcadegames.com/python_examples/sprite_sheets/
"""
# This was all for testing the code I took from those examples, to see how it works
import pygame
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
BLUE = (0, 0, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
PURPLE = (255, 0, 255)
class Wall(pygame.sprite.Sprite):
"""This class represents the bar at the bottom that the player controls """
def __init__(self, x, y, width, height, color):
""" Constructor function """
# Call the parent's constructor
super().__init__()
# Make a BLUE wall, of the size specified in the parameters
self.image = pygame.Surface([width, height])
self.image.fill(color)
# Make our top-left corner the passed-in location.
self.rect = self.image.get_rect()
self.rect.y = y
self.rect.x = x
class Player(pygame.sprite.Sprite):
""" This class represents the bar at the bottom that the
player controls """
# Set speed vector
change_x = 0
change_y = 0
def __init__(self, x, y):
""" Constructor function """
# Call the parent's constructor
super().__init__()
# Set height, width
self.image = pygame.Surface([15, 15])
self.image.fill(WHITE)
# Make our top-left corner the passed-in location.
self.rect = self.image.get_rect()
self.rect.y = y
self.rect.x = x
def changespeed(self, x, y):
""" Change the speed of the player. Called with a keypress. """
self.change_x += x
self.change_y += y
def move(self, walls):
""" Find a new position for the player """
# Move left/right
self.rect.x += self.change_x
# Did this update cause us to hit a wall?
block_hit_list = pygame.sprite.spritecollide(self, walls, False)
for block in block_hit_list:
# If we are moving right, set our right side to the left side of
# the item we hit
if self.change_x > 0:
self.rect.right = block.rect.left
else:
# Otherwise if we are moving left, do the opposite.
self.rect.left = block.rect.right
# Move up/down
self.rect.y += self.change_y
# Check and see if we hit anything
block_hit_list = pygame.sprite.spritecollide(self, walls, False)
for block in block_hit_list:
# Reset our position based on the top/bottom of the object.
if self.change_y > 0:
self.rect.bottom = block.rect.top
else:
self.rect.top = block.rect.bottom
class Room(object):
""" Base class for all rooms. """
# Each room has a list of walls, and of enemy sprites.
wall_list = None
enemy_sprites = None
def __init__(self):
""" Constructor, create our lists. """
self.wall_list = pygame.sprite.Group()
self.enemy_sprites = pygame.sprite.Group()
class Room1(Room):
"""This creates all the walls in room 1"""
def __init__(self):
super().__init__()
# Make the walls. (x_pos, y_pos, width, height)
# This is a list of walls. Each is in the form [x, y, width, height]
walls = [[0, 0, 20, 250, WHITE],
[0, 350, 20, 250, WHITE],
[780, 0, 20, 250, WHITE],
[780, 350, 20, 250, WHITE],
[20, 0, 760, 20, WHITE],
[20, 580, 760, 20, WHITE],
[390, 50, 20, 500, BLUE]
]
# Loop through the list. Create the wall, add it to the list
for item in walls:
wall = Wall(item[0], item[1], item[2], item[3], item[4])
self.wall_list.add(wall)
class Room2(Room):
"""This creates all the walls in room 2"""
def __init__(self):
super().__init__()
walls = [[0, 0, 20, 250, RED],
[0, 350, 20, 250, RED],
[780, 0, 20, 250, RED],
[780, 350, 20, 250, RED],
[20, 0, 760, 20, RED],
[20, 580, 760, 20, RED],
[190, 50, 20, 500, GREEN],
[590, 50, 20, 500, GREEN]
]
for item in walls:
wall = Wall(item[0], item[1], item[2], item[3], item[4])
self.wall_list.add(wall)
class Room3(Room):
"""This creates all the walls in room 3"""
def __init__(self):
super().__init__()
walls = [[0, 0, 20, 250, PURPLE],
[0, 350, 20, 250, PURPLE],
[780, 0, 20, 250, PURPLE],
[780, 350, 20, 250, PURPLE],
[20, 0, 760, 20, PURPLE],
[20, 580, 760, 20, PURPLE]
]
for item in walls:
wall = Wall(item[0], item[1], item[2], item[3], item[4])
self.wall_list.add(wall)
for x in range(100, 800, 100):
for y in range(50, 451, 300):
wall = Wall(x, y, 20, 200, RED)
self.wall_list.add(wall)
for x in range(150, 700, 100):
wall = Wall(x, 200, 20, 200, WHITE)
self.wall_list.add(wall)
def main():
""" Main Program """
# Call this function so the Pygame library can initialize itself
pygame.init()
# Create an 800x600 sized screen
screen = pygame.display.set_mode([800, 600])
# Set the title of the window
pygame.display.set_caption('Maze Runner')
# Create the player paddle object
player = Player(50, 50)
movingsprites = pygame.sprite.Group()
movingsprites.add(player)
rooms = []
room = Room1()
rooms.append(room)
room = Room2()
rooms.append(room)
room = Room3()
rooms.append(room)
current_room_no = 0
current_room = rooms[current_room_no]
clock = pygame.time.Clock()
done = False
while not done:
# --- Event Processing ---
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
player.changespeed(-5, 0)
if event.key == pygame.K_RIGHT:
player.changespeed(5, 0)
if event.key == pygame.K_UP:
player.changespeed(0, -5)
if event.key == pygame.K_DOWN:
player.changespeed(0, 5)
if event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT:
player.changespeed(5, 0)
if event.key == pygame.K_RIGHT:
player.changespeed(-5, 0)
if event.key == pygame.K_UP:
player.changespeed(0, 5)
if event.key == pygame.K_DOWN:
player.changespeed(0, -5)
# --- Game Logic ---
player.move(current_room.wall_list)
if player.rect.x < -15:
if current_room_no == 0:
current_room_no = 2
current_room = rooms[current_room_no]
player.rect.x = 790
elif current_room_no == 2:
current_room_no = 1
current_room = rooms[current_room_no]
player.rect.x = 790
else:
current_room_no = 0
current_room = rooms[current_room_no]
player.rect.x = 790
if player.rect.x > 801:
if current_room_no == 0:
current_room_no = 1
current_room = rooms[current_room_no]
player.rect.x = 0
elif current_room_no == 1:
current_room_no = 2
current_room = rooms[current_room_no]
player.rect.x = 0
else:
current_room_no = 0
current_room = rooms[current_room_no]
player.rect.x = 0
# --- Drawing ---
screen.fill(BLACK)
movingsprites.draw(screen)
current_room.wall_list.draw(screen)
pygame.display.flip()
clock.tick(60)
pygame.quit()
if __name__ == "__main__":
main()
```

| field | value |
|---|---|
| blob_id | 1bc1449354d89099bf74ac4ff0f6428898f79dad |
| directory_id | 32367a7abdcc5d7a14c366b839b3eaa2fd62df4f |
| path | /withotdevide3and5.py |
| content_id | f016449ce9c568fe54837cf2001b89b37845ee29 |
| detected_licenses | [] |
| license_type | no_license |
| repo_name | BharatKanzariya/python_basic |
| snapshot_id | 1735874b6513a44f6725648fe1ace95b8513ccf0 |
| revision_id | 0243a968bd2ddf491d34a82f3404ef8ed538b7d1 |
| branch_name | refs/heads/main |
| visit_date | 2023-06-26T02:55:12.398929 |
| revision_date | 2021-07-14T08:57:06 |
| committer_date | 2021-07-14T08:57:06 |
| github_id | 385,846,534 |
| star_events_count | 1 |
| fork_events_count | 0 |
| gha_license_id | null |
| gha_event_created_at | null |
| gha_created_at | null |
| gha_language | null |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 262 |
| extension | py |
| authors | ["noreply@github.com"] |
| author_id | BharatKanzariya.noreply@github.com |

```python
"""
short program
for i in range(1,101):
if i % 3 == 0:
continue
elif i % 5 == 0:
continue
print(i)
"""
i=1
while i <= 100:
if i % 3 == 0 or i % 5 == 0:
i = i+1
else:
print(i)
i = i+1
```
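Both variants walk 1–100 and skip multiples of 3 or 5. Because `i % 3` and `i % 5` are truthy exactly when `i` is not divisible, the same filter collapses to a comprehension (a sketch equivalent to the loop above):

```python
# Sketch: the same filter as a single list comprehension.
print([i for i in range(1, 101) if i % 3 and i % 5])
```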

| field | value |
|---|---|
| blob_id | e1fea1c724e1a3f00970b8e384c1139fa100fb6d |
| directory_id | ff136c5c5ed845d11b965d4e1d0fd6855ba09ecc |
| path | /app.py |
| content_id | 4100b9b093eaf0e3b71c6fb82461db2ff49e42c7 |
| detected_licenses | [] |
| license_type | no_license |
| repo_name | emilbello/sqlalchemy-challenge |
| snapshot_id | cd047944684834a8043693aef14b1c2df7c07eef |
| revision_id | a1ffb5274fc22ae7278b71fe12fa376607c1ce66 |
| branch_name | refs/heads/master |
| visit_date | 2022-12-06T11:42:38.359029 |
| revision_date | 2020-08-24T17:16:08 |
| committer_date | 2020-08-24T17:16:08 |
| github_id | 288,069,048 |
| star_events_count | 0 |
| fork_events_count | 0 |
| gha_license_id | null |
| gha_event_created_at | null |
| gha_created_at | null |
| gha_language | null |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 3,799 |
| extension | py |
| authors | ["emiliobello@Emilios-MBP.zyxel.com"] |
| author_id | emiliobello@Emilios-MBP.zyxel.com |

```python
# Import dependencies
import numpy as np
import datetime as dt
from flask import Flask, jsonify
# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
# creating an engine using the hawaii.sqlite database file
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
## declaring a Base using 'automap_base()'
Base = automap_base()
# Using the Base class to reflect the database tables
Base.prepare(engine, reflect=True)
# Saving the references to each table by assigning the classes (measurement and station), to variables
Measurement = Base.classes.measurement
Station = Base.classes.station
# Creating the app
app = Flask(__name__)
# Flask routes
@app.route("/")
def home_page():
    return (
        "Welcome to the Climate App API<br/>"
        "Available Routes:<br/>"
        "/api/v1.0/precipitation<br/>"
        "/api/v1.0/stations<br/>"
        "/api/v1.0/tobs<br/>"
        "/api/v1.0/start<br/>"
        "/api/v1.0/start/end"
    )
@app.route("/api/v1.0/precipitation")
def precipitation():
session = Session(engine)
query_date = dt.date(2017, 8, 23) - dt.timedelta(days=365)
results = session.query(Measurement.date, Measurement.prcp).\
filter(Measurement.date >= query_date).all()
session.close()
prep_dict = {k:v for k, v in results}
return jsonify(prep_dict)
@app.route("/api/v1.0/stations")
def stations():
session = Session(engine)
stations = session.query(Station.station).all()
station_list = np.ravel(stations).tolist()
session.close()
return jsonify(station_list)
@app.route("/api/v1.0/tobs")
def tobs():
session = Session(engine)
query_date = dt.date(2017, 8, 23) - dt.timedelta(days=365)
active_stations = session.query(Measurement.station, func.count(Measurement.station)).\
group_by(Measurement.station).order_by(func.count(Measurement.station).desc()).all()
station_tobs_12m = session.query(Measurement.tobs).\
filter(Measurement.station == active_stations[0][0]).\
filter(Measurement.date >= query_date).all()
session.close()
tobs_list = [tobs[0] for tobs in station_tobs_12m]
return jsonify(tobs_list)
@app.route("/api/v1.0/<start>")
def start_temp(start):
session = Session(engine)
calc_results = session.query(func.min(Measurement.tobs),\
func.avg(Measurement.tobs),\
func.max(Measurement.tobs)).\
filter(Measurement.date >= start).all()
session.close()
return_val = list(np.ravel(calc_results))
results_dict = {"Start Date": start,
"TMIN": return_val[0],
"TAVG": return_val[1],
"TMAX": return_val[2]}
return jsonify(results_dict)
@app.route("/api/v1.0/<start>/<end>")
def start_end_temp(start, end):
session = Session(engine)
calc_results = session.query(func.min(Measurement.tobs),\
func.avg(Measurement.tobs),\
func.max(Measurement.tobs)).\
filter(Measurement.date >= start).\
filter(Measurement.date <= end).all()
session.close()
return_val = list(np.ravel(calc_results))
results_dict = {"Start Date": start,
"End Date": end,
"TMIN": return_val[0],
"TAVG": return_val[1],
"TMAX": return_val[2]}
return jsonify(results_dict)
if __name__ == "__main__":
app.run(debug=True)
```
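Flask's built-in test client gives a quick way to exercise these routes without running a server; this sketch assumes Resources/hawaii.sqlite is present and that the date arguments follow the dataset's YYYY-MM-DD format:

```python
# Sketch: smoke-testing the API with Flask's test client.
with app.test_client() as client:
    print(client.get("/api/v1.0/stations").json)
    print(client.get("/api/v1.0/2017-01-01").json)
    print(client.get("/api/v1.0/2017-01-01/2017-01-07").json)
```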

| field | value |
|---|---|
| blob_id | d3e347fa12b4ed10b8370a118bbf44d9b6cbdccd |
| directory_id | 0195658958b4949cb63a044297ca1599cb9e350c |
| path | /Outpatient/models.py |
| content_id | cf2b746a455ea9da8fa9440c3652fa7538283337 |
| detected_licenses | [] |
| license_type | no_license |
| repo_name | ShiChenDMW/HospitalSystem |
| snapshot_id | bc0fd465f4fd1c1b71ea996abd46b2db95656ede |
| revision_id | d659d60ac5e1af9dac241b75bbf9948c02195a06 |
| branch_name | refs/heads/master |
| visit_date | 2020-09-24T17:05:16.126027 |
| revision_date | 2019-12-03T12:49:43 |
| committer_date | 2019-12-03T12:49:43 |
| github_id | 225,804,213 |
| star_events_count | 1 |
| fork_events_count | 0 |
| gha_license_id | null |
| gha_event_created_at | 2019-12-04T07:19:11 |
| gha_created_at | 2019-12-04T07:19:11 |
| gha_language | null |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 4,555 |
| extension | py |
| authors | ["xlyanscor@outlook.com"] |
| author_id | xlyanscor@outlook.com |

```python
from django.db import models
from django.contrib.auth.models import User, Group
class MedicalRecord(models.Model):
    # The patient
user = models.ForeignKey(User, on_delete=models.DO_NOTHING,
related_name='medical_records', verbose_name='病人id')
department = models.ForeignKey(
Group, on_delete=models.DO_NOTHING, related_name='medical_records', verbose_name='就诊科室')
onset_date = models.DateField(verbose_name='发病日期')
    # The doctor's diagnosis of the patient's condition (what the illness is; a brief summary)
diagnosis = models.CharField(max_length=255, verbose_name='诊断')
    # The doctor's detailed analysis of the condition
detail = models.TextField(null=True, blank=True, verbose_name='病情详情')
    # The patient's own description of the condition
patient_description = models.TextField(
null=True, blank=True, verbose_name='病人主诉')
onset_history = models.TextField(null=True, blank=True, verbose_name='发病史')
time = models.DateTimeField(verbose_name='就诊时间')
medicine_history = models.CharField(
max_length=256, null=True, blank=True, verbose_name='药物史')
    # Whether this patient's consultation is still open; not editable means the diagnosis is finished
can_modify = models.BooleanField(verbose_name='是否可编辑')
    # Usually a doctor
creator = models.ForeignKey(User, on_delete=models.DO_NOTHING,
related_name='created_medical_records', verbose_name='创建者id')
create_time = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')
    # Usually also a doctor
modifier = models.ForeignKey(User, on_delete=models.DO_NOTHING, null=True,
blank=True, related_name='modified_medical_records', verbose_name='修改者id')
modify_time = models.DateTimeField(
auto_now_add=True, null=True, blank=True, verbose_name='修改时间')
class Meta:
verbose_name = '病历'
verbose_name_plural = '病历'
db_table = 'medical_record'
class Prescription(models.Model):
patient = models.ForeignKey(User, on_delete=models.DO_NOTHING,
related_name='get_prescriptions', verbose_name='病人id')
IS_PAID = (
(0, '未缴费'),
(1, '已缴费')
)
is_paid = models.BooleanField(
choices=IS_PAID, verbose_name='是否已经缴费', default=0)
creator = models.ForeignKey(User, on_delete=models.DO_NOTHING,
related_name='created_prescriptions', verbose_name='医生id')
create_time = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')
class Meta:
verbose_name = '处方'
verbose_name_plural = '处方'
db_table = 'prescription'
class PrescriptionItem(models.Model):
    '''
    Records the details of each item within a prescription.
    '''
prescription = models.ForeignKey(
Prescription, on_delete=models.DO_NOTHING, related_name='items', verbose_name='处方id')
medicine = models.ForeignKey('Medicine.Medicine', on_delete=models.DO_NOTHING,
related_name='prescription_items', verbose_name='药物id')
    # e.g. "external use", "take with warm water", etc.
method = models.CharField(max_length=32, verbose_name='用法')
    # e.g. "3 times/day", etc.
ratio = models.CharField(max_length=64, verbose_name='服用频率')
    # How many days the medicine is to be taken; empty means the doctor may have other arrangements, to be explained in the notes
days = models.IntegerField(null=True, blank=True, verbose_name='服用时长')
    # Other notes (the field name "commet" is misspelled in the original schema)
commet = models.CharField(max_length=64, verbose_name='其他')
    # Total quantity of the medicine
count = models.IntegerField(verbose_name='药物总数量')
    # Quantity unit
count_unit = models.CharField(max_length=32, verbose_name='数量单位')
    # Dose per administration; stored as a string so fractional amounts such as 1/3 can be kept
dosage = models.CharField(max_length=32, verbose_name='用量')
    # Dose unit
dosage_unit = models.CharField(max_length=32, verbose_name='用量单位')
    # Skin test result; empty means no skin test was performed
skin_test = models.CharField(
max_length=32, null=True, blank=True, verbose_name='皮试结果')
class Meta:
verbose_name = '处方条目'
verbose_name_plural = '处方条目'
db_table = 'prescription_item'
```
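A hedged sketch of how these models might be used from a Django shell; the user rows, the Medicine primary key, and all field values are hypothetical:

```python
# Sketch: creating a prescription with one item (hypothetical data; assumes
# User rows and the Medicine app referenced by the ForeignKeys exist).
doctor = User.objects.get(username="doctor1")
patient = User.objects.get(username="patient1")
rx = Prescription.objects.create(patient=patient, creator=doctor, is_paid=0)
PrescriptionItem.objects.create(
    prescription=rx,
    medicine_id=1,                    # hypothetical Medicine primary key
    method="taken with warm water",
    ratio="3 times/day",
    days=7,
    commet="",                        # spelled "commet" in the model
    count=21, count_unit="tablet",
    dosage="1", dosage_unit="tablet",
)
```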

| field | value |
|---|---|
| blob_id | 4f3bb4ee122b7383120a5a53255d846ed2604481 |
| directory_id | eaaf492c68d89e7414f19128371218c10e11a54e |
| path | /1/7.py |
| content_id | 2c588e68924fc35791102605eba07e45f9a68824 |
| detected_licenses | [] |
| license_type | no_license |
| repo_name | alsofro/GB_algorithms |
| snapshot_id | 9676dfc4ca866ab56acb4c37d393f79d932d8690 |
| revision_id | 55c190574045589a591c506892e1dccf73ee15d8 |
| branch_name | refs/heads/master |
| visit_date | 2023-01-11T21:56:30.213001 |
| revision_date | 2019-05-28T05:15:51 |
| committer_date | 2019-05-28T05:15:51 |
| github_id | null |
| star_events_count | 0 |
| fork_events_count | 0 |
| gha_license_id | null |
| gha_event_created_at | null |
| gha_created_at | null |
| gha_language | null |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 537 |
| extension | py |
| authors | ["ynxela@gmail.com"] |
| author_id | ynxela@gmail.com |

```python
a = int(input('enter the length of side 1: '))
b = int(input('enter the length of side 2: '))
c = int(input('enter the length of side 3: '))
# Note: this only rules out zero-length sides; it does not check the full
# triangle inequality (each side must be shorter than the sum of the other two).
if a == 0 or b == 0 or c == 0:
    print('the triangle does not exist')
elif a == b == c:
    print('the triangle is equilateral')
elif a != b and a != c and b != c:
    print('the triangle is scalene')
else:
    print('the triangle is isosceles')
```
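(Prompts and messages translated from Russian.) A stricter existence test would also enforce the triangle inequality; a small sketch extending the exercise, not part of the original:

```python
# Sketch: full existence check via the triangle inequality.
def triangle_exists(a: int, b: int, c: int) -> bool:
    return min(a, b, c) > 0 and a + b > c and a + c > b and b + c > a
```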

| field | value |
|---|---|
| blob_id | 1673f4fe23c187dc2dbbcf3e423309850a4cfe57 |
| directory_id | ce4489aac20f835364c8c7519cdfbdfb4fff48c1 |
| path | /solver/SymmNetsV2Partial_solver.py |
| content_id | 52bcd51d1e1877fb942c74e97475461a783b08ef |
| detected_licenses | ["MIT"] |
| license_type | permissive |
| repo_name | chenchiWHU/MultiClassDA |
| snapshot_id | 5dfbc043cbfc7921b0fb5744b1fef77ddc8bb99d |
| revision_id | a7e6b6b9874957b835f9a71d2657f7727f5fa6ae |
| branch_name | refs/heads/master |
| visit_date | 2022-04-13T14:39:39.799416 |
| revision_date | 2020-04-05T08:26:04 |
| committer_date | 2020-04-05T08:26:04 |
| github_id | null |
| star_events_count | 0 |
| fork_events_count | 0 |
| gha_license_id | null |
| gha_event_created_at | null |
| gha_created_at | null |
| gha_language | null |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 14,679 |
| extension | py |
| authors | ["41507970+YabinZhang1994@users.noreply.github.com"] |
| author_id | 41507970+YabinZhang1994@users.noreply.github.com |

```python
import torch
import torch.nn as nn
import os
import math
import time
from utils.utils import to_cuda, accuracy_for_each_class, accuracy, AverageMeter, process_one_values
from config.config import cfg
import torch.nn.functional as F
from models.loss_utils import TargetDiscrimLoss, ConcatenatedCELoss, CrossEntropyClassWeighted
from .base_solver import BaseSolver
import ipdb  # interactive debugger; imported for development, not used below
class SymmNetsV2PartialSolver(BaseSolver):
def __init__(self, net, dataloaders, **kwargs):
super(SymmNetsV2PartialSolver, self).__init__(net, dataloaders, **kwargs)
self.num_classes = cfg.DATASET.NUM_CLASSES
self.TargetDiscrimLoss = TargetDiscrimLoss(num_classes=self.num_classes).cuda()
self.ConcatenatedCELoss = ConcatenatedCELoss(num_classes=self.num_classes).cuda()
self.feature_extractor = self.net['feature_extractor']
self.classifier = self.net['classifier']
self.lam = 0
class_weight_initial = torch.ones(self.num_classes) ############################ class-level weight to filter out the outlier classes.
self.class_weight_initial = class_weight_initial.cuda()
class_weight = torch.ones(self.num_classes) ############################ class-level weight to filter out the outlier classes.
self.class_weight = class_weight.cuda()
self.softweight = True
self.CELossWeight = CrossEntropyClassWeighted()
if cfg.RESUME != '':
resume_dict = torch.load(cfg.RESUME)
self.net['feature_extractor'].load_state_dict(resume_dict['feature_extractor_state_dict'])
self.net['classifier'].load_state_dict(resume_dict['classifier_state_dict'])
self.best_prec1 = resume_dict['best_prec1']
self.epoch = resume_dict['epoch']
def solve(self):
stop = False
while not stop:
stop = self.complete_training()
self.update_network()
prediction_weight, acc = self.test()
prediction_weight = prediction_weight.cuda()
if self.softweight:
self.class_weight = prediction_weight * self.lam + self.class_weight_initial * (1 - self.lam)
else:
self.class_weight = prediction_weight
print('the class weight adopted in partial DA')
print(self.class_weight)
if acc > self.best_prec1:
self.best_prec1 = acc
self.save_ckpt()
self.epoch += 1
def update_network(self, **kwargs):
stop = False
self.train_data['source']['iterator'] = iter(self.train_data['source']['loader'])
self.train_data['target']['iterator'] = iter(self.train_data['target']['loader'])
self.iters_per_epoch = len(self.train_data['target']['loader'])
iters_counter_within_epoch = 0
data_time = AverageMeter()
batch_time = AverageMeter()
classifier_loss = AverageMeter()
feature_extractor_loss = AverageMeter()
prec1_fs = AverageMeter()
prec1_ft = AverageMeter()
self.feature_extractor.train()
self.classifier.train()
end = time.time()
if self.opt.TRAIN.PROCESS_COUNTER == 'epoch':
self.lam = 2 / (1 + math.exp(-1 * 10 * self.epoch / self.opt.TRAIN.MAX_EPOCH)) - 1
self.update_lr()
print('value of lam is: %3f' % (self.lam))
while not stop:
if self.opt.TRAIN.PROCESS_COUNTER == 'iteration':
self.lam = 2 / (1 + math.exp(-1 * 10 * self.iters / (self.opt.TRAIN.MAX_EPOCH * self.iters_per_epoch))) - 1
print('value of lam is: %3f' % (self.lam))
self.update_lr()
source_data, source_gt = self.get_samples('source')
target_data, _ = self.get_samples('target')
source_data = to_cuda(source_data)
source_gt = to_cuda(source_gt)
target_data = to_cuda(target_data)
data_time.update(time.time() - end)
feature_source = self.feature_extractor(source_data)
output_source = self.classifier(feature_source)
feature_target = self.feature_extractor(target_data)
output_target = self.classifier(feature_target)
weight_concate = torch.cat((self.class_weight, self.class_weight))
loss_task_fs = self.CELossWeight(output_source[:,:self.num_classes], source_gt, self.class_weight)
loss_task_ft = self.CELossWeight(output_source[:,self.num_classes:], source_gt, self.class_weight)
loss_discrim_source = self.CELossWeight(output_source, source_gt, weight_concate)
loss_discrim_target = self.TargetDiscrimLoss(output_target)
loss_summary_classifier = loss_task_fs + loss_task_ft + loss_discrim_source + loss_discrim_target
source_gt_for_ft_in_fst = source_gt + self.num_classes
loss_confusion_source = 0.5 * self.CELossWeight(output_source, source_gt, weight_concate) + 0.5 * self.CELossWeight(output_source, source_gt_for_ft_in_fst, weight_concate)
loss_confusion_target = self.ConcatenatedCELoss(output_target)
loss_summary_feature_extractor = loss_confusion_source + self.lam * loss_confusion_target
self.optimizer_classifier.zero_grad()
loss_summary_classifier.backward(retain_graph=True)
self.optimizer_classifier.step()
self.optimizer_feature_extractor.zero_grad()
loss_summary_feature_extractor.backward()
self.optimizer_feature_extractor.step()
classifier_loss.update(loss_summary_classifier, source_data.size()[0])
feature_extractor_loss.update(loss_summary_feature_extractor, source_data.size()[0])
prec1_fs.update(accuracy(output_source[:, :self.num_classes], source_gt), source_data.size()[0])
prec1_ft.update(accuracy(output_source[:, self.num_classes:], source_gt), source_data.size()[0])
print(" Train:epoch: %d:[%d/%d], LossCla: %3f, LossFeat: %3f, AccFs: %3f, AccFt: %3f" % \
(self.epoch, iters_counter_within_epoch, self.iters_per_epoch, classifier_loss.avg, feature_extractor_loss.avg, prec1_fs.avg, prec1_ft.avg))
batch_time.update(time.time() - end)
end = time.time()
self.iters += 1
iters_counter_within_epoch += 1
if iters_counter_within_epoch >= self.iters_per_epoch:
log = open(os.path.join(self.opt.SAVE_DIR, 'log.txt'), 'a')
log.write("\n")
log.write(" Train:epoch: %d:[%d/%d], LossCla: %3f, LossFeat: %3f, AccFs: %3f, AccFt: %3f" % \
(self.epoch, iters_counter_within_epoch, self.iters_per_epoch, classifier_loss.avg, feature_extractor_loss.avg, prec1_fs.avg, prec1_ft.avg))
log.close()
stop = True
def test(self):
self.feature_extractor.eval()
self.classifier.eval()
prec1_fs = AverageMeter()
prec1_ft = AverageMeter()
counter_all_fs = torch.FloatTensor(self.opt.DATASET.NUM_CLASSES).fill_(0)
counter_all_ft = torch.FloatTensor(self.opt.DATASET.NUM_CLASSES).fill_(0)
counter_acc_fs = torch.FloatTensor(self.opt.DATASET.NUM_CLASSES).fill_(0)
counter_acc_ft = torch.FloatTensor(self.opt.DATASET.NUM_CLASSES).fill_(0)
class_weight = torch.zeros(self.num_classes)
class_weight = class_weight.cuda()
count = 0
for i, (input, target) in enumerate(self.test_data['loader']):
input, target = to_cuda(input), to_cuda(target)
with torch.no_grad():
feature_test = self.feature_extractor(input)
output_test = self.classifier(feature_test)
prob = F.softmax(output_test[:, self.num_classes:], dim=1)
class_weight = class_weight + prob.data.sum(0)
count = count + input.size(0)
if self.opt.EVAL_METRIC == 'accu':
prec1_fs_iter = accuracy(output_test[:, :self.num_classes], target)
prec1_ft_iter = accuracy(output_test[:, self.num_classes:], target)
prec1_fs.update(prec1_fs_iter, input.size(0))
prec1_ft.update(prec1_ft_iter, input.size(0))
if i % self.opt.PRINT_STEP == 0:
print(" Test:epoch: %d:[%d/%d], AccFs: %3f, AccFt: %3f" % \
(self.epoch, i, len(self.test_data['loader']), prec1_fs.avg, prec1_ft.avg))
elif self.opt.EVAL_METRIC == 'accu_mean':
prec1_ft_iter = accuracy(output_test[:, self.num_classes:], target)
prec1_ft.update(prec1_ft_iter, input.size(0))
counter_all_fs, counter_acc_fs = accuracy_for_each_class(output_test[:, :self.num_classes], target, counter_all_fs, counter_acc_fs)
counter_all_ft, counter_acc_ft = accuracy_for_each_class(output_test[:, self.num_classes:], target, counter_all_ft, counter_acc_ft)
if i % self.opt.PRINT_STEP == 0:
print(" Test:epoch: %d:[%d/%d], Task: %3f" % \
(self.epoch, i, len(self.test_data['loader']), prec1_ft.avg))
else:
raise NotImplementedError
acc_for_each_class_fs = counter_acc_fs / counter_all_fs
acc_for_each_class_ft = counter_acc_ft / counter_all_ft
log = open(os.path.join(self.opt.SAVE_DIR, 'log.txt'), 'a')
log.write("\n")
class_weight = class_weight / count
class_weight = class_weight / max(class_weight)
if self.opt.EVAL_METRIC == 'accu':
log.write(
" Test:epoch: %d, AccFs: %3f, AccFt: %3f" % \
(self.epoch, prec1_fs.avg, prec1_ft.avg))
log.close()
return class_weight, max(prec1_fs.avg, prec1_ft.avg)
elif self.opt.EVAL_METRIC == 'accu_mean':
log.write(
" Test:epoch: %d, AccFs: %3f, AccFt: %3f" % \
(self.epoch,acc_for_each_class_fs.mean(), acc_for_each_class_ft.mean()))
log.write("\nClass-wise Acc of Ft:") ## based on the task classifier.
for i in range(self.opt.DATASET.NUM_CLASSES):
if i == 0:
log.write("%dst: %3f" % (i + 1, acc_for_each_class_ft[i]))
elif i == 1:
log.write(", %dnd: %3f" % (i + 1, acc_for_each_class_ft[i]))
elif i == 2:
log.write(", %drd: %3f" % (i + 1, acc_for_each_class_ft[i]))
else:
log.write(", %dth: %3f" % (i + 1, acc_for_each_class_ft[i]))
log.close()
return class_weight, max(acc_for_each_class_ft.mean(), acc_for_each_class_fs.mean())
def build_optimizer(self):
if self.opt.TRAIN.OPTIMIZER == 'SGD': ## some params may not contribute the loss_all, thus they are not updated in the training process.
self.optimizer_feature_extractor = torch.optim.SGD([
{'params': self.net['feature_extractor'].module.conv1.parameters(), 'name': 'pre-trained'},
{'params': self.net['feature_extractor'].module.bn1.parameters(), 'name': 'pre-trained'},
{'params': self.net['feature_extractor'].module.layer1.parameters(), 'name': 'pre-trained'},
{'params': self.net['feature_extractor'].module.layer2.parameters(), 'name': 'pre-trained'},
{'params': self.net['feature_extractor'].module.layer3.parameters(), 'name': 'pre-trained'},
{'params': self.net['feature_extractor'].module.layer4.parameters(), 'name': 'pre-trained'},
],
lr=self.opt.TRAIN.BASE_LR,
momentum=self.opt.TRAIN.MOMENTUM,
weight_decay=self.opt.TRAIN.WEIGHT_DECAY,
nesterov=True)
self.optimizer_classifier = torch.optim.SGD([
{'params': self.net['classifier'].parameters(), 'name': 'new-added'},
],
lr=self.opt.TRAIN.BASE_LR,
momentum=self.opt.TRAIN.MOMENTUM,
weight_decay=self.opt.TRAIN.WEIGHT_DECAY,
nesterov=True)
else:
raise NotImplementedError
print('Optimizer built')
def update_lr(self):
if self.opt.TRAIN.LR_SCHEDULE == 'inv':
if self.opt.TRAIN.PROCESS_COUNTER == 'epoch':
lr = self.opt.TRAIN.BASE_LR / pow((1 + self.opt.INV.ALPHA * self.epoch / self.opt.TRAIN.MAX_EPOCH), self.opt.INV.BETA)
elif self.opt.TRAIN.PROCESS_COUNTER == 'iteration':
lr = self.opt.TRAIN.BASE_LR / pow((1 + self.opt.INV.ALPHA * self.iters / (self.opt.TRAIN.MAX_EPOCH * self.iters_per_epoch)), self.opt.INV.BETA)
else:
raise NotImplementedError
elif self.opt.TRAIN.LR_SCHEDULE == 'fix':
lr = self.opt.TRAIN.BASE_LR
else:
raise NotImplementedError
lr_pretrain = lr * 0.1
print('the lr is: %3f' % (lr))
for param_group in self.optimizer_feature_extractor.param_groups:
if param_group['name'] == 'pre-trained':
param_group['lr'] = lr_pretrain
elif param_group['name'] == 'new-added':
param_group['lr'] = lr
            elif param_group['name'] == 'fixed':  ## fixing the lr at 0 does not freeze the running mean/var of the BN layers
param_group['lr'] = 0
for param_group in self.optimizer_classifier.param_groups:
if param_group['name'] == 'pre-trained':
param_group['lr'] = lr_pretrain
elif param_group['name'] == 'new-added':
param_group['lr'] = lr
            elif param_group['name'] == 'fixed':  ## fixing the lr at 0 does not freeze the running mean/var of the BN layers
param_group['lr'] = 0
def save_ckpt(self):
log = open(os.path.join(self.opt.SAVE_DIR, 'log.txt'), 'a')
log.write(" Best Acc so far: %3f" % (self.best_prec1))
log.close()
if self.opt.TRAIN.SAVING:
save_path = self.opt.SAVE_DIR
ckpt_resume = os.path.join(save_path, 'ckpt_%d.resume' % (self.loop))
torch.save({'epoch': self.epoch,
'best_prec1': self.best_prec1,
'feature_extractor_state_dict': self.net['feature_extractor'].state_dict(),
'classifier_state_dict': self.net['classifier'].state_dict()
}, ckpt_resume)
```
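The adaptation weight `self.lam` follows the DANN-style ramp-up computed inline in update_network(); isolated below for clarity, with p the fraction of training completed (epoch- or iteration-based, as in the two branches above):

```python
# Sketch: the ramp-up behind `self.lam`, lam(p) = 2 / (1 + e^(-10 p)) - 1.
import math

def lam_schedule(p: float) -> float:
    return 2.0 / (1.0 + math.exp(-10.0 * p)) - 1.0

print(lam_schedule(0.0), lam_schedule(0.5), lam_schedule(1.0))  # 0.0, ~0.987, ~0.9999
```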

| field | value |
|---|---|
| blob_id | cb640e13ab60d47da581adfcdb74d4b79c9b31cf |
| directory_id | 1d0a223b743b005cd2ecd904337178e322e63534 |
| path | /Chapter3/P95.py |
| content_id | 24c0776ee034f175f4fd3d56354e536ca3c2c733 |
| detected_licenses | [] |
| license_type | no_license |
| repo_name | Stefanroets180/all-my-Python-work |
| snapshot_id | 285607ce1ef50aac4897e0721ead4daca01fa6e0 |
| revision_id | d7937b51a309ebd051bef90e78154447b6e9a8ea |
| branch_name | refs/heads/main |
| visit_date | 2023-03-27T10:15:54.793489 |
| revision_date | 2021-03-18T12:26:20 |
| committer_date | 2021-03-18T12:26:20 |
| github_id | 349,063,278 |
| star_events_count | 0 |
| fork_events_count | 0 |
| gha_license_id | null |
| gha_event_created_at | null |
| gha_created_at | null |
| gha_language | null |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 106 |
| extension | py |
| authors | ["61413955+Stefanroets180@users.noreply.github.com"] |
| author_id | 61413955+Stefanroets180@users.noreply.github.com |

```python
cars = ['bmw', 'audi', 'toyota', 'subaru']
print(cars)
# A missing quotation mark (') in the list above would raise "SyntaxError: invalid syntax"
```

| field | value |
|---|---|
| blob_id | 1b490f3ffe47d1e1b01a6efc80229664cf17dd0e |
| directory_id | 5627e703c4847a55d16e097e60fbde1af6ef87a1 |
| path | /MT4ImgRec/transforms/transforms.py |
| content_id | 106994b8fbd4c69fba1faf79a4a6470c71307d63 |
| detected_licenses | [] |
| license_type | no_license |
| repo_name | Miracy/MT4ImgRec |
| snapshot_id | 008d1ba150cbb21acc1b202340158e4c1886ca2a |
| revision_id | 87813f78d1f9dd61de5cc3b7f286333f6534ca17 |
| branch_name | refs/heads/master |
| visit_date | 2023-04-20T09:16:05.272480 |
| revision_date | 2021-05-10T03:31:39 |
| committer_date | 2021-05-10T03:31:39 |
| github_id | 360,831,064 |
| star_events_count | 2 |
| fork_events_count | 0 |
| gha_license_id | null |
| gha_event_created_at | null |
| gha_created_at | null |
| gha_language | null |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 37,002 |
| extension | py |

```python
from __future__ import division
import torch
import math
import random
from PIL import Image
try:
import accimage
except ImportError:
accimage = None
import numpy as np
import numbers
import types
import collections.abc
import warnings
from . import functional as F
__all__ = ["Compose", "ToTensor", "ToPILImage", "Normalize", "Resize", "Scale", "CenterCrop", "Pad",
"Lambda", "RandomApply", "RandomChoice", "RandomOrder", "RandomCrop", "RandomHorizontalFlip",
"RandomVerticalFlip", "RandomResizedCrop", "RandomSizedCrop", "FiveCrop", "TenCrop", "LinearTransformation",
"ColorJitter", "RandomRotation", "RandomAffine", "Grayscale", "RandomGrayscale", "Randomswap"]
_pil_interpolation_to_str = {
Image.NEAREST: 'PIL.Image.NEAREST',
Image.BILINEAR: 'PIL.Image.BILINEAR',
Image.BICUBIC: 'PIL.Image.BICUBIC',
Image.LANCZOS: 'PIL.Image.LANCZOS',
}
class Compose(object):
"""Composes several transforms together.
Args:
transforms (list of ``Transform`` objects): list of transforms to compose.
Example:
>>> transforms.Compose([
>>> transforms.CenterCrop(10),
>>> transforms.ToTensor(),
>>> ])
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, img):
for t in self.transforms:
img = t(img)
return img
def __repr__(self):
format_string = self.__class__.__name__ + '('
for t in self.transforms:
format_string += '\n'
format_string += ' {0}'.format(t)
format_string += '\n)'
return format_string
class ToTensor(object):
"""Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.
Converts a PIL Image or numpy.ndarray (H x W x C) in the range
[0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0].
"""
def __call__(self, pic):
"""
Args:
pic (PIL Image or numpy.ndarray): Image to be converted to tensor.
Returns:
Tensor: Converted image.
"""
return F.to_tensor(pic)
def __repr__(self):
return self.__class__.__name__ + '()'
class ToPILImage(object):
"""Convert a tensor or an ndarray to PIL Image.
Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape
H x W x C to a PIL Image while preserving the value range.
Args:
mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
If ``mode`` is ``None`` (default) there are some assumptions made about the input data:
1. If the input has 3 channels, the ``mode`` is assumed to be ``RGB``.
2. If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``.
3. If the input has 1 channel, the ``mode`` is determined by the data type (i,e,
``int``, ``float``, ``short``).
.. _PIL.Image mode: http://pillow.readthedocs.io/en/3.4.x/handbook/concepts.html#modes
"""
def __init__(self, mode=None):
self.mode = mode
def __call__(self, pic):
"""
Args:
pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.
Returns:
PIL Image: Image converted to PIL Image.
"""
return F.to_pil_image(pic, self.mode)
def __repr__(self):
format_string = self.__class__.__name__ + '('
if self.mode is not None:
format_string += 'mode={0}'.format(self.mode)
format_string += ')'
return format_string
class Normalize(object):
"""Normalize a tensor image with mean and standard deviation.
Given mean: ``(M1,...,Mn)`` and std: ``(S1,..,Sn)`` for ``n`` channels, this transform
will normalize each channel of the input ``torch.*Tensor`` i.e.
``input[channel] = (input[channel] - mean[channel]) / std[channel]``
Args:
mean (sequence): Sequence of means for each channel.
std (sequence): Sequence of standard deviations for each channel.
"""
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, tensor):
"""
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
Returns:
Tensor: Normalized Tensor image.
"""
return F.normalize(tensor, self.mean, self.std)
def __repr__(self):
return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)
class Randomswap(object):
def __init__(self, size):
self.size = size
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
assert len(size) == 2, "Please provide only two dimensions (h, w) for size."
self.size = size
def __call__(self, img):
return F.swap(img, self.size)
def __repr__(self):
return self.__class__.__name__ + '(size={0})'.format(self.size)
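# NOTE (sketch, kept as a comment because RandomResizedCrop is defined further
# down this module): a typical pipeline combining the stock transforms with the
# custom Randomswap above, e.g.
#     preprocess = Compose([
#         RandomResizedCrop(224),
#         RandomHorizontalFlip(),
#         Randomswap((2, 2)),   # patch-shuffle augmentation
#         ToTensor(),
#         Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
#     ])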
class Resize(object):
"""Resize the input PIL Image to the given size.
Args:
size (sequence or int): Desired output size. If size is a sequence like
(h, w), output size will be matched to this. If size is an int,
smaller edge of the image will be matched to this number.
i.e, if height > width, then image will be rescaled to
(size * height / width, size)
interpolation (int, optional): Desired interpolation. Default is
``PIL.Image.BILINEAR``
"""
def __init__(self, size, interpolation=Image.BILINEAR):
        assert isinstance(size, int) or (isinstance(size, collections.abc.Iterable) and len(size) == 2)  # collections.abc: the bare collections alias was removed in Python 3.10
self.size = size
self.interpolation = interpolation
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be scaled.
Returns:
PIL Image: Rescaled image.
"""
return F.resize(img, self.size, self.interpolation)
def __repr__(self):
interpolate_str = _pil_interpolation_to_str[self.interpolation]
return self.__class__.__name__ + '(size={0}, interpolation={1})'.format(self.size, interpolate_str)
class Scale(Resize):
"""
Note: This transform is deprecated in favor of Resize.
"""
def __init__(self, *args, **kwargs):
warnings.warn("The use of the transforms.Scale transform is deprecated, " +
"please use transforms.Resize instead.")
super(Scale, self).__init__(*args, **kwargs)
class CenterCrop(object):
"""Crops the given PIL Image at the center.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made.
"""
def __init__(self, size):
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be cropped.
Returns:
PIL Image: Cropped image.
"""
return F.center_crop(img, self.size)
def __repr__(self):
return self.__class__.__name__ + '(size={0})'.format(self.size)
class Pad(object):
"""Pad the given PIL Image on all sides with the given "pad" value.
Args:
padding (int or tuple): Padding on each border. If a single int is provided this
is used to pad all borders. If tuple of length 2 is provided this is the padding
on left/right and top/bottom respectively. If a tuple of length 4 is provided
this is the padding for the left, top, right and bottom borders
respectively.
fill: Pixel fill value for constant fill. Default is 0. If a tuple of
length 3, it is used to fill R, G, B channels respectively.
This value is only used when the padding_mode is constant
padding_mode: Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant.
constant: pads with a constant value, this value is specified with fill
edge: pads with the last value at the edge of the image
reflect: pads with reflection of image (without repeating the last value on the edge)
padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
will result in [3, 2, 1, 2, 3, 4, 3, 2]
symmetric: pads with reflection of image (repeating the last value on the edge)
padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
will result in [2, 1, 1, 2, 3, 4, 4, 3]
"""
def __init__(self, padding, fill=0, padding_mode='constant'):
assert isinstance(padding, (numbers.Number, tuple))
assert isinstance(fill, (numbers.Number, str, tuple))
assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric']
        if isinstance(padding, collections.abc.Sequence) and len(padding) not in [2, 4]:
raise ValueError("Padding must be an int or a 2, or 4 element tuple, not a " +
"{} element tuple".format(len(padding)))
self.padding = padding
self.fill = fill
self.padding_mode = padding_mode
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be padded.
Returns:
PIL Image: Padded image.
"""
return F.pad(img, self.padding, self.fill, self.padding_mode)
def __repr__(self):
return self.__class__.__name__ + '(padding={0}, fill={1}, padding_mode={2})'.\
format(self.padding, self.fill, self.padding_mode)
class Lambda(object):
"""Apply a user-defined lambda as a transform.
Args:
lambd (function): Lambda/function to be used for transform.
"""
def __init__(self, lambd):
assert isinstance(lambd, types.LambdaType)
self.lambd = lambd
def __call__(self, img):
return self.lambd(img)
def __repr__(self):
return self.__class__.__name__ + '()'
class RandomTransforms(object):
"""Base class for a list of transformations with randomness
Args:
transforms (list or tuple): list of transformations
"""
def __init__(self, transforms):
assert isinstance(transforms, (list, tuple))
self.transforms = transforms
def __call__(self, *args, **kwargs):
raise NotImplementedError()
def __repr__(self):
format_string = self.__class__.__name__ + '('
for t in self.transforms:
format_string += '\n'
format_string += ' {0}'.format(t)
format_string += '\n)'
return format_string
class RandomApply(RandomTransforms):
"""Apply randomly a list of transformations with a given probability
Args:
transforms (list or tuple): list of transformations
p (float): probability
"""
def __init__(self, transforms, p=0.5):
super(RandomApply, self).__init__(transforms)
self.p = p
def __call__(self, img):
if self.p < random.random():
return img
for t in self.transforms:
img = t(img)
return img
def __repr__(self):
format_string = self.__class__.__name__ + '('
format_string += '\n p={}'.format(self.p)
for t in self.transforms:
format_string += '\n'
format_string += ' {0}'.format(t)
format_string += '\n)'
return format_string
class RandomOrder(RandomTransforms):
"""Apply a list of transformations in a random order
"""
def __call__(self, img):
order = list(range(len(self.transforms)))
random.shuffle(order)
for i in order:
img = self.transforms[i](img)
return img
class RandomChoice(RandomTransforms):
"""Apply single transformation randomly picked from a list
"""
def __call__(self, img):
t = random.choice(self.transforms)
return t(img)
class RandomCrop(object):
"""Crop the given PIL Image at a random location.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made.
padding (int or sequence, optional): Optional padding on each border
of the image. Default is 0, i.e no padding. If a sequence of length
4 is provided, it is used to pad left, top, right, bottom borders
respectively.
pad_if_needed (boolean): It will pad the image if smaller than the
desired size to avoid raising an exception.
"""
def __init__(self, size, padding=0, pad_if_needed=False):
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
self.padding = padding
self.pad_if_needed = pad_if_needed
@staticmethod
def get_params(img, output_size):
"""Get parameters for ``crop`` for a random crop.
Args:
img (PIL Image): Image to be cropped.
output_size (tuple): Expected output size of the crop.
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.
"""
w, h = img.size
th, tw = output_size
if w == tw and h == th:
return 0, 0, h, w
i = random.randint(0, h - th)
j = random.randint(0, w - tw)
return i, j, th, tw
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be cropped.
Returns:
PIL Image: Cropped image.
"""
if self.padding > 0:
img = F.pad(img, self.padding)
# pad the width if needed
if self.pad_if_needed and img.size[0] < self.size[1]:
img = F.pad(img, (int((1 + self.size[1] - img.size[0]) / 2), 0))
# pad the height if needed
if self.pad_if_needed and img.size[1] < self.size[0]:
img = F.pad(img, (0, int((1 + self.size[0] - img.size[1]) / 2)))
i, j, h, w = self.get_params(img, self.size)
return F.crop(img, i, j, h, w)
def __repr__(self):
return self.__class__.__name__ + '(size={0}, padding={1})'.format(self.size, self.padding)
class RandomHorizontalFlip(object):
"""Horizontally flip the given PIL Image randomly with a given probability.
Args:
p (float): probability of the image being flipped. Default value is 0.5
"""
def __init__(self, p=0.5):
self.p = p
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be flipped.
Returns:
PIL Image: Randomly flipped image.
"""
if random.random() < self.p:
return F.hflip(img)
return img
def __repr__(self):
return self.__class__.__name__ + '(p={})'.format(self.p)
class RandomVerticalFlip(object):
"""Vertically flip the given PIL Image randomly with a given probability.
Args:
p (float): probability of the image being flipped. Default value is 0.5
"""
def __init__(self, p=0.5):
self.p = p
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be flipped.
Returns:
PIL Image: Randomly flipped image.
"""
if random.random() < self.p:
return F.vflip(img)
return img
def __repr__(self):
return self.__class__.__name__ + '(p={})'.format(self.p)
class RandomResizedCrop(object):
"""Crop the given PIL Image to random size and aspect ratio.
A crop of random size (default: of 0.08 to 1.0) of the original size and a random
aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop
is finally resized to given size.
This is popularly used to train the Inception networks.
Args:
size: expected output size of each edge
scale: range of size of the origin size cropped
ratio: range of aspect ratio of the origin aspect ratio cropped
interpolation: Default: PIL.Image.BILINEAR
"""
def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), interpolation=Image.BILINEAR):
self.size = (size, size)
self.interpolation = interpolation
self.scale = scale
self.ratio = ratio
@staticmethod
def get_params(img, scale, ratio):
"""Get parameters for ``crop`` for a random sized crop.
Args:
img (PIL Image): Image to be cropped.
scale (tuple): range of size of the origin size cropped
ratio (tuple): range of aspect ratio of the origin aspect ratio cropped
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for a random
sized crop.
"""
for attempt in range(10):
area = img.size[0] * img.size[1]
target_area = random.uniform(*scale) * area
aspect_ratio = random.uniform(*ratio)
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if random.random() < 0.5:
w, h = h, w
if w <= img.size[0] and h <= img.size[1]:
i = random.randint(0, img.size[1] - h)
j = random.randint(0, img.size[0] - w)
return i, j, h, w
# Fallback
w = min(img.size[0], img.size[1])
i = (img.size[1] - w) // 2
j = (img.size[0] - w) // 2
return i, j, w, w
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be cropped and resized.
Returns:
PIL Image: Randomly cropped and resized image.
"""
i, j, h, w = self.get_params(img, self.scale, self.ratio)
return F.resized_crop(img, i, j, h, w, self.size, self.interpolation)
def __repr__(self):
interpolate_str = _pil_interpolation_to_str[self.interpolation]
format_string = self.__class__.__name__ + '(size={0}'.format(self.size)
format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale))
format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio))
format_string += ', interpolation={0})'.format(interpolate_str)
return format_string
class RandomSizedCrop(RandomResizedCrop):
"""
Note: This transform is deprecated in favor of RandomResizedCrop.
"""
def __init__(self, *args, **kwargs):
warnings.warn("The use of the transforms.RandomSizedCrop transform is deprecated, " +
"please use transforms.RandomResizedCrop instead.")
super(RandomSizedCrop, self).__init__(*args, **kwargs)
class FiveCrop(object):
"""Crop the given PIL Image into four corners and the central crop
.. Note::
This transform returns a tuple of images and there may be a mismatch in the number of
inputs and targets your Dataset returns. See below for an example of how to deal with
this.
Args:
size (sequence or int): Desired output size of the crop. If size is an ``int``
instead of sequence like (h, w), a square crop of size (size, size) is made.
Example:
>>> transform = Compose([
>>> FiveCrop(size), # this is a list of PIL Images
>>> Lambda(lambda crops: torch.stack([ToTensor()(crop) for crop in crops])) # returns a 4D tensor
>>> ])
>>> #In your test loop you can do the following:
>>> input, target = batch # input is a 5d tensor, target is 2d
>>> bs, ncrops, c, h, w = input.size()
>>> result = models(input.view(-1, c, h, w)) # fuse batch size and ncrops
>>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops
"""
def __init__(self, size):
self.size = size
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
assert len(size) == 2, "Please provide only two dimensions (h, w) for size."
self.size = size
def __call__(self, img):
return F.five_crop(img, self.size)
def __repr__(self):
return self.__class__.__name__ + '(size={0})'.format(self.size)
class TenCrop(object):
"""Crop the given PIL Image into four corners and the central crop plus the flipped version of
these (horizontal flipping is used by default)
.. Note::
This transform returns a tuple of images and there may be a mismatch in the number of
inputs and targets your Dataset returns. See below for an example of how to deal with
this.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made.
vertical_flip(bool): Use vertical flipping instead of horizontal
Example:
>>> transform = Compose([
>>> TenCrop(size), # this is a list of PIL Images
>>> Lambda(lambda crops: torch.stack([ToTensor()(crop) for crop in crops])) # returns a 4D tensor
>>> ])
>>> #In your test loop you can do the following:
>>> input, target = batch # input is a 5d tensor, target is 2d
>>> bs, ncrops, c, h, w = input.size()
>>> result = models(input.view(-1, c, h, w)) # fuse batch size and ncrops
>>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops
"""
def __init__(self, size, vertical_flip=False):
self.size = size
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
assert len(size) == 2, "Please provide only two dimensions (h, w) for size."
self.size = size
self.vertical_flip = vertical_flip
def __call__(self, img):
return F.ten_crop(img, self.size, self.vertical_flip)
def __repr__(self):
return self.__class__.__name__ + '(size={0}, vertical_flip={1})'.format(self.size, self.vertical_flip)
class LinearTransformation(object):
"""Transform a tensor image with a square transformation matrix computed
offline.
Given transformation_matrix, will flatten the torch.*Tensor, compute the dot
product with the transformation matrix and reshape the tensor to its
original shape.
Applications:
- whitening: zero-center the data, compute the data covariance matrix
[D x D] with np.dot(X.T, X), perform SVD on this matrix and
pass it as transformation_matrix.
Args:
transformation_matrix (Tensor): tensor [D x D], D = C x H x W
"""
def __init__(self, transformation_matrix):
if transformation_matrix.size(0) != transformation_matrix.size(1):
raise ValueError("transformation_matrix should be square. Got " +
"[{} x {}] rectangular matrix.".format(*transformation_matrix.size()))
self.transformation_matrix = transformation_matrix
def __call__(self, tensor):
"""
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be whitened.
Returns:
Tensor: Transformed image.
"""
if tensor.size(0) * tensor.size(1) * tensor.size(2) != self.transformation_matrix.size(0):
raise ValueError("tensor and transformation matrix have incompatible shape." +
"[{} x {} x {}] != ".format(*tensor.size()) +
"{}".format(self.transformation_matrix.size(0)))
flat_tensor = tensor.view(1, -1)
transformed_tensor = torch.mm(flat_tensor, self.transformation_matrix)
tensor = transformed_tensor.view(tensor.size())
return tensor
def __repr__(self):
format_string = self.__class__.__name__ + '('
format_string += (str(self.transformation_matrix.numpy().tolist()) + ')')
return format_string
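# A minimal sketch of building a whitening matrix as the docstring above
# describes (illustrative shapes only; X is an (N, D) numpy matrix of
# flattened images):
#   X = X - X.mean(axis=0)                # zero-center the data
#   cov = np.dot(X.T, X) / X.shape[0]     # [D x D] covariance
#   U, S, _ = np.linalg.svd(cov)
#   W = np.dot(U, np.dot(np.diag(1.0 / np.sqrt(S + 1e-5)), U.T))  # ZCA matrix
#   whiten = LinearTransformation(torch.from_numpy(W).float())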
class ColorJitter(object):
"""Randomly change the brightness, contrast and saturation of an image.
Args:
brightness (float): How much to jitter brightness. brightness_factor
is chosen uniformly from [max(0, 1 - brightness), 1 + brightness].
contrast (float): How much to jitter contrast. contrast_factor
is chosen uniformly from [max(0, 1 - contrast), 1 + contrast].
saturation (float): How much to jitter saturation. saturation_factor
is chosen uniformly from [max(0, 1 - saturation), 1 + saturation].
hue(float): How much to jitter hue. hue_factor is chosen uniformly from
[-hue, hue]. Should be >=0 and <= 0.5.
"""
def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
self.brightness = brightness
self.contrast = contrast
self.saturation = saturation
self.hue = hue
@staticmethod
def get_params(brightness, contrast, saturation, hue):
"""Get a randomized transform to be applied on image.
Arguments are same as that of __init__.
Returns:
Transform which randomly adjusts brightness, contrast and
saturation in a random order.
"""
transforms = []
if brightness > 0:
brightness_factor = random.uniform(max(0, 1 - brightness), 1 + brightness)
transforms.append(Lambda(lambda img: F.adjust_brightness(img, brightness_factor)))
if contrast > 0:
contrast_factor = random.uniform(max(0, 1 - contrast), 1 + contrast)
transforms.append(Lambda(lambda img: F.adjust_contrast(img, contrast_factor)))
if saturation > 0:
saturation_factor = random.uniform(max(0, 1 - saturation), 1 + saturation)
transforms.append(Lambda(lambda img: F.adjust_saturation(img, saturation_factor)))
if hue > 0:
hue_factor = random.uniform(-hue, hue)
transforms.append(Lambda(lambda img: F.adjust_hue(img, hue_factor)))
random.shuffle(transforms)
transform = Compose(transforms)
return transform
def __call__(self, img):
"""
Args:
img (PIL Image): Input image.
Returns:
PIL Image: Color jittered image.
"""
transform = self.get_params(self.brightness, self.contrast,
self.saturation, self.hue)
return transform(img)
def __repr__(self):
format_string = self.__class__.__name__ + '('
format_string += 'brightness={0}'.format(self.brightness)
format_string += ', contrast={0}'.format(self.contrast)
format_string += ', saturation={0}'.format(self.saturation)
format_string += ', hue={0})'.format(self.hue)
return format_string
class RandomRotation(object):
"""Rotate the image by angle.
Args:
degrees (sequence or float or int): Range of degrees to select from.
If degrees is a number instead of sequence like (min, max), the range of degrees
will be (-degrees, +degrees).
resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional):
An optional resampling filter.
See http://pillow.readthedocs.io/en/3.4.x/handbook/concepts.html#filters
If omitted, or if the image has mode "1" or "P", it is set to PIL.Image.NEAREST.
expand (bool, optional): Optional expansion flag.
If true, expands the output to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the input image.
Note that the expand flag assumes rotation around the center and no translation.
center (2-tuple, optional): Optional center of rotation.
Origin is the upper left corner.
Default is the center of the image.
"""
def __init__(self, degrees, resample=False, expand=False, center=None):
if isinstance(degrees, numbers.Number):
if degrees < 0:
raise ValueError("If degrees is a single number, it must be positive.")
self.degrees = (-degrees, degrees)
else:
if len(degrees) != 2:
raise ValueError("If degrees is a sequence, it must be of len 2.")
self.degrees = degrees
self.resample = resample
self.expand = expand
self.center = center
@staticmethod
def get_params(degrees):
"""Get parameters for ``rotate`` for a random rotation.
Returns:
sequence: params to be passed to ``rotate`` for random rotation.
"""
angle = random.uniform(degrees[0], degrees[1])
return angle
def __call__(self, img):
"""
img (PIL Image): Image to be rotated.
Returns:
PIL Image: Rotated image.
"""
angle = self.get_params(self.degrees)
return F.rotate(img, angle, self.resample, self.expand, self.center)
def __repr__(self):
format_string = self.__class__.__name__ + '(degrees={0}'.format(self.degrees)
format_string += ', resample={0}'.format(self.resample)
format_string += ', expand={0}'.format(self.expand)
if self.center is not None:
format_string += ', center={0}'.format(self.center)
format_string += ')'
return format_string
class RandomAffine(object):
"""Random affine transformation of the image keeping center invariant
Args:
degrees (sequence or float or int): Range of degrees to select from.
If degrees is a number instead of sequence like (min, max), the range of degrees
            will be (-degrees, +degrees). Set to 0 to deactivate rotations.
translate (tuple, optional): tuple of maximum absolute fraction for horizontal
and vertical translations. For example translate=(a, b), then horizontal shift
is randomly sampled in the range -img_width * a < dx < img_width * a and vertical shift is
randomly sampled in the range -img_height * b < dy < img_height * b. Will not translate by default.
scale (tuple, optional): scaling factor interval, e.g (a, b), then scale is
randomly sampled from the range a <= scale <= b. Will keep original scale by default.
shear (sequence or float or int, optional): Range of degrees to select from.
If degrees is a number instead of sequence like (min, max), the range of degrees
will be (-degrees, +degrees). Will not apply shear by default
resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional):
An optional resampling filter.
See http://pillow.readthedocs.io/en/3.4.x/handbook/concepts.html#filters
If omitted, or if the image has mode "1" or "P", it is set to PIL.Image.NEAREST.
fillcolor (int): Optional fill color for the area outside the transform in the output image. (Pillow>=5.0.0)
"""
def __init__(self, degrees, translate=None, scale=None, shear=None, resample=False, fillcolor=0):
if isinstance(degrees, numbers.Number):
if degrees < 0:
raise ValueError("If degrees is a single number, it must be positive.")
self.degrees = (-degrees, degrees)
else:
assert isinstance(degrees, (tuple, list)) and len(degrees) == 2, \
"degrees should be a list or tuple and it must be of length 2."
self.degrees = degrees
if translate is not None:
assert isinstance(translate, (tuple, list)) and len(translate) == 2, \
"translate should be a list or tuple and it must be of length 2."
for t in translate:
if not (0.0 <= t <= 1.0):
raise ValueError("translation values should be between 0 and 1")
self.translate = translate
if scale is not None:
assert isinstance(scale, (tuple, list)) and len(scale) == 2, \
"scale should be a list or tuple and it must be of length 2."
for s in scale:
if s <= 0:
raise ValueError("scale values should be positive")
self.scale = scale
if shear is not None:
if isinstance(shear, numbers.Number):
if shear < 0:
raise ValueError("If shear is a single number, it must be positive.")
self.shear = (-shear, shear)
else:
assert isinstance(shear, (tuple, list)) and len(shear) == 2, \
"shear should be a list or tuple and it must be of length 2."
self.shear = shear
else:
self.shear = shear
self.resample = resample
self.fillcolor = fillcolor
@staticmethod
def get_params(degrees, translate, scale_ranges, shears, img_size):
"""Get parameters for affine transformation
Returns:
sequence: params to be passed to the affine transformation
"""
angle = random.uniform(degrees[0], degrees[1])
if translate is not None:
max_dx = translate[0] * img_size[0]
max_dy = translate[1] * img_size[1]
translations = (np.round(random.uniform(-max_dx, max_dx)),
np.round(random.uniform(-max_dy, max_dy)))
else:
translations = (0, 0)
if scale_ranges is not None:
scale = random.uniform(scale_ranges[0], scale_ranges[1])
else:
scale = 1.0
if shears is not None:
shear = random.uniform(shears[0], shears[1])
else:
shear = 0.0
return angle, translations, scale, shear
def __call__(self, img):
"""
img (PIL Image): Image to be transformed.
Returns:
PIL Image: Affine transformed image.
"""
ret = self.get_params(self.degrees, self.translate, self.scale, self.shear, img.size)
return F.affine(img, *ret, resample=self.resample, fillcolor=self.fillcolor)
def __repr__(self):
s = '{name}(degrees={degrees}'
if self.translate is not None:
s += ', translate={translate}'
if self.scale is not None:
s += ', scale={scale}'
if self.shear is not None:
s += ', shear={shear}'
if self.resample > 0:
s += ', resample={resample}'
if self.fillcolor != 0:
s += ', fillcolor={fillcolor}'
s += ')'
d = dict(self.__dict__)
d['resample'] = _pil_interpolation_to_str[d['resample']]
return s.format(name=self.__class__.__name__, **d)
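# A minimal usage sketch (illustrative values; img is a PIL Image):
# t = RandomAffine(degrees=15, translate=(0.1, 0.1), scale=(0.9, 1.1), shear=5)
# out = t(img)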
class Grayscale(object):
"""Convert image to grayscale.
Args:
num_output_channels (int): (1 or 3) number of channels desired for output image
Returns:
PIL Image: Grayscale version of the input.
- If num_output_channels == 1 : returned image is single channel
- If num_output_channels == 3 : returned image is 3 channel with r == g == b
"""
def __init__(self, num_output_channels=1):
self.num_output_channels = num_output_channels
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be converted to grayscale.
Returns:
            PIL Image: Grayscale version of the input image.
"""
return F.to_grayscale(img, num_output_channels=self.num_output_channels)
def __repr__(self):
return self.__class__.__name__ + '(num_output_channels={0})'.format(self.num_output_channels)
class RandomGrayscale(object):
"""Randomly convert image to grayscale with a probability of p (default 0.1).
Args:
p (float): probability that image should be converted to grayscale.
Returns:
PIL Image: Grayscale version of the input image with probability p and unchanged
with probability (1-p).
- If input image is 1 channel: grayscale version is 1 channel
- If input image is 3 channel: grayscale version is 3 channel with r == g == b
"""
def __init__(self, p=0.1):
self.p = p
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be converted to grayscale.
Returns:
PIL Image: Randomly grayscaled image.
"""
num_output_channels = 1 if img.mode == 'L' else 3
if random.random() < self.p:
return F.to_grayscale(img, num_output_channels=num_output_channels)
return img
def __repr__(self):
return self.__class__.__name__ + '(p={0})'.format(self.p)
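# A typical end-to-end composition sketch (illustrative values; assumes a PIL
# image as input and the Compose/ToTensor transforms defined earlier in this
# module):
# train_transform = Compose([
#     RandomResizedCrop(224),
#     RandomHorizontalFlip(),
#     ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
#     ToTensor(),
# ])
# tensor = train_transform(pil_image)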
|
[
"364370584@qq.com"
] |
364370584@qq.com
|
7446a9fb4f50657d914b2afecd3299cb8a38db4c
|
75c7004744315a22afdad8a68f20c06b8d3efad0
|
/网络设备脚本/迈普.py
|
c85643c747712831bd82f54c35bbdb8ad2b5b26a
|
[
"MIT"
] |
permissive
|
cflw/network_device_script
|
b13cde8719f23402cdd6acd3ca9048a7d65952aa
|
c3644e933a3c557c44951a0a1994a49357e49c02
|
refs/heads/master
| 2023-08-03T11:00:29.188101
| 2023-07-29T13:58:09
| 2023-07-29T13:58:09
| 182,526,402
| 18
| 12
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 381
|
py
|
import enum
#===============================================================================
# Factory
#===============================================================================
class E型号(enum.IntEnum):
mps4100 = 4100
mps4120 = 4120
def f创建设备(a连接, a型号, a版本 = 0):
from .迈普命令行 import 设备
return 设备.C设备(a连接, a型号, a版本)
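# Usage sketch (hypothetical connection object `conn`):
# dev = f创建设备(conn, E型号.mps4100)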
|
[
"cflw@outlook.com"
] |
cflw@outlook.com
|
2544770823fc434065202998a3455b76cd21d13f
|
dee1311673e28c8e3915cb2397b596768d96054c
|
/test8_unittest框架.py
|
0419240436f5ac751a4d90c27bc51e459a39d187
|
[] |
no_license
|
HaMr123/ld-project
|
f668a74e4a61b6e20fdc988715e58772a444358a
|
4d9dc5db4894fdf1fb72fc41baec60e8a872093e
|
refs/heads/main
| 2023-08-03T23:51:33.037442
| 2021-09-15T09:34:16
| 2021-09-15T09:34:16
| 406,687,753
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,754
|
py
|
"""
响应对象.json()跟响应对象.text的区别
"""
import requests
import json
import unittest
class login(unittest.TestCase):
def setUp(self):
self.url = "http://test.hyunst.com/pos/login_cashierLogin.do"
def test_login_1(self):
body = {'Api': 'doLogin',
'Params': '{"username":"dd","password":"E10ADC3949BA59ABBE56E057F20F883E"}',
'ClientId': 100123,
'Timestamp': 1615971042933
}
r = requests.post(self.url, data=body)
r_json = r.json()
print("r_json1:", r_json)
try:
self.assertEqual("成功", r_json['Message'])
except AssertionError as e:
print(e)
def test_login_2(self):
body = {'Api': 'doLogin',
'Params': '{"username":"dd","password":"E10ADC3949BA59ABBE56E057F20F88E"}',
'ClientId': 100123,
'Timestamp': 1615971042933
}
r = requests.post(self.url, data=body)
r_json = r.json()
print("r_json2:", r_json)
try:
self.assertEqual("密码", r_json['Message'])
except AssertionError as e:
print(e)
def test_login_3(self):
body = {'Api': 'doLogin',
'Params': '{"username":"d2222","password":"E10ADC3949BA59ABBE56E057F20F883E"}',
'ClientId': 100123,
'Timestamp': 1615971042933
}
r = requests.post(self.url, data=body)
r_json = r.json()
print("r_json3:", r_json)
try:
self.assertEqual("用户名", r_json['Message'])
except AssertionError as e:
print(e)
if __name__ == '__main__':
unittest.main()
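# For reference -- the difference the module docstring asks about:
# r.text is the raw response body as a str, while r.json() parses that same
# body into Python objects, so json.loads(r.text) == r.json(). Roughly:
#   r.text   -> '{"Message": "ok"}'   (str)
#   r.json() -> {'Message': 'ok'}     (dict)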
|
[
"339659207@qq.com"
] |
339659207@qq.com
|
324dad53ef6840f7c4b9d4df1900034cb6eb9754
|
22ba91d7ca8eef5dad780050ac19566bcd8f119f
|
/gif.py
|
036ccb2b721bbf286b4fe65b7d995b081710364c
|
[
"MIT"
] |
permissive
|
LitianD/ObjDetect
|
09d54b613f2d3148dfe12b447aa9230e18cbe19d
|
849f63467ce9e25c8ba0c24ca7bfdea9d836b0dd
|
refs/heads/master
| 2023-04-05T13:46:14.474688
| 2019-07-30T09:39:07
| 2019-07-30T09:39:07
| 199,144,849
| 2
| 1
|
MIT
| 2023-03-24T22:43:28
| 2019-07-27T09:34:51
|
Python
|
UTF-8
|
Python
| false
| false
| 860
|
py
|
from PIL import Image
import image2gif
import numpy as np
import os
outfilename = "D:\PyCharmProject\objDetect\keras-yolo3\gif\\1\\1.gif" # 转化的GIF图片名称
l = os.listdir("D:\PyCharmProject\objDetect\keras-yolo3\gif\\1")
frames = []
for image_name in l: # 索引各自目录
im = Image.open("D:\PyCharmProject\objDetect\keras-yolo3\gif\\1\\"+image_name) # 将图片打开,本文图片读取的结果是RGBA格式,如果直接读取的RGB则不需要下面那一步
im = im.convert("RGB") # 通过convert将RGBA格式转化为RGB格式,以便后续处理
im = np.array(im) # im还不是数组格式,通过此方法将im转化为数组
frames.append(im) # 批量化
image2gif.writeGif(outfilename, frames, duration=0.1, subRectangles=False)
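# If image2gif is unavailable, imageio can write the same GIF
# (an alternative, not this script's original dependency):
# import imageio
# imageio.mimsave(outfilename, frames, duration=0.1)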
|
[
"948167236@qq.com"
] |
948167236@qq.com
|
0532c39bac5f628a9a0e8ef709228777632ad528
|
e32bb97b6b18dfd48760ed28553a564055878d48
|
/source_py2/python_toolbox/binary_search/functions.py
|
eae623883c50c77fcbfcf19f3f61265a31a197ab
|
[
"MIT"
] |
permissive
|
rfdiazpr/python_toolbox
|
26cb37dd42342c478931699b00d9061aedcd924a
|
430dd842ed48bccdb3a3166e91f76bd2aae75a88
|
refs/heads/master
| 2020-12-31T04:15:53.977935
| 2014-04-30T23:54:58
| 2014-04-30T23:54:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,185
|
py
|
# Copyright 2009-2014 Ram Rachum.
# This program is distributed under the MIT license.
'''Module for doing a binary search in a sequence.'''
# Todo: wrap all things in tuples?
#
# todo: add option to specify `cmp`.
#
# todo: i think `binary_search_by_index` should have the core logic, and the
# other one will use it. I think this will save many sequence accesses, and
# some sequences can be expensive.
#
# todo: ensure there are no `if variable` checks where we're thinking of None
# but the variable might be False
from .roundings import (Rounding, roundings, LOW, LOW_IF_BOTH,
LOW_OTHERWISE_HIGH, HIGH, HIGH_IF_BOTH,
HIGH_OTHERWISE_LOW, EXACT, CLOSEST, CLOSEST_IF_BOTH,
BOTH)
def binary_search_by_index(sequence, function, value, rounding=CLOSEST):
'''
Do a binary search, returning answer as index number.
    Similar to binary_search (refer to its documentation for more info). The
difference is that instead of returning a result in terms of sequence
items, it returns the indexes of these items in the sequence.
For documentation of rounding options, check `binary_search.roundings`.
'''
if function is None:
function = lambda x: x
my_range = xrange(len(sequence))
fixed_function = lambda index: function(sequence[index])
result = binary_search(my_range, fixed_function, value, rounding)
return result
def _binary_search_both(sequence, function, value):
'''
Do a binary search through a sequence with the `BOTH` rounding.
It is assumed that `function` is a strictly monotonic rising function on
`sequence`.
Note: This function uses `None` to express its inability to find any
matches; therefore, you better not use it on sequences in which `None` is a
possible item.
'''
# todo: i think this should be changed to return tuples
### Preparing: ############################################################
# #
if function is None:
function = lambda x: x
get = lambda number: function(sequence[number])
low = 0
high = len(sequence) - 1
# #
### Finished preparing. ###################################################
### Handling edge cases: ##################################################
# #
if not sequence:
return (None, None)
low_value, high_value = get(low), get(high)
if value in (low_value, high_value):
return tuple((value, value))
elif low_value > value:
return tuple((None, sequence[low]))
elif high_value < value:
return (sequence[high], None)
# #
### Finished handling edge cases. #########################################
# Now we know the value is somewhere inside the sequence.
assert low_value < value < high_value
while high - low > 1:
medium = (low + high) // 2
medium_value = get(medium)
if medium_value > value:
high = medium; high_value = medium_value
continue
if medium_value < value:
low = medium; low_value = medium_value
continue
if medium_value == value:
return (sequence[medium], sequence[medium])
return (sequence[low], sequence[high])
def binary_search(sequence, function, value, rounding=CLOSEST):
'''
Do a binary search through a sequence.
It is assumed that `function` is a strictly monotonic rising function on
`sequence`.
For all rounding options, a return value of None is returned if no matching
item is found. (In the case of `rounding=BOTH`, either of the items in the
tuple may be `None`)
Note: This function uses `None` to express its inability to find any
matches; therefore, you better not use it on sequences in which None is a
possible item.
For documentation of rounding options, check `binary_search.roundings`.
'''
from .binary_search_profile import BinarySearchProfile
binary_search_profile = BinarySearchProfile(sequence, function, value)
return binary_search_profile.results[rounding]
def make_both_data_into_preferred_rounding(both, function, value, rounding):
'''
Convert results gotten using `BOTH` to a different rounding option.
This function takes the return value from `binary_search` (or other such
functions) with `rounding=BOTH` as the parameter `both`. It then gives the
data with a different rounding, specified with the parameter `rounding`.
'''
# todo optimize and organize: break to individual functions, put in
# `BinarySearchProfile`
if rounding is BOTH:
return both
elif rounding is LOW:
return both[0]
elif rounding is LOW_IF_BOTH:
return both[0] if both[1] is not None else None
elif rounding is LOW_OTHERWISE_HIGH:
return both[0] if both[0] is not None else both[1]
elif rounding is HIGH:
return both[1]
elif rounding is HIGH_IF_BOTH:
return both[1] if both[0] is not None else None
elif rounding is HIGH_OTHERWISE_LOW:
return both[1] if both[1] is not None else both[0]
elif rounding is EXACT:
results = [item for item in both if
(item is not None and function(item) == value)]
return results[0] if results else None
elif rounding in (CLOSEST, CLOSEST_IF_BOTH):
if rounding is CLOSEST_IF_BOTH:
if None in both:
return None
if both[0] is None: return both[1]
if both[1] is None: return both[0]
distances = [abs(function(item)-value) for item in both]
if distances[0] <= distances[1]:
return both[0]
else:
return both[1]
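# Usage sketch (hand-checked against `_binary_search_both` above):
#   _binary_search_both([1, 4, 9, 16, 25], None, 10)  # -> (9, 16)
#   _binary_search_both([1, 4, 9, 16, 25], None, 9)   # -> (9, 9)
# `binary_search` with `rounding=LOW` would then return 9 for value 10,
# assuming `BinarySearchProfile` mirrors this logic.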
|
[
"ram@rachum.com"
] |
ram@rachum.com
|
610a7317daea57a20502f20c9fb553eabdf68c7b
|
fcb68d461731bb27ec822146cc50197bac98acd5
|
/pypilot/pilots/intellect.py
|
4dee08a4479dee46e96b5c21a64119ff101cf233
|
[] |
no_license
|
guliaka/pypilot
|
c552995dfb6f4b26418653c91c8fd1d59d9f547f
|
9727f82ae1abe737e702b6ea08b072ab5c5fb42c
|
refs/heads/master
| 2021-01-04T13:57:29.860140
| 2020-02-11T02:55:55
| 2020-02-11T02:55:55
| 240,585,392
| 0
| 0
| null | 2020-02-14T19:38:46
| 2020-02-14T19:38:46
| null |
UTF-8
|
Python
| false
| false
| 7,368
|
py
|
#!/usr/bin/env python
#
# Copyright (C) 2019 Sean D'Epagnier
#
# This Program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
import os, sys, time, json
import tensorflow as tf
from signalk.client import SignalKClient
class History(object):
def __init__(self, conf):
self.conf = conf
self.data = []
def samples(self):
return (self.conf['past']+self.conf['future'])*self.conf['rate']
    def put(self, data):
        self.data = (self.data + [data])[-self.samples():] # keep only the most recent window
def inputs(history, names):
def select(values, names):
data = []
for name in values:
if not name in names:
continue
value = values[name]
if type(value) == type([]):
data += value
else:
data.append(value)
return data
    def flatten(values):
        data = []
        for value in values:
            if isinstance(value, list):
                data += flatten(value)
            else:
                data.append(value)
        return data
return flatten(map(lambda input : select(input, names), history))
class Intellect(object):
def __init__(self):
self.train_x, self.train_y = [], []
self.inputs = {}
self.models = {}
        self.conf = {'past': 10, # seconds of sensor data
                     'future': 3, # seconds to consider in the future
                     'rate': 1, # samples per second; History sizing depends on this
                     'sensors': ['imu.accel', 'imu.gyro', 'imu.heading', 'imu.headingrate', 'servo.current', 'servo.command'],
                     'actions': ['servo.command'],
                     'predictions': ['imu.heading', 'imu.headingrate']}
self.state = {'ap.enabled': False,
'ap.mode': 'none',
'imu.rate': 1}
self.history = History(self.conf)
        self.model = False
        self.sensor_timestamps = {}
        for name in self.conf['sensors']:
            self.sensor_timestamps[name] = 0
    def load(self, mode):
        self.build(self.conf) # build() stores the compiled model on self.model
        try:
            self.model.load_weights('~/.pypilot/intellect')
        except Exception:
            pass # no saved weights yet; keep the freshly built model
        return self.model
    def train(self):
        if len(self.history.data) != self.history.samples():
            return # not enough data in history yet
        rate = self.conf['rate']
        past, future = self.conf['past'], self.conf['future']
        sensors, actions = self.conf['sensors'], self.conf['actions']
        predictions = self.conf['predictions']
        present = rate*past
        # inputs are the sensors over past time
        sensors_data = inputs(self.history.data[:present], sensors)
        # and the actions in the future
        actions_data = inputs(self.history.data[present:], actions)
        # predictions in the future
        predictions_data = inputs(self.history.data[present:], predictions)
        conf = {'sensors': sensors, 'actions': actions, 'rate': rate,
                'mode': self.state['ap.mode'],
                'predictions': predictions, 'past': past, 'future': future}
        if not self.model or self.model.conf != conf:
            self.build(conf) # rebuild whenever the configuration changes
            self.train_x, self.train_y = [], []
        self.train_x.append(sensors_data + actions_data)
        self.train_y.append(predictions_data)
        pool_size = 100 # how much data to accumulate before training
        if len(self.train_x) >= pool_size:
            self.model.fit(self.train_x, self.train_y, epochs=4)
            self.train_x, self.train_y = [], []
def build(self, conf):
input_size = conf['rate']*(conf['past']*len(conf['sensors']) + conf['future']*len(conf['actions']))
output_size = conf['rate']*conf['future']*len(conf['predictions'])
input = tf.keras.layers.Input(shape=(input_size,), name='input_layer')
hidden = tf.keras.layers.Dense(16*output_size, activation='relu')(input)
output = tf.keras.layers.Dense(output_size, activation='tanh')(hidden)
self.model = tf.keras.Model(inputs=input, outputs=output)
self.model.compile(optimizer='adam', loss='mean_squared_error', metrics=['accuracy'])
self.model.conf = conf
    def save(self, filename):
        converter = tf.lite.TFLiteConverter.from_keras_model(self.model)
        tflite_model = converter.convert()
        try:
            conf = dict(self.model.conf)
            conf['model_filename'] = filename + '.tflite_model'
            f = open(filename, 'w')
            f.write(json.dumps(conf))
            f.close()
            f = open(conf['model_filename'], 'wb') # tflite models are binary
            f.write(tflite_model)
            f.close()
        except Exception as e:
            print('failed to save', filename, e)
def receive_single(self, name, msg):
value = msg['value']
if name in self.state:
self.state[name] = value
return
        if name in self.conf['sensors'] and (1 or self.state['ap.enabled']): # '1 or' forces collection even when the autopilot is disabled
            timestamp = msg['timestamp']
            dt = timestamp - self.sensor_timestamps[name]
            self.sensor_timestamps[name] = timestamp # remember for the next delta
            dte = abs(dt - 1.0/float(self.state['imu.rate']))
            if dte > .05:
                self.history.data = [] # time gap detected: restart collection
                self.inputs = {}
                return
            if name in self.inputs:
                print('input already for', name, self.inputs[name], timestamp)
            self.inputs[name] = value
            # see if we have all sensor values, and if so store in the history
            if all(map(lambda sensor: sensor in self.inputs, self.conf['sensors'])):
                s = ''
                for name in self.inputs:
                    s += name + ' ' + str(self.inputs[name]) + ' '
                print('input', time.time(), s)
                self.history.put(self.inputs)
                self.train()
                self.inputs = {}
def receive(self):
msg = self.client.receive_single(1)
while msg:
name, value = msg
self.receive_single(name, value)
msg = self.client.receive_single(-1)
    def run_replay(self, filename):
        try:
            f = open(filename)
            print('opened replay file', filename)
            while True:
                line = f.readline()
                if not line:
                    f.close()
                    return True
                # each replay line is assumed to hold a (name, msg) pair
                name, msg = json.loads(line)
                self.receive_single(name, msg)
        except Exception as e:
            return False
def run(self):
host = 'localhost'
if len(sys.argv) > 1:
if self.run_replay(sys.argv[1]):
return
host = sys.argv[1]
# couldn't load try to connect
watches = self.conf['sensors'] + list(self.state)
def on_con(client):
for name in watches:
client.watch(name)
t0 = time.time()
self.client = False
while True:
#try:
if 1:
if not self.client:
print('connecting to', host)
self.client = SignalKClient(on_con, host, autoreconnect=False)
self.receive()
#except Exception as e:
# print('error', e)
# self.client = False
# time.sleep(1)
            if time.time() - t0 > 600:
                t0 = time.time() # save every ten minutes, not on every loop iteration
                filename = os.getenv('HOME')+'/.pypilot/intellect_'+self.state['ap.mode']+'.conf'
                self.save(filename)
# find cpu usage of training process
#cpu = ps.cpu_percent()
#if cpu > 50:
# print('learning cpu very high', cpu)
def main():
intellect = Intellect()
intellect.run()
if __name__ == '__main__':
main()
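# Worked sizing example for build() (assuming the default conf above:
# rate=1, past=10, future=3, 6 sensors, 1 action, 2 predictions):
#   input_size  = 1 * (10*6 + 3*1) = 63
#   output_size = 1 * 3 * 2        = 6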
|
[
"seandepagnier@gmail.com"
] |
seandepagnier@gmail.com
|
f6251999cd5bce0014cea6dacf6385211fed0816
|
6a82724df32c6c787b9e9ae2fe49807384ae525e
|
/th_evernote/evernote_mgr.py
|
c9fff4618172b847b536052dc0f41a6c4ecd2ecb
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
GuaiLong618/django-th
|
c9940685792a6fe480d6921ee43464f14b43a866
|
07086e4c5691bf76b7dae7e11b25e5482c5de71e
|
refs/heads/master
| 2022-09-10T17:34:46.654594
| 2016-08-01T07:46:32
| 2016-08-01T07:46:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,639
|
py
|
# coding: utf-8
import evernote.edam.type.ttypes as Types
from evernote.edam.error.ttypes import EDAMSystemException, EDAMUserException
from evernote.edam.error.ttypes import EDAMErrorCode
from django.utils.translation import ugettext as _
from django.utils.log import getLogger
from django.core.cache import caches
logger = getLogger('django_th.trigger_happy')
cache = caches['th_evernote']
def get_notebook(note_store, my_notebook):
"""
get the notebook from its name
"""
notebook_id = 0
notebooks = note_store.listNotebooks()
# get the notebookGUID ...
for notebook in notebooks:
if notebook.name.lower() == my_notebook.lower():
notebook_id = notebook.guid
break
return notebook_id
def set_notebook(note_store, my_notebook, notebook_id):
"""
create a notebook
"""
if notebook_id == 0:
new_notebook = Types.Notebook()
new_notebook.name = my_notebook
new_notebook.defaultNotebook = False
notebook_id = note_store.createNotebook(new_notebook).guid
return notebook_id
def get_tag(note_store, my_tags):
    """
    get the tags from the user's Evernote account
    :param note_store Evernote Instance
    :param my_tags string
    :return: array of the guids of the matching tags
    """
tag_id = []
listtags = note_store.listTags()
# cut the string by piece of tag with comma
if ',' in my_tags:
for my_tag in my_tags.split(','):
for tag in listtags:
# remove space before and after
# thus we keep "foo bar"
# but not " foo bar" nor "foo bar "
if tag.name.lower() == my_tag.lower().lstrip().rstrip():
tag_id.append(tag.guid)
break
else:
for tag in listtags:
if tag.name.lower() == my_tags.lower():
tag_id.append(tag.guid)
break
return tag_id
def set_tag(note_store, my_tags, tag_id):
"""
create a tag if not exists
:param note_store evernote instance
:param my_tags string
:param tag_id id of the tag(s) to create
:return: array of the tag to create
"""
new_tag = Types.Tag()
if ',' in my_tags:
for my_tag in my_tags.split(','):
new_tag.name = my_tag
tag_id.append(create_tag(note_store, new_tag))
elif my_tags:
new_tag.name = my_tags
tag_id.append(create_tag(note_store, new_tag))
return tag_id
def create_note(note_store, note, trigger_id, data):
"""
create a note
:param note_store Evernote instance
:param note
:param trigger_id id of the trigger
:param data to save or to put in cache
:type note_store: Evernote Instance
:type note: Note instance
:type trigger_id: int
:type data: dict
:return boolean
:rtype boolean
"""
# create the note !
try:
created_note = note_store.createNote(note)
        sentence = 'note %s created' % created_note.guid
logger.debug(sentence)
return True
except EDAMSystemException as e:
if e.errorCode == EDAMErrorCode.RATE_LIMIT_REACHED:
sentence = "Rate limit reached {code} " \
"Retry your request in {msg} seconds"
logger.warn(sentence.format(
code=e.errorCode,
msg=e.rateLimitDuration))
# put again in cache the data that could not be
# published in Evernote yet
cache.set('th_evernote_' + str(trigger_id), data, version=2)
return True
else:
logger.critical(e)
return False
    except EDAMUserException as e:
        if e.errorCode == EDAMErrorCode.ENML_VALIDATION:
            sentence = "Data ignored due to validation" \
                       " error : err {code} {msg}"
            logger.warn(sentence.format(
                code=e.errorCode,
                msg=e.parameter))
            return True
        logger.critical(e)
        return False
except Exception as e:
logger.critical(e)
return False
def create_tag(note_store, new_tag):
"""
:param note_store Evernote instance
:param new_tag: create this new tag
:return: new tag id
"""
try:
return note_store.createTag(new_tag).guid
except EDAMUserException as e:
if e.errorCode == EDAMErrorCode.DATA_CONFLICT:
logger.info("Evernote Data Conflict Err {0}".format(e))
elif e.errorCode == EDAMErrorCode.BAD_DATA_FORMAT:
logger.critical("Evernote Err {0}".format(e))
def set_header():
"""
preparing the hearder of Evernote
"""
return '<?xml version="1.0" encoding="UTF-8"?>' \
'<!DOCTYPE en-note SYSTEM ' \
'"http://xml.evernote.com/pub/enml2.dtd">\n'
def set_note_attribute(data):
"""
add the link of the 'source' in the note
"""
na = False
if data.get('link'):
na = Types.NoteAttributes()
# add the url
na.sourceURL = data.get('link')
# add the object to the note
return na
def set_note_footer(data, trigger):
"""
handle the footer of the note
"""
footer = ''
if data.get('link'):
provided_by = _('Provided by')
provided_from = _('from')
footer_from = "<br/><br/>{} <em>{}</em> {} <a href='{}'>{}</a>"
footer = footer_from.format(
provided_by, trigger.trigger.description, provided_from,
data.get('link'), data.get('link'))
return footer
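# A minimal note-composition sketch (assumed `data` dict, trigger object and
# precomputed title/body; Types.Note fields per the Evernote SDK):
# content = set_header()
# content += '<en-note>' + body + set_note_footer(data, trigger) + '</en-note>'
# note = Types.Note(title=title, content=content,
#                   tagGuids=tag_id, notebookGuid=notebook_id,
#                   attributes=set_note_attribute(data))
# create_note(note_store, note, trigger_id, data)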
|
[
"foxmaskhome@gmail.com"
] |
foxmaskhome@gmail.com
|
4d81d7fac5507be465f3a10892b7dd2151c353af
|
102232cb70610cb3654f2c83c14cb5e4460bfaa5
|
/spec_files/8.2/federated.conf.spec
|
6afe765bf820677b64b8bd6a3478dbdbfc7f083d
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
splunk/vscode-extension-splunk
|
850335e5074abf43bbc1d5804f120aa46caa4c49
|
e017b79a5312f219ba728462217173604fce5ca6
|
refs/heads/master
| 2023-08-03T10:18:35.587436
| 2023-07-24T01:34:10
| 2023-07-24T01:34:10
| 220,573,274
| 80
| 26
|
MIT
| 2023-09-14T20:51:14
| 2019-11-09T01:17:37
|
Python
|
UTF-8
|
Python
| false
| false
| 4,299
|
spec
|
# Version 8.2.1
#
# This file contains possible setting/value pairs for federated provider entries
# for use in Data Fabric Search (DFS), when the federated search functionality is
# enabled.
#
# A federated search allows authorized users to run searches across multiple federated
# providers. Only Splunk deployments are supported as federated providers. Information
# on the Splunk deployment (i.e. the federated provider) is added in the federated
# provider stanza of the federated.conf file. A federated search deployment can have
# multiple federated search datasets. The settings for federated search dataset stanzas
# are located in savedsearches.conf.
#
# To learn more about configuration files (including precedence) please see the
# documentation located at
# http://docs.splunk.com/Documentation/Splunk/latest/Admin/Aboutconfigurationfiles
#
# Here are the settings for the federated provider stanzas.
[<federated-provider-stanza>]
* Create a unique stanza name for each federated provider.
type = [splunk]
* Specifies the type of the federated provider.
* Only Splunk deployments are supported as of this revision.
* Default: splunk
ip = <IP address or Host Name>
* Identifies the IP address or host name of the federated provider.
* Default: No default.
splunk.port = <port>
* Identifies the splunkd REST port on the remote Splunk deployment.
* No default.
splunk.serviceAccount = <user>
* Identifies an authorized user on the remote Splunk deployment.
* The security credentials associated with this account are managed securely in
fshpasswords.conf.
* No default.
splunk.app = <string>
* The name of the Splunk application on the remote Splunk deployment in which
* to perform the search.
* No default.
#
# Federated Provider Stanza
#
[provider]
* Each federated provider definition must have a separate stanza.
* <provider> must follow the following syntax:
provider://<unique-federated-provider-name>
* <unique-federated-provider-name> can contain only alphanumeric characters and
underscores.
type = [splunk]
* Specifies the type of the federated provider.
* Only Splunk deployments are supported as of this version.
* Default: splunk
hostPort = <Host_Name_or_IP_Address>:<service_port>
* Specifies the protocols required to connect to a federated provider.
* You can provide a host name or an IP address.
* The <service_port> can be any legitimate port number.
* No default.
serviceAccount = <user_name>
* Specifies the user name for a service account that has been set up on the
federated provider for the purpose of enabling secure federated search.
* This service account allows the federated search head on your local Splunk
platform deployment to query datasets on the federated provider in a secure
manner.
* No default.
password = <password>
* Specifies the service account password for the user specified in the
'serviceAccount' setting.
* No default.
appContext = <application_short_name>
* Specifies the Splunk application context for the federated searches that will
be run with this federated provider definition.
* Provision of an application context ensures that federated searches which use
the federated provider are limited to the knowledge objects that are
associated with the named application. Application context can also affect
search job quota and resource allocation parameters.
* NOTE: This setting applies only when `useFSHKnowledgeObjects = false`.
* <application_short_name> must be the "short name" of a Splunk application
currently installed on the federated provider. For example, the short name of
Splunk IT Service Intelligence is 'itsi'.
* You can create multiple federated provider definitions for the same remote
search head that differ only by app context.
* Find the short names of apps installed on a Splunk deployment by going to
'Apps > Manage Apps' and reviewing the values in the 'Folder name' column.
* Default: search
useFSHKnowledgeObjects = <boolean>
* Determines whether federated searches with this provider use knowledge
objects from the federated provider (the remote search head) or from the
federated search head (the local search head).
* When set to 'true' federated searches with this provider use knowledge
objects from the federated search head.
* Default: true
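#
# Example federated provider definition (illustrative values only):
#
# [provider://my_remote_deployment]
# type = splunk
# hostPort = remote-sh.example.com:8089
# serviceAccount = fed_search_svc
# password = <service account password>
# appContext = search
# useFSHKnowledgeObjects = true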
|
[
"jason@jasonconger.com"
] |
jason@jasonconger.com
|
a1fc804457d63cf68b8a48fe34eaed92704fa56a
|
42211b7e5878f2b0b8aeb1bbee5a7185ed3580f6
|
/src/utils.py
|
4e1cad1f0e69c8d7c1e78939904b84cafab51ac8
|
[] |
no_license
|
oopzzozzo/eye-pasting
|
3ba3b314ec043a9468d12cb46025bffc998b70be
|
8a0f72bb51869f317f4dff7d88db53adf2b0133e
|
refs/heads/master
| 2020-05-27T00:37:53.953036
| 2019-06-18T09:18:07
| 2019-06-18T09:18:07
| 188,424,904
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,300
|
py
|
import cv2 as cv
import dlib
import numpy as np
from imutils import face_utils
# face_utils
(l_s_idx, l_e_idx) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(r_s_idx, r_e_idx) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
get_eyes = lambda marks: (marks[l_s_idx:l_e_idx], marks[r_s_idx:r_e_idx])
dp_idx = 33
# helper functions to convert between cv2 and dlib types
rect2pts = lambda rect: ((rect.left(), rect.top()), (rect.right(), rect.bottom()))
shape2pts = lambda shape: list(map(lambda i: (shape.part(i).x, shape.part(i).y), range(68)))
tp2pts = lambda tp: ((tp[0], tp[1]), (tp[0]+tp[2], tp[1]+tp[3]))
pts2rect = lambda pts: dlib.rectangle(*pts[0], *pts[1])
# pts functions
pix = ((0,0), (0,0))
safe = lambda rect, shape: tuple([tuple([min(max(x, 0), b-1) for x, b in zip(p, shape[1::-1])]) for p in rect])
pts_area = lambda ps: (ps[1][0] - ps[0][0]) * (ps[1][1] - ps[0][1])
leftup = lambda pt2: all([(x1 < x2) for x1, x2 in zip(*pt2)])
crop = lambda img, pts: img[pts[0][1]:pts[1][1], pts[0][0]:pts[1][0]]
def preserve(img, pts):
ret = np.zeros(img.shape, np.uint8)
pts = safe(pts, img.shape)
ret[pts[0][1]:pts[1][1], pts[0][0]:pts[1][0]] = img[pts[0][1]:pts[1][1], pts[0][0]:pts[1][0]]
return ret
# colors
blue = (255, 0, 0)
green = (0, 255, 0)
red = (0, 0, 255)
white = (255, 255, 255)
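if __name__ == '__main__':
    # A minimal self-check of the pts helpers (illustrative shapes only):
    img = np.zeros((100, 200, 3), np.uint8)  # h=100, w=200
    roi = ((10, 20), (60, 80))               # ((x0, y0), (x1, y1))
    print(crop(img, roi).shape)              # (60, 50, 3)
    print(pts_area(roi))                     # 50 * 60 = 3000
    print(preserve(img, roi).shape)          # (100, 200, 3)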
|
[
"ek3ru8m4@gmail.com"
] |
ek3ru8m4@gmail.com
|
98fb05633126284f0264cbf415267d9c26c83b37
|
0030495752530ac832f6a298c2faaa1e75d27ae6
|
/src/template_matching/__init__.py
|
3e943beecb8999cc74755f71e361a89a4e14f287
|
[
"Apache-2.0"
] |
permissive
|
AnttiHaerkoenen/henkikirjat
|
4697d01ada88b332d71d7122b5258210f07fc0a1
|
0c4dcfdc753b3c1af9e2d17917d95d2e783cd234
|
refs/heads/master
| 2021-07-07T03:45:16.408150
| 2020-08-27T09:06:34
| 2020-08-27T09:06:34
| 159,818,611
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 173
|
py
|
from enum import Enum
import cv2
from .img_tools import *
from .rectangle import *
from .digits import *
from .matching import *
from .grid_to_xml import page_grid_to_xml
|
[
"antth@hotmail.fi"
] |
antth@hotmail.fi
|
c15ab19b59d0a45c31c4fd4763fbb8d84e0f58bc
|
80ff29422e987f17cbb06211907c555d713c881f
|
/OZON/ML/HOMEWORKS/week1_find_word_in_circle.py
|
66e4f55e0f89b5df75a14c997225b7fef769fa95
|
[] |
no_license
|
be-y-a/smth
|
8780f362305ddb4d25834b921459fb4c9115ab5f
|
2ee632b629f68aca49642322b28c8487ce24fee9
|
refs/heads/master
| 2023-01-10T16:08:25.030319
| 2020-11-02T21:48:10
| 2020-11-09T10:09:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,147
|
py
|
def check(input, index, pattern):
patternIndex = 0
inputIndex = index
isRightPossible = True
isLeftPossible = True
while patternIndex < len(pattern):
if inputIndex == len(input):
inputIndex = 0
if input[inputIndex] == pattern[patternIndex]:
patternIndex += 1
inputIndex += 1
else:
isRightPossible = False
break
if isRightPossible:
return (index, 1)
patternIndex = 0
inputIndex = index
while patternIndex < len(pattern):
if inputIndex == -1:
inputIndex = len(input) - 1
if input[inputIndex] == pattern[patternIndex]:
patternIndex += 1
inputIndex -= 1
else:
isLeftPossible = False
break
if isLeftPossible:
return (index, -1)
return -1
def find_word_in_circle(input, pattern):
if pattern == "" or input == pattern:
return (0, 1)
for i in range(len(input)):
result = check(input, i, pattern)
if result != -1:
return result
return -1
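if __name__ == '__main__':
    # A few illustrative calls (hand-checked against the logic above):
    print(find_word_in_circle("abc", "ab"))   # (0, 1): forward match at index 0
    print(find_word_in_circle("cab", "abc"))  # (1, 1): forward match wrapping past the end
    print(find_word_in_circle("cba", "abc"))  # (2, -1): backward match wrapping past the start
    print(find_word_in_circle("xyz", "abc"))  # -1: no match in either direction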
|
[
"mvvm@yandex-team.ru"
] |
mvvm@yandex-team.ru
|
f4e82e0fb6fcf19025bb2572c0fa042925daf77f
|
1c589c05222eff2cf10d8d9386f3ef6563a66920
|
/virtual/bin/gunicorn_paster
|
0cd53e6041ba582e0dcea5f319525295ca7a7318
|
[
"MIT"
] |
permissive
|
Okalll/gram-insta
|
c642e998aad925b7de93b7376b328d1125ec227d
|
c17095511aea62950938c1ad0528503b717780e3
|
refs/heads/master
| 2020-04-28T01:49:51.550835
| 2019-03-15T15:49:57
| 2019-03-15T15:49:57
| 174,874,070
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 257
|
#!/home/moringa/Desktop/gram-ins/virtual/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from gunicorn.app.pasterapp import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run())
|
[
"vivieokall@gmail.com"
] |
vivieokall@gmail.com
|
|
ca65f667b697e219623f15dc98b33eeee73524ea
|
8dd8f391b1da81bc8d8dd170a44a8f0cd91f9529
|
/NotesLmao.py
|
dcc1c10fa29d777886580fd7f77c9c411861b953
|
[] |
no_license
|
DavidEsketit/CSE7
|
aa7eaf53da991d6c53bb80b84ccb2a9eff07dd37
|
e0a49863e0909f6fa117951ff95cd4b4aa3abf74
|
refs/heads/master
| 2021-09-14T23:10:36.833047
| 2018-05-21T22:59:08
| 2018-05-21T22:59:08
| 112,400,040
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,602
|
py
|
# print("Hello World!")
#
# # David Solis
#
# print(3 + 5)
# print(5 - 3)
# print(5 * 3)
# print(6 / 2)
# print(3 ** 2)
#
# print()
# print("See if you can figure this out")
# print(100 % 3)
#
# # Variables
# car_name = "Wiebe Mobile"
# car_type = "Lamborghini Sesto Elemento"
# car_cylinders = 8
# car_mpg = 9000.1
#
# # Inline Printing
# print("My car is the %s." % car_name)
# print("My car is the %s. It is a %s" % (car_name, car_type))
#
# # Taking input
# name = input("What is your name? ")
# print("Hello %s." % name)
# print(name)
#
# age = input("What is your age? ")
# print("%s is a nice age to be in." % age)
#
# # Initializing Variable
# answer = random.randint(1, 50)
# turns_left = 5
# correct_guess = False
#
# # Describes exactly ONE turn. The while loop is the Game Controller.
# while turns_left > 0 and correct_guess is False:
# guess = int(input("Guess a number between 1 and 50: "))
# if guess == answer:
# print()
# Lists
# the_count = [1, 2, 3, 4, 5]
# # characters = ["graves", "Dory", "Boots", "Dora", "Shrek", "Odi-Man", "Ian", "Carl"]
# print(characters[0])
# print(characters[4])
#
# print(len(characters))
#
# # Going through lists
# for char in characters:
# print(char)
#
# for num in the_count:
# print(num ** 2)
#
# len(characters)
# range(3) # Makes a list of the number from 0 to 2
# range(len(characters)) # Make a list of ALL INDICES
#
# for num in range(len(characters)):
# char = characters[num]
# print("The character at index %d is %s" % (num, char))
#
# str1 = "Hello World!" # Start counting from 0
# listOne = list(str1)
# print(listOne)
# listOne[11] = '.'
# print(listOne)
# newStr = "".join(listOne)
# print(newStr)
#
# # Adding stuff to list
#
# characters.append("Ironman/Batman/whomever you want")
# print(characters)
#
# characters.append("Black Panther")
#
# # Removing stuff from list
#
# characters.remove("Carl")
#
# characters.pop(6)
# print(characters)
# The string class
# import string
# print(string.ascii_letters)
# print(string.ascii_lowercase)
# print(string.digits)
# print(string.punctuation)
#
# strTwo = 'tHiS sEntEnCe iS uNuSuAl'
# lowercase = strTwo.lower()
# print(lowercase)
# uppercase = strTwo.upper()
# print(uppercase)
# # Hangman Board
# """
# Make a list for the word, letter by letter.
# Add the letters guessed by the user to another list
# """
# # Dictionaries - Make up a key: value pair
# dictionary = {'name': 'Lance', 'age': 18, "height": 6 * 12 + 2}
#
# # Accessing from a dictionary
# print(dictionary['name'])
# print(dictionary['age'])
# print(dictionary['height'])
#
# large_dictionary = {
# "California": "CA",
# "Washington": "WA",
# "Florida": "FL"
# }
# # Adding to a dictionary
# dictionary["eye color"] = "white"
# print(dictionary)
#
#
# print(large_dictionary['Florida'])
#
# larger_dictionary = {
# "California": [
# "Fresno",
# "Sacramento",
# "Los Angeles"
# ],
# "Washington": [
# "Seattle",
# "Tacoma",
# "Olympia",
# "Spokane"
# ],
# "Illinois": [
# "Chicago",
# "Naperville",
# "Peoria"
# ],
# }
#
# print(larger_dictionary["Illinois"])
# print(larger_dictionary["Illinois"][0])
#
# # Spokane
# print(larger_dictionary["Washington"][3])
#
# largest_dictionary = {
# "CA": {
# 'NAME': "California",
# 'Population': 39250000,
# 'BORDER ST': [
# 'Oregon',
# "Nevada",
# "Arizona"
# ],
# },
# "MI": {
# "NAME": "Michigan",
# "POPULATION": 9928000,
# "BORDER ST": [
# 'Wisconsin',
# 'Ohio',
# "Indiana"
# ]
# },
# "FL": {
# "NAME": "Florida",
# "POPULATION": 2610000,
# "BORDER ST": [
# 'Georgia',
# 'Alabama'
# ]
# }
# }
# print(largest_dictionary["MI"]["BORDER ST"][1])
# print(largest_dictionary["FL"]["NAME"])
# Defining Functions
def hello_world():
print("Hello World")
hello_world()
def square_it(number):
return number ** 2
print(square_it(3))
for i in range(15):
print(square_it(i))
def tip_calc(subtotal):
    tip_amt = subtotal * 0.18  # tip_amt is a local variable
    print("The tip amount is $%.2f" % tip_amt)  # %.2f keeps the cents
return tip_amt
def total_bills(subtotal):
total = subtotal + tip_calc(subtotal)
return total
print(total_bills(100))
def distance(x1, y1, x2, y2):
inside = (x2-x1) ** 2 + (y2 - y1) ** 2
answer = inside ** 0.5
return answer
print(distance(0, 0, 3, 4))
|
[
"rbpi@wetigers.org"
] |
rbpi@wetigers.org
|
f8c65e0ccdd678cb113f84e3c42f60d8fed79f3b
|
ebfcae1c5ba2997b2ac4471d5bedc3f5daffcb31
|
/pylodon-master/pylodon/utilities.py
|
a9209a940682b30eff324e639cfaaef17a0b6769
|
[
"MIT"
] |
permissive
|
babiato/flaskapp1
|
84de2d0b26a54f5820d3bbe97926782ad41e005c
|
530beb9e3b8516e0e93960b99521c23a523ef546
|
refs/heads/master
| 2023-02-26T16:36:49.760632
| 2021-02-04T09:08:40
| 2021-02-04T09:08:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,269
|
py
|
from pylodon import mongo
from config import DEFAULT_HEADERS, VALID_HEADERS, API_URI, ACCEPT_HEADERS, CONTENT_HEADERS
from activipy import core
from flask import abort
from httpsig import HeaderSigner, Signer
from werkzeug.http import http_date
import datetime
def get_time():
"""
for when you need to know what time it is
returns in isoformat because that's what masto uses
"""
return datetime.datetime.now().isoformat()
def as_asobj(obj):
return core.ASObj(obj)
# header checking
def check_accept_headers(r):
"""
checks headers against allowed headers as defined in config
"""
accept = r.headers.get('accept')
if accept and (accept in VALID_HEADERS):
return True
return False
def check_content_headers(r):
"""
checks headers against allowed headers as defined in config
"""
content_type = r.headers.get('Content-Type')
if content_type and (content_type in VALID_HEADERS):
return True
return False
def check_headers(request):
"""
checks whether the client has used the appropriate Accept or
Content-Type headers in their request. if not, aborts with an
appropriate HTTP error code
"""
method = request.method
if method == 'GET':
accept = request.headers.get('accept', None)
if accept and (accept in VALID_HEADERS):
pass
else:
abort(406) # Not Acceptable
elif method == 'POST':
content_type = request.headers.get('Content-Type', None)
if content_type and (content_type in VALID_HEADERS):
pass
else:
abort(415) # Unsupported Media Type
else:
abort(400) # Bad Request
# signatures
def sign_headers(u, headers):
    """
    sign the given headers with the user's RSA key, returning a dict
    carrying a Signature header plus the default headers
    """
key_id = u['publicKey']['@id']
secret = u['privateKey']
hs = HeaderSigner(key_id, secret, algorithm='rsa-sha256')
auth = hs.sign({"Date": http_date()})
# thanks to https://github.com/snarfed for the authorization -> signature headers hack
# this is necessary because httpsig.HeaderSigner returns an Authorization header instead of Signature
auth['Signature'] = auth.pop('authorization')
assert auth['Signature'].startswith('Signature ')
auth['Signature'] = auth['Signature'][len('Signature '):]
auth.update(headers)
auth.update(DEFAULT_HEADERS)
return auth
def sign_object(u, obj):
"""
creates pubkey signed version of the given object (e.g.
a Note)
"""
# key_id = u['publicKey']['@id']
secret = u['privateKey']
hs = Signer(secret=secret, algorithm="rsa-sha256")
auth_object = hs._sign(obj)
return auth_object
# add headers
def content_headers(u):
    """
    signed headers for posting content
    """
    return sign_headers(u, CONTENT_HEADERS)
def accept_headers(u):
"""
"""
return sign_headers(u, ACCEPT_HEADERS)
# database queries
def find_user(handle):
"""
"""
u = mongo.db.users.find_one({'username': handle}, {'_id': False})
if not u:
return None
return u
def find_post(handle, post_id):
"""
"""
    user_api_uri = API_URI+'/'+handle
    post_uri = user_api_uri+'/'+post_id
    p = mongo.db.posts.find_one({'object.id': post_uri}, {'_id': False})
if not p:
return None
return p
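# Usage sketch (illustrative; the handle and post id are hypothetical):
#
#   user = find_user('alice')
#   if user:
#       post = find_post('alice', 'some-post-id')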
|
[
"jinxufang@tencent.com"
] |
jinxufang@tencent.com
|
f0fd0c01012661f5f768491e7853b3274c39005b
|
5f09a94da91ecae226dbac687b056bd68d209ebf
|
/project/code/.ipynb_checkpoints/jsonCode-checkpoint.py
|
1c635e5fde75a366a95301cdf18887db70f7bf8b
|
[] |
no_license
|
marshallgrimmett/data-mining-python
|
7a88617bbe5146a9dd2df669743ec1d3bcada0ca
|
b876aa55571f2209495b8e91d99a3447cd1568a0
|
refs/heads/main
| 2023-07-15T06:04:53.765065
| 2021-08-18T16:53:12
| 2021-08-18T16:53:12
| 333,232,968
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,769
|
py
|
import json
import pandas as pd
from ludwig.utils import data_utils
all_users = {}
all_problems = {}
all_concepts = {}
df = data_utils.read_csv("data.csv")
removing_duplicate_users = [i for n, i in enumerate(df['Anon Student Id'].tolist()) if i not in df['Anon Student Id'].tolist()[:n]]
count_user = 1
for i in removing_duplicate_users:
all_users[i] = count_user
count_user += 1
json_users = json.dumps(all_users, indent = 2)
#---------------------------------------------------------------------------------------
removing_duplicate_problems = [i for n, i in enumerate(df['Problem Name'].tolist()) if i not in df['Problem Name'].tolist()[:n]]
count_problem = 1
for i in removing_duplicate_problems:
all_problems[i] = count_problem
count_problem += 1
json_problems = json.dumps(all_problems, indent = 2)
#---------------------------------------------------------------------------------------
cleanedList1 = [x for x in df['KC (WPI-Apr-2005)'].tolist() if str(x) != 'nan']
cleanedList2 = [x for x in df['KC (WPI-Apr-2005).1'].tolist() if str(x) != 'nan']
cleanedList3 = [x for x in df['KC (skills_from_dataframe).2'].tolist() if str(x) != 'nan']
cleanedList4 = [x for x in df['KC (skills_from_dataframe).3'].tolist() if str(x) != 'nan']
cleanedList5 = [x for x in df['KC (skills_from_dataframe).4'].tolist() if str(x) != 'nan']
cleanedList6 = [x for x in df['KC (skills_from_dataframe).5'].tolist() if str(x) != 'nan']
cleanedList7 = [x for x in df['KC (MCAS39-State_WPI-Simple)'].tolist() if str(x) != 'nan']
cleanedList8 = [x for x in df['KC (MCAS39-State_WPI-Simple).1'].tolist() if str(x) != 'nan']
cleanedList9 = [x for x in df['KC (MCAS39-State_WPI-Simple).2'].tolist() if str(x) != 'nan']
cleanedList10 = [x for x in df['KC (MCAS39-State_WPI-Simple).3'].tolist() if str(x) != 'nan']
cleanedList11 = [x for x in df['KC (MCAS5-State_WPI-Simple)'].tolist() if str(x) != 'nan']
combining_lists = cleanedList1 + cleanedList2 + cleanedList3 + cleanedList4 + cleanedList5 + cleanedList6 + cleanedList7 + cleanedList8 + cleanedList9 + cleanedList10 + cleanedList11
removing_duplicate_concepts = [i for n, i in enumerate(combining_lists) if i not in combining_lists[:n]]
count_concept = 1
for i in removing_duplicate_concepts:
all_concepts[i] = count_concept
count_concept += 1
json_concepts = json.dumps(all_concepts, indent = 2)
dataset_statistics = {
'Dataset': ['Assistments Math 2005-2006'],
'Learners': [str(len(all_users))],
'Concepts': [str(len(all_concepts))],
'Questions': [str(len(all_problems))],
'Responses': [str(len(df['Outcome']))]
}
statistics_df = pd.DataFrame(dataset_statistics, columns = ['Dataset', 'Learners','Concepts','Questions','Responses'])
print(statistics_df)
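# Note (added for illustration): each "removing_duplicate_*" comprehension above
# is O(n^2). On Python 3.7+, dict.fromkeys gives the same order-preserving
# de-duplication in linear time, e.g.:
#
#   removing_duplicate_users = list(dict.fromkeys(df['Anon Student Id'].tolist()))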
|
[
"mgrimmett@albany.edu"
] |
mgrimmett@albany.edu
|
ef45c3d3056e28f1680f4e7db692d69a59f4b954
|
3c7828b2bc6ca8ef95340506f395f258c42f18e3
|
/venv/bin/easy_install
|
13f443f6b229b2a2645c04da5dfee6f165db27f7
|
[] |
no_license
|
kian98/Masege
|
8be5187951cb6bdd78cf2c7889a9c5edf47d47e6
|
3e6ec71cb4c4c86c6620be8668738190eb4f6329
|
refs/heads/master
| 2020-07-30T00:05:13.341534
| 2019-10-07T17:00:06
| 2019-10-07T17:00:06
| 210,010,658
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 436
|
#!/home/kian/develop/Python/Masege/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
)
|
[
"1207529462@qq.com"
] |
1207529462@qq.com
|
|
eed74d7bd10d46efb6c28d0958b44306583ee637
|
b4b199096518dcaa1e45f026310971c6302a2b96
|
/remixt/cn_plot.py
|
ab3831d82c1f231668b4f83ce1f19e5464fecfaa
|
[
"MIT"
] |
permissive
|
amcpherson/remixt
|
07353eb74d4427734d7868e53b8fc48a41f6c1fe
|
8f462fe3c5c5afcf4d2c13edb1162f5c3e853430
|
refs/heads/master
| 2023-06-25T08:21:12.479643
| 2022-07-26T21:49:28
| 2022-07-26T21:49:28
| 95,475,048
| 6
| 2
|
MIT
| 2023-02-10T23:13:51
| 2017-06-26T18:08:10
|
Python
|
UTF-8
|
Python
| false
| false
| 26,969
|
py
|
import sys
import os
import itertools
import pickle
import pandas as pd
import numpy as np
import scipy
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.colors import colorConverter
from matplotlib.path import Path
import matplotlib.patches as patches
from matplotlib.patches import Rectangle
import matplotlib.backends.backend_pdf
import remixt.analysis.experiment
import remixt.analysis.readdepth
def filled_density_weighted(ax, data, weights, c, a, xmin, xmax, cov, rotate=False):
""" Weighted filled density plot.
"""
density = scipy.stats.gaussian_kde(data, bw_method=cov, weights=weights)
xs = [xmin] + list(np.linspace(xmin, xmax, 2000)) + [xmax]
ys = density(np.array(xs))
ys[0] = 0.0
ys[-1] = 0.0
if rotate:
ax.plot(ys, xs, color=c, alpha=a)
ax.fill_betweenx(xs, ys, color=c, alpha=a)
else:
ax.plot(xs, ys, color=c, alpha=a)
ax.fill(xs, ys, color=c, alpha=a)
def plot_cnv_segments(ax, cnv, major_col='major', minor_col='minor', do_fill=False,):
""" Plot raw major/minor copy number as line plots
Args:
ax (matplotlib.axes.Axes): plot axes
cnv (pandas.DataFrame): cnv table
major_col (str): name of major copies column
minor_col (str): name of minor copies column
do_fill (boolean): fill from 0
Plot major and minor copy number as line plots. The columns 'start' and 'end'
are expected and should be adjusted for full genome plots. Values from the
'major_col' and 'minor_col' columns are plotted.
"""
segment_color_major = plt.get_cmap('RdBu')(0.1)
segment_color_minor = plt.get_cmap('RdBu')(0.9)
quad_color_major = colorConverter.to_rgba(segment_color_major, alpha=0.5)
quad_color_minor = colorConverter.to_rgba(segment_color_minor, alpha=0.5)
cnv = cnv.sort_values('start')
def create_segments(df, field):
segments = np.array([[df['start'].values, df[field].values], [df['end'].values, df[field].values]])
segments = np.transpose(segments, (2, 0, 1))
return segments
def create_connectors(df, field):
prev = df.iloc[:-1].reset_index()
next = df.iloc[1:].reset_index()
mids = ((prev[field] + next[field]) / 2.0).values
prev_cnct = np.array([[prev['end'].values, prev[field].values], [prev['end'].values, mids]])
prev_cnct = np.transpose(prev_cnct, (2, 0, 1))
next_cnct = np.array([[next['start'].values, mids], [next['start'].values, next[field].values]])
next_cnct = np.transpose(next_cnct, (2, 0, 1))
return np.concatenate([prev_cnct, next_cnct])
def create_quads(df, field):
quads = np.array([
[df['start'].values, np.zeros(len(df.index))],
[df['start'].values, df[field].values],
[df['end'].values, df[field].values],
[df['end'].values, np.zeros(len(df.index))],
])
quads = np.transpose(quads, (2, 0, 1))
return quads
major_segments = create_segments(cnv, major_col)
minor_segments = create_segments(cnv, minor_col)
ax.add_collection(matplotlib.collections.LineCollection(major_segments, colors=segment_color_major, lw=1))
ax.add_collection(matplotlib.collections.LineCollection(minor_segments, colors=segment_color_minor, lw=1))
major_connectors = create_connectors(cnv, major_col)
minor_connectors = create_connectors(cnv, minor_col)
ax.add_collection(matplotlib.collections.LineCollection(major_connectors, colors=segment_color_major, lw=1))
ax.add_collection(matplotlib.collections.LineCollection(minor_connectors, colors=segment_color_minor, lw=1))
if do_fill:
major_quads = create_quads(cnv, major_col)
minor_quads = create_quads(cnv, minor_col)
ax.add_collection(matplotlib.collections.PolyCollection(major_quads, facecolors=quad_color_major, edgecolors=quad_color_major, lw=0))
ax.add_collection(matplotlib.collections.PolyCollection(minor_quads, facecolors=quad_color_minor, edgecolors=quad_color_minor, lw=0))
def plot_cnv_genome(ax, cnv, mincopies=-0.4, maxcopies=4, minlength=1000, major_col='major', minor_col='minor',
chromosome=None, start=None, end=None, tick_step=None, do_fill=False, chromosomes=None):
""" Plot major/minor copy number across the genome
Args:
ax (matplotlib.axes.Axes): plot axes
cnv (pandas.DataFrame): copy number table
KwArgs:
mincopies (float): minimum number of copies for setting y limits
maxcopies (float): maximum number of copies for setting y limits
minlength (int): minimum length of segments to be drawn
major_col (str): name of major copies column
minor_col (str): name of minor copies column
chromosome (str): name of chromosome to plot, None for all chromosomes
start (int): start of region in chromosome, None for beginning
end (int): end of region in chromosome, None for end of chromosome
tick_step (float): genomic length between x steps
do_fill (boolean): fill to 0 for copy number
chromosomes (list): list of chromosomes to plot in order
Returns:
pandas.DataFrame: table of chromosome length info
"""
if chromosome is None and (start is not None or end is not None):
raise ValueError('start and end require chromosome arg')
    # Ensure we don't modify the calling function's table
cnv = cnv[['chromosome', 'start', 'end', major_col, minor_col]].copy()
if 'length' not in cnv:
cnv['length'] = cnv['end'] - cnv['start']
# Restrict segments to those plotted
if chromosome is not None:
cnv = cnv[cnv['chromosome'] == chromosome]
if start is not None:
cnv = cnv[cnv['end'] > start]
if end is not None:
cnv = cnv[cnv['start'] < end]
# Create chromosome info table
if chromosomes is None:
chromosomes = remixt.utils.sort_chromosome_names(cnv['chromosome'].unique())
chromosome_length = cnv.groupby('chromosome')['end'].max()
chromosome_info = pd.DataFrame({'length':chromosome_length}, index=chromosomes)
# Calculate start and end in plot
chromosome_info['end'] = np.cumsum(chromosome_info['length'])
chromosome_info['start'] = chromosome_info['end'] - chromosome_info['length']
chromosome_info['mid'] = (chromosome_info['start'] + chromosome_info['end']) / 2.
if minlength is not None:
cnv = cnv[cnv['length'] >= minlength]
cnv.set_index('chromosome', inplace=True)
cnv['chromosome_start'] = chromosome_info['start']
cnv.reset_index(inplace=True)
cnv['start'] = cnv['start'] + cnv['chromosome_start']
cnv['end'] = cnv['end'] + cnv['chromosome_start']
plot_cnv_segments(ax, cnv, major_col=major_col, minor_col=minor_col, do_fill=do_fill)
ax.set_yticks(range(int(mincopies), int(maxcopies - mincopies) + 1))
ax.set_ylim((mincopies, maxcopies))
ax.set_yticklabels(ax.get_yticks(), ha='left')
ax.yaxis.tick_left()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.yaxis.set_tick_params(direction='out', labelsize=12)
ax.xaxis.set_tick_params(direction='out', labelsize=12)
ax.xaxis.tick_bottom()
if chromosome is not None:
if start is None:
start = 0
if end is None:
end = chromosome_info.loc[chromosome, 'length']
plot_start = start + chromosome_info.loc[chromosome, 'start']
plot_end = end + chromosome_info.loc[chromosome, 'start']
ax.set_xlim((plot_start, plot_end))
ax.set_xlabel('Chromosome ' + chromosome, fontsize=14)
if tick_step is None:
tick_step = (end - start) / 12.
tick_step = np.round(tick_step, decimals=-int(np.floor(np.log10(tick_step))))
xticks = np.arange(plot_start, plot_end, tick_step)
xticklabels = np.arange(start, end, tick_step)
ax.set_xticks(xticks)
ax.set_xticklabels(['{:g}'.format(a/1e6) + 'Mb' for a in xticklabels])
else:
ax.set_xlim((0, chromosome_info['end'].max()))
ax.set_xlabel('chromosome')
ax.set_xticks([0] + list(chromosome_info['end'].values))
ax.set_xticklabels([])
ax.xaxis.set_minor_locator(matplotlib.ticker.FixedLocator(chromosome_info['mid']))
ax.xaxis.set_minor_formatter(matplotlib.ticker.FixedFormatter(chromosome_info.index.values))
    ax.yaxis.set_tick_params(pad=8)
ax.xaxis.grid(True, which='major', linestyle=':')
ax.yaxis.grid(True, which='major', linestyle=':')
return chromosome_info
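# Usage sketch (illustrative; the segment values below are made up):
#
#   toy = pd.DataFrame({
#       'chromosome': ['1', '1', '2'],
#       'start': [0, 5000000, 0],
#       'end': [5000000, 10000000, 8000000],
#       'major': [2, 3, 2],
#       'minor': [1, 1, 0],
#   })
#   fig, ax = plt.subplots(figsize=(12, 3))
#   plot_cnv_genome(ax, toy)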
def plot_cnv_genome_density(fig, transform, cnv, chromosomes=None):
""" Plot major/minor copy number across the genome and as a density
Args:
fig (matplotlib.figure.Figure): figure to which plots are added
transform (matplotlib.transform.Transform): transform for locating axes
cnv (pandas.DataFrame): copy number table
KwArgs:
chromosomes (list): chromosomes to plot
Returns:
matplotlib.figure.Figure: figure to which the plots have been added
"""
box = matplotlib.transforms.Bbox([[0.05, 0.05], [0.65, 0.95]])
ax = fig.add_axes(transform.transform_bbox(box))
remixt.cn_plot.plot_cnv_genome(ax, cnv, mincopies=-1, maxcopies=6, major_col='major_raw', minor_col='minor_raw', chromosomes=chromosomes)
ax.set_ylabel('Raw copy number')
ylim = ax.get_ylim()
box = matplotlib.transforms.Bbox([[0.7, 0.05], [0.95, 0.95]])
ax = fig.add_axes(transform.transform_bbox(box))
cov = 0.05
data = cnv[['minor_raw', 'major_raw', 'length']].replace(np.inf, np.nan).dropna()
filled_density_weighted(
ax, data['minor_raw'].values, data['length'].values,
'blue', 0.5, ylim[0], ylim[1], cov, rotate=True)
filled_density_weighted(
ax, data['major_raw'].values, data['length'].values,
'red', 0.5, ylim[0], ylim[1], cov, rotate=True)
ax.set_ylim(ylim)
ax.set_xlabel('Density')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.xaxis.tick_bottom()
ax.yaxis.tick_left()
ax.yaxis.grid(True)
return fig
def create_chromosome_color_map(chromosomes):
""" Create a map of colors per chromosome.
Args:
chromosomes (list): list of chromosome names
Returns:
pandas.DataFrame: chromosome color table
"""
color_map = plt.get_cmap('Dark2')
chromosome_colors = list()
for i in range(len(chromosomes)):
if len(chromosomes) == 1:
f = 0.
else:
f = float(i)/float(len(chromosomes)-1)
rgb_color = color_map(f)
hex_color = matplotlib.colors.rgb2hex(rgb_color)
chromosome_colors.append(hex_color)
chromosome_colors = pd.DataFrame({'chromosome':chromosomes, 'color':chromosome_colors})
return chromosome_colors
def plot_cnv_scatter(ax, cnv, major_col='major', minor_col='minor', highlight_col=None, chromosome_colors=None, chromosomes=None):
""" Scatter plot segments major by minor.
Args:
ax (matplotlib.axes.Axes): plot axes
cnv (pandas.DataFrame): copy number table
KwArgs:
major_col (str): name of major copies column
minor_col (str): name of minor copies column
highlight_col (str): name of boolean column for highlighting specific segments
chromosome_colors (pandas.DataFrame): chromosome color table
chromosomes (list): chromosomes to plot, in order
"""
cnv = cnv[['chromosome', 'start', 'end', 'length', major_col, minor_col]].replace(np.inf, np.nan).dropna()
# Create color map for chromosomes
if chromosomes is None:
chromosomes = remixt.utils.sort_chromosome_names(cnv['chromosome'].unique())
# Create chromosome color map if not given
if chromosome_colors is None:
chromosome_colors = create_chromosome_color_map(chromosomes)
# Scatter size scaled by segment length
cnv['scatter_size'] = 10. * np.sqrt(cnv['length'] / 1e6)
# Scatter color
cnv = cnv.merge(chromosome_colors)
major_samples = remixt.utils.weighted_resample(cnv[major_col].values, cnv['length'].values)
major_min = np.percentile(major_samples, 1)
major_max = np.percentile(major_samples, 99)
major_margin = 0.25 * (major_max - major_min)
xlim = (major_min - major_margin, major_max + major_margin)
minor_samples = remixt.utils.weighted_resample(cnv[minor_col].values, cnv['length'].values)
minor_min = np.percentile(minor_samples, 1)
minor_max = np.percentile(minor_samples, 99)
minor_margin = 0.25 * (minor_max - minor_min)
ylim = (minor_min - minor_margin, minor_max + minor_margin)
if highlight_col is not None:
cnv_greyed = cnv[~cnv[highlight_col]]
cnv = cnv[cnv[highlight_col]]
points = ax.scatter(cnv_greyed[major_col], cnv_greyed[minor_col],
s=cnv_greyed['scatter_size'], facecolor='#d0d0e0', edgecolor='#d0d0e0',
linewidth=0.0, zorder=2)
points = ax.scatter(cnv[major_col], cnv[minor_col],
s=cnv['scatter_size'], facecolor=cnv['color'], edgecolor=cnv['color'],
linewidth=0.0, zorder=2)
ax.set_xlim(xlim)
ax.set_xlabel('major')
ax.set_ylim(ylim)
ax.set_ylabel('minor')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.xaxis.tick_bottom()
ax.yaxis.tick_left()
ax.grid(True)
lgnd_artists = [plt.Circle((0, 0), color=c) for c in chromosome_colors['color']]
lgnd = ax.legend(lgnd_artists, chromosomes,
loc=2, markerscale=0.5, fontsize=6, ncol=2,
title='Chromosome', frameon=True)
lgnd.get_frame().set_edgecolor('w')
def plot_cnv_scatter_density(fig, transform, data, major_col='major', minor_col='minor', annotate=(), info='', chromosomes=None):
""" Plot CNV Scatter with major minor densities on axes.
Args:
fig (matplotlib.figure.Figure): figure to which plots are added
transform (matplotlib.transform.Transform): transform for locating axes
data (pandas.DataFrame): copy number data
KwArgs:
major_col (str): name of major copies column
minor_col (str): name of minor copies column
annotate (iterable): copy values to annotate with dashed line
info (str): information to add to empty axis
chromosomes (list): chromosomes to plot
Returns:
matplotlib.figure.Figure: figure to which the plots have been added
"""
box = matplotlib.transforms.Bbox([[0.05, 0.05], [0.65, 0.65]])
ax = fig.add_axes(transform.transform_bbox(box))
remixt.cn_plot.plot_cnv_scatter(ax, data, chromosomes=chromosomes)
xlim = ax.get_xlim()
ylim = ax.get_ylim()
for a in annotate:
ax.plot(xlim, [a, a], '--k')
ax.plot([a, a], ylim, '--k')
box = matplotlib.transforms.Bbox([[0.05, 0.7], [0.65, 0.95]])
ax = fig.add_axes(transform.transform_bbox(box))
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.xaxis.tick_bottom()
ax.yaxis.tick_left()
ax.xaxis.grid(True)
filled_density_weighted(
ax, data[major_col].values, data['length'].values,
'0.75', 0.5, xlim[0], xlim[1], 1e-7)
ax.set_xlim(xlim)
for a in annotate:
ax.plot([a, a], ax.get_ylim(), '--k')
box = matplotlib.transforms.Bbox([[0.7, 0.05], [0.95, 0.65]])
ax = fig.add_axes(transform.transform_bbox(box))
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.xaxis.tick_bottom()
ax.yaxis.tick_left()
ax.yaxis.grid(True)
filled_density_weighted(
ax, data[minor_col].values, data['length'].values,
'0.75', 0.5, ylim[0], ylim[1], 1e-7, rotate=True)
ax.set_ylim(ylim)
for a in annotate:
ax.plot(ax.get_xlim(), [a, a], '--k')
box = matplotlib.transforms.Bbox([[0.7, 0.7], [0.95, 0.95]])
ax = fig.add_axes(transform.transform_bbox(box))
ax.axis('off')
ax.text(0.0, 0.0, info)
return fig
def plot_breakpoints_genome(ax, breakpoint, chromosome_info, scale_height=1.0):
""" Plot breakpoint arcs
Args:
ax (matplotlib.axes.Axes): plot axes
        breakpoint (pandas.DataFrame): breakpoint table with 'chromosome_1'/'chromosome_2' and 'position_1'/'position_2' columns
chromosome_info (pandas.DataFrame): per chromosome start and end in plot returned from plot_cnv_genome
"""
plot_height = ax.get_ylim()[1] * 0.8
plot_length = ax.get_xlim()[1] - ax.get_xlim()[0]
for side in ('1', '2'):
breakpoint.set_index('chromosome_'+side, inplace=True)
breakpoint['chromosome_start_'+side] = chromosome_info['start']
breakpoint.reset_index(inplace=True)
breakpoint['plot_position_'+side] = breakpoint['position_'+side] + breakpoint['chromosome_start_'+side]
codes = [Path.MOVETO, Path.CURVE4, Path.CURVE4, Path.CURVE4]
for idx, row in breakpoint.iterrows():
pos_1, pos_2 = sorted(row[['plot_position_1', 'plot_position_2']])
height = scale_height * 2. * plot_height * (pos_2 - pos_1) / float(plot_length)
visible_1 = pos_1 >= ax.get_xlim()[0] and pos_1 <= ax.get_xlim()[1]
visible_2 = pos_2 >= ax.get_xlim()[0] and pos_2 <= ax.get_xlim()[1]
if not visible_1 and not visible_2:
continue
if not visible_1 or not visible_2:
height = plot_height * 10.
verts = [(pos_1, 0.), (pos_1, height), (pos_2, height), (pos_2, 0.)]
path = Path(verts, codes)
patch = patches.PathPatch(path, facecolor='none', edgecolor='#2eb036', lw=2, zorder=100)
ax.add_patch(patch)
def experiment_plot(experiment, cn, h, chromosome=None, start=None, end=None, maxcopies=4):
""" Plot a sequencing experiment
Args:
experiment (Experiment): experiment object containing simulation information
cn (numpy.array): segment copy number
h (numpy.array): haploid depths
KwArgs:
chromosome (str): name of chromosome to plot, None for all chromosomes
start (int): start of region in chromosome, None for beginning
end (int): end of region in chromosome, None for end of chromosome
maxcopies (int): max copy number for y axis
Returns:
matplotlib.Figure: figure object of plots
"""
data = remixt.analysis.experiment.create_cn_table(experiment, cn, h)
num_plots = 3
width = 20
height = 6
if 'major_2' in data:
num_plots = 5
height = 10
plot_idx = 1
fig = plt.figure(figsize=(width, height))
ax = plt.subplot(num_plots, 1, plot_idx)
plot_idx += 1
plot_cnv_genome(ax, data, maxcopies=maxcopies, major_col='major_raw', minor_col='minor_raw',
chromosome=chromosome, start=start, end=end)
ax.set_xlabel('')
ax.set_ylabel('raw')
ax = plt.subplot(num_plots, 1, plot_idx)
plot_idx += 1
plot_cnv_genome(ax, data, maxcopies=maxcopies, major_col='major_raw_e', minor_col='minor_raw_e',
chromosome=chromosome, start=start, end=end)
ax.set_xlabel('')
ax.set_ylabel('expected')
ax = plt.subplot(num_plots, 1, plot_idx)
plot_idx += 1
plot_cnv_genome(ax, data, maxcopies=maxcopies, major_col='major_1', minor_col='minor_1',
chromosome=chromosome, start=start, end=end)
ax.set_xlabel('')
ax.set_ylabel('clone 1')
if 'major_2' in data:
ax = plt.subplot(num_plots, 1, plot_idx)
plot_idx += 1
plot_cnv_genome(ax, data, maxcopies=maxcopies, major_col='major_2', minor_col='minor_2',
chromosome=chromosome, start=start, end=end)
ax.set_xlabel('')
ax.set_ylabel('clone 2')
ax = plt.subplot(num_plots, 1, plot_idx)
plot_idx += 1
plot_cnv_genome(ax, data, maxcopies=2, major_col='major_diff', minor_col='minor_diff',
chromosome=chromosome, start=start, end=end)
ax.set_xlabel('chromosome')
ax.set_ylabel('clone diff')
plt.tight_layout()
return fig
def mixture_plot(mixture):
""" Plot a genome mixture
Args:
mixture (GenomeMixture): information about the genomes and their proportions
Returns:
matplotlib.Figure: figure object of plots
"""
data = pd.DataFrame({
'chromosome':mixture.segment_chromosome_id,
'start':mixture.segment_start,
'end':mixture.segment_end,
'length':mixture.l,
})
tumour_frac = mixture.frac[1:] / mixture.frac[1:].sum()
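    # einsum 'ij,j->i' is a per-segment weighted sum over tumour clones:
    # expected copies = sum_j cn[segment, clone j] * tumour_frac[clone j]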
data['major_expected'] = np.einsum('ij,j->i', mixture.cn[:,1:,0], tumour_frac)
data['minor_expected'] = np.einsum('ij,j->i', mixture.cn[:,1:,1], tumour_frac)
    for m in range(1, mixture.cn.shape[1]):
data['major_{0}'.format(m)] = mixture.cn[:,m,0]
data['minor_{0}'.format(m)] = mixture.cn[:,m,1]
data['major_diff'] = np.absolute(data['major_1'] - data['major_2'])
data['minor_diff'] = np.absolute(data['minor_1'] - data['minor_2'])
fig = plt.figure(figsize=(20, 10))
ax = plt.subplot(4, 1, 1)
plot_cnv_genome(ax, data, maxcopies=4, major_col='major_expected', minor_col='minor_expected')
ax.set_xlabel('')
ax.set_ylabel('expected')
ax = plt.subplot(4, 1, 2)
plot_cnv_genome(ax, data, maxcopies=4, major_col='major_1', minor_col='minor_1')
ax.set_xlabel('')
ax.set_ylabel('clone 1')
ax = plt.subplot(4, 1, 3)
plot_cnv_genome(ax, data, maxcopies=4, major_col='major_2', minor_col='minor_2')
ax.set_xlabel('')
ax.set_ylabel('clone 2')
ax = plt.subplot(4, 1, 4)
plot_cnv_genome(ax, data, maxcopies=2, major_col='major_diff', minor_col='minor_diff')
ax.set_xlabel('chromosome')
ax.set_ylabel('clone diff')
plt.tight_layout()
return fig
def gc_plot(gc_table_filename, plot_filename):
""" Plot the probability distribution of GC content for sampled reads
Args:
gc_table_filename (str): table of binned gc values
plot_filename (str): plot PDF filename
"""
gc_binned = pd.read_csv(gc_table_filename, sep='\t')
fig = plt.figure(figsize=(4,4))
plt.scatter(gc_binned['gc_bin'].values, gc_binned['mean'].values, c='k', s=4)
plt.plot(gc_binned['gc_bin'].values, gc_binned['smoothed'].values, c='r')
plt.xlabel('gc %')
plt.ylabel('density')
plt.xlim((-0.5, 100.5))
plt.ylim((-0.01, gc_binned['mean'].max() * 1.1))
plt.tight_layout()
fig.savefig(plot_filename, format='pdf', bbox_inches='tight')
def plot_depth(ax, read_depth, minor_modes=None):
""" Plot read depth of major minor and total as a density
Args:
        ax (matplotlib.axes.Axes): plot axes for the major/minor/total read depth densities
read_depth (pandas.DataFrame): observed major, minor, and total read depth and lengths
KwArgs:
        minor_modes (list): annotate minor modes with vertical lines
"""
total_depth_samples = remixt.utils.weighted_resample(read_depth['total'].values, read_depth['length'].values)
depth_max = np.percentile(total_depth_samples, 95)
cov = 0.0000001
filled_density_weighted(ax, read_depth['minor'].values, read_depth['length'].values, 'blue', 0.5, 0.0, depth_max, cov)
filled_density_weighted(ax, read_depth['major'].values, read_depth['length'].values, 'red', 0.5, 0.0, depth_max, cov)
filled_density_weighted(ax, read_depth['total'].values, read_depth['length'].values, 'grey', 0.5, 0.0, depth_max, cov)
if minor_modes is not None:
init_h_mono = np.array(remixt.analysis.readdepth.calculate_candidate_h_monoclonal(minor_modes))
h_normal = init_h_mono[0, 0]
h_tumour = init_h_mono[:, 1] + h_normal
        ax.axvline(h_normal, lw=1, color='g')
        for x in h_tumour:
            ax.axvline(x, lw=1, color='g', ls=':')
ax.set_xlabel('Read Depth')
ax.set_ylabel('Density')
def plot_experiment(experiment_plot_filename, experiment_filename):
""" Plot an experiment
Args:
experiment_plot_filename (str): plot PDF filename
experiment_filename (str): filename of experiment pickle
"""
    with open(experiment_filename, 'rb') as experiment_file:
exp = pickle.load(experiment_file)
fig = experiment_plot(exp, exp.cn, exp.h)
fig.savefig(experiment_plot_filename, format='pdf', bbox_inches='tight', dpi=300)
def plot_mixture(mixture_plot_filename, mixture_filename):
""" Plot a mixture
Args:
mixture_plot_filename (str): plot PDF filename
mixture_filename (str): filename of mixture pickle
"""
    with open(mixture_filename, 'rb') as mixture_file:
mixture = pickle.load(mixture_file)
fig = mixture_plot(mixture)
fig.savefig(mixture_plot_filename, format='pdf', bbox_inches='tight', dpi=300)
def ploidy_analysis_plots(experiment_filename, plots_filename, chromosomes=None):
""" Generate ploidy analysis plots
Args:
experiment_filename (str): experiment pickle filename
plots_filename (str): ploidy analysis plots filename
KwArgs:
chromosomes (list): chromosomes to plot
"""
with open(experiment_filename, 'rb') as experiment_file:
experiment = pickle.load(experiment_file)
read_depth = remixt.analysis.readdepth.calculate_depth(experiment)
minor_modes = remixt.analysis.readdepth.calculate_minor_modes(read_depth)
init_h_mono = remixt.analysis.readdepth.calculate_candidate_h_monoclonal(minor_modes)
pdf = matplotlib.backends.backend_pdf.PdfPages(plots_filename)
for h in init_h_mono:
cn_modes = h[0] + np.arange(0, 5) * h[1]
read_depth['major_raw'] = (read_depth['major'] - h[0]) / h[1]
read_depth['minor_raw'] = (read_depth['minor'] - h[0]) / h[1]
f = h / h.sum()
major, minor, length = read_depth.replace(np.inf, np.nan).dropna()[['major_raw', 'minor_raw', 'length']].values.T
ploidy = ((major + minor) * length).sum() / length.sum()
info = 'Statistics:\n\n'
info += ' normal = {:.3f}\n\n'.format(f[0])
info += ' tumour = {:.3f}\n\n'.format(f[1])
info += ' ploidy = {:.3f}'.format(ploidy)
fig = plt.figure(figsize=(12,12))
box = matplotlib.transforms.Bbox([[0., 0.25], [1., 1.]])
transform = matplotlib.transforms.BboxTransformTo(box)
remixt.cn_plot.plot_cnv_scatter_density(fig, transform, read_depth, annotate=cn_modes, info=info, chromosomes=chromosomes)
box = matplotlib.transforms.Bbox([[0., 0.0], [1., 0.25]])
transform = matplotlib.transforms.BboxTransformTo(box)
remixt.cn_plot.plot_cnv_genome_density(fig, transform, read_depth.query('high_quality'), chromosomes=chromosomes)
pdf.savefig(bbox_inches='tight')
plt.close()
pdf.close()
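# Illustrative invocation (hypothetical filenames):
#
#   ploidy_analysis_plots('experiment.pickle', 'ploidy_plots.pdf', chromosomes=['1', '2', 'X'])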
|
[
"andrew.mcpherson@gmail.com"
] |
andrew.mcpherson@gmail.com
|
b853d1b897ae44076afd3497d9dfa922edb2b018
|
d23f52ddf7d966fe12e4af89ee085dd16523c4f3
|
/Report 2/Source/VQA-Models/model-2/Models/classnames.py
|
fce06f7d399b0d6d67061e57e0ca2ef38dcde47e
|
[] |
no_license
|
toadSTL/CS5542_Team1_VQA
|
3acfe6ce268ea2bf04cf9d836ab554d82f648a28
|
1c601bc8e655da784d8bb4654c67b7c5b05570d0
|
refs/heads/master
| 2020-04-23T06:14:56.967610
| 2019-05-09T15:33:16
| 2019-05-09T15:33:16
| 170,967,287
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,725
|
py
|
# ONLY FOR TESTING
class_names = '''tench, Tinca tinca
goldfish, Carassius auratus
great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias
tiger shark, Galeocerdo cuvieri
hammerhead, hammerhead shark
electric ray, crampfish, numbfish, torpedo
stingray
cock
hen
ostrich, Struthio camelus
brambling, Fringilla montifringilla
goldfinch, Carduelis carduelis
house finch, linnet, Carpodacus mexicanus
junco, snowbird
indigo bunting, indigo finch, indigo bird, Passerina cyanea
robin, American robin, Turdus migratorius
bulbul
jay
magpie
chickadee
water ouzel, dipper
kite
bald eagle, American eagle, Haliaeetus leucocephalus
vulture
great grey owl, great gray owl, Strix nebulosa
European fire salamander, Salamandra salamandra
common newt, Triturus vulgaris
eft
spotted salamander, Ambystoma maculatum
axolotl, mud puppy, Ambystoma mexicanum
bullfrog, Rana catesbeiana
tree frog, tree-frog
tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui
loggerhead, loggerhead turtle, Caretta caretta
leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea
mud turtle
terrapin
box turtle, box tortoise
banded gecko
common iguana, iguana, Iguana iguana
American chameleon, anole, Anolis carolinensis
whiptail, whiptail lizard
agama
frilled lizard, Chlamydosaurus kingi
alligator lizard
Gila monster, Heloderma suspectum
green lizard, Lacerta viridis
African chameleon, Chamaeleo chamaeleon
Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis
African crocodile, Nile crocodile, Crocodylus niloticus
American alligator, Alligator mississipiensis
triceratops
thunder snake, worm snake, Carphophis amoenus
ringneck snake, ring-necked snake, ring snake
hognose snake, puff adder, sand viper
green snake, grass snake
king snake, kingsnake
garter snake, grass snake
water snake
vine snake
night snake, Hypsiglena torquata
boa constrictor, Constrictor constrictor
rock python, rock snake, Python sebae
Indian cobra, Naja naja
green mamba
sea snake
horned viper, cerastes, sand viper, horned asp, Cerastes cornutus
diamondback, diamondback rattlesnake, Crotalus adamanteus
sidewinder, horned rattlesnake, Crotalus cerastes
trilobite
harvestman, daddy longlegs, Phalangium opilio
scorpion
black and gold garden spider, Argiope aurantia
barn spider, Araneus cavaticus
garden spider, Aranea diademata
black widow, Latrodectus mactans
tarantula
wolf spider, hunting spider
tick
centipede
black grouse
ptarmigan
ruffed grouse, partridge, Bonasa umbellus
prairie chicken, prairie grouse, prairie fowl
peacock
quail
partridge
African grey, African gray, Psittacus erithacus
macaw
sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita
lorikeet
coucal
bee eater
hornbill
hummingbird
jacamar
toucan
drake
red-breasted merganser, Mergus serrator
goose
black swan, Cygnus atratus
tusker
echidna, spiny anteater, anteater
platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus
wallaby, brush kangaroo
koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus
wombat
jellyfish
sea anemone, anemone
brain coral
flatworm, platyhelminth
nematode, nematode worm, roundworm
conch
snail
slug
sea slug, nudibranch
chiton, coat-of-mail shell, sea cradle, polyplacophore
chambered nautilus, pearly nautilus, nautilus
Dungeness crab, Cancer magister
rock crab, Cancer irroratus
fiddler crab
king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica
American lobster, Northern lobster, Maine lobster, Homarus americanus
spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish
crayfish, crawfish, crawdad, crawdaddy
hermit crab
isopod
white stork, Ciconia ciconia
black stork, Ciconia nigra
spoonbill
flamingo
little blue heron, Egretta caerulea
American egret, great white heron, Egretta albus
bittern
crane
limpkin, Aramus pictus
European gallinule, Porphyrio porphyrio
American coot, marsh hen, mud hen, water hen, Fulica americana
bustard
ruddy turnstone, Arenaria interpres
red-backed sandpiper, dunlin, Erolia alpina
redshank, Tringa totanus
dowitcher
oystercatcher, oyster catcher
pelican
king penguin, Aptenodytes patagonica
albatross, mollymawk
grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus
killer whale, killer, orca, grampus, sea wolf, Orcinus orca
dugong, Dugong dugon
sea lion
Chihuahua
Japanese spaniel
Maltese dog, Maltese terrier, Maltese
Pekinese, Pekingese, Peke
Shih-Tzu
Blenheim spaniel
papillon
toy terrier
Rhodesian ridgeback
Afghan hound, Afghan
basset, basset hound
beagle
bloodhound, sleuthhound
bluetick
black-and-tan coonhound
Walker hound, Walker foxhound
English foxhound
redbone
borzoi, Russian wolfhound
Irish wolfhound
Italian greyhound
whippet
Ibizan hound, Ibizan Podenco
Norwegian elkhound, elkhound
otterhound, otter hound
Saluki, gazelle hound
Scottish deerhound, deerhound
Weimaraner
Staffordshire bullterrier, Staffordshire bull terrier
American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier
Bedlington terrier
Border terrier
Kerry blue terrier
Irish terrier
Norfolk terrier
Norwich terrier
Yorkshire terrier
wire-haired fox terrier
Lakeland terrier
Sealyham terrier, Sealyham
Airedale, Airedale terrier
cairn, cairn terrier
Australian terrier
Dandie Dinmont, Dandie Dinmont terrier
Boston bull, Boston terrier
miniature schnauzer
giant schnauzer
standard schnauzer
Scotch terrier, Scottish terrier, Scottie
Tibetan terrier, chrysanthemum dog
silky terrier, Sydney silky
soft-coated wheaten terrier
West Highland white terrier
Lhasa, Lhasa apso
flat-coated retriever
curly-coated retriever
golden retriever
Labrador retriever
Chesapeake Bay retriever
German short-haired pointer
vizsla, Hungarian pointer
English setter
Irish setter, red setter
Gordon setter
Brittany spaniel
clumber, clumber spaniel
English springer, English springer spaniel
Welsh springer spaniel
cocker spaniel, English cocker spaniel, cocker
Sussex spaniel
Irish water spaniel
kuvasz
schipperke
groenendael
malinois
briard
kelpie
komondor
Old English sheepdog, bobtail
Shetland sheepdog, Shetland sheep dog, Shetland
collie
Border collie
Bouvier des Flandres, Bouviers des Flandres
Rottweiler
German shepherd, German shepherd dog, German police dog, alsatian
Doberman, Doberman pinscher
miniature pinscher
Greater Swiss Mountain dog
Bernese mountain dog
Appenzeller
EntleBucher
boxer
bull mastiff
Tibetan mastiff
French bulldog
Great Dane
Saint Bernard, St Bernard
Eskimo dog, husky
malamute, malemute, Alaskan malamute
Siberian husky
dalmatian, coach dog, carriage dog
affenpinscher, monkey pinscher, monkey dog
basenji
pug, pug-dog
Leonberg
Newfoundland, Newfoundland dog
Great Pyrenees
Samoyed, Samoyede
Pomeranian
chow, chow chow
keeshond
Brabancon griffon
Pembroke, Pembroke Welsh corgi
Cardigan, Cardigan Welsh corgi
toy poodle
miniature poodle
standard poodle
Mexican hairless
timber wolf, grey wolf, gray wolf, Canis lupus
white wolf, Arctic wolf, Canis lupus tundrarum
red wolf, maned wolf, Canis rufus, Canis niger
coyote, prairie wolf, brush wolf, Canis latrans
dingo, warrigal, warragal, Canis dingo
dhole, Cuon alpinus
African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus
hyena, hyaena
red fox, Vulpes vulpes
kit fox, Vulpes macrotis
Arctic fox, white fox, Alopex lagopus
grey fox, gray fox, Urocyon cinereoargenteus
tabby, tabby cat
tiger cat
Persian cat
Siamese cat, Siamese
Egyptian cat
cougar, puma, catamount, mountain lion, painter, panther, Felis concolor
lynx, catamount
leopard, Panthera pardus
snow leopard, ounce, Panthera uncia
jaguar, panther, Panthera onca, Felis onca
lion, king of beasts, Panthera leo
tiger, Panthera tigris
cheetah, chetah, Acinonyx jubatus
brown bear, bruin, Ursus arctos
American black bear, black bear, Ursus americanus, Euarctos americanus
ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus
sloth bear, Melursus ursinus, Ursus ursinus
mongoose
meerkat, mierkat
tiger beetle
ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle
ground beetle, carabid beetle
long-horned beetle, longicorn, longicorn beetle
leaf beetle, chrysomelid
dung beetle
rhinoceros beetle
weevil
fly
bee
ant, emmet, pismire
grasshopper, hopper
cricket
walking stick, walkingstick, stick insect
cockroach, roach
mantis, mantid
cicada, cicala
leafhopper
lacewing, lacewing fly
dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk
damselfly
admiral
ringlet, ringlet butterfly
monarch, monarch butterfly, milkweed butterfly, Danaus plexippus
cabbage butterfly
sulphur butterfly, sulfur butterfly
lycaenid, lycaenid butterfly
starfish, sea star
sea urchin
sea cucumber, holothurian
wood rabbit, cottontail, cottontail rabbit
hare
Angora, Angora rabbit
hamster
porcupine, hedgehog
fox squirrel, eastern fox squirrel, Sciurus niger
marmot
beaver
guinea pig, Cavia cobaya
sorrel
zebra
hog, pig, grunter, squealer, Sus scrofa
wild boar, boar, Sus scrofa
warthog
hippopotamus, hippo, river horse, Hippopotamus amphibius
ox
water buffalo, water ox, Asiatic buffalo, Bubalus bubalis
bison
ram, tup
bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis
ibex, Capra ibex
hartebeest
impala, Aepyceros melampus
gazelle
Arabian camel, dromedary, Camelus dromedarius
llama
weasel
mink
polecat, fitch, foulmart, foumart, Mustela putorius
black-footed ferret, ferret, Mustela nigripes
otter
skunk, polecat, wood pussy
badger
armadillo
three-toed sloth, ai, Bradypus tridactylus
orangutan, orang, orangutang, Pongo pygmaeus
gorilla, Gorilla gorilla
chimpanzee, chimp, Pan troglodytes
gibbon, Hylobates lar
siamang, Hylobates syndactylus, Symphalangus syndactylus
guenon, guenon monkey
patas, hussar monkey, Erythrocebus patas
baboon
macaque
langur
colobus, colobus monkey
proboscis monkey, Nasalis larvatus
marmoset
capuchin, ringtail, Cebus capucinus
howler monkey, howler
titi, titi monkey
spider monkey, Ateles geoffroyi
squirrel monkey, Saimiri sciureus
Madagascar cat, ring-tailed lemur, Lemur catta
indri, indris, Indri indri, Indri brevicaudatus
Indian elephant, Elephas maximus
African elephant, Loxodonta africana
lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens
giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca
barracouta, snoek
eel
coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch
rock beauty, Holocanthus tricolor
anemone fish
sturgeon
gar, garfish, garpike, billfish, Lepisosteus osseus
lionfish
puffer, pufferfish, blowfish, globefish
abacus
abaya
academic gown, academic robe, judge's robe
accordion, piano accordion, squeeze box
acoustic guitar
aircraft carrier, carrier, flattop, attack aircraft carrier
airliner
airship, dirigible
altar
ambulance
amphibian, amphibious vehicle
analog clock
apiary, bee house
apron
ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin
assault rifle, assault gun
backpack, back pack, knapsack, packsack, rucksack, haversack
bakery, bakeshop, bakehouse
balance beam, beam
balloon
ballpoint, ballpoint pen, ballpen, Biro
Band Aid
banjo
bannister, banister, balustrade, balusters, handrail
barbell
barber chair
barbershop
barn
barometer
barrel, cask
barrow, garden cart, lawn cart, wheelbarrow
baseball
basketball
bassinet
bassoon
bathing cap, swimming cap
bath towel
bathtub, bathing tub, bath, tub
beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon
beacon, lighthouse, beacon light, pharos
beaker
bearskin, busby, shako
beer bottle
beer glass
bell cote, bell cot
bib
bicycle-built-for-two, tandem bicycle, tandem
bikini, two-piece
binder, ring-binder
binoculars, field glasses, opera glasses
birdhouse
boathouse
bobsled, bobsleigh, bob
bolo tie, bolo, bola tie, bola
bonnet, poke bonnet
bookcase
bookshop, bookstore, bookstall
bottlecap
bow
bow tie, bow-tie, bowtie
brass, memorial tablet, plaque
brassiere, bra, bandeau
breakwater, groin, groyne, mole, bulwark, seawall, jetty
breastplate, aegis, egis
broom
bucket, pail
buckle
bulletproof vest
bullet train, bullet
butcher shop, meat market
cab, hack, taxi, taxicab
caldron, cauldron
candle, taper, wax light
cannon
canoe
can opener, tin opener
cardigan
car mirror
carousel, carrousel, merry-go-round, roundabout, whirligig
carpenter's kit, tool kit
carton
car wheel
cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM
cassette
cassette player
castle
catamaran
CD player
cello, violoncello
cellular telephone, cellular phone, cellphone, cell, mobile phone
chain
chainlink fence
chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour
chain saw, chainsaw
chest
chiffonier, commode
chime, bell, gong
china cabinet, china closet
Christmas stocking
church, church building
cinema, movie theater, movie theatre, movie house, picture palace
cleaver, meat cleaver, chopper
cliff dwelling
cloak
clog, geta, patten, sabot
cocktail shaker
coffee mug
coffeepot
coil, spiral, volute, whorl, helix
combination lock
computer keyboard, keypad
confectionery, confectionary, candy store
container ship, containership, container vessel
convertible
corkscrew, bottle screw
cornet, horn, trumpet, trump
cowboy boot
cowboy hat, ten-gallon hat
cradle
crane
crash helmet
crate
crib, cot
Crock Pot
croquet ball
crutch
cuirass
dam, dike, dyke
desk
desktop computer
dial telephone, dial phone
diaper, nappy, napkin
digital clock
digital watch
dining table, board
dishrag, dishcloth
dishwasher, dish washer, dishwashing machine
disk brake, disc brake
dock, dockage, docking facility
dogsled, dog sled, dog sleigh
dome
doormat, welcome mat
drilling platform, offshore rig
drum, membranophone, tympan
drumstick
dumbbell
Dutch oven
electric fan, blower
electric guitar
electric locomotive
entertainment center
envelope
espresso maker
face powder
feather boa, boa
file, file cabinet, filing cabinet
fireboat
fire engine, fire truck
fire screen, fireguard
flagpole, flagstaff
flute, transverse flute
folding chair
football helmet
forklift
fountain
fountain pen
four-poster
freight car
French horn, horn
frying pan, frypan, skillet
fur coat
garbage truck, dustcart
gasmask, respirator, gas helmet
gas pump, gasoline pump, petrol pump, island dispenser
goblet
go-kart
golf ball
golfcart, golf cart
gondola
gong, tam-tam
gown
grand piano, grand
greenhouse, nursery, glasshouse
grille, radiator grille
grocery store, grocery, food market, market
guillotine
hair slide
hair spray
half track
hammer
hamper
hand blower, blow dryer, blow drier, hair dryer, hair drier
hand-held computer, hand-held microcomputer
handkerchief, hankie, hanky, hankey
hard disc, hard disk, fixed disk
harmonica, mouth organ, harp, mouth harp
harp
harvester, reaper
hatchet
holster
home theater, home theatre
honeycomb
hook, claw
hoopskirt, crinoline
horizontal bar, high bar
horse cart, horse-cart
hourglass
iPod
iron, smoothing iron
jack-o'-lantern
jean, blue jean, denim
jeep, landrover
jersey, T-shirt, tee shirt
jigsaw puzzle
jinrikisha, ricksha, rickshaw
joystick
kimono
knee pad
knot
lab coat, laboratory coat
ladle
lampshade, lamp shade
laptop, laptop computer
lawn mower, mower
lens cap, lens cover
letter opener, paper knife, paperknife
library
lifeboat
lighter, light, igniter, ignitor
limousine, limo
liner, ocean liner
lipstick, lip rouge
Loafer
lotion
loudspeaker, speaker, speaker unit, loudspeaker system, speaker system
loupe, jeweler's loupe
lumbermill, sawmill
magnetic compass
mailbag, postbag
mailbox, letter box
maillot
maillot, tank suit
manhole cover
maraca
marimba, xylophone
mask
matchstick
maypole
maze, labyrinth
measuring cup
medicine chest, medicine cabinet
megalith, megalithic structure
microphone, mike
microwave, microwave oven
military uniform
milk can
minibus
miniskirt, mini
minivan
missile
mitten
mixing bowl
mobile home, manufactured home
Model T
modem
monastery
monitor
moped
mortar
mortarboard
mosque
mosquito net
motor scooter, scooter
mountain bike, all-terrain bike, off-roader
mountain tent
mouse, computer mouse
mousetrap
moving van
muzzle
nail
neck brace
necklace
nipple
notebook, notebook computer
obelisk
oboe, hautboy, hautbois
ocarina, sweet potato
odometer, hodometer, mileometer, milometer
oil filter
organ, pipe organ
oscilloscope, scope, cathode-ray oscilloscope, CRO
overskirt
oxcart
oxygen mask
packet
paddle, boat paddle
paddlewheel, paddle wheel
padlock
paintbrush
pajama, pyjama, pj's, jammies
palace
panpipe, pandean pipe, syrinx
paper towel
parachute, chute
parallel bars, bars
park bench
parking meter
passenger car, coach, carriage
patio, terrace
pay-phone, pay-station
pedestal, plinth, footstall
pencil box, pencil case
pencil sharpener
perfume, essence
Petri dish
photocopier
pick, plectrum, plectron
pickelhaube
picket fence, paling
pickup, pickup truck
pier
piggy bank, penny bank
pill bottle
pillow
ping-pong ball
pinwheel
pirate, pirate ship
pitcher, ewer
plane, carpenter's plane, woodworking plane
planetarium
plastic bag
plate rack
plow, plough
plunger, plumber's helper
Polaroid camera, Polaroid Land camera
pole
police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria
poncho
pool table, billiard table, snooker table
pop bottle, soda bottle
pot, flowerpot
potter's wheel
power drill
prayer rug, prayer mat
printer
prison, prison house
projectile, missile
projector
puck, hockey puck
punching bag, punch bag, punching ball, punchball
purse
quill, quill pen
quilt, comforter, comfort, puff
racer, race car, racing car
racket, racquet
radiator
radio, wireless
radio telescope, radio reflector
rain barrel
recreational vehicle, RV, R.V.
reel
reflex camera
refrigerator, icebox
remote control, remote
restaurant, eating house, eating place, eatery
revolver, six-gun, six-shooter
rifle
rocking chair, rocker
rotisserie
rubber eraser, rubber, pencil eraser
rugby ball
rule, ruler
running shoe
safe
safety pin
saltshaker, salt shaker
sandal
sarong
sax, saxophone
scabbard
scale, weighing machine
school bus
schooner
scoreboard
screen, CRT screen
screw
screwdriver
seat belt, seatbelt
sewing machine
shield, buckler
shoe shop, shoe-shop, shoe store
shoji
shopping basket
shopping cart
shovel
shower cap
shower curtain
ski
ski mask
sleeping bag
slide rule, slipstick
sliding door
slot, one-armed bandit
snorkel
snowmobile
snowplow, snowplough
soap dispenser
soccer ball
sock
solar dish, solar collector, solar furnace
sombrero
soup bowl
space bar
space heater
space shuttle
spatula
speedboat
spider web, spider's web
spindle
sports car, sport car
spotlight, spot
stage
steam locomotive
steel arch bridge
steel drum
stethoscope
stole
stone wall
stopwatch, stop watch
stove
strainer
streetcar, tram, tramcar, trolley, trolley car
stretcher
studio couch, day bed
stupa, tope
submarine, pigboat, sub, U-boat
suit, suit of clothes
sundial
sunglass
sunglasses, dark glasses, shades
sunscreen, sunblock, sun blocker
suspension bridge
swab, swob, mop
sweatshirt
swimming trunks, bathing trunks
swing
switch, electric switch, electrical switch
syringe
table lamp
tank, army tank, armored combat vehicle, armoured combat vehicle
tape player
teapot
teddy, teddy bear
television, television system
tennis ball
thatch, thatched roof
theater curtain, theatre curtain
thimble
thresher, thrasher, threshing machine
throne
tile roof
toaster
tobacco shop, tobacconist shop, tobacconist
toilet seat
torch
totem pole
tow truck, tow car, wrecker
toyshop
tractor
trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi
tray
trench coat
tricycle, trike, velocipede
trimaran
tripod
triumphal arch
trolleybus, trolley coach, trackless trolley
trombone
tub, vat
turnstile
typewriter keyboard
umbrella
unicycle, monocycle
upright, upright piano
vacuum, vacuum cleaner
vase
vault
velvet
vending machine
vestment
viaduct
violin, fiddle
volleyball
waffle iron
wall clock
wallet, billfold, notecase, pocketbook
wardrobe, closet, press
warplane, military plane
washbasin, handbasin, washbowl, lavabo, wash-hand basin
washer, automatic washer, washing machine
water bottle
water jug
water tower
whiskey jug
whistle
wig
window screen
window shade
Windsor tie
wine bottle
wing
wok
wooden spoon
wool, woolen, woollen
worm fence, snake fence, snake-rail fence, Virginia fence
wreck
yawl
yurt
web site, website, internet site, site
comic book
crossword puzzle, crossword
street sign
traffic light, traffic signal, stoplight
book jacket, dust cover, dust jacket, dust wrapper
menu
plate
guacamole
consomme
hot pot, hotpot
trifle
ice cream, icecream
ice lolly, lolly, lollipop, popsicle
French loaf
bagel, beigel
pretzel
cheeseburger
hotdog, hot dog, red hot
mashed potato
head cabbage
broccoli
cauliflower
zucchini, courgette
spaghetti squash
acorn squash
butternut squash
cucumber, cuke
artichoke, globe artichoke
bell pepper
cardoon
mushroom
Granny Smith
strawberry
orange
lemon
fig
pineapple, ananas
banana
jackfruit, jak, jack
custard apple
pomegranate
hay
carbonara
chocolate sauce, chocolate syrup
dough
meat loaf, meatloaf
pizza, pizza pie
potpie
burrito
red wine
espresso
cup
eggnog
alp
bubble
cliff, drop, drop-off
coral reef
geyser
lakeside, lakeshore
promontory, headland, head, foreland
sandbar, sand bar
seashore, coast, seacoast, sea-coast
valley, vale
volcano
ballplayer, baseball player
groom, bridegroom
scuba diver
rapeseed
daisy
yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum
corn
acorn
hip, rose hip, rosehip
buckeye, horse chestnut, conker
coral fungus
agaric
gyromitra
stinkhorn, carrion fungus
earthstar
hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa
bolete
ear, spike, capitulum
toilet tissue, toilet paper, bathroom tissue'''.split("\n")
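# Sanity check (added for illustration): this is the standard ImageNet-1k label
# list, so a classifier's argmax index maps straight into it.
assert len(class_names) == 1000
# e.g. class_names[0] == 'tench, Tinca tinca'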
|
[
"gregory.dylan.brown@gmail.com"
] |
gregory.dylan.brown@gmail.com
|
de34fb581112a3b0083c5bc0911f81939b4366a2
|
69f887fa5d626313108a47df479b01ec707233f7
|
/Training_Grey_Full.py
|
32c5d4a5d63f45c6e0f3eb2e9ffebcd417b797a1
|
[] |
no_license
|
Yehia-Fahmy/InsulatorClassifier
|
f77d91766de81040a74d9ee446e3488a5458a355
|
cb49ce6f3410c5c9e847f7994140c6e16c4846a6
|
refs/heads/main
| 2023-03-19T02:55:07.329730
| 2021-03-04T18:29:02
| 2021-03-04T18:29:02
| 331,730,952
| 1
| 0
| null | 2021-03-09T19:37:02
| 2021-01-21T19:22:22
|
Python
|
UTF-8
|
Python
| false
| false
| 6,830
|
py
|
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import Model as M
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D
import cv2
import os
from matplotlib import pyplot as plt
import time as t
import pickle
from tensorflow.keras.utils import to_categorical
# functions
# function to convert the time into something readable
def convert_time(seconds):
seconds = seconds % (24 * 3600)
hour = seconds // 3600
seconds %= 3600
minutes = seconds // 60
seconds %= 60
return "%d:%02d:%02d" % (hour, minutes, seconds)
# function to load .pickle files
def load_data(file_name):
print(f'Loading {file_name}...')
file = pickle.load(open(file_name, 'rb'))
return file
# quick function to show the image
def show(img):
plt.imshow(img, cmap='gray')
plt.show()
# reshapes the images to the right size
def reshape_data(X, y):
print(f"Reshaping data...")
X = np.array(X) # ensuring that lists are instead arrays
X = X / 255
# triple_channel = np.array(X).reshape(-1, IMG_SIZE, IMG_SIZE, 3)
y = np.array(y)
y = to_categorical(y)
# print(f"triple_channel.shape(): {triple_channel.shape}, y.shape(): {y.shape}")
return X, y
# function to build the network
def build_network(X):
print("Building network...")
model = Sequential()
# First Layer
model.add(
Conv2D(512, kernel_size=(4, 4), activation='relu', input_shape=(X.shape[1:]), padding='same', strides=(2, 2)))
# Second Layer
model.add(Conv2D(256, kernel_size=(4, 4), activation='relu', padding='same', strides=(2, 2)))
# Third Layer
model.add(Conv2D(128, kernel_size=(4, 4), activation='relu', padding='same', strides=(2, 2)))
# Fourth Layer
model.add(Conv2D(128, kernel_size=(4, 4), activation='relu', padding='same', strides=(2, 2)))
    # Fifth Layer
    model.add(Conv2D(128, kernel_size=(4, 4), activation='relu', padding='same', strides=(2, 2)))
    # Sixth Layer
    model.add(Conv2D(128, kernel_size=(4, 4), activation='relu', padding='same', strides=(2, 2)))
    # Seventh Layer
    model.add(Conv2D(256, kernel_size=(4, 4), activation='relu', padding='same', strides=(2, 2)))
    # Eighth Layer
    model.add(Conv2D(512, kernel_size=(4, 4), activation='relu', padding='same', strides=(2, 2)))
# Final Layer
model.add(Flatten())
model.add(Dense(7, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adamax',
metrics=['accuracy'])
# All Done
model.summary()
return model
# function to train the model
def train_model(model, imgs, labels):
print("Training model...")
model.fit(imgs, labels, epochs=NUM_EPOCHS, validation_split=0, batch_size=BATCH_SIZE)
return model
# function to build a working model
def build_Mark_4_40(X):
print("Building Mark 4.40...")
model = Sequential()
# First Layer
model.add(
Conv2D(512, kernel_size=(4, 4), activation='relu', input_shape=(X.shape[1:]), padding='same', strides=(2, 2)))
# Second Layer
model.add(Conv2D(256, kernel_size=(4, 4), activation='relu', padding='same', strides=(2, 2)))
# Third Layer
model.add(Conv2D(128, kernel_size=(4, 4), activation='relu', padding='same', strides=(2, 2)))
# Fourth Layer
model.add(Conv2D(128, kernel_size=(4, 4), activation='relu', padding='same', strides=(2, 2)))
# Fifth Layer
model.add(Conv2D(256, kernel_size=(4, 4), activation='relu', padding='same', strides=(2, 2)))
# Sixth Layer
model.add(Conv2D(512, kernel_size=(4, 4), activation='relu', padding='same', strides=(2, 2)))
# Final Layer
model.add(Flatten())
model.add(Dense(7, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adamax',
metrics=['accuracy'])
# All Done
model.summary()
return model
def build_test_model(X):
print('Building Test Model...')
model = Sequential()
model.add(Dense(10, activation='relu', input_shape=(X.shape[1:])))
model.add(Dense(10, activation='relu'))
# Final Layer
model.add(Flatten())
model.add(Dense(7, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adamax',
metrics=['accuracy'])
return model
# function to convert from tf model to tf.lite for mobile application
def convert_model(model):
tf_lite_converter = tf.lite.TFLiteConverter.from_keras_model(model)
new_model = tf_lite_converter.convert()
return new_model
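# Sketch of running the converted TF Lite model (illustrative; `sample_batch`
# is a hypothetical float32 array shaped like the training images):
#
#   interpreter = tf.lite.Interpreter(model_content=tf_lite_model)
#   interpreter.allocate_tensors()
#   inp = interpreter.get_input_details()[0]
#   out = interpreter.get_output_details()[0]
#   interpreter.set_tensor(inp['index'], sample_batch)
#   interpreter.invoke()
#   probs = interpreter.get_tensor(out['index'])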
# gets size of file
def get_file_size(file_path):
size = os.path.getsize(file_path)
return size
# converts bytes for readability
def convert_bytes(size, unit=None):
if unit == "KB":
return 'File size: ' + str(round(size / 1024, 3)) + ' Kilobytes'
elif unit == "MB":
return 'File size: ' + str(round(size / (1024 * 1024), 3)) + ' Megabytes'
else:
return 'File size: ' + str(size) + ' bytes'
# global variables
CATEGORIES = ['Class (1)', 'Class (2)', 'Class (3)', 'Class (4)', 'Class (5)', 'Class (6)', 'Class (7)']
DATA = []
TESTING_DATA = []
IMG_SIZE = 481
NUM_EPOCHS = 4
BATCH_SIZE = 3
KERAS_MODEL_NAME = 'Full_Size_Model.h5'
TF_LITE_MODEL_NAME = 'TF_Lite_Model.tflite'
# Code to run
start_time = t.time()
print("Starting...")
# load in data
training_images = load_data('Images.pickle')
training_labels = load_data('Labels.pickle')
testing_images = load_data('Testing_Images.pickle')
testing_labels = load_data('Testing_Labels.pickle')
# reshape the data
training_images, training_labels = reshape_data(training_images, training_labels)
testing_images, testing_labels = reshape_data(testing_images, testing_labels)
# build and train the model
our_model = build_network(training_images)
trained_model = train_model(our_model, training_images, training_labels)
# save the model
trained_model.save(KERAS_MODEL_NAME)
full_bytes = convert_bytes(get_file_size(KERAS_MODEL_NAME), "MB")
# convert the model
tf_lite_model = convert_model(trained_model)
# save the tf.lite model
with open(TF_LITE_MODEL_NAME, "wb") as f:
    f.write(tf_lite_model)
lite_bytes = convert_bytes(get_file_size(TF_LITE_MODEL_NAME), "MB")
# evaluate the model
loss, acc = trained_model.evaluate(testing_images, testing_labels, batch_size=BATCH_SIZE, use_multiprocessing=True)
acc = round(acc * 100, 2)
print(f'accuracy: {acc}%')
# prints the elapsed time for convenience
total_time = t.time() - start_time
total_time = round(total_time, 2)
total_time = convert_time(total_time)
# final message
print(f"Finished in: {total_time}")
print('Success!')
|
[
"yehia.fahmy@uwaterloo.ca"
] |
yehia.fahmy@uwaterloo.ca
|
587bbb77e9029ea072032f26532e27ddaa9478ba
|
7103e8c2f689d0d23254d98b6cce65606fbd474a
|
/solutions/93.py
|
f18fea6834bd06302bd2415eb4b1338abd97cfd4
|
[] |
no_license
|
DanqiChang/leetcode-notes
|
18bb10f329d214b805568d4c7270042ab97e3c1f
|
b81dd2bc5839e60dfa5efa712ed00e0876ca173e
|
refs/heads/master
| 2020-03-28T20:25:10.612938
| 2018-09-17T04:32:42
| 2018-09-17T04:32:42
| 149,068,156
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,008
|
py
|
"""
{
"difficulty": "medium",
"link": "https://leetcode.com/problems/restore-ip-addresses/description/",
"category": ["DFS"],
"tags": ["backtracking"],
"questions": []
}
"""
"""
ATTENTION
- 在终止条件的判断上,要考虑s == '' 和 len(currIP)>=4 不同时满足,就需要终止!!
- 开头为0的字段,数字只能是0!!这是特殊情况
"""
def DFS(s, currIP, st):
if len(currIP)>=4:
if s=="":
st.add('.'.join(map(str, currIP)))
return
if s == '':
return
if s[0] == "0":
DFS(s[1:], currIP+[0], st)
else:
for i in range(1,min(4, len(s)+1)):
if int(s[:i]) < 256:
DFS(s[i:], currIP+[int(s[:i])], st)
class Solution:
def restoreIpAddresses(self, s):
"""
:type s: str
:rtype: List[str]
"""
if s:
st = set()
currIP = []
DFS(s, currIP, st)
return list(st)
return []
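# Illustrative check (added, not part of the original solution): the classic
# input "25525511135" yields exactly these two valid addresses.
if __name__ == '__main__':
    print(sorted(Solution().restoreIpAddresses("25525511135")))
    # -> ['255.255.11.135', '255.255.111.35']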
|
[
"huangyc96@gmail.com"
] |
huangyc96@gmail.com
|
861ea312f72e72f19902f78c2ce9b346389664e1
|
53e1ba62ef56adbabd3c898ecc0297c81ba12f6e
|
/advisor/migrations/0013_auto_20190421_2057.py
|
dca7303bf91664e29e7ed1beb9735bcbdb6acaff
|
[] |
no_license
|
james-schneider/django-capstone-project
|
5b486bf4388093883800df5213b56cb18a63f0ef
|
c674eb004a493597c95f4d8182da55499f75550e
|
refs/heads/master
| 2020-04-23T07:49:38.376634
| 2019-04-26T22:08:21
| 2019-04-26T22:08:21
| 171,017,758
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 434
|
py
|
# Generated by Django 2.1.5 on 2019-04-22 00:57
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('advisor', '0012_auto_20190420_0717'),
]
operations = [
migrations.RemoveField(
model_name='note',
name='published_date',
),
migrations.RemoveField(
model_name='note',
name='title',
),
]
|
[
"james.rice.schneider@gmail.com"
] |
james.rice.schneider@gmail.com
|
98d2f8cb47f6b6b3dc8f26ae6741b379597f2bde
|
3f2d38a2345babbe6d622fbfb49884ad9583183c
|
/scripts/lib/ImageList.py
|
e6de9bef672364b36398f6b7bca1024fb0a82ae0
|
[
"LicenseRef-scancode-proprietary-license",
"MIT"
] |
permissive
|
skywalkerisnull/ImageAnalysis
|
d857354c5d8841919c7bee6e91ef07b9c06b2f2a
|
5bbb52c420e31792b5b981d242bd357a571e2a46
|
refs/heads/master
| 2020-06-27T11:27:57.357836
| 2019-08-07T04:14:05
| 2019-08-07T04:14:05
| 199,941,341
| 1
| 0
|
MIT
| 2019-08-07T04:14:06
| 2019-07-31T23:14:02
|
Python
|
UTF-8
|
Python
| false
| false
| 2,977
|
py
|
# a collection of non-class routines that perform operations on a list
# of images
import math
from . import Image
# return the bounds of the rectangle spanned by the provided list of
# images
def coverage(image_list):
    xmin = None; xmax = None; ymin = None; ymax = None
    for image in image_list:
        (x0, y0, x1, y1) = image.coverage()
        if xmin is None or x0 < xmin:
            xmin = x0
        if ymin is None or y0 < ymin:
            ymin = y0
        if xmax is None or x1 > xmax:
            xmax = x1
        if ymax is None or y1 > ymax:
            ymax = y1
print("List area coverage: (%.2f %.2f) (%.2f %.2f)" \
% (xmin, ymin, xmax, ymax))
return (xmin, ymin, xmax, ymax)
# return True/False if the given rectangles overlap
def rectanglesOverlap(r1, r2):
(ax0, ay0, ax1, ay1) = r1
(bx0, by0, bx1, by1) = r2
if ax0 <= bx1 and ax1 >= bx0 and ay0 <= by1 and ay1 >= by0:
return True
else:
return False
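# e.g. (added) rectanglesOverlap((0, 0, 2, 2), (1, 1, 3, 3)) -> True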
# return a list of images that intersect the given rectangle
def getImagesCoveringRectangle(image_list, r2, only_placed=False):
# build list of images covering target point
coverage_list = []
for image in image_list:
r1 = image.coverage()
if only_placed and not image.placed:
continue
if rectanglesOverlap(r1, r2):
coverage_list.append(image)
return coverage_list
# return a list of images that cover the given point within 'pad'
# or are within 'pad' distance of touching the point.
def getImagesCoveringPoint(image_list, x=0.0, y=0.0, pad=20.0, only_placed=False):
# build list of images covering target point
coverage_list = []
bx0 = x-pad
by0 = y-pad
bx1 = x+pad
by1 = y+pad
r2 = (bx0, by0, bx1, by1)
coverage_list = getImagesCoveringRectangle(image_list, r2, only_placed)
name_list = []
for image in coverage_list:
name_list.append(image.name)
print("Images covering point (%.2f %.2f): %s" % (x, y, str(name_list)))
return coverage_list
# x/y are in meters; ref_lon/ref_lat in degrees.  (These two functions were
# written as methods — they referenced self at module scope — so they are
# fixed here to take the reference point explicitly, matching cart2wgs84 below.)
def x2lon(x, ref_lon, ref_lat):
    nm2m = 1852.0
    x_nm = x / nm2m
    factor = math.cos(ref_lat*math.pi/180.0)
    x_deg = (x_nm / 60.0) / factor
    return x_deg + ref_lon
def y2lat(y, ref_lat):
    nm2m = 1852.0
    y_nm = y / nm2m
    y_deg = y_nm / 60.0
    return y_deg + ref_lat
# x, y are in meters ref_lon/lat in degrees
def cart2wgs84( x, y, ref_lon, ref_lat ):
nm2m = 1852.0
x_nm = x / nm2m
y_nm = y / nm2m
factor = math.cos(ref_lat*math.pi/180.0)
x_deg = (x_nm / 60.0) / factor + ref_lon
y_deg = y_nm / 60.0 + ref_lat
return (x_deg, y_deg)
# x, y are in meters ref_lon/lat in degrees
def wgs842cart( lon_deg, lat_deg, ref_lon, ref_lat ):
nm2m = 1852.0
x_deg = lon_deg - ref_lon
y_deg = lat_deg - ref_lat
factor = math.cos(ref_lat*math.pi/180.0)
x_nm = x_deg * 60.0 * factor
y_nm = y_deg * 60.0
x_m = x_nm * nm2m
y_m = y_nm * nm2m
return (x_m, y_m)
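# Illustrative round-trip check (added; the reference point is arbitrary):
# converting a local offset to lon/lat and back recovers the input to within
# floating-point error.
if __name__ == '__main__':
    ref_lon, ref_lat = -93.2, 45.1
    lon, lat = cart2wgs84(1000.0, 2000.0, ref_lon, ref_lat)
    x_m, y_m = wgs842cart(lon, lat, ref_lon, ref_lat)
    print("round trip: (%.6f, %.6f) m" % (x_m, y_m))  # ~ (1000.0, 2000.0)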
|
[
"curtolson@flightgear.org"
] |
curtolson@flightgear.org
|
a95d9a58c88fc854491b9cbe1b33bb9529b8fe5f
|
174915d8d8ca488a4f6976035575676ff74424a4
|
/Cassava Leaf Disease Classification/histroy/efficientnet_learn.py
|
07980df4014f96ddd406f61bbeb757ee5cc55f3d
|
[] |
no_license
|
starvapour/kaggle_learn_note
|
579f09cef83a27d386364fb42ec9099f4151dbd3
|
c6b07b3f6a89d0c585ddf2a4281eeb6c70edfec6
|
refs/heads/master
| 2023-03-29T22:09:28.213665
| 2021-04-09T10:22:13
| 2021-04-09T10:22:13
| 324,275,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,098
|
py
|
import cv2
import torch
import os
import pandas as pd
import numpy as np
import torch.nn as nn
import torch.optim as toptim
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, models
from PIL import Image
#import pydicom
import time
from efficientnet_pytorch import EfficientNet
from LabelSmoothingLoss import LabelSmoothingLoss
from album_transform import get_train_transforms,get_test_transforms
# ------------------------------------config------------------------------------
train_csv_path = "train.csv"
train_image = "train_images/"
preprocessed_image = "train_preprocessed_images/"
# do preprocessing or not
do_preprocessing = False
# continue train from old model
from_old_model = True
# Train config
# learning rate
learning_rate = 1e-4
# max epoch
epochs = 100
# batch size
batchSize = 16
'''
# model save each step
model_save_step = 10
'''
# Use how many data of the dataset for val
proportion_of_val_dataset = 0.3
# output path
log_name = "log.txt"
# record best val acc with (epoch_num, last_best_acc)
best_val_acc = (-1, 0)
# ------------------------------------preprocess------------------------------------
def preprocess(img_name, ori_path, target_path):
img = cv2.imread(ori_path + img_name, 1)
#img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = cv2.resize(img, (256, 256))
cv2.imwrite(target_path + img_name, img)
# ------------------------------------dataset------------------------------------
# create dataset
class Leaf_train_Dataset(Dataset):
def __init__(self, data_csv, img_path, transform):
# get lists
self.csv = data_csv
self.img_path = img_path
self.transform = transform
def __getitem__(self, index):
image_id = self.csv.loc[index, 'image_id']
label = self.csv.loc[index, 'label']
img = cv2.imread(self.img_path + image_id)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# img = Image.open(self.img_path + image_id)
# img = np.array(img)
img = self.transform(image=img)['image']
#img = self.transform(img)
return img, label
def __len__(self):
return len(self.csv)
# ------------------------------------train------------------------------------
def train(net, train_loader, criterion, optimizer, epoch, device, log):
# start train
runningLoss = 0
loss_count = 0
batch_num = len(train_loader)
for index, (imgs, labels) in enumerate(train_loader):
# send data to device
imgs, labels = imgs.to(device), labels.to(device)
# zero grad
optimizer.zero_grad()
# forward
output = net(imgs)
# calculate loss
#output = output.reshape(target.shape)
loss = criterion(output, labels)
runningLoss += loss.item()
loss_count += 1
# calculate gradients.
loss.backward()
# reduce loss
optimizer.step()
# print loss
# print(index)
if (index + 1) % 100 == 0:
print("Epoch: %2d, Batch: %4d / %4d, Loss: %.3f" % (epoch + 1, index + 1, batch_num, loss.item()))
avg_loss = runningLoss / loss_count
print("For Epoch: %2d, Average Loss: %.3f" % (epoch + 1, avg_loss))
log.write("For Epoch: %2d, Average Loss: %.3f" % (epoch + 1, avg_loss) + "\n")
# ------------------------------------val------------------------------------
def val(net, val_loader, criterion, optimizer, epoch, device, log, train_start):
# val after each epoch
net.eval()
with torch.no_grad():
total_len = 0
correct_len = 0
global best_val_acc
for index, (imgs, labels) in enumerate(val_loader):
imgs, labels = imgs.to(device), labels.to(device)
output = net(imgs)
pred = output.argmax(dim=1, keepdim=True).reshape(labels.shape)
assessment = torch.eq(pred, labels)
total_len += len(pred)
correct_len += int(assessment.sum())
accuracy = correct_len / total_len
print("Start val:")
print("accuracy:", accuracy)
log.write("accuracy: " + str(accuracy) + "\n")
# if have better acc, save model, only keep the best model in epochs
if accuracy > best_val_acc[1]:
# save model
best_val_acc = (epoch+1, accuracy)
torch.save(net.state_dict(), "save_model.pth")
print("Model saved in epoch "+str(epoch+1)+", acc: "+str(accuracy)+".")
log.write("Model saved in epoch "+str(epoch+1)+", acc: "+str(accuracy)+".\n")
# print time pass after each epoch
current_time = time.time()
pass_time = int(current_time - train_start)
time_string = str(pass_time // 3600) + " hours, " + str((pass_time % 3600) // 60) + " minutes, " + str(
pass_time % 60) + " seconds."
print("Time pass:", time_string)
print()
log.write("Time pass: " + time_string + "\n\n")
# ------------------------------------main------------------------------------
# main
def main():
# if GPU is availale, use GPU
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print("Use " + str(device))
# create dataset
original_csv_data = pd.read_csv(train_csv_path)
print("length of original dataset is", len(original_csv_data))
log.write("length of original dataset is " + str(len(original_csv_data)) + "\n")
# preprocessing steps
# train_transform = transforms.Compose([
# # transforms.Resize(256),
# transforms.RandomCrop(224),
# transforms.RandomHorizontalFlip(),
# transforms.RandomVerticalFlip(),
# transforms.ToTensor(),
# #transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
# transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
# ])
# test_transform = transforms.Compose([
# transforms.Resize(224),
# transforms.ToTensor(),
# transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
# ])
if do_preprocessing == True:
if not os.path.exists(preprocessed_image):
os.makedirs(preprocessed_image)
print("Start pre-processing:")
log.write("Start pre-processing:\n")
length = len(original_csv_data['image_id'])
count = 0
for img_name in original_csv_data['image_id']:
if count % 1000 == 0:
print("preprocessing "+str(count)+"/"+str(length))
preprocess(img_name, train_image, preprocessed_image)
count += 1
print("Start random split:")
# split dataset, get train and val
train_len = int((1 - proportion_of_val_dataset) * len(original_csv_data))
train_csv = original_csv_data.iloc[:train_len]
val_csv = original_csv_data.iloc[train_len:]
val_csv = val_csv.reset_index(drop=True)
train_dataset = Leaf_train_Dataset(train_csv, preprocessed_image, transform=get_train_transforms())
val_dataset = Leaf_train_Dataset(val_csv, preprocessed_image, transform=get_test_transforms())
print("length of train dataset is", len(train_dataset))
log.write("length of train dataset is " + str(len(train_dataset)) + "\n")
print("length of val dataset is", len(val_dataset))
log.write("length of val dataset is " + str(len(val_dataset)) + "\n\n")
print()
print("Start training:")
# create dataloader
train_loader = DataLoader(dataset=train_dataset, batch_size=batchSize, shuffle=True)
val_loader = DataLoader(dataset=val_dataset, batch_size=batchSize, shuffle=True)
# net model
# if true, continue train old model
if from_old_model:
        net = EfficientNet.from_name('efficientnet-b5')
        # note: assigning to out_features alone does not resize the layer;
        # replace the classifier head so it actually has 10 outputs
        net._fc = nn.Linear(net._fc.in_features, 10)
        net.load_state_dict(torch.load("save_model.pth"))
        net = net.to(device)
    else:
        net = EfficientNet.from_pretrained('efficientnet-b5')
        net._fc = nn.Linear(net._fc.in_features, 10)
        net = net.to(device)
# loss function
# criterion = nn.BCEWithLogitsLoss()
# criterion = nn.NLLLoss()
    # combines nn.LogSoftmax() and nn.NLLLoss() in a single function
criterion = nn.CrossEntropyLoss()
#criterion = LabelSmoothingLoss(classes=10, smoothing=0.1)
# create optimizer
#optimizer = toptim.SGD(net.parameters(), lr=learning_rate)
optimizer = toptim.Adam(net.parameters(), lr=learning_rate)
train_start = time.time()
for epoch in range(epochs):
'''
# change lr by epoch
adjust_learning_rate(optimizer, epoch)
'''
# start train
train(net, train_loader, criterion, optimizer, epoch, device, log)
# start val
val(net, val_loader, criterion, optimizer, epoch, device, log, train_start)
print("Final saved model is epoch "+str(best_val_acc[0])+", acc: "+str(best_val_acc[1])+".")
log.write("Final saved model is epoch "+str(best_val_acc[0])+", acc: "+str(best_val_acc[1])+"\n")
print("Done.")
log.write("Done.\n")
if __name__ == '__main__':
with open(log_name, 'w') as log:
main()
|
[
"starvapour@126.com"
] |
starvapour@126.com
|
601ea1ab433a96b5b1e17a5264eeec8063e9a06f
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/126/usersdata/239/29610/submittedfiles/ap2.py
|
13b48df715213541186e4b6bb3009ea8e7d8cb96
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,134
|
py
|
# -*- coding: utf-8 -*-
print("Digite a: ")
print("Digite b: ")
print("Digite c: ")
print("Digite d: ")
print("")
if a>b>c>d:
print(a)
print(d)
if a>b>d>c:
print(a)
print(c)
if a>d>c>b:
print(a)
print(b)
if a>d>b>c:
print(a)
print(c)
if a>c>b>d:
print(a)
print(d)
if a>c>d>b:
print(a)
    print(b)  # end of first part
if b>a>c>d:
print(b)
print(d)
if b>a>d>c:
print(b)
print(c)
if b>d>c>a:
print(b)
print(a)
if b>d>a>c:
print(b)
print(c)
if b>c>a>d:
print(b)
print(d)
if b>c>d>a:
print(b)
    print(a)  # end of second part
if c>b>a>d:
print(c)
print(d)
if c>b>d>a:
print(c)
print(a)
if c>d>a>b:
print(c)
print(b)
if c>d>b>a:
print(c)
print(a)
if c>a>b>d:
print(c)
print(d)
if c>a>d>b:
print(c)
    print(b)  # end of third part
if d>b>c>a:
print(d)
print(a)
if d>b>a>c:
print(d)
print(c)
if d>a>c>b:
print(d)
print(b)
if d>a>b>c:
print(d)
print(c)
if d>c>b>a:
print(d)
print(a)
if d>c>a>b:
print(d)
    print(b)  # end of fourth part
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
9f8f48c466269ce5097a8292eb4241d9745535ad
|
c730f09a8d96e1ec84c2cabbc33e6489befae09a
|
/tutorial/polls/forms.py
|
dab414d8e8903a7ff7697bcc4d9bddb4f04cd48d
|
[] |
no_license
|
sosodoit/django
|
669a4b8abb27d1b4d062ac8063891dee3666108f
|
f0cdee32dd069b7c0ac7c417ac55aa4f063bdb1f
|
refs/heads/master
| 2023-06-01T01:26:13.786788
| 2021-06-30T08:28:39
| 2021-06-30T08:28:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 176
|
py
|
from django import forms
from .models import Question
class QuestionForm(forms.ModelForm):
class Meta:
model = Question
fields = ['subject', 'content']
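# A minimal usage sketch (added; the view and template names are assumptions,
# not taken from this repo):
#
# from django.shortcuts import redirect, render
#
# def question_create(request):
#     form = QuestionForm(request.POST or None)
#     if request.method == 'POST' and form.is_valid():
#         form.save()
#         return redirect('index')  # hypothetical URL name
#     return render(request, 'question_form.html', {'form': form})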
|
[
"sohuig@naver.com"
] |
sohuig@naver.com
|
223177a60a2000bba8fd64907a021692174b2909
|
7cdcd244cb576f9bb689d6fef9c3428d67364274
|
/tracker/migrations/0009_load_countries.py
|
f087eea5f6506e90bbf57d61c8a6b543f5a01f6f
|
[
"Apache-2.0"
] |
permissive
|
GamesDoneQuick/donation-tracker
|
9728579531961eb556d7acd4564c6fe4deaffbff
|
4d231bae7d00ee990ca9086400d926da59b0598d
|
refs/heads/master
| 2023-08-31T21:37:41.015548
| 2023-08-22T22:03:47
| 2023-08-22T22:03:47
| 44,652,980
| 48
| 39
|
Apache-2.0
| 2023-09-11T14:10:57
| 2015-10-21T04:38:00
|
Python
|
UTF-8
|
Python
| false
| false
| 1,168
|
py
|
# Generated by Django 2.2.16 on 2020-09-23 23:03
import logging
import os
from django.core import serializers
from django.db import migrations
logger = logging.getLogger(__name__)
def load_countries(apps, schema_editor):
# This is IMPORTANT, make sure it gets moved the next time you squash migrations
Country = apps.get_model('tracker', 'Country')
count = 0
path = os.path.normpath(os.path.join(os.path.dirname(__file__), '../fixtures/countries.json'))
with open(path) as data:
for country in serializers.deserialize('json', data.read()):
created = Country.objects.get_or_create(alpha2=country.object.alpha2, defaults=dict(
name=country.object.name,
alpha3=country.object.alpha3,
numeric=country.object.numeric,
))[1]
if created:
count +=1
logger.info(f'Loaded {count} fixture(s) for country')
def no_op(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('tracker', '0008_donor_cache_tweaks'),
]
operations = [
migrations.RunPython(load_countries, no_op)
]
|
[
"noreply@github.com"
] |
GamesDoneQuick.noreply@github.com
|
d49b910ed24ad32cc0223bb757ba33e4a8b5979a
|
a1ebdae1587f27ad4b294d010b1a55479cbf8ed8
|
/unity_lens_noosfero/unity_lens_noosferoconfig.py
|
8ae26470f3f24ecab503e891648498f48a2a70a6
|
[
"Apache-2.0"
] |
permissive
|
rcnetto/unity-lens-noosfero
|
7ffc5364f0d77dc48b1e55d32e2304a8c33bacbb
|
b74ca7367a7fc04c671f1060de8dd3b4e5cbd818
|
refs/heads/master
| 2016-09-05T19:50:59.612947
| 2013-09-23T12:34:05
| 2013-09-23T12:34:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,687
|
py
|
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# This file is in the public domain
### END LICENSE
# THIS IS Noosfero CONFIGURATION FILE
# YOU CAN PUT THERE SOME GLOBAL VALUE
# Do not touch unless you know what you're doing.
# you're warned :)
__all__ = [
'project_path_not_found',
'get_data_file',
'get_data_path',
]
# Where your project will look for your data (for instance, images and ui
# files). By default, this is ../data, relative your trunk layout
__unity_lens_noosfero_data_directory__ = '../data/'
__license__ = ''
__version__ = 'VERSION'
import os
import gettext
from gettext import gettext as _
gettext.textdomain('unity-lens-noosfero')
class project_path_not_found(Exception):
"""Raised when we can't find the project directory."""
def get_data_file(*path_segments):
"""Get the full path to a data file.
Returns the path to a file underneath the data directory (as defined by
`get_data_path`). Equivalent to os.path.join(get_data_path(),
*path_segments).
"""
return os.path.join(get_data_path(), *path_segments)
def get_data_path():
"""Retrieve unity-lens-noosfero data path
This path is by default <unity_lens_noosfero_lib_path>/../data/ in trunk
and /usr/share/unity-lens-noosfero in an installed version but this path
is specified at installation time.
"""
# Get pathname absolute or relative.
path = os.path.join(
os.path.dirname(__file__), __unity_lens_noosfero_data_directory__)
abs_data_path = os.path.abspath(path)
if not os.path.exists(abs_data_path):
raise project_path_not_found
return abs_data_path
|
[
"ranulfo.netto@serpro.gov.br"
] |
ranulfo.netto@serpro.gov.br
|
1544668fa94eab171550471e25787776c8f252d9
|
756db20d1841e6ae94f7bbb542fb29482295bd71
|
/project3.py
|
fc848dd6bb2bdc3099cb9df9ef3edea37a415b27
|
[] |
no_license
|
mandeep-codetresure/python-study1
|
46bbc326aa6b1719e381b551d01680c2d5ee56d5
|
071a689750baada7753615f0f36a46c225f6d10d
|
refs/heads/main
| 2023-06-14T00:42:46.323136
| 2021-07-03T18:07:55
| 2021-07-03T18:07:55
| 382,659,937
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 753
|
py
|
class Library:
def __init__(self, listOfBooks):
self.books = listOfBooks
def displayAvailableBooks(self):
print("Available Books are : ")
for i in self.books:
print(f"* {i}")
    def borrowBooks(self, bookname):
        if bookname in self.books:
            print(f"The book {bookname} has been issued, please keep it safe and return it in time")
            self.books.remove(bookname)
    def returnBook(self, bookname):
        self.books.append(bookname)
        print(f"Thanks for returning {bookname}.")
class Student:
pass
if __name__== "__main__":
centralLibrary = Library(["DevildBreath", "Python", "EvilDead", "2020", 'Java'])
centralLibrary.displayAvailableBooks()
|
[
"noreply@github.com"
] |
mandeep-codetresure.noreply@github.com
|
6d5b012250a4f93f41d076b89db26dc8dd1160fb
|
f5f6e6110b80ea3c35f84c2deabb4b0fd095712d
|
/HW1/p1.py
|
3499a7c5d1983e6cd5d9c498ed3b67b2cd76a48f
|
[] |
no_license
|
MortalHappiness/LA2019Fall
|
eb29ab9a7017cbf99fe8fe7575c4dde9e5bcc45d
|
16532248bec44a5225a0508d62fd8f8cf40c13bb
|
refs/heads/master
| 2022-03-28T15:20:48.669909
| 2019-12-15T06:30:59
| 2019-12-15T06:30:59
| 215,225,407
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,441
|
py
|
import numpy as np
def p1_has_cycle(sets):
# TODO
# return True if the graph has cycle; return False if not
'''
HINT: You can `print(sets)` to show what the matrix looks like
If we have a directed graph with 2->3 4->1 3->5 5->2 0->1
0 1 2 3 4 5
0 0 0 -1 1 0 0
1 0 1 0 0 -1 0
2 0 0 0 -1 0 1
3 0 0 1 0 0 -1
4 -1 1 0 0 0 0
The size of the matrix is (5,6)
'''
matrix = np.array(sets)
n_row, n_col = matrix.shape
for _ in range(n_row):
# find the position of 1 in the first row
col_idx = -1
for i in range(n_col):
if matrix[0][i] == 1:
col_idx = i
break
if col_idx == -1:
raise Exception("Something wrong!")
# do row additions
new_rows = list()
for row in matrix[1:]:
if row[col_idx] == -1:
new_row = matrix[0] + row
# check whether the new row is all zeros
if not np.any(new_row):
return True
new_rows.append(new_row)
# concatenate new rows with original matrix
if new_rows:
new_rows = np.array(new_rows)
matrix = np.concatenate((matrix, new_rows), axis = 0)
# discard the first row
matrix = matrix[1:]
return False
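# Illustrative check (added): the edge set from the docstring hint
# (2->3, 4->1, 3->5, 5->2, 0->1) contains the cycle 2 -> 3 -> 5 -> 2.
if __name__ == '__main__':
    sets = [[0, 0, -1, 1, 0, 0],
            [0, 1, 0, 0, -1, 0],
            [0, 0, 0, -1, 0, 1],
            [0, 0, 1, 0, 0, -1],
            [-1, 1, 0, 0, 0, 0]]
    print(p1_has_cycle(sets))  # -> True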
|
[
"b07901069@ntu.edu.tw"
] |
b07901069@ntu.edu.tw
|
4093a500a69665c1d2f40a18d22e551f963d6769
|
7c8392c8ea92475361fdfa235b5b9ba8bfecf48b
|
/venv/bin/python-config
|
cbd55499759f7eda1ed1cdf66b676bd2c6570df0
|
[] |
no_license
|
zhengdu0316/recsys_zed
|
4df3d965c31c74d99111708774cc63adba394b24
|
ee146ede1dd8bff257404eb8ebd4e29de5e05201
|
refs/heads/master
| 2020-03-18T21:49:44.067219
| 2018-05-30T10:08:34
| 2018-05-30T10:08:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,364
|
#!/Users/zhengdu/src/pycharm/recsys_zhengdu/venv/bin/python
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'help']
if sys.version_info >= (3, 2):
valid_opts.insert(-1, 'extension-suffix')
valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
valid_opts.append('configdir')
def exit_with_usage(code=1):
sys.stderr.write("Usage: {0} [{1}]\n".format(
sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
exit_with_usage(code=0)
for opt in opt_flags:
if opt == '--prefix':
print(sysconfig.get_config_var('prefix'))
elif opt == '--exec-prefix':
print(sysconfig.get_config_var('exec_prefix'))
elif opt in ('--includes', '--cflags'):
flags = ['-I' + sysconfig.get_path('include'),
'-I' + sysconfig.get_path('platinclude')]
if opt == '--cflags':
flags.extend(getvar('CFLAGS').split())
print(' '.join(flags))
elif opt in ('--libs', '--ldflags'):
abiflags = getattr(sys, 'abiflags', '')
libs = ['-lpython' + pyver + abiflags]
libs += getvar('LIBS').split()
libs += getvar('SYSLIBS').split()
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == '--ldflags':
if not getvar('Py_ENABLE_SHARED'):
libs.insert(0, '-L' + getvar('LIBPL'))
if not getvar('PYTHONFRAMEWORK'):
libs.extend(getvar('LINKFORSHARED').split())
print(' '.join(libs))
elif opt == '--extension-suffix':
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix is None:
ext_suffix = sysconfig.get_config_var('SO')
print(ext_suffix)
elif opt == '--abiflags':
if not getattr(sys, 'abiflags', None):
exit_with_usage()
print(sys.abiflags)
elif opt == '--configdir':
print(sysconfig.get_config_var('LIBPL'))
|
[
"zhengdu1992@163.com"
] |
zhengdu1992@163.com
|
|
8e8631e58ab2cdc2b0d49b823e27ee629c28aa57
|
13ebadfb52f076530af872c43c41839945b4211f
|
/random.py.py
|
36422cebd8def6539422a0060088fe0baf5dd641
|
[] |
no_license
|
vikakiranaa/vika
|
85dad7b9b6afae4e5d86109d1f9506eb3f037ee7
|
3076b049da1b38e9a1beb428e5d789ccb7a5ca96
|
refs/heads/master
| 2020-03-08T07:50:51.356403
| 2018-04-16T05:40:48
| 2018-04-16T05:40:48
| 128,005,043
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 146
|
py
|
import random
jumlah = int(input("enter n: "))
for _ in range(jumlah):
    value = random.uniform(0.0, 0.5)
    print(value)
print("done")
|
[
"noreply@github.com"
] |
vikakiranaa.noreply@github.com
|
40985c2d1ff6c83e0719dbb629c133b092af38e2
|
49ee89cd8d698472aa904ef7690ef6faa235a12f
|
/The Best Box/The Best Box.py
|
8e7578a6c69d6b57d4d73a926fd3521a2bb45084
|
[] |
no_license
|
AbhinavUtkarsh/Code-Chef
|
60b837358e8e168e281c05f8613af64b5ac45271
|
bbea28567fea2a20135e46638c3ba83613b423ef
|
refs/heads/master
| 2020-08-01T19:37:46.580012
| 2020-06-27T20:58:19
| 2020-06-27T20:58:19
| 211,094,346
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 201
|
py
|
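# (Added explanation) For box edges a, b, c: p = 4(a+b+c), s = 2(ab+bc+ca) and
# the volume is v = abc.  Fixing a gives b+c = p/4 - a and bc = s/2 - a*(p/4 - a),
# so v = a*s/2 - a**2*p/4 + a**3; setting dv/da = 0 gives
# a = (p - sqrt(p**2 - 24*s)) / 12, the root used below.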
for _ in range(int(input())):
p, s = map(int, input().strip().split())
a = (p - pow((((p ** 2) - (24 * s))), 0.5)) / 12
v = (a * s) / 2 - ((a ** 2) * p / 4) + a ** 3
print(round(v, 2))
|
[
"abhinav.utkarsh@gmail.com"
] |
abhinav.utkarsh@gmail.com
|
b95bdd0a115a5b1058699b9eedb003e621a9c274
|
8e3b452b08139f25be824fae2b8b7aabb158d888
|
/6.00.1.x/Week5/Lecture9/ProblemSets/ProblemSet1.py
|
d739f864cdfb83d13b0fcc5e638c51deaea7a7d9
|
[] |
no_license
|
prasannabe2004/MITx
|
d38a11e38a0abb73ffa37dccb363f779011155ab
|
1954b5fc31004c94f46fc8194b7fa773108c4493
|
refs/heads/master
| 2020-05-16T19:14:00.963550
| 2015-08-07T18:50:12
| 2015-08-07T18:50:12
| 25,537,861
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 260
|
py
|
def linearSearch(L, x):
    i = 0
    for e in L:
        i = i + 2
        if e == x:
            i = i + 1
            print('true i=', i)
            return True
    i = i + 1
    print('false i=', i)
    return False
linearSearch([13, 9, 22, 3, 10, 17, 11, 2, 12, 89], 26)
|
[
"prasannabe2004@gmail.com"
] |
prasannabe2004@gmail.com
|
357bce5f77f216ee3071489ebb9999e3ff3a74c9
|
389286d8b35033dd795917306af7a795b2f8be06
|
/genpass.py
|
ac84d48efaa070cd3e1a0dfaeb6e0431b261b57f
|
[] |
no_license
|
howling-dot/BrutForce
|
acdea71228fa96731aa5df0e4ebf71032b387b7a
|
0a5346f77f0225b9063ffffc443206a445541f6c
|
refs/heads/main
| 2023-01-02T21:30:50.693578
| 2020-10-29T17:22:42
| 2020-10-29T17:22:42
| 307,091,217
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,602
|
py
|
import urllib.request
import random, string
import requests  # used by the HTTP login attempts below
# base character set for generated passwords
abc = string.ascii_lowercase + string.ascii_uppercase + string.digits
# Generator of weak (common) passwords
class BadPasswordGenerator:
"""Инициализация генератора"""
def __init__(self):
self.j = 0
file_pass = urllib.request.urlopen(
'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Common-Credentials/10k-most-common.txt').readlines()
self.passwords = [i.decode().rstrip() for i in file_pass]
"""Получение следущего пароля"""
def next(self):
password = self.passwords[self.j]
self.j += 1
return password
# usage example
#genBad = BadPasswordGenerator()
#print(genBad.next())
# Generator of strong (random) passwords
class GoodPasswordGenerator:
"""Инициализация генератора"""
def __init__(self):
self.alphabet = '0123456789' \
'qwertyuiopasdfghjklzxcvbnm' \
'QWERTYUIOPASDFGHJKLZXCVBNM' \
'!@#$%^&*()_+'
"""Получение следущего пароля"""
def random_next(self, length=10):
password = ''
for i in range(length):
password += random.choice(self.alphabet)
return password
def BrutForce_data(url='http://127.0.0.1:5000/auth', users='admin', email='',name='',surname='',birthday=''):
email = email[:email.find('@')]
alphabet_list = [i for i in birthday.split()]
alphabet_list.append(name)
alphabet_list.append(surname)
alphabet_list.append(email)
BrutForce_passlist(alphabet_list, users)
return
def BrutForce_passlist(list_add='', users='admin', alphabet='0123456789abcdefghijklmnopqrstuvwxyz', url='http://127.0.0.1:5000/auth'):
    # merge the personal-data tokens with the base character set
    # (the original overwrote the alphabet parameter and then doubled it,
    # which was presumably not the intent)
    alphabet = list(list_add) + list(alphabet)
base = len(alphabet)
length = 0
counter = 0
good_result = 0
generator = BadPasswordGenerator()
while True:
try:
gen_pass = generator.next()
for i in users:
response = requests.post(url,
json={'login': i, 'password':gen_pass})
if response.status_code == 200:
print('SUCCESS', i, gen_pass)
good_result += 1
break
except IndexError:
            print('All passwords exhausted')
break
while True:
result = ''
number = counter
while number > 0:
rest = number % base
result = alphabet[rest] + result
number = number // base
while len(result) < length:
result = alphabet[0] + result
for i in users:
response = requests.post(url,
json={'login': i, 'password': result})
if response.status_code == 200:
print('SUCCESS', result)
good_result +=1
if good_result == len(users):
break
if alphabet[-1] * length == result:
            # reached the last password of this length
length += 1
counter = 0
else:
counter += 1
# usage example
#genGood = GoodPasswordGenerator()
#print(genGood.random_next())
#genBad = BadPasswordGenerator()
#print(genBad.next())
|
[
"vovan.ziod@gmail.com"
] |
vovan.ziod@gmail.com
|
d48cb3ec31f48448cbdda0e1f6c39bdcb0cf89d9
|
1ab66025b9d2aa8e2df92746db412661cfa5f926
|
/example_enquiry/tests/test_view_perm.py
|
e5e47d82e7c47b711f2256a3ac8701aeb10a2884
|
[
"Apache-2.0"
] |
permissive
|
pkimber/old-enquiry-migrated-to-gitlab
|
7bc4023725186336400987c7ff79423268ba3788
|
78d90b58d34a530b03cf8386cfb24ed8160207dd
|
refs/heads/master
| 2021-06-13T16:55:29.504975
| 2017-04-19T14:38:31
| 2017-04-19T14:38:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 466
|
py
|
# -*- encoding: utf-8 -*-
from django.core.urlresolvers import reverse
from base.tests.test_utils import PermTestCase
from enquiry.tests.scenario import default_scenario_enquiry
from login.tests.scenario import default_scenario_login
class TestViewPerm(PermTestCase):
def setUp(self):
default_scenario_login()
default_scenario_enquiry()
def test_create(self):
url = reverse('example.enquiry.create')
self.assert_any(url)
|
[
"code@pkimber.net"
] |
code@pkimber.net
|
6bb0bf795a865d15fb197d4ddc8033f0326cc15c
|
1d4f8b196e1f6e757bdf6d71117a30ac4726194c
|
/numba_dppy/tests/test_controllable_fallback.py
|
db360c65c23fc89010d9560b032bd5fa21bc403b
|
[
"Apache-2.0"
] |
permissive
|
pauljurczak/numba-dppy
|
03619f80660204187a3af183eacceb32d494900c
|
0668082476a239a5e60e07ab64657365c30936a2
|
refs/heads/main
| 2023-07-18T09:13:16.076070
| 2021-08-26T04:52:20
| 2021-08-26T04:52:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,747
|
py
|
# Copyright 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import _helper
import numpy as np
import unittest
import warnings
import numba
import dpctl
from numba_dppy import config
import numba_dppy
@unittest.skipUnless(_helper.has_gpu_queues(), "test only on GPU system")
class TestDPPYFallback(unittest.TestCase):
def test_dppy_fallback_true(self):
@numba.jit
def fill_value(i):
return i
def inner_call_fallback():
x = 10
a = np.empty(shape=x, dtype=np.float32)
for i in numba.prange(x):
a[i] = fill_value(i)
return a
config.DEBUG = 1
with warnings.catch_warnings(record=True) as w:
device = dpctl.SyclDevice("opencl:gpu")
with numba_dppy.offload_to_sycl_device(device):
dppy = numba.njit(parallel=True)(inner_call_fallback)
dppy_fallback_true = dppy()
ref_result = inner_call_fallback()
config.DEBUG = 0
np.testing.assert_array_equal(dppy_fallback_true, ref_result)
self.assertIn("Failed to offload parfor", str(w[-1].message))
@unittest.expectedFailure
def test_dppy_fallback_false(self):
@numba.jit
def fill_value(i):
return i
def inner_call_fallback():
x = 10
a = np.empty(shape=x, dtype=np.float32)
for i in numba.prange(x):
a[i] = fill_value(i)
return a
try:
config.DEBUG = 1
config.FALLBACK_ON_CPU = 0
with warnings.catch_warnings(record=True) as w:
device = dpctl.SyclDevice("opencl:gpu")
with numba_dppy.offload_to_sycl_device(device):
dppy = numba.njit(parallel=True)(inner_call_fallback)
dppy_fallback_false = dppy()
finally:
ref_result = inner_call_fallback()
config.FALLBACK_ON_CPU = 1
config.DEBUG = 0
not np.testing.assert_array_equal(dppy_fallback_false, ref_result)
self.assertNotIn("Failed to offload parfor", str(w[-1].message))
if __name__ == "__main__":
unittest.main()
|
[
"noreply@github.com"
] |
pauljurczak.noreply@github.com
|
9b3869c2082a919dbc1663a32a278a5283ff4931
|
5cee4a4374c52e29bc8eea4b0ed8d1d40566acf1
|
/Robots/Hexy_V1/Moves/GetUp.py
|
94c99e688ce9076780779d685b7c23df311f07e3
|
[] |
no_license
|
clauden/hexy
|
23f8a73a073c2b1cf512a8479da7d200fae63e82
|
5b8bd014c6b0acdf7eb4b1d44dd560a4dca73ee3
|
refs/heads/master
| 2020-12-30T22:32:19.087647
| 2016-04-21T05:45:12
| 2016-04-21T05:45:12
| 56,036,060
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 469
|
py
|
import time
deg = -30
#put all the feet centered and on the floor.
hexy.LF.hip(-deg)
hexy.RM.hip(1)
hexy.LB.hip(deg)
hexy.RF.hip(deg)
hexy.LM.hip(1)
hexy.RB.hip(-deg)
time.sleep(0.5)
for leg in hexy.legs:
leg.knee(-30)
leg.hip("sleep")
time.sleep(0.5)
for leg in hexy.legs:
leg.ankle(-90)
time.sleep(0.5)
for angle in range(0,45,3):
for leg in hexy.legs:
leg.knee(angle)
leg.ankle(-90+angle)
time.sleep(0.1)
move("Reset")
|
[
"joe@arcbotics.com"
] |
joe@arcbotics.com
|
0e648652af6200f24c5bbe88533af8a9498a4ac5
|
5ff95bb44a8c18f42ec1892b8e63098d1dff7e45
|
/energy_py/common/np_utils.py
|
8bd3a90a47385854072677687da7c88cb801901a
|
[
"MIT"
] |
permissive
|
demirelg/energy_py
|
e242a2783689c2fb741c71168756fd3fe2d281a5
|
7b6a3f52d3ea067061e95b931a7b864d1247e123
|
refs/heads/master
| 2023-03-16T15:33:34.271308
| 2018-07-07T01:19:51
| 2018-07-07T01:19:51
| 551,675,719
| 1
| 0
|
MIT
| 2022-10-14T21:40:02
| 2022-10-14T21:40:01
| null |
UTF-8
|
Python
| false
| false
| 1,168
|
py
|
"""Helper functions for numpy"""
import numpy as np
def rolling_window(a, size):
shape = a.shape[:-1] + (a.shape[-1] - size + 1, size)
    strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
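# e.g. (added) rolling_window(np.arange(4), 2) -> array([[0, 1], [1, 2], [2, 3]])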
def find_sub_array_in_2D_array(sub_array, array):
"""
    Find the first occurrence of a sub_array within a larger array
    args
        sub_array (np.array) ndim=1
        array (np.array) ndim=2, shape=(num_samples, sub_array.shape[0])
    i.e.
        sub_array = np.array([0.0, 2.0]).reshape(2)
        array = np.array([0.0, 0.0,
                          0.0, 1.0,
                          0.0, 2.0]).reshape(3, 2)
--> 2
Used for finding the index of an action within a list of all possible actions
"""
    # convert and reshape so that the caller can pass a plain list
    # and it still works
sub_array = np.array(sub_array).reshape(array.shape[1])
bools = rolling_window(sub_array, array.shape[1]) == array
bools = np.all(
bools.reshape(array.shape[0], -1),
axis=1
)
# argmax finds the first true values
return np.argmax(bools)
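# Illustrative check (added; mirrors the docstring example above):
if __name__ == '__main__':
    example = np.array([0.0, 0.0,
                        0.0, 1.0,
                        0.0, 2.0]).reshape(3, 2)
    print(find_sub_array_in_2D_array([0.0, 2.0], example))  # -> 2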
|
[
"adam.green@adgefficiency.com"
] |
adam.green@adgefficiency.com
|
f4cae0c275be924da6f646aa413fa67f3a7077e1
|
e3eb70b22216cce8e5ed0e63fdb6880726098fba
|
/test_FizzBuzz.py
|
ccb3dd08293feafef974b9935f0770300e634677
|
[] |
no_license
|
kaufmjoh/CS_362_HW7
|
cb1bf65628cfbfb008e903031497842ce387039a
|
9889479e0584159a62340884e512c45fce40d9b5
|
refs/heads/master
| 2023-05-12T08:03:33.436679
| 2021-03-08T02:23:55
| 2021-03-08T02:23:55
| 345,493,585
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 704
|
py
|
import unittest
import FizzBuzz
class TestCaseFizzBuzz(unittest.TestCase):
def testFizzBuzz(self):
self.assertEqual(FizzBuzz.FizzBuzz(), "1, 2, Fizz, 4, Buzz, Fizz, 7, 8, Fizz, Buzz, 11, Fizz, 13, 14, FizzBuzz, 16, 17, Fizz, 19, Buzz, Fizz, 22, 23, Fizz, Buzz, 26, Fizz, 28, 29, FizzBuzz, 31, 32, Fizz, 34, Buzz, Fizz, 37, 38, Fizz, Buzz, 41, Fizz, 43, 44, FizzBuzz, 46, 47, Fizz, 49, Buzz, Fizz, 52, 53, Fizz, Buzz, 56, Fizz, 58, 59, FizzBuzz, 61, 62, Fizz, 64, Buzz, Fizz, 67, 68, Fizz, Buzz, 71, Fizz, 73, 74, FizzBuzz, 76, 77, Fizz, 79, Buzz, Fizz, 82, 83, Fizz, Buzz, 86, Fizz, 88, 89, FizzBuzz, 91, 92, Fizz, 94, Buzz, Fizz, 97, 98, Fizz, Buzz");
if __name__ == '__main__':
unittest.main();
|
[
"kaufmjoh@oregonstate.edu"
] |
kaufmjoh@oregonstate.edu
|
f9ff90fe6ca01813b519a4b80cd9d7f2b02870e3
|
4f8ee78ba4a865b255e49f7c4c2aa7ca5f41fc8f
|
/prac_01/shop_calculator.py
|
fdc7a26660aa93ceaaffc0ad251c1d1ec233b4ed
|
[] |
no_license
|
Chris-Barty-JCU/CP1404_PRACTICALS
|
aaf826efdce3b05a0568f2ea037387f921b34a22
|
88c978dce429d84f9797fffc232f419af2460ccf
|
refs/heads/master
| 2021-05-23T00:33:13.386640
| 2020-05-05T13:12:37
| 2020-05-05T13:12:37
| 253,156,640
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 385
|
py
|
item_total = 0
n_items = int(input("Number of Items: "))
while n_items <= 0:
print("Invalid Number Of Items!")
n_items = int(input("Number of Items: "))
for x in range(1, n_items + 1, 1):
item_cost = float(input("Price Of Item " + str(x) + ": "))
item_total = item_total + item_cost
print("Total Price For " + str(n_items) + " Items Is: ${:.2f}".format(item_total))
|
[
"christopher.barty@my.jcu.edu.au"
] |
christopher.barty@my.jcu.edu.au
|
e558f8013830df479215c149d520716d37933924
|
e767f18e905dcec97e0dfa5619682a4c00fd50eb
|
/src/config/__init__.py
|
3ba4363ec11542df73010501014c8c599790c7b1
|
[] |
no_license
|
andersHoward/AHRLFramework
|
a732a806e034700afacadd6ef8c67c29103210b8
|
a84f93f5b075c5fc99dcaffc1a2b9ebaa35fecef
|
refs/heads/master
| 2021-01-20T14:35:41.953914
| 2017-07-12T18:11:55
| 2017-07-12T18:11:55
| 82,762,315
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20
|
py
|
from . import ConfigLoader
|
[
"andershoward@gmail.com"
] |
andershoward@gmail.com
|
855b492e6975706c4ca542c4b5a8f4bc14abeebe
|
ecf28ce0c9b4cb78cf72a35a43e8b64fb4012ae0
|
/main/board.py
|
4cc9746940ea404387a63079206d6f4f70e1b18b
|
[] |
no_license
|
Soohwanchoi/Python_flask
|
f97a57650081601e9c9a167a4f831eb8a48bef97
|
bcdcceaf1e0143ddc1ba668bff794b700c7501b9
|
refs/heads/master
| 2022-12-21T17:30:49.693533
| 2020-09-15T10:14:32
| 2020-09-15T10:14:32
| 295,685,565
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,853
|
py
|
from main import *
from flask import Blueprint
from flask import send_from_directory
from flask import jsonify
blueprint = Blueprint("board", __name__, url_prefix='/board')
@blueprint.route("/view")
@login_required
def board_view():
idx = request.args.get("idx")
page = request.args.get("page", 1, type=int)
search = request.args.get("search", -1, type=int)
keyword = request.args.get("keyword", "", type=str)
if idx is not None:
board = mongo.db.board
#data = board.find_one({"_id": ObjectId(idx)})
data = board.find_one_and_update({"_id": ObjectId(idx)}, {"$inc": {"view": 1}}, return_document=True)
if data is not None:
result = {
"id": data.get("_id"),
"name": data.get("name"),
"title": data.get("title"),
"contents": data.get("contents"),
"CurrentTime": data.get("CurrentTime"),
"writer_id": data.get("writer_id", ""),
"attachfile": data.get("attachfile", "")
}
return render_template("view.html", title="글보기", result=result, page=page, search=search, keyword=keyword)
return abort(404)
@blueprint.route("/write", methods=["GET", "POST"])
@login_required
def board_write():
if session["id"] is None or session["id"] == "":
return redirect(url_for("member_login"))
if request.method == "POST":
filename = None
if "attachfile" in request.files:
file = request.files["attachfile"]
if file and allowed_file(file.filename):
filename = check_filename(file.filename)
file.save(os.path.join(app.config["BOARD_ATTACH_FILE_PATH"], filename))
name = request.form.get("name")
writer_id = session.get("id")
title = request.form.get("title")
contents = request.form.get("contents")
current_utc_time = round(datetime.utcnow().timestamp() * 1000)
board = mongo.db.board
post = {
"writer_id": writer_id,
"name": session["name"],
"title": title,
"contents": contents,
"view": 0,
"CurrentTime": current_utc_time,
}
if filename is not None:
post["attachfile"] = filename
print(post)
x = board.insert_one(post)
flash("정상적으로 작성 되었습니다.")
return redirect(url_for("board.board_view", idx=x.inserted_id))
else:
return render_template("write.html", title="글작성", name=session["name"])
@blueprint.route("/list")
def lists():
page = request.args.get("page", 1, type=int)
limit = request.args.get("limit", 10, type=int)
search = request.args.get("search", -1, type=int)
keyword = request.args.get("keyword", "", type=str)
query = {}
search_list = []
if search == 0:
search_list.append({"title": {"$regex": keyword}})
elif search == 1:
search_list.append({"contents": {"$regex": keyword}})
elif search == 2:
search_list.append({"title": {"$regex": keyword}})
search_list.append({"contents": {"$regex": keyword}})
elif search == 3:
search_list.append({"name": {"$regex": keyword}})
if len(search_list) > 0:
query = {"$or": search_list}
board = mongo.db.board
datas = board.find(query).skip((page-1) * limit).limit(limit)
tot_count = board.find(query).count()
last_page_num = math.ceil(tot_count / limit)
block_size = 5
block_num = int((page-1) / block_size)
block_start = int((block_size * block_num) + 1)
block_last = math.ceil(block_start + (block_size-1))
return render_template("list.html",
datas=datas,
limit=limit,
page=page,
block_start=block_start,
block_last=block_last,
last_page=last_page_num,
search=search,
keyword=keyword,
title="리스트",
)
@blueprint.route("/edit", methods=["POST"])
@blueprint.route("/edit/<idx>", methods=["GET"])
def board_edit(idx=None):
if request.method == "GET":
board = mongo.db.board
data = board.find_one({"_id": ObjectId(idx)})
if data is None:
flash("해당 게시물이 존재하지 않습니다.")
return redirect(url_for("board.lists"))
else:
if session.get("id") == data.get("writer_id"):
return render_template("edit.html", data=data, title="글수정",)
else:
flash("글 수정 권한이 없습니다.")
return redirect(url_for("board.lists"))
else:
idx = request.form.get("idx")
title = request.form.get("title")
contents = request.form.get("contents")
deleteoldfile = request.form.get("deleteoldfile", "")
board = mongo.db.board
data = board.find_one({"_id": ObjectId(idx)})
if data.get("writer_id") == session.get("id"):
attach_file = data.get("attachfile")
filename = None
if "attachfile" in request.files:
file = request.files["attachfile"]
if file and allowed_file(file.filename):
filename = check_filename(file.filename)
file.save(os.path.join(app.config["BOARD_ATTACH_FILE_PATH"], filename))
if attach_file:
board_delete_attach_file(attach_file)
else:
if deleteoldfile == "on":
filename = None
if attach_file:
board_delete_attach_file(attach_file)
else:
filename = attach_file
board.update_one({"_id": ObjectId(idx)}, {
"$set": {
"title": title,
"contents": contents,
"attachfile": filename
}
})
flash("수정되었습니다.")
return redirect(url_for("board.board_view", idx=idx))
else:
flash("글 수정 권한이 없습니다.")
return redirect(url_for("board.lists"))
@blueprint.route("/delete/<idx>")
def board_delete(idx):
board = mongo.db.board
data = board.find_one({"_id": ObjectId(idx)})
if data.get("writer_id") == session.get("id"):
board.delete_one({"_id": ObjectId(idx)})
flash("삭제 되었습니다.")
else:
flash("글 삭제 권한이 없습니다.")
return redirect(url_for("board.lists"))
@blueprint.route("/upload_image", methods=["POST"])
def upload_image():
if request.method == "POST":
file = request.files["image"]
if file and allowed_file(file.filename):
filename = "{}_{}.jpg".format(str(int(datetime.now().timestamp()) * 1000), rand_generator())
savefilepath = os.path.join(app.config["BOARD_IMAGE_PATH"], filename)
file.save(savefilepath)
return url_for("board.board_images", filename=filename)
@blueprint.route('/images/<filename>')
def board_images(filename):
return send_from_directory(app.config['BOARD_IMAGE_PATH'], filename)
@blueprint.route("/files/<filename>")
def board_files(filename):
return send_from_directory(app.config['BOARD_ATTACH_FILE_PATH'], filename)
def board_delete_attach_file(filename):
abs_path = os.path.join(app.config["BOARD_ATTACH_FILE_PATH"], filename)
if os.path.exists(abs_path):
os.remove(abs_path)
return True
return False
@blueprint.route("/comment_write", methods=["POST"])
@login_required
def comment_write():
if session["id"] is None or session["id"] == "":
return redirect(url_for("member_login"))
if request.method == "POST":
name = session.get("name")
writer_id = session.get("id")
root_idx = request.form.get("root_idx")
ccomment = request.form.get("comment")
current_utc_time = round(datetime.utcnow().timestamp() * 1000)
comment = mongo.db.comment
post = {
"root_idx": root_idx,
"writer_id": writer_id,
"name": name,
"comment": ccomment,
"CurrentTime": current_utc_time,
}
print(post)
x = comment.insert_one(post)
return redirect(url_for("board.board_view", idx=root_idx))
return abort(404)
@blueprint.route("/comment_list/<root_idx>", methods=["GET"])
@login_required
def comment_list(root_idx):
if session["id"] is None or session["id"] == "":
return redirect(url_for("member_login"))
comment = mongo.db.comment
comments = comment.find({"root_idx": str(root_idx)}).sort([("CurrentTime", -1)])
comment_list = []
for c in comments:
print(c)
owner = True if c.get("writer_id") == session.get("id") else False
comment_list.append({
"id": str(c.get("_id")),
"root_idx": c.get("root_idx"),
"name": c.get("name"),
"writer_id": c.get("writer_id"),
"comment": c.get("comment"),
"CurrentTime": filter.format_datetime(c.get("CurrentTime")),
"owner": owner,
})
return jsonify(error="success", lists=comment_list)
@blueprint.route("/comment_delete", methods=["POST"])
@login_required
def comment_delete():
if session["id"] is None or session["id"] == "":
return redirect(url_for("member_login"))
if request.method == "POST":
idx = request.form.get("id")
comment = mongo.db.comment
data = comment.find_one({"_id": ObjectId(idx)})
if data.get("writer_id") == session.get("id"):
comment.delete_one({"_id": ObjectId(idx)})
return jsonify(error="success")
else:
return jsonify(error="error")
return abort(404)
@blueprint.route("/comment_edit", methods=["POST"])
@login_required
def comment_edit():
if session["id"] is None or session["id"] == "":
return redirect(url_for("member_login"))
if request.method == "POST":
idx = request.form.get("id")
ccomment = request.form.get("comment")
comment = mongo.db.comment
data = comment.find_one({"_id": ObjectId(idx)})
if data.get("writer_id") == session.get("id"):
comment.update_one(
{"_id": ObjectId(idx)},
{"$set": {"comment": ccomment}},
)
return jsonify(error="success")
else:
return jsonify(error="error")
return abort(404)
|
[
"!a48094809"
] |
!a48094809
|
e16f731dbb1a420b7d0e97684397536e9c92db4a
|
e905abd9bb7bd7017657d0a0c4d724d16e37044c
|
/.history/article/spiders/ieee_20210208220336.py
|
377e71d23b57211ab76118ef9b7a82b0c1747ea7
|
[] |
no_license
|
tabdelbari/articles
|
a8b921841f84fb473f5ed1cdcda743863e6bc246
|
f0e1dfdc9e818e43095933139b6379a232647898
|
refs/heads/main
| 2023-03-05T10:21:35.565767
| 2021-02-10T13:35:14
| 2021-02-10T13:35:14
| 325,654,973
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,770
|
py
|
import scrapy
import logging
import re
from scrapy_splash import SplashRequest, request
from article.items import ArticleItem
import json
class IeeeSpider(scrapy.Spider):
name = 'ieee'
allowed_domains = ['ieee.org']
lua_script = """
function main(splash, args)
assert(splash:go{
splash.args.url,
headers=splash.args.headers,
http_method=splash.args.http_method,
body=splash.args.body,
})
assert(splash:wait(1))
return splash:html()
end
"""
lua_script2 = """
function main(splash, args)
assert(splash:go{
splash.args.url,
headers=splash.args.headers,
http_method=splash.args.http_method
})
assert(splash:wait(1))
return splash:html()
end
"""
def __init__(self, topic='', keywords='', **kwargs):
super().__init__(**kwargs)
self.topic = topic
self.keywords = keywords
self.totalPages = 0
def start_requests(self):
post_data = '{"queryText": "' + self.topic + \
'", "highlight": true, "returnType": "SEARCH", "matchPubs": true, "rowsPerPage": 100, "returnFacets": ["ALL"], "newsearch": true}'
headers = {
'Origin': 'https://ieeexplore.ieee.org',
'Host': 'ieeexplore.ieee.org',
'Accept-Language': 'fr-MA,fr;q=0.9,en-US;q=0.8,en;q=0.7,ar-MA;q=0.6,ar;q=0.5,fr-FR;q=0.4',
'Accept-Encoding': 'gzip, deflate, br',
'Accept': 'application/json, text/plain, */*',
'Content-Type': 'application/json;charset=UTF-8'
}
search_url = 'https://ieeexplore.ieee.org/rest/search'
yield SplashRequest(search_url, self.parse_0, endpoint='execute',
magic_response=True, meta={'handle_httpstatus_all': True},
args={'lua_source': self.lua_script, 'http_method': 'POST', 'body': post_data, 'headers': headers})
def parse_0(self, response):
jr = json.loads(response.xpath('//*/pre/text()').get(default=''))
self.totalPages = jr['totalPages']
headers = {
'Origin': 'https://ieeexplore.ieee.org',
'Host': 'ieeexplore.ieee.org',
'Accept-Language': 'fr-MA,fr;q=0.9,en-US;q=0.8,en;q=0.7,ar-MA;q=0.6,ar;q=0.5,fr-FR;q=0.4',
'Accept-Encoding': 'gzip, deflate, br',
'Accept': 'application/json, text/plain, */*',
'Content-Type': 'application/json;charset=UTF-8'
}
search_url = 'https://ieeexplore.ieee.org/rest/search'
for i in range(1, (self.totalPages+1)):
post_data = '{"queryText": "' + self.topic + \
'", "highlight": true, "newsearch": true, "returnType": "SEARCH", "matchPubs": true, "rowsPerPage": 100, "returnFacets": ["ALL"], "pageNumber": '+str(i)+'}'
yield SplashRequest(search_url, self.parse_1, endpoint='execute',
magic_response=True, meta={'handle_httpstatus_all': True},
args={'lua_source': self.lua_script, 'http_method': 'POST', 'body': post_data, 'headers': headers})
def parse_1(self, response):
jr = json.loads(response.xpath('//*/pre/text()').get(default=''))
headers = {
'Origin': 'https://ieeexplore.ieee.org',
'Host': 'ieeexplore.ieee.org',
'Accept-Language': 'fr-MA,fr;q=0.9,en-US;q=0.8,en;q=0.7,ar-MA;q=0.6,ar;q=0.5,fr-FR;q=0.4',
'Accept-Encoding': 'gzip, deflate, br',
'Accept': 'application/json, text/plain, */*',
'Content-Type': 'application/json;charset=UTF-8'
}
for record in jr['records']:
authors_ids = list(map(lambda author: author['id'], record['authors']))
metrics_url = "https://ieeexplore.ieee.org/rest/document/" + record['articleNumber'] + "/metrics"
yield SplashRequest(metrics_url, self.parse_2, endpoint='execute',
                            magic_response=True, meta={
'handle_httpstatus_all': True,
'title': record['articleTitle'],
'abstract': record['abstract'],
'year': record['publicationYear'],
'journal': record['publicationTitle'],
'publisher': record['publisher'],
'doi': record['doi'],
'authors_ids': authors_ids
},
args={'lua_source': self.lua_script2, 'http_method': 'GET', 'headers': headers})
def parse_2(self, response):
# result = response.meta['data']
authors_ids = response.meta['authors_ids']
jr = json.loads(response.xpath('//*/pre/text()').get(default=''))
citationCountPaper = 0
scoupus = 0
try:
citationCountPaper = self.safe_cast(jr['metrics']['citationCountPaper'], int, 0)
except:
pass
try:
scoupus = self.safe_cast(jr['metrics']['scopus_count'], int, 0)
except:
pass
headers = {
'Origin': 'https://ieeexplore.ieee.org',
'Host': 'ieeexplore.ieee.org',
'Accept-Language': 'fr-MA,fr;q=0.9,en-US;q=0.8,en;q=0.7,ar-MA;q=0.6,ar;q=0.5,fr-FR;q=0.4',
'Accept-Encoding': 'gzip, deflate, br',
'Accept': 'application/json, text/plain, */*',
'Content-Type': 'application/json;charset=UTF-8'
}
for author_id in authors_ids:
author_url = 'https://ieeexplore.ieee.org/rest/author/' + str(author_id)
yield SplashRequest(author_url, self.parse, endpoint='execute',
magic_response=True, meta={'handle_httpstatus_all': True,
'impf': citationCountPaper,
'scoupus': scoupus,
                            'title': response.meta['title'],
                            'abstract': response.meta['abstract'],
                            'year': response.meta['year'],
                            'journal': response.meta['journal'],
'publisher': response.meta['publisher'],
'doi': response.meta['doi'],
},
args={'lua_source': self.lua_script2, 'http_method': 'GET', 'headers': headers})
def parse(self, response):
result = {
'impf': response.meta['impf'],
'scoupus': response.meta['scoupus'],
            'title': response.meta['title'],
            'abstract': response.meta['abstract'],
            'year': response.meta['year'],
            'journal': response.meta['journal'],
'publisher': response.meta['publisher'],
'doi': response.meta['doi'],
}
try:
jr = json.loads(response.xpath('//*/pre/text()').get(default=''))[0]
id = jr['id']
author = jr['preferredName']
adresses = jr['currentAffiliation'].split(', ')
result['author'] = author
result['organisation'] = adresses[0]
result['lab'] = adresses[1]
result['city'] = adresses[2]
result['country'] = adresses[3]
result['_id'] = result['doi'] + '_' + str(id)
except:
pass
yield result
def safe_cast(self, val, to_type, default=None):
try:
return to_type(val)
except:
return default
|
[
"abdelbari1996@hotmail.com"
] |
abdelbari1996@hotmail.com
|
e190c1e1ac2a062fcef0732bd050f3ed9fd4f8b1
|
44a67d9c840d751081f6917b964bd47bb19c9e6a
|
/accounts/forms.py
|
cdb10e4bf939d74e38d1ac9d52ef215f384292a5
|
[] |
no_license
|
31519/aws_news
|
982253012f987a5d35f615e9d3af5f769b7ed22b
|
d054258e2489ac1efa6f104d629b8b9a49769982
|
refs/heads/main
| 2023-05-30T16:16:24.261607
| 2021-06-07T08:40:47
| 2021-06-07T08:40:47
| 370,308,381
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,394
|
py
|
from django import forms
from django.contrib.auth.models import User
from .models import Account, UserProfile
class RegistrationForm(forms.ModelForm):
password = forms.CharField(widget=forms.PasswordInput(attrs={
'placeholder':'Enter Password',
'class':'form-control'
}))
confirm_password = forms.CharField(widget=forms.PasswordInput(attrs={
'placeholder':'Confirm Password',
'class':'form-control'
}))
class Meta:
model = Account
fields = ['first_name', 'last_name', 'email', 'phone_number', 'country', 'state', 'gender', 'password']
def __init__(self, *args, **kwargs):
super(RegistrationForm, self).__init__(*args, **kwargs)
# self.fields['adv_category'].widget.attrs['placeholder'] = 'Enter Category'
# self.fields['adv_heading'].widget.attrs['placeholder'] = 'Enter Title'
# self.fields['adv_descriptions'].widget.attrs['placeholder'] = 'Enter Description'
# self.fields['adv_images'].widget.attrs['placeholder'] = 'Enter Images'
# self.fields['adv_end_date'].widget.attrs['class'] = 'form-control'
for field in self.fields:
self.fields[field].widget.attrs['class'] = 'form-control'
    def clean(self):
        cleaned_data = super(RegistrationForm, self).clean()
        password = cleaned_data.get('password')
        confirm_password = cleaned_data.get('confirm_password')
        if password != confirm_password:
            raise forms.ValidationError("Passwords do not match!")
        return cleaned_data
class UserForm(forms.ModelForm):
class Meta:
model = Account
fields = ('first_name', 'last_name', 'phone_number', 'images', 'email')
def __init__(self, *args, **kwargs):
super(UserForm, self).__init__(*args, **kwargs)
for field in self.fields:
self.fields[field].widget.attrs['class'] = 'form-control'
class UserProfileForm(forms.ModelForm):
images = forms.ImageField(required=False, error_messages = {'invalid':("Image files only")}, widget=forms.FileInput)
class Meta:
model = UserProfile
        fields = ('address', 'state', 'country', 'images')
def __init__(self, *args, **kwargs):
super(UserProfileForm, self).__init__(*args, **kwargs)
for field in self.fields:
self.fields[field].widget.attrs['class'] = 'form-control'
|
[
"cosrumut31519@gmail.com"
] |
cosrumut31519@gmail.com
|
3e43f3976ab3b3ddab14befd090271042f8e4eca
|
28e17be2b84e68d98e28aa1d2e6e26e95d548348
|
/blog/settings.py
|
3a2c742de5b8b221a476cc5996be24849cac1e00
|
[] |
no_license
|
IoanHadarean/Django-Blog
|
73b1d9a79f3aa2b0a5204b35fa4ad12edd1a72b4
|
e443b3a6537706e1506ccdc71a9120dbdd7c637a
|
refs/heads/master
| 2022-12-08T06:18:38.150546
| 2022-04-27T08:13:35
| 2022-04-27T08:13:35
| 190,923,536
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,813
|
py
|
"""
Django settings for blog project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import dj_database_url
if os.path.exists('env.py'):
import env
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['aaeaf49b4cb74494a449d006071e8a43.vfs.cloud9.us-east-1.amazonaws.com', 'django-blog-application.herokuapp.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_forms_bootstrap',
'posts',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'blog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'blog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
if "DATABASE_URL" in os.environ:
DATABASES = {
'default': dj_database_url.parse(os.environ.get("DATABASE_URL"))
}
else:
print("Postgres URL not found, using sqlite instead")
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'), )
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
|
[
"ubuntu@ip-172-31-85-80.ec2.internal"
] |
ubuntu@ip-172-31-85-80.ec2.internal
|
38176b90d2d8f84615dd9012f691a9227a18b1b9
|
5e16831c3d7106297938c8543332b2012bd925a4
|
/wips_home/urls.py
|
c96ff3f578dfd3fdd7ab6861b648dfc10567d1fb
|
[] |
no_license
|
legible01/WIPS_django
|
15a5cfcba4adc5b4c35bdc672dd97b621f2d8d9f
|
1ff3c301af49893a79abf4d9369306c4ace5519d
|
refs/heads/master
| 2020-03-17T04:19:31.177043
| 2018-10-12T02:58:40
| 2018-10-12T02:58:40
| 133,270,425
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
py
|
from django.urls import path
from wips_home import views
urlpatterns = [
path('', views.block_list, name='block_list'),
path('post/<int:pk>', views.block_list_post, name='block_list_post'),
#path('edit/',views.edit_data, name='edit_data')
]
|
[
"legible01@gmail.com"
] |
legible01@gmail.com
|
cfd368ccf5e7e39d3970349460410b00c035e90a
|
e8bf0ec2f07be9c4fb9aff1c5ea3be2651db17f6
|
/Daily_Coding_Problem/514_no_of_consecutive_integers_in_array.py
|
7eb79dea4c9496f42787575333203642d6b67961
|
[] |
no_license
|
Srini-py/Python
|
0b4a0316f3754bc04e3c01214f1d99721defbf38
|
81eec0cc418baa86ad718be853242e4cc349dab4
|
refs/heads/main
| 2023-06-30T23:58:25.589872
| 2021-08-06T16:54:51
| 2021-08-06T16:54:51
| 393,439,738
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 600
|
py
|
'''
Given an unsorted array of integers,
find the length of the longest consecutive elements sequence.
Your algorithm should run in O(n) complexity.
'''
def find_consecutive(arr, n):
ans = 0
    s = set(arr)  # hash set gives O(1) membership tests
for i in range(n):
if arr[i]-1 not in s:
j = arr[i]
count = 0
while j in s:
j += 1
count += 1
ans = max(ans, count)
return ans
arr = [int(i) for i in input("Enter the array of elements : ").split()]
print("No.of consecutive elements in array are",find_consecutive(arr, len(arr)))
|
[
"noreply@github.com"
] |
Srini-py.noreply@github.com
|
1854e2ef0f86081c28f9c7183c88ed888dc578c6
|
6eb0e20fd4b7aa829d652a8386efba36de89b030
|
/scionlab/forms/attachment_conf_form.py
|
92552206f475d818b90d9d753a9519960cc796ed
|
[
"Apache-2.0"
] |
permissive
|
fl99kl/scionlab
|
f21bd29be7d35d9421a035b880de8a7e72096f7b
|
21771c5057dbc665ba73bc2f1b5f3ed42d91d5ca
|
refs/heads/master
| 2023-05-10T15:24:41.342591
| 2020-12-28T11:40:24
| 2020-12-28T11:40:24
| 267,624,721
| 0
| 0
|
Apache-2.0
| 2020-05-28T15:20:53
| 2020-05-28T15:20:53
| null |
UTF-8
|
Python
| false
| false
| 14,692
|
py
|
# Copyright 2018 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ipaddress
from django import forms
from django.conf import settings
from django.forms import BaseModelFormSet
from django.core.exceptions import ValidationError
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Row, Column, Div, HTML
from crispy_forms.bootstrap import AppendedText
from scionlab.defines import MAX_PORT, DEFAULT_PUBLIC_PORT
from scionlab.models.core import Link
from scionlab.models.user_as import AttachmentPoint, AttachmentConf, UserAS
from scionlab.util.portmap import PortMap
class AttachmentConfFormSet(BaseModelFormSet):
"""
A FormSet companion for the UserASForm, representing its `AttachmentPoint`s
"""
def __init__(self, *args, **kwargs):
self.userASForm = kwargs.pop('userASForm')
self.isd = None
super().__init__(*args, **kwargs)
def _check_isd(self, forms):
"""
Check the consistency of the ISD
"""
instance = self.userASForm.instance
isd_set = {form.cleaned_data['attachment_point'].AS.isd for form in forms}
if instance and instance.isd_fixed():
# If ISD is fixed, say which ISD the AS is restricted to
isd_set.add(instance.isd)
if len(isd_set) > 1:
raise ValidationError("All attachment points must belong to %s. "
"See \"Fixed links\" below." % instance.isd)
else:
if len(isd_set) > 1:
raise ValidationError("All attachment points must belong to the same ISD")
if not instance:
if len(isd_set) == 1:
self.isd = isd_set.pop()
else:
# One attachment point must be selected at creation time
raise ValidationError("Select at least one attachment point")
def _check_ip_ports(self, forms):
"""
Check for clashes in (ip, port) combinations
"""
installation_type = self.userASForm.cleaned_data.get('installation_type')
public_addrs, public_addr_clashes = PortMap(), []
local_addrs, local_addr_clashes = PortMap(), []
forward_ports, forward_addr_clashes = set(), []
for f in filter(lambda f: not f.cleaned_data['use_vpn'], forms):
            public_ip = f.cleaned_data['public_ip']
port = f.cleaned_data['public_port']
bind_ip = f.cleaned_data['bind_ip']
if public_addrs.add(public_ip, port):
public_addr_clashes.append(f)
if local_addrs.add(bind_ip or public_ip, port) and bind_ip:
local_addr_clashes.append(f)
if installation_type == UserAS.VM:
if port in forward_ports:
forward_addr_clashes.append(f)
else:
forward_ports.add(port)
for form in public_addr_clashes:
form.add_error('public_port',
ValidationError('This port is already in use',
code='public_port_clash'))
for form in local_addr_clashes:
form.add_error('public_port',
ValidationError('This port is already in use for the specified '
'bind IP address',
code='local_port_clash'))
for form in forward_addr_clashes:
form.add_error('public_port',
ValidationError('This port clashes in the VM setup',
code='forwarded_port_clash'))
def clean(self):
if any(self.errors):
# Don't bother validating the formset unless each form is valid on its own
return
active_forms = []
for form in self.forms:
if self.can_delete and self._should_delete_form(form):
continue
# Skip empty forms and deactivated connections
if not form.cleaned_data or not form.cleaned_data['active']:
continue
active_forms.append(form)
self._check_isd(active_forms)
self._check_ip_ports(active_forms)
def save(self, user_as, commit=True):
att_confs = super().save(commit=False)
user_as.update_attachments(att_confs, self.deleted_objects)
class AttachmentConfFormHelper(FormHelper):
"""
Create the crispy-forms FormHelper. The form will then be rendered
using {% crispy form %} in the template.
"""
conf_header = Div(
Row(
Column('attachment_point', css_class='col-md-5'),
Column('use_vpn', css_class='col-md-5'),
'id'
),
css_class="card-header"
)
conf_body = Div(
Row(
Column(AppendedText('public_ip', '<span class="fa fa-external-link"/>'),
css_class='col-md-6 hidable'),
Column(AppendedText('public_port', '<span class="fa fa-share-square-o"/>'),
css_class='col-md-6')
),
Row(
HTML("""<button type="button" class="btn btn-link bind-row-collapser collapsed"
aria-expanded="false" aria-controls="bind-row">
Show binding options for NAT
<i class="fa fa-plus-circle"></i>
<i class="fa fa-minus-circle"></i>
</button>""")
),
Row(
Column(AppendedText('bind_ip', '<span class="fa fa-external-link-square"/>'),
css_class='col-md-6 hidable'),
css_class="bind-row"
),
css_class="card-body"
)
conf_footer = Div(
Row(
Column('active', css_class='col-md-6'),
Column('DELETE', css_class='col-md-6 text-danger')
),
css_class="card-footer"
)
conf_collapser = HTML(
"""<button type="button" id="new-ap-collapser"
class="mt-3 btn btn-link collapsed"
aria-expanded="false"
aria-controls="new-ap-form">
New provider link
<i class="mt-3 fa fa-plus-circle"></i>
<i class="mt-3 fa fa-minus-circle"></i>
</button>"""
)
def __init__(self, instance, userAS, *args, **kwargs):
super().__init__(*args, **kwargs)
        outer_parts = ()
if instance:
# existing link, existing user AS
# - can be deleted / deactivated
card_parts = (
self.conf_header,
self.conf_body,
self.conf_footer,
)
elif userAS:
# new link, existing user AS
            # - cannot be deleted (it doesn't exist yet) or deactivated (because it
            #   doesn't make sense to create an inactive link).
# - initially collapsed
            outer_parts = (
                self.conf_collapser,
            )
card_parts = (
self.conf_header,
self.conf_body,
)
else:
# new (and only) link, user AS
# - cannot be deleted / deactivated nor collapsed
card_parts = (
self.conf_header,
self.conf_body,
)
self.layout = Layout(
Div(
                *outer_parts,
Div(
*card_parts,
css_class="card attachment-form",
),
css_class='attachment'
)
)
# We need `form_tag = False` to render the AttachmentConfFormSet along with the UserASForm
self.form_tag = False
self.disable_csrf = True
class AttachmentConfForm(forms.ModelForm):
"""
Form for creating and updating a Link involving a UserAS
"""
public_ip = forms.GenericIPAddressField(
help_text="Public IP address",
required=False
)
public_port = forms.IntegerField(
min_value=1024,
max_value=MAX_PORT,
initial=DEFAULT_PUBLIC_PORT,
label="Public Port (UDP)",
help_text="The attachment point will use this port "
"for the overlay link to your AS."
)
bind_ip = forms.GenericIPAddressField(
label="Bind IP address",
help_text="(Optional) Specify the local IP "
"if your border router is behind a NAT/firewall etc.",
required=False
)
use_vpn = forms.BooleanField(
required=False,
label="Use VPN",
help_text="Use an OpenVPN connection for the overlay link between this attachment point "
"and the border router of my AS."
)
active = forms.BooleanField(
required=False,
label="Active",
help_text="Activate or deactivate this connection without deleting it"
)
attachment_point = forms.ModelChoiceField(queryset=AttachmentPoint.objects)
class Meta:
model = Link
fields = ('id', 'active')
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
userAS = kwargs.pop('userAS')
instance = kwargs.get('instance')
initial = kwargs.pop('initial', {})
if instance:
use_vpn = UserAS.is_link_over_vpn(instance.interfaceB)
initial['active'] = instance.active
initial['attachment_point'] = AttachmentPoint.objects.get(AS=instance.interfaceA.AS)
initial['use_vpn'] = use_vpn
initial['public_ip'] = instance.interfaceB.public_ip
initial['bind_ip'] = instance.interfaceB.bind_ip
initial['public_port'] = instance.interfaceB.public_port
# Clean IP fields when use_vpn is enabled
if use_vpn:
initial.pop('public_ip', None)
initial.pop('bind_ip', None)
# Clean bind fields when installation type VM
if userAS.installation_type == UserAS.VM:
initial.pop('bind_ip', None)
elif userAS:
# Set some convenient default values for adding new links:
# Copy first public IP:
iface = next((iface for iface in userAS.interfaces.all()
if not UserAS.is_link_over_vpn(iface)), None)
if iface:
initial['public_ip'] = iface.public_ip
# Set simple increasing port number:
index = self._get_formset_index(kwargs['prefix'])
initial['public_port'] = DEFAULT_PUBLIC_PORT + index
self.helper = AttachmentConfFormHelper(instance, userAS)
super().__init__(*args, initial=initial, **kwargs)
@staticmethod
def _get_formset_index(prefix):
"""
Extract index of form in formset by parsing the _default_ formset form prefix
"<prefix>-<index>".
Fail hard if the prefix does not match.
"""
_, idx = prefix.split('-')
return int(idx)
def clean(self):
cleaned_data = super().clean()
if 'attachment_point' in self.errors:
return cleaned_data
if cleaned_data.get('use_vpn'):
cleaned_data.get('attachment_point').check_vpn_available()
else:
public_ip = cleaned_data.get('public_ip')
if not public_ip:
# public_ip cannot be empty when use_vpn is false
self.add_error(
'public_ip',
ValidationError('Please provide a value for public IP, or enable "Use VPN".',
code='missing_public_ip_no_vpn')
)
else:
ip_addr = ipaddress.ip_address(public_ip)
if ip_addr.version not in cleaned_data['attachment_point'].supported_ip_versions():
self.add_error(
'public_ip',
ValidationError('IP version {ipv} not supported by the selected '
'attachment point'.format(ipv=ip_addr.version),
code='unsupported_ip_version')
)
if (not settings.DEBUG and (not ip_addr.is_global or ip_addr.is_loopback)) or \
ip_addr.is_multicast or \
ip_addr.is_reserved or \
ip_addr.is_link_local or \
(ip_addr.version == 6 and ip_addr.is_site_local) or \
ip_addr.is_unspecified:
self.add_error(
'public_ip',
ValidationError("Public IP address must be a publicly routable address. "
"It cannot be a multicast, loopback or otherwise reserved "
"address.",
code='invalid_public_ip')
)
# Ignore active flag while creating a new instance
if self.instance.pk is None:
self.cleaned_data['active'] = True
return cleaned_data
def save(self, commit=True, user_as=None):
"""
:return AttachmentPointConf:
"""
        assert not commit, ("Persistence in the DB shall be handled in the save(...) "
                            "method of the AttachmentLinksFormSet")
return AttachmentConf(attachment_point=self.cleaned_data['attachment_point'],
public_ip=self.cleaned_data['public_ip'] or None, # needs None not ''
public_port=self.cleaned_data['public_port'],
bind_ip=self.cleaned_data['bind_ip'] or None,
use_vpn=self.cleaned_data['use_vpn'],
active=self.cleaned_data['active'],
link=self.instance if self.instance.pk is not None else None)
|
[
"noreply@github.com"
] |
fl99kl.noreply@github.com
|
e3847b8540f23760f6839b154e76c75915684d95
|
23d8b54aff4d5885367001accd36e5c5662c2a00
|
/35/35.py
|
b454c35c0ab69bbfb877c614f238d7cc8ce1e89b
|
[] |
no_license
|
dnelsonwsu/InterviewCake
|
0f2e44c6c9eb17e3bf87e359b40158117939fdb8
|
b8787f8b7dba19f73bb5f3bb69f28aba98d01474
|
refs/heads/master
| 2016-09-05T15:28:58.961682
| 2016-02-24T06:11:59
| 2016-02-24T06:11:59
| 39,700,898
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 951
|
py
|
'''Write a function for doing an in-place shuffle of an array.
The shuffle must be "uniform," meaning each item in the original array must have the same probability of ending up in each spot in the final array.
Assume that you have a function get_random(floor, ceiling) for getting a random integer that is >=floor and <=ceiling.'''
import random
import pprint
def get_random(floor, ceiling):
r = random.randint(floor, ceiling)
return r
def randomize_array_in_place(array):
array_size = len(array)
for i in range(array_size - 1):
switch_index = get_random(i, array_size - 1)
        if switch_index != i:
            array[i], array[switch_index] = array[switch_index], array[i]
def puzzle_35():
array = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
randomize_array_in_place(array)
pprint.pprint(array)
print "done"
if __name__ == "__main__":
puzzle_35()
else:
    print("wtf")
|
[
"dnelsonwsu@gmail.com"
] |
dnelsonwsu@gmail.com
|
643a5199eb8355093b92b0726e181a3101f9dc33
|
23c12f60a00adf406e39ab51f1f8af0304834b9f
|
/test/functional/rpc_deprecated.py
|
665a67c0ddf934227bec6105c35f995c6c4be69c
|
[
"MIT"
] |
permissive
|
DemoCoin-Dev/democoin
|
473aed9f10aa8af37c873fa7b6c43801fd0e8b55
|
4f3ee2a4484a05140cc1066a299afae7c120b0d2
|
refs/heads/master
| 2020-04-01T12:33:06.601763
| 2018-10-16T03:04:45
| 2018-10-16T03:04:45
| 153,019,823
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,745
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2017-2018 The Democoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test deprecation of RPC calls."""
from test_framework.test_framework import DemocoinTestFramework
from test_framework.util import assert_raises_rpc_error
class DeprecatedRpcTest(DemocoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
self.extra_args = [[], ["-deprecatedrpc=validateaddress", "-deprecatedrpc=accounts"]]
def run_test(self):
# This test should be used to verify correct behaviour of deprecated
# RPC methods with and without the -deprecatedrpc flags. For example:
#
# self.log.info("Make sure that -deprecatedrpc=createmultisig allows it to take addresses")
# assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 1, [self.nodes[0].getnewaddress()])
# self.nodes[1].createmultisig(1, [self.nodes[1].getnewaddress()])
self.log.info("Test validateaddress deprecation")
SOME_ADDRESS = "mnvGjUy3NMj67yJ6gkK5o9e5RS33Z2Vqcu" # This is just some random address to pass as a parameter to validateaddress
dep_validate_address = self.nodes[0].validateaddress(SOME_ADDRESS)
assert "ismine" not in dep_validate_address
not_dep_val = self.nodes[1].validateaddress(SOME_ADDRESS)
assert "ismine" in not_dep_val
self.log.info("Test accounts deprecation")
# The following account RPC methods are deprecated:
# - getaccount
# - getaccountaddress
# - getaddressesbyaccount
# - getreceivedbyaccount
        # - listaccounts
# - listreceivedbyaccount
# - move
# - setaccount
#
# The following 'label' RPC methods are usable both with and without the
# -deprecatedrpc=accounts switch enabled.
# - getaddressesbylabel
# - getreceivedbylabel
# - listlabels
# - listreceivedbylabel
# - setlabel
#
address0 = self.nodes[0].getnewaddress()
self.nodes[0].generatetoaddress(101, address0)
self.sync_all()
address1 = self.nodes[1].getnewaddress()
self.nodes[1].generatetoaddress(101, address1)
self.log.info("- getaccount")
assert_raises_rpc_error(-32, "getaccount is deprecated", self.nodes[0].getaccount, address0)
self.nodes[1].getaccount(address1)
self.log.info("- setaccount")
assert_raises_rpc_error(-32, "setaccount is deprecated", self.nodes[0].setaccount, address0, "label0")
self.nodes[1].setaccount(address1, "label1")
self.log.info("- setlabel")
self.nodes[0].setlabel(address0, "label0")
self.nodes[1].setlabel(address1, "label1")
self.log.info("- getaccountaddress")
assert_raises_rpc_error(-32, "getaccountaddress is deprecated", self.nodes[0].getaccountaddress, "label0")
self.nodes[1].getaccountaddress("label1")
self.log.info("- getaddressesbyaccount")
assert_raises_rpc_error(-32, "getaddressesbyaccount is deprecated", self.nodes[0].getaddressesbyaccount, "label0")
self.nodes[1].getaddressesbyaccount("label1")
self.log.info("- getaddressesbylabel")
self.nodes[0].getaddressesbylabel("label0")
self.nodes[1].getaddressesbylabel("label1")
self.log.info("- getreceivedbyaccount")
assert_raises_rpc_error(-32, "getreceivedbyaccount is deprecated", self.nodes[0].getreceivedbyaccount, "label0")
self.nodes[1].getreceivedbyaccount("label1")
self.log.info("- getreceivedbylabel")
self.nodes[0].getreceivedbylabel("label0")
self.nodes[1].getreceivedbylabel("label1")
self.log.info("- listaccounts")
assert_raises_rpc_error(-32, "listaccounts is deprecated", self.nodes[0].listaccounts)
self.nodes[1].listaccounts()
self.log.info("- listlabels")
self.nodes[0].listlabels()
self.nodes[1].listlabels()
self.log.info("- listreceivedbyaccount")
assert_raises_rpc_error(-32, "listreceivedbyaccount is deprecated", self.nodes[0].listreceivedbyaccount)
self.nodes[1].listreceivedbyaccount()
self.log.info("- listreceivedbylabel")
self.nodes[0].listreceivedbylabel()
self.nodes[1].listreceivedbylabel()
self.log.info("- move")
assert_raises_rpc_error(-32, "move is deprecated", self.nodes[0].move, "label0", "label0b", 10)
self.nodes[1].move("label1", "label1b", 10)
if __name__ == '__main__':
DeprecatedRpcTest().main()
|
[
"MerlinMagic2018@github.com"
] |
MerlinMagic2018@github.com
|
503e0a154543c51cb8b480f79c3bcd94dacd6961
|
619d36014bff078e49a3c951d61ea281ab713bc4
|
/api/rest/routes/index.py
|
575b7ce120a72e1756b58d73290f7dc7eb8ce63f
|
[] |
no_license
|
kozakjefferson/flask_api_example
|
4457ffb1869cf80aa64a6580f385fdac96cd09cb
|
72428dbc5fa121f37f6dc571bcd624300bc50692
|
refs/heads/master
| 2020-03-30T02:09:57.433345
| 2016-07-05T17:33:45
| 2016-07-05T17:33:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 430
|
py
|
from flask_restful import Resource, Response
from ..functions.function import json_message
## The REST controller layer should only handle HTTP requests/responses and JSON serialization/deserialization.
class index(Resource):
#All HTTP get requests to '/' are handled by this function
def get(self):
response = json_message("Example Flask Application")
return Response(response=response, status=200, mimetype='application/json')
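# A minimal wiring sketch (assumption: a standard flask_restful setup lives
# elsewhere in this project; the names below are illustrative, not from this repo):
#
#   from flask import Flask
#   from flask_restful import Api
#   app = Flask(__name__)
#   api = Api(app)
#   api.add_resource(index, '/')  # routes GET / to index.get()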
|
[
"joshuadparkin@gmail.com"
] |
joshuadparkin@gmail.com
|
f09262994e62e2e95c1c982575319bcb10038c57
|
7cbac541a44b290ed86c8bc94ec0d705390cac9a
|
/rules/mapping/sambamba.smk
|
9a9608f95e6b4bc7e8c1c7c12545f52ff5bae841
|
[] |
no_license
|
GrosseLab/VipeR_HIF1alpha
|
09e8042695d08d466e63924f40b9005500ce64f6
|
3dde5a7d63d880d61abaef0533c18f6f8c770e04
|
refs/heads/master
| 2020-04-29T16:28:06.612315
| 2019-09-16T11:36:01
| 2019-09-16T11:36:01
| 176,261,299
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 732
|
smk
|
rule sambamba_sort:
input:
"{path}.bam"
output:
"{path}.sorted.bam"
params:
"" # optional parameters
threads: 20
wrapper:
"file:viper/wrapper/sambamba_v0.30.0/sort"
# "0.30.0/bio/sambamba/sort"
rule sambamba_index:
input:
"{path}.bam"
output:
"{path}.bam.bai"
params:
"" # optional parameters
threads: 20
wrapper:
"file:viper/wrapper/sambamba_Index"
# rule sambamba_sort_index:
# input:
# "{path}.sorted.bam"
# output:
# "{path}.bam.bai"
# params:
# "" # optional parameters
# threads: 8
# wrapper:
# "file:viper/wrapper/sambamba_Index"
|
[
"claus.weinholdt@informatik.uni-halle.de"
] |
claus.weinholdt@informatik.uni-halle.de
|
e90bdcf42e3b7729ce25348153dcea78c44e4d63
|
5ea68b8c3e354bca9719e961657882c5e2edc459
|
/tests/test_prepare.py
|
05dfd5c73bc5a820a6c94ea35131d508fc13861b
|
[
"MIT"
] |
permissive
|
docsmooth/arbtt-chart
|
b86b00e48f1e21d0c18f5cdcd11400ba202cfb68
|
688d0a0edbd7557a4c6e23b232759f92fef5c158
|
refs/heads/main
| 2023-03-16T18:08:55.535447
| 2021-03-07T15:26:25
| 2021-03-07T15:26:25
| 344,183,289
| 0
| 0
|
MIT
| 2021-03-07T15:26:26
| 2021-03-03T16:02:46
|
Python
|
UTF-8
|
Python
| false
| false
| 3,085
|
py
|
import textwrap
import pandas as pd # type: ignore [import]
import pandas.testing as pdt # type: ignore [import]
import arbtt_chart as ac
def ld(csvs):
return ac.load_inputs(textwrap.dedent(csv.strip("\n")) for csv in csvs)
def test_prepare():
def prep(csvs, args=[]):
args = ac.parse_cmdline_args(args)
return ac.prepare_bartables(ld(csvs), args)
# one category and totals
in1 = """
Tag,Time
a:x-y,00:01:00
(unmatched time),00:02:00
(total time),00:03:00
"""
out1 = pd.DataFrame(
{'Time': ['', '', '00:02:00', '00:01:00', '', '00:03:00'],
'Type': ['text', 'text', 'bar', 'bar', 'text', 'total_bar'],
'Frac': [None, None, 2/3, 1/3, None, 1],
'FracAbove': [None, None, 0, 2/3, None, 0],
'HourFrac': [None, None, 20, 20, None, 20]},
index=pd.Index(['a', '═', '(unmatched time)', 'x-y', '', '(total time)'], name='Tag'))
pdt.assert_frame_equal(prep([in1]), out1)
# same, different totals
in1_totals = """
Tag,Time
a:x-y,00:01:00
(unmatched),00:02:00
(screen),00:03:00
"""
out1_totals = out1.set_index(
pd.Index(['a', '═', '(unmatched)', 'x-y', '', '(screen)'], name='Tag'))
pdt.assert_frame_equal(
prep([in1_totals], args=["--totals-re", "^\\(screen"]),
out1_totals)
# same, subtags
out1_subtags = out1.set_index(
pd.MultiIndex.from_tuples(
[('a', ''), ('═', ''), ('(unmatched time)', ''), ('x', 'y'), ('', ''), ('(total time)', '')],
names=['Tag', 'SubTag']))
pdt.assert_frame_equal(prep([in1], args=["--subtags"]), out1_subtags)
# two categories and totals
in2 = """
Tag,Time
b:z,00:01:00
(unmatched time),00:02:00
(total time),00:03:00
"""
blank = pd.DataFrame(
{'Time': [''], 'Type': ['text'], 'Frac': [None], 'FracAbove': [None], 'HourFrac': [None]},
index=pd.Index([''], name='Tag'))
out2 = out1.set_index(
pd.Index(['b', '═', '(unmatched time)', 'z', '', '(total time)'], name='Tag'))
pdt.assert_frame_equal(prep([in1, in2]), pd.concat([out1, blank, out2]))
# three categories, subtags
in3 = """
Tag,Time
c:z,00:01:00
(unmatched time),00:02:00
(total time),00:03:00
"""
out2_subtags = out1.set_index(
pd.MultiIndex.from_tuples(
[('b', ''), ('═', ''), ('(unmatched time)', ''), ('z', ''), ('', ''), ('(total time)', '')],
names=['Tag', 'SubTag']))
out3_subtags = out1.set_index(
pd.MultiIndex.from_tuples(
[('c', ''), ('═', ''), ('(unmatched time)', ''), ('z', ''), ('', ''), ('(total time)', '')],
names=['Tag', 'SubTag']))
blank_subtags = blank.set_index(pd.MultiIndex.from_tuples([('', '')], names=['Tag', 'SubTag']))
pdt.assert_frame_equal(
prep([in1, in2, in3], args=["--subtags"]),
pd.concat([out1_subtags, blank_subtags, out2_subtags, blank_subtags, out3_subtags]))
|
[
"tomi@nomi.cz"
] |
tomi@nomi.cz
|
7599c1b183e8a03ef11c38bddca0343a42a96eab
|
3e6951bb9c86e44535a99210fa68b1980f9e8a88
|
/datary/operations/test/test_rename.py
|
182741034ed9c6fc673edad9ecb89e0a3cc66378
|
[
"MIT"
] |
permissive
|
Datary/python-sdk
|
b31f2b1db0bbf710e56177f6bf51de1d2256d501
|
2790a50e1ad262cbe3210665dc34f497625e923d
|
refs/heads/master
| 2021-01-12T08:25:09.966693
| 2018-06-19T08:36:23
| 2018-06-19T08:36:23
| 76,570,424
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,178
|
py
|
# -*- coding: utf-8 -*-
"""
Datary python sdk Rename Operation test file
"""
import mock
from datary.test.test_datary import DataryTestCase
from datary.test.mock_requests import MockRequestResponse
class DataryRenameOperationTestCase(DataryTestCase):
"""
    RenameOperation test case
"""
@mock.patch('datary.Datary.request')
def test_rename_file(self, mock_request):
"""
        Test rename_file
"""
mock_request.return_value = MockRequestResponse("")
self.datary.rename_file(
self.json_repo.get('workdir', {}).get('uuid'),
{'path': 'test_path/path', 'basename': 'test_basename'},
{'path': 'test_path/new_path', 'basename': 'test_new_basename'},
)
self.assertEqual(mock_request.call_count, 1)
mock_request.reset_mock()
mock_request.return_value = None
self.datary.rename_file(
self.json_repo.get('workdir', {}).get('uuid'),
{'path': 'test_path/path', 'basename': 'test_basename'},
{'path': 'test_path/new_path', 'basename': 'test_new_basename'},
)
self.assertEqual(mock_request.call_count, 1)
|
[
"m.moraleda@datary.io"
] |
m.moraleda@datary.io
|
71be78fc04e2650a55fa217a4b040ed6be7305ee
|
ac99bf370262a84b64ddd9a9accef0900e8e3c89
|
/enc.py
|
e6405eee0d302d6a56078daa31f85d816262770d
|
[] |
no_license
|
parikshitsharma70/hacker_rank
|
bb0025bf9a0369bfa2446ab3d6559ff237048003
|
2b4c7a577befa1ae265cb8172ab86e04fd69c423
|
refs/heads/master
| 2020-03-23T00:20:38.916806
| 2020-02-23T04:04:00
| 2020-02-23T04:04:00
| 140,857,513
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 451
|
py
|
import math
import itertools
def encryption(s):
s = s.replace(" ", "")
    # Grid layout: the text fills ceil(sqrt(L)) columns.
    c = math.ceil(math.sqrt(len(s)))
    # Split the string into rows of width c; the last row may be shorter.
    arr = [s[i:i+c] for i in range(0, len(s), c)]
    # Read the grid column by column; zip_longest pads short rows with None,
    # which is dropped here.
    tup = [[ch for ch in pair if ch is not None] for pair in itertools.zip_longest(*arr)]
resArr = []
for t in tup:
tempStr = "".join(t)
resArr.append(tempStr)
resStr = " ".join(resArr)
return resStr
print(encryption('feedthedog'))
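# Illustrative check (HackerRank's sample case for this problem, added here):
# 'feedthedog' -> rows 'feed', 'thed', 'og' -> columns read top to bottom.
assert encryption('feedthedog') == 'fto ehg ee dd'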
|
[
"parikshitsharma70@gmail.com"
] |
parikshitsharma70@gmail.com
|
09f50df4d08790e84da2e0863bf84d4b11b8bc72
|
07ecc53b5be6b1a34914a0e02265e847f3ac1a65
|
/Python/Dynamic Programming/10_Hard_正则表达式匹配.py
|
05ffaee77f5dc055d0a181655bdc892c6c26596c
|
[] |
no_license
|
JasmineRain/Algorithm
|
764473109ad12c051f5337ed6f22b517ed9bff30
|
84d7e11c1a01b1994e04a3ab446f0a35eb3d362a
|
refs/heads/master
| 2023-03-14T00:39:51.767074
| 2021-03-09T12:41:44
| 2021-03-09T12:41:44
| 289,603,630
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,086
|
py
|
class Solution:
def isMatch(self, s: str, p: str) -> bool:
if not p:
return not s
if not s and len(p) == 1:
return False
nrow = len(s) + 1
ncol = len(p) + 1
        # dp[r][c] is True iff s[:r] matches p[:c]
        dp = [[False for c in range(ncol)] for r in range(nrow)]
        # base case: the empty string matches the empty pattern,
        # but never a single-character pattern
        dp[0][0] = True
        dp[0][1] = False
        # the empty string can still match patterns like "a*" or "a*b*"
        for j in range(2, ncol):
            if p[j-1] == "*":
                dp[0][j] = dp[0][j-2]
for r in range(1, nrow):
i = r - 1
for c in range(1, ncol):
j = c - 1
if s[i] == p[j] or p[j] == ".":
dp[r][c] = dp[r-1][c-1]
elif p[j] == "*":
if p[j-1] == s[i] or p[j-1] == ".":
dp[r][c] = dp[r][c-2] or dp[r-1][c]
else:
dp[r][c] = dp[r][c-2]
else:
dp[r][c] = False
return dp[len(s)][len(p)]
if __name__ == "__main__":
S = Solution()
print(S.isMatch(s="mississippi", p="mis*is*p*."))
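    # Illustrative checks added here (not in the original file), covering classic cases:
    assert S.isMatch(s="aa", p="a*")     # '*' lets the preceding 'a' repeat
    assert S.isMatch(s="ab", p=".*")     # '.*' matches any string
    assert not S.isMatch(s="aa", p="a")  # a single 'a' cannot cover both characters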
|
[
"530781348@qq.com"
] |
530781348@qq.com
|
a63c939364fef777bf737ce46cca9144e18de4be
|
99b9abac12e8f801347d06b99284adf75135cc76
|
/Strings/Permute.py
|
5ecc8f510684276ae94d48670ea7f7eafc074395
|
[] |
no_license
|
mohanvarma/Projects
|
a907ebb67d4d05b2f858b1b19202c7ce00356ca3
|
04a3a036e7470bcba73f12b9ba78886d66c959fc
|
refs/heads/master
| 2021-05-30T10:04:36.778918
| 2016-01-16T00:55:26
| 2016-01-16T00:55:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 348
|
py
|
def permute(string, stringSoFar):
    if len(string) == 0:
        print(stringSoFar)
    else:
        subString = string[:-1]
        index = 0
        while index <= len(stringSoFar):
            # insert the last character of `string` at every possible position
            newString = stringSoFar[:index] + string[-1] + stringSoFar[index:]
            permute(subString, newString)
            index += 1

permute("abc", "")
|
[
"varma.mohan@ymail.com"
] |
varma.mohan@ymail.com
|
fd9f6d59c0122ae38431113a2a025576a43d0614
|
834a09eba7ba97f23d5c8ced99abcf8cbe6da2c1
|
/raggregate/views/feeds.py
|
93a6fd49d56056e87f74b315527aafd075165315
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
smoothgrips/raggregate
|
4aa67c853d93653ba99f710e9cf4e4c9ddb6457f
|
4f2f445e23e2f6e02b5a16f1edf19a35f31bf472
|
refs/heads/master
| 2021-01-16T02:33:30.895474
| 2012-04-20T21:22:40
| 2012-04-20T21:22:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,973
|
py
|
from raggregate import queries
from pyramid.view import view_config
from raggregate.models import DBSession
@view_config(renderer='atom_story.mak', route_name='atom_story')
def story(request):
s = request.session
r = request
dbsession = DBSession()
stories = queries.get_story_list(page_num = 1, per_page = 30, sort = 'new', request = r)
last_update = stories['stories'][0].added_on.isoformat()
request.response.content_type = "text/xml"
site_name = r.registry.settings['site.site_name']
return {'stories': stories['stories'], 'route': 'atom_story', 'last_update': last_update,
'feed_title': '{0} stories'.format(site_name), 'feed_subtitle': 'newest stories on {0}'.format(site_name),
'site_name': site_name,
}
@view_config(renderer='atom_story.mak', route_name='atom_self_story')
def self_story(request):
s = request.session
r = request
dbsession = DBSession()
stories = queries.get_story_list(page_num = 1, per_page = 30, sort = 'new', request = r, self_only = True)
last_update = stories['stories'][0].added_on.isoformat()
request.response.content_type = "text/xml"
site_name = r.registry.settings['site.site_name']
return {'stories': stories['stories'], 'route': 'atom_self_story', 'last_update': last_update,
'feed_title': '{0} exclusives'.format(site_name), 'feed_subtitle': 'newest exclusives on {0}'.format(site_name),
'site_name': site_name,
}
@view_config(renderer='atom_combined.mak', route_name='atom_combined')
def combined(request):
s = request.session
r = request
dbsession = DBSession()
stories = queries.get_story_list(page_num = 1, per_page = 10, sort = 'new', request = r)
comments = queries.get_recent_comments(10)
    # Interleave comments and stories, newest first.
    agg = list(comments) + list(stories['stories'])
    agg.sort(key=lambda x: x.added_on, reverse=True)
last_update = agg[0].added_on.isoformat()
request.response.content_type = "text/xml"
site_name = r.registry.settings['site.site_name']
return {'interleaved': agg, 'route': 'atom_combined', 'last_update': last_update,
'feed_title': '{0} all content'.format(site_name), 'feed_subtitle': 'newest content on {0}'.format(site_name),
'site_name': site_name,
}
@view_config(renderer='atom_comment.mak', route_name='atom_comment')
def comment(request):
s = request.session
r = request
dbsession = DBSession()
comments = queries.get_recent_comments(20)
last_update = comments[0].added_on.isoformat()
request.response.content_type = "text/xml"
site_name = r.registry.settings['site.site_name']
return {'comments': comments, 'route': 'atom_comment', 'last_update': last_update,
'feed_title': '{0} comments'.format(site_name), 'feed_subtitle': 'newest comments on {0}'.format(site_name),
'site_name': site_name,
}
|
[
"jeff@deserettechnology.com"
] |
jeff@deserettechnology.com
|
2d64c6a4b97f1d186491a3005139ce618c2429f8
|
e6ed64fda597762b3559ce387a35a83cefef13fa
|
/examples/example1.py
|
41adde1c1ed5bcc65c224a37fb079ab89713f2dd
|
[
"MIT"
] |
permissive
|
foxsi/SAASpy
|
02d4b5e53bb47d105af3ad7ed8b89862901e9cbd
|
473956c59859c7bd9f81323dfb82da07ce606b26
|
refs/heads/master
| 2022-12-13T13:43:39.027937
| 2022-12-03T20:40:54
| 2022-12-03T20:40:54
| 26,690,220
| 2
| 1
|
MIT
| 2022-12-03T20:40:56
| 2014-11-15T18:58:43
|
Python
|
UTF-8
|
Python
| false
| false
| 251
|
py
|
__author__ = 'schriste'
from saaspy.image import image
import matplotlib.pyplot as plt
file = "/Users/schriste/Desktop/SAAS/FOXSI_SAAS_141113_203247.fits"
if __name__ == '__main__':
s = image(file)
plt.figure()
s.imshow()
plt.show()
|
[
"steven.d.christe@nasa.gov"
] |
steven.d.christe@nasa.gov
|
28063919d428c727e8feebbaf7741f11debb00ba
|
b7127055167119a05a5544960add0c887992e8b1
|
/source-code/algorithms/main.py
|
e0152542b964fd3f514a4dc6870a34509eaa2e6c
|
[] |
no_license
|
ndangtt/1LLGA
|
802847693b91a553cd43f400cf95b9e3b8cd07c7
|
c1044aa8b73e18c82d45a5c06dcf3669a44d8ef8
|
refs/heads/master
| 2022-08-01T12:11:22.318510
| 2022-07-16T20:27:20
| 2022-07-16T20:27:20
| 167,725,190
| 2
| 1
| null | 2022-07-16T20:27:21
| 2019-01-26T18:43:22
|
Python
|
UTF-8
|
Python
| false
| false
| 8,364
|
py
|
# (Nguyen) Syntax: python2 main.py --problem <problem> --size <size> --algorithm <algorithm> <parameters> [--random_seed <random_seed>] [--max_evaluation <max_evaluation>] [--it <number of runs>]
# Examples:
# ----- static LL, one run, without bound
# python2 main.py --problem OneMax --size 500 --algorithm "['LL_static']" --LL_static_lambda1 4 --LL_static_lambda2 4 --LL_static_lda 3 --LL_static_crossOverBias 0.3 --random_seed 11
# ----- static LL, one run, with bound
# python2 main.py --problem OneMax --size 500 --algorithm "['LL_static']" --LL_static_lambda1 4 --LL_static_lambda2 4 --LL_static_lda 3 --LL_static_crossOverBias 0.3 --random_seed 11 --max_evaluation 100000
# ----- static LL, 10 runs
# python2 main.py --problem OneMax --size 500 --algorithm "['LL_static']" --LL_static_lambda1 4 --LL_static_lambda2 4 --LL_static_lda 3 --LL_static_crossOverBias 0.3 --it 10
# ----- dynamic LL, 2 parameters
# python2 main.py --problem OneMax --size 500 --algorithm "['LL_dynamic_01']" --LL_dynamic_01_a 1.1 --LL_dynamic_01_b 0.7 --random_seed 123
# ----- dynamic LL, 5 parameters
# python2 main.py --problem OneMax --size 500 --algorithm "['LL_dynamic_02']" --LL_dynamic_02_alpha 0.7 --LL_dynamic_02_beta 3 --LL_dynamic_02_gamma 0.3 --LL_dynamic_02_a 1.1 --LL_dynamic_02_b 0.7 --random_seed 123
from solution import *
#from tqdm import tqdm
import evaluate
import utils
import algorithms
import plot
import record
import numpy as np
import sys, argparse
import time
import os
import pickle
parser = argparse.ArgumentParser()
parser.add_argument('--problem', help='Available problems: OneMax, LeadingOnes, Jump, Linear, RoyalRoad.')
parser.add_argument('--size', help='Size of the search space.')
parser.add_argument('--algorithm', help='Format: \"[\'RLS\', \'OPOEA\']\"; Available algorithms: OPOEA, OPOEA_shift, OPOEA_resampling, OPOEA_alpha, RLS, LL, LL_opt, LL_shift, RLS_LL, LL_static, LL_dynamic_01, LL_dynamic_02')
parser.add_argument('--it', help='Number of iterations from the same algorithm.')
parser.add_argument('--steps')
parser.add_argument('--save')
parser.add_argument('--test')
parser.add_argument('--extra_name')
parser.add_argument('--random_seed')
parser.add_argument('--max_evaluation',type=float,default=-1)
# LL_static
parser.add_argument("--LL_static_lambda1")
parser.add_argument("--LL_static_lambda2")
parser.add_argument("--LL_static_lda")
parser.add_argument("--LL_static_crossOverBias")
# LL_static_02
parser.add_argument("--LL_static_02_lda")
parser.add_argument("--LL_static_02_alpha")
parser.add_argument("--LL_static_02_beta")
# LL_dynamic_01
parser.add_argument("--LL_dynamic_01_a")
parser.add_argument("--LL_dynamic_01_b")
# LL_dynamic_02
parser.add_argument("--LL_dynamic_02_alpha")
parser.add_argument("--LL_dynamic_02_beta")
parser.add_argument("--LL_dynamic_02_gamma")
parser.add_argument("--LL_dynamic_02_a")
parser.add_argument("--LL_dynamic_02_b")
# for LL algorithms
parser.add_argument("--crossover_choice",type=int,default=1)
parser.add_argument("--ioh_output",type=str,default=None)
#ioh_output = 0
#DEBUG
def mytest():
global ioh_output
print("test: " + str(ioh_output))
def main(argv):
#get arguments
args = parser.parse_args()
if args.problem != None:
problem = args.problem
else:
problem = "OneMax"
if args.size != None:
size = int(args.size)
else:
size = 100
if args.algorithm != None:
algorithm = eval(args.algorithm)
else:
algorithm = ["OPOEA"]
if args.it != None:
it = int(args.it)
else:
it = 1
if args.steps != None:
steps = int(args.steps)
else:
steps = int(size)
if args.save != None:
save = bool(args.save)
else:
save = False
if args.test != None:
test = bool(args.test)
else:
test = False
if args.extra_name != None:
extra_name = str(args.extra_name)
else:
extra_name = ""
if args.random_seed != None:
random_seed = int(args.random_seed)
if it > 1:
print("Error: it must be equal to 1 when random_seed is specified")
return 1
else:
random_seed = None
crossover_choice = args.crossover_choice
max_evaluation = int(args.max_evaluation)
#global ioh_output
ioh_output = args.ioh_output
#DEBUG
#print("args.ioh_output: " + str(args.ioh_output))
#print("ioh_output: " + str(ioh_output))
if 'LL_static' in algorithm:
LL_static_lambda1 = int(args.LL_static_lambda1)
LL_static_lambda2 = int(args.LL_static_lambda2)
LL_static_lda = int(args.LL_static_lda)
LL_static_crossOverBias = float(args.LL_static_crossOverBias)
if 'LL_static_02' in algorithm:
LL_static_02_lda = int(args.LL_static_02_lda)
LL_static_02_alpha = int(args.LL_static_02_alpha)
LL_static_02_beta = int(args.LL_static_02_beta)
if 'LL_dynamic_01' in algorithm:
LL_dynamic_01_a = float(args.LL_dynamic_01_a)
LL_dynamic_01_b = float(args.LL_dynamic_01_b)
if 'LL_dynamic_02' in algorithm:
LL_dynamic_02_alpha = float(args.LL_dynamic_02_alpha)
LL_dynamic_02_beta = float(args.LL_dynamic_02_beta)
LL_dynamic_02_gamma = float(args.LL_dynamic_02_gamma)
LL_dynamic_02_a = float(args.LL_dynamic_02_a)
LL_dynamic_02_b = float(args.LL_dynamic_02_b)
results = {}
for alg in algorithm:
results[alg] = record.Record()
#import algorithms
#mytest() #DEBUG
if ioh_output is not None:
if os.path.isfile(ioh_output):
os.remove(ioh_output)
with open(ioh_output,'wt') as f:
f.write('')
for i in range(it):
#for i in tqdm(range(it)):
print('Run number ' + str(i))
#set the instance for given problem
instance = eval('evaluate.setup_' + problem + '(size, problem)')
if random_seed is not None:
np.random.seed(random_seed)
#initial solution
initial_solution = Solution(utils.random_vector(size), None, None, 0)
initial_solution.value = getattr(evaluate, problem)(initial_solution, instance, True)
#Solve the problem
for alg in algorithm:
algFunc = getattr(algorithms,alg)
if alg == 'LL_static':
experiment = algFunc(initial_solution, instance, LL_static_lambda1, LL_static_lambda2, LL_static_lda, LL_static_crossOverBias, crossover_choice, max_evaluation, ioh_output)
elif alg == 'LL_static_02':
experiment = algFunc(initial_solution, instance, LL_static_02_lda, LL_static_02_alpha, LL_static_02_beta, crossover_choice, max_evaluation, ioh_output)
elif alg == 'LL_dynamic_01':
experiment = algFunc(initial_solution, instance, LL_dynamic_01_a, LL_dynamic_01_b, crossover_choice, max_evaluation, ioh_output)
elif alg == 'LL_dynamic_02':
experiment = algFunc(initial_solution, instance, LL_dynamic_02_alpha, LL_dynamic_02_beta, LL_dynamic_02_gamma, LL_dynamic_02_a, LL_dynamic_02_b, crossover_choice, max_evaluation, ioh_output)
else:
experiment = getattr(algorithms, alg)(initial_solution, instance)
#print(alg)
print(experiment[0])
results[alg].insert_expriment(experiment)
list_of_records = [results[alg] for alg in algorithm]
if save:
for (l,alg) in zip(list_of_records, algorithm):
l.print_to_file(problem, size, alg, steps, extra_name)
if test:
for (l,alg) in zip(list_of_records, algorithm):
l.plot_average(steps)
utils.compare(list_of_records, steps, algorithm)
if it > 1:
print("\n------------------- Summarised results ------------------")
for alg in algorithm:
lsVals = [e[0] for e in results[alg].list_of_results]
print(alg + ', mean: ' + str(results[alg].mean_opt_time))
print(alg + ', min: ' + str(np.min(lsVals)))
print(alg + ', max: ' + str(np.max(lsVals)))
return 0
if __name__ == "__main__":
print(' '.join(sys.argv))
main(sys.argv[1:])
|
[
"ndangt@gmail.com"
] |
ndangt@gmail.com
|
33224b84f4090301c6096ba024cd24e04a5a0c56
|
69597554851a45c956dfeeff3f9878b0f9fd5379
|
/config/settings/production.py
|
a9ef1782e447d7aeda5d6d715fc36c279da33cae
|
[
"MIT"
] |
permissive
|
ParkGwanWoo/instagram-clone
|
64e14ef79f82e9caa9e5d4cf11fc7e51e28f850c
|
3e210909c887043f05975fb59dc3c48112ceaaf2
|
refs/heads/master
| 2021-09-09T07:55:23.859662
| 2018-08-06T07:27:54
| 2018-08-06T07:27:54
| 140,709,595
| 0
| 0
|
MIT
| 2021-09-08T00:01:58
| 2018-07-12T12:22:07
|
Python
|
UTF-8
|
Python
| false
| false
| 8,292
|
py
|
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env('DJANGO_SECRET_KEY')
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['devyeon.kr'])
# DATABASES
# ------------------------------------------------------------------------------
DATABASES['default'] = env.db('DATABASE_URL') # noqa F405
DATABASES['default']['ATOMIC_REQUESTS'] = True # noqa F405
DATABASES['default']['CONN_MAX_AGE'] = env.int('CONN_MAX_AGE', default=60) # noqa F405
# CACHES
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': env('REDIS_URL'),
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
# Mimicing memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
'IGNORE_EXCEPTIONS': True,
}
}
}
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
SECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
SESSION_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-httponly
SESSION_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
CSRF_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-httponly
CSRF_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
# TODO: set this to 60 seconds first and then to 518400 once you prove the former works
SECURE_HSTS_SECONDS = 60
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool('DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
SECURE_HSTS_PRELOAD = env.bool('DJANGO_SECURE_HSTS_PRELOAD', default=True)
# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = env.bool('DJANGO_SECURE_CONTENT_TYPE_NOSNIFF', default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-browser-xss-filter
SECURE_BROWSER_XSS_FILTER = True
# https://docs.djangoproject.com/en/dev/ref/settings/#x-frame-options
X_FRAME_OPTIONS = 'DENY'
# STORAGES
# ------------------------------------------------------------------------------
# https://django-storages.readthedocs.io/en/latest/#installation
INSTALLED_APPS += ['storages'] # noqa F405
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_QUERYSTRING_AUTH = False
# DO NOT change these unless you know what you're doing.
_AWS_EXPIRY = 60 * 60 * 24 * 7
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_OBJECT_PARAMETERS = {
'CacheControl': f'max-age={_AWS_EXPIRY}, s-maxage={_AWS_EXPIRY}, must-revalidate',
}
# STATIC
# ------------------------
STATICFILES_STORAGE = 'config.settings.production.StaticRootS3BotoStorage'
STATIC_URL = f'https://s3.amazonaws.com/{AWS_STORAGE_BUCKET_NAME}/static/'
# MEDIA
# ------------------------------------------------------------------------------
# region http://stackoverflow.com/questions/10390244/
from storages.backends.s3boto3 import S3Boto3Storage # noqa E402
StaticRootS3BotoStorage = lambda: S3Boto3Storage(location='static') # noqa
MediaRootS3BotoStorage = lambda: S3Boto3Storage(location='media', file_overwrite=False) # noqa
# endregion
DEFAULT_FILE_STORAGE = 'config.settings.production.MediaRootS3BotoStorage'
MEDIA_URL = f'https://s3.amazonaws.com/{AWS_STORAGE_BUCKET_NAME}/media/'
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[0]['OPTIONS']['loaders'] = [ # noqa F405
(
'django.template.loaders.cached.Loader',
[
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
),
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email
DEFAULT_FROM_EMAIL = env(
'DJANGO_DEFAULT_FROM_EMAIL',
default='Instagram <noreply@devyeon.kr>'
)
# https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = env('DJANGO_EMAIL_SUBJECT_PREFIX', default='[Instagram]')
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
ADMIN_URL = env('DJANGO_ADMIN_URL')
# Anymail (Mailgun)
# ------------------------------------------------------------------------------
# https://anymail.readthedocs.io/en/stable/installation/#installing-anymail
INSTALLED_APPS += ['anymail'] # noqa F405
EMAIL_BACKEND = 'anymail.backends.mailgun.EmailBackend'
# https://anymail.readthedocs.io/en/stable/installation/#anymail-settings-reference
ANYMAIL = {
'MAILGUN_API_KEY': env('MAILGUN_API_KEY'),
'MAILGUN_SENDER_DOMAIN': env('MAILGUN_DOMAIN')
}
# Gunicorn
# ------------------------------------------------------------------------------
INSTALLED_APPS += ['gunicorn'] # noqa F405
# Collectfast
# ------------------------------------------------------------------------------
# https://github.com/antonagestam/collectfast#installation
INSTALLED_APPS = ['collectfast'] + INSTALLED_APPS # noqa F405
AWS_PRELOAD_METADATA = True
# LOGGING
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'mail_admins'],
'propagate': True
}
}
}
# Your stuff...
# ------------------------------------------------------------------------------
|
[
"yeonkevin@icloud.com"
] |
yeonkevin@icloud.com
|
cb4b1ece284fb59a44869f25bb55c637424b4a44
|
6f577a95335d2a8ee5f02d02b4048663f353d02c
|
/pages/urls.py
|
4cbf316f7b57bfe5dddfd39470b4b48aad0b3883
|
[] |
no_license
|
nabil-rady/Django-Tailwind-Template
|
e569d4626a8f03b2fa789f5cf1146c017daf1ccb
|
71ac5af859e5bd8db49c6a6267bfb49d0593eee4
|
refs/heads/master
| 2023-08-29T11:57:12.977366
| 2021-09-22T19:17:02
| 2021-09-22T19:17:02
| 406,931,333
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 769
|
py
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.landing_page, name='landing_page'),
path('dashboard', views.dashboard, name='dashboard'),
path('forms', views.forms, name='forms'),
path('cards', views.cards, name='cards'),
path('charts', views.charts, name='charts'),
path('buttons', views.buttons, name='buttons'),
path('modals', views.modals, name='modals'),
path('tables', views.tables, name='tables'),
path('login', views.login, name='login'),
path('forgot_password', views.forgot_password, name='forgot_password'),
path('create_account', views.create_account, name='create_account'),
path('blank', views.blank, name='blank'),
path('404', views.template404, name='template404'),
]
|
[
"midorady9999@gmail.com"
] |
midorady9999@gmail.com
|
c0286bdbb554b58b9aa60f2021eef27056c98aab
|
18cf194966e41de97bff46043cf43e4613d90638
|
/filter_black_list.py
|
1b6a9929e0114e12c3f11c1b57bdc3c8cae647fb
|
[] |
no_license
|
feapoi/tcpdump_filter
|
64cf0ef9743736e5bde127267ce2267db4e49da4
|
8fa08c6586189e9e3b780fc3f4aff33a7ae9999d
|
refs/heads/master
| 2022-04-14T18:31:18.259964
| 2020-04-10T03:43:00
| 2020-04-10T03:43:00
| 254,532,991
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,099
|
py
|
import os
from collections import Counter
def filter_black_list():
res = []
for root, dirs, files in os.walk("log"):
for q in files:
if "txt" in q:
c = Counter()
for line in open("./log/" + q, "r"):
if "[S]" in line:
begin = line.find("IP ")
end = line.rfind(" >")
ss = line[begin + 3:end]
c[ss] += 1
elif "[S.]" in line:
begin = line.find("IP ")
end = line.rfind(" >")
ss = line[begin + 3:end]
c[ss] -= 1
for k in c:
if c.get(k) >= 1:
res.append(k)
    # Read the existing blacklist once so entries are not appended twice.
    with open("./blacklist.list", "r") as r:
        oldRes = r.readlines()
    with open("./blacklist.list", "a") as f:
        for i in res:
            if i + "\n" not in oldRes:
                f.write(i + "\n")
filter_black_list()
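# An illustrative (synthetic) tcpdump line of the shape this parser expects;
# the source 'ip.port' is the text between 'IP ' and ' >':
sample = "12:00:00.000001 IP 203.0.113.7.44321 > 198.51.100.9.80: Flags [S], seq 0, length 0"
assert sample[sample.find("IP ") + 3:sample.rfind(" >")] == "203.0.113.7.44321"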
|
[
"369215201@qq.com"
] |
369215201@qq.com
|
0db0f3417d7e1d6cfb3ba376a2bdafbe3c27def0
|
97672d40aaa47ac09f5c0ac49f50269146461a0f
|
/solved/414. 第三大的数.py
|
32b7e4d45bfd31ba815b889c135ef5fb917a2639
|
[] |
no_license
|
xukangjune/Leetcode
|
a120263243e2d8bddda99429c1c7a6be4eeeda51
|
2cad20b7639bd709c4a064e463d7414c43c2af90
|
refs/heads/master
| 2020-05-04T08:10:24.805606
| 2019-10-06T03:56:36
| 2019-10-06T03:56:36
| 179,041,684
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 778
|
py
|
"""
My first thought here was counting sort, but its memory cost is too high. Instead, three
tracking variables are used; to eliminate the effect of duplicate elements, the array is
first converted to a set. Each value in the set is then compared against the three
variables in turn.
"""
class Solution(object):
def thirdMax(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
nums = set(nums)
first = second = third = min(nums)
for num in nums:
if num > third:
first, second, third = second, third, num
elif num > second:
first, second = second, num
elif num > first:
first = num
return first if second > first else third
solve = Solution()
nums = [1, 2]
print(solve.thirdMax(nums))
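# Extra sanity checks, assuming the LeetCode 414 contract: return the third
# largest distinct value, or the maximum when fewer than three distinct values
# exist.
assert solve.thirdMax([3, 2, 1]) == 1
assert solve.thirdMax([2, 2, 3, 1]) == 1
assert solve.thirdMax([1, 2]) == 2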
|
[
"38693074+xukangjune@users.noreply.github.com"
] |
38693074+xukangjune@users.noreply.github.com
|
3cc456a4fed6e887ff6955657b04461522f8c4bb
|
4e9fb17329b71a778c2a9016df9e0f3bab02accb
|
/src/runners/TopSDGGenesRunner.py
|
a7d98defc80e773ca86730a8289484ef4679da30
|
[] |
no_license
|
hag007/bnet
|
7a58d46fbe2508b4c664c200b598db840d910b2c
|
e43906a9eea0a318de74a07402b306450cff2274
|
refs/heads/master
| 2023-07-27T20:14:43.528343
| 2021-08-25T13:06:01
| 2021-08-25T13:06:01
| 391,555,784
| 0
| 0
| null | 2021-08-24T11:36:37
| 2021-08-01T07:15:26
|
Python
|
UTF-8
|
Python
| false
| false
| 2,749
|
py
|
import sys
sys.path.insert(0, '../')
sys.path.insert(0, '../..')
import os
import pandas as pd
from src import constants
from src.implementations.top_sdg import main as top_sdg_main
from src.utils.ensembl2entrez import ensembl2entrez_convertor
from src.utils.network import get_network_genes
from src.utils.go_similarity import init_go_metadata
from src.runners.abstract_runner import AbstractRunner
class TopSDGGenesRunner(AbstractRunner):
def __init__(self):
super().__init__(f"top_SDG_genes")
def extract_modules_and_bg(self, bg_genes, dest_algo_dir):
results = open(os.path.join(dest_algo_dir, "modules.txt")).readlines()
modules = [[] for x in range(max([int(x.strip().split(" =")[1]) for x in results[1:]]) + 1)]
for x in results[1:]:
if int(x.strip().split(" =")[1]) != -1:
modules[int(x.strip().split(" =")[1])].append(x.strip().split(" =")[0])
else:
modules.append([x.strip().split(" =")[0]])
        # filter() returns an iterator in Python 3; materialize it so it can be
        # iterated for all_bg_genes and still measured with len() afterwards
        modules = list(filter(lambda x: len(x) > 3, modules))
        all_bg_genes = [bg_genes for x in modules]
        print("extracted {} modules".format(len(modules)))
return modules, all_bg_genes
def init_params(self, dataset_file_name, network_file_name, output_folder):
df_scores=pd.read_csv(dataset_file_name, sep='\t', index_col=0)
sig_genes=df_scores['qval'][df_scores['qval']<0.05].index
active_genes_file=os.path.join(output_folder, "active_genes_file.txt")
open(active_genes_file, "w+").write("\n".join([x for x in sig_genes if len(ensembl2entrez_convertor([x]))>0 ]))
bg_genes=get_network_genes(network_file_name)
return active_genes_file, bg_genes
def run(self, dataset_file_name, network_file_name, output_folder, **kwargs):
print("run top_sdg_genes runner...")
slices_file = kwargs['slices_file']
constants.N_OF_THREADS=1
if 'n_of_threads' in kwargs:
constants.N_OF_THREADS=kwargs['n_of_threads']
constants.USE_CACHE=False
if 'use_cache' in kwargs:
constants.USE_CACHE=kwargs['use_cache']=='true'
if 'compare_folder' in kwargs:
compare_folder = kwargs['compare_folder']
active_genes_file, bg_genes = self.init_params(dataset_file_name, network_file_name, output_folder)
# print(f'domino_parameters: active_genes_file={active_genes_file}, network_file={network_file_name},slices_file={slices_file}, slice_threshold={slice_threshold},module_threshold={module_threshold}')
modules = top_sdg_main(dataset_file=dataset_file_name, compare_folder=compare_folder)
all_bg_genes = [bg_genes for x in modules]
return modules, all_bg_genes
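# Hypothetical sketch of the modules.txt layout that extract_modules_and_bg
# expects (a header line, then "gene = module_index" rows, where -1 marks an
# unclustered gene that becomes a singleton module); the gene IDs below are
# illustrative only:
#
#   #node = module
#   ENSG00000141510 = 0
#   ENSG00000012048 = 0
#   ENSG00000139618 = 1
#   ENSG00000171862 = -1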
|
[
"shpigelman1@localhost.localdomain"
] |
shpigelman1@localhost.localdomain
|
cd3d9035e119adff19f44043ee58aad0e88d33d9
|
e498c7b38f57d4b242ff094a5f617fd8c94465ce
|
/homework2.py
|
c52edf5ea3eb51016e2eca5f863df58a2e2074d5
|
[] |
no_license
|
Jaunsou/pythonhomework
|
c485f03448f7c15e3d8c4041435edeec8c8f8e0f
|
98bf7bfb9c5a14297ae14f0254282716b8e72073
|
refs/heads/main
| 2023-03-09T21:45:50.334066
| 2021-02-28T01:46:20
| 2021-02-28T01:46:20
| 313,906,518
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 127
|
py
|
from functools import reduce
def prod(a):
def m(x,y):
return x*y
return reduce(m,a)
print(prod([1,4,7,10,13]))
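# Equivalent inline form for reference; 1*4*7*10*13 == 3640:
print(reduce(lambda x, y: x * y, [1, 4, 7, 10, 13]))  # 3640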
|
[
"noreply@github.com"
] |
Jaunsou.noreply@github.com
|
8d439e345994203fb44297153208227eedf6b39d
|
545b3f8866be884ecc4b562b10bba29123835f56
|
/JSONaug.py
|
509eca0b4ace9d669ea95059ffcb6a0002723079
|
[] |
no_license
|
ShulingTang/Image_Augmentation_Demo
|
f9cc7ff8c6cc61dbc3a39beffa718caa98880352
|
83c489ec100c95db619b7c67764e2917a5f3965a
|
refs/heads/master
| 2023-04-25T05:54:47.872561
| 2021-05-07T17:40:05
| 2021-05-07T17:40:05
| 365,300,022
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,059
|
py
|
import numpy as np
import os
from pretreatment import mkdir
import json
import imgaug as ia
from imgaug import augmenters as iaa
from PIL import Image
from pretreatment import *
# =========================
#
#
#
if __name__ == '__main__':
JSON_DIR = "HRSID_JPG/annotations" # Json文件目录
JSON_NAME = 'train_test2017.json' # 可选:train_test2017.json; test2017.json; train2017.json
IMG_DIR = "HRSID_JPG/JPEGImages"
AUG_TXT_DIR = "jsontest/Annotations_Aug" # 存储增强后的txt文件夹路径
mkdir(AUG_TXT_DIR)
AUG_IMG_DIR = "jsontest/JPEGImages_Aug" # 存储增强后的影像文件夹路径
mkdir(AUG_IMG_DIR)
AUGLOOP = 3 # 每张影像增强的数量
boxes_img_aug_list = []
new_bndbox = []
new_bndbox_list = []
    # image augmentation pipeline
    seq = iaa.Sequential([
        iaa.Flipud(0.5),  # vertically flip 50% of all images
        iaa.Fliplr(0.5),  # horizontal mirror
        iaa.Multiply((1.2, 1.5)),  # change brightness, doesn't affect BBs
        iaa.GaussianBlur(sigma=(0, 3.0)),  # iaa.GaussianBlur(0.5),
        iaa.Affine(
            translate_px={"x": 15, "y": 15},
            scale=(0.35, 0.5),
            rotate=(-30, 30)
        )  # translate by 15px on x/y axis, scale to 35-50%, rotate +/-30 degrees; affects BBs
    ])
path = os.path.join(JSON_DIR, str(JSON_NAME))
    # load the JSON file
    with open(path, 'r') as load_f:
        json_load = json.load(load_f)
    img_names = json_load['images']  # image list
    img_annotation = json_load['annotations']  # annotation list
for item1 in img_names:
name = item1['file_name']
img_id = item1['id']
bndbox_list = []
        # walk the annotation list and collect, into bndbox_list, the bbox of
        # every annotation that points at the image with id img_id
for item2 in img_annotation:
if item2['image_id'] == img_id:
bbox = item2['bbox'] # [x1, y1, width, high]
bndbox = [bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3]]
# bndbox = [bbox['0'], bbox['1'], bbox['2'], bbox['3']]
# print(bndbox)
                bndbox_list.append(bndbox)  # collects every bndbox belonging to this image id
if item2['image_id'] > img_id:
break
a = bndbox_list
b = os.path.join(IMG_DIR, name)
for epoch in range(AUGLOOP):
            seq_det = seq.to_deterministic()  # keep image and boxes transformed in sync, not independently at random
            # read the image (no resizing needed)
            img = Image.open(os.path.join(IMG_DIR, name))
            img = np.array(img)
            # augment the bndbox coordinates
for i in range(len(bndbox_list)):
bbs = ia.BoundingBoxesOnImage([
ia.BoundingBox(x1=bndbox_list[i][0], y1=bndbox_list[i][1], x2=bndbox_list[i][2], y2=bndbox_list[i][3]),
], shape=img.shape)
bbs_aug = seq_det.augment_bounding_boxes([bbs])[0]
boxes_img_aug_list.append(bbs_aug)
# new_bndbox_list:[[x1,y1,x2,y2],...[],[]]
new_bndbox_list.append([int(bbs_aug.bounding_boxes[0].x1),
int(bbs_aug.bounding_boxes[0].y1),
int(bbs_aug.bounding_boxes[0].x2),
int(bbs_aug.bounding_boxes[0].y2)])
            # save the augmented image
image_aug = seq_det.augment_images([img])[0]
path = os.path.join(AUG_IMG_DIR, str(name[:-4]) + "_aug_" + str(epoch) + '.jpg')
# image_auged = bbs.draw_on_image(image_aug, thickness=0)
Image.fromarray(image_aug).save(path)
            # save the augmented TXT annotation
            change_txt_list_annotation(name[:-4], new_bndbox_list, AUG_TXT_DIR, epoch)
            # save the augmented XML annotation (disabled)
# change_xml_list_annotation(TXT_DIR, name[:-4], new_bndbox_list, AUG_XML_DIR, epoch)
print(str(name[:-4]) + "_aug_" + str(epoch) + '.jpg')
new_bndbox_list = []
# test: P0001_3600_4400_3600_4400
|
[
"noreply@github.com"
] |
ShulingTang.noreply@github.com
|
1cdfee8c9daf1b2b2b7435bdfb7475efa59efc3f
|
ba92071d977f8350fc1f03ebd1344d4e487457e0
|
/content/migrations/0002_lesson_video_link.py
|
4d828f5ce4e9898514c20d685f093b4b27923361
|
[] |
no_license
|
lokesh27/3Dex_portal
|
c5af0521a426d485f1f9e94833e55e1ca38f484a
|
893284a5a2d265a162c9684768d41194b927251f
|
refs/heads/master
| 2022-12-09T03:04:14.952871
| 2016-05-09T13:02:02
| 2016-05-09T13:02:02
| 50,050,747
| 1
| 1
| null | 2022-11-22T01:07:34
| 2016-01-20T18:27:27
|
HTML
|
UTF-8
|
Python
| false
| false
| 441
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('content', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='lesson',
name='video_link',
field=models.CharField(default=1, max_length=500),
preserve_default=False,
),
]
|
[
"lokesh.tuteja@yahoo.co.in"
] |
lokesh.tuteja@yahoo.co.in
|
a151e08b4a613bc738600fbb4fd03df07d0d9e46
|
6894aad6baa4c78089765584d7590046ba76e02d
|
/errant_env/bin/pip
|
39552b1ba999ffae25d83aebe948c782e01484a7
|
[
"Apache-2.0"
] |
permissive
|
Yakonick/test
|
4dc167c3e6ddf0518aa79d72d5893795a82863aa
|
1d812776d9733d8020422a6a14cea6880f591888
|
refs/heads/main
| 2023-03-31T09:11:01.091800
| 2021-04-11T13:39:53
| 2021-04-11T13:39:53
| 356,863,628
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 257
|
#!/Users/yasen/Desktop/tests/errant_env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"Nikitoskova123@gmail.com"
] |
Nikitoskova123@gmail.com
|
|
15c16ddf6632e5b50b3d4ba54c5222391bc64d02
|
0329c160934d2e6eb81e90183745c6ff39fc9462
|
/budgetme/test/fixtures/fixtures.py
|
a0940470b249ca465eb15c62d4857fa7f3686296
|
[
"MIT"
] |
permissive
|
poblouin/budgetme-rest-api
|
2165b820e847e96be028295eb4831a28152e2935
|
74d9237bc7b0a118255a659029637c5ed1a8b7a1
|
refs/heads/master
| 2021-01-01T19:59:31.579829
| 2019-06-02T20:14:19
| 2019-06-02T20:14:19
| 98,740,836
| 2
| 0
|
MIT
| 2020-06-05T21:13:14
| 2017-07-29T15:19:49
|
Python
|
UTF-8
|
Python
| false
| false
| 730
|
py
|
import pytest
from rest_framework.test import APIClient
from budgetme.apps.core.models import User
# pytest marks have no effect on fixtures; database access is provided by the
# `db` fixture requested below.
@pytest.fixture
def access_token(db, django_db_setup):
users = User.objects.all()
assert users is not None and len(users) > 0
user = users[0]
client = APIClient()
response = client.post('/api/v1/token', {'email': user.email, 'password': 'test1234'}, format='json')
assert response is not None
assert response.status_code == 200
assert response.data.get('access') is not None
return response.data['access']
@pytest.fixture
def authed_client(access_token):
client = APIClient()
client.credentials(HTTP_AUTHORIZATION='Token ' + access_token)
return client
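# A minimal sketch of how these fixtures might be consumed in a test; the
# endpoint below is hypothetical, not part of this module:
#
#   def test_list_budgets(authed_client):
#       response = authed_client.get('/api/v1/budgets')
#       assert response.status_code == 200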
|
[
"blouin.pierreolivier@gmail.com"
] |
blouin.pierreolivier@gmail.com
|
d7df78feee1564eb00a339d7e63bf78235ae6794
|
cded49be3920bb970884aab73784703b654cd680
|
/bilibili_meter/web_server/routes/api_activity.py
|
fe383b5cd98ff2eb73ece83e54a3fa288a8a1653
|
[
"MIT"
] |
permissive
|
xyzQvQ/bilibili_meter
|
14926f54e63ba672eda5e089f0ed2d50626a18f3
|
62da38caf5cd8bcdcdaf1593352e838c817e2ee2
|
refs/heads/master
| 2023-07-04T19:49:25.419635
| 2019-11-18T15:15:03
| 2019-11-18T15:15:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,131
|
py
|
from flask import Blueprint, jsonify, request
import time
import logging
from ..model import WebUser, WatchedUser, WatchedVideo, Task, TaskStatus,\
ItemOnline, ItemVideoStat, ItemUpStat, ItemRegionActivity,\
TaskFailed,TotalWatchedUser,TotalWatchedVideo,TotalEnabledTask,\
WorkerStatus
from .. import orm
from ..utils import get_zero_timestamp
main = Blueprint('api_activity', __name__)
@main.route('/get')
def get_activity():
# byday = request.values.get('byday', None)
# if not byday:
c_items = ItemRegionActivity.findAll(limit=500, orderBy='id DESC')
c_items.reverse()
# else:
byday_items = ItemRegionActivity.findByDay(need_key='id', need_filter='MAX(`id`)')
byday_items.reverse()
    # strip fields the client doesn't need
for i in c_items:
del i['id']
del i['task_id']
i['rawtime']=i['time']
for i in byday_items:
del i['id']
del i['task_id']
i['rawtime']=i['time']
i['time']=get_zero_timestamp(i['time'])
d = dict()
d['data'] = {'c':c_items,'byday':byday_items}
return jsonify(d)
|
[
"gravitykey@hotmail.com"
] |
gravitykey@hotmail.com
|
fece02e43b414048d73b09d662b9f9c3ef216ff9
|
05d3c0131631ff973a71fd7a2b3256aac2e8c8de
|
/scripts/reduce.py
|
00f63ac39249dfa6da6ee54db38aa39a3438e988
|
[] |
no_license
|
ELIFE-ASU/parallel-programming-mpi
|
ef0c0e4816d3274a78139c213f3f8ea955f8605f
|
16ae2a3137f69b5f79073a643d875646ece2ef1c
|
refs/heads/master
| 2022-11-30T12:26:56.104653
| 2020-08-14T23:58:35
| 2020-08-14T23:58:35
| 287,635,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 227
|
py
|
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
if rank == 0:
data = rank
recv_data = comm.reduce(data, op=MPI.SUM)
else:
data = rank
recv_data = comm.reduce(data)
print(rank, recv_data)
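# comm.reduce defaults to op=MPI.SUM and root=0, so both branches above are
# equivalent; only rank 0 receives the sum, every other rank gets None.
# Expected output for `mpiexec -n 4 python reduce.py` (line order may vary):
#   0 6
#   1 None
#   2 None
#   3 None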
|
[
"doug@dglmoore.com"
] |
doug@dglmoore.com
|
bccf442ae0c260a3076c95bb548d863401336ef8
|
c73995de48417b36856b6c6d1bc52d9a90a9a0e5
|
/item/models.py
|
d67291a28aea4c9efee5fb9321b1354b539f70a9
|
[] |
no_license
|
yukai0309/kk
|
548f1e291d2f4866a12ad9e3fcbc455e0db3b9c1
|
1abb6b3d6928c3b59e3591debed4dd912452f72d
|
refs/heads/main
| 2023-05-27T12:06:46.467400
| 2021-06-13T16:04:34
| 2021-06-13T16:04:34
| 376,582,298
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,886
|
py
|
from django.db import models
class ItListModel(models.Model):
item_code = models.CharField(max_length=200, verbose_name="貨物編號")
item_name = models.CharField(max_length=200, verbose_name="貨物名稱")
quantity = models.FloatField(default=0, verbose_name="數量")
sales_price = models.FloatField(default=0, verbose_name="價格")
cost = models.CharField(max_length=200, verbose_name="成本")
weight = models.FloatField(default=0, verbose_name="重量")
creater = models.CharField(max_length=255, verbose_name="建立者")
create_time = models.DateTimeField(auto_now_add=True, verbose_name="建立時間")
update_time = models.DateTimeField(auto_now=True, blank=True, null=True, verbose_name="更新時間")
description = models.CharField(max_length=255, verbose_name="說明")
    class Meta:
db_table = "ItListModel"
def __str__(self):
return self.item_code
class ItDetailModel(models.Model):
item_code = models.CharField(max_length=200, verbose_name="貨物編號")
item_name = models.CharField(max_length=200, verbose_name="貨物名稱")
quantity = models.FloatField(default=0, verbose_name="數量")
sales_price = models.FloatField(default=0, verbose_name="價格")
cost = models.CharField(max_length=200, verbose_name="成本")
weight = models.FloatField(default=0, verbose_name="重量")
creater = models.CharField(max_length=255, verbose_name="建立者")
create_time = models.DateTimeField(auto_now_add=True, verbose_name="建立時間")
update_time = models.DateTimeField(auto_now=True, blank=True, null=True, verbose_name="更新時間")
description = models.CharField(max_length=255, verbose_name="說明")
    class Meta:
db_table = "ItDetailModel"
def __str__(self):
return self.item_code
|
[
"noreply@github.com"
] |
yukai0309.noreply@github.com
|
27029b5b4c740f54219b2707fcda20d406c3380d
|
8a73621df4054895e2a73a49b84d8d135da1f585
|
/scanner.py
|
a98feef039b3a8fe5dea2c672c559fc85489c370
|
[] |
no_license
|
obing99/pBmwScanner
|
252d15da98c2f6de89918c351419fa49871a46dc
|
6323ad30a809cf7dd9d2b7c9c18e8c1623cf85eb
|
refs/heads/master
| 2021-09-06T17:48:57.958345
| 2018-02-09T09:07:43
| 2018-02-09T09:07:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 138
|
py
|
from me72 import *
from gs8602 import *
egs = ZF5HP24()
egs.run()
dme = ME72()
dme.run()
"""
ds2 = DS2()
while 1:
ds2.sniffer()
"""
|
[
"stevegigijoe@yahoo.com.tw"
] |
stevegigijoe@yahoo.com.tw
|
422e4dcf885173e53b126ff8279e7a361ccd9201
|
1468f1436b3ce472a3805bf280955f5ba6d11da2
|
/lyric_test_discriminator_3.py
|
bbb60e16f4df5bb803de9a0719fbfff7fba6e45d
|
[] |
no_license
|
peterfengyx/Lyric-Generation
|
989d72328a7915e4ae5f0ee88a9290d41126e3fd
|
b1ed00dbe0112f2c122586be5ccc814e2df2174b
|
refs/heads/master
| 2020-04-28T13:47:59.601528
| 2019-04-24T23:03:07
| 2019-04-24T23:03:07
| 175,316,747
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,080
|
py
|
import pdb
from lyric_models import *
import pickle
import numpy as np
# from gensim import corpora
import torch.utils.data as data_utils
from torch.autograd import grad
import sys
import os
# input from command line
if len(sys.argv) != 2:
raise ValueError("Wrong argument number!")
BatchSize = int(sys.argv[1]) # 20
LearningRate = 0.0001
print ('BatchSize: ', BatchSize)
# --------------------------- Load Data ---------------------------
train_set = pickle.load(open('data_new/training_012','rb'))
test_set = pickle.load(open('data_new/test_012','rb'))
test_idx_150_1 = pickle.load(open('data_new/test_idx_150_1.pkl','rb'))
test_idx_150_2 = pickle.load(open('data_new/test_idx_150_2.pkl','rb'))
test_idx_150_3 = pickle.load(open('data_new/test_idx_150_3.pkl','rb'))
# pdb.set_trace()
#--------------------------- Meta Data ---------------------------
# special token idx
SOS = 9744
EOS = 9743
UNK = 9745
# maximum line length
MaxLineLen = 32
# maximum lyric length
MaxLineNum = 40 # Need to be reset
# dictionary size
DictionarySize = 9746
# genre size
GenreSize = 3
# title size
TitleSize = 300
# the number of iterations of the discriminator per generator iteration
NumDisIter = 1
#----------------------------------------------------------------
# load dictionary
# idx2word = corpora.Dictionary.load('data_new/dict.txt')
# load w2v vectors
# idx2vec = pickle.load(open('data_new/w2v.pkl','rb'))
word_embedding = np.eye(DictionarySize)
title_embedding = pickle.load(open('data_new/w2v_embedding.pkl','rb'))
genre_embedding = torch.eye(GenreSize)
line_end_embedding = torch.eye(MaxLineNum).type(torch.LongTensor)
#----------------------------------------------------------------
class LyricDataset(data_utils.Dataset):
def __init__(self, lyric_set, max_line_num = MaxLineNum):
self.lyric_set = lyric_set
self.max_line_num = max_line_num
self.len = len(lyric_set)
def __len__(self):
return self.len
def __getitem__(self, index):
title = np.mean(np.array([title_embedding[key] for key in self.lyric_set[index][0]]), axis=0)
genre = self.lyric_set[index][1]
lyric = self.lyric_set[index][2]
line_length = self.lyric_set[index][3]
line_numb = len(lyric)
if line_numb > self.max_line_num:
lyric = lyric[:self.max_line_num]
line_length = line_length[:self.max_line_num]
line_numb = self.max_line_num
else:
for _ in range(self.max_line_num - line_numb):
lyric.append([UNK]*MaxLineLen)
line_length.append(0)
return {'title': title, 'genre': genre, 'lyric': np.array(lyric), 'line_length': np.array(line_length), 'line_numb': line_numb}
def train_val(model_type,
title_tensor,
genre_tensor,
real_lyric_tensor,
real_line_length_tensor,
real_line_num_tensor,
sentence_encoder,
lyric_encoder,
lyric_generator,
sentence_generator,
lyric_discriminator,
sentence_encoder_optimizer,
lyric_encoder_optimizer,
lyric_generator_optimizer,
sentence_generator_optimizer,
lyric_discriminator_optimizer,
batch_size,
max_line_number = MaxLineNum,
max_line_length = MaxLineLen,
num_discriminator_iter = NumDisIter):
# ,
# lg_end_loss_weight = LgEndLossWeight,
# sg_word_loss_weight = SgWordLossWeight):
if model_type == 'train':
sentence_encoder_optimizer.zero_grad()
lyric_encoder_optimizer.zero_grad()
lyric_generator_optimizer.zero_grad()
sentence_generator_optimizer.zero_grad()
lyric_discriminator_optimizer.zero_grad()
# gan_loss_data = 0.0
# generator_loss_data = 0.0
discriminator_loss_data = 0.0
# real lyric embedding
real_line_number = torch.max(real_line_num_tensor).item()
real_line_length = torch.max(real_line_length_tensor).item()
real_le_hidden = cudalize(Variable(lyric_encoder.initHidden(batch_size))) # torch.Size([1, 10, 512])
real_le_hiddens_variable = real_le_hidden # torch.Size([1, 10, 512])
genre_embedding_tensor = genre_embedding[genre_tensor]
for real_line_num in range(real_line_number):
real_se_hidden = cudalize(Variable(sentence_encoder.initHidden(batch_size))) # torch.Size([1, 10, 512])
real_se_hiddens_variable = real_se_hidden # torch.Size([1, 10, 512])
for real_line_idx in range(real_line_length):
real_se_word_tensor = torch.from_numpy(word_embedding[real_lyric_tensor[:,real_line_num,real_line_idx]]).type(torch.FloatTensor) # torch.Size([10, 9746])
# title_tensor - this line, torch.Size([10, 9746])
# genre_embedding_tensor - this line, torch.Size([10, 3])
real_se_input = torch.cat((real_se_word_tensor, title_tensor, genre_embedding_tensor), 1) # torch.Size([10, 19495])
real_se_input = cudalize(Variable(real_se_input))
_, real_se_hidden = sentence_encoder(real_se_input, real_se_hidden, batch_size)
real_se_hiddens_variable = torch.cat((real_se_hiddens_variable, real_se_hidden))
real_line_latent_variable = real_se_hiddens_variable[real_line_length_tensor[:,real_line_num], np.arange(batch_size), :] # torch.Size([10, 512])
real_le_title_tensor_variable = cudalize(Variable(title_tensor)) # torch.Size([10, 9746])
real_le_genre_variable = cudalize(Variable(genre_embedding_tensor)) # torch.Size([10, 3])
real_le_input = torch.cat((real_line_latent_variable, real_le_title_tensor_variable, real_le_genre_variable), 1) # torch.Size([10, 10261])
_, real_le_hidden = lyric_encoder(real_le_input, real_le_hidden, batch_size)
real_le_hiddens_variable = torch.cat((real_le_hiddens_variable, real_le_hidden))
# real_lyric_latent_variable
real_lyric_latent_variable = real_le_hiddens_variable[real_line_num_tensor, np.arange(batch_size), :] # torch.Size([10, 512])
# generated lyric embedding
noise_un_variable = cudalize(Variable(torch.randn(real_lyric_latent_variable.size()))) # torch.Size([10, 512])
# normalize
noise_mean_variable = torch.mean(noise_un_variable, dim=1, keepdim=True)
noise_std_variable = torch.std(noise_un_variable, dim=1, keepdim=True)
noise_variable = (noise_un_variable - noise_mean_variable)/noise_std_variable
lg_temp_variable = noise_variable
softmax = nn.Softmax(dim=1)
lg_hidden = cudalize(Variable(lyric_generator.initHidden(batch_size))) # torch.Size([1, 10, 512])
lg_outputs_length = np.array([max_line_number]*batch_size) # (10,)
lg_length_flag = np.ones(batch_size, dtype=int) # (10,)
le_hidden = cudalize(Variable(lyric_encoder.initHidden(batch_size))) # torch.Size([1, 10, 512])
le_hiddens_variable = le_hidden # torch.Size([1, 10, 512])
for line_num in range(max_line_number):
lg_title_tensor_variable = cudalize(Variable(title_tensor)) # torch.Size([10, 9746])
lg_genre_variable = cudalize(Variable(genre_embedding_tensor)) # torch.Size([10, 3])
lg_input = torch.cat((lg_temp_variable, lg_title_tensor_variable, lg_genre_variable), 1) # torch.Size([10, 10261])
# lg_input = torch.cat((noise_variable, lg_title_tensor_variable, lg_genre_variable), 1) # torch.Size([10, 10261])
end_output, topic_output, lg_hidden = lyric_generator(lg_input, lg_hidden, batch_size)
# workable, but be careful! Now p = 0.5, need to change for other p-s!
end_output_softmax = softmax(end_output)
end_ni = np.argmax(end_output_softmax.data.cpu().numpy(), axis=1)
end_batch_index = np.where(end_ni == 1)[0]
if np.sum(lg_length_flag[end_batch_index]) > 0:
lg_outputs_length[end_batch_index] = line_num + 1 # line_num starts from 0!
lg_length_flag[end_batch_index] = 0
sg_hidden = topic_output.view(1, batch_size, -1) # torch.Size([1, 10, 512])
sg_hiddens_variable = sg_hidden
sg_word_tensor = torch.from_numpy(np.array([word_embedding[SOS]]*batch_size)).type(torch.FloatTensor) # torch.Size([10, 9746])
# sg_word_outputs = cudalize(Variable(torch.zeros(line_length-1, batch_size, sentence_generator.output_size))) # torch.Size([19, 10, 9746])
se_hidden = cudalize(Variable(sentence_encoder.initHidden(batch_size))) # torch.Size([1, 10, 512])
# genre_embedding_tensor # torch.Size([10, 3])
se_input = torch.cat((softmax(sg_word_tensor), title_tensor, genre_embedding_tensor), 1) # torch.Size([10, 19495])
se_input = cudalize(Variable(se_input))
_, se_hidden = sentence_encoder(se_input, se_hidden, batch_size)
se_hiddens_variable = se_hidden # torch.Size([1, 10, 512])
sg_outputs_length = np.array([max_line_length-1]*batch_size)
sg_length_flag = np.ones(batch_size, dtype=int)
for line_idx in range(1, max_line_length):
# title_tensor - this line
# genre_embedding_tensor - this line, torch.Size([10, 3])
sg_input = torch.cat((sg_word_tensor, title_tensor, genre_embedding_tensor), 1) # torch.Size([10, 19495])
sg_input = cudalize(Variable(sg_input))
sg_output, sg_hidden = sentence_generator(sg_input, sg_hidden, batch_size)
sg_hiddens_variable = torch.cat((sg_hiddens_variable, sg_hidden))
sg_output_softmax = softmax(sg_output)
ni = torch.multinomial(sg_output_softmax, 1).cpu().view(-1)
# _, topi = sg_output_softmax.topk(1)
# ni = topi.cpu().view(-1) # workable, but be careful
sg_word_tensor = torch.from_numpy(word_embedding[ni]).type(torch.FloatTensor)
eos_ni = ni.numpy()
# be careful about <SOS>!!!!!!
eos_batch_index = np.where(eos_ni == EOS)[0]
if np.sum(sg_length_flag[eos_batch_index]) > 0:
sg_outputs_length[eos_batch_index] = line_idx # exclude <SOS>, but include <EOS>
sg_length_flag[eos_batch_index] = 0
se_title_variable = cudalize(Variable(title_tensor))
se_genre_variable = cudalize(Variable(genre_embedding_tensor))
se_input = torch.cat((sg_output_softmax, se_title_variable, se_genre_variable), 1) # torch.Size([10, 19495])
_, se_hidden = sentence_encoder(se_input, se_hidden, batch_size)
se_hiddens_variable = torch.cat((se_hiddens_variable, se_hidden))
lg_temp_variable = sg_hiddens_variable[sg_outputs_length, np.arange(batch_size), :]
line_latent_variable = se_hiddens_variable[sg_outputs_length, np.arange(batch_size), :] # torch.Size([10, 512])
le_title_tensor_variable = cudalize(Variable(title_tensor)) # torch.Size([10, 9746])
le_genre_variable = cudalize(Variable(genre_embedding_tensor)) # torch.Size([10, 3])
le_input = torch.cat((line_latent_variable, le_title_tensor_variable, le_genre_variable), 1) # torch.Size([10, 10261])
_, le_hidden = lyric_encoder(le_input, le_hidden, batch_size)
le_hiddens_variable = torch.cat((le_hiddens_variable, le_hidden))
# generated_lyric_latent_variable
generated_lyric_latent_variable = le_hiddens_variable[lg_outputs_length, np.arange(batch_size), :] # torch.Size([10, 512])
# Now the two variables prepared, dig into gan training procedure.
# GAN starts
D_result_real = lyric_discriminator(real_lyric_latent_variable).squeeze() # does the .squeeze() really needed?
D_real_loss = -torch.mean(D_result_real)
D_result_fake = lyric_discriminator(generated_lyric_latent_variable).squeeze() # does the .squeeze() really needed?
D_fake_loss = torch.mean(D_result_fake)
# D_result_fake = lyric_discriminator(real_lyric_latent_variable).squeeze() # does the .squeeze() really needed?
# D_fake_loss = torch.mean(D_result_fake)
discriminator_loss = D_real_loss + D_fake_loss
discriminator_loss_data = discriminator_loss.item()
return D_real_loss.item(), D_fake_loss.item(), -discriminator_loss_data
def trainEpochs(sentence_encoder,
lyric_encoder,
lyric_generator,
sentence_generator,
lyric_discriminator,
batch_size,
learning_rate,
num_epoch,
print_every):
sentence_encoder_optimizer = torch.optim.Adam(sentence_encoder.parameters(), lr=learning_rate*0.95)
lyric_encoder_optimizer = torch.optim.Adam(lyric_encoder.parameters(), lr=learning_rate*0.95)
lyric_generator_optimizer = torch.optim.Adam(lyric_generator.parameters(), lr=learning_rate*0.95)
sentence_generator_optimizer = torch.optim.Adam(sentence_generator.parameters(), lr=learning_rate*0.95)
lyric_discriminator_optimizer = torch.optim.Adam(lyric_discriminator.parameters(), lr=learning_rate)
val_loader = data_utils.DataLoader(dataset=LyricDataset(test_set[test_idx_150_3]),
batch_size=batch_size,
shuffle=True)
sentence_encoder.eval()
lyric_encoder.eval()
lyric_generator.eval()
sentence_generator.eval()
lyric_discriminator.eval()
validation_loss_gan_list = []
validation_loss_gene_list = []
validation_loss_disc_list = []
for val_batch, val_data in enumerate(val_loader, 0):
title_tensor = val_data['title'].type(torch.FloatTensor) # torch.Size([10, 9746])
genre_tensor = val_data['genre'] # torch.Size([10]), tensor([0, 2, 1, 1, 0, 1, 2, 1, 1, 1])
lyric_tensor = val_data['lyric'] # torch.Size([10, 40, 32])
line_length_tensor = val_data['line_length'] # torch.Size([10, 40])
line_num_tensor = val_data['line_numb'] # torch.Size([10]), tensor([40, 17, 31, 38, 40, 40, 22, 9, 12, 39])
print (val_batch)
gan_loss, gene_loss, disc_loss = train_val('val',
title_tensor,
genre_tensor,
lyric_tensor,
line_length_tensor,
line_num_tensor,
sentence_encoder,
lyric_encoder,
lyric_generator,
sentence_generator,
lyric_discriminator,
sentence_encoder_optimizer,
lyric_encoder_optimizer,
lyric_generator_optimizer,
sentence_generator_optimizer,
lyric_discriminator_optimizer,
len(line_num_tensor))
validation_loss_gan_list.append(gan_loss)
validation_loss_gene_list.append(gene_loss)
validation_loss_disc_list.append(disc_loss)
print_loss_gan_avg_val = np.mean(np.array(validation_loss_gan_list))
print_loss_gene_avg_val = np.mean(np.array(validation_loss_gene_list))
print_loss_disc_avg_val = np.mean(np.array(validation_loss_disc_list))
print(' Validation loss: [%.6f, %.6f, %.6f]' % (print_loss_gan_avg_val, print_loss_gene_avg_val, print_loss_disc_avg_val))
if __name__=='__main__':
word_embedding_size = DictionarySize
title_embedding_size = TitleSize
genre_embedding_size = GenreSize
saving_dir_gen = "lyric_gan_25"
epoch_num_gen = 4
# saving_dir_gen = "lyric_gan_scratch_128_25"
# epoch_num_gen = 22
# saving_dir_gen = "tf_autoencoder_128_30_01"
# epoch_num_gen = 20
saving_dir_dis = "lyric_gan_25"
epoch_num_dis = 4
lyric_latent_size = 128
# lyric generator - lg
lg_input_size = lyric_latent_size + title_embedding_size + genre_embedding_size
lg_embedding_size = 128 # not used
lg_hidden_size = 128 # 512
lg_topic_latent_size = 128 # 512
lg_topic_output_size = 128 # 512
lyric_generator = LyricGenerator(lg_input_size, lg_embedding_size, lg_hidden_size, lg_topic_latent_size, lg_topic_output_size)
# need to load the weights
# lyric_generator.load_state_dict(torch.load(saving_dir_gen+'/lyric_generator_'+str(epoch_num_gen)))
lyric_generator = cudalize(lyric_generator)
lyric_generator.eval()
# sentence generator - sg
sg_input_size = word_embedding_size + title_embedding_size + genre_embedding_size
sg_embedding_size = 128
sg_hidden_size = lg_topic_output_size # 512
sg_output_size = DictionarySize
sentence_generator = SentenceGenerator(sg_input_size, sg_embedding_size, sg_hidden_size, sg_output_size)
# need to load the weights
# sentence_generator.load_state_dict(torch.load(saving_dir_gen+'/sentence_generator_'+str(epoch_num_gen)))
sentence_generator = cudalize(sentence_generator)
sentence_generator.eval()
# sentence encoder - se
se_input_size = word_embedding_size + title_embedding_size + genre_embedding_size
se_embedding_size = 128
se_hidden_size = 128 # 512
sentence_encoder = SentenceEncoder(se_input_size, se_embedding_size, se_hidden_size)
# need to load the weights
sentence_encoder.load_state_dict(torch.load(saving_dir_dis+'/sentence_encoder_'+str(epoch_num_dis)))
sentence_encoder = cudalize(sentence_encoder)
sentence_encoder.eval()
# lyric encoder - le
le_input_size = se_hidden_size + title_embedding_size + genre_embedding_size
le_embedding_size = 128 # not used
le_hidden_size = lyric_latent_size
lyric_encoder = LyricEncoder(le_input_size, le_embedding_size, le_hidden_size)
# need to load the weights
lyric_encoder.load_state_dict(torch.load(saving_dir_dis+'/lyric_encoder_'+str(epoch_num_dis)))
lyric_encoder = cudalize(lyric_encoder)
lyric_encoder.eval()
# lyric discriminator - ldis
ldis_input_size = lyric_latent_size
lyric_discriminator = LyricDiscriminator(ldis_input_size)
# need to load the weights
lyric_discriminator.load_state_dict(torch.load(saving_dir_dis+'/lyric_discriminator_'+str(epoch_num_dis)))
lyric_discriminator = cudalize(lyric_discriminator)
lyric_discriminator.eval()
batch_size = BatchSize # 20
learning_rate = LearningRate
num_epoch = 1000
print_every = 1
trainEpochs(sentence_encoder, lyric_encoder, lyric_generator, sentence_generator, lyric_discriminator, batch_size, learning_rate, num_epoch, print_every)
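# Usage: the batch size is the single required command-line argument, e.g.
#   python lyric_test_discriminator_3.py 20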
|
[
"bicheng_xu@outlook.com"
] |
bicheng_xu@outlook.com
|
ee17121c63a869b6d39379a0608764af56660513
|
3e25f00988eb7d67cb757670eadbc34931ac6f04
|
/scripts/mol2xml.py
|
1baa50a56507cc6fe27fb6abe572e4b542eb0481
|
[] |
no_license
|
G-Kang/tpa_md
|
0dcb32af79fe1d17ceca9bcf9f570671664e81b3
|
477f0f82976e52a0cb3fa11b6ea203aa9dba2090
|
refs/heads/master
| 2022-02-28T07:24:08.284881
| 2019-11-04T16:48:15
| 2019-11-04T16:48:15
| 219,539,870
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,341
|
py
|
from polymerize import CreatePolymer
import os,shutil,sys
import xml.dom.minidom
from solv_helper import *
#place polymer in a lattice configuration
path=sys.argv[1]
num=int(sys.argv[2])
size=int(sys.argv[3])
runid=int(sys.argv[4])
archive=sys.argv[5]
infile=sys.argv[6]
shell=sys.argv[7]
prmfile='D'+str(num)+'o'
arc=archive
def arctopos(path,file,shell,lx=65):
posfile=file.split('.')[0]+'arc.pos'
polyfile=file.split('min')[0]+'min.xyz'
os.system('rm '+path+'/'+posfile)
if shell=="True":
if 'cbsol' in file:
sol_size=12
elif 'cfsol' in file:
sol_size=5
shell_type='shell'
posfile=file.split('.')[0]+'shellarc.pos'
solv_cut=1.0
cutoff=8.0
with open(path+'/'+file+'.arc') as infile:
i=0
j=0
infile.seek(-10,os.SEEK_END)
size=infile.tell()
print size
infile.seek(0,0)
for line in infile:
if j==0:
tmpf='temp_'+str(num)+'_'+str(runid)
os.system('rm '+path+'/'+tmpf+'*')
f=open(path+'/'+tmpf+'.xyz','w')
numatom=int(line.split()[0])
xyzlines=line
j+=1
elif j==numatom:
xyzlines+=line
print infile.tell()
f.write(xyzlines)
f.close()
#add
if shell=="True":
atom_array,tink_type,atom_type,connect,slines,natoms,cent_size=read_xyz(path,tmpf+'.xyz',polyfile,sol_size)
shell_atoms,all_atoms,cent_molecule=solv_shell(atom_array,connect,natoms,cent_size,sol_size,cutoff)
clean_solv_shell,clean_solv_all=deloop_solv(sol_size,shell_atoms,all_atoms,atom_array,cent_molecule,solv_cut)
write_xyz(path,tmpf+'.xyz',atom_array,atom_type,tink_type,connect,slines,cent_molecule,clean_solv_shell,shell_type)
shfile=tmpf.split('.')[0]+'_shell'
s=open(path+'/'+shfile+'.xyz','r')
numatoms=int(s.readline().split()[0])
s.close()
os.system('xyzsybyl '+path+'/'+shfile+'.xyz > garb.out')
CreatePolymer(path,shfile,shfile,num_mono=1,num_poly=1,lf=lx)
if not os.path.isfile(path+'/'+posfile):
shutil.copyfile(path+'/'+shfile+'.pos',path+'/'+posfile)
os.system('rm '+path+'/'+shfile+'.*')
os.system('rm '+path+'/'+tmpf+'.*')
else:
os.system('cat '+path+'/'+shfile+'.pos >> '+path+'/'+posfile)
os.system('rm '+path+'/'+shfile+'.*')
os.system('rm '+path+'/'+tmpf+'.*')
if infile.tell()>size:
os.system('rm '+path+'/'+file+'_lastshell.*')
final=open(path+'/'+file+'_lastshell.xyz','w')
final.write(xyzlines)
final.close()
os.system('xyzsybyl '+path+'/'+file+'_lastshell.xyz')
CreatePolymer(path,file+'_lastshell',file+'_lastshell',num_mono=1,num_poly=1,lf=lx)
else:
os.system('xyzsybyl '+path+'/'+tmpf+'>tmp.out')
CreatePolymer(path,tmpf,tmpf,num_mono=1,num_poly=1,lf=lx)
if not os.path.isfile(path+'/'+posfile):
shutil.copyfile(path+'/'+tmpf+'.pos',path+'/'+posfile)
os.system('rm '+path+'/'+tmpf+'.*')
else:
os.system('cat '+path+'/'+tmpf+'.pos >> '+path+'/'+posfile)
os.system('rm '+path+'/'+tmpf+'.*')
if infile.tell()>size:
os.system('rm '+path+'/'+file+'_last.*')
final=open(path+'/'+file+'_last.xyz','w')
final.write(xyzlines)
final.close()
os.system('xyzsybyl '+path+'/'+file+'_last.xyz')
CreatePolymer(path,file+'_last',file+'_last',num_mono=1,num_poly=1,lf=lx)
j=0
i+=1
else:
xyzlines+=line
j+=1
if arc=='True':
CreatePolymer(path,infile,infile,num_mono=1,num_poly=1,lf=65)
#number of initial files
dom = xml.dom.minidom.parse(path+'/'+infile+'.xml');
# start by parsing the file
hoomd_xml = dom.getElementsByTagName('hoomd_xml')[0];
configuration = hoomd_xml.getElementsByTagName('configuration')[0];
# read the box size
box = configuration.getElementsByTagName('box')[0];
Lx = box.getAttribute('lx');
l=float(Lx)
arctopos(path,infile,shell,lx=l)
else:
    tornum=sys.argv[6]
    infile='D'+str(num)+'_'+tornum+'_t_'
    outfile=infile
for j in xrange(0,190,10):
#os.system('xyzsybyl '+path+'/'+infile+'.001_'+str(j))
os.system('analyze '+path+'/'+infile+str(j)+' '+path+'/../'+prmfile+' E > D'+num+'_'+tornum+'tor'+str(j)+'.out')
#os.system('sybylxyz '+path+'/'+infile+str(j)+'_mp2')
print path+'/'+infile+str(j)
#CreatePolymer(path,infile+str(j),outfile+str(j),num_mono,num_poly,lf,tw=twist,latflag=lattice)
|
[
"gyeongwonkang2020@u.northwestern.edu"
] |
gyeongwonkang2020@u.northwestern.edu
|
04bea333234cb45a83f68335bf40cd9e5c881f9d
|
22d1dec6a6b5b33c9cb48fb8cca1150ead0db01e
|
/list.py
|
145af2e7e316fd241ba9f58a2e28cb2fcb5b9d20
|
[] |
no_license
|
liujanice/GWC_python
|
b9d30e27daee920eb92eb1c6c33019154fdb3b40
|
054973d09470b9ac9c114095475cc81f6bddd301
|
refs/heads/master
| 2021-01-02T09:12:55.569221
| 2017-11-27T06:56:02
| 2017-11-27T06:56:02
| 99,167,569
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,151
|
py
|
groceries = ["chips", "bagels", "bread", "crackers","pasta", "pizza", "ice cream", "soda",
"bbq sauce", "cake", "peaches", "bananas","muffins"]
# extend and append are functions
groceries.extend(["eggs", 'noodles'])
#Extend only MERGES ANOTHER LIST into the previous list.
groceries.append("bacon")
#Append adds a SINGLE item only to a list. Thus, what is in the parentheses is treated like ONE ITEM
forgotten = ["onions", "ginger", "broccoli", "potatos", "tea", "coffee beans",
"rice", "peanuts", "dried apricot", "mangos", "milk"]
print("I bought these today:")
# iterates through the groceries list
for item in groceries:
print(item)
# Asking for user input to find an item they forgot by using that value as the index in the list
answer = int(input("oh no! What else do I need to buy? Enter in an integer from 0 to 10: "))
#If the user's input is within the range of the indexes of the forgotten list, then
# the program will print out the item
if answer >= 0 and answer <= 10:
print()
print(forgotten[answer])
# If not, there are no items that were forgotten
else:
print("Nevermind. I think I bought everything I need.")
|
[
"noreply@github.com"
] |
liujanice.noreply@github.com
|
5edbc2661060483ef8dd89696f1b2a69530acb0d
|
cd1e37869b7c91c3a52a64261898ea54a83b63d8
|
/bengali/model.py
|
81ecc2564f538a31ec14c93c58fe5a42ebd4a712
|
[] |
no_license
|
ceshine/bengali-grapheme-classification
|
67388aec30ed5ddfdf02cbd6a3626d6cb869fb53
|
48f6881fd03edb399169b63054850ab84b74527b
|
refs/heads/master
| 2021-04-01T01:12:52.398465
| 2020-03-31T07:08:55
| 2020-03-31T07:08:55
| 248,143,974
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,333
|
py
|
import tensorflow as tf
import efficientnet.tfkeras as efn
from .dataset import CLASS_COUNTS
# Avoids cudnn initialization problem
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
class SELayer(tf.keras.layers.Layer):
def __init__(self, channels, reduction):
super().__init__()
self.fc1 = tf.keras.layers.Dense(
channels // reduction,
kernel_initializer=tf.keras.initializers.he_normal(seed=None),
name="fc1",
activation="relu"
)
self.fc2 = tf.keras.layers.Dense(
channels,
kernel_initializer=tf.keras.initializers.he_normal(seed=None),
name="fc2",
activation="sigmoid"
)
def call(self, x):
tmp = self.fc1(x)
tmp = self.fc2(tmp)
return tmp * x
def get_model(arch="b3", pretrained="imagenet", image_size=(128, 128, 3)):
image_input = tf.keras.layers.Input(
shape=image_size, dtype='float32', name='image_input'
)
if arch.startswith("b2"):
base_model = efn.EfficientNetB2(
weights=pretrained, input_shape=image_size, include_top=False)
elif arch.startswith("b3"):
base_model = efn.EfficientNetB3(
weights=pretrained, input_shape=image_size, include_top=False)
elif arch.startswith("b4"):
base_model = efn.EfficientNetB4(
weights=pretrained, input_shape=image_size, include_top=False)
elif arch.startswith("b5"):
base_model = efn.EfficientNetB5(
weights=pretrained, input_shape=image_size, include_top=False)
elif arch.startswith("b6"):
base_model = efn.EfficientNetB6(
weights=pretrained, input_shape=image_size, include_top=False)
elif arch.startswith("b7"):
base_model = efn.EfficientNetB7(
weights=pretrained, input_shape=image_size, include_top=False)
else:
raise ValueError("Unknown arch!")
base_model.trainable = True
tmp = base_model(image_input)
hidden_dim = base_model.output_shape[-1]
tmp = tf.keras.layers.GlobalAveragePooling2D()(tmp)
tmp = tf.keras.layers.Dropout(0.5)(tmp)
if arch.endswith("g"):
prediction_0 = tf.keras.layers.Dense(
CLASS_COUNTS[0], activation='softmax', name="root", dtype='float32'
)(SELayer(hidden_dim, 8)(tmp))
prediction_1 = tf.keras.layers.Dense(
CLASS_COUNTS[1], activation='softmax', name="vowel", dtype='float32'
)(SELayer(hidden_dim, 8)(tmp))
prediction_2 = tf.keras.layers.Dense(
CLASS_COUNTS[2], activation='softmax', name="consonant", dtype='float32'
)(SELayer(hidden_dim, 8)(tmp))
else:
prediction_0 = tf.keras.layers.Dense(
CLASS_COUNTS[0], activation='softmax', name="root", dtype='float32')(tmp)
prediction_1 = tf.keras.layers.Dense(
CLASS_COUNTS[1], activation='softmax', name="vowel", dtype='float32')(tmp)
prediction_2 = tf.keras.layers.Dense(
CLASS_COUNTS[2], activation='softmax', name="consonant", dtype='float32')(tmp)
prediction = tf.keras.layers.Concatenate(axis=-1)([
prediction_0, prediction_1, prediction_2])
return tf.keras.Model(image_input, prediction)
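# A minimal usage sketch (assuming the EfficientNet ImageNet weights can be
# downloaded or are cached locally). Appending "g" to the arch string switches
# the three classification heads from plain Dense layers to SELayer-gated ones:
#
#   model = get_model(arch="b3g", pretrained="imagenet", image_size=(128, 128, 3))
#   model.summary()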
|
[
"shuanck@gmail.com"
] |
shuanck@gmail.com
|
926f177ca5d0e980f75a48fe51f5f9f65263b299
|
3fed8ac07933de0370633013b4d035783333b89d
|
/contour/migrations/0009_environmentalcontour_latex_report.py
|
ab9558d76bb60eec2afeb7036b1e962bac24352d
|
[
"MIT"
] |
permissive
|
virocon-organization/viroconweb
|
4bf14d4c8656222161c06b1b076b585fdb873a6c
|
9fe1e93b4500777cdd8018573e6e5ea981739d50
|
refs/heads/master
| 2021-09-21T12:15:42.528741
| 2021-01-09T20:59:38
| 2021-01-09T20:59:38
| 107,653,688
| 3
| 1
|
MIT
| 2021-09-07T23:51:36
| 2017-10-20T08:40:52
|
Python
|
UTF-8
|
Python
| false
| false
| 570
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-04-13 15:07
from __future__ import unicode_literals
import contour.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('contour', '0008_auto_20180413_0953'),
]
operations = [
migrations.AddField(
model_name='environmentalcontour',
name='latex_report',
field=models.FileField(default=None, null=True, upload_to=contour.models.media_directory_path),
),
]
|
[
"a.haselsteiner@uni-bremen.de"
] |
a.haselsteiner@uni-bremen.de
|
9cdf08036a3e48d468a590dd30a05cfcb227a4c5
|
4ef04fe6545f601fd43dceff9052bd5443ddeac7
|
/find_similar_users.py
|
757041a9dce55766e9ef5422627c2cdd81559463
|
[] |
no_license
|
Mansijain850/recommendation-system
|
ef4d44db9ea470e963808ff2362a772c1954b5d7
|
d1b62c48f63d838c9e28c2706000d830c6edea31
|
refs/heads/master
| 2021-01-18T16:01:57.442776
| 2017-10-11T01:56:25
| 2017-10-11T01:56:25
| 86,704,120
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,112
|
py
|
import json
import numpy as np
from pearson_score import pearson_score
# Finds a specified number of users who are similar to the input user
def find_similar_users(dataset, user, num_users):
if user not in dataset:
raise TypeError('User ' + user + ' not present in the dataset')
# Compute Pearson scores for all the users
scores = np.array([[x, pearson_score(dataset, user, x)] for x in dataset if user != x])
    # Sort the scores based on the second column; the mixed-type array is stored
    # as strings, so cast back to float before sorting
    scores_sorted = np.argsort(scores[:, 1].astype(np.float64))
# Sort the scores in decreasing order (highest score first)
scored_sorted_dec = scores_sorted[::-1]
# Extract top 'k' indices
top_k = scored_sorted_dec[0:num_users]
return scores[top_k]
if __name__=='__main__':
data_file = 'movie_ratings.json'
with open(data_file, 'r') as f:
data = json.loads(f.read())
user = 'Mansi Jain'
    print('\nUsers similar to ' + user + ':\n')
similar_users = find_similar_users(data, user, 3)
print('User Similarity score\n')
for item in similar_users:
print(item[0],round(float(item[1]), 2))
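# Hypothetical shape of movie_ratings.json assumed by pearson_score
# (user -> {movie title -> rating}); not taken from the original data file:
#
#   {
#       "Mansi Jain": {"Movie A": 4.5, "Movie B": 3.0},
#       "Another User": {"Movie A": 4.0, "Movie C": 2.5}
#   }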
|
[
"mansijain850@gmail.com"
] |
mansijain850@gmail.com
|
cf381967da38f4a2bace17ffb9abb245d020aebc
|
06923a8c0f12e0cd0a6ecdb255aac921b4734852
|
/django/filmweb/migrations/0024_auto_20210725_1716.py
|
e31e028486ea89db97f5515bda406f15716aa7d8
|
[] |
no_license
|
adik077/Movies-rating
|
86f6b2c2e9e8deeffef1d63526b8c01ffe38980b
|
0467c26763c9e4aacf37b2115c0ca2d63c869562
|
refs/heads/master
| 2023-07-31T07:37:14.008685
| 2021-09-19T16:22:34
| 2021-09-19T16:22:34
| 408,176,138
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 681
|
py
|
# Generated by Django 3.2.4 on 2021-07-25 15:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('filmweb', '0023_auto_20210725_1711'),
]
operations = [
migrations.AlterField(
model_name='dodatkoweinfo',
name='gatunek',
field=models.PositiveSmallIntegerField(choices=[(3, 'Komedia'), (0, 'Inne'), (2, 'Sci-fi'), (1, 'Horror'), (4, 'Drama')], default=0),
),
migrations.AlterField(
model_name='ocena',
name='autor',
field=models.CharField(blank=True, default='anonim', max_length=64, null=True),
),
]
|
[
"aginalski112@gmail.com"
] |
aginalski112@gmail.com
|
99ea95e58bc8937fa6164cea95ce35145c496efc
|
32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd
|
/benchmark/hackernews/testcase/firstcases/testcase5_025.py
|
74737c28144edf295d46b08ed1766a9b7a938e9e
|
[] |
no_license
|
Prefest2018/Prefest
|
c374d0441d714fb90fca40226fe2875b41cf37fc
|
ac236987512889e822ea6686c5d2e5b66b295648
|
refs/heads/master
| 2021-12-09T19:36:24.554864
| 2021-12-06T12:46:14
| 2021-12-06T12:46:14
| 173,225,161
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,342
|
py
|
#coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'io.dwak.holohackernews.app',
'appActivity' : 'io.dwak.holohackernews.app.ui.storylist.MainActivity',
'resetKeyboard' : True,
'androidCoverage' : 'io.dwak.holohackernews.app/io.dwak.holohackernews.app.JacocoInstrumentation',
'noReset' : True
}
def command(cmd, timeout=5):
p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
time.sleep(timeout)
p.terminate()
return
def getElememt(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str)
return element
def getElememtBack(driver, str1, str2) :
for i in range(0, 2, 1):
try:
element = driver.find_element_by_android_uiautomator(str1)
except NoSuchElementException:
time.sleep(1)
else:
return element
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str2)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str2)
return element
def swipe(driver, startxper, startyper, endxper, endyper) :
size = driver.get_window_size()
width = size["width"]
height = size["height"]
try:
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
except WebDriverException:
time.sleep(1)
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
return
# testcase025
try :
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
element = getElememtBack(driver, "new UiSelector().text(\"sa-mao\")", "new UiSelector().className(\"android.widget.TextView\").instance(4)")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"io.dwak.holohackernews.app:id/action_share\").className(\"android.widget.TextView\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"Share Comment\")", "new UiSelector().className(\"android.widget.TextView\")")
TouchAction(driver).tap(element).perform()
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
else:
print 'OK'
finally:
cpackage = driver.current_package
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"5_025\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
if (cpackage != 'io.dwak.holohackernews.app'):
cpackage = "adb shell am force-stop " + cpackage
os.popen(cpackage)
|
[
"prefest2018@gmail.com"
] |
prefest2018@gmail.com
|
3ca766b88278bb8391c295597ce1a1b2b5b71a01
|
1006de7ca307c360569d657e6dba8ee87f0fd7ef
|
/redis1/sf.py
|
5bcd1b9428b9ccb61b1ac7fe96f9c09b09a17153
|
[] |
no_license
|
run100/python
|
0465c454f4468db38f34571e7a04896ad51c46ce
|
3d9e76e76d145669b669daa40e0a2eb9eb5fdca7
|
refs/heads/master
| 2021-01-17T09:11:48.186452
| 2016-03-22T02:10:34
| 2016-03-22T02:10:34
| 31,799,548
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,589
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__title__ = ''
__author__ = 'Administrator'
__mtime__ = '2016/3/10'
# code is far away from bugs with the god animal protecting
I love animals. They taste delicious.
"""
import re
import urllib2
from bs4 import BeautifulSoup
import redis
redis_conn = redis.Redis('localhost', 6379)
#redis_conn.incr('')
url = 'https://segmentfault.com/blogs/hottest/monthly?page='
def get_content(url):
header = {
'User-Agent': 'Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_2_1 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5'
}
request = urllib2.Request(url, headers=header)
resp = urllib2.urlopen(request)
if resp.getcode() == 200:
return resp.read()
def get_lists(html_doc):
#global redis_conn
soup = BeautifulSoup(html_doc, 'html.parser', from_encoding='utf-8')
sections = soup.find_all('section', class_="stream-list__item")
#print(type(sections))
for section in sections:
#print(section.div.h2.a.string)
summary = section.find('div', class_= re.compile("summary"))
anode = summary.h2.a
title = anode.string
detail = summary.p.string
print(title)
print(detail)
        data = {}
        data['title'] = title
        data['url'] = anode['href']
        data['summary'] = detail
        # print(data)
        add_redis(data)
def add_redis(data):
    global redis_conn
    # Assumed storage scheme (the original stub left this body empty): keep a
    # running count and push each scraped record onto a Redis list.
    redis_conn.incr('sf:count')
    redis_conn.lpush('sf:blogs', str(data))
def init_page(page):
html = get_content(url + str(page))
get_lists(html)
for i in range(1, 50):
init_page(i)
|
[
"727271755@qq.com"
] |
727271755@qq.com
|
7bac0dd6aef27764fec2a0056e2ce92189339eb5
|
12a1a432661e495eada3a9b35ace578541860bf8
|
/PFB_problemsets/python/operators-truth-logics.2.py
|
7ebb1342323846aa51b0333a415583d65ae60396
|
[] |
no_license
|
MarinaMann/PFB_problemsets
|
06ceebb1fb2f265c63fe233c4f2a18f8dc38fe48
|
59d747990cc05f3e4b4ef96cdac5346dcec43342
|
refs/heads/master
| 2020-08-15T12:23:35.654335
| 2019-10-29T18:51:48
| 2019-10-29T18:51:48
| 215,342,037
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 205
|
py
|
#!/usr/bin/env python3
#practicing if/else using bool() function to test TRUE/FALSE of variables
x=0
if bool(x) is True: #must write "True" not TRUE or true
print('TRUE')
else:
print('NOT TRUE')
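# For reference, bool() also returns False for 0, 0.0, '', [], {} and None;
# any non-zero number or non-empty container evaluates as True.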
|
[
"info@info10.cshl.edu"
] |
info@info10.cshl.edu
|
aa3968bcd33b1be3b5d6b0b88238fd894cdf43a2
|
eb1a7b8125f356e9fafdbabf143845863d083e25
|
/airflow_home/email/database_conn.py
|
86e59c55c8d179b5a77b1f93b29a4840e504ad17
|
[] |
no_license
|
SlackAttack/examples
|
e2c6a9723b7d48d505f9283af51d9da8a0677cb2
|
85619a8def8fece031ed570f0eb432a143a42efd
|
refs/heads/main
| 2023-03-11T03:48:25.958721
| 2021-03-01T00:10:41
| 2021-03-01T00:10:41
| 343,238,975
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 906
|
py
|
from credentials_vars import redshift_conn_string, bs4_conn_string, bs3_conn_string, cg_conn_string
import psycopg2
import mysql.connector
connection_options = ('redshift', 'cg', 'bs4', 'bs3')
class NotAValidDatabase(Exception):
pass
def db_conn(source):
global cursor
global conn
if source == 'redshift':
conn=psycopg2.connect(redshift_conn_string)
cursor=conn.cursor()
elif source == 'bs4':
conn=psycopg2.connect(bs4_conn_string)
cursor=conn.cursor()
elif source == 'bs3':
conn=mysql.connector.connect(**bs3_conn_string)
cursor=conn.cursor()
elif source == 'cg':
conn=mysql.connector.connect(**cg_conn_string)
cursor=conn.cursor()
else:
raise NotAValidDatabase("\nInvalid Database Source\nPlease choose one of the following:\n%s"%', '.join(connection_options))
return conn, cursor
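# A minimal usage sketch (assuming valid credentials in credentials_vars):
#
#   conn, cursor = db_conn('redshift')
#   cursor.execute('SELECT 1')
#   print(cursor.fetchone())
#   conn.close()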
|
[
"patslack@Patricks-MacBook-Pro.local"
] |
patslack@Patricks-MacBook-Pro.local
|
15a9f9c24ce516a6a29f60f117cbc0f2bc6c7e67
|
1ac99f8065a2646bdb8ea9003fd5930341fb0cf4
|
/Regular expression/rule10.py
|
99b1383982c48e26ca6b9e0b8e9b9b81d2d431d2
|
[] |
no_license
|
krishnanunni-pr/Pyrhon-Django
|
894547f3d4d22dce3fff14e88815122c12c145b5
|
c59471f947ceb103bb27a19e8a2a160e8ada529b
|
refs/heads/master
| 2023-07-29T19:40:38.199104
| 2021-09-09T18:12:24
| 2021-09-09T18:12:24
| 385,128,009
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 139
|
py
|
import re
x= '\D' # except digits
matcher=re.finditer(x,"Av V V")
for match in matcher:
print(match.start())
print(match.group())
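# Expected output: every character of "Av V V" is a non-digit, so all six
# positions (0 through 5) match, printing each index followed by the character.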
|
[
"krishna@gmail.com"
] |
krishna@gmail.com
|
7e52e13c53ac1563f3ed9d7fa34c1ac572f4f655
|
af97a81a69d100e684b8e9f02bc1b117069b9895
|
/Testcases_Prompt/CWE-285/codeql_1.py
|
72c48921bdb0f1b3a4da0b8a1a4dc8095776f265
|
[] |
no_license
|
s2e-lab/SecurityEval
|
8de3e2f78fffa7058d79cce84fbcd8ff29c8468d
|
f637f67b8e8d52d641ab15903e46a41ead7e30a5
|
refs/heads/main
| 2023-06-17T22:55:08.047769
| 2023-06-16T16:54:54
| 2023-06-16T16:54:54
| 509,664,226
| 28
| 7
| null | 2023-05-27T04:55:05
| 2022-07-02T05:10:17
|
Python
|
UTF-8
|
Python
| false
| false
| 723
|
py
|
libpam = CDLL(find_library("pam"))
pam_authenticate = libpam.pam_authenticate
pam_authenticate.restype = c_int
pam_authenticate.argtypes = [PamHandle, c_int]
def authenticate(username, password, service='login'):
def my_conv(n_messages, messages, p_response, app_data):
"""
Simple conversation function that responds to any prompt where the echo is off with the supplied password
"""
pass
'''
Create a handle from PamHandle,
configure the PAM conversation to use my_conv as a callback function to allow PAM to send messages to and get input from a user.
Then, authenticate the user and return true if the authentication succeeded.
'''
|
[
"lsiddiqsunny@gmail.com"
] |
lsiddiqsunny@gmail.com
|
1e5c9651790f9642a99518766f5ef7577435ebef
|
0b718a2ddba42639f464730a52bdec2e6912f2c6
|
/python_practice/generate_reco_proto.py
|
1f5e810c9a628fd79e853283d90f475f109f40aa
|
[] |
no_license
|
JerryWei1985/practices
|
1f27c3f122ca941eb935d2e695eda99061a07999
|
9f864b7113894996b09582dc1d8fb048ef310e80
|
refs/heads/master
| 2022-10-22T11:54:29.438240
| 2021-01-20T12:40:42
| 2021-01-20T12:40:42
| 191,686,921
| 0
| 1
| null | 2022-10-15T03:55:10
| 2019-06-13T03:48:23
|
C++
|
UTF-8
|
Python
| false
| false
| 19,113
|
py
|
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
from google.protobuf import text_format
from enum import Enum
import recognizer_params_pb2
import argparse
import os
# Handle the common settings first, then the per-language defaults.
class Platform(Enum):
Server = 1
Device = 2
class GraphType(object):
HCLG = 'HclgDecoderResource'
CLG = 'ClgDecoderResource'
class ResocreModelType(object):
KenLM = 'KenLMRescorer'
class ModelLanguage(Enum):
Mandarin = 0
Cantonese = 1
English = 2
Sichuan = 3
Contact = 4
VoiceInput = 5
SmartTV = 6
class RecoConfigGenerater(object):
def __init__(self, model_path, platform=Platform.Server):
self.platform = platform
self.reco_params = recognizer_params_pb2.RecognizerModelParams()
self.root_path = model_path
self.reco_params.acoustic_model = os.path.join(self.root_path, 'final.mdl')
self.reco_params.nnet_config_file = os.path.join(self.root_path, 'nnet_config')
self.set_cmvn()
self.set_silence_detection()
self.set_word_segmenter()
def full_path(self, *filename):
return os.path.join(self.root_path, *filename)
def set_silence_detection(self):
if self.platform == Platform.Device:
self.reco_params.silence_detection_config_file = os.path.join(self.root_path, 'vad.config')
else:
self.reco_params.silence_detection_config_file = os.path.join(self.root_path, 'combined.dnn.decoder.vad.config.binary')
def set_word_segmenter(self):
if self.platform == Platform.Device:
self.reco_params.word_segmenter_file = os.path.join(self.root_path, 'segmenter.trie')
self.reco_params.use_online_cmn = False
def set_cmvn(self):
if self.platform == Platform.Device:
self.reco_params.cmvn_train = os.path.join(self.root_path, 'cmvn_train.ark')
else:
self.reco_params.cmvn_train = os.path.join(self.root_path, 'cmvn', 'cmvn_train.ark')
def set_acoustic_model_type(self, m_type='kNnet3'):
self.reco_params.acoustic_model_type = m_type
def set_word_symbol_table(self, is_binary=False, build_path=''):
if is_binary:
self.reco_params.binary_symbol_table = True
if build_path:
self.reco_params.word_symbol_table = build_path
else:
self.reco_params.word_symbol_table = os.path.join(self.root_path, 'base_words')
else:
self.reco_params.word_symbol_table = os.path.join(self.root_path, 'words.txt')
def set_noise_filter_model(self, noise_filter):
if noise_filter:
self.reco_params.enable_noise_filter = True
self.reco_params.post_processor_config.model_type.append('NoiseFilter')
self.reco_params.post_processor_config.noise_filter_param.noise_model = os.path.join(self.root_path, 'noise_model.one')
self.reco_params.post_processor_config.noise_filter_param.model_params = os.path.join(self.root_path, 'noise_model_config')
def set_itn_model(self, itn, replacement):
if itn or replacement:
self.reco_params.post_processor_config.model_type.append('InverseTextNormalizer')
if itn:
self.reco_params.post_processor_config.inverse_text_normalizer_param.rule_fst = os.path.join(self.root_path, 'rule.fst')
if replacement:
self.reco_params.post_processor_config.inverse_text_normalizer_param.replacement_list = os.path.join(self.root_path, 'word_replacement.txt')
def set_post_processor(self, itn=True, replacement=True,
noise_filter=False):
self.set_itn_model(itn, replacement)
self.set_noise_filter_model(noise_filter)
def set_tn_models(self, tn=False):
m_folder = self.full_path('tn')
if not tn or not os.path.exists(m_folder):
return None
model_list = []
for _, _, files in os.walk(m_folder):
for f in files:
model_list.append(os.path.join(m_folder, f))
if model_list:
self.reco_params.tn_rule_fst = ','.join(model_list)
def set_query_white_list(self):
self.reco_params.query_white_list = self.full_path('query_white_list.txt')
def set_eos_predictor(self):
self.reco_params.eos_predictor_file = self.full_path('3term_3gram_v2.model.bin')
def set_poi_rescore_model(self, order, model_path, relabel_path, weight):
self.reco_params.graph_resource_config.lm_rescorer_config.base_score_config.weight.append(weight)
poi_lm = self.reco_params.graph_resource_config.lm_rescorer_config.kenlm_config.add()
poi_lm.ngram_order = order
poi_lm.model_path = model_path
poi_lm.relabel_file_path = relabel_path
def set_rescore_model(self, order=3, model_typ='KenLMRescorer',
model_path='', relabel_path='',
bug_fix_model_path='', bug_fix_relabel='',
bug_fix_order=4, bug_fix_weight=0):
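        # Rescoring uses a KenLM n-gram model; on the server platform an
        # optional second "bug fix" LM is added with the given weight.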
if not model_path:
model_path = self.full_path('lm.binary')
if not relabel_path:
relabel_path = self.full_path('relabel.bin')
self.reco_params.graph_resource_config.lm_rescorer_config.model_type = model_typ
if model_typ == ResocreModelType.KenLM:
self.reco_params.graph_resource_config.lm_rescorer_config.homophone_path = self.full_path('homophone.bin')
rescore_lm = self.reco_params.graph_resource_config.lm_rescorer_config.kenlm_config.add()
rescore_lm.ngram_order = order
rescore_lm.model_path = model_path
rescore_lm.relabel_file_path = relabel_path
if self.platform == Platform.Server:
self.reco_params.graph_resource_config.lm_rescorer_config.base_score_config.weight.append(1)
self.reco_params.graph_resource_config.lm_rescorer_config.base_score_config.weight.append(bug_fix_weight)
bug_fix_lm = self.reco_params.graph_resource_config.lm_rescorer_config.kenlm_config.add()
bug_fix_lm.ngram_order = bug_fix_order
bug_fix_lm.model_path = bug_fix_model_path
bug_fix_lm.relabel_file_path = bug_fix_relabel
def set_graph_models(self, is_static=True, build_folder='',
model_type='ClgDecoderResource', is_contact=False,
null_fst=''):
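        # A static graph loads one composed CLG.fst; a dynamic graph composes
        # CL.fst with G.fst (plus per-class DATA-*.fst slots on device).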
if is_static and is_contact:
            raise ValueError('a static graph cannot be used with contact models.')
self.reco_params.graph_resource_config.model_type = model_type
self.reco_params.graph_resource_config.transition_model = os.path.join(self.root_path, 'arcs.far')
if is_static:
self.reco_params.graph_resource_config.graph = os.path.join(self.root_path, 'CLG.fst')
else:
model_paths = ','.join([self.full_path('CL.fst'),
self.full_path('G.fst')])
if self.platform == Platform.Device:
model_paths = ','.join([model_paths,
os.path.join(build_folder, 'DATA-ALBUM.fst'),
os.path.join(build_folder, 'DATA-APP.fst'),
os.path.join(build_folder, 'DATA-ARTIST.fst'),
os.path.join(build_folder, 'DATA-CONTACT.fst'),
os.path.join(build_folder, 'DATA-SONG.fst'),
os.path.join(build_folder, 'DATA-VIDEO.fst')])
else:
if is_contact:
if not null_fst:
raise ValueError('Please set null.fst path.')
model_paths = ','.join([model_paths,
null_fst, null_fst,
null_fst, null_fst])
self.reco_params.graph_resource_config.relabel_file = self.full_path('g.irelabel')
self.reco_params.graph_resource_config.expander_cache_config.state_table_path = self.full_path('state_table.bin')
self.reco_params.graph_resource_config.expander_cache_config.replace_state_table_path = self.full_path('replace_state_table.bin')
self.reco_params.graph_resource_config.expander_cache_config.expander_cache_path = self.full_path('expander_cache.bin')
self.reco_params.graph_resource_config.graph = model_paths
def set_context_config(self, full_match=True, length_linear=True,
alpha=0.9, beta=1.0, p1=-1, p2=-4):
self.reco_params.graph_resource_config.lm_rescorer_config.function_config.app_context.full_match = full_match
self.reco_params.graph_resource_config.lm_rescorer_config.function_config.app_context.length_linear = length_linear
self.reco_params.graph_resource_config.lm_rescorer_config.function_config.app_context.alpha = alpha
self.reco_params.graph_resource_config.lm_rescorer_config.function_config.app_context.beta = beta
self.reco_params.graph_resource_config.lm_rescorer_config.function_config.app_context.p1 = p1
self.reco_params.graph_resource_config.lm_rescorer_config.function_config.app_context.p2 = p2
def set_device_default(self, build_path, m_type='kOne', tn=False,
itn=True, replacement=True, noise_filter=False,
query_white_list=False):
self.platform = Platform.Device
self.set_acoustic_model_type(m_type)
build_words_path = os.path.join(build_path, 'words.symb')
self.set_word_symbol_table(True, build_words_path)
self.set_post_processor(itn, replacement, noise_filter)
self.set_tn_models(tn)
self.set_graph_models(False, build_path)
self.set_rescore_model()
if query_white_list:
self.set_query_white_list()
def set_server_default(self, m_type='kNnet3', tn=False,
itn=True, replacement=True, noise_filter=True):
"""
Server default is static CLG and no POI rescore, no bug fix model.
"""
self.platform = Platform.Server
self.set_acoustic_model_type(m_type)
self.set_word_symbol_table()
self.set_post_processor(itn, replacement, noise_filter)
self.set_tn_models(tn)
self.set_graph_models()
self.set_rescore_model(4)
def set_contact_server_default(self, m_type='kNnet3', tn=False,
itn=True, replacement=True, noise_filter=True):
self.platform = Platform.Server
self.set_acoustic_model_type(m_type)
self.set_word_symbol_table()
self.set_post_processor(itn, replacement, noise_filter)
self.set_tn_models(tn)
null_fst = self.full_path('null.fst')
self.set_graph_models(is_static=False, is_contact=True, null_fst=null_fst)
self.set_rescore_model(4)
def set_auto_mandarin_server_default(self, bug_fix_folder, bug_fix_order=4,
bug_fix_weight=0.01,
m_type='kNnet3', tn=False, itn=True,
replacement=True, noise_filter=True):
self.platform = Platform.Server
self.set_acoustic_model_type(m_type)
self.set_word_symbol_table()
self.set_post_processor(itn, replacement, noise_filter)
self.set_tn_models(tn)
self.set_context_config()
self.set_graph_models()
bug_fix_model = os.path.join(bug_fix_folder, 'bug_lm.bin')
bug_fix_rel = os.path.join(bug_fix_folder, 'bug_relabel.bin')
self.set_rescore_model(order=4, bug_fix_model_path=bug_fix_model,
bug_fix_relabel=bug_fix_rel,
bug_fix_order=bug_fix_order,
bug_fix_weight=bug_fix_weight)
# (area_name, weight, order)
poi_area = [('huadong_south', 0.5, 4), ('huanan', 0.5, 4),
('huabei', 0.5, 4), ('dongbei', 0.5, 4),
('huazhong', 0.5, 4), ('xibei', 0.5, 4),
('xinan', 0.5, 4), ('huadong_north', 0.5, 4)]
for area in poi_area:
poi_model_path = self.full_path('poi', '{}_lm.bin'.format(area[0]))
poi_rel_path = self.full_path('poi', '{}_relabel.bin'.format(area[0]))
weight = area[1]
poi_order = area[2]
self.set_poi_rescore_model(poi_order, poi_model_path, poi_rel_path, weight)
def set_auto_cantonese_server_default(self, bug_fix_folder, bug_fix_order=4,
bug_fix_weight=0.1,
m_type='kNnet3', tn=False, itn=True,
replacement=True, noise_filter=False):
self.platform = Platform.Server
self.set_acoustic_model_type(m_type)
self.set_word_symbol_table()
self.set_post_processor(itn, replacement, noise_filter)
self.set_tn_models(tn)
self.set_graph_models()
bug_fix_model = os.path.join(bug_fix_folder, 'bug_lm.bin')
bug_fix_rel = os.path.join(bug_fix_folder, 'bug_relabel.bin')
self.set_rescore_model(order=4, bug_fix_model_path=bug_fix_model,
bug_fix_relabel=bug_fix_rel,
bug_fix_order=bug_fix_order,
bug_fix_weight=bug_fix_weight)
# (area_name, weight, order)
poi_area = [('huadong_south', 0.35, 4), ('huadong_north', 0.35, 4),
('huanan', 0.2, 4), ('huabei', 0.35, 4),
('dongbei', 0.35, 4)]
for area in poi_area:
poi_model_path = self.full_path('poi', '{}_lm.bin'.format(area[0]))
poi_rel_path = self.full_path('poi', '{}_relabel.bin'.format(area[0]))
weight = area[1]
poi_order = area[2]
self.set_poi_rescore_model(poi_order, poi_model_path, poi_rel_path, weight)
def set_auto_sichuan_server_default(self, bug_fix_folder, bug_fix_order=4,
bug_fix_weight=0.1,
m_type='kNnet3', tn=False, itn=True,
replacement=True, noise_filter=False):
self.platform = Platform.Server
self.set_acoustic_model_type(m_type)
self.set_word_symbol_table()
self.set_post_processor(itn, replacement, noise_filter)
self.set_tn_models(tn)
self.set_graph_models()
bug_fix_model = os.path.join(bug_fix_folder, 'bug_lm.bin')
bug_fix_rel = os.path.join(bug_fix_folder, 'bug_relabel.bin')
self.set_rescore_model(order=4, bug_fix_model_path=bug_fix_model,
bug_fix_relabel=bug_fix_rel,
bug_fix_order=bug_fix_order,
bug_fix_weight=bug_fix_weight)
# (area_name, weight, order)
poi_area = [('huadong_south', 0.3, 5), ('huanan', 0.3, 5),
('huabei', 0.3, 5), ('dongbei', 0.3, 5),
('huazhong', 0.3, 5), ('xibei', 0.3, 5),
('xinan', 0.3, 5), ('huadong_north', 0.3, 5)]
for area in poi_area:
poi_model_path = self.full_path('poi', '{}_lm.bin'.format(area[0]))
poi_rel_path = self.full_path('poi', '{}_relabel.bin'.format(area[0]))
weight = area[1]
poi_order = area[2]
self.set_poi_rescore_model(poi_order, poi_model_path, poi_rel_path, weight)
def set_auto_enu_server_default(self, m_type='kNnet3', tn=False, itn=True,
replacement=True, noise_filter=False):
self.platform = Platform.Server
self.set_acoustic_model_type(m_type)
self.set_word_symbol_table()
self.set_post_processor(itn, replacement, noise_filter)
self.set_tn_models(tn)
self.set_graph_models()
self.set_query_white_list()
self.set_rescore_model(order=4)
def generate_config(self, outpath):
output_folder = os.path.dirname(outpath)
if (output_folder != '' and
output_folder != '.' and
not os.path.exists(output_folder)):
os.makedirs(output_folder)
with open(outpath, 'w') as ofile:
ofile.write(text_format.MessageToString(self.reco_params))
def generate_auto_server_by_language(self, language, bug_fix_folder=''):
if language == ModelLanguage.Mandarin:
self.set_auto_mandarin_server_default(bug_fix_folder)
elif language == ModelLanguage.Cantonese:
self.set_auto_cantonese_server_default(bug_fix_folder)
elif language == ModelLanguage.Sichuan:
self.set_auto_sichuan_server_default(bug_fix_folder)
elif language == ModelLanguage.English:
self.set_auto_enu_server_default()
def generate_auto_device_by_language(self, language, m_type, build_path):
if language == ModelLanguage.English:
self.set_device_default(build_path, m_type=m_type, query_white_list=True)
else:
self.set_device_default(build_path, m_type=m_type)
def parse_args(args):
    if args.platform == 'server':
        if args.build_folder:
            print('--build-folder only applies to device; ignoring it.')
    if args.platform == 'device':
        if not args.build_folder:
            raise ValueError('--build-folder must be set for device.')
        if args.bug_fix_folder:
            print('device does not support the bug fix model; ignoring it.')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generate default proto for server and device test.')
parser.add_argument('--model-folder', required=True)
parser.add_argument('--platform', required=True, choices=['server', 'device'])
parser.add_argument('--language', required=True, choices=['chs', 'enu', 'yue', 'chuan'])
    parser.add_argument('--am-type', default='kNnet3', choices=['kNnet3', 'kOne'])
parser.add_argument('--build-folder', default='')
parser.add_argument('--bug-fix-folder', default='')
parser.add_argument('--output-proto', required=True)
argvs = parser.parse_args()
parse_args(argvs)
language = {
'chs': ModelLanguage.Mandarin,
'enu': ModelLanguage.English,
'yue': ModelLanguage.Cantonese,
'chuan': ModelLanguage.Sichuan,
}
p = RecoConfigGenerater(argvs.model_folder)
if argvs.platform == 'server':
p.generate_auto_server_by_language(language[argvs.language],
argvs.bug_fix_folder)
else:
p.generate_auto_device_by_language(language[argvs.language],
argvs.am_type,
argvs.build_folder)
p.generate_config(argvs.output_proto)
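# Example invocation (the paths here are hypothetical):
#   ./generate_reco_proto.py --model-folder /models/chs --platform server \
#       --language chs --bug-fix-folder /models/chs/bug_fix \
#       --output-proto reco_server.config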
|
[
"jerrywei1985@hotmail.com"
] |
jerrywei1985@hotmail.com
|
f39460f2d5f4304353f8147a86ab718d44b4985e
|
4c3a14aedd331c63d8db0834b2f85bd8225b210a
|
/lib/python3.7/site-packages/unicon/plugins/generic/statemachine.py
|
ca2d3f14f1563b0730d0a5d86f6e382849fbcd9a
|
[] |
no_license
|
dfab646/genie34
|
e4c1e8a7577790612979facac9913b2a6f709af9
|
b981c5457caa644ea07def4576da61c8734047eb
|
refs/heads/main
| 2023-08-16T07:20:09.517194
| 2021-10-25T01:06:05
| 2021-10-25T01:06:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,956
|
py
|
"""
Module:
unicon.plugins.generic
Authors:
pyATS TEAM (pyats-support@cisco.com, pyats-support-ext@cisco.com)
Description:
    This module implements a generic state machine which can be used
    by the majority of platforms. It can also serve as a starting
    point for further subclassing.
"""
import re
from time import sleep
from unicon.core.errors import StateMachineError
from unicon.plugins.generic.statements import GenericStatements
from unicon.plugins.generic.patterns import GenericPatterns
from unicon.statemachine import State, Path, StateMachine
from unicon.eal.dialogs import Dialog, Statement
from .statements import (authentication_statement_list,
default_statement_list, buffer_settled)
patterns = GenericPatterns()
statements = GenericStatements()
def config_service_prompt_handler(spawn, config_pattern):
""" Check if we need to send the sevice config prompt command.
"""
if hasattr(spawn.settings, 'SERVICE_PROMPT_CONFIG_CMD') and spawn.settings.SERVICE_PROMPT_CONFIG_CMD:
# if the config prompt is seen, return
if re.search(config_pattern, spawn.buffer):
return
else:
# if no buffer changes for a few seconds, check again
if buffer_settled(spawn, spawn.settings.CONFIG_PROMPT_WAIT):
if re.search(config_pattern, spawn.buffer):
return
else:
spawn.sendline(spawn.settings.SERVICE_PROMPT_CONFIG_CMD)
def config_transition(statemachine, spawn, context):
# Config may be locked, retry until max attempts or config state reached
wait_time = spawn.settings.CONFIG_LOCK_RETRY_SLEEP
max_attempts = spawn.settings.CONFIG_LOCK_RETRIES
dialog = Dialog([Statement(pattern=statemachine.get_state('enable').pattern,
loop_continue=False,
trim_buffer=True),
Statement(pattern=statemachine.get_state('config').pattern,
loop_continue=False,
trim_buffer=False),
])
if hasattr(statemachine, 'config_transition_statement_list'):
dialog += Dialog(statemachine.config_transition_statement_list)
for attempt in range(max_attempts + 1):
spawn.sendline(statemachine.config_command)
dialog.process(spawn, timeout=spawn.settings.CONFIG_TIMEOUT, context=context)
statemachine.detect_state(spawn)
if statemachine.current_state == 'config':
return
if attempt < max_attempts:
spawn.log.warning('*** Could not enter config mode, waiting {} seconds. Retry attempt {}/{} ***'.format(
wait_time, attempt + 1, max_attempts))
sleep(wait_time)
raise StateMachineError('Unable to transition to config mode')
#############################################################
# State Machine Definition
#############################################################
class GenericSingleRpStateMachine(StateMachine):
    """
    Defines the generic state machine for single-RP platforms.
    The state machine keeps track of all the states supported on
    this platform and of the paths for moving from one state to
    another.
    """
    config_command = 'config term'
def create(self):
"""creates the generic state machine"""
##########################################################
# State Definition
##########################################################
enable = State('enable', patterns.enable_prompt)
disable = State('disable', patterns.disable_prompt)
config = State('config', patterns.config_prompt)
rommon = State('rommon', patterns.rommon_prompt)
##########################################################
# Path Definition
##########################################################
enable_to_disable = Path(enable, disable, 'disable', Dialog([statements.syslog_msg_stmt]))
enable_to_rommon = Path(enable, rommon, 'reload', None)
enable_to_config = Path(enable, config, config_transition, Dialog([statements.syslog_msg_stmt]))
disable_to_enable = Path(disable, enable, 'enable',
Dialog([statements.enable_password_stmt,
statements.bad_password_stmt,
statements.syslog_stripper_stmt]))
config_to_enable = Path(config, enable, 'end', Dialog([statements.syslog_msg_stmt]))
rommon_to_disable = Path(rommon, disable, 'boot',
Dialog(authentication_statement_list))
self.add_state(enable)
self.add_state(config)
self.add_state(disable)
self.add_state(rommon)
self.add_path(rommon_to_disable)
self.add_path(disable_to_enable)
self.add_path(enable_to_config)
self.add_path(enable_to_rommon)
self.add_path(config_to_enable)
self.add_path(enable_to_disable)
self.add_default_statements(default_statement_list)
def learn_os_state(self):
learn_os = State('learn_os', patterns.learn_os_prompt)
self.add_state(learn_os)
class GenericDualRpStateMachine(GenericSingleRpStateMachine):
"""
Defines Generic StateMachine for dualRP
Statemachine keeps in track all the supported states
for this platform, also have detail about moving from
one state to another.
"""
def create(self):
"""creates the state machine"""
super().create()
##########################################################
# State Definition
##########################################################
standby_locked = State('standby_locked', patterns.standby_locked)
self.add_state(standby_locked)
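# --- Added sketch (hypothetical platform, not part of the original module) ---
# A minimal example of extending the generic machine with an extra state,
# using only the State/Path/add_state/add_path APIs shown above. The
# 'maintenance' state, its prompt pattern, and the CLI commands are invented
# for illustration.
class MyPlatformStateMachine(GenericSingleRpStateMachine):
    def create(self):
        super().create()
        # New state recognized by a hypothetical "(maint)#" prompt.
        maintenance = State('maintenance', r'^(.*)\(maint\)#\s*$')
        enable = self.get_state('enable')
        self.add_state(maintenance)
        # Hypothetical commands to move into and out of maintenance mode.
        self.add_path(Path(enable, maintenance, 'start maintenance', None))
        self.add_path(Path(maintenance, enable, 'stop maintenance', None))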
|
[
"help@google.com"
] |
help@google.com
|