blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
โ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 213
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 246
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d8a6f08dd04ec9ae7d39e4517dc83f205c562ba9
|
e0212c9361a553c37df7f109e60c323c6bf32246
|
/minesweeper.py
|
a2bcbefceced04154cfc8b391b4ed0265f252653
|
[] |
no_license
|
nickpwhite/minesweeper
|
27be10bdfe91089b995858c93fcc683e3c6094a2
|
a690a5d6bf47ab203df06d655a161e686ffd57e5
|
refs/heads/master
| 2020-03-11T21:47:01.071961
| 2018-04-19T21:32:01
| 2018-04-19T21:32:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,387
|
py
|
import random
class Cell:
    """One square of the minesweeper board.

    value is -1 for a mine, otherwise the count of adjacent mines.
    """

    def __init__(self):
        self.isVisible = False  # has the player revealed this square?
        self.value = 0          # -1 = mine, otherwise neighbour-mine count
        self.isFlagged = False  # is the square marked with a flag?

    def __str__(self):
        # A flag always wins over any other rendering.
        if self.isFlagged:
            return '๐ฉ'
        # Hidden squares all look alike.
        if not self.isVisible:
            return '*'
        if self.value == -1:
            return '๐ฃ'
        return ' ' if self.value == 0 else str(self.value)
class Game:
    """A console Minesweeper game on a fixed-size grid.

    The board is a HEIGHT x WIDTH grid of Cell objects indexed as
    board[y][x].  A cell value of -1 marks a mine; any other value is
    the number of adjacent mines.
    """

    WIDTH = 10
    HEIGHT = 10
    MINES = 10
    # (dx, dy) offsets of the eight neighbours of a cell.
    SURROUNDING_ARRAY = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1)]

    def __init__(self):
        self.board = [[Cell() for i in range(self.WIDTH)] for j in range(self.HEIGHT)]
        self.flagged = 0  # number of currently flagged cells
        self.initializeBoard()

    def printHelp(self):
        """Print the list of available commands."""
        print("To play, enter one of the following commands, followed by x,y-coordinates:")
        print("\to[pen] - opens the given node, revealing it")
        print("\tf[lag] - flags the given node, marking it as a (potential) mine")
        print("\tu[nflag] - unflags the given node, removing a flag")
        print("\tq[uit] - quits the game")
        print("\thelp - shows this help")
        print("")

    def printWelcome(self):
        """Print the welcome banner followed by the command help."""
        print("Welcome to Minesweeper!")
        self.printHelp()

    def printStatus(self):
        """Print the flagged-cells / total-mines counter."""
        print()
        print('{0} flagged / {1} mines'.format(self.flagged, self.MINES))
        print()

    def printBoard(self):
        """Print the board with coordinate headers; hidden cells show '*'."""
        print('', end=' ')
        for i in range(self.WIDTH):
            print(i % 10, end=' ')
        print('')
        for i in range(self.WIDTH):
            print('___', end='')
        print('')
        for i in range(self.HEIGHT):
            print('{0}|'.format(i % 10), end='')
            for j in range(self.WIDTH):
                print('{0}'.format(self.board[i][j]), end=' ')
            print('')

    def printDebug(self):
        """Print the board fully revealed, without permanently changing it."""
        print('', end=' ')
        for i in range(self.WIDTH):
            print(i % 10, end=' ')
        print('')
        for i in range(self.WIDTH):
            print('___', end='')
        print('')
        for i in range(self.HEIGHT):
            print('{0}|'.format(i % 10), end='')
            for j in range(self.WIDTH):
                cell = self.board[i][j]
                # Temporarily reveal the cell so __str__ shows its value,
                # then restore its hidden state.
                cell.isVisible = True
                print('{0}'.format(cell), end=' ')
                cell.isVisible = False
            print('')

    def isWon(self):
        """Return True when every non-mine cell has been revealed."""
        for i in range(self.HEIGHT):
            for j in range(self.WIDTH):
                cell = self.board[i][j]
                if cell.value >= 0 and not cell.isVisible:
                    return False
        return True

    def setMines(self):
        """Randomly place exactly MINES mines on the board."""
        mines = 0
        while mines < self.MINES:
            x = random.randint(0, self.WIDTH - 1)
            y = random.randint(0, self.HEIGHT - 1)
            cell = self.board[y][x]
            if cell.value != -1:  # skip squares that already hold a mine
                cell.value = -1
                mines += 1

    def setCellValue(self, x, y):
        """Set the cell at (x, y) to its count of adjacent mines.

        Mine cells (value -1) are left untouched.
        """
        cell = self.board[y][x]
        if cell.value != -1:
            mines = 0
            for dx, dy in self.SURROUNDING_ARRAY:
                newx = x + dx
                newy = y + dy
                if 0 <= newx < self.WIDTH and 0 <= newy < self.HEIGHT and self.board[newy][newx].value == -1:
                    mines += 1
            cell.value = mines

    def initializeBoard(self):
        """Place the mines and compute every cell's neighbour count."""
        self.setMines()
        # set all the values
        for y in range(self.HEIGHT):
            for x in range(self.WIDTH):
                self.setCellValue(x, y)

    def clearFlags(self):
        """Remove every flag from the board (used when the game is won)."""
        for y in range(self.HEIGHT):
            for x in range(self.WIDTH):
                self.board[y][x].isFlagged = False

    def openCellsAround(self, x, y):
        """Flood-fill reveal outward from the zero-valued cell at (x, y).

        Neighbouring zero-valued cells keep extending the frontier;
        flagged cells are never revealed.  Always returns 0.
        """
        surrounding_cells = []
        visited_cells = [(x, y)]
        for dx, dy in self.SURROUNDING_ARRAY:
            newx = x + dx
            newy = y + dy
            if 0 <= newx < self.WIDTH and 0 <= newy < self.HEIGHT:
                surrounding_cells.append((newx, newy))
        while surrounding_cells:
            u, v = surrounding_cells.pop()
            cell = self.board[v][u]
            visited_cells.append((u, v))
            if not cell.isFlagged:
                cell.isVisible = True
                if cell.value == 0:
                    for du, dv in self.SURROUNDING_ARRAY:
                        newu = u + du
                        newv = v + dv
                        if 0 <= newu < self.WIDTH and 0 <= newv < self.HEIGHT:
                            newcell = (newu, newv)
                            if newcell not in surrounding_cells and newcell not in visited_cells:
                                surrounding_cells.append(newcell)
        return 0

    def processCommand(self, command, x, y):
        """Execute one player command at coordinates (x, y).

        Returns the opened cell's value (-1 for a mine), -1 for quit,
        and 0 for flag/unflag/help actions.
        """
        if "open".startswith(command.lower()):
            cell = self.board[y][x]
            if cell.isVisible:
                return cell.value
            cell.isVisible = True
            if cell.value == 0:
                return self.openCellsAround(x, y)
            else:
                return cell.value
        elif "flag".startswith(command.lower()):
            cell = self.board[y][x]
            # BUG FIX: the original incremented self.flagged every time a
            # hidden cell was flagged, so re-flagging the same cell
            # inflated the counter.  Only count a cell once.
            if not cell.isVisible and not cell.isFlagged:
                self.flagged += 1
                cell.isFlagged = True
            return 0
        elif "unflag".startswith(command.lower()):
            cell = self.board[y][x]
            if cell.isFlagged:
                self.flagged -= 1
                cell.isFlagged = False
            return 0
        elif "quit".startswith(command.lower()):
            return -1
        else:
            # BUG FIX: the original called the module-level global
            # `game.printHelp()` here, which breaks for any other
            # Game instance; use self instead.
            self.printHelp()
            return 0
# --- Interactive game loop -------------------------------------------------
game = Game()
game.printWelcome()
game.printBoard()
while not game.isWon():
    full_command = input("> ").split()
    try:
        command = full_command[0]
        if len(full_command) > 1:
            x = int(full_command[1])
            y = int(full_command[2])
        else:
            # Commands without coordinates (e.g. "help", "quit") default
            # to the origin so processCommand always gets a valid cell.
            x = 0
            y = 0
        result = game.processCommand(command, x, y)
        # -1 means either "quit" or an opened mine: the game is over.
        if result == -1:
            game.printBoard()
            print("Thanks for playing!")
            break
        else:
            game.printStatus()
            game.printBoard()
    except (IndexError, ValueError):
        # Empty input, a missing coordinate, a non-numeric coordinate or
        # an out-of-range cell index all just re-show the help text.
        # (The original caught bare Exception with an unused binding,
        # which would also hide genuine bugs.)
        game.printHelp()
if game.isWon():
    game.clearFlags()
    game.printDebug()
    print("Congratulations! You won!")
|
[
"nick41496@gmail.com"
] |
nick41496@gmail.com
|
9a4f444ef136bc8766e3fde43a36cab0eb31ed99
|
0b347d1f766cba6c00a20ab02fe50f7ebb2e8754
|
/projet HTML fini/Exercices/Fonctions/exercice10.py
|
ee3e26933ea2057a3e14e8df398fccdba39db9aa
|
[] |
no_license
|
AdriBird/SiteWeb
|
d2f20161c23c207c0544490c1842f39be523a326
|
37c59aa2c8f1f5bb41784da7cdb85c02e1b0633b
|
refs/heads/main
| 2023-04-27T04:39:14.736647
| 2021-05-17T09:29:15
| 2021-05-17T09:29:15
| 355,120,714
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 90
|
py
|
# Exercise: string concatenation.
# BUG FIX: the original assigned the integer 2020 to ma_chaine_1, so the
# '+' below raised "TypeError: unsupported operand type(s) for +: 'int'
# and 'str'".  The variable name ("chaine" = string) says it should be a
# string literal.
ma_chaine_1 = "2020"
ma_chaine_2 = "le"
res = ma_chaine_1 + ma_chaine_2 + "monde"
print(res)
|
[
"jmear92@gmail.com"
] |
jmear92@gmail.com
|
51b70b9324f5e51e8212940b9ce83e5d29286acd
|
9729b72f99ce764fcb2741af5914051f9dd2b87e
|
/app/models.py
|
150a6c279b5869fd6850f870261ca0aadc2b2aff
|
[] |
no_license
|
sleong1/msa-webapps
|
eda01f5715aa87fed9debb48fec8c1036b9b6c88
|
2cfb9afe4d5ed861ec9fc05b244920ccac799c94
|
refs/heads/master
| 2022-12-07T10:05:04.010844
| 2020-09-04T04:30:09
| 2020-09-04T04:30:09
| 291,599,121
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 620
|
py
|
from app import db
from sqlalchemy_imageattach.entity import Image, image_attachment
# Models
class AllRecipes(db.Model):
    """ORM model for one recipe row in the 'allrecipes' table."""

    __tablename__ = 'allrecipes'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64))
    description = db.Column(db.String(512))
    ingredients = db.Column(db.String(2048))
    method = db.Column(db.String(2048))
    # "favourite" marker; constraint creation disabled on the column.
    fav = db.Column(db.Boolean(create_constraint=False))
    #photo = image_attachment('UserPicture')

    def __repr__(self):
        # BUG FIX: the original wrote `... % self.name, self.description, ...`
        # without parentheses, so '%' bound only to self.name and raised
        # "TypeError: not enough arguments for format string".  The format
        # arguments must be passed as a single tuple.
        return '<AllRecipes %r - %r - %r - %r - %r>' % (
            self.name, self.description, self.ingredients, self.method,
            self.fav)
|
[
"leongsuwen@yahoo.com"
] |
leongsuwen@yahoo.com
|
f9a9a10310655241434c78951afb857b9bac83f5
|
0b0ca501c78908cbd84d79aea0c18dbe10ec2393
|
/venv/lib/python2.7/site-packages/novaclient/tests/unit/test_shell.py
|
86047923e37b149b127ab109abb24f1730372fc6
|
[] |
no_license
|
Supriya30201/gskube
|
71c83528d0f425380f47e2c3148200e1b72c8a01
|
7a5c75ee7eabd39a3f286b22af8d7ce2d74225d0
|
refs/heads/master
| 2020-04-21T11:22:54.848625
| 2018-06-25T08:39:00
| 2018-06-25T08:39:00
| 169,523,236
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 39,353
|
py
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import distutils.version as dist_version
import re
import sys
import fixtures
from keystoneauth1 import fixture
import mock
import prettytable
import requests_mock
import six
from testtools import matchers
from novaclient import api_versions
import novaclient.client
from novaclient import exceptions
import novaclient.shell
from novaclient.tests.unit import fake_actions_module
from novaclient.tests.unit import utils
# Canned environment dictionaries consumed by ShellTest.make_env() below.
# FAKE_ENV authenticates by user/tenant *name*; FAKE_ENV2 by user/tenant
# *ID*; FAKE_ENV3 and FAKE_ENV4 additionally set endpoint-type variables;
# FAKE_ENV5 omits OS_COMPUTE_API_VERSION so version negotiation runs.
FAKE_ENV = {'OS_USERNAME': 'username',
            'OS_PASSWORD': 'password',
            'OS_TENANT_NAME': 'tenant_name',
            'OS_AUTH_URL': 'http://no.where/v2.0',
            'OS_COMPUTE_API_VERSION': '2'}
FAKE_ENV2 = {'OS_USER_ID': 'user_id',
             'OS_PASSWORD': 'password',
             'OS_TENANT_ID': 'tenant_id',
             'OS_AUTH_URL': 'http://no.where/v2.0',
             'OS_COMPUTE_API_VERSION': '2'}
FAKE_ENV3 = {'OS_USER_ID': 'user_id',
             'OS_PASSWORD': 'password',
             'OS_TENANT_ID': 'tenant_id',
             'OS_AUTH_URL': 'http://no.where/v2.0',
             'NOVA_ENDPOINT_TYPE': 'novaURL',
             'OS_ENDPOINT_TYPE': 'osURL',
             'OS_COMPUTE_API_VERSION': '2'}
FAKE_ENV4 = {'OS_USER_ID': 'user_id',
             'OS_PASSWORD': 'password',
             'OS_TENANT_ID': 'tenant_id',
             'OS_AUTH_URL': 'http://no.where/v2.0',
             'NOVA_ENDPOINT_TYPE': 'internal',
             'OS_ENDPOINT_TYPE': 'osURL',
             'OS_COMPUTE_API_VERSION': '2'}
FAKE_ENV5 = {'OS_USERNAME': 'username',
             'OS_PASSWORD': 'password',
             'OS_TENANT_NAME': 'tenant_name',
             'OS_AUTH_URL': 'http://no.where/v2.0'}
def _create_ver_list(versions):
return {'versions': {'values': versions}}
class DeprecatedActionTest(utils.TestCase):
    """Tests for novaclient.shell.DeprecatedAction.

    Covers three aspects of the action: how the constructor synthesizes
    the help string (plain / with replacement hint / SUPPRESS) and stores
    the arguments for the real action; how _get_action lazily resolves
    the real argparse action from the parser registry; and how __call__
    emits the deprecation warning on stderr exactly once per option
    string before delegating to the real action.
    """

    @mock.patch.object(argparse.Action, '__init__', return_value=None)
    def test_init_emptyhelp_nouse(self, mock_init):
        # No help, no 'use' hint: help becomes the bare 'Deprecated'.
        result = novaclient.shell.DeprecatedAction(
            'option_strings', 'dest', a=1, b=2, c=3)
        self.assertEqual(result.emitted, set())
        self.assertIsNone(result.use)
        self.assertEqual(result.real_action_args,
                         ('option_strings', 'dest', 'Deprecated',
                          {'a': 1, 'b': 2, 'c': 3}))
        self.assertIsNone(result.real_action)
        mock_init.assert_called_once_with(
            'option_strings', 'dest', help='Deprecated', a=1, b=2, c=3)

    @mock.patch.object(novaclient.shell.argparse.Action, '__init__',
                       return_value=None)
    def test_init_emptyhelp_withuse(self, mock_init):
        # The 'use' hint is appended after 'Deprecated;'.
        result = novaclient.shell.DeprecatedAction(
            'option_strings', 'dest', use='use this instead', a=1, b=2, c=3)
        self.assertEqual(result.emitted, set())
        self.assertEqual(result.use, 'use this instead')
        self.assertEqual(result.real_action_args,
                         ('option_strings', 'dest',
                          'Deprecated; use this instead',
                          {'a': 1, 'b': 2, 'c': 3}))
        self.assertIsNone(result.real_action)
        mock_init.assert_called_once_with(
            'option_strings', 'dest', help='Deprecated; use this instead',
            a=1, b=2, c=3)

    @mock.patch.object(argparse.Action, '__init__', return_value=None)
    def test_init_withhelp_nouse(self, mock_init):
        # Caller-supplied help gets a '(Deprecated)' suffix.
        result = novaclient.shell.DeprecatedAction(
            'option_strings', 'dest', help='some help', a=1, b=2, c=3)
        self.assertEqual(result.emitted, set())
        self.assertIsNone(result.use)
        self.assertEqual(result.real_action_args,
                         ('option_strings', 'dest',
                          'some help (Deprecated)',
                          {'a': 1, 'b': 2, 'c': 3}))
        self.assertIsNone(result.real_action)
        mock_init.assert_called_once_with(
            'option_strings', 'dest', help='some help (Deprecated)',
            a=1, b=2, c=3)

    @mock.patch.object(novaclient.shell.argparse.Action, '__init__',
                       return_value=None)
    def test_init_withhelp_withuse(self, mock_init):
        # Both help and 'use' hint: the hint goes inside the parentheses.
        result = novaclient.shell.DeprecatedAction(
            'option_strings', 'dest', help='some help',
            use='use this instead', a=1, b=2, c=3)
        self.assertEqual(result.emitted, set())
        self.assertEqual(result.use, 'use this instead')
        self.assertEqual(result.real_action_args,
                         ('option_strings', 'dest',
                          'some help (Deprecated; use this instead)',
                          {'a': 1, 'b': 2, 'c': 3}))
        self.assertIsNone(result.real_action)
        mock_init.assert_called_once_with(
            'option_strings', 'dest',
            help='some help (Deprecated; use this instead)',
            a=1, b=2, c=3)

    @mock.patch.object(argparse.Action, '__init__', return_value=None)
    def test_init_suppresshelp_nouse(self, mock_init):
        # argparse.SUPPRESS passes through untouched.
        result = novaclient.shell.DeprecatedAction(
            'option_strings', 'dest', help=argparse.SUPPRESS, a=1, b=2, c=3)
        self.assertEqual(result.emitted, set())
        self.assertIsNone(result.use)
        self.assertEqual(result.real_action_args,
                         ('option_strings', 'dest', argparse.SUPPRESS,
                          {'a': 1, 'b': 2, 'c': 3}))
        self.assertIsNone(result.real_action)
        mock_init.assert_called_once_with(
            'option_strings', 'dest', help=argparse.SUPPRESS, a=1, b=2, c=3)

    @mock.patch.object(novaclient.shell.argparse.Action, '__init__',
                       return_value=None)
    def test_init_suppresshelp_withuse(self, mock_init):
        # SUPPRESS wins even when a 'use' hint is also supplied.
        result = novaclient.shell.DeprecatedAction(
            'option_strings', 'dest', help=argparse.SUPPRESS,
            use='use this instead', a=1, b=2, c=3)
        self.assertEqual(result.emitted, set())
        self.assertEqual(result.use, 'use this instead')
        self.assertEqual(result.real_action_args,
                         ('option_strings', 'dest', argparse.SUPPRESS,
                          {'a': 1, 'b': 2, 'c': 3}))
        self.assertIsNone(result.real_action)
        mock_init.assert_called_once_with(
            'option_strings', 'dest', help=argparse.SUPPRESS, a=1, b=2, c=3)

    @mock.patch.object(argparse.Action, '__init__', return_value=None)
    def test_init_action_nothing(self, mock_init):
        # real_action='nothing' disables delegation entirely.
        result = novaclient.shell.DeprecatedAction(
            'option_strings', 'dest', real_action='nothing', a=1, b=2, c=3)
        self.assertEqual(result.emitted, set())
        self.assertIsNone(result.use)
        self.assertIs(result.real_action_args, False)
        self.assertIsNone(result.real_action)
        mock_init.assert_called_once_with(
            'option_strings', 'dest', help='Deprecated', a=1, b=2, c=3)

    @mock.patch.object(argparse.Action, '__init__', return_value=None)
    def test_init_action_string(self, mock_init):
        # A string real_action is stored for later registry lookup.
        result = novaclient.shell.DeprecatedAction(
            'option_strings', 'dest', real_action='store', a=1, b=2, c=3)
        self.assertEqual(result.emitted, set())
        self.assertIsNone(result.use)
        self.assertEqual(result.real_action_args,
                         ('option_strings', 'dest', 'Deprecated',
                          {'a': 1, 'b': 2, 'c': 3}))
        self.assertEqual(result.real_action, 'store')
        mock_init.assert_called_once_with(
            'option_strings', 'dest', help='Deprecated', a=1, b=2, c=3)

    @mock.patch.object(argparse.Action, '__init__', return_value=None)
    def test_init_action_other(self, mock_init):
        # A callable real_action is instantiated immediately.
        action = mock.Mock()
        result = novaclient.shell.DeprecatedAction(
            'option_strings', 'dest', real_action=action, a=1, b=2, c=3)
        self.assertEqual(result.emitted, set())
        self.assertIsNone(result.use)
        self.assertIs(result.real_action_args, False)
        self.assertEqual(result.real_action, action.return_value)
        mock_init.assert_called_once_with(
            'option_strings', 'dest', help='Deprecated', a=1, b=2, c=3)
        action.assert_called_once_with(
            'option_strings', 'dest', help='Deprecated', a=1, b=2, c=3)

    @mock.patch.object(sys, 'stderr', six.StringIO())
    def test_get_action_nolookup(self):
        # An already-resolved real_action is returned without touching
        # the parser registry.
        action_class = mock.Mock()
        parser = mock.Mock(**{
            '_registry_get.return_value': action_class,
        })
        obj = novaclient.shell.DeprecatedAction(
            'option_strings', 'dest', real_action='nothing', const=1)
        obj.real_action = 'action'
        result = obj._get_action(parser)
        self.assertEqual(result, 'action')
        self.assertEqual(obj.real_action, 'action')
        self.assertFalse(parser._registry_get.called)
        self.assertFalse(action_class.called)
        self.assertEqual(sys.stderr.getvalue(), '')

    @mock.patch.object(sys, 'stderr', six.StringIO())
    def test_get_action_lookup_noresult(self):
        # A failed registry lookup warns on stderr and yields None.
        parser = mock.Mock(**{
            '_registry_get.return_value': None,
        })
        obj = novaclient.shell.DeprecatedAction(
            'option_strings', 'dest', real_action='store', const=1)
        result = obj._get_action(parser)
        self.assertIsNone(result)
        self.assertIsNone(obj.real_action)
        parser._registry_get.assert_called_once_with(
            'action', 'store')
        self.assertEqual(sys.stderr.getvalue(),
                         'WARNING: Programming error: Unknown real action '
                         '"store"\n')

    @mock.patch.object(sys, 'stderr', six.StringIO())
    def test_get_action_lookup_withresult(self):
        # A successful lookup instantiates and caches the real action.
        action_class = mock.Mock()
        parser = mock.Mock(**{
            '_registry_get.return_value': action_class,
        })
        obj = novaclient.shell.DeprecatedAction(
            'option_strings', 'dest', real_action='store', const=1)
        result = obj._get_action(parser)
        self.assertEqual(result, action_class.return_value)
        self.assertEqual(obj.real_action, action_class.return_value)
        parser._registry_get.assert_called_once_with(
            'action', 'store')
        action_class.assert_called_once_with(
            'option_strings', 'dest', help='Deprecated', const=1)
        self.assertEqual(sys.stderr.getvalue(), '')

    @mock.patch.object(sys, 'stderr', six.StringIO())
    @mock.patch.object(novaclient.shell.DeprecatedAction, '_get_action')
    def test_call_unemitted_nouse(self, mock_get_action):
        # First use of an option emits a plain deprecation warning.
        obj = novaclient.shell.DeprecatedAction(
            'option_strings', 'dest')
        obj('parser', 'namespace', 'values', 'option_string')
        self.assertEqual(obj.emitted, set(['option_string']))
        mock_get_action.assert_called_once_with('parser')
        mock_get_action.return_value.assert_called_once_with(
            'parser', 'namespace', 'values', 'option_string')
        self.assertEqual(sys.stderr.getvalue(),
                         'WARNING: Option "option_string" is deprecated\n')

    @mock.patch.object(sys, 'stderr', six.StringIO())
    @mock.patch.object(novaclient.shell.DeprecatedAction, '_get_action')
    def test_call_unemitted_withuse(self, mock_get_action):
        # The warning includes the 'use' hint when one was given.
        obj = novaclient.shell.DeprecatedAction(
            'option_strings', 'dest', use='use this instead')
        obj('parser', 'namespace', 'values', 'option_string')
        self.assertEqual(obj.emitted, set(['option_string']))
        mock_get_action.assert_called_once_with('parser')
        mock_get_action.return_value.assert_called_once_with(
            'parser', 'namespace', 'values', 'option_string')
        self.assertEqual(sys.stderr.getvalue(),
                         'WARNING: Option "option_string" is deprecated; '
                         'use this instead\n')

    @mock.patch.object(sys, 'stderr', six.StringIO())
    @mock.patch.object(novaclient.shell.DeprecatedAction, '_get_action')
    def test_call_emitted_nouse(self, mock_get_action):
        # An option already in `emitted` produces no second warning.
        obj = novaclient.shell.DeprecatedAction(
            'option_strings', 'dest')
        obj.emitted.add('option_string')
        obj('parser', 'namespace', 'values', 'option_string')
        self.assertEqual(obj.emitted, set(['option_string']))
        mock_get_action.assert_called_once_with('parser')
        mock_get_action.return_value.assert_called_once_with(
            'parser', 'namespace', 'values', 'option_string')
        self.assertEqual(sys.stderr.getvalue(), '')

    @mock.patch.object(sys, 'stderr', six.StringIO())
    @mock.patch.object(novaclient.shell.DeprecatedAction, '_get_action')
    def test_call_emitted_withuse(self, mock_get_action):
        # Same dedup behaviour when a 'use' hint is present.
        obj = novaclient.shell.DeprecatedAction(
            'option_strings', 'dest', use='use this instead')
        obj.emitted.add('option_string')
        obj('parser', 'namespace', 'values', 'option_string')
        self.assertEqual(obj.emitted, set(['option_string']))
        mock_get_action.assert_called_once_with('parser')
        mock_get_action.return_value.assert_called_once_with(
            'parser', 'namespace', 'values', 'option_string')
        self.assertEqual(sys.stderr.getvalue(), '')
class ParserTest(utils.TestCase):
    """Tests for NovaClientArgumentParser's option-prefix matching."""

    def setUp(self):
        super(ParserTest, self).setUp()
        self.parser = novaclient.shell.NovaClientArgumentParser()

    def test_ambiguous_option(self):
        # '--t' could mean either option, so parsing must exit with code 2.
        self.parser.add_argument('--tic')
        self.parser.add_argument('--tac')
        try:
            self.parser.parse_args(['--t'])
        except SystemExit as exc:
            self.assertEqual(2, exc.code)
        else:
            self.fail('SystemExit not raised')

    def test_not_really_ambiguous_option(self):
        # current/deprecated forms of the same option
        self.parser.add_argument('--tic-tac', action="store_true")
        self.parser.add_argument('--tic_tac', action="store_true")
        parsed = self.parser.parse_args(['--tic'])
        self.assertTrue(parsed.tic_tac)
class ShellTest(utils.TestCase):
_msg_no_tenant_project = ("You must provide a project name or project"
" ID via --os-project-name, --os-project-id,"
" env[OS_PROJECT_ID] or env[OS_PROJECT_NAME]."
" You may use os-project and os-tenant"
" interchangeably.")
def make_env(self, exclude=None, fake_env=FAKE_ENV):
env = dict((k, v) for k, v in fake_env.items() if k != exclude)
self.useFixture(fixtures.MonkeyPatch('os.environ', env))
def setUp(self):
super(ShellTest, self).setUp()
self.mock_client = mock.MagicMock()
self.mock_client.return_value.api_version = novaclient.API_MIN_VERSION
self.useFixture(fixtures.MonkeyPatch('novaclient.client.Client',
self.mock_client))
self.nc_util = mock.patch('novaclient.utils.isunauthenticated').start()
self.nc_util.return_value = False
self.mock_server_version_range = mock.patch(
'novaclient.api_versions._get_server_version_range').start()
self.mock_server_version_range.return_value = (
novaclient.API_MIN_VERSION,
novaclient.API_MIN_VERSION)
self.orig_max_ver = novaclient.API_MAX_VERSION
self.orig_min_ver = novaclient.API_MIN_VERSION
self.addCleanup(self._clear_fake_version)
self.addCleanup(mock.patch.stopall)
def _clear_fake_version(self):
novaclient.API_MAX_VERSION = self.orig_max_ver
novaclient.API_MIN_VERSION = self.orig_min_ver
def shell(self, argstr, exitcodes=(0,)):
orig = sys.stdout
orig_stderr = sys.stderr
try:
sys.stdout = six.StringIO()
sys.stderr = six.StringIO()
_shell = novaclient.shell.OpenStackComputeShell()
_shell.main(argstr.split())
except SystemExit:
exc_type, exc_value, exc_traceback = sys.exc_info()
self.assertIn(exc_value.code, exitcodes)
finally:
stdout = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = orig
stderr = sys.stderr.getvalue()
sys.stderr.close()
sys.stderr = orig_stderr
return (stdout, stderr)
def register_keystone_discovery_fixture(self, mreq):
v2_url = "http://no.where/v2.0"
v2_version = fixture.V2Discovery(v2_url)
mreq.register_uri(
'GET', v2_url, json=_create_ver_list([v2_version]),
status_code=200)
def test_help_unknown_command(self):
self.assertRaises(exceptions.CommandError, self.shell, 'help foofoo')
def test_invalid_timeout(self):
for f in [0, -1, -10]:
cmd_text = '--timeout %s' % (f)
stdout, stderr = self.shell(cmd_text, exitcodes=[0, 2])
required = [
'argument --timeout: %s must be greater than 0' % (f),
]
for r in required:
self.assertIn(r, stderr)
def _test_help(self, command, required=None):
if required is None:
required = [
'.*?^usage: ',
'.*?^\s+set-password\s+Change the admin password',
'.*?^See "nova help COMMAND" for help on a specific command',
]
stdout, stderr = self.shell(command)
for r in required:
self.assertThat((stdout + stderr),
matchers.MatchesRegex(r, re.DOTALL | re.MULTILINE))
def test_help(self):
self._test_help('help')
def test_help_option(self):
self._test_help('--help')
self._test_help('-h')
def test_help_no_options(self):
self._test_help('')
def test_help_on_subcommand(self):
required = [
'.*?^usage: nova set-password',
'.*?^Change the admin password',
'.*?^Positional arguments:',
]
self._test_help('help set-password', required=required)
def test_bash_completion(self):
stdout, stderr = self.shell('bash-completion')
# just check we have some output
required = [
'.*--matching',
'.*--wrap',
'.*help',
'.*server-group-delete',
'.*--image-with']
for r in required:
self.assertThat((stdout + stderr),
matchers.MatchesRegex(r, re.DOTALL | re.MULTILINE))
def test_no_username(self):
required = ('You must provide a user name/id (via --os-username, '
'--os-user-id, env[OS_USERNAME] or env[OS_USER_ID]) or '
'an auth token (via --os-token).')
self.make_env(exclude='OS_USERNAME')
try:
self.shell('list')
except exceptions.CommandError as message:
self.assertEqual(required, message.args[0])
else:
self.fail('CommandError not raised')
def test_no_user_id(self):
required = ('You must provide a user name/id (via --os-username, '
'--os-user-id, env[OS_USERNAME] or env[OS_USER_ID]) or '
'an auth token (via --os-token).')
self.make_env(exclude='OS_USER_ID', fake_env=FAKE_ENV2)
try:
self.shell('list')
except exceptions.CommandError as message:
self.assertEqual(required, message.args[0])
else:
self.fail('CommandError not raised')
def test_no_tenant_name(self):
required = self._msg_no_tenant_project
self.make_env(exclude='OS_TENANT_NAME')
try:
self.shell('list')
except exceptions.CommandError as message:
self.assertEqual(required, message.args[0])
else:
self.fail('CommandError not raised')
def test_no_tenant_id(self):
required = self._msg_no_tenant_project
self.make_env(exclude='OS_TENANT_ID', fake_env=FAKE_ENV2)
try:
self.shell('list')
except exceptions.CommandError as message:
self.assertEqual(required, message.args[0])
else:
self.fail('CommandError not raised')
def test_no_auth_url(self):
required = ('You must provide an auth url'
' via either --os-auth-url or env[OS_AUTH_URL].',)
self.make_env(exclude='OS_AUTH_URL')
try:
self.shell('list')
except exceptions.CommandError as message:
self.assertEqual(required, message.args)
else:
self.fail('CommandError not raised')
@requests_mock.Mocker()
def test_nova_endpoint_type(self, m_requests):
self.make_env(fake_env=FAKE_ENV3)
self.register_keystone_discovery_fixture(m_requests)
self.shell('list')
client_kwargs = self.mock_client.call_args_list[0][1]
self.assertEqual(client_kwargs['endpoint_type'], 'novaURL')
@requests_mock.Mocker()
def test_endpoint_type_like_other_clients(self, m_requests):
self.make_env(fake_env=FAKE_ENV4)
self.register_keystone_discovery_fixture(m_requests)
self.shell('list')
client_kwargs = self.mock_client.call_args_list[0][1]
self.assertEqual(client_kwargs['endpoint_type'], 'internalURL')
@requests_mock.Mocker()
def test_os_endpoint_type(self, m_requests):
self.make_env(exclude='NOVA_ENDPOINT_TYPE', fake_env=FAKE_ENV3)
self.register_keystone_discovery_fixture(m_requests)
self.shell('list')
client_kwargs = self.mock_client.call_args_list[0][1]
self.assertEqual(client_kwargs['endpoint_type'], 'osURL')
def test_default_endpoint_type(self):
self.make_env()
self.shell('list')
client_kwargs = self.mock_client.call_args_list[0][1]
self.assertEqual(client_kwargs['endpoint_type'], 'publicURL')
@mock.patch('sys.stdin', side_effect=mock.MagicMock)
@mock.patch('getpass.getpass', return_value='password')
@requests_mock.Mocker()
def test_password(self, mock_getpass, mock_stdin, m_requests):
mock_stdin.encoding = "utf-8"
# default output of empty tables differs depending between prettytable
# versions
if (hasattr(prettytable, '__version__') and
dist_version.StrictVersion(prettytable.__version__) <
dist_version.StrictVersion('0.7.2')):
ex = '\n'
else:
ex = '\n'.join([
'+----+------+--------+------------+-------------+----------+',
'| ID | Name | Status | Task State | Power State | Networks |',
'+----+------+--------+------------+-------------+----------+',
'+----+------+--------+------------+-------------+----------+',
''
])
self.make_env(exclude='OS_PASSWORD')
self.register_keystone_discovery_fixture(m_requests)
stdout, stderr = self.shell('list')
self.assertEqual((stdout + stderr), ex)
def _test_service_type(self, version, service_type, mock_client):
if version is None:
cmd = 'list'
else:
cmd = ('--service-type %s --os-compute-api-version %s list' %
(service_type, version))
self.make_env()
self.shell(cmd)
_client_args, client_kwargs = mock_client.call_args_list[0]
self.assertEqual(service_type, client_kwargs['service_type'])
def test_default_service_type(self):
self._test_service_type(None, 'compute', self.mock_client)
def test_v2_service_type(self):
self._test_service_type('2', 'compute', self.mock_client)
def test_v_unknown_service_type(self):
self.assertRaises(exceptions.UnsupportedVersion,
self._test_service_type,
'unknown', 'compute', self.mock_client)
@mock.patch('sys.argv', ['nova'])
@mock.patch('sys.stdout', six.StringIO())
@mock.patch('sys.stderr', six.StringIO())
def test_main_noargs(self):
# Ensure that main works with no command-line arguments
try:
novaclient.shell.main()
except SystemExit:
self.fail('Unexpected SystemExit')
# We expect the normal usage as a result
self.assertIn('Command-line interface to the OpenStack Nova API',
sys.stdout.getvalue())
@mock.patch.object(novaclient.shell.OpenStackComputeShell, 'main')
def test_main_keyboard_interrupt(self, mock_compute_shell):
# Ensure that exit code is 130 for KeyboardInterrupt
mock_compute_shell.side_effect = KeyboardInterrupt()
try:
novaclient.shell.main()
except SystemExit as ex:
self.assertEqual(ex.code, 130)
@mock.patch.object(novaclient.shell.OpenStackComputeShell, 'times')
@requests_mock.Mocker()
def test_timing(self, m_times, m_requests):
m_times.append.side_effect = RuntimeError('Boom!')
self.make_env()
self.register_keystone_discovery_fixture(m_requests)
self.shell('list')
exc = self.assertRaises(RuntimeError, self.shell, '--timings list')
self.assertEqual('Boom!', str(exc))
@requests_mock.Mocker()
def test_osprofiler(self, m_requests):
self.make_env()
def client(*args, **kwargs):
self.assertEqual('swordfish', kwargs['profile'])
with mock.patch('novaclient.client.Client', client):
# we are only interested in the fact Client is initialized properly
self.shell('list --profile swordfish', (0, 2))
@requests_mock.Mocker()
def test_osprofiler_not_installed(self, m_requests):
self.make_env()
# NOTE(rpodolyaka): osprofiler is in test-requirements, so we have to
# simulate its absence here
with mock.patch('novaclient.shell.osprofiler_profiler', None):
_, stderr = self.shell('list --profile swordfish', (0, 2))
self.assertIn('unrecognized arguments: --profile swordfish',
stderr)
def test_microversion_with_default_behaviour(self):
self.make_env(fake_env=FAKE_ENV5)
self.mock_server_version_range.return_value = (
api_versions.APIVersion("2.1"), api_versions.APIVersion("2.3"))
self.shell('list')
client_args = self.mock_client.call_args_list[1][0]
self.assertEqual(api_versions.APIVersion("2.3"), client_args[0])
def test_microversion_with_default_behaviour_with_legacy_server(self):
self.make_env(fake_env=FAKE_ENV5)
self.mock_server_version_range.return_value = (
api_versions.APIVersion(), api_versions.APIVersion())
self.shell('list')
client_args = self.mock_client.call_args_list[1][0]
self.assertEqual(api_versions.APIVersion("2.0"), client_args[0])
def test_microversion_with_latest(self):
self.make_env()
novaclient.API_MAX_VERSION = api_versions.APIVersion('2.3')
self.mock_server_version_range.return_value = (
api_versions.APIVersion("2.1"), api_versions.APIVersion("2.3"))
self.shell('--os-compute-api-version 2.latest list')
client_args = self.mock_client.call_args_list[1][0]
self.assertEqual(api_versions.APIVersion("2.3"), client_args[0])
def test_microversion_with_specified_version(self):
self.make_env()
self.mock_server_version_range.return_value = (
api_versions.APIVersion("2.10"), api_versions.APIVersion("2.100"))
novaclient.API_MAX_VERSION = api_versions.APIVersion("2.100")
novaclient.API_MIN_VERSION = api_versions.APIVersion("2.90")
self.shell('--os-compute-api-version 2.99 list')
client_args = self.mock_client.call_args_list[1][0]
self.assertEqual(api_versions.APIVersion("2.99"), client_args[0])
def test_microversion_with_specified_version_out_of_range(self):
novaclient.API_MAX_VERSION = api_versions.APIVersion("2.100")
novaclient.API_MIN_VERSION = api_versions.APIVersion("2.90")
self.assertRaises(exceptions.CommandError,
self.shell, '--os-compute-api-version 2.199 list')
def test_microversion_with_v2_and_v2_1_server(self):
self.make_env()
self.mock_server_version_range.return_value = (
api_versions.APIVersion('2.1'), api_versions.APIVersion('2.3'))
novaclient.API_MAX_VERSION = api_versions.APIVersion("2.100")
novaclient.API_MIN_VERSION = api_versions.APIVersion("2.1")
self.shell('--os-compute-api-version 2 list')
client_args = self.mock_client.call_args_list[1][0]
self.assertEqual(api_versions.APIVersion("2.0"), client_args[0])
    def test_microversion_with_v2_and_v2_server(self):
        # Asking for plain '2' against a legacy server (empty version range)
        # also selects the 2.0 contract.
        self.make_env()
        self.mock_server_version_range.return_value = (
            api_versions.APIVersion(), api_versions.APIVersion())
        novaclient.API_MAX_VERSION = api_versions.APIVersion("2.100")
        novaclient.API_MIN_VERSION = api_versions.APIVersion("2.1")
        self.shell('--os-compute-api-version 2 list')
        client_args = self.mock_client.call_args_list[1][0]
        self.assertEqual(api_versions.APIVersion("2.0"), client_args[0])
    def test_microversion_with_v2_without_server_compatible(self):
        # The server's minimum (2.2) excludes the legacy 2.0 contract, so a
        # plain '2' request cannot be satisfied.
        self.make_env()
        self.mock_server_version_range.return_value = (
            api_versions.APIVersion('2.2'), api_versions.APIVersion('2.3'))
        novaclient.API_MAX_VERSION = api_versions.APIVersion("2.100")
        novaclient.API_MIN_VERSION = api_versions.APIVersion("2.1")
        self.assertRaises(
            exceptions.UnsupportedVersion,
            self.shell, '--os-compute-api-version 2 list')
    def test_microversion_with_specific_version_without_microversions(self):
        # A specific microversion cannot be used against a server that does
        # not support microversions at all (empty version range).
        self.make_env()
        self.mock_server_version_range.return_value = (
            api_versions.APIVersion(), api_versions.APIVersion())
        novaclient.API_MAX_VERSION = api_versions.APIVersion("2.100")
        novaclient.API_MIN_VERSION = api_versions.APIVersion("2.1")
        self.assertRaises(
            exceptions.UnsupportedVersion,
            self.shell,
            '--os-compute-api-version 2.3 list')
    @mock.patch.object(novaclient.shell.OpenStackComputeShell, 'main')
    def test_main_error_handling(self, mock_compute_shell):
        # Any exception escaping main() must exit non-zero (SystemExit) and
        # print an 'ERROR (<type>): <message>' line on stderr.
        class MyException(Exception):
            pass
        with mock.patch('sys.stderr', six.StringIO()):
            mock_compute_shell.side_effect = MyException('message')
            self.assertRaises(SystemExit, novaclient.shell.main)
            # Read the captured value before the patch is undone.
            err = sys.stderr.getvalue()
        self.assertEqual(err, 'ERROR (MyException): message\n')
class TestLoadVersionedActions(utils.TestCase):
    """Tests for OpenStackComputeShell._find_actions.

    Verifies how (micro)version-dependent subcommands and their arguments
    from fake_actions_module are registered for a given API version, and
    how the generated help text advertises the supported version ranges.
    """

    def test_load_versioned_actions(self):
        # The 'fake-action' implementation that gets registered depends on
        # the requested API version (2.15 -> impl returning 1, 2.25 -> 2).
        parser = novaclient.shell.NovaClientArgumentParser()
        subparsers = parser.add_subparsers(metavar='<subcommand>')
        shell = novaclient.shell.OpenStackComputeShell()
        shell.subcommands = {}
        shell._find_actions(subparsers, fake_actions_module,
                            api_versions.APIVersion("2.15"), False)
        self.assertIn('fake-action', shell.subcommands.keys())
        self.assertEqual(
            1, shell.subcommands['fake-action'].get_default('func')())
        # Re-scan at a newer version: a different variant is selected and
        # 'fake-action2' becomes available as well.
        shell.subcommands = {}
        shell._find_actions(subparsers, fake_actions_module,
                            api_versions.APIVersion("2.25"), False)
        self.assertIn('fake-action', shell.subcommands.keys())
        self.assertEqual(
            2, shell.subcommands['fake-action'].get_default('func')())
        self.assertIn('fake-action2', shell.subcommands.keys())
        self.assertEqual(
            3, shell.subcommands['fake-action2'].get_default('func')())

    def test_load_versioned_actions_not_in_version_range(self):
        # 'fake-action' has a bounded supported range and must disappear
        # for a version far outside it; 'fake-action2' stays available.
        parser = novaclient.shell.NovaClientArgumentParser()
        subparsers = parser.add_subparsers(metavar='<subcommand>')
        shell = novaclient.shell.OpenStackComputeShell()
        shell.subcommands = {}
        shell._find_actions(subparsers, fake_actions_module,
                            api_versions.APIVersion("2.10000"), False)
        self.assertNotIn('fake-action', shell.subcommands.keys())
        self.assertIn('fake-action2', shell.subcommands.keys())

    def test_load_versioned_actions_with_help(self):
        # With help enabled, the supported version range is appended to the
        # subcommand description.
        parser = novaclient.shell.NovaClientArgumentParser()
        subparsers = parser.add_subparsers(metavar='<subcommand>')
        shell = novaclient.shell.OpenStackComputeShell()
        shell.subcommands = {}
        shell._find_actions(subparsers, fake_actions_module,
                            api_versions.APIVersion("2.15"), True)
        self.assertIn('fake-action', shell.subcommands.keys())
        expected_desc = (" (Supported by API versions '%(start)s' - "
                         "'%(end)s')") % {'start': '2.10', 'end': '2.30'}
        self.assertEqual(expected_desc,
                         shell.subcommands['fake-action'].description)

    def test_load_versioned_actions_with_help_on_latest(self):
        # For an open-ended range ('2.latest') the description additionally
        # carries the HINT_HELP_MSG pointer.
        parser = novaclient.shell.NovaClientArgumentParser()
        subparsers = parser.add_subparsers(metavar='<subcommand>')
        shell = novaclient.shell.OpenStackComputeShell()
        shell.subcommands = {}
        shell._find_actions(subparsers, fake_actions_module,
                            api_versions.APIVersion("2.latest"), True)
        self.assertIn('another-fake-action', shell.subcommands.keys())
        expected_desc = (" (Supported by API versions '%(start)s' - "
                         "'%(end)s')%(hint)s") % {
            'start': '2.0', 'end': '2.latest',
            'hint': novaclient.shell.HINT_HELP_MSG}
        self.assertEqual(expected_desc,
                         shell.subcommands['another-fake-action'].description)

    @mock.patch.object(novaclient.shell.NovaClientArgumentParser,
                       'add_argument')
    def test_load_versioned_actions_with_args(self, mock_add_arg):
        # At 2.1 the '--foo' variant of fake-action2's argument is added.
        parser = novaclient.shell.NovaClientArgumentParser(add_help=False)
        subparsers = parser.add_subparsers(metavar='<subcommand>')
        shell = novaclient.shell.OpenStackComputeShell()
        shell.subcommands = {}
        shell._find_actions(subparsers, fake_actions_module,
                            api_versions.APIVersion("2.1"), False)
        self.assertIn('fake-action2', shell.subcommands.keys())
        mock_add_arg.assert_has_calls([
            mock.call('-h', '--help', action='help', help='==SUPPRESS=='),
            mock.call('--foo')])

    @mock.patch.object(novaclient.shell.NovaClientArgumentParser,
                       'add_argument')
    def test_load_versioned_actions_with_args2(self, mock_add_arg):
        # At 2.4 the '--bar' variant is added instead of '--foo'.
        parser = novaclient.shell.NovaClientArgumentParser(add_help=False)
        subparsers = parser.add_subparsers(metavar='<subcommand>')
        shell = novaclient.shell.OpenStackComputeShell()
        shell.subcommands = {}
        shell._find_actions(subparsers, fake_actions_module,
                            api_versions.APIVersion("2.4"), False)
        self.assertIn('fake-action2', shell.subcommands.keys())
        mock_add_arg.assert_has_calls([
            mock.call('-h', '--help', action='help', help='==SUPPRESS=='),
            mock.call('--bar')])

    @mock.patch.object(novaclient.shell.NovaClientArgumentParser,
                       'add_argument')
    def test_load_versioned_actions_with_args_not_in_version_range(
            self, mock_add_arg):
        # Outside the argument's supported range only the implicit --help
        # argument is registered.
        parser = novaclient.shell.NovaClientArgumentParser(add_help=False)
        subparsers = parser.add_subparsers(metavar='<subcommand>')
        shell = novaclient.shell.OpenStackComputeShell()
        shell.subcommands = {}
        shell._find_actions(subparsers, fake_actions_module,
                            api_versions.APIVersion("2.10000"), False)
        self.assertIn('fake-action2', shell.subcommands.keys())
        mock_add_arg.assert_has_calls([
            mock.call('-h', '--help', action='help', help='==SUPPRESS==')])

    @mock.patch.object(novaclient.shell.NovaClientArgumentParser,
                       'add_argument')
    def test_load_versioned_actions_with_args_and_help(self, mock_add_arg):
        # With help enabled, versioned arguments get the supported range
        # appended to their help string.
        parser = novaclient.shell.NovaClientArgumentParser(add_help=False)
        subparsers = parser.add_subparsers(metavar='<subcommand>')
        shell = novaclient.shell.OpenStackComputeShell()
        shell.subcommands = {}
        shell._find_actions(subparsers, fake_actions_module,
                            api_versions.APIVersion("2.4"), True)
        mock_add_arg.assert_has_calls([
            mock.call('-h', '--help', action='help', help='==SUPPRESS=='),
            mock.call('--bar',
                      help=" (Supported by API versions '2.3' - '2.4')")])

    @mock.patch.object(novaclient.shell.NovaClientArgumentParser,
                       'add_argument')
    def test_load_actions_with_versioned_args(self, mock_add_arg):
        # The same '--foo' argument has two help variants split at 2.21;
        # exactly one of them must be registered for each version.
        parser = novaclient.shell.NovaClientArgumentParser(add_help=False)
        subparsers = parser.add_subparsers(metavar='<subcommand>')
        shell = novaclient.shell.OpenStackComputeShell()
        shell.subcommands = {}
        shell._find_actions(subparsers, fake_actions_module,
                            api_versions.APIVersion("2.20"), False)
        self.assertIn(mock.call('--foo', help="first foo"),
                      mock_add_arg.call_args_list)
        self.assertNotIn(mock.call('--foo', help="second foo"),
                         mock_add_arg.call_args_list)
        mock_add_arg.reset_mock()
        shell._find_actions(subparsers, fake_actions_module,
                            api_versions.APIVersion("2.21"), False)
        self.assertNotIn(mock.call('--foo', help="first foo"),
                         mock_add_arg.call_args_list)
        self.assertIn(mock.call('--foo', help="second foo"),
                      mock_add_arg.call_args_list)
class ShellTestKeystoneV3(ShellTest):
    """Re-runs the ShellTest suite against a Keystone v3 identity endpoint."""

    def make_env(self, exclude=None, fake_env=FAKE_ENV):
        """Populate os.environ from *fake_env*, rewriting the auth URL to v3.

        Bug fix: the original implementation called ``fake_env.update(...)``
        directly, mutating the shared module-level FAKE_ENV default in place
        and leaking the v3 auth URL into every subsequently run test.  Work
        on a copy instead; the environment actually installed is unchanged.
        """
        fake_env = dict(fake_env)
        if 'OS_AUTH_URL' in fake_env:
            fake_env.update({'OS_AUTH_URL': 'http://no.where/v3'})
        env = dict((k, v) for k, v in fake_env.items() if k != exclude)
        self.useFixture(fixtures.MonkeyPatch('os.environ', env))

    def register_keystone_discovery_fixture(self, mreq):
        """Register a mocked Keystone v3 version-discovery response."""
        v3_url = "http://no.where/v3"
        v3_version = fixture.V3Discovery(v3_url)
        mreq.register_uri(
            'GET', v3_url, json=_create_ver_list([v3_version]),
            status_code=200)
|
[
"rahul.mishra@gslab.com"
] |
rahul.mishra@gslab.com
|
3327bd789cd44d894356311aa715e14c3e2975bf
|
d398bff6ef75eda209f815c93ef45e3c11b508a8
|
/CIS3210/Lab 10/lab10/tests/functional/test_users.py
|
9db55c302199eb9027d335f894eb6e6115ee0cda
|
[] |
no_license
|
mtran155/uoguelph
|
1d8eb2f4d8e7f38647949de63d48d227a4037f47
|
ab35d69acb874ff76220397a2637023850e04a00
|
refs/heads/master
| 2021-01-10T06:50:17.999278
| 2015-12-04T21:02:29
| 2015-12-04T21:02:29
| 47,378,239
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 718
|
py
|
from lab2.tests import *
class TestUsersController(TestController):
    """Functional tests covering the REST verbs of the users controller."""

    # def test_index(self):
    #     response = self.app.get(url(controller='users', action='index'))
    #     assert 'Home' in response

    def test_get(self):
        # GET should respond with a "Getting ..." body.
        response = self.app.get(url(controller='users', action='userid'))
        assert "Getting " in response
    def test_put(self):
        # PUT should respond with an "Updating ..." body.
        response = self.app.put(url(controller='users', action='userid'))
        assert "Updating " in response
    def test_post(self):
        # POST should respond with a "Creating ..." body.
        response = self.app.post(url(controller='users', action='userid'))
        assert "Creating " in response
    def test_delete(self):
        # DELETE should respond with a "Deleting ..." body.
        response = self.app.delete(url(controller='users', action='userid'))
        assert "Deleting " in response
|
[
"miketran155@gmail.com"
] |
miketran155@gmail.com
|
455b313e95a0e259cb911bec579e4255b2a88b6b
|
f692aeee810dfaa5e78efbc2137535f3694f4bf4
|
/vertex_face_feature/vertex_functions.py
|
5a71154ce99c1163ca477c78cd6eee8dd2de1d31
|
[] |
no_license
|
alikhanlab/Lesion-Clustering
|
e1ce3d13912e4df5a7fd001619e6e216d599ad5a
|
e0249c1f06300f49ec50e431baae98b5f7b6465a
|
refs/heads/master
| 2020-08-01T07:35:02.818796
| 2019-11-14T01:59:03
| 2019-11-14T01:59:03
| 210,915,896
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,947
|
py
|
import pandas as pd
def create_vertex(df):
    """Add the eight cube-corner columns ``v1``..``v8`` to *df*.

    Every voxel centre in ``df['xyz']`` is offset by +/-1/2 along each
    axis.  The frame is modified in place and also returned, exactly like
    the column-by-column version this replaces.
    """
    # Corner offsets in the fixed v1..v8 ordering documented for this module.
    corner_offsets = {
        'v1': (0.5, 0.5, 0.5),
        'v2': (0.5, 0.5, -0.5),
        'v3': (0.5, -0.5, 0.5),
        'v4': (0.5, -0.5, -0.5),
        'v5': (-0.5, 0.5, 0.5),
        'v6': (-0.5, -0.5, 0.5),
        'v7': (-0.5, 0.5, -0.5),
        'v8': (-0.5, -0.5, -0.5),
    }
    for name, (dx, dy, dz) in corner_offsets.items():
        # Bind the offsets as defaults to avoid the late-binding-closure trap.
        df[name] = df['xyz'].apply(
            lambda xyz, dx=dx, dy=dy, dz=dz:
                [xyz[0] + dx, xyz[1] + dy, xyz[2] + dz])
    return df
# Calculate vertex function
# Calculate vertex function
def calculate_vertex(df):
    """Count shared vertices with face neighbours for every voxel.

    Expects *df* rows laid out as ``[.., .., neighbors, v1..v8]``: column
    index 2 holds a list of neighbour row positions and columns 3..10 hold
    the eight corner coordinates produced by ``create_vertex``.

    Returns a new frame: *df* with eight extra columns ``v1_n``..``v8_n``
    holding, per voxel, the per-vertex neighbour-share counts sorted in
    descending order.

    Cleanup: the redundant function-local ``import pandas as pd`` was
    removed (pandas is already imported at module level).
    """
    rows = df.values.tolist()
    result = []
    for row in rows:
        vertices = row[3:]       # the eight corner coordinates
        neighbors = row[2]       # row indices of face-adjacent voxels
        counter = [0] * 8
        for neighbor_idx in neighbors:
            neighbor_vertices = rows[neighbor_idx][3:]
            for count, vertex in enumerate(vertices):
                if vertex in neighbor_vertices:
                    counter[count] += 1
        result.append(sorted(counter, reverse=True))
    # Converting to dataFrame
    df_res = pd.DataFrame(result, columns=['v1_n', 'v2_n', 'v3_n', 'v4_n',
                                           'v5_n', 'v6_n', 'v7_n', 'v8_n'])
    # Merge with original dataFrame
    vox = pd.concat([df, df_res], axis=1, sort=False)
    return vox
|
[
"nalikhan@bu.edu"
] |
nalikhan@bu.edu
|
4ba5d130f9a73bb9196a02b349f5f8c4281bf1c9
|
a011b2c93c829781dd381d405d880445a9c5188b
|
/main.py
|
5921bd76daa813c7c754abd791a690224cd52013
|
[] |
no_license
|
DavidRSeWell/ImageMLScaffold
|
704a015d575ecfc4590a7475eebe0c1f2e83c55d
|
5b62f75d94a29f974eae0d67e177ea72c51fb45b
|
refs/heads/master
| 2023-03-22T13:05:43.088243
| 2021-03-23T23:55:01
| 2021-03-23T23:55:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,025
|
py
|
import json
import numpy as np
from ml import data,utils
from ml.designer import Designer
def main(config_path):
    """Run the full train/evaluate pipeline described by *config_path*.

    Loads the config, builds a Designer, loads and transforms the image
    data, splits train/test, trains the model and prints its evaluation.
    """
    config = utils.load_config(config_path)
    print("CONFIG")
    print("----------------------------------------------------------")
    print(json.dumps(config, indent=4, sort_keys=True))
    designer = Designer(config_path)
    print("Loading in the data")
    designer.load_data()
    print("Data loaded showing stats")
    designer.image_data.get_data_stats()
    # Feature matrix and label encoding (label_dict maps class -> code).
    X = designer.transform_data()
    label_dict, y = designer.image_data.transform_labels()
    X_train, X_test, y_train, y_test = designer.create_train_data(X,y)
    print("Training size \n")
    print(X_train.shape)
    print("Testing size \n")
    print(X_test.shape)
    print("Training model \n")
    designer.train(X_train,y_train)
    print("Evaluating the model")
    print(designer.test(X_test,y_test))
    print("Done running main")
if __name__ == '__main__':
    # Config path is hard-coded; edit config.yaml to change the run.
    config_path = "config.yaml"
    main(config_path)
|
[
"dsewell@pdx.edu"
] |
dsewell@pdx.edu
|
27f65268dc55bcd17b19df08d6232dc00b7385b2
|
9e5ac63135b43127d14e74b707d5c0dc846e07d2
|
/wikinterests/users/models.py
|
0b5f66774cfd451d55e94942b07527c57c9a8bd3
|
[] |
no_license
|
Aayog/WikiNterest
|
4ce82d40cf886c095391921041431d556ee14b5b
|
ac5d2f29335a42e66f34b41ff18e481ce62f4e42
|
refs/heads/master
| 2022-11-10T01:41:50.298381
| 2020-07-05T14:36:30
| 2020-07-05T14:36:30
| 276,683,594
| 0
| 0
| null | 2020-07-03T16:41:53
| 2020-07-02T15:34:50
|
Python
|
UTF-8
|
Python
| false
| false
| 575
|
py
|
from django.db import models
from django.contrib.auth.models import User
class Category(models.Model):
    """A named category identified by a title and a source URL."""
    title = models.CharField(max_length=100)
    url = models.CharField(max_length=100)
class Article(models.Model):
    """An article identified by a title and a source URL."""
    title = models.CharField(max_length=100)
    url = models.CharField(max_length=100)
class Profile(models.Model):
    """Per-user profile linking a User to categories and favorite articles."""
    # One profile per auth user; deleted together with the user.
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    categories = models.ManyToManyField(Category)
    favorites = models.ManyToManyField(Article)
    def __str__(self):
        return f'{self.user.username} Profile'
|
[
"1aayogkoirala@gmail.com"
] |
1aayogkoirala@gmail.com
|
d6787e11fbca103b10a569b2c9d9eacdc24cd19d
|
90094b0065058917b840226558b1712c9e69138e
|
/Exercise/Day1.3/ex2.py
|
43aa77b530e28d7c46a86b663c25a8cd8f99605e
|
[] |
no_license
|
MinhTamPhan/LearnPythonTheHardWay
|
8008a33784e3c1b7f74bac6541cddb02e2eab0f2
|
099beaaba1edc426d1f144b1c0a0c901db40479e
|
refs/heads/master
| 2021-06-21T22:30:12.810483
| 2017-07-18T15:32:17
| 2017-07-18T15:32:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 291
|
py
|
# A comment, this is so you can read your program later
# Anything after the # is ignored by python
print "I could have code like this."# and the comment after ignored
# You can also use a comment to "disable" or comment out a piece of code
#print "this won't run."
print "This will run."
|
[
"phanminhtam247@gmail.com"
] |
phanminhtam247@gmail.com
|
99677e21c63be316461e81c583cece59ba4c30ec
|
0a227eb4db935d8c0d04a8075e067401d373e733
|
/backend/manage.py
|
d8971114ef826868e2d9db2398ba8841021a79e9
|
[] |
no_license
|
crowdbotics-apps/tst-daniel-figma-de-22592
|
3639f7a97ebb0186f1f0181d889fa38e9f789241
|
52d451240ed42de2d8e6e8eb054e0bddecc024aa
|
refs/heads/master
| 2023-05-05T03:26:49.228604
| 2021-05-24T17:18:56
| 2021-05-24T17:18:56
| 370,423,052
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 645
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django administrative tasks from the command line."""
    # Default settings module for this project; an externally set
    # DJANGO_SETTINGS_MODULE takes precedence over this default.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tst_daniel_figma_de_22592.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
487f1b80806f1d4101d12458b639f7942ce6b667
|
b4d04e3beaee3272835fe4fc85e8e0cbc798870c
|
/uebung/sets.py
|
87968839e12a19b523a136d065b9ec4ecb967bf5
|
[
"MIT"
] |
permissive
|
wieerwill/Python-Intro
|
5e2c95946b64a169014b0587bd69ffafe2b1d4d5
|
6b6f1d8b1b5c95590ffe15b0b4ddf188b680b491
|
refs/heads/master
| 2022-01-19T01:53:22.489401
| 2022-01-11T15:11:41
| 2022-01-11T15:11:41
| 173,446,988
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 438
|
py
|
# รffne die ../data/names.csv - Datei als .csv-Datei und berechne die Anzahl der verschiedenen Vornamen, die in dieser Datei aufgelistet sind
import csv
names = set()
with open('../data/names.csv', newline='') as csvfile:
namereader = csv.reader(csvfile, delimiter=',', quotechar='"')
counter = 0
for row in namereader:
if counter != 0:
names.add(row[1])
counter = counter + 1
print(len(names))
|
[
"robert.jeutter@gmx.de"
] |
robert.jeutter@gmx.de
|
46142fed8a42b76bbcd40ef9e7a53b005fe228fc
|
07e803289e82ec0eb25d6b2aabc7093c42fdddb7
|
/primenums_occurance_plotter.py
|
a2cf899a60a63ace7ca5e4e8610657ec605c37fc
|
[] |
no_license
|
joncokisler/Prime_Number_Occurance_Frequency_Plotter
|
173ca99e8b40b2c2c228d9fcba096dc3f7a04e25
|
306da5a8a7e660121c290b752d58a9efc572912e
|
refs/heads/master
| 2020-03-27T09:54:27.029397
| 2018-08-28T02:45:58
| 2018-08-28T02:45:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,314
|
py
|
import pandas as pd
import matplotlib.pyplot as plt
import math
# Trial-division sieve: collect every prime below `limit` (2 is seeded).
primes_list = [2]
limit = 1000
for i in range(3, limit):
    # Even numbers above 2 can never be prime.
    if i % 2 == 0:
        continue
    is_prime = True
    # Trial division only needs to test divisors up to sqrt(i).
    for j in range(2, int(math.sqrt(i)) + 1):
        if i % j == 0:
            is_prime = False
            break
    if is_prime:
        primes_list.append(i)
        print(i)

# Persist the primes as a one-column CSV with header 'prime_num'.
with open('prime_numbers.csv', 'w') as file:
    file.write('prime_num')
    for b in primes_list:
        file.write("\n" + str(b))
        print(b)

# Read the primes back and bucket them into 100-wide ranges
# ((counter-1)*100, counter*100].
df = pd.read_csv('prime_numbers.csv')
df2 = df.prime_num.tolist()
data = {}
upper_range = int(math.ceil(float(df2[-1] / 100)))
for counter in range(upper_range):
    data[counter * 100] = 0
    for i in range(len(df2)):
        # df2 is sorted ascending, so stop once past this bucket's top.
        if df2[i] > (counter * 100):
            break
        if df2[i] <= (counter * 100) and df2[i] > ((counter - 1) * 100):
            data[counter * 100] += 1
print(data)

# Plot bucket upper bound (x) against prime count in that bucket (y).
x, y = zip(*sorted(data.items()))
plt.style.use('grayscale')
plt.ylabel('Prime Number Occurance Frequency')
plt.xlabel('Range of Num from X-100 to X')
plt.title('Frequency of Prime Number Occurances Between x-100 and x')
plt.plot(x, y)
plt.grid()
plt.show()
|
[
"can.cokisler@gmail.com"
] |
can.cokisler@gmail.com
|
0f44faa55dd74a6ff2459dd236af6e9efd69c83a
|
d9cc66f37c08ba7f3f1309c0932df137fef93fd0
|
/Appium+Mitmdump็ฌๅb็ซ็ชๅงไฟกๆฏ/run.py
|
4bf1f3057e790ebf45b453aeb5dd39decdc39783
|
[] |
no_license
|
TechnologyDepartment/Python_Spider_AppSpider
|
4538b5925707a692c98cdf0fc648b8652d93ef05
|
eeee0fa14c091f699d2368e71ca3e327441fd20d
|
refs/heads/master
| 2020-08-30T02:41:36.389271
| 2018-08-03T14:15:42
| 2018-08-03T14:15:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
from bilibili.Appium import Bilibili
def main():
    """Launch the Bilibili Appium/mitmdump spider."""
    # Use a distinct local name so the function name is not shadowed.
    spider = Bilibili()
    spider.main()
if __name__ == '__main__':
    main()
|
[
"guanwei006@gmail.com"
] |
guanwei006@gmail.com
|
1942b79c10625cb0fcda6b40d916f376a98b7ad7
|
549c944ee0c743e5bf79c510fe03505d78ad356c
|
/food_order/urls.py
|
90152d26ca6d3039101d06efaa90ce62187ceef3
|
[] |
no_license
|
mPooja-15/practise
|
a6792c431a76a68412aa488fd858116c5608e250
|
d81889a14706ed5c5c44d014b6aea716e0feacb8
|
refs/heads/master
| 2021-05-25T14:19:13.312333
| 2020-04-07T12:46:11
| 2020-04-07T12:46:11
| 253,786,330
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,675
|
py
|
from django.urls import path
from . import views
from django.contrib.auth import views as v
# Routes for the food-ordering app: static pages, auth, and the complete
# password-change / password-reset flows from django.contrib.auth.
urlpatterns = [
    path('index_page/',views.index_page,name='index_page'),
    path('register/',views.register_user,name='register_user'),
    path('login/',views.login_user,name='login_user'),
    path('logout_user/',views.logout_user,name='logout_user'),
    path('recipes/',views.recipes,name='recipes'),
    path('service/',views.service,name='service'),
    path('about/',views.about,name='about'),
    path('news/',views.news,name='news'),
    path('',views.single,name='single'),
    path('contact/',views.contact,name='contact'),
    path('password-change/',v.PasswordChangeView.as_view(template_name='food/password_change.html'),name='change_password'),
    # Fixed: the "done" pages must use the *Done* view classes; the original
    # wired PasswordChangeView/PasswordResetView here, which re-rendered the
    # form instead of the confirmation page.
    path('password-change-done/',v.PasswordChangeDoneView.as_view(template_name='food/password_change_done.html'),name='password_change_done'),
    path('password-reset/',v.PasswordResetView.as_view(template_name='food/password_reset.html',email_template_name='food/password_reset_email.html',subject_template_name='food/password_reset_email_subject.txt'),name='password_reset'),
    path('password-reset-done/',v.PasswordResetDoneView.as_view(template_name='food/password_reset_done.html'),name='password_reset_done'),
    path('password-reset-confirm/<uidb64>/<token>',v.PasswordResetConfirmView.as_view(template_name='food/password_reset_confirm.html'),name='password_reset_confirm'),
    path('password-reset-complete/',v.PasswordResetCompleteView.as_view(template_name='food/password_reset_complete.html'),name='password_reset_complete'),
    path('edit_profile/',views.edit_profile,name='edit_profile')
]
|
[
"mishrapooja8128@gmail.com"
] |
mishrapooja8128@gmail.com
|
5cdd000db5a17f5c6b8d974f1ea18fe71bc2fd53
|
3b22eb1b5edd936ce1582295422d5101d85cf4b4
|
/day5/simple_math/setup.py
|
3d221c2688e14b3b29e611d3c8e709fd2e81cb83
|
[] |
no_license
|
zhou436/advanced_python_assignments
|
1822240eb341ec300c5f54392e7dcf833214da2e
|
f086fff82e84067a3614a489d8c80773dee8426f
|
refs/heads/master
| 2023-03-07T17:18:24.192101
| 2021-02-19T16:19:36
| 2021-02-19T16:19:36
| 339,788,768
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 247
|
py
|
from setuptools import setup

# Minimal packaging metadata for the single-module simple_math distribution.
setup(name='uu-bms-simple_math',
      version='0.0',
      description='Nothing',
      author='Yijun Zhou',
      author_email='yijun.zhou@angstrom.uu.se',
      license='BSD',
      py_modules=['simple_math'],
      packages=[])
|
[
"zyj19931230@gmail.com"
] |
zyj19931230@gmail.com
|
865dc8309fd2470836c0870494552d1cf3796028
|
b7f40a654701e87b8bc3ec24a0c018982f85cd8f
|
/distruct/tests/test_Distructure.py
|
988f931e36ed51ef5241d50ca86574206eb243b0
|
[
"MIT"
] |
permissive
|
AsclepiusInformatica/distruct
|
c07f9b233903a096864a6fca915c6576b0a8052e
|
c8633f4c90bbac8669efae43bda765dfa8b7348a
|
refs/heads/master
| 2023-03-30T14:03:26.259069
| 2020-06-17T16:08:22
| 2020-06-17T16:08:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,065
|
py
|
#!/usr/bin/env python3
#####################################
#
# Filename : test_Distructure.py
#
# Projectname : diSTruct
#
# Author : Oskar Taubert
#
# Creation Date : Mon 11 Jun 2018 04:04:59 PM CEST
#
# Last Modified : Thu 28 Mar 2019 11:14:02 AM CET
#
#####################################
from Bio import SeqIO
from Bio.PDB.PDBParser import PDBParser
from distruct import Distructure
from distruct import config
# Location of the bundled test PDB files inside the distruct data directory.
testFilePath = config.data_path + "tests/"


def test_Distructure():
    """Smoke-test the full diSTruct pipeline on PDB entry 1ptq.

    Reads the chain sequences from the bundled PDB file, builds a
    Distructure, generates primary contacts and runs coordinate generation.

    Cleanup: removed the dead ``sequences = list()`` (immediately
    overwritten), the commented-out parsing loop, a stray ``pass`` and the
    trailing bare ``return``.
    """
    code = "1ptq"
    fileName = testFilePath + code + '.pdb'

    with open(fileName, 'r') as f:
        sequences = [r.seq for r in SeqIO.parse(f, "pdb-seqres")]

    # create distruct
    ds = Distructure(code, sequences)
    # create primary contacts
    ds.generate_primary_contacts()
    # generate coordinates
    ds.run()


if __name__ == '__main__':
    test_Distructure()
|
[
"oskar.taubert@kit.edu"
] |
oskar.taubert@kit.edu
|
8f0d7371318bd0aff7f8a010412f044bdd8dd4a9
|
f0a20597594121d228798af6160a97c943e5ef93
|
/chap15/calcProd.py
|
0fdad3408d88fcc664842decba1364af39033a79
|
[] |
no_license
|
febin89/automate_theboringstuff
|
c1cc15875f763d5bf3a841fbc873ac084a2f875f
|
3a28d95a69257b6c689c3ceed54243a2c74a83bd
|
refs/heads/master
| 2021-01-09T20:23:23.330662
| 2016-07-17T11:20:01
| 2016-07-17T11:20:01
| 63,527,035
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 277
|
py
|
import time
def calcProd(n=100000):
    """Return the product 1 * 2 * ... * (n - 1).

    Args:
        n: exclusive upper bound of the factor range.  Defaults to 100000,
           matching the original hard-coded benchmark size, so existing
           ``calcProd()`` callers are unaffected.

    Returns:
        int: the product; 1 when n <= 1 (empty range).
    """
    product = 1
    for i in range(1, n):
        product = product * i
    return product
# Benchmark: time the big-integer product and report its digit count.
startTime=time.time()
prod=calcProd()
endTime=time.time()
print('The result is %s digits long.' %(len(str(prod))))
print('Took %s seconds to calculate.'%(endTime-startTime))
|
[
"a@jfh.com"
] |
a@jfh.com
|
ea59816d8bbd0ddcee9cb8a67eb5006a8d7b55fa
|
081f423c0e18eda780e517d760bb413a067da58b
|
/demo/settings/development.py
|
299c53343536616f69a56e085db1523d479e48de
|
[] |
no_license
|
Frozen-Burrito/DjangoBoilerplate
|
65f09dbc0734072f66da6710a9f44c883699fc8b
|
b17a8b56836b06d6e96402e4d538722d9d1cd06e
|
refs/heads/master
| 2021-10-09T12:37:39.554314
| 2020-01-21T17:04:45
| 2020-01-21T17:04:45
| 235,257,760
| 2
| 0
| null | 2021-09-22T18:27:34
| 2020-01-21T04:43:50
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,373
|
py
|
# Development-only Django settings; extends the shared base settings.
from .base import *
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', cast=bool)
ALLOWED_HOSTS = ['127.0.0.1']
# Enable django-debug-toolbar in development only.
INSTALLED_APPS += [
    'debug_toolbar',
]
MIDDLEWARE += ['debug_toolbar.middleware.DebugToolbarMiddleware',]
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# DEBUG TOOLBAR SETTINGS
DEBUG_TOOLBAR_PANELS = [
    'debug_toolbar.panels.versions.VersionsPanel',
    'debug_toolbar.panels.timer.TimerPanel',
    'debug_toolbar.panels.settings.SettingsPanel',
    'debug_toolbar.panels.headers.HeadersPanel',
    'debug_toolbar.panels.request.RequestPanel',
    'debug_toolbar.panels.sql.SQLPanel',
    'debug_toolbar.panels.staticfiles.StaticFilesPanel',
    'debug_toolbar.panels.templates.TemplatesPanel',
    'debug_toolbar.panels.cache.CachePanel',
    'debug_toolbar.panels.signals.SignalsPanel',
    'debug_toolbar.panels.logging.LoggingPanel',
    'debug_toolbar.panels.redirects.RedirectsPanel',
    'debug_toolbar.panels.profiling.ProfilingPanel',
]
def show_toolbar(request):
    """Callback for django-debug-toolbar: always display it in development.

    Bypasses the default INTERNAL_IPS check entirely; *request* is ignored.
    """
    return True
DEBUG_TOOLBAR_CONFIG = {
    'INTERCEPT_REDIRECTS': False,
    'SHOW_TOOLBAR_CALLBACK': show_toolbar
}

# Fixed: these two lines used ':' (a bare variable annotation, which assigns
# nothing) instead of '=', so the Stripe settings were silently never defined.
STRIPE_PUBLIC_KEY = ''
STRIPE_SECRET_KEY = ''
|
[
"f3rm3ndouza@outlook.com"
] |
f3rm3ndouza@outlook.com
|
d986a4cb8eab53a3affc3d659f59d7d5e496ed95
|
70c78732a42f95cc4ad8e9698050477282feca87
|
/Painting System/Ground Station/Python_XBee_Communication.py
|
f1446eff7754afda2c040a1a039961bfec1f6052
|
[
"MIT"
] |
permissive
|
BAILOOL/Automatized-Wall-Painting-Drone
|
15b29508d637c6cb6849744b78eaa064fd4c156c
|
fc2da8692d4f7d0651634af7e49a1f29f79e3c6d
|
refs/heads/master
| 2021-01-16T00:57:28.130000
| 2018-05-11T00:33:00
| 2018-05-11T00:33:00
| 38,023,142
| 6
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 801
|
py
|
# Ground-station side of the XBee link (Python 2): resets the Arduino,
# then relays '*'-terminated text messages between the serial port and
# the operator until the drone reports completion.
import sys, os
import serial
# NOTE(review): COM15/9600 are hard-coded — presumably the XBee dongle's
# port on the operator's machine; verify before deployment.
ser = serial.Serial('COM15', 9600)
received = []
print "Reseting arduino"
#reset arduino first!!!
ser.write(bytes("&"))
print "Waiting for response from arduino"
if ser.read() == '&':
    while True:
        # Accumulate one byte at a time until the '*' message terminator.
        received.append(ser.read())
        length = len(received)
        #print received
        if received[length-1] == '*':
            receivedstring = ''.join(map(str,received[:length-1]))
            print receivedstring
            if receivedstring == 'Please input a word!':
                input_word = raw_input()
                print "Sending word input to xbee"
                ser.write(bytes(input_word))
            if receivedstring == 'Printing is completed. Congratulations!':
                print "Exiting the program"
                # NOTE(review): the four extra close() calls below are
                # redundant — close() is idempotent; one call suffices.
                ser.close()
                ser.close()
                ser.close()
                ser.close()
                ser.close()
                sys.exit("Mission is completed")
            # Message consumed: clear the buffer for the next one.
            del received[:]
|
[
"alexandr.baylo@gmail.com"
] |
alexandr.baylo@gmail.com
|
01c5be4bb24dd571f976567d024215e2cd20ace1
|
6138af219efc3a8f31060e30ebc532ffcbad1768
|
/astrogrid/desktop/impl/python-test/Plastic.py
|
cd0af30734dce3c6daf74a4595cc36afd036e15f
|
[] |
no_license
|
Javastro/astrogrid-legacy
|
dd794b7867a4ac650d1a84bdef05dfcd135b8bb6
|
51bdbec04bacfc3bcc3af6a896e8c7f603059cd5
|
refs/heads/main
| 2023-06-26T10:23:01.083788
| 2021-07-30T11:17:12
| 2021-07-30T11:17:12
| 391,028,616
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,783
|
py
|
#!/usr/bin/env python
#exercise the plastic hub.
import unittest
class Plastic(unittest.TestCase):
    """Exercises the PLASTIC hub exposed by the AstroGrid runtime (Python 2).

    setUp expects a running hub (via the global ``ar``) with 'VO Desktop'
    registered; the tests cover registration, message routing and XML-RPC
    callback services.
    """
    def setUp(self):
        # Locate the hub and the 'VO Desktop' application id; fail early
        # if VODesktop is not registered with the hub.
        self.hub = ar.plastic.hub
        self.id = self.hub.getHubId()
        l = self.hub.getRegisteredIds()
        l.remove(self.id)
        self.vdId = None
        for id in l:
            n = self.hub.getName(id)
            if n == 'VO Desktop':
                self.vdId = id
        self.assertTrue(self.vdId != None,"Vodesktop not registered with plastic?")
        self.ECHO = 'ivo://votech.org/test/echo'
        self.ownName = 'testScript'
    def testHubRegistered(self):
        # The hub lists itself among the registered ids.
        self.assertTrue(self.id in self.hub.getRegisteredIds())
    def testHubName(self):
        self.assertEquals("hub",self.hub.getName(self.id))
    def testHubUnderstoodMessages(self):
        # The hub itself understands several messages, including echo.
        l = self.hub.getUnderstoodMessages(self.id)
        self.assertTrue(len(l) > 3)
        self.assertTrue(self.ECHO in l)
    def testAppUnderstoodMessages(self):
        # VODesktop understands a larger message set, including echo.
        l = self.hub.getUnderstoodMessages(self.vdId)
        self.assertTrue(len(l) > 6)
        self.assertTrue(self.ECHO in l)
    def testMessageRegisteredIds(self):
        # Both hub and VODesktop subscribe to echo; unknown ids match nobody.
        ids = self.hub.getMessageRegisteredIds(self.ECHO)
        self.assertTrue(self.id in ids)
        self.assertTrue(self.vdId in ids)
        self.assertEquals(0,len(self.hub.getMessageRegisteredIds('ivo://unknown')))
    def testRegister(self):
        # Registering adds exactly one id; unregistering removes it again.
        prevSize = len(self.hub.getRegisteredIds())
        myId = self.hub.registerNoCallBack(self.ownName )
        ids = self.hub.getRegisteredIds()
        self.assertEquals(prevSize,len(ids) - 1)
        self.assertTrue(myId in ids)
        self.hub.unregister(myId)
        ids = self.hub.getRegisteredIds()
        self.assertEquals(prevSize,len(ids))
        self.assertFalse(myId in ids)
    def testRequest(self):
        # A broadcast echo request is answered by both hub and VODesktop.
        myId = self.hub.registerNoCallBack(self.ownName)
        r = self.hub.request(myId,self.ECHO,['msg'])
        self.hub.unregister(myId)
        self.assertTrue(self.id in r)
        self.assertTrue(self.vdId in r)
        self.assertEquals('msg',r[self.id])
        self.assertEquals('msg',r[self.vdId])
    def testRequestToSubset(self):
        # A targeted echo request is answered by exactly the listed ids.
        myId = self.hub.registerNoCallBack(self.ownName)
        r = self.hub.requestToSubset(myId,self.ECHO,['msg'],[self.id,self.vdId])
        self.hub.unregister(myId)
        self.assertTrue(self.vdId in r,msg='VODesktop did not respond')
        self.assertTrue(self.id in r,msg='hub did not respond')
        self.assertEquals('msg',r[self.id])
        self.assertEquals('msg',r[self.vdId])
        self.assertEquals(2,len(r))
    def testRegisterAsMockService(self):
        # Registering an XML-RPC endpoint (never actually called here)
        # makes the id visible and subscribed to echo until unregistered.
        myId = self.hub.registerXMLRPC(self.ownName,[self.ECHO],'http://localhost:7090')
        self.assertTrue(myId in self.hub.getRegisteredIds())
        self.assertTrue(myId in self.hub.getMessageRegisteredIds(self.ECHO))
        self.hub.unregister(myId)
        self.assertTrue(myId not in self.hub.getRegisteredIds())
        self.assertTrue(myId not in self.hub.getMessageRegisteredIds(self.ECHO))
    def testRegisterAsService(self):
        # Run a real XML-RPC echo service in a daemon thread and verify the
        # hub routes both broadcast and subset requests to it.
        from SimpleXMLRPCServer import SimpleXMLRPCServer
        server = SimpleXMLRPCServer(('localhost',7090))
        server.register_introspection_functions()
        def fn(sender,message,args):
            if message == self.ECHO:
                return args[0]
        server.register_function(fn,"perform")
        import thread
        thread.start_new_thread(server.serve_forever,())
        servId = self.hub.registerXMLRPC('testService',[self.ECHO],'http://localhost:7090')
        myId = self.hub.registerNoCallBack(self.ownName)
        r = self.hub.request(myId,self.ECHO,['msg'])
        self.assertTrue(self.id in r)
        self.assertTrue(self.vdId in r)
        self.assertTrue(servId in r)
        self.assertEquals('msg',r[self.id])
        self.assertEquals('msg',r[self.vdId])
        self.assertEquals('msg',r[servId])
        r = self.hub.requestToSubset(myId,self.ECHO,['msg'],[servId])
        self.assertTrue(servId in r,msg='service did not respond to subset message')
        self.assertEquals('msg',r[servId])
        self.assertEquals(1,len(r))
        self.hub.unregister(myId)
        self.hub.unregister(servId)
    #sadly it's too hard to clean up all the stuff that this one creates.
    def noTestRegisterAsAsynchService(self):
        # Disabled ('noTest' prefix): asynchronous variant of the service
        # test; relies on sleeps and leaks the server it starts.
        from SimpleXMLRPCServer import SimpleXMLRPCServer
        server = SimpleXMLRPCServer(('localhost',7091))
        server.register_introspection_functions()
        def fn(sender,message,args):
            print sender, message, args
            if message == self.ECHO:
                self.seen = True
                return args[0]
        fn.seen = False
        server.register_function(fn,"perform")
        import thread
        thread.start_new_thread(server.serve_forever,())
        servId = self.hub.registerXMLRPC('testService',[self.ECHO],'http://localhost:7091')
        myId = self.hub.registerNoCallBack(self.ownName)
        import time
        self.hub.requestAsynch(myId,self.ECHO,['msg'])
        self.assertFalse(fn.seen)
        time.sleep(5)
        self.assertTrue(fn.seen)
        fn.seen = False
        self.hub.requestToSubsetAsynch(myId,self.ECHO,['msg'],[servId])
        time.sleep(5)
        self.assertTrue(fn.seen)
        fn.seen = False
        self.hub.unregister(myId)
        self.hub.unregister(servId)
def suite():
    """Build a suite containing every test defined on the Plastic case."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromTestCase(Plastic)
if __name__ == '__main__':
    # Bring up the AstroGrid runtime first, then run the whole suite.
    import alltests
    alltests.setupAR()
    unittest.TextTestRunner(verbosity=1).run(suite())
|
[
"Noel.Winstanley@astrogrid.org"
] |
Noel.Winstanley@astrogrid.org
|
197585ed48041451db0d028d44ce2fb4cd7fb1fb
|
2d2c422fe86b253c628129bb1713f3c14af1704d
|
/examples/neural_doodle.py
|
a63b33ab76369f5ac21eee7df607dde6c9e60ee8
|
[
"MIT"
] |
permissive
|
alvarouc/keras
|
43f3b04332e40c3cf1cfbe364b0cd84fc3b69a3e
|
4301f009ffe57a1713b36bb863df73b30c466446
|
refs/heads/master
| 2020-05-19T23:17:49.304548
| 2019-05-06T20:15:50
| 2019-05-06T20:15:50
| 43,589,364
| 0
| 0
|
MIT
| 2019-05-06T20:02:38
| 2015-10-03T05:47:36
|
Python
|
UTF-8
|
Python
| false
| false
| 14,089
|
py
|
'''Neural doodle with Keras
# Script Usage
## Arguments
```
--nlabels: # of regions (colors) in mask images
--style-image: image to learn style from
--style-mask: semantic labels for style image
--target-mask: semantic labels for target image (your doodle)
--content-image: optional image to learn content from
--target-image-prefix: path prefix for generated target images
```
## Example 1: doodle using a style image, style mask
and target mask.
```
python neural_doodle.py --nlabels 4 --style-image Monet/style.png \
--style-mask Monet/style_mask.png --target-mask Monet/target_mask.png \
--target-image-prefix generated/monet
```
## Example 2: doodle using a style image, style mask,
target mask and an optional content image.
```
python neural_doodle.py --nlabels 4 --style-image Renoir/style.png \
--style-mask Renoir/style_mask.png --target-mask Renoir/target_mask.png \
--content-image Renoir/creek.jpg \
--target-image-prefix generated/renoir
```
# References
- [Dmitry Ulyanov's blog on fast-neural-doodle]
(http://dmitryulyanov.github.io/feed-forward-neural-doodle/)
- [Torch code for fast-neural-doodle]
(https://github.com/DmitryUlyanov/fast-neural-doodle)
- [Torch code for online-neural-doodle]
(https://github.com/DmitryUlyanov/online-neural-doodle)
- [Paper Texture Networks: Feed-forward Synthesis of Textures and Stylized Images]
(http://arxiv.org/abs/1603.03417)
- [Discussion on parameter tuning]
(https://github.com/keras-team/keras/issues/3705)
# Resources
Example images can be downloaded from
https://github.com/DmitryUlyanov/fast-neural-doodle/tree/master/data
'''
from __future__ import print_function
import time
import argparse
import numpy as np
from scipy.optimize import fmin_l_bfgs_b
from keras import backend as K
from keras.layers import Input, AveragePooling2D
from keras.models import Model
from keras.preprocessing.image import load_img, save_img, img_to_array
from keras.applications import vgg19
# Command line arguments
parser = argparse.ArgumentParser(description='Keras neural doodle example')
parser.add_argument('--nlabels', type=int,
                    help='number of semantic labels'
                    ' (regions in differnet colors)'
                    ' in style_mask/target_mask')
parser.add_argument('--style-image', type=str,
                    help='path to image to learn style from')
parser.add_argument('--style-mask', type=str,
                    help='path to semantic mask of style image')
parser.add_argument('--target-mask', type=str,
                    help='path to semantic mask of target image')
parser.add_argument('--content-image', type=str, default=None,
                    help='path to optional content image')
parser.add_argument('--target-image-prefix', type=str,
                    help='path prefix for generated results')
args = parser.parse_args()

style_img_path = args.style_image
style_mask_path = args.style_mask
target_mask_path = args.target_mask
content_img_path = args.content_image
target_img_prefix = args.target_image_prefix
# Content loss is only applied when an explicit content image was given.
use_content_img = content_img_path is not None

num_labels = args.nlabels
num_colors = 3  # RGB

# determine image sizes based on target_mask
ref_img = img_to_array(load_img(target_mask_path))
img_nrows, img_ncols = ref_img.shape[:2]

# Relative weights of the three loss terms; content weight is zeroed out
# entirely when no content image is supplied.
total_variation_weight = 50.
style_weight = 1.
content_weight = 0.1 if use_content_img else 0

content_feature_layers = ['block5_conv2']
# To get better generation qualities, use more conv layers for style features
style_feature_layers = ['block1_conv1', 'block2_conv1', 'block3_conv1',
                        'block4_conv1', 'block5_conv1']
# helper functions for reading/processing images
def preprocess_image(image_path):
    """Load an image at the working size and apply VGG19 preprocessing.

    Returns a 4-D array with a leading batch axis of 1.
    """
    arr = img_to_array(load_img(image_path, target_size=(img_nrows, img_ncols)))
    batch = np.expand_dims(arr, axis=0)
    return vgg19.preprocess_input(batch)
def deprocess_image(x):
    """Turn a flat, VGG19-preprocessed tensor back into a displayable image.

    Reverses the mean-pixel centering and the BGR channel order, then clips
    to valid uint8 pixel values.
    """
    if K.image_data_format() == 'channels_first':
        img = x.reshape((3, img_nrows, img_ncols)).transpose((1, 2, 0))
    else:
        img = x.reshape((img_nrows, img_ncols, 3))
    # Remove the ImageNet zero-centering (per-channel mean, BGR order).
    img[:, :, 0] += 103.939
    img[:, :, 1] += 116.779
    img[:, :, 2] += 123.68
    # 'BGR' -> 'RGB'
    img = img[:, :, ::-1]
    return np.clip(img, 0, 255).astype('uint8')
def kmeans(xs, k):
    """Cluster the rows of ``xs`` into ``k`` groups and return integer labels.

    Prefers scikit-learn when it is installed, otherwise falls back to
    SciPy's kmeans2 (which raises if a cluster ends up empty).
    """
    assert xs.ndim == 2
    try:
        from sklearn.cluster import k_means
    except ImportError:
        from scipy.cluster.vq import kmeans2
        _, labels = kmeans2(xs, k, missing='raise')
    else:
        _, labels, _ = k_means(xs.astype('float64'), k)
    return labels
def load_mask_labels():
    '''Load both target and style masks.
    A mask image (nr x nc) with m labels/colors will be loaded
    as a 4D boolean tensor:
    (1, m, nr, nc) for 'channels_first' or (1, nr, nc, m) for 'channels_last'
    '''
    target_mask_img = load_img(target_mask_path,
                               target_size=(img_nrows, img_ncols))
    target_mask_img = img_to_array(target_mask_img)
    style_mask_img = load_img(style_mask_path,
                              target_size=(img_nrows, img_ncols))
    style_mask_img = img_to_array(style_mask_img)
    # Cluster the RGB values of BOTH masks together so that matching colors
    # in the two images end up with the same label id.
    if K.image_data_format() == 'channels_first':
        mask_vecs = np.vstack([style_mask_img.reshape((3, -1)).T,
                               target_mask_img.reshape((3, -1)).T])
    else:
        mask_vecs = np.vstack([style_mask_img.reshape((-1, 3)),
                               target_mask_img.reshape((-1, 3))])
    labels = kmeans(mask_vecs, num_labels)
    # The first nr*nc labels belong to the style mask, the rest to the target.
    style_mask_label = labels[:img_nrows *
                              img_ncols].reshape((img_nrows, img_ncols))
    target_mask_label = labels[img_nrows *
                               img_ncols:].reshape((img_nrows, img_ncols))
    # One boolean channel per label, stacked on the backend's channel axis.
    stack_axis = 0 if K.image_data_format() == 'channels_first' else -1
    style_mask = np.stack([style_mask_label == r for r in range(num_labels)],
                          axis=stack_axis)
    target_mask = np.stack([target_mask_label == r for r in range(num_labels)],
                           axis=stack_axis)
    return (np.expand_dims(style_mask, axis=0),
            np.expand_dims(target_mask, axis=0))
# Create tensor variables for images
if K.image_data_format() == 'channels_first':
    shape = (1, num_colors, img_nrows, img_ncols)
else:
    shape = (1, img_nrows, img_ncols, num_colors)

style_image = K.variable(preprocess_image(style_img_path))
target_image = K.placeholder(shape=shape)
if use_content_img:
    content_image = K.variable(preprocess_image(content_img_path))
else:
    # Dummy content tensor (its loss weight is 0) keeps the graph uniform.
    content_image = K.zeros(shape=shape)

images = K.concatenate([style_image, target_image, content_image], axis=0)

# Create tensor variables for masks
raw_style_mask, raw_target_mask = load_mask_labels()
style_mask = K.variable(raw_style_mask.astype('float32'))
target_mask = K.variable(raw_target_mask.astype('float32'))
masks = K.concatenate([style_mask, target_mask], axis=0)

# index constants for images and tasks variables
STYLE, TARGET, CONTENT = 0, 1, 2

# Build image model, mask model and use layer outputs as features
# image model as VGG19
image_model = vgg19.VGG19(include_top=False, input_tensor=images)

# mask model as a series of pooling
mask_input = Input(tensor=masks, shape=(None, None, None), name='mask_input')
x = mask_input
for layer in image_model.layers[1:]:
    name = 'mask_%s' % layer.name
    if 'conv' in layer.name:
        # Stride-1 average pooling keeps the mask spatially aligned with
        # the conv feature map at this depth.
        x = AveragePooling2D((3, 3), padding='same', strides=(
            1, 1), name=name)(x)
    elif 'pool' in layer.name:
        x = AveragePooling2D((2, 2), name=name)(x)
mask_model = Model(mask_input, x)

# Collect features from image_model and task_model
image_features = {}
mask_features = {}
for img_layer, mask_layer in zip(image_model.layers, mask_model.layers):
    if 'conv' in img_layer.name:
        assert 'mask_' + img_layer.name == mask_layer.name
        layer_name = img_layer.name
        img_feat, mask_feat = img_layer.output, mask_layer.output
        image_features[layer_name] = img_feat
        mask_features[layer_name] = mask_feat
# Define loss functions
def gram_matrix(x):
    """Return the Gram matrix (channel co-occurrence) of a 3-D feature tensor."""
    assert K.ndim(x) == 3
    flat = K.batch_flatten(x)
    return K.dot(flat, K.transpose(flat))
def region_style_loss(style_image, target_image, style_mask, target_mask):
    '''Calculate style loss between style_image and target_image,
    for one common region specified by their (boolean) masks
    '''
    assert 3 == K.ndim(style_image) == K.ndim(target_image)
    assert 2 == K.ndim(style_mask) == K.ndim(target_mask)
    if K.image_data_format() == 'channels_first':
        masked_style = style_image * style_mask
        masked_target = target_image * target_mask
        num_channels = K.shape(style_image)[0]
    else:
        # channels_last: move channels first so the 2-D mask broadcasts
        # over the spatial dimensions.
        masked_style = K.permute_dimensions(
            style_image, (2, 0, 1)) * style_mask
        masked_target = K.permute_dimensions(
            target_image, (2, 0, 1)) * target_mask
        num_channels = K.shape(style_image)[-1]
    num_channels = K.cast(num_channels, dtype='float32')
    # Normalise each Gram matrix by region size and channel count so small
    # regions contribute on the same scale as large ones.
    s = gram_matrix(masked_style) / K.mean(style_mask) / num_channels
    c = gram_matrix(masked_target) / K.mean(target_mask) / num_channels
    return K.mean(K.square(s - c))
def style_loss(style_image, target_image, style_masks, target_masks):
    '''Calculate style loss between style_image and target_image,
    in all regions.
    '''
    assert 3 == K.ndim(style_image) == K.ndim(target_image)
    assert 3 == K.ndim(style_masks) == K.ndim(target_masks)
    loss = K.variable(0)
    for i in range(num_labels):
        # Slice out the boolean mask channel for label i on the correct axis.
        if K.image_data_format() == 'channels_first':
            style_mask = style_masks[i, :, :]
            target_mask = target_masks[i, :, :]
        else:
            style_mask = style_masks[:, :, i]
            target_mask = target_masks[:, :, i]
        loss += region_style_loss(style_image,
                                  target_image, style_mask, target_mask)
    return loss
def content_loss(content_image, target_image):
    """Sum of squared differences between content and generated features."""
    diff = target_image - content_image
    return K.sum(K.square(diff))
def total_variation_loss(x):
    """Spatial smoothness penalty: squared neighbour differences (vertical
    and horizontal), summed after raising to the power 1.25."""
    assert 4 == K.ndim(x)
    if K.image_data_format() == 'channels_first':
        a = K.square(x[:, :, :img_nrows - 1, :img_ncols - 1] -
                     x[:, :, 1:, :img_ncols - 1])
        b = K.square(x[:, :, :img_nrows - 1, :img_ncols - 1] -
                     x[:, :, :img_nrows - 1, 1:])
    else:
        a = K.square(x[:, :img_nrows - 1, :img_ncols - 1, :] -
                     x[:, 1:, :img_ncols - 1, :])
        b = K.square(x[:, :img_nrows - 1, :img_ncols - 1, :] -
                     x[:, :img_nrows - 1, 1:, :])
    return K.sum(K.pow(a + b, 1.25))
# Overall loss is the weighted sum of content_loss, style_loss and tv_loss
# Each individual loss uses features from image/mask models.
loss = K.variable(0)
for layer in content_feature_layers:
    content_feat = image_features[layer][CONTENT, :, :, :]
    target_feat = image_features[layer][TARGET, :, :, :]
    loss += content_weight * content_loss(content_feat, target_feat)

for layer in style_feature_layers:
    style_feat = image_features[layer][STYLE, :, :, :]
    target_feat = image_features[layer][TARGET, :, :, :]
    style_masks = mask_features[layer][STYLE, :, :, :]
    target_masks = mask_features[layer][TARGET, :, :, :]
    sl = style_loss(style_feat, target_feat, style_masks, target_masks)
    # Spread the style budget evenly across the chosen feature layers.
    loss += (style_weight / len(style_feature_layers)) * sl

loss += total_variation_weight * total_variation_loss(target_image)
loss_grads = K.gradients(loss, target_image)

# Evaluator class for computing efficiency
# One backend function returns loss and gradients together so each L-BFGS
# step needs only a single forward/backward pass.
outputs = [loss]
if isinstance(loss_grads, (list, tuple)):
    outputs += loss_grads
else:
    outputs.append(loss_grads)
f_outputs = K.function([target_image], outputs)
def eval_loss_and_grads(x):
    """Evaluate the combined loss and its gradient for a flat image vector."""
    if K.image_data_format() == 'channels_first':
        x = x.reshape((1, 3, img_nrows, img_ncols))
    else:
        x = x.reshape((1, img_nrows, img_ncols, 3))
    outs = f_outputs([x])
    loss_value = outs[0]
    # The backend may return the gradient as one tensor or a list of tensors.
    if len(outs[1:]) == 1:
        grad_values = outs[1].flatten().astype('float64')
    else:
        grad_values = np.array(outs[1:]).flatten().astype('float64')
    return loss_value, grad_values
class Evaluator(object):
    """Cache the loss/gradient pair from a single combined evaluation.

    fmin_l_bfgs_b requests the loss and the gradient through two separate
    callbacks; computing them together and caching avoids doing a second
    full evaluation per optimizer step.
    """

    def __init__(self):
        self.loss_value = None
        self.grads_values = None

    def loss(self, x):
        assert self.loss_value is None
        self.loss_value, self.grad_values = eval_loss_and_grads(x)
        return self.loss_value

    def grads(self, x):
        assert self.loss_value is not None
        cached = np.copy(self.grad_values)
        # Reset the cache so the next loss() call recomputes both values.
        self.loss_value = None
        self.grad_values = None
        return cached
evaluator = Evaluator()

# Generate images by iterative optimization, starting from uniform noise
# roughly centered like the VGG-preprocessed inputs.
if K.image_data_format() == 'channels_first':
    x = np.random.uniform(0, 255, (1, 3, img_nrows, img_ncols)) - 128.
else:
    x = np.random.uniform(0, 255, (1, img_nrows, img_ncols, 3)) - 128.

for i in range(50):
    print('Start of iteration', i)
    start_time = time.time()
    # L-BFGS pulls the loss and gradient from the shared Evaluator cache.
    x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x.flatten(),
                                     fprime=evaluator.grads, maxfun=20)
    print('Current loss value:', min_val)
    # save current generated image
    img = deprocess_image(x.copy())
    fname = target_img_prefix + '_at_iteration_%d.png' % i
    save_img(fname, img)
    end_time = time.time()
    print('Image saved as', fname)
    print('Iteration %d completed in %ds' % (i, end_time - start_time))
|
[
"francois.chollet@gmail.com"
] |
francois.chollet@gmail.com
|
1607c5383de4197cee20b31ca046cbce530dd23b
|
cb58e2890778b3489c28c952bd74ad9bbb0a09c2
|
/old_code/convert_to_csv.py
|
f7f32f83460ff81ae38e0d76e1a71a05b17d1a9c
|
[] |
no_license
|
codyfcook/political_polarization
|
044e56f2f0aa5fc097d25eab525a638d1f0f6d75
|
34b188ccc0ce5b7931904d01060ea8e1c4e1c3f4
|
refs/heads/master
| 2021-06-12T18:24:52.118319
| 2017-04-27T04:18:26
| 2017-04-27T04:18:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,631
|
py
|
import cPickle as pickle
import csv
## DEBATES ##
# NOTE(review): Python 2 script (cPickle, unicode builtin). Output files and
# csv writers are never closed explicitly, and commas are stripped by string
# replacement instead of relying on csv quoting — confirm intended.
d = pickle.load(open("debates.p", "rb"))
f = csv.writer(open("debates.csv", "wb+"))
f.writerow(["date", "debate", "speaker", "text"])
for date in d.keys():
    for speaker in d[date]['speakers']:
        f.writerow([unicode(date.replace(",", "")).encode("utf-8"),
                    unicode(d[date]['debate_title'].replace(",", "")).encode("utf-8"),
                    unicode(speaker.lower().replace(",", "")).encode("utf-8"),
                    d[date]['speakers'][speaker].replace(",", "")
                    ])

## SPEECHES ##
# speeches.p maps speaker -> speech type -> date -> text.
d = pickle.load(open("speeches.p", "rb"))
f = csv.writer(open("speeches.csv", "wb+"))
f.writerow(["speaker", "type", "date", "text"])
for speaker in d.keys():
    for type1 in d[speaker].keys():
        for date in d[speaker][type1].keys():
            f.writerow([speaker.replace(",", ""), type1.replace(",", ""), date.replace(",", ""), d[speaker][type1][date].replace(",", "")])

## SPEAKERS DICT ##
# speakers_dict.p maps date -> title -> speaker -> list of utterances.
d = pickle.load(open("speakers_dict.p", "rb"))
f = csv.writer(open("speakers_dict.csv", "wb+"))
f.writerow(["date", "title", "speaker", "text"])
for date in d.keys():
    for title in d[date].keys():
        for speaker in d[date][title].keys():
            # Skip procedural chairs; join each speaker's lines into one blob.
            if speaker!="ACTING PRESIDENT" and speaker!="PRESIDING OFFICER":
                text = ' '.join(d[date][title][speaker])
                f.writerow([
                    unicode(date.replace(",", "")).encode("utf-8"),
                    unicode(title.replace(",", "")).encode("utf-8"),
                    unicode(speaker.replace(",", "")).encode("utf-8"),
                    unicode(text.replace(",", ""))])
|
[
"Cody@Codys-MacBook-Pro.local"
] |
Cody@Codys-MacBook-Pro.local
|
3b3eaf64d2ec26a49fe7af5ed9a93cf844e28921
|
4088951e5ff3830a502f8098d0633f643e862579
|
/app.py
|
2f4f3043f2c16288b2efae868ee6388ee921e81a
|
[] |
no_license
|
brvijaya/qaservice
|
669ea9312c7aaff6590a8d98c7cb1783e119aae6
|
867ce833de1af7f5c1588ca73fe65e2d35b8f434
|
refs/heads/master
| 2022-11-09T22:50:06.285038
| 2020-06-09T06:20:56
| 2020-06-09T06:20:56
| 270,339,490
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,493
|
py
|
import os
import logging
import socket
from flask import Flask, jsonify, request
import json
from distilbert import run_prediction
# Deployment settings: read from the OpenShift environment when present,
# with local-development fallbacks otherwise.
HOST_NAME = os.environ.get('OPENSHIFT_APP_DNS', 'localhost')
APP_NAME = os.environ.get('OPENSHIFT_APP_NAME', 'flask')
IP = os.environ.get('OPENSHIFT_PYTHON_IP', '127.0.0.1')
PORT = int(os.environ.get('OPENSHIFT_PYTHON_PORT', 8080))
HOME_DIR = os.environ.get('OPENSHIFT_HOMEDIR', os.getcwd())

log = logging.getLogger(__name__)
app = Flask(__name__)
@app.route('/')
def hello():
    """Liveness endpoint: report the deployment environment as JSON."""
    environment = {
        'host_name': HOST_NAME,
        'app_name': APP_NAME,
        'ip': IP,
        'port': PORT,
        'home_dir': HOME_DIR,
        'host': socket.gethostname(),
    }
    return jsonify(environment)
@app.route("/qa/ask", methods=['POST'])
def ask():
data = request.get_json()
print(data)
question = data['question']
context = data['context']
predictions = run_prediction([question], context)
return predictions
@app.route("/test", methods=['GET','POST'])
def run_test():
context = "New Zealand (Mฤori: Aotearoa) is a sovereign island country in the southwestern Pacific Ocean. It has a total land area of 268,000 square kilometres (103,500 sq mi), and a population of 4.9 million. New Zealand's capital city is Wellington, and its most populous city is Auckland."
question = "What's the largest city?"
predictions = run_prediction([question], context)
return predictions
if __name__ == '__main__':
    # Bind to all interfaces so the platform router can reach the app.
    app.run(host='0.0.0.0', port=PORT)
|
[
"noreply@github.com"
] |
brvijaya.noreply@github.com
|
b9d412eec1487bfed44c40513d70646016ca33c6
|
42ac672183d9c61794c6c07440ee0fa791296189
|
/tests/test_properties.py
|
562778baaca88d935a135669ec14ee0e201a0ee5
|
[
"BSD-2-Clause"
] |
permissive
|
pydanny/pygments-custom
|
c5f900dcb7e5cb73b01c8cbe787306c3310781aa
|
b2862e3e58c7e42028ec8363ae668f2f7efd6ccb
|
refs/heads/master
| 2023-08-28T19:43:40.507971
| 2017-06-14T21:24:13
| 2017-06-14T21:24:13
| 93,273,976
| 2
| 3
|
BSD-2-Clause
| 2021-08-10T17:10:57
| 2017-06-03T21:06:41
|
Python
|
UTF-8
|
Python
| false
| false
| 2,745
|
py
|
# -*- coding: utf-8 -*-
"""
Properties Tests
~~~~~~~~~~~~~~~~
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import unittest
from pygments.lexers.configs import PropertiesLexer
from pygments.token import Token
class PropertiesTest(unittest.TestCase):
    """Token-stream tests for Pygments' Java .properties lexer."""

    def setUp(self):
        self.lexer = PropertiesLexer()

    def test_comments(self):
        """
        Assures lines lead by either # or ! are recognized as a comment
        """
        fragment = '! a comment\n# also a comment\n'
        tokens = [
            (Token.Comment, '! a comment'),
            (Token.Text, '\n'),
            (Token.Comment, '# also a comment'),
            (Token.Text, '\n'),
        ]
        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))

    def test_leading_whitespace_comments(self):
        """An indented comment keeps its leading whitespace as plain text."""
        fragment = '    # comment\n'
        tokens = [
            (Token.Text, '    '),
            (Token.Comment, '# comment'),
            (Token.Text, '\n'),
        ]
        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))

    def test_escaped_space_in_key(self):
        # NOTE(review): the fragment contains no escaped space despite the
        # test name — possibly mangled in this copy; confirm against upstream.
        fragment = 'key = value\n'
        tokens = [
            (Token.Name.Attribute, 'key'),
            (Token.Text, ' '),
            (Token.Operator, '='),
            (Token.Text, ' '),
            (Token.Literal.String, 'value'),
            (Token.Text, '\n'),
        ]
        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))

    def test_escaped_space_in_value(self):
        """A backslash-escaped space stays inside the value token."""
        fragment = 'key = doubleword\\ value\n'
        tokens = [
            (Token.Name.Attribute, 'key'),
            (Token.Text, ' '),
            (Token.Operator, '='),
            (Token.Text, ' '),
            (Token.Literal.String, 'doubleword\\ value'),
            (Token.Text, '\n'),
        ]
        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))

    def test_space_delimited_kv_pair(self):
        """Key and value may be separated by a bare space (no '=' or ':')."""
        fragment = 'key value\n'
        tokens = [
            (Token.Name.Attribute, 'key'),
            (Token.Text, ' '),
            (Token.Literal.String, 'value\n'),
        ]
        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))

    def test_just_key(self):
        """A key with no separator and no value is still a Name.Attribute."""
        fragment = 'justkey\n'
        tokens = [
            (Token.Name.Attribute, 'justkey'),
            (Token.Text, '\n'),
        ]
        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))

    def test_just_key_with_space(self):
        """An escaped space is allowed inside a bare key."""
        fragment = 'just\\ key\n'
        tokens = [
            (Token.Name.Attribute, 'just\\ key'),
            (Token.Text, '\n'),
        ]
        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
|
[
"pydanny@gmail.com"
] |
pydanny@gmail.com
|
0640bc720256153a8ef2810f14e34e37c4dc5c56
|
723fb811f684994827f7ba51f9d736b965111227
|
/code/utils.py
|
bf1e394fc552e43819cf703b778f520770b518c4
|
[] |
no_license
|
mjason98/evalita20_hate
|
439c75e0cbacb9ed7afaeb5dfcdd1081bd5efa16
|
71180255d66be5f2dfdfc1adb91608cc17023c64
|
refs/heads/main
| 2023-01-07T14:56:05.560146
| 2020-11-02T22:03:49
| 2020-11-02T22:03:49
| 301,477,785
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,896
|
py
|
import math
import time
import numpy as np
import matplotlib.pyplot as plt
import random
import collections # reducer
import os # reducer
import re # reducer
import pickle
def colorizar(text):
    """Wrap ``text`` in ANSI escape codes so terminals render it in red."""
    return ''.join(['\033[91m', text, '\033[0m'])
def headerizar(text):
    """Wrap ``text`` in ANSI escape codes for bold terminal output."""
    return '\033[1m{}\033[0m'.format(text)
def getMyDict():
    """Return the fixed mapping of special placeholder tokens to ids 1..13.

    Id 0 is implicitly reserved for padding by the embedding loader.
    """
    tokens = ['<emogy>', '<hashtag>', '<url>', '<risa>', '<signo>', '<ask>',
              '<phoria>', '<diag>', '<number>', '<date>', '<sent>', '<user>',
              '<frase>']
    return {tok: idx for idx, tok in enumerate(tokens, start=1)}
def generate_dictionary_from_embedding(filename, dictionary, ret=True, logs=True, norm=False, message_logs=''):
    """Parse a word-embedding text file into ``dictionary`` (word -> row id).

    Row 0 stays all-zero (padding); rows 1..len(getMyDict()) hold random
    vectors for the special placeholder tokens; the real vocabulary follows.
    When ``ret`` is True the embedding matrix is returned as float32,
    optionally standardised per dimension when ``norm`` is True.
    """
    if logs:
        print ('# Loading:', colorizar(os.path.basename(filename)), message_logs)
    x = []
    band, l = False, 0
    mean, var, T = 0, 0, 0
    with open(filename, 'r', encoding='utf-8') as file:
        for ide, line in enumerate(file):
            li = line.split()
            if len(li) <= 2:
                print('#WARNING::', line, 'interpreted as head')
                continue
            if not band:
                # First data row: create the zero/pad row plus one random
                # vector per special token, sized from this row's dimension.
                x.append([0 for _ in range(len(li)-1)])
                my_d = getMyDict()
                l = len(my_d)
                for val in my_d:
                    x.append([random.random() for _ in range(len(li)-1)])
                    dictionary.update({val.lower(): my_d[val] })
                band = True
            a = [float(i) for i in li[1:]]
            x.append(a)
            # Accumulate running sums for the per-dimension mean/variance.
            mean += np.array(a, dtype=np.float32)
            var += np.array(a, dtype=np.float32)**2
            T += 1
            # NOTE(review): id is derived from the raw line number; if header
            # lines were skipped above the ids shift — confirm inputs have at
            # most one header.
            dictionary.update({li[0].lower(): ide + l + 1})
    var /= float(T)
    mean /= float(T)
    var -= mean ** 2
    # var now holds the per-dimension standard deviation.
    var = np.sqrt(var)
    mean = mean.reshape(1,mean.shape[0])
    var = var.reshape(1,var.shape[0])
    if ret:
        sol = np.array(x, np.float32)
        if norm:
            sol = (sol - mean) / var
        return sol
class StatusBar:
    """Console progress bar printed in place with carriage returns.

    Shows step count, a 20-character bar, elapsed time and optional metrics;
    emits a final newline once the last step is reached.
    """

    def __init__(self, fin_, title=""):
        self.title = title
        self.ini = 0           # steps completed so far
        self.fin = fin_        # total number of steps
        self.car = 's'         # elapsed-time unit suffix: s/m/h
        self.ti = time.time()  # start timestamp

    def update(self, loss=[], metrics=[]):
        # NOTE(review): mutable default arguments are shared across calls;
        # harmless here since they are only read, never mutated in place.
        self.ini = self.ini + 1
        metrics = " - ".join(['{}: {:.6f} '.format(n, m)
                              for n, m in loss + metrics])
        ctn = int(float(self.ini) / float(self.fin) * 20.0)
        bars = '=' * ctn + '.' * (20 - ctn)
        # Pad the step counter so the bar does not shift as digits grow.
        lo1, lo2 = int(math.log10(self.fin + 0.1) + 1), int(math.log10(self.ini + 0.1) + 1)
        spas = ' ' * (lo1 - lo2)
        ta = time.time() - self.ti
        # Promote the time unit as the run gets longer.
        if ta > 60:
            self.car = 'm'
            if ta / 60.0 > 60.0:
                self.car = 'h'
        if self.car == 'm':
            ta /= 60.0
        elif self.car == 'h':
            # NOTE(review): 360.0 looks like it should be 3600.0 (seconds per
            # hour) — confirm before relying on the hour display.
            ta /= 360.0
        lo1 = 2 - int(math.log10(int(ta) + 0.1))
        spa2 = ' ' * lo1
        end = "" if self.ini < self.fin else "\n"
        print("\r{} {}{}/{} [{}] {}{:.1f}{} ".format(self.title, spas, self.ini, self.fin, bars, spa2, ta,
                                                     self.car) + metrics, end=end)  # + metrics, end=end)
class TorchBoard(object):
    """Tracks train/test metric curves, their best values, and plots them."""

    def __init__(self):
        self.dict = {}                  # label -> list of recorded values
        self.labels = ['train', 'test'] # the only labels accepted by update()
        self.future_updt = True         # set False once an unknown label is seen
        self.best_funct = None          # comparator deciding which value is "best"
        self.setFunct( max )
        self.best = [None, None]        # best value per label: [train, test]
        self.best_p = [0, 0]            # position (epoch index) of each best

    def setFunct(self, fun):
        # Choose how "best" is decided (e.g. max for accuracy, min for loss).
        self.best_funct = fun

    def update(self, label, value, getBest=False):
        """Append ``value`` under ``label``; optionally report if it is a new best."""
        if self.future_updt == False:
            return
        if label not in self.labels:
            # Unknown label disables the board permanently rather than raising.
            print ('WARNING: the label {} its not in {}, the board will not be updated.'.format(
                label, self.labels))
            self.future_updt = False
            return
        pk = 1
        if label == 'train':
            pk = 0
        if self.dict.get(label) == None:
            self.dict.update({label:[value]})
        else:
            self.dict[label].append(value)
        yo = False
        if self.best[pk] is None:
            yo = True
            self.best[pk] = value
        else:
            self.best[pk] = self.best_funct(self.best[pk], value)
            yo = self.best[pk] == value
        if yo:
            self.best_p[pk] = len(self.dict[label]) - 1
        if getBest:
            return yo

    def show(self, saveroute, plot_smood=False):
        """Plot every tracked curve (plus optional smoothing) and save to ``saveroute``."""
        fig , axes = plt.subplots()
        for i,l in enumerate(self.dict):
            # NOTE(review): ``i`` follows dict insertion order while
            # self.best/best_p use train=0/test=1; these only agree when
            # 'train' is recorded first — confirm intended.
            y = self.dict[l]
            if len(y) <= 1:
                continue
            lab = str(self.best[i])
            if len(lab) > 7:
                lab = lab[:7]
            axes.plot(range(len(y)), y, label=l + ' ' + lab)
            axes.scatter([self.best_p[i]], [self.best[i]])
            if plot_smood:
                # Moving-average overlay with a +/-3 point window.
                w = 3
                y_hat = [ np.array(y[max(i-w,0):min(len(y),i+w)]).mean() for i in range(len(y))]
                axes.plot(range(len(y)), y_hat, ls='--', color='gray')
        fig.legend()
        fig.savefig(saveroute)
        del axes
        del fig
# -------------------------------------------------
def reduced(oldFile, newFile, vocab):
    """Copy to ``newFile`` only the embedding rows whose word is in ``vocab``.

    Matching uses the lowercased first token of each row; a matched word is
    popped from ``vocab`` so duplicates are written at most once.
    """
    print ('# Turning', colorizar(oldFile), 'into', colorizar(newFile))
    # Context managers close both handles even on error (the original leaked
    # the output handle on exceptions), and the output is written as UTF-8 to
    # match the input instead of the platform default encoding.
    with open(newFile, 'w', encoding='utf-8') as outf, \
            open(oldFile, 'r', encoding='utf-8') as oldf:
        for line in oldf:
            l = line.split()
            if len(l) <= 2:
                # Header or malformed row: skip.
                continue
            word = l[0].lower()
            if vocab.get(word, 0) != 0:
                outf.write(line)
                vocab.pop(word)
    print('# Done!')
def makeVocabFromData(filepath):
    """Read a whitespace-tokenised corpus and map each distinct token to 5.

    Keys are inserted in reverse-sorted order (descending), which fixes the
    resulting dict's iteration order.
    """
    with open(filepath, 'r', encoding='utf-8') as handle:
        counts = collections.Counter(handle.read().replace('\n', ' ').split())
    return {token: 5 for token in sorted(counts, reverse=True)}
|
[
"mjasoncuba@gmail.com"
] |
mjasoncuba@gmail.com
|
02d9364f10e7a583cdb28b925cf60d416c15c9c9
|
2778a9542d551283eeb6b6d06ba4bbd5db877676
|
/draw_pictures_for_gradu/make_centerList.py
|
86147ad0b4755e0e3f816554e2c7d51a440f3210
|
[] |
no_license
|
EmmaMannfors/gradu
|
077046e2a39f7b674a7e1467a634d2fe2fcb7bf6
|
3533d09363b75f54dd4cf77ae008eb5104c3b3eb
|
refs/heads/master
| 2021-08-27T18:31:43.034113
| 2021-08-02T08:56:48
| 2021-08-02T08:56:48
| 148,455,443
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,092
|
py
|
#####################################################################################
# Writes file which has scuba centers corresponding with Herschel files
#####################################################################################
# Jul 20, 2018
#####################################################################################
import os

# Centers, in deg, corresponds to SCUBA files
center_deg = '/home/emma/gradu/codes/Find_and_write_center_of_scuba_map/center_in_deg.txt'

# Matches, scuba = p[0], psw = p[4]
# NOTE(review): Python 2 script (bare print statements). The 'out' and
# 'match' handles are never closed, and the matches file is reopened for
# every line of the centers file.
out = open('centers_250_850.txt','w')
out.write('SCUBA'+3*' '+'PSW(250 um)'+4*' '+'RA DEC\n')
with open(center_deg) as c:
    next(c)  # skip the header row
    for line in c:
        match = open('/home/emma/gradu/codes/matches/cropMatches.txt','r')
        p = line.split()
        scuba = p[0]
        ra = p[1]
        dec = p[2]
        print scuba
        for line in match:
            e = line.split()
            print e[0]
            if e[0] == scuba:
                # Column 4 is the PSW (250 um) counterpart; pad 'NA' so the
                # output columns stay aligned.
                psw = e[4]
                if psw == 'NA':
                    psw = 'NA'+4*' '
            else:
                continue
            print psw
            out.write(scuba+' '+psw+' '+ra+' '+dec+'\n')
        print 10*'*'
        psw = 'NA'
|
[
"noreply@github.com"
] |
EmmaMannfors.noreply@github.com
|
c32bc595bcac3b99c0cd50c0cfde594f121dc9ee
|
730f441d2828f0fefcb2c7d7ef252a0cb98217a3
|
/convertirABlanco.py
|
ea2ca75bcd703649d9a44961fb89e4fc2bf2e90c
|
[] |
no_license
|
camilo912/lettuceClassificator
|
21686790dd5e39fe98a770b26321f6b4d6717218
|
fb904a70f54376a78bbed76877dd6002349247b1
|
refs/heads/master
| 2020-03-10T22:29:28.399853
| 2018-04-15T20:48:49
| 2018-04-15T20:48:49
| 129,620,245
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 414
|
py
|
###script que convierte las imagenes en blanco
import numpy as np
import cv2
import matplotlib.pyplot as plt
from skimage.segmentation import slic
from skimage.segmentation import mark_boundaries
# Overwrite every pixel of images 11..17 with pure white and save the
# results under the black-and-white output directory.
for idx in range(11, 18):
    image_name = str(idx) + ".jpg"
    canvas = cv2.imread("datasetTrainPlants/" + image_name, cv2.IMREAD_COLOR)
    canvas[:, :, :] = 255
    cv2.imwrite("negroYBlancoParaPlantas/" + image_name, canvas)
|
[
"cam.912@hotmail.com"
] |
cam.912@hotmail.com
|
cab67cc7f4473bc83520c5aac57676fda1591172
|
acb8e84e3b9c987fcab341f799f41d5a5ec4d587
|
/langs/1/cvk.py
|
d49bc0cf25f76bcc1026a9e082cfe955b27b36b4
|
[] |
no_license
|
G4te-Keep3r/HowdyHackers
|
46bfad63eafe5ac515da363e1c75fa6f4b9bca32
|
fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2
|
refs/heads/master
| 2020-08-01T12:08:10.782018
| 2016-11-13T20:45:50
| 2016-11-13T20:45:50
| 73,624,224
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 486
|
py
|
import sys
def printFunction(lineRemaining):
    # Prints the tokens enclosed by a pair of '"' tokens; an empty pair
    # (just the two quote tokens) prints a blank line.
    # NOTE(review): Python 2 print statements throughout this file.
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            #data to print
            lineRemaining = lineRemaining[1:-1]
            print ' '.join(lineRemaining)
        else:
            print
def main(fileName):
    # Interpret each line of the program file: lines whose first token is
    # the keyword 'cVK' print their argument; any other line aborts.
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'cVK':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return

if __name__ == '__main__':
    # The program file to run is the first CLI argument.
    main(sys.argv[1])
|
[
"juliettaylorswift@gmail.com"
] |
juliettaylorswift@gmail.com
|
ff45f6e59dc0d234c40145ed2efc22aa14096a08
|
e1883fb6506f989b1ee805388aa5a1ebd1d511b7
|
/models/keras/inception_v3.py
|
10c9edbeae691ff71ba7b82d65bffe587cca8f23
|
[] |
no_license
|
bellamkondaprakash/praveen-eyeem
|
47d8a9cc5b5df6447bf09a3efd962aacaa09c030
|
6409f0fc442ec60ba50d4b6147b3b7154694dd20
|
refs/heads/master
| 2020-03-23T19:34:20.533413
| 2017-02-10T10:10:39
| 2017-02-10T10:10:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,265
|
py
|
# https://groups.google.com/forum/#!topic/keras-users/rRdL01zGKi4
# https://github.com/Lasagne/Recipes/blob/master/modelzoo/inception_v3.py
# https://github.com/fchollet/keras/issues/1321#issuecomment-166576492
import numpy as np
import sys, os
import yaml
import json
import h5py
from keras.optimizers import SGD, RMSprop, Adagrad, Adam
from keras import backend as K
from keras.layers.core import Merge
from keras.models import Sequential, Graph
from keras.layers.normalization import BatchNormalization
from keras.layers.core import Activation, Dense, Reshape, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D, AveragePooling2D
from rnd_libs.lib.label_embedding.embedding_io import EmbeddingIO
from rnd_libs.lib.keras.loss_functions import ObjectiveFunctions
# Lookup tables mapping config-file strings to Keras layer/optimizer classes.
layer_dict = {'Convolution2D': Convolution2D, 'ZeroPadding2D': ZeroPadding2D, 'MaxPooling2D': MaxPooling2D, 'BatchNormalization': BatchNormalization, 'Activation': Activation}
pooling_dict = {'max': MaxPooling2D, 'average': AveragePooling2D}
optimizer_dict = {'sgd': SGD, 'rmsprop': RMSprop, 'adam': Adam}
class InceptionV3():
    def __init__(self):
        # Instance is unusable until configure() loads a YAML config.
        self.layer_list = []        # graph node names, in insertion order
        self.config_file = None
        self.io = EmbeddingIO(None)  # logging/IO helper
        self.init = False            # True once configuration succeeded
        self.cfgs = None             # parsed YAML settings
        self.loss_functions = ObjectiveFunctions()
def configure(self, config_file):
self.config_file = config_file
self.init = False
if not os.path.exists(self.config_file):
self.io.print_error('Could not find config file for InceptionV3 {0}'.format(self.config_file))
self.init = False
return
pfile = open(self.config_file, 'r')
self.cfgs = yaml.load(pfile)
pfile.close()
self.init = True
def add_to_graph(self, *args, **kwargs):
self.model.add_node(*args, **kwargs)
self.last_added_node = kwargs['name']
self.layer_list.append(kwargs['name'])
return kwargs['name']
    def add_bn_conv_layer(self, *args, **kwargs):
        """Add a conv block: optional zero-pad -> conv -> batch-norm -> ReLU.

        ``kwargs`` must carry 'name' and 'input'; an optional 'padding'
        tuple inserts a ZeroPadding2D first. Returns the name of the final
        (activation) node so callers can chain blocks.
        """
        layer_name = kwargs['name']
        input_layer = kwargs['input']
        del kwargs['name']
        del kwargs['input']
        if 'padding' in kwargs:
            layer_name = layer_name + '_pad'
            self.add_to_graph(ZeroPadding2D(padding=kwargs['padding']), name=layer_name, input=input_layer)
            input_layer = layer_name
            del kwargs['padding']
        # CONV with linear activation by default
        layer_name = layer_name + '_conv'
        self.add_to_graph(Convolution2D(*args, **kwargs), name=layer_name, input=input_layer)
        # Batch normalization added directly on output of a linear layer
        input_layer = layer_name
        layer_name = layer_name + '_bn'
        _ = self.add_to_graph(BatchNormalization(mode=0, epsilon=0.0001, axis=1), name=layer_name, input=input_layer)
        # Standard normalization
        input_layer = layer_name
        layer_name = layer_name + '_nonlin'
        _ = self.add_to_graph(Activation('relu'), name=layer_name, input=input_layer)
        return layer_name
def add_inceptionA(self, input_layer, list_nb_filter, base_name):
l1_1 = self.add_bn_conv_layer(name=base_name + '_l1_1', input=input_layer, nb_filter=list_nb_filter[0][0], nb_row=1, nb_col=1)
l2_1 = self.add_bn_conv_layer(name=base_name + '_l2_1', input=input_layer, nb_filter=list_nb_filter[1][0], nb_row=1, nb_col=1)
l2_2 = self.add_bn_conv_layer(name=base_name + '_l2_2', input=l2_1, nb_filter=list_nb_filter[1][1], nb_row=5, nb_col=5, padding=(2, 2))
l3_1 = self.add_bn_conv_layer(name=base_name + '_l3_1', input=input_layer, nb_filter=list_nb_filter[2][0], nb_row=1, nb_col=1)
l3_2 = self.add_bn_conv_layer(name=base_name + '_l3_2', input=l3_1, nb_filter=list_nb_filter[2][1], nb_row=3, nb_col=3, padding=(1, 1))
l3_3 = self.add_bn_conv_layer(name=base_name + '_l3_3', input=l3_2, nb_filter=list_nb_filter[2][2], nb_row=3, nb_col=3, padding=(1, 1))
l4_1 = self.add_to_graph(ZeroPadding2D(padding=(1, 1)), name=base_name + '_14_1', input=input_layer)
l4_2 = self.add_to_graph(AveragePooling2D(pool_size=(3, 3), strides=(1, 1)), name=base_name + '_14_2', input=l4_1)
l4_3 = self.add_bn_conv_layer(name=base_name + '_l4_3', input=l4_2, nb_filter=list_nb_filter[3][0], nb_row=1, nb_col=1)
self.add_to_graph(Activation("linear"), name=base_name, inputs=[l1_1, l2_2, l3_3, l4_3], merge_mode="concat", concat_axis=1)
self.io.print_info('Added Inception-A {0}'.format(base_name))
# https://github.com/fchollet/keras/issues/391
def add_inceptionB(self, input_layer, list_nb_filter, base_name):
l1_1 = self.add_bn_conv_layer(name=base_name + '_l1_1', input=input_layer, nb_filter=list_nb_filter[0][0], nb_row=3, nb_col=3, subsample=(2, 2))
l2_1 = self.add_bn_conv_layer(name=base_name + '_l2_1', input=input_layer, nb_filter=list_nb_filter[1][0], nb_row=1, nb_col=1)
l2_2 = self.add_bn_conv_layer(name=base_name + '_l2_2', input=l2_1, nb_filter=list_nb_filter[1][1], nb_row=3, nb_col=3, padding=(1, 1))
l2_3 = self.add_bn_conv_layer(name=base_name + '_l2_3', input=l2_2, nb_filter=list_nb_filter[1][2], nb_row=3, nb_col=3, subsample=(2, 2))
l3_1 = self.add_to_graph(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)), name=base_name + '_13_1', input=input_layer)
self.add_to_graph(Activation("linear"), name=base_name, inputs=[l1_1, l2_3, l3_1], merge_mode="concat", concat_axis=1)
self.io.print_info('Added Inception-B {0}'.format(base_name))
# https://github.com/fchollet/keras/issues/391
def add_inceptionC(self, input_layer, list_nb_filter, base_name):
l1_1 = self.add_bn_conv_layer(name=base_name + '_l1_1', input=input_layer, nb_filter=list_nb_filter[0][0], nb_row=1, nb_col=1)
l2_1 = self.add_bn_conv_layer(name=base_name + '_l2_1', input=input_layer, nb_filter=list_nb_filter[1][0], nb_row=1, nb_col=1)
l2_2 = self.add_bn_conv_layer(name=base_name + '_l2_2', input=l2_1, nb_filter=list_nb_filter[1][1], nb_row=1, nb_col=7, padding=(0, 3))
l2_3 = self.add_bn_conv_layer(name=base_name + '_l2_3', input=l2_2, nb_filter=list_nb_filter[1][2], nb_row=7, nb_col=1, padding=(3, 0))
l3_1 = self.add_bn_conv_layer(name=base_name + '_l3_1', input=input_layer, nb_filter=list_nb_filter[2][0], nb_row=1, nb_col=1)
l3_2 = self.add_bn_conv_layer(name=base_name + '_l3_2', input=l3_1, nb_filter=list_nb_filter[2][1], nb_row=7, nb_col=1, padding=(3, 0))
l3_3 = self.add_bn_conv_layer(name=base_name + '_l3_3', input=l3_2, nb_filter=list_nb_filter[2][2], nb_row=1, nb_col=7, padding=(0, 3))
l3_4 = self.add_bn_conv_layer(name=base_name + '_l3_4', input=l3_3, nb_filter=list_nb_filter[2][3], nb_row=7, nb_col=1, padding=(3, 0))
l3_5 = self.add_bn_conv_layer(name=base_name + '_l3_5', input=l3_4, nb_filter=list_nb_filter[2][4], nb_row=1, nb_col=7, padding=(0, 3))
l4_1 = self.add_to_graph(ZeroPadding2D(padding=(1, 1)), name=base_name + '_14_1', input=input_layer)
l4_2 = self.add_to_graph(AveragePooling2D(pool_size=(3, 3), strides=(1, 1)), name=base_name + '_14_2', input=l4_1)
l4_3 = self.add_bn_conv_layer(name=base_name + '_l4_3', input=l4_2, nb_filter=list_nb_filter[3][0], nb_row=1, nb_col=1)
self.add_to_graph(Activation("linear"), name=base_name, inputs=[l1_1, l2_3, l3_5, l4_3], merge_mode="concat", concat_axis=1)
self.io.print_info('Added Inception-C {0}'.format(base_name))
# https://github.com/fchollet/keras/issues/391
def add_inceptionD(self, input_layer, list_nb_filter, base_name):
l1_1 = self.add_bn_conv_layer(name=base_name + '_l1_1', input=input_layer, nb_filter=list_nb_filter[0][0], nb_row=1, nb_col=1)
l1_2 = self.add_bn_conv_layer(name=base_name + '_l1_2', input=l1_1, nb_filter=list_nb_filter[0][1], nb_row=3, nb_col=3, subsample=(2, 2))
l2_1 = self.add_bn_conv_layer(name=base_name + '_l2_1', input=input_layer, nb_filter=list_nb_filter[1][0], nb_row=1, nb_col=1)
l2_2 = self.add_bn_conv_layer(name=base_name + '_l2_2', input=l2_1, nb_filter=list_nb_filter[1][1], nb_row=1, nb_col=7, padding=(0, 3))
l2_3 = self.add_bn_conv_layer(name=base_name + '_l2_3', input=l2_2, nb_filter=list_nb_filter[1][2], nb_row=7, nb_col=1, padding=(3, 0))
l2_4 = self.add_bn_conv_layer(name=base_name + '_l2_4', input=l2_3, nb_filter=list_nb_filter[1][2], nb_row=3, nb_col=3, subsample=(2, 2))
l3_1 = self.add_to_graph(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)), name=base_name + '_13_1', input=input_layer)
self.add_to_graph(Activation("linear"), name=base_name, inputs=[l1_2, l2_4, l3_1], merge_mode="concat", concat_axis=1)
self.io.print_info('Added Inception-D {0}'.format(base_name))
# https://github.com/fchollet/keras/issues/391
def add_inceptionE(self, input_layer, list_nb_filter, base_name, pool_mode):
l1_1 = self.add_bn_conv_layer(name=base_name + '_l1_1', input=input_layer, nb_filter=list_nb_filter[0][0], nb_row=1, nb_col=1)
l2_1 = self.add_bn_conv_layer(name=base_name + '_l2_1', input=input_layer, nb_filter=list_nb_filter[1][0], nb_row=1, nb_col=1)
l2_2a = self.add_bn_conv_layer(name=base_name + '_l2_2a', input=l2_1, nb_filter=list_nb_filter[1][1], nb_row=1, nb_col=3, padding=(0, 1))
l2_2b = self.add_bn_conv_layer(name=base_name + '_l2_2b', input=l2_1, nb_filter=list_nb_filter[1][1], nb_row=3, nb_col=1, padding=(1, 0))
l3_1 = self.add_bn_conv_layer(name=base_name + '_l3_1', input=input_layer, nb_filter=list_nb_filter[2][0], nb_row=1, nb_col=1)
l3_2 = self.add_bn_conv_layer(name=base_name + '_l3_2', input=l3_1, nb_filter=list_nb_filter[2][1], nb_row=3, nb_col=3, padding=(1, 1))
l3_3a = self.add_bn_conv_layer(name=base_name + '_l3_3a', input=l3_2, nb_filter=list_nb_filter[2][2], nb_row=1, nb_col=3, padding=(0, 1))
l3_3b = self.add_bn_conv_layer(name=base_name + '_l3_3b', input=l3_2, nb_filter=list_nb_filter[2][3], nb_row=3, nb_col=1, padding=(1, 0))
l4_1 = self.add_to_graph(ZeroPadding2D(padding=(1, 1)), name=base_name + '_14_1', input=input_layer)
l4_2 = self.add_to_graph(pooling_dict[pool_mode](pool_size=(3, 3), strides=(1, 1)), name=base_name + '_14_2', input=l4_1)
l4_3 = self.add_bn_conv_layer(name=base_name + '_l4_3', input=l4_2, nb_filter=list_nb_filter[3][0], nb_row=1, nb_col=1)
self.add_to_graph(Activation("linear"), name=base_name, inputs=[l1_1, l2_2a, l2_2b, l3_3a, l3_3b, l4_3], merge_mode="concat", concat_axis=1)
self.io.print_info('Added Inception-E {0}'.format(base_name))
# https://github.com/fchollet/keras/issues/391
def define(self):
try:
self.model = Graph()
self.model.add_input(name='input', input_shape=(self.cfgs['n_channels'], self.cfgs['image_height'], self.cfgs['image_width']))
#
# Part of the network which is defined in the config file should move here
#
cgfs_nodes = self.cfgs['nodes']
for node in cgfs_nodes:
if not node['type'] == 'Activation':
self.add_to_graph(layer_dict[node['type']](**node['parameter']), name=node['name'], input=node['input'])
else:
self.add_to_graph(layer_dict[node['type']](node['parameter']['mode']), name=node['name'], input=node['input'])
self.io.print_info('Added {1}:{0}'.format(node['type'], node['name']))
self.add_inceptionA(input_layer=self.last_added_node, list_nb_filter=((64,), (48, 64), (64, 96, 96), (32,)), base_name='mixed')
self.add_inceptionA(input_layer=self.last_added_node, list_nb_filter=((64,), (48, 64), (64, 96, 96), (64,)), base_name='mixed_1')
self.add_inceptionA(input_layer=self.last_added_node, list_nb_filter=((64,), (48, 64), (64, 96, 96), (64,)), base_name='mixed_2')
self.add_inceptionB(input_layer=self.last_added_node, list_nb_filter=((384,), (64, 96, 96)), base_name='mixed_3')
self.add_inceptionC(input_layer=self.last_added_node, list_nb_filter=((192,), (128, 128, 192), (128, 128, 128, 128, 192), (192,)), base_name='mixed_4')
self.add_inceptionC(input_layer=self.last_added_node, list_nb_filter=((192,), (160, 160, 192), (160, 160, 160, 160, 192), (192,)), base_name='mixed_5')
self.add_inceptionC(input_layer=self.last_added_node, list_nb_filter=((192,), (160, 160, 192), (160, 160, 160, 160, 192), (192,)), base_name='mixed_6')
self.add_inceptionC(input_layer=self.last_added_node, list_nb_filter=((192,), (192, 192, 192), (192, 192, 192, 192, 192), (192,)), base_name='mixed_7')
self.add_inceptionD(input_layer=self.last_added_node, list_nb_filter=((192, 320), (192, 192, 192, 192)), base_name='mixed_8')
self.add_inceptionE(input_layer=self.last_added_node, list_nb_filter=((320,), (384, 384, 384), (448, 384, 384, 384), (192,)), pool_mode='average', base_name='mixed_9')
self.add_inceptionE(input_layer=self.last_added_node, list_nb_filter=((320,), (384, 384, 384), (448, 384, 384, 384), (192,)), pool_mode='max', base_name='mixed_10')
self.add_to_graph(AveragePooling2D(pool_size=(5, 5)), name='pool3', input=self.last_added_node)
self.io.print_info('Added {1}:{0}'.format('AveragePooling', self.last_added_node))
self.add_to_graph(Flatten(), name='flatten', input='pool3')
self.io.print_info('Added {1}:{0}'.format('Flatten', self.last_added_node))
self.add_to_graph(Dense(self.cfgs['nb_classes']), name='softmax', input='flatten')
self.io.print_info('Added {1}:{0}'.format('Dense', self.last_added_node))
self.add_to_graph(Activation(self.cfgs['activation']), name='prob', input='softmax')
# Output to the Graph
self.model.add_output(name='output', input='prob')
if self.cfgs['model_weights_file'] is not None:
self.init_from_this()
except Exception as err:
self.io.print_error('Error configuring the model, {0}'.format(err))
self.init = False
return
self.init = True
def init_from_this(self):
weights_file = self.cfgs['model_weights_file']
if not weights_file == 'None':
self.load_weights(weights_file)
self.io.print_info('Weights Initalized from {0}'.format(weights_file))
def load_weights(self, filepath):
if filepath.endswith('.npz'):
pfile = open(filepath, 'r')
graph = np.load(pfile)['graph'].item()
for node_name, weights in graph.items():
if node_name in self.cfgs['ignore_while_loading']:
self.io.print_warning('Ignoring weights from {0}'.format(node_name))
continue
self.io.print_info('Transfering parameters from {0}'.format(node_name))
self.model.nodes[node_name].set_weights(weights)
pfile.close()
elif filepath.endswith('.hdf5'):
self.model.load_weights(filepath)
else:
self.io.print_error('Unknown model weights file {}'.format(filepath))
self.io.print_info(self.model.nodes['softmax'].get_config())
def setup_loss_function(self, w):
self.loss_functions.set_weights(w)
def update_optimizer(self, decay_rate=0.95):
lr = self.opt.get_config()['lr']
self.opt.lr = K.variable(lr * np.exp(-decay_rate))
lr = self.opt.get_config()['lr']
return lr
def compile(self, compile_cfgs):
try:
self.opt = optimizer_dict[compile_cfgs['optimizer']](lr=compile_cfgs['lr'], epsilon=compile_cfgs['epsilon'])
self.model.compile(loss={'output': self.loss_functions.dict[compile_cfgs['loss']]}, optimizer=self.opt)
except Exception as e:
self.io.print_error('Error configuring the model, {0}'.format(e))
self.init = False
return
self.init = True
|
[
"praveen@eyeem.com"
] |
praveen@eyeem.com
|
24a33ed287568028e5739b87f65ed322ba179399
|
501238f0462ec2bb65534b6258b0559c710c08ed
|
/sftp_to_db.py
|
bbcc53bf820383d0193b035276d993186bc7eb93
|
[] |
no_license
|
nfisher20/sftp-to-db
|
1eae08e924164e9ff7b072ff69749d37ae199017
|
bfb9143e00fdef7fd63c0477fd293d4dc504b9f5
|
refs/heads/main
| 2023-02-21T20:41:07.437427
| 2021-01-18T16:44:37
| 2021-01-18T16:44:37
| 329,975,542
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,836
|
py
|
import paramiko
import datetime
import pandas as pd
import numpy as np
import sqlalchemy
import settings
# Fetch a fixed-width carrier-record export over SFTP, parse it into a
# DataFrame, format derived columns, and append only previously unseen
# processors to the CarrierRecord table.
hostkey = None  # NOTE(review): host key is not verified -- confirm this is acceptable for this server
t = paramiko.Transport((settings.myHostname, settings.myPort))
t.connect(username=settings.myUsername, password=settings.myPassword, hostkey=hostkey)
sftp = paramiko.SFTPClient.from_transport(t)
# Read every line of the remote text file to be inserted into the database.
fopen = sftp.open(settings.sftppath, 'r')
txtfile = fopen.readlines()
fopen.close()
# Close the SFTP channel before its underlying transport (the original code
# closed the transport first, leaving the SFTP client dangling).
sftp.close()
t.close()
# Keep only carrier records: lines whose record-type indicator (first
# character) is '1'.  The guard skips any blank lines defensively.
filteredtxtfile = [line for line in txtfile if line and line[0] == '1']
# Fixed-width field layout: header -> [start, end) character indexes,
# as the .csv has no delimiters.
carrierrecord = {'RECORDID' : [0,1],
                'PROCESSORNO' : [1,11],
                'BATCHNO' : [11,18],
                'PROCESSORNAME' : [18,43],
                'PROCESSORADDRESS' : [43,63],
                'PROCESSORCITY' : [63,81],
                'PROCESSORSTATE' : [81,83],
                'PROCESSORZIP' : [83,88],
                'PROCESSORPHONE' : [88,98],
                'CREATIONDATE' : [98,106],
                'CREATIONMM' : [98,100],
                'CREATIONDD' : [100,102],
                'CREATIONCC' : [102,104],
                'CREATIONYY' : [104,106],
                'ZIPEXPAND' : [106,110],
                'DESTCUSTOMER' : [110,125],
                'DESTPROCNO' : [125,135],
                'FILLER2' : [135,5000]
                }
# Slice every record into its fields: one list of values per column.
columns = {key: [line[start:end] for line in filteredtxtfile]
           for key, (start, end) in carrierrecord.items()}
# Convert parsed columns to a DataFrame of strings.
df = pd.DataFrame.from_dict(columns, dtype=str)

def _format_phone(raw):
    """Render a 10+ digit numeric string as (NNN)NNN-NNNN; flag anything else."""
    if len(raw) >= 10 and set(raw).issubset(set('.0123456789')):
        return '(' + raw[:3] + ')' + raw[3:6] + '-' + raw[6:10]
    return 'Phone number not in record'

# Format phone numbers for display.
df['PROCESSORPHONE'] = df['PROCESSORPHONE'].astype(str).apply(_format_phone)
# Parse the MMDDCCYY creation date; unparseable values are left unchanged.
df['CREATIONDATE'] = pd.to_datetime(df['CREATIONDATE'], format='%m%d%Y', errors='ignore')
sql = """
SELECT PROCESSORNAME
FROM CarrierRecord
"""
engine = sqlalchemy.create_engine('mssql+pyodbc://@' + settings.servername + '/' + settings.databasename + '?trusted_connection=yes&driver=ODBC+Driver+13+for+SQL+Server')
sqltable = pd.read_sql_query(sql, engine)
processornames = sqltable.PROCESSORNAME.unique()
# Drop rows whose processor already exists in the table.  Vectorized isin
# replaces the original O(rows x processors) iterrows/drop loop.
df = df[~df['PROCESSORNAME'].isin(processornames)]
# Write the remaining (new) records to the database.
df.to_sql('CarrierRecord', engine, if_exists='append', index=False)
|
[
"noreply@github.com"
] |
nfisher20.noreply@github.com
|
f2df6fd91c81c440ba467cea676610defdc3838f
|
0a3efe67ef4c69317154276c0094cc237da7f8ca
|
/InsertionSort.py
|
0012b34ee62eb8f28872b3ff897dc40607c48563
|
[] |
no_license
|
GlamorousCar/Algorithm
|
c707676cb75378da8f0fb349da4016d946df43cd
|
04ed7c47ac3d60a057ee2d7ed9473f283a405857
|
refs/heads/master
| 2023-01-24T00:33:42.038830
| 2020-12-04T13:28:17
| 2020-12-04T13:28:17
| 258,520,403
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 381
|
py
|
import random
# Bounds [a, b] and size s for the random test data.
a, b, s = 0, 100, 10
# s uniform random integers in [a, b].
array = [random.randint(a, b) for _ in range(s)]
def InsertionSort(array):
    """Sort ``array`` in place in ascending order and return it.

    Classic insertion sort: grow a sorted prefix, shifting larger elements
    right to make room for each new key.

    Fixes two off-by-one bugs in the original: the scan must start at
    index 1 (``range(2, ...)`` left index 1 unsorted), and the shift loop
    must allow ``k`` to reach -1 (``while k > 0`` could never place the
    key at index 0).
    """
    for i in range(1, len(array)):
        key = array[i]
        k = i - 1
        # Shift everything greater than key one slot to the right.
        while k >= 0 and array[k] > key:
            array[k + 1] = array[k]
            k = k - 1
        array[k + 1] = key
    return array
print(array)  # data before sorting
print(InsertionSort(array))  # result returned by the in-place sort
|
[
"noreply@github.com"
] |
GlamorousCar.noreply@github.com
|
a766a6bd0b97decd8a899aa1e066a7c8774ecdbe
|
5f8398e336790fa833a97139d9e4afcb0eea4701
|
/Dinner_Party/submit.py
|
2c305511537a013bef3d756faddefd6be4dae108
|
[] |
no_license
|
Achva-Kl/CSA-2020
|
61c960d4eafc7b15143789c9c1c7abf5e60f489d
|
97c6aae671716f29d3e38eeae7cf2f75810273a6
|
refs/heads/main
| 2023-03-28T12:32:46.557656
| 2021-03-25T13:53:38
| 2021-03-25T13:53:38
| 350,474,407
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,297
|
py
|
#! /usr/bin/python3
### run the file with the solution ###
import sys
import itertools
from hashlib import sha256
from Crypto.Cipher import ARC4
PROOF_OF_WORK_DIFFICULTY = 25 #bit
def proof_of_work(buf, n):
    """Return the first printable prefix whose sha256(prefix + buf) ends in n zero bits.

    Candidates are tuples of byte values from 0x30-0x7D, searched shortest
    first and lexicographically within each length.

    Bug fix: the original incremented its length counter inside the
    candidate loop, so after the length-1 pass the search length jumped
    straight to 95 and the minimal prefix (the one the flag was encrypted
    against) could never be returned.
    """
    for length in itertools.count():
        for prefix in itertools.product(range(0x30, 0x7E), repeat=length):
            h = sha256()
            h.update(bytes(prefix))
            h.update(buf)
            # bin() drops leading zeros only, which cannot affect a trailing-bits check.
            if bin(int(h.hexdigest(), 16)).endswith("0" * n):
                return prefix
def recover_flag(solution, e_flag):
    """Re-derive the RC4 key from the solution and its proof-of-work, then decrypt the flag."""
    prefix = proof_of_work(solution.encode("utf8"), PROOF_OF_WORK_DIFFICULTY)
    prefix_text = bytes(prefix).decode("utf8")
    # Key layout must mirror the encrypting side exactly.
    key_text = "RC4KEYFILLER|{0}|{1}".format(solution, prefix_text)
    return ARC4.new(key_text.encode("utf8")).decrypt(e_flag)
if __name__ == "__main__":
    # Usage: python3 submit.py <word1> <word2> ...  (the candidate solution)
    solution = " ".join(sys.argv[1:])
    with open("flag.txt.enc","rb") as fh:
        e_flag = fh.read()
    flag = recover_flag(solution, e_flag)
    try:
        # A wrong solution produces RC4 garbage, which usually fails UTF-8 decoding.
        flag = flag.decode("utf8")
        if flag.startswith("CSA"):
            print("Congratulations!")
            print("Your flag is: ", flag)
        else:
            print("Sorry, try again")
    except:  # NOTE(review): bare except also swallows KeyboardInterrupt -- consider except Exception
        print("Sorry, try again")
# python3 submit.py Germain scales Franklin abacus Curie telescope Lovelace pencil Noether laptop
|
[
"achvakk1@gmail.com"
] |
achvakk1@gmail.com
|
9d6730d9edce0a074813fd0fe10fd9e3cb40551e
|
4c8ccd889db16011835e970b8c4d6b93036595a0
|
/realestate/decorators.py
|
685cf4705c486e6ddcf9139f0437c15be0996682
|
[] |
no_license
|
arjun312-coder/Real-Estate-Django-Project
|
a35a09fb6eb366da12b1cfd3ad51fa51630d9003
|
c149d3c756200883d27f048e53a01738fb1169a0
|
refs/heads/master
| 2022-04-24T15:08:12.107957
| 2020-04-24T12:24:43
| 2020-04-24T12:24:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 829
|
py
|
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.decorators import user_passes_test
def agent_required(function=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url='login'):
    """View decorator requiring an active agent user.

    Works both bare (``@agent_required``) and with arguments
    (``@agent_required(login_url=...)``).
    """
    decorator = user_passes_test(
        lambda user: user.is_active and user.is_agent,
        login_url=login_url,
        redirect_field_name=redirect_field_name
    )
    return decorator(function) if function else decorator
def office_required(function=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url='login'):
    """View decorator requiring an active office user.

    Works both bare (``@office_required``) and with arguments
    (``@office_required(login_url=...)``).
    """
    decorator = user_passes_test(
        lambda user: user.is_active and user.is_office,
        login_url=login_url,
        redirect_field_name=redirect_field_name
    )
    return decorator(function) if function else decorator
|
[
"ravi16iiitg@gmail.com"
] |
ravi16iiitg@gmail.com
|
ed71559e27b6930a355acfdb912e7b85eec752c8
|
5be9cf2da0de61fdd4510c5a28333298a8fe1843
|
/questionnaire/migrations/0008_auto_20190303_1715.py
|
46a9ddd01f2994b4bc152f99f373e1944575e11c
|
[] |
no_license
|
jordanSev/CS3398-Ferengi-Finaglers-S2019
|
e85bd6f9f6a9756a8545a95faece240746d926db
|
2a1c64a2a1680913a68e97bbba980a4fde9a603e
|
refs/heads/master
| 2020-07-09T17:27:44.921035
| 2019-05-02T03:10:20
| 2019-05-02T03:10:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,674
|
py
|
# Generated by Django 2.1.7 on 2019-03-03 23:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds the ResponseIntance model and retargets two Questions fields.

    Auto-generated by Django; applied migrations must not be hand-edited.
    NOTE(review): 'ResponseIntance' looks like a typo for 'ResponseInstance',
    but renaming requires a new migration, not an edit here.
    """
    dependencies = [
        ('questionnaire', '0007_auto_20190302_0112'),
    ]
    operations = [
        migrations.CreateModel(
            name='ResponseIntance',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user', models.CharField(max_length=64, verbose_name='User who answered the question')),
                ('answer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='questionnaire.Answers', verbose_name='Question that was answered')),
            ],
        ),
        migrations.AlterField(
            model_name='questions',
            name='answertype',
            field=models.CharField(choices=[('SINGLE', 'Can only pick one answer'), ('MULTIPLE', 'Can pick multiple answers'), ('SORTABLE', 'The answers are sortable or rateable')], default='SINGLEANSWER', max_length=64, verbose_name='Answer Type'),
        ),
        migrations.AlterField(
            model_name='questions',
            name='category',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='questionnaire.Categories', verbose_name='Question Category'),
        ),
        migrations.AddField(
            model_name='responseintance',
            name='question',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='questionnaire.Questions', verbose_name='Question that was answered'),
        ),
    ]
|
[
"kcp36@txstate.edu"
] |
kcp36@txstate.edu
|
cd53046a6b6ee2f9f16ab5e7e54cb942ada274d2
|
19ee33b2aee85046a46319b1e3c434b913228e98
|
/text_process.py
|
b461f6c613a40636353cc0369cf2403a89936fe2
|
[] |
no_license
|
wataru-masuda/text-base-demogra
|
bba9241607fd356b09deb602d41ef51b3597d717
|
206c5603aa8a150726b0537bd477b8cf5a8e80bd
|
refs/heads/master
| 2021-06-26T08:05:58.387106
| 2020-02-12T07:36:52
| 2020-02-12T07:36:52
| 232,492,427
| 0
| 0
| null | 2021-06-18T22:12:29
| 2020-01-08T06:16:28
|
Python
|
UTF-8
|
Python
| false
| false
| 1,305
|
py
|
# -*- coding: utf-8 -*-
import requests
import sys
from gensim.corpora.dictionary import Dictionary
from bs4 import BeautifulSoup
import MeCab
def fetch_contents_from_url(url, encoding="UTF-8", use_domain=False, timeout=3.0):
    """Fetch a page and return its title plus description meta content, newline-joined.

    When ``use_domain`` is true, ``url`` is treated as a bare domain and
    'http://' is prepended.  Returns None when the request or parse fails,
    when the first description meta tag is found but the page has no
    <title>, or (implicitly) when no description meta tag exists at all.
    """
    try:
        res = requests.get("http://"+url, timeout=timeout) if use_domain else requests.get(url, timeout=timeout)
    except Exception:
        print("GET request failed")
        return None
    res.encoding = encoding
    try:
        soup = BeautifulSoup(res.text, 'html.parser')
    except Exception:
        print("failed to parse html text")
        return None
    # Only the first description meta tag is ever inspected: both branches
    # below return on the loop's first iteration.
    for meta_tag in soup.find_all('meta', attrs={'name': 'description'}):
        if soup.title:
            return str(soup.title.string) + "\n" + str(meta_tag.get('content'))
        else:
            return None
def doc2word_list(text):
    """Tokenize Japanese ``text`` with MeCab and return a list of content words.

    Keeps nouns, adjectives and verbs; skips suru-verb nouns and numerals.
    Requires the mecab-ipadic-neologd dictionary at the hard-coded path.
    """
    m = MeCab.Tagger("-d /usr/local/lib/mecab/dic/mecab-ipadic-neologd")
    word_list = []
    for row in m.parse(text).split("\n"):
        # Surface form is the first whitespace-separated field of each line.
        word = row.split()[0]
        if word == "EOS":
            break
        # Part-of-speech features are the last field of the line.
        pos = row.split()[-1]
        if "ๅ่ฉ-ใตๅคๆฅ็ถ" in pos or "ๆฐ" in pos:
            continue
        if "ๅ่ฉ" in pos or "ๅฝขๅฎน่ฉ" in pos or "ๅ่ฉ" in pos:
            word_list.append(word)
    return word_list
|
[
"wataru-masuda@GD0033.local"
] |
wataru-masuda@GD0033.local
|
949cb8f555f0b92cd8efa118c8b092fa031c75dc
|
915413d79c69f5a22d7dc1fc88dadd70eedf1e10
|
/instance/config.py
|
8f76fd76f1427cf4e5bfacbdbbd54694206c588a
|
[
"MIT"
] |
permissive
|
Dave-mash/StackOverflow-lite-API
|
258db5a1800f94b6f996a49a999727fcbbfe1601
|
10f86ffe732d3cffe2b16323184780f4c9f4fdc4
|
refs/heads/develop
| 2020-04-11T23:45:21.877576
| 2019-01-02T10:09:54
| 2019-01-02T10:09:54
| 162,178,069
| 0
| 0
|
MIT
| 2019-01-02T10:09:55
| 2018-12-17T19:13:02
|
Python
|
UTF-8
|
Python
| false
| false
| 588
|
py
|
import os
class Config(object):
    """Base configuration shared by every environment."""
    DEBUG = False
    TESTING = False
    SECRET = os.getenv('SECRET')  # read once from the environment at import time
class Development(Config):
    """Development environment: debug and testing aids switched on."""
    TESTING = True
    DEBUG = True
class Testing(Config):
    """Testing environment: runs against a separate test database."""
    DEBUG = True
    TESTING = True
class Production(Config):
    """Production environment: all debug aids off."""
    TESTING = False
    DEBUG = False
# Lookup used by the app factory to select a configuration class by name.
app_config = {
    'development': Development,
    'testing': Testing,
    'production': Production
}
|
[
"macharia3041@gmail.com"
] |
macharia3041@gmail.com
|
90f1442e8dd5d3a5cce13e0c94173151d58984a3
|
559b9cd3a7ed865cd1955c62b4835207e30e3ff1
|
/tests/test_run_process.py
|
51bbd827cc962d872a08974fa55f913a7e211844
|
[
"MIT"
] |
permissive
|
affankingkhan/watchgod
|
423f7dc782342d6e4e6bf8001378c9c980913ffe
|
3d5558bb04b77488abe43cb0a18e46212cded72b
|
refs/heads/master
| 2020-09-09T11:29:39.896724
| 2019-11-13T12:02:39
| 2019-11-13T12:02:39
| 221,435,053
| 0
| 0
|
MIT
| 2019-11-13T10:39:28
| 2019-11-13T10:39:27
| null |
UTF-8
|
Python
| false
| false
| 2,960
|
py
|
from asyncio import Future
from watchgod import arun_process, run_process
from watchgod.main import _start_process
class FakeWatcher:
    """Stub watcher: first check reports a change, second reports none, the
    third stops the loop (StopAsyncIteration for async paths, otherwise
    KeyboardInterrupt)."""

    def __init__(self, path):
        self._async = 'async' in path
        self._check = 0
        self.files = [1, 2, 3]

    def check(self):
        self._check += 1
        if self._check == 1:
            return {'x'}
        if self._check == 2:
            return set()
        if self._async:
            raise StopAsyncIteration
        raise KeyboardInterrupt
class FakeProcess:
    """Stub of multiprocessing.Process with canned liveness/exitcode/pid."""

    def __init__(self, is_alive=True, exitcode=1, pid=123):
        self._is_alive, self.exitcode, self.pid = is_alive, exitcode, pid

    def is_alive(self):
        return self._is_alive

    def join(self, wait):
        # Joining a fake process is a no-op.
        pass
def test_alive_terminates(mocker):
    """A still-alive process is killed once before the single reload."""
    start_proc = mocker.patch('watchgod.main._start_process')
    start_proc.return_value = FakeProcess()
    kill = mocker.patch('watchgod.main.os.kill')
    reloads = run_process('/x/y/z', object(), watcher_cls=FakeWatcher, debounce=5, min_sleep=1)
    assert reloads == 1
    assert start_proc.call_count == 2
    assert kill.call_count == 1
def test_dead_callback(mocker):
    """An already-dead process is not killed; the callback gets the change set."""
    start_proc = mocker.patch('watchgod.main._start_process')
    start_proc.return_value = FakeProcess(is_alive=False)
    kill = mocker.patch('watchgod.main.os.kill')
    callback = mocker.MagicMock()
    reloads = run_process('/x/y/z', object(), watcher_cls=FakeWatcher, callback=callback, debounce=5, min_sleep=1)
    assert reloads == 1
    assert start_proc.call_count == 2
    assert kill.call_count == 0
    callback.assert_called_once_with({'x'})
def test_alive_doesnt_terminate(mocker):
    """A process that ignores the first signal (exitcode None) gets a second kill."""
    start_proc = mocker.patch('watchgod.main._start_process')
    start_proc.return_value = FakeProcess(exitcode=None)
    kill = mocker.patch('watchgod.main.os.kill')
    assert run_process('/x/y/z', object(), watcher_cls=FakeWatcher, debounce=5, min_sleep=1) == 1
    assert start_proc.call_count == 2
    assert kill.call_count == 2
def test_start_process(mocker):
    """_start_process forwards target/args/kwargs straight to multiprocessing.Process."""
    process_cls = mocker.patch('watchgod.main.Process')
    target = object()
    _start_process(target, (1, 2, 3), {})
    process_cls.assert_called_once_with(target=target, args=(1, 2, 3), kwargs={})
async def test_async_alive_terminates(mocker):
    """Async variant: one reload, one kill, callback awaited once with the change set."""
    start_proc = mocker.patch('watchgod.main._start_process')
    start_proc.return_value = FakeProcess()
    kill = mocker.patch('watchgod.main.os.kill')
    done = Future()
    done.set_result(1)
    callback = mocker.MagicMock(return_value=done)
    reloads = await arun_process('/x/y/async', object(), watcher_cls=FakeWatcher, callback=callback, debounce=5, min_sleep=1)
    assert reloads == 1
    assert start_proc.call_count == 2
    assert kill.call_count == 1
    callback.assert_called_once_with({'x'})
|
[
"s@muelcolvin.com"
] |
s@muelcolvin.com
|
211f94f6fe84de597390b4eea76d59f256bbd5b7
|
b31bbc632c6895170e5ac4bb17ec0a25cb024466
|
/Neural_Network/schedule_LR.py
|
62b1656548205a479eac802b9281071cc46b6a83
|
[] |
no_license
|
lamnguyen-mltd/shuffling
|
1b8bd88f47ffbd436d22a07a5718e7b1b4119da3
|
e81eb35650dbf394d6892a282ba5c964c4316cbf
|
refs/heads/master
| 2023-06-02T12:57:35.782370
| 2021-06-18T01:35:18
| 2021-06-18T01:35:18
| 377,664,543
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 182
|
py
|
"""
Different learning rates
"""
import math
def constant(eta):
    """Return a step-size schedule that always yields the fixed rate ``eta``."""
    return lambda step: eta
def diminishing(gamma, alpha):
    """Return a decaying schedule: gamma / (1 + t) ** alpha at step t."""
    def schedule(t):
        return gamma / (1 + t) ** alpha
    return schedule
|
[
"lmn214@lehigh.edu"
] |
lmn214@lehigh.edu
|
39530fcac484022b59d18d7d4d6e79231b44069d
|
f80ef3a3cf859b13e8af8433af549b6b1043bf6e
|
/pyobjc-framework-DiscRecording/PyObjCTest/test_drburn.py
|
b073fb67b06ed44f8785845ab704f1fd5d39d6ed
|
[
"MIT"
] |
permissive
|
ronaldoussoren/pyobjc
|
29dc9ca0af838a56105a9ddd62fb38ec415f0b86
|
77b98382e52818690449111cd2e23cd469b53cf5
|
refs/heads/master
| 2023-09-01T05:15:21.814504
| 2023-06-13T20:00:17
| 2023-06-13T20:00:17
| 243,933,900
| 439
| 49
| null | 2023-06-25T02:49:07
| 2020-02-29T08:43:12
|
Python
|
UTF-8
|
Python
| false
| false
| 2,115
|
py
|
import DiscRecording
from PyObjCTools.TestSupport import TestCase
class TestDRBurn(TestCase):
    """Binding checks for DiscRecording.DRBurn: BOOL bridging and string constants."""
    def testMethods(self):
        # Each getter's result and the matching setter's first argument bridge to BOOL.
        for getter, setter in (
            ('appendable', 'setAppendable_'),
            ('verifyDisc', 'setVerifyDisc_'),
        ):
            self.assertResultIsBOOL(getattr(DiscRecording.DRBurn, getter))
            self.assertArgIsBOOL(getattr(DiscRecording.DRBurn, setter), 0)
    def testConstants(self):
        # Every exported burn-related key/constant must be exposed as a string.
        constant_names = [
            'DRBurnRequestedSpeedKey',
            'DRBurnAppendableKey',
            'DRBurnOverwriteDiscKey',
            'DRBurnVerifyDiscKey',
            'DRBurnCompletionActionKey',
            'DRBurnUnderrunProtectionKey',
            'DRBurnTestingKey',
            'DRSynchronousBehaviorKey',
            'DRBurnFailureActionKey',
            'DRMediaCatalogNumberKey',
            'DRBurnDoubleLayerL0DataZoneBlocksKey',
            'DRBurnStrategyKey',
            'DRBurnStrategyIsRequiredKey',
            'DRCDTextKey',
            'DRBurnCompletionActionEject',
            'DRBurnCompletionActionMount',
            'DRBurnFailureActionEject',
            'DRBurnFailureActionNone',
            'DRBurnStrategyCDTAO',
            'DRBurnStrategyCDSAO',
            'DRBurnStrategyDVDDAO',
            'DRBurnStrategyBDDAO',
            'DRBurnStatusChangedNotification',
        ]
        for name in constant_names:
            self.assertIsInstance(getattr(DiscRecording, name), str)
|
[
"ronaldoussoren@mac.com"
] |
ronaldoussoren@mac.com
|
77eb14d50bb083ecbe2ae6ce90c59fb3c5bfbb00
|
2253dad674431167638a94a028bf2fc16cf6c27a
|
/core/migrations/0001_initial.py
|
ac502e50b632e94dc0fa51291a5933898dd88ef0
|
[] |
no_license
|
tristanmatthew/django-music-collab-tristanmatthew
|
38b32620d77c5551d2a7b24e8b71598312f2fb53
|
473151ccf9301786f2d749075db7d64498cf1271
|
refs/heads/main
| 2023-03-13T00:54:32.948223
| 2021-03-04T00:10:06
| 2021-03-04T00:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,870
|
py
|
# Generated by Django 3.1.7 on 2021-03-01 16:05
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Initial migration: creates a project-level custom User model whose
    # field set mirrors Django's AbstractUser (password, username, flags,
    # group/permission M2Ms) and is managed by the stock UserManager.

    initial = True

    dependencies = [
        # Needed because the groups/user_permissions M2Ms point at auth.Group
        # and auth.Permission.
        ('auth', '0012_alter_user_first_name_max_length'),
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
                'abstract': False,
            },
            managers=[
                # Stock Django manager, so createsuperuser etc. keep working.
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
    ]
|
[
"tristanmatthew@gmail.com"
] |
tristanmatthew@gmail.com
|
151158dc001dab396c79cb4025e6326f17ad263b
|
2d3babcba078a44b7f2c70718e095cd766d9e55e
|
/script/my_codes/digital_classroom.py
|
c795be3d6c4732429f577d40acd4a9bf318e59d4
|
[
"MIT"
] |
permissive
|
ThivakaranThana/AlignedReid-Reproduction-Pytorch
|
6cfdb8be7902b2e6ecc9e518b561916b683e6a29
|
a310de27c74a7fc29963386aa631d09e7cc3eb2f
|
refs/heads/master
| 2020-04-04T13:35:43.923708
| 2019-01-07T16:51:37
| 2019-01-07T16:51:37
| 155,967,554
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,844
|
py
|
import cv2
from aligned_reid.model.Model import Model
from torch.nn.parallel import DataParallel
import torch.optim as optim
import torch
from aligned_reid.utils.utils import load_state_dict
from aligned_reid.utils.utils import set_devices
from torch.autograd import Variable
import numpy as np

# Load the query image and resize it to (width=128, height=256), the input
# size this script feeds to the re-id model.
input_image = cv2.imread('gow_query.jpg')
resized_image = cv2.resize(input_image, (128, 256))
# HWC -> CHW, then add a leading batch dimension of 1.
transposed = resized_image.transpose(2, 0, 1)
test_img = transposed[np.newaxis]

###########
# Models  #
###########
local_conv_out_channels = 128
num_classes = 3
model = Model(local_conv_out_channels=local_conv_out_channels, num_classes=num_classes)
# Model wrapper
model_w = DataParallel(model)
base_lr = 2e-4
weight_decay = 0.0005
optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=weight_decay)
# Bind them together just to save some codes in the following usage.
modules_optims = [model, optimizer]

model_weight_file = '/home/niruhan/AlignedReID-Re-Production-Pytorch/model_weight.pth'
# Map CUDA-saved tensors onto CPU storage so the checkpoint loads on
# machines without a GPU.
map_location = (lambda storage, loc: storage)
sd = torch.load(model_weight_file, map_location=map_location)
load_state_dict(model, sd)
print('Loaded model weights from {}'.format(model_weight_file))

sys_device_ids = (0,)
TVT, TMO = set_devices(sys_device_ids)

old_train_eval_model = model.training
# Set eval mode.
# Force all BN layers to use global mean and variance, also disable dropout.
model.eval()

# NOTE: a dead `ims = np.stack(input_image, axis=0)` (immediately overwritten
# by the next assignment, and stacking the wrong, un-transposed image) was
# removed along with commented-out debug code.
ims = Variable(TVT(torch.from_numpy(test_img).float()))
global_feat, local_feat = model(ims)[:2]
global_feat = global_feat.data.cpu().numpy()
local_feat = local_feat.data.cpu().numpy()

# Restore the model to its old train/eval mode.
model.train(old_train_eval_model)
|
[
"thivakaranthana@gmail.com"
] |
thivakaranthana@gmail.com
|
9ab33f4881e26c8bfa04c708f83dce9807708c20
|
d775692233aaf07d59a814953c4f6166e551822b
|
/myapi/updates/migrations/0001_initial.py
|
1b5e0ebc0326c39915fa38c98b5dfea14dcc4205
|
[] |
no_license
|
tweeks024/django_rest_example
|
ccc10df86477086027decf94fec49f889460c677
|
eb6d217e125b502d43c52a18ca372b68b7cab6f8
|
refs/heads/master
| 2021-04-28T03:06:20.940298
| 2018-03-15T11:24:54
| 2018-03-15T11:24:54
| 122,132,140
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,088
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-02-19 22:27
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import updates.models
class Migration(migrations.Migration):
    # Initial migration for the `updates` app: a single Update model with
    # optional text content and image, timestamps, and an owning user.

    initial = True

    dependencies = [
        # The user FK targets whatever AUTH_USER_MODEL the project configures.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Update',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.TextField(blank=True, null=True)),
                # Upload path is computed by updates.models.upload_update_image.
                ('image', models.ImageField(blank=True, null=True, upload_to=updates.models.upload_update_image)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
[
"tweeks@timetrade.com"
] |
tweeks@timetrade.com
|
dcd3054a1f19fbffeaa4bdb6d4a5120debffe7b3
|
2fad3cec294796205ad4ff5806601165a9ef7ae1
|
/match/migrations/0001_initial.py
|
4ee29f481a2c48eadd4b9ef96c27385ac303c4c2
|
[] |
no_license
|
taro1025/sb.com
|
325c5c4b522926758c2982e4bf30face7943008f
|
d6a6980002c8abb1edf0b10bfa67de8c5f89df4e
|
refs/heads/master
| 2023-04-04T03:03:55.413060
| 2021-04-21T10:15:58
| 2021-04-21T10:15:58
| 323,674,761
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,950
|
py
|
# Generated by Django 3.1.4 on 2020-12-21 17:39
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import match.models
class Migration(migrations.Migration):
    # Initial migration for the `match` app: a large custom User model
    # (mentor/mentee matching, Stripe-style payout fields, Japanese
    # verbose_names), plus Char, Message and BuyingHistory models.
    # NOTE(review): the Japanese verbose_name strings below appear
    # mojibake-garbled in this copy of the file, and a few are split across
    # physical lines; they are preserved byte-identical here.

    initial = True

    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                # Email (not username) is the unique login identifier.
                ('email', models.EmailField(max_length=254, unique=True, verbose_name='email address')),
                ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
                ('my_profile', models.TextField(blank=True, null=True, verbose_name='่ชๅทฑ็ดนไป')),
                ('user_img', models.ImageField(blank=True, upload_to='', verbose_name='ใใญใ็ปๅ')),
                ('menter', models.IntegerField(choices=[(1, 'ใกใณใฟใผ'), (2, 'ๅผๅญ')], default=0, verbose_name='ไผๅกๅฝขๅผ')),
                ('course1', models.IntegerField(blank=True, null=True, verbose_name='ใณใผใน1')),
                ('course2', models.IntegerField(blank=True, null=True, verbose_name='ใณใผใน2')),
                ('course3', models.IntegerField(blank=True, null=True, verbose_name='ใณใผใน3')),
                ('describe1', models.TextField(blank=True, null=True, verbose_name='ใณใผใน1ใฎ่ชฌๆ')),
                ('describe2', models.TextField(blank=True, null=True, verbose_name='ใณใผใน2ใฎ่ชฌๆ')),
                ('describe3', models.TextField(blank=True, null=True, verbose_name='ใณใผใน3ใฎ่ชฌๆ')),
                ('busy', models.BooleanField(default=True, verbose_name='ๅฟใใ')),
                ('user_account_number', models.CharField(max_length=128, null=True, verbose_name='ๅฃๅบง็ชๅท')),
                ('user_routing_number', models.CharField(max_length=128, null=True, verbose_name='้่กใณใผใ๏ผๆฏๅบใณใผใ')),
                ('user_holder_name', models.CharField(max_length=32, null=True, verbose_name='ๅฃๅบงๅ็พฉ')),
                ('user_postal_code', models.CharField(max_length=128, null=True, verbose_name='้ตไพฟ็ชๅท')),
                ('user_state_kana', models.CharField(max_length=32, null=True, verbose_name='้ฝ้ๅบ็๏ผใซใ๏ผ')),
                ('user_city_kana', models.CharField(max_length=32, null=True, verbose_name='ๅบๅธ็บๆ๏ผใซใ๏ผ')),
                ('user_town_kana', models.CharField(max_length=32, null=True, verbose_name='็บๅ๏ผใซใ๏ผ')),
                ('user_line1_kana', models.CharField(max_length=32, null=True, verbose_name='็ชๅฐใๅท๏ผใซใ๏ผ')),
                ('user_line2_kana', models.CharField(blank=True, max_length=32, null=True, verbose_name='ๅปบ็ฉใป้จๅฑ็ชๅทใปใใฎไป๏ผไปปๆ๏ผ๏ผใซใ๏ผ')),
                ('user_state_kanji', models.CharField(max_length=32, null=True, verbose_name='้ฝ้ๅบ็๏ผๆผขๅญ๏ผ')),
                ('user_city_kanji', models.CharField(max_length=32, null=True, verbose_name='ๅบๅธ็บๆ๏ผๆผขๅญ๏ผ')),
                ('user_town_kanji', models.CharField(max_length=32, null=True, verbose_name='็บๅ๏ผๆผขๅญ๏ผ')),
                ('user_line1_kanji', models.CharField(max_length=32, null=True, verbose_name='็ชๅฐใๅท๏ผๆผขๅญ๏ผ')),
                ('user_line2_kanji', models.CharField(blank=True, max_length=32, null=True, verbose_name='ๅปบ็ฉใป้จๅฑ็ชๅทใปใใฎไป๏ผไปปๆ๏ผ๏ผๆผขๅญ๏ผ')),
                ('user_day', models.IntegerField(null=True, verbose_name='็ๅนดๆๆฅ๏ผๆฅ๏ผ')),
                ('user_month', models.IntegerField(null=True, verbose_name='็ๅนดๆๆฅ๏ผๆ๏ผ')),
                ('user_year', models.IntegerField(null=True, verbose_name='็ๅนดๆๆฅ๏ผๅนด๏ผ')),
                ('user_phone_number', models.CharField(max_length=128, null=True, verbose_name='้ป่ฉฑ็ชๅท')),
                ('user_tos_date', models.DateTimeField(default=django.utils.timezone.now, verbose_name='ใฟใคใ ในใฟใณใ')),
                ('user_last_name_kanji', models.CharField(max_length=32, null=True, verbose_name='ๆง๏ผๆผขๅญ๏ผ')),
                ('user_last_name_kana', models.CharField(max_length=32, null=True, verbose_name='ๆง๏ผใใช๏ผ')),
                ('user_first_name_kanji', models.CharField(max_length=32, null=True, verbose_name='ๅ๏ผๆผขๅญ๏ผ')),
                ('user_first_name_kana', models.CharField(max_length=32, null=True, verbose_name='ๅ๏ผใใช๏ผ')),
                ('user_gender', models.IntegerField(choices=[(1, '็ท'), (2, 'ๅฅณ')], default=0, verbose_name='ๆงๅฅ')),
                ('user_verification', models.ImageField(null=True, upload_to='', verbose_name='ๆฌไบบ็ขบ่ช')),
                ('user_account_id', models.CharField(blank=True, max_length=255, null=True, verbose_name='ๆฑบๆธใซไฝฟใID')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
            },
            managers=[
                # Custom manager defined in match.models (email-based auth).
                ('objects', match.models.CustomUserManager()),
            ],
        ),
        migrations.CreateModel(
            name='Char',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('char', models.CharField(max_length=255, verbose_name='ใญใฃใฉ')),
            ],
        ),
        migrations.CreateModel(
            name='Message',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.TextField(verbose_name='ใกใใปใผใธ')),
                ('created_at', models.DateTimeField(blank=True, default=django.utils.timezone.now, verbose_name='้ไฟกๆฅๆ้')),
                ('room', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='room', to=settings.AUTH_USER_MODEL, verbose_name='ใกใใปใผใธใฎๆใกไธป')),
                ('to_user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='to_user', to=settings.AUTH_USER_MODEL, verbose_name='ใกใใปใผใธใฎๅฎๅ
')),
            ],
        ),
        migrations.CreateModel(
            name='BuyingHistory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('stripe_id', models.CharField(max_length=200, verbose_name='ใฟใคใใซ')),
                ('created_at', models.DateTimeField(default=django.utils.timezone.now, verbose_name='ๆฅไป')),
                ('course', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='course', to=settings.AUTH_USER_MODEL, verbose_name='ใกใใปใผใธใฎๅฎๅ
')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='buyer', to=settings.AUTH_USER_MODEL, verbose_name='่ณผๅ
ฅ่
')),
            ],
        ),
        migrations.AddField(
            model_name='user',
            name='user_char',
            field=models.ManyToManyField(blank=True, to='match.Char', verbose_name='ไฝฟ็จใญใฃใฉ'),
        ),
        migrations.AddField(
            model_name='user',
            name='user_permissions',
            field=models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions'),
        ),
    ]
|
[
"game4967@gmail.com"
] |
game4967@gmail.com
|
797f4c73b0278599dcfedb7af58d95b38a5afc87
|
b0cdbad299f6174bfdb0fba173dbcf3889b82209
|
/Modules/os/dirwalk4.py
|
eaaefb1ed5ab44c8053319d9393155e7f7a65218
|
[] |
no_license
|
deesaw/PythonD-06
|
a33e676f1e0cfc13b4ea645c8b60547b198239ac
|
3c6f065d7be2e3e10cafb6cef79d6cae9d55a7fa
|
refs/heads/master
| 2023-03-18T08:24:42.030935
| 2021-03-02T14:15:09
| 2021-03-02T14:15:09
| 343,797,605
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 311
|
py
|
import os

# Media extensions to report (stored without the leading dot).
extns = ["mp3", "jpg", "jpeg", "mpg", "mpeg"]
# Minimum file size to report: 1 MiB.
size = 1 * 1024 * 1024

# Walk the whole C: drive and print every large media file found.
for (dirname, subdir, files) in os.walk('c:\\'):
    for myfile in files:
        filename = os.path.join(dirname, myfile)
        # splitext() keeps the leading dot ('.mp3'); strip it so the
        # membership test against `extns` can match (the original compared
        # '.mp3' against 'mp3' and could never succeed).
        extn = os.path.splitext(filename)[-1].lstrip('.')
        if extn in extns:
            # Fix: os.path.getfilesize does not exist (AttributeError at
            # runtime); the correct stdlib function is os.path.getsize.
            if os.path.getsize(filename) > size:
                print(filename)
|
[
"69420960+deesaw@users.noreply.github.com"
] |
69420960+deesaw@users.noreply.github.com
|
3dd874f730373ef6ad93cf7b4ac82b15d6f296e6
|
68ebbec2ceb269add3f71fa0189c6547765dae8b
|
/orders/models.py
|
597159ce256c919c2d930027717cf0500096b406
|
[
"MIT"
] |
permissive
|
OjureFred/BlazeMarketplace
|
17ca512dbd80319db6342fa2d9e90dc41f3f67f3
|
e207d538e25dfa8866b01e74886d8e91a54c53fe
|
refs/heads/main
| 2023-04-24T18:57:05.834297
| 2021-05-04T06:52:36
| 2021-05-04T06:52:36
| 319,242,596
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,559
|
py
|
from django.db import models
from django.db.models.signals import pre_save, post_save
import math
from addresses.models import Address
from carts.models import Cart
from billing.models import BillingProfile
from blazemarketplace.utils import unique_order_id_generator
# Create your models here.
# Lifecycle states an Order can be in: (stored value, human-readable label).
ORDER_STATUS_CHOICES = (
    ('created', 'Created'),
    ('paid', 'Paid'),
    ('shipped', 'Shipped'),
    ('refunded', 'Refunded'),
)
class OrderManager(models.Manager):
    """Manager adding get-or-create semantics keyed on (billing profile, cart)."""

    def new_or_get(self, billing_profile, cart_obj):
        """Return ``(order, created)`` for the given billing profile and cart.

        When exactly one active 'created' order exists for the pair it is
        reused; otherwise a fresh Order is created.
        """
        existing = self.get_queryset().filter(
            billing_profile=billing_profile,
            cart=cart_obj,
            active=True,
            status='created',
        )
        if existing.count() == 1:
            return existing.first(), False
        new_order = self.model.objects.create(
            billing_profile=billing_profile, cart=cart_obj)
        return new_order, True
class Order(models.Model):
    """A customer order tying a cart to billing/shipping details and a status."""
    order_id = models.CharField(max_length=120, blank=True)  # set by pre_save signal below
    billing_profile = models.ForeignKey(BillingProfile, null=True, blank=True, on_delete=models.CASCADE)
    shipping_address = models.ForeignKey(Address, related_name='shipping_address', null=True, blank=True, on_delete=models.CASCADE)
    billing_address = models.ForeignKey(Address, related_name='billing_address',null=True, blank=True, on_delete=models.CASCADE)
    cart = models.ForeignKey(Cart, on_delete=models.CASCADE)
    status = models.CharField(max_length=120, default='created', choices=ORDER_STATUS_CHOICES)
    shipping_total = models.DecimalField(default=500.00, max_digits=100, decimal_places=2)
    total = models.DecimalField(default=0.00, max_digits=100, decimal_places=2)
    active = models.BooleanField(default=True)  # deactivated when the cart moves to another billing profile
    def __str__(self):
        return self.order_id
    objects = OrderManager()
    def update_total(self):
        """Recompute total = cart total + shipping total, save, and return it.

        NOTE(review): math.fsum operates on floats; presumably acceptable
        precision for these 2-dp amounts — confirm if exact Decimal
        arithmetic is required.
        """
        cart_total = self.cart.total
        shipping_total = self.shipping_total
        new_total = math.fsum([cart_total, shipping_total])
        formatted_total = format(new_total, '.2f')
        self.total = formatted_total
        self.save()
        return formatted_total
    def check_done(self):
        """Return True when billing profile, both addresses and a positive total are set."""
        billing_profile = self.billing_profile
        shipping_address = self.shipping_address
        billing_address = self.billing_address
        total = self.total
        if billing_profile and shipping_address and billing_address and total > 0:
            return True
        return False
    def mark_paid(self):
        """Move the order to 'paid' (only if complete) and return the resulting status."""
        if self.check_done():
            self.status = 'paid'
            self.save()
        return self.status
def pre_save_create_order_id(sender, instance, *args, **kwargs):
    """pre_save receiver for Order: assign a unique order_id if missing and
    deactivate other billing profiles' orders for the same cart."""
    if not instance.order_id:
        instance.order_id = unique_order_id_generator(instance)
    stale_orders = Order.objects.filter(
        cart=instance.cart).exclude(billing_profile=instance.billing_profile)
    if stale_orders.exists():
        stale_orders.update(active=False)
pre_save.connect(pre_save_create_order_id, sender=Order)
def post_save_cart_total(sender, instance, created, *args, **kwargs):
    """post_save receiver for Cart: keep the related Order's total in sync.

    Fires only on cart updates (not creation, when no order exists yet);
    when exactly one Order references the cart, its total is recomputed.
    A dead local read of ``instance.total`` was removed (the value was
    fetched but never used — update_total() re-reads it itself).
    """
    if not created:
        cart_obj = instance
        cart_id = cart_obj.id
        qs = Order.objects.filter(cart__id=cart_id)
        if qs.count() == 1:
            order_obj = qs.first()
            print('running update')
            order_obj.update_total()
post_save.connect(post_save_cart_total, sender=Cart)
def post_save_order(sender, instance, created, *args, **kwargs):
    """post_save receiver for Order: compute the total when an order is first created."""
    if created:
        # Bug fix: the original read `instance.update_total` without calling
        # it (bare attribute access), so new orders never had their total set.
        instance.update_total()
post_save.connect(post_save_order, sender=Order)
|
[
"fredojure@hotmail.com"
] |
fredojure@hotmail.com
|
fb1d6c7c79955d6e7e9956c82783c28acb3d3057
|
f513c794fd95cb72ee776029ece38a08c4b4da0b
|
/custom/_legacy/pathfinder/urls.py
|
d2352d244f646d232b5dae3a977e19a0c1485820
|
[] |
no_license
|
bglar/commcare-hq
|
a92f034a0c2faf787da8321b4d79e55f098bd89f
|
972129fc26864c08c7bef07874bd2a7218550bff
|
refs/heads/master
| 2021-05-28T20:44:12.876151
| 2015-01-16T16:23:52
| 2015-01-16T16:23:52
| 29,391,363
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 362
|
py
|
from django.conf.urls.defaults import *
# URL routes for the pathfinder reports. Uses the old-style patterns() API
# where each view is named as a string relative to the 'pathfinder.views'
# module prefix.
urlpatterns = patterns('pathfinder.views',
    url(r'select/$', 'selector'),
    url('select/ward', 'ward_selector'),
    url('select/provider', 'provider_selector'),
    url('select/hbc', 'hbc_selector'),
    url('hbc/', 'home_based_care'),
    url('ward/', 'ward_summary'),
    url('provider/', 'provider_summary'),
)
|
[
"cternus@dimagi.com"
] |
cternus@dimagi.com
|
a9b90beacd2dc62d8a0ddd34deb42dedee180677
|
9591cbab774598c0d3b5a382f03202d7d86f9bb9
|
/xmlfilter.py
|
a5f2e8cb2f75adb04f34024961e788824a8b3c1f
|
[] |
no_license
|
zgjoget9/Scripts_for_spotbugs
|
e3094fce87e9bcc87f59ef96d400d6ad987e59b3
|
fb4f2721806723037a96e6dd2330c929aa8e841c
|
refs/heads/main
| 2023-03-25T23:23:11.324142
| 2021-03-21T06:57:40
| 2021-03-21T06:57:40
| 349,895,698
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,165
|
py
|
from xml.dom.minidom import parse
import xml.dom.minidom
import os
import sys

# SpotBugs bug patterns to extract from the XML reports.
typelist = ['BC_UNCONFIRMED_CAST', 'DLS_DEAD_LOCAL_STORE',
            'RCN_REDUNDANT_NULLCHECK_OF_NONNULL_VALUE',
            'UCF_USELESS_CONTROL_FLOW',
            'UWF_FIELD_NOT_INITIALIZED_IN_CONSTRUCTOR']

filedir = "/.../allxml/"
files = os.listdir(filedir)

# One (seen-entities list, output file) pair per bug type.  The list gives
# de-duplication; the file receives one record per unique entity.  This
# dispatch table replaces the original five structurally identical
# if/elif branches (one list + one file handle per type).
outputs = {t: ([], open("/.../" + t + ".txt", "w+")) for t in typelist}

for file in files:
    if not file.endswith('.xml'):
        continue
    try:
        DOMTree = xml.dom.minidom.parse(filedir + file)
    except Exception:
        # Best effort: skip reports that fail to parse (was a bare except).
        continue
    collection = DOMTree.documentElement
    jar = collection.getElementsByTagName("Jar")
    for buginstance in collection.getElementsByTagName("BugInstance"):
        TYPE = buginstance.getAttribute("type")
        if TYPE not in typelist:
            continue
        # Build a textual description ("entity") of the bug's location:
        # source file, enclosing method(s), and the bug's own line ranges.
        entity = []
        for claSS in buginstance.getElementsByTagName("Class"):
            sourceline = claSS.getElementsByTagName("SourceLine")[0]
            entity.append("file: " + sourceline.getAttribute("sourcepath"))
        for method in buginstance.getElementsByTagName("Method"):
            sourceline = method.getElementsByTagName("SourceLine")[0]
            entity.append("method name: " + method.getAttribute("name"))
            entity.append("method signature: " + method.getAttribute("signature"))
            entity.append("method begin: " + sourceline.getAttribute("start"))
            entity.append("method end: " + sourceline.getAttribute("end"))
        # Scan direct children last-to-first for the bug's SourceLine nodes
        # (reverse order kept from the original implementation).
        sourcelines = buginstance.childNodes
        i = len(sourcelines) - 1
        while i >= 0:
            if sourcelines[i].localName == "SourceLine":
                entity.append("bug begin: " + sourcelines[i].getAttribute("start"))
                entity.append("bug end: " + sourcelines[i].getAttribute("end"))
            i -= 1
        seen, out = outputs[TYPE]
        if entity not in seen:
            seen.append(entity)
            out.writelines("jar: " + jar[0].childNodes[0].data + '\n')
            for line in entity:
                out.writelines(line + '\n')
            out.writelines('\n')

print('over')
for _, out in outputs.values():
    out.close()
|
[
"noreply@github.com"
] |
zgjoget9.noreply@github.com
|
0a40a1d559c028627fb025ed6057c40c0d9f9f1d
|
90419da201cd4948a27d3612f0b482c68026c96f
|
/sdk/python/pulumi_azure_nextgen/recoveryservices/v20200202/private_endpoint_connection.py
|
f1f8b04627df798b43442d52d427fd896c41693a
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
test-wiz-sec/pulumi-azure-nextgen
|
cd4bee5d70cb0d332c04f16bb54e17d016d2adaf
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
refs/heads/master
| 2023-06-08T02:35:52.639773
| 2020-11-06T22:39:06
| 2020-11-06T22:39:06
| 312,993,761
| 0
| 0
|
Apache-2.0
| 2023-06-02T06:47:28
| 2020-11-15T09:04:00
| null |
UTF-8
|
Python
| false
| false
| 6,487
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['PrivateEndpointConnection']
class PrivateEndpointConnection(pulumi.CustomResource):
    # NOTE: this class lives in a generator-produced file (see the header
    # warning); hand edits are normally overwritten by the SDK generator.
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 e_tag: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
                 properties: Optional[pulumi.Input[pulumi.InputType['PrivateEndpointConnectionArgs']]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 vault_name: Optional[pulumi.Input[str]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        Private Endpoint Connection Response Properties

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] e_tag: Optional ETag.
        :param pulumi.Input[str] location: Resource location.
        :param pulumi.Input[str] private_endpoint_connection_name: The name of the private endpoint connection.
        :param pulumi.Input[pulumi.InputType['PrivateEndpointConnectionArgs']] properties: PrivateEndpointConnectionResource properties
        :param pulumi.Input[str] resource_group_name: The name of the resource group where the recovery services vault is present.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
        :param pulumi.Input[str] vault_name: The name of the recovery services vault.
        """
        # Legacy __name__/__opts__ keyword arguments: warn and fold them into
        # the modern resource_name/opts parameters.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: validate required inputs and collect
            # all properties into the __props__ dict passed to the engine.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            __props__['e_tag'] = e_tag
            __props__['location'] = location
            if private_endpoint_connection_name is None:
                raise TypeError("Missing required property 'private_endpoint_connection_name'")
            __props__['private_endpoint_connection_name'] = private_endpoint_connection_name
            __props__['properties'] = properties
            if resource_group_name is None:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__['resource_group_name'] = resource_group_name
            __props__['tags'] = tags
            if vault_name is None:
                raise TypeError("Missing required property 'vault_name'")
            __props__['vault_name'] = vault_name
            # Output-only properties start as None; the engine resolves them.
            __props__['name'] = None
            __props__['type'] = None
        # Register an alias for the 'latest' API-version type token so
        # existing state under that token is not replaced.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:recoveryservices/latest:PrivateEndpointConnection")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(PrivateEndpointConnection, __self__).__init__(
            'azure-nextgen:recoveryservices/v20200202:PrivateEndpointConnection',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'PrivateEndpointConnection':
        """
        Get an existing PrivateEndpointConnection resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # No inputs are supplied here; passing an empty __props__ with an id
        # makes the constructor read the existing resource's state.
        __props__ = dict()
        return PrivateEndpointConnection(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="eTag")
    def e_tag(self) -> pulumi.Output[Optional[str]]:
        """
        Optional ETag.
        """
        return pulumi.get(self, "e_tag")
    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[Optional[str]]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Resource name associated with the resource.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def properties(self) -> pulumi.Output['outputs.PrivateEndpointConnectionResponse']:
        """
        PrivateEndpointConnectionResource properties
        """
        return pulumi.get(self, "properties")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Resource type represents the complete path of the form Namespace/ResourceType/ResourceType/...
        """
        return pulumi.get(self, "type")
    # Map between the provider's camelCase property names and Python's
    # snake_case attribute names.
    def translate_output_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
    def translate_input_property(self, prop):
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
[
"public@paulstack.co.uk"
] |
public@paulstack.co.uk
|
92211e6930886bccd94286601778955ce5b0fc35
|
83fd94bddf0039a692518284249b4a0411738086
|
/prob_ioi.py
|
0c497411a3c5ee3732b62c36a9dd2397d43ea0b1
|
[] |
no_license
|
Firasnafti67/problemSolving
|
3ad53e6c8875f1b054fce0e5647d2e6cca0d3cb8
|
2ca6e2ba72598d68c31f63115b61ee1509e43b3b
|
refs/heads/main
| 2023-02-12T17:43:47.938889
| 2021-01-12T21:11:29
| 2021-01-12T21:11:29
| 329,112,837
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 349
|
py
|
# Library-lending simulation: for each request, answer 1 if the book is
# available on that day (and lend it for `duration` days), else 0.
nb_books, nb_days = map(int, input().split())
# next_available[book] = first day the book may be lent again (0 = free now).
next_available = {book: 0 for book in range(nb_books)}
answers = []
for day in range(1, nb_days + 1):
    nb_clients = int(input())
    for _ in range(nb_clients):
        book, duration = map(int, input().split())
        if day >= next_available[book]:
            next_available[book] = day + duration
            answers.append(1)
        else:
            answers.append(0)
for answer in answers:
    print(answer)
|
[
"noreply@github.com"
] |
Firasnafti67.noreply@github.com
|
bbcf290fc9f7863e878a76ee3541e662eba89da2
|
f7a9c41ebdbab550ad07e9382a6df3d88d9f317e
|
/StudyExample/1.DataType/int_convert.py
|
f4cfeda4188c8b976eb9f29c8a8caddf2619c001
|
[] |
no_license
|
shinsangeun/PythonStudy
|
9f32abf08bf5a936db147722ea85fcb97cfe26f0
|
2bf2dfaf5b838b5456b450acbaf31fd9b256ee12
|
refs/heads/master
| 2023-02-08T12:34:57.556148
| 2020-12-23T16:39:36
| 2020-12-23T16:39:36
| 141,964,854
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 185
|
py
|
# Read two values, then print their string concatenation and their integer sum.
# NOTE(review): the Korean prompt/label strings below appear mojibake-garbled
# (and two are split across physical lines) in this copy of the file; they are
# runtime strings and are left byte-identical.
string_a = input("์
๋ ฅA>")
string_b = input("์
๋ ฅB>")
# Convert both inputs to integers (raises ValueError on non-numeric input).
int_a = int(string_a)
int_b = int(string_b)
print("๋ฌธ์์ด ์๋ฃ:", string_a+string_b)
print("์ซ์ ์๋ฃ", int_a+int_b)
|
[
"s0813se@naver.com"
] |
s0813se@naver.com
|
81b6d1556691d9c085b59e7856c9a9bc25214281
|
5c5f0b22f48d00d06ac1a2455341de513d35bd5c
|
/TL through optimal extension/Test.py
|
d93a934dd1fed2914a9c170cc897c8064456d5e6
|
[] |
no_license
|
dvaccam/RL-TL-Code
|
93f83961e2404b8e17ec44252955980c5f4ca8ab
|
95243b6394f0c8416bddbbc4dabc4a393a826611
|
refs/heads/master
| 2021-01-23T09:40:01.093061
| 2017-11-22T18:34:01
| 2017-11-22T18:34:01
| 102,589,651
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,935
|
py
|
import gym
import numpy as np
import utils
import scipy.optimize as opt
import itertools as itt
import time
def policy(state):
    """Hand-crafted MountainCar policy over (position, velocity).

    Returns action 2 when the car is far left (position < -0.9), already
    moving right (velocity > 0), or nearly stopped while left of -0.4;
    otherwise returns action 0.
    """
    position, velocity = state[0], state[1]
    far_left = position < -0.9
    moving_right = velocity > 0
    stalled_on_slope = abs(velocity) < 0.001 and position < -0.4
    return 2 if (far_left or moving_right or stalled_on_slope) else 0
gamma = 0.9
goal_pos = -0.35
# Creation of source task
source_task = gym.make('MountainCar-v0', discretize=(1000, 1000), aceleration=0.001, seed=1000, goal_position=goal_pos)
# Data structures for storing samples
n_source = 5#1000
source_samples = [] #93, 115
# Dictionary to check if an initial position is observed during sampling
init_positions = source_task.env.bins[0][(-0.6 <= source_task.env.bins[0]) & (source_task.env.bins[0] <= -0.4)]
seen_init_positions_source = {s0:False for s0 in init_positions}
#Sampling from source task
print("Collecting ", n_source, " samples from source task...")
for i in range(n_source):
episode = []
init_state = source_task.reset()
seen_init_positions_source[init_state[0]] = True
first_state = init_state
for t in range(500):
source_task.render()
action = policy(first_state)
next_state, reward, done, info = source_task.step(action)
episode.append((first_state, action, next_state, reward))
first_state = next_state
if done:
break
source_samples.append(episode)
print("Done sampling from source task")
# Check there was at least one episode for each initial position
not_seen_states = 0
for k,v in seen_init_positions_source.items():
if not v:
not_seen_states += 1#print(k)
print("Not seen positions: ", not_seen_states, "/", len(seen_init_positions_source))
# Creation of target task
target_acel = 0.0025
target_task = gym.make('MountainCar-v0', discretize=(1000, 1000), aceleration=target_acel, seed=1, goal_position=goal_pos)
# Data structures for storing samples
n_target = 1
target_samples = []
# Dictionary to check if an initial position is observed during sampling
seen_init_positions_target = {s0:False for s0 in init_positions}
# Sampling from target task
print("Collecting ", n_target, " samples from target task...")
for i in range(n_target):
episode = []
init_state = target_task.reset()
seen_init_positions_target[init_state[0]] = True
first_state = init_state
for t in range(500):
# target_task.render()
if first_state[0] < -0.9 or first_state[1] > 0 or (abs(first_state[1]) < 0.001 and first_state[0] < -0.5):
action = 2
else:
action = 0
next_state, reward, done, info = target_task.step(action)
episode.append((first_state, action, next_state, reward))
first_state = next_state
if done:
break
target_samples.append(episode)
print("Done sampling from target task")
# Check there was at least one episode for each initial state
not_seen_states = 0
for k,v in seen_init_positions_target.items():
if not v:
not_seen_states += 1#print(k)
print("Not seen states: ", not_seen_states, "/", len(seen_init_positions_target))
# Calculate real value function on target task
V = {}
print("Calculating real value function for target task...")
for s in init_positions:
V[s] = 0
target_task = gym.make('MountainCar-v0', discretize=(1000, 1000), aceleration=target_acel, initial_position=s, goal_position=goal_pos)
init_state = target_task.reset()
first_state = init_state
for t in range(500):
# target_task.render()
if first_state[0] < -0.9 or first_state[1] > 0 or (abs(first_state[1]) < 0.001 and first_state[0] < -0.5):
action = 2
else:
action = 0
next_state, reward, done, info = target_task.step(action)
V[s] += np.power(gamma, t)*reward
first_state = next_state
if done:
break
print("Done calculating real value function for target task")
# Calculate real expected reward for policy
Jt = np.array(list(V.values())).mean()
print("Expected reward for the policy:", Jt)
print("Estimated expected reward:", utils.estimate_J(target_samples, gamma))
def B(D_source, D_target, gamma, ext, target_task, Jt):# Ext is a mutidimensional array with the new states
D_c = list(D_target)
diff = 0
for ep in range(len(ext)):
ep_c = []
for d in range(len(ext[ep])):
init_pos, init_vel = D_source[ep][d][0]
action = D_source[ep][d][1]
vel = init_vel + (action - 1) * target_acel + np.cos(3 * init_pos) * (-0.0025)
vel = target_task.env.bins[1][np.array(abs(target_task.env.bins[1] - vel)).argmin()]
pos = init_pos + vel
pos = target_task.env.bins[0][np.array(abs(target_task.env.bins[0] - pos)).argmin()]
if (pos == target_task.env.min_position and vel < 0): vel = 0
dis = np.sqrt(((np.array([pos, vel]) - ext[ep][d])**2).sum())
ep_c.append((D_source[ep][d][0], D_source[ep][d][1], ext[ep][d], pow(2, dis)*-1))
diff += (D_source[ep][d][3] - ep_c[d][3])
D_c.append(ep_c)
diff /= len(D_source)
J_cap = utils.estimate_J(D_c, gamma)
eps = abs(J_cap - Jt)
return eps + diff
def par_f(D_source, D_target, gamma, x, target_task, Jt):
ext = []
offset = 0
for e in range(len(D_source)):
ep = []
for d in range(len(D_source[e])):
ep.append(np.array([x[offset + 2*d], x[offset + 2*d + 1]]))
offset += 2*len(D_source[e])
ext.append(ep)
return B(D_source, D_target, gamma, ext, target_task, Jt)
def B_1(D_source, D_target, gamma, ext, sample_mask, target_task, Jt):# Ext is a list of lists as D_source, but with the state as a numpy array instead of a tuple
#rounded_mask = np.array(np.around(sample_mask)).astype(dtype=bool)
D_c = list(D_target)
diff = 0
for ep in range(len(ext)):
#if rounded_mask[ep]:
ep_c = []
for d in range(len(ext[ep])):
init_pos, init_vel = D_source[ep][d][0]
action = D_source[ep][d][1]
vel = init_vel + (action - 1) * target_acel + np.cos(3 * init_pos) * (-0.0025)
vel = target_task.env.bins[1][np.array(abs(target_task.env.bins[1] - vel)).argmin()]
pos = init_pos + vel
pos = target_task.env.bins[0][np.array(abs(target_task.env.bins[0] - pos)).argmin()]
if (pos == target_task.env.min_position and vel < 0): vel = 0
dis = np.sqrt(((np.array([pos, vel]) - ext[ep][d])**2).sum())
ep_c.append((D_source[ep][d][0], D_source[ep][d][1], ext[ep][d], pow(2, dis)*-1))
diff += sample_mask[ep]*(D_source[ep][d][3] - ep_c[d][3])
D_c.append(ep_c)
diff /= sample_mask.sum() if sample_mask.sum() != 0 else 1
J_cap = utils.estimate_J(D_c, gamma, weights=np.hstack((np.ones(len(D_target)), sample_mask)))
eps = abs(J_cap - Jt)
return eps + diff
def par_f1(D_source, D_target, gamma, x, target_task, Jt):
ext = []
offset = 0
for e in range(len(D_source)):
ep = []
for d in range(len(D_source[e])):
ep.append(np.array([x[offset + 2 * d], x[offset + 2 * d + 1]]))
offset += 2 * len(D_source[e])
ext.append(ep)
sample_mask = x[-len(D_source):]
return B_1(D_source, D_target, gamma, ext, sample_mask, target_task, Jt)
np.random.seed(200)
st = time.time()
ex = []
for e in source_samples:
for d in e:
ex.extend([d[2][0], d[2][1]])
ex = np.array(ex)
dim = ex.shape[0]
a = par_f(source_samples, target_samples, gamma, ex, target_task, Jt)
print("Bound by using source samples as extension:", a)
print("Estimated expected reward with direct tranfer:", utils.estimate_J(target_samples+source_samples, gamma))
print("Error by direct transfer:", abs(utils.estimate_J(target_samples+source_samples, gamma) - Jt))
f = lambda x:par_f(D_source=source_samples, D_target=target_samples, gamma=gamma, Jt=Jt, target_task=target_task, x=x)
bounds = []
for _ in range(int(dim/2)):
bounds.extend([(target_task.env.min_position, target_task.env.max_position), (-target_task.env.max_speed, target_task.env.max_speed)])
s0 = np.random.uniform(low=target_task.env.min_position, high=target_task.env.max_position, size=int(dim/2))
v0 = np.random.uniform(low=-target_task.env.max_speed, high=target_task.env.max_speed, size=int(dim/2))
x0 = np.empty(dim, dtype=s0.dtype)
x0[0::2] = s0
x0[1::2] = v0
best = opt.minimize(f, x0, bounds=bounds, options={'maxfun':min(10000*len(source_samples), 1e5)})
print("Def:", best.fun, best.x, best.success)
x_opt_all = best.x
print("Full extenstion:", time.time()-st)
print("-------")
st = time.time()
minim = 100
min_idx = None
for n in range(len(source_samples)):
for idx in itt.combinations(range(len(source_samples)), n+1):
sub = np.array(source_samples)[np.array(idx)].tolist()
ex = []
for e in sub:
for d in e:
ex.extend([d[2][0], d[2][1]])
ex = np.array(ex)
dim = ex.shape[0]
a = par_f(sub, target_samples, gamma, ex, target_task, Jt)
print(idx)
print("Bound by using source samples as extension:", a)
print("Estimated expected reward with direct tranfer:", utils.estimate_J(target_samples+sub, gamma))
print("Error by direct transfer:", abs(utils.estimate_J(target_samples+sub, gamma) - Jt))
f = lambda x:par_f(D_source=sub, D_target=target_samples, gamma=gamma, Jt=Jt, target_task=target_task, x=x)
bounds = []
for _ in range(int(dim/2)):
bounds.extend([(target_task.env.min_position, target_task.env.max_position), (-target_task.env.max_speed, target_task.env.max_speed)])
s0 = np.random.uniform(low=target_task.env.min_position, high=target_task.env.max_position, size=int(dim/2))
v0 = np.random.uniform(low=-target_task.env.max_speed, high=target_task.env.max_speed, size=int(dim/2))
x0 = np.empty(dim, dtype=s0.dtype)
x0[0::2] = s0
x0[1::2] = v0
best = opt.minimize(f, x0, bounds=bounds, options={'maxfun':min(10000*len(sub), 1e5)})
if best.fun < minim:
minim = best.fun
min_idx = idx
print("Def:", best.fun, best.success)
print("-------")
print("Best one:",minim, min_idx)
print("Combinatorial:", time.time()-st)
print("-------")
st = time.time()
'''ex = []
for e in source_samples:
for d in e:
ex.extend([d[2][0], d[2][1]])
ex.extend([0,1])
ex = np.array(ex)
dim = ex.shape[0]
a = par_f1(source_samples, target_samples, gamma, ex, target_task, Jt)
print("Bound by using some source samples as extension:", a)
print("Error by direct transfer:", abs(utils.estimate_J(target_samples+source_samples, gamma, np.array([1,0,1])) - Jt))'''
dim = ex.shape[0] + len(source_samples)
f = lambda x:par_f1(D_source=source_samples, D_target=target_samples, gamma=gamma, Jt=Jt, target_task=target_task, x=x)
bounds = []
for _ in range(int((dim-len(source_samples))/2)):
bounds.extend([(target_task.env.min_position, target_task.env.max_position), (-target_task.env.max_speed, target_task.env.max_speed)])
bounds.extend([(0,1) for _ in range(len(source_samples))])
reps = 1
for i in range(reps):
s0 = np.random.uniform(low=target_task.env.min_position, high=target_task.env.max_position, size=int((dim-len(source_samples))/2))
v0 = np.random.uniform(low=-target_task.env.max_speed, high=target_task.env.max_speed, size=int((dim-len(source_samples))/2))
x0 = np.empty(dim-len(source_samples), dtype=s0.dtype)
x0[0::2] = s0
x0[1::2] = v0
w0 = np.random.uniform(low=0, high=1, size=len(source_samples))
#x0 = x_opt_all
w0 = np.ones(len(source_samples))
best = opt.minimize(f, np.hstack((x0, w0)), bounds=bounds, options={'maxfun':min(10000*len(source_samples), 1e5)})
print("Def:", best.fun, best.x, best.success)
print("Full optimization:", time.time()-st)
|
[
"daniel.vacca@hotmail.com"
] |
daniel.vacca@hotmail.com
|
c54f7cc8f4d5a5509d29f0f3b3b2b5d1611e711d
|
a72f33626c6a773d3a2e2a434f0b3f56e84359bd
|
/preprocessing/tape_detection.py
|
97d615d8b3b9c95cf361df910f2aa4da7c4ae7c7
|
[
"MIT"
] |
permissive
|
isse-augsburg/adaptive-spreading
|
fceb3d5a19d5eb473a10ea890db31c32fbf9a1f8
|
3e423f888ff07257111fc95c3276024c4c44036d
|
refs/heads/master
| 2023-07-20T04:41:27.458970
| 2020-05-04T11:08:23
| 2020-05-04T11:08:23
| 260,015,568
| 0
| 0
|
MIT
| 2023-07-06T21:48:52
| 2020-04-29T18:54:03
|
Python
|
UTF-8
|
Python
| false
| false
| 663
|
py
|
import torch
def get_tape_edges(profiles, axis=1):
nonzeros = (profiles > torch.unsqueeze(torch.mean(profiles, dim=1), -1))
tape_counter = nonzeros.cumsum(axis)
tape_counter[~nonzeros] = 0
temp_tape_profile, temp_tape_start_idx = (tape_counter == 1).max(axis)
temp_tape_start_idx[temp_tape_profile == 0] = 0
temp_tape_profile, temp_tape_end_idx = tape_counter.max(axis)
temp_tape_end_idx[temp_tape_profile == 0] = 0
return temp_tape_start_idx, temp_tape_end_idx
def get_tape_width(profiles, axis=1):
temp_tape_start_idx, temp_tape_end_idx = get_tape_edges(profiles, axis)
return temp_tape_end_idx - temp_tape_start_idx
|
[
"kruetzmann@isse.de"
] |
kruetzmann@isse.de
|
003d77730c8e89b19c510db41dd9d0781b0d847e
|
20c20938e201a0834ccf8b5f2eb5d570d407ad15
|
/abc179/abc179_a/18172147.py
|
fb5d8b2b9d6a805bfe0b9e458bc5ec4ae341844c
|
[] |
no_license
|
kouhei-k/atcoder_submissions
|
8e1a1fb30c38e0d443b585a27c6d134bf1af610a
|
584b4fd842ccfabb16200998fe6652f018edbfc5
|
refs/heads/master
| 2021-07-02T21:20:05.379886
| 2021-03-01T12:52:26
| 2021-03-01T12:52:26
| 227,364,764
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 62
|
py
|
x=input()
if x[-1]=="s":
print(x+"es")
else:
print(x+"s")
|
[
"kouhei.k.0116@gmail.com"
] |
kouhei.k.0116@gmail.com
|
67fd75d7c3672c13f3dd4033cb6fb5e6b8d53d71
|
2b120cadf79de28652eaab9ba468b78e2eeddeaf
|
/main.py
|
9680d298af699b2319ad134529d562b32527ac76
|
[] |
no_license
|
0xfirefist/face-liveness-detection
|
8ff19d1fd09257598ff1e5be364fa4fcc45e9851
|
6d4ec5e9ba6106e5f238c04fec0cedb0ed43b6c0
|
refs/heads/master
| 2023-01-22T15:39:47.444135
| 2020-12-03T10:47:17
| 2020-12-03T10:47:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,757
|
py
|
import cv2
from liveness_model.model import LivenessNet
from keras.models import load_model
import face_recognition
from keras.preprocessing.image import img_to_array
import numpy as np
def extract_faces(image):
face_locations = face_recognition.face_locations(image)
face_images = []
locs = []
for face_location in face_locations:
# Print the location of each face in this image
top, right, bottom, left = face_location
locs.append([top, right, bottom, left])
# You can access the actual face itself like this:
face_image = image[top:bottom, left:right]
face_images.append(face_image)
return face_images, locs
# loading models
model = load_model("liveness_model/model.h5")
cap = cv2.VideoCapture(0)
checker = 0
while True:
ret, frame = cap.read()
if checker < 50:
checker += 1
continue
a, locs = extract_faces(frame)
if len(a) < 1:
continue
face_image = a[0]
face_image = cv2.resize(face_image, (64,64))
face = img_to_array(face_image)
face = np.expand_dims(face, axis=0)
preds = model.predict(face)
predicted_class_indices=np.argmax(preds,axis=1)
if(predicted_class_indices[0] == 1):
cv2.putText(frame, 'REAL', (frame.shape[1]//2, frame.shape[0]//2), cv2.FONT_HERSHEY_DUPLEX, 1.0, (0, 255, 0), 1)
else:
cv2.putText(frame, 'FAKE', (frame.shape[1]//2, frame.shape[0]//2), cv2.FONT_HERSHEY_DUPLEX, 1.0, (0, 0, 255), 1)
top,right,bottom,left = locs[0]
cv2.rectangle(frame, (left, top), (right, bottom), (255, 0, 0), 2)
cv2.imshow('testing', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# Release handle to the webcam
cap.release()
cv2.destroyAllWindows()
|
[
"kalradev@outlook.com"
] |
kalradev@outlook.com
|
4845e4d8830fd0273b3515d8fcb0b0f8a2892e8a
|
6ea96e96810f1db6f0b41511681d8481726c8d21
|
/setup.py
|
cbe1a3d21a445d557c6933e9836b3a1a9ce71f4a
|
[] |
no_license
|
mmreis/pdTransformers
|
34bc846e73bb9d148f543c5d3e977f8848036639
|
ce0336f273d9a1c4e7dc07edc49df054a057fb69
|
refs/heads/master
| 2021-04-29T21:41:05.135098
| 2018-05-28T12:11:32
| 2018-05-28T12:11:32
| 121,621,122
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 400
|
py
|
from setuptools import setup
setup(name='pdTransformers',
version='0.1.0',
description='TransformerMixin for Pipeline building',
url='https://github.com/mmreis/pdTransformers',
author='Marisa Reis',
license='MIT',
packages=['pdTransformers'],
install_requires=[
'pandas',
'numpy',
'scikit-learn',
],
zip_safe=False)
|
[
"marisa.m.reis@inesctec.pt"
] |
marisa.m.reis@inesctec.pt
|
4493d5fe0c58bab3c391c494e22e17440866d8fa
|
1fc4efd558eef3caa760d1d4df7142b60bff10b9
|
/baoming/webapp/models_report.py
|
f0b3f6070b35f0d7fa9ccae3257790dba5a4ac26
|
[
"Apache-2.0"
] |
permissive
|
hanxiaoshun/RegistrationSystem
|
e041f94a2661913db61b3911292a5d53b29f4e47
|
2f7310508fc1725e96fe941b1062ce7f26f265a4
|
refs/heads/master
| 2022-12-15T12:10:42.631271
| 2020-07-06T00:46:57
| 2020-07-06T00:46:57
| 244,163,310
| 0
| 0
|
Apache-2.0
| 2022-11-22T04:00:40
| 2020-03-01T14:25:11
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,061
|
py
|
import django.utils.timezone as timezone
from django.db import models
import uuid
class ReportBase(models.Model):
"""
ๅบ็กไฟกๆฏ็ฑป
"""
explain = models.TextField('่ฏดๆ',
default='',
max_length=200,
blank=True,
null=True)
user_operator = models.ForeignKey('RegisterUserInfo', to_field='id',
related_name="%(app_label)s_%(class)s_Operator",
verbose_name='ๆไฝ็จๆท',
blank=True,
null=True,
on_delete=models.SET_NULL,
help_text='ๆไฝ็จๆท่ฎฐๅฝ')
create_time = models.DateTimeField('็ๆๆถ้ด', default=timezone.now)
# ไฝฟ็จModel.save()ๆฅๆดๆฐๆไผๆดๆฐๆณจๆ
modify_time = models.DateTimeField('ไฟฎๆนๆถ้ด', auto_now=True)
objects = models.Manager()
|
[
"18301513217@sina.cn"
] |
18301513217@sina.cn
|
580c34eb0100a79df9dbbdd88edfc188993e36ab
|
5462142b5e72cb39bea5b802dd46f55357c4ea84
|
/homework_zero_class/lesson9/ๅฝๆฐ็ไฝ็จๅ-times_3.py
|
6ddd3b709e42f3734bdc4783ba910b1682547cca
|
[] |
no_license
|
qqmadeinchina/myhomeocde
|
a0996ba195020da9af32613d6d2822b049e515a0
|
291a30fac236feb75b47610c4d554392d7b30139
|
refs/heads/master
| 2023-03-23T05:28:53.076041
| 2020-08-24T08:39:00
| 2020-08-24T08:39:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,604
|
py
|
#!D:\Program Files\Anaconda3
# -*- coding: utf-8 -*-
# @Time : 2020/7/23 00:35
# @Author : ่่ๅ
# @File : ๅฝๆฐ็ไฝ็จๅ-times_3.py
# @Software: PyCharm Community Edition
# ไฝ็จๅ(scope)
# ไฝ็จๅๆ็ๆฏๅ้็ๆ็ๅบๅ
# ๅจPythonๅฝไธญไธๅ
ฑๆไธค็งไฝ็จๅ:ๅ
จๅฑไฝ็จๅ ๅ ๅฝๆฐไฝ็จๅ
def fn():
# aๅฎไนๅจไบๅฝๆฐๅ
้จ๏ผๆๆๅฎ็ไฝ็จๅๅฐฑๆฏๅฝๆฐๅ
้จ๏ผๅฝๆฐๅค้จ่ฎฟ้ฎไธๅฐ
a=10
print("ๅฝๆฐๅ
้จa=",a)
fn() # ๅฝๆฐๅ
้จa= 10
# print("ๅฝๆฐๅค้จa=", a) # NameError: name 'a' is not defined
b = 20
def fn1():
# aๅฎไนๅจไบๅฝๆฐๅ
้จ๏ผๆๆๅฎ็ไฝ็จๅๅฐฑๆฏๅฝๆฐๅ
้จ๏ผๅฝๆฐๅค้จ่ฎฟ้ฎไธๅฐ
a = 10
print('ๅฝๆฐๅ
้จ:','a =',a)
print('ๅฝๆฐๅ
้จ:','b =',b)
fn1()
print('ๅฝๆฐๅค้จ:', 'b =', b)
# print('ๅฝๆฐๅค้จ:', 'a =', a) # NameError: name 'a' is not defined
# ๅฝๆฐๅ
้จ: a = 10
# ๅฝๆฐๅ
้จ: b = 20
# ๅฝๆฐๅค้จ: b = 20
def fn2():
a = 30
def fn3():
a = 80
print("fn3ไธญ๏ผa =",a)
print("fn3ไธญ๏ผb =",b)
fn3()
print("fn2ไธญ๏ผa =", a)
fn2()
# fn3ไธญ๏ผa = 80
# fn3ไธญ๏ผb = 20
# fn2ไธญ๏ผa = 30
print("ๅฝๆฐ่ฐ็จๅ b =",b)
def fn4():
# ๅฆๆๅธๆๅจๅฝๆฐๅ
้จไฟฎๆนๅ
จๅฑๅ้๏ผๅ้่ฆไฝฟ็จไธไธชglobalๅ
ณ้ฎๅญ๏ผๆฅๅฃฐๆๅ้
global b # ๅฃฐๆๅจๅฝๆฐๅ
้จ็ไฝฟ็จaๆฏๅ
จๅฑๅ้๏ผๆญคๆถๆไปฌไฟฎๆนa๏ผๅฐฑๆฏๅจไฟฎๆนๅ
จๅฑๅ้
b = 80
print('ๅฝๆฐๅ
้จ:', 'b =', b)
fn4()
print('ๅฝๆฐๅค้จ:','b =',b)
# ๅฝๆฐ่ฐ็จๅ b= 20
# ๅฝๆฐๅ
้จ: b = 80
# ๅฝๆฐๅค้จ: b = 80
|
[
"newwxm@126.com"
] |
newwxm@126.com
|
791973912cf18523ae8538782e145e325e26c72c
|
39f475f5f5c9cacbcffb1d742742d004394d6098
|
/amo_s.py
|
c9aa9a685661a26a045d9e9f69b6bac12cc21b14
|
[] |
no_license
|
SamuelDBZMAAM/Budokai-Modding-Tool
|
bb990106defb79b7932739134a9e861c99ddfc10
|
32f35d23b91c3c9197d3fd549e34561582dda9e4
|
refs/heads/master
| 2020-04-11T23:46:08.669371
| 2019-07-01T16:54:15
| 2019-07-01T16:54:15
| 162,178,720
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,445
|
py
|
# AMO Model Separator - amo_s
# Purpose: To make every model part in the character a separate AMG for use in Budokai series
import struct
import math
import os
def main():
print("")
print("Drag and drop AMO here")
x = input("")
x = x.replace("\"", "")
f = open(x, "r+b")
# Copying files needed
hti = 0
ith = 0
chunk = f.read(16)
amg_head = open("Files\AMG\B3_amg_head.bin", "r+b")
amg_head = amg_head.read()
amg_axis = open("Files\AMG\B3_amg_axis.bin", "r+b")
amg_axis = amg_axis.read()
amg_mp1 = open("Files\AMG\B3_amg_mp1.bin", "r+b")
amg_mp1 = amg_mp1.read()
amg_mp2 = open("Files\AMG\B3_amg_mp2.bin", "r+b")
amg_mp2 = amg_mp2.read()
amg_end = open("Files\AMG\B3_amg_end.bin", "r+b")
amg_end = amg_end.read()
# Setting up temp bins
tn1 = "Files\z.bin" # Will hold edited bin
temp1 = open(tn1, "w+b")
temp1.close()
temp1 = open(tn1, "r+b")
# Setting up AMG template
temp1.write(amg_head)
temp1.seek(16)
temp1.write(b'\x01')
temp1.read()
temp1.write(amg_axis)
temp1.seek(84)
temp1.write(b'\x70\x00\x00\x00')
temp1.read()
temp1.write(amg_mp1)
temp1.seek(116)
temp1.write(b'\x80\x00\x00\x00')
temp1.read()
temp1.write(amg_mp2)
temp1.seek(144)
temp1.write(b'\x20\x00\x00\x00')
temp1.seek(0)
amg_temp = temp1.read()
# Searches how many model parts are in the AMO and creates AMGs
amount_parts = 0
while chunk != b"":
if chunk[0] == 0x01 and chunk[8] == 0x46:
amount_parts += 1
part_offset = f.tell()
#print("DEBUG: Model part position - " + str(f.tell()))
# Collects mesh-----------------------------------------
f.seek(part_offset+64)
hti = f.read(4)
mesh_size = hex_to_int(hti)
mesh_size = (mesh_size-1610612736)*16
#print("DEBUG: Mesh size - " + str(mesh_size))
f.seek(part_offset+80)
mesh = f.read(mesh_size)
# Collects the model part heading-----------------------
f.seek(part_offset-80)
mesh_heading = f.read(160)
#print("DEBUG: Heading data collected...")
f.seek(part_offset-72) # Texture number
hti = f.read(4)
if hti == b'\xFF\xFF\xFF\xFF':
t_numb = "N"
else:
t_numb = hex_to_int(hti)+1
f.seek(part_offset-68) # Shader number
hti = f.read(4)
if hti == b'\xFF\xFF\xFF\xFF':
s_numb = "N"
else:
s_numb = hex_to_int(hti) + 1
#print("DEBUG: Texture - " + str(t_numb))
#print("DEBUG: Shader - " + str(s_numb))
# Creating AMG for part found--------------------------
f_name = "Model Part " + str(amount_parts) + " - T" + str(t_numb) + "_S" + str(s_numb) + ".bin"
#print("DEBUG: File name - " + f_name)
folder = x+" - All parts\\"
if not os.path.exists(folder): # Creates new folder for model parts
os.makedirs(folder)
amg = open(folder+f_name, "w+b")
amg.close()
amg = open(folder+f_name, "r+b")
amg.write(amg_temp)
amg.write(mesh_heading)
amg.write(mesh)
amg.write(amg_end)
amg_size = amg.tell()
#print("DEBUG: AMG size - " + str(amg_size))
amg.seek(28)
amg.write(b'\x00\x00\x00\x00')
amg.seek(28)
ith = amg_size
amg_size2 = int_to_hex(ith)
amg.write(amg_size2)
amg.seek(124)
amg.write(b'\x00\x00\x00\x00')
amg.seek(124)
ith = amg_size-64
amg_size2 = int_to_hex(ith)
amg.write(amg_size2)
#print("")
f.seek(part_offset)
chunk = f.read(16)
#print("DEBUG: Total amount of model parts - " + str(amount_parts))
print("")
print("Completed!")
# deletes temp files
tn1 = "Files\z.bin"
temp1 = open(tn1, "r+b")
temp1.close()
os.remove(tn1)
print("")
print("T#_S# = Texture used [Texture number in GGS], Shader used [Texture number in GGS].")
print("TN or SN = No texture used.")
print("You now have a new folder with all of the model parts in it located at [" + folder + "]")
print("")
def hex_to_int(hti):
# Converts hex offsets to integer format
hti = hti.hex()
hti = int(hti, 16)
hti = struct.pack('<L', hti)
hti = hti.hex()
hti = int(hti, 16)
#print("DEBUG:HTI - " + str(hti))
return hti
def int_to_hex(ith):
# Opposite of hex_to_int
ith = struct.pack('<L', ith)
#print("DEBUG:ITH - " + str(ith))
return ith
def again():
yn = input("Would you like to load another? (Y/N)")
yn = yn.lower()
if yn == "y" or yn == "yes":
main()
again()
else:
print("")
print("AMO model separator by: Nexus-sama")
print("credit to SamuelDBZMA&M for some parts")
print("Follow me on Twitter @NexusTheModder")
print("")
kill = input("press enter to close")
main()
again()
exit()
|
[
"noreply@github.com"
] |
SamuelDBZMAAM.noreply@github.com
|
3b07fbf27860b7e43680ac8f5e92ef3e7704557d
|
c807f50194c1ad6912bab6d4cf848d51e6f4c120
|
/exercise_4/linked_list_step_4.py
|
e59af4bff7e3f4e4048c9869a18d7c0efdae8089
|
[
"MIT"
] |
permissive
|
LDSSA/batch3-wave1-test-solutions
|
3cae5b6ab1b1e200d3209506436b6c3c6be727ad
|
3f5d72ec41888c8eb14937a437e1a13ef3d6d633
|
refs/heads/master
| 2020-05-21T16:51:49.317733
| 2019-05-20T09:07:39
| 2019-05-20T09:07:39
| 186,111,737
| 1
| 1
| null | 2019-05-16T14:27:26
| 2019-05-11T09:14:17
|
Python
|
UTF-8
|
Python
| false
| false
| 76
|
py
|
def update_list(l):
l.remove_node(7)
l.remove_node(10)
return l
|
[
"mariacristinavfdominguez@gmail.com"
] |
mariacristinavfdominguez@gmail.com
|
971f126af73af3400b894e7d1eac4731e8aa4ea0
|
a319119367429716fc57916ae479a66b8158cd16
|
/mantle/xilinx/spartan6/DCM.py
|
f0e14f4e4cc791ba3fe91184a20b4edc1c044319
|
[
"MIT"
] |
permissive
|
phanrahan/mantle
|
788ab7689107ea139f87cb169d53ad6cd9a91e38
|
8193cf6df60f5be7f198e55d74d9a196abec8342
|
refs/heads/master
| 2023-01-05T17:21:29.458634
| 2022-12-20T20:14:12
| 2022-12-20T20:14:12
| 84,332,208
| 38
| 14
|
NOASSERTION
| 2021-09-17T04:07:31
| 2017-03-08T14:56:32
|
Verilog
|
UTF-8
|
Python
| false
| false
| 2,138
|
py
|
from magma import *
__all__ = ['_DCM', 'DCM']
_DCM = DeclareCircuit('DCM',
"CLKIN", In(Bit),
"CLKFB", In(Bit),
"PSCLK", In(Bit),
"PSEN", In(Bit),
"PSINCDEC", In(Bit),
"RST", In(Bit),
"CLKFX", In(Bit))
def DCM( freq, basefreq=32):
"""
Instantiate a Digital Clock Manager
>>> DCM(freq,basefreq=32)
O's clockrate is freq (in Mhz). clockin should be set to
the frequency of the input clock.
"""
(mult, div), bestfreq = findbestclock(freq, basefreq)
if freq != bestfreq:
print("%d * %d / %d (%g) ~= %g" % (basefreq,
mult,
div,
bestfreq,
freq))
params = {}
params["CLK_FEEDBACK"] = '"NONE"'
params["CLKFX_DIVIDE"] = str(div)
params["CLKFX_MULTIPLY"] = str(mult)
params["DUTY_CYCLE_CORRECTION"] = '"FALSE"'
params["STARTUP_WAIT"] = '"FALSE"'
#params["X_CLKIN_PERIOD"] = '"31.25"'
#for attr in ["PSEN", "PSINCDEC", "RST", "PSCLK"]:
# params[attr + "INV"] = '"%s"' % attr
dcm = _DCM(**params)
for attr in ["PSEN", "PSINCDEC", "RST", "PSCLK"]:
wire(GND, getattr(dcm, attr))
wire(GND, dcm.CLKFB)
return AnonymousCircuit("input I", dcm.CLKIN, "output O", dcm.CLKFX)
def findbestclock(freq, basefreq):
bestfreq = freq
bestdiff = 1024.
bestclock = None
for i in range(2, 33):
f1 = basefreq * i
for j in range(1, 33):
f2 = f1 / float(j)
diff = abs(f2 - freq)
if diff < bestdiff:
bestdiff = diff
bestfreq = f2
best = (i, j)
return best, bestfreq
if __name__ == '__main__':
from parts.xilinx.spartan6.primitives.BUF import BUFG
d = DefineCircuit('main', "input CLKIN", Bit, "output CLKOUT", Bit)
dcm = DCM(50)
bufg = BUFG()
wire(d.CLKIN, dcm.I)
wire(dcm, bufg)
wire(bufg, d.CLKOUT)
EndCircuit()
compiledefinitions('main.v', d)
|
[
"hanrahan@cs.stanford.edu"
] |
hanrahan@cs.stanford.edu
|
1833e6bd9c1ffad2af48048acbdc3876e2309735
|
ce2bd9e3080726df8d6bbc98eb218bed8afbb04d
|
/app.py
|
626972e050675db212f356010e7f5bc651e410d3
|
[] |
no_license
|
Abhishek-Dobliyal/MonoShot
|
325de6fdab483e3b9f4bcd558d92b798b02646cd
|
3bc1eb4d9a9e3aad5d833320782042c83d1fb7bd
|
refs/heads/main
| 2023-03-05T11:41:41.673738
| 2021-02-26T07:57:42
| 2021-02-26T07:57:42
| 341,968,995
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,520
|
py
|
# Required Imports
import streamlit as st # pip install streamlit
from file_processing import FileProcessor
import tempfile
import os
import shutil
import time
# Helper Functions
def get_file_data(file):
''' Writes the uploaded file data
to a temporary file for processing'''
if not os.path.isdir("./temp_files"):
os.mkdir("./temp_files")
tempfile.tempdir = "temp_files" # Set the directory where all temp files will be stores
temp_file = tempfile.NamedTemporaryFile(suffix="file_for_processing")
temp_file.write(file.read())
return temp_file
def display_progress_bar():
''' Displays a progress bar to visualize
the completion of a function '''
processing_txt = st.markdown("#### Processing, Please wait...")
bar = st.progress(0)
for percent in range(100):
time.sleep(0.05)
bar.progress(percent)
bar.empty()
processing_txt.empty()
def display_msg(msg, msg_type=0):
''' Display message according to the
type
1 -> Success Message
0 -> Info Message
-1 -> Error Message
'''
msg_widget = st.empty()
if msg_type == 1:
msg_widget = st.success(msg)
time.sleep(3)
msg_widget.empty()
elif msg_type == -1:
msg_widget = st.error(msg)
time.sleep(3)
msg_widget.empty()
elif msg_type == 0:
msg_widget = st.info(msg)
time.sleep(3)
msg_widget.empty()
def get_file_details(processed_file_path, processed_file_details, file_extension):
''' Dispalys the basic details of the file
inside an expander widget '''
info_expander = st.beta_expander("Output Section")
with info_expander:
processed_col, file = st.beta_columns(2)
processed_col.write(processed_file_details)
if file_extension in ("mov", "mp4", "avi"):
file.video(processed_file_path)
else:
file.image(processed_file_path)
def display_processed_file(processed_file_path, processed_files):
''' Displays the processed file in web app '''
if not processed_files:
display_msg("No files processed yet.", -1)
elif len(processed_files) == 1:
file = processed_files[0]
processed_filename, processed_file_ext = os.path.basename(processed_file_path + file).split('.')
processed_file_details = {
"File Name": processed_filename,
"File Type": processed_file_ext,
"File Size": str(round(os.path.getsize(processed_file_path + file) / 1e6, 2)) + " MB"
}
if processed_file_ext in ("mov", "mp4", "avi"):
get_file_details(processed_file_path + file, processed_file_details,
processed_file_ext)
else:
get_file_details(processed_file_path + file, processed_file_details,
processed_file_ext)
else:
display_msg("You can only process and download a single file at a time", 0)
shutil.rmtree(processed_file_path)
def display_info_sections():
''' Displays the instructions and about section '''
help_expander = st.beta_expander("Instructions")
about_expander = st.beta_expander("About")
instructions = '''
* **Choose or Drag N Drop** a file to upload. For a video file, the duration must not exceed
the 30 seconds mark.
* Once uploaded, select the enhacements you would like to apply from the sidebar.
* Click on the **Generate** button and wait for the processing to complete.
* Once completed, click on the **Proceed to Download** button to generate the output.
* Wait for the processing to complete **(notice the RUNNING status on the top right hand corner)**.
Once completed, expand the **Output Section** section to view the output.
* If satisfied with the results:
- If the output is an image file then right click on the file and
select the **Save as** option to download the output ( This applies to
GIFs and Boomerangs as well ).
- If it is a video file then click on the **Three dots** present at
the bottom right corner inside the video player and select **Download**.
'''
about = '''
Made By <span style="color:seagreen">**Abhishek Dobliyal**</span> :smile:
[](https://github.com/Abhishek-Dobliyal)
[![Linkedin: Abhishek Dobliyal]
(https://img.shields.io/badge/-AbhishekDobliyal-blue?style=flat-square&logo=Linkedin&logoColor=white&link=https://www.linkedin.com/in/abhishek-dobliyal-4474061b7/)]
(https://www.linkedin.com/in/abhishek-dobliyal-4474061b7)
#### Modules Utilized:
- <a style="text-decoration: none;" href="http://streamlit.io/" target="blank_"> Streamlit </a>
- <a style="text-decoration: none;" href="https://opencv.org/releases/" target="blank_"> OpenCV </a>
- <a style="text-decoration: none;" href="https://zulko.github.io/moviepy/" target="blank_"> MoviePy </a>
- <a style="text-decoration: none;" href="https://pillow.readthedocs.io/en/3.0.x/index.html" target="blank_"> Pillow </a>
- <a style="text-decoration: none;" href="https://numpy.org/" target="blank_"> NumPy </a>
#### Hope You Like It :blush:
'''
with help_expander: # Instructions
help_expander.markdown(instructions, unsafe_allow_html=True)
with about_expander: # About
about_expander.markdown(about, unsafe_allow_html=True)
# Main App
def main():
    """Entry point: render the MonoShot Streamlit web app.

    Flow: upload a media file -> pick an enhancement in the sidebar ->
    Generate -> optionally download the processed output.
    """
    # Set Page name and favicon
    ICON = "./assets/favicon.png"
    st.set_page_config(page_title="MonoShot", page_icon=ICON)
    # Title banner: inline CSS injected as raw HTML via st.markdown
    TITLE_STYLE = '''
    border: 3px solid #9ab7d6;
    border-radius: 15px;
    text-align: center;
    background: rgb(220, 235, 237);
    color: white;
    font-family: Futura, sans-serif;
    color: pink;
    text-shadow: #000 0px 0px 1px;
    '''
    title = f'''
    <div style="{TITLE_STYLE}">
    <h1> MONOSHOT </h1>
    </div>
    '''
    st.markdown(title, unsafe_allow_html=True)  # Render the HTML/CSS
    # Header
    HEADER = "Just a single shot that's all it takes."
    st.subheader(HEADER)
    # File Upload and Selections
    PROCESSED_DATA_PATH = "./processed_data/"
    uploaded_file = st.file_uploader("Upload your file here:",
                                     type=[".mp4", ".avi", ".mov", ".jpeg", ".jpg", ".png"])
    if uploaded_file:
        file_data = get_file_data(uploaded_file)
        # Processor object for applying diff. methods to the media file
        processor = FileProcessor(file_data)
        if uploaded_file.type in ("video/mp4", "video/mov", "video/avi"):
            # ---- Video branch ----
            if not os.path.isdir(PROCESSED_DATA_PATH):
                os.mkdir(PROCESSED_DATA_PATH)
            video_duration = processor.get_duration()
            has_required_dim = processor.get_dimensions()
            if video_duration > 30:
                # Hard limit: clips longer than 30 seconds are rejected
                display_msg("Oops! Video too long to be processed!", -1)
            elif not has_required_dim:
                display_msg("Oops! Allowed Resolutions are: 360p, 480p, 720p and 1080p", -1)
            else:
                # SideBar Widgets
                select_options = ["Enhance Image", "Generate Shot"]
                select_output = st.sidebar.selectbox("Select Enhancement:", select_options)
                if select_output:
                    time.sleep(1.5)
                    if select_output == "Enhance Image":
                        # Grab one frame and apply PIL-style enhancement levels
                        # (1.0 means "unchanged" for every slider).
                        time_stamp = st.sidebar.slider("Choose the time stamp (in seconds)",
                                                       min_value=1, max_value=video_duration,
                                                       step=1)
                        brightness_lvl = st.sidebar.slider("Brightness:",
                                                           min_value=0.0, max_value=2.0,
                                                           step=0.2, value=1.0)
                        sharpness_lvl = st.sidebar.slider("Sharpness:",
                                                          min_value=0.0, max_value=2.0,
                                                          step=0.2, value=1.0)
                        contrast_lvl = st.sidebar.slider("Contrast:",
                                                         min_value=0.0, max_value=2.0,
                                                         step=0.2, value=1.0)
                        color_lvl = st.sidebar.slider("Color:",
                                                      min_value=0.0, max_value=2.0,
                                                      step=0.2, value=1.0)
                        if st.sidebar.button("Generate"):
                            # Timestamp should be in milliseconds
                            processor.enhanced_img(PROCESSED_DATA_PATH, time_stamp*1000,
                                                   brightness_lvl, sharpness_lvl,
                                                   contrast_lvl, color_lvl)
                            display_progress_bar()
                            display_msg("Enhanced Image has been successfully generated.", 1)
                    elif select_output == "Generate Shot":
                        display_msg("NOTE: It may take a while to generate a shot.", 0)
                        shot_options = ["SlowMo", "TimeLapse", "GIF", "Boomerang"]
                        shot = st.sidebar.selectbox("Select Shot:", shot_options)
                        if shot == "SlowMo" and st.sidebar.button("Generate"):
                            processor.generate_shot(PROCESSED_DATA_PATH, slowmo=True)
                            display_progress_bar()
                            display_msg("SlowMo has been successfully generated.", 1)
                        elif shot == "TimeLapse" and st.sidebar.button("Generate"):
                            processor.generate_shot(PROCESSED_DATA_PATH, timelapse=True)
                            display_progress_bar()
                            display_msg("TimeLapse has been successfully generated.", 1)
                        elif shot == "GIF" and st.sidebar.button("Generate"):
                            processor.generate_shot(PROCESSED_DATA_PATH, gif=True)
                            display_progress_bar()
                            display_msg("GIF has been successfully generated.", 1)
                        elif shot == "Boomerang":
                            # Boomerang needs a sub-clip; end is forced >= start + 2 s
                            start_time = st.sidebar.slider("Choose the start time (in seconds):",
                                                           min_value=1, max_value=video_duration,
                                                           step=1)
                            end_time = st.sidebar.slider("Choose the end time (in seconds):",
                                                         min_value=start_time + 2, max_value=video_duration,
                                                         step=1)
                            if st.sidebar.button("Generate"):
                                processor.generate_shot(PROCESSED_DATA_PATH,
                                                        boomerang=(True, start_time, end_time))
                                display_progress_bar()
                                display_msg("Boomerang has been successfully generated.", 1)
                else:
                    display_msg("Please select atleast a single option to proceed.", 0)
        else:
            # ---- Image branch ----
            if not os.path.isdir(PROCESSED_DATA_PATH):
                os.mkdir(PROCESSED_DATA_PATH)
            select_options = ["Enhance Resolution", "Apply Filter", "Extract Text"]
            select_output = st.sidebar.selectbox("Select Enhancement:", select_options)
            if select_output:
                time.sleep(1.5)
                if select_output == "Enhance Resolution":
                    if st.sidebar.button("Generate"):
                        display_msg("Please wait. It may take a while to enhance the resolution...", 0)
                        processor.enhance_resolution(PROCESSED_DATA_PATH)
                        display_progress_bar()
                        display_msg("Enhanced Resolution Image has been successfully generated.", 1)
                elif select_output == "Apply Filter":
                    select_options = ["Pencil Sketch", "Water Colored", "Faded", "Document",
                                      "Cartoonify", "Vigenette", "Phantom", "Negative"]
                    select_output = st.sidebar.selectbox("Select a filter:", select_options)
                    if st.sidebar.button("Generate"):
                        processor.apply_filter(PROCESSED_DATA_PATH, filter=select_output)
                        display_progress_bar()
                        display_msg("Filter has been successfully applied.", 1)
                elif select_output == "Extract Text":
                    # OCR path: result is shown inline, not written to disk
                    if st.sidebar.button("Fetch Text"):
                        txt = processor.extract_txt()
                        if txt:
                            display_msg("Text extraction successful.", 1)
                            text_expander = st.beta_expander("Output Section")
                            with text_expander:
                                st.text("*" * 70)
                                st.write(txt)
                                st.text("*" * 70)
                        else:
                            display_msg("Failed to extract text!", -1)
            else:
                display_msg("Please select atleast a single option to proceed.", 0)
        # Delete the temp file after all the operations.
        file_data.close()
        shutil.rmtree("./temp_files")
        helper_widget = st.empty()
        processed_files = os.listdir(PROCESSED_DATA_PATH)
        if processed_files:
            if helper_widget.button("Proceed to Download"):
                display_processed_file(PROCESSED_DATA_PATH, processed_files)
    # Help/About expanders are rendered regardless of upload state.
    display_info_sections()


if __name__ == '__main__':
    main()
|
[
"noreply@github.com"
] |
Abhishek-Dobliyal.noreply@github.com
|
32ab833d177d77afda396b1be367557bb9ddf33f
|
d315461033c69c8055aba6ac90b4053886a16afa
|
/local/common/internet_active.py
|
7d949f6f320828be6e81e054578af3cde4b6eefc
|
[] |
no_license
|
dhhjjdfg/GotoX
|
5855653d4b100bef2269b5fba8cdf422e3583fc6
|
6fee4d20e109fbbe0c9d9bd2d29a6cb2d3873948
|
refs/heads/master
| 2023-03-10T10:20:09.687538
| 2021-02-26T05:14:54
| 2021-02-26T05:14:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,814
|
py
|
# coding:utf-8
#ๆญค่ๆฌ้่ฟๅๅ่งฃๆๆต่ฏ็ฝ็ป็ถๆ๏ผไธๆฏๆ UDP ็ๅ็ซฏไปฃ็ๆ ๆณไฝฟ็จ
#ๅณไฝฟๅ็ฝฎไปฃ็ๆฏๆ UDP๏ผ่ฟ้่ฆไฟฎๆนๅฅๆฅๅญไฝฟ็จไปฃ็
import os
import queue
import socket
import random
import dnslib
import logging
import collections
from time import time, sleep
from select import select
from threading import _start_new_thread as start_new_thread
from .net import isipv4, isipv6, get_wan_ipv6
from .path import get_dirname
from .util import spawn_loop
from local.GlobalConfig import GC
#็ฝ็ปๆต่ฏ่ฆๆฑ็จณๅฎใๅฟซ้๏ผๆไปฅ้ๅฝๅ
็ DNS IP
# Network probing must be stable and fast, hence domestic (CN) public DNS IPs.
dns_ips_v4 = (
    # China Telecom 114DNS
    # https://www.114dns.com/
    '114.114.114.114',
    '114.114.115.115',
    '114.114.114.110',
    '114.114.115.110',
    '114.114.114.119',
    '114.114.115.119',
    # AliDNS
    # http://www.alidns.com/
    '223.5.5.5',
    '223.6.6.6',
    # Baidu
    # http://dudns.baidu.com/intro/publicdns/
    '180.76.76.76',
    # Tencent DNSPod
    # https://www.dnspod.cn/Products/Public.DNS
    '119.28.28.28',
    '119.29.29.29',
    '182.254.116.116',
    '182.254.118.118',
    # DNSPai
    # http://www.dnspai.com/public.html
    '101.226.4.6',
    '218.30.118.6',
    '123.125.81.6',
    '140.207.198.6',
    # OneDNS
    # http://www.onedns.net/
    '117.50.11.11',
    '117.50.22.22',
    '112.124.47.27',
    '114.215.126.16',
    # CNNIC SDNS
    # http://public.sdns.cn/emergency_services.shtml
    '1.2.4.8',
    '210.2.4.8',
)
dns_ips_v6 = (
    # CFIEC
    # http://www.cfiec.net/dns/s/?978.html
    # http://www.chinaipv6.com.cn/
    '240c::6666',
    '240c::6644',
    # CNNIC
    '2001:dc7:1000::1',
    # Tsinghua University TUNA Association
    # https://tuna.moe/help/dns/
    '2001:da8::666',
    # Nanjing University of Science and Technology
    '2001:da8:208:10::6',  # always answers REFUSED
    # Shanghai Jiao Tong University
    # http://ipv6.sjtu.edu.cn/dns.php
    '2001:da8:8000:1:202:120:2:100',  # always answers REFUSED
    '2001:da8:8000:1:202:120:2:101',  # always answers REFUSED
    # Beijing University of Posts and Telecommunications
    '2001:da8:202:10::36',
    '2001:da8:202:10::37',
    # Baidu
    # http://dudns.baidu.com/intro/publicdns/
    '2400:da00::6666',
)
# Worldwide resolvers, used when IPv6 runs over a Teredo/6to4 tunnel.
dns_ips_v6w = (
    # Google
    # https://developers.google.com/speed/public-dns/docs/using
    '2001:4860:4860::8888',
    '2001:4860:4860::8844',
    # OpenDNS
    # https://www.opendns.com/about/innovations/ipv6/
    '2620:119:35::35',
    '2620:119:53::53',
    # Cloudflare
    # https://developers.cloudflare.com/1.1.1.1/setting-up-1.1.1.1/
    '2606:4700:4700::1111',
    '2606:4700:4700::1001',
    # Quad9
    # https://www.quad9.net/faq/
    '2620:fe::fe',
    '2620:fe::9',
    # Neustar UltraDNS
    # https://www.security.neustar/digital-performance/dns-services/recursive-dns
    '2610:a1:1018::1',
    '2610:a1:1019::1',
    '2610:a1:1018::5',
    '2610:a1:1019::5',
    #'2610:a1:1018::2',
    #'2610:a1:1019::2',
    #'2610:a1:1018::3',
    #'2610:a1:1019::3',
    #'2610:a1:1018::4',
    #'2610:a1:1019::4',
)
def read_domains(file):
    """Load unique domain names from *file*, one per line.

    Lines whose first character is ``#`` or ``;`` are treated as comments
    and skipped; surrounding whitespace is stripped and blank lines are
    ignored.  Returns the domains as a list (order unspecified).
    """
    seen = set()
    with open(file, 'r') as handle:
        for raw in handle:
            if raw.startswith(('#', ';')):
                continue
            name = raw.strip()
            if name:
                seen.add(name)
    return list(seen)
# domains.txt (the probe-domain list) lives next to this module.
current_dir = get_dirname(__file__)
domains_file = os.path.join(current_dir, 'domains.txt')
# Probe domains are loaded once at import time.
domains = read_domains(domains_file)
class InternetActiveCheck:
    """Probe network reachability for one address family (IPv4 or IPv6)
    by sending plain UDP DNS queries to well-known public resolvers.

    Checks go through a direct UDP socket, so (per the module header) a
    front-end proxy that cannot forward UDP is of no use here.
    """

    # Cap on how many pre-packed DNS question payloads are kept.
    max_qdata_num = 256
    # Number of resolvers tried per check; set by set_dns_servers().
    max_check_times = 0
    # When truthy (IPv6 fast-check mode), reachability is inferred from the
    # presence of a WAN IPv6 address instead of live DNS probing.
    only_check_ip = None

    def __init__(self, type, domains=domains):
        # NOTE(review): `type` shadows the builtin, and `domains` defaults to
        # the shared module-level list -- it is copied below before shuffling,
        # so the shared list itself is not mutated.
        self.in_check = False   # True while a probe pass is running
        self.last_stat = None   # last result: 1 = up, 0 = down, None = unknown
        self.qdata = None       # DNS payload used for the current pass
        self._dns_servers = None
        if type.lower() == 'ipv4':
            self.type = 'IPv4'
            self.set_dns_servers(dns_ips_v4)
        elif type.lower() == 'ipv6':
            self.type = 'IPv6'
            self.only_check_ip = GC.LINK_FASTV6CHECK
            self.set_dns_servers_v6()
            # Re-evaluate IPv6 connectivity flavour every 10 seconds.
            spawn_loop(10, self.set_dns_servers_v6)
        # Pre-pack at most max_qdata_num DNS questions from a shuffled copy.
        domains = domains.copy()
        random.shuffle(domains)
        del domains[self.max_qdata_num:]
        self.qdata_list = collections.deque(dnslib.DNSRecord.question(qname).pack() for qname in domains)
        self.sock = socket.socket(socket.AF_INET if self.type == 'IPv4' else socket.AF_INET6, socket.SOCK_DGRAM)

    def set_dns_servers(self, dns_ips):
        """Install a shuffled (ip, 53) resolver list used for probing."""
        dns_servers = [(ip, 53) for ip in dns_ips]
        random.shuffle(dns_servers)
        self.max_check_times = len(dns_servers)
        self._dns_servers = dns_servers
        # Working copy; rebuilt from _dns_servers at the start of each pass.
        self.dns_servers = None

    def set_dns_servers_v6(self):
        """Detect the current IPv6 connectivity flavour and pick resolvers.

        Teredo/6to4 tunnels get the worldwide list (dns_ips_v6w); native
        connectivity gets the domestic list (dns_ips_v6).  In fast-check
        mode (only_check_ip) this method also maintains last_stat directly.
        """
        if '6' not in GC.LINK_PROFILE:
            return
        addr6 = get_wan_ipv6()
        if addr6:
            if addr6.teredo:
                if self.type != 'IPv6 Teredo':
                    if self.type != 'IPv6':
                        logging.warning('ๆฃๆตๅฐ IPv6 ็ฝ็ปๅๅจ๏ผๅฝๅไฝฟ็จ Teredo ้ง้๏ผIP๏ผ%s', addr6)
                    self.type = 'IPv6 Teredo'
                if not (self._dns_servers or self.only_check_ip):
                    self.set_dns_servers(dns_ips_v6w)
            elif addr6.sixtofour:
                if self.type != 'IPv6 6to4':
                    if self.type != 'IPv6':
                        logging.warning('ๆฃๆตๅฐ IPv6 ็ฝ็ปๅๅจ๏ผๅฝๅไฝฟ็จ 6to4 ้ง้๏ผIP๏ผ%s', addr6)
                    self.type = 'IPv6 6to4'
                if not (self._dns_servers or self.only_check_ip):
                    self.set_dns_servers(dns_ips_v6w)
            else:
                if self.type != 'IPv6 Global':
                    if self.type != 'IPv6':
                        logging.warning('ๆฃๆตๅฐ IPv6 ็ฝ็ปๅๅจ๏ผๅฝๅไฝฟ็จๅ็็ฝ็ป๏ผIP๏ผ%s', addr6)
                    self.type = 'IPv6 Global'
                if not (self._dns_servers or self.only_check_ip):
                    self.set_dns_servers(dns_ips_v6)
            # Fast-check mode: having a WAN IPv6 address counts as "up".
            if self.only_check_ip and self.last_stat != 1:
                if self.last_stat is not None:
                    logging.warning('IPv6 ็ฝ็ปๆขๅค่ฟๆฅ')
                self.last_stat = 1
        else:
            # No WAN IPv6 address: mark down (fast-check) and drop resolvers.
            if self.only_check_ip and self.last_stat != 0:
                logging.error('IPv6 ็ฝ็ป็ฐๅจไธๅฏ็จ๏ผๅฐๆฏ 10 ็งๆฃๆตไธๆฌกโฆโฆ')
                self.last_stat = 0
            self._dns_servers = None
        return

    def is_active(self, keep_on=None):
        """Return 1 if the network answers DNS probes, else 0.

        keep_on: when truthy, keep retrying (sleeping between rounds)
        until the network comes back, instead of failing after one full
        pass over the resolver list.
        """
        if self.only_check_ip:
            # Fast-check mode: result is maintained by set_dns_servers_v6().
            while keep_on and not self.last_stat:
                sleep(5)
            return self.last_stat
        # If another thread is already probing, wait for its result.
        time_pass = 0
        while self.in_check:
            sleep(0.01)
            time_pass += 0.01
            if time_pass > 10:
                if not keep_on:
                    break
                time_pass = 0.01
        if time_pass:
            return self.last_stat
        self.in_check = True
        ok = None
        haserr = None
        ins = True
        sent = []
        check_times = 0
        try:
            # Drain any stale responses left in the socket buffer.
            while ins:
                ins, _, _ = select([self.sock], [], [], 0)
                if ins:
                    self.sock.recvfrom(512)
            while ok is None:
                check_times += 1
                if check_times > self.max_check_times:
                    # Exhausted the whole resolver list without an answer.
                    if not haserr:
                        if not keep_on:
                            ok = False
                            break
                        haserr = True
                        try:
                            keep_on = abs(int(keep_on))
                        except:
                            keep_on = 10  # default retry interval, seconds
                        logging.error('%s ็ฝ็ป็ฐๅจไธๅฏ็จ๏ผๅฐๆฏ %d ็งๆฃๆตไธๆฌกโฆโฆ', self.type, keep_on)
                    sleep(keep_on)
                    if self._dns_servers is None:
                        # Resolver list was withdrawn (IPv6 lost); keep waiting.
                        check_times = self.max_check_times
                        continue
                if not self.dns_servers:
                    # New pass: refresh the working resolver list and rotate
                    # to the next pre-packed DNS question payload.
                    self.dns_servers = self._dns_servers.copy()
                    self.qdata = self.qdata_list.pop()
                    self.qdata_list.appendleft(self.qdata)
                dns_server = self.dns_servers.pop()
                try:
                    self.sock.sendto(self.qdata, dns_server)
                    sent.append(dns_server)
                    ins, _, _ = select([self.sock], [], [], 0.5)
                    if ins:
                        _, peername = self.sock.recvfrom(512)
                        # Accept only replies coming from a resolver we queried.
                        if peername[:2] in sent:
                            ok = True
                except:
                    pass
        except:
            pass
        finally:
            self.last_stat = int(ok is True)
            self.in_check = False
            if haserr:
                logging.warning('%s ็ฝ็ปๆขๅค่ฟๆฅ', self.type)
        return self.last_stat
return self.last_stat
# Module-level singletons: one checker per address family.
internet_v4 = InternetActiveCheck('ipv4')
internet_v6 = InternetActiveCheck('ipv6')
# Pool of recycled LifoQueue objects used by is_active() to collect results.
# NOTE(review): queue.deque is an undocumented re-export of collections.deque.
qobj_cache = queue.deque()
def _is_active(type, qobj, keep_on):
    """Thread worker: run one family's check (4 or 6) and push the result
    onto *qobj*.  Raises NameError for any other *type* value; callers in
    this module only ever pass 4 or 6.
    """
    if type == 4:
        stat = internet_v4.is_active(keep_on)
    elif type == 6:
        stat = internet_v6.is_active(keep_on)
    qobj.put(stat)
def is_active(type='ipv4', keep_on=None):
    """Check connectivity for 'ipv4', 'ipv6', 'ipv46' (both) or a literal IP.

    Returns 1 when every requested family is up, 0 when any is down, and
    None (after logging an error) for an unrecognised *type*.  With
    keep_on set, returns as soon as the first family reports success.
    """
    stat = 1
    n = 0
    # Reuse a LifoQueue from the cache to avoid per-call allocation.
    try:
        qobj = qobj_cache.pop()
        qobj.queue.clear()
    except IndexError:
        qobj = queue.LifoQueue()
    # Fan out one worker thread per requested address family.
    if type.lower() in ('ipv4', 'ipv46') or isipv4(type):
        start_new_thread(_is_active, (4, qobj, keep_on))
        n += 1
    if type.lower() in ('ipv6', 'ipv46') or isipv6(type):
        start_new_thread(_is_active, (6, qobj, keep_on))
        n += 1
    for _ in range(n):
        _stat = qobj.get()
        if _stat and keep_on:
            # keep_on mode: first success wins.  NOTE(review): the other
            # worker may still be running and qobj is not returned to the
            # cache on this path.
            return _stat
        stat &= _stat
    qobj_cache.append(qobj)
    if n:
        return stat
    else:
        logging.error('is_active๏ผ้่ฏฏ็ type ๅๆฐ๏ผ%s', type)
|
[
"Seahoh@Gmail.com"
] |
Seahoh@Gmail.com
|
3237faa7ea52ea2f166108e843853e2fa401b89f
|
57d7c6ef3e16999661ef959336158a833fc83c8c
|
/competitive level/TuxTiepattern1.py
|
85fd88c6742865912da6a030176bf79447cd43f9
|
[] |
no_license
|
Sevansu/PythonBasicPrograms
|
c60402bd7d2d469337ff58241532450d0ba38da2
|
3367140193844091eb9d4e43ea014c2176721543
|
refs/heads/master
| 2021-06-26T14:01:43.105778
| 2020-12-15T10:15:40
| 2020-12-15T10:15:40
| 183,554,250
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 363
|
py
|
# Reads an odd number n and prints an n x n checkerboard-textured
# hourglass/bow-tie star pattern; any bad input falls through to the
# error message.
try:
    n = abs(int(input("Enter number of lines:")))
    if n & 1:  # pattern is only defined for odd n
        for i in range(1, n + 1):
            for j in range(1, n + 1):
                # Row membership: in the top half (i <= (n+1)/2) a cell is lit
                # outside the shrinking middle gap; the bottom half mirrors it.
                # The (i&1)==(j&1) parity test adds the checkerboard texture.
                print([' ', '*'][(j <= i or j > n - i if i <= (n + 1) / 2 else j <= n - i + 1 or j >= i) and (i & 1) == (j & 1)], end=' ')
            print('')
    else:
        print("Invalid input. Input must be an odd number")
except:
    print("Invalid input. Input must be an odd number")
|
[
"noreply@github.com"
] |
Sevansu.noreply@github.com
|
481377a716123168e4dd9980470b57b9c3cd8ce8
|
d25d42d61c450a0a9c337861069df97660df8f39
|
/datacleaning.py
|
35a0ad14d55917f3e29948e78c3b8a64524e425a
|
[] |
no_license
|
zehaowork/Open-Data-Innovation-CW1
|
e90d3577e9223319c15014c81d9b71ddb2dbc8f4
|
2f3611fc58125ca63977fcbe05fa2ced1b914dba
|
refs/heads/main
| 2023-03-28T15:08:19.126606
| 2021-03-31T18:29:10
| 2021-03-31T18:29:10
| 344,849,819
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 124
|
py
|
"""Print the 'Sample Size' sheet of the CW1 COVID-19 business-impact workbook."""
import pandas as pd

# BUG FIX: a pd.ExcelFile object is not subscriptable -- the original
# `pd.ExcelFile(...)['Sample Size']` raised TypeError at runtime.
# ExcelFile.parse() reads the named sheet into a DataFrame.
workbook = pd.ExcelFile('CW1-BusinessImpactsOfCovid19Data.xlsx')
sample_size = workbook.parse('Sample Size')
print(sample_size)
|
[
"zehaowork@outlook.com"
] |
zehaowork@outlook.com
|
bb10f37c105b960f6047c77acfdca615d5446032
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/nTW4KgmJxpLDXcWPt_6.py
|
a0d819f3ad17c6e5031c3261795b415657d3c1e2
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 507
|
py
|
"""
Write a function that moves all elements of one type to the **end** of the
list.
### Examples
move_to_end([1, 3, 2, 4, 4, 1], 1) โ [3, 2, 4, 4, 1, 1]
# Move all the 1s to the end of the array.
move_to_end([7, 8, 9, 1, 2, 3, 4], 9) โ [7, 8, 1, 2, 3, 4, 9]
move_to_end(["a", "a", "a", "b"], "a") โ ["b", "a", "a", "a"]
### Notes
Keep the order of the un-moved items the same.
"""
def move_to_end(lst, el):
    """Return a copy of *lst* with every occurrence of *el* moved to the end.

    The relative order of the un-moved items is preserved.

    >>> move_to_end([1, 3, 2, 4, 4, 1], 1)
    [3, 2, 4, 4, 1, 1]
    """
    kept, moved = [], []
    for item in lst:
        (moved if item == el else kept).append(item)
    return kept + moved
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
67128f9bdccd6db6951e2c77216a82f0ca32a684
|
fbf3d58c8410cc3bd5ef28e03576550f137f2b44
|
/voting-client/node_modules/webpack-dev-server/node_modules/socket.io/node_modules/engine.io/node_modules/ws/node_modules/bufferutil/build/config.gypi
|
99f3b3c25007889f1fec4c9214c0f52984300397
|
[
"MIT"
] |
permissive
|
SpencerCDixon/React-Redux-Voting
|
7fba4b6b34c07ff96bd1eeef7bcacbc34a1f1af3
|
f515c5dbf6bd53753fb90791459727a9cb240e39
|
refs/heads/master
| 2016-09-06T04:29:49.661213
| 2015-09-22T13:24:13
| 2015-09-22T13:24:13
| 42,935,102
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,750
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 1,
"host_arch": "x64",
"icu_data_file": "icudt54l.dat",
"icu_data_in": "../../deps/icu/source/data/in/icudt54l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "./deps/icu",
"icu_small": "true",
"icu_ver_major": "54",
"node_install_npm": "true",
"node_prefix": "",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_mdb": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"openssl_no_asm": 0,
"python": "/usr/bin/python",
"target_arch": "x64",
"uv_library": "static_library",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "true",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_random_seed": 0,
"v8_use_snapshot": "false",
"want_separate_host_toolset": 0,
"nodedir": "/Users/spencerdixon/.node-gyp/0.12.7",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"save_dev": "true",
"browser": "",
"viewer": "man",
"rollback": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"shell": "/bin/zsh",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"fetch_retries": "2",
"npat": "",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"spin": "true",
"cache_lock_retries": "10",
"cafile": "",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"proprietary_attribs": "true",
"access": "",
"json": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/spencerdixon/.npm-init.js",
"userconfig": "/Users/spencerdixon/.npmrc",
"node_version": "0.12.7",
"user": "501",
"save": "true",
"editor": "mvim",
"tag": "latest",
"global": "",
"optional": "true",
"bin_links": "true",
"force": "",
"searchopts": "",
"depth": "Infinity",
"rebuild_bundle": "true",
"searchsort": "name",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"strict_ssl": "true",
"tag_version_prefix": "v",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"version": "",
"cache_min": "10",
"cache": "/Users/spencerdixon/.npm",
"searchexclude": "",
"color": "true",
"save_optional": "",
"user_agent": "npm/2.11.3 node/v0.12.7 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"init_version": "1.0.0",
"umask": "0022",
"git": "git",
"init_author_name": "",
"scope": "",
"onload_script": "",
"tmp": "/var/folders/9f/n_j80xgs5w57sj6bf6b9tw7w0000gn/T",
"unsafe_perm": "true",
"link": "",
"prefix": "/usr/local"
}
}
|
[
"spencercdixon@gmail.com"
] |
spencercdixon@gmail.com
|
9393c39a5b7513fadd41c4b6eee020413957d027
|
1ec436504cb7dbd4796931cf64faa333c827b6e4
|
/flask_service/runSimulation.py
|
d0eb5a21be3bb17ddf780034c327c1c1c69dd9e4
|
[] |
no_license
|
IDPSGitHubProjects/SIM_app
|
540cf479fa3b28817a624257bb0f7f8740caa2e2
|
58caab39f5a4ea920bf6a53b338a9674fbe8c46b
|
refs/heads/master
| 2023-04-14T14:41:52.434085
| 2021-05-02T04:20:05
| 2021-05-02T04:20:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,414
|
py
|
import pandas as pd
import json,sys
import numpy as np
class SpaceNode:
    """Node payload describing a physical space (room/area) of the home."""

    def __init__(self, data):
        # print(data)
        self.name = data['spaceName']
        self.area = float(data['area'])    # floor area
        self.level = int(data['level'])    # storey/floor number
        self.window = data['window']       # whether the space has a window
        self.occupied = data['occupied']   # whether somebody is in the space
        self.contains = []                 # devices placed inside this space
        self.nextElement = None            # not referenced elsewhere in this file
class DeviceNode:
    """Node payload describing a smart device placed in a space."""

    def __init__(self, data):
        self.name = data['deviceName']
        self.type = data['type']
        self.placement = data['placement']     # name of the space holding the device
        self.networks = data['networks']       # communication protocols it speaks
        self.visibility = data['visibility']   # areas the device is visible from
        self.monitoring = data['monitoring']   # space names the device monitors
        self.nextElement = None                # not referenced elsewhere in this file
class Vertex:
    """Graph vertex wrapping a SpaceNode/DeviceNode payload.

    Carries Dijkstra-style bookkeeping (distance/visited/previous), though
    RunSim.generatePaths() only uses the `visited` flag.
    """

    def __init__(self, node):
        self.id = node.name
        self.data = node
        self.adjacent = {}   # neighbor Vertex -> edge weight/cost
        # Set distance to infinity for all nodes
        # self.distance = float('inf')
        self.distance = 100  # finite stand-in for "infinity"
        # Mark all nodes unvisited
        self.visited = False
        # Predecessor
        self.previous = None

    def add_neighbor(self, neighbor, weight=0):
        self.adjacent[neighbor] = weight

    def get_connections(self):
        # Returns the neighboring Vertex objects (dict keys view).
        return self.adjacent.keys()

    def get_id(self):
        return self.id

    def get_weight(self, neighbor):
        return self.adjacent[neighbor]

    def set_distance(self, dist):
        self.distance = dist

    def get_distance(self):
        return self.distance

    def set_previous(self, prev):
        self.previous = prev

    def set_visited(self):
        self.visited = True

    def __str__(self):
        return str(self.id) + ' adjacent: ' + str([x.id for x in self.adjacent])
class Graph:
    """Graph keyed by vertex name; supports undirected and directed edges."""

    def __init__(self):
        self.vert_dict = {}     # name -> Vertex
        self.num_vertices = 0

    def __iter__(self):
        return iter(self.vert_dict.values())

    def add_vertex(self, node):
        """Wrap *node* (Space/DeviceNode) in a Vertex and register it by name."""
        self.num_vertices = self.num_vertices + 1
        new_vertex = Vertex(node)
        self.vert_dict[node.name] = new_vertex  # name to vertex mapping
        return new_vertex

    def get_vertex(self, n):
        if n in self.vert_dict:
            return self.vert_dict[n]
        else:
            return None

    def add_edge(self, frm, to, cost = {}):
        """Add an undirected edge between the named vertices.

        NOTE(review): the two "missing vertex" guards below look broken --
        `self.vert_dict[frm]` raises KeyError exactly when `frm` is absent,
        so they can never add the vertex.  Works only because callers add
        all vertices before any edges; confirm before relying on the guard.
        NOTE(review): `cost={}` is a shared mutable default.
        """
        if frm not in self.vert_dict:
            self.add_vertex(self.vert_dict[frm])
        if to not in self.vert_dict:
            self.add_vertex(self.vert_dict[to])
        self.vert_dict[frm].add_neighbor(self.vert_dict[to], cost)
        self.vert_dict[to].add_neighbor(self.vert_dict[frm], cost)

    def add_directed_edge(self, frm, to):
        """Add a one-way edge frm -> to (same KeyError caveat as add_edge)."""
        if frm not in self.vert_dict:
            self.add_vertex(self.vert_dict[frm])
        self.vert_dict[frm].add_neighbor(self.vert_dict[to])

    def get_vertices(self):
        return self.vert_dict.keys()

    def set_previous(self, current):
        # NOTE(review): stores on the Graph, not on a Vertex -- looks vestigial.
        self.previous = current

    def get_previous(self, current):
        return self.previous
class RunSim:
    """Runs the attack-path simulation over the space/device graph."""

    def generatePaths(self, u, d):
        """Return every simple path (as a list of vertex names) from Vertex
        *u* to Vertex *d*, via exhaustive DFS using the `visited` flags."""
        res = []
        def dfs(u, d, path):
            u.visited = True
            path.append(u.data.name)
            # If current vertex is same as destination, then print
            if u == d:
                # print(path)
                res.append(list(path))
            else:
                # If current vertex is not destination
                # Recur for all the vertices adjacent to this vertex
                for i in u.get_connections():
                    if i.visited == False:
                        dfs(i, d, path)
            # Remove current vertex from path[] and mark it as unvisited
            path.pop()
            u.visited = False
        dfs(u, d, [])
        return res

    def calculate1(self, df_attack, paths, g):
        """Scan each candidate path for the first breachable node and format
        the result as "node : difficulty : attack_type : path".

        df_attack: attack-model DataFrame (columns include 'Attacked device',
        'Communication protocol', 'Constraint', 'Attack difficulty',
        'Attack Vector', 'Testing Device').
        Returns the deduplicated list of formatted attack-path strings.
        """
        all_paths = []
        for path in paths:
            # breach_time = 0
            breach_difficulty = ""
            attack_type = ""
            result_path = "Outside, "
            breached_node = ""
            breached = False
            for i in range(len(path)):
                curr = path[i]
                # Strip digits so e.g. "camera 1" matches the generic model row.
                device_generic_name = ''.join([i for i in curr if not i.isdigit()]).strip()
                curr_node = g.vert_dict[curr]
                # if the curr node is space node
                if curr_node.data.__class__.__name__ == 'SpaceNode':
                    # check all its connections to see if there is any monitoring device
                    monitored = False
                    # check if the space has windows or is occupied
                    if curr_node.data.window == False or curr_node.data.occupied == True:
                        continue
                    for conn in g.vert_dict[curr].get_connections():
                        if conn.data.__class__.__name__ == 'SpaceNode':
                            continue
                        if curr in conn.data.monitoring:
                            # print("monitored")
                            monitored = True
                            break
                    if not monitored:
                        # Unmonitored windowed, unoccupied space: easy physical entry.
                        result_path += curr + " window, "
                        breach_difficulty = "easy"
                        breached_node = curr
                        attack_type = "physical"
                # if the curr node is vulnerable to cyber attack
                elif device_generic_name in df_attack['Attacked device'].values.tolist():
                    df_curr_attack = df_attack[df_attack['Attacked device'] == device_generic_name]
                    list_index = []
                    curr_network = curr_node.data.networks
                    # Keep only attack rows sharing a protocol with the device.
                    for index, row in df_curr_attack.iterrows():
                        if len(set(row['Communication protocol']).intersection(set(curr_network))) > 0:
                            list_index.append(index)
                    df_filtered = df_curr_attack[df_curr_attack.index.isin(list_index)]
                    # print(df_filtered)
                    for index, row in df_filtered.iterrows():
                        # print(row)
                        if row['Constraint'] == 'visibility':
                            # print("visibility")
                            # Laser attack
                            if 'Outside' in curr_node.data.visibility:
                                if row['Testing Device'] in path[i+1:]:
                                    breach_difficulty = row['Attack difficulty']
                                    breached_node = curr
                                    result_path += curr + ", "
                                    attack_type = row['Attack Vector']
                                    for j in range(i+1, len(path)):
                                        result_path += path[j] + ", "
                                    # breach_time+=5
                                    breached = True
                                    break
                        elif np.isnan(row['Constraint']):
                            # Unconstrained attack row: breach immediately.
                            # print("else")
                            breach_difficulty = row['Attack difficulty']
                            breached_node = curr
                            result_path += curr + ", "
                            attack_type = row['Attack Vector']
                            for j in range(i+1, len(path)):
                                result_path += path[j] + ", "
                            # breach_time+=5
                            breached = True
                            break
                if breached:
                    break
            if breached_node != "":
                attack_path = breached_node + " : " + breach_difficulty + " : " + attack_type + " : " + result_path
                if attack_path not in all_paths:
                    all_paths.append(attack_path)
        return all_paths

    def tranformAttackPaths(self, data, attack):
        '''
        converts a string of attack path into a graph format which is compatible
        for D3 force directed graph viz
        input: data(json input graph), attack(string)
        output: output graph (json)
        '''
        # assigning groups to differentiate nodes with different colors
        for i in data['Spaces']:
            i['group'] = 'space' if i['spaceName'] != 'Outside' else 'Outside'
            i['name'] = i['spaceName']
        for i in data['Devices']:
            i['group'] = 'device'
            i['name'] = i['deviceName']
        final = {"vertices": data['Spaces'] + data['Devices']}
        # edges represent physical and cyber connections
        edges = []
        # adding all cyber connection
        for i in data['CyberConnections']:
            for j in i['targets']:
                edges.append({'source': j, 'target': i['sources'], 'distance': 30, 'group': 2})
        # adding all physical connection
        for i in data['PhysicalConnections']:
            edges.append({'source': i['sources'], 'target': i['targets'], 'distance': 30, 'group': 1})
        # adding all containment connections
        for i in data['Devices']:
            edges.append({'source': i['placement'], 'target': i['deviceName'], 'distance': 15, 'group': 3})
        # links show attack paths
        links = []
        # attack string format - "target_node:attack_difficulty:attack_type:attack_path"
        attack_split = attack.split(':')
        path_list = attack_split[-1].strip()[:-1].split(',')
        # for nodes in the attack path adjacent nodes become source and target respectively
        for i in range(len(path_list) - 1):
            # if there is a break-in through window, we extract the space name since nodes in graph
            # have space names
            source = path_list[i].split('window')[0].strip()
            target = path_list[i+1].split('window')[0].strip()
            # if "physical" in attack_split[2]:
            #     target = target.split(' ')[0]
            # assigning link distance as 20 (can be experimented with for better viz)
            # also initializing the attack type only for the first node which has been targeted
            # the attack type will be displayed on the link in the output graph
            links.append({'source': source, 'target': target, 'distance': 20, 'group': 4,
                          'attack': attack_split[2] + attack_split[1] if i == 0 else ''})
        # assigning the targeted node which will be highlighted in the graph
        for i in final['vertices']:
            if i['name'] == attack_split[0].strip():
                i['attacked'] = True
            else:
                i['attacked'] = False
        final['edges'] = edges
        final['links'] = links
        return final

    def main(self, data):
        """Build the graph from *data*, load the attack-model spreadsheet,
        enumerate all candidate paths and return {'attacks': [...]}.
        """
        g = Graph()
        # create space vertices
        for s in data['Spaces']:
            g.add_vertex(SpaceNode(s))
        # add space connections
        for pc in data['PhysicalConnections']:
            g.add_edge(pc['sources'], pc['targets'], pc)
        # create device vertices
        for d in data['Devices']:
            g.add_vertex(DeviceNode(d))
        # add device connections
        for cc in data['CyberConnections']:
            for target in cc['targets']:
                g.add_directed_edge(cc['sources'], target)
        # add edges between devices and spaces by checking where they are placed
        for d in data['Devices']:
            g.add_directed_edge(d['placement'], d['deviceName'])
        # reads the excel file in the current folder
        df_attack = pd.read_excel('attack_models.xls', sheet_name='Sheet2')
        # splits multiple values and converts it into a list
        df_attack['Communication protocol'] = df_attack['Communication protocol'].map(lambda x: x.split(','))
        df_attack['Smart home solution'] = df_attack['Smart home solution'].map(lambda x: x.split(','))
        # not taking outside node into consideration
        # NOTE(review): compares against lowercase 'outside' while spaces use
        # 'Outside' elsewhere in this file -- verify the filter actually fires.
        vertices = [v for v in g if v.data.name != 'outside']
        candidate_paths = []
        # for every combination of start and end nodes generate path.
        # TODO: logic can be improved
        for v in vertices:
            for u in vertices:
                temp = self.generatePaths(u, v)
                if temp: candidate_paths.extend(temp)
        return {'attacks': self.calculate1(df_attack, candidate_paths, g)}
|
[
"rahulagarwal@Rahuls-MacBook-Pro.local"
] |
rahulagarwal@Rahuls-MacBook-Pro.local
|
ed2d87291941590e4acfaefb1d07eb4fb4fbdf83
|
b80fbb7238b91858932169e1595b7fe04015a9a5
|
/benchmarking_rule_check_optimized.py
|
8a247512d68b221d7e2f8a51f05f0c3469b15fbd
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
jake-billings/research-graphs
|
a65e7e611b8af7eb92543acb0e1d44938039c483
|
fcb5007bebf027d2da2fb29b2b9d37b17ae42889
|
refs/heads/master
| 2021-01-21T08:51:49.179790
| 2018-01-12T05:38:52
| 2018-01-12T05:38:52
| 91,642,820
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 963
|
py
|
# Python 2 benchmark: compares the optimized rule-check implementation
# against the reference one on the same graph and reports timings.
import networkx as nx
from time import time
import graph

if __name__ == "__main__":
    # Build a tree (branching factor 3, depth 8) and add one cross edge.
    G = graph.treex(3, 8)
    G.add_edge(2, 7)
    print "Testing network of size", len(nx.to_numpy_matrix(G))
    # Time the optimized rule check.
    start_optimized = time()
    follows_rules_optimized = graph.does_follow_rules_optimized(nx.to_numpy_matrix(G))
    end_optimized = time()
    # Time the reference (slow) rule check.
    start_normal = time()
    follows_rules = graph.does_follow_rules(nx.to_numpy_matrix(G))
    end_normal = time()
    optimized_duration = end_optimized - start_optimized
    normal_duration = end_normal - start_normal
    # The two implementations must agree; disagreement means a bug.
    if follows_rules != follows_rules_optimized:
        print "The functions disagree. This is a huge problem."
    print "Follows rules: ", follows_rules
    print "Time optimized: ", optimized_duration, "seconds"
    print "Time slow: ", normal_duration, "seconds"
    print "The optimized algorithm ran in ", optimized_duration/normal_duration*100, "% of the time as the normal algorithm."
|
[
"jake@jakebillings.com"
] |
jake@jakebillings.com
|
ab49b4cc29be7c1856e89b14aeaf2bd67a324f7c
|
99d80e6403488291e2f15eb989b6815a16bf9366
|
/pyfolio1.py
|
6c24a80bc5eedff886a578c6e081854be07effd0
|
[] |
no_license
|
rorro128/finanzas
|
72f975f0d6bdac5c109aa5d27f1c8fb708c673e3
|
0a1e8065988858016a4199e3120cb83ef9c86383
|
refs/heads/main
| 2023-06-30T09:09:50.187407
| 2021-08-02T23:39:11
| 2021-08-02T23:39:11
| 392,125,480
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 196
|
py
|
# Fetches AMZN daily returns (network call) and renders a pyfolio
# returns tear sheet, treating 2015-12-01 onward as the "live" period.
import pyfolio as pf
import warnings

# Silence pyfolio/pandas deprecation noise.
warnings.filterwarnings('ignore')

stock_rets = pf.utils.get_symbol_rets('AMZN')
pf.create_returns_tear_sheet(stock_rets, live_start_date='2015-12-1')
|
[
"noreply@github.com"
] |
rorro128.noreply@github.com
|
ffd55c1a5a6990efbf68b16e0f88165b675f4f9d
|
523522ed5b9573dd495dad23e91b630c18e5eb08
|
/simpleUnixServer/compile.py
|
4a7865b0edf5e6e8b9b7b09675f2e992e1be6d35
|
[] |
no_license
|
leonlazuli/tempWorkShop
|
a72cb989a029f2d6d1cebfed598133014f5f9241
|
9476ed6c0a7801b4e7ec689ff9b95146371f4b25
|
refs/heads/master
| 2021-01-10T08:38:04.223461
| 2016-01-07T01:26:48
| 2016-01-07T01:26:48
| 48,371,409
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 660
|
py
|
# Python 2 build helper: compiles the web server, web client and CGI adder.
# Usage: python compile.py [selectors]  -- selectors may contain
# "s" (server), "c" (client), "a" (adder); with no argument all three build.
import subprocess
import sys

command1 = "gcc -o main webServer.c;"
command2 = " gcc -o client webClient.c rio.h;"
command3 = " gcc -o cgi-bin/adder cgi-bin/adder.c rio.h;";
command = "";
argv = sys.argv;
print len(argv);
if (len(argv) == 1):
    # No selector argument: build everything.
    command = command1 + command2 + command3;
else:
    options = argv[1];
    if "s" in options:
        command += command1;
    if "c" in options:
        command += command2;
    if "a" in options:
        command += command3;
# shell=True because the command string chains several gcc invocations
# with ';'.  The command is built from fixed literals, not user text.
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print "program output:", out
print "program error:", err
|
[
"leonlazuli@gmail.com"
] |
leonlazuli@gmail.com
|
1d492964a7f09669838f135f72666c0f70abb33b
|
5b810b5b59eb66e564b84d4627a81afb72bb769c
|
/app/util/myid.py
|
9b1d49f940b1dc3740c5f691382d3cc62374eaf6
|
[] |
no_license
|
wangyitao/microfilm
|
402d6c61aa8e4f41749829c96a742487c88d9f75
|
3cbee7b599483b117283359de9e7bec9d945ff53
|
refs/heads/master
| 2020-03-23T04:19:19.797404
| 2018-08-02T01:38:14
| 2018-08-02T01:38:14
| 141,076,259
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 640
|
py
|
# -*- coding: utf-8 -*-
# @Author : FELIX
# @Date : 2018/6/30 13:48
def my_id_maker():
    """Return a 32-char hex id: md5 of timestamp + random digits + uuid1."""
    import uuid
    from hashlib import md5
    import datetime
    import random
    # Microsecond timestamp plus five random digits in 1..10.
    timestamp = '{0:%Y%m%d%H%M%S%f}'.format(datetime.datetime.now())
    noise = ''.join(str(random.randint(1, 10)) for _ in range(5))
    seed = (timestamp + noise).encode('utf8') + str(uuid.uuid1()).encode('utf8')
    return md5(seed).hexdigest()
# A simpler version (timestamp + random digits only) would also work:
# return '{0:%Y%m%d%H%M%S%f}'.format(datetime.datetime.now()) + ''.join(
#     [str(random.randint(1, 10)) for i in range(5)])
if __name__ == "__main__":
    # Smoke test: every id should be a 32-character md5 hex digest.
    for i in range(10):
        print(len(my_id_maker()))
|
[
"1403179190@qq.com"
] |
1403179190@qq.com
|
fb48644ac909941918fc799f5736d3cf518d0fb4
|
c7132023d6caa10c879ea9345109f95c7e1c70a8
|
/q1.py
|
998b3e0e2b6d1b38419c8c5fce4bb73a799ce828
|
[] |
no_license
|
paulremerata/Excercises
|
ac9b74fe5474a56aab428c45b55fcfc9d397a529
|
e23ee8587f6b85086066e88fb7e38b340c198d0c
|
refs/heads/master
| 2016-09-13T07:08:32.119078
| 2016-04-22T07:38:24
| 2016-04-22T07:38:24
| 56,668,878
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 370
|
py
|
def convert():
    # Temperature converter (Python 2): prompt for direction and value,
    # print the converted temperature, then offer another round.
    x=raw_input("Convert to Fahrenheit or Celsius (f/c)?\n")
    y=raw_input("Enter Value:")
    if x=="f":
        print str(9*float(y)/5+32)+" F"
    elif x=="c":
        print str((float(y)-32)*5/9)+" C"
    else:
        print "wrong input"
    again(raw_input("Again?(y/n)"))
def again(x):
    # "y" restarts; "n" is a deliberate no-op (1+1) that ends the recursion;
    # anything else re-prompts.
    if x=="y":
        convert()
    elif x=="n":
        1+1
    else:
        again(raw_input("Again?(y/n)"))
convert()
|
[
"paul26rem@gmail.com"
] |
paul26rem@gmail.com
|
b581d3246abc1d89d94a521412e4a8b80d71ce70
|
0363ca4526c72fafd96daa2bd8cbb38db68cc3d3
|
/model.py
|
604bb0b7137c365f2f269db6d7d9cd8ec4973e99
|
[] |
no_license
|
Gabrielnero000/Rotation-Detection
|
6ea79d491d61b825113a07b60b3c2e97974250ec
|
25475fce86e79f6992cdd492b381ca4eddba8bad
|
refs/heads/master
| 2020-08-09T04:01:37.270778
| 2019-10-09T18:35:14
| 2019-10-09T18:35:14
| 213,991,896
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 983
|
py
|
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense
# Return the CNN model
def GetModel(input_shape, num_classes):
    """Return the rotation-detection CNN as a keras Sequential model.

    Architecture: one 16-filter entry conv, then two conv stages
    (2 x 32 filters, 3 x 64 filters) each ending in max-pooling and
    dropout, followed by a 512-unit dense classifier head.
    """
    net = Sequential()
    # Entry convolution anchors the input shape.
    net.add(Conv2D(16, (3, 3), padding='same',
                   input_shape=input_shape, activation='relu'))
    # Conv stages as (filters, repeats); each stage ends with pool + dropout.
    for filters, repeats in ((32, 2), (64, 3)):
        for _ in range(repeats):
            net.add(Conv2D(filters, (3, 3), activation='relu', padding='same'))
        net.add(MaxPooling2D(pool_size=(2, 2)))
        net.add(Dropout(0.25))
    # Classifier head.
    net.add(Flatten())
    net.add(Dense(512, activation='relu'))
    net.add(Dropout(0.35))
    net.add(Dense(num_classes, activation='softmax'))
    return net
|
[
"gabriel.leite083@gmail.com"
] |
gabriel.leite083@gmail.com
|
50f306e030bc29b5fe1d794458ce400433b8e462
|
a99bf3c086622348e7363e974ac7bac6a6e00f15
|
/manage.py
|
ad9442e82125ccd22e0b395f6de4882582b1fbbc
|
[] |
no_license
|
shubhamd/Dj-API
|
96e14a5d8f0dea600a3ded989c09c69ab1f2d57c
|
c739d3d85ae5b14891793be13213e24937ba2f16
|
refs/heads/master
| 2021-01-16T00:28:41.043023
| 2015-02-14T15:19:43
| 2015-02-14T15:19:43
| 30,799,855
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 252
|
py
|
#!/usr/bin/env python
# Django management entry point: forwards CLI arguments (runserver,
# migrate, ...) to Django after pointing it at this project's settings.
import os
import sys
if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "kisan_net.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
|
[
"shubham.desale@gmail.com"
] |
shubham.desale@gmail.com
|
0b9e96092d3e1008a7f05cc7c7bfc659ab670f67
|
65909167510b7c479cd81db74a0f2bbc9d57e9fd
|
/webGuard/urls.py
|
d649847aeb99553dced843049be5d7c4be57d6cf
|
[] |
no_license
|
Njokosi/webguard-django
|
99c4a2ba814d44d816527706685bf34bbf593870
|
f1ca3ebaa093a22577303b78d59531133ed9b01a
|
refs/heads/master
| 2023-04-14T21:27:37.062158
| 2021-04-17T09:54:26
| 2021-04-17T09:54:26
| 358,842,766
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 997
|
py
|
"""webGuard URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# Root URL routing: built-in admin, the reports app, and the account app
# mounted at the site root.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('reports/', include('reports.urls')),
    path('', include('account.urls')),
]
# Branding for the built-in admin site.
admin.site.site_header = "WEB GUARD Admin"
admin.site.site_title = "WEB GUARD Admin Portal"
admin.site.index_title = "WEB GUARD Administration Portal"
|
[
"eng.njokosi@gmail.com"
] |
eng.njokosi@gmail.com
|
03ca7feebe8e5d697766b33c66fa96f2c7d8f4c3
|
fc206761fdbc29be053f7721492bb715b03143bf
|
/Lab/DataVisualization/MedDataColumnTransforms.py
|
ff1ec847835d1cb8cad99a39b00226867ef9a5ae
|
[] |
no_license
|
zeydabadi/PythonLab
|
ec0f67f7a694ea5c2e25f9b2df21c95e95ccbcf4
|
4ce6e8e827fbcc00f040bfc1133a3272e5147aa6
|
refs/heads/master
| 2020-03-28T17:33:24.486283
| 2018-09-19T22:07:19
| 2018-09-19T22:07:19
| 148,800,976
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,865
|
py
|
# -*- coding: utf-8 -*-
"""
Demo of pandas column transforms and row filtering on the Cleveland
heart-disease dataset.

Created on Wed Sep 5 11:28:40 2018
@author: Nita
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# NOTE(review): plt and np are imported but unused in this script.
# Read the data in and once read in, we can:
# run data transformations on columns
# filter rows by conditions
# sort data etc
medData=pd.read_csv('./csvfiles/processed.cleveland.data.csv')
medDataDf = pd.DataFrame(medData)
print(medDataDf)
# From the dataset, transform a column and save it as a new column in the
# dataframe: Ex. add a new column identifying rows with RestBP >120
medDataDf['HighRestBP']=medDataDf['RestBP'].apply(lambda x: x > 120)
# add a new column identifying rows with High or normal FBS
medDataDf['HighFBS']=medDataDf['FBS'].apply(lambda x: x == 1)
# Filter rows using conditions. This can be done by creating the condition
# and generating a boolean array variable for each condition.
# Each boolean Series below is True/False per row and has the same length
# as the dataframe.
edf=medDataDf['RestECG'] == 2
hrBp=medDataDf['RestBP'] > 120
hFbs=medDataDf['FBS'] == 1
# Boolean masks can be used singly or combined with operators like & (AND).
print(medDataDf[edf & hrBp & hFbs].count())
# Column wise calculation of stats using functions like mean(), max() etc
#calculate mean of all values in a column
print("Mean of Column RestBP:", medDataDf['RestBP'].mean())
#calculate max of all values in a column
print("Max value of Column RestBP:", medDataDf['RestBP'].max())
#calculate min of all values in a column
print("Min value of Column RestBP:", medDataDf['RestBP'].min())
#calculate median of all values in a column
# NOTE(review): label says RestBP but this computes the median of 'Age' --
# looks like a copy/paste slip; confirm which column was intended.
print("Median of Column RestBP:", medDataDf['Age'].median())
|
[
"zeydabadi@gmail.com"
] |
zeydabadi@gmail.com
|
e09c1b5770850fd83798f6c08f48354e1b74a5ca
|
391ba6a63ae180a9dd837489dd195ed3fd8fb233
|
/OOP/Tebak Kandang.py
|
653f64f0699bd1c04263de097e644624d6e7a6e5
|
[] |
no_license
|
hamdiranu/cobarepo
|
90502f5defb3b6c3a0aad6f0592ebd62b39d229d
|
e569df6b27f9d9f375889c3cf56b527d0affbcd3
|
refs/heads/master
| 2020-11-27T04:43:26.288102
| 2019-12-20T17:30:18
| 2019-12-20T17:30:18
| 229,307,844
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,614
|
py
|
import os, sys, random
# On Windows, enable ANSI escape-code processing in the console so the
# color codes used below render instead of printing literally.
if sys.platform.lower() == "win32":
    os.system('color')
# Group of Different functions for different styles
class style():
    # ANSI color helpers: each callable returns the color escape code
    # followed by the stringified argument.  They only switch the color on;
    # callers append style.RESET(...) to return to the default color.
    RED = lambda x: '\033[31m' + str(x)
    YELLOW = lambda x: '\033[33m' + str(x)
    BLUE = lambda x: '\033[34m' + str(x)
    WHITE = lambda x: '\033[37m' + str(x)
    GREEN = lambda x: '\033[32m' + str(x)
    RESET = lambda x: '\033[0m' + str(x)
# print(style.YELLOW("Hello, ") + style.RESET("World!"))
class Kandang():
    """A single pen: shows its number while closed, its animal letter once open."""

    def __init__(self, angka, huruf, status):
        self.angka = angka
        self.huruf = huruf
        self.status = status

    def tampilan_huruf_angka(self):
        """Print the pen, revealing the letter only when the pen is open."""
        if self.status != 'Buka':
            print('\n| | |\n| {} |\n| | |\n'.format(self.angka))
        else:
            # style.RESET is needed so text after the colored letter is uncolored
            print('\n| | |\n| {} '.format(self.huruf)+style.RESET('|\n| | |\n'))

    def ganti_status_buka(self):
        """Mark this pen as opened."""
        self.status = 'Buka'

    def huruf_pada_kandang(self):
        """Return the (color-coded) animal letter stored in this pen."""
        return self.huruf
class Kambing(Kandang):
    # Goat pen: blue letter 'K', starts closed ('Tutup').
    def __init__(self, angka):
        super().__init__(angka, style.BLUE('K'), 'Tutup')
class Zebra(Kandang):
    # Zebra pen: red letter 'Z', starts closed ('Tutup').
    def __init__(self, angka):
        super().__init__(angka, style.RED('Z'), 'Tutup')
class Bebek(Kandang):
    # Duck pen: yellow letter 'B', starts closed ('Tutup').
    def __init__(self, angka):
        super().__init__(angka, style.YELLOW('B'), 'Tutup')
# Sistem Game
class Board:
    """Game engine: builds the pens, renders them and drives the guess loop."""
    hasil = 0 # counts opened pens, so we know when the game must stop
    def __init__(self, hasil):
        self.hasil = hasil
    # =========================== Opening ===========================
    def run(self):
        """Main menu: clear screen, show banner, dispatch on the user's choice."""
        os.system('clear')
        self.hasil=0
        print('\n=====================================\n'+'||'+' '*10+style.BLUE('Tebak Kandang')+' '*10+style.RESET('||\n=====================================\n'))
        print('1: Jumlah Kandang \n99: Exit\n')
        input1=int(input('Pilih Menu: '))
        if input1==1:
            self.kandang()
        elif input1==99:
            print('\n Terima Kasih telah menggunakan program kami :)\n')
        else:
            print('\nMohon masukkan pilihan yang sesuai')
            self.run()
    # ====================== Ask for the number of pens ==========================
    def kandang(self):
        """Ask how many pens to create, then build them."""
        input2=int(input('Masukkan jumlah kandang: '))
        jmlh_kandang=input2
        self.buat_kandang(jmlh_kandang)
    # ========================= Build the pens ==============================
    def buat_kandang(self,jmlh_kandang):
        """Fill the board with randomly chosen animal pens (K/Z/B)."""
        isi_kandang = []
        for i in range(0,jmlh_kandang):
            index_list_huruf=random.choice(['K','Z','B'])
            if index_list_huruf == 'K':
                isi_kandang.append(Kambing(str(i+1)))
            elif index_list_huruf == 'Z':
                isi_kandang.append(Zebra(str(i+1)))
            elif index_list_huruf == 'B':
                isi_kandang.append(Bebek(str(i+1)))
        self.tampilan_kandang(jmlh_kandang, isi_kandang)
    # ========================= Display the pens =============================
    def tampilan_kandang(self, jmlh_kandang, isi_kandang):
        """Render every pen, then ask which one to open."""
        for i in (isi_kandang):
            i.tampilan_huruf_angka()
        self.pilihan_kandang(jmlh_kandang, isi_kandang)
    # ====================== Choose the pen to open ===========================
    def pilihan_kandang(self,jmlh_kandang,isi_kandang):
        """Validate the chosen pen number (must be in 1..jmlh_kandang)."""
        pilihan_kotak = 0
        cek_input=[]
        input3 = input('Pilih kandang yang ingin dibuka: ')
        for i in range(1,jmlh_kandang+1):
            cek_input.append(str(i))
        if input3 in cek_input:
            pilihan_kotak = int(input3)
            self.tebakan_user(jmlh_kandang, isi_kandang, pilihan_kotak)
        else:
            print('\nMohon masukkan input yang sesuai\n')
            self.pilihan_kandang(jmlh_kandang,isi_kandang)
    # ========================== User's guess ================================
    def tebakan_user(self,jmlh_kandang, isi_kandang, pilihan_kotak):
        """Ask which animal (K/Z/B, case-insensitive) is in the chosen pen."""
        tebakan = ''
        print('---------Pilihan---------\n'+style.BLUE('K')+style.RESET(': Kambing'))
        print(style.RED('Z')+style.RESET(': Zebra'))
        print(style.YELLOW('B')+style.RESET(': Bebek\n'))
        input4 = str(input('Masukkan tebakan: '))
        if input4 in ['K','Z','B','k','z','b']:
            tebakan = input4
            self.proses_cek(jmlh_kandang, isi_kandang, pilihan_kotak, tebakan)
        else:
            print('\nMohon masukkan input yang sesuai\n')
            self.tebakan_user(jmlh_kandang, isi_kandang, pilihan_kotak)
    # ============================ Check the guess ==================================
    def proses_cek(self,jmlh_kandang, isi_kandang, pilihan_kotak, tebakan):
        """Compare the guess with the pen's letter and update the score."""
        os.system('clear')
        print('PERCOBAAN BUKA:\n')
        # Stored letters carry ANSI color prefixes, so the uppercased guess is
        # wrapped in each possible color before comparing.
        if style.BLUE(tebakan.upper()) == isi_kandang[pilihan_kotak-1].huruf_pada_kandang() or style.YELLOW(tebakan.upper()) == isi_kandang[pilihan_kotak-1].huruf_pada_kandang() or style.RED(tebakan.upper()) == isi_kandang[pilihan_kotak-1].huruf_pada_kandang():
            isi_kandang[pilihan_kotak-1].ganti_status_buka()
            self.hasil = self.hasil + 1
            for i in (isi_kandang):
                i.tampilan_huruf_angka()
            print(style.GREEN("Tebakan Benar!")+style.RESET(''))
        else :
            for i in (isi_kandang):
                i.tampilan_huruf_angka()
            print(style.YELLOW("Tebakan Salah!")+style.RESET(''))
        self.cek_hasil(jmlh_kandang, isi_kandang, pilihan_kotak, tebakan)
    # ================ Check whether every pen has been opened ==============
    def cek_hasil(self,jmlh_kandang, isi_kandang, pilihan_kotak, tebakan):
        """Finish when every pen is open, otherwise keep playing."""
        if self.hasil==len(isi_kandang):
            for i in (isi_kandang):
                i.tampilan_huruf_angka()
            print('Selamat! anda menebak semua kandang\n')
            self.Bye()
        else:
            self.tampilan_kandang(jmlh_kandang, isi_kandang)
    # ================================= OUT ====================================
    def Bye(self):
        """Offer a rematch; anything other than Y/N re-prompts."""
        input5=input('Apakah anda ingin mengulangi permainan? (Y/N) :')
        if input5 == "Y" or input5 == "y":
            self.run()
        elif input5 == "N" or input5 == "n":
            print('\n+-+-+-+-+-+-+-+-+-Terima Kasih telah menggunakan program kami :)+-+-+-+-+-+-+-+-+-+\n| '+' '*80+'|\n+-+-+-+-+-+-+-+-+-+-+-+-+ Created By : Hamdi R. +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n')
        else:
            print('\nMohon masukkan pilihan yang sesuai\n')
            self.Bye()
# Entry point: build a board with zero opened pens and show the main menu.
Board_object=Board(0)
Board_object.run()
|
[
"hamdi@alterra.id"
] |
hamdi@alterra.id
|
46255fb3ef72a146254c24529e25171cabf29bf8
|
59c2f193f5a05bc669dbc750642e820439581bc1
|
/01-Jose-Salvatierra-Learn-Python-By-Doing/01-Basic-Data-Types/05-booleans.py
|
d3a0934f413d98db55b69f6f61355d611d3ae0a3
|
[] |
no_license
|
dhanushka-gayashan/python-mastering
|
046ceba57bb3e5b5494c7f50f071df15f95d8b9d
|
63aa64fe99ded212e6617be35e028f1936f7531b
|
refs/heads/master
| 2022-04-25T21:45:12.150691
| 2020-04-27T08:48:54
| 2020-04-27T08:48:54
| 255,232,840
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 865
|
py
|
# Tutorial script: truthiness, comparisons and boolean operators.
# Fixes: "Boolena"/"Eveluate" comment typos and the "suername" prompt typo.
# Truthy and Falsy
truthy = True
falsy = False
# Boolean Comparison
age = 20
is_over_age = age >= 18
is_under_age = age < 18
is_twenty = age == 20
# AND
age = 30
can_learn_programming = age > 0 and age < 100
# OR
age = 30
# NOTE(review): this condition is always True (every number is >= 18 or
# <= 65); a real working-age check would use `and`.
usually_working = age >= 18 or age <= 65
# bool() - Convert into boolean
print(bool(35))
print(bool(0))
print(bool("Dhanushka"))
print(bool(""))
# Usage AND and OR: `and`/`or` return one of their operands, not just True/False.
x = 35 and 0
print(x)
x = 0 and 35
print(x)
x = True and 18
print(x)
x = 35 or 0
print(x)
x = 0 or 35
print(x)
x = False or 35
print(x)
age = 16
side_job = True
print(age > 18 and age < 65 or side_job) # Evaluated left to right; `and` binds tighter than `or`
name = input("Enter your name: ")
surname = input("Enter your surname: ")
# `or` falls back to the formal greeting when name is empty (falsy).
greeting = name or f"Mr. {surname}"
print(greeting)
# Use of NOT
x = not False
print(x)
x = not True
print(x)
x = not 35
print(x)
x = not 0
print(x)
|
[
"dhanukdg.soft@gmail.com"
] |
dhanukdg.soft@gmail.com
|
1aa314ea7fea174b2a659cde9f3fa638e36af50d
|
ec5f0341e53733d1142bb2826aeb98e8a73fb4ff
|
/Unit7/same_structure.py
|
66abf89246a146d4cd06970ea3463d8741e0ae7b
|
[] |
no_license
|
sandrabee/Udacity_CS101
|
0ecea7dc0f97ff489437ca95a74793098313b8e3
|
83a16a9465d8cad8407563f3f745dde34190d05f
|
refs/heads/master
| 2020-12-24T13:53:09.026621
| 2012-04-12T21:27:29
| 2012-04-12T21:27:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,260
|
py
|
#Same Structure
#Define a procedure, same_structure, that takes two inputs. It should output
#True if the lists contain the same elements in the same structure, and False
#otherwise. Two values, p and q have the same structure if:
# Neither p or q is a list.
# Both p and q are lists, they have the same number of elements, and each
# element of p has the same structure as the corresponding element of q.
#For this procedure, you can use the is_list(p) procedure from Homework 6:
def is_list(p):
    """Return True if p is a list."""
    return isinstance(p, list)

def deep_count(p):
    """Return the total element count of p, including nested lists' elements.

    Kept for backward compatibility; no longer used by same_structure.
    """
    leng = len(p)
    for elem in p:
        if is_list(elem):
            leng = deep_count(elem) + leng
    return leng

def same_structure(a, b):
    """Return True if a and b have the same nesting structure.

    Two values match when neither is a list, or when both are lists of
    equal length whose corresponding elements match recursively.

    Bug fix: the old version only compared len() and deep_count() totals,
    so differently-shaped lists with equal counts (e.g. [1, [2, 3]] vs
    [[1, 2], 3]) wrongly compared as True.
    """
    if not is_list(a) and not is_list(b):
        return True
    if is_list(a) and is_list(b) and len(a) == len(b):
        # Element-wise recursion is what actually checks the structure.
        return all(same_structure(x, y) for x, y in zip(a, b))
    return False
#Here are some examples:
#print same_structure(3, 7)
#>>> True
#print same_structure([1, 0, 1], [2, 1, 2])
#>>> True
#print same_structure([1, [0], 1], [2, 5, 3])
#>>> False
#print same_structure([1, [2, [3, [4, 5]]]], ['a', ['b', ['c', ['d', 'e']]]])
#>>> True
#print same_structure([1, [2, [3, [4, 5]]]], ['a', ['b', ['c', ['de']]]])
#>>> False
|
[
"barteitsandra@gmail.com"
] |
barteitsandra@gmail.com
|
8c6cf3faa76d0ff8db9c4587da99976def0570f9
|
b191c19a9fe9c5c7318b811336ef176ff551da48
|
/CSPP1-Practice/CSPP1-Assignments/M7/Eval Quadratic Exercise/eval_quadratic.py
|
85e74343fe924fecece839e42260fd2ce8fdaeee
|
[] |
no_license
|
Pranav-20186017/CSPP1
|
0d4423d0b8a6028f07a64bc90383bb322538932f
|
b8e61256d9c4950fedebf57de45cdc9fec3bb302
|
refs/heads/master
| 2020-03-24T17:40:21.486906
| 2018-10-01T03:00:17
| 2018-10-01T03:00:17
| 142,867,060
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 845
|
py
|
# Exercise: eval quadratic
# Write a Python function, evalQuadratic(a, b, c, x),
#that returns the value of the quadratic a . x 2 + b . x + c
# This function takes in four numbers and returns a single number.
'''
Author: Pranav Surampudi
Date: 6 August 2018
Encoding: utf-8
'''
def eval_quadratic(a_coeff, b_coeff, c_coeff, x_var):
    """Evaluate a*x^2 + b*x + c and truncate the result to an int."""
    quadratic_term = a_coeff * x_var ** 2
    linear_term = b_coeff * x_var
    return int(quadratic_term + linear_term + c_coeff)
def main():
    """Read four space-separated numbers a b c x from stdin and print
    eval_quadratic(a, b, c, x).

    Dead code removed: the original also looped over the inputs re-binding
    the loop variable, but that rebinding was discarded every iteration and
    had no effect on the output.
    """
    data = input()
    data = data.split(' ')
    data = list(map(float, data))
    print(eval_quadratic(data[0], data[1], data[2], data[3]))
if __name__ == "__main__":
    # Run only when executed as a script.
    main()
|
[
"sharmapranav307@msitprogram.net"
] |
sharmapranav307@msitprogram.net
|
e8bfab5581f0fd75f7a96a09361f6b8edfcf21f3
|
435079f92604590710f7149fecf886563529bc96
|
/config/settings/local.py
|
af3b51d5cfd419e42c164590fb61212e17ae6a7a
|
[
"MIT"
] |
permissive
|
PEKTOP/metrics-api
|
f93b04965af877ddb4f1de01dcecf06d50a339c2
|
025b955bd86ef5df0261788abde3a0358d64f8f5
|
refs/heads/master
| 2020-09-08T18:09:52.211724
| 2017-06-15T14:03:40
| 2017-06-15T14:03:40
| 94,438,471
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 286
|
py
|
# Local development settings: extend the shared base settings and wire in
# django-debug-toolbar for requests from localhost.
from .base import * # noqa
ALLOWED_HOSTS += ['127.0.0.1'] # noqa
# The toolbar only renders for IPs listed in INTERNAL_IPS.
INTERNAL_IPS = ALLOWED_HOSTS
DEBUG_TOOLBAR_CONFIG = {
    'SHOW_COLLAPSED': True
}
INSTALLED_APPS += [ # noqa
    'debug_toolbar',
]
MIDDLEWARE += [ # noqa
    'debug_toolbar.middleware.DebugToolbarMiddleware',
]
|
[
"mail2nikolas@gmail.com"
] |
mail2nikolas@gmail.com
|
dda9dd5bcabf859f268643acce6bb426918e144c
|
ef97cd06e991b87bda612c5b7f19f30fa6e083e9
|
/scripts/servicenode_test.py
|
96945e99efe399e5eb5f0fa50a7cc3f25c7caa7a
|
[] |
no_license
|
jdios89/Ragnar_teensy
|
b5ba72c3cfd68763dd113c163ed7d4ef786125d2
|
4eb44159ddcb154a72dd5b48e1d7d687821e2834
|
refs/heads/master
| 2023-08-18T09:10:23.379972
| 2023-08-07T17:37:19
| 2023-08-07T17:37:19
| 152,589,585
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 736
|
py
|
#!/usr/bin/env python
# Python 2 / ROS test client for the 'set_stiffness_matrix' service.
import sys
import rospy
from std_msgs.msg import String
from ragnar_teensy.srv import *
from std_msgs.msg import Float64MultiArray
# Name of the ROS service under test.
servicetotest = 'set_stiffness_matrix'
def set_matrix():
    # Send a 9-element (presumably 3x3, row-major -- TODO confirm) test
    # matrix 0.0, 0.1, ..., 0.8 to the service; the response is ignored.
    rospy.wait_for_service(servicetotest)
    try:
        servarg = Float64MultiArray()
        data = [None]*9
        for x in range(9):
            data[x] = x * 0.1
        servarg.data = data
        print(data)
        servicce = rospy.ServiceProxy(servicetotest, threeby3Matrix)
        servicce(servarg)
        return
    except rospy.ServiceException, e:
        print "Service call failed: %s"%e
def usage():
    # Placeholder; never called.
    return
if __name__ == "__main__":
    print "Requesting "
    set_matrix()
|
[
"juan.dios.flores@gmail.com"
] |
juan.dios.flores@gmail.com
|
d32de1d1ac15ec06e6ba5df921548fd598644253
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_278/ch25_2020_03_23_20_30_02_598987.py
|
a64e003c1ccb846a55e1d315202af707e65238c0
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 224
|
py
|
import math
# NOTE(review): this exercise snippet does not run as-is -- the next two
# lines are not valid Python ("รขngulo"/"distรขncia" are mojibake for the
# Portuguese "angulo desejado"/"distancia"), `v` is never defined, and
# `return` appears outside a function.
a= รขngulo desejado
d= distรขncia
# Projectile range formula: d = v^2 * sin(2a) / g, with g = 9.8.
d=(v**2)*math.sin(2*a)/9.8
# NOTE(review): the labels look swapped -- d < 98 prints "Muito perto"
# (too close) but the on-target band 98..102 prints "Muito longe"
# (too far); confirm intended messages.
if d<98:
    return "Muito perto"
elif 98<=d<=102:
    return "Muito longe"
else:
    return "Acertou!"
|
[
"you@example.com"
] |
you@example.com
|
39f52eaa9c5a4cad6f26a0e003f857b598337245
|
d60ec19607f02d885fc9a50347fdf527e99fba50
|
/Face_Recog_and_Detection_files/Face_Recog/pi-facerec-box-master/facedetpicamera.py
|
b77c2d3365d7ef93b01abba44dcfdf85dbcb953c
|
[
"MIT"
] |
permissive
|
RBurke314/test1
|
944359177affbf5331e80eccfc527c8f7693f022
|
3ef05ea9ed93aafbe5d1f39d1cea1d02aa40c5a9
|
refs/heads/master
| 2021-04-15T17:19:08.761329
| 2017-06-28T20:31:05
| 2017-06-28T20:31:05
| 95,709,182
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,874
|
py
|
# import the necessary packages
import numpy as np
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2
# Capture frame size used for pan/tilt math below.
FRAME_W=400
FRAME_H=240
# Last detected face position (x1, y1) and size proxy (z1).
x1=0
y1=0
z1=0
# Fallback pan/tilt/focus targets used when no face is detected.
Default_X=float(80)
Default_Y=float(48)
Default_Z=float(75)
# initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
camera.resolution = (400, 240)#faster processing set to 160, 120
camera.framerate = 40 #max frame rate of 90 frames per second
rawCapture = PiRGBArray(camera, size=(400, 240))#faster processing set to 160, 120
# allow the camera to warmup
time.sleep(0.1)
# Haar cascade face detector (file must sit next to this script).
cascade = cv2.CascadeClassifier("haarcascade_frontalface_alt2.xml")
while True:
    # capture frames from the camera
    for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
        # grab the raw NumPy array representing the image, then initialize the timestamp
        # and occupied/unoccupied text
        img = frame.array
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        face=0
        faces = cascade.detectMultiScale(gray, 1.3, 5)
        for (x, y, width, height) in faces:
            face=1
            cv2.rectangle(img, (x,y), (x+width, y+height), (0, 255,0), 2)
            #get the center position of face
            # NOTE(review): the geometric center would be x + width/2; this
            # computes (x+width)/2 -- confirm whether that is intended.
            x1 = ((x+width)/2)
            y1 = ((y+height)/2)
            z1 = ((width+height)/2)
        if face == 1:
            # Offsets of the face from the frame center drive pan/tilt.
            Position_x = float(((FRAME_W/2)-x1))
            Position_y = float((FRAME_H/2)-y1)
            Position_z = float(z1)
        else:
            Position_x = (Default_X)
            Position_y = (Default_Y)
            Position_z = (Default_Z)
        cv2.putText(img,'cam_pan='+ str(Position_x),(1,50), cv2.FONT_ITALIC, 0.5,(0,0,255))
        cv2.putText(img,'cam_tilt='+ str(Position_y),(1,100), cv2.FONT_ITALIC, 0.5,(0,0,255))
        cv2.putText(img,'cam_focus='+ str(Position_z),(1,150), cv2.FONT_ITALIC, 0.5,(0,0,255))
        # show the frame
        print('cam_pan='+ str(Position_x))
        print('cam_tilt='+ str(Position_y))
        print('cam_focus='+ str(Position_z))
        # NOTE(review): the next two statements are Python 2 print syntax
        # while the rest of the file uses print() calls, so this file will
        # not run unmodified on either major version.
        print "Found {0} faces!".format(len(faces))
        print"Found: %d" % (face)
        cv2.imshow("Frame", img)
        # clear the stream in preparation for the next frame
        rawCapture.truncate(0)
        # if the `q` key was pressed, break from the loop
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
cv2.destroyAllWindows()
|
[
"robertjburke3.14@gmail.com"
] |
robertjburke3.14@gmail.com
|
c00c44c24fe7e1137c0426f1fa5eea0742de0ffe
|
f28bbb08c931770f2c2cf6fa4cecbf18605881de
|
/Criptografia Classica/ataque/escuro/vigenere_forca_bruta.py
|
a3dd808641228595930ad5a3f325015d7e05b5b8
|
[] |
no_license
|
elitont/Seguranca-e-Auditoria-de-Sistemas
|
002989b3aedc590e7b21f63be3674c6db91614e2
|
1101eaa2d678c3d121a90f7f028eeb8217e43e51
|
refs/heads/master
| 2021-01-20T19:33:43.239743
| 2017-02-23T13:24:25
| 2017-02-23T13:24:25
| 65,201,977
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 798
|
py
|
#!/opt/plone/zinstance/bin/python2.7
# -*- coding: utf-8 -*-
from itertools import cycle
from sys import argv
import itertools
"""
Brute-force attack on an additive (Vigenere-style) cipher, without known plaintext.
"""
# Candidate key alphabet: lowercase letters and digits.
chars = 'abcdefghijklmnopqrstuvwxyz0123456789'
# Ciphertext comes from the file named on the command line.
data = open(argv[1], 'rb').read()
words_english = open('data/words_example.in', 'rb').read()
words_english = words_english.split()
# NOTE(review): chaves_invalidas is never used.
chaves_invalidas = []
# Try every key of length 1..3 over the alphabet.
# NOTE(review): the inner loop reuses the name `i`, shadowing the key length.
for i in range(1, 4):
    r = itertools.product(chars, repeat=i)
    for i in r:
        # Decrypt by subtracting each key byte modulo 256, cycling the key.
        key = [-ord(j) for j in i]
        msg_decifrada = bytes((c + m) % 256 for (c, m) in zip(cycle(key), data))
        all_word = msg_decifrada.split()
        words = []
        # Score the candidate: dictionary hits among the first 30 tokens.
        for word in all_word[0:30]:
            if word in words_english:
                words.append(word)
        # More than 5 hits -> assume the key is right and dump the plaintext.
        if(len(words) > 5):
            open('data/outputs/vigenere_decifrado.txt', 'wb').write(msg_decifrada)
|
[
"eliton.traverssini@gmail.com"
] |
eliton.traverssini@gmail.com
|
585c781e287285340d839ba8d718541fa9dc84fd
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-smn/huaweicloudsdksmn/v2/model/application_endpoint.py
|
562ef992a276405881049913173794a8aac0d433
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 6,649
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ApplicationEndpoint:
    """SMN application endpoint model (auto-generated SDK class).

    Attributes:
        openapi_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """
    # Attribute names whose values are masked as "****" in to_dict().
    sensitive_list = []

    openapi_types = {
        'create_time': 'str',
        'endpoint_urn': 'str',
        'user_data': 'str',
        'enabled': 'str',
        'token': 'str'
    }

    attribute_map = {
        'create_time': 'create_time',
        'endpoint_urn': 'endpoint_urn',
        'user_data': 'user_data',
        'enabled': 'enabled',
        'token': 'token'
    }

    def __init__(self, create_time=None, endpoint_urn=None, user_data=None, enabled=None, token=None):
        """ApplicationEndpoint

        The model defined in huaweicloud sdk

        :param create_time: Creation time of the application, in UTC format YYYY-MM-DDTHH:MM:SSZ.
        :type create_time: str
        :param endpoint_urn: Unique resource identifier of the application endpoint.
        :type endpoint_urn: str
        :param user_data: User-defined data; at most 2048 bytes of UTF-8 text.
        :type user_data: str
        :param enabled: Whether the endpoint is enabled: the string 'true' or 'false'.
        :type enabled: str
        :param token: Device token; at most 512 bytes.
        :type token: str
        """
        self._create_time = None
        self._endpoint_urn = None
        self._user_data = None
        self._enabled = None
        self._token = None
        self.discriminator = None
        self.create_time = create_time
        self.endpoint_urn = endpoint_urn
        self.user_data = user_data
        self.enabled = enabled
        self.token = token

    @property
    def create_time(self):
        """Gets the create_time of this ApplicationEndpoint.

        Creation time of the application, UTC format YYYY-MM-DDTHH:MM:SSZ.

        :return: The create_time of this ApplicationEndpoint.
        :rtype: str
        """
        return self._create_time

    @create_time.setter
    def create_time(self, create_time):
        """Sets the create_time of this ApplicationEndpoint.

        Creation time of the application, UTC format YYYY-MM-DDTHH:MM:SSZ.

        :param create_time: The create_time of this ApplicationEndpoint.
        :type create_time: str
        """
        self._create_time = create_time

    @property
    def endpoint_urn(self):
        """Gets the endpoint_urn of this ApplicationEndpoint.

        Unique resource identifier of the application endpoint.

        :return: The endpoint_urn of this ApplicationEndpoint.
        :rtype: str
        """
        return self._endpoint_urn

    @endpoint_urn.setter
    def endpoint_urn(self, endpoint_urn):
        """Sets the endpoint_urn of this ApplicationEndpoint.

        Unique resource identifier of the application endpoint.

        :param endpoint_urn: The endpoint_urn of this ApplicationEndpoint.
        :type endpoint_urn: str
        """
        self._endpoint_urn = endpoint_urn

    @property
    def user_data(self):
        """Gets the user_data of this ApplicationEndpoint.

        User-defined data; at most 2048 bytes of UTF-8 text.

        :return: The user_data of this ApplicationEndpoint.
        :rtype: str
        """
        return self._user_data

    @user_data.setter
    def user_data(self, user_data):
        """Sets the user_data of this ApplicationEndpoint.

        User-defined data; at most 2048 bytes of UTF-8 text.

        :param user_data: The user_data of this ApplicationEndpoint.
        :type user_data: str
        """
        self._user_data = user_data

    @property
    def enabled(self):
        """Gets the enabled of this ApplicationEndpoint.

        Whether the endpoint is enabled: the string 'true' or 'false'.

        :return: The enabled of this ApplicationEndpoint.
        :rtype: str
        """
        return self._enabled

    @enabled.setter
    def enabled(self, enabled):
        """Sets the enabled of this ApplicationEndpoint.

        Whether the endpoint is enabled: the string 'true' or 'false'.

        :param enabled: The enabled of this ApplicationEndpoint.
        :type enabled: str
        """
        self._enabled = enabled

    @property
    def token(self):
        """Gets the token of this ApplicationEndpoint.

        Device token; at most 512 bytes.

        :return: The token of this ApplicationEndpoint.
        :rtype: str
        """
        return self._token

    @token.setter
    def token(self, token):
        """Sets the token of this ApplicationEndpoint.

        Device token; at most 512 bytes.

        :param token: The token of this ApplicationEndpoint.
        :type token: str
        """
        self._token = token

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Sensitive attributes are masked rather than exported.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ApplicationEndpoint):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
2799baf75b490c89a862ad9a728c49b9539d73cc
|
49ddd4e169fd9b271427f34cd216cbc9b001ded6
|
/dist_search/apps.py
|
fb06022ee796b90866c0e51a018a6c6d8846047e
|
[
"MIT"
] |
permissive
|
pontusgranath/svt-apl
|
9dac25ed7d029b946c6b3a91fa9959822a240dd5
|
fea759e7629af3e92a03849ad06d82c77e57328e
|
refs/heads/main
| 2023-04-27T21:01:28.451804
| 2021-05-07T12:35:15
| 2021-05-07T12:35:15
| 347,094,738
| 2
| 0
| null | 2021-05-07T09:02:36
| 2021-03-12T14:32:16
|
Python
|
UTF-8
|
Python
| false
| false
| 96
|
py
|
from django.apps import AppConfig
class DistSearchConfig(AppConfig):
    """Django application configuration for the ``dist_search`` app."""

    name = 'dist_search'
|
[
"felix.larsson@elev.ga.ntig.se"
] |
felix.larsson@elev.ga.ntig.se
|
2e7616731d2c59f2ebf041b4b3ca45a459628f98
|
3af9b492ec584c561cf8e6d7921e508ed1f84dec
|
/ubuntu/provisioners/mywiki.py
|
2926c352154942636c897f755b7fb2ffe8b37583
|
[] |
no_license
|
AAROC/VMI-endorsement
|
85671ffd45cf90dd32f70c3264b0a31b258cf3b5
|
d6e3c1affab352d8fbebafd7760ab1b47ddeef2a
|
refs/heads/master
| 2021-06-21T11:28:59.199245
| 2017-08-02T10:34:14
| 2017-08-02T10:34:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,352
|
py
|
# -*- coding: iso-8859-1 -*-
# IMPORTANT! This encoding (charset) setting MUST be correct! If you live in a
# western country and you don't know that you use utf-8, you probably want to
# use iso-8859-1 (or some other iso charset). If you use utf-8 (a Unicode
# encoding) you MUST use: coding: utf-8
# That setting must match the encoding your editor uses when you modify the
# settings below. If it does not, special non-ASCII chars will be wrong.
"""
This is a sample config for a wiki that is part of a wiki farm and uses
farmconfig for common stuff. Here we define what has to be different from
the farm's common settings.
"""
# we import the FarmConfig class for common defaults of our wikis:
from farmconfig import FarmConfig
# now we subclass that config (inherit from it) and change what's different:
class Config(FarmConfig):
    """Per-wiki configuration for 'MyWiki', layered on the farm defaults.

    Only settings that differ from the farm-wide FarmConfig are overridden
    here; everything else is inherited.
    """

    # basic options (you normally need to change these)
    sitename = u'MyWiki'  # [Unicode] shown in page headers/titles
    interwikiname = u'MyWiki'  # [Unicode] stable name used in interwiki links

    # users listed here get superuser rights (user admin, package install, ...)
    superuser = [u"WikiAdmin", ]

    # name of entry page / front page [Unicode], choose one of those:

    # a) if most wiki content is in a single language
    #page_front_page = u"MyStartingPage"

    # b) if wiki content is maintained in many languages
    page_front_page = u"FrontPage"

    # per-wiki storage: data_dir holds pages/users, the underlay holds
    # read-only system/help pages shared via copy-on-write.
    data_dir = '/org/mywiki/data/'
    data_underlay_dir = '/org/mywiki/underlay/'
|
[
"enol.fernandez@egi.eu"
] |
enol.fernandez@egi.eu
|
9119719ad7d1887e6ad58a29a0abf8a5d0cdcc67
|
902198ab44ff0c74f8176e3eb3c6850c67ece463
|
/fwaas-service-chain/neutron-66/neutron/plugins/mlnx/agent/utils.py
|
654ba8fb72fc2e695bb8d47dab42f7eb0e22c64b
|
[
"Apache-2.0"
] |
permissive
|
rajeshmohan/openstack
|
7ca543e86ce5b18be9615590128bf2811c019ef5
|
19eaf41c20503a7320e9895271802d11675dae2f
|
refs/heads/master
| 2021-03-12T20:29:20.842361
| 2014-09-12T00:48:08
| 2014-09-12T00:48:08
| 23,939,961
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,692
|
py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron.openstack.common import importutils
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log as logging
from neutron.plugins.mlnx.common.comm_utils import RetryDecorator
from neutron.plugins.mlnx.common import exceptions
zmq = importutils.try_import('eventlet.green.zmq')
LOG = logging.getLogger(__name__)
class EswitchUtils(object):
    """JSON-over-ZeroMQ client for the eSwitchD daemon.

    Each request is serialized to JSON, sent on a REQ socket and answered
    synchronously. If no reply arrives within ``timeout`` milliseconds the
    socket is torn down and recreated lazily on the next call (a REQ socket
    left awaiting a reply cannot be reused).
    """

    def __init__(self, daemon_endpoint, timeout):
        """
        :param daemon_endpoint: ZeroMQ endpoint of eSwitchD (e.g. tcp://...)
        :param timeout: reply timeout in milliseconds
        :raises SystemExit: when eventlet.green.zmq is unavailable
        """
        # zmq is the module-level try_import of eventlet.green.zmq; without
        # it the agent cannot talk to eSwitchD at all, so fail fast.
        if not zmq:
            msg = _("Failed to import eventlet.green.zmq. "
                    "Won't connect to eSwitchD - exiting...")
            LOG.error(msg)
            raise SystemExit(msg)
        self.__conn = None
        self.daemon = daemon_endpoint
        self.timeout = timeout

    @property
    def _conn(self):
        # Lazily create and cache the REQ socket plus its POLLIN poller.
        if self.__conn is None:
            context = zmq.Context()
            socket = context.socket(zmq.REQ)
            socket.setsockopt(zmq.LINGER, 0)
            socket.connect(self.daemon)
            self.__conn = socket
            self.poller = zmq.Poller()
            self.poller.register(self._conn, zmq.POLLIN)
        return self.__conn

    @RetryDecorator(exceptions.RequestTimeout)
    def send_msg(self, msg):
        """Send one JSON request and return the parsed response payload.

        :raises exceptions.RequestTimeout: when no reply arrives in time
            (retried by the decorator); the stale socket is discarded so a
            fresh one is built on the next attempt.
        """
        self._conn.send(msg)

        socks = dict(self.poller.poll(self.timeout))
        if socks.get(self._conn) == zmq.POLLIN:
            recv_msg = self._conn.recv()
            response = self.parse_response_msg(recv_msg)
            return response
        else:
            # Timed out: the REQ socket is now stuck in "awaiting reply"
            # state and must be closed, not reused.
            self._conn.setsockopt(zmq.LINGER, 0)
            self._conn.close()
            self.poller.unregister(self._conn)
            self.__conn = None
            raise exceptions.RequestTimeout()

    def parse_response_msg(self, recv_msg):
        """Decode a JSON reply; return its payload or raise OperationFailed."""
        msg = jsonutils.loads(recv_msg)
        if msg['status'] == 'OK':
            if 'response' in msg:
                return msg.get('response')
            return
        elif msg['status'] == 'FAIL':
            msg_dict = dict(action=msg['action'], reason=msg['reason'])
            error_msg = _("Action %(action)s failed: %(reason)s") % msg_dict
        else:
            error_msg = _("Unknown operation status %s") % msg['status']
        LOG.error(error_msg)
        raise exceptions.OperationFailed(err_msg=error_msg)

    def get_attached_vnics(self):
        """Return the vNICs currently attached on any fabric."""
        LOG.debug(_("get_attached_vnics"))
        msg = jsonutils.dumps({'action': 'get_vnics', 'fabric': '*'})
        vnics = self.send_msg(msg)
        return vnics

    def set_port_vlan_id(self, physical_network,
                         segmentation_id, port_mac):
        """Assign VLAN ``segmentation_id`` to the port with ``port_mac``."""
        LOG.debug(_("Set Vlan  %(segmentation_id)s on Port %(port_mac)s "
                    "on Fabric %(physical_network)s"),
                  {'port_mac': port_mac,
                   'segmentation_id': segmentation_id,
                   'physical_network': physical_network})
        msg = jsonutils.dumps({'action': 'set_vlan',
                               'fabric': physical_network,
                               'port_mac': port_mac,
                               'vlan': segmentation_id})
        self.send_msg(msg)

    def define_fabric_mappings(self, interface_mapping):
        """Register each fabric -> physical interface mapping with eSwitchD."""
        for fabric, phy_interface in interface_mapping.iteritems():
            LOG.debug(_("Define Fabric %(fabric)s on interface %(ifc)s"),
                      {'fabric': fabric,
                       'ifc': phy_interface})
            msg = jsonutils.dumps({'action': 'define_fabric_mapping',
                                   'fabric': fabric,
                                   'interface': phy_interface})
            self.send_msg(msg)

    def port_up(self, fabric, port_mac):
        """Bring up the port with ``port_mac`` on ``fabric``."""
        LOG.debug(_("Port Up for %(port_mac)s on fabric %(fabric)s"),
                  {'port_mac': port_mac, 'fabric': fabric})
        msg = jsonutils.dumps({'action': 'port_up',
                               'fabric': fabric,
                               'ref_by': 'mac_address',
                               # Bug fix: previously sent the literal string
                               # 'port_mac' instead of the MAC address value
                               # (compare port_down/port_release below).
                               'mac': port_mac})
        self.send_msg(msg)

    def port_down(self, fabric, port_mac):
        """Bring down the port with ``port_mac`` on ``fabric``."""
        LOG.debug(_("Port Down for %(port_mac)s on fabric %(fabric)s"),
                  {'port_mac': port_mac, 'fabric': fabric})
        msg = jsonutils.dumps({'action': 'port_down',
                               'fabric': fabric,
                               'ref_by': 'mac_address',
                               'mac': port_mac})
        self.send_msg(msg)

    def port_release(self, fabric, port_mac):
        """Release (detach) the port with ``port_mac`` on ``fabric``."""
        LOG.debug(_("Port Release for %(port_mac)s on fabric %(fabric)s"),
                  {'port_mac': port_mac, 'fabric': fabric})
        msg = jsonutils.dumps({'action': 'port_release',
                               'fabric': fabric,
                               'ref_by': 'mac_address',
                               'mac': port_mac})
        self.send_msg(msg)

    def get_eswitch_ports(self, fabric):
        # TODO(irena) - to implement for next phase
        return {}

    def get_eswitch_id(self, fabric):
        # TODO(irena) - to implement for next phase
        return ""
|
[
"rajesh.mlists@gmail.com"
] |
rajesh.mlists@gmail.com
|
362db05edead17a47a2ae5d4f707c6ca9eafd1fa
|
fac3dc011d1ddc577ac3922393ac750d1e943147
|
/src/transformers/models/bloom/modeling_bloom.py
|
d37972a429f12cb0d20fee47e66cdae4dadec0d6
|
[
"Apache-2.0"
] |
permissive
|
wxrui/transformers
|
06ce57bc71466af05cbbe29b5be3ead5ce386d5b
|
6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1
|
refs/heads/master
| 2023-07-06T02:54:36.257425
| 2023-07-03T10:24:46
| 2023-07-03T10:24:46
| 250,432,634
| 0
| 0
|
Apache-2.0
| 2020-03-27T03:38:17
| 2020-03-27T03:38:16
| null |
UTF-8
|
Python
| false
| false
| 56,944
|
py
|
# coding=utf-8
# Copyright 2022 HuggingFace Inc. team and BigScience workshop.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BLOOM model."""
import math
import warnings
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss
from torch.nn import functional as F
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
QuestionAnsweringModelOutput,
SequenceClassifierOutputWithPast,
TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_bloom import BloomConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "bigscience/bloom-560m"
_CONFIG_FOR_DOC = "BloomConfig"
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST = [
"bigscience/bigscience-small-testing",
"bigscience/bloom-560m",
"bigscience/bloom-1b1",
"bigscience/bloom-1b7",
"bigscience/bloom-3b",
"bigscience/bloom-7b1",
"bigscience/bloom",
]
def _make_causal_mask(
input_ids_shape: torch.Size, device: torch.device, past_key_values_length: int
) -> torch.BoolTensor:
"""
Make causal mask used for self-attention.
"""
batch_size, target_length = input_ids_shape
mask = torch.empty((target_length, target_length + past_key_values_length), dtype=torch.bool, device=device)
# ONNX doesn't support `torch.Tensor.triu` properly, thus we use this workaround
seq_ids = torch.arange(target_length, device=device)
mask[:, past_key_values_length:] = seq_ids[:, None] < seq_ids[None, :]
if past_key_values_length > 0:
mask[:, :past_key_values_length] = False
expanded_mask = mask[None, None, :, :].expand(batch_size, 1, target_length, target_length + past_key_values_length)
return expanded_mask
def _expand_mask(mask: torch.Tensor, tgt_length: int) -> torch.BoolTensor:
"""
Expands attention_mask from `[batch_size, src_length]` to `[batch_size, 1, tgt_length, src_length]`.
"""
batch_size, src_length = mask.shape
tgt_length = tgt_length if tgt_length is not None else src_length
expanded_mask = ~(mask[:, None, None, :].to(torch.bool))
return expanded_mask.expand(batch_size, 1, tgt_length, src_length)
def build_alibi_tensor(attention_mask: torch.Tensor, num_heads: int, dtype: torch.dtype) -> torch.Tensor:
    """
    Build the ALiBi positional bias (https://arxiv.org/abs/2108.12409). The tensor is not
    causal as in the original paper; it relies on the translation invariance of softmax
    (`softmax(l+a) = softmax(l)` for fixed `a`) for a quick implementation. Based on
    https://github.com/ofirpress/attention_with_linear_biases/blob/a35aaca144e0eb6b789dfcb46784c4b8e31b7983/fairseq/models/transformer.py#L742

    TODO @thomasw21 this doesn't work as nicely due to the masking strategy, and so masking varies slightly.

    Args:
        attention_mask (`torch.Tensor`):
            Token-wise attention mask, this should be of shape (batch_size, max_seq_len).
        num_heads (`int`, *required*):
            number of heads
        dtype (`torch.dtype`, *optional*, default=`torch.bfloat16`):
            dtype of the output tensor

    Returns:
        Tensor of shape (batch_size * num_heads, 1, max_seq_len): per-head slope
        multiplied by each token's position among non-padding tokens.
    """
    batch_size, seq_length = attention_mask.shape
    # Per the paper, head slopes form a geometric sequence. They are computed
    # for the largest power of two <= num_heads first ...
    closest_power_of_2 = 2 ** math.floor(math.log2(num_heads))
    base = torch.tensor(
        2 ** (-(2 ** -(math.log2(closest_power_of_2) - 3))), device=attention_mask.device, dtype=torch.float32
    )
    powers = torch.arange(1, 1 + closest_power_of_2, device=attention_mask.device, dtype=torch.int32)
    slopes = torch.pow(base, powers)

    if closest_power_of_2 != num_heads:
        # ... and, when num_heads is not a power of two, extended with every
        # other slope from the sequence for 2 * closest_power_of_2.
        extra_base = torch.tensor(
            2 ** (-(2 ** -(math.log2(2 * closest_power_of_2) - 3))), device=attention_mask.device, dtype=torch.float32
        )
        num_remaining_heads = min(closest_power_of_2, num_heads - closest_power_of_2)
        extra_powers = torch.arange(1, 1 + 2 * num_remaining_heads, 2, device=attention_mask.device, dtype=torch.int32)
        slopes = torch.cat([slopes, torch.pow(extra_base, extra_powers)], dim=0)

    # Note: alibi will added to the attention bias that will be applied to the query, key product of attention
    # => therefore alibi will have to be of shape (batch_size, num_heads, query_length, key_length)
    # => here we set (batch_size=1, num_heads=num_heads, query_length=1, key_length=max_length)
    # => the query_length dimension will then be broadcasted correctly
    # This is more or less identical to T5's relative position bias:
    # https://github.com/huggingface/transformers/blob/f681437203baa7671de3174b0fa583c349d9d5e1/src/transformers/models/t5/modeling_t5.py#L527
    # cumsum - 1 numbers the non-padding tokens 0..k-1; multiplying by the mask
    # zeroes the positions of padding tokens.
    arange_tensor = ((attention_mask.cumsum(dim=-1) - 1) * attention_mask)[:, None, :]
    alibi = slopes[..., None] * arange_tensor
    return alibi.reshape(batch_size * num_heads, 1, seq_length).to(dtype)
def dropout_add(x: torch.Tensor, residual: torch.Tensor, prob: float, training: bool) -> torch.Tensor:
    """
    Apply dropout to `x`, then add the residual.

    Args:
        x (`torch.tensor`, *required*):
            input tensor
        residual (`torch.tensor`, *required*):
            residual tensor added after dropout
        prob (`float`, *required*):
            dropout probability
        training (`bool`, *required*):
            whether dropout is active (identity when False)
    """
    dropped = F.dropout(x, p=prob, training=training)
    return residual + dropped
def bloom_gelu_forward(x: torch.Tensor) -> torch.Tensor:
    """
    Tanh approximation of GELU (bias-free). Adapted from Megatron-DeepSpeed;
    kept as a simple expression so the inference path stays jitable.

    Args:
        x (`torch.tensor`, *required*):
            input hidden states
    """
    # 0.79788456 ~= sqrt(2/pi)
    inner = 0.79788456 * x * (1 + 0.044715 * x * x)
    return x * 0.5 * (1.0 + torch.tanh(inner))
def bloom_gelu_back(g: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
    """
    Gradient of the tanh GELU approximation. (The gradient of exact GELU is
    0.5 * (1. + torch.erf(x * 0.70710678)) + 0.3989423 * x * torch.exp(-0.5 * x * x).)

    Args:
        g (`torch.tensor`, *required*):
            upstream gradient
        x (`torch.tensor`, *required*):
            1-tuple holding the forward input (as produced by saved_tensors)
    """
    x = x[0]  # unpack the single saved tensor
    tanh_out = torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))
    # sqrt(2/pi) * 3 * 0.044715 -> 0.1070322243
    sech2_term = (1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)
    ff = 0.5 * x * sech2_term + 0.5 * (1 + tanh_out)
    return ff * g
class GeLUFunction(torch.autograd.Function):
    """autograd wrapper pairing the tanh-GELU forward with its hand-written backward."""

    @staticmethod
    def forward(ctx, input: torch.Tensor) -> torch.Tensor:
        # The backward formula needs the *input*, not the output.
        ctx.save_for_backward(input)
        return bloom_gelu_forward(input)

    @staticmethod
    def backward(ctx, grad_output: torch.Tensor) -> torch.Tensor:
        saved = ctx.saved_tensors  # 1-tuple; bloom_gelu_back unpacks it
        return bloom_gelu_back(grad_output, saved)
class BloomGelu(nn.Module):
    """
    GELU dispatcher: uses the autograd function while training (accurate
    hand-written gradients) and the plain tanh approximation at inference so
    the model stays torchscriptable. Partly copied from Megatron-DeepSpeed
    and adapted for our needs.

    See here why autograd functions are not torchscriptable:
    https://github.com/pytorch/pytorch/issues/22329
    """

    def __init__(self):
        super().__init__()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return GeLUFunction.apply(x) if self.training else bloom_gelu_forward(x)
class BloomAttention(nn.Module):
    """Multi-head self-attention with ALiBi positional bias and a fused QKV projection."""

    def __init__(self, config: BloomConfig):
        super().__init__()

        self.pretraining_tp = config.pretraining_tp
        self.slow_but_exact = config.slow_but_exact

        self.hidden_size = config.hidden_size
        self.num_heads = config.n_head
        self.head_dim = self.hidden_size // self.num_heads
        self.split_size = self.hidden_size
        self.hidden_dropout = config.hidden_dropout

        if self.head_dim * self.num_heads != self.hidden_size:
            raise ValueError(
                f"`hidden_size` must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and `num_heads`:"
                f" {self.num_heads})."
            )

        # Layer-wise attention scaling
        self.inv_norm_factor = 1.0 / math.sqrt(self.head_dim)
        self.beta = 1.0

        # Single fused projection producing Q, K and V in one matmul.
        self.query_key_value = nn.Linear(self.hidden_size, 3 * self.hidden_size, bias=True)
        self.dense = nn.Linear(self.hidden_size, self.hidden_size)
        self.attention_dropout = nn.Dropout(config.attention_dropout)

    def _split_heads(self, fused_qkv: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Split the last dimension into (num_heads, head_dim) without making any copies, results share same memory
        storage as `fused_qkv`

        Args:
            fused_qkv (`torch.tensor`, *required*): [batch_size, seq_length, num_heads * 3 * head_dim]

        Returns:
            query: [batch_size, seq_length, num_heads, head_dim] key: [batch_size, seq_length, num_heads, head_dim]
            value: [batch_size, seq_length, num_heads, head_dim]
        """
        batch_size, seq_length, three_times_hidden_size = fused_qkv.shape
        fused_qkv = fused_qkv.view(batch_size, seq_length, self.num_heads, 3, self.head_dim)
        return fused_qkv[..., 0, :], fused_qkv[..., 1, :], fused_qkv[..., 2, :]

    def _merge_heads(self, x: torch.Tensor) -> torch.Tensor:
        """
        Merge heads together over the last dimenstion

        Args:
            x (`torch.tensor`, *required*): [batch_size * num_heads, seq_length, head_dim]

        Returns:
            torch.tensor: [batch_size, seq_length, num_heads * head_dim]
        """
        # What we want to achieve is:
        # batch_size * num_heads, seq_length, head_dim -> batch_size, seq_length, num_heads * head_dim
        batch_size_and_num_heads, seq_length, _ = x.shape
        batch_size = batch_size_and_num_heads // self.num_heads

        # First view to decompose the batch size
        # batch_size * num_heads, seq_length, head_dim -> batch_size, num_heads, seq_length, head_dim
        x = x.view(batch_size, self.num_heads, seq_length, self.head_dim)

        # batch_size, num_heads, seq_length, head_dim -> batch_size, seq_length, num_heads, head_dim
        x = x.permute(0, 2, 1, 3)

        # batch_size, seq_length, num_heads, head_dim -> batch_size, seq_length, num_heads * head_dim
        return x.reshape(batch_size, seq_length, self.num_heads * self.head_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        residual: torch.Tensor,
        alibi: torch.Tensor,
        attention_mask: torch.Tensor,
        layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
        head_mask: Optional[torch.Tensor] = None,
        use_cache: bool = False,
        output_attentions: bool = False,
    ):
        """Attend over `hidden_states` (with ALiBi bias folded into the score
        matmul via baddbmm) and return `(output, present[, attention_probs])`."""
        fused_qkv = self.query_key_value(hidden_states)  # [batch_size, seq_length, 3 x hidden_size]

        # 3 x [batch_size, seq_length, num_heads, head_dim]
        (query_layer, key_layer, value_layer) = self._split_heads(fused_qkv)

        batch_size, q_length, _, _ = query_layer.shape

        # Note: keys are stored transposed ([.., head_dim, kv_length]) so the
        # score computation is a plain batched matmul.
        query_layer = query_layer.transpose(1, 2).reshape(batch_size * self.num_heads, q_length, self.head_dim)
        key_layer = key_layer.permute(0, 2, 3, 1).reshape(batch_size * self.num_heads, self.head_dim, q_length)
        value_layer = value_layer.transpose(1, 2).reshape(batch_size * self.num_heads, q_length, self.head_dim)
        if layer_past is not None:
            past_key, past_value = layer_past
            # concatenate along seq_length dimension:
            #  - key: [batch_size * self.num_heads, head_dim, kv_length]
            #  - value: [batch_size * self.num_heads, kv_length, head_dim]
            key_layer = torch.cat((past_key, key_layer), dim=2)
            value_layer = torch.cat((past_value, value_layer), dim=1)

        _, _, kv_length = key_layer.shape

        if use_cache is True:
            present = (key_layer, value_layer)
        else:
            present = None

        # [batch_size * num_heads, q_length, kv_length]
        # we use `torch.Tensor.baddbmm` instead of `torch.baddbmm` as the latter isn't supported by TorchScript v1.11
        matmul_result = alibi.baddbmm(
            batch1=query_layer,
            batch2=key_layer,
            beta=self.beta,
            alpha=self.inv_norm_factor,
        )

        # change view to [batch_size, num_heads, q_length, kv_length]
        attention_scores = matmul_result.view(batch_size, self.num_heads, q_length, kv_length)

        # cast attention scores to fp32, compute scaled softmax and cast back to initial dtype - [batch_size, num_heads, q_length, kv_length]
        input_dtype = attention_scores.dtype
        # `float16` has a minimum value of -65504.0, whereas `bfloat16` and `float32` have a minimum value of `-3.4e+38`
        if input_dtype == torch.float16:
            attention_scores = attention_scores.to(torch.float)
        attn_weights = torch.masked_fill(attention_scores, attention_mask, torch.finfo(attention_scores.dtype).min)
        attention_probs = F.softmax(attn_weights, dim=-1, dtype=torch.float32).to(input_dtype)

        # [batch_size, num_heads, q_length, kv_length]
        attention_probs = self.attention_dropout(attention_probs)

        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        # change view [batch_size x num_heads, q_length, kv_length]
        attention_probs_reshaped = attention_probs.view(batch_size * self.num_heads, q_length, kv_length)

        # matmul: [batch_size * num_heads, q_length, head_dim]
        context_layer = torch.bmm(attention_probs_reshaped, value_layer)

        # change view [batch_size, num_heads, q_length, head_dim]
        context_layer = self._merge_heads(context_layer)

        # aggregate results across tp ranks. See here: https://github.com/pytorch/pytorch/issues/76232
        if self.pretraining_tp > 1 and self.slow_but_exact:
            # Reproduce the tensor-parallel pretraining arithmetic exactly by
            # applying the output projection slice-by-slice and summing.
            slices = self.hidden_size / self.pretraining_tp
            output_tensor = torch.zeros_like(context_layer)
            for i in range(self.pretraining_tp):
                output_tensor = output_tensor + F.linear(
                    context_layer[:, :, int(i * slices) : int((i + 1) * slices)],
                    self.dense.weight[:, int(i * slices) : int((i + 1) * slices)],
                )
        else:
            output_tensor = self.dense(context_layer)

        output_tensor = dropout_add(output_tensor, residual, self.hidden_dropout, self.training)

        outputs = (output_tensor, present)
        if output_attentions:
            outputs += (attention_probs,)

        return outputs
class BloomMLP(nn.Module):
    """Feed-forward block: hidden -> 4*hidden -> GELU -> hidden, then dropout-add of the residual."""

    def __init__(self, config: BloomConfig):
        super().__init__()
        hidden_size = config.hidden_size

        self.pretraining_tp = config.pretraining_tp
        self.slow_but_exact = config.slow_but_exact
        self.dense_h_to_4h = nn.Linear(hidden_size, 4 * hidden_size)
        self.gelu_impl = BloomGelu()
        self.dense_4h_to_h = nn.Linear(4 * hidden_size, hidden_size)
        self.hidden_dropout = config.hidden_dropout

    def forward(self, hidden_states: torch.Tensor, residual: torch.Tensor) -> torch.Tensor:
        hidden_states = self.gelu_impl(self.dense_h_to_4h(hidden_states))

        if self.pretraining_tp > 1 and self.slow_but_exact:
            # Reproduce the tensor-parallel pretraining arithmetic exactly by
            # applying the down-projection slice-by-slice and summing
            # (see https://github.com/pytorch/pytorch/issues/76232).
            intermediate_output = torch.zeros_like(residual)
            slices = self.dense_4h_to_h.weight.shape[-1] / self.pretraining_tp
            for i in range(self.pretraining_tp):
                intermediate_output = intermediate_output + F.linear(
                    hidden_states[:, :, int(i * slices) : int((i + 1) * slices)],
                    self.dense_4h_to_h.weight[:, int(i * slices) : int((i + 1) * slices)],
                )
        else:
            intermediate_output = self.dense_4h_to_h(hidden_states)

        output = dropout_add(intermediate_output, residual, self.hidden_dropout, self.training)

        return output
class BloomBlock(nn.Module):
    """One transformer layer: pre-LN self-attention followed by pre-LN MLP,
    each with a residual connection (source selectable via config)."""

    def __init__(self, config: BloomConfig):
        super().__init__()
        hidden_size = config.hidden_size

        self.input_layernorm = LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
        self.num_heads = config.n_head
        self.self_attention = BloomAttention(config)
        self.post_attention_layernorm = LayerNorm(hidden_size, eps=config.layer_norm_epsilon)

        self.mlp = BloomMLP(config)

        # When True, residuals are taken from the LayerNorm *output* rather
        # than its input (matches how some checkpoints were pretrained).
        self.apply_residual_connection_post_layernorm = config.apply_residual_connection_post_layernorm
        self.hidden_dropout = config.hidden_dropout

    def forward(
        self,
        hidden_states: torch.Tensor,
        alibi: torch.Tensor,
        attention_mask: torch.Tensor,
        layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
        head_mask: Optional[torch.Tensor] = None,
        use_cache: bool = False,
        output_attentions: bool = False,
    ):
        # hidden_states: [batch_size, seq_length, hidden_size]

        # Layer norm at the beginning of the transformer layer.
        layernorm_output = self.input_layernorm(hidden_states)

        # Layer norm post the self attention.
        if self.apply_residual_connection_post_layernorm:
            residual = layernorm_output
        else:
            residual = hidden_states

        # Self attention.
        attn_outputs = self.self_attention(
            layernorm_output,
            residual,
            layer_past=layer_past,
            attention_mask=attention_mask,
            alibi=alibi,
            head_mask=head_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
        )

        attention_output = attn_outputs[0]

        outputs = attn_outputs[1:]

        layernorm_output = self.post_attention_layernorm(attention_output)

        # Get residual
        if self.apply_residual_connection_post_layernorm:
            residual = layernorm_output
        else:
            residual = attention_output

        # MLP.
        output = self.mlp(layernorm_output, residual)

        if use_cache:
            outputs = (output,) + outputs
        else:
            outputs = (output,) + outputs[1:]

        return outputs  # hidden_states, present, attentions
class BloomPreTrainedModel(PreTrainedModel):
    """Base class for all BLOOM models: weight init, gradient-checkpointing
    toggling and conversions between the BLOOM KV-cache layout
    ([batch*heads, ...]) and the standard layout ([batch, heads, ...])."""

    config_class = BloomConfig
    base_model_prefix = "transformer"
    supports_gradient_checkpointing = True
    _no_split_modules = ["BloomBlock"]
    _skip_keys_device_placement = "past_key_values"

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(self, module: nn.Module):
        """Initialize the weights."""
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module: nn.Module, value: bool = False):
        # Only the backbone (BloomModel) supports checkpointing; heads forward
        # the flag via their inner transformer.
        if isinstance(module, BloomModel):
            module.gradient_checkpointing = value

    @staticmethod
    def _convert_to_standard_cache(
        past_key_value: Tuple[Tuple[torch.Tensor, torch.Tensor]], batch_size: int
    ) -> Tuple[Tuple[torch.Tensor, torch.Tensor]]:
        """
        Standardizes the format of the cache so as to match most implementations, i.e. to tuple(tuple([batch_size,
        num_heads, ...]))
        """
        batch_size_times_num_heads, head_dim, seq_length = past_key_value[0][0].shape
        num_heads = batch_size_times_num_heads // batch_size
        # key:   [batch_size * num_heads, head_dim, seq_length] -> [batch_size, num_heads, head_dim, seq_length]
        # value: [batch_size * num_heads, seq_length, head_dim] -> [batch_size, num_heads, seq_length, head_dim]
        return tuple(
            (
                layer_past[0].view(batch_size, num_heads, head_dim, seq_length),
                layer_past[1].view(batch_size, num_heads, seq_length, head_dim),
            )
            for layer_past in past_key_value
        )

    @staticmethod
    def _convert_to_bloom_cache(
        past_key_value: Tuple[Tuple[torch.Tensor, torch.Tensor]]
    ) -> Tuple[Tuple[torch.Tensor, torch.Tensor]]:
        """
        Converts the cache to the format expected by Bloom, i.e. to tuple(tuple([batch_size * num_heads, ...]))
        """
        batch_size, num_heads, head_dim, seq_length = past_key_value[0][0].shape
        batch_size_times_num_heads = batch_size * num_heads
        # key:   [batch_size, num_heads, head_dim, seq_length] -> [batch_size * num_heads, head_dim, seq_length]
        # value: [batch_size, num_heads, seq_length, head_dim] -> [batch_size * num_heads, seq_length, head_dim]
        return tuple(
            (
                layer_past[0].view(batch_size_times_num_heads, head_dim, seq_length),
                layer_past[1].view(batch_size_times_num_heads, seq_length, head_dim),
            )
            for layer_past in past_key_value
        )
BLOOM_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`BloomConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
BLOOM_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values[0][0].shape[2]`
(`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
past_key_values (`Tuple[Tuple[torch.Tensor]]` of length `config.n_layers`):
Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see
`past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have
their past given to this model should not be passed as `input_ids` as they have already been computed.
Each element of `past_key_values` is a tuple (past_key, past_value):
- past_key: [batch_size * num_heads, head_dim, kv_length]
- past_value: [batch_size * num_heads, kv_length, head_dim]
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
If `past_key_values` is used, optionally only the last `inputs_embeds` have to be input (see
`past_key_values`).
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare Bloom Model transformer outputting raw hidden-states without any specific head on top.",
BLOOM_START_DOCSTRING,
)
class BloomModel(BloomPreTrainedModel):
def __init__(self, config: BloomConfig):
    """Build the BLOOM backbone: token embeddings (+ LayerNorm), a stack of
    BloomBlock layers and a final LayerNorm."""
    super().__init__(config)

    self.embed_dim = config.hidden_size  # hidden size shared by all layers
    self.num_heads = config.n_head

    # Embedding + LN Embedding
    self.word_embeddings = nn.Embedding(config.vocab_size, self.embed_dim)
    self.word_embeddings_layernorm = LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)

    # Transformer blocks
    self.h = nn.ModuleList([BloomBlock(config) for _ in range(config.num_hidden_layers)])

    # Final Layer Norm
    self.ln_f = LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)

    self.gradient_checkpointing = False

    # Initialize weights and apply final processing
    self.post_init()
def build_alibi_tensor(self, attention_mask: torch.Tensor, num_heads: int, dtype: torch.dtype) -> torch.Tensor:
    # Thin method wrapper around the module-level helper so subclasses can
    # override the ALiBi construction.
    return build_alibi_tensor(attention_mask, num_heads, dtype)
def get_input_embeddings(self):
    """Return the token embedding module (PreTrainedModel API)."""
    return self.word_embeddings
def _prepare_attn_mask(
    self, attention_mask: torch.Tensor, input_shape: Tuple[int, int], past_key_values_length: int
) -> torch.BoolTensor:
    """Combine the padding mask with a causal mask into one boolean bias.

    Returns a `[batch_size, 1, tgt_length, src_length]` tensor where True
    marks positions that must NOT be attended to.
    """
    device = attention_mask.device
    _, src_length = input_shape

    # A causal mask only matters when there is more than one query position.
    causal_mask = None
    if src_length > 1:
        causal_mask = _make_causal_mask(
            input_shape, device=device, past_key_values_length=past_key_values_length
        )

    # [batch_size, seq_length] -> [batch_size, 1, tgt_length, src_length]
    padding_mask = _expand_mask(attention_mask, tgt_length=src_length)
    if causal_mask is None:
        return padding_mask
    return padding_mask | causal_mask
def set_input_embeddings(self, new_embeddings: torch.Tensor):
self.word_embeddings = new_embeddings
@add_start_docstrings_to_model_forward(BLOOM_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithPastAndCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
attention_mask: Optional[torch.Tensor] = None,
head_mask: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**deprecated_arguments,
) -> Union[Tuple[torch.Tensor, ...], BaseModelOutputWithPastAndCrossAttentions]:
if deprecated_arguments.pop("position_ids", False) is not False:
# `position_ids` could have been `torch.Tensor` or `None` so defaulting pop to `False` allows to detect if users were passing explicitly `None`
warnings.warn(
"`position_ids` have no functionality in BLOOM and will be removed in v5.0.0. You can safely ignore"
" passing `position_ids`.",
FutureWarning,
)
if len(deprecated_arguments) > 0:
raise ValueError(f"Got unexpected arguments: {deprecated_arguments}")
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
batch_size, seq_length = input_ids.shape
elif inputs_embeds is not None:
batch_size, seq_length, _ = inputs_embeds.shape
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if past_key_values is None:
past_key_values = tuple([None] * len(self.h))
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape batch_size x num_heads x N x N
# head_mask has shape n_layer x batch x num_heads x N x N
head_mask = self.get_head_mask(head_mask, self.config.n_layer)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
hidden_states = self.word_embeddings_layernorm(inputs_embeds)
presents = () if use_cache else None
all_self_attentions = () if output_attentions else None
all_hidden_states = () if output_hidden_states else None
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
# Compute alibi tensor: check build_alibi_tensor documentation
seq_length_with_past = seq_length
past_key_values_length = 0
if past_key_values[0] is not None:
past_key_values_length = past_key_values[0][0].shape[2]
seq_length_with_past = seq_length_with_past + past_key_values_length
if attention_mask is None:
attention_mask = torch.ones((batch_size, seq_length_with_past), device=hidden_states.device)
else:
attention_mask = attention_mask.to(hidden_states.device)
alibi = self.build_alibi_tensor(attention_mask, self.num_heads, dtype=hidden_states.dtype)
causal_mask = self._prepare_attn_mask(
attention_mask,
input_shape=(batch_size, seq_length),
past_key_values_length=past_key_values_length,
)
for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if self.gradient_checkpointing and self.training:
def create_custom_forward(module):
def custom_forward(*inputs):
# None for past_key_value
return module(*inputs, use_cache=use_cache, output_attentions=output_attentions)
return custom_forward
outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(block),
hidden_states,
alibi,
causal_mask,
layer_past,
head_mask[i],
)
else:
outputs = block(
hidden_states,
layer_past=layer_past,
attention_mask=causal_mask,
head_mask=head_mask[i],
use_cache=use_cache,
output_attentions=output_attentions,
alibi=alibi,
)
hidden_states = outputs[0]
if use_cache is True:
presents = presents + (outputs[1],)
if output_attentions:
all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
# Add last hidden state
hidden_states = self.ln_f(hidden_states)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=presents,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
@add_start_docstrings(
"""
The Bloom Model transformer with a language modeling head on top (linear layer with weights tied to the input
embeddings).
""",
BLOOM_START_DOCSTRING,
)
class BloomForCausalLM(BloomPreTrainedModel):
    """BLOOM with a language-modeling head (linear layer, weights tied to the
    input embeddings via ``_tied_weights_keys``)."""

    # The LM head shares weights with the input embedding matrix.
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config: BloomConfig):
        super().__init__(config)
        self.transformer = BloomModel(config)
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings: torch.Tensor):
        self.lm_head = new_embeddings

    def prepare_inputs_for_generation(
        self,
        input_ids: torch.LongTensor,
        past_key_values: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> dict:
        """Assemble the model kwargs for one generation step."""
        # only last token for input_ids if past is not None
        if past_key_values:
            input_ids = input_ids[:, -1].unsqueeze(-1)
            # the cache may be in the stardard format (e.g. in contrastive search), convert to bloom's format if needed
            if past_key_values[0][0].shape[0] == input_ids.shape[0]:
                past_key_values = self._convert_to_bloom_cache(past_key_values)
        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}
        model_inputs.update(
            {
                "past_key_values": past_key_values,
                "use_cache": kwargs.get("use_cache"),
                "attention_mask": attention_mask,
            }
        )
        return model_inputs

    @add_start_docstrings_to_model_forward(BLOOM_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=CausalLMOutputWithCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **deprecated_arguments,
    ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
            are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
        """
        if deprecated_arguments.pop("position_ids", False) is not False:
            # `position_ids` could have been `torch.Tensor` or `None` so defaulting pop to `False` allows to detect if users were passing explicitly `None`
            warnings.warn(
                "`position_ids` have no functionality in BLOOM and will be removed in v5.0.0. You can safely ignore"
                " passing `position_ids`.",
                FutureWarning,
            )
        if len(deprecated_arguments) > 0:
            raise ValueError(f"Got unexpected arguments: {deprecated_arguments}")
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]
        lm_logits = self.lm_head(hidden_states)
        loss = None
        if labels is not None:
            # move labels to correct device to enable model parallelism
            labels = labels.to(lm_logits.device)
            # Shift so that tokens < n predict n
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            batch_size, seq_length, vocab_size = shift_logits.shape
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(
                shift_logits.view(batch_size * seq_length, vocab_size), shift_labels.view(batch_size * seq_length)
            )
        if not return_dict:
            output = (lm_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output
        return CausalLMOutputWithCrossAttentions(
            loss=loss,
            logits=lm_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

    def _reorder_cache(
        self, past: Tuple[Tuple[torch.Tensor, torch.Tensor], ...], beam_idx: torch.LongTensor
    ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], ...]:
        """
        This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
        [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
        beam_idx at every generation step.
        Output shares the same memory storage as `past`.
        """
        standardized_past = self._convert_to_standard_cache(past, batch_size=len(beam_idx))
        # Get a copy of `beam_idx` on all the devices where we need those indices.
        device_to_beam_idx = {
            past_state.device: beam_idx.to(past_state.device) for layer_past in past for past_state in layer_past
        }
        reordered_past = tuple(
            (
                layer_past[0].index_select(0, device_to_beam_idx[layer_past[0].device]),
                layer_past[1].index_select(0, device_to_beam_idx[layer_past[0].device]),
            )
            for layer_past in standardized_past
        )
        # Convert back to BLOOM's fused (batch*heads) cache layout.
        return self._convert_to_bloom_cache(reordered_past)
@add_start_docstrings(
"""
The Bloom Model transformer with a sequence classification head on top (linear layer).
[`BloomForSequenceClassification`] uses the last token in order to do the classification, as other causal models
(e.g. GPT-1) do.
Since it does classification on the last token, it requires to know the position of the last token. If a
`pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
each row of the batch).
""",
BLOOM_START_DOCSTRING,
)
class BloomForSequenceClassification(BloomPreTrainedModel):
    """BLOOM with a sequence-classification head.

    Pools by taking the logits at the last non-padding token of each row (or
    simply the last position when no ``pad_token_id`` is configured or when
    ``inputs_embeds`` is used).
    """

    def __init__(self, config: BloomConfig):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.transformer = BloomModel(config)
        self.score = nn.Linear(config.hidden_size, config.num_labels, bias=False)
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(BLOOM_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutputWithPast,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **deprecated_arguments,
    ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        if deprecated_arguments.pop("position_ids", False) is not False:
            # `position_ids` could have been `torch.Tensor` or `None` so defaulting pop to `False` allows to detect if users were passing explicitly `None`
            warnings.warn(
                "`position_ids` have no functionality in BLOOM and will be removed in v5.0.0. You can safely ignore"
                " passing `position_ids`.",
                FutureWarning,
            )
        if len(deprecated_arguments) > 0:
            raise ValueError(f"Got unexpected arguments: {deprecated_arguments}")
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]
        logits = self.score(hidden_states)
        if input_ids is not None:
            batch_size = input_ids.shape[0]
        else:
            batch_size = inputs_embeds.shape[0]
        if self.config.pad_token_id is None and batch_size != 1:
            # Without a pad token we cannot locate the last real token per row.
            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
        if self.config.pad_token_id is None:
            sequence_lengths = -1
        else:
            if input_ids is not None:
                # Index of the last non-pad token in each row.
                sequence_lengths = (torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1).to(logits.device)
            else:
                sequence_lengths = -1
                logger.warning(
                    f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
                    "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
                )
        pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
        loss = None
        if labels is not None:
            # Infer the problem type once and cache it on the config.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(pooled_logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(pooled_logits, labels)
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(pooled_logits, labels)
        if not return_dict:
            output = (pooled_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output
        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )
@add_start_docstrings(
"""
Bloom Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
Named-Entity-Recognition (NER) tasks.
""",
BLOOM_START_DOCSTRING,
)
class BloomForTokenClassification(BloomPreTrainedModel):
    """BLOOM with a per-token classification head (dropout + linear), e.g. for NER."""

    def __init__(self, config: BloomConfig):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.transformer = BloomModel(config)
        # Dropout rate preference: explicit classifier_dropout > hidden_dropout > 0.1.
        if hasattr(config, "classifier_dropout") and config.classifier_dropout is not None:
            classifier_dropout = config.classifier_dropout
        elif hasattr(config, "hidden_dropout") and config.hidden_dropout is not None:
            classifier_dropout = config.hidden_dropout
        else:
            classifier_dropout = 0.1
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(BLOOM_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **deprecated_arguments,
    ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        if deprecated_arguments.pop("position_ids", False) is not False:
            # `position_ids` could have been `torch.Tensor` or `None` so defaulting pop to `False` allows to detect if users were passing explicitly `None`
            warnings.warn(
                "`position_ids` have no functionality in BLOOM and will be removed in v5.0.0. You can safely ignore"
                " passing `position_ids`.",
                FutureWarning,
            )
        if len(deprecated_arguments) > 0:
            raise ValueError(f"Got unexpected arguments: {deprecated_arguments}")
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]
        hidden_states = self.dropout(hidden_states)
        logits = self.classifier(hidden_states)
        loss = None
        if labels is not None:
            # move labels to correct device to enable model parallelism
            labels = labels.to(logits.device)
            batch_size, seq_length = labels.shape
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(
                logits.view(batch_size * seq_length, self.num_labels), labels.view(batch_size * seq_length)
            )
        if not return_dict:
            # NOTE(review): slices from index 2, dropping the cache entry from
            # the tuple output — presumably intentional; confirm against callers.
            output = (logits,) + transformer_outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )
@add_start_docstrings(
"""
The BLOOM Model transformer with a span classification head on top for extractive question-answering tasks like
SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
BLOOM_START_DOCSTRING,
)
class BloomForQuestionAnswering(BloomPreTrainedModel):
    """BLOOM with a SQuAD-style span head: one linear layer producing start and
    end logits per token."""

    def __init__(self, config):
        super().__init__(config)
        self.transformer = BloomModel(config)
        # Two outputs per token: start logit and end logit.
        self.qa_outputs = nn.Linear(config.hidden_size, 2)
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(BLOOM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        start_positions: Optional[torch.LongTensor] = None,
        end_positions: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, QuestionAnsweringModelOutput]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # NOTE(review): `position_ids` is forwarded here although BloomModel.forward
        # only accepts it as a deprecated argument — confirm this is intended.
        outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        logits = self.qa_outputs(sequence_output)
        # Split the 2-channel output into separate start/end logits.
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()
        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            # Average of the two span-boundary losses.
            total_loss = (start_loss + end_loss) / 2
        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output
        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
|
[
"noreply@github.com"
] |
wxrui.noreply@github.com
|
5304cbf2716d3daa6232534adbc4e9a7a839f803
|
5887aa33c8034d6c60a80df78030fc2d66c9a8ad
|
/ejemplo_cloudml/trainer/model.py
|
6851b2b5a3897b45852325786e5a6bba0e2f4c86
|
[] |
no_license
|
mjuez/seminario-gcp
|
775045a5f91a9aa6d4ec5a74f34d179a79c2be63
|
41a9ddf003c91fb8d708c39792a3b68c81459800
|
refs/heads/master
| 2021-08-28T13:22:12.051871
| 2017-12-12T09:22:31
| 2017-12-12T09:22:31
| 112,468,890
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,644
|
py
|
#!/usr/bin/env python
# Copyright 2017 Mario Juez. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Creacion de un modelo de machine learning (regresion lineal)
# en Cloud ML Engine (TensorFlow)
#
# El modelo generado sirve para predecir el peso de un recien nacido
# en base a los siguientes valores:
# * Edad de la madre
# * Edad del padre
# * Semanas de gestacion
# * Ganancia de peso de la madre
# * Puntuacion apgar
#
# Los dataset de entrenamiento y test han sido obtenidos del conjunto
# de datos publico [publicdata:samples.natality] de BigQuery.
#
# Autor: Mario Juez <mario@mjuez.com>
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
import tensorflow as tf
# CSV file columns (first column, 'peso', is the label to predict).
CSV_COLUMNS = ['peso', 'edad_madre', 'edad_padre', 'semanas_gestacion', 'ganancia_peso', 'puntuacion_apgar']
# Default values used by the CSV decoder when a field is missing.
CSV_COLUMN_DEFAULTS = [[0.0], [0.0], [0.0], [0.0], [0.0], [0.0]]
# Label column to predict (birth weight).
LABEL_COLUMN = 'peso'
# All features are numeric.
INPUT_COLUMNS = [
    tf.feature_column.numeric_column('edad_madre'),
    tf.feature_column.numeric_column('edad_padre'),
    tf.feature_column.numeric_column('semanas_gestacion'),
    tf.feature_column.numeric_column('ganancia_peso'),
    tf.feature_column.numeric_column('puntuacion_apgar')
]
def build_estimator(output_dir):
    """Build a linear-regression estimator over INPUT_COLUMNS, checkpointing to output_dir."""
    return tf.contrib.learn.LinearRegressor(model_dir=output_dir, feature_columns=INPUT_COLUMNS)
def csv_serving_input_fn():
    """Serving input function: accepts raw CSV row strings, parses them into
    features, and drops the label column (not available at prediction time)."""
    csv_row = tf.placeholder(
        shape=[None],
        dtype=tf.string
    )
    features = parse_csv(csv_row)
    features.pop(LABEL_COLUMN)
    return tf.contrib.learn.InputFnOps(features, None, {'csv_row': csv_row})
# Only CSV input is offered for serving.
SERVING_FUNCTIONS = {
    'CSV': csv_serving_input_fn
}
def parse_csv(rows_tensor):
    """Decode a batch of raw CSV row strings into a feature dict keyed by CSV_COLUMNS."""
    expanded_rows = tf.expand_dims(rows_tensor, -1)
    decoded_columns = tf.decode_csv(expanded_rows, record_defaults=CSV_COLUMN_DEFAULTS)
    return dict(zip(CSV_COLUMNS, decoded_columns))
def generate_input_fn(filenames,
                      num_epochs=None,
                      skip_header_lines=1,
                      batch_size=200):
    """Build the training/eval input pipeline: reads CSV files in batches and
    returns a (features, label) pair of tensors."""
    filename_queue = tf.train.string_input_producer(
        filenames, num_epochs=num_epochs, shuffle=False)
    reader = tf.TextLineReader(skip_header_lines=skip_header_lines)
    # Read up to batch_size rows at a time
    _, rows = reader.read_up_to(filename_queue, num_records=batch_size)
    # Parse features from the raw CSV rows
    features = parse_csv(rows)
    features = tf.train.batch(
        features,
        batch_size,
        capacity=batch_size * 10,
        num_threads=multiprocessing.cpu_count(),
        enqueue_many=True,
        allow_smaller_final_batch=True
    )
    # Extract the label column from the feature dict
    label = features.pop(LABEL_COLUMN)
    return features, label
|
[
"mjuezath@gmail.com"
] |
mjuezath@gmail.com
|
e3cb99ef947dba4d15936b6267d2cf162301a086
|
f17315da2280b1885600abbfc5a4765386111264
|
/python_script/Image_Preprocessing.py
|
93bce424ae43a2246e74fca3c13f78b254a68864
|
[
"Apache-2.0"
] |
permissive
|
rayheberer/burda_hackday
|
e42e12ab448ba139938e92b8f252385554a53c62
|
29837f891898bf4a7b63631f98325b0e9240266b
|
refs/heads/master
| 2021-07-12T16:35:48.744783
| 2017-10-16T18:30:06
| 2017-10-16T18:30:06
| 106,206,711
| 0
| 0
| null | 2017-10-08T20:14:35
| 2017-10-08T20:14:35
| null |
UTF-8
|
Python
| false
| false
| 3,634
|
py
|
import glob, os
from PIL import Image, ImageOps
def rename_images(directory, label):
    """Rename every ``*.jpg`` in *directory* to ``<label><index>.jpg``.

    The match list is materialized and sorted before any renaming happens:
    ``glob.iglob`` is lazy, and renaming files while the underlying directory
    scan is still in progress can skip or revisit entries on some platforms.
    Sorting also makes the resulting numbering deterministic across runs.

    Args:
        directory: directory containing the images.
        label: filename prefix for the renamed files.
    """
    for index, img in enumerate(sorted(glob.glob(os.path.join(directory, '*.jpg')))):
        os.rename(img, os.path.join(directory, label + str(index) + '.jpg'))
def split_image(img_dir, img_name, left_dir, right_dir):
    """Split one image vertically down the middle; save each half (same filename)
    into left_dir and right_dir respectively."""
    source = Image.open(os.path.join(img_dir, img_name))
    midpoint = source.width / 2
    halves = (
        (left_dir, (0, 0, midpoint, source.height)),
        (right_dir, (midpoint, 0, source.width, source.height)),
    )
    for out_dir, box in halves:
        source.crop(box).save(os.path.join(out_dir, img_name))
def split_all_images(img_dir, left_dir, right_dir):
    """Apply split_image to every ``*.jpg`` found in img_dir."""
    pattern = os.path.join(img_dir, '*.jpg')
    for path in glob.iglob(pattern):
        split_image(img_dir, os.path.basename(path), left_dir, right_dir)
def reflect_y(img_dir, img_name, label, out_dir, reflect):
    """Optionally mirror an image horizontally, then save it to out_dir with
    *label* appended to the filename stem."""
    loaded = Image.open(os.path.join(img_dir, img_name))
    image = ImageOps.mirror(loaded) if reflect else loaded
    stem, extension = os.path.splitext(img_name)
    image.save(os.path.join(out_dir, stem + label + extension))
def reflect_permutations(img_dir, out_dir, label, reflect=True):
    """Run reflect_y over every ``*.jpg`` in img_dir."""
    for path in glob.iglob(os.path.join(img_dir, '*.jpg')):
        filename = os.path.basename(path)
        reflect_y(img_dir, filename, label, out_dir, reflect)
def crop_top_bottom(img_dir, img_name, label, out_dir, upper_crop_fraction):
    """Crop away the vertical excess (height - width) of an image, removing
    upper_crop_fraction of it from the top and the remainder from the bottom;
    save with *label* appended to the filename stem."""
    image = Image.open(os.path.join(img_dir, img_name))
    excess = image.height - image.width
    top_edge = excess * upper_crop_fraction
    bottom_edge = image.height - excess * (1 - upper_crop_fraction)
    region = image.crop((0, top_edge, image.width, bottom_edge))
    stem, extension = os.path.splitext(img_name)
    region.save(os.path.join(out_dir, stem + label + extension))
def crop_all_images(img_dir, out_dir, label, upper_crop_fraction):
    """Run crop_top_bottom over every ``*.jpg`` in img_dir."""
    for path in glob.iglob(os.path.join(img_dir, '*.jpg')):
        filename = os.path.basename(path)
        crop_top_bottom(img_dir, filename, label, out_dir, upper_crop_fraction)
# Pipeline driver (runs on import): rename -> split -> reflect -> crop.
# NOTE(review): all paths are hard-coded absolute Windows paths; consider
# parameterizing before reuse on another machine.
rename_images(r'C:\Users\8050116\Documents\DPS\Events\Burda Hackathon\pix2pix-tensorflow\data\original', 'male')
split_all_images(r'C:\Users\8050116\Documents\DPS\Events\Burda Hackathon\pix2pix-tensorflow\data\original',
                 r'C:\Users\8050116\Documents\DPS\Events\Burda Hackathon\pix2pix-tensorflow\data\split\left',
                 r'C:\Users\8050116\Documents\DPS\Events\Burda Hackathon\pix2pix-tensorflow\data\split\right')
# Reflection pass: 'r0' keeps both halves as-is, 'r1' mirrors both halves.
# NOTE(review): `directions` is defined here but never used in this loop.
labels = ['r0', 'r1']
directions = ['left', 'right']
reflects = [(False, False), (True, True)]
for ii, label in enumerate(labels):
    reflect_permutations(os.path.join(r'C:\Users\8050116\Documents\DPS\Events\Burda Hackathon\pix2pix-tensorflow\data\split', 'left'),
                         os.path.join(r'C:\Users\8050116\Documents\DPS\Events\Burda Hackathon\pix2pix-tensorflow\data\reflected', 'left'),
                         label, reflects[ii][0])
    reflect_permutations(os.path.join(r'C:\Users\8050116\Documents\DPS\Events\Burda Hackathon\pix2pix-tensorflow\data\split', 'right'),
                         os.path.join(r'C:\Users\8050116\Documents\DPS\Events\Burda Hackathon\pix2pix-tensorflow\data\reflected', 'right'),
                         label, reflects[ii][1])
# Crop pass: 'a' removes all excess from the bottom (fraction 0), 'b' splits it evenly (0.5).
fractions = [0, 0.5]
labels = ['a', 'b']
directions = ['left', 'right']
for crop_fraction, label in zip(fractions, labels):
    for side in directions:
        crop_all_images(os.path.join(r'C:\Users\8050116\Documents\DPS\Events\Burda Hackathon\pix2pix-tensorflow\data\reflected', side),
                        os.path.join(r'C:\Users\8050116\Documents\DPS\Events\Burda Hackathon\pix2pix-tensorflow\data\cropped', side),
                        label,
                        crop_fraction)
|
[
"kapilbakshi88@gmail.com"
] |
kapilbakshi88@gmail.com
|
4c74d8ca0b0d5b396d8797f37e0106f98f47f509
|
4438a397db52f1dad60edc7f583d2dad103f217a
|
/ex5 nota.py
|
c53ece70fa79fa731a766a207f2feee28bc3eb82
|
[] |
no_license
|
ddank0/Python-ex
|
07c20ed2f609fad700f0801d7d174626285a3bb2
|
ab0ed0d7228d19695e7e320a973a928cddbe9055
|
refs/heads/master
| 2021-07-01T18:17:05.729400
| 2021-02-22T01:30:05
| 2021-02-22T01:30:05
| 224,468,844
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 398
|
py
|
# Interactive grade-averaging script (prompts are in Portuguese).
# Reads student records until the sentinel name 'fim' ("end") is entered.
# Female students ('f') get their individual average printed immediately;
# everyone else contributes to a running total for a final group average.
cont = 0  # count of non-female students
soma = 0  # sum of non-female averages
nome = input('Nome:')
while nome != 'fim':
    sexo = input('Sexo:')
    n1 = float(input('1ยช nota:'))
    n2 = float(input('2ยชnota:'))
    media = (n1+n2)/2
    if sexo == 'f':
        print('media feminina individual:',media)
    else:
        cont = cont + 1
        soma = soma + media
    nome = input('Nome:')
# Guard against division by zero when no non-female students were entered.
if cont != 0:
    print('Media masculina total:',(soma/cont))
|
[
"isporck0@gmail.com"
] |
isporck0@gmail.com
|
d587998f796c35b943ebb944133064b336c1b387
|
63fcd65a543f047f4dbab65b9275a4646395a731
|
/ENet.py
|
298584992ba65269d507cf0a52779019fb45a176
|
[] |
no_license
|
XXXVincent/ENet-Keras
|
14ed0e94f0e5bf2d03bce1df196c432a8805f843
|
2d13203075fc2860aba60ec4be6060dd78907d0e
|
refs/heads/master
| 2020-04-25T08:42:58.710149
| 2019-02-14T05:42:30
| 2019-02-14T05:42:30
| 172,656,054
| 2
| 0
| null | 2019-02-26T07:01:26
| 2019-02-26T07:01:26
| null |
UTF-8
|
Python
| false
| false
| 6,097
|
py
|
#coding=utf-8
from keras.layers.advanced_activations import PReLU
from keras.layers.convolutional import Conv2D, Conv2DTranspose, UpSampling2D, ZeroPadding2D
from keras.layers.core import SpatialDropout2D, Permute, Activation, Reshape
from keras.layers.merge import add, concatenate
from keras.layers.normalization import BatchNormalization
from keras.layers.pooling import MaxPooling2D
from keras.engine.topology import Input
from keras.models import Model
def initial_block(inp, nb_filter=13, nb_row=3, nb_col=3, strides=(2, 2)):
conv = Conv2D(nb_filter, (nb_row, nb_col), padding='same', strides=strides)(inp)
max_pool = MaxPooling2D()(inp)
merged = concatenate([conv, max_pool], axis=3)
return merged
def bottleneck(inp, output, internal_scale=4, asymmetric=0, dilated=0, downsample=False, dropout_rate=0.1):
# main branch
internal = output // internal_scale
encoder = inp
# 1x1
input_stride = 2 if downsample else 1 # the 1st 1x1 projection is replaced with a 2x2 convolution when downsampling
encoder = Conv2D(internal, (input_stride, input_stride),
# padding='same',
strides=(input_stride, input_stride), use_bias=False)(encoder)
# Batch normalization + PReLU
encoder = BatchNormalization(momentum=0.1)(encoder) # enet uses momentum of 0.1, keras default is 0.99
encoder = PReLU(shared_axes=[1, 2])(encoder)
# conv
if not asymmetric and not dilated:
encoder = Conv2D(internal, (3, 3), padding='same')(encoder)
elif asymmetric:
encoder = Conv2D(internal, (1, asymmetric), padding='same', use_bias=False)(encoder)
encoder = Conv2D(internal, (asymmetric, 1), padding='same')(encoder)
elif dilated:
encoder = Conv2D(internal, (3, 3), dilation_rate=(dilated, dilated), padding='same')(encoder)
else:
raise (Exception('You shouldn\'t be here'))
encoder = BatchNormalization(momentum=0.1)(encoder) # enet uses momentum of 0.1, keras default is 0.99
encoder = PReLU(shared_axes=[1, 2])(encoder)
# 1x1
encoder = Conv2D(output, (1, 1), use_bias=False)(encoder)
encoder = BatchNormalization(momentum=0.1)(encoder) # enet uses momentum of 0.1, keras default is 0.99
encoder = SpatialDropout2D(dropout_rate)(encoder)
other = inp
# other branch
if downsample:
other = MaxPooling2D()(other)
other = Permute((1, 3, 2))(other)
pad_feature_maps = output - inp.get_shape().as_list()[3]
tb_pad = (0, 0)
lr_pad = (0, pad_feature_maps)
other = ZeroPadding2D(padding=(tb_pad, lr_pad))(other)
other = Permute((1, 3, 2))(other)
encoder = add([encoder, other])
encoder = PReLU(shared_axes=[1, 2])(encoder)
return encoder
def en_build(inp, dropout_rate=0.01):
enet = initial_block(inp)
enet = BatchNormalization(momentum=0.1)(enet) # enet_unpooling uses momentum of 0.1, keras default is 0.99
enet = PReLU(shared_axes=[1, 2])(enet)
enet = bottleneck(enet, 64, downsample=True, dropout_rate=dropout_rate) # bottleneck 1.0
for _ in range(4):
enet = bottleneck(enet, 64, dropout_rate=dropout_rate) # bottleneck 1.i
enet = bottleneck(enet, 128, downsample=True) # bottleneck 2.0
# bottleneck 2.x and 3.x
for _ in range(2):
enet = bottleneck(enet, 128) # bottleneck 2.1
enet = bottleneck(enet, 128, dilated=2) # bottleneck 2.2
enet = bottleneck(enet, 128, asymmetric=5) # bottleneck 2.3
enet = bottleneck(enet, 128, dilated=4) # bottleneck 2.4
enet = bottleneck(enet, 128) # bottleneck 2.5
enet = bottleneck(enet, 128, dilated=8) # bottleneck 2.6
enet = bottleneck(enet, 128, asymmetric=5) # bottleneck 2.7
enet = bottleneck(enet, 128, dilated=16) # bottleneck 2.8
return enet
# decoder
def de_bottleneck(encoder, output, upsample=False, reverse_module=False):
internal = output // 4
x = Conv2D(internal, (1, 1), use_bias=False)(encoder)
x = BatchNormalization(momentum=0.1)(x)
x = Activation('relu')(x)
if not upsample:
x = Conv2D(internal, (3, 3), padding='same', use_bias=True)(x)
else:
x = Conv2DTranspose(filters=internal, kernel_size=(3, 3), strides=(2, 2), padding='same')(x)
x = BatchNormalization(momentum=0.1)(x)
x = Activation('relu')(x)
x = Conv2D(output, (1, 1), padding='same', use_bias=False)(x)
other = encoder
if encoder.get_shape()[-1] != output or upsample:
other = Conv2D(output, (1, 1), padding='same', use_bias=False)(other)
other = BatchNormalization(momentum=0.1)(other)
if upsample and reverse_module is not False:
other = UpSampling2D(size=(2, 2))(other)
if upsample and reverse_module is False:
decoder = x
else:
x = BatchNormalization(momentum=0.1)(x)
decoder = add([x, other])
decoder = Activation('relu')(decoder)
return decoder
def de_build(encoder, nc):
enet = de_bottleneck(encoder, 64, upsample=True, reverse_module=True) # bottleneck 4.0
enet = de_bottleneck(enet, 64) # bottleneck 4.1
enet = de_bottleneck(enet, 64) # bottleneck 4.2
enet = de_bottleneck(enet, 16, upsample=True, reverse_module=True) # bottleneck 5.0
enet = de_bottleneck(enet, 16) # bottleneck 5.1
enet = Conv2DTranspose(filters=nc, kernel_size=(2, 2), strides=(2, 2), padding='same')(enet)
return enet
def ENet(n_classes, input_height=512, input_width=512):
assert input_height % 32 == 0
assert input_width % 32 == 0
img_input = Input(shape=(input_height, input_width, 3))
enet = en_build(img_input)
enet = de_build(enet, n_classes)
o_shape = Model(img_input, enet).output_shape
outputHeight = o_shape[1]
outputWidth = o_shape[2]
enet = (Reshape((outputHeight*outputWidth, n_classes)))(enet)
enet = Activation('softmax')(enet)
model = Model(img_input, enet)
print(outputHeight)
print(outputWidth)
model.outputWidth = outputWidth
model.outputHeight = outputHeight
return model
|
[
"1182563586@qq.com"
] |
1182563586@qq.com
|
97db9e62ed85695f27f233c12c47c5aadfcf9b64
|
3bf398d66621db1e8955f5c6cf5cc37c677f5153
|
/external_tools/ply_info/test/testyacc.py
|
1a98b4f756bab7e924b2e51565548a896762ae97
|
[
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
engineeringentropy/echronos
|
b60d4f00333375534b10566f470f734c4d4e02dd
|
b531d5d374ee9b3b66732001eaa6923b3f97ec1b
|
refs/heads/master
| 2021-08-07T11:22:30.795797
| 2020-08-13T03:01:45
| 2020-08-13T03:01:45
| 209,948,267
| 0
| 0
|
NOASSERTION
| 2019-09-21T08:09:32
| 2019-09-21T08:09:32
| null |
UTF-8
|
Python
| false
| false
| 15,937
|
py
|
# testyacc.py
import unittest
try:
import StringIO
except ImportError:
import io as StringIO
import sys
import os
import warnings
sys.path.insert(0,"..")
sys.tracebacklimit = 0
import ply.yacc
import imp
def make_pymodule_path(filename):
path = os.path.dirname(filename)
file = os.path.basename(filename)
mod, ext = os.path.splitext(file)
if sys.hexversion >= 0x3020000:
modname = mod+"."+imp.get_tag()+ext
fullpath = os.path.join(path,'__pycache__',modname)
else:
fullpath = filename
return fullpath
def pymodule_out_exists(filename):
return os.path.exists(make_pymodule_path(filename))
def pymodule_out_remove(filename):
os.remove(make_pymodule_path(filename))
def check_expected(result,expected):
resultlines = []
for line in result.splitlines():
if line.startswith("WARNING: "):
line = line[9:]
elif line.startswith("ERROR: "):
line = line[7:]
resultlines.append(line)
expectedlines = expected.splitlines()
if len(resultlines) != len(expectedlines):
return False
for rline,eline in zip(resultlines,expectedlines):
if not rline.endswith(eline):
return False
return True
def run_import(module):
code = "import "+module
exec(code)
del sys.modules[module]
# Tests related to errors and warnings when building parsers
class YaccErrorWarningTests(unittest.TestCase):
def setUp(self):
sys.stderr = StringIO.StringIO()
sys.stdout = StringIO.StringIO()
try:
os.remove("parsetab.py")
pymodule_out_remove("parsetab.pyc")
except OSError:
pass
if sys.hexversion >= 0x3020000:
warnings.filterwarnings('ignore',category=ResourceWarning)
def tearDown(self):
sys.stderr = sys.__stderr__
sys.stdout = sys.__stdout__
def test_yacc_badargs(self):
self.assertRaises(ply.yacc.YaccError,run_import,"yacc_badargs")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_badargs.py:23: Rule 'p_statement_assign' has too many arguments\n"
"yacc_badargs.py:27: Rule 'p_statement_expr' requires an argument\n"
))
def test_yacc_badid(self):
self.assertRaises(ply.yacc.YaccError,run_import,"yacc_badid")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_badid.py:32: Illegal name 'bad&rule' in rule 'statement'\n"
"yacc_badid.py:36: Illegal rule name 'bad&rule'\n"
))
def test_yacc_badprec(self):
try:
run_import("yacc_badprec")
except ply.yacc.YaccError:
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"precedence must be a list or tuple\n"
))
def test_yacc_badprec2(self):
self.assertRaises(ply.yacc.YaccError,run_import,"yacc_badprec2")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"Bad precedence table\n"
))
def test_yacc_badprec3(self):
run_import("yacc_badprec3")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"Precedence already specified for terminal 'MINUS'\n"
"Generating LALR tables\n"
))
def test_yacc_badrule(self):
self.assertRaises(ply.yacc.YaccError,run_import,"yacc_badrule")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_badrule.py:24: Syntax error. Expected ':'\n"
"yacc_badrule.py:28: Syntax error in rule 'statement'\n"
"yacc_badrule.py:33: Syntax error. Expected ':'\n"
"yacc_badrule.py:42: Syntax error. Expected ':'\n"
))
def test_yacc_badtok(self):
try:
run_import("yacc_badtok")
except ply.yacc.YaccError:
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"tokens must be a list or tuple\n"))
def test_yacc_dup(self):
run_import("yacc_dup")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_dup.py:27: Function p_statement redefined. Previously defined on line 23\n"
"Token 'EQUALS' defined, but not used\n"
"There is 1 unused token\n"
"Generating LALR tables\n"
))
def test_yacc_error1(self):
try:
run_import("yacc_error1")
except ply.yacc.YaccError:
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_error1.py:61: p_error() requires 1 argument\n"))
def test_yacc_error2(self):
try:
run_import("yacc_error2")
except ply.yacc.YaccError:
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_error2.py:61: p_error() requires 1 argument\n"))
def test_yacc_error3(self):
try:
run_import("yacc_error3")
except ply.yacc.YaccError:
e = sys.exc_info()[1]
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"'p_error' defined, but is not a function or method\n"))
def test_yacc_error4(self):
self.assertRaises(ply.yacc.YaccError,run_import,"yacc_error4")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_error4.py:62: Illegal rule name 'error'. Already defined as a token\n"
))
def test_yacc_error5(self):
run_import("yacc_error5")
result = sys.stdout.getvalue()
self.assert_(check_expected(result,
"Group at 3:10 to 3:12\n"
"Syntax error at 'b'\n"
"Syntax error at 4:18 to 4:22\n"
"Assignment Error at 2:5 to 5:33\n"
))
def test_yacc_error6(self):
run_import("yacc_error6")
result = sys.stdout.getvalue()
self.assert_(check_expected(result,
"a=7\n"
"Line 3: Syntax error at '*'\n"
"c=21\n"
))
def test_yacc_error7(self):
run_import("yacc_error7")
result = sys.stdout.getvalue()
self.assert_(check_expected(result,
"a=7\n"
"Line 3: Syntax error at '*'\n"
"c=21\n"
))
def test_yacc_inf(self):
self.assertRaises(ply.yacc.YaccError,run_import,"yacc_inf")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"Token 'NUMBER' defined, but not used\n"
"There is 1 unused token\n"
"Infinite recursion detected for symbol 'statement'\n"
"Infinite recursion detected for symbol 'expression'\n"
))
def test_yacc_literal(self):
self.assertRaises(ply.yacc.YaccError,run_import,"yacc_literal")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_literal.py:36: Literal token '**' in rule 'expression' may only be a single character\n"
))
def test_yacc_misplaced(self):
self.assertRaises(ply.yacc.YaccError,run_import,"yacc_misplaced")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_misplaced.py:32: Misplaced '|'\n"
))
def test_yacc_missing1(self):
self.assertRaises(ply.yacc.YaccError,run_import,"yacc_missing1")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_missing1.py:24: Symbol 'location' used, but not defined as a token or a rule\n"
))
def test_yacc_nested(self):
run_import("yacc_nested")
result = sys.stdout.getvalue()
self.assert_(check_expected(result,
"A\n"
"A\n"
"A\n",
))
def test_yacc_nodoc(self):
run_import("yacc_nodoc")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_nodoc.py:27: No documentation string specified in function 'p_statement_expr' (ignored)\n"
"Generating LALR tables\n"
))
def test_yacc_noerror(self):
run_import("yacc_noerror")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"no p_error() function is defined\n"
"Generating LALR tables\n"
))
def test_yacc_nop(self):
run_import("yacc_nop")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_nop.py:27: Possible grammar rule 'statement_expr' defined without p_ prefix\n"
"Generating LALR tables\n"
))
def test_yacc_notfunc(self):
run_import("yacc_notfunc")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"'p_statement_assign' not defined as a function\n"
"Token 'EQUALS' defined, but not used\n"
"There is 1 unused token\n"
"Generating LALR tables\n"
))
def test_yacc_notok(self):
try:
run_import("yacc_notok")
except ply.yacc.YaccError:
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"No token list is defined\n"))
def test_yacc_rr(self):
run_import("yacc_rr")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"Generating LALR tables\n"
"1 reduce/reduce conflict\n"
"reduce/reduce conflict in state 15 resolved using rule (statement -> NAME EQUALS NUMBER)\n"
"rejected rule (expression -> NUMBER) in state 15\n"
))
def test_yacc_rr_unused(self):
run_import("yacc_rr_unused")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"no p_error() function is defined\n"
"Generating LALR tables\n"
"3 reduce/reduce conflicts\n"
"reduce/reduce conflict in state 1 resolved using rule (rule3 -> A)\n"
"rejected rule (rule4 -> A) in state 1\n"
"reduce/reduce conflict in state 1 resolved using rule (rule3 -> A)\n"
"rejected rule (rule5 -> A) in state 1\n"
"reduce/reduce conflict in state 1 resolved using rule (rule4 -> A)\n"
"rejected rule (rule5 -> A) in state 1\n"
"Rule (rule5 -> A) is never reduced\n"
))
def test_yacc_simple(self):
run_import("yacc_simple")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"Generating LALR tables\n"
))
def test_yacc_sr(self):
run_import("yacc_sr")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"Generating LALR tables\n"
"20 shift/reduce conflicts\n"
))
def test_yacc_term1(self):
self.assertRaises(ply.yacc.YaccError,run_import,"yacc_term1")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_term1.py:24: Illegal rule name 'NUMBER'. Already defined as a token\n"
))
def test_yacc_unused(self):
self.assertRaises(ply.yacc.YaccError,run_import,"yacc_unused")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_unused.py:62: Symbol 'COMMA' used, but not defined as a token or a rule\n"
"Symbol 'COMMA' is unreachable\n"
"Symbol 'exprlist' is unreachable\n"
))
def test_yacc_unused_rule(self):
run_import("yacc_unused_rule")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_unused_rule.py:62: Rule 'integer' defined, but not used\n"
"There is 1 unused rule\n"
"Symbol 'integer' is unreachable\n"
"Generating LALR tables\n"
))
def test_yacc_uprec(self):
self.assertRaises(ply.yacc.YaccError,run_import,"yacc_uprec")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_uprec.py:37: Nothing known about the precedence of 'UMINUS'\n"
))
def test_yacc_uprec2(self):
self.assertRaises(ply.yacc.YaccError,run_import,"yacc_uprec2")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"yacc_uprec2.py:37: Syntax error. Nothing follows %prec\n"
))
def test_yacc_prec1(self):
self.assertRaises(ply.yacc.YaccError,run_import,"yacc_prec1")
result = sys.stderr.getvalue()
self.assert_(check_expected(result,
"Precedence rule 'left' defined for unknown symbol '+'\n"
"Precedence rule 'left' defined for unknown symbol '*'\n"
"Precedence rule 'left' defined for unknown symbol '-'\n"
"Precedence rule 'left' defined for unknown symbol '/'\n"
))
unittest.main()
|
[
"benno@breakawayconsulting.com.au"
] |
benno@breakawayconsulting.com.au
|
4bdd54024cb065037e02da496368a94aedc86137
|
2ac772bf0c2054e2f1a2e8db6d73e0b4d14dedda
|
/[Do_it]_Python/chap06/shell_sort2.py
|
9c6b3e024beb69d3a8bd9884b06668dc47f56052
|
[] |
no_license
|
cherieuu/Algorithm
|
1cf5d7399cb89eb124c3b335eb666b77dd33d8b8
|
cd1e4482e2ce5ad46efac0fff22eea80a3edb036
|
refs/heads/master
| 2023-06-23T20:12:00.148985
| 2021-07-27T14:55:54
| 2021-07-27T14:55:54
| 377,110,737
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 993
|
py
|
# [Do it! ์ค์ต 6-9] ์
ธ ์ ๋ ฌ ์๊ณ ๋ฆฌ์ฆ ๊ตฌํํ๊ธฐ(h * 3 + 1์ ์์ด ์ฌ์ฉ)
from typing import MutableSequence
def shell_sort(a: MutableSequence) -> None:
"""์
ธ ์ ๋ ฌ(h * 3 + 1์ ์์ด ์ฌ์ฉ)"""
n = len(a)
h = 1
while h < n // 9:
h = h * 3 + 1
while h > 0:
for i in range(h, n):
j = i - h
tmp = a[i]
while j >= 0 and a[j] > tmp:
a[j + h] = a[j]
j -= h
a[j + h] = tmp
h //= 3
if __name__ == '__main__':
print('์
ธ ์ ๋ ฌ์ ์ํํฉ๋๋ค(h * 3 + 1์ ์์ด ์ฌ์ฉ).')
num = int(input('์์ ์๋ฅผ ์
๋ ฅํ์ธ์.: '))
x = [None] * num # ์์ ์๊ฐ num์ธ ๋ฐฐ์ด์ ์์ฑ
for i in range(num):
x[i] = int(input(f'x[{i}]: '))
shell_sort(x) # ๋ฐฐ์ด x๋ฅผ ์
ธ ์ ๋ ฌ
print('์ค๋ฆ์ฐจ์์ผ๋ก ์ ๋ ฌํ์ต๋๋ค.')
for i in range(num):
print(f'x[{i}] = {x[i]}')
|
[
"yhy970108@gmail.com"
] |
yhy970108@gmail.com
|
7a0612bb90e38e3b609279636f755bf5f4be3f41
|
839c28be0b52f3831fe1b64ef2d8ba34c384dec4
|
/etc/hello.py
|
a4d609375092cda12958024c573ea83626c03e58
|
[] |
no_license
|
petertrr/stepic-project-web
|
5d9d82522d70e65b534325815e768d1df3e43016
|
d235d9d1b187b98dcaca8d833bf77f4cf2b632ae
|
refs/heads/master
| 2020-07-22T17:50:16.233607
| 2016-11-21T18:59:31
| 2016-11-21T18:59:31
| 73,825,971
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 234
|
py
|
def app(environ, start_response):
# logic
status = '200 OK'
headers = [
('Content-Type', 'text/plain')
]
body = environ['QUERY_STRING'].split("&")
body = [i+'\r\n' for i in body]
start_response(status, headers )
return body
|
[
"peter.trifanov@mail.ru"
] |
peter.trifanov@mail.ru
|
7d6914b5302e5fb20406717bbbb6260a2f53a640
|
1e23ae696c5620043063e20c2dde14a92709c644
|
/appengine/lib/echonest.py
|
b9cc406fc367d5fb4d4e6338c206c69aee93854d
|
[] |
no_license
|
ynohtna/techno-is-the-word
|
ff1ea4e3f553118bd43380f1db1c9ec834a28c08
|
ebfd4186d847ebce1a83476ca557448d26e4aad6
|
refs/heads/master
| 2021-01-19T04:43:48.549625
| 2016-02-28T17:30:03
| 2016-02-28T17:30:03
| 52,732,582
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,087
|
py
|
import logging
from django.utils import simplejson
from google.appengine.api.urlfetch import fetch
from lib.helpers import alter_query
# ============================================================
# CONSTANTS.
API_KEY = 'TCLMMWCPEMTQCMBVA'
CON_KEY = '93a88bc50c2a37f66339cbd35d7bfb66'
SECRET = 'qlJ5i/ZeQjGv5e8cBTjqlQ'
URL = 'http://developer.echonest.com/api/v4'
# ============================================================
def mk_url(api, dict = None):
url = '%s/%s' % (URL, api)
params = {
'api_key': API_KEY,
'format': 'json'
}
if dict:
params.update(dict)
url = alter_query(url, params)
return url
# ============================================================
def test():
url = mk_url('song')
logging.info('URL %s' % url)
# ============================================================
def get_assets(limit = 100, offset = 0):
url = mk_url('sandbox/list', {
'sandbox': 'emi_open_collection',
'results': limit,
'start': offset,
})
logging.info('FETCHING ASSETS FROM %s' % url)
response = fetch(url)
assets = []
try:
if response.status_code == 200:
json = response.content
json_dict = simplejson.loads(json)
assets = json_dict['response']['assets']
except Exception, e:
logging.error(repr(e))
return assets
# ============================================================
def get_techno_tracks(limit = 100, offset = 0):
url = mk_url('song/search', {
'style': 'techno',
'bucket': 'id:emi_open_collection',
'results': limit,
'start': offset,
})
logging.info('FETCHING TECHNO TRACKS FROM %s' % url)
response = fetch(url)
tracks = []
try:
if response.status_code == 200:
json = response.content
json_dict = simplejson.loads(json)
tracks = json_dict['response']['songs']
except Exception, e:
logging.error(repr(e))
return tracks
|
[
"ynohtna@ynohtna.org"
] |
ynohtna@ynohtna.org
|
45f8a9907a4bba03a02af49231f0d72b1e4bcfdc
|
35ef09d5ded82356cdec16c1dc710d3b1643588f
|
/project/project/settings.py
|
1fa9c005d230e9fb5ff5d7da82fd0ced1d84c1bd
|
[
"MIT"
] |
permissive
|
Shravya9506/sample-django-docker
|
c5c23140a2b3537255ba5e8f83296459c7f11957
|
b80f68ad639d134b8699abd10c83de2283955e9a
|
refs/heads/master
| 2022-01-12T08:12:11.849002
| 2019-07-09T11:33:36
| 2019-07-09T11:33:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,309
|
py
|
"""
Django settings for project project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'postgres',
'USER': 'postgres',
'PASSWORD': os.environ.get('POSTGRES_PASSWORD'),
'HOST': 'db',
'PORT': '5432',
}
}
BROKER_TRANSPORT = 'redis'
BROKER_URL = "redis://redis:6379/0"
CELERY_RESULT_BACKEND = BROKER_URL
STATIC_ROOT = '/code/static'
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
|
[
"rowdy_beaver@piedmontfalls.org"
] |
rowdy_beaver@piedmontfalls.org
|
232cd5de467da1a24c6ec42498c4ddf0a4ba6674
|
b083ec94ed712c315e7f811aa6f7a3eb37eeb1cf
|
/exe6.py
|
e47b06645f8bb1107efb0e75ea41619fe7f0d95f
|
[] |
no_license
|
LePetuconski/simple-python-exercises
|
742cbdfefb5299840fb925a6206ef8e54960507b
|
6851dedf5d55e4ae64008eeb37c717376ad267c0
|
refs/heads/master
| 2023-04-11T06:36:01.909128
| 2021-04-25T16:01:04
| 2021-04-25T16:01:04
| 361,325,880
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 106
|
py
|
R = 30;
volume = ((4/3) * 3.1416 * (R**3));
print(f'A esfera do raio รฉ r = {R} e o volume รฉ {volume}');
|
[
"leticiapetuconski@gmail.com"
] |
leticiapetuconski@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.