blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 213
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 246
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a2d2add573e706d72b9bc3d90e3c3c73e3e88b4
|
cc18d1effafd9690c1af7a4fb9beb148e0d8259a
|
/Buchalka/lessons1_7/blocks.py
|
5419a77cee017a2f3876e09960635e42a04795e9
|
[] |
no_license
|
MikhaylovD/StudyPython
|
b59d10ee78c2fc59b125322ae816f8913f664eda
|
bd076cb2fc31d8f22f9382337714f87370654db0
|
refs/heads/master
| 2022-07-04T17:41:57.787087
| 2020-05-22T15:32:32
| 2020-05-22T15:32:32
| 264,994,286
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 336
|
py
|
# for i in range(1, 13):
# print(("no. {} squared is {} and cubed is {:4}".format(i, i ** 2, i ** 3)))
# print("*" * 80)
# Ask the user's name and age, then report whether they are an adult.
name = input("Please enter Name")
# int() raises ValueError if the reply is not a whole number.
age = int(input("How old are you, {0}?".format(name)))  # fixed typo: "ols" -> "old"
print(age)
if age >= 18:
    print("You are old enough")
else:
    # Include the unit so the message reads correctly.
    print("Please come back in {0} years".format(18 - age))
|
[
"dmikhailov.work@gmail.com"
] |
dmikhailov.work@gmail.com
|
d82608719f616130383431c5c7b8da3c7fefe854
|
25b5f5a6c9c95ef9a1264eef12384003f037cb58
|
/monocle/utils.py
|
26ac476b91830b6e40735beedcba8749474b6c9f
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
RobTwoThree/Monocle-Level30
|
7dbe76ead87e489faff6ef68d267fbc3610d953e
|
94ed6e121ad41b437083bf207954c34ff8f312f9
|
refs/heads/master
| 2022-11-22T06:17:51.203216
| 2017-12-07T22:11:52
| 2017-12-07T22:11:52
| 113,503,725
| 1
| 1
|
MIT
| 2020-07-22T03:56:03
| 2017-12-07T22:12:23
|
Python
|
UTF-8
|
Python
| false
| false
| 7,811
|
py
|
import socket
from os import mkdir
from os.path import join, exists
from sys import platform
from asyncio import sleep
from math import sqrt
from uuid import uuid4
from enum import Enum
from csv import DictReader
from cyrandom import choice, shuffle, uniform
from time import time
from pickle import dump as pickle_dump, load as pickle_load, HIGHEST_PROTOCOL
from geopy import Point
from geopy.distance import distance
from aiopogo import utilities as pgoapi_utils
from pogeo import get_distance
from . import bounds, sanitized as conf
# iPhones 5 + 5C (4S is really not playable)
# Maps Apple model identifiers (e.g. 'iPhone9,1') to internal hardware board
# codes (e.g. 'D10AP'); used below when building/spoofing device info.
IPHONES = {'iPhone5,1': 'N41AP',
           'iPhone5,2': 'N42AP',
           'iPhone5,3': 'N48AP',
           'iPhone5,4': 'N49AP',
           'iPhone6,1': 'N51AP',
           'iPhone6,2': 'N53AP',
           'iPhone7,1': 'N56AP',
           'iPhone7,2': 'N61AP',
           'iPhone8,1': 'N71AP',
           'iPhone8,2': 'N66AP',
           'iPhone8,4': 'N69AP',
           'iPhone9,1': 'D10AP',
           'iPhone9,2': 'D11AP',
           'iPhone9,3': 'D101AP',
           'iPhone9,4': 'D111AP',
           'iPhone10,1': 'D20AP',
           'iPhone10,2': 'D21AP',
           'iPhone10,3': 'D22AP',
           'iPhone10,4': 'D201AP',
           'iPhone10,5': 'D211AP',
           'iPhone10,6': 'D221AP'}
class Units(Enum):
    """Enumeration of supported distance units."""
    miles = 1
    kilometers = 2
    meters = 3
def best_factors(n):
    """Return the most square-like factor pair ``(a, b)`` with ``a * b == n``.

    Searches downward from ``sqrt(n)``, so the first divisor found yields the
    pair whose members are closest together.
    """
    root = int(n ** 0.5)
    return next((i, n // i) for i in range(root, 0, -1) if n % i == 0)
def percentage_split(seq, percentages):
    """Yield consecutive slices of *seq* sized proportionally to *percentages*.

    The final share is padded so the percentages sum to 1.0, guaranteeing the
    last slice runs to the end of *seq*.

    Bug fix: the original wrote the padding back into the caller's list
    (``percentages[-1] += ...``), mutating the argument; we now work on a copy.
    """
    shares = list(percentages)  # copy: never mutate the caller's argument
    shares[-1] += 1.0 - sum(shares)
    previous = 0
    size = len(seq)
    cumulative = 0
    for share in shares:
        cumulative += share
        cut = int(cumulative * size)
        yield seq[previous:cut]
        previous = cut
def get_start_coords(worker_no, grid=conf.GRID, bounds=bounds):
    """Returns center of square for given worker.

    Workers are laid out row-major over a grid[0] x grid[1] partition of the
    bounded area; the returned point is the center of this worker's cell.
    """
    per_column = int((grid[0] * grid[1]) / grid[0])
    column = worker_no % per_column
    row = int(worker_no / per_column)
    cell_height = (bounds.south - bounds.north) / grid[0]
    cell_width = (bounds.east - bounds.west) / grid[1]
    center_lat = bounds.north + cell_height * row + cell_height / 2
    center_lon = bounds.west + cell_width * column + cell_width / 2
    return center_lat, center_lon
def float_range(start, end, step):
    """range for floats, also capable of iterating backwards.

    Both endpoints are inclusive (up to float accumulation error).
    """
    current = start
    if start > end:
        # Descending: count down until we pass *end*.
        while current >= end:
            yield current
            current -= step
    else:
        # Ascending: count up until we pass *end*.
        while current <= end:
            yield current
            current += step
def get_gains(dist=70):
    """Return the (lat, lon) gains for circle spacing.

    Gain is the spacing between scan circles: the triangle side is
    ``dist * sqrt(3)`` and its height ``side * sqrt(3) / 2``; both are
    projected from the area's center to angular offsets.
    """
    center = Point(*bounds.center)
    side = dist * sqrt(3)
    tri_height = side * sqrt(3) / 2
    east_point = distance(meters=side).destination(point=center, bearing=90)
    north_point = distance(meters=tri_height).destination(point=center, bearing=0)
    lat_gain = abs(center.latitude - north_point.latitude)
    lon_gain = abs(center.longitude - east_point.longitude)
    return lat_gain, lon_gain
def round_coords(point, precision, _round=round):
    """Return (lat, lon) of *point* rounded to *precision* decimal places."""
    rounded_lat = _round(point[0], precision)
    rounded_lon = _round(point[1], precision)
    return rounded_lat, rounded_lon
def get_bootstrap_points(bounds):
    """Return a shuffled list of (lat, lon) points covering *bounds*.

    Points are laid out on a grid spaced by the gains derived from
    conf.BOOTSTRAP_RADIUS, with every other row shifted by half a column
    (hex-like packing); multi-polygon areas are handled recursively.
    """
    coords = []
    if bounds.multi:
        # Multi-polygon area: gather points from each sub-polygon and merge.
        for b in bounds.polygons:
            coords.extend(get_bootstrap_points(b))
        return coords
    lat_gain, lon_gain = get_gains(conf.BOOTSTRAP_RADIUS)
    west, east = bounds.west, bounds.east
    # Falsy bounds mean "no containment test": keep every grid point.
    bound = bool(bounds)
    for map_row, lat in enumerate(
        float_range(bounds.south, bounds.north, lat_gain)
    ):
        row_start_lon = west
        # Stagger every other row by half the spacing.
        if map_row % 2 != 0:
            row_start_lon -= 0.5 * lon_gain
        for lon in float_range(row_start_lon, east, lon_gain):
            point = lat, lon
            # `in bounds` relies on the bounds object's __contains__.
            if not bound or point in bounds:
                coords.append(point)
    # Randomize visit order so workers don't sweep in a straight line.
    shuffle(coords)
    return coords
def get_device_info(account):
    """Build the device_info dict for *account*.

    If the account lacks an iOS version, model, or id (or the model is not in
    IPHONES), fresh details are generated and the lookup is retried.
    """
    device_info = {'brand': 'Apple',
                   'device': 'iPhone',
                   'manufacturer': 'Apple'}
    try:
        # iOS 10/11 versions start with '1' and report "iOS"; 9.x reports
        # "iPhone OS" (version tuples are defined in generate_device_info).
        if account['iOS'].startswith('1'):
            device_info['product'] = 'iOS'
        else:
            device_info['product'] = 'iPhone OS'
        # Trailing NUL byte — presumably matches strings as reported by real
        # devices; confirm against the upstream API expectations.
        device_info['hardware'] = account['model'] + '\x00'
        device_info['model'] = IPHONES[account['model']] + '\x00'
    except (KeyError, AttributeError):
        # Missing or unknown fields: regenerate plausible values and retry.
        account = generate_device_info(account)
        return get_device_info(account)
    device_info['version'] = account['iOS']
    device_info['device_id'] = account['id']
    return device_info
def generate_device_info(account):
    """Populate *account* with a random model, iOS version, and device id.

    The iOS version is drawn only from releases the chosen model could
    actually run (e.g. iPhone X models only get iOS 11).
    """
    ios9 = ('9.0', '9.0.1', '9.0.2', '9.1', '9.2', '9.2.1', '9.3', '9.3.1', '9.3.2', '9.3.3', '9.3.4', '9.3.5')
    # 10.0 was only for iPhone 7 and 7 Plus, and is rare
    ios10 = ('10.0.1', '10.0.2', '10.0.3', '10.1', '10.1.1', '10.2', '10.2.1', '10.3', '10.3.1', '10.3.2', '10.3.3')
    ios11 = ('11.0', '11.0.1', '11.0.2', '11.0.3', '11.1', '11.1.1')
    devices = tuple(IPHONES.keys())
    account['model'] = choice(devices)
    # Random unique identifier for this simulated device.
    account['id'] = uuid4().hex
    if account['model'] in ('iPhone10,1', 'iPhone10,2',
                            'iPhone10,3', 'iPhone10,4',
                            'iPhone10,5', 'iPhone10,6'):
        # iPhone 8/8 Plus/X shipped with iOS 11.
        account['iOS'] = choice(ios11)
    elif account['model'] in ('iPhone9,1', 'iPhone9,2',
                              'iPhone9,3', 'iPhone9,4'):
        # iPhone 7/7 Plus shipped with iOS 10.
        account['iOS'] = choice(ios10 + ios11)
    elif account['model'] in ('iPhone8,1', 'iPhone8,2'):
        account['iOS'] = choice(ios9 + ios10 + ios11)
    elif account['model'] == 'iPhone8,4':
        # iPhone SE started on 9.3
        account['iOS'] = choice(('9.3', '9.3.1', '9.3.2', '9.3.3', '9.3.4', '9.3.5') + ios10 + ios11)
    else:
        # Older models: anything from iOS 9 through 10.
        account['iOS'] = choice(ios9 + ios10)
    return account
def get_current_hour(now=None, _time=time):
    """Return the epoch timestamp of the start of the hour containing *now*
    (defaults to the current time)."""
    moment = now or _time()
    return round(moment - moment % 3600)
def time_until_time(seconds, seen=None, _time=time):
    """Return how long until the offset *seconds*-past-the-hour next occurs.

    seconds -- target offset within an hour
    seen    -- offset within the hour to measure from; defaults to now

    Bug fix: the original used ``seen or _time() % 3600``, which silently
    ignored a legitimate ``seen`` of 0 (exactly on the hour) and fell back to
    the wall clock; we now test for None explicitly.
    """
    current_seconds = seen if seen is not None else _time() % 3600
    if current_seconds > seconds:
        # Already past the target this hour; wait for next hour's occurrence.
        return seconds + 3600 - current_seconds
    elif current_seconds + 3600 < seconds:
        # Target is more than a full hour ahead; normalize back one hour.
        return seconds - 3600 - current_seconds
    else:
        return seconds - current_seconds
def get_address():
    """Return the address the manager should listen on.

    Preference order: an explicit conf.MANAGER_ADDRESS, a Windows named
    pipe, a Unix domain socket where the platform supports one, and finally
    a local TCP (host, port) tuple.
    """
    if conf.MANAGER_ADDRESS:
        return conf.MANAGER_ADDRESS
    if platform == 'win32':
        # Windows lacks Unix sockets; use a named pipe instead.
        return r'\\.\pipe\monocle'
    if hasattr(socket, 'AF_UNIX'):
        return join(conf.DIRECTORY, 'monocle.sock')
    # Fallback: TCP on localhost.
    return ('127.0.0.1', 5001)
def load_pickle(name, raise_exception=False):
    """Load `<name>.pickle` from the pickles directory.

    Returns the unpickled object, or None when the file is missing or empty —
    unless *raise_exception* is true, in which case FileNotFoundError is
    raised instead.
    """
    path = join(conf.DIRECTORY, 'pickles', '{}.pickle'.format(name))
    try:
        with open(path, 'rb') as pickle_file:
            return pickle_load(pickle_file)
    except (FileNotFoundError, EOFError):
        if raise_exception:
            raise FileNotFoundError
        return None
def dump_pickle(name, var):
    """Pickle *var* to `<name>.pickle` in the pickles directory, creating
    the directory if needed."""
    folder = join(conf.DIRECTORY, 'pickles')
    try:
        mkdir(folder)
    except FileExistsError:
        # Directory already present — nothing to do.
        pass
    except Exception as e:
        raise OSError("Failed to create 'pickles' folder, please create it manually") from e
    target = join(folder, '{}.pickle'.format(name))
    with open(target, 'wb') as pickle_file:
        pickle_dump(var, pickle_file, HIGHEST_PROTOCOL)
def randomize_point(point, amount=0.0003, randomize=uniform):
    '''Randomize point, by up to ~47 meters by default.'''
    lat, lon = point
    jittered_lat = randomize(lat - amount, lat + amount)
    jittered_lon = randomize(lon - amount, lon + amount)
    return jittered_lat, jittered_lon
def calc_pokemon_level(cp_multiplier):
    """Estimate a whole pokemon level from its CP multiplier."""
    if cp_multiplier < 0.734:
        # Quadratic fit for the low-multiplier range.
        level = (58.35178527 * cp_multiplier * cp_multiplier
                 - 2.838007664 * cp_multiplier + 0.8539209906)
    else:
        # Linear fit for the high-multiplier range.
        level = 171.0112688 * cp_multiplier - 95.20425243
    # NOTE(review): (round(level) * 2) / 2 is mathematically just round(level);
    # possibly round(level * 2) / 2 (half levels) was intended — confirm.
    return int((round(level) * 2) / 2)
|
[
"rsayson31@gmail.com"
] |
rsayson31@gmail.com
|
15d76288a158fb406c934e877e28b724e096ea6a
|
2a3743ced45bd79826dcdc55f304da049f627f1b
|
/venv/lib/python3.7/site-packages/bokeh/application/handlers/tests/test_server_lifecycle.py
|
c4b0619bfa46433fa3e209e51acb9a553214d2ba
|
[
"MIT"
] |
permissive
|
Dimasik007/Deribit_funding_rate_indicator
|
12cc8cd7c0be564d6e34d9eae91940c62492ae2a
|
3251602ae5249069489834f9afb57b11ff37750e
|
refs/heads/master
| 2023-05-26T10:14:20.395939
| 2019-08-03T11:35:51
| 2019-08-03T11:35:51
| 198,705,946
| 5
| 3
|
MIT
| 2023-05-22T22:29:24
| 2019-07-24T20:32:19
|
Python
|
UTF-8
|
Python
| false
| false
| 7,392
|
py
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
from bokeh.document import Document
from bokeh._testing.util.filesystem import with_file_contents
# Module under test
import bokeh.application.handlers.server_lifecycle as bahs
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
script_adds_four_handlers = """
def on_server_loaded(server_context):
return "on_server_loaded"
def on_server_unloaded(server_context):
return "on_server_unloaded"
def on_session_created(session_context):
return "on_session_created"
def on_session_destroyed(session_context):
return "on_session_destroyed"
"""
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class Test_ServerLifecycleHandler(object):
    """Tests for bahs.ServerLifecycleHandler: hook wiring, error reporting
    for bad scripts and signatures, and url_path behavior."""

    # Public methods ----------------------------------------------------------

    def test_empty_lifecycle(self):
        # A script defining no hooks: every hook call is a harmless no-op and
        # the document must be left untouched.
        doc = Document()
        def load(filename):
            handler = bahs.ServerLifecycleHandler(filename=filename)
            handler.modify_document(doc)
            handler.on_server_loaded(None)
            handler.on_server_unloaded(None)
            handler.on_session_created(None)
            handler.on_session_destroyed(None)
            if handler.failed:
                raise RuntimeError(handler.error)
        with_file_contents("# This script does nothing", load)
        assert not doc.roots

    def test_lifecycle_bad_syntax(self):
        # A script that does not parse must surface a syntax error message.
        result = {}
        def load(filename):
            handler = bahs.ServerLifecycleHandler(filename=filename)
            result['handler'] = handler
        with_file_contents("This is a syntax error", load)
        handler = result['handler']
        assert handler.error is not None
        assert 'Invalid syntax' in handler.error

    def test_lifecycle_runtime_error(self):
        # A script that raises at load time must surface the exception text.
        result = {}
        def load(filename):
            handler = bahs.ServerLifecycleHandler(filename=filename)
            result['handler'] = handler
        with_file_contents("raise RuntimeError('nope')", load)
        handler = result['handler']
        assert handler.error is not None
        assert 'nope' in handler.error

    def test_lifecycle_bad_server_loaded_signature(self):
        # Hooks must take exactly one context argument.
        result = {}
        def load(filename):
            handler = bahs.ServerLifecycleHandler(filename=filename)
            result['handler'] = handler
        with_file_contents("""
def on_server_loaded(a,b):
    pass
""", load)
        handler = result['handler']
        assert handler.error is not None
        assert 'on_server_loaded must have signature func(server_context)' in handler.error
        assert 'func(a, b)' in handler.error
        assert "Traceback" in handler.error_detail

    def test_lifecycle_bad_server_unloaded_signature(self):
        result = {}
        def load(filename):
            handler = bahs.ServerLifecycleHandler(filename=filename)
            result['handler'] = handler
        with_file_contents("""
def on_server_unloaded(a,b):
    pass
""", load)
        handler = result['handler']
        assert handler.error is not None
        assert 'on_server_unloaded must have signature func(server_context)' in handler.error
        assert 'func(a, b)' in handler.error
        assert "Traceback" in handler.error_detail

    def test_lifecycle_bad_session_created_signature(self):
        result = {}
        def load(filename):
            handler = bahs.ServerLifecycleHandler(filename=filename)
            result['handler'] = handler
        with_file_contents("""
def on_session_created(a,b):
    pass
""", load)
        handler = result['handler']
        assert handler.error is not None
        assert 'on_session_created must have signature func(session_context)' in handler.error
        assert 'func(a, b)' in handler.error

    def test_lifecycle_bad_session_destroyed_signature(self):
        result = {}
        def load(filename):
            handler = bahs.ServerLifecycleHandler(filename=filename)
            result['handler'] = handler
        with_file_contents("""
def on_session_destroyed(a,b):
    pass
""", load)
        handler = result['handler']
        assert handler.error is not None
        assert 'on_session_destroyed must have signature func(session_context)' in handler.error
        assert 'func(a, b)' in handler.error

    def test_calling_lifecycle_hooks(self):
        # Each hook defined by the script must be reachable through the
        # handler and return the script's value (its own name here).
        result = {}
        def load(filename):
            handler = result['handler'] = bahs.ServerLifecycleHandler(filename=filename)
            if handler.failed:
                raise RuntimeError(handler.error)
        with_file_contents(script_adds_four_handlers, load)
        handler = result['handler']
        assert "on_server_loaded" == handler.on_server_loaded(None)
        assert "on_server_unloaded" == handler.on_server_unloaded(None)
        assert "on_session_created" == handler.on_session_created(None)
        assert "on_session_destroyed" == handler.on_session_destroyed(None)

    def test_missing_filename_raises(self):
        # The filename keyword is mandatory.
        with pytest.raises(ValueError):
            bahs.ServerLifecycleHandler()

    def test_url_path(self):
        # A valid script yields a url_path rooted at "/".
        result = {}
        def load(filename):
            handler = bahs.ServerLifecycleHandler(filename=filename)
            result['handler'] = handler
        with_file_contents("""
def on_server_unloaded(server_context):
    pass
""", load)
        handler = result['handler']
        assert handler.error is None
        assert handler.url_path().startswith("/")

    def test_url_path_failed(self):
        # A failed handler reports an error and no url_path.
        result = {}
        def load(filename):
            handler = bahs.ServerLifecycleHandler(filename=filename)
            result['handler'] = handler
        with_file_contents("""
# bad signature
def on_server_unloaded():
    pass
""", load)
        handler = result['handler']
        assert handler.error is not None
        assert handler.url_path() is None
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
[
"dmitriy00vn@gmail.com"
] |
dmitriy00vn@gmail.com
|
aa056842a14f7ec88f0be84f980e6eaf7910d417
|
a59a3335ceb27b807e9a8eeb95932c72eac214ac
|
/apps/brew/management/commands/migrate_recipe_slug_url.py
|
44f985493b6391cf3e22de75ca1ec936abd86063
|
[
"Beerware"
] |
permissive
|
egaillot/zython
|
ef2a413f90323ed9f3e6c131913548eb0e0c9142
|
cf7ebcdb5265012d9e2b9c0652befe33b3bb6fe0
|
refs/heads/main
| 2023-03-24T02:36:33.281166
| 2022-07-05T08:42:44
| 2022-07-05T08:42:44
| 193,670,642
| 0
| 0
|
NOASSERTION
| 2022-09-24T12:58:40
| 2019-06-25T08:50:22
|
Python
|
UTF-8
|
Python
| false
| false
| 287
|
py
|
from django.core.management.base import BaseCommand
from brew.models import Recipe
class Command(BaseCommand):
    """Backfill ``slug_url`` on recipes that do not have one yet."""
    def handle(self, *args, **options):
        # Only recipes whose slug_url is still unset are touched;
        # force_update=False presumably leaves any existing slug alone —
        # confirm against Recipe.update_slug_url.
        for r in Recipe.objects.filter(slug_url__isnull=True):
            r.update_slug_url(force_update=False)
            # Echo each migrated recipe for operator feedback.
            print(r)
|
[
"martyn.clement@gmail.com"
] |
martyn.clement@gmail.com
|
169cbdda02836d71d1a791e7092f32511df9991b
|
cbf171e1389face074a32a7bd08e5421a36c866d
|
/app.py
|
ef2989abbbbd3f8e7a25a457d79a0750b27953ac
|
[] |
no_license
|
abbeycite/Automated-Teller-Machine-ATM-Application
|
6a13cc60a2423db8a6beb07d1171f5c859b17de1
|
4620436bf64c404687fe9a08beaf06a836540667
|
refs/heads/main
| 2023-08-28T07:18:04.488428
| 2021-10-25T14:09:12
| 2021-10-25T14:09:12
| 410,758,739
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,159
|
py
|
from banking_pkg import account
def atm_menu(name):
    """Display the ATM menu header and options for the logged-in user."""
    border = "------------------------------------------"
    for line in (
        "",
        " === Automated Teller Machine === ",
        "User: " + name,
        border,
        "| 1. Balance | 2. Deposit |",
        border,
        border,
        "| 3. Withdraw | 4. Logout |",
        border,
    ):
        print(line)
# --- Registration ----------------------------------------------------------
print(" === Automated Teller Machine === ")
name = input("Enter name to register:")
# Bug fix: the original asked only once more on a bad name and then accepted
# whatever was typed; loop until the name is 1-10 characters.
while len(name) < 1 or len(name) > 10:
    print("The maximum length allowed for name is 10")
    name = input("Enter name to register:")
pin = input("Enter PIN:")
balance = 0
# Bug fix: the original only warned about a bad PIN and registered it anyway;
# loop until the PIN is exactly 4 characters.
while len(pin) != 4:
    print("PIN must be 4-digits")
    pin = input("Enter PIN:")
print(name + " has registered with a starting balance of " + "$" + str(balance))

# --- Login -----------------------------------------------------------------
while True:
    name_to_validate = input("Enter name to Login:")
    pin_to_validate = input("Enter PIN to Login:")
    if name_to_validate == name and pin_to_validate == pin:
        print("Login successful!")
        break
    else:
        print("Invalid credentials!")

# --- Main menu loop --------------------------------------------------------
while True:
    # Reuse the helper instead of duplicating the menu prints inline.
    atm_menu(name)
    option = input("Choose an option:")
    if option == "1":
        account.show_balance(balance)
    elif option == "2":
        # Bug fix: the deposit result was assigned to a throwaway variable
        # (balance1), so deposits never changed the balance.
        balance = account.deposit(balance)
        account.show_balance(balance)
    elif option == "3":
        balance = account.withdraw(balance)
        account.show_balance(balance)
    else:
        # Any other choice logs the user out and ends the session.
        account.logout(name)
        break
|
[
"noreply@github.com"
] |
abbeycite.noreply@github.com
|
8d8f375d036af0267ad6d48a1fdff3989fe6abb9
|
bcb01c7a08debdfdbff9c3036219658262ee8c1b
|
/src/Planners/XTREE/methods1.py
|
77ec787cdf15a6fc6501085df9cfeffe9363c63b
|
[
"MIT"
] |
permissive
|
rahlk/RAAT
|
ce4989bb9c8582f4ba090c220af6dde000829159
|
d022231f61437ac43b22f1e133216d62f1883c4b
|
refs/heads/master
| 2020-04-06T07:02:12.588202
| 2016-08-19T00:46:59
| 2016-08-19T00:47:01
| 37,782,349
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,717
|
py
|
#! /Users/rkrsn/anaconda/bin/python
from pdb import set_trace
from os import environ, getcwd
from os import walk
from os.path import expanduser
from pdb import set_trace
import sys
# Update PYTHONPATH
HOME = expanduser('~')
axe = HOME + '/git/axe/axe/' # AXE
pystat = HOME + '/git/pystats/' # PySTAT
cwd = getcwd() # Current Directory
sys.path.extend([axe, pystat, './where2'])
from tools.axe.dtree import *
from tools.axe.table import *
# from w2 import *
from lib.w2 import where2, prepare, leaves
from lib.makeAmodel import makeAModel
import matplotlib.mlab as mlab
# import matplotlib.pyplot as plt
import smote
def explore(dir):
datasets = []
for (dirpath, dirnames, filenames) in walk(dir):
datasets.append(dirpath)
training = []
testing = []
for k in datasets[1:]:
train = [[dirPath, fname] for dirPath, _, fname in walk(k)]
test = [train[0][0] + '/' + train[0][1].pop(-1)]
training.append(
[train[0][0] + '/' + p for p in train[0][1] if not p == '.DS_Store'])
testing.append(test)
return training, testing
def newTable(tbl, headerLabel, Rows):
tbl2 = clone(tbl)
newHead = Sym()
newHead.col = len(tbl.headers)
newHead.name = headerLabel
tbl2.headers = tbl.headers + [newHead]
return clone(tbl2, rows=Rows)
def createTbl(
data,
settings=None,
_smote=False,
isBin=False,
bugThres=1,
duplicate=False):
"""
kwargs:
_smote = True/False : SMOTE input data (or not)
_isBin = True/False : Reduce bugs to defects/no defects
_bugThres = int : Threshold for marking stuff as defective,
default = 1. Not defective => Bugs < 1
"""
makeaModel = makeAModel()
_r = []
for t in data:
m = makeaModel.csv2py(t, _smote=_smote, duplicate=duplicate)
_r += m._rows
m._rows = _r
# Initialize all parameters for where2 to run
prepare(m, settings=None)
tree = where2(m, m._rows) # Decision tree using where2
tbl = table(t)
headerLabel = '=klass'
Rows = []
for k, _ in leaves(tree): # for k, _ in leaves(tree):
for j in k.val:
tmp = j.cells
if isBin:
tmp[-1] = 0 if tmp[-1] < bugThres else 1
tmp.append('_' + str(id(k) % 1000))
j.__dict__.update({'cells': tmp})
Rows.append(j.cells)
return newTable(tbl, headerLabel, Rows)
def test_createTbl():
dir = '../Data/camel/camel-1.6.csv'
newTbl = createTbl([dir], _smote=False)
newTblSMOTE = createTbl([dir], _smote=True)
print(len(newTbl._rows), len(newTblSMOTE._rows))
def drop(test, tree):
loc = apex(test, tree)
return loc
if __name__ == '__main__':
test_createTbl()
|
[
"i.m.ralk@gmail.com"
] |
i.m.ralk@gmail.com
|
7ca8e2675038c589bfcabe68370250d28b3176e2
|
eb191a4eb16c5f1753c13bfd5b0b8a6b1e7d23f1
|
/Session 3 - Conditional Statements part 2/05_grocery_strore.py
|
6d1783985253f626ee7eb3d41b67bed062e3f792
|
[
"MIT"
] |
permissive
|
solomonsh/Crack-the-Code
|
efadb130d584522e073a2dde2f4cc41722a0faad
|
5eee7f59292315c724dd173f321bb95925c3023e
|
refs/heads/main
| 2023-09-04T09:46:29.477958
| 2021-10-13T10:45:59
| 2021-10-13T10:45:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,088
|
py
|
# Unit prices per city and product; missing entries fall through to price 0.
PRICES = {
    "Sofia": {"coffee": 0.50, "water": 0.80, "beer": 1.20,
              "sweets": 1.45, "peanuts": 1.60},
    "Plovdiv": {"coffee": 0.40, "water": 0.70, "beer": 1.15,
                "sweets": 1.30, "peanuts": 1.50},
    "Varna": {"coffee": 0.45, "water": 0.70, "beer": 1.10,
              "sweets": 1.35, "peanuts": 1.55},
}

product = input()
city = input()
amount = float(input())

# Look up the unit price; unknown city or product leaves the total at 0,
# matching the original if/elif chain's fallthrough behavior.
unit_price = PRICES.get(city, {}).get(product)
price = unit_price * amount if unit_price is not None else 0
print(price)
|
[
"elenaborrisova@gmail.com"
] |
elenaborrisova@gmail.com
|
93516a79d6bfc2469eb4eab27939c086e8fd0b1e
|
fc82bf6ad91053c37bc954d99e9791b09bb1708b
|
/test_case/test_05_send_record.py
|
98b64753f9ab7a88705f7872473a0b0be7c2dd09
|
[] |
no_license
|
ddy88958620/enterprise3.1-ver2.0
|
0cc840684895a6ab0a9eed0c06cfca47e04f19c9
|
1c18f8cf8dd947cf7fcfb008d8f3065b6998d4cc
|
refs/heads/master
| 2023-03-12T20:55:17.885893
| 2021-03-02T09:31:38
| 2021-03-02T09:31:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,227
|
py
|
import unittest
from pages.page_05_send_record import sendRecord_page
from pages.login_page import LoginPage
from commons.log import log
from BeautifulReport import BeautifulReport
from commons.driver_setup import *
import os
# @unittest.skip("skip for now")
# NOTE(review): class name has a typo ("Seacrch"); renaming would change the
# public interface used by test discovery/reports, so it is left as-is.
class SeacrchBysender(unittest.TestCase):
    """Selenium UI tests for the bulk-send record page."""

    @classmethod
    def setUpClass(cls):
        # One browser session and one login shared by all tests in the class.
        cls.driver = driver_config()
        cls.driver.implicitly_wait(5)
        LoginPage(cls.driver).login()
        log().info('开始执行:群发记录页面的自动化测试')

    @classmethod
    def tearDownClass(cls):
        log().info('执行结束:群发记录页面的自动化测试')
        cls.driver.quit()

    @BeautifulReport.add_test_img('test_02_serachBytext')
    def test_02_serachBytext(self):
        """Bulk-send records -> search by message content; assert on content."""
        log().info('开始执行:用例-通过消息内容查询')
        sendRecord_page(self.driver).searchBytext('自动化测试')
        log().info('执行结束:用例-通过消息内容查询')

    @BeautifulReport.add_test_img('test_03_serachBytype')
    def test_03_serachBytype(self):
        """Bulk-send records -> search by send type; assert on the type."""
        log().info('开始执行:用例-通过群发类型查询')
        # Refresh to clear any state left by the previous search.
        self.driver.refresh()
        sendRecord_page(self.driver).searchBytype()
        log().info('执行结束:用例-通过群发类型查询')

    @BeautifulReport.add_test_img('test_04_serachBytime')
    def test_04_serachBytime(self):
        """Bulk-send records -> search by creation time; assert on the time."""
        log().info('开始执行:用例-通过创建时间查询')
        self.driver.refresh()
        sendRecord_page(self.driver).searchBytime('2020-11-26', '2020-11-26')
        log().info('执行结束:用例-通过创建时间查询')

    @BeautifulReport.add_test_img('test_05_reset')
    def test_05_reset(self):
        """Bulk-send records -> reset function; assert a <= b."""
        log().info('开始执行:用例-重置功能')
        self.driver.refresh()
        sendRecord_page(self.driver).reset('群发自动测试')
        log().info('执行结束:用例-重置功能')
# Allow running this test module directly with the standard unittest runner.
if __name__ == '__main__':
    unittest.main()
|
[
"781707715@qq.com"
] |
781707715@qq.com
|
649ea16733eebc56eb6e0780b6c68434cb05bcf2
|
16cb8cc18d92d4018f9ee3044565cf22d4daef70
|
/Lab4/Corrections/Python/exercise1.py
|
773a0549f8c5d6762b043b02ef4db65b314206bb
|
[] |
no_license
|
zx-joe/Computational-Motor-Control-for-Salamandar-Robot
|
c13ac105d73b283ac86c00a00a7b25b28e3713af
|
c66d23fb8365e4b12263bb4115a30d708d42dbb2
|
refs/heads/master
| 2022-12-12T12:23:57.573980
| 2020-09-08T09:05:28
| 2020-09-08T09:05:28
| 256,481,679
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,579
|
py
|
""" Lab 4 """
import matplotlib.pyplot as plt
import numpy as np
import farms_pylog as pylog
from cmcpack import DEFAULT, integrate, integrate_multiple, parse_args
from pendulum_system import PendulumSystem
from system_animation import SystemAnimation
from system_parameters import PendulumParameters
# Default axis labels (angle and angular velocity) for the cmcpack plots.
DEFAULT["label"] = [r"$\theta$ [rad]", r"$d\theta/dt$ [rad/s]"]
def pendulum_integration(state, time, *args):
    """ Function for system integration """
    # Template warning: the spring/damper terms are expected to be implemented
    # inside PendulumSystem.pendulum_system.
    pylog.warning("Pendulum equation with spring and damper must be implemented") # _S
    pendulum = args[0]
    # state[0]/state[1] are the angle and angular velocity (see DEFAULT
    # labels); no external torque is applied.
    return pendulum.pendulum_system(
        state[0], state[1], time, torque=0.0
    )[:, 0]
def pendulum_perturbation(state, time, *args):
    """ Function for system integration with perturbations.
    Setup your time based system perturbations within this function.
    The perturbation can either be applied to the states or as an
    external torque.
    """
    pendulum = args[0]
    # Kick the velocity state to a fixed value during the 5.0-5.1 s window.
    if time > 5 and time < 5.1:
        pylog.info("Applying state perturbation to pendulum_system")
        state[1] = 2.
    return pendulum.pendulum_system(
        state[0], state[1], time, torque=0.0)[:, 0]
def pendulum_limit_cycle(x0, time=0.0, *args):
    """Integrate the undamped (springs-only) pendulum from x0 and plot the
    state and phase trajectories to study limit cycle behavior."""
    # Initialize the parameters without damping
    pendulum = args[0]
    pendulum.parameters.b1 = 0.0
    pendulum.parameters.b2 = 0.0
    pylog.info(pendulum.parameters.showParameters())
    pylog.info(
        "1a. Running pendulum_system with springs to study limit cycle behavior")
    title = "{} Limit Cycle(x0 = {})"
    # Perturbed integrator: the mid-run state kick shows convergence back
    # toward the cycle.
    res = integrate(pendulum_perturbation, x0, time, args=args)
    res.plot_state(title.format("State", x0))
    res.plot_phase(title.format("Phase", x0))
def pendulum_spring_constant(x0, time=0.0, *args):
    """Integrate the undamped pendulum with three spring-stiffness settings
    (weak, stiff, asymmetric) and plot state/phase for each."""
    # Initialize the parameters to bias spring 1
    pendulum = args[0]
    pendulum.parameters.b1 = 0.0
    pendulum.parameters.b2 = 0.0
    # Case 1: weak symmetric springs.
    pendulum.parameters.k1 = 0.1
    pendulum.parameters.k2 = 0.1
    pylog.info(
        "1b. Running pendulum_system for analysing role of spring constant")
    pylog.info(pendulum.parameters.showParameters())
    title = "{} Spring Constant 1(x0 = {})"
    res = integrate(pendulum_integration, x0, time, args=args)
    res.plot_state(title.format("State", x0))
    res.plot_phase(title.format("Phase", x0))
    # Initialize the parameters to bias spring 2
    pendulum.parameters.b1 = 0.0
    pendulum.parameters.b2 = 0.0
    # Case 2: stiff symmetric springs.
    pendulum.parameters.k1 = 100.
    pendulum.parameters.k2 = 100.
    pylog.info(
        "1b. Running pendulum_system for analysing role of spring constant")
    pylog.info(pendulum.parameters.showParameters())
    title = "{} Spring Constant 2(x0 = {})"
    res = integrate(pendulum_integration, x0, time, args=args)
    res.plot_state(title.format("State", x0))
    res.plot_phase(title.format("Phase", x0))
    # Initialize the pendulum.parameters to bias spring 1
    pendulum.parameters.b1 = 0.0
    pendulum.parameters.b2 = 0.0
    # Case 3: asymmetric stiffness (weak vs stiff).
    pendulum.parameters.k1 = 1.0
    pendulum.parameters.k2 = 100.0
    pylog.info(
        "1b. Running pendulum_system for analysing role of variable spring constant")
    pylog.info(pendulum.parameters.showParameters())
    title = "{} Variable Spring Constant 1(x0 = {})"
    res = integrate(pendulum_integration, x0, time, args=args)
    res.plot_state(title.format("State", x0))
    res.plot_phase(title.format("Phase", x0))
def pendulum_spring_reference(x0, time=0.0, *args):
    """Integrate the undamped pendulum with three spring reference-angle
    settings (narrow, wide, asymmetric) and plot state/phase for each."""
    # Initialize the parameters to bias spring 1 reference angle
    pendulum = args[0]
    pendulum.parameters.b1 = 0.0
    pendulum.parameters.b2 = 0.0
    pendulum.parameters.k1 = 10.0
    pendulum.parameters.k2 = 10.0
    # Case 1: narrow symmetric reference angles (+/-10 degrees).
    pendulum.parameters.s_theta_ref1 = np.deg2rad(-10.0)
    pendulum.parameters.s_theta_ref2 = np.deg2rad(10.0)
    pylog.info(
        "1b. Running pendulum_system for analysing role of spring reference")
    pylog.info(pendulum.parameters.showParameters())
    title = "{} Spring Reference 1(x0 = {})"
    res = integrate(pendulum_integration, x0, time, args=args)
    res.plot_state(title.format("State", x0))
    res.plot_phase(title.format("Phase", x0))
    # Initialize the pendulum.parameters to bias spring 2 reference angle
    pendulum.parameters.b1 = 0.0
    pendulum.parameters.b2 = 0.0
    pendulum.parameters.k1 = 10.0
    pendulum.parameters.k2 = 10.0
    # Case 2: wide symmetric reference angles (+/-75 degrees).
    pendulum.parameters.s_theta_ref1 = np.deg2rad(-75.0)
    pendulum.parameters.s_theta_ref2 = np.deg2rad(75.)
    pylog.info(
        "1b. Running pendulum_system for analysing role of spring reference")
    pylog.info(pendulum.parameters.showParameters())
    title = "{} Spring Reference 2(x0 = {})"
    res = integrate(pendulum_integration, x0, time, args=args)
    res.plot_state(title.format("State", x0))
    res.plot_phase(title.format("Phase", x0))
    # Initialize the pendulum.parameters to bias spring 2 reference angle
    pendulum.parameters.b1 = 0.0
    pendulum.parameters.b2 = 0.0
    pendulum.parameters.k1 = 10.0
    pendulum.parameters.k2 = 10.0
    # Case 3: asymmetric references (0 vs 75 degrees).
    pendulum.parameters.s_theta_ref1 = np.deg2rad(0.0)
    pendulum.parameters.s_theta_ref2 = np.deg2rad(75.0)
    pylog.info(
        "1c. Running pendulum_system for analysing role of variable spring reference")
    pylog.info(pendulum.parameters.showParameters())
    title = "{} Variable Spring Reference 1(x0 = {})"
    res = integrate(pendulum_integration, x0, time, args=args)
    res.plot_state(title.format("State", x0))
    res.plot_phase(title.format("Phase", x0))
def pendulum_spring_damper(x0, time=0.0, *args):
    """ Function to analyse the pendulum spring damper system"""
    pendulum = args[0]
    # Moderate damping with stiff springs referenced at +/-45 degrees.
    pendulum.parameters.b1 = 0.5
    pendulum.parameters.b2 = 0.5
    pendulum.parameters.k1 = 50.0
    pendulum.parameters.k2 = 50.0
    pendulum.parameters.s_theta_ref1 = np.deg2rad(-45.0)
    pendulum.parameters.s_theta_ref2 = np.deg2rad(45.0)
    pylog.info(
        "20. Running pendulum_system for analysing role of spring and damper muscle")
    pylog.info(pendulum.parameters.showParameters())
    title = "{} Spring Damper (x0 = {})"
    # Perturbed integrator: shows the damped response to a mid-run state kick.
    res = integrate(pendulum_perturbation, x0, time, args=args)
    res.plot_state(title.format("State", x0))
    res.plot_phase(title.format("Phase", x0))
def pendulum_set_position(x0, time=0.0, *args):
    """Drive the pendulum to a fixed position with stiff, damped muscles.

    Uses strong springs with asymmetric reference angles so the system
    settles at an equilibrium posture; logs the final position and plots
    state and phase.
    """
    system = args[0]
    params = system.parameters
    params.b1 = 1.
    params.b2 = 1.
    params.k1 = 50.0
    params.k2 = 50.0
    params.s_theta_ref1 = np.deg2rad(0.0)
    params.s_theta_ref2 = np.deg2rad(65.6)
    pylog.info(
        "1b. Running pendulum_system to set fixed position")
    pylog.info(params.showParameters())
    plot_title = "{} Pendulum Fixed Position (x0 = {})"
    result = integrate(pendulum_integration, x0, time, args=args)
    pylog.debug('Position : {}'.format(np.rad2deg(result.state[-1])))
    result.plot_state(plot_title.format("State", x0))
    result.plot_phase(plot_title.format("Phase", x0))
def exercise1():
    """ Exercise 1

    Driver for Lab 4 exercise 1: builds a pendulum system, runs a baseline
    simulation, then calls each analysis scenario (limit cycle, spring
    constants, spring references, spring-damper, fixed position).
    """
    pylog.info("Executing Lab 4 : Exercise 1")
    pendulum = PendulumSystem()
    pendulum.parameters = PendulumParameters()
    pylog.info(
        "Find more information about Pendulum Parameters in SystemParameters.py")
    # NOTE(review): the three lines above are duplicated just below; the
    # second initialisation simply replaces the first and is harmless.
    pylog.info("Executing Lab 4 : Exercise 1")
    # Initialize a new pendulum system with default parameters
    pendulum = PendulumSystem()
    pendulum.parameters = PendulumParameters()
    pylog.info(
        "Find more information about Pendulum Parameters in SystemParameters.py")
    # To change a parameter you could so by,
    # >>> pendulum.parameters.L = 1.5
    # The above line changes the length of the pendulum
    # You can instantiate multiple pendulum models with different parameters
    # For example,
    """
    >>> pendulum_1 = PendulumSystem()
    >>> pendulum_1.parameters = PendulumParameters()
    >>> pendulum_1.parameters.L = 0.5
    >>> parameters_2 = PendulumParameters()
    >>> parameters_2.L = 0.5
    >>> pendulum_2 = PendulumSystem(paramters=parameters_2)
    """
    # Above examples shows how to create two istances of the pendulum
    # and changing the different parameters using two different approaches
    # Baseline muscle parameters for the first simulation.
    pendulum.parameters.k1 = 50
    pendulum.parameters.k2 = 50
    pendulum.parameters.s_theta_ref1 = 1.0
    pendulum.parameters.s_theta_ref2 = 1.0
    pendulum.parameters.b1 = 0.5
    pendulum.parameters.b2 = 0.5
    pylog.warning("Loading default pendulum pendulum.parameters")
    pylog.info(pendulum.parameters.showParameters())
    # Simulation Parameters
    t_start = 0.0
    t_stop = 10.0
    dt = 0.01
    pylog.warning("Using large time step dt={}".format(dt))
    time = np.arange(t_start, t_stop, dt)
    x0 = [0.5, 0.0]
    res = integrate(pendulum_integration, x0, time, args=(pendulum,))
    pylog.info("Instructions for applying pertubations")
    # Use pendulum_perturbation method to apply pertubations
    # Define the pertubations inside the function pendulum_perturbation
    # res = integrate(pendulum_perturbation, x0, time, args=(pendulum,))
    res.plot_state("State")
    res.plot_phase("Phase")
    # Run every analysis scenario from a slightly perturbed initial state.
    x0 = [0.5, 0.1]
    pendulum_limit_cycle(x0, time, pendulum)
    pendulum_spring_constant(x0, time, pendulum)
    pendulum_spring_reference(x0, time, pendulum)
    pendulum_spring_damper(x0, time, pendulum)
    pendulum_set_position(x0, time, pendulum)
    if DEFAULT["save_figures"] is False:
        plt.show()
    return
if __name__ == '__main__':
    # Parse the shared cmcpack command-line options (e.g. figure saving)
    # before running the exercise.
    from cmcpack import parse_args
    parse_args()
    exercise1()
|
[
"xiao.zhou@epfl.ch"
] |
xiao.zhou@epfl.ch
|
9b0dfb5017741e39c3117616edb348cc76e2ff43
|
3ef524fbe4f299fa5f2702bf3b421b9e5e548b90
|
/Problem 25.py
|
7ca64eb15dacb84a5eb19486af2591ea15d05289
|
[] |
no_license
|
PSpeiser/ProjectEuler
|
87e95cac98f7811a15ca2bb1c925dd595d8a7c43
|
b846c172bd12b4400e200d28886a6af7bec2dcf0
|
refs/heads/master
| 2021-01-01T18:55:01.121820
| 2014-04-03T14:37:35
| 2014-04-03T14:37:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 260
|
py
|
# Memoised Fibonacci cache, 1-indexed: F(1) = F(2) = 1.
cache = {1: 1, 2: 1}


def fibonacci(n):
    """Return the n-th Fibonacci number (1-indexed), memoising results.

    Fixed: the previous version read cache[n-1] and cache[n-2] directly,
    which raised KeyError whenever n was not reached sequentially; recursing
    through fibonacci() fills the cache for any n instead.
    """
    if n not in cache:
        cache[n] = fibonacci(n - 1) + fibonacci(n - 2)
    return cache[n]


# Project Euler 25: find the index of the first Fibonacci term with
# 1000 or more digits.
i = 1
while True:
    n = fibonacci(i)
    if len(str(n)) >= 1000:
        # Parenthesised single-argument print behaves identically under
        # Python 2 and 3 (the original py2-only statement form did not).
        print("Term F%s ; Value %s" % (i, n))
        break
    i += 1
|
[
"mail@patrickspeiser.at"
] |
mail@patrickspeiser.at
|
7e192c0c89b5bce331375946602582924e4e4432
|
37ac0b28befbbd822f3020e1b748114977f0acf9
|
/bankapp/utilities/konstants.py
|
e0d8b57126197f1629748cf9e321f6d92fbdb5df
|
[] |
no_license
|
Akhilrajns/emaps
|
1b0e4d9933268def6f0a5500b09163a02a6c0cbd
|
cf5ffafffcbb2cd0d8703668fc87cb56ae47535e
|
refs/heads/master
| 2021-01-01T04:13:55.441433
| 2017-08-04T02:48:02
| 2017-08-04T02:48:02
| 97,145,153
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,722
|
py
|
# Accepted format(s) when parsing incoming date strings (US month-first).
INPUT_DATE_FORMAT = ["%m/%d/%Y"]
# Format used when serialising dates back out.
OUTPUT_DATE_FORMAT = "%m/%d/%Y"
class K:
    """One named constant.

    The single keyword argument supplies both the attribute name (``id``)
    and the stored value (``v``); ``label`` defaults to the id when omitted.
    """

    def __init__(self, label=None, **kwargs):
        assert(len(kwargs) == 1)
        # Exactly one pair is guaranteed by the assert above.
        (key, value), = kwargs.items()
        self.id = key
        self.v = value
        self.label = label if label else self.id
class Konstants:
    """Group of K constants; each constant's id becomes an attribute
    holding its value (e.g. ``ROLES.fan == 'R1001'``)."""

    def __init__(self, *args):
        self.klist = args
        for konstant in self.klist:
            setattr(self, konstant.id, konstant.v)

    def choices(self):
        """Return (value, label) pairs, e.g. for Django field choices."""
        pairs = []
        for konstant in self.klist:
            pairs.append((konstant.v, konstant.label))
        return pairs

    def get_label(self, key):
        """Return the label whose value equals *key*, or None if absent."""
        matches = [k.label for k in self.klist if k.v == key]
        return matches[0] if matches else None
# User roles; the R-codes are the stable identifiers stored in the database.
ROLES = Konstants(
    K(fan='R1001', label='Fan'),
    K(celebrity='R1002', label='Celebrity'),
    K(admin='R1003', label='Admin')
)
# Categories a celebrity account can belong to.
CELEBRITY_CATEGORIES = Konstants(
    K(music='CC1001', label='Music'),
    K(actor='CC1002', label='Actor'),
    K(model='CC1003', label='Model'),
    K(sports='CC1004', label='Sports')
)
# Feature flags / permission identifiers used across the app.
FEATURES = Konstants(
    K(user_profile_retrieve='F1001', label='User Profile Retrieve'),
    K(celebrity_retrieve='F1002', label='Celebrity Retrieve'),
    K(fan_request_list='F1003', label='Fan Request List'),
    K(fan_request_retrieve='F1004', label='Fan Request Retrieve'),
    K(profile_image_URL_add='F1005', label='Profile Image URL Add'),
    K(user_profile_update='F1006', label='User Profile Update'),
    K(fan_request_delete='F1007', label='Fan Request Delete'),
    K(stargram_video_create='F1008', label='Stargram Video Create'),
    K(stargram_video_update='F1009', label='Stargram Video Update'),
    K(stargram_video_delete='F1010', label='Stargram Video Delete'),
    K(profile_image_URL_delete='F1011', label='Profile Image URL Delete')
)
|
[
"akhilrajns@gmail.com"
] |
akhilrajns@gmail.com
|
5b8c65264124e112089b9594ca36afdf4ff2d6e9
|
fbc7f98003b6b04447b89055e988015985c54b29
|
/crm/urls.py
|
6ff043a8d9cf28a781a38ca65df9be90aeeec478
|
[] |
no_license
|
GuillaumeLambermont/django-crm
|
33f215590f2cce7065b9350a61981b6bc8927a5f
|
46eb2df1f481cb5119203c6d05d771aa6b0cbebc
|
refs/heads/main
| 2023-06-28T17:11:05.546850
| 2021-08-03T10:53:09
| 2021-08-03T10:53:09
| 392,279,580
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 745
|
py
|
"""crm URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
    # Django admin site; no application routes are registered yet.
    path('admin/', admin.site.urls),
]
|
[
"guillaume.lambermont@gmail.com"
] |
guillaume.lambermont@gmail.com
|
842f9af822f0189540b1496ff74ae039be3ba8f8
|
d9b75a25299b5686070021444c936cd5a23f3fe2
|
/main.py
|
69da713da0b6199bea37f96867f068f48be8f13d
|
[] |
no_license
|
DBM1/DM-Assignment
|
effe3c57fcc77ccf600f92d373c41af5c69473e8
|
934ceea5961f66919854c0b4e37e61edca98f0c1
|
refs/heads/main
| 2023-02-02T14:37:30.982072
| 2020-12-19T07:44:52
| 2020-12-19T07:44:52
| 314,745,520
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,403
|
py
|
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.decomposition import TruncatedSVD
from sklearn.preprocessing import StandardScaler
import torch
import time
from transformers import AutoTokenizer, AutoModel
# Column names shared by all the AG-News dataframe helpers below.
CATEGORY = 'CATEGORY'
TEXT = 'TEXT'
TITLE = 'TITLE'
def load_data_news_train(nrows=None) -> pd.DataFrame:
    """Load the AG-News training CSV into a dataframe.

    Prepends the title to the text, drops the title column, lower-cases the
    text and replaces every non-alphanumeric character with a space.

    Args:
        nrows: optional cap on the number of rows read (None reads all).

    Returns:
        DataFrame with columns CATEGORY and TEXT.
    """
    data_file = 'data/ag_news_csv/train.csv'
    columns_name = [CATEGORY, TITLE, TEXT]
    file_data = pd.read_csv(data_file, engine='python', header=None, names=columns_name,
                            nrows=nrows)
    file_data[TEXT] = file_data[TITLE] + ' ' + file_data[TEXT]
    file_data = file_data.drop(columns=[TITLE, ])
    # regex=True is required to keep the pattern semantics: pandas >= 2.0
    # defaults str.replace to literal matching, which would silently stop
    # stripping punctuation here.
    file_data[TEXT] = file_data[TEXT].str.replace('[^a-zA-Z0-9]', ' ', regex=True).str.lower()
    return file_data
def token_to_cuda(tokenizer_output):
    """Move the three tokenizer output tensors onto the GPU.

    Returns a new dict with the same keys ('input_ids', 'token_type_ids',
    'attention_mask') whose tensors have been transferred via .cuda().
    """
    return {
        'input_ids': tokenizer_output['input_ids'].cuda(),
        'token_type_ids': tokenizer_output['token_type_ids'].cuda(),
        'attention_mask': tokenizer_output['attention_mask'].cuda(),
    }
def generate_bert_embedding(data, batchsize=10, log_interval=500):
    """Compute BERT pooler-output embeddings for a series of texts in batches.

    Every ``log_interval`` batches the accumulated embeddings are written to
    a numbered file under ``bert_embedding\\`` and the in-memory list is
    cleared, so the return value contains only embeddings produced after the
    last flush.  NOTE(review): requires a CUDA device (.cuda() calls).
    """
    start = time.time()
    data_size = data.shape[0]
    tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
    model = AutoModel.from_pretrained('bert-base-uncased').cuda()
    result = []
    for i in range((data.shape[0] // batchsize) + 1):
        data_batch = data[(i * batchsize):((i + 1) * batchsize)]
        inputs = token_to_cuda(tokenizer(data_batch.tolist(), padding=True, truncation=True, return_tensors='pt'))
        batch_result = model(**inputs).pooler_output.detach().tolist()
        result += batch_result
        if (i + 1) % log_interval == 0:
            # Checkpoint the embeddings gathered so far and reset the buffer.
            with open('bert_embedding\\' + str((i + 1) // log_interval) + '.txt', 'w') as f:
                for line in result:
                    # Round to 5 decimals to keep the checkpoint files small.
                    line_format = [round(i, 5) for i in line]
                    f.write(str(line_format))
            # NOTE(review): (i + 1) counts batches while data_size counts
            # rows, so this percentage is off by a factor of batchsize.
            print('TIME {} num timesteps {} processed {:.2f}%'
                  .format(time.strftime('%Hh %Mm %Ss', time.gmtime(time.time() - start)),
                          i + 1,
                          ((i + 1) * 100) / data_size))
            result = []
    return result
def get_distribution(data):
    """Compute word-count statistics for the TEXT column.

    Returns:
        (len_list, distribution): a numpy array with the word count of each
        row, and a dict mapping 10-word-wide bucket labels to row counts.
    """
    # Bucket labels, in ascending order; index i covers [10*i, 10*i + 10)
    # and the final label covers everything >= 100.  The exact key strings
    # (including their spacing) are preserved from the original.
    bucket_labels = [
        'len < 10',
        '10 <= len < 20',
        '20 <= len < 30',
        '30 <= len < 40',
        '40 <= len < 50',
        '50 <= len < 60',
        '60 <= len < 70',
        '70 <= len < 80',
        '80 <= len < 90',
        '90 <= len <100',
        '100<= len',
    ]
    distribution = {label: 0 for label in bucket_labels}
    len_list = []
    for text in data[TEXT]:
        length = len(text.split())
        len_list.append(length)
        # Computed bucket index replaces the original 11-branch elif chain.
        distribution[bucket_labels[min(length // 10, 10)]] += 1
    return np.array(len_list), distribution
def to_tf_idf(data):
    """Vectorise the TEXT column as standardised 256-dim TF-IDF features.

    Pipeline: bag-of-words counts -> TF-IDF weighting -> truncated SVD to
    256 components -> standard scaling; the result is returned as a
    DataFrame.
    """
    counts = CountVectorizer().fit_transform(data[TEXT])
    tf_idf = TfidfTransformer().fit_transform(counts)
    svd = TruncatedSVD(256).fit_transform(tf_idf)
    scaled = StandardScaler().fit_transform(svd)
    train_vector = pd.DataFrame(scaled)
    # train_vector.to_csv('data/ag_news_csv/train_vector_tfidf.csv')
    return train_vector
def get_statistics(data):
    """Print corpus statistics (totals, length stats, length distribution)
    for the whole dataset and then separately for each category 1-4."""
    len_list, distribution = get_distribution(data)
    total = 0
    for k, v in distribution.items():
        total += v
    # vectorizer = CountVectorizer()
    # vectorizer.fit_transform(data[TEXT])
    print('Total Num: ' + str(total))
    print('Category Num: ')
    print(data[CATEGORY].value_counts())
    print('Max Len: ' + str(len_list.max()))
    print('Min Len: ' + str(len_list.min()))
    print('Average Len: ' + str(len_list.mean()))
    print('Variance: ' + str(len_list.var()))
    print('Standard Deviation: ' + str(len_list.std()))
    print('Distribution: ' + str(distribution))
    # print('Vocabulary Size: ' + str(len(vectorizer.get_feature_names())))
    for i in range(1, 5):
        print('\n')
        print('Category ' + str(i))
        # Recompute the length stats restricted to rows of this category.
        len_list, distribution = get_distribution(data[data[CATEGORY] == i])
        print('Max Len: ' + str(len_list.max()))
        print('Min Len: ' + str(len_list.min()))
        print('Average Len: ' + str(len_list.mean()))
        print('Variance: ' + str(len_list.var()))
        print('Standard Deviation: ' + str(len_list.std()))
        print('Distribution: ' + str(distribution))
# Script entry: load the training set and compute BERT embeddings for it.
data = load_data_news_train()
generate_bert_embedding(data[TEXT])
|
[
"fu809250829@gmail.com"
] |
fu809250829@gmail.com
|
40d940fd55b5ed7bdbf11c42809e914d77778457
|
0da0a5a42705d60cf0dcac016d3a80156e4c9ec6
|
/euler007/euler007_2.py
|
b989aa26db53a83126f11e997a4eb6a58ea72c3a
|
[
"MIT"
] |
permissive
|
jcpince/hackerrank-euler
|
27d623a0cccd96848f9ed9bd25ed6d983f23e773
|
2d54c06be9037d3182db400f5f64564ab6b41103
|
refs/heads/master
| 2021-01-19T04:07:43.006416
| 2018-02-25T22:35:39
| 2018-02-25T22:35:39
| 84,428,151
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 924
|
py
|
#!/bin/python3
import time
from math import sqrt
# Legacy module-level skip counter; no longer consulted but kept so that
# any external reference to it still resolves.
count = 0


def get_prime(primes, N):
    """Return the N-th prime, extending and memoising *primes* in place.

    *primes* must be a list of consecutive primes starting [2, 3, 5]; newly
    found primes are appended so later calls are fast.

    Fixes: the fast-path test was ``N < len(primes)`` so N == len(primes)
    fell through to the loop (returning the right value only by accident) —
    it is now ``<=``.  The fragile global skip-by-5 counter has been removed;
    trial division alone already rejects multiples of 5, so results are
    unchanged.
    """
    if N <= len(primes):
        return primes[N - 1]
    candidate = primes[-1]
    while len(primes) != N:
        candidate += 2  # even numbers > 2 are never prime
        s = int(sqrt(candidate))
        # Skip 2: all candidates are odd.
        for p in primes[1:]:
            if p > s:
                primes.append(candidate)
                break
            if (candidate % p) == 0:
                break
    return primes[-1]
# Timed driver: read t queries from stdin, print the n-th prime for each,
# then sanity-check against two known values (168th = 997, 10000th = 104729).
start = time.time()
primes = [2, 3, 5]
t = int(input().strip())
for a0 in range(t):
    n = int(input().strip())
    print(get_prime(primes, n))
assert(get_prime(primes, 168) == 997)
assert(get_prime(primes, 10000) == 104729)
print("Computation time: %f seconds" % (time.time() - start))
#print(primes)
|
[
"jcpince@gmail.com"
] |
jcpince@gmail.com
|
9672473b428541ed9dc1e3485053ff317c767e5a
|
8f8f49e86a61e4e926817774d6028831aecd5807
|
/multi_label/n_test.py
|
06a55f58eec3398808394a854e500275241f9b2f
|
[
"MIT"
] |
permissive
|
MinxZ/multi_label
|
5ee533411e28a3488642089ee8a2667bcf7ab963
|
aed67d4bb4102962eb73b996288aa56983589858
|
refs/heads/master
| 2020-03-17T23:15:04.897950
| 2019-12-10T10:10:19
| 2019-12-10T10:10:19
| 134,036,966
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,001
|
py
|
from __future__ import absolute_import, division, print_function
import json
import multiprocessing as mp
import random
import cv2
import keras
import numpy as np
import pandas as pd
import tensorflow as tf
from keras.applications import *
from keras.applications.inception_v3 import preprocess_input
from keras.callbacks import *
from keras.layers import *
from keras.models import *
from keras.optimizers import *
from keras.regularizers import *
from keras.utils.generic_utils import CustomObjectScope
from sklearn.preprocessing import MultiLabelBinarizer
from tqdm import tqdm
from image import ImageDataGenerator, resizeAndPad
from load_data import *
from model import *
# Load the iMaterialist-Fashion JSON splits from disk.
data_path = "../data/json"
with open('%s/train.json' % (data_path)) as json_data:
    train = json.load(json_data)
with open('%s/test.json' % (data_path)) as json_data:
    test = json.load(json_data)
with open('%s/validation.json' % (data_path)) as json_data:
    validation = json.load(json_data)
train_img_url = train['images']
train_img_url = pd.DataFrame(train_img_url)
train_ann = train['annotations']
train_ann = pd.DataFrame(train_ann)
# Join image URLs with their label annotations on imageId.
train = pd.merge(train_img_url, train_ann, on='imageId', how='inner')
# test data
test = pd.DataFrame(test['images'])
# Validation Data
val_img_url = validation['images']
val_img_url = pd.DataFrame(val_img_url)
val_ann = validation['annotations']
val_ann = pd.DataFrame(val_ann)
validation = pd.merge(val_img_url, val_ann, on='imageId', how='inner')
datas = {'Train': train, 'Test': test, 'Validation': validation}
# Normalise imageId to an unsigned int in every split.
for data in datas.values():
    data['imageId'] = data['imageId'].astype(np.uint32)
# Fit the label binariser on the training labels (228 classes).
mlb = MultiLabelBinarizer()
train_label = mlb.fit_transform(train['labelId'])
# Placeholder targets for the 39706 test images; ids are simply 1..N.
y_test = np.zeros((39706, 228))
x_test = np.arange(y_test.shape[0]) + 1
width = 224
model_name = 'Xception'
# with CustomObjectScope({'f1_loss': f1_loss, 'f1_score': f1_score, 'precision': precision, 'recall': recall}):
# model = load_model(f'../models/{model_name}_f1.h5')
test_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
# NOTE(review): `model` is never defined because the load_model lines above
# are commented out, so this predict call raises NameError as written.
y_pred_test = model.predict_generator(
    test_datagen.flow(x_test, '../data/test_data', width,
                      y_test, batch_size=1, shuffle=False),
    verbose=1)
# Persist the raw predicted probabilities for later threshold tuning.
np.save(f'../data/json/y_pred_{model_name}', y_pred_test)
# y_pred_test_xe = y_pred_test.copy()
# y_pred_test = (y_pred_test_xe + y_pred_test_in) / 2
# Binarise at 0.5 and map the indicator matrix back to label-id tuples.
y_pred_test1 = np.round(y_pred_test)
where_1 = mlb.inverse_transform(y_pred_test1)
# Write the submission CSV.  The context manager guarantees the file is
# closed even if writing fails (the original open/close pair leaked on
# exception), and ' '.join replaces the manual trailing-space trimming
# while producing byte-identical lines.
with open('../data/json/test.csv', 'w') as file:
    file.write('image_id,label_id\n')
    for i in x_test:
        labels = where_1[i - 1]
        file.write(f"{i}," + ' '.join(str(x) for x in labels) + '\n')
"""
scp -i cyou.pem ec2-user@ec2-54-178-135-12.ap-northeast-1.compute.amazonaws.com:~/data/iM_Fa/data/json/test.csv .
scp -i cyou.pem ec2-user@ec2-54-178-135-12.ap-northeast-1.compute.amazonaws.com:~/data/iM_Fa/data/json/y_pred_Xception.npy .
"""
|
[
"z670172581@icloud.com"
] |
z670172581@icloud.com
|
a818895c3c81014a591dda8ea7fe96523ca622d7
|
a214a6ed7cb48c50cbdd842a914932103a974b2d
|
/main_app/urls.py
|
a334136e607497e63440a13bf8c6440b580cee27
|
[] |
no_license
|
ajelleny/finch-collector-lab
|
b1df311fcabed220b9fa8036aa7345934afd113a
|
25967eb023ed1d47bf30ec351d060f6297abcfa0
|
refs/heads/main
| 2023-03-18T18:20:12.787459
| 2021-03-14T23:00:01
| 2021-03-14T23:00:01
| 347,227,882
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 553
|
py
|
from django.urls import path
from . import views
urlpatterns = [
    # Static pages.
    path('', views.home, name="home"),
    path('about/', views.about, name="about"),
    # Dog collection and detail views.
    path('dogs/', views.dogs_index, name="index"),
    path('dogs/new', views.dogs_new, name="new"),
    path('dogs/<int:dog_id>/', views.dogs_detail, name='detail'),
    # Nested resources attached to a single dog.
    path('dogs/<int:dog_id>/add_feeding/', views.add_feeding, name="add_feeding"),
    path('dogs/<int:dog_id>/assoc_toy/<int:toy_id>/', views.assoc_toy, name="assoc_toy"),
    # Authentication.
    path('accounts/signup/', views.signup, name='signup'),
]
|
[
"ajelleny@gmail.com"
] |
ajelleny@gmail.com
|
15249287706f2ca19f8758e7e53d8c5c4817c827
|
c632e6ba36598f34e6336d1cb5e2411c1e571da8
|
/flag-collection/grader.py
|
127b0dc85d06593c8a17650729b889ed9597d5d5
|
[] |
no_license
|
adbforlife/easyctf-2017-problems
|
0f7e229d884d6d66c3d0ae1226e2e2e1826d4c17
|
c19872a88080845fa4c5ac51a45ddaffbf40690b
|
refs/heads/master
| 2021-01-22T23:34:09.525637
| 2017-03-21T03:30:53
| 2017-03-21T03:30:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 158
|
py
|
def grade(autogen, answer):
    """Grade a flag submission for the flag-collection challenge.

    Args:
        autogen: per-team autogen seed (unused by this static challenge).
        answer: the submitted flag string.

    Returns:
        A (correct, message) tuple consumed by the grading framework.
    """
    # `in` is the idiomatic membership test; the original used
    # str.find(...) != -1, which is equivalent but less readable.
    if "thumbs.db_c4n_b3_useful" in answer:
        return True, "wowie! thumbs up"
    return False, "Nope, try again."
|
[
"failed.down@gmail.com"
] |
failed.down@gmail.com
|
a494085136d697cdfd1ab4f26e32bdbebaff6ee5
|
3843a7a3f9e296ae37aa4be8eed2a18f89a32769
|
/tweeter_crawler/factory/TwitterCrawler.py
|
40cb41331660b26a0ad6ac4c3dbd551791979928
|
[] |
no_license
|
mahdi00021/Twitter_and_Locations_gis
|
c22420dd3c3f170b3e1fee95e2bcf9473f0539bc
|
c77a58a368c4c8973f6706697f5477ede3f7e7bf
|
refs/heads/master
| 2023-01-28T15:57:17.170633
| 2020-12-06T20:53:04
| 2020-12-06T20:53:04
| 318,503,822
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,874
|
py
|
import json
from datetime import datetime
from MainPy.settings import PROXY_URL
from OrmMongodbRepository.OrmRepository import OrmRepository
from ThraedsAndQueue.DoWorkTwitter import DoWorkTwitter
from tools.Tools import Tools
from tweeter_crawler.factory.IFactorySocial import IFactorySocial
import time
import tweepy
""" This class is a crawler for get data from twitter by api's twitter """
class TwitterCrawler(IFactorySocial):
    """Crawler that fetches tweets through the Twitter API (tweepy) and
    hands them to a worker queue for persistence in MongoDB."""

    # read tweets from twitter and save it in mongodb database
    @staticmethod
    def read_and_save(request):
        """Fetch up to 20 recent tweets for request.data['username'] dated
        between start_date and end_date, queue each one for persistence and
        return the collected tweet dicts.

        NOTE(review): the four API credentials below are empty strings —
        they must be filled in (or injected) before authentication works.
        """
        consumer_key = ""
        consumer_secret = ""
        access_token = ""
        access_token_secret = ""
        tweets = []
        auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
        auth.set_access_token(access_token, access_token_secret)
        api = tweepy.API(auth, proxy=PROXY_URL)
        username = request.data.get('username')
        timeline = tweepy.Cursor(api.user_timeline, id=username, tweet_mode='extended').items(20)
        start_date = datetime.fromisoformat(str(request.data.get('start_date')))
        end_date = datetime.fromisoformat(str(request.data.get('end_date')))
        url_video = ""
        # for in timeline twitter user=username
        for tweet in timeline:
            # Normalise the tweet timestamp to a date-only datetime so it
            # can be compared against the requested range.
            created_at = datetime.fromisoformat(time.strftime('%Y-%m-%d',
                                                              time.strptime(str(tweet.created_at),
                                                                            '%Y-%m-%d %H:%M:%S')))
            for media in tweet.entities.get("media", [{}]):
                if created_at >= start_date and created_at <= end_date:
                    if str(media.get("media_url")) in str(tweet.entities.get("media", [{}])):
                        # get url video from tweets
                        if hasattr(tweet, "extended_entities"):
                            if "video_info" in str(tweet.extended_entities.get("media", [{}])[0]):
                                # Keep the last variant whose content type
                                # contains "video".
                                for link in tweet.extended_entities.get("media", [{}])[0]["video_info"]["variants"]:
                                    if "video" in str(link.get("content_type")):
                                        url_video = link.get("url")
                        json_make_with_media = {
                            "tweet_id": int(tweet.id),
                            "username": username,
                            "type": "Tweet_with_Media",
                            "media": str(media.get("media_url")),
                            "url_video": url_video,
                            "text": str(tweet.full_text),
                            "created_at": str(tweet.created_at)
                        }
                        tweets.append(json_make_with_media)
                        url_video = ""
                        DoWorkTwitter.add_qeueu(json_make_with_media)
                    else:
                        json_make = {
                            "tweet_id": int(tweet.id),
                            "username": username,
                            "type": "Between_Date",
                            "text": tweet.full_text,
                            "media": str(media.get("media_url")),
                            "created_at": str(tweet.created_at)
                        }
                        tweets.append(json_make)
                        DoWorkTwitter.add_qeueu(json_make)
        # Hand the queued tweets to the worker for persistence.
        DoWorkTwitter.len = len(tweets)
        DoWorkTwitter.doing()
        return tweets

    # save images in drive from binary field database
    @staticmethod
    def save_images(request):
        """Persist an image from the request payload via the Tools helper."""
        return Tools.save_images(request.data.get('key'), request.data.get('value'))

    # read all documents from mongodb all data
    @staticmethod
    def read_data_from_mongodb():
        """Return every stored document from MongoDB."""
        return OrmRepository.read_data()

    # find data from database with two param key and value
    @staticmethod
    def find_data(request):
        """Look up stored tweets matching request key/value and return them
        as plain dicts (binary payload stringified)."""
        data = []
        for field in OrmRepository.find(request.data.get('key'), request.data.get('value')):
            json_make = {
                "read": "true",
                "tweet_id": str(field.tweet_id),
                "username": field.username,
                "type ": "Tweet_with_Media",
                "media": field.media,
                "text": field.text,
                "created_at": str(field.created_at),
                "binary_file": str(field.binary_file)
            }
            data.append(json_make)
        return data
"""
json_str = json.dumps(tweet._json)
if hasattr(tweet, 'extended_entities'):
j = json.loads(json_str)
video_info = j["extended_entities"]["media"]
for video in video_info:
for vid in video["video_info"]["variants"]:
print(vid["url"])
"""
|
[
"m73hdi@gmail.com"
] |
m73hdi@gmail.com
|
93f5cafc50ef7f705e3b7621d9771df6063ac54b
|
0254a7f4da96e59a589dd7152243ade04307018e
|
/apps/grubaker2/tags/V5/grubaker
|
5584c1d5db6bbed20953b4b6f0c114f8855464f1
|
[] |
no_license
|
Https-github-com-sulaeman51/guadalinex-v7
|
6a51583b82582a48bf8be2046f56fdcd4e8f90aa
|
2c50315620caa727d427a9bb67c2d1e39245bb3a
|
refs/heads/master
| 2023-04-07T21:59:09.200620
| 2021-04-16T14:11:46
| 2021-04-16T14:11:46
| 356,053,724
| 1
| 0
| null | 2021-04-16T14:11:47
| 2021-04-08T21:34:04
|
Python
|
UTF-8
|
Python
| false
| false
| 15,607
|
#!/usr/bin/python
# -*- coding: iso-8859-1 -*-
# grubaker 1.1.0
# author: Alfonso E.M. alfonso@el-magnifico.org
# date: 15/Apr/2006
# last update: 24/Mar/2008 by Alfonso E.M.
# Patches by JuanJe Ojeda
import re
import pygtk
pygtk.require ('2.0')
import gtk
import gtk.glade
import os,commands,sys
import getopt
# DEBIAN MARKERS FOR AUTOMAGICALLY GENERATED KERNELS SECTION
# update-grub rewrites everything between these two marker comments, so the
# editor must recognise and preserve them verbatim.
AUTOMAGIC_START="### BEGIN AUTOMAGIC KERNELS LIST"
AUTOMAGIC_END="### END DEBIAN AUTOMAGIC KERNELS LIST"
class Menu:
filename=""
def __init__(self,filename):
"""
Grub menu file
"""
self.filename=filename
self.liststore = gtk.ListStore(str,str,int)
self.item=[]
self.error=''
self.parsefile()
def parsefile(self):
self.liststore.clear()
try:
file=open(self.filename)
except:
self.error="No se puede editar el menu.\nNo tiene permiso para modificar "+self.filename+". Posiblemente se encuentre en un sistema Live donde no se puede modificar el gestor de arranque. Pruebe en el sistema una vez instalado."
print self.error
else:
options=""
id=-1
self.comments=""
self.timeout="3"
self.splashimage=""
self.fallback=""
self.default=""
mustreadoptions=False
mustreadcomments=True
while 1:
line=file.readline()
if line == "":
break
if line == "\n":
continue
# As menu.lst says:
# "lines between the AUTOMAGIC KERNELS LIST markers will be modified
# by the debian update-grub script except for the default options below"
# So Grubaker must check (and keep) these odd markers
if line.find(AUTOMAGIC_START) != -1:
i=self.liststore.append([gtk.STOCK_GOTO_BOTTOM,"ATENCION: el sistema puede modificar las siguientes líneas automáticamente",-1])
self.item.append({})
continue
if line.find(AUTOMAGIC_END) != -1:
i=self.liststore.append([gtk.STOCK_GOTO_TOP,"Aquí termina la sección que se actualiza automáticamente",-2])
self.item.append({})
continue
if line[:1] == "#":
if line.find("Grubaker") != -1:
continue
if mustreadcomments:
self.comments=self.comments+line
continue
line = line.replace("\n","")
if re.search("[=\s]+",line):
key,value=re.split("[=\s]+",line,1)
else:
key=line
value=''
if key == "timeout":
self.timeout = value
elif key == "default":
self.default = value
elif key == "splashimage":
self.splashimage = value
elif key == "fallback":
self.fallback = value
elif key == "title":
mustreadcomments=False
id = id + 1
if self.default == str(id):
icon=gtk.STOCK_YES
else:
icon=gtk.STOCK_NO
i=self.liststore.append([icon,value,id])
self.item.append({})
mustreadoptions=True
else:
if mustreadoptions:
self.item[id][key]=value
file.close
def write_key(self,i,k,wfile):
if self.item[i].has_key(k):
wfile.write(k+" \t"+self.item[i][k]+"\n")
del self.item[i][k]
def write(self):
tmpfilename=self.filename+".tmp"
backupfilename=self.filename+".bak"
automagic=0
try:
newfile=open(tmpfilename,"w")
except:
sys.exit("ERROR: "+self.filename+".tmp not writable")
return
newfile.write("# menu.lst edited with Grubaker\n\n")
newfile.write(self.comments)
if self.default:
newfile.write("default "+str(self.default)+"\n")
if self.timeout:
newfile.write("timeout "+self.timeout+"\n")
if self.fallback:
newfile.write("fallback "+self.fallback+"\n")
if self.splashimage:
newfile.write("splashimage="+self.splashimage+"\n")
newfile.write("\n\n")
iter=self.liststore.get_iter_first()
while iter != None:
title=self.liststore.get_value(iter,1)
id=self.liststore.get_value(iter,2)
# Dummy items for DEBIAN AUTOMAGIC KERNEL LIST
if id == -1 and automagic == 0:
newfile.write(AUTOMAGIC_START+"\n")
automagic = 1
elif id == -2:
if automagic == 0:
newfile.write(AUTOMAGIC_START+"\n")
newfile.write(AUTOMAGIC_END+"\n")
automagic == 2
else:
newfile.write("title \t"+title+"\n")
# Order is important !
# First we'll write Linux options (ordered)
self.write_key(id,'root',newfile)
self.write_key(id,'kernel',newfile)
self.write_key(id,'module',newfile)
self.write_key(id,'initrd',newfile)
# Second, Windows options (ordered)
self.write_key(id,'rootnoverify',newfile)
self.write_key(id,'kernel',newfile)
self.write_key(id,'chainloader',newfile)
# Then, whatever left (but "boot")
for key in self.item[id].keys():
if key != 'boot':
newfile.write(key+" \t"+self.item[id][key]+"\n")
# "Boot", if exists, must be the latest (not needed, indeed!)
self.write_key(id,'boot',newfile)
newfile.write("\n")
iter=self.liststore.iter_next(iter)
newfile.close
os.rename(self.filename,backupfilename)
os.rename(tmpfilename,self.filename)
return
class Appgui:
def __init__(self, menu):
"""
In this init the main window is displayed
"""
dic = {
"on_bt_quit_clicked" : (self.quit),
"on_window_main_delete" : (self.quit),
"on_window_preferences_delete_event" : self.bt_preferences_cancel_clicked,
"on_treeview1_cursor_changed" : self.treeview1_cursor_changed,
"on_treeview1_row_activated" : self.edit_item,
"on_bt_ok_clicked" : self.bt_ok_clicked,
"on_bt_preferences_clicked" : self.bt_preferences_clicked,
"on_bt_edit_ok_clicked" : self.bt_edit_ok_clicked,
"on_bt_edit_cancel_clicked" : self.bt_edit_cancel_clicked,
"on_window_edit_delete_event" : self.bt_edit_cancel_clicked,
"on_bt_preferences_ok_clicked" : self.bt_preferences_ok_clicked,
"on_bt_preferences_cancel_clicked" : self.bt_preferences_cancel_clicked,
"on_treeview1_drag_end" : self.treeview1_drag_end,
"on_bt_delete_clicked" : self.bt_delete_clicked,
"on_bt_new_clicked" : self.bt_new_clicked,
"on_dialog_error_response" : self.dialog_error_response,
}
self.menu = menu
# self.xml = gtk.glade.XML("grubaker.glade")
self.xml = gtk.glade.XML("/usr/share/grubaker/grubaker.glade")
self.xml.signal_autoconnect (dic)
self.treeview = self.xml.get_widget('treeview1')
self.window_main = self.xml.get_widget('window_main')
self.window_main.set_size_request(600,320)
self.window_edit=self.xml.get_widget('window_edit')
self.window_preferences=self.xml.get_widget('window_preferences')
self.treeview.set_rules_hint(True)
self.treeview.set_model(model=self.menu.liststore)
# create the TreeViewColumn to display the data
self.column = gtk.TreeViewColumn('Systems')
# add tvcolumn to treeview
self.treeview.append_column(self.column)
# create a CellRendererText to render the data
self.cellicon = gtk.CellRendererPixbuf()
self.cell = gtk.CellRendererText()
self.cell.set_property('single-paragraph-mode',True)
# add the cell to the column
self.column.pack_start(self.cellicon, False)
self.column.pack_start(self.cell, True)
self.column.set_attributes(self.cellicon, stock_id=0)
self.column.add_attribute(self.cell, 'markup',1)
if self.menu.error:
dialog=self.xml.get_widget('dialog_error')
errortext=self.window_edit=self.xml.get_widget('text_error')
errortext.set_text(self.menu.error)
dialog.show()
def dialog_error_response(self,dialog,response,*args):
sys.exit('Error!')
def run(self):
gtk.main()
def quit(*args):
if hasattr(gtk, 'main_quit'):
gtk.main_quit()
else:
gtk.mainquit()
def bt_delete_clicked(self,widget):
(iter,id,title)=self.get_selected_item()
# Dummy items are not deleteable
if id > 0:
self.menu.liststore.remove(iter)
self.change_item_buttons_state(False)
self.menu_has_changed()
return
def bt_new_clicked(self,widget):
self.set_text('hidden_id','')
self.set_text('entry_title','(Nuevo)')
self.window_edit.show()
self.menu_has_changed()
return
def get_selected_item(self):
selection=self.treeview.get_selection()
(model,iter)=selection.get_selected()
title=self.menu.liststore.get_value(iter,1)
id=self.menu.liststore.get_value(iter,2)
return (iter,id,title)
def bt_ok_clicked(self,widget):
widget.set_sensitive(False)
self.menu.write()
self.quit()
def treeview1_cursor_changed(self,treeview):
self.change_item_buttons_state(True)
return
def treeview1_drag_end(self,*args):
self.menu_has_changed()
return
def change_item_buttons_state(self,value):
widget=self.xml.get_widget('bt_delete')
widget.set_sensitive(value)
return
def menu_has_changed(self):
widget=self.xml.get_widget('bt_ok')
widget.set_sensitive(True)
return
def bt_preferences_clicked(self,widget):
self.set_text('entry_timeout',self.menu.timeout)
self.set_text('entry_splashimage',self.menu.splashimage)
self.window_preferences.show()
return
def bt_preferences_ok_clicked(self,widget):
self.menu.timeout=self.get_value('entry_timeout')
self.menu.splashimage=self.get_value('entry_splashimage')
self.menu_has_changed()
self.window_preferences.hide()
return
def bt_preferences_cancel_clicked(self,widget,*args):
self.window_preferences.hide()
return True
def edit_item(self,treeview,TreePath,TreeViewColumn):
(iter,id,title)=self.get_selected_item()
# Dummy items are not editable
if id < 0:
return
self.set_text('entry_title',title)
self.set_text('hidden_id',str(id))
if self.menu.item[id].has_key('root'):
rootkey='root'
if self.menu.item[id].has_key('rootnoverify'):
rootkey='rootnoverify'
matches=re.search('.*hd(\d),(\d).*',self.menu.item[id][rootkey])
if matches:
hd=int(matches.group(1))
partition=int(matches.group(2))
self.set_spin('spin_hd',hd)
self.set_spin('spin_partition',partition)
if self.menu.item[id].has_key('kernel'):
if re.search("\s+",self.menu.item[id]['kernel']):
kernel,options=re.split("\s+",self.menu.item[id]['kernel'],1)
self.set_text('entry_kernel',kernel)
self.set_text('entry_options',options)
else:
self.set_text('entry_kernel',self.menu.item[id]['kernel'])
else:
self.set_text('entry_kernel','')
self.set_text('entry_options','')
if self.menu.item[id].has_key('initrd'):
self.set_text('entry_initrd',self.menu.item[id]['initrd'])
else:
self.set_text('entry_initrd','')
if self.menu.item[id].has_key('chainloader'):
self.set_text('entry_chainloader',self.menu.item[id]['chainloader'])
else:
self.set_text('entry_chainloader','')
if self.menu.item[id].has_key('makeactive'):
self.set_active('check_makeactive',True)
else:
self.set_active('check_makeactive',False)
self.window_edit.show()
return
    def bt_edit_ok_clicked(self,widget):
        """Commit the edit dialog: update the edited item or append a new one.

        Rebuilds the item's dict from the dialog widgets, then marks the
        menu as changed and closes the dialog.
        """
        old_id=self.get_value('hidden_id')
        title=self.get_value('entry_title')
        #old_id is a hidden input, empty for new menu items (tricky)
        if old_id != '':
            # Editing: replace the stored dict for the selected row.
            (iter,id,oldtitle)=self.get_selected_item()
            self.menu.liststore.set_value(iter,1,title)
            del self.menu.item[id]
            self.menu.item.insert(id,{})
        else:
            # New entry: append an empty dict and a new liststore row.
            self.menu.item.append({})
            id=len(self.menu.item)-1
            icon=gtk.STOCK_NO
            i=self.menu.liststore.append([icon,title,id])
        hd=self.get_spin('spin_hd')
        partition=self.get_spin('spin_partition')
        kernel=self.get_value('entry_kernel')
        options=self.get_value('entry_options')
        # The kernel line stores image path and boot options space-separated.
        if kernel:
            self.menu.item[id]['kernel']=kernel+' '+options
        initrd=self.get_value('entry_initrd')
        if initrd:
            self.menu.item[id]['initrd']=initrd
        chainloader=self.get_value('entry_chainloader')
        #if chainloader field is not empty, 'rootnoverify' is used instead of 'root' (tricky)
        if chainloader:
            self.menu.item[id]['chainloader']=chainloader
            rootkey='rootnoverify'
        else:
            rootkey='root'
        # Re-assemble the grub "(hdX,Y)" device spec from the spin buttons.
        self.menu.item[id][rootkey]='(hd'+str(hd)+','+str(partition)+')'
        widget=self.xml.get_widget('check_makeactive')
        makeactive=widget.get_active()
        if makeactive:
            # 'makeactive' is a bare grub directive; the value is unused.
            self.menu.item[id]['makeactive']=''
        self.window_edit.hide()
        self.menu_has_changed()
        return
def set_text(self,widgetname,text):
widget=self.xml.get_widget(widgetname)
widget.set_text(text)
def get_value(self,widgetname):
widget=self.xml.get_widget(widgetname)
return widget.get_text()
def set_spin(self,widgetname,number):
widget=self.xml.get_widget(widgetname)
widget.set_value(number)
def get_spin(self,widgetname):
widget=self.xml.get_widget(widgetname)
return widget.get_value_as_int()
def set_active(self,widgetname,value):
widget=self.xml.get_widget(widgetname)
widget.set_active(value)
def bt_edit_cancel_clicked(self,widget,*args):
self.window_edit.hide()
return True
def usage():
    # Python 2 print statement: writes the command-line help text to stdout.
    print """
Usage:
-h --help This simple help
-f --file=xxx Menu file to edit (default is /boot/grub/menu.lst)
"""
def main():
    """Parse command-line options, build the menu model and launch the GUI."""
    try:
        options, _ = getopt.getopt(sys.argv[1:], "hf:", ["help", "file="])
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    # Default grub menu location, overridden by -f/--file.
    menufile = "/boot/grub/menu.lst"
    for flag, argument in options:
        if flag in ("-h", "--help"):
            usage()
            sys.exit()
        if flag in ("-f", "--file"):
            menufile = argument
    Appgui(Menu(menufile)).run()
# Launch the editor only when run as a script, not when imported.
if __name__ == '__main__':
    main()
|
[
"rcmorano@7a7f4aa0-ec84-47cc-a100-2f2ca1a7a0cd"
] |
rcmorano@7a7f4aa0-ec84-47cc-a100-2f2ca1a7a0cd
|
|
1893c23dc0fac32cfd537f5db991abb5770d3226
|
3f7a392a6c256e1e40abd46a2fb229b789870962
|
/WebBooks/manage.py
|
78da798fe8c8256222ff5fd15e6e5dafad865152
|
[] |
no_license
|
APostolit/django_world_book
|
e8133717349b3b94cf6330b6747612e6e032a181
|
3e6cc7aae8b882f51867cb4260a556ec6de067b9
|
refs/heads/master
| 2022-12-13T05:04:30.983804
| 2020-09-02T15:30:57
| 2020-09-02T15:30:57
| 290,755,694
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 628
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django's administrative command-line utility for this project."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'WebBooks.settings')
    # Build the hint text up front so the except clause stays one line.
    hint = (
        "Couldn't import Django. Are you sure it's installed and "
        "available on your PYTHONPATH environment variable? Did you "
        "forget to activate a virtual environment?"
    )
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(hint) from exc
    execute_from_command_line(sys.argv)
# Standard Django entry-point guard.
if __name__ == '__main__':
    main()
|
[
"anat_post@mail.ru"
] |
anat_post@mail.ru
|
83bfd6ba23b22af36d0ed922b2512865f6aca56d
|
193f048d639eb0e6f33c4ca4e55008b791443989
|
/Python/text/entity_matcher.py
|
257be477ba24e10c9598585366d755f65f11599a
|
[
"MIT"
] |
permissive
|
neuromusic/rst-napari
|
39b13e9249135d67217518a96bb386e28740761e
|
332b9be4d4c8509032a4141db3348f7cbee45935
|
refs/heads/master
| 2023-08-29T06:17:43.982709
| 2021-11-09T17:10:54
| 2021-11-09T17:10:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,656
|
py
|
"""
High level API into EntityMatcher
"""
from abc import ABCMeta, abstractmethod
import dataclasses
from enum import Enum
import json
import os
from typing import Any, Dict, List, NamedTuple, Optional, Sequence, Set, Tuple, Union
import unicodedata
import regex
from utils.misc import PersistentObject, timed_exec
# -----------------------------------------------------------------------------
# Classes
# -----------------------------------------------------------------------------
class EntityNameType(Enum):
    """Kinds of names an entity can be matched by."""
    # The preferred or primary name of an Entity
    PRIMARY = "primary"
    # Any alternative name that is not an Acronym
    SYNONYM = "synonym"
    # Any acronym that is not the primary name
    ACRONYM = "acronym"
    # Partial Name, lower priority synonyms, typically subsequences of Primary-Name or Synonyms
    PARTIAL = "partial"
# /
@dataclasses.dataclass
class EntityInfo:
    """Per-entity record kept by `EntityMatcher.entity_info` (one per id)."""
    entity_id: str
    primary_name: str
    # For use by subclasses of `EntityMatcher`
    name_indices: Any = None
    # Typically used as {SYNONYM: (ss, se); ACRONYM: (as, ae)}
# /
class EntityMatcher(PersistentObject, metaclass=ABCMeta):
    """
    This Abstract Class' main purpose is to provide some common code, mostly for building and managing the data.
    The actual methods for matching are left to the sub-classes, as there are many different use cases based on
    the needs and implementation.

    To build an EntityMatcher:
        em = EntityMatcherSubclass.from_params(params_dict)
        em.add_entity(eid, e_primary, e_syns, e_acrs)
        ...
        em.compile_and_save()
    """

    def __init__(self, params: Dict[str, Any]):
        """
        params:
            - class: str.
                must match Class-Name
            - name: str.
                Some name for this set of options
            - descr: str.
                A brief description
            - cache_file: str.
                Path to file where Class Instance is cached by `build_and_save()`
            - lexicon_id: str.
                Used to identify the Lexicon this matcher is built from.
                Can be a path to the Lexicon file.
        """
        super().__init__()
        self.params = params
        self.name = params.get("name", self.__class__.__name__)
        self.descr = params.get("descr")
        self.lexicon_id = params["lexicon_id"]
        # eid -> EntityInfo(entity_id, primary_name, name_indices)
        self.entity_info: Dict[str, EntityInfo] = dict()
        self.is_compiled = False
        return

    @abstractmethod
    def validate_params(self, params: Dict[str, Any]) -> bool:
        """
        Validate the loaded data's options as loaded from `self.load_data_cache()`
        against options specified in `__init__()` during creation of this instance.
        May potentially update `params`
        """
        raise NotImplementedError

    @abstractmethod
    def copy_from(self, another: "EntityMatcher"):
        """
        Copy data populated during `self.build_and_save()` from another instance.
        `another.params()` are equivalent (gives the same build) to this instance's params.
        Override, and call super().copy_from(another)
        """
        self.params = another.params
        self.name = another.name
        self.descr = another.descr
        self.lexicon_id = another.lexicon_id
        self.entity_info = another.entity_info
        # BUG FIX: previously copied `another.entity_info` (a dict) into the
        # `is_compiled` flag; copy the boolean flag itself.
        self.is_compiled = another.is_compiled
        return

    def load_from_cache(self):
        """
        Load data into `self.data` from option 'cache_file' and validate.
        """
        with timed_exec(name="Load " + self.__class__.__name__):
            loaded_obj = self.load_from(os.path.expanduser(self.params['cache_file']), verbose=True)
            print(f"   ... {self.__class__.__name__} Testing loaded object", flush=True)
            assert self.validate_params(loaded_obj.params), "Cached data params do not match!"
            print(f"   ... {self.__class__.__name__} Copying from loaded object", flush=True)
            self.copy_from(loaded_obj)
        return True

    def add_entity(self, entity_id: str, primary_name: str, synonyms: Sequence[str], acronyms: Sequence[str],
                   partial_names: Sequence[str] = None):
        # Register the entity record, then delegate name indexing to the subclass.
        assert not self.is_compiled, "Can only add entities before compiling"
        self.entity_info[entity_id] = EntityInfo(entity_id=entity_id, primary_name=primary_name)
        self._add_entity_names(entity_id, primary_name, synonyms, acronyms, partial_names)
        return

    @abstractmethod
    def _add_entity_names(self, entity_id: str,
                          primary_name: str, synonyms: Sequence[str], acronyms: Sequence[str],
                          partial_names: Sequence[str]):
        """
        Populate local data structures.
        """
        raise NotImplementedError

    @abstractmethod
    def compile_and_save(self):
        """
        Build out all the local data structures, and save to 'cache_file'
        """
        raise NotImplementedError

    @abstractmethod
    def get_normalized_name(self, name_index: int) -> str:
        raise NotImplementedError

    @abstractmethod
    def get_original_name(self, name_index: int) -> str:
        raise NotImplementedError

    def get_original_names(self, name_index: int) -> Set[str]:
        """Override when returning multiple names mapping to same name-index."""
        return {self.get_original_name(name_index)}

    def get_all_entity_ids(self) -> Set[str]:
        return set(self.entity_info.keys())

    def get_primary_name(self, entity_id: str) -> str:
        return self.entity_info[entity_id].primary_name

    @abstractmethod
    def get_unique_synonyms(self, entity_id: str) -> Tuple[List[str], List[str]]:
        """
        Unique based on normalization scheme used by subclass.
        :return: Original-Synonyms, Normalized-Synonyms
            where first element corresponds to the primary name
        """
        raise NotImplementedError

    @abstractmethod
    def get_unique_acronyms(self, entity_id: str) -> Optional[Tuple[List[str], List[str]]]:
        """
        Unique based on normalization scheme used by subclass.
        :return: Original-Acronyms, Normalized-Acronyms
        """
        raise NotImplementedError

    @classmethod
    def from_params(cls, params: Union[str, Dict[str, Any]]):
        """
        :param params: Either params dict, or path to JSON file
        """
        if isinstance(params, str):
            params_file = os.path.expanduser(params)
            with open(params_file) as f:
                params = json.load(f)
        assert params["class"] == cls.__name__
        return cls(params)

    # =====================================================================================================
    #       Methods for entity matching are left to the sub-classes.
    # =====================================================================================================
# /
class Token(NamedTuple):
    """A token's text plus its [char_start, char_end) span in the source string."""
    text: str
    char_start: int
    char_end: int
# /
class BasicRevMappedTokenizer:
    """
    Equivalent to BasicTokenizer, except:
        - Maps each token to its character position in the source string.
        - Returns `List[Token]` instead of `List[str]`.
    """

    # Token consists of a sequence of:
    #   - Word-char (but not underscore '_')
    #   - Unicode Accent char e.g. [é] in 'Montréal' => 'Montreal'
    TOKEN_PATT = regex.compile(r"((?:(?!_)\w|\p{Mn})+)")

    def __init__(self):
        super().__init__()
        return

    def tokenize(self, txt: str, to_lower: bool = True) -> List[Token]:
        return self._tokenize(txt, to_lower=to_lower)

    def _tokenize(self, txt: str, to_lower: bool) -> List[Token]:
        """Split `txt` into Tokens, tracking each token's offsets in `txt`.

        Because TOKEN_PATT has a capturing group, `split()` yields alternating
        separator / token pieces; `s` accumulates the char offset through both.
        Spans refer to positions in the ORIGINAL text, before `standardize_chars`
        (which may change the token's length).
        """
        tokens = []
        s = 0
        for t in self.TOKEN_PATT.split(txt):
            if len(t) == 0:
                continue
            elif not self.TOKEN_PATT.match(t):
                # Separator piece: just advance the offset.
                s += len(t)
            else:
                e = s + len(t)
                t = self.standardize_chars(t)
                if to_lower:
                    t = t.casefold()
                tokens.append(Token(t, s, e))
                s = e
        return tokens

    @staticmethod
    def standardize_chars(text: str) -> str:
        # Separate combined chars, e.g. [fi] in 'financial' => 'fi...'
        text = unicodedata.normalize("NFKD", text)
        # Strip accents, e.g. [é] in 'Montréal' => 'Montreal'
        text = "".join([c for c in text if unicodedata.category(c) != "Mn"])
        return text
# /
|
[
"smohan@chanzuckerberg.com"
] |
smohan@chanzuckerberg.com
|
ae79642ca69ae2a0cea3a89bf48d01ce72cb2367
|
76da6e4cf87a52338e7012eaeb43d05b967f9cb4
|
/bench_fastcall_slots.py
|
33125258a3e5987cd4c0c810b0e6443bbbe6e01e
|
[
"MIT"
] |
permissive
|
moreati/pymicrobench
|
0a02634ef71d6864f933170c8de819cb1829b972
|
97e43ffd14aaffe27f2e9b82e34a5939d5284d3b
|
refs/heads/master
| 2020-03-08T07:30:17.376632
| 2017-11-21T23:58:58
| 2017-11-21T23:58:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 759
|
py
|
#!/usr/bin/env python3
"""
Benchmark Python "slots".
http://bugs.python.org/issue28915
http://bugs.python.org/issue29507
Created at 2016-12-09 by Victor Stinner.
"""
import perf
class Obj:
    """Tiny object exercising the __int__ and __getitem__ type slots."""

    def __int__(self):
        # int(obj) always yields 5.
        return 5

    def __getitem__(self, key):
        # obj[anything] always yields 6, regardless of the key.
        return 6
# Sanity-check the slot implementations before benchmarking them.
obj = Obj()
if int(obj) != 5:
    raise Exception("bug")
if obj[0] != 6:
    raise Exception("bug")

runner = perf.Runner()
# Measure the cost of dispatching through each Python-level slot;
# duplicate=100 unrolls the statement to reduce per-loop overhead.
runner.timeit('Python __int__: int(obj)',
              'int(obj)',
              duplicate=100,
              # Copy int type builtin into globals for faster lookup
              globals={'int': int, 'obj': obj})

runner.timeit('Python __getitem__: obj[0]',
              'obj[0]',
              duplicate=100,
              globals={'obj': obj})
|
[
"vstinner@redhat.com"
] |
vstinner@redhat.com
|
b2fb30f6ac89b8ec65ad048ee9d1a838eda90b71
|
af94c7b38aaed5f2ebb93f1afbc21f4a6407413d
|
/decision_tree_dmw.py
|
d21a36039081e8d4e00748e7d42ba4e64aa3c605
|
[] |
no_license
|
Drax9299/ML
|
f2b442f67c4f95889a9676bd0087d599fbfb4f6e
|
36889bc9e07b139ec8896edaa922c623de9bc57a
|
refs/heads/master
| 2020-07-30T22:10:44.802472
| 2019-10-06T12:01:35
| 2019-10-06T12:01:35
| 210,376,916
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 868
|
py
|
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import classification_report,confusion_matrix
import matplotlib as m
print("---------Started---------")
# Load the heart-disease dataset from the working directory.
dataset = pd.read_csv("heart.csv")
print("---------Shape---------")
print(dataset.shape)
print("---------Head---------")
# NOTE(review): prints the bound method object, not the rows — probably
# meant dataset.head(); confirm before "fixing" the output format.
print(dataset.head)
#Target the dataset
x = dataset.drop('target',axis=1)
y = dataset['target']
#Splitting dataset: hold out 20% of rows for evaluation.
x_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.20)
#Training model
clasifier = DecisionTreeClassifier()
clasifier.fit(x_train,y_train)
#predict values
y_pred = clasifier.predict(x_test)
#evaluating model
print(confusion_matrix(y_test,y_pred))
print(classification_report(y_test,y_pred))
print("---------End---------")
|
[
"vivek.labhade25@outlook.com"
] |
vivek.labhade25@outlook.com
|
aceac660c31af02baed4e4b199d4e4a2fa42a8c1
|
a648a238c18626ac2d2924ba73d876a5e3b58069
|
/.env/bin/jupyter-troubleshoot
|
c9e2c42af58f9db387e23ff7c8cb3dc746b5751a
|
[] |
no_license
|
iFocusing/Digit_Recognition
|
0d68e3171d0ae4a1de6aaa098fb8bdb152ba7f6c
|
f863b9df935f18c366c64bdfa2b32ee2a7d70dc9
|
refs/heads/master
| 2020-03-21T07:27:45.053556
| 2018-06-27T05:05:07
| 2018-06-27T05:05:07
| 138,281,351
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 289
|
#!/Users/huojingjing/Documents/Python/Kaggle/Digit_Recognition/.env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from jupyter_core.troubleshoot import main
if __name__ == '__main__':
    # Strip setuptools' "-script.py"/".exe" suffix so argv[0] shows the tool name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"huo841022920@163.com"
] |
huo841022920@163.com
|
|
bb175060d7b79efae6a98bb40f786a658a390bb2
|
2519f2094e61b91bbf3bab2a1f138a41b74cae51
|
/develop.py
|
f7cc19267d618226fea9dbbc766f3d07cc911c60
|
[] |
no_license
|
bihe0832/Android-appium
|
0a453dc0864403f9fbe730cf85fc52bb9d2d0b5c
|
7f1e21e5df0d95cb68acac4ed31070148c1ea540
|
refs/heads/master
| 2021-01-06T22:57:14.003694
| 2020-03-02T05:36:15
| 2020-03-02T05:36:15
| 241,506,192
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,830
|
py
|
from appium import webdriver
from time import ctime
from selenium.webdriver.support.ui import WebDriverWait
import sys
import multiprocessing
sys.path.append(r'./zixie/')
sys.path.append(r'./common/')
sys.path.append(r'./appium/')
sys.path.append(r'./const/')
from const import *
import utils
import zixie_main
import port_utils
def reinstall():
    """Uninstall the appium helper APKs from the connected device."""
    for package in ("io.appium.uiautomator2.server",
                    "io.appium.uiautomator2.server.test",
                    "io.appium.unlock",
                    "io.appium.settings"):
        utils.cmd("adb uninstall " + package)
def hardytest():
    # Free port 8200 before starting a session.
    # port_utils.check_port(4723)
    port_utils.release_port(8200)
if __name__ == '__main__':
    uuid=""
    port=4725
    # Restart adb so device enumeration starts from a clean state.
    utils.cmd("adb kill-server")
    port_utils.check_port(5037)
    result = utils.cmd("adb devices")
    print(result)
    # Take the first serial listed with state "device" (i.e. attached & authorized).
    for device in result.split('\n'):
        if device.endswith('\tdevice') == True:
            uuid=device.split('\t')[0]
            break
    print(uuid)
    if uuid == "":
        # BUG FIX: the original had the bare expression `exit` (a no-op);
        # actually abort when no device is attached.
        sys.exit(1)
    utils.showDevice(uuid)
    # reinstall()
    desired_caps={}
    desired_caps['platformName']= "Android"
    desired_caps['deviceName']= uuid
    desired_caps["chromeOptions"] = {
        "androidProcess":"com.bihe0832.readhub:web",
        "androidPackage":"com.bihe0832.readhub"
    }
    desired_caps["chromedriverExecutableDir"] = "./appium"
    print('appium start run %s at %s' %(uuid,ctime()))
    # hardytest()
    # Connect to the local appium server and run the test suite.
    driver=webdriver.Remote('http://127.0.0.1:'+str(port)+'/wd/hub',desired_caps)
    driver.implicitly_wait(appium_const.IMPLICITLY_WAIT_SHORT)
    # utils.wechatlogin(driver,uuid,zixie_const.WECHAT_ID,zixie_const.WECHAT_PASSWORD)
    # utils.qqlogin(driver,uuid,zixie_const.WECHAT_ID,zixie_const.WECHAT_PASSWORD)
    zixie_main.startTest(driver,zixie_const.TEMP_UUID)
|
[
"code@bihe0832.com"
] |
code@bihe0832.com
|
b62d75ef9c18ec265d5f0863cbcf6073f29b0eb9
|
f5a4c46bd60d719bd9b6d20604a7c22dc1d7da23
|
/Iterate_nested_dicts.py
|
513ebfb57dd88b8eb09840e64cb61b496f61dc6c
|
[] |
no_license
|
jmg5219/Python_Dictionaries
|
d4e4fdf022f860c214a8f4091e651dfc93470cc5
|
9c475b39fce6070691bc3e97839ff478705da729
|
refs/heads/master
| 2022-11-29T16:33:51.622382
| 2020-08-03T17:57:51
| 2020-08-03T17:57:51
| 283,870,690
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 474
|
py
|
D = {'emp1': {'name': 'Bob', 'job': 'Mgr'},
     'emp2': {'name': 'Kim', 'job': 'Dev'},
     'emp3': {'name': 'Sam', 'job': 'Dev'}}

# Walk the outer dict, then each employee's attribute dict.
for emp_id, record in D.items():
    print("\nEmployee ID:", emp_id)
    for field, value in record.items():
        print(field + ':', value)

# Prints  Employee ID: emp1
# name: Bob
# job: Mgr
# Employee ID: emp2
# name: Kim
# job: Dev
# Employee ID: emp3
# name: Sam
# job: Dev
|
[
"gokhale.jaidev@gmail.com"
] |
gokhale.jaidev@gmail.com
|
e326fd2aae176f67972dc11d050d6262025b96ad
|
cc9df1a140ce671e4e76a7748363f82d993bdcad
|
/app/crawler/mongo_writer.py
|
5f73a531435e208e72302b3aa42a84b4ff207bbb
|
[] |
no_license
|
shagunsodhani/news-crawler
|
23b9caf037b6ca46dc7317cac8217f7c7ffaedba
|
6d665f50dbac22fd6d8dd47d214dc6f0640ec375
|
refs/heads/master
| 2020-06-01T07:55:23.486447
| 2017-12-31T10:49:53
| 2017-12-31T10:49:53
| 42,307,149
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,232
|
py
|
#! /usr/bin/python
import __init__
import newspaper
# from newspaper import source
from mq import redis_queue
from database import mongo
import hashlib
import json
import time
from newspaper.article import Article
class MongoWriter(object):
    """Subscribes to the crawler's redis channel and writes the referenced
    articles into the MongoDB `articles` collection."""
    def __init__(self):
        # Redis pub/sub consumer on the channel the crawler publishes to.
        self.queue = redis_queue.connect()
        self.p = self.queue.pubsub()
        self.p.subscribe('newssource-channel')
        self.db = mongo.connect()
    def run(self):
        # Blocks forever, processing one published message at a time.
        for message in self.p.listen():
            self.message = message
            self.write_to_mongo()
    def write_to_mongo(self):
        self.write_article()
        # self.write_category()
    def write_article(self):
        # Skip redis' integer subscription-confirmation message.
        if(self.message['data']==1):
            return
        collection = self.db['articles']
        data = json.loads(self.message['data'])
        articles = data['articles']
        payloads = []
        count = 0
        for article_url in articles:
            # Download and parse the full article for each URL.
            article = Article(article_url)
            article.build()
            payload = {}
            payload['crawled_at'] = str(int(time.time()))
            payload['meta_keywords'] = article.meta_keywords
            payload['title'] = article.title
            payload['url'] = article.url
            payload['source_url'] = article.source_url
            payload['text'] = article.text
            payload['html'] = article.html
            payload['keywords'] = article.keywords
            payload['tags'] = list(article.tags)
            payload['authors'] = article.authors
            # Deduplicate on a SHA1 of the raw HTML used as the document _id.
            payload['_id'] = str(hashlib.sha1(article.html.encode('utf-8')).hexdigest())
            payload['publish_date'] = article.publish_date
            payload['summary'] = article.summary
            payload['article_html'] = article.article_html
            payload['canonical_link'] = article.canonical_link
            payloads.append(payload)
            count+=1
            # Flush to Mongo in batches of 100 documents.
            if(count%100==0):
                collection.insert_many(payloads)
                payloads = []
        # Insert any remainder smaller than a full batch.
        if payloads:
            collection.insert_many(payloads)
if __name__ == "__main__":
    # Start consuming the redis channel and writing articles to Mongo.
    test = MongoWriter()
    test.run()
|
[
"sshagunsodhani@gmail.com"
] |
sshagunsodhani@gmail.com
|
7435faffd97a47ab6c7c625bb8707eddcae67725
|
3675b2b06cc212f8c0820217be0406e895e351aa
|
/audio_files/test_recognition.py
|
226555044c0ff674bb466ee66916179922855bf4
|
[] |
no_license
|
pannapat/slu-spring-2019-iot
|
2a38c139ba158547f6e504423b57b7d4b8346a10
|
7d8e3b32834827e2c3f2e6d2eaac0f69a3f459b6
|
refs/heads/master
| 2020-05-04T12:30:08.569653
| 2019-05-13T21:19:08
| 2019-05-13T21:19:08
| 179,123,988
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,559
|
py
|
#!/usr/bin/python3
import speech_recognition as sr
v_command = ''
def test_harvard():
    """Recognize command keywords in the bundled harvard.wav sample.

    BUG FIX: the original referenced an undefined global ``r``; a
    Recognizer is now created locally before use.
    """
    r = sr.Recognizer()
    harvard = sr.AudioFile('harvard.wav')
    with harvard as source:
        audio = r.record(source)
    try:
        v_command = r.recognize_sphinx(audio, keyword_entries=[('forward',1.0),('backward',1.0),('left',1.0),('right',1.0),('stop',1.0),('harvard', 1.0)]) #You can add your own command here
        print(v_command)
    except sr.UnknownValueError:
        print("say again")
    except sr.RequestError as e:
        print(e)
        pass
def run():
    """Record one microphone command and dispatch on the recognized keyword.

    Stores the result in the module-level ``v_command``.  NOTE(review): when
    recognition fails, the previous value of ``v_command`` is kept, so the
    old command may be re-dispatched — confirm this is intended.
    """
    global v_command
    # obtain audio from the microphone
    r = sr.Recognizer()
    with sr.Microphone(device_index =2,sample_rate=48000) as source:
        # Discard two seconds of audio instead of adjust_for_ambient_noise().
        r.record(source,duration=2)
        #r.adjust_for_ambient_noise(source)
        print("Command?")
        audio = r.listen(source)
    try:
        v_command = r.recognize_sphinx(audio,
          keyword_entries=[('forward',1.0),('backward',1.0),('left',1.0),('right',1.0),('stop',1.0),('harvard', 1.0)]) #You can add your own command here
        print(v_command)
    except sr.UnknownValueError:
        print("say again")
    except sr.RequestError as e:
        print(e)
        pass
    # Each branch currently just echoes the command; replace with real actions.
    if 'forward' in v_command:
        print(v_command)
    elif 'backward' in v_command:
        print(v_command)
    elif 'left' in v_command:
        print(v_command)
    elif "right" in v_command:
        print(v_command)
    elif 'stop' in v_command:
        print(v_command)
    else:
        pass
|
[
"ch.pannapat@gmail.com"
] |
ch.pannapat@gmail.com
|
8f61b9f702953ec2d7a99e6df760299e434cec36
|
5f482fecee9923e041d43e5a7e079ad7c3c0daee
|
/bobsite/bobsite/urls.py
|
4a626e370eca6107fa057cf46fb88970ccd8c9fb
|
[] |
no_license
|
bkwiencien/bob-site
|
a1ebb4163a692182be44bea3ecf06f5dc89a5616
|
be41f45ce250478040c871c35990e4f3aca67fe2
|
refs/heads/master
| 2020-03-24T21:04:03.054034
| 2018-08-31T13:42:53
| 2018-08-31T13:42:53
| 143,012,054
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 988
|
py
|
"""bobsite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from ufo import views
from comments import views as cviews
urlpatterns = [
    # Django admin site.
    path('admin/', admin.site.urls),
    # UFO app pages.
    path('',views.index),
    path('genbarchart/',views.genbarchart,name="genbarchart"),
    # Comments app endpoints.
    path('comments/',cviews.comments),
    path('comments/getcomment/',cviews.getcomment),
]
|
[
"bkwiencien@gmail.com"
] |
bkwiencien@gmail.com
|
2d53487a772e75d490b7a4410e2cc3a1f94aebf5
|
84dc79a560d164c65ec6c6ab400a229fdfc76c65
|
/point12.py
|
f30bd28903c143131f58255ec92b69c39a6700df
|
[] |
no_license
|
GabrieleMazzola/Modeling-data-analysis-complex-networks_TUDelft
|
182ed8515681e1fab10cd77b41547135fa8534b1
|
78fb768e8d427f94c43a55d5f680c1950ea6a6ec
|
refs/heads/master
| 2020-04-28T09:01:31.845336
| 2019-03-12T12:43:01
| 2019-03-12T12:43:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,341
|
py
|
import matplotlib.pyplot as plt
import networkx as nx
from util import load_network, load_infection_ranking, build_graph_from_dataset, compute_ranking
# Load the static network and the ground-truth infection ranking.
graph_dataset = load_network('./data/network.json')
infect_ranking = load_infection_ranking("./data/ranking.json")
graph, unique_nodes = build_graph_from_dataset(graph_dataset)

# Rank nodes by betweenness centrality, highest first.
bets = list(nx.betweenness_centrality(graph).items())
bets = sorted(bets, key=lambda x: x[1], reverse=True)

f_values, RRbs = compute_ranking(infect_ranking, bets)
plt.plot(f_values, RRbs)
plt.ylabel("rRB")
plt.xlabel("f")
plt.show()

# Temporal networks
# Count, per node, in how many timesteps it appears at least once.
timestep_dict = dict.fromkeys(list(unique_nodes), 0)
for timestep in graph_dataset:
    added_nodes = []
    for conn in timestep:
        if conn[0] not in added_nodes:
            timestep_dict[conn[0]] += 1
            added_nodes.append(conn[0])
        if conn[1] not in added_nodes:
            timestep_dict[conn[1]] += 1
            added_nodes.append(conn[1])

timesteps = list(timestep_dict.items())
timesteps = sorted(timesteps, key=lambda x: x[1], reverse=True)
print(timesteps)

f_values, RTs = compute_ranking(infect_ranking, timesteps)

# Side-by-side comparison: temporal-activity ranking vs betweenness ranking.
plt.subplot(1, 2, 1)
plt.plot(f_values, RTs)
plt.ylabel("rRT (Temporal Feature)")
plt.xlabel("f")
plt.subplot(1, 2, 2)
plt.plot(f_values, RRbs)
plt.ylabel("rRB (Betweennes")
plt.xlabel("f")
plt.show()
|
[
"gabriele.mazzola@techedgegroup.com"
] |
gabriele.mazzola@techedgegroup.com
|
824feba713fbc872c9088b87bfa32216bdc4dae7
|
c28aac1c46e22d4f4caa3c98a3b8ec8bb4ad93da
|
/session 5/Homework/Exercise 2.py
|
ab02f445fd556f3ed671675dba768cf2af476c8d
|
[] |
no_license
|
TrungNguyen1996/TrungNguyen-fundamental-c4e15
|
55777663e5f30120caca20c7ff95a4700642f915
|
9be3815ef3defef17d46d23a05fd6a29dacbd278
|
refs/heads/master
| 2021-05-11T13:02:05.196920
| 2018-01-29T15:44:03
| 2018-01-29T15:44:03
| 117,670,384
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 659
|
py
|
# Exercise 2
# Write a program to count number occurrences in a list, with AND without using count() function
#
# Example:
#!/usr/bin/python3
# aList = [123, 'xyz', 'zara', 'abc', 123];
#
# print ("Count for 123 : ", aList.count(123))
# print ("Count for zara : ", aList.count('zara'))

# BUG FIX: the file contained unresolved git merge-conflict markers
# (<<<<<<< / ======= / >>>>>>>), which made it a SyntaxError.  The conflict
# is resolved by keeping the complete HEAD version below.

numbers = [1,6,8,1,2,1,5,6]
# print("Count for 1: ", numbers.count(1))
i = int(input("Mời bạn nhập vào số nào đó"))
print("Count for number: ", numbers.count(i))
|
[
"nguyentrungnguyen@github.com"
] |
nguyentrungnguyen@github.com
|
e0c60cea4c15a098db9bc2d5adf8053c24c7e12a
|
09fd2a42a931e3e094af9fc5ec6eb57dc9d42660
|
/addismap/mapvis/parser_POST.py
|
988445c2d45a4aaa4d10896cf6796a635ec1660f
|
[] |
no_license
|
yohannes15/AddisMap
|
78df24404b1934f5327297efe13f220485f76de5
|
3bcafccd985e92cd1d894b9535c747bf4c42fd4c
|
refs/heads/master
| 2022-11-24T18:01:53.027070
| 2020-08-03T22:46:10
| 2020-08-03T22:46:10
| 271,413,519
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 666
|
py
|
def get_float(request, id):
    """Fetch POST field `id` as a float; None when the value is absent or invalid."""
    raw = request.POST.get(id)
    # float() raises TypeError for None (missing key) and ValueError for bad text.
    try:
        return float(raw)
    except (TypeError, ValueError):
        return None
def get_str(request, id):
    """Return the raw POST value for `id` (None when the key is missing)."""
    return request.POST.get(id)
class POST_parser:
    """Pulls start/destination coordinates out of a request's POST data."""

    def __init__(self, request):
        # Attribute name -> HTML form field name on the map page.
        field_map = (('lat1', 'start-latitude'),
                     ('lng1', 'start-longitude'),
                     ('lat2', 'destination-latitude'),
                     ('lng2', 'destination-longitude'))
        for attr, form_field in field_map:
            setattr(self, attr, get_float(request, form_field))
|
[
"y.berhane56@gmail.com"
] |
y.berhane56@gmail.com
|
3e4f4fcb1d3215ba8390a9c937dfd9d0f1e369f1
|
18767e5e01a822bd8ef94befa4a4d7e5e0c301a4
|
/mysql/m1.py
|
c6770c2c851710ad8371b1e09298a8eb97ca5d14
|
[] |
no_license
|
lovekobe13001400/pystudy
|
67671bf9c2651a571b017f227ef5dea67ccc90f2
|
cfd03cab2834f351ffc17cf8c92649c55919d03e
|
refs/heads/master
| 2020-03-26T08:05:32.028221
| 2018-09-19T15:06:10
| 2018-09-19T15:06:10
| 144,685,458
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 255
|
py
|
import MySQLdb
# Connect to the local MySQL `test` database.
# NOTE(review): hard-coded root credentials — move to configuration.
conn=MySQLdb.connect(host='localhost',port=3306,db='test',user='root',passwd='root',charset='utf8')
cs1=conn.cursor()
# execute() returns the number of affected rows (expected: 1).
count=cs1.execute("insert into student(name) values('张良')")
print(count)
conn.commit()
cs1.close()
conn.close()
|
[
"15267607479@163.com"
] |
15267607479@163.com
|
24f5be7423109579e89342bc1cdf88ad13a29183
|
34eb285795baac91373a8a223e0bc5a9ec619f25
|
/website_bridetobe/models/sale_rental.py
|
28eecd845762243b6046d403521740b5283b12be
|
[] |
no_license
|
JpGarciiia964/custom_addons
|
acc43a86ecb679ff4c39aab4c8dce3cfd311e5ed
|
d864da263a2daf9d3910a50117767692ace1b760
|
refs/heads/master
| 2020-07-01T19:31:11.881884
| 2018-09-05T14:36:43
| 2018-09-05T14:36:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,269
|
py
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api
from datetime import datetime, timedelta
from odoo.exceptions import Warning
import logging
from odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT
from dateutil.relativedelta import relativedelta
_logger = logging.getLogger(__name__)
class SaleRental(models.Model):
    """bridetobe customizations of sale.rental (dress rental workflow)."""
    _name = "sale.rental"
    _inherit = ['sale.rental', 'mail.thread']
    # Most urgent deliveries first, then by internal workflow state.
    _order = 'delivery_date,state_internal'
@api.one
def _get_residual_time(self):
if self.delivery_date and self.test_date:
if datetime.today() > datetime.strptime(self.test_date, DEFAULT_SERVER_DATETIME_FORMAT):
residual_datetime = datetime.strptime(self.delivery_date,
DEFAULT_SERVER_DATETIME_FORMAT) - datetime.today()
else:
residual_datetime = datetime.strptime(self.test_date, DEFAULT_SERVER_DATETIME_FORMAT) - datetime.today()
residual_time = residual_datetime.total_seconds() // 3600
residual_datetime.total_seconds() // 3600
security_rental = self.start_order_id.company_id.security_rental
if residual_time > security_rental and residual_time < (security_rental * 2) and self.state in (
'ordered', 'pending'):
self.alert_color = "yellow"
elif residual_time <= security_rental and self.state in ('ordered', 'pending'):
self.alert_color = "crimson"
if not self.alert_color:
self.alert_color = self.state_internal.state_color
if self.state == 'tested_out':
self.alert_color = 'yellow'
@api.model
def _get_modista_domain(self):
return [('department_id', '=', self.env.ref('website_bridetobe.modista').id)]
@api.model
def _get_default_internal_state(self):
return self.state_internal.search([('sequence', '=', 1)])
state_internal = fields.Many2one('sale.rental.internal.state',
string="Estado Interno", ondelete="restrict",
default=_get_default_internal_state)
modista = fields.Many2one("hr.employee", string="Modista", domain=_get_modista_domain)
event_place = fields.Char(related="start_order_id.event_place", string="Event Place")
end_date = fields.Date(store=True)
state = fields.Selection(selection_add=[('ordered', 'Sin Asignar'),
('pending', 'Pendiente de Prueba'),
('tested_out', 'Prueba y Entrega'),
('tested', 'Probado'),
('out', 'Entregado')], readonly=False,
track_visivility='onchange',
)
alert_color = fields.Char(string="Alert Color",
compute="_get_residual_time")
product_barcode = fields.Char(related="rental_product_id.barcode",
readonly=True)
delivery_date = fields.Datetime(string="Fecha Entrega")
test_date = fields.Datetime(string="Fecha Prueba")
comments = fields.Text(string="Comments")
details = fields.Text(string="Details")
rental_product_id = fields.Many2one(
'product.template', related='',
string="Vestido", readonly=False, track_visibility="onchange")
internal_comment = fields.Text(string="Nota Interna", track_visibility='onchange')
receipt_render = fields.Text()
receipt_url = fields.Char()
seller_id = fields.Many2one(related='start_order_id.seller_id', string="Vendedor")
current_days = fields.Integer(compute="_get_current_days", string="Dias Transcurridos")
start_date = fields.Date(related='start_order_line_id.start_date', readonly=False, track_visibility="onchange",
string="Fecha Evento")
    @api.depends('state')
    def next_state(self):
        """Advance `state_internal` to the next configured state by sequence.

        States flagged `sale_order_state` are skipped; when no internal
        state is set yet, fall back to the lowest-sequence state.
        NOTE(review): @api.depends on a non-compute method looks unusual —
        confirm this is intentional.
        """
        if self.state_internal:
            # Highest sequence among non-sale-order states = end of the chain.
            last_state_sequence = max(
                a.sequence for a in self.state_internal.search([('sale_order_state', '=', False)]))
            if last_state_sequence != self.state_internal.sequence:
                # Smallest sequence strictly greater than the current one.
                next_sequence = min(
                    a.sequence for a in self.state_internal.search(
                        [('sequence', '>', self.state_internal.sequence), ('sale_order_state', '=', False)]))
                self.state_internal = self.state_internal.search([('sequence', '=', next_sequence)])
        else:
            state_ids = [a.sequence for a in self.state_internal.search([])]
            if state_ids:
                # No state yet: start at the first (lowest-sequence) state.
                first_state = min(a.sequence for a in self.state_internal.search([]))
                self.state_internal = self.state_internal.search([('sequence', '=', first_state)])
            else:
                raise Warning('No existen estados Definidos')
@api.one
@api.depends(
    'start_order_line_id', 'extension_order_line_ids.end_date',
    'extension_order_line_ids.state', 'start_order_line_id.end_date')
def _compute_display_name_field(self):
    """Build display_name as "[partner] product (state label)".

    convert_to_export yields the human-readable selection label for state.
    NOTE(review): uses rented_product_id (base sale_rental field), not the
    local rental_product_id — confirm that is intentional.
    """
    self.display_name = u'[%s] %s (%s)' % (
        self.partner_id.name,
        self.rented_product_id.name,
        self._fields['state'].convert_to_export(self.state, self))
def get_details(self):
    """Return a window action opening this rental's dedicated form view."""
    form_view = self.env.ref('sale_rental.sale_rental_form')
    action = {}
    action['type'] = 'ir.actions.act_window'
    action['view_type'] = 'form'
    action['view_mode'] = 'form'
    action['res_model'] = 'sale.rental'
    action['res_id'] = self.id
    action['context'] = self.env.context
    action['view_id'] = form_view.id
    return action
@api.one
@api.depends(
    'start_order_line_id.order_id.state',
    'start_order_line_id.procurement_ids.move_ids.state',
    'start_order_line_id.procurement_ids.move_ids.move_dest_id.state',
    'sell_order_line_ids.procurement_ids.move_ids.state',
)
def _compute_procurement_and_move(self):
    """Derive the rental's lifecycle state from its stock moves.

    Maps the outbound/inbound move pair (and an optional sell move) to:
    ordered -> out -> in / sell_progress -> sold, with 'cancel' overriding
    everything when the originating sale order is cancelled.
    """
    procurement = False
    in_move = False
    out_move = False
    sell_procurement = False
    sell_move = False
    state = False
    if (self.start_order_line_id and self.start_order_line_id.procurement_ids):
        procurement = self.start_order_line_id.procurement_ids[0]
        if procurement.move_ids:
            # The outbound move is the one chained to a destination
            # (return) move; its move_dest_id is the inbound move.
            for move in procurement.move_ids:
                if move.move_dest_id:
                    out_move = move
                    in_move = move.move_dest_id
    if (self.sell_order_line_ids and self.sell_order_line_ids[0].procurement_ids):
        sell_procurement = self.sell_order_line_ids[0].procurement_ids[0]
        if sell_procurement.move_ids:
            sell_move = sell_procurement.move_ids[0]
    state = 'ordered'
    if out_move and in_move:
        if out_move.state == 'done':
            state = 'out'
        if out_move.state == 'done' and in_move.state == 'done':
            state = 'in'
        # Return cancelled + a sell procurement exists: customer is buying.
        if (out_move.state == 'done' and in_move.state == 'cancel' and sell_procurement):
            state = 'sell_progress'
        if sell_move and sell_move.state == 'done':
            state = 'sold'
    # Order cancellation overrides any move-derived state.
    if self.start_order_line_id.order_id.state == 'cancel':
        state = 'cancel'
    self.procurement_id = procurement
    self.in_move_id = in_move
    self.out_move_id = out_move
    self.state = state
    self.sell_procurement_id = sell_procurement
    self.sell_move_id = sell_move
    # Once the dress is back in stock, release the product assignment.
    if self.state == 'in':
        self.rental_product_id = False
@api.multi
def write(self, vals):
    """Override write to keep related documents and the customer in sync.

    Side effects, in order:
    * start_date change -> propagate to the sale order line and both
      pickings' scheduled dates.
    * test_date/delivery_date on the same day -> force state 'tested_out'.
    * assigning a test date with a seamstress -> move to 'pending' and the
      'ajuste' internal state, and notify the customer by mail message.
    * rental_product_id change -> notify the customer and re-point the
      moves, pack operations and order line at the new product.
    """
    # Product template id BEFORE the write, used below to find the stock
    # moves that must be re-pointed when the dress is swapped.
    rental_product_id = self.rental_product_id.rented_product_id.id
    if vals.get('start_date'):
        self.start_order_line_id.write({'start_date': vals.get('start_date'), 'end_date': vals.get('start_date')})
        self.in_picking_id.min_date = fields.Datetime.from_string(vals.get('start_date'))
        self.out_picking_id.write({'min_date': fields.Datetime.from_string(vals.get('start_date'))})
    sale_rental = super(SaleRental, self).write(vals)
    message_body = False
    delivery_date = ""
    test_date = ""
    self_test_date = ""
    self_delivery_date = ""
    # Format dates for the customer message; -4h shifts UTC to local time
    # (hard-coded offset — presumably America/La_Paz; TODO confirm).
    if self.test_date:
        self_test_date = fields.Datetime.from_string(self.test_date) + timedelta(hours=-4)
        test_date = datetime.strftime(self_test_date, '%d/%m/%Y %I:%M %p')
    if self.delivery_date:
        self_delivery_date = fields.Datetime.from_string(self.delivery_date) + timedelta(hours=-4)
        # NOTE(review): formats the raw (non-shifted) datetime, unlike
        # test_date above — possible inconsistency.
        delivery_date = datetime.strftime(fields.Datetime.from_string(self.delivery_date),
                                          '%d/%m/%Y %I:%M %p')
    if vals.get('test_date') or vals.get('delivery_date'):
        # Test and delivery on the same day collapse into one visit.
        if self_delivery_date and self_test_date:
            if self_delivery_date.date() == self_test_date.date():
                super(SaleRental, self).write({'state': 'tested_out'})
    if self.modista and vals.get('test_date'):
        # Only override state/internal state when the caller didn't set them.
        if not vals.get('state'):
            self.state = 'pending'
        if not vals.get('state_internal'):
            self.state_internal = self.env.ref('website_bridetobe.internal_state_ajuste').id
        if self.state_internal.message_body:
            # Template placeholders: partner, barcode, state, seamstress,
            # order, test date, delivery date (positional .format args).
            message_body = self.state_internal.message_body.format(self.partner_id.name,
                                                                   self.rental_product_id.barcode or "",
                                                                   self.state_internal.name,
                                                                   self.modista.name,
                                                                   self.start_order_id.name,
                                                                   test_date,
                                                                   delivery_date)
    elif vals.get('rental_product_id'):
        message_body = "ESTIMADO CLIENTE LE INFORMAMOS QUE SE HA " \
                       "EFECTUANDO UN CAMBIO DE VESTIDO, " \
                       "CODIGO DEL VESTIDO SELECCIONADO " + (self.rental_product_id.barcode or "")
    if message_body:
        # Post the notification directly as a mail.message on this record.
        self.message_ids.create({
            "subject": "Detalles de su Renta" + self.start_order_id.name,
            "subtype_id": 1,
            "res_id": self.id,
            "partner_ids": [(4, self.partner_id.id)],
            "needaction_partner_ids": [(4, self.partner_id.id)],
            "body": message_body,
            "record_name": self.display_name,
            "date": datetime.today(),
            "model": 'sale.rental',
            "author_id": self.env.user.id,
            "message_type": "email",
            "email_from": self.env.user.email})
    if 'rental_product_id' in vals:
        # Dress swapped: re-point every pending move / pack operation of
        # both pickings, plus the originating order line & procurement,
        # from the old product to the new one.
        product_id = self.env['product.product'].search([('product_tmpl_id', '=', self.rental_product_id.id)],
                                                        limit=1)
        for move_line in self.out_picking_id.move_lines:
            if move_line.product_id.product_tmpl_id.id == rental_product_id:
                move_line.product_id = self.env['product.product'].search(
                    [('product_tmpl_id', '=', product_id.rented_product_id.id)], limit=1)
        for pack_operation in self.out_picking_id.pack_operation_product_ids:
            if pack_operation.product_id.product_tmpl_id.id == rental_product_id:
                pack_operation.product_id = self.env['product.product'].search(
                    [('product_tmpl_id', '=', product_id.rented_product_id.id)], limit=1)
        for move_line in self.in_picking_id.move_lines:
            if move_line.product_id.product_tmpl_id.id == rental_product_id:
                move_line.product_id = self.env['product.product'].search(
                    [('product_tmpl_id', '=', product_id.rented_product_id.id)], limit=1)
        for pack_operation in self.in_picking_id.pack_operation_product_ids:
            if pack_operation.product_id.product_tmpl_id.id == rental_product_id:
                pack_operation.product_id = self.env['product.product'].search(
                    [('product_tmpl_id', '=', product_id.rented_product_id.id)], limit=1)
        self.start_order_line_id.procurement_ids[0].product_id = product_id
        self.start_order_line_id.procurement_ids[0].name = self.rental_product_id.name
        self.start_order_line_id.product_id = product_id
        self.start_order_line_id.name = self.rental_product_id.name
    return sale_rental
def get_availability(self, rental_product_id=False):
    """Raise if the dress is already rented in the surrounding window.

    The window runs from the Monday of start_date's week to start_date + 7
    days (note: asymmetric on purpose or by accident — TODO confirm).
    Any non-cancelled rental of the same product inside it blocks booking.
    """
    date = datetime.strptime(self.start_date, DEFAULT_SERVER_DATE_FORMAT)
    date_start = date - timedelta(days=date.weekday())
    date_end = date + timedelta(days=7)
    product_availability = self.env['sale.rental'].sudo().search(
        [('rental_product_id', '=', rental_product_id or self.rental_product_id.id),
         ('start_date', '<=', datetime.strftime(date_end, DEFAULT_SERVER_DATE_FORMAT)),
         ('start_date', '>=', datetime.strftime(date_start, DEFAULT_SERVER_DATE_FORMAT)),
         ('state', '!=', 'cancel')])
    if product_availability:
        raise Warning("Este Articulo no esta disponible en esta Fecha")
@api.onchange('rental_product_id')
def onchange_rental_product_id(self):
    """Re-check availability whenever the dress selection changes
    (raises a Warning if the date window is already booked)."""
    self.get_availability()
def print_receipt(self):
    """Advance the workflow, mark the outgoing picking line as done and
    return a wizard action that sends a ZPL label to the label printer.

    Returns an act_window for posbox.print with the printer URL and the
    rendered ZPL payload pre-filled in context.
    """
    # Configured printer endpoint, defaulting to the "ZD410" device.
    receipt_url = self.env['ir.values'].get_default('stock.config.settings', 'label_printer_url') or "ZD410"
    # ============================
    self.next_state()
    out_picking_id = self.out_picking_id
    out_picking_id.sudo().force_assign()
    # Tick off (qty_done = 1) the pack operation for the rented product.
    for picking_line in out_picking_id.pack_operation_product_ids:
        product_id = self.env['product.product'].search(
            [('product_tmpl_id', '=', self.rented_product_id.id)])
        if picking_line.product_id.id == product_id.id and picking_line.qty_done == 0:
            picking_line.sudo().write({'qty_done': 1.0})
    # ===============
    # ZPL label: {0} company name, {2} partner name, {1} order barcode.
    receipt_render = """^XA
^FO0,20^A0N,50,62^FB500,0,0,C^FD{0}^FS
^FO0,60^A0N,40,40^FB500,1,0,C^FD{2}^FS
^FO30,100^BY4^BCN,100,Y,N,N^FD{1}^FS
^XZ""".format(self.sudo().partner_id.company_id.name.encode('utf-8'),
              self.start_order_id.name.encode('utf-8'),
              self.sudo().partner_id.name.encode('utf-8'))
    return {
        'type': 'ir.actions.act_window',
        'name': 'Print Label',
        'view_type': 'form',
        'view_mode': 'form',
        'res_model': 'posbox.print',
        'context': {"default_receipt_url": receipt_url,
                    "default_receipt": receipt_render
                    },
        'view_id': self.env.ref('posbox_send.qztry_print_label_form').id,
        'target': 'new',
    }
def cancel_rental(self):
    """Cancel the rental: cancel pending pickings, lock the order, and
    refund every open/paid customer invoice.

    Open invoices are first settled with an electronic payment (journal
    'PDR') so their residual is zero, then a refund (out_refund) copy is
    created. Refunds for invoices older than 30 days drop the taxes.
    """
    if self.out_picking_id.state != 'done':
        self.out_picking_id.action_cancel()
    if self.in_picking_id.state != 'done':
        self.in_picking_id.action_cancel()
    payment_obj = self.env['account.payment']
    self.start_order_id.action_done()
    self.state = 'cancel'
    for invoice_id in self.start_order_id.invoice_ids:
        # Skip refunds themselves and invoices already refunded.
        if invoice_id.state in ('open', 'paid') and invoice_id.type not in (
                'in_refund', 'out_refund') and not invoice_id.refund_invoice_ids:
            if invoice_id.state in ('open'):
                payment = payment_obj.create({'invoice_ids': [(4, invoice_id.id)],
                                              'journal_id': self.env['account.journal'].search(
                                                  [('code', '=', ('PDR'))], limit=1).id,
                                              'payment_method_id': self.env['account.payment.method'].search(
                                                  [('code', '=', 'electronic')], limit=1).id,
                                              'amount': invoice_id.residual,
                                              'payment_type': 'inbound',
                                              'partner_type': 'customer',
                                              'communication': 'Pagos Generados por Descuentos y/o Reenvolsos',
                                              'partner_id': invoice_id.partner_id.id})
                payment.post()
            refund_id = invoice_id.copy({'type': 'out_refund',
                                         'refund_reason': "Cancelacion de Renta",
                                         'origin_invoice_ids': [(4, invoice_id.id)]})
            # Invoices older than 30 days: refund without taxes
            # (presumably a local fiscal rule — TODO confirm).
            if (fields.Datetime.from_string(fields.Date.today()) - fields.Datetime.from_string(
                    invoice_id.date_invoice)).days > 30:
                for refund_line_id in refund_id.invoice_line_ids:
                    refund_line_id.invoice_line_tax_ids = False
@api.onchange('delivery_date')
def onchange_delivery_date(self):
    """Mirror the delivery date onto the outgoing picking's schedule."""
    for rec in self:
        picking = rec.out_picking_id
        if not picking:
            continue
        picking.min_date = rec.delivery_date
@api.onchange('start_date')
def onchange_start_date(self):
    """A new event date invalidates any scheduled fitting/delivery."""
    self.test_date = False
    self.delivery_date = False
@api.model
def create(self, vals):
    """Default rental_product_id and comments from the originating sale
    order / order line before creating the rental."""
    start_order_line_id = self.env['sale.order.line'].browse(vals.get('start_order_line_id'))
    start_order_id = self.env['sale.order'].browse(vals.get('start_order_id'))
    vals['rental_product_id'] = start_order_line_id.product_id.product_tmpl_id.id
    vals['comments'] = start_order_id.comments
    return super(SaleRental, self).create(vals)
@api.multi
def _get_current_days(self):
    """Compute current_days: days elapsed since each rental's start date."""
    for rec in self:
        started = datetime.strptime(rec.start_date, DEFAULT_SERVER_DATE_FORMAT)
        rec.current_days = (datetime.today() - started).days
@api.one
def send_return_email(self):
    """Post a chatter notification asking the customer to return the dress
    (identified by barcode, falling back to default_code)."""
    self.message_post(
        body="Estimado cliente le solicitamos realizar la devolucion del vestido codigo %s utilizado en fecha de evento %s lo antes posible." % (
            (self.rental_product_id.barcode or self.rental_product_id.default_code), self.start_date),
        message_type='notification',
        subtype='mt_comment',
        partner_ids=[self.partner_id.id])
# @api.onchange('state_internal')
# def change_state_internal(self):
# if self.state_internal.message_send:
# try:
# self.sudo().message_ids.create(
# {"subject": "Detalles de su Orden" + self.start_order_id.name,
# "subtype_id": 1,
# "res_id": self.id,
# "partner_ids": [(4, self.partner_id.id)],
# "needaction_partner_ids": [(4, self.partner_id.id)],
# "body": self.state_internal.message_body.format(self.partner_id.name,
# self.rented_product_id.name,
# self.state_internal.name,
# self.modista.name,
# self.start_order_id.name),
# "record_name": self.display_name,
# "date": datetime.today(),
# "model": 'sale.rental',
# "author_id": self.env.user.id,
# "message_type": "email",
# "email_from": self.env.user.email})
# except (KeyError, IndexError):
# _logger.error('El cuerpo del mensaje no esta Configurado correctamente')
|
[
"jgam310@gmail.com"
] |
jgam310@gmail.com
|
3ab8947751d32cd4eac01bdc44bb5678241e993c
|
1179adfd0137f9f1190c47975059bfd4a6f15c37
|
/subtitle_analysis/subtitle_analysis.py
|
957357e9d0b81a986d13bc4d86e26fdf2b5202e5
|
[] |
no_license
|
wangbichao/get_http
|
f74748a8e5af855e91918fb15fb2d69820013802
|
d59fc549073d8665a931ce24bd7eb4a73c4cb98d
|
refs/heads/master
| 2021-07-09T01:30:50.850731
| 2019-04-12T08:47:01
| 2019-04-12T08:47:01
| 147,137,430
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,128
|
py
|
import io
import sys
import re

# Re-wrap stdout so printed text is always UTF-8, regardless of locale.
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf8')

# Split a subtitle dump into alternating "Dialog A:" / "Dialog B:" turns,
# skipping timestamp lines (those containing "00:"); only every second
# remaining line is inspected (the other half is subtitle bookkeeping).
#
# Fixes over the original:
#  * EOF was tested with a SECOND readline() at the bottom of the loop,
#    which silently discarded every other input line. Iterating the file
#    object reads each line exactly once.
#  * The speaker flag was toggled with bitwise `~` (never yields a bool);
#    `not` expresses the intent.
#  * Files are now closed deterministically via `with`.
with open("Shooter01_01.txt", "r", encoding='utf8') as shooter01_01, \
     open("new_shooter01_01.txt", "w+", encoding='utf8') as new_shooter01_01:
    line_num = 0
    dialogue_switch__AB = False
    new_shooter01_01.write("Dialog A: ")
    for temp_str in shooter01_01:
        if temp_str.find("00:") == -1:
            line_num = line_num + 1
            if (line_num % 2) == 0:
                # Sentence-final punctuation ends the current speaker's turn
                # ($ matches just before the trailing newline).
                if re.findall(r'\.$', temp_str) or \
                   re.findall(r'\?$', temp_str) or \
                   re.findall(r'\!$', temp_str):
                    if dialogue_switch__AB:
                        new_shooter01_01.write(temp_str + '\n' + "Dialog A: ")
                    else:
                        new_shooter01_01.write(temp_str + '\n' + "Dialog B: ")
                    dialogue_switch__AB = not dialogue_switch__AB
                else:
                    new_shooter01_01.write(temp_str)
|
[
"bichao@amd.com"
] |
bichao@amd.com
|
d9b4c61e255061adf5319fc25e66056ef0423fda
|
98371e7846c75c6ea0e54789e72b151e1a9fd12f
|
/geojson_tv1.py
|
ae4d57d192edc45985abe2a38eabf54ae05410f6
|
[] |
no_license
|
ThomasVanV/Using-python-to-access-web-data
|
6efe38be0e0f16d1026aa1ecfb78f1a1d7ca3726
|
e6ddbd29a8fc1377444b79ad18bcbd76c1b9b141
|
refs/heads/master
| 2020-04-02T21:39:26.715856
| 2018-10-29T07:53:43
| 2018-10-29T07:53:43
| 154,805,761
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 680
|
py
|
import urllib.request, urllib.parse, urllib.error
import json

# Geocoding exercise: query the dr-chuck geojson endpoint for a location
# and print the first result's place_id.
serviceurl = 'http://python-data.dr-chuck.net/geojson?'

while True:
    address = input('Enter location: ')
    if len(address) < 1:
        break

    url = serviceurl + urllib.parse.urlencode({'sensor': 'false', 'address': address})
    print('Retrieving', url)
    uh = urllib.request.urlopen(url)
    data = uh.read()
    print('Retrieved', len(data), 'characters')

    # BUG FIX: the original used a bare `except:` (swallowing even
    # KeyboardInterrupt) and then evaluated `'status' not in js`, which
    # raises TypeError when js is None. Catch only JSON errors and test
    # for None explicitly.
    try:
        js = json.loads(data)
    except ValueError:
        js = None

    if js is None or 'status' not in js or js['status'] != 'OK':
        print('==== Failure To Retrieve ====')
        print(data)
        continue

    pid = js["results"][0]["place_id"]
    print('PID: ', pid)
|
[
"noreply@github.com"
] |
ThomasVanV.noreply@github.com
|
bde7c731dee4c8b057ef4b71c3421bc04de773f5
|
54a709b4a88633cd75efc7c86819e112b5bd85f2
|
/linear-regression/lin-reg-closed-tf.py
|
464699c79b731a4041f1e4f69c212c5a8d98e3c7
|
[] |
no_license
|
sayeedm/python-ml-snippets
|
d50c20a5f59cea5a8ca89ed7ad444731a6f25c22
|
19ee14582f1ee275b618e6d9e9d2043dff7569ed
|
refs/heads/master
| 2020-12-02T21:01:15.663948
| 2017-09-22T20:21:14
| 2017-09-22T20:21:14
| 96,245,215
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 889
|
py
|
'''
linear regression closed form on TensorFlow
using the equation theta = (XT.X)^-1.XT.y
Author: SayeedM
Date: 03-07-2014
'''
import tensorflow as tf
import numpy as np

# lets say we train a naive system to calculate y = 4x - 1
# we will generate a bunch of randoms as training data
X = np.random.rand(1000, 1)
# NOTE(review): np.random.rand is uniform on [0, 1), so this "noise" has a
# +0.5 mean bias; the learned intercept will be ~ -0.5 rather than -1.
y = -1 + 4 * X + np.random.rand(1000, 1) # giving some random noise
# adding a bias (1)
X_b = np.c_[np.ones((1000, 1)), X]
X = tf.constant(X_b, dtype = tf.float32, name = "X")
y = tf.constant(y, dtype = tf.float32, name = "y")
XT = tf.transpose(X)
# Normal equation: theta = (X^T X)^-1 X^T y.
theta_func = tf.matmul(tf.matmul(tf.matrix_inverse(tf.matmul(XT, X)), XT), y)
# NOTE(review): TF1 Session API; under TF2 this needs tf.compat.v1.
with tf.Session() as sess:
    best_theta = theta_func.eval()
    # Predict for x = 5 using the learned parameters (bias prepended).
    X_new = np.array([5])
    X_new_b = np.c_[np.ones((1, 1)), X_new]
    prediction = X_new_b.dot(best_theta)
    print("Prediction for ", 5, " is : ", prediction)
|
[
"dmbterminal@gmail.com"
] |
dmbterminal@gmail.com
|
3d8556493072bf6b1ae3f07336174f88855674f6
|
66f727956d81ee2543e73c60d8b25fa466ce6cca
|
/query_samples.py
|
504d3ae6ee99096bd8c91a00833dc7b6b8921e0b
|
[] |
no_license
|
cgi0911/VarOpt-Python
|
c772fb985d6b84b7a3713833285c670ef3a46ddb
|
fd96192e7ec8761354a1c6e193f0e6b2d5e8b8ba
|
refs/heads/master
| 2021-01-01T18:57:34.792274
| 2015-10-19T20:14:51
| 2015-10-19T20:14:51
| 98,470,772
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 739
|
py
|
#!/usr/bin/python
# Python 2 script: run a prefix query table over every forecast record
# file in DATA_DIR and dump the per-file query results to QUERY_DIR.
import pyvaropt as pv
import os

#DATA_DIR = "/home/users/cgi0911/Results/Waikato_5/fcast"
DATA_DIR = "/home/users/cgi0911/Results/Waikato_5/temp/20150928-181751/fcast"
FN_PREF = "./test_data/prefix_all.txt"
QUERY_DIR = "/home/users/cgi0911/Results/Waikato_5/temp/20150928-181751/queried"
if not os.path.exists(QUERY_DIR): os.makedirs(QUERY_DIR)

data_fns = sorted(os.listdir(DATA_DIR))
# Query table is built once from the prefix list and reset between files.
q_table = pv.PrefixQueryTable(fn=FN_PREF)

for data_fn in data_fns:
    print data_fn
    # Output file mirrors the input name with a .txt extension.
    query_fn = data_fn.replace(".rec", ".txt")
    kw_table = pv.KWTable(filetype="flowbin", fn=os.path.join(DATA_DIR, data_fn))
    q_table.query(kw_table)
    q_table.to_txt(os.path.join(QUERY_DIR, query_fn))
    q_table.reset()
|
[
"cgi0911@gmail.com"
] |
cgi0911@gmail.com
|
c0e908289b12dd7c4ab598ea5302491c7de51e83
|
e641bd95bff4a447e25235c265a58df8e7e57c84
|
/third_party/blink/renderer/core/frame/DEPS
|
3ebe612468a80bd1bb6fd4db4f8fc0e811136ff6
|
[
"LGPL-2.0-only",
"BSD-2-Clause",
"LGPL-2.1-only",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-2.0-only",
"LicenseRef-scancode-other-copyleft",
"BSD-3-Clause"
] |
permissive
|
zaourzag/chromium
|
e50cb6553b4f30e42f452e666885d511f53604da
|
2370de33e232b282bd45faa084e5a8660cb396ed
|
refs/heads/master
| 2023-01-02T08:48:14.707555
| 2020-11-13T13:47:30
| 2020-11-13T13:47:30
| 312,600,463
| 0
| 0
|
BSD-3-Clause
| 2022-12-23T17:01:30
| 2020-11-13T14:39:10
| null |
UTF-8
|
Python
| false
| false
| 1,497
|
specific_include_rules = {
"ad_tracker_test\.cc": [
"+base/run_loop.h",
],
"coop_access_violation_report_body\.cc": [
"+services/network/public/cpp/cross_origin_opener_policy.h"
],
"frame_view\.cc": [
"+ui/gfx/transform.h"
],
"frame_test_helpers\.h": [
"+components/viz/common/surfaces/frame_sink_id.h"
],
"local_frame_back_forward_cache_test\.cc": [
"+base/run_loop.h",
],
"local_frame\.h": [
"+ui/gfx/transform.h"
],
"local_frame\.cc": [
"+ui/gfx/transform.h"
],
"local_frame_view\.cc": [
"+base/timer/lap_timer.h",
"+cc/tiles/frame_viewer_instrumentation.h",
"+components/paint_preview/common/paint_preview_tracker.h",
],
"remote_frame_view\.cc": [
"+components/paint_preview/common/paint_preview_tracker.h",
"+printing/buildflags/buildflags.h",
"+printing/metafile_skia.h",
],
"visual_viewport\.cc": [
"+cc/layers/solid_color_scrollbar_layer.h",
],
"web_frame_widget_base\.cc": [
"+cc/trees/swap_promise.h",
],
"web_frame_widget_base\.h": [
"+services/viz/public/mojom/hit_test/input_target_client.mojom-blink.h",
"+ui/base/mojom/ui_base_types.mojom-shared.h",
],
"web_frame_test\.cc": [
"+ui/base/mojom/ui_base_types.mojom-shared.h",
"+ui/gfx/transform.h",
],
"web_frame_widget_test\.cc": [
"+base/run_loop.h",
"+components/viz/common/surfaces/parent_local_surface_id_allocator.h",
],
"settings\.h": [
"+ui/base/pointer/pointer_device.h",
],
}
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
|
41e0ac65a19c61f5c27725bd18edd7b3503c705c
|
9c6839009b7c881e3d6a319564b6bf9bc50aaba1
|
/cookbook/eight/2CustomeStringFormat.py
|
4ea747858e0e06fa70f5c4197ccf8add05f8fb39
|
[] |
no_license
|
dalq/python-cookbook
|
d28cb05aaf54e4d2c6056a2ff6d0c8027997460f
|
7bcd0c68e26269772123887ce8589b7f9db0bd3f
|
refs/heads/master
| 2020-03-26T17:43:56.960886
| 2019-08-11T07:11:21
| 2019-08-11T07:11:21
| 145,177,094
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 543
|
py
|
# Customize a string's output format (cookbook recipe 8.2).
# Format codes map to attribute-interpolation templates consumed by
# Date.__format__ below.
_formats = {
    'ymd': '{d.year}-{d.month}-{d.day}',
    'mdy': '{d.month}/{d.day}/{d.year}',
    'dmy': '{d.day}/{d.month}/{d.year}'
}


class Date:
    """Minimal date holder whose format()/f-string output is driven by
    the codes in _formats ('ymd' by default)."""

    def __init__(self, year, month, day):
        # BUG FIX: the original defined __int__ (a typo for __init__), so
        # Date(2019, 1, 27) raised TypeError and the attributes were never
        # set.
        self.year = year
        self.month = month
        self.day = day

    def __format__(self, code):
        # An empty format spec — plain format(d) or f"{d}" — means 'ymd'.
        if code == '':
            code = 'ymd'
        fmt = _formats[code]
        return fmt.format(d=self)


d = Date(2019, 1, 27)
print(format(d))

# The datetime module supports strftime-style codes such as %A %B %d %Y.
|
[
"quandaling@gmail.com"
] |
quandaling@gmail.com
|
6d402d404dc360d68adc568ba48352122326ec36
|
f201636c1e686025eef88cfa3516692759b24ba6
|
/holamundoversionfinal.py
|
506821ee28696ec53a83b985a36e6a3d693b872a
|
[] |
no_license
|
Aletuno/First-repository
|
7a607ef3c2b6c066f2c44531c6a7d8e8449764bd
|
907b99131a669dd6689c20b1b8d58534576ff7e9
|
refs/heads/main
| 2023-01-03T11:55:13.188513
| 2020-10-30T20:05:24
| 2020-10-30T20:05:24
| 308,724,294
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 133
|
py
|
# Print a greeting and the link to "Hello World" by louie Zong.
print("Hello World: by louie Zong")
print("https://www.youtube.com/watch?v=Yw6u6YkTgQ4")
# https://www.youtube.com/watch?v=Yw6u6YkTgQ4
|
[
"aletuno4@gmail.com"
] |
aletuno4@gmail.com
|
9d47de3d85c732d6d29bf3ae2ded68541a8b7bbb
|
6ad891474002c0daf6e81209dd56ab416c76787c
|
/src/roles/mystic.py
|
690a839c79aa3d9d486dfc674e1d648027e61096
|
[
"BSD-2-Clause"
] |
permissive
|
Trigonoculus/lykos
|
4dcdf0ee0b802bcc0f49b47ad75fb257d189e9f4
|
41f70d484fd4b94ce41f9e04126a1c069ef7bfd4
|
refs/heads/master
| 2021-01-13T10:17:15.391288
| 2016-12-04T12:21:12
| 2016-12-04T12:21:12
| 69,026,505
| 0
| 0
| null | 2016-09-23T13:19:12
| 2016-09-23T13:19:11
| null |
UTF-8
|
Python
| false
| false
| 3,961
|
py
|
import re
import random
import src.settings as var
from src.utilities import *
from src import debuglog, errlog, plog
from src.decorators import cmd, event_listener
from src.messages import messages
from src.events import Event
@event_listener("exchange_roles")
def on_exchange(evt, cli, var, actor, nick, actor_role, nick_role):
    """When two players swap roles, give whichever side becomes a (wolf)
    mystic its fresh count message.

    Wolf mystics learn the number of special villagers; mystics learn the
    number of wolf-team players. Messages are only queued for the side
    that is GAINING the role (the other side already had the info).
    """
    # Base list of "special" roles; other modules extend it via the
    # get_special event.
    special = set(list_players(("harlot", "guardian angel", "bodyguard", "priest", "prophet", "matchmaker",
                                "shaman", "doctor", "hag", "sorcerer", "turncoat", "clone", "crazed shaman",
                                "piper", "succubus")))
    evt2 = Event("get_special", {"special": special})
    evt2.dispatch(cli, var)
    pl = set(list_players())
    wolves = set(list_players(var.WOLFTEAM_ROLES))
    neutral = set(list_players(var.TRUE_NEUTRAL_ROLES))
    special = evt2.data["special"]
    if nick_role == "wolf mystic" and actor_role != "wolf mystic":
        # # of special villagers = # of players - # of villagers - # of wolves - # of neutrals
        numvills = len(special & (pl - wolves - neutral))
        evt.data["actor_messages"].append(messages["wolf_mystic_info"].format("are" if numvills != 1 else "is", numvills, "s" if numvills != 1 else ""))
    elif nick_role == "mystic" and actor_role != "mystic":
        numevil = len(wolves)
        evt.data["actor_messages"].append(messages["mystic_info"].format("are" if numevil != 1 else "is", numevil, "s" if numevil != 1 else ""))
    if actor_role == "wolf mystic" and nick_role != "wolf mystic":
        # # of special villagers = # of players - # of villagers - # of wolves - # of neutrals
        numvills = len(special & (pl - wolves - neutral))
        evt.data["nick_messages"].append(messages["wolf_mystic_info"].format("are" if numvills != 1 else "is", numvills, "s" if numvills != 1 else ""))
    elif actor_role == "mystic" and nick_role != "mystic":
        numevil = len(wolves)
        evt.data["nick_messages"].append(messages["mystic_info"].format("are" if numevil != 1 else "is", numevil, "s" if numevil != 1 else ""))
@event_listener("transition_night_end", priority=2.01)
def on_transition_night_end(evt, cli, var):
    """At nightfall, PM each (wolf) mystic its current count.

    Wolf mystics get the number of special villagers; mystics get the
    number of wolf-team players, prefixed by their role blurb (verbose or
    simple depending on user preference).
    """
    # init with all roles that haven't been split yet
    special = set(list_players(("harlot", "guardian angel", "bodyguard", "priest", "prophet", "matchmaker",
                                "shaman", "doctor", "hag", "sorcerer", "turncoat", "clone", "crazed shaman",
                                "piper", "succubus")))
    evt2 = Event("get_special", {"special": special})
    evt2.dispatch(cli, var)
    pl = set(list_players())
    wolves = set(list_players(var.WOLFTEAM_ROLES))
    neutral = set(list_players(var.TRUE_NEUTRAL_ROLES))
    special = evt2.data["special"]
    for wolf in var.ROLES["wolf mystic"]:
        # if adding this info to !myrole, you will need to save off this count so that they can't get updated info until the next night
        # # of special villagers = # of players - # of villagers - # of wolves - # of neutrals
        numvills = len(special & (pl - wolves - neutral))
        pm(cli, wolf, messages["wolf_mystic_info"].format("are" if numvills != 1 else "is", numvills, "s" if numvills != 1 else ""))
    for mystic in var.ROLES["mystic"]:
        if mystic in var.PLAYERS and not is_user_simple(mystic):
            pm(cli, mystic, messages["mystic_notify"])
        else:
            pm(cli, mystic, messages["mystic_simple"])
        # if adding this info to !myrole, you will need to save off this count so that they can't get updated info until the next night
        numevil = len(wolves)
        pm(cli, mystic, messages["mystic_info"].format("are" if numevil != 1 else "is", numevil, "s" if numevil != 1 else ""))
@event_listener("get_special")
def on_get_special(evt, cli, var):
    """Register mystics in the shared 'special players' set used by other
    listeners (e.g. the wolf mystic count)."""
    # mystics count as special even though they don't have any commands
    evt.data["special"].update(list_players(("mystic",)))
# vim: set sw=4 expandtab:
|
[
"skizzerz@skizzerz.net"
] |
skizzerz@skizzerz.net
|
8553ea60f9ad816df14ca4ab84a39e5cf7f27d29
|
78cb6dadc7599e01b078682b175f21be673ed199
|
/280. Wiggle Sort.py
|
07113c587c92c66ec9fb649114905f64aa229656
|
[] |
no_license
|
AlexWufan/leetcode-python
|
5cf5f13dbc7d1e425fde646df618e50c488fa79f
|
435323a9fcea6a4d09266785e88fb78735e0cc3e
|
refs/heads/master
| 2021-01-13T00:49:49.870468
| 2018-04-13T18:44:19
| 2018-04-13T18:44:19
| 51,347,271
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 454
|
py
|
class Solution(object):
    def wiggleSort(self, nums):
        """
        :type nums: List[int]
        :rtype: void Do not return anything, modify nums in-place instead.
        """
        # Single left-to-right pass. The wiggle invariant is
        # nums[0] <= nums[1] >= nums[2] <= nums[3] ...; whenever a pair
        # violates it, one adjacent swap restores it without breaking the
        # invariant to the left.
        for pos in range(1, len(nums)):
            left, here = nums[pos - 1], nums[pos]
            if pos % 2 == 0:
                # Even slot: must not exceed its left (odd) neighbour.
                out_of_order = left < here
            else:
                # Odd slot: must not be smaller than its left neighbour.
                out_of_order = left > here
            if out_of_order:
                nums[pos - 1], nums[pos] = here, left
|
[
"mengnanszw@gmail.com"
] |
mengnanszw@gmail.com
|
0f1403e8e6732527eb879a89b591a69d618133c6
|
e103ffda9e1117131f4fdc40a41982685a3337e0
|
/api_server/models/position.py
|
007254002f7be4bd3e91ee564db550aa1d46a78a
|
[] |
no_license
|
DanDondoDan/api-server
|
b98d6b75d4f9200ef043309b06b766a5158be15e
|
213fcb366c9a123cd7582b53484ea392d1d34cf9
|
refs/heads/master
| 2022-12-11T03:19:56.668757
| 2019-01-10T13:05:51
| 2019-01-10T13:05:51
| 163,408,619
| 0
| 0
| null | 2022-12-08T01:30:14
| 2018-12-28T12:34:09
|
Python
|
UTF-8
|
Python
| false
| false
| 391
|
py
|
from django.db import models
from api_server.models.base import BaseModel
from mptt.models import MPTTModel, TreeForeignKey
class Position(BaseModel):
    """Job-position lookup table (e.g. for employee records).

    Inherits common bookkeeping fields from BaseModel.
    NOTE(review): the mptt imports in this module are unused by this
    model — presumably left over from a planned tree structure.
    """
    # Human-readable position title.
    position = models.CharField(max_length=100)

    def __str__(self):
        return "{}".format(
            self.position,
        )

    class Meta:
        verbose_name = 'Position'
        verbose_name_plural = 'Positions'
|
[
"Generalovboris@outlook.com"
] |
Generalovboris@outlook.com
|
40eb97d0e2f477411a169ea538f6c11efdf605c4
|
03df0f43682babd0f0854335d11b06a55eee71b0
|
/FlaskProjectDirtory/venv/Scripts/pip3.7-script.py
|
a08ce0a7e333488ea7cb3da4bd5a69ec25dc5fd6
|
[] |
no_license
|
wutanghua/FlaskProjectDirtory
|
6fe6ea5d8587397dbe526aaaea32b61276314d90
|
5b8a0a271281fb38754cf5532041ff70adb0c8fc
|
refs/heads/master
| 2022-12-13T21:55:37.494949
| 2019-08-16T08:02:58
| 2019-08-16T08:02:58
| 202,684,506
| 0
| 0
| null | 2022-12-04T07:42:34
| 2019-08-16T07:56:19
|
Python
|
IBM852
|
Python
| false
| false
| 433
|
py
|
#!D:\flask¤ţ─┐\FlaskProject\FlaskProjectDirtory\venv\Scripts\python.exe -x
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.7'
# Auto-generated setuptools console-script wrapper for pip 10.0.1.
# NOTE(review): the interpreter path above contains mojibake (the original
# non-ASCII directory name was mis-decoded); regenerating the venv fixes it.
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip the '-script.py'/'.exe' suffix so pip sees a clean argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.7')()
    )
|
[
"laobian@qq.com"
] |
laobian@qq.com
|
aaacbc79d93759102b3f14f8a2595923bd4bbe06
|
d7567ee75e48bd7872a1c332d471ff3ce7433cb9
|
/checkout/models.py
|
070faddd0612dfd9d231967eb047e598e676e6f3
|
[] |
no_license
|
sarahbarron/ecommerce
|
30cd0ff26afa5ec9031165b63ecde8c0f7f6086f
|
aba5370fd731e7ec9e677041504f6c3457b0d405
|
refs/heads/master
| 2020-03-17T21:10:56.385918
| 2020-01-17T18:35:28
| 2020-01-17T18:35:28
| 133,947,336
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,055
|
py
|
from django.db import models
from products.models import Product
# Create your models here.
class Order(models.Model):
    """A customer checkout order: contact and shipping details.

    Line items live in OrderLineItem (FK back to this model).
    NOTE(review): street_address2 and county are blank=False — most
    checkouts allow these to be empty; confirm intent.
    """
    full_name = models.CharField(max_length=50, blank=False)
    phone_number = models.CharField(max_length=20, blank=False)
    country = models.CharField(max_length=40, blank=False)
    postcode = models.CharField(max_length=20, blank=True)
    town_or_city = models.CharField(max_length=40, blank=False)
    street_address1 = models.CharField(max_length=40, blank=False)
    street_address2 = models.CharField(max_length=40, blank=False)
    county = models.CharField(max_length=40, blank=False)
    # Order date; set by the checkout view (no auto_now/auto_now_add).
    date = models.DateField()

    def __str__(self):
        return "{0}-{1}-{2}".format(self.id, self.date, self.full_name)
class OrderLineItem(models.Model):
    """One product line of an Order: product reference plus quantity."""
    # Django 1.x-style FKs (no on_delete); Django 2+ requires it.
    order = models.ForeignKey(Order, null=False)
    product = models.ForeignKey(Product, null=False)
    quantity = models.IntegerField(blank=False)

    def __str__(self):
        return "{0} {1} @ {2}".format(self.quantity, self.product.name, self.product.price)
|
[
"sarahflavin@yahoo.com"
] |
sarahflavin@yahoo.com
|
ae9defd446ca602d48fbafb507b7f935c143f78d
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02594/s365888995.py
|
c25ce71edd6248ddeb38aa7051d7f1b71c695d64
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 372
|
py
|
import sys
from collections import defaultdict
readline = sys.stdin.buffer.readline
#sys.setrecursionlimit(10**8)
def geta(fn=lambda s: s.decode()):
    # Read one stdin line and map `fn` over its whitespace-split tokens.
    # (Unused in this solution; part of the author's contest template.)
    return map(fn, readline().split())


def gete(fn=lambda s: s.decode()):
    # Read one stdin line, strip the trailing newline, convert with `fn`.
    return fn(readline().rstrip())


def main():
    # Task: print 'Yes' when the integer on stdin is at least 30.
    x = gete(int)
    print('Yes' if x >= 30 else 'No')


if __name__ == "__main__":
    main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
9b5093119d2fd2bedb7f2eda8be2eaf14580a416
|
26ffd142a69f0e3a4107218166c66862932b8ef1
|
/adaptors/dbclient.py
|
ddaacc1131cdc3a033c7d95beef241f5e1615d2d
|
[] |
no_license
|
valis1/largeset
|
b79f01dbffcd01bb1ea47414fe4e968e9dbf778a
|
c0be62d150bea3c2c7570373d65776ffb3703f17
|
refs/heads/master
| 2022-02-22T18:42:11.786223
| 2019-10-14T18:52:56
| 2019-10-14T18:52:56
| 197,887,994
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,061
|
py
|
import os
from pymongo import MongoClient
from bson.objectid import ObjectId
from bson.errors import InvalidId
"""
Adaptor for MongoDB access.
MONGODB_URI must be set in the environment variables.
Where:
- the `fields` property returns a list of dicts with possible data types for the generator
>>> client = DbClient()
>>> len(client.fields) > 0
True
"""
class DbClient():
    """MongoDB adaptor exposing the generator's field list and schema CRUD.

    Connection string comes from the MONGODB_URI environment variable.
    NOTE(review): schema_collection is initialised but never used — every
    method below reads/writes field_collection, including the schema
    methods; confirm whether that is intentional.
    """
    def __init__(self):
        self.mongo_ulr = os.environ.get("MONGODB_URI")
        connect = MongoClient(self.mongo_ulr)
        db = connect.heroku_lfsxmz64
        self.field_collection = db.fields
        self.schema_collection = db.schemas

    @property
    def fields(self):
        """All field documents (without _id); on any failure, a one-element
        placeholder list shaped like a field doc so callers don't break."""
        try:
            return list(self.field_collection.find({}, {'_id':False}))
        except Exception:
            return [{'id': 'Error', 'desc': 'Connection Error', 'example': 'SERVER ERROR', 'script': '',
                     'resolved_functions': ['']
                     }]

    def get_schema(self, schema_id):
        """Return the schema document for `schema_id` (sans _id).

        Raises ClientError for a malformed id or a missing document.
        """
        try:
            obj_id = ObjectId(schema_id)
            schema = list(self.field_collection.find({"_id":obj_id},{'_id':False}))
            if len(schema) == 0:
                raise ClientError('Schema not found')
            else:
                return schema[0]
        except InvalidId:
            raise ClientError('Invalid schema ID')

    def updateSchema(self,schema, schema_id):
        """$set `schema` onto the document with `schema_id`.

        Returns a human-readable status string; raises ClientError only
        for a malformed id.
        """
        try:
            obj_id = ObjectId(schema_id)
            result = self.field_collection.update_one({"_id":obj_id}, {"$set":schema})
            if result.matched_count == 0:
                return "Schema not found"
            elif result.modified_count == 0:
                return "Nothing to update"
            else:
                return "Updated success"
        except InvalidId:
            raise ClientError('Invalid Schema id')

    def insertSchema(self, schema):
        """Insert `schema` and return its new id as a string; wraps any
        driver error in ClientError."""
        try:
            result = self.field_collection.insert_one(schema)
            return str(result.inserted_id)
        except Exception as e:
            raise ClientError(str(e))
class ClientError(Exception):
    """Raised for malformed schema ids, missing schemas, and insert failures."""
|
[
"valis.source@gmail.com"
] |
valis.source@gmail.com
|
aa8edfc5caec0ad5914c1a0e58f5351232961905
|
b225c349d571a42bb3cc8df5f12ecf8fcb665753
|
/pandas_ext/px_csv.py
|
463a0b423acc84874c858f754abbdc33f5323f47
|
[
"MIT"
] |
permissive
|
newsela/pandas_ext
|
e49492bb403cbf1ccadc751c7203635e501d4dbd
|
18b5c36c5928f9e022a2a05c34b3642484891488
|
refs/heads/master
| 2021-03-24T09:55:39.016772
| 2020-03-23T14:34:02
| 2020-03-23T14:34:02
| 119,059,085
| 4
| 1
|
MIT
| 2020-03-23T14:34:04
| 2018-01-26T14:11:31
|
Python
|
UTF-8
|
Python
| false
| false
| 437
|
py
|
import pandas as pd
import s3fs
from pandas_ext.common.utils import is_s3_path
def to_csv(df: pd.DataFrame, path: str, **kwargs) -> None:
    """Write *df* as CSV: locally for a plain path, via s3fs for an S3 path."""
    if not is_s3_path(path):
        return df.to_csv(path, **kwargs)
    # Render to an in-memory CSV first, then upload the encoded bytes.
    payload = df.to_csv(None, **kwargs).encode()
    fs = s3fs.S3FileSystem()
    with fs.open(path, 'wb') as sink:
        return sink.write(payload)
|
[
"richard.fernandez.nyc@gmail.com"
] |
richard.fernandez.nyc@gmail.com
|
eb48bff33e08b8e4046218fb983b9f4a91ef0882
|
6bc991e3db089dca9ac7a5716f2114017029c6a3
|
/sppas/sppas/src/ui/phoenix/page_annotate/__init__.py
|
0d80bd489dd88f8354d2245390cbfc9aa22aae86
|
[
"MIT",
"GFDL-1.1-or-later",
"GPL-3.0-only",
"GPL-3.0-or-later"
] |
permissive
|
mirfan899/MTTS
|
8b32924754cf399147293d7e314b7fcb134f3f77
|
3167b65f576abcc27a8767d24c274a04712bd948
|
refs/heads/master
| 2020-06-11T20:59:03.788965
| 2019-10-09T12:34:02
| 2019-10-09T12:34:02
| 194,083,206
| 0
| 0
|
MIT
| 2019-06-27T11:30:30
| 2019-06-27T11:30:30
| null |
UTF-8
|
Python
| false
| false
| 81
|
py
|
from .annotate import sppasAnnotatePanel
__all__ = (
"sppasAnnotatePanel"
)
|
[
"virtuoso.irfan@gmail.com"
] |
virtuoso.irfan@gmail.com
|
6a5a6ebeb3d54206f47bd0b26776b40db7ac9dfe
|
265de7d8b2c817f3e8d9fed18bad8e1727b11b01
|
/阅读笔记6/test.py
|
71a229c110a018f445e6ac70fc6af31f5207f23c
|
[] |
no_license
|
CallMeSp/PaperNotes
|
72eb773736b5054b4a2d15b1597a1ee6255879b2
|
7cc6c07ef891e356abb8fa4dbfbf6b2824a34eff
|
refs/heads/master
| 2020-06-09T19:29:16.665037
| 2019-09-16T09:29:58
| 2019-09-16T09:29:58
| 193,493,010
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 549
|
py
|
import tensorflow as tf
import torch as th
import numpy as np
import tfpyth
session = tf.Session()  # TF1 graph-mode session shared with the wrapper below
def get_torch_function():
    """Build a TF graph computing c = 3*a + 4*b**2 and wrap it for PyTorch.

    tfpyth feeds the placeholders from torch tensors and routes gradients
    from the TF graph back into the torch inputs.
    """
    a = tf.placeholder(tf.float32, name='a')
    b = tf.placeholder(tf.float32, name='b')
    c = 3 * a + 4 * b * b
    f = tfpyth.torch_from_tensorflow(session, [a, b], c).apply
    return f
f = get_torch_function()
a = th.tensor(1, dtype=th.float32, requires_grad=True)
b = th.tensor(3, dtype=th.float32, requires_grad=True)
x = f(a, b)
# c(1, 3) = 3*1 + 4*3*3 = 39
assert x == 39.
x.backward()
# dc/da = 3, dc/db = 8*b = 24
assert np.allclose((a.grad, b.grad), (3., 24.))
|
[
"995199235@qq.com"
] |
995199235@qq.com
|
9f920f4c54efc0e855d31869b66744c094fef4ff
|
cc2d5f036850f0b8f155493e57f464f2d04fc1e2
|
/learning_journal/tests.py
|
ca560472ed28dafacfca2b16ec1a9cd43fbf212b
|
[
"MIT"
] |
permissive
|
midfies/lj
|
822b31354403a13dff5613995f47eb7487dc9dcd
|
12c5aa6a7391760f0bfd4588a4eafaf8e48b3dc8
|
refs/heads/master
| 2021-01-13T02:53:55.580359
| 2017-01-09T02:16:44
| 2017-01-09T02:16:44
| 77,096,450
| 0
| 1
| null | 2017-02-06T06:46:27
| 2016-12-22T00:30:48
|
Python
|
UTF-8
|
Python
| false
| false
| 16,504
|
py
|
"""Tests for the learning journal web app."""
import pytest
import transaction
import datetime
from pyramid import testing
from pyramid.config import Configurator
from learning_journal.models import (
Entry,
get_tm_session,
)
from learning_journal.models.meta import Base
# Seed data shared by the unit and functional tests: five posts spread
# across two categories ("testing1" x2, "testing2" x3) with a fixed date.
TEST_ENTRIES = [
    {'title': "Testing Models 1",
     'body': 'This is just a test. This is the 1st Entry',
     'category': 'testing1',
     'creation_date': datetime.datetime(2016, 12, 30, 0, 0),
     'tags': 'test testing'},
    {'title': "Testing Models 2",
     'body': 'This is just a test. This is the 2nd Entry',
     'category': 'testing1',
     'creation_date': datetime.datetime(2016, 12, 30, 0, 0),
     'tags': 'test'},
    {'title': "Testing Models 3",
     'body': 'This is just a test. This is the 3rd Entry',
     'category': 'testing2',
     'creation_date': datetime.datetime(2016, 12, 30, 0, 0),
     'tags': 'testing'},
    {'title': "Testing Models 4",
     'body': 'This is just a test. This is the 4th Entry',
     'category': 'testing2',
     'creation_date': datetime.datetime(2016, 12, 30, 0, 0),
     'tags': 'test'},
    {'title': "Testing Models 5",
     'body': 'This is just a test. This is the 5th Entry',
     'category': 'testing2',
     'creation_date': datetime.datetime(2016, 12, 30, 0, 0),
     'tags': 'testing'},
]
@pytest.fixture(scope="session")
def configuration(request):
    """Set up a Configurator instance.
    This Configurator instance sets up a pointer to the location of the
    database.
    It also includes the models from your app's model package.
    Finally it tears everything down, including the in-memory SQLite database.
    This configuration will persist for the entire duration of your PyTest run.
    """
    # NOTE(review): the settings point at a local Postgres test database,
    # not the in-memory SQLite the docstring mentions -- confirm intent.
    config = testing.setUp(settings={
        'sqlalchemy.url': 'postgres://midfies:password@localhost:5432/test_lj'
    })
    config.include("learning_journal.models")
    config.include("learning_journal.routes")
    def teardown():
        testing.tearDown()
    request.addfinalizer(teardown)
    return config
@pytest.fixture
def db_session(configuration, request):
    """Create a session for interacting with the test database.
    This uses the dbsession_factory on the configurator instance to create a
    new database session. It binds that session to the available engine
    and returns a new session for every call of the dummy_request object.
    """
    session_factory = configuration.registry["dbsession_factory"]
    session = session_factory()
    engine = session.bind
    Base.metadata.create_all(engine)
    def teardown():
        # Roll back everything the test did so the next test starts clean.
        session.transaction.rollback()
    request.addfinalizer(teardown)
    return session
@pytest.fixture
def dummy_request(db_session):
    """Return a dummy request for testing."""
    return testing.DummyRequest(dbsession=db_session)
@pytest.fixture
def add_posts(dummy_request):
    """Add multiple entries to the database."""
    # Entries go into the dummy request's session without a commit; the
    # db_session fixture's rollback removes them after the test.
    for entry in TEST_ENTRIES:
        post = Entry(title=entry['title'], body=entry['body'], category=entry['category'], creation_date=entry['creation_date'], tags=entry['tags'])
        dummy_request.dbsession.add(post)
@pytest.fixture
def set_auth_credentials():
    """Make a username/password combo for testing."""
    # Stores the password hashed; presumably read back by
    # learning_journal.security.check_credentials -- verify against that module.
    import os
    from passlib.apps import custom_app_context as pwd_context
    os.environ["AUTH_USERNAME"] = "testme"
    os.environ["AUTH_PASSWORD"] = pwd_context.hash("foobar")
# Unit Tests
def test_to_json():
    """Test that the to_json method returns a dict."""
    from learning_journal.models import Entry
    new_entry = Entry(title='new_title', body='new_body', creation_date='new_date', category='new_category', tags='new_tags')
    assert isinstance(new_entry.to_json(), dict)
def test_list_view_is_empty_when_no_models(dummy_request):
    """Test there are no listings when db is empty."""
    from learning_journal.views.default import list_view
    result = list_view(dummy_request)
    assert len(result['entries']) == 0
def test_list_view_returns_entries_from_db(dummy_request, add_posts):
    """Test that the list view returns entries from DB."""
    from learning_journal.views.default import list_view
    result = list_view(dummy_request)
    assert result['entries'][0].title == "Testing Models 1"
    assert result['entries'][1].category == "testing1"
    assert len(result['entries']) == 5
def test_detail_view_displays_post(dummy_request, add_posts):
    """Test detail view displays post that is passed through url."""
    from learning_journal.views.default import detail_view
    # NOTE(review): the hard-coded id assumes how many rows earlier tests
    # inserted into the session-scoped database -- fragile test ordering.
    dummy_request.matchdict['id'] = '6'
    result = detail_view(dummy_request)
    entry = dummy_request.dbsession.query(Entry).get(6)
    assert result['entry'] == entry
def test_detail_view_of_non_existant_entry_errors(dummy_request):
    """Test detail view errors on a nonexistent entry."""
    from learning_journal.views.default import detail_view
    dummy_request.matchdict['id'] = '1000'
    result = detail_view(dummy_request)
    assert result.status_code == 404
def test_create_view_returns_empty_list(dummy_request):
    """Test that create view returns an empty dict."""
    from learning_journal.views.default import create_view
    assert create_view(dummy_request) == {}
def test_create_new_posts_adds_to_db(dummy_request):
    """Test that creating a post adds a new post."""
    from learning_journal.views.default import create_view
    count = dummy_request.dbsession.query(Entry).count()
    dummy_request.method = 'POST'
    dummy_request.POST['title'] = 'test title'
    dummy_request.POST['category'] = 'test category'
    dummy_request.POST['tags'] = 'test tags'
    dummy_request.POST['body'] = 'test body'
    create_view(dummy_request)
    new_count = dummy_request.dbsession.query(Entry).count()
    assert new_count == count + 1
def test_edit_view_displays_post(dummy_request, add_posts):
    """Test edit view displays post that is passed through url."""
    from learning_journal.views.default import edit_view
    dummy_request.matchdict['id'] = '12'
    result = edit_view(dummy_request)
    entry = dummy_request.dbsession.query(Entry).get(12)
    assert result['entry'] == entry
def test_edit_old_post_updates_post(dummy_request, add_posts):
    """Test that editing an existing post updates it."""
    from learning_journal.views.default import edit_view
    query = dummy_request.dbsession.query(Entry)
    dummy_request.method = 'POST'
    dummy_request.matchdict['id'] = '18'
    dummy_request.POST["title"] = 'test title'
    dummy_request.POST['category'] = 'test category'
    dummy_request.POST['tags'] = 'test tags'
    dummy_request.POST['body'] = 'test body'
    edit_view(dummy_request)
    this_entry = query.get(18)
    assert this_entry.title == 'test title'
def test_category_view_returns_with_correct_category(dummy_request, add_posts):
    """Test that category view returns entries with the proper category."""
    from learning_journal.views.default import category_view
    dummy_request.matchdict['category'] = 'testing1'
    result = category_view(dummy_request)
    assert result['entries'][0].category == 'testing1'
def test_about_view_returns_empty_dict(dummy_request):
    """Test about view returns empty dict."""
    from learning_journal.views.default import about_view
    assert about_view(dummy_request) == {}
def test_login_view_returns_empty_dict(dummy_request):
    """Test login view returns empty dict."""
    from learning_journal.views.default import login_view
    assert login_view(dummy_request) == {}
def test_check_credentials_passes_with_good_creds(set_auth_credentials):
    """Test that check credentials works with valid creds."""
    from learning_journal.security import check_credentials
    assert check_credentials("testme", "foobar")
def test_check_credentials_fails_with_bad_password(set_auth_credentials):
    """Test that check credential fails on bad password."""
    from learning_journal.security import check_credentials
    assert not check_credentials("testme", "bad pass")
def test_check_credentials_fails_with_bad_username(set_auth_credentials):
    """Test that check credential fails on bad username."""
    from learning_journal.security import check_credentials
    assert not check_credentials("bad username", "foobar")
def test_check_credentials_fails_empty_creds(set_auth_credentials):
    """Test that check credential fails with no credentials."""
    from learning_journal.security import check_credentials
    assert not check_credentials("", "")
def test_login_view_good_creds_gets_redirect(dummy_request, set_auth_credentials):
    """Test that logging in with good creds redirects to home."""
    from learning_journal.views.default import login_view
    from pyramid.httpexceptions import HTTPFound
    dummy_request.method = "POST"
    dummy_request.POST["username"] = "testme"
    dummy_request.POST["password"] = "foobar"
    result = login_view(dummy_request)
    assert isinstance(result, HTTPFound)
def test_login_view_with_bad_creds_stays(dummy_request, set_auth_credentials):
    """Test that logging in does nothing with bad credentials."""
    from learning_journal.views.default import login_view
    dummy_request.method = "POST"
    dummy_request.POST["username"] = "nameuser"
    dummy_request.POST["password"] = "wordpass"
    result = login_view(dummy_request)
    assert result == {}
def test_logout_returns_to_home(dummy_request):
    """Test that logout returns with httpfound to home."""
    from learning_journal.views.default import logout_view
    result = logout_view(dummy_request)
    # 302 is the redirect status carried by HTTPFound.
    assert result.status_code == 302
def test_delete_view_redirects(dummy_request, add_posts):
    """Test that delete view redirects back to the home page."""
    from learning_journal.views.default import delete_view
    dummy_request.matchdict['id'] = '27'
    result = delete_view(dummy_request)
    assert result.status_code == 302
# Functional Tests
@pytest.fixture(scope="session")
def testapp(request):
    """Create an instance of webtests TestApp for testing routes."""
    from webtest import TestApp
    def main(global_config, **settings):
        """Return a Pyramid WSGI application."""
        config = Configurator(settings=settings)
        config.include('pyramid_jinja2')
        config.include('learning_journal.models')
        config.include('learning_journal.routes')
        config.include('learning_journal.security')
        config.scan()
        return config.make_wsgi_app()
    app = main({}, **{
        'sqlalchemy.url': 'postgres://midfies:password@localhost:5432/test_lj'
    })
    testapp = TestApp(app)
    session_factory = app.registry["dbsession_factory"]
    session = session_factory()
    engine = session.bind
    # Start the session from an empty schema and drop it again afterwards.
    Base.metadata.drop_all(engine)
    Base.metadata.create_all(bind=engine)
    def tearDown():
        Base.metadata.drop_all(engine)
    request.addfinalizer(tearDown)
    return testapp
@pytest.fixture
def fill_the_db(testapp):
    """Fill the db with test data."""
    session_factory = testapp.app.registry["dbsession_factory"]
    # Committed via the transaction manager so the WSGI app sees the rows.
    with transaction.manager:
        dbsession = get_tm_session(session_factory, transaction.manager)
        for entry in TEST_ENTRIES:
            post = Entry(title=entry['title'], body=entry['body'], category=entry['category'], creation_date=entry['creation_date'], tags=entry['tags'])
            dbsession.add(post)
    return dbsession
def test_layout_root(testapp):
    """Test that the root page renders the layout footer and no articles."""
    response = testapp.get('/', status=200)
    html = response.html
    assert 'Marc Fieser' in html.find("footer").text
    assert len(html.find_all('article')) == 0
def test_detail_view_returns_not_found_when_db_empty(testapp):
    """Test detail view returns not found when no entries."""
    response = testapp.get('/journal/1', status=404)
    assert response.status_code == 404
def test_non_authenticated_user_cannot_access_create_view(testapp):
    """Test that accessing create new post is forbidden without auth."""
    response = testapp.get('/journal/new-entry', status=403)
    assert response.status_code == 403
def test_non_authenticated_user_cannot_access_edit_view(testapp):
    """Test that accessing edit post is forbidden without auth."""
    response = testapp.get('/journal/1/edit-entry', status=403)
    assert response.status_code == 403
def test_category_view_contains_no_entries_with_empty_db(testapp):
    """Test that category view has no articles with empty db."""
    response = testapp.get('/journal/category/testing', status=200)
    html = response.html
    assert len(html.find_all('article')) == 0
def test_about_me_page_contains_about_me(testapp):
    """Test about me page contains about me content."""
    response = testapp.get('/about', status=200)
    html = response.html
    assert 'About Me' in html.find("title").text
def test_login_page_has_login_form(testapp):
    """Test that the login page renders exactly one login form."""
    response = testapp.get('/login', status=200)
    html = response.html
    assert 'Log In' in html.find("title").text
    assert len(html.find_all('form')) == 1
# ======================Let the data start=====================
def test_home_view_with_data_lists_all_articles(testapp, fill_the_db):
    """When there's data in the database, the home page has articles."""
    response = testapp.get('/', status=200)
    html = response.html
    assert len(html.find_all("article")) == 5
def test_detail_view_has_specific_article(testapp):
    """Test that a specific article is loaded in detail view."""
    response = testapp.get("/journal/1")
    assert len(response.html.find_all("article")) == 1
    assert "Testing Models 1" in response.text
def test_user_can_log_in(set_auth_credentials, testapp):
    """Test that a user can log in with correct credentials."""
    testapp.post("/login", params={
        "username": "testme",
        "password": "foobar"
    })
    # A successful login leaves the auth ticket cookie on the test app.
    assert "auth_tkt" in testapp.cookies
def test_create_view_contains_a_form(testapp):
    """Test that create view contains a form."""
    response = testapp.get('/journal/new-entry', status=200)
    html = response.html
    assert len(html.find_all("form")) == 1
def test_create_view_redirects_and_updates_db(testapp):
    """Test that a create view redirects."""
    response = testapp.get("/journal/new-entry")
    # The CSRF token must be scraped from the form and echoed back.
    csrf_token = response.html.find(
        "input",
        {"name": "csrf_token"}).attrs["value"]
    post_params = {'csrf_token': csrf_token, 'title': 'TestPOST', 'body': 'body', 'category': 'testing', 'tags': ''}
    response = testapp.post('/journal/new-entry', post_params, status=302)
    assert response.status == '302 Found'
    follow_response = response.follow()
    assert 'TestPOST' in follow_response.html.find_all("article")[0].text
def test_edit_view_redirects_and_updates_db(testapp):
    """Test that an edit view redirects."""
    response = testapp.get("/journal/6/edit-entry")
    csrf_token = response.html.find(
        "input",
        {"name": "csrf_token"}).attrs["value"]
    post_params = {'csrf_token': csrf_token, 'title': 'TestPOST Edit', 'body': 'body', 'category': 'testing', 'tags': ''}
    response = testapp.post('/journal/6/edit-entry', post_params, status=302)
    assert response.status == '302 Found'
    follow_response = response.follow()
    assert 'TestPOST Edit' in follow_response.html.find_all("article")[0].text
def test_category_view_display_correct_amount(testapp):
    """Test that category view displays all of specific category."""
    response = testapp.get('/journal/category/testing1', status=200)
    html = response.html
    assert len(html.findAll('article')) == 2
def test_posting_from_home_adds_to_db(testapp):
    """Test that you can post from the home page."""
    response = testapp.get('/')
    assert len(response.html.find_all("article")) == 6
    csrf_token = response.html.find(
        "input",
        {"name": "csrf_token"}).attrs["value"]
    post_params = {'csrf_token': csrf_token, 'title': 'TestHomePost', 'body': 'body', 'category': 'testing', 'tags': ''}
    response = testapp.post('/', post_params)
    response = testapp.get('/')
    assert 'TestHomePost' in response.html.text
def test_logout_view_logs_out_user(testapp):
    """Test that logging out revokes the token."""
    testapp.get('/logout')
    assert "auth_tkt" not in testapp.cookies
|
[
"midfies@gmail.com"
] |
midfies@gmail.com
|
34d334f36d279ab35367bf2e31511b72558b5dc3
|
474cc39a11e0d9f523dee954b810d21945fdf911
|
/evaluate.py
|
0e5bc06639c7b238aa720be87c79f67719d98570
|
[] |
no_license
|
xuekunnan/yolox-pytorch
|
282f2f0e5ddfe441cc1616649ad6a95468dc9e39
|
8afa4044d7de1045ba472335ff04b5fcd80e75fa
|
refs/heads/main
| 2023-06-26T09:39:27.612740
| 2021-07-29T15:56:01
| 2021-07-29T15:56:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,420
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2021/7/24 21:50
# @Author : MingZhang
# @Email : zm19921120@126.com
import os
import cv2
import tqdm
import json
import pycocotools.coco as coco_
from pycocotools.cocoeval import COCOeval
from config import opt
from utils.util import NpEncoder
from models.yolox import Detector
def evaluate():
    """Evaluate the detector on the COCO-format val set and print bbox AP.

    Reads paths/settings from the global ``opt``; writes detections to a
    temporary ``result.json`` consumed by pycocotools, then deletes it.
    """
    detector = Detector(opt)
    gt_ann = opt.val_ann
    img_dir = opt.dataset_path + "/images/val2017"
    assert os.path.isfile(gt_ann), 'cannot find gt {}'.format(gt_ann)

    coco = coco_.COCO(gt_ann)
    images = coco.getImgIds()
    class_ids = sorted(coco.getCatIds())
    num_samples = len(images)
    print('find {} samples in {}'.format(num_samples, gt_ann))

    result_file = "result.json"
    coco_res = []
    for index in tqdm.tqdm(range(num_samples)):
        img_id = images[index]
        file_name = coco.loadImgs(ids=[img_id])[0]['file_name']
        image_path = img_dir + "/" + file_name
        img = cv2.imread(image_path)
        results = detector.run(img, vis_thresh=0.01)
        for res in results:
            # Each detection is [label, confidence, (x1, y1, x2, y2), ...].
            # BUGFIX: the old ``if len(res) > 3: reid_feat = res[4]`` raised
            # IndexError when len(res) == 4 and the value was never used, so
            # the dead assignment is removed.
            cls, conf, bbox = res[0], res[1], res[2]
            cls_index = opt.label_name.index(cls)
            # COCO result boxes are [x, y, width, height].
            coco_res.append(
                {'bbox': [bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]],
                 'category_id': class_ids[cls_index],
                 'image_id': int(img_id),
                 'score': conf})

    with open(result_file, 'w') as f_dump:
        json.dump(coco_res, f_dump, cls=NpEncoder)

    coco_det = coco.loadRes(result_file)
    coco_eval = COCOeval(coco, coco_det, 'bbox')
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()

    # Each class is evaluated separately
    # classes = [c["name"] for c in coco.loadCats(coco.getCatIds())]
    # for i, cat_id in enumerate(class_ids):
    #     print('-------- evaluate class: {} --------'.format(classes[i]))
    #     coco_eval.params.catIds = cat_id
    #     coco_eval.evaluate()
    #     coco_eval.accumulate()
    #     coco_eval.summarize()
    os.remove(result_file)
if __name__ == "__main__":
    # Restrict visible GPUs before any CUDA context is created.
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    # Fall back to the best checkpoint when no explicit model path is given.
    opt.load_model = opt.load_model if opt.load_model != "" else os.path.join(opt.save_dir, "model_best.pth")
    evaluate()
|
[
"zhangming8@github.com"
] |
zhangming8@github.com
|
37849c164ec97219cdcf44e7a9250015d014c2ec
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_drywall.py
|
b8938919b462b1e1f96d516e7786d744ec22293d
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 441
|
py
|
#calss header
class _DRYWALL():
def __init__(self,):
self.name = "DRYWALL"
self.definitions = [u'material consisting of two sheets of heavy paper with a layer of plaster between them, used to make walls and ceilings before putting on a top layer of plaster']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
dcb1530702b0dab3146bcbeb210b74f2625770f0
|
c15f7f1266b8994a7606c1e0507c06fbb54209ac
|
/simulation.py
|
306abed8d3921658b1b0ebe40c1f5314674a1f2c
|
[] |
no_license
|
rchenmit/monkeypunch
|
d11923f8bfba756068163f9ba709aa755a4fbe53
|
dd1ba721277cfd2a296e0249fddb699f96716edc
|
refs/heads/master
| 2020-05-18T06:12:13.022928
| 2015-01-31T02:52:52
| 2015-01-31T02:52:52
| 30,047,504
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,050
|
py
|
# main driver for the simulation
import car
import numpy as np
import random
import heapq
def pick_whether_theres_a_car_coming():
    """Bernoulli trial: report a car arrival with probability 0.1."""
    return random.random() < 0.1
def pick_which_car(list_of_cars):
    """Return one car chosen uniformly at random from *list_of_cars*."""
    chosen = random.randint(0, len(list_of_cars) - 1)
    return list_of_cars[chosen]
time_now = 0
# initialize the cars in the simulation, store in a list
l_cars_before_entering_road = [car.car(str(x)) for x in range(10)] ## all the cars in the simulation
l_cars_after_leaving_road = []
#priority queue for the road
# NOTE(review): road_A is fed with heapq.heappush below but is also iterated
# and mutated with list.remove(), which breaks the heap invariant.
road_A = [] #it's an empty list for now, we'll modify this by using the heapq methods
seconds_to_simulate = 60
## loop through the time (in seconds), and update the priority queue at each time,
time_to_stop_simulation = time_now + seconds_to_simulate
while time_now < time_to_stop_simulation:
    # for each item in the priority queue (road_X), decrease the key (representing the seconds) by one
    # NOTE(review): removing elements from road_A while iterating it can skip
    # the element that follows a removed one on this tick.
    for queue_tuple in road_A:
        queue_tuple[1].total_travel_time += 1
        if queue_tuple[1].time_to_intersection == 0:
            road_A.remove(queue_tuple)
            l_cars_after_leaving_road.append(queue_tuple[1])
        else:
            queue_tuple[1].time_to_intersection -= 1
    #decide whether a car is coming or not
    # NOTE(review): once the pool empties this `continue` skips the
    # `time_now += 1` below, so the while-loop never terminates if all ten
    # cars enter the road before the simulated minute is up.
    if len(l_cars_before_entering_road) == 0:
        continue
    else:
        bool_whether_car_coming = pick_whether_theres_a_car_coming()
    # if there happens to be a car coming, pick which lucky car gets to come through
    if bool_whether_car_coming:
        car_that_comes = pick_which_car(l_cars_before_entering_road)
        l_cars_before_entering_road.remove(car_that_comes)
        # NOTE(review): the heap key is a snapshot; time_to_intersection is
        # decremented on the car afterwards, so the stored key goes stale.
        heapq.heappush(road_A, (car_that_comes.time_to_intersection, car_that_comes))
    # update the time
    time_now += 1
    # print the contents of the road right now:
    print "time right now: " + str(time_now) + "--------------------------------------------"
    if len(road_A) > 0:
        for queue_tuple in road_A:
            print "Car on road A: name = " + queue_tuple[1].name + "; Time Left Til Intersection: " + str(queue_tuple[1].time_to_intersection) + " seconds"
    print "cars already passed: "
    print [car.name + "," for car in l_cars_after_leaving_road]
# NOTE(review): the comprehension above and the loop below rebind `car`,
# shadowing the imported `car` module for the rest of the script.
seconds_taken_per_car = []
for car in l_cars_after_leaving_road:
    seconds_taken_per_car.append(car.total_travel_time)
# NOTE(review): np.mean([]) yields nan (with a RuntimeWarning) if no car
# finished within the simulated window.
avg_seconds_taken_per_car = np.mean(seconds_taken_per_car)
#now, the simulation has ended; print a summary
print "--------------------------------------------------------------------------------------------------------"
print "The simulation has ended"
print "SUMMARY OF SIMULATION: ---------------------------------------------------------"
print "Time simulated (s): " + str(seconds_to_simulate)
print "Average time car took to pass through the road (s): " + str(avg_seconds_taken_per_car)
#### sample output from running this simulation:
#time right now: 1--------------------------------------------
#cars already passed:
#[]
#time right now: 2--------------------------------------------
#Car on road A: name = 2; Time Left Til Intersection: 7 seconds
#cars already passed:
#[]
#time right now: 3--------------------------------------------
#Car on road A: name = 2; Time Left Til Intersection: 6 seconds
#cars already passed:
#[]
#time right now: 4--------------------------------------------
#Car on road A: name = 2; Time Left Til Intersection: 5 seconds
#cars already passed:
#[]
#time right now: 5--------------------------------------------
#Car on road A: name = 2; Time Left Til Intersection: 4 seconds
#cars already passed:
#[]
#time right now: 6--------------------------------------------
#Car on road A: name = 2; Time Left Til Intersection: 3 seconds
#cars already passed:
#[]
#time right now: 7--------------------------------------------
#Car on road A: name = 2; Time Left Til Intersection: 2 seconds
#cars already passed:
#[]
#time right now: 8--------------------------------------------
#Car on road A: name = 2; Time Left Til Intersection: 1 seconds
#cars already passed:
#[]
#time right now: 9--------------------------------------------
#Car on road A: name = 2; Time Left Til Intersection: 0 seconds
#cars already passed:
#[]
#time right now: 10--------------------------------------------
#cars already passed:
#['2,']
#time right now: 11--------------------------------------------
#cars already passed:
#['2,']
#time right now: 12--------------------------------------------
#cars already passed:
#['2,']
#time right now: 13--------------------------------------------
#Car on road A: name = 5; Time Left Til Intersection: 6 seconds
#cars already passed:
#['2,']
#time right now: 14--------------------------------------------
#Car on road A: name = 5; Time Left Til Intersection: 5 seconds
#cars already passed:
#['2,']
#time right now: 15--------------------------------------------
#Car on road A: name = 5; Time Left Til Intersection: 4 seconds
#cars already passed:
#['2,']
#time right now: 16--------------------------------------------
#Car on road A: name = 5; Time Left Til Intersection: 3 seconds
#cars already passed:
#['2,']
#time right now: 17--------------------------------------------
#Car on road A: name = 5; Time Left Til Intersection: 2 seconds
#cars already passed:
#['2,']
#time right now: 18--------------------------------------------
#Car on road A: name = 5; Time Left Til Intersection: 1 seconds
#cars already passed:
#['2,']
#time right now: 19--------------------------------------------
#Car on road A: name = 5; Time Left Til Intersection: 0 seconds
#cars already passed:
#['2,']
#time right now: 20--------------------------------------------
#cars already passed:
#['2,', '5,']
#time right now: 21--------------------------------------------
#cars already passed:
#['2,', '5,']
#time right now: 22--------------------------------------------
#cars already passed:
#['2,', '5,']
#time right now: 23--------------------------------------------
#cars already passed:
#['2,', '5,']
#time right now: 24--------------------------------------------
#cars already passed:
#['2,', '5,']
#time right now: 25--------------------------------------------
#cars already passed:
#['2,', '5,']
#time right now: 26--------------------------------------------
#cars already passed:
#['2,', '5,']
#time right now: 27--------------------------------------------
#cars already passed:
#['2,', '5,']
#time right now: 28--------------------------------------------
#cars already passed:
#['2,', '5,']
#time right now: 29--------------------------------------------
#cars already passed:
#['2,', '5,']
#time right now: 30--------------------------------------------
#Car on road A: name = 0; Time Left Til Intersection: 6 seconds
#cars already passed:
#['2,', '5,']
#time right now: 31--------------------------------------------
#Car on road A: name = 0; Time Left Til Intersection: 5 seconds
#cars already passed:
#['2,', '5,']
#time right now: 32--------------------------------------------
#Car on road A: name = 0; Time Left Til Intersection: 4 seconds
#cars already passed:
#['2,', '5,']
#time right now: 33--------------------------------------------
#Car on road A: name = 0; Time Left Til Intersection: 3 seconds
#cars already passed:
#['2,', '5,']
#time right now: 34--------------------------------------------
#Car on road A: name = 0; Time Left Til Intersection: 2 seconds
#cars already passed:
#['2,', '5,']
#time right now: 35--------------------------------------------
#Car on road A: name = 0; Time Left Til Intersection: 1 seconds
#cars already passed:
#['2,', '5,']
#time right now: 36--------------------------------------------
#Car on road A: name = 0; Time Left Til Intersection: 0 seconds
#Car on road A: name = 8; Time Left Til Intersection: 10 seconds
#cars already passed:
#['2,', '5,']
#time right now: 37--------------------------------------------
#Car on road A: name = 8; Time Left Til Intersection: 10 seconds
#cars already passed:
#['2,', '5,', '0,']
#time right now: 38--------------------------------------------
#Car on road A: name = 8; Time Left Til Intersection: 9 seconds
#cars already passed:
#['2,', '5,', '0,']
#time right now: 39--------------------------------------------
#Car on road A: name = 8; Time Left Til Intersection: 8 seconds
#cars already passed:
#['2,', '5,', '0,']
#time right now: 40--------------------------------------------
#Car on road A: name = 8; Time Left Til Intersection: 7 seconds
#cars already passed:
#['2,', '5,', '0,']
#time right now: 41--------------------------------------------
#Car on road A: name = 8; Time Left Til Intersection: 6 seconds
#cars already passed:
#['2,', '5,', '0,']
#time right now: 42--------------------------------------------
#Car on road A: name = 8; Time Left Til Intersection: 5 seconds
#cars already passed:
#['2,', '5,', '0,']
#time right now: 43--------------------------------------------
#Car on road A: name = 8; Time Left Til Intersection: 4 seconds
#cars already passed:
#['2,', '5,', '0,']
#time right now: 44--------------------------------------------
#Car on road A: name = 8; Time Left Til Intersection: 3 seconds
#cars already passed:
#['2,', '5,', '0,']
#time right now: 45--------------------------------------------
#Car on road A: name = 8; Time Left Til Intersection: 2 seconds
#cars already passed:
#['2,', '5,', '0,']
#time right now: 46--------------------------------------------
#Car on road A: name = 8; Time Left Til Intersection: 1 seconds
#cars already passed:
#['2,', '5,', '0,']
#time right now: 47--------------------------------------------
#Car on road A: name = 7; Time Left Til Intersection: 5 seconds
#Car on road A: name = 8; Time Left Til Intersection: 0 seconds
#cars already passed:
#['2,', '5,', '0,']
#time right now: 48--------------------------------------------
#Car on road A: name = 7; Time Left Til Intersection: 4 seconds
#cars already passed:
#['2,', '5,', '0,', '8,']
#time right now: 49--------------------------------------------
#Car on road A: name = 7; Time Left Til Intersection: 3 seconds
#cars already passed:
#['2,', '5,', '0,', '8,']
#time right now: 50--------------------------------------------
#Car on road A: name = 7; Time Left Til Intersection: 2 seconds
#cars already passed:
#['2,', '5,', '0,', '8,']
#time right now: 51--------------------------------------------
#Car on road A: name = 7; Time Left Til Intersection: 1 seconds
#cars already passed:
#['2,', '5,', '0,', '8,']
#time right now: 52--------------------------------------------
#Car on road A: name = 7; Time Left Til Intersection: 0 seconds
#cars already passed:
#['2,', '5,', '0,', '8,']
#time right now: 53--------------------------------------------
#cars already passed:
#['2,', '5,', '0,', '8,', '7,']
#time right now: 54--------------------------------------------
#cars already passed:
#['2,', '5,', '0,', '8,', '7,']
#time right now: 55--------------------------------------------
#cars already passed:
#['2,', '5,', '0,', '8,', '7,']
#time right now: 56--------------------------------------------
#cars already passed:
#['2,', '5,', '0,', '8,', '7,']
#time right now: 57--------------------------------------------
#cars already passed:
#['2,', '5,', '0,', '8,', '7,']
#time right now: 58--------------------------------------------
#cars already passed:
#['2,', '5,', '0,', '8,', '7,']
#time right now: 59--------------------------------------------
#cars already passed:
#['2,', '5,', '0,', '8,', '7,']
#time right now: 60--------------------------------------------
#cars already passed:
#['2,', '5,', '0,', '8,', '7,']
#--------------------------------------------------------------------------------------------------------
#The simulation has ended
#SUMMARY OF SIMULATION: ---------------------------------------------------------
#Time simulated (s): 60
#Average time car took to pass through the road (s): 7.5
|
[
"robchen401@gmail.com"
] |
robchen401@gmail.com
|
5246e373b20c81219c7a31c3cf863bfb674ffc4a
|
d8d8144ade3b53d54d47e9115723c9e330351916
|
/backend/Himalaya/receipts/migrations/0006_auto_20200819_1240.py
|
7e684c6da5ce80ce879dbbb436cb65d23e4df95f
|
[
"MIT"
] |
permissive
|
djs2082/StoreManagement-Freelancing
|
28d70875651824a6ab310c68e1711142199e7797
|
11057cb4f155c0d36a8456c9ea3395b779516384
|
refs/heads/master
| 2023-07-20T09:12:50.718294
| 2020-08-23T14:54:28
| 2020-08-23T14:54:28
| 288,665,134
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 468
|
py
|
# Generated by Django 3.0.5 on 2020-08-19 07:10
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: alters ``Receipts.date_time``.

    NOTE(review): the default below is a *fixed* timestamp captured when
    ``makemigrations`` ran — the usual symptom of the model declaring
    ``default=datetime.datetime.now()`` (evaluated eagerly) instead of
    passing the callable ``django.utils.timezone.now``.  The migration
    itself must stay as generated; fix the default on the model —
    confirm against the model definition.
    """

    dependencies = [
        ('receipts', '0005_auto_20200819_1232'),
    ]

    operations = [
        migrations.AlterField(
            model_name='receipts',
            name='date_time',
            # Frozen timestamp 2020-08-19 12:40:15 — see class docstring.
            field=models.DateTimeField(default=datetime.datetime(2020, 8, 19, 12, 40, 15, 676233), null=True),
        ),
    ]
|
[
"dilipjoshis98@gmail.com"
] |
dilipjoshis98@gmail.com
|
a2077134075c8e3932875bae5656e38c94f5359f
|
ec365c4434b7cb2f564f251a313db89b48f0ea27
|
/June 11, 2021/2_입출력.py
|
43acc295502faf561f06de368d391f28ea6a8fff
|
[] |
no_license
|
to-Remember/TIL
|
5c01bcc809f3e109c2fb4cac243c1adc37a9c18e
|
09fd07d22403376f21d6c9c0797c2568854d6fc1
|
refs/heads/main
| 2023-06-22T05:57:14.329912
| 2021-07-13T13:46:20
| 2021-07-13T13:46:20
| 375,398,431
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,158
|
py
|
'''
File open modes (r, w, a — read / write / append)
'''
import os
import sys
def stdTest():
    """Demo of the three standard streams: read one line, echo it, log errors."""
    line = sys.stdin.readline(10)          # read at most 10 characters
    sys.stdout.write('msg:' + line + '\n')
    for _ in range(2):                     # the original wrote the message twice
        sys.stderr.write('에러 메시지 출력\n')
def fileRead(path):
    """Open *path* as UTF-8 text, print its whole contents, then close it.

    Any failure (missing file, decode error, ...) is printed instead of
    propagating — deliberately best-effort, like the module's other helpers.
    """
    try:
        handle = open(path, 'r', encoding='utf-8')
        contents = handle.read()   # read the entire file into a string
        print(contents)
        handle.close()
    except Exception as err:
        print(err)
def filewrite(path, msg):
    """Write *msg* to *path* as UTF-8, creating or truncating the file.

    Errors are printed rather than raised (best-effort), matching the
    module's other helpers.
    """
    try:
        # BUG FIX: the encoding was misspelled 'etf-8', so open() raised
        # LookupError on every call and the file was never written.
        f = open(path, 'w', encoding='utf-8')
        f.write(msg)
        f.close()
    except Exception as e:
        print(e)
def printDirList(path):
    """If *path* is a directory, print the name of every entry in it.

    Does nothing (silently) when *path* is not an existing directory.
    """
    if not os.path.isdir(path):
        return
    for entry in os.listdir(path):   # files and subdirectories alike
        print(entry)
def main():
    """Demo driver: write a greeting to b.txt, then read it back and print it."""
    # stdTest()  # interactive stdin demo — left disabled
    filewrite('b.txt', 'hello file\n')
    fileRead('b.txt')

# Teaching script: runs on import/execution (no __main__ guard).
main()
|
[
"rosa134212k@gmail.com"
] |
rosa134212k@gmail.com
|
9e178eb38e5268548a9a7625ad43128ebe87025e
|
d297251c2eae0b93f362af83344c26e08225fd0e
|
/Fabrica_de_helado.py
|
8ec76bd126ee167ede6516ac3027505bb3f24fd8
|
[
"Apache-2.0"
] |
permissive
|
ddelgadoc/fabrica-programacion-lineal-python
|
80e3992e427b691b41aff3ac33b3ea1756044b03
|
255dbaf5af00ca21082df5f9ecd95a38490b0661
|
refs/heads/master
| 2022-11-29T16:27:33.056207
| 2020-06-29T09:39:37
| 2020-06-29T09:39:37
| 275,133,848
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,085
|
py
|
# -*- coding: utf-8 -*-
"""
Ice-cream factory production planning for May: a mixed-integer linear
program (PuLP) that maximizes profit from tub sales subject to demand,
ingredient availability, machine capacity, freezer space and the launch
of a new flavor.

@author: deyli
"""
from pulp import *
## Sets
Sabor = ["V","F","CH","Valmendra"] # flavors — V: vanilla, F: strawberry, CH: chocolate, Valmendra: vanilla with almonds
Tam = ["P","M","G"] # tub sizes — P: small, M: medium, G: large
Maq =["1","2"] # ice-cream processing machines
Material = [] # ingredients — 1: milk, 2: sugar, 3: distilled water, 4: flavorings
for i in range(1,5):
    Material.append(str(i))
## Parameters
cantidad=[[1.5,2.5,3],[0.8,1.8,2.3],[1.7,2,2.7],[0.5,0.9,1.2]] # liters of each ingredient per tub, by size [[P,M,G], ...] per ingredient
L = dict(zip(Material,[cantidad[0],cantidad[1],cantidad[2],cantidad[3]])) # ingredient -> [P,M,G] usage (liters/unit)
Precio ={"P":25,"M":45,"G":75} # selling price per tub by size ($/unit)
Costo ={"P":11,"M":19.25,"G":24.95} # variable manufacturing cost per tub by size ($/unit)
Dem =dict(zip(Tam,[5000,2000,3000])) # ice-cream demand for May by size (units)
Dip = dict(zip(Material,[50000,20000,25000,15000])) # ingredient availability for May (liters)
Maq_d=dict(zip(Maq,[[7500,5000,4000],[6000,5110,3500]])) # production capacity per machine by tub size (units) [[P,M,G],[P,M,G]]
A=dict(zip(Tam,[0.5,0.7,1])) # grams of almond per Valmendra tub by size (g/unit)
CosteV={"P":1.5,"M":2.1,"G":3} # almond cost per Valmendra tub by size ($/unit)
## Problem
prob = LpProblem("Fabrica de helado",LpMaximize)
## Decision variables
x = LpVariable.dicts("Cantidad_Potes_de_helado",(Tam,Sabor),0,cat="Integer") # tubs of each size/flavor to produce in May
y = LpVariable.dicts("Utilizar_maquina",Maq,0,1,cat="Integer") # binary: which machine is used
k = LpVariable("Cantidad_de_neveras_adquirir",0,cat="Integer") # number of extra freezers to buy
R = LpVariable("Realizar_ajustes_economicos",0,1,cat="Integer") # binary: whether to buy any new freezer at all
u = LpVariable("Usar_nuevo_sabor_vainilla_almendra",0,1,cat="Integer") # binary: launch the vanilla-almond flavor
## Objective: revenue - production cost - $500 per freezer - almond cost + $5 premium per Valmendra tub
prob += lpSum([x[i][j]*Precio[i] for i in Tam for j in Sabor]) - lpSum([x[i][j]*Costo[i] for i in Tam for j in Sabor]) - 500*k - lpSum([x[i][j]*CosteV[i] for i in Tam for j in Sabor if j == "Valmendra"]) + lpSum(5*x[i][j] for i in Tam for j in Sabor if j == "Valmendra") #5 extra pesos on the unit selling price when the flavor is Valmendra
## Constraints
for i in Tam:
    prob += lpSum([x[i][j] for j in Sabor]) <= Dem[i] # do not exceed demand; <= because the objective maximizes profit (a MIN-cost model would use >=)
# Freezer capacity: fractions of one freezer per size must fit in 1 + k freezers
prob += lpSum([x[i][j] for i in Tam for j in Sabor if i == "P"])/7000 + lpSum([x[i][j] for i in Tam for j in Sabor if i == "M"])/5000 + lpSum([x[i][j] for i in Tam for j in Sabor if i == "G"])/4000 <= 1 + k
# Buying freezers requires the "economic adjustment" decision (at most 4)
prob += k <= 4*R
# Chocolate must be the best-selling flavor (at least 3x all other flavors combined)
prob += lpSum([x[i][j] for i in Tam for j in Sabor if j == "CH"]) -3*lpSum([x[i][j] for i in Tam for j in Sabor if j != "CH"]) >= 0
# Ingredient usage must not exceed availability
for n in Material:
    prob += lpSum([x[i][j] for i in Tam for j in Sabor if i == "P"])*L[n][0] + lpSum([x[i][j] for i in Tam for j in Sabor if i == "M"])*L[n][1] + lpSum([x[i][j] for i in Tam for j in Sabor if i == "G"])*L[n][2] <= Dip[n]
# Exactly one machine is used
prob += lpSum([y[i] for i in Maq]) == 1
for i in Tam:
    # NOTE(review): the three constraints below do not depend on the loop
    # variable i, so each is added once per size (PuLP tolerates the
    # duplicates) — confirm whether a single addition was intended.
    prob += lpSum([x[i][j] for j in Sabor if i == "P"]) <= Maq_d["1"][0]*y["1"] + Maq_d["2"][0]*y["2"]
    prob += lpSum([x[i][j] for j in Sabor if i == "M"]) <= Maq_d["1"][1]*y["1"] + Maq_d["2"][1]*y["2"]
    prob += lpSum([x[i][j] for j in Sabor if i == "G"]) <= Maq_d["1"][2]*y["1"] + Maq_d["2"][2]*y["2"]
# New vanilla-almond flavor: almond usage only allowed if the flavor is launched (big-M)
prob += lpSum([x[i][j]*A[i] for i in Tam for j in Sabor if j == "Valmendra"]) <= 50000*u
prob.solve()
prob.writeLP("exa.txt")
print("Estado:",LpStatus[prob.status])
print("FO",value(prob.objective))
for v in prob.variables():
    print(v.name,":",v.value())
|
[
"67110680+ddelgadoc@users.noreply.github.com"
] |
67110680+ddelgadoc@users.noreply.github.com
|
b81d2a2890c220468b8f4005a86e661d4f8d77a7
|
2e39344422c926fb8332e7e61b290a0935dd3931
|
/plox/parser.py
|
a4f15623b0df30aee794b45d561831a2a327120e
|
[] |
no_license
|
ZibingZhang/interpreters
|
4c821ecc56db2cea58304bad345b702f83ec8cbb
|
b059e3424a6cb23da9994f1feae91787afe82199
|
refs/heads/master
| 2023-07-02T16:42:07.817827
| 2021-08-01T02:55:55
| 2021-08-01T02:55:55
| 317,454,187
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,119
|
py
|
from __future__ import annotations
from typing import TYPE_CHECKING
import expr as ex
import lox
import stmt as st
from tokens import TokenType
if TYPE_CHECKING:
from tokens import Token
from typing import List, Optional
class Parser:
    """Recursive-descent parser that turns a Lox token stream into AST nodes.

    Each grammar rule is a ``_rule`` method that consumes tokens via
    ``_match``/``_consume`` and returns an ``ex``/``st`` node.  Syntax
    errors are reported through ``lox.Lox`` and recovered from with
    panic-mode synchronization so several errors can be reported per run.
    """

    def __init__(self, tokens: List[Token]) -> None:
        self._tokens = tokens
        self._current = 0  # index of the next unconsumed token

    @property
    def _is_at_end(self) -> bool:
        """True once the cursor sits on the EOF token."""
        return self._peek().type == TokenType.EOF

    @property
    def _previous(self) -> Token:
        """The most recently consumed token."""
        return self._tokens[self._current - 1]

    def parse(self) -> List[st.Stmt]:
        """Parse the whole token stream into a list of statements."""
        statements = []
        while not self._is_at_end:
            statement = self._declaration()
            if statement is not None:  # None => a parse error was recovered
                statements.append(statement)
        return statements

    def _declaration(self) -> Optional[st.Stmt]:
        """declaration -> classDecl | funDecl | varDecl | statement"""
        try:
            if self._match(TokenType.CLASS):
                return self._class_declaration()
            if self._match(TokenType.FUN):
                return self._function('function')
            if self._match(TokenType.VAR):
                return self._variable_declaration()
            return self._statement()
        except _ParseError:
            # Panic-mode recovery: skip to a likely statement boundary.
            self._synchronize()
            return None

    def _class_declaration(self) -> st.Class:
        """classDecl -> "class" IDENTIFIER ( "<" IDENTIFIER )? "{" function* "}" """
        name = self._consume(TokenType.IDENTIFIER, 'Expect class name.')

        superclass = None
        if self._match(TokenType.LESS):
            self._consume(TokenType.IDENTIFIER, 'Expect superclass name.')
            superclass = ex.Variable(self._previous)

        self._consume(TokenType.LEFT_BRACE, "Expect '{' before class body.")
        methods = []
        while not self._check(TokenType.RIGHT_BRACE) and not self._is_at_end:
            methods.append(self._function('method'))
        self._consume(TokenType.RIGHT_BRACE, "Expect '}' after class body.")
        return st.Class(name, superclass, methods)

    def _variable_declaration(self) -> st.Var:
        """varDecl -> "var" IDENTIFIER ( "=" expression )? ";" """
        name = self._consume(TokenType.IDENTIFIER, 'Expect variable name.')
        initializer = self._expression() if self._match(TokenType.EQUAL) else None
        self._consume(TokenType.SEMICOLON, "Expect ';' after variable declaration.")
        return st.Var(name, initializer)

    def _function(self, kind: str) -> st.Function:
        """Parse a function or method declaration; *kind* labels error messages."""
        name = self._consume(TokenType.IDENTIFIER, f'Expect {kind} name.')
        self._consume(TokenType.LEFT_PAREN, f"Expect '(' after {kind} name.")
        parameters = []
        if not self._check(TokenType.RIGHT_PAREN):
            parameters.append(self._consume(TokenType.IDENTIFIER, 'Expect parameter name.'))
            while self._match(TokenType.COMMA):
                if len(parameters) >= 255:
                    self._error(self._peek(), "Can't have more than 255 parameters.")
                # BUG FIX: every parameter after a comma must be consumed and
                # recorded; previously only the first parameter was kept, so
                # multi-parameter functions parsed incorrectly.
                parameters.append(self._consume(TokenType.IDENTIFIER, 'Expect parameter name.'))
        self._consume(TokenType.RIGHT_PAREN, "Expect ')' after parameters.")
        self._consume(TokenType.LEFT_BRACE, f"Expect '{{' before {kind} body.")
        body = self._block()
        return st.Function(name, parameters, body)

    def _statement(self) -> st.Stmt:
        """statement -> break | continue | for | if | return | while | block | exprStmt"""
        if self._match(TokenType.BREAK):
            return self._break_statement()
        if self._match(TokenType.CONTINUE):
            return self._continue_statement()
        if self._match(TokenType.FOR):
            return self._for_statement()
        if self._match(TokenType.IF):
            return self._if_statement()
        if self._match(TokenType.RETURN):
            return self._return_statement()
        if self._match(TokenType.WHILE):
            return self._while_statement()
        if self._match(TokenType.LEFT_BRACE):
            statements = self._block()
            return st.Block(statements)
        return self._expression_statement()

    def _break_statement(self) -> st.Break:
        keyword = self._previous
        # BUG FIX: the error message mentioned 'continue' (copy-paste slip).
        self._consume(TokenType.SEMICOLON, "Expect ';' after 'break'.")
        return st.Break(keyword)

    def _continue_statement(self) -> st.Continue:
        keyword = self._previous
        self._consume(TokenType.SEMICOLON, "Expect ';' after continue value.")
        return st.Continue(keyword)

    def _expression_statement(self) -> st.Expression:
        value = self._expression()
        self._consume(TokenType.SEMICOLON, "Expect a ';' after expression.")
        return st.Expression(value)

    def _for_statement(self) -> st.While:
        """Desugar a for loop into: { initializer; while (cond) { body; incr; } }"""
        self._consume(TokenType.LEFT_PAREN, "Expect '(' after 'for'.")

        initializer = None
        if self._match(TokenType.SEMICOLON):
            pass  # no initializer
        elif self._match(TokenType.VAR):
            initializer = self._variable_declaration()
        else:
            initializer = self._expression_statement()

        # A missing condition means "loop forever".
        condition = ex.Literal(True) if self._check(TokenType.SEMICOLON) else self._expression()
        self._consume(TokenType.SEMICOLON, "Expect ';' after loop condition.")

        increment = None if self._check(TokenType.RIGHT_PAREN) else self._expression()
        self._consume(TokenType.RIGHT_PAREN, "Expect ')' after for clauses.")

        body = self._statement()
        if increment is not None:
            body = st.Block([body, st.Expression(increment)])
        body = st.While(condition, body)
        if initializer is not None:
            body = st.Block([initializer, body])
        return body

    def _if_statement(self) -> st.If:
        self._consume(TokenType.LEFT_PAREN, "Expect '(' after 'if'.")
        condition = self._expression()
        self._consume(TokenType.RIGHT_PAREN, "Expect ')' after if condition.")

        then_branch = self._statement()
        # 'else' binds to the nearest 'if' (classic dangling-else resolution).
        else_branch = self._statement() if self._match(TokenType.ELSE) else None
        return st.If(condition, then_branch, else_branch)

    def _return_statement(self) -> st.Return:
        keyword = self._previous
        value = None if self._check(TokenType.SEMICOLON) else self._expression()
        self._consume(TokenType.SEMICOLON, "Expect ';' after return value.")
        return st.Return(keyword, value)

    def _while_statement(self) -> st.While:
        self._consume(TokenType.LEFT_PAREN, "Expect '(' after 'while'.")
        condition = self._expression()
        self._consume(TokenType.RIGHT_PAREN, "Expect ')' after condition.")
        body = self._statement()
        return st.While(condition, body)

    def _block(self) -> List[st.Stmt]:
        """Parse declarations until the closing '}' (already past the '{')."""
        statements = []
        while not self._check(TokenType.RIGHT_BRACE) and not self._is_at_end:
            statements.append(self._declaration())
        self._consume(TokenType.RIGHT_BRACE, "Expect '}' after block.")
        return statements

    def _expression(self) -> ex.Expr:
        return self._sequence()

    def _sequence(self) -> ex.Expr:
        """Comma operator: lowest precedence, left-associative."""
        expr = self._assignment()
        while self._match(TokenType.COMMA):
            op = self._previous
            right = self._assignment()
            expr = ex.Binary(expr, op, right)
        return expr

    def _assignment(self) -> ex.Expr:
        """Right-associative assignment; the LHS must be a variable or property."""
        expr = self._ternary()
        if self._match(TokenType.EQUAL):
            equals = self._previous
            value = self._assignment()
            if isinstance(expr, ex.Variable):
                name = expr.name
                return ex.Assign(name, value)
            if isinstance(expr, ex.Get):
                return ex.Set(expr.object, expr.name, value)
            self._error(equals, 'Invalid assignment target.')
        return expr

    def _ternary(self) -> ex.Expr:
        """cond ? then : else — right-associative in both arms."""
        expr = self._logical_or()
        if self._match(TokenType.QUESTION):
            op1 = self._previous
            true_expr = self._ternary()
            op2 = self._consume(TokenType.COLON, "Expect ':' following '?'.")
            false_expr = self._ternary()
            return ex.Ternary(expr, op1, true_expr, op2, false_expr)
        return expr

    def _logical_or(self) -> ex.Expr:
        expr = self._logical_and()
        while self._match(TokenType.OR):
            op = self._previous
            right = self._logical_and()
            expr = ex.Logical(expr, op, right)
        return expr

    def _logical_and(self) -> ex.Expr:
        expr = self._equality()
        # BUG FIX: this rule matched TokenType.OR, so 'and' expressions were
        # never parsed and 'or' bound at the wrong precedence level.
        while self._match(TokenType.AND):
            op = self._previous
            right = self._equality()
            expr = ex.Logical(expr, op, right)
        return expr

    def _equality(self) -> ex.Expr:
        expr = self._comparison()
        while self._match(TokenType.BANG_EQUAL, TokenType.EQUAL_EQUAL):
            op = self._previous
            right = self._comparison()
            expr = ex.Binary(expr, op, right)
        return expr

    def _comparison(self) -> ex.Expr:
        expr = self._term()
        while self._match(TokenType.GREATER, TokenType.GREATER_EQUAL, TokenType.LESS, TokenType.LESS_EQUAL):
            op = self._previous
            right = self._term()
            expr = ex.Binary(expr, op, right)
        return expr

    def _term(self) -> ex.Expr:
        expr = self._factor()
        while self._match(TokenType.MINUS, TokenType.PLUS):
            op = self._previous
            right = self._factor()
            expr = ex.Binary(expr, op, right)
        return expr

    def _factor(self) -> ex.Expr:
        expr = self._unary()
        while self._match(TokenType.SLASH, TokenType.STAR):
            op = self._previous
            right = self._unary()
            expr = ex.Binary(expr, op, right)
        return expr

    def _unary(self) -> ex.Expr:
        """unary -> ("!" | "-") unary | call — plus error productions that
        report a binary operator missing its left operand, then discard the
        partially-parsed right operand."""
        if self._match(TokenType.BANG, TokenType.MINUS):
            op = self._previous
            right = self._unary()
            return ex.Unary(op, right)
        if self._match(TokenType.EQUAL):
            op = self._previous
            self._assignment()
            raise self._error(op, "Nothing to assign to.")
        if self._match(TokenType.BANG_EQUAL, TokenType.EQUAL_EQUAL):
            op = self._previous
            self._comparison()
            raise self._error(op, 'Not a unary operator.')
        if self._match(
                TokenType.GREATER,
                TokenType.GREATER_EQUAL,
                TokenType.LESS,
                TokenType.LESS_EQUAL
        ):
            op = self._previous
            self._term()
            raise self._error(op, 'Not a unary operator.')
        if self._match(TokenType.PLUS):
            op = self._previous
            self._factor()
            raise self._error(op, "Unary '+' expressions are not supported.")
        if self._match(TokenType.SLASH, TokenType.STAR):
            op = self._previous
            self._unary()
            raise self._error(op, 'Not a unary operator.')
        return self._call()

    def _call(self) -> ex.Expr:
        """call -> primary ( "(" arguments? ")" | "." IDENTIFIER )*"""
        expr = self._primary()
        while True:
            if self._match(TokenType.LEFT_PAREN):
                expr = self._finish_call(expr)
            elif self._match(TokenType.DOT):
                name = self._consume(TokenType.IDENTIFIER, "Expect property name after '.'.")
                expr = ex.Get(expr, name)
            else:
                break
        return expr

    def _primary(self) -> ex.Expr:
        """Literals, 'super'/'this', identifiers and parenthesized groups."""
        if self._match(TokenType.FALSE):
            return ex.Literal(False)
        if self._match(TokenType.TRUE):
            return ex.Literal(True)
        if self._match(TokenType.NIL):
            return ex.Literal(None)
        if self._match(TokenType.NUMBER, TokenType.STRING):
            return ex.Literal(self._previous.literal)
        if self._match(TokenType.SUPER):
            keyword = self._previous
            self._consume(TokenType.DOT, "Expect a '.' after 'super'.")
            method = self._consume(TokenType.IDENTIFIER, 'Expect superclass method name.')
            return ex.Super(keyword, method)
        if self._match(TokenType.THIS):
            return ex.This(self._previous)
        if self._match(TokenType.IDENTIFIER):
            return ex.Variable(self._previous)
        if self._match(TokenType.LEFT_PAREN):
            expr = self._expression()
            self._consume(TokenType.RIGHT_PAREN, "Expect ')' after expression.")
            return ex.Grouping(expr)
        raise self._error(self._peek(), 'Expect expression.')

    def _match(self, *args: TokenType) -> bool:
        """Consume the next token and return True if it is any of *args*."""
        for token_type in args:
            if self._check(token_type):
                self._advance()
                return True
        return False

    def _check(self, token_type: TokenType) -> bool:
        """Peek (without consuming) at whether the next token is *token_type*."""
        if self._is_at_end:
            return False
        return self._peek().type == token_type

    def _advance(self) -> Token:
        """Consume the current token (never past EOF) and return it."""
        if not self._is_at_end:
            self._current += 1
        return self._previous

    def _peek(self) -> Token:
        return self._tokens[self._current]

    def _consume(self, token_type: TokenType, msg: str) -> Token:
        """Consume a token of the expected type or raise a parse error."""
        if self._check(token_type):
            return self._advance()
        raise self._error(self._peek(), msg)

    def _synchronize(self) -> None:
        """Discard tokens until a probable statement boundary is reached."""
        self._advance()
        while not self._is_at_end:
            if self._previous.type == TokenType.SEMICOLON:
                return
            if self._peek().type in {
                TokenType.CLASS,
                TokenType.FUN,
                TokenType.VAR,
                TokenType.FOR,
                TokenType.IF,
                TokenType.WHILE,
                TokenType.RETURN
            }:
                return
            self._advance()

    def _finish_call(self, expr: ex.Expr) -> ex.Call:
        """Parse the argument list and closing ')' of a call expression."""
        arguments = []
        if not self._check(TokenType.RIGHT_PAREN):
            arguments.append(self._expression())
            while self._match(TokenType.COMMA):
                if len(arguments) >= 255:
                    self._error(self._peek(), "Can't have more than 255 arguments.")
                arguments.append(self._expression())
        paren = self._consume(TokenType.RIGHT_PAREN, "Expect ')' after arguments.")
        return ex.Call(expr, paren, arguments)

    @staticmethod
    def _error(token: Token, msg: str):
        """Report the error and return (not raise) the sentinel exception."""
        lox.Lox.error_token(token, msg)
        return _ParseError()
class _ParseError(RuntimeError):
    """Internal sentinel used to unwind the parser to its recovery point."""
    pass
|
[
"zibing.zha@gmail.com"
] |
zibing.zha@gmail.com
|
73c8867f776b1cd578d7cf9c653f57c47bcb1a2f
|
5f2671c7f74494d53c07964d5425d6a95abd0a92
|
/Python/Practice/DjangoTest/mysite/myapp/forms.py
|
a35c0c105a86e59416b9e51b93e5fd40cdebbea5
|
[] |
no_license
|
apoorvsemwal/Scale_Up
|
365f1771df06af3e29e205ca7c5ead592030161a
|
df2bb9222847462fa46063fff6f73503cdb877cd
|
refs/heads/master
| 2020-04-14T22:31:10.147286
| 2020-01-24T20:55:44
| 2020-01-24T20:55:44
| 164,165,845
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 162
|
py
|
from django.forms import ModelForm
from .models import Post
class PostForm(ModelForm):
    """Model-backed form exposing a Post's title and text for editing."""

    class Meta:
        model = Post
        # Only these two fields are user-editable; list order sets render order.
        fields = ["title", "text"]
|
[
"apoorv.semwal20@gmail.com"
] |
apoorv.semwal20@gmail.com
|
16bdd891e034574e94de859f08013530b115bae1
|
18c024fca7237ad848ab815177d8e479e6ce758f
|
/2021-05-27_practic/task_8.py
|
f8e220602346a2618d25bb0fb9ba01bdeef0a04d
|
[] |
no_license
|
smakotin/DU
|
7dc5b6f5ca0545f8442b874f9e5619795eebad83
|
4167f8bc74f040a02b914c04b29297847cbc751a
|
refs/heads/master
| 2023-05-19T03:52:06.359879
| 2021-06-08T18:34:21
| 2021-06-08T18:34:21
| 369,992,186
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 370
|
py
|
# 8. * Print the prime factorization of a number (trial division).
a = int(input('Введите число: '))
result = []
div = 2
# Divide out each candidate factor completely before moving on; only
# candidates up to sqrt(a) need checking — whatever remains is prime.
while div * div <= a:
    while a % div == 0:
        result.append(div)
        # BUG FIX: was `a /= div`, which converts `a` to float and loses
        # precision for large inputs; floor division keeps it an int.
        a //= div
    div += 1
if a > 1:
    result.append(a)
print(f'Простые множители этого числа: {result}')
|
[
"slamfromm@gmail.com"
] |
slamfromm@gmail.com
|
d46be8063d63e916dd972289c6dc22a0f13cd888
|
c0fdfa90a4338d1d986c0c37566f7a1a79da81ab
|
/LE.py
|
af2f40550df9c8d5d7fab693550ab30ecbf6a088
|
[
"MIT"
] |
permissive
|
dogfaraway/Sci_Calc
|
8e9f00c76c6d433d467da399859ce0c86eaf8a95
|
2ec16369130c0d8ed83ff6eb4e78093f090f9687
|
refs/heads/master
| 2022-12-29T02:30:35.765229
| 2020-10-19T07:13:14
| 2020-10-19T07:13:14
| 301,761,825
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,681
|
py
|
#-*-coding:utf-8-*-
'''
多变量非线性方程求解
'''
from sympy import *
import numpy as np
np.set_printoptions(suppress=True)
n = 20000#控制迭代次数
def Henon(x, y, n):
    """Iterate the Hénon map (a=1.4, b=0.3) *n* times starting from (x, y)."""
    for _ in range(n):
        # Simultaneous update: both components use the *previous* (x, y).
        x, y = 1 - 1.4 * x ** 2 + y, 0.3 * x
    return x, y
def Jacobian():
    """Accumulate the product of n Jacobian matrices along a Hénon orbit.

    Builds the symbolic Jacobian of the Hénon map once, burns in 5001
    iterations to wash out the initial condition, then multiplies the
    Jacobian evaluated at each of the next n orbit points.
    """
    # Symbolic map and its Jacobian (fixed form; only the evaluation point varies).
    x, y = symbols("x,y")
    f_mat = Matrix([1 - 1.4 * x ** 2 + y, 0.3 * x])
    jacobi_mat = f_mat.jacobian([x, y])

    # Burn-in: discard transient behaviour of the orbit.
    a, b = Henon(0.123456789, 0.123456789, 5001)

    # Seed the product with the Jacobian at the first post-burn-in point...
    J = jacobi_mat.subs({x: a, y: b})
    a, b = Henon(a, b, 1)
    # ...then fold in the remaining n-1 points (n is the module-level count).
    for _ in range(n - 1):
        J = J * jacobi_mat.subs({x: a, y: b})
        a, b = Henon(a, b, 1)
    return J
def LE_calculate(J):
    """Print the two Lyapunov exponents from the accumulated Jacobian *J*.

    Each exponent is ln|eigenvalue| / n, where n is the module-level
    iteration count used to build *J*.
    """
    eigenvalues = list(J.eigenvals())  # keys of the {eigenvalue: multiplicity} dict
    first, second = eigenvalues[0], eigenvalues[1]
    print(N(ln(abs(first)) / n))
    print(N(ln(abs(second)) / n))
if __name__ == '__main__':
    # Build the accumulated Jacobian, then print the two Lyapunov exponents.
    J=Jacobian()
    LE_calculate(J)
|
[
"dogfaraway@hotmail.com"
] |
dogfaraway@hotmail.com
|
c7e350c3f84a9ca602c401271313bc5761f70202
|
75bf394beeae3549ea4756624d32fb5435374369
|
/gol.10.py
|
219c147dcf9a2e64d3451721dd3530f7e52fdcc6
|
[] |
no_license
|
zefyrrr/game_of_life
|
6b228cd9e4571533f372f8575765d5cf0dd14687
|
db563d82e00647d2b2dfdaa26e9d4dc180e57e0c
|
refs/heads/master
| 2020-03-30T18:41:11.492999
| 2014-05-17T23:11:53
| 2014-05-17T23:11:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,882
|
py
|
import random
duplicate = 0
myList = []
for i in range(1000):
myList.append([random.randint(0, 100),random.randint(0, 100)], 'unknown')
orig_len = len(myList)
# Returns 1 if adjacent
# Returns -1 if duplicate
# Returns 0 if neither
def compare_adjacency(x,y):
if x == y: # Duplicate
return -1
if (y[0] >= x[0] - 1 and y[0] <= x[0] + 1) and (y[1] >= x[1] - 1 and y[1] <= x[1] + 1) :
return 1
return 0
length = len(myList)
h=0
while h < len(myList):
match = 0
i = 0
# for i in range((length -1) , 0, -1):
while i < len(myList): # make sure our index stays in bounds
if h == i: #skip
break # iterate to the next element
ret_val = compare_adjacency(myList[h],myList[i]) # retrieve list item comparison
if ret_val == -1: # Duplicate
# print "Duplicate", myList[h],myList[i], h, i
duplicate = duplicate + 1
del myList[i]
break
if ret_val == 1:
match = match + 1
# print "Match", myList[h], myList[i], match
# if ret_val == 0:
# print "No Match"
i = i+1
#I will live
if match > 1 and match < 4:
print "Cell #", h, "has", match, "matches, is happy and will survive"
#I will divide_and_multiply
# are there three cells around a dead cell
#I will die
if match < 2 :
print "Cell #", h, "has", match, "matches, is lonely and will die"
if match > 3:
print "Cell #", h, "has", match, "matches, is overcrowded and will die"
h = h + 1
print "original length =", orig_len, "final length =", len(myList)
print "Number of duplicates =", duplicate
#I will live
#for a in range(len(myList)):
# print myList[a][0],
# print myList[a][1]
#print compare_adjacency([1,2],[3,2])
|
[
"irshad@Irshad-Siddiqis-MacBook-Pro.local"
] |
irshad@Irshad-Siddiqis-MacBook-Pro.local
|
4c96587dbc43eb524c55647522428a121fd21151
|
bc441bb06b8948288f110af63feda4e798f30225
|
/architecture_view_sdk/api/business/get_deploy_info_pb2.py
|
bb8aab22fdc244863c6b04a8f91f3bc970ff0ff0
|
[
"Apache-2.0"
] |
permissive
|
easyopsapis/easyops-api-python
|
23204f8846a332c30f5f3ff627bf220940137b6b
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
refs/heads/master
| 2020-06-26T23:38:27.308803
| 2020-06-16T07:25:41
| 2020-06-16T07:25:41
| 199,773,131
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| true
| 15,062
|
py
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: get_deploy_info.proto
# NOTE(review): generated file — change get_deploy_info.proto and re-run
# protoc instead of editing this module by hand.

import sys
# _b: identity on Python 2; on Python 3 encodes the literal to latin-1 bytes,
# matching how protoc embeds the serialized descriptor below.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))

from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


# File descriptor for get_deploy_info.proto (package `business`), rebuilt
# from the serialized bytes emitted by protoc.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='get_deploy_info.proto',
  package='business',
  syntax='proto3',
  serialized_options=None,
  serialized_pb=_b('\n\x15get_deploy_info.proto\x12\x08\x62usiness\"-\n\x17GetAppDeployInfoRequest\x12\x12\n\nbusinessId\x18\x01 \x01(\t\"\xa6\x03\n\x18GetAppDeployInfoResponse\x12\x0c\n\x04page\x18\x01 \x01(\x05\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\r\n\x05total\x18\x03 \x01(\x05\x12\x35\n\x04list\x18\x04 \x03(\x0b\x32\'.business.GetAppDeployInfoResponse.List\x1a\xa2\x02\n\x04List\x12\r\n\x05\x61ppId\x18\x01 \x01(\t\x12\x0f\n\x07\x61ppName\x18\x02 \x01(\t\x12<\n\x05hosts\x18\x03 \x03(\x0b\x32-.business.GetAppDeployInfoResponse.List.Hosts\x12H\n\x0bmanualHosts\x18\x04 \x03(\x0b\x32\x33.business.GetAppDeployInfoResponse.List.ManualHosts\x1a\x35\n\x05Hosts\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\n\n\x02ip\x18\x02 \x01(\t\x12\x0c\n\x04name\x18\x03 \x01(\t\x1a;\n\x0bManualHosts\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\n\n\x02ip\x18\x02 \x01(\t\x12\x0c\n\x04name\x18\x03 \x01(\t\"\x85\x01\n\x1fGetAppDeployInfoResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12\x30\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32\".business.GetAppDeployInfoResponseb\x06proto3')
)
_GETAPPDEPLOYINFOREQUEST = _descriptor.Descriptor(
name='GetAppDeployInfoRequest',
full_name='business.GetAppDeployInfoRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='businessId', full_name='business.GetAppDeployInfoRequest.businessId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=35,
serialized_end=80,
)
_GETAPPDEPLOYINFORESPONSE_LIST_HOSTS = _descriptor.Descriptor(
name='Hosts',
full_name='business.GetAppDeployInfoResponse.List.Hosts',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='instanceId', full_name='business.GetAppDeployInfoResponse.List.Hosts.instanceId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ip', full_name='business.GetAppDeployInfoResponse.List.Hosts.ip', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='business.GetAppDeployInfoResponse.List.Hosts.name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=391,
serialized_end=444,
)
_GETAPPDEPLOYINFORESPONSE_LIST_MANUALHOSTS = _descriptor.Descriptor(
name='ManualHosts',
full_name='business.GetAppDeployInfoResponse.List.ManualHosts',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='instanceId', full_name='business.GetAppDeployInfoResponse.List.ManualHosts.instanceId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ip', full_name='business.GetAppDeployInfoResponse.List.ManualHosts.ip', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='business.GetAppDeployInfoResponse.List.ManualHosts.name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=446,
serialized_end=505,
)
_GETAPPDEPLOYINFORESPONSE_LIST = _descriptor.Descriptor(
name='List',
full_name='business.GetAppDeployInfoResponse.List',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='appId', full_name='business.GetAppDeployInfoResponse.List.appId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='appName', full_name='business.GetAppDeployInfoResponse.List.appName', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hosts', full_name='business.GetAppDeployInfoResponse.List.hosts', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='manualHosts', full_name='business.GetAppDeployInfoResponse.List.manualHosts', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_GETAPPDEPLOYINFORESPONSE_LIST_HOSTS, _GETAPPDEPLOYINFORESPONSE_LIST_MANUALHOSTS, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=215,
serialized_end=505,
)
_GETAPPDEPLOYINFORESPONSE = _descriptor.Descriptor(
name='GetAppDeployInfoResponse',
full_name='business.GetAppDeployInfoResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='page', full_name='business.GetAppDeployInfoResponse.page', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='page_size', full_name='business.GetAppDeployInfoResponse.page_size', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='total', full_name='business.GetAppDeployInfoResponse.total', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='list', full_name='business.GetAppDeployInfoResponse.list', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_GETAPPDEPLOYINFORESPONSE_LIST, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=83,
serialized_end=505,
)
_GETAPPDEPLOYINFORESPONSEWRAPPER = _descriptor.Descriptor(
name='GetAppDeployInfoResponseWrapper',
full_name='business.GetAppDeployInfoResponseWrapper',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='business.GetAppDeployInfoResponseWrapper.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='codeExplain', full_name='business.GetAppDeployInfoResponseWrapper.codeExplain', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='business.GetAppDeployInfoResponseWrapper.error', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='business.GetAppDeployInfoResponseWrapper.data', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=508,
serialized_end=641,
)
_GETAPPDEPLOYINFORESPONSE_LIST_HOSTS.containing_type = _GETAPPDEPLOYINFORESPONSE_LIST
_GETAPPDEPLOYINFORESPONSE_LIST_MANUALHOSTS.containing_type = _GETAPPDEPLOYINFORESPONSE_LIST
_GETAPPDEPLOYINFORESPONSE_LIST.fields_by_name['hosts'].message_type = _GETAPPDEPLOYINFORESPONSE_LIST_HOSTS
_GETAPPDEPLOYINFORESPONSE_LIST.fields_by_name['manualHosts'].message_type = _GETAPPDEPLOYINFORESPONSE_LIST_MANUALHOSTS
_GETAPPDEPLOYINFORESPONSE_LIST.containing_type = _GETAPPDEPLOYINFORESPONSE
_GETAPPDEPLOYINFORESPONSE.fields_by_name['list'].message_type = _GETAPPDEPLOYINFORESPONSE_LIST
_GETAPPDEPLOYINFORESPONSEWRAPPER.fields_by_name['data'].message_type = _GETAPPDEPLOYINFORESPONSE
DESCRIPTOR.message_types_by_name['GetAppDeployInfoRequest'] = _GETAPPDEPLOYINFOREQUEST
DESCRIPTOR.message_types_by_name['GetAppDeployInfoResponse'] = _GETAPPDEPLOYINFORESPONSE
DESCRIPTOR.message_types_by_name['GetAppDeployInfoResponseWrapper'] = _GETAPPDEPLOYINFORESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetAppDeployInfoRequest = _reflection.GeneratedProtocolMessageType('GetAppDeployInfoRequest', (_message.Message,), {
'DESCRIPTOR' : _GETAPPDEPLOYINFOREQUEST,
'__module__' : 'get_deploy_info_pb2'
# @@protoc_insertion_point(class_scope:business.GetAppDeployInfoRequest)
})
_sym_db.RegisterMessage(GetAppDeployInfoRequest)
GetAppDeployInfoResponse = _reflection.GeneratedProtocolMessageType('GetAppDeployInfoResponse', (_message.Message,), {
'List' : _reflection.GeneratedProtocolMessageType('List', (_message.Message,), {
'Hosts' : _reflection.GeneratedProtocolMessageType('Hosts', (_message.Message,), {
'DESCRIPTOR' : _GETAPPDEPLOYINFORESPONSE_LIST_HOSTS,
'__module__' : 'get_deploy_info_pb2'
# @@protoc_insertion_point(class_scope:business.GetAppDeployInfoResponse.List.Hosts)
})
,
'ManualHosts' : _reflection.GeneratedProtocolMessageType('ManualHosts', (_message.Message,), {
'DESCRIPTOR' : _GETAPPDEPLOYINFORESPONSE_LIST_MANUALHOSTS,
'__module__' : 'get_deploy_info_pb2'
# @@protoc_insertion_point(class_scope:business.GetAppDeployInfoResponse.List.ManualHosts)
})
,
'DESCRIPTOR' : _GETAPPDEPLOYINFORESPONSE_LIST,
'__module__' : 'get_deploy_info_pb2'
# @@protoc_insertion_point(class_scope:business.GetAppDeployInfoResponse.List)
})
,
'DESCRIPTOR' : _GETAPPDEPLOYINFORESPONSE,
'__module__' : 'get_deploy_info_pb2'
# @@protoc_insertion_point(class_scope:business.GetAppDeployInfoResponse)
})
_sym_db.RegisterMessage(GetAppDeployInfoResponse)
_sym_db.RegisterMessage(GetAppDeployInfoResponse.List)
_sym_db.RegisterMessage(GetAppDeployInfoResponse.List.Hosts)
_sym_db.RegisterMessage(GetAppDeployInfoResponse.List.ManualHosts)
GetAppDeployInfoResponseWrapper = _reflection.GeneratedProtocolMessageType('GetAppDeployInfoResponseWrapper', (_message.Message,), {
'DESCRIPTOR' : _GETAPPDEPLOYINFORESPONSEWRAPPER,
'__module__' : 'get_deploy_info_pb2'
# @@protoc_insertion_point(class_scope:business.GetAppDeployInfoResponseWrapper)
})
_sym_db.RegisterMessage(GetAppDeployInfoResponseWrapper)
# @@protoc_insertion_point(module_scope)
|
[
"service@easyops.cn"
] |
service@easyops.cn
|
83ec2d0d64d698f3cd08db3d2e3730943a793560
|
edf546fbad7d9dc9c60504363c61c0db2a1490ad
|
/scratchml/classification/__init__.py
|
519c81ae1aa7ca05dc46e38b2e618f1fb14e0fac
|
[
"MIT"
] |
permissive
|
shreeviknesh/ScratchML
|
d552b4703612b7e134b728c323632eb138bd11e5
|
882190c0f5d79824bb3a184792daf4299c9ff518
|
refs/heads/master
| 2022-12-03T01:09:48.196544
| 2022-11-24T02:26:03
| 2022-11-24T02:26:03
| 255,577,981
| 2
| 1
|
MIT
| 2020-05-18T08:04:48
| 2020-04-14T10:25:11
|
Python
|
UTF-8
|
Python
| false
| false
| 92
|
py
|
# Public API of the classification subpackage: re-export each classifier
# class from its own module so callers can write e.g.
# `from scratchml.classification import SVM`.
from .LogisticRegression import LogisticRegression
from .SVM import SVM
from .KNN import KNN
|
[
"luckyboy1998@gmail.com"
] |
luckyboy1998@gmail.com
|
21a2f8056a1f220b65b0bfb89760d533eb960bea
|
8966c06c151d2c87d926c7b89eaa927ceca651f9
|
/solution/Routes_tests.py
|
12e3d8edbd96cd1bff4bcd6505f6604fb009e803
|
[] |
no_license
|
Rodriguevb/B-KUL-H01B6B-Methodiek_van_de_Informatica-Project201415
|
5dc25cff5b063cee63505fafb0f44cfb441ed62f
|
915b9562303dbef11d9db771dd3735932d473721
|
refs/heads/master
| 2021-01-10T21:25:08.977451
| 2015-05-02T11:54:59
| 2015-05-02T11:54:59
| 34,631,647
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,269
|
py
|
from Routes import *
from math import *
""" Helper function for tests """
def float_equals(x, y, threshold=0.00000001):
    """Return True when |x - y| is strictly below *threshold* (default 1e-8)."""
    difference = x - y
    if difference < 0:
        difference = -difference
    return difference < threshold
print "TESTING ..."
print "initializing network"
# initialize network
roads=(("LB", "leuven", "brussel", 27.0, 120), ("BL", "brussel", "leuven", 30.0, 120), ("LA", "leuven", "antwerpen", 61.0, 120),
("AL","antwerpen", "leuven", 63.0, 120), ("BO", "brussel", "oostende", 110.0, 120),
("OA", "oostende", "antwerpen", 120.0, 120), ("AH", "antwerpen", "hasselt", 78.0, 120), ("HL", "hasselt", "leuven", 60.0, 120))
network = init_network(roads)
print "testing shortest path (Floyd)"
# shortest path - floyd algorithm
assert get_shortest_route_floyd(network, "leuven", "hasselt") == ['LA', 'AH']
assert get_shortest_route_floyd(network, "brussel", "hasselt") == ['BL', 'LA', 'AH']
# floyd with 1 excluding
assert get_shortest_route_floyd(network, "brussel", "hasselt", ["leuven"]) == ['BO', 'OA', 'AH']
# floyd with non-existing path
assert get_shortest_route_floyd(network, "hasselt", "antwerpen", ["leuven"]) == None
# testing with network with multiple roads between 2 cities
# Initializing
roads2=(("LB", "leuven", "brussel", 27.0, 120), ("BL", "brussel", "leuven", 30.0, 120), ("LA", "leuven", "antwerpen", 61.0, 120),
("AL","antwerpen", "leuven", 63.0, 120), ("BO", "brussel", "oostende", 110.0, 120), ("BO_2", "brussel", "oostende", 120.0, 120),
("OA", "oostende", "antwerpen", 120.0, 120), ("AH", "antwerpen", "hasselt", 78.0, 120), ("AH_2", "antwerpen", "hasselt", 60.0, 120),
("HL", "hasselt", "leuven", 60.0, 120))
network2 = init_network(roads2)
#testing
assert get_shortest_route_floyd(network2, "brussel", "hasselt") == ['BL', 'LA', 'AH_2']
print "testing quickest path (vias)"
# quickest path - vias algorithm
# no via
assert get_quickest_route_via(network, "oostende", "hasselt", []) == ['OA', 'AH']
# 1 via
assert get_quickest_route_via(network, "oostende", "hasselt", ["brussel"]) == ['OA', 'AL', 'LB', 'BL', 'LA', 'AH']
# via does not exist (no path is found)
assert get_quickest_route_via(network, "brussel", "hasselt", ["?"]) == None
# testing with network with multiple roads between two cities
assert get_quickest_route_via(network2, "oostende", "hasselt", ["brussel"]) == ['OA', 'AL', 'LB', 'BL', 'LA', 'AH_2']
print "testing route helper functions"
# route testing
route_leuven_hasselt = ['LA', 'AH']
route_oostende_hasselt = ['OA', 'AL', 'LB', 'BL', 'LA', 'AH']
assert get_length_of_route(network, route_leuven_hasselt) == 139
assert round(get_time_of_route(network, route_leuven_hasselt), 2) == 1.16
assert round(get_time_of_route(network, route_oostende_hasselt), 2)==3.16
assert get_cities_of_route(network, route_leuven_hasselt) == ["leuven", "antwerpen", "hasselt"]
assert get_cities_of_route(network, route_oostende_hasselt) == ['oostende', 'antwerpen', 'leuven', 'brussel', 'leuven', 'antwerpen', 'hasselt']
assert cities_occur_in_route(network,["leuven", "hasselt", "oostende"], route_oostende_hasselt)
assert not cities_occur_in_route(network,["leuven", "gent", "oostende"], route_oostende_hasselt)
assert not route_contains_loop(route_oostende_hasselt)
assert route_contains_loop(["BL", "LA", "AH", "HL", "LA", "AL"])
assert route_is_contained_in_other_route(["AB", "BC", "CD"],["ZA", "AX", "XA", "AB", "BY", "YW", "WY", "YB", "BC", "CD"])
assert route_is_contained_in_other_route(["AL", "LA"], route_oostende_hasselt)
assert not route_is_contained_in_other_route(["LA", "AL"], route_oostende_hasselt)
assert is_correct_route(network, route_oostende_hasselt)
assert not is_correct_route(network, ["AL", "LB","OA", "AH"])
print "testing road helper functions"
# road testing
assert get_start(network, "AH")=="antwerpen"
assert get_end(network, "AH")=="hasselt"
assert get_length(network, "AH")== 78.0
assert get_speed(network, "AH")== 120
assert get_time(network, "AH")== 0.65
print "testing city helper functions"
# city testing
assert get_all_roads_starting_from(network, "antwerpen")==["AL", "AH"] or get_all_roads_starting_from(network, "antwerpen")==["AH", "AL"]
assert get_road_to(network, "antwerpen", "hasselt")== "AH"
assert get_road_to(network2, "antwerpen", "hasselt")== "AH_2"
assert get_road_to(network, "brussel", "antwerpen") == None
print "extensive testing of small network"
#Testing the example network in the assignment (Figure 2)
roads=(("BL", "Brussel", "Leuven", 30.0, 120),
("LA", "Leuven", "Antwerpen", 61.0, 120),
("AB", "Antwerpen", "Brussel", 44.0,120))
network = init_network(roads)
# test get_all_roads_starting_from
assert "BL" in get_all_roads_starting_from(network,"Brussel")
assert "LA" in get_all_roads_starting_from(network,"Leuven")
assert "AB" in get_all_roads_starting_from(network,"Antwerpen")
assert len(get_all_roads_starting_from(network,"Brussel")) == 1
assert len(get_all_roads_starting_from(network,"Leuven")) == 1
assert len(get_all_roads_starting_from(network,"Antwerpen")) == 1
# test get_road_to
assert "BL" == get_road_to(network,"Brussel","Leuven")
assert "LA" == get_road_to(network,"Leuven","Antwerpen")
assert "AB" == get_road_to(network,"Antwerpen","Brussel")
assert None == get_road_to(network,"Brussel","Antwerpen")
assert None == get_road_to(network,"Brussel","Brussel")
route = ["BL","LA"]
# test get_length_of_route, get_time_of_route, get_cities_of_route
assert float_equals(91.0, get_length_of_route(network,route))
assert float_equals(91.0/120, get_time_of_route(network,route))
assert ["Brussel","Leuven","Antwerpen"] == get_cities_of_route(network,route)
# test cities_occur_in_route
assert cities_occur_in_route(network,["Brussel","Leuven","Antwerpen"],route)
assert cities_occur_in_route(network,["Leuven","Antwerpen"],route)
assert cities_occur_in_route(network,["Antwerpen","Leuven"],route)
assert cities_occur_in_route(network,("Antwerpen","Leuven"),route)
assert cities_occur_in_route(network,{"Antwerpen","Leuven"},route)
assert cities_occur_in_route(network,[],route)
# test route_contains_loop
assert not route_contains_loop(route)
assert not route_contains_loop(["BL","LA","AB"])
assert route_contains_loop(["BL","LA","AB","BL"])
# test route_is_contained_in_other_route
assert route_is_contained_in_other_route(["BL","LA"],["BL","LA"])
assert route_is_contained_in_other_route(["BL"],["BL","LA"])
assert route_is_contained_in_other_route(["LA"],["BL","LA"])
assert route_is_contained_in_other_route(["AB", "BC", "CD"],["ZA", "AX", "XA", "AB", "BY", "YW", "WY", "YB", "BC", "CD"])
assert not route_is_contained_in_other_route(["BL","LA"],["BL"])
assert not route_is_contained_in_other_route(["BL","LA"],["LA","BL"])
# test is_correct_route
assert is_correct_route(network,route)
assert is_correct_route(network,["BL"])
assert is_correct_route(network,["BL","LA","AB"])
assert not is_correct_route(network,["BL","AB"])
assert not is_correct_route(network,["LA","BL"])
assert not is_correct_route(network,["AB","LA","BL"])
# test get_start, get_end
assert "Brussel" == get_start(network,"BL")
assert "Antwerpen" == get_start(network,"AB")
assert "Leuven" == get_start(network,"LA")
assert "Leuven" == get_end(network,"BL")
assert "Brussel" == get_end(network,"AB")
assert "Antwerpen" == get_end(network,"LA")
# test get_length, get_speed, get_time
assert float_equals(30.0,get_length(network,"BL"))
assert float_equals(61.0,get_length(network,"LA"))
assert float_equals(44.0,get_length(network,"AB"))
assert 120 == get_speed(network,"BL")
assert 120 == get_speed(network,"LA")
assert 120 == get_speed(network,"AB")
assert float_equals(30.0/120,get_time(network,"BL"))
assert float_equals(61.0/120,get_time(network,"LA"))
assert float_equals(44.0/120,get_time(network,"AB"))
# test get_shortest_route_floyd
assert ["BL"] == get_shortest_route_floyd(network,"Brussel","Leuven")
assert ["AB"] == get_shortest_route_floyd(network,"Antwerpen","Brussel")
assert ["LA"] == get_shortest_route_floyd(network,"Leuven","Antwerpen")
assert ["LA"] == get_shortest_route_floyd(network,"Leuven","Antwerpen",["Brussel"])
assert ["BL","LA"] == get_shortest_route_floyd(network,"Brussel","Antwerpen")
assert ["AB","BL"] == get_shortest_route_floyd(network,"Antwerpen","Leuven")
assert ["LA","AB"] == get_shortest_route_floyd(network,"Leuven","Brussel")
assert ["LA","AB"] == get_shortest_route_floyd(network,"Leuven","Brussel",[])
# test get_quickest_route_via
assert ["BL"] == get_quickest_route_via(network,"Brussel","Leuven",[])
assert ["AB"] == get_quickest_route_via(network,"Antwerpen","Brussel",[])
assert ["LA"] == get_quickest_route_via(network,"Leuven","Antwerpen",[])
assert ["BL","LA"] == get_quickest_route_via(network,"Brussel","Antwerpen",[])
assert ["BL","LA"] == get_quickest_route_via(network,"Brussel","Antwerpen",["Leuven"])
assert ["AB","BL"] == get_quickest_route_via(network,"Antwerpen","Leuven",[])
assert ["AB","BL"] == get_quickest_route_via(network,"Antwerpen","Leuven",["Brussel"])
assert ["LA","AB"] == get_quickest_route_via(network,"Leuven","Brussel",[])
assert ["LA","AB"] == get_quickest_route_via(network,"Leuven","Brussel",["Antwerpen"])
print "TESTS SUCCEEDED !!!"
|
[
"rodriguevb@gmail.com"
] |
rodriguevb@gmail.com
|
fdeb9a7da4362e0cae6f29f5124a6e3724e7c036
|
1500eab2114141fcdd105eb1d87ec55e3cf39509
|
/booking/migrations/0001_initial.py
|
89d792f9a2bdebd01ca7349ef62665d9c77fabc1
|
[] |
no_license
|
PraffulS/udaanBoxOffice
|
113d21e57242147fcb0866bf899a3281772393a6
|
f3f00a853158320141b5c558da9dde9c54b5b162
|
refs/heads/master
| 2022-12-08T05:41:58.064479
| 2020-09-06T12:46:41
| 2020-09-06T12:46:41
| 293,275,957
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,070
|
py
|
# Generated by Django 2.1.2 on 2018-10-14 10:30
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the ``booking`` app: a ``screen`` table and a
    ``seat`` table, each seat belonging to exactly one screen."""

    # First migration of this app — no dependencies on other migrations.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='screen',
            fields=[
                # Explicit auto-incrementing PK instead of Django's default 'id'.
                ('screenId', models.AutoField(primary_key=True, serialize=False)),
                ('screenName', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='seat',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Row letter (single character) and seat number within the row.
                ('rowName', models.CharField(max_length=1)),
                ('seatNo', models.IntegerField()),
                # Small-int flags, both defaulting to 0; presumably 0/1
                # booking status and aisle indicator — TODO confirm against
                # the booking views.
                ('status', models.SmallIntegerField(default=0)),
                ('isAisle', models.SmallIntegerField(default=0)),
                # Deleting a screen cascades to its seats.
                ('screenId', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='booking.screen')),
            ],
        ),
    ]
|
[
"praffulsolanki00@gmail.com"
] |
praffulsolanki00@gmail.com
|
2db6670e8f3365a22d069c29d45a36415a913dd7
|
93f47ba04fc18c4e537f0a48fe6232e2a89a4d30
|
/examples/adspygoogle/dfp/v201403/inventory_service/get_top_level_ad_units.py
|
d4b698181effba62d933f5282e24be01a852d354
|
[
"Apache-2.0"
] |
permissive
|
jasonshih/googleads-python-legacy-lib
|
c56dc52a1dab28b9de461fd5db0fcd6020b84a04
|
510fad41ecf986fe15258af64b90f99a96dc5548
|
refs/heads/master
| 2021-04-30T22:12:12.900275
| 2015-03-06T15:35:21
| 2015-03-06T15:35:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,398
|
py
|
#!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all child ad units of the effective root ad unit.
To create ad units, run create_ad_units.py
Tags: InventoryService.getAdUnitsByStatement
"""
__author__ = 'Nicholas Chen'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
from adspygoogle.dfp import DfpUtils
def main(client):
  """Print every direct child ad unit of the network's effective root ad unit.

  Args:
    client: An initialized DfpClient used to obtain the DFP services.
  """
  # Initialize appropriate service.
  inventory_service = client.GetService('InventoryService', version='v201403')
  network_service = client.GetService('NetworkService', version='v201403')

  # GetCurrentNetwork returns a list; the first element holds the network.
  root_id = network_service.GetCurrentNetwork()[0]['effectiveRootAdUnitId']

  # Create a statement to select the children of the effective root ad unit.
  values = [{
      'key': 'id',
      'value': {
          'xsi_type': 'TextValue',
          'value': root_id
      }
  }]
  query = 'WHERE parentId = :id'
  statement = DfpUtils.FilterStatement(query, values)

  # Get ad units by statement, paging until an empty result page comes back.
  while True:
    response = inventory_service.GetAdUnitsByStatement(
        statement.ToStatement())[0]
    ad_units = response.get('results')
    if ad_units:
      # Display results.
      for ad_unit in ad_units:
        print ('Ad unit with ID \'%s\' and name \'%s\' was found.'
               % (ad_unit['id'], ad_unit['name']))
      # Advance to the next page of results.
      statement.IncreaseOffsetBy(DfpUtils.PAGE_LIMIT)
    else:
      break

  print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
  # Initialize client object; the path points at the client-library root
  # five directories up from this example.
  dfp_client = DfpClient(path=os.path.join('..', '..', '..', '..', '..'))
  main(dfp_client)
|
[
"emeralddragon88@gmail.com"
] |
emeralddragon88@gmail.com
|
2c764e0b4916bb57641d73e9a3508aa05e467e62
|
911fa448f645db3c14e87895608f8c74072b4a42
|
/AIRCANVAS/AIRCANVAS_final.py
|
010443075560d95da4d3c2c3815556abd860cc8e
|
[] |
no_license
|
Ayush-Ranjan/Air-Canvas
|
0cf70da9dd6fa8c7557a9c8f58a1b8b30e007c57
|
656f9b800a23ee94c9c98ed398c6aac93096f624
|
refs/heads/master
| 2023-02-12T11:41:49.199247
| 2021-01-08T18:02:35
| 2021-01-08T18:02:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,718
|
py
|
import cv2
import numpy as np
import copy
import math
import pygame
#from appscript import app
# Environment:
# OS : Mac OS EL Capitan
# python: 3.5
# opencv: 2.4.13
# parameters
# --- Capture-region and image-processing parameters -------------------------
cap_region_x_begin=0.5 # start point/total width
cap_region_y_end=0.8 # start point/total width
threshold = 60 # BINARY threshold
blurValue = 41 # GaussianBlur parameter
bgSubThreshold = 50
learningRate = 0

# --- Pygame drawing surface --------------------------------------------------
pygame.init()
screen=pygame.display.set_mode([1000,1000])
running=True
screen.fill((255,255,255))  # white canvas background
click=False
pos_prev=None

# variables
isBgCaptured = 0 # bool, whether the background captured
##clock = pygame.time.Clock()
# Working copies of the canvas used by the drawing/smoothing pipeline.
realdraw=screen.copy()
canvas = screen.copy()
smoothDraw=screen.copy()
orig=(500,500)
drawpermit=False
scale=False

# --- Hand-histogram calibration state (filled in by draw_rect/skindet) ------
hand_hist = None
traverse_point = []
total_rectangle = 9
hand_rect_one_x = None
hand_rect_one_y = None
hand_rect_two_x = None
hand_rect_two_y = None
is_hand_hist_created = False
counterforsmoothing=0

# Window icon; 'pen.png' must exist in the working directory.
penImage=pygame.image.load('pen.png')
pygame.display.set_icon(penImage)

# --- Brush colors (RGB) and stroke state -------------------------------------
black=(0,0,0)
violet=(58,0,84)
green=(0,255,0)
red=(255,0,0)
white=(255,255,255)
color=black
thick=7
distprev=(0,0)
def nothing(x):
    """Do-nothing callback: cv2.createTrackbar requires a callable, but the
    trackbar value is polled in the main loop via getTrackbarPos instead."""
    return None
def skindet(frame):
    """Sample skin colour under the nine calibration rectangles and return
    a normalized Hue-Saturation histogram for later back-projection.

    Relies on the module globals ``hand_rect_one_x``/``hand_rect_one_y``
    having been populated by draw_rect() first.
    """
    global hand_rect_one_x, hand_rect_one_y
    hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # Stack the nine 10x10 sample patches into one 90x10 ROI image.
    roi = np.zeros([90, 10, 3], dtype=hsv_frame.dtype)

    for i in range(total_rectangle):
        roi[i * 10: i * 10 + 10, 0: 10] = hsv_frame[hand_rect_one_x[i]:hand_rect_one_x[i] + 10,hand_rect_one_y[i]:hand_rect_one_y[i] + 10]

    # 2-D histogram over Hue (0-180) and Saturation (0-256) channels only.
    hand_hist = cv2.calcHist([roi], [0, 1], None, [180, 256], [0, 180, 0, 256])
    return cv2.normalize(hand_hist, hand_hist, 0, 255, cv2.NORM_MINMAX)
def draw_rect(frame):
    """Draw a 3x3 grid of green 10x10 calibration rectangles on *frame* and
    store their corner coordinates in module-level globals for skindet().

    NOTE(review): ``tempo = frame`` aliases the input rather than copying,
    so the rectangles are drawn directly onto the caller's frame.
    """
    rows, cols, _ = frame.shape
    tempo=frame
    global total_rectangle, hand_rect_one_x, hand_rect_one_y, hand_rect_two_x, hand_rect_two_y

    # Top-left corners of the nine sample boxes, spread around frame centre
    # (rows 6/20, 9/20, 12/20 by cols 9/20, 10/20, 11/20).
    hand_rect_one_x = np.array([6 * rows / 20, 6 * rows / 20, 6 * rows / 20, 9 * rows / 20, 9 * rows / 20, 9 * rows / 20, 12 * rows / 20,12 * rows / 20, 12 * rows / 20], dtype=np.uint32)
    hand_rect_one_y = np.array([9 * cols / 20, 10 * cols / 20, 11 * cols / 20, 9 * cols / 20, 10 * cols / 20, 11 * cols / 20, 9 * cols / 20,10 * cols / 20, 11 * cols / 20], dtype=np.uint32)

    # Bottom-right corners: each sample box is 10x10 pixels.
    hand_rect_two_x = hand_rect_one_x + 10
    hand_rect_two_y = hand_rect_one_y + 10

    for i in range(total_rectangle):
        # cv2 points are (x, y) = (col, row), hence y-array first.
        cv2.rectangle(tempo, (hand_rect_one_y[i], hand_rect_one_x[i]),(hand_rect_two_y[i], hand_rect_two_x[i]),(0, 255, 0), 1)

    return tempo
def hist_masking(frame, hist):
    """Keep only the pixels of *frame* whose Hue-Saturation values match
    the calibration histogram *hist*.

    Back-projects the histogram onto the frame, smooths the probability
    map with a 31x31 elliptical filter, thresholds it to a binary mask
    and ANDs the mask with the original frame.
    """
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    dst = cv2.calcBackProject([hsv], [0, 1], hist, [0, 180, 0, 256], 1)

    # Smooth the back-projection so isolated matches merge into blobs.
    disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (31, 31))
    cv2.filter2D(dst, -1, disc, dst)

    ret, thresh = cv2.threshold(dst, 150, 255, cv2.THRESH_BINARY)

    # thresh = cv2.dilate(thresh, None, iterations=5)

    # Replicate the single-channel mask to 3 channels so it can be ANDed
    # with the colour frame.
    thresh = cv2.merge((thresh, thresh, thresh))

    return cv2.bitwise_and(frame, thresh)
def removeBG(frame):
    """Isolate the hand: apply the background-subtraction model, then the
    skin-histogram mask, and return a blurred/thresholded/dilated mask image.

    NOTE(review): ``bgModel`` and ``hand_hist`` are module globals that must
    be initialized elsewhere (background/histogram capture step) before this
    is called — neither is created in this function.
    """
    global hand_rect_one_x,hand_rect_one_y,maskp
    #newimg=hist_masking(frame,hand_hist)
    fgmask = bgModel.apply(frame,learningRate=-1)
    # Erode once with a 3x3 kernel to knock out single-pixel noise.
    kernel2 = np.ones((3, 3), np.uint8)
    fgmask = cv2.erode(fgmask, kernel2, iterations=1)
    res = cv2.bitwise_and(frame, frame, mask=fgmask)
    hist_mask_image = hist_masking(res, hand_hist)
    kernel = np.ones((16, 16), np.uint8)
    # NOTE(review): despite the name, this is still a 3-channel image here;
    # cv2.threshold below is applied per channel.
    gray=hist_mask_image
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY)[1]
    #thresh = cv2.erode(thresh, kernel2, iterations=2)
    # Heavy dilation (16x16, twice) fills holes so the hand is one blob.
    thresh = cv2.dilate(thresh, kernel, iterations=2)
    ##cv2.imshow('onlyhist',hist_mask_image)
    ##cv2.imshow('onlybg',res)
    return thresh
# Camera setup: device 0, plus a trackbar window controlling brush thickness.
camera = cv2.VideoCapture(0)
camera.set(10,200)
cv2.namedWindow('thickness_brush')
cv2.createTrackbar('trh1', 'thickness_brush', thick, 100, nothing)
# Main event loop: capture a frame, track the fingertip, draw into pygame.
while camera.isOpened() and running:
    ret, frame = camera.read()
    # Until the skin histogram is captured, show the sampling grid overlay.
    if(is_hand_hist_created==False):
        show_frame = draw_rect(frame)
        show_frame=cv2.flip(show_frame,1)
    thick = cv2.getTrackbarPos('trh1', 'thickness_brush')
    frame = cv2.bilateralFilter(frame, 5, 50, 100)  # smoothing filter
    frame = cv2.flip(frame, 1)  # flip the frame horizontally
    xlimit=frame.shape[1]-cap_region_x_begin * frame.shape[1]
    ylimit=cap_region_y_end * frame.shape[0]
    # Blue rectangle marks the capture ROI in the top-right of the frame.
    cv2.rectangle(frame, (int(cap_region_x_begin * frame.shape[1]), 0),(frame.shape[1], int(ylimit)), (255, 0, 0), 2)
    # cv2.imshow('original', frame)
    # Let the pygame window close cleanly.
    for event in pygame.event.get():
        if event.type==pygame.QUIT:
            running=False
    # Main operation
    if is_hand_hist_created == True:  # this part wont run until background captured
        # clip the ROI
        frame=frame[0:int(cap_region_y_end * frame.shape[0]),int(cap_region_x_begin * frame.shape[1]):frame.shape[1]]
        imager=removeBG(frame)
        ##imager=hist_mask_image#[0:int(cap_region_y_end * frame.shape[0]),int(cap_region_x_begin * frame.shape[1]):frame.shape[1]]
        cv2.imshow('masker', imager)
        #img = imager[0:int(cap_region_y_end * frame.shape[0]),int(cap_region_x_begin * frame.shape[1]):frame.shape[1]]
        img = cv2.cvtColor(imager,cv2.COLOR_HSV2BGR)
        img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
        #img=imager
        # Work in a fixed 1000x1000 coordinate space for tracking.
        newx=1000
        newy=1000
        newsize = (newx, newy)
        img = cv2.resize(img,newsize)
        frame = cv2.resize(frame,newsize)
        # get the coutours
        thresh1 = copy.deepcopy(img)
        contours, hierarchy = cv2.findContours(thresh1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        length = len(contours)
        drawing = np.zeros(img.shape, np.uint8)
        minDist = []
        #print(length)
        if length > 0:
            res = max(contours, key=cv2.contourArea)
            #res=0
            if pos_prev is not None:
                # Keep only contours whose topmost point stayed near the
                # previous fingertip position (rejects sudden jumps).
                for i in range(length):  # find the biggest contour (according to area)
                    temp = contours[i]
                    area = cv2.contourArea(temp)
                    M = cv2.moments(temp)
                    top= tuple(temp[temp[:, :, 1].argmin()][0])
                    cX=top[0]
                    cY=top[1]
                    #print(pos_prev,"*******",top)
                    dx = abs(pos_prev[0] - cX)
                    dy = abs(pos_prev[1] - cY)
                    #D = np.sqrt(dx*dx+dy*dy)
                    #print(dx,"---",dy)
                    if(dx < 50 or dy<50):
                        cv2.drawContours(frame, temp, 0, (255, 255, 0), 2)
                        minDist.append(temp)
                if len(minDist)>0:
                    res= max(minDist, key=cv2.contourArea)
            else:
                # First frame: seed the tracker at the centre of the space.
                pos_prev=(500,500)
            if len(minDist)>0:
                # Fingertip = highest point (smallest y) on the convex hull.
                hull = cv2.convexHull(res)
                m=10000
                pos=0
                for i in range(hull.shape[0]):
                    if hull[i][0][1]<m:
                        m=hull[i][0][1]
                        pos=i
                pos_next=tuple(hull[pos][0])
                if pos_prev is not None:
                    # Small movement: draw a stroke; large movement: jump pen.
                    if(abs(pos_prev[0]-pos_next[0])<=100 and abs(pos_prev[1]-pos_next[1])<=100):
                        end=(orig[0]-int((pos_prev[0]-pos_next[0])*1000/newx),orig[1]-int((pos_prev[1]-pos_next[1])*1000/newy))
                        if(drawpermit):
                            pygame.draw.line(realdraw,color,orig,end,thick)
                            screen.blit(realdraw, (0, 0))
                            # Every 4th point, draw a smoothing segment over the jitter.
                            if(counterforsmoothing==0):
                                distprev=orig
                            elif(counterforsmoothing==3):
                                pygame.draw.line(smoothDraw,color,distprev,end,thick)
                                realdraw.blit(smoothDraw, (0, 0))
                                counterforsmoothing= -1
                            counterforsmoothing= counterforsmoothing+1
                            #print(distprev)
                        else:
                            counterforsmoothing= 0
                        orig=end
                        pos_prev=pos_next
                        # Grey square shows the current pen position.
                        pygame.draw.rect(screen, (128,128,128), (orig[0],orig[1],15,15))
                    else:
                        end=(orig[0]-int((pos_prev[0]-pos_next[0])*1000/newx),orig[1]-int((pos_prev[1]-pos_next[1])*1000/newy))
                        orig=(int(pos_next[0]*1000/newx),int(pos_next[1]*1000/newy))
                        if(drawpermit):
                            pygame.draw.line(realdraw,(0,0,0),end,end,thick)
                        pos_prev=pos_next
                cv2.circle(frame, pos_prev, 3, (255,0,0), 40)
        cv2.imshow('mask',img)
    # Keyboard OP
    if(is_hand_hist_created):
        frame=cv2.resize(frame,(500,500))
        cv2.imshow('photo',frame)
    else:
        cv2.imshow('photo',show_frame)
    k = cv2.waitKey(1)
    if k == 27:  # press ESC to exit
        camera.release()
        cv2.destroyAllWindows()
        break
    elif k == ord('b'):  # black brush colour
        color=black
    elif k == ord('v'):  # violet brush colour
        color=violet
    elif k == ord('r'):  # red brush colour
        color= red
    elif k == ord('g'):  # green brush colour
        color=green
    elif k == ord('d'):  # toggle drawing on/off
        if(drawpermit):
            drawpermit=False
        else:
            drawpermit=True
            print("Start")
    elif k == ord('e'):  # eraser (draw in white)
        color=white
    elif k == ord('z'):  # capture skin histogram + background model
        is_hand_hist_created = True
        hand_hist = skindet(show_frame)
        bgModel = cv2.createBackgroundSubtractorMOG2()
        isBgCaptured = 1
        print( '!!!Background Captured!!!')
    pygame.display.flip()
|
[
"noreply@github.com"
] |
Ayush-Ranjan.noreply@github.com
|
efbc28ab8a8b660f34073bfae14828e2728f4255
|
29ffe40a8d0b1cb132da970601973651d7d19991
|
/Working Code.py
|
52008433bdb855307d31b63161d6b9f0fd0fc566
|
[] |
no_license
|
bbbilibili/Continuous-Attribute-Reduction
|
66e7256fc68b22b56c30e45aaaf8b5d5eb4a14a8
|
a02783c7884f58dfa5dce6b0c86d5d748ee8621b
|
refs/heads/master
| 2021-09-22T21:50:09.506144
| 2018-09-17T13:37:51
| 2018-09-17T13:37:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,460
|
py
|
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
from sklearn import preprocessing
import matplotlib.pyplot as plt
class Wine:
    """Attribute reduction on the red-wine-quality dataset (Python 2 code).

    Discretises each continuous column via KMeans clustering, then measures
    per-attribute significance through "decision entropy" (the number of
    consistent equivalence classes).
    """
    def __init__(self):
        self.df = pd.read_csv("winequality-red.csv")
        self.init_columns = self.df.columns
        self.labels=self.df["quality"]            # decision attribute
        self.thresholds=[]                        # per-column cluster radius used
        self.cluster_centers=[]                   # per-column KMeans centres
        self.normalised = pd.DataFrame()          # cached normalised copy (read_file)
        self.decision_entropy = []
        self.TDE = 0 # total decision entropy
    def normalise(self): # to normalise the data frame to such that each column has values between 0 and 1
        """Min-max scale every column to [0, 1]; the label column is restored."""
        x = self.df.values
        min_max_scaler = preprocessing.MinMaxScaler()
        x_scaled = min_max_scaler.fit_transform(x)# uses formula (xi - min)/(max - min)
        self.df = pd.DataFrame(x_scaled)
        self.df[11]=self.labels
    def radius(self,column_number):
        """Distance from the column mean to its maximum value."""
        a=np.array(self.df[column_number])
        b=np.mean(a)
        radius = np.max(a)-b# mean is either distance to maximum or minimum, from the centroid
        return radius
    def assign_clusters(self):
        """Replace each column by 1-based KMeans cluster labels.

        k grows from 2 to 10 until no point lies farther than the threshold
        (0.45 * radius) from every cluster centre.
        """
        columns = self.df.columns
        for i in columns:
            threshold = self.radius(i)*0.45
            col=np.resize(self.df[i].values,(len(self.df),1)) # the column in a numpy format
            k=2
            while k<=10 :
                kmeans = KMeans(n_clusters=k).fit(col)
                cc=kmeans.cluster_centers_
                count = 0 # number of points that are outliers
                for row in xrange(len(self.df)):
                    x=col[row][0]
                    num_clusters = 0
                    for j in cc: # to check if the entry in the data frame lies in one cluster at least
                        distance = abs(j-x)
                        if distance<threshold:
                            num_clusters = 1
                    if num_clusters == 0:
                        count = count + 1
                if count == 0: # if only 0 outliers are there stick to the number of clusters generated
                    break
                else :
                    k = k + 1
                    continue
            k = min(10,k)
            kmeans = KMeans(n_clusters=k).fit(col)
            self.df[i] = kmeans.predict(col)+1
            self.thresholds.append(threshold)
            self.cluster_centers.append(kmeans.cluster_centers_)
    def reassign_clusters(self,col_number,threshold):#i is column number
        """Re-cluster one column with an explicit threshold (uses self.normalised)."""
        i=col_number
        col=np.resize(self.normalised[i].values,(len(self.df),1))
        k=2
        while k<=10 :
            kmeans = KMeans(n_clusters=k).fit(col)
            cc=kmeans.cluster_centers_
            count = 0 # number of points that are outliers
            for row in xrange(len(self.df)):
                x=col[row][0]
                num_clusters = 0
                for j in cc: # to check if the entry in the data frame lies in one cluster at least
                    distance = abs(j-x)
                    if distance<threshold:
                        num_clusters = 1
                if num_clusters == 0:
                    count = count + 1
            if count == 0: # if only 3 outliers or less are there stick to the number of clusters generated
                break
            else :
                k = k + 1
                continue
        k = min(10,k)
        kmeans = KMeans(n_clusters=k).fit(col)
        self.df[i] = kmeans.predict(col)+1
        self.thresholds[col_number]=threshold
        self.cluster_centers[col_number]=kmeans.cluster_centers_
    def plot_graphs(self):
        """For each column, plot the k needed to cover all points vs. threshold."""
        columns = self.df.columns
        for i in columns:
            col=np.resize(self.df[i].values,(len(self.df),1))
            threshold = np.arange(0.01,self.radius(i),0.01)
            resulting_k = []
            for j in threshold:
                num_excluded = 1
                k=2
                while num_excluded > 0:
                    kmeans = KMeans(n_clusters=k).fit(col)
                    cc=kmeans.cluster_centers_
                    num_excluded = 0
                    for row in xrange(len(self.df)):
                        x=col[row][0]
                        num_clusters = 0
                        for z in cc: # to check if the entry in the data frame lies in one cluster at least
                            distance = abs(z-x)
                            if distance<j:
                                num_clusters = 1
                        if num_clusters == 0:
                            num_excluded = num_excluded + 1
                    k = k+1
                resulting_k.append(k)
            filename="plot"+str(i)+".png"
            plot=plt.figure()
            plt.figure(figsize=(5,5))
            plt.ylim(0,100)
            plt.xlim(0,self.radius(i))
            plt.title("column "+str(i))
            plt.xlabel("lambda")
            plt.ylabel("k")
            plt.plot(threshold,np.array(resulting_k),color="black")
            plt.savefig(filename)
    def read_file(self, filename):
        """Load a previously-saved normalised CSV into self.normalised."""
        names= range(0,12)
        self.normalised = pd.read_csv(filename,names=names)
        self.normalised.drop(self.normalised.index[0],inplace = True)
        self.normalised[11]=self.labels
    def total_decision_entropy(self,attr=-1): # number of equivalence classes
        """Count equivalence classes whose rows all share one label.

        attr=-1 uses all condition attributes and stores the result in
        self.TDE; otherwise attr is dropped first and the count is returned.
        """
        if attr==-1:
            df = self.df
        else :
            df = self.df.drop(attr,1)
        dic = {}
        # Group row indices by their (condition-attribute) tuple.
        for i in range(len(df)):
            a=tuple( df.iloc[i][:-1])
            b=list( df.iloc[i][-1:])
            if not a in dic :
                dic[a]=[]
                dic[a].append(i)
            else:
                dic[a].append(i)
        # Drop multi-row classes whose rows disagree on the label.
        z=[]
        for i in dic:
            x=dic[i]
            if len(x)==1:
                continue
            a=x[0]
            val = self.df.iloc[a][11]
            flag = 1
            for j in x:
                temp = self.df.iloc[j][11]
                if temp != val :
                    flag = 0
                    break
            if flag==0 :
                z.append(i)
        for i in z:
            dic.pop(i)
        if attr==-1:
            self.TDE=len(dic)
            return
        else:
            return len(dic)
    def calc_significance_measures(self):
        """Significance of each attribute = (TDE - entropy-without-it) / 1600."""
        l = len(self.df.columns)-1
        self.decision_entropy=[]
        self.significances=[]
        for i in xrange(l):
            sig = self.total_decision_entropy(i)
            self.decision_entropy.append(sig)
            self.significances.append((self.TDE-sig)/1600.0)
        return
    def plot_graphs2(self):
        """Like plot_graphs, but distance also includes the label dimension."""
        temp=self.normalised.copy()
        self.df=temp
        columns = self.df.columns
        qmean=np.mean(self.df[11])
        for i in columns:
            col=np.resize(self.df[i].values,(len(self.df),1))
            threshold = np.arange(0.05,0.5,0.05)
            resulting_k = []
            for j in threshold:
                num_excluded = 1
                k=2
                while num_excluded > 0 and k<200:
                    kmeans = KMeans(n_clusters=k).fit(col)
                    cc=kmeans.cluster_centers_
                    num_excluded = 0
                    print k
                    for row in xrange(len(self.df)):
                        x=col[row][0]
                        num_clusters = 0
                        for z in cc: # to check if the entry in the data frame lies in one cluster at least
                            # 2-D distance: label deviation from mean + value deviation.
                            distance = np.sqrt(((qmean-self.df.iloc[row][11])**2)+(x-z)**2)
                            if distance<j:
                                num_clusters = 1
                        if num_clusters == 0:
                            num_excluded = num_excluded + 1
                    k = k+1
                resulting_k.append(k)
            filename="s_plot"+str(i)+".png"
            plot=plt.figure()
            plt.figure(figsize=(5,5))
            plt.ylim(0,300)
            plt.xlim(0,0.8)
            plt.title("column "+str(i))
            plt.xlabel("lambda")
            plt.ylabel("k")
            plt.plot(threshold,np.array(resulting_k),color="black")
            plt.savefig(filename)
    def assign_clusters2(self):
        """Cluster columns with a fixed small threshold, allowing up to 50 clusters."""
        columns = self.df.columns
        for i in columns:
            threshold = 0.01
            col=np.resize(self.df[i].values,(len(self.df),1)) # the column in a numpy format
            k=2
            while k<=50 :
                kmeans = KMeans(n_clusters=k).fit(col)
                cc=kmeans.cluster_centers_
                count = 0 # number of points that are outliers
                for row in xrange(len(self.df)):
                    x=col[row][0]
                    num_clusters = 0
                    for j in cc: # to check if the entry in the data frame lies in one cluster at least
                        distance = abs(j-x)
                        if distance<threshold:
                            num_clusters = 1
                    if num_clusters == 0:
                        count = count + 1
                if count == 0: # if only 0 outliers are there stick to the number of clusters generated
                    break
                else :
                    k = k + 1
                    continue
            k = min(50,k)
            kmeans = KMeans(n_clusters=k).fit(col)
            self.df[i] = kmeans.predict(col)+1
            self.thresholds.append(threshold)
            self.cluster_centers.append(kmeans.cluster_centers_)
def main():
    """Run the full pipeline and compare significances before/after
    folding the (squared) label into every attribute ("compatibility")."""
    wine = Wine()
    wine.normalise()
    #wine.plot_graphs()
    # Save the normalised data, then reload it so self.normalised is populated.
    df = pd.DataFrame(wine.df)
    df.to_csv("NormalisedBeforeCompatibility.csv")
    wine.read_file("NormalisedBeforeCompatibility.csv")
    wine.assign_clusters()
    wine.df[11]=wine.labels
    df = pd.DataFrame(wine.df)
    df.to_csv("ResultBeforeReassigning.csv")
    dd=pd.DataFrame()
    dd['thresholds'] = wine.thresholds
    dd['cluster_centers'] = wine.cluster_centers
    dd.to_csv("threshold_clusterCentersBefore.csv")
    # Hand-tuned thresholds for the columns whose automatic choice was poor.
    reassign_array = [0,2,5,6,7,8,9,10]
    thresholds = [0.26,0.42,0.45,0.3,0.36,0.28,0.28,0.24]
    for x,y in zip(reassign_array,thresholds):
        wine.reassign_clusters(x,y)
    wine.df[11]=wine.labels
    df=pd.DataFrame(wine.df)
    df.to_csv("ResultAfterReassigning.csv")
    dd=pd.DataFrame()
    dd['thresholds'] = wine.thresholds
    dd['cluster_centers'] = wine.cluster_centers
    dd.to_csv("threshold_clusterCentersAfter.csv")
    wine.total_decision_entropy()
    wine.calc_significance_measures()
    print "The total decision entropy without compatibility considerations is ", wine.TDE
    print "The decision entropies of each column without compatibility considerations are "
    print (wine.decision_entropy)
    print "The significances without compatibility considerations are "
    print (wine.significances)
    sig_before = wine.significances
    # "Compatibility": add quality^2 to every condition attribute, renormalise.
    a=wine.normalised.copy()
    k=a[11]*a[11]
    for i in a.columns[:-1]:
        a[i]=a[i]+k
    wine.df=a
    wine.normalise()
    df = pd.DataFrame(wine.df)
    df.to_csv("NormalisedAfterCompatibility.csv")
    wine.assign_clusters2()
    wine.df[11]=wine.labels
    df=pd.DataFrame(wine.df)
    df.to_csv("ResultAfterCompatibility.csv")
    dd=pd.DataFrame()
    dd['thresholds'] = wine.thresholds
    dd['cluster_centers'] = wine.cluster_centers
    dd.to_csv("threshold_clusterCentersAfterCompatibility.csv")
    wine.total_decision_entropy()
    wine.calc_significance_measures()
    print "The total decision entropy after compatibility considerations is ", wine.TDE
    print "The decision entropies of each column after compatibility considerations are "
    print (wine.decision_entropy)
    print "The significances after compatibility considerations are "
    print (wine.significances)
    sig_after = wine.significances
    print "BEFORE vs AFTER"
    print (sig_before)
    print (sig_after)
# Run only when executed as a script.
if __name__ == '__main__':
    main()
|
[
"bhabesh@marax.in"
] |
bhabesh@marax.in
|
e864669195a62b761c60d70c9bca59aea2d89030
|
e7ee60f24d3cc4ae9d5e525de586c7e313e4e727
|
/SquareFinder2/main.py
|
f1cfd53393e1a97c16b3e66cbdaebf346d7387b6
|
[] |
no_license
|
alda-dhif17/SquareFinder
|
78ce570ab793147dd922e25af43ec2519caf6080
|
5e5048ae74bd54555146809ce4965fc9162e5c7b
|
refs/heads/master
| 2020-09-15T09:49:13.886597
| 2019-12-13T15:18:46
| 2019-12-13T15:18:46
| 223,414,891
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,683
|
py
|
import sys
from threading import Thread
SPACING = 5
class Point(object):
    """A sampled grid cell that measures how far its value extends along
    each axis from its position."""

    def __init__(self, x, y, n):
        self.x = x
        self.y = y
        self.n = n
        self.xx = [0, 0]  # run lengths to the left / right of (x, y)
        self.xy = [0, 0]  # run lengths above / below (x, y)

    def discover_x(self, a, d, points):
        """Count consecutive cells equal to self.n walking horizontally in direction d."""
        stop = -1 if d < 0 else len(a[self.y])
        slot = 0 if d < 0 else 1
        for col in range(self.x + d, stop, d):
            if a[self.y][col] != self.n:
                break
            self.xx[slot] += 1

    def discover_y(self, a, d, points):
        """Count consecutive cells equal to self.n walking vertically in direction d."""
        stop = -1 if d < 0 else len(a)
        slot = 0 if d < 0 else 1
        for row in range(self.y + d, stop, d):
            if a[row][self.x] != self.n:
                break
            self.xy[slot] += 1

    def discover(self, a, points):
        """Run the four directional scans concurrently and wait for them all."""
        workers = []
        for direction in (-1, 1):
            for scan in (self.discover_x, self.discover_y):
                w = Thread(target=scan, args=(a, direction, points,))
                w.start()
                workers.append(w)
        for w in workers:
            w.join()

    def __str__(self):
        xx_txt = ','.join(str(v) for v in self.xx)
        xy_txt = ','.join(str(v) for v in self.xy)
        return 'Point({:d}, {:d}, {:d}: [{}], [{}])'.format(
            self.x, self.y, self.n, xx_txt, xy_txt)
def gen_points(a):
    """Sample the grid every SPACING cells and wrap each sample in a Point."""
    return [Point(x, y, a[y][x])
            for y in range(0, len(a), SPACING)
            for x in range(0, len(a[y]), SPACING)]
def find_point(ps, x, y):
    """Return the first point in ps located exactly at (x, y).

    Raises IndexError if no such point exists (same as the original filter).
    """
    matches = [p for p in ps if p.x == x and p.y == y]
    return matches[0]
def step(a, x, y, ox, oy, n):
    """Check that the border of the rectangle (ox, oy)..(x-1, y-1) is all n.

    Returns True when both horizontal edges and both vertical edges of the
    (inclusive) rectangle contain only the value n.
    """
    right = x - 1
    bottom = y - 1
    # Top and bottom edges.
    for col in range(ox, right + 1):
        if a[oy][col] != n or a[bottom][col] != n:
            return False
    # Left and right edges.
    for row in range(oy, bottom + 1):
        if a[row][ox] != n or a[row][right] != n:
            return False
    return True
def find_square(a, ox, oy):
    """Grow a square with top-left corner (ox, oy) and return its side length.

    Walks down the diagonal while cells keep the starting value, then shrinks
    the candidate square until its whole border passes step().

    Fix vs. original: the diagonal walk now stops at the grid edge; the
    original indexed past the last row/column (IndexError) whenever the run
    of equal values reached the boundary.
    """
    x, y = ox, oy
    n = a[y][x]
    l = 0
    while y < len(a) and x < len(a[y]) and a[y][x] == n:
        x += 1
        y += 1
        l += 1
    print(ox, oy, l)
    print('='*32)
    # NOTE(review): step() receives l (the side length) as the value the
    # border must match; passing n (the cell value) may be the intent — confirm.
    while l > 0 and not step(a, x, y, ox, oy, l):
        print(x, y, l)
        x -= 1
        y -= 1
        l -= 1
    return l
def build_square(a, p):
    """Side length of the largest square anchored at point p."""
    side = find_square(a, p.x, p.y)
    return side
def evolve_square(a, x, y, s):
    """Hill-climb from (x, y) via threaded moves, returning the best
    (x, y, side) found.

    NOTE(review): `visited_points` and `maxi` are shared across threads with
    no lock, and the two closures are near-duplicates whose inner loop
    variables (`td`, `d`) shadow their own parameters — behaviour depends on
    thread timing; confirm this nondeterminism is acceptable.
    """
    maxi = dict(x=x, y=y, s=s)  # best square found so far
    visited_points = []
    t = []
    def move_x(x, y, td):
        # Try a one-cell horizontal move, recurse (via threads) if it improves.
        x += td
        if (x, y) in visited_points:
            return
        s = find_square(a, x, y)
        if s >= maxi['s']:
            maxi['x'] = x
            maxi['y'] = y
            maxi['s'] = s
        else:
            return
        visited_points.append((x, y))
        tt = []
        for td in (-1, 1):
            s = Thread(target=move_x, args=(x, y, td,))
            s.start()
            tt.append(s)
            s = Thread(target=move_y, args=(x, y, td,))
            s.start()
            tt.append(s)
        for s in tt:
            s.join()
    def move_y(x, y, d):
        # Vertical counterpart of move_x.
        y += d
        if (x, y) in visited_points:
            return
        s = find_square(a, x, y)
        if s >= maxi['s']:
            maxi['x'] = x
            maxi['y'] = y
            maxi['s'] = s
        else:
            return
        visited_points.append((x,y))
        tt = []
        for d in (-1, 1):
            s = Thread(target=move_x, args=(x, y, d,))
            s.start()
            tt.append(s)
            s = Thread(target=move_y, args=(x, y, d,))
            s.start()
            tt.append(s)
        for s in tt:
            s.join()
    # Seed the search in all four directions from the starting cell.
    for d in (-1, 1):
        s = Thread(target=move_x, args=(x, y, d,))
        s.start()
        t.append(s)
        s = Thread(target=move_y, args=(x, y, d,))
        s.start()
        t.append(s)
    for s in t:
        s.join()
    return maxi['x'], maxi['y'], maxi['s']
def main():
    """Load the grid from input.txt and report the best base square."""
    a = []
    with open('input.txt', 'r') as f:
        # Each line is a space-separated row of single-digit integers.
        a = [[int(c) for c in l.split(' ')] for l in f.read().replace('\r\n', '\n').split('\n')]
    points = gen_points(a)
    for p in points:
        p.discover(a, points)
    # Best candidate first: largest minimum run length across both axes.
    points.sort(key=lambda p: min(sum(p.xx), sum(p.xy)), reverse=True)
    s = build_square(a, points[0])
    print('Base-square: P(%d, %d) - Side: %d' % (points[0].x, points[0].y, s))
    # x, y, s = evolve_square(a, points[0].x, points[0].y, s)
    # print('Biggest-square: P(%d, %d) - Side: %d' % (x, y, s))
if __name__ == '__main__':
    main()
|
[
"monschein.matthias@gmail.com"
] |
monschein.matthias@gmail.com
|
e7feb22d7bffaffcae9f0b1c5136da1fe347eec7
|
d7ab436f96bddc22e31380dbc82f9aee4ed8a5b4
|
/helpers.py
|
12916b46bfdbf8ec696ee71bf4ec1eecb5eea671
|
[] |
no_license
|
micahwilliams23/kujua.github.io
|
cf428ad86fe467f903c2f64f7264d674d4cda664
|
d153024c530a4747a8b6105ebbc2742368b243b0
|
refs/heads/master
| 2020-12-08T06:34:20.090955
| 2020-01-10T01:18:45
| 2020-01-10T01:18:45
| 232,914,095
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,454
|
py
|
import os
import csv
from cs50 import SQL
from datetime import datetime
from flask import Flask, flash, jsonify, redirect, render_template, request, session
from flask_session import Session
from tempfile import mkdtemp
from werkzeug.exceptions import default_exceptions, HTTPException, InternalServerError
def resultsTable(searchQuery):
    """Render an HTML grid of cereal products whose name matches searchQuery.

    Queries the food database with a parameterized LIKE filter, builds the
    markup in memory (the original round-tripped every fragment through
    scrape/kellogg/table.txt), and still writes that file so any existing
    consumers of it keep working.

    Returns a dict: {"table": rendered HTML, "rowcount": number of matches}.
    """
    # get data from database (named :sq parameter -> safe from SQL injection)
    db = SQL("sqlite:///scrape/food.db")
    products = db.execute("SELECT * FROM cereal WHERE product_name LIKE :sq", sq="%"+searchQuery+"%")
    rowcount = len(products)

    # Build the markup in memory instead of write-then-read of a temp file.
    parts = ["<div class='row row-cols-xl-4 row-cols-lg-3 row-cols-md-2 explore-row'>"]
    for product in products:
        # NOTE(review): product fields are injected unescaped; apply
        # html.escape if names can contain markup — confirm with the data.
        parts.append(
            "<div class='col' id='explore-result-box'><div class='explore-table-element'><div>"+
            # add product image
            "<img class='explore-product-image' src='"+product["product_image_link"]+"'>"+
            # add product name
            "<text class='explore-product-name'>"+product["product_name"]+"</text>"+
            "</div></div></div>")
    parts.append("</div>")
    table = "".join(parts)

    # Keep writing the file for backward compatibility with existing readers.
    with open("scrape/kellogg/table.txt", "w") as out:
        out.write(table)

    return {"table":table, "rowcount":rowcount}
|
[
"micahwilliams@college.harvard.edu"
] |
micahwilliams@college.harvard.edu
|
f97dca8f5e0fb7a8da13c6b18b3448f486aad28b
|
f96f9cfebb81b42b38e25b62345e5e16c476fbaf
|
/model/labyrinth.py
|
04c1dbecb4d4ab9e3799e5e9069f446d8f83babe
|
[] |
no_license
|
Jerome-LorD/macP3var
|
bd0e6f9509f45e7179428c8df6d53b3af50a83fa
|
4be6ce2fa846814a9978949e1c7feea1970f8456
|
refs/heads/master
| 2023-01-11T23:38:59.343653
| 2020-11-12T17:34:59
| 2020-11-12T17:34:59
| 306,336,504
| 0
| 1
| null | 2022-01-25T01:11:32
| 2020-10-22T12:53:20
|
Python
|
UTF-8
|
Python
| false
| false
| 3,766
|
py
|
#! /usr/bin/python
"""labyrinth constructs and updates the game."""
import random
from pathlib import Path
from typing import List, Tuple
from settings import (
WALL_CHAR,
ROAD_CHAR,
START_CHAR,
FINISH_CHAR,
POSSIBLE_ITEMS_CHAR,
QUIT_APP,
)
class Item:
    """A collectable labyrinth object identified by name and grid position."""

    def __init__(self, name: str, coords: tuple):
        """Remember the item's display name and its (x, y) coordinates."""
        self.name = name
        self.coords = coords
class Labyrinth:
    """
    Game board: loads the level layout, scatters the items, places the
    player, and advances the game state one move at a time.
    """

    def __init__(self, filename: Path, player):
        """Build the board from the level file and position the player."""
        self.level_txt: Path = filename
        self.player = player
        self.run: bool = True
        self.walls: List[Tuple[int, int]] = []
        self.roads: List[Tuple[int, int]] = []
        self.possible_starts: List[Tuple[int, int]] = []
        self.finish: Tuple[()] = ()
        self.possible_items: List[Tuple[int, int]] = []
        self.start: Tuple[()] = ()
        self.items_names: List[str] = ["tube", "needle", "ether"]
        self.items: List[Item] = []
        self.run_states: List[str] = ["running", "process_quit", "quit"]
        self.run_state: int = 0
        self.load_structure()
        self.randomize_items()
        self.randomize_start()
        self.set_player_position_on_start(self.start)

    def load_structure(self):
        """Parse the level file into wall/road/start/finish/item coordinates."""
        with open(self.level_txt) as level:
            for pos_y, line in enumerate(level):
                for pos_x, char in enumerate(line):
                    coord = (pos_x, pos_y)
                    if char == WALL_CHAR:
                        self.walls.append(coord)
                    elif char == ROAD_CHAR:
                        self.roads.append(coord)
                    elif char == START_CHAR:
                        # Start cells are walkable too.
                        self.possible_starts.append(coord)
                        self.roads.append(coord)
                    elif char == FINISH_CHAR:
                        self.finish = coord
                        self.roads.append(coord)
                    elif char == POSSIBLE_ITEMS_CHAR:
                        # Candidate item drop cells are walkable too.
                        self.possible_items.append(coord)
                        self.roads.append(coord)

    def randomize_start(self):
        """Pick one start cell at random (only if not already chosen)."""
        if not self.start:
            self.start = random.choice(self.possible_starts)

    def set_player_position_on_start(self, start_pos):
        """Place the player on the chosen start cell."""
        self.player.pos = start_pos

    def randomize_items(self):
        """Drop each named item on a distinct random candidate cell."""
        for item_name in self.items_names:
            spot = random.choice(self.possible_items)
            self.possible_items.remove(spot)
            self.items.append(Item(item_name, spot))

    def find_item(self, player_position: tuple):
        """Move an item into the player's bag when the player stands on it."""
        for item in list(self.items):
            if item.coords == player_position:
                self.items.remove(item)
                self.player.bag.append(item)

    def update(self, control: str):
        """Advance one turn: move, pick up items, evaluate quit conditions."""
        self.player.move(control, self.roads)
        self.find_item(self.player.pos)
        if self.player.pos == self.finish:
            self.run_state += 1
        state = self.run_states[self.run_state]
        if control == QUIT_APP:
            self.run = False
        if state == "quit":
            self.run = False
|
[
"jerome-divry@laposte.net"
] |
jerome-divry@laposte.net
|
66b7d03ad0d4fdfe1d74e051129b3bbe054270ea
|
fdbaf65ee80da06370c4148075148976f7ccc5d3
|
/trees/red_black.py
|
c52f9b336343e4d0014c6489cdf658fc6cd36d4f
|
[] |
no_license
|
lharrison1224/interview-prep
|
27bd58a089bd4c535078456327a6f7193db08868
|
8bd211e73dff9c1aab2071005f5851df4d85c61e
|
refs/heads/master
| 2020-03-31T15:40:47.630647
| 2018-10-25T04:59:23
| 2018-10-25T04:59:23
| 152,346,641
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,410
|
py
|
'''
This module contains source code for the implementation of
red-black trees.
'''
class Node:
    """A red-black tree node; freshly inserted nodes start out red."""

    def __init__(self, val):
        self.val = val
        self.left = self.right = None
        self.color = "RED"
def main():
    """Build a small tree by repeated insertion and print it in order."""
    tree = None
    for value in (5, 7, 3, 4):
        tree = rb_insert(tree, value)
    inorder_print(tree)
def rb_insert(root, val):
    """Insert val into the red-black tree rooted at root; return the new root.

    Performs a plain BST insert (new nodes are RED via Node.__init__), then
    runs the recolouring half of the classic red-black fixup.  The rotation
    cases remain unimplemented (left_rotate/right_rotate are stubs), so only
    the red-uncle path restores the invariants.

    Fixes vs. the original: string colours compared with == instead of `is`
    (identity of equal strings is a CPython interning accident); a missing
    uncle is treated as BLACK (nil leaves are black) instead of raising
    AttributeError; and the uncle-black branch now returns the root instead
    of falling through and returning None (which lost the whole tree).

    Raises:
        ValueError: if val is already present in the tree.
    """
    def rb_insert_helper(root, val):
        # Standard recursive BST insertion; duplicates are rejected.
        if root is None:
            return Node(val)
        if root.val == val:
            raise ValueError('Attempted to insert duplicate value into BST')
        if val < root.val:
            root.left = rb_insert_helper(root.left, val)
        elif val > root.val:
            root.right = rb_insert_helper(root.right, val)
        return root

    def left_rotate(x):
        # TODO: needed for the black-uncle fixup cases.
        pass

    def right_rotate(x):
        # TODO: needed for the black-uncle fixup cases.
        pass

    def rb_fixup(root, val):
        # Restore red-black properties starting at the node holding val.
        # if the current node was the root
        if root.val == val:
            root.color = "BLACK"  # invariant: the root is always black
            return root
        # need to check the "uncle" of the node; collect the ancestor chain
        parents = []
        tmp = root
        while tmp.val != val:
            parents.insert(0, tmp)
            if val < tmp.val:
                tmp = tmp.left
            else:
                tmp = tmp.right
        # now the parents list is a stack where the first element is the
        # parent of the node, the second is the grandparent
        # parent can never be None if we have made it this far
        parent = parents.pop(0)
        if parent.color == "BLACK":
            # a red node under a black parent violates nothing
            return root
        # grandparent cannot be None either because the prior case will catch
        grandparent = parents.pop(0)
        uncle = grandparent.left if grandparent.left is not parent else grandparent.right
        if uncle is not None and uncle.color == "RED":
            # Case 1: red uncle -> recolour and continue fixing from grandparent.
            parent.color = "BLACK"
            uncle.color = "BLACK"
            grandparent.color = "RED"
            return rb_fixup(root, grandparent.val)
        else:
            # Cases 2/3 require rotations, which are not implemented yet;
            # at least keep the tree instead of returning None.
            return root

    root_after_insert = rb_insert_helper(root, val)
    return rb_fixup(root_after_insert, val)
def inorder_print(root):
    """Print 'val color' for every node in ascending (in-order) order."""
    stack, node = [], root
    while stack or node is not None:
        # Descend as far left as possible, then emit and go right.
        while node is not None:
            stack.append(node)
            node = node.left
        node = stack.pop()
        print(node.val, node.color)
        node = node.right
# Run the demo only when executed as a script (not on import).
if __name__ == '__main__':
    main()
|
[
"lkh6yb@mail.missouri.edu"
] |
lkh6yb@mail.missouri.edu
|
bbdd13e737d269954a424c88cc5b08e966ca4d15
|
8cbea6ef2534bbc89839c65f1db363958cc5e4b3
|
/about/migrations/0009_auto_20230120_1456.py
|
81652bf53907a90b090ba11fb131c90005f58ce2
|
[
"MIT"
] |
permissive
|
IATI/IATI-Standard-Website
|
4ca3d21d8a68470bfa1e55710175357bb7b9878f
|
4cf7be72b6b3d0c46dcadcc9d9904b471215ea81
|
refs/heads/master
| 2023-09-04T03:48:08.366656
| 2023-08-16T17:03:58
| 2023-08-16T17:03:58
| 124,079,425
| 4
| 8
|
MIT
| 2023-09-08T11:57:46
| 2018-03-06T13:05:08
|
HTML
|
UTF-8
|
Python
| false
| false
| 48,991
|
py
|
# Generated by Django 3.2.4 on 2023-01-20 14:56
from django.db import migrations
import home.models
import wagtail.blocks
import wagtail.documents.blocks
import wagtail.fields
import wagtail.images.blocks
class Migration(migrations.Migration):
dependencies = [
('about', '0008_auto_20220804_1557'),
]
operations = [
migrations.AlterField(
model_name='aboutpage',
name='content_editor',
field=wagtail.fields.StreamField([('h2', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h3', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h4', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('intro', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('paragraph', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('image_figure', wagtail.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('alignment', home.models.ImageAlignmentChoiceBlock()), ('caption', wagtail.blocks.RichTextBlock(required=False))], icon='image', label='Image figure')), ('pullquote', wagtail.blocks.StructBlock([('quote', wagtail.blocks.TextBlock('quote title'))])), ('aligned_html', wagtail.blocks.StructBlock([('html', wagtail.blocks.RawHTMLBlock()), ('alignment', home.models.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML')), ('document_box', wagtail.blocks.StreamBlock([('document_box_heading', wagtail.blocks.CharBlock(form_classname='title', help_text='Only one heading per box.', icon='title', required=False)), ('document', wagtail.documents.blocks.DocumentChooserBlock(icon='doc-full-inverse', required=False))], icon='doc-full-inverse')), ('anchor_point', wagtail.blocks.CharBlock(help_text='Custom anchor points are expected to precede other content.', icon='order-down')), ('fast_youtube_embed', wagtail.blocks.URLBlock(icon='code', label='Fast YouTube Embed'))], blank=True, null=True, use_json_field=True),
),
migrations.AlterField(
model_name='aboutpage',
name='content_editor_en',
field=wagtail.fields.StreamField([('h2', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h3', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h4', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('intro', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('paragraph', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('image_figure', wagtail.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('alignment', home.models.ImageAlignmentChoiceBlock()), ('caption', wagtail.blocks.RichTextBlock(required=False))], icon='image', label='Image figure')), ('pullquote', wagtail.blocks.StructBlock([('quote', wagtail.blocks.TextBlock('quote title'))])), ('aligned_html', wagtail.blocks.StructBlock([('html', wagtail.blocks.RawHTMLBlock()), ('alignment', home.models.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML')), ('document_box', wagtail.blocks.StreamBlock([('document_box_heading', wagtail.blocks.CharBlock(form_classname='title', help_text='Only one heading per box.', icon='title', required=False)), ('document', wagtail.documents.blocks.DocumentChooserBlock(icon='doc-full-inverse', required=False))], icon='doc-full-inverse')), ('anchor_point', wagtail.blocks.CharBlock(help_text='Custom anchor points are expected to precede other content.', icon='order-down')), ('fast_youtube_embed', wagtail.blocks.URLBlock(icon='code', label='Fast YouTube Embed'))], blank=True, null=True, use_json_field=True),
),
migrations.AlterField(
model_name='aboutpage',
name='content_editor_es',
field=wagtail.fields.StreamField([('h2', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h3', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h4', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('intro', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('paragraph', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('image_figure', wagtail.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('alignment', home.models.ImageAlignmentChoiceBlock()), ('caption', wagtail.blocks.RichTextBlock(required=False))], icon='image', label='Image figure')), ('pullquote', wagtail.blocks.StructBlock([('quote', wagtail.blocks.TextBlock('quote title'))])), ('aligned_html', wagtail.blocks.StructBlock([('html', wagtail.blocks.RawHTMLBlock()), ('alignment', home.models.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML')), ('document_box', wagtail.blocks.StreamBlock([('document_box_heading', wagtail.blocks.CharBlock(form_classname='title', help_text='Only one heading per box.', icon='title', required=False)), ('document', wagtail.documents.blocks.DocumentChooserBlock(icon='doc-full-inverse', required=False))], icon='doc-full-inverse')), ('anchor_point', wagtail.blocks.CharBlock(help_text='Custom anchor points are expected to precede other content.', icon='order-down')), ('fast_youtube_embed', wagtail.blocks.URLBlock(icon='code', label='Fast YouTube Embed'))], blank=True, null=True, use_json_field=True),
),
migrations.AlterField(
model_name='aboutpage',
name='content_editor_fr',
field=wagtail.fields.StreamField([('h2', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h3', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h4', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('intro', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('paragraph', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('image_figure', wagtail.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('alignment', home.models.ImageAlignmentChoiceBlock()), ('caption', wagtail.blocks.RichTextBlock(required=False))], icon='image', label='Image figure')), ('pullquote', wagtail.blocks.StructBlock([('quote', wagtail.blocks.TextBlock('quote title'))])), ('aligned_html', wagtail.blocks.StructBlock([('html', wagtail.blocks.RawHTMLBlock()), ('alignment', home.models.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML')), ('document_box', wagtail.blocks.StreamBlock([('document_box_heading', wagtail.blocks.CharBlock(form_classname='title', help_text='Only one heading per box.', icon='title', required=False)), ('document', wagtail.documents.blocks.DocumentChooserBlock(icon='doc-full-inverse', required=False))], icon='doc-full-inverse')), ('anchor_point', wagtail.blocks.CharBlock(help_text='Custom anchor points are expected to precede other content.', icon='order-down')), ('fast_youtube_embed', wagtail.blocks.URLBlock(icon='code', label='Fast YouTube Embed'))], blank=True, null=True, use_json_field=True),
),
migrations.AlterField(
model_name='aboutpage',
name='content_editor_pt',
field=wagtail.fields.StreamField([('h2', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h3', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h4', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('intro', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('paragraph', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('image_figure', wagtail.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('alignment', home.models.ImageAlignmentChoiceBlock()), ('caption', wagtail.blocks.RichTextBlock(required=False))], icon='image', label='Image figure')), ('pullquote', wagtail.blocks.StructBlock([('quote', wagtail.blocks.TextBlock('quote title'))])), ('aligned_html', wagtail.blocks.StructBlock([('html', wagtail.blocks.RawHTMLBlock()), ('alignment', home.models.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML')), ('document_box', wagtail.blocks.StreamBlock([('document_box_heading', wagtail.blocks.CharBlock(form_classname='title', help_text='Only one heading per box.', icon='title', required=False)), ('document', wagtail.documents.blocks.DocumentChooserBlock(icon='doc-full-inverse', required=False))], icon='doc-full-inverse')), ('anchor_point', wagtail.blocks.CharBlock(help_text='Custom anchor points are expected to precede other content.', icon='order-down')), ('fast_youtube_embed', wagtail.blocks.URLBlock(icon='code', label='Fast YouTube Embed'))], blank=True, null=True, use_json_field=True),
),
migrations.AlterField(
model_name='aboutsubpage',
name='content_editor',
field=wagtail.fields.StreamField([('h2', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h3', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h4', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('intro', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('paragraph', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('image_figure', wagtail.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('alignment', home.models.ImageAlignmentChoiceBlock()), ('caption', wagtail.blocks.RichTextBlock(required=False))], icon='image', label='Image figure')), ('pullquote', wagtail.blocks.StructBlock([('quote', wagtail.blocks.TextBlock('quote title'))])), ('aligned_html', wagtail.blocks.StructBlock([('html', wagtail.blocks.RawHTMLBlock()), ('alignment', home.models.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML')), ('document_box', wagtail.blocks.StreamBlock([('document_box_heading', wagtail.blocks.CharBlock(form_classname='title', help_text='Only one heading per box.', icon='title', required=False)), ('document', wagtail.documents.blocks.DocumentChooserBlock(icon='doc-full-inverse', required=False))], icon='doc-full-inverse')), ('anchor_point', wagtail.blocks.CharBlock(help_text='Custom anchor points are expected to precede other content.', icon='order-down')), ('fast_youtube_embed', wagtail.blocks.URLBlock(icon='code', label='Fast YouTube Embed'))], blank=True, null=True, use_json_field=True),
),
migrations.AlterField(
model_name='aboutsubpage',
name='content_editor_en',
field=wagtail.fields.StreamField([('h2', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h3', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h4', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('intro', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('paragraph', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('image_figure', wagtail.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('alignment', home.models.ImageAlignmentChoiceBlock()), ('caption', wagtail.blocks.RichTextBlock(required=False))], icon='image', label='Image figure')), ('pullquote', wagtail.blocks.StructBlock([('quote', wagtail.blocks.TextBlock('quote title'))])), ('aligned_html', wagtail.blocks.StructBlock([('html', wagtail.blocks.RawHTMLBlock()), ('alignment', home.models.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML')), ('document_box', wagtail.blocks.StreamBlock([('document_box_heading', wagtail.blocks.CharBlock(form_classname='title', help_text='Only one heading per box.', icon='title', required=False)), ('document', wagtail.documents.blocks.DocumentChooserBlock(icon='doc-full-inverse', required=False))], icon='doc-full-inverse')), ('anchor_point', wagtail.blocks.CharBlock(help_text='Custom anchor points are expected to precede other content.', icon='order-down')), ('fast_youtube_embed', wagtail.blocks.URLBlock(icon='code', label='Fast YouTube Embed'))], blank=True, null=True, use_json_field=True),
),
migrations.AlterField(
model_name='aboutsubpage',
name='content_editor_es',
field=wagtail.fields.StreamField([('h2', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h3', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h4', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('intro', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('paragraph', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('image_figure', wagtail.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('alignment', home.models.ImageAlignmentChoiceBlock()), ('caption', wagtail.blocks.RichTextBlock(required=False))], icon='image', label='Image figure')), ('pullquote', wagtail.blocks.StructBlock([('quote', wagtail.blocks.TextBlock('quote title'))])), ('aligned_html', wagtail.blocks.StructBlock([('html', wagtail.blocks.RawHTMLBlock()), ('alignment', home.models.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML')), ('document_box', wagtail.blocks.StreamBlock([('document_box_heading', wagtail.blocks.CharBlock(form_classname='title', help_text='Only one heading per box.', icon='title', required=False)), ('document', wagtail.documents.blocks.DocumentChooserBlock(icon='doc-full-inverse', required=False))], icon='doc-full-inverse')), ('anchor_point', wagtail.blocks.CharBlock(help_text='Custom anchor points are expected to precede other content.', icon='order-down')), ('fast_youtube_embed', wagtail.blocks.URLBlock(icon='code', label='Fast YouTube Embed'))], blank=True, null=True, use_json_field=True),
),
migrations.AlterField(
model_name='aboutsubpage',
name='content_editor_fr',
field=wagtail.fields.StreamField([('h2', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h3', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h4', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('intro', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('paragraph', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('image_figure', wagtail.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('alignment', home.models.ImageAlignmentChoiceBlock()), ('caption', wagtail.blocks.RichTextBlock(required=False))], icon='image', label='Image figure')), ('pullquote', wagtail.blocks.StructBlock([('quote', wagtail.blocks.TextBlock('quote title'))])), ('aligned_html', wagtail.blocks.StructBlock([('html', wagtail.blocks.RawHTMLBlock()), ('alignment', home.models.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML')), ('document_box', wagtail.blocks.StreamBlock([('document_box_heading', wagtail.blocks.CharBlock(form_classname='title', help_text='Only one heading per box.', icon='title', required=False)), ('document', wagtail.documents.blocks.DocumentChooserBlock(icon='doc-full-inverse', required=False))], icon='doc-full-inverse')), ('anchor_point', wagtail.blocks.CharBlock(help_text='Custom anchor points are expected to precede other content.', icon='order-down')), ('fast_youtube_embed', wagtail.blocks.URLBlock(icon='code', label='Fast YouTube Embed'))], blank=True, null=True, use_json_field=True),
),
migrations.AlterField(
model_name='aboutsubpage',
name='content_editor_pt',
field=wagtail.fields.StreamField([('h2', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h3', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h4', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('intro', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('paragraph', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('image_figure', wagtail.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('alignment', home.models.ImageAlignmentChoiceBlock()), ('caption', wagtail.blocks.RichTextBlock(required=False))], icon='image', label='Image figure')), ('pullquote', wagtail.blocks.StructBlock([('quote', wagtail.blocks.TextBlock('quote title'))])), ('aligned_html', wagtail.blocks.StructBlock([('html', wagtail.blocks.RawHTMLBlock()), ('alignment', home.models.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML')), ('document_box', wagtail.blocks.StreamBlock([('document_box_heading', wagtail.blocks.CharBlock(form_classname='title', help_text='Only one heading per box.', icon='title', required=False)), ('document', wagtail.documents.blocks.DocumentChooserBlock(icon='doc-full-inverse', required=False))], icon='doc-full-inverse')), ('anchor_point', wagtail.blocks.CharBlock(help_text='Custom anchor points are expected to precede other content.', icon='order-down')), ('fast_youtube_embed', wagtail.blocks.URLBlock(icon='code', label='Fast YouTube Embed'))], blank=True, null=True, use_json_field=True),
),
migrations.AlterField(
model_name='casestudypage',
name='content_editor',
field=wagtail.fields.StreamField([('h2', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h3', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h4', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('intro', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('paragraph', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('image_figure', wagtail.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('alignment', home.models.ImageAlignmentChoiceBlock()), ('caption', wagtail.blocks.RichTextBlock(required=False))], icon='image', label='Image figure')), ('pullquote', wagtail.blocks.StructBlock([('quote', wagtail.blocks.TextBlock('quote title'))])), ('aligned_html', wagtail.blocks.StructBlock([('html', wagtail.blocks.RawHTMLBlock()), ('alignment', home.models.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML')), ('document_box', wagtail.blocks.StreamBlock([('document_box_heading', wagtail.blocks.CharBlock(form_classname='title', help_text='Only one heading per box.', icon='title', required=False)), ('document', wagtail.documents.blocks.DocumentChooserBlock(icon='doc-full-inverse', required=False))], icon='doc-full-inverse')), ('anchor_point', wagtail.blocks.CharBlock(help_text='Custom anchor points are expected to precede other content.', icon='order-down')), ('fast_youtube_embed', wagtail.blocks.URLBlock(icon='code', label='Fast YouTube Embed'))], blank=True, null=True, use_json_field=True),
),
migrations.AlterField(
model_name='casestudypage',
name='content_editor_en',
field=wagtail.fields.StreamField([('h2', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h3', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h4', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('intro', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('paragraph', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('image_figure', wagtail.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('alignment', home.models.ImageAlignmentChoiceBlock()), ('caption', wagtail.blocks.RichTextBlock(required=False))], icon='image', label='Image figure')), ('pullquote', wagtail.blocks.StructBlock([('quote', wagtail.blocks.TextBlock('quote title'))])), ('aligned_html', wagtail.blocks.StructBlock([('html', wagtail.blocks.RawHTMLBlock()), ('alignment', home.models.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML')), ('document_box', wagtail.blocks.StreamBlock([('document_box_heading', wagtail.blocks.CharBlock(form_classname='title', help_text='Only one heading per box.', icon='title', required=False)), ('document', wagtail.documents.blocks.DocumentChooserBlock(icon='doc-full-inverse', required=False))], icon='doc-full-inverse')), ('anchor_point', wagtail.blocks.CharBlock(help_text='Custom anchor points are expected to precede other content.', icon='order-down')), ('fast_youtube_embed', wagtail.blocks.URLBlock(icon='code', label='Fast YouTube Embed'))], blank=True, null=True, use_json_field=True),
),
migrations.AlterField(
model_name='casestudypage',
name='content_editor_es',
field=wagtail.fields.StreamField([('h2', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h3', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h4', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('intro', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('paragraph', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('image_figure', wagtail.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('alignment', home.models.ImageAlignmentChoiceBlock()), ('caption', wagtail.blocks.RichTextBlock(required=False))], icon='image', label='Image figure')), ('pullquote', wagtail.blocks.StructBlock([('quote', wagtail.blocks.TextBlock('quote title'))])), ('aligned_html', wagtail.blocks.StructBlock([('html', wagtail.blocks.RawHTMLBlock()), ('alignment', home.models.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML')), ('document_box', wagtail.blocks.StreamBlock([('document_box_heading', wagtail.blocks.CharBlock(form_classname='title', help_text='Only one heading per box.', icon='title', required=False)), ('document', wagtail.documents.blocks.DocumentChooserBlock(icon='doc-full-inverse', required=False))], icon='doc-full-inverse')), ('anchor_point', wagtail.blocks.CharBlock(help_text='Custom anchor points are expected to precede other content.', icon='order-down')), ('fast_youtube_embed', wagtail.blocks.URLBlock(icon='code', label='Fast YouTube Embed'))], blank=True, null=True, use_json_field=True),
),
migrations.AlterField(
model_name='casestudypage',
name='content_editor_fr',
field=wagtail.fields.StreamField([('h2', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h3', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h4', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('intro', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('paragraph', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('image_figure', wagtail.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('alignment', home.models.ImageAlignmentChoiceBlock()), ('caption', wagtail.blocks.RichTextBlock(required=False))], icon='image', label='Image figure')), ('pullquote', wagtail.blocks.StructBlock([('quote', wagtail.blocks.TextBlock('quote title'))])), ('aligned_html', wagtail.blocks.StructBlock([('html', wagtail.blocks.RawHTMLBlock()), ('alignment', home.models.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML')), ('document_box', wagtail.blocks.StreamBlock([('document_box_heading', wagtail.blocks.CharBlock(form_classname='title', help_text='Only one heading per box.', icon='title', required=False)), ('document', wagtail.documents.blocks.DocumentChooserBlock(icon='doc-full-inverse', required=False))], icon='doc-full-inverse')), ('anchor_point', wagtail.blocks.CharBlock(help_text='Custom anchor points are expected to precede other content.', icon='order-down')), ('fast_youtube_embed', wagtail.blocks.URLBlock(icon='code', label='Fast YouTube Embed'))], blank=True, null=True, use_json_field=True),
),
migrations.AlterField(
model_name='casestudypage',
name='content_editor_pt',
field=wagtail.fields.StreamField([('h2', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h3', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h4', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('intro', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('paragraph', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('image_figure', wagtail.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('alignment', home.models.ImageAlignmentChoiceBlock()), ('caption', wagtail.blocks.RichTextBlock(required=False))], icon='image', label='Image figure')), ('pullquote', wagtail.blocks.StructBlock([('quote', wagtail.blocks.TextBlock('quote title'))])), ('aligned_html', wagtail.blocks.StructBlock([('html', wagtail.blocks.RawHTMLBlock()), ('alignment', home.models.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML')), ('document_box', wagtail.blocks.StreamBlock([('document_box_heading', wagtail.blocks.CharBlock(form_classname='title', help_text='Only one heading per box.', icon='title', required=False)), ('document', wagtail.documents.blocks.DocumentChooserBlock(icon='doc-full-inverse', required=False))], icon='doc-full-inverse')), ('anchor_point', wagtail.blocks.CharBlock(help_text='Custom anchor points are expected to precede other content.', icon='order-down')), ('fast_youtube_embed', wagtail.blocks.URLBlock(icon='code', label='Fast YouTube Embed'))], blank=True, null=True, use_json_field=True),
),
migrations.AlterField(
model_name='historypage',
name='content_editor',
field=wagtail.fields.StreamField([('h2', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h3', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h4', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('intro', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('paragraph', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('image_figure', wagtail.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('alignment', home.models.ImageAlignmentChoiceBlock()), ('caption', wagtail.blocks.RichTextBlock(required=False))], icon='image', label='Image figure')), ('pullquote', wagtail.blocks.StructBlock([('quote', wagtail.blocks.TextBlock('quote title'))])), ('aligned_html', wagtail.blocks.StructBlock([('html', wagtail.blocks.RawHTMLBlock()), ('alignment', home.models.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML')), ('document_box', wagtail.blocks.StreamBlock([('document_box_heading', wagtail.blocks.CharBlock(form_classname='title', help_text='Only one heading per box.', icon='title', required=False)), ('document', wagtail.documents.blocks.DocumentChooserBlock(icon='doc-full-inverse', required=False))], icon='doc-full-inverse')), ('anchor_point', wagtail.blocks.CharBlock(help_text='Custom anchor points are expected to precede other content.', icon='order-down')), ('fast_youtube_embed', wagtail.blocks.URLBlock(icon='code', label='Fast YouTube Embed'))], blank=True, null=True, use_json_field=True),
),
migrations.AlterField(
model_name='historypage',
name='content_editor_en',
field=wagtail.fields.StreamField([('h2', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h3', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h4', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('intro', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('paragraph', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('image_figure', wagtail.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('alignment', home.models.ImageAlignmentChoiceBlock()), ('caption', wagtail.blocks.RichTextBlock(required=False))], icon='image', label='Image figure')), ('pullquote', wagtail.blocks.StructBlock([('quote', wagtail.blocks.TextBlock('quote title'))])), ('aligned_html', wagtail.blocks.StructBlock([('html', wagtail.blocks.RawHTMLBlock()), ('alignment', home.models.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML')), ('document_box', wagtail.blocks.StreamBlock([('document_box_heading', wagtail.blocks.CharBlock(form_classname='title', help_text='Only one heading per box.', icon='title', required=False)), ('document', wagtail.documents.blocks.DocumentChooserBlock(icon='doc-full-inverse', required=False))], icon='doc-full-inverse')), ('anchor_point', wagtail.blocks.CharBlock(help_text='Custom anchor points are expected to precede other content.', icon='order-down')), ('fast_youtube_embed', wagtail.blocks.URLBlock(icon='code', label='Fast YouTube Embed'))], blank=True, null=True, use_json_field=True),
),
migrations.AlterField(
model_name='historypage',
name='content_editor_es',
field=wagtail.fields.StreamField([('h2', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h3', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h4', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('intro', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('paragraph', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('image_figure', wagtail.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('alignment', home.models.ImageAlignmentChoiceBlock()), ('caption', wagtail.blocks.RichTextBlock(required=False))], icon='image', label='Image figure')), ('pullquote', wagtail.blocks.StructBlock([('quote', wagtail.blocks.TextBlock('quote title'))])), ('aligned_html', wagtail.blocks.StructBlock([('html', wagtail.blocks.RawHTMLBlock()), ('alignment', home.models.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML')), ('document_box', wagtail.blocks.StreamBlock([('document_box_heading', wagtail.blocks.CharBlock(form_classname='title', help_text='Only one heading per box.', icon='title', required=False)), ('document', wagtail.documents.blocks.DocumentChooserBlock(icon='doc-full-inverse', required=False))], icon='doc-full-inverse')), ('anchor_point', wagtail.blocks.CharBlock(help_text='Custom anchor points are expected to precede other content.', icon='order-down')), ('fast_youtube_embed', wagtail.blocks.URLBlock(icon='code', label='Fast YouTube Embed'))], blank=True, null=True, use_json_field=True),
),
migrations.AlterField(
model_name='historypage',
name='content_editor_fr',
field=wagtail.fields.StreamField([('h2', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h3', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h4', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('intro', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('paragraph', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('image_figure', wagtail.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('alignment', home.models.ImageAlignmentChoiceBlock()), ('caption', wagtail.blocks.RichTextBlock(required=False))], icon='image', label='Image figure')), ('pullquote', wagtail.blocks.StructBlock([('quote', wagtail.blocks.TextBlock('quote title'))])), ('aligned_html', wagtail.blocks.StructBlock([('html', wagtail.blocks.RawHTMLBlock()), ('alignment', home.models.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML')), ('document_box', wagtail.blocks.StreamBlock([('document_box_heading', wagtail.blocks.CharBlock(form_classname='title', help_text='Only one heading per box.', icon='title', required=False)), ('document', wagtail.documents.blocks.DocumentChooserBlock(icon='doc-full-inverse', required=False))], icon='doc-full-inverse')), ('anchor_point', wagtail.blocks.CharBlock(help_text='Custom anchor points are expected to precede other content.', icon='order-down')), ('fast_youtube_embed', wagtail.blocks.URLBlock(icon='code', label='Fast YouTube Embed'))], blank=True, null=True, use_json_field=True),
),
migrations.AlterField(
model_name='historypage',
name='content_editor_pt',
field=wagtail.fields.StreamField([('h2', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h3', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h4', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('intro', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('paragraph', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('image_figure', wagtail.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('alignment', home.models.ImageAlignmentChoiceBlock()), ('caption', wagtail.blocks.RichTextBlock(required=False))], icon='image', label='Image figure')), ('pullquote', wagtail.blocks.StructBlock([('quote', wagtail.blocks.TextBlock('quote title'))])), ('aligned_html', wagtail.blocks.StructBlock([('html', wagtail.blocks.RawHTMLBlock()), ('alignment', home.models.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML')), ('document_box', wagtail.blocks.StreamBlock([('document_box_heading', wagtail.blocks.CharBlock(form_classname='title', help_text='Only one heading per box.', icon='title', required=False)), ('document', wagtail.documents.blocks.DocumentChooserBlock(icon='doc-full-inverse', required=False))], icon='doc-full-inverse')), ('anchor_point', wagtail.blocks.CharBlock(help_text='Custom anchor points are expected to precede other content.', icon='order-down')), ('fast_youtube_embed', wagtail.blocks.URLBlock(icon='code', label='Fast YouTube Embed'))], blank=True, null=True, use_json_field=True),
),
migrations.AlterField(
model_name='historypage',
name='timeline_editor',
field=wagtail.fields.StreamField([('event_block_editor', wagtail.blocks.StructBlock([('heading', wagtail.blocks.CharBlock(max_length=100, required=False)), ('description', wagtail.blocks.TextBlock(required=False))]))], blank=True, null=True, use_json_field=True),
),
migrations.AlterField(
model_name='historypage',
name='timeline_editor_en',
field=wagtail.fields.StreamField([('event_block_editor', wagtail.blocks.StructBlock([('heading', wagtail.blocks.CharBlock(max_length=100, required=False)), ('description', wagtail.blocks.TextBlock(required=False))]))], blank=True, null=True, use_json_field=True),
),
migrations.AlterField(
model_name='historypage',
name='timeline_editor_es',
field=wagtail.fields.StreamField([('event_block_editor', wagtail.blocks.StructBlock([('heading', wagtail.blocks.CharBlock(max_length=100, required=False)), ('description', wagtail.blocks.TextBlock(required=False))]))], blank=True, null=True, use_json_field=True),
),
migrations.AlterField(
model_name='historypage',
name='timeline_editor_fr',
field=wagtail.fields.StreamField([('event_block_editor', wagtail.blocks.StructBlock([('heading', wagtail.blocks.CharBlock(max_length=100, required=False)), ('description', wagtail.blocks.TextBlock(required=False))]))], blank=True, null=True, use_json_field=True),
),
migrations.AlterField(
model_name='historypage',
name='timeline_editor_pt',
field=wagtail.fields.StreamField([('event_block_editor', wagtail.blocks.StructBlock([('heading', wagtail.blocks.CharBlock(max_length=100, required=False)), ('description', wagtail.blocks.TextBlock(required=False))]))], blank=True, null=True, use_json_field=True),
),
migrations.AlterField(
model_name='peoplepage',
name='content_editor',
field=wagtail.fields.StreamField([('h2', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h3', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h4', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('intro', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('paragraph', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('image_figure', wagtail.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('alignment', home.models.ImageAlignmentChoiceBlock()), ('caption', wagtail.blocks.RichTextBlock(required=False))], icon='image', label='Image figure')), ('pullquote', wagtail.blocks.StructBlock([('quote', wagtail.blocks.TextBlock('quote title'))])), ('aligned_html', wagtail.blocks.StructBlock([('html', wagtail.blocks.RawHTMLBlock()), ('alignment', home.models.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML')), ('document_box', wagtail.blocks.StreamBlock([('document_box_heading', wagtail.blocks.CharBlock(form_classname='title', help_text='Only one heading per box.', icon='title', required=False)), ('document', wagtail.documents.blocks.DocumentChooserBlock(icon='doc-full-inverse', required=False))], icon='doc-full-inverse')), ('anchor_point', wagtail.blocks.CharBlock(help_text='Custom anchor points are expected to precede other content.', icon='order-down')), ('fast_youtube_embed', wagtail.blocks.URLBlock(icon='code', label='Fast YouTube Embed'))], blank=True, null=True, use_json_field=True),
),
migrations.AlterField(
model_name='peoplepage',
name='content_editor_en',
field=wagtail.fields.StreamField([('h2', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h3', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h4', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('intro', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('paragraph', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('image_figure', wagtail.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('alignment', home.models.ImageAlignmentChoiceBlock()), ('caption', wagtail.blocks.RichTextBlock(required=False))], icon='image', label='Image figure')), ('pullquote', wagtail.blocks.StructBlock([('quote', wagtail.blocks.TextBlock('quote title'))])), ('aligned_html', wagtail.blocks.StructBlock([('html', wagtail.blocks.RawHTMLBlock()), ('alignment', home.models.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML')), ('document_box', wagtail.blocks.StreamBlock([('document_box_heading', wagtail.blocks.CharBlock(form_classname='title', help_text='Only one heading per box.', icon='title', required=False)), ('document', wagtail.documents.blocks.DocumentChooserBlock(icon='doc-full-inverse', required=False))], icon='doc-full-inverse')), ('anchor_point', wagtail.blocks.CharBlock(help_text='Custom anchor points are expected to precede other content.', icon='order-down')), ('fast_youtube_embed', wagtail.blocks.URLBlock(icon='code', label='Fast YouTube Embed'))], blank=True, null=True, use_json_field=True),
),
migrations.AlterField(
model_name='peoplepage',
name='content_editor_es',
field=wagtail.fields.StreamField([('h2', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h3', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h4', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('intro', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('paragraph', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('image_figure', wagtail.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('alignment', home.models.ImageAlignmentChoiceBlock()), ('caption', wagtail.blocks.RichTextBlock(required=False))], icon='image', label='Image figure')), ('pullquote', wagtail.blocks.StructBlock([('quote', wagtail.blocks.TextBlock('quote title'))])), ('aligned_html', wagtail.blocks.StructBlock([('html', wagtail.blocks.RawHTMLBlock()), ('alignment', home.models.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML')), ('document_box', wagtail.blocks.StreamBlock([('document_box_heading', wagtail.blocks.CharBlock(form_classname='title', help_text='Only one heading per box.', icon='title', required=False)), ('document', wagtail.documents.blocks.DocumentChooserBlock(icon='doc-full-inverse', required=False))], icon='doc-full-inverse')), ('anchor_point', wagtail.blocks.CharBlock(help_text='Custom anchor points are expected to precede other content.', icon='order-down')), ('fast_youtube_embed', wagtail.blocks.URLBlock(icon='code', label='Fast YouTube Embed'))], blank=True, null=True, use_json_field=True),
),
migrations.AlterField(
model_name='peoplepage',
name='content_editor_fr',
field=wagtail.fields.StreamField([('h2', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h3', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h4', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('intro', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('paragraph', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('image_figure', wagtail.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('alignment', home.models.ImageAlignmentChoiceBlock()), ('caption', wagtail.blocks.RichTextBlock(required=False))], icon='image', label='Image figure')), ('pullquote', wagtail.blocks.StructBlock([('quote', wagtail.blocks.TextBlock('quote title'))])), ('aligned_html', wagtail.blocks.StructBlock([('html', wagtail.blocks.RawHTMLBlock()), ('alignment', home.models.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML')), ('document_box', wagtail.blocks.StreamBlock([('document_box_heading', wagtail.blocks.CharBlock(form_classname='title', help_text='Only one heading per box.', icon='title', required=False)), ('document', wagtail.documents.blocks.DocumentChooserBlock(icon='doc-full-inverse', required=False))], icon='doc-full-inverse')), ('anchor_point', wagtail.blocks.CharBlock(help_text='Custom anchor points are expected to precede other content.', icon='order-down')), ('fast_youtube_embed', wagtail.blocks.URLBlock(icon='code', label='Fast YouTube Embed'))], blank=True, null=True, use_json_field=True),
),
migrations.AlterField(
model_name='peoplepage',
name='content_editor_pt',
field=wagtail.fields.StreamField([('h2', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h3', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('h4', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('intro', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('paragraph', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('image_figure', wagtail.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('alignment', home.models.ImageAlignmentChoiceBlock()), ('caption', wagtail.blocks.RichTextBlock(required=False))], icon='image', label='Image figure')), ('pullquote', wagtail.blocks.StructBlock([('quote', wagtail.blocks.TextBlock('quote title'))])), ('aligned_html', wagtail.blocks.StructBlock([('html', wagtail.blocks.RawHTMLBlock()), ('alignment', home.models.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML')), ('document_box', wagtail.blocks.StreamBlock([('document_box_heading', wagtail.blocks.CharBlock(form_classname='title', help_text='Only one heading per box.', icon='title', required=False)), ('document', wagtail.documents.blocks.DocumentChooserBlock(icon='doc-full-inverse', required=False))], icon='doc-full-inverse')), ('anchor_point', wagtail.blocks.CharBlock(help_text='Custom anchor points are expected to precede other content.', icon='order-down')), ('fast_youtube_embed', wagtail.blocks.URLBlock(icon='code', label='Fast YouTube Embed'))], blank=True, null=True, use_json_field=True),
),
migrations.AlterField(
model_name='peoplepage',
name='profile_content_editor',
field=wagtail.fields.StreamField([('section_heading', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('paragraph', wagtail.blocks.CharBlock(icon='pilcrow')), ('rich_paragraph', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('pullquote', wagtail.blocks.StructBlock([('quote', wagtail.blocks.TextBlock('quote title'))])), ('profile_editor', wagtail.blocks.StructBlock([('name', wagtail.blocks.CharBlock(max_length=200, required=False)), ('profile_picture', wagtail.images.blocks.ImageChooserBlock(icon='image', label='Profile picture', required=False)), ('organisation_logo', wagtail.images.blocks.ImageChooserBlock(icon='image', label='Organisation logo', required=False)), ('organisation_name', wagtail.blocks.CharBlock(max_length=200, required=False)), ('IATI_role', wagtail.blocks.CharBlock(max_length=200, required=False)), ('external_role', wagtail.blocks.CharBlock(max_length=200, required=False)), ('description', wagtail.blocks.TextBlock(required=False)), ('IATI_constituency', wagtail.blocks.CharBlock(max_length=200, required=False))], icon='image'))], blank=True, null=True, use_json_field=True),
),
migrations.AlterField(
model_name='peoplepage',
name='profile_content_editor_en',
field=wagtail.fields.StreamField([('section_heading', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('paragraph', wagtail.blocks.CharBlock(icon='pilcrow')), ('rich_paragraph', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('pullquote', wagtail.blocks.StructBlock([('quote', wagtail.blocks.TextBlock('quote title'))])), ('profile_editor', wagtail.blocks.StructBlock([('name', wagtail.blocks.CharBlock(max_length=200, required=False)), ('profile_picture', wagtail.images.blocks.ImageChooserBlock(icon='image', label='Profile picture', required=False)), ('organisation_logo', wagtail.images.blocks.ImageChooserBlock(icon='image', label='Organisation logo', required=False)), ('organisation_name', wagtail.blocks.CharBlock(max_length=200, required=False)), ('IATI_role', wagtail.blocks.CharBlock(max_length=200, required=False)), ('external_role', wagtail.blocks.CharBlock(max_length=200, required=False)), ('description', wagtail.blocks.TextBlock(required=False)), ('IATI_constituency', wagtail.blocks.CharBlock(max_length=200, required=False))], icon='image'))], blank=True, null=True, use_json_field=True),
),
migrations.AlterField(
model_name='peoplepage',
name='profile_content_editor_es',
field=wagtail.fields.StreamField([('section_heading', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('paragraph', wagtail.blocks.CharBlock(icon='pilcrow')), ('rich_paragraph', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('pullquote', wagtail.blocks.StructBlock([('quote', wagtail.blocks.TextBlock('quote title'))])), ('profile_editor', wagtail.blocks.StructBlock([('name', wagtail.blocks.CharBlock(max_length=200, required=False)), ('profile_picture', wagtail.images.blocks.ImageChooserBlock(icon='image', label='Profile picture', required=False)), ('organisation_logo', wagtail.images.blocks.ImageChooserBlock(icon='image', label='Organisation logo', required=False)), ('organisation_name', wagtail.blocks.CharBlock(max_length=200, required=False)), ('IATI_role', wagtail.blocks.CharBlock(max_length=200, required=False)), ('external_role', wagtail.blocks.CharBlock(max_length=200, required=False)), ('description', wagtail.blocks.TextBlock(required=False)), ('IATI_constituency', wagtail.blocks.CharBlock(max_length=200, required=False))], icon='image'))], blank=True, null=True, use_json_field=True),
),
migrations.AlterField(
model_name='peoplepage',
name='profile_content_editor_fr',
field=wagtail.fields.StreamField([('section_heading', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('paragraph', wagtail.blocks.CharBlock(icon='pilcrow')), ('rich_paragraph', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('pullquote', wagtail.blocks.StructBlock([('quote', wagtail.blocks.TextBlock('quote title'))])), ('profile_editor', wagtail.blocks.StructBlock([('name', wagtail.blocks.CharBlock(max_length=200, required=False)), ('profile_picture', wagtail.images.blocks.ImageChooserBlock(icon='image', label='Profile picture', required=False)), ('organisation_logo', wagtail.images.blocks.ImageChooserBlock(icon='image', label='Organisation logo', required=False)), ('organisation_name', wagtail.blocks.CharBlock(max_length=200, required=False)), ('IATI_role', wagtail.blocks.CharBlock(max_length=200, required=False)), ('external_role', wagtail.blocks.CharBlock(max_length=200, required=False)), ('description', wagtail.blocks.TextBlock(required=False)), ('IATI_constituency', wagtail.blocks.CharBlock(max_length=200, required=False))], icon='image'))], blank=True, null=True, use_json_field=True),
),
migrations.AlterField(
model_name='peoplepage',
name='profile_content_editor_pt',
field=wagtail.fields.StreamField([('section_heading', wagtail.blocks.CharBlock(form_classname='title', icon='title')), ('paragraph', wagtail.blocks.CharBlock(icon='pilcrow')), ('rich_paragraph', wagtail.blocks.RichTextBlock(icon='pilcrow')), ('pullquote', wagtail.blocks.StructBlock([('quote', wagtail.blocks.TextBlock('quote title'))])), ('profile_editor', wagtail.blocks.StructBlock([('name', wagtail.blocks.CharBlock(max_length=200, required=False)), ('profile_picture', wagtail.images.blocks.ImageChooserBlock(icon='image', label='Profile picture', required=False)), ('organisation_logo', wagtail.images.blocks.ImageChooserBlock(icon='image', label='Organisation logo', required=False)), ('organisation_name', wagtail.blocks.CharBlock(max_length=200, required=False)), ('IATI_role', wagtail.blocks.CharBlock(max_length=200, required=False)), ('external_role', wagtail.blocks.CharBlock(max_length=200, required=False)), ('description', wagtail.blocks.TextBlock(required=False)), ('IATI_constituency', wagtail.blocks.CharBlock(max_length=200, required=False))], icon='image'))], blank=True, null=True, use_json_field=True),
),
]
|
[
"kaliithatmau@gmail.com"
] |
kaliithatmau@gmail.com
|
6fc587d3afcd1c2e58c0f61a7ae2985f7a3a5f07
|
c03fd40b2ce926d4bcdf696d5f21b774624a8316
|
/python/advanced_python_regex.py
|
762d84f44f6b5307e849a0b8c86df6cf4bb0583c
|
[] |
no_license
|
aliandra/dsp
|
0c249d3aec4b03d1d6c7cf247007d7c29bf537fe
|
5f6ff4026f7d8de025fa50ad08404743b6f0e31a
|
refs/heads/master
| 2020-06-14T12:09:04.019857
| 2017-01-08T05:33:51
| 2017-01-08T05:33:51
| 75,026,800
| 0
| 0
| null | 2016-11-29T00:31:38
| 2016-11-29T00:31:37
| null |
UTF-8
|
Python
| false
| false
| 1,420
|
py
|
import pandas as pd
import re
def read_data():
    """
    Read faculty.csv from the current directory and normalise it.

    - strips surrounding whitespace from column names
    - removes literal periods from the degree column (e.g. 'Ph.D.' -> 'PhD')
    - strips surrounding whitespace from degree values

    Returns:
        The cleaned pandas DataFrame.
    """
    df = pd.read_csv('faculty.csv')
    df.columns = df.columns.str.strip()
    # BUG FIX: Series.str.replace historically treated '.' as a regex, where
    # '.' matches ANY character — which blanked out every degree string.
    # Replace the literal period instead.
    df.degree = df.degree.str.replace('.', '', regex=False)
    df.degree = df.degree.str.strip()
    return df
def freq_dict(x):
    """Build a dict mapping each element of *x* to its occurrence count."""
    counts = {}
    for element in x:
        if element in counts:
            counts[element] += 1
        else:
            counts[element] = 1
    return counts
def print_dict(d):
    """Print each key/value pair on its own line as "key = value".

    NOTE: uses the Python 2 print statement (this file is Python 2 code).
    """
    for key, value in d.items():
        print key, '=', value
# --- Analysis script (NOTE: Python 2 syntax — print statements throughout) ---
# Q1. Find how many different degrees there are, and their frequencies
faculty = read_data()
degrees = []
for item in faculty.degree:
    if item != '0':  # '0' presumably marks a missing degree entry — TODO confirm against the CSV
        degrees.extend(item.split())  # multi-degree cells are space-separated
degree_dict = freq_dict(degrees)
print 'There are %d different degrees:' % len(degree_dict)
print_dict(degree_dict)
# Q2. Find how many different titles there are, and their frequencies:
# Drop trailing " of ..." / " is ..." qualifiers so titles group by rank only.
titles = [re.sub(' (of|is) .*', '', x) for x in faculty.title]
titles_dict = freq_dict(titles)
print 'There are %d different titles: ' % len(titles_dict)
print_dict(titles_dict)
# Q3. Search for email addresses and put them in a list.
emails = list(faculty.email)
print '\n'.join(emails)
# Q4. Find how many different email domains there are
domains = [re.sub('.*@', '', x) for x in emails]  # keep the text after the last '@'
domains = set(domains)
print 'There are %d different domains:' % len(domains)
print '\n'.join(domains)
|
[
"aphelan10@gmail.com"
] |
aphelan10@gmail.com
|
3c5e3277a06b699f342f879089865f6fb4021409
|
8c77b8d3a710a2177217479b04366295343eb734
|
/config.py
|
863aabfc4c4074ee552d83289a12efa00d72cfff
|
[] |
no_license
|
Modulus/MessageApplication
|
05c1711f30298d42ae241ba45051c7d85b31352b
|
9b3f1170ff67394a92a5123a056b5af4636c5e82
|
refs/heads/master
| 2021-01-21T12:40:51.035375
| 2014-11-11T12:57:03
| 2014-11-11T12:57:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 231
|
py
|
__author__ = 'johska'
class ProdConfig(object):
    """Production settings: MongoEngine connection for the live database."""

    MONGODB_SETTINGS = {
        'db': 'MessageApplication',
        'host': '127.0.0.1',
    }
class TestConfig(object):
    """Test settings: MongoEngine connection for a disposable test database."""

    MONGODB_SETTINGS = {
        'db': 'MessageApplicationTestBase',
        'host': '127.0.0.1',
    }
|
[
"thejohnskauge@gmail.com"
] |
thejohnskauge@gmail.com
|
6ca404e6ed23427b2ad7a3bd8160782cea06f66b
|
14bf5d09c14140d8e2a2da17284832350f51bff4
|
/src/level.py
|
9f3903479324b2a9a2967ad8cbc9c5f4a4b9f175
|
[
"MIT"
] |
permissive
|
rubenwo/PythonGame
|
feac43d124469ba6e415bdbbac88727f1b9cb7fc
|
ce9c877906373de4a2abd619ab1bd90b7cf8f1f2
|
refs/heads/master
| 2022-07-21T11:22:28.357388
| 2020-05-23T11:04:47
| 2020-05-23T11:04:47
| 263,591,085
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 149
|
py
|
class Level(object):
    """Placeholder for a game level loaded from an external definition.

    All methods are unimplemented stubs; the class currently only sketches
    the intended interface (construct from a source, draw, update).
    """
    def __init__(self, file_source: object):
        # file_source: the level definition to load — presumably a file path
        # or file-like object; TODO confirm once loading is implemented.
        pass
    def draw(self):
        # Stub: render the level (no-op for now).
        pass
    def update(self):
        # Stub: advance the level state by one tick (no-op for now).
        pass
|
[
"rwoldhui@avans.nl"
] |
rwoldhui@avans.nl
|
cef8a96cc1a76b374d6247ba764bfe045f85508b
|
a9cfa392b546d762ab945ebe8b3375e273f67c0e
|
/hackerrank/easy/insertionsort2.py
|
89592c8621f136d8f72c8ae6a1cf81165037a6e0
|
[] |
no_license
|
seandewar/challenge-solutions
|
041d6c678d07c751497b4f8ac9ae0c3a2fefa85e
|
ac6119427aef61d63f28c443c1cedaae8a38cf28
|
refs/heads/master
| 2023-08-15T02:58:08.654360
| 2023-08-04T22:07:12
| 2023-08-04T22:07:22
| 214,286,640
| 3
| 0
| null | 2023-02-04T11:40:28
| 2019-10-10T21:12:31
|
C++
|
UTF-8
|
Python
| false
| false
| 470
|
py
|
#!/usr/bin/env python3
# https://www.hackerrank.com/challenges/insertionsort2
def insertion_sort(arr):
    """Sort *arr* in place, printing the array state after each insertion.

    Follows the HackerRank "Insertion Sort - Part 2" output format: for every
    element after the first, the whole array is printed (space-separated) once
    that element has been shifted into its sorted position. Lists of length
    0 or 1 produce no output.
    """
    if len(arr) <= 1:
        return
    for pos in range(1, len(arr)):
        value = arr[pos]
        slot = pos
        # Shift larger predecessors one step right to open the target slot.
        while slot and arr[slot - 1] > value:
            arr[slot] = arr[slot - 1]
            slot -= 1
        arr[slot] = value
        print(' '.join(str(item) for item in arr))
# Read the declared array size and the array itself from stdin, then sort.
# NOTE(review): `size` is consumed but never validated against len(arr).
size = int(input())
arr = [int(arr_item) for arr_item in input().split(' ')]
insertion_sort(arr)
|
[
"6256228+seandewar@users.noreply.github.com"
] |
6256228+seandewar@users.noreply.github.com
|
62de7e54ed1b6d5bf53a602ab8f2e2f5fe578154
|
f176056bc541501366bf73e53bf99082480d31f1
|
/AUMachineLearning2017/handin2/src/cnn_model_solved.py
|
4b0d35866a329f184092293aade69a21704279fd
|
[] |
no_license
|
paaskus/AU-projects
|
56050775100600ea88afeb49a8ff10e248e96e75
|
afc693a2a0a9bc2d1bbbd0c1fa8966622c38f4df
|
refs/heads/master
| 2020-04-03T23:17:32.853464
| 2018-10-31T22:01:18
| 2018-10-31T22:01:18
| 155,625,943
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,546
|
py
|
import os
import tensorflow as tf
import numpy as np
from model import Model as TfModel
class Config(object):
    """Holds model hyperparams and data information.
    The config class is used to store various hyperparameters and dataset
    information parameters. Model objects are passed a Config() object at
    instantiation.
    """
    # MNIST-style input: 28x28 grayscale images flattened to 784 features,
    # classified into 10 classes.
    width = 28
    height = 28
    n_features = width * height
    n_classes = 10
    ## Architecture configuration
    # Each conv layer is a convolution using h x w convolution, c, channels, and there are k of them. Stored in tuple (h, w, c, k)
    # The input is grayscale so the first layer has only one channel. For rgb color images there would be 3 for example.
    conv_layers = [(5, 5, 1, 32), (5, 5, 32, 64)]
    pool_sizes = [2, 2] # only use square pools i.e. t x t
    # compute the output size of the convolutional layers i.e. how many values do we get back after the two steps of convolution and pooling.
    conv_output_size = int(0)
    ### YOUR CODE HERE
    tensor_dimensions = 4
    # Two 2x2 poolings halve 28x28 twice -> 7x7 spatial map with 64 channels.
    conv_output_size = 7 * 7 * 64
    ### END CODE
    hidden_size = 1024
    dropout = 0.5  # dropout value fed to the dropout placeholder during training
    weight_decay = 1e-4
    batch_size = 32
    n_epochs = 5
    lr = 0.001
    def __init__(self, name_suffix=None, **kwargs):
        """Build a config, optionally overriding the class defaults via kwargs.

        Also derives the model name from hidden_size (plus optional suffix)
        and ensures the weight-checkpoint directory exists.
        """
        ## overwrite values and set paths
        for key, value in kwargs.items():
            setattr(self, key, value)
        self.name = 'cnn_h{0}'.format(self.hidden_size)
        if name_suffix is not None:
            self.name = '{0}_{1}'.format(self.name, name_suffix)
        # Side effect: creates ./model_weights under the current working
        # directory if it does not already exist.
        weights_path = os.path.join(os.getcwd(), 'model_weights')
        if not os.path.exists(weights_path):
            os.makedirs(weights_path)
        self.weights_path = weights_path
        self.weights_file = os.path.join(self.weights_path, "{0}.weight".format(self.name))
class ConvolutionalModel(TfModel):
    """Two-conv-layer CNN classifier built on the TensorFlow 1.x graph API.

    NOTE(review): relies on TF1-only constructs (tf.placeholder, tf.contrib,
    tf.train.AdamOptimizer) and on the project-local TfModel base class for
    the train/predict driver loop; it will not run on TF 2.x without
    tf.compat.v1.
    """
    def add_placeholders(self):
        """Generates placeholder variables to represent the input tensors
        These placeholders are used as inputs by the rest of the model building and will be fed
        data during training. Note that when "None" is in a placeholder's shape, it's flexible
        (so we can use different batch sizes without rebuilding the model).
        Adds following nodes to the computational graph
        input_placeholder: Input placeholder tensor of shape (None, n_features), type tf.float32
        labels_placeholder: Labels placeholder tensor of shape (None, ), type tf.int32
        dropout_placeholder: Dropout value placeholder (scalar), type tf.float32
        weight_decay_placeholder: Weight Decay Value (scalar), type tf.float32
        Add these placeholders to self as the instance variables
        self.input_placeholder
        self.labels_placeholder
        self.dropout_placeholder
        self.weight_decay_placeholder
        (Don't change the variable names)
        """
        ### YOUR CODE HERE
        self.input_placeholder = tf.placeholder(tf.float32, shape=[None, self.config.n_features])
        self.labels_placeholder = tf.placeholder(tf.int32, shape=[None,])
        self.dropout_placeholder = tf.placeholder(tf.float32)
        self.weight_decay_placeholder = tf.placeholder(tf.float32)
        ### END CODE
    def create_feed_dict(self, inputs_batch, labels_batch=None, weight_decay = 0, dropout=1):
        # print("Length of inputs_batch: {0}".format(len(inputs_batch)))
        """Creates the feed_dict for the neural net.
        A feed_dict takes the form of:
        feed_dict = {
            <placeholder>: <tensor of values to be passed for placeholder>,
            ....
        }
        Hint: The keys for the feed_dict should be a subset of the placeholder
        tensors created in add_placeholders.
        Hint: When an argument is None, don't add it to the feed_dict.
        It is the same as you did for nn_model
        Args:
            inputs_batch: A batch of input data.
            labels_batch: A batch of label data.
            dropout: The dropout rate.
            weight_decay: the lambda weight decay scale parameter
        Returns:
            feed_dict: dict, mapping from placeholders to values.
        """
        ### YOUR CODE HERE
        feed_dict = {self.input_placeholder: inputs_batch, self.dropout_placeholder: dropout, self.weight_decay_placeholder: weight_decay}
        # TODO: In principle, they could all be none. Consider making a check here for each of them!
        # Labels are optional so the same method serves inference (no labels).
        if not labels_batch is None:
            # print("Length of labels_batch: {0}".format(len(labels_batch)))
            # if len(labels_batch) < 32:
            #     print("Not enough labels case")
            #     labels_batch = np.zeros(32)
            feed_dict[self.labels_placeholder] = labels_batch
        ### END CODE
        return feed_dict
    def add_prediction_op(self):
        """Adds 2 layer convolution, 1-hidden-layer CNN:
        l1 = max_pool(Relu(conv(x, C1)))
        l2 = max_pool(Relu(conv(l1, C2)))
        f = flatten(c2) - make into [-1, self.config.conv_output_size] shape
        h = Relu(fW + b3) - hidden layer
        h_drop = Dropout(h, dropout_rate) - use dropout
        pred = h_dropU + b4 - compute output layer
        Note that we are not applying a softmax to pred. The softmax will instead be done in
        the add_loss_op function, which improves efficiency because we can use
        tf.nn.softmax_cross_entropy_with_logits. Also it saves us some code.
        tf.reshape, and tf.nn.conv2d, tf.nn.max_pool should be vital for implementing the convolution layers.
        conv2d: tf.nn.conv2d
        filter is [filter_height, filter_width, in_channels, out_channels] and is configured in config.conv_layers
        set padding='SAME' to keep the input size unchaged
        Strides, we match the convolution filter to all positions in the input thus set strides to [1, 1, 1, 1] accordingly.
        max_pool: tf.nn.max_pool
        ksize height and width are defined in config.pool_sizes (we use quadratic poolings)
        strides must be the same as ksize to tile the max pool filter non-overlapping
        So strides and ksize should be 1, pool_size_height, pool_size_width, 1
        set padding='SAME' to keep the input size unchaged
        Use tf.contrib.xavier_initializer to initialize Variablers C1, C2, W, U
        you can initialize bias b1, b2, b3, b4 with zeros
        Hint: Here are the dimensions of the various variables you will need to create
        C1: (first convolution) # conv_layers[0]
        b1: (number of convolutions in first conv. layer, )
        C2: (second convolutional layer)
        b2: (number of convolutions in second conv. layer, )
        W: (conv_output_size, hidden_layer)
        b3: (hidden_size,)
        U: (hidden_size, n_classes)
        b4: (n_classes,)
        Hint: Note that tf.nn.dropout takes the keep probability (1 - p_drop) as an argument.
        The keep probability should be set to the value of self.dropout_placeholder
        Add these placeholders to self as the instance variables (need them for weigth decay)
        self.W
        self.U
        Returns:
            pred: tf.Tensor of shape (batch_size, n_classes)
        """
        x = self.input_placeholder
        x_image = tf.reshape(x, [-1, 28, 28, 1]) # (batchsize) inputs of 28 x 28 and 1 channel
        xavier_init = tf.contrib.layers.xavier_initializer()
        ### YOUR CODE HERE
        #Initialize variable
        C1 = tf.Variable(xavier_init(self.config.conv_layers[0]))
        C2 = tf.Variable(xavier_init(self.config.conv_layers[1]))
        # NOTE(review): bias sizes 32/64 are hard-coded to mirror conv_layers,
        # and tf.zeros makes them constant tensors rather than trainable
        # tf.Variables — confirm this is intended.
        b1 = tf.zeros(32)
        b2 = tf.zeros(64)
        #Do convolution
        #Calculate l1
        convL1 = tf.nn.conv2d(input=x_image, filter=C1, strides=[1, 1, 1, 1], padding="SAME")
        reluConvL1 = tf.nn.relu(convL1 + b1)
        l1 = tf.nn.max_pool(value=reluConvL1, ksize=[1, self.config.pool_sizes[0], self.config.pool_sizes[1], 1], strides=[1, self.config.pool_sizes[0], self.config.pool_sizes[1], 1], padding="SAME")
        #Calculate l2
        convL2 = tf.nn.conv2d(input=l1, filter=C2, strides=[1, 1, 1, 1], padding="SAME")
        reluConvL2 = tf.nn.relu(convL2 + b2)
        l2 = tf.nn.max_pool(value=reluConvL2, ksize=[1, self.config.pool_sizes[0], self.config.pool_sizes[1], 1], strides=[1, self.config.pool_sizes[0], self.config.pool_sizes[1], 1], padding="SAME")
        # NB! This might be faulty
        f = tf.reshape(l2, [-1, self.config.conv_output_size])
        #Initialize remaining variables
        Wshape = (self.config.conv_output_size, self.config.hidden_size)
        self.W = tf.Variable(xavier_init(Wshape))
        Ushape = (self.config.hidden_size, self.config.n_classes)
        self.U = tf.Variable(xavier_init(Ushape))
        b3 = tf.zeros(self.config.hidden_size)
        b4 = tf.zeros(self.config.n_classes)
        #Do hidden layer work
        # h = Relu(xW + b1) - hidden layer
        features = tf.matmul(f, self.W) + b3
        h = tf.nn.relu(features)
        # h_drop = Dropout(h, dropout_rate) - use dropout
        h_drop = tf.nn.dropout(h, self.dropout_placeholder)
        # pred = h_dropU + b2 - output layer
        pred = tf.matmul(h_drop, self.U) + b4
        ### END CODE
        return pred
    def add_loss_op(self, pred):
        """Adds Ops for the loss function to the computational graph.
        In this case we are using cross entropy loss.
        The loss should be averaged over all examples in the current minibatch.
        Hint: You can use tf.nn.sparse_softmax_cross_entropy_with_logits to simplify your
        implementation. You might find tf.reduce_mean useful.
        You should compute
        loss = sum(softmax_loss) + self.weight_decay_placeholder * ((sum_{i,j} W_{i,j}^2)+(sum_{i,j} U_{i,j}^2))
        Where W are the weights for the hidden layer and into softmax
        Args:
            pred: A tensor of shape (batch_size, n_classes) containing the output of the neural
                  network before the softmax layer.
        Returns:
            loss: A 0-d tensor (scalar)
        """
        ### YOUR CODE HERE
        # 1: Calculate softmax_loss
        softmax_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.labels_placeholder, logits=pred) # NB! We are not completely sure about this!
        # 2: Calculate loss according to: "loss = sum(softmax_loss) + self.weight_decay_placeholder * (\sum_{i,j} W_{i,j}^2 + \sum_{i,j} U_{i,j}^2)"
        #We might need to use reduce_sum instead (on W and U) to get the proper average
        loss = tf.reduce_mean(softmax_loss)
        reg = self.weight_decay_placeholder * (tf.reduce_sum(self.W ** 2) + tf.reduce_sum(self.U ** 2))
        ### END CODE
        return loss + reg
    def add_training_op(self, loss):
        """Sets up the training Ops.
        Creates an optimizer and applies the gradients to all trainable variables.
        The Op returned by this function is what must be passed to the
        `sess.run()` call to cause the model to train. See
        https://www.tensorflow.org/versions/r0.7/api_docs/python/train.html#Optimizer
        for more information.
        Use tf.train.AdamOptimizer for this model.
        Calling optimizer.minimize() will return a train_op object.
        Args:
            loss: Loss tensor, from cross_entropy_loss.
        Returns:
            train_op: The Op for training.
        """
        ### YOUR CODE HERE
        # NOTE(review): AdamOptimizer() runs with its default learning rate;
        # self.config.lr is defined but not passed in here.
        optimizer = tf.train.AdamOptimizer()
        train_op = optimizer.minimize(loss)
        ### END CODE
        return train_op
    def predict_labels_on_batch(self, session, inputs_batch):
        """ Make label predictions for the provided batch of data - helper function
        Should be similar to softmax predict from hand in 1
        Args:
            session: tf.Session()
            input_batch: np.ndarray of shape (n_samples, n_features)
        Returns:
            predicted_labels: np.ndarray of shape (n_samples,)
        """
        predicted_labels = None
        logits = self.predict_on_batch(session, inputs_batch)
        # argmax over the class axis converts logits into hard label predictions.
        predicted_labels_tensor = tf.argmax(logits, 1)
        predicted_labels = session.run(predicted_labels_tensor)
        return predicted_labels
# This module is meant to be imported as a model definition; running it
# directly only prints a placeholder marker.
if __name__=='__main__':
    print('DOOH')
|
[
"bardurrunason@gmail.com"
] |
bardurrunason@gmail.com
|
7caf8ad3692897ce958ca890f7431880621d2cc0
|
2161a65e68c0202cd0c5d19d38e6f33c6bc31756
|
/manage.py
|
e33607fae37631b9c9599d5b415f400529bd99a1
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
ysguoqiang/zenith-monitor
|
6ea9897be3656f180dd43a1bbfb5a7dad5163360
|
79037d7372bf3ff499562fab351fc35433c6e2f5
|
refs/heads/master
| 2020-03-10T10:01:46.625165
| 2017-11-01T03:32:59
| 2017-11-01T03:32:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,357
|
py
|
# Author: Forec
# Last modified: 2016-12-24
# Email: forec@bupt.edu.cn
# About this file: management entry point for the server; registers the
# database-initialisation, shell and test commands.
import os, time
COV = None
if os.environ.get('ZENITH_COVERAGE'):
    import coverage
    # Start coverage measurement before the app package is imported so
    # module-level code in app/ is counted.
    COV = coverage.coverage(branch=True, include='app/*')
    COV.start()
from app import create_app, db
from app.models import User, Device
from app.devices import deviceTable, Bulb, TV, AirCondition, PC
from flask_script import Manager, Shell
from flask_migrate import Migrate, MigrateCommand
app = create_app('default')
# Create the application from the chosen configuration profile.
manager = Manager(app)
# Register the command-line manager.
migrate = Migrate(app, db)
# Register database-migration support.
# make_shell_context exposes the app, db and model classes to the manager shell.
def make_shell_context():
    """Names made available automatically inside the `shell` command's REPL."""
    return {
        'app': app,
        'db': db,
        'User': User,
        'Device': Device,
        'Bulb': Bulb,
        'TV': TV,
        'PC': PC,
        'AirCondition': AirCondition,
        'deviceTable': deviceTable,
    }
# Register the interactive shell command with the manager.
manager.add_command("shell", Shell(make_context=make_shell_context))
# Register the database-migration commands with the manager.
manager.add_command("db", MigrateCommand)
# Register the test command with the manager.
@manager.command
def test(coverage=False):
    """Run the unit tests"""
    if coverage and not os.environ.get('ZENITH_COVERAGE'):
        import sys
        os.environ['ZENITH_COVERAGE'] = '1'
        # Re-exec this process so coverage starts before `app` is imported.
        os.execvp(sys.executable, [sys.executable] + sys.argv)
    # NOTE(review): the actual unittest run below is commented out — as it
    # stands this command only emits a coverage report.
    #import unittest
    #tests = unittest.TestLoader().discover("tests")
    #unittest.TextTestRunner(verbosity=2).run(tests)
    if COV:
        COV.stop()
        COV.save()
        print('Coverage Summary:')
        COV.report()
        basedir = os.path.abspath(os.path.dirname(__file__))
        covdir = os.path.join(basedir, 'temp/coverage')
        COV.html_report(directory=covdir)
        print('HTML version: file://%s/index.html' % covdir)
# Register the database-initialization command with the manager.
@manager.command
def init():
    '''Init the database'''
    # Drop and recreate all tables, then seed two demo users and six devices.
    db.drop_all()
    db.session.commit()
    db.create_all()
    u1 = User(email='forec@bupt.edu.cn',
              nickname='Forec',
              password='zenithdm',
              confirmed=True,
              about_me='这个人很懒,什么都没有留下')
    u2 = User(email='test@test.com',
              nickname='设备管理测试员',
              monitor_url="http://monitor.cross.forec.cn",
              token_hash='9490544C18C15B21286685B41F825684',
              password='zenithdm',
              confirmed=True,
              about_me='我是顶点云设备管理平台测试者')
    db.session.add(u1)
    db.session.add(u2)
    # Demo devices, all owned by the test user u2.
    d1 = Bulb(name = '主卧照明',
              code='E1A9013A447E',
              owner = u2,
              interval = 5,
              about = '主卧右四大灯照明'
              )
    d2 = Bulb(name = '书房台灯',
              owner = u2,
              code='BA8120601307',
              interval = 3,
              about = '书房书橱右上第二个台灯'
              )
    d3 = Bulb(name = '厨房照明',
              owner = u2,
              code='LI29F2MV9D7Z',
              interval = 3,
              about = '厨房油烟机灯光'
              )
    d4 = TV(name = '客厅电视',
            owner = u2,
            code='CE683231033B',
            interval = 5,
            about = '65536 寸 TCL 彩电'
            )
    d5 = AirCondition(
            name = '客房柜式空调',
            owner = u2,
            code='EFID2141FJKD',
            interval = 5,
            about = '客房柜式 GL 203 空调'
            )
    d6 = PC(
            name = '树莓派',
            owner= u2,
            code = 'JFIDEO2193FJ',
            interval = 5,
            about = '宿舍里留守的树莓派'
            )
    db.session.add(d1)
    db.session.add(d2)
    db.session.add(d3)
    db.session.add(d4)
    db.session.add(d5)
    db.session.add(d6)
    db.session.commit()
# Clear the database and seed only test users (no devices).
@manager.command
def simple_init():
    '''Simple Init the database'''
    db.drop_all()
    db.create_all()
    u = User(email='forec@bupt.edu.cn',
             nickname='Forec',
             password='zenith',
             confirmed=True,
             about_me='顶点云设备管理员')
    db.session.add(u)
    u = User(email='test@test.com',
             nickname='测试者',
             password='zenith',
             confirmed=True,
             about_me='欢迎来到顶点云设备管理的线上测试')
    db.session.add(u)
    u = User(email='dragoncat@forec.cn',
             nickname='龙猫',
             password='zenith',
             confirmed=True,
             about_me='我是最萌的')
    db.session.add(u)
    u = User(email='non1996@forec.cn',
             nickname='non1996',
             password='zenith',
             confirmed=True,
             about_me='听说你要开车')
    db.session.add(u)
    u = User(email='rabbit@forec.cn',
             nickname='飞翔的兔子',
             password='zenith',
             confirmed=True,
             about_me='一只热爱生活的兔子')
    db.session.add(u)
    db.session.commit()
if __name__ == "__main__":
    # Dispatch to the registered manager commands (shell, db, test, init, ...).
    manager.run()
|
[
"Forect@hotmail.com"
] |
Forect@hotmail.com
|
7c58b4179641bab37a0bc6feca58da03bd1de647
|
8356d64ae87b2e916793ea6970ab4659e66fe272
|
/virtual/bin/alembic
|
6a73c33917e4c59c60f51123e4dd36fbee74ffae
|
[
"MIT"
] |
permissive
|
stevekibe/personalblog
|
4c85e98675a8deca4856d037fb18be805e4987bf
|
6b442846fa2d1e7d0f21a4bd46587389136fdddd
|
refs/heads/master
| 2020-03-28T19:25:02.113139
| 2018-09-18T10:51:45
| 2018-09-18T10:51:45
| 148,974,314
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 259
|
#!/home/steve/Documents/Python/One-minite/virtual/bin/python
# -*- coding: utf-8 -*-
# Console-script shim generated for the ``alembic`` entry point.
import re
import sys
if __name__ == '__main__':
    # Strip a trailing "-script.py(w)"/".exe" suffix from argv[0] so alembic
    # reports a clean program name, then exit with alembic's status code.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
from alembic.config import main
|
[
"stevekibe2345@gmail.com"
] |
stevekibe2345@gmail.com
|
|
cbd5b98104f0bd7d4ccc50a2b36f9db78b8e857a
|
a974bbcbe9398887c99b92e8b5df63e029fe7769
|
/code/Graphics_Viewer/qenhancedgraphicsview.py
|
ce5db969161f640a89362e8baa79fa7247d05202
|
[
"BSD-3-Clause"
] |
permissive
|
eckerr/python_image_manipulation
|
9c21f8d57879b288f7e9f7ad34fb926e1ba8a86b
|
a0aacf7311e1ee7435df154962e634f061311209
|
refs/heads/master
| 2022-07-14T01:24:34.198184
| 2020-03-01T03:49:43
| 2020-03-01T03:49:43
| 167,699,657
| 0
| 0
| null | 2022-06-21T22:05:46
| 2019-01-26T14:46:05
|
Python
|
UTF-8
|
Python
| false
| false
| 39
|
py
|
"""
Created by Ed on 10/29/2019
"""
|
[
"edkerr@comcast.net"
] |
edkerr@comcast.net
|
2ee67f8830bbd487ae34f86023b9d3eecfa6ef6c
|
11a72c7c9a6861d56492b590eb6816611b30d834
|
/Code/faceCounter.py
|
498c7c830b935ac8d1f40072713c06af23849ead
|
[] |
no_license
|
sagarjoglekar/vineCrawl
|
625f6f4d40d03a7b02526e00bc8e9926eab8493c
|
0b83db51e16bcfdb12e8e614ed0ece2adc62e0af
|
refs/heads/master
| 2021-03-22T05:14:17.028679
| 2016-07-18T11:32:53
| 2016-07-18T11:32:53
| 46,863,944
| 1
| 0
| null | 2016-02-23T11:53:14
| 2015-11-25T13:41:10
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,789
|
py
|
import re
import datetime as dt
from datetime import datetime
import time
import random
import json
import sys
import requests
import os
import wget
import pickle
import cv2
import numpy as np
import cPickle
import multiprocessing as mp
# Pickled list of directories that have already been processed.
visitedList = "../Logs/faceCounted.data"
root = "../vinedata/Data/"
#root = "../Data/"
# Name of the per-directory output subfolder.
faces = "faces"
# Haar cascades used for frontal and profile face detection.
frontal_face_cascade = cv2.CascadeClassifier('../haarcascades/haarcascade_frontalface_default.xml')
profile_face_cascade = cv2.CascadeClassifier('../haarcascades/haarcascade_profileface.xml')
selected = "AllVines.pkl"
# Minimum loop count for a post to be analysed.
loopThreshold = 0
# Pickle file accumulating per-video face-count log lines.
faceNumber = "../Logs/faceCounts.pk"
def process_frontal(frame):
    """Return the number of frontal-face detections in a single BGR frame."""
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # NOTE(review): waitKey only has an effect with a GUI window open;
    # presumably a leftover from interactive debugging -- confirm.
    cv2.waitKey(20)
    # (Removed a dead ``faces = []`` that was immediately overwritten.)
    faces = frontal_face_cascade.detectMultiScale(gray, 1.3, 5)
    return len(faces)
def process_profile(frame):
    """Return the number of profile-face detections in a single BGR frame."""
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # NOTE(review): waitKey only has an effect with a GUI window open -- confirm.
    cv2.waitKey(20)
    # (Removed a dead ``eyes = []`` that was immediately overwritten.)
    eyes = profile_face_cascade.detectMultiScale(gray)
    return len(eyes)
def processVideo(videoPath , facesPath , postID , pool):
    """Count frontal/profile face detections over every frame of one video.

    Runs the two cascade detectors for each frame on the worker *pool*, then
    appends one comma-separated summary line (pickled) to the faceNumber log.
    ``facesPath`` is accepted but unused by this function. (Python 2 code.)
    """
    cap = cv2.VideoCapture(videoPath)
    totFrames = 0
    flaggedFrames = 0
    # NOTE: shadows the module-level ``faces`` folder-name constant.
    faces = 0
    profiles = 0
    i = 0
    while True:
        ret, frame = cap.read()
        if ret:
            procs = []
            totFrames += 1
            cv2.waitKey(20)
            # Run both detectors concurrently on the pool.
            f = pool.apply_async(process_frontal, (frame,))
            p = pool.apply_async(process_profile, (frame,))
            num_front= f.get(timeout=1)
            num_profile = p.get(timeout=1)
            faces+= num_front
            profiles+=num_profile
            # A frame counts as "flagged" if either detector fired.
            if(num_front>0 or num_profile>0):
                flaggedFrames+=1
        else:
            # End of video: append a pickled summary line and stop.
            logline = str(postID) + "," + str(totFrames) + "," + str(flaggedFrames) + "," + str(faces) + ","+ str(profiles)
            print logline
            logfile = open(faceNumber, 'a+')
            cPickle.dump(logline , logfile);
            logfile.close()
            break
def getVisited():
    """Load the list of already-processed directories.

    If the bookkeeping file does not exist yet, create it with an empty
    pickled list and return []. File handles are now closed deterministically
    (the original leaked both the read and the fallback write handle).
    """
    try:
        with open(visitedList, 'rb') as f:
            return pickle.load(f)
    except IOError:
        # First run: create the file so later loads succeed.
        with open(visitedList, 'a+b') as f:
            pickle.dump([], f)
        return []
def updateVisited(visited):
    """Persist the list of processed directories to the bookkeeping file."""
    handle = open(visitedList, 'wb')
    try:
        pickle.dump(visited, handle)
    finally:
        handle.close()
def walkLevel1Dir(root):
    """Return (dirnames, filenames) for the top level of *root*.

    Fixes two defects in the original: it only returned at the start of the
    *second* os.walk iteration (so it returned None when the tree had a
    single entry, crashing the caller's tuple unpack), and it initialised an
    unused ``filesList`` variable (a typo for ``fileList``).
    """
    for _path, dirs, files in os.walk(root):
        # Only the first (top-level) entry is needed.
        return dirs, files
    return [], []
def getPopularFile(rootDir):
    """Load and return the parsed ``popular.json`` found in *rootDir*.

    Uses a ``with`` block so the file handle is closed (the original leaked it).
    """
    with open(rootDir + '/popular.json', 'r') as f:
        return json.load(f)
def getPopularPosts(popular , listFile):
    """Collect post IDs whose loop count exceeds ``loopThreshold``.

    The resulting list is also pickled to *listFile* for later reuse.
    """
    posts = [
        record['postId']
        for record in popular['data']['records']
        if record['loops']['count'] > loopThreshold
    ]
    with open(listFile, 'wb') as f:
        pickle.dump(posts, f)
    return posts
def getFaces(popular , faces):
    """Extract (video path, post id) pairs from the popular feed.

    For each record that has a ``videoDashUrl``, the URL scheme and any query
    string are stripped, leaving host+path. ``faces`` is accepted for
    interface compatibility but unused. Returns two parallel lists.
    """
    vid_paths = []
    post_ids = []
    for record in popular['data']['records']:
        url = record['videoDashUrl']
        if url is not None:
            # "scheme://host/path?query" -> "host/path"
            without_scheme = url.split('//')[1]
            vid_paths.append(without_scheme.split('?')[0])
            post_ids.append(record['postId'])
    return vid_paths, post_ids
def getVidPaths():
    # NOTE(review): ``vidpaths`` is not defined anywhere in this module, so
    # calling this function raises NameError -- confirm where it should come
    # from (it appears to be unused by the main loop below).
    with open(vidpaths) as f:
        content = f.readlines()
    return content
# Main loop: processes one unvisited directory per invocation; the script is
# re-triggered periodically by a cron job.
if __name__ == '__main__':
    dirs,files = walkLevel1Dir(root)
    visited = getVisited()
    pool = mp.Pool(processes=8)
    for d in dirs:
        if d not in visited:
            faceDir = root + d + "/" + faces
            selectedList = faceDir + "/" + selected
            if not os.path.exists(faceDir):
                os.makedirs(faceDir)
            dataRoot = root + d
            popular = getPopularFile(dataRoot)
            selectedPosts = getPopularPosts(popular , selectedList)
            # Mark the directory processed before analysing, so a crash does
            # not cause it to be retried forever.
            visited.append(d)
            updateVisited(visited)
            # NOTE(review): ``faces`` here is the module-level folder-name
            # constant, not a detection list -- getFaces ignores it anyway.
            paths, posts = getFaces(popular , faces)
            for i in range(len(posts)):
                if posts[i] in selectedPosts:
                    videoPath = root + d + "/videos/" + paths[i]
                    if os.path.exists(videoPath):
                        print "Processing Post ID %d with url %s"% (posts[i], paths[i])
                        processVideo(videoPath , faceDir , posts[i] , pool)
            # NOTE(review): original indentation was lost; this break is
            # placed to stop after one directory, matching the "runs only
            # once" comment above -- confirm against the original source.
            break
|
[
"sagarjoglekar@nmsdps000222.nms.kcl.ac.uk"
] |
sagarjoglekar@nmsdps000222.nms.kcl.ac.uk
|
98b9bfddb589209b9f3356b2bfc923ee25586b28
|
be28837967683f1a5edea2e0ccd29d76867e970c
|
/proxy.py
|
920071f797091148eb6d2e31f7c47021ee516f3f
|
[] |
no_license
|
aaronriekenberg/pyasio
|
c6d653d1d649857644d2b2bcaddb4cb7867117cb
|
bcfc32add36b1e54a8d19f76ab65c50a3094144b
|
refs/heads/master
| 2020-06-04T00:51:12.344228
| 2014-07-27T11:46:34
| 2014-07-27T11:46:34
| 3,299,356
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,371
|
py
|
#!/usr/bin/env python3.3
# Copyright (C) 2012 Aaron Riekenberg (aaron.riekenberg@gmail.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asio
import logging
import sys
MAX_READ_BYTES = 256 * 1024
def createLogger():
    """Create and return the 'proxy' logger with a console handler attached."""
    log = logging.getLogger('proxy')
    log.setLevel(logging.INFO)
    handler = logging.StreamHandler()
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    log.addHandler(handler)
    return log
logger = createLogger()
class Connection:
    """One proxied connection: a client<->proxy socket paired with a
    proxy<->remote socket.

    After a successful connect, data read from either side is forwarded to
    the other; any error, EOF or peer closure tears down both sockets.
    """
    def __init__(self, ioService, clientToProxySocket,
                 remoteAddress, remotePort):
        super().__init__()
        self.__ioService = ioService
        self.__clientToProxySocket = clientToProxySocket
        # Human-readable "peer -> local" string used in log messages.
        self.__clientToProxyString = '{} -> {}'.format(
            clientToProxySocket.getpeername(),
            clientToProxySocket.getsockname())
        self.__proxyToRemoteSocket = ioService.createAsyncSocket()
        # Filled in once the remote connect succeeds.
        self.__proxyToRemoteString = ''
        self.__remoteAddress = remoteAddress
        self.__remotePort = remotePort
        # Non-None while a connect attempt is pending; doubles as the
        # "not yet timed out" flag in the callbacks below.
        self.__connectTimer = None
    def start(self):
        """Begin the async connect to the remote and arm a 5s timeout timer."""
        self.__proxyToRemoteSocket.asyncConnect(
            (self.__remoteAddress, self.__remotePort),
            self.__connectCallback)
        self.__connectTimer = self.__ioService.scheduleTimer(
            deltaTimeSeconds = 5,
            callback = self.__connectTimeoutTimerPop)
    def close(self):
        """Close both sockets, logging a disconnect for each open one."""
        if (not self.__clientToProxySocket.closed()):
            if (len(self.__clientToProxyString) > 0):
                logger.info('disconnect {} (fd={})'.format(
                    self.__clientToProxyString,
                    self.__clientToProxySocket.fileno()))
            self.__clientToProxySocket.close()
        if (not self.__proxyToRemoteSocket.closed()):
            if (len(self.__proxyToRemoteString) > 0):
                logger.info('disconnect {} (fd={})'.format(
                    self.__proxyToRemoteString,
                    self.__proxyToRemoteSocket.fileno()))
            self.__proxyToRemoteSocket.close()
    def __connectTimeoutTimerPop(self):
        # Timer fired before the connect completed: give up on the connection.
        if (self.__connectTimer is not None):
            logger.info('connect timed out')
            self.__connectTimer = None
            self.close()
    def __connectCallback(self, error):
        if (self.__connectTimer is None):
            # connection has already timed out
            self.close()
        else:
            self.__connectTimer.cancel()
            self.__connectTimer = None
            if (error):
                logger.info('connect error: {}'.format(error))
                self.close()
            else:
                self.__proxyToRemoteString = '{} -> {}'.format(
                    self.__proxyToRemoteSocket.getpeername(),
                    self.__proxyToRemoteSocket.getsockname())
                logger.info('connect {} (fd={})'.format(
                    self.__proxyToRemoteString,
                    self.__proxyToRemoteSocket.fileno()))
                # Start the bidirectional read/forward pumps.
                self.__clientToProxySocket.asyncRead(
                    MAX_READ_BYTES,
                    self.__readFromClientCallback)
                self.__proxyToRemoteSocket.asyncRead(
                    MAX_READ_BYTES,
                    self.__readFromRemoteCallback)
    def __readFromClientCallback(self, data, error):
        # Forward client bytes to the remote; tear down on error/EOF or if
        # the other side is already gone.
        if self.__proxyToRemoteSocket.closed():
            self.close()
        elif (error):
            self.close()
        elif not data:
            self.close()
        else:
            self.__proxyToRemoteSocket.asyncWriteAll(data, self.__writeToRemoteCallback)
    def __readFromRemoteCallback(self, data, error):
        # Forward remote bytes to the client; mirror of the callback above.
        if self.__clientToProxySocket.closed():
            self.close()
        elif (error):
            self.close()
        elif not data:
            self.close()
        else:
            self.__clientToProxySocket.asyncWriteAll(data, self.__writeToClientCallback)
    def __writeToRemoteCallback(self, error):
        # Remote write finished: resume reading from the client (one
        # outstanding read/write per direction at a time).
        if self.__clientToProxySocket.closed():
            self.close()
        elif (error):
            self.close()
        else:
            self.__clientToProxySocket.asyncRead(MAX_READ_BYTES, self.__readFromClientCallback)
    def __writeToClientCallback(self, error):
        # Client write finished: resume reading from the remote.
        if self.__proxyToRemoteSocket.closed():
            self.close()
        elif (error):
            self.close()
        else:
            self.__proxyToRemoteSocket.asyncRead(MAX_READ_BYTES, self.__readFromRemoteCallback)
class Acceptor:
    """Listens on a local address and spawns a Connection for each accept."""
    def __init__(self, ioService,
                 localAddress, localPort,
                 remoteAddress, remotePort):
        super().__init__()
        self.__ioService = ioService
        self.__localAddress = localAddress
        self.__localPort = localPort
        # Every accepted client is proxied to this remote endpoint.
        self.__remoteAddress = remoteAddress
        self.__remotePort = remotePort
        self.__asyncSocket = ioService.createAsyncSocket();
    def start(self):
        """Bind, listen and begin accepting asynchronously."""
        self.__asyncSocket.setReuseAddress()
        self.__asyncSocket.bind((self.__localAddress, self.__localPort))
        self.__asyncSocket.listen()
        self.__asyncSocket.asyncAccept(self.__acceptCallback)
        logger.info('listening on {} (fd={})'.format(
            self.__asyncSocket.getsockname(),
            self.__asyncSocket.fileno()))
    def __acceptCallback(self, asyncSocket, error):
        # On each successful accept, start a proxied Connection, then re-arm
        # the accept so the listener keeps running.
        if ((not error) and (asyncSocket is not None)):
            logger.info('accept {} -> {} (fd={})'.format(
                asyncSocket.getpeername(),
                asyncSocket.getsockname(),
                asyncSocket.fileno()))
            Connection(
                self.__ioService, asyncSocket,
                self.__remoteAddress, self.__remotePort).start()
        self.__asyncSocket.asyncAccept(self.__acceptCallback)
def parseAddrPortString(addrPortString):
    """Split an "address:port" string into an (address, int port) pair."""
    parts = addrPortString.split(':', 1)
    return (parts[0], int(parts[1]))
def printUsage():
    """Log the command-line usage string for this script."""
    usage = 'Usage: {} <listen addr> [<listen addr> ...] <remote addr>'
    logger.error(usage.format(sys.argv[0]))
def main():
    """Parse listen/remote endpoints from argv, start acceptors and run.

    Usage: every argument but the last is a local "addr:port" to listen on;
    the last argument is the remote "addr:port" to proxy to.
    """
    if (len(sys.argv) < 3):
        printUsage()
        sys.exit(1)
    localAddressPortList = map(parseAddrPortString, sys.argv[1:-1])
    (remoteAddress, remotePort) = parseAddrPortString(sys.argv[-1])
    ioService = asio.createAsyncIOService()
    logger.info('ioService = {}'.format(ioService))
    # One Acceptor per local listen address, all forwarding to the same remote.
    for (localAddress, localPort) in localAddressPortList:
        Acceptor(ioService = ioService,
                 localAddress = localAddress,
                 localPort = localPort,
                 remoteAddress = remoteAddress,
                 remotePort = remotePort).start()
    logger.info('remote address {}'.format((remoteAddress, remotePort)))
    ioService.run()
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Exit quietly on Ctrl-C.
        pass
|
[
"aaron.riekenberg@gmail.com"
] |
aaron.riekenberg@gmail.com
|
b6baa32c9ed582a91b962f1f5b16b34da60eb58e
|
e0483caa51a6a601ea964cfe418b1bdce6fc7dfc
|
/challenge4/calculator.py
|
cb4b0ec56de2db71db192496c19a4f82b7869a30
|
[] |
no_license
|
xiajiezai/shiyanlou-001
|
3fb69496d74b16c7c7ad14ddaaf91a885e190089
|
86f397cdaeb00e90f5630a5169feba14f7ea490a
|
refs/heads/master
| 2021-09-06T23:10:29.704955
| 2018-02-13T08:28:54
| 2018-02-13T08:28:54
| 111,987,646
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,115
|
py
|
# -*- coding: utf-8 -*-
import sys
import csv
import queue
from multiprocessing import Process, Queue
from collections import namedtuple
# One row of the income-tax quick-lookup table: the lower bound of the
# taxable-income bracket, the tax rate, and the quick-deduction amount.
IncomeTaxQuickLookupItem = namedtuple(
    'IncomeTaxQuickLookupItem',
    ['TaxableBracket','TaxRate','Subtractor']
)
# Monthly tax-exempt threshold.
Threshold=3500
# Brackets ordered from highest to lowest so the first match wins.
QUICK_LOOKUP=[
    IncomeTaxQuickLookupItem(80000,0.45,13505),
    IncomeTaxQuickLookupItem(55000,0.35,5505),
    IncomeTaxQuickLookupItem(35000,0.3,2755),
    IncomeTaxQuickLookupItem(9000,0.25,1005),
    IncomeTaxQuickLookupItem(4500,0.2,555),
    IncomeTaxQuickLookupItem(1500,0.1,105),
    IncomeTaxQuickLookupItem(0,0.03,0)
]
# Queues connecting the pipeline stages: parsed user rows in, computed rows out.
q_user = Queue()
q_result=Queue()
# Parses command-line options: -c <config>, -d <userdata>, -o <output>.
class Args(object):
    """Thin wrapper over sys.argv exposing the three expected option values."""

    def __init__(self):
        # Everything after the program name.
        self.args = sys.argv[1:]

    def _value_after_option(self, option):
        """Return the token following *option*; print an error (and return
        None) when the option or its value is missing."""
        try:
            position = self.args.index(option)
            return self.args[position + 1]
        except (ValueError, IndexError):
            # NOTE(review): falls through returning None instead of exiting;
            # downstream code will then fail on a None path -- confirm intended.
            print('Parameter Error')

    @property
    def config_path(self):
        """Path supplied with -c (the config file)."""
        return self._value_after_option('-c')

    @property
    def userdata_path(self):
        """Path supplied with -d (the user data file)."""
        return self._value_after_option('-d')

    @property
    def export_path(self):
        """Path supplied with -o (the output CSV)."""
        return self._value_after_option('-o')
# Instantiate args at import time so the classes below can read the paths.
args=Args()
# Reads config.cfg into a {key: float} mapping and exposes derived values.
class Config(object):
    """Social-insurance configuration loaded from the -c file."""

    def __init__(self):
        # Parsed key/value pairs from the config file.
        self.config = self._read_config()

    def _read_config(self):
        """Parse "key = value" lines from the config file into a float dict."""
        parsed = {}
        with open(args.config_path) as handle:
            for raw_line in handle.readlines():
                key, value = raw_line.strip().split('=')
                try:
                    parsed[key.strip()] = float(value.strip())
                except ValueError:
                    print('Parameter Error')
                    exit()
        return parsed

    def _get_config(self, key):
        """Look up *key* in the parsed config, aborting if it is missing."""
        try:
            return self.config[key]
        except KeyError:
            print('Config Error')
            exit()

    @property
    def insurance_base_threshold(self):
        # Lower bound of the social-insurance contribution base.
        return self._get_config('JiShuL')

    @property
    def insurance_base_ceiling(self):
        # Upper bound of the social-insurance contribution base.
        return self._get_config('JiShuH')

    @property
    def insurance_rate(self):
        # Combined rate of the five insurances plus the housing fund.
        return sum([
            self._get_config('YangLao'),
            self._get_config('YiLiao'),
            self._get_config('ShiYe'),
            self._get_config('GongShang'),
            self._get_config('ShengYu'),
            self._get_config('GongJiJin'),
        ])
config=Config()
# Input: userdata file; output: (employee number, income) tuples on q_user.
class UserData(Process):
    # UserData inherits Process; the default __init__ is sufficient here.
    def _read_users_data(self):
        """Yield (employee_number, income) tuples parsed from the -d file."""
        userdata_path=args.userdata_path
        with open(userdata_path) as f:
            for line in f.readlines():
                EmployeeNumber,income_string = line.strip().split(',')
                try:
                    income=int(income_string)
                except ValueError:
                    # Non-numeric income field: abort the whole program.
                    print('Parameter Error')
                    exit()
                yield (EmployeeNumber, income)
    def run(self):
        """Push every parsed user row onto the shared q_user queue."""
        for data in self._read_users_data():
            q_user.put(data)
# Calculate earnings-after-tax for each queued user row.
class IncomeTaxCalculator(Process):
    @staticmethod
    def calc_social_insurance(income):
        """Return the social-insurance payment, clamping the contribution
        base between the configured threshold and ceiling."""
        if income<config.insurance_base_threshold:
            return config.insurance_base_threshold*config.insurance_rate
        if income>config.insurance_base_ceiling:
            return config.insurance_base_ceiling*config.insurance_rate
        return income*config.insurance_rate
    @classmethod
    def calc_EAT(cls, income):
        """Return (tax, earnings-after-tax) as strings with 2 decimals.

        Uses the quick-lookup table: the first bracket whose lower bound is
        exceeded determines the rate and quick-deduction amount.
        """
        # classmethod so calc_social_insurance can be reached via cls.
        social_insurance=cls.calc_social_insurance(income)
        EarningsAfterInsurance=income- social_insurance
        Payable=EarningsAfterInsurance- Threshold
        if Payable<=0:
            # Below the tax threshold: no tax due.
            return '0.00', '{:.2f}'.format(EarningsAfterInsurance)
        for item in QUICK_LOOKUP:
            if Payable > item.TaxableBracket:
                TAX = Payable*item.TaxRate-item.Subtractor
                EAT=EarningsAfterInsurance-TAX
                return '{:.2f}'.format(TAX), '{:.2f}'.format(EAT)
    def calc_for_all_userdata(self):
        """Yield [number, income, insurance, tax, salary] rows from q_user."""
        while True:
            try:
                # Small timeout so the producer has a chance to fill the queue
                # before an Empty ends the loop.
                EmployeeNumber, income=q_user.get(timeout=1)
            except queue.Empty:
                return
            data=[EmployeeNumber,income]
            # One row at a time, so no extra loop is needed here.
            social_insurance='{:.2f}'.format(self.calc_social_insurance(income))
            TAX, Salary=self.calc_EAT(income)
            data+= [social_insurance, TAX, Salary]
            yield data
    def run(self):
        """Drain q_user, compute each row and push the result onto q_result."""
        for data in self.calc_for_all_userdata():
            q_result.put(data)
class export_to_file(Process):
    """Drain q_result and write each row to the -o CSV file."""
    def run(self):
        with open(args.export_path, 'w', newline='') as f:
            # Create the writer once, not per row (the original rebuilt it on
            # every loop iteration). csv handles the field separators for us.
            writer = csv.writer(f)
            while True:
                try:
                    item = q_result.get(timeout=1)
                except queue.Empty:
                    # No more results after a 1-second wait: we are done.
                    return
                writer.writerow(item)
if __name__=='__main__':
    workers = [
        UserData(),
        IncomeTaxCalculator(),
        export_to_file()
    ]
    for worker in workers:
        # NOTE(review): calls run() directly, so each stage executes
        # sequentially in THIS process rather than in child processes
        # (Process.start() would fork). It works because each stage fully
        # drains/fills the queues before the next begins -- confirm whether
        # true multiprocessing was intended.
        worker.run()
|
[
"81623405@qq.com"
] |
81623405@qq.com
|
a1cd3840fddf67f5b0508511756104705109ea43
|
f6fb269abf1d9c01d4182359faf34dd748bf4326
|
/h2o-py/tests/testdir_algos/glm/pyunit_cv_cars_glm.py
|
cd0d5a0a2dc0fd977089b8f882c2840ad8a1a3be
|
[
"Apache-2.0"
] |
permissive
|
anwjones/h2o-3
|
0c2a71125e4ec56feb6d7d7a3b8c0a83ff75007f
|
074162a1432ef2f18ac8fa277d3cb0303b720fba
|
refs/heads/master
| 2021-01-15T13:23:14.015010
| 2015-12-09T21:03:17
| 2015-12-09T21:03:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,228
|
py
|
from __future__ import print_function
from builtins import range
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
import random
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
def cv_cars_glm():
    """Exercise GLM cross-validation options on the cars dataset.

    Covers: Modulo fold assignment reproducibility, Random fold assignment
    non-reproducibility, fold_column, keep_cross_validation_predictions,
    nfolds=0 equivalence, and several expected-error cases.
    """
    # read in the dataset and construct training set (and validation set)
    cars = h2o.import_file(path=pyunit_utils.locate("smalldata/junit/cars_20mpg.csv"))
    # choose the type model-building exercise (multinomial classification or regression). 0:regression, 1:binomial,
    # 2:poisson
    problem = random.sample(list(range(3)),1)[0]
    # pick the predictors and response column, along with the correct family
    predictors = ["displacement","power","weight","acceleration","year"]
    if problem == 1 :
        response_col = "economy_20mpg"
        family = "binomial"
        cars[response_col] = cars[response_col].asfactor()
    elif problem == 2 :
        family = "poisson"
        response_col = "cylinders"
    else :
        family = "gaussian"
        response_col = "economy"
    print("Distribution: {0}".format(family))
    print("Response column: {0}".format(response_col))
    ## cross-validation
    # 1. check that cv metrics are the same over repeated "Modulo" runs
    nfolds = random.randint(3,10)
    glm1 = H2OGeneralizedLinearEstimator(nfolds=nfolds, family=family, fold_assignment="Modulo")
    glm1.train(x=predictors, y=response_col, training_frame=cars)
    glm2 = H2OGeneralizedLinearEstimator(nfolds=nfolds, family=family, fold_assignment="Modulo")
    glm2.train(x=predictors, y=response_col, training_frame=cars)
    pyunit_utils.check_models(glm1, glm2, True)
    # 2. check that cv metrics are different over repeated "Random" runs
    nfolds = random.randint(3,10)
    glm1 = H2OGeneralizedLinearEstimator(nfolds=nfolds, family=family, fold_assignment="Random")
    glm1.train(x=predictors, y=response_col, training_frame=cars)
    glm2 = H2OGeneralizedLinearEstimator(nfolds=nfolds, family=family, fold_assignment="Random")
    glm2.train(x=predictors, y=response_col, training_frame=cars)
    # BUGFIX: the original put ``assert False`` inside the try and caught
    # AssertionError, so this check could never fail. check_models is expected
    # to raise AssertionError when the models differ; if it does NOT raise,
    # fail with a non-AssertionError so the failure is not swallowed.
    try:
        pyunit_utils.check_models(glm1, glm2, True)
    except AssertionError:
        pass
    else:
        raise RuntimeError("Expected models to be different over repeated Random runs")
    # 3. folds_column
    num_folds = random.randint(2,5)
    fold_assignments = h2o.H2OFrame([[random.randint(0,num_folds-1) for f in range(cars.nrow)]])
    fold_assignments.set_names(["fold_assignments"])
    cars = cars.cbind(fold_assignments)
    glm = H2OGeneralizedLinearEstimator(family=family, keep_cross_validation_predictions=True)
    glm.train(x=predictors, y=response_col, training_frame=cars, fold_column="fold_assignments")
    num_cv_models = len(glm._model_json['output']['cross_validation_models'])
    assert num_cv_models==num_folds, "Expected {0} cross-validation models, but got " \
                                     "{1}".format(num_folds, num_cv_models)
    # NOTE(review): cv_model1/cv_model2 only verify that the CV models are
    # retrievable; their values are otherwise unused.
    cv_model1 = h2o.get_model(glm._model_json['output']['cross_validation_models'][0]['name'])
    cv_model2 = h2o.get_model(glm._model_json['output']['cross_validation_models'][1]['name'])
    # 4. keep_cross_validation_predictions
    cv_predictions = glm1._model_json['output']['cross_validation_predictions']
    assert cv_predictions is None, "Expected cross-validation predictions to be None, but got {0}".format(cv_predictions)
    cv_predictions = glm._model_json['output']['cross_validation_predictions']
    assert len(cv_predictions)==num_folds, "Expected the same number of cross-validation predictions " \
                                           "as folds, but got {0}".format(len(cv_predictions))
    # 2. nfolds = 0
    glm1 = H2OGeneralizedLinearEstimator(nfolds=0, family=family)
    glm1.train(x=predictors, y=response_col, training_frame=cars)
    # check that this is equivalent to no nfolds
    glm2 = H2OGeneralizedLinearEstimator(family=family)
    glm2.train(x=predictors, y=response_col, training_frame=cars)
    pyunit_utils.check_models(glm1, glm2)
    # 3. cross-validation and regular validation attempted
    glm = H2OGeneralizedLinearEstimator(nfolds=random.randint(3,10), family=family)
    glm.train(x=predictors, y=response_col, training_frame=cars, validation_frame=cars)
    ## error cases
    # 1. nfolds == 1 or < 0
    try:
        glm = H2OGeneralizedLinearEstimator(nfolds=random.sample([-1,1], 1)[0], family=family)
        glm.train(x=predictors, y=response_col, training_frame=cars)
        assert False, "Expected model-build to fail when nfolds is 1 or < 0"
    except EnvironmentError:
        assert True
    # 2. more folds than observations
    try:
        glm = H2OGeneralizedLinearEstimator(nfolds=cars.nrow+1, family=family, fold_assignment="Modulo")
        glm.train(x=predictors, y=response_col, training_frame=cars)
        assert False, "Expected model-build to fail when nfolds > nobs"
    except EnvironmentError:
        assert True
    # 3. fold_column and nfolds both specified
    try:
        glm = H2OGeneralizedLinearEstimator(nfolds=3, family=family)
        glm.train(x=predictors, y=response_col, training_frame=cars, fold_column="fold_assignments")
        assert False, "Expected model-build to fail when fold_column and nfolds both specified"
    except EnvironmentError:
        assert True
if __name__ == "__main__":
    # Run under the pyunit harness when executed directly; when imported by
    # the test runner, execute the test immediately instead.
    pyunit_utils.standalone_test(cv_cars_glm)
else:
    cv_cars_glm()
|
[
"spnrpa@gmail.com"
] |
spnrpa@gmail.com
|
fb6b503e36a385533a82379272bd1d8194c289f0
|
f66a3f3a2e968dee45ad339fe4237b556566e3a0
|
/Programmers/LEVEL1/AddMissingNumbers.py
|
dbe13ae3645ec9acd7c3e16c05af666ce7c178dc
|
[] |
no_license
|
Albert-learner/Algorithm
|
358e3b8496c3ea419b7f8dd74a0899b116bf1028
|
9fbb2c94ae35161ccc31e2c5976f6fdf426eea8d
|
refs/heads/main
| 2023-08-04T15:48:21.041206
| 2023-07-29T12:37:31
| 2023-07-29T12:37:31
| 209,938,800
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 486
|
py
|
# 49. Add up the missing numbers (digits 0-9 absent from the input).
def solution(numbers):
    """Return the sum of the digits 0-9 that do not appear in *numbers*."""
    return sum(digit for digit in range(10) if digit not in numbers)
numbers_1 = [1, 2, 3, 4, 6, 7, 8, 0]
numbers_2 = [5, 8, 4, 0, 6, 7, 9]
# Expected output: 14 (missing 5, 9) and 6 (missing 1, 2, 3).
print(solution(numbers_1))
print(solution(numbers_2))
def solution_best(numbers):
    """Return 45 (the sum 0+1+...+9) minus the sum of *numbers*."""
    return 45 - sum(numbers)
# Same expected output as above (14 and 6) via the arithmetic shortcut.
print(solution_best(numbers_1))
print(solution_best(numbers_2))
|
[
"jaebinlee96@kookmin.ac.kr"
] |
jaebinlee96@kookmin.ac.kr
|
ebfd807ea418dbad40f334b1a76958d71bf45ca9
|
44ee8130beb94b4d5feed7eb739352b25ca5426e
|
/tai64n.py
|
559c39dbef9bf11262c81a874dfb46bae0d4dadc
|
[] |
no_license
|
SigNote/SigNote-python
|
727116c15393842153a477323b1bde80b6f1d1e6
|
884fd8659e6a9f40a2c3dd898ed0c8b224f5b2b5
|
refs/heads/master
| 2020-03-16T13:24:09.046004
| 2018-05-12T04:39:14
| 2018-05-12T04:39:14
| 132,689,033
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,231
|
py
|
"""
TAI64N encoding and decoding.
TAI64N encodes nanosecond-accuracy timestamps and is supported by logstash.
@see: U{http://cr.yp.to/libtai/tai64.html}.
"""
from __future__ import unicode_literals
import struct
# Big-endian layout: 8-byte seconds field followed by 4-byte nanoseconds.
_STRUCTURE = b">QI"
_OFFSET = (2**62) + 10 # last 10 are leap seconds
def encode(timestamp):
    """
    Convert seconds since epoch to a TAI64N timestamp.

    @param timestamp: Seconds since UTC Unix epoch as C{float}.

    @return: TAI64N-encoded time as 12 packed bytes (C{bytes}): an 8-byte
        TAI second count followed by a 4-byte nanosecond count.
    """
    seconds = int(timestamp)
    # The fractional part becomes the nanosecond field.
    nanoseconds = int((timestamp - seconds) * 1000000000)
    # Shift into the TAI64 range (2**62) plus 10 leap seconds.
    seconds = seconds + _OFFSET
    return struct.pack(_STRUCTURE, seconds, nanoseconds)
def decode(tai64n):
    """
    Convert a TAI64N timestamp to seconds since epoch.

    Note that dates before 2013 may not decode accurately due to leap second
    issues. If you need correct decoding for earlier dates you can try the
    tai64n package available from PyPI (U{https://pypi.python.org/pypi/tai64n}).

    @param tai64n: TAI64N-encoded time as 12 packed bytes (C{bytes}), as
        produced by L{encode}.

    @return: Seconds since UTC Unix epoch as C{float}.
    """
    seconds, nanoseconds = struct.unpack(_STRUCTURE, tai64n)
    seconds -= _OFFSET
    return seconds + (nanoseconds / 1000000000.0)
|
[
"kt@connectfree.co.jp"
] |
kt@connectfree.co.jp
|
b40548855d8ae7f86fa9536322b5a2c63b67efc4
|
4abbfba4d1b313a84bff46cb091f64010f7a13b7
|
/tutorial/items.py
|
66e9233f0975af58d46265915822d2e2889e3675
|
[] |
no_license
|
bigsuperangel/scrapy
|
ba0e6ff22660325a4f7ae4aaab8943e91758c306
|
b98e5a377f0d9e3fbb876cb0598bbcd2387b77df
|
refs/heads/master
| 2020-05-24T04:39:04.029823
| 2017-03-13T12:04:10
| 2017-03-13T12:04:10
| 84,822,306
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 331
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class BooksItem(scrapy.Item):
    """Container for one scraped book entry: date, title and image fields."""
    date = scrapy.Field()
    title = scrapy.Field()
    # image_urls/images follow the naming convention used by scrapy's images
    # pipeline -- confirm against this project's pipeline configuration.
    image_urls = scrapy.Field()
    images = scrapy.Field()
    image_paths = scrapy.Field()
|
[
"bigsuperangel@gmail.com"
] |
bigsuperangel@gmail.com
|
f060f8794f263be4b6a477a6cde368a4931e8605
|
4e6e7d7df76508c646eaa661da2007f1b126bf9e
|
/RLLAB/utils/dijkstra.py
|
9509ad8fa280d7ba54760bb6bb092654183b3c19
|
[] |
no_license
|
maxiaoba/QMDPNET
|
7448a5a40bc7944f4c526940d0ce4b513b5795a3
|
92572cb634a6f8cdc63bffbf91f55a756e8efc83
|
refs/heads/master
| 2021-04-28T03:19:18.622290
| 2018-07-26T01:29:07
| 2018-07-26T01:29:07
| 122,137,564
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,223
|
py
|
# Dijkstra's algorithm for shortest paths
# David Eppstein, UC Irvine, 4 April 2002
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/117228
from utils.priodict import priorityDictionary
def Dijkstra(G,start,end=None):
    """
    Find shortest paths from the start vertex to all vertices nearer than or equal to the end.

    The input graph G is assumed to have the following representation:
    A vertex can be any object that can be used as an index into a dictionary.
    G is a dictionary, indexed by vertices. For any vertex v, G[v] is itself a dictionary,
    indexed by the neighbors of v. For any edge v->w, G[v][w] is the length of the edge.

    The output is a pair (D,P) where D[v] is the distance from start to v and P[v] is the
    predecessor of v along the shortest path from s to v.

    Dijkstra's algorithm is only guaranteed to work correctly when all edge lengths are positive.
    This code does not verify this property for all edges (only the edges examined until the end
    vertex is reached), but will correctly compute shortest paths even for some graphs with negative
    edges, and will raise an exception if it discovers that a negative edge has caused it to make a mistake.
    """
    D = {}  # dictionary of final distances
    P = {}  # dictionary of predecessors
    Q = priorityDictionary()  # estimated distances of non-final vertices
    Q[start] = 0
    # Iterating a priorityDictionary appears to yield vertices in increasing
    # order of their current estimated distance (see utils.priodict) -- the
    # algorithm's correctness depends on that; confirm against priodict.
    for v in Q:
        # v's distance is now final.
        D[v] = Q[v]
        if v == end: break
        # Relax all outgoing edges of v.
        for w in G[v]:
            vwLength = D[v] + G[v][w]
            if w in D:
                # A shorter path to an already-final vertex means a negative
                # edge misled the algorithm.
                if vwLength < D[w]:
                    raise ValueError("Dijkstra: found better path to already-final vertex")
            elif w not in Q or vwLength < Q[w]:
                Q[w] = vwLength
                P[w] = v
    return (D,P)
def shortestPath(G, start, end):
    """Return the vertices of one shortest path from *start* to *end*.

    Input conventions are the same as for Dijkstra(); the result is the
    list of vertices in order from start to end (inclusive).
    """
    D, P = Dijkstra(G, start, end)
    # Walk the predecessor chain backwards from the target...
    path = [end]
    while path[-1] != start:
        path.append(P[path[-1]])
    # ...then flip it so it reads start -> end.
    path.reverse()
    return path
# example, CLR p.528
#G = {'s': {'u':10, 'x':5},
# 'u': {'v':1, 'x':2},
# 'v': {'y':4},
# 'x':{'u':3,'v':9,'y':2},
# 'y':{'s':7,'v':6}}
#print Dijkstra(G,'s')
#print shortestPath(G,'s','v')
|
[
"maxiaoba@umich.edu"
] |
maxiaoba@umich.edu
|
e7aab5c24c1087e0f53dadb2c89678508f3f85a3
|
05f853a825f517f9d72dbfa287b22206a44a6a6b
|
/dagrevis_lv/manage.py
|
1057a590e173c9b8e9d0d7091bec14a9e366e867
|
[
"MIT"
] |
permissive
|
daGrevis/daGrevis.lv
|
6ed07a6fd20c0efb32ea81146b72f43bb560a197
|
7a7dfdacac96d4633a998a73a584f29157150457
|
refs/heads/master
| 2020-04-06T15:51:08.678766
| 2016-05-07T16:44:27
| 2016-05-07T16:44:27
| 5,551,732
| 1
| 1
| null | 2014-06-14T10:00:20
| 2012-08-25T13:48:35
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 254
|
py
|
#!/usr/bin/env python
# Django's command-line utility for administrative tasks
# (runserver, migrate, shell, ...).
import os
import sys

if __name__ == "__main__":
    # Point Django at this project's settings unless the caller already
    # selected a settings module via the environment.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dagrevis_lv.settings")

    # Imported lazily so the environment variable above is set first.
    from django.core.management import execute_from_command_line

    execute_from_command_line(sys.argv)
|
[
"dagrevis@gmail.com"
] |
dagrevis@gmail.com
|
e389942e85872870ee827f5e96c09ce2587dcc72
|
1df81fa247ae8d5ad38855d9fafb5c6338f0e206
|
/env/lib/python3.6/site-packages/orthopy/hexahedron/tools.py
|
622cc441b35460691d2219c95b50191dd86aa4d7
|
[
"MIT"
] |
permissive
|
kassiuskohvakka/CompPhys-project
|
7e8d89408bb4c7b9346f33629bdf293348a1cfb9
|
c2f688da138ec285d3d794a9cc56f6997aaf36f9
|
refs/heads/master
| 2020-05-20T21:22:37.262018
| 2019-05-23T14:14:44
| 2019-05-23T14:14:44
| 185,756,298
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 285
|
py
|
# -*- coding: utf-8 -*-
#
def write(filename, f, n=50):
    """Sample *f* on a tetrahedral mesh of the cube [-1, 1]^3 and write it out.

    Parameters
    ----------
    filename : str
        Output mesh file; the format is inferred from the extension by meshio.
    f : callable
        Maps the mesh points (array of shape (N, 3)) to N scalar values,
        stored as point data under the key 'f'.
    n : int, optional
        Number of subdivisions per axis (default 50, matching the previous
        hard-coded resolution).
    """
    import meshio
    import meshzoo
    points, cells = meshzoo.cube(-1, +1, -1, +1, -1, +1, n, n, n)
    vals = f(points)
    meshio.write(
        filename, points, {'tetra': cells}, point_data={'f': vals}
    )
|
[
"kassius.kohvakka@gmail.com"
] |
kassius.kohvakka@gmail.com
|
1b075827e6cbd24fc844d524a82e550d70dd473f
|
3a682c9375bda0f651cbaebfd3e6e8ffbeb50233
|
/indexd/urls/blueprint.py
|
7d34d75a0c328cfc1d40f781e2c58d48e80a07f0
|
[
"Apache-2.0"
] |
permissive
|
uc-cdis/indexd
|
9f28ba9a7728424b4c2a576dcb211a5f19b38039
|
c6c8a734486a690f4098a689e2d7376dbc02f162
|
refs/heads/master
| 2023-08-17T19:18:11.129685
| 2023-06-09T20:45:35
| 2023-06-09T20:45:35
| 44,071,955
| 18
| 27
|
Apache-2.0
| 2023-09-07T19:21:06
| 2015-10-11T22:20:24
|
Python
|
UTF-8
|
Python
| false
| false
| 2,711
|
py
|
import json
from flask import Blueprint, Response, request
from flask.json import jsonify
from indexd.errors import UserError
from indexd.index.drivers.query.urls import AlchemyURLsQueryDriver
blueprint = Blueprint("urls", __name__)
@blueprint.route("/q", methods=["GET"])
def query():
"""Queries indexes based on URLs
Params:
exclude (str): only include documents (did) with urls that does not match this pattern
include (str): only include documents (did) with a url matching this pattern
version (str): return only records with version number
fields (str): comma separated list of fields to return, if not specified return all fields
limit (str): max results to return
offset (str): where to start the next query from
Returns:
flask.Response: json list of matching entries
`
[
{"did": "AAAA-BB", "rev": "1ADs" "urls": ["s3://some-randomly-awesome-url"]},
{"did": "AAAA-CC", "rev": "2Nsf", "urls": ["s3://another-randomly-awesome-url"]}
]
`
"""
record_list = blueprint.driver.query_urls(**request.args.to_dict())
return Response(
json.dumps(record_list, indent=2, separators=(", ", ": ")),
200,
mimetype="application/json",
)
@blueprint.route("/metadata/q")
def query_metadata():
"""Queries indexes by URLs metadata key and value
Params:
key (str): metadata key
value (str): metadata value for key
url (str): full url or pattern for limit to
fields (str): comma separated list of fields to return, if not specified return all fields
version (str): filter only records with a version number
limit (str): max results to return
offset (str): where to start the next query from
Returns:
flask.Response: json list of matching entries
`
[
{"did": "AAAA-BB", "rev": "1ADs" "urls": ["s3://some-randomly-awesome-url"]},
{"did": "AAAA-CC", "rev": "2Nsf", "urls": ["s3://another-randomly-awesome-url"]}
]
`
"""
record_list = blueprint.driver.query_metadata_by_key(**request.args.to_dict())
return Response(
json.dumps(record_list, indent=2, separators=(", ", ": ")),
200,
mimetype="application/json",
)
@blueprint.record
def pre_config(state):
    # Runs when the blueprint is registered on the app: wire the app's
    # logger and a URL-query driver onto the blueprint for the views above.
    app = state.app
    blueprint.logger = app.logger
    blueprint.driver = AlchemyURLsQueryDriver(app.config["INDEX"]["driver"])
@blueprint.errorhandler(UserError)
def handle_user_error(err):
    # Map user-caused errors to a 400 response with a JSON {"error": ...} body.
    body = jsonify(error=str(err))
    return body, 400
|
[
"noreply@github.com"
] |
uc-cdis.noreply@github.com
|
c582cfeedf90c737ab38a2bfabdb15631df82473
|
e089fd839b03987ed540f472bdf89c0b60a33d81
|
/AlphaZeroModule/train.py
|
90c2b8b627719771536dccda1d2ea7e4f4660cae
|
[] |
no_license
|
ussrlord/GO_AI
|
18950d5c4c483698c3cdf8afca2136acb04f726d
|
61875ed346a7a53270262fc384726f32d49c02f6
|
refs/heads/master
| 2020-07-29T18:15:33.404183
| 2019-09-23T07:52:46
| 2019-09-23T07:52:46
| 209,915,282
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,665
|
py
|
# -*- coding: utf-8 -*-
"""
An implementation of the training pipeline of AlphaZeroModule for Gomoku
@author: Junxiao Song
"""
from __future__ import print_function
import random
import numpy as np
from collections import defaultdict, deque
from AlphaZeroModule.game import Board, Game
from AlphaZeroModule.mcts_pure import MCTSPlayer as MCTS_Pure
from AlphaZeroModule.mcts_alphaZero import MCTSPlayer
from AlphaZeroModule.policy_value_net import PolicyValueNet # Theano and Lasagne
class TrainPipeline():
    """Self-play training pipeline for AlphaZero-style Gomoku.

    Repeatedly generates self-play games with an MCTS player guided by the
    current policy-value network, stores symmetry-augmented
    (state, mcts_prob, winner) samples in a replay buffer, and updates the
    network on random mini-batches, periodically evaluating against a pure
    MCTS opponent.
    """
    def __init__(self, init_model=None):
        # params of the board and the game
        self.board_width = 6
        self.board_height = 6
        self.n_in_row = 4
        self.board = Board(width=self.board_width,
                           height=self.board_height,
                           n_in_row=self.n_in_row)
        self.game = Game(self.board)
        # training params
        self.learn_rate = 2e-3
        self.lr_multiplier = 1.0  # adaptively adjust the learning rate based on KL
        self.temp = 1.0  # the temperature param
        self.n_playout = 400  # num of simulations for each move
        self.c_puct = 5
        self.buffer_size = 10000
        self.batch_size = 512  # mini-batch size for training
        self.data_buffer = deque(maxlen=self.buffer_size)
        self.play_batch_size = 1
        self.epochs = 5  # num of train_steps for each update
        self.kl_targ = 0.02
        self.check_freq = 50
        self.game_batch_num = 1500
        self.best_win_ratio = 0.0
        # num of simulations used for the pure mcts, which is used as
        # the opponent to evaluate the trained policy
        self.pure_mcts_playout_num = 1000
        if init_model:
            # start training from an initial policy-value net
            self.policy_value_net = PolicyValueNet(self.board_width,
                                                   self.board_height,
                                                   model_file=init_model)
        else:
            # start training from a new policy-value net
            self.policy_value_net = PolicyValueNet(self.board_width,
                                                   self.board_height)
        self.mcts_player = MCTSPlayer(self.policy_value_net.policy_value_fn,
                                      c_puct=self.c_puct,
                                      n_playout=self.n_playout,
                                      is_selfplay=1)

    def get_equi_data(self, play_data):
        """augment the data set by rotation and flipping
        play_data: [(state, mcts_prob, winner_z), ..., ...]

        Exploits the board's 8-fold symmetry: each sample yields 4 rotations
        plus the horizontal flip of each, so one game position becomes 8
        training samples.
        """
        extend_data = []
        for state, mcts_porb, winner in play_data:
            for i in [1, 2, 3, 4]:
                # rotate counterclockwise
                equi_state = np.array([np.rot90(s, i) for s in state])
                # mcts_prob is stored flat; flipud/reshape converts it to the
                # board orientation before rotating, and the flipud/flatten on
                # append converts back to the stored layout.
                equi_mcts_prob = np.rot90(np.flipud(
                    mcts_porb.reshape(self.board_height, self.board_width)), i)
                extend_data.append((equi_state,
                                    np.flipud(equi_mcts_prob).flatten(),
                                    winner))
                # flip horizontally
                equi_state = np.array([np.fliplr(s) for s in equi_state])
                equi_mcts_prob = np.fliplr(equi_mcts_prob)
                extend_data.append((equi_state,
                                    np.flipud(equi_mcts_prob).flatten(),
                                    winner))
        return extend_data

    def collect_selfplay_data(self, n_games=1):
        """collect self-play data for training

        Plays n_games self-play games, records the last game's length in
        self.episode_len, augments the samples and extends the replay buffer.
        """
        for i in range(n_games):
            winner, play_data = self.game.start_self_play(self.mcts_player,
                                                          temp=self.temp)
            play_data = list(play_data)[:]
            self.episode_len = len(play_data)
            # augment the data
            play_data = self.get_equi_data(play_data)
            self.data_buffer.extend(play_data)

    def policy_update(self):
        """update the policy-value net

        Runs up to self.epochs gradient steps on one random mini-batch,
        early-stopping if the KL divergence from the pre-update policy
        explodes, then adapts the learning-rate multiplier toward kl_targ.
        Returns (loss, entropy) of the last step taken.
        """
        mini_batch = random.sample(self.data_buffer, self.batch_size)
        state_batch = [data[0] for data in mini_batch]
        mcts_probs_batch = [data[1] for data in mini_batch]
        winner_batch = [data[2] for data in mini_batch]
        old_probs, old_v = self.policy_value_net.policy_value(state_batch)
        for i in range(self.epochs):
            loss, entropy = self.policy_value_net.train_step(
                state_batch,
                mcts_probs_batch,
                winner_batch,
                self.learn_rate*self.lr_multiplier)
            new_probs, new_v = self.policy_value_net.policy_value(state_batch)
            # KL(old || new), with epsilon to avoid log(0)
            kl = np.mean(np.sum(old_probs * (
                np.log(old_probs + 1e-10) - np.log(new_probs + 1e-10)),
                axis=1)
            )
            if kl > self.kl_targ * 4:  # early stopping if D_KL diverges badly
                break
        # adaptively adjust the learning rate
        if kl > self.kl_targ * 2 and self.lr_multiplier > 0.1:
            self.lr_multiplier /= 1.5
        elif kl < self.kl_targ / 2 and self.lr_multiplier < 10:
            self.lr_multiplier *= 1.5
        # fraction of the outcome variance explained by the value head,
        # before and after the update (diagnostic only)
        explained_var_old = (1 -
                             np.var(np.array(winner_batch) - old_v.flatten()) /
                             np.var(np.array(winner_batch)))
        explained_var_new = (1 -
                             np.var(np.array(winner_batch) - new_v.flatten()) /
                             np.var(np.array(winner_batch)))
        print(("kl:{:.5f},"
               "lr_multiplier:{:.3f},"
               "loss:{},"
               "entropy:{},"
               "explained_var_old:{:.3f},"
               "explained_var_new:{:.3f}"
               ).format(kl,
                        self.lr_multiplier,
                        loss,
                        entropy,
                        explained_var_old,
                        explained_var_new))
        return loss, entropy

    def policy_evaluate(self, n_games=10):
        """
        Evaluate the trained policy by playing against the pure MCTS player
        Note: this is only for monitoring the progress of training

        Alternates who moves first via start_player=i % 2.
        NOTE(review): win_ratio counts win_cnt[1] as the trained player's
        wins and win_cnt[-1] as ties — presumably the trained player is
        always player 1 regardless of start_player; confirm against
        Game.start_play.
        """
        current_mcts_player = MCTSPlayer(self.policy_value_net.policy_value_fn,
                                         c_puct=self.c_puct,
                                         n_playout=self.n_playout)
        pure_mcts_player = MCTS_Pure(c_puct=5,
                                     n_playout=self.pure_mcts_playout_num)
        win_cnt = defaultdict(int)
        for i in range(n_games):
            winner = self.game.start_play(current_mcts_player,
                                          pure_mcts_player,
                                          start_player=i % 2,
                                          is_shown=0)
            win_cnt[winner] += 1
        win_ratio = 1.0*(win_cnt[1] + 0.5*win_cnt[-1]) / n_games
        print("num_playouts:{}, win: {}, lose: {}, tie:{}".format(
            self.pure_mcts_playout_num,
            win_cnt[1], win_cnt[2], win_cnt[-1]))
        return win_ratio

    def run(self):
        """run the training pipeline

        Loops for game_batch_num iterations of self-play + update; every
        check_freq iterations evaluates the policy and checkpoints the
        current (and possibly best) model.  Ctrl-C exits cleanly.
        """
        try:
            for i in range(self.game_batch_num):
                self.collect_selfplay_data(self.play_batch_size)
                print("batch i:{}, episode_len:{}".format(
                    i+1, self.episode_len))
                if len(self.data_buffer) > self.batch_size:
                    loss, entropy = self.policy_update()
                # check the performance of the current model,
                # and save the model params
                if (i+1) % self.check_freq == 0:
                    print("current self-play batch: {}".format(i+1))
                    win_ratio = self.policy_evaluate()
                    self.policy_value_net.save_model('./current_policy.model')
                    if win_ratio > self.best_win_ratio:
                        print("New best policy!!!!!!!!")
                        self.best_win_ratio = win_ratio
                        # update the best_policy
                        self.policy_value_net.save_model('./best_policy.model')
                        # once the current opponent is fully beaten, make the
                        # evaluation opponent stronger and reset the bar
                        if (self.best_win_ratio == 1.0 and
                                self.pure_mcts_playout_num < 5000):
                            self.pure_mcts_playout_num += 1000
                            self.best_win_ratio = 0.0
        except KeyboardInterrupt:
            print('\n\rquit')
if __name__ == '__main__':
    # Kick off the full self-play / training loop with the default settings.
    training_pipeline = TrainPipeline()
    training_pipeline.run()
|
[
"ussrlord@foxmail.com"
] |
ussrlord@foxmail.com
|
298ba038bcc60a5d3a1ad0f46cccc745b862288b
|
608b43bb13fab243f8fc03579389d33e26bebb59
|
/Pawn.py
|
885167676166eca68ed016684a1dcf396acc31e3
|
[] |
no_license
|
KaiHoshijo/Chess
|
03912c5d4f7844b320822e3634562d2523f9621b
|
4fff1cec23b8dcc9d53427bce886c5c121565146
|
refs/heads/master
| 2022-10-28T20:05:42.486296
| 2020-06-08T22:38:49
| 2020-06-08T22:38:49
| 270,846,758
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,260
|
py
|
import pygame
import King
import Queen
import Rook
import Bishop
import Knight
class pawn(King.king):
    """A pawn piece.

    Inherits the generic movement/drag plumbing from King.king and adds the
    pawn-specific rules: single and double first advance, diagonal captures,
    en passant, and promotion.  White pawns move toward row 0, black toward
    row 7.
    """
    def __init__(self, screen, board, image, rect, row, col, colour, queenImage, rookImage, bishopImage,knightImage, opposingKing):
        super().__init__(screen, board, image, rect, row, col, colour)
        self.firstMove = True   # still eligible for the two-square advance
        self.moveTwo = False    # True right after a two-square advance (en-passant target)
        # Piece images kept so the pawn can construct its replacement on promotion.
        self.queenImage = queenImage
        self.rookImage = rookImage
        self.bishopImage = bishopImage
        self.knightImage = knightImage
        self.opposingKing = opposingKing
        # Promotion row: white promotes at row 0, black at row 7.
        if (colour == "W"):
            self.target = 0
        else:
            self.target = 7

    # move two down
    def moveTwoDown(self, ):
        # Double advance toward row 0; only legal if the crossed squares
        # are empty ("E" per passPoint).
        allMoves = [[self.move(-2, 0), -2, 0]]
        if (self.passPoint(allMoves) != "E"): return [[False, -2, 0]]
        return allMoves

    # move two up
    def moveTwoUp(self, ):
        # Double advance toward row 7; only legal if the crossed squares
        # are empty.
        allMoves = [[self.move(2, 0), 2, 0]]
        if (self.passPoint(allMoves) != "E"): return [[False, 2, 0]]
        return allMoves

    # find available moves
    def findMoves(self, show=True, isKing=False):
        """Collect this pawn's candidate moves.

        show=True includes the forward (non-capturing) advances; show=False
        returns only the diagonal attack squares (used e.g. for check
        detection by the opposing king).
        NOTE(review): the bare `except: continue` clauses below guard the
        `piece.moveTwo` attribute access for pieces that are not pawns, but
        they also swallow any other error — an isinstance/hasattr check
        would be safer.
        """
        if (self.colour == "W"):
            # return all moves available
            if (show):
                standardMoves = [self.moveDown(["W", "B"])]
            else:
                standardMoves = []
            # move down left if enemy there
            downLeft = self.board.getPosition(self.row-1, self.col-1)
            if (downLeft != None):
                if (downLeft[0] == self.getOppositeColour() or not show):
                    standardMoves.append(self.moveDownLeft())
                # en-passant check: an enemy pawn beside us just moved two
                if (self.row == 3):
                    for piece in self.enemyPieces:
                        if (piece.getPosition() == (self.row, self.col-1)):
                            try:
                                if (piece.moveTwo):
                                    # NOTE(review): appends moveUpRight() in the
                                    # down-LEFT branch — looks like it should be
                                    # moveDownLeft(); confirm intended behavior.
                                    standardMoves.append(self.moveUpRight())
                            except:
                                continue
            # move down right if enemy there
            downRight = self.board.getPosition(self.row-1, self.col+1)
            if (downRight != None):
                if (downRight[0] == self.getOppositeColour() or not show):
                    standardMoves.append(self.moveDownRight())
                if (self.row == 3):
                    for piece in self.enemyPieces:
                        if (piece.getPosition() == (self.row, self.col+1)):
                            try:
                                if (piece.moveTwo):
                                    standardMoves.append(self.moveDownRight())
                            except:
                                continue
            # move up two if first move
            if (self.firstMove and show):
                standardMoves.append(self.moveTwoDown())
        else:
            if (show):
                standardMoves = [self.moveUp(["W", "B"])]
            else:
                standardMoves = []
            if (self.firstMove and show):
                # move up two if first move
                standardMoves.append(self.moveTwoUp())
            # move up left if enemy there
            upLeft = self.board.getPosition(self.row+1, self.col-1)
            if (upLeft != None):
                if (upLeft[0] == self.getOppositeColour() or not show):
                    standardMoves.append(self.moveUpLeft())
                # en-passant check for black (rank 4)
                if (self.row == 4):
                    for piece in self.enemyPieces:
                        if (piece.getPosition() == (self.row, self.col-1)):
                            try:
                                if (piece.moveTwo):
                                    standardMoves.append(self.moveUpLeft())
                            except:
                                continue
            # move up right if enemy there
            upRight = self.board.getPosition(self.row+1, self.col+1)
            if (upRight != None):
                if (upRight[0] == self.getOppositeColour() or not show):
                    standardMoves.append(self.moveUpRight())
                if (self.row == 4):
                    for piece in self.enemyPieces:
                        if (piece.getPosition() == (self.row, self.col+1)):
                            try:
                                if (piece.moveTwo):
                                    standardMoves.append(self.moveUpRight())
                            except:
                                continue
        return standardMoves

    # confirm move
    def confirmDragMove(self, move):
        """Finalize a drag-and-drop move.

        Returns [True, move] when the drop landed on the chosen square
        (capturing, handling en-passant removal, promotion, double-move
        bookkeeping and check highlighting), else [False, move].
        NOTE(review): if self.chosenRect is None this method falls through
        and implicitly returns None — callers presumably never hit that
        path; verify.
        """
        # ensuring that the piece only moves to a desired sport
        currentPosition = self.getPosition()
        if (self.chosenRect != None):
            # check if the mouse is touching still touching the square after the mouse let go
            if (self.rect.colliderect(self.chosenRect)):
                if (self.takePiece(self.chosenPosition)):
                    move = self.increaseMove(move)
                    self.setPosition(self.chosenPosition)
                    # Record a double advance so the enemy can capture en passant.
                    if (self.chosenPosition[0] - 2 == currentPosition[0] or self.chosenPosition[0] + 2 == currentPosition[0]):
                        self.moveTwo = True
                    # Diagonal move upward: remove an en-passant victim one row below.
                    if (self.chosenPosition == (currentPosition[0]+1, currentPosition[1]+1) or self.chosenPosition == (currentPosition[0]+1, currentPosition[1]-1)):
                        for piece in self.enemyPieces:
                            if (piece.getPosition() == (self.chosenPosition[0] - 1, self.chosenPosition[1])):
                                piece.delete()
                    # Diagonal move downward: remove an en-passant victim one row above.
                    if (self.chosenPosition == (currentPosition[0]-1, currentPosition[1]+1) or self.chosenPosition == (currentPosition[0]-1, currentPosition[1]-1)):
                        for piece in self.enemyPieces:
                            if (piece.getPosition() == (self.chosenPosition[0] + 1, self.chosenPosition[1])):
                                piece.delete()
                    # Promotion: reached the far rank, ask the player for a piece.
                    # NOTE(review): the while-loop accepts mixed-case input via
                    # .lower(), but the comparisons below use the raw string, so
                    # typing "Queen" passes validation yet promotes to nothing —
                    # confirm and consider lowering `piece` once.
                    if (self.chosenPosition[0] == self.target):
                        piece = input("Which piece do you want to become? Queen, Rook, Knight, or Bishop: ")
                        while (piece.lower() not in ['queen', 'rook', 'knight', 'bishop']):
                            piece = input("Which piece do you want to become? Queen, Rook, Knight, or Bishop: ")
                        if (piece == 'queen'):
                            sidePieces = self.sidePieces
                            enemyPieces = self.enemyPieces
                            queen = Queen.queen(self.screen, self.board, self.queenImage, self.rect, self.row, self.col, self.colour)
                            queen.setSidePieces(sidePieces)
                            queen.setEnemyPieces(enemyPieces)
                            self.sidePieces.append(queen)
                            self.delete()
                            # print(queen.sidePieces)
                        elif (piece == 'rook'):
                            sidePieces = self.sidePieces
                            enemyPieces = self.enemyPieces
                            rook = Rook.rook(self.screen, self.board, self.rookImage, self.rect, self.row, self.col, self.colour)
                            rook.setSidePieces(self.sidePieces)
                            rook.setEnemyPieces(self.enemyPieces)
                            self.sidePieces.append(rook)
                            self.delete()
                        elif (piece == 'knight'):
                            sidePieces = self.sidePieces
                            enemyPieces = self.enemyPieces
                            knight = Knight.knight(self.screen, self.board, self.knightImage, self.rect, self.row, self.col, self.colour)
                            knight.setSidePieces(self.sidePieces)
                            knight.setEnemyPieces(self.enemyPieces)
                            self.sidePieces.append(knight)
                            self.delete()
                        elif (piece == 'bishop'):
                            sidePieces = self.sidePieces
                            enemyPieces = self.enemyPieces
                            bishop = Bishop.bishop(self.screen, self.board, self.bishopImage, self.rect, self.row, self.col, self.colour)
                            bishop.setSidePieces(self.sidePieces)
                            bishop.setEnemyPieces(self.enemyPieces)
                            self.sidePieces.append(bishop)
                            self.delete()
                    # Dim the opposing king's image if this move gives check.
                    if (self.opposingKing.kingCheck()[0]):
                        self.opposingKing.image.set_alpha(100)
                    self.firstMove = False
                else:
                    self.setPosition(currentPosition)
                # resetting for later use
                # NOTE(review): `currentPositon` is misspelled, so this creates
                # a new attribute rather than clearing anything meaningful.
                self.currentPositon = None
                self.chosenRect = None
                return [True, move]
            else:
                self.setPosition(currentPosition)
                return [False, move]
|
[
"noreply@github.com"
] |
KaiHoshijo.noreply@github.com
|
e33d6251cc2f1288800dd9b790cedf351831595a
|
b0b903784c5fbf551a53b5616c713acfeff46d99
|
/jcc/cli.py
|
a3505bb098fa5db217f6b5ac1d08cc1a94fe49fe
|
[] |
no_license
|
0x0L/jcc
|
c4459e316673d41b1f0ff87c76c6f49453a44b1e
|
b7a210becc34be931ccc4c0a60cb24c7c2e702ec
|
refs/heads/master
| 2020-03-12T06:48:08.612294
| 2018-04-21T13:18:52
| 2018-04-21T17:05:16
| 130,493,405
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,608
|
py
|
import argparse
import os
import sys
from .client import JupyterContentsClient
from . import __version__
def main():
    """Entry point for the jcc command line: parse arguments and run the
    requested GET (download) or PUT (upload) transfers."""
    version = '%(prog)s ' + __version__

    parser = argparse.ArgumentParser(
        description='Jupyter Contents Client command line interface')
    parser.add_argument('VERB', choices=['GET', 'PUT'], type=str.upper,
                        help='specify which action to perform')
    parser.add_argument('FILE', nargs='*',
                        help='list of files or directories to transfer')
    parser.add_argument('--url', nargs='?', default=os.environ.get('JCC_URL'),
                        help='defaults to the JCC_URL environment variable')
    parser.add_argument('--token', nargs='?', default=os.environ.get('JCC_TOKEN'),
                        help='defaults to the JCC_TOKEN environment variable')
    parser.add_argument('--prefix', nargs='?', default='',
                        help='set upload or download path prefix')
    parser.add_argument('-s', '--silent', action='store_true',
                        help='do not show progress bars')
    parser.add_argument('-v', '--version', action='version', version=version)
    opts = parser.parse_args()

    if not opts.url or not opts.token:
        print('Both URL and TOKEN are mandatory.')
        sys.exit(1)

    client = JupyterContentsClient(opts.url, opts.token,
                                   show_progress=not opts.silent)
    # VERB is restricted by `choices`, so this two-way dispatch is exhaustive.
    transfer = client.download if opts.VERB == 'GET' else client.upload
    for path in opts.FILE:
        transfer(path, opts.prefix)
|
[
"0x0L@github.com"
] |
0x0L@github.com
|
64504a6de6062e5f0cd3d7493539724d88ae1d5b
|
416f3effbd8618c8e654afc099bee5c09f7ad71f
|
/qt-Notepad.py
|
f968068750dca9b00b7b28308201db94d9644431
|
[] |
no_license
|
lewisde/qt-Notepad
|
c02720a1793769e9d3eb816c5b770c797745f5ab
|
690d58040e4f7e323752a2a6c41ded969df18c47
|
refs/heads/master
| 2021-01-10T08:36:46.343409
| 2015-12-05T02:30:36
| 2015-12-05T02:30:36
| 46,058,562
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,122
|
py
|
#!/usr/local/bin/python3
import os
import sys
from PyQt5.QtWidgets import QMainWindow
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWidgets import QAction
from PyQt5.QtWidgets import QTextEdit
from PyQt5.QtWidgets import QFileDialog
from PyQt5.QtGui import QIcon
class Example(QMainWindow):
    """A minimal notepad: a QMainWindow with file/edit menus wrapping a
    central QTextEdit.

    self.filenames records opened files; index 0 is treated as the active
    file for plain Save.
    """

    def __init__(self):
        super().__init__()
        self.init_ui()
        self.filenames = []  # history of opened files; [0] is saved to by save_file

    def init_ui(self):
        """Build the menu bar, its actions/shortcuts, and the text widget."""
        self.setGeometry(300, 300, 300, 220)
        self.setWindowTitle('Icon')
        self.setWindowIcon(QIcon('web.png'))

        new_action = QAction('New File', self)
        new_action.setShortcut('Ctrl+N')
        new_action.setStatusTip('Create new file')
        new_action.triggered.connect(self.new_file)

        open_action = QAction('Open...', self)
        open_action.setShortcut('Ctrl+O')
        open_action.setStatusTip('Open a file')
        open_action.triggered.connect(self.open_file)

        save_action = QAction('Save File', self)
        save_action.setShortcut('Ctrl+S')
        save_action.setStatusTip('Save current file')
        save_action.triggered.connect(self.save_file)

        new_save_action = QAction('Save File As...', self)
        new_save_action.setShortcut('Shift+Ctrl+S')
        new_save_action.setStatusTip('Save current file')
        new_save_action.triggered.connect(self.save_file_as)

        close_action = QAction('Close File', self)
        close_action.setShortcut('Ctrl+Q')
        close_action.setStatusTip('Close Notepad')
        close_action.triggered.connect(self.close)

        # NOTE: these actions are shown in the menus but are not yet wired
        # to any handler.
        undo_action = QAction('Undo', self)
        undo_action.setShortcut('Ctrl+Z')
        copy_action = QAction('Copy', self)
        copy_action.setShortcut('Ctrl+C')
        cut_action = QAction('Cut', self)
        cut_action.setShortcut('Ctrl+X')
        paste_action = QAction('Paste', self)
        paste_action.setShortcut('Ctrl+V')
        minimize_action = QAction('Minimize', self)
        minimize_action.setShortcut('Ctrl+M')
        view_action = QAction('Show', self)
        view_action.setShortcut('Ctrl+/')

        menubar = self.menuBar()
        file_menu = menubar.addMenu('&File')
        edit_menu = menubar.addMenu('&Edit')
        view_menu = menubar.addMenu('&View')
        window_menu = menubar.addMenu('&Window')

        file_menu.addAction(new_action)
        file_menu.addAction(open_action)
        file_menu.addAction(save_action)
        file_menu.addAction(new_save_action)
        file_menu.addAction(close_action)
        edit_menu.addAction(undo_action)
        edit_menu.addAction(copy_action)
        edit_menu.addAction(cut_action)
        edit_menu.addAction(paste_action)
        view_menu.addAction(view_action)
        window_menu.addAction(minimize_action)

        self.text = QTextEdit(self)
        self.setCentralWidget(self.text)
        self.setGeometry(300, 300, 1324, 1068)
        self.setWindowTitle('Notepad')
        self.setStyleSheet('font-size: 14pt; font-family: Courier;')
        self.show()

    def new_file(self):
        """Clear the editor contents."""
        self.text.clear()

    def save_file(self):
        """Save to the first opened file, or fall back to Save As."""
        if len(self.filenames) > 0:
            filename = self.filenames[0]
            # `with` guarantees the handle is closed even if write() raises.
            with open(filename, 'w') as f:
                f.write(self.text.toPlainText())
        else:
            self.save_file_as()

    def save_file_as(self):
        """Ask for a target path and write the editor contents to it."""
        filename = QFileDialog.getSaveFileName(
            self, 'Save File', os.getenv('HOME'))[0]
        print(filename)
        # BUG FIX: getSaveFileName()[0] is a *string*; the old check
        # `filename != ('', '')` compared a str to a tuple and was therefore
        # always True, so cancelling the dialog attempted open('').
        # An empty string means the dialog was cancelled.
        if filename:
            with open(filename, 'w') as f:
                f.write(self.text.toPlainText())

    def open_file(self):
        """Ask for a file, load it into the editor and remember its path."""
        filename = QFileDialog.getOpenFileName(
            self, 'Open File', os.getenv('HOME'))[0]
        if not filename:
            # Dialog cancelled: previously this fell through to open('') and raised.
            return
        with open(filename, 'r') as f:
            self.text.setText(f.read())
        self.setWindowTitle(filename)
        self.filenames.append(filename)
if __name__ == '__main__':
    app = QApplication(sys.argv)
    # Keep a reference to the window so it isn't garbage-collected.
    ex = Example()
    sys.exit(app.exec_())
|
[
"lewisde@gmail.com"
] |
lewisde@gmail.com
|
20a7049cb4cc82c0339b4cd3e0b33361045cd3ed
|
6ad58af0bfe268183c8cfa603f0b470b60e8b163
|
/project/celery.py
|
9db55d4eec0a733767abb250ab1c4f430985624d
|
[
"MIT"
] |
permissive
|
zubedev/The_Doe_Agency
|
e21587c9716d08c6d9142198fa7467b9aa7f4bbf
|
2545aeae71c779166bef78941cac36551498ca76
|
refs/heads/main
| 2023-07-05T05:43:05.955514
| 2021-08-06T06:58:35
| 2021-08-06T06:58:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 645
|
py
|
import os

from celery import Celery

# Set the default Django settings module for the 'celery' program.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.settings")

# Module-level Celery app instance, imported by workers and by Django.
app = Celery("project")

# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
#   should have a `CELERY_` prefix.
app.config_from_object("django.conf:settings", namespace="CELERY")

# Load task modules from all registered Django apps.
app.autodiscover_tasks()


@app.task(bind=True)
def debug_task(self):
    # bind=True exposes the task instance as `self`, so the originating
    # request metadata can be printed for debugging.
    print(f"Request: {self.request!r}")
|
[
"ziibii88@gmail.com"
] |
ziibii88@gmail.com
|
cb8ad4652800f8297f6d6017cdcac446e4d48808
|
5c6395fadd36a42b18852f9091db1d550bef52d6
|
/core/interpreters/datasource/__init__.py
|
9301a9e4fa1ccc760a269e91cea8144925457d0c
|
[] |
no_license
|
alpha-i/service-ads-wind-energy
|
ade2d07bd47a653085f75647bda9916c73853d43
|
5ba611cb351c00392aad464fda12448b7bf5383f
|
refs/heads/master
| 2020-04-20T21:17:29.390204
| 2019-02-04T16:03:34
| 2019-02-04T16:03:34
| 169,104,886
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 74
|
py
|
from core.interpreters.datasource.hd5 import FlightDatasourceInterpreter
|
[
"giajj@hotmail.com"
] |
giajj@hotmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.