blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 213
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 246
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6af8977c2b1e73ffa5af4c350cdb9502a5c999c4
|
98cf41377e195a721b149a025ef91320c0ef7b88
|
/snippets/migrations/0001_initial.py
|
6486bd40517221ced0d93e030d7a4da4c648f093
|
[] |
no_license
|
Rokin1/api_tutorial
|
50d61ae8cb965e11b52c07a8d53e3c1b3a9c065d
|
d5e231d763c1a1558b67fb142a9faf117aab436e
|
refs/heads/master
| 2023-06-13T05:36:40.006379
| 2021-07-01T12:02:56
| 2021-07-01T12:02:56
| 382,018,300
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 938
|
py
|
# Generated by Django 3.2.4 on 2021-06-28 22:05
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Snippet',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('title', models.CharField(blank=True, default='', max_length=100)),
('code', models.TextField()),
('linenos', models.BooleanField(default=False)),
('language', models.CharField(choices=[('1', '1'), ('2', '2'), ('3', '3'), ('4', '4'), ('5', '5'), ('6', '6')], default='python', max_length=100)),
],
options={
'ordering': ['created'],
},
),
]
|
[
"casselled@yahoo.com"
] |
casselled@yahoo.com
|
cca5c51733d6ca993705e645a989644358f8fd2a
|
6a3b128e875d9651d82580693f95d3c5931556a9
|
/CNN_models/train3.py
|
ab813b2dc957214db4b91d52dbb8bccae2e6eacc
|
[] |
no_license
|
akshayg056/Empass_Emotion_Video
|
5469102357f755969b0602ac01056918a44baffb
|
bfdbef70d6d106ab763d9888385f2fdf9ed05353
|
refs/heads/master
| 2020-05-04T23:53:01.726081
| 2019-05-14T19:27:40
| 2019-05-14T19:27:40
| 179,558,278
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,929
|
py
|
import numpy as np
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D,Convolution2D,ZeroPadding2D,BatchNormalization
from keras import backend as K
from keras.preprocessing import image
from keras.applications.mobilenet import MobileNet
from keras.applications.vgg16 import preprocess_input, decode_predictions
from keras.models import Model
import timeit
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import tensorflow as tf
import math
import sys
import time
import datetime
import os
batch_size = 128
num_classes = 7
epochs = 100
class NISTHelper():
def __init__(self, train_img, train_label, test_img, test_label):
self.i = 0
self.test_i = 0
self.training_images = train_img
self.training_labels = train_label
self.test_images = test_img
self.test_labels = test_label
def next_batch(self, batch_size):
x = self.training_images[self.i:self.i + batch_size]
y = self.training_labels[self.i:self.i + batch_size]
self.i = (self.i + batch_size) % len(self.training_images)
return x, y
def test_batch(self, batch_size):
x = self.test_images[self.test_i:self.test_i + batch_size]
y = self.test_labels[self.test_i:self.test_i + batch_size]
self.test_i = (self.test_i + batch_size) % len(self.test_images)
return x, y
def unison_shuffled_copies(a, b):
"""Returns 2 unison shuffled copies of array a and b"""
assert len(a) == len(b)
p = np.random.permutation(len(a))
return a[p], b[p]
def log(logstr):
"""Prints logstr to console with current time"""
print(datetime.datetime.now().isoformat() + " " + logstr)
log("Loading data...")
images = np.load("nist_images_32x32.npy")
labels = np.load("nist_labels_32x32.npy")
log("Data loaded... Shuffling...")
images, labels = unison_shuffled_copies(images, labels)
log("Shuffled!")
split = math.ceil(len(images) * 0.8)
train_imgs = images[:split]
train_labels = labels[:split]
test_imgs = images[split:]
test_labels = labels[split:]
log("Performed train-test split")
print(type(test_imgs))
nist = NISTHelper(train_imgs, train_labels, test_imgs, test_labels)
x = tf.placeholder(tf.float32, shape=[None, 32, 32, 1], name="x") # Input, shape = ?x32x32x1
y_true = tf.placeholder(tf.float32, shape=[None, 47], name="y_true") # Label
print()
print(train_imgs.shape)
model = Sequential()
model.add(Conv2D(32, (5, 5), padding='same', activation='relu', input_shape=(32,32,1)))
model.add(Conv2D(32, (5, 5), padding='same', activation='relu'))
model.add(Conv2D(32, (5, 5), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
model.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
model.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(.5))
model.add(Dense(7, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.SGD(),
metrics=['accuracy'])
model.fit(train_imgs,train_labels,
batch_size=batch_size,
epochs=250,
verbose=1,
validation_data=(test_imgs,test_labels))
model.summary()
##incorrects = np.nonzero(model.predict(x).reshape((-1,)) != y_true)
##print(incorrects)
model.save("model.h5")
|
[
"noreply@github.com"
] |
akshayg056.noreply@github.com
|
5a1b36e45a7a634f8372895574c63ff61a13cc53
|
8a10ac7e5719e91f43dbfb4cc9384b82aae1b891
|
/honcho/tasks/weather.py
|
3886cab165a7024c549bb0af51faab3b01a3f7e9
|
[] |
no_license
|
wallinb/amigos3
|
4984abb65d1373345dc14117c693a1f0e500b065
|
e14a94f7ba23657e519ba858fa85b1538e638505
|
refs/heads/master
| 2021-10-21T09:13:47.258538
| 2021-10-15T22:54:52
| 2021-10-15T22:54:52
| 191,256,566
| 0
| 2
| null | 2021-07-27T02:21:59
| 2019-06-10T22:53:57
|
Python
|
UTF-8
|
Python
| false
| false
| 4,729
|
py
|
import re
from collections import namedtuple
from contextlib import closing
from datetime import datetime
from logging import getLogger
from time import time
from serial import Serial
import honcho.core.data as data
from honcho.config import (DATA_TAGS, GPIO, TIMESTAMP_FMT, WXT_BAUD, WXT_PORT,
WXT_SAMPLES, WXT_TIMEOUT)
from honcho.core.data import log_serialized, serialize
from honcho.core.gpio import powered
from honcho.tasks.common import task
from honcho.tasks.sbd import queue_sbd
from honcho.util import average_datetimes
logger = getLogger(__name__)
_DATA_KEYS = (
"timestamp",
"wind_direction",
"wind_speed",
"temperature",
"humidity",
"pressure",
"rain_accumulation",
"rain_duration",
"rain_intensity",
"rain_peak_intensity",
"hail_accumulation",
"hail_duration",
"hail_intensity",
"hail_peak_intensity",
"heater_temperature",
"heater_voltage",
"supply_voltage",
)
DATA_KEYS = namedtuple("DATA_KEYS", (el.upper() for el in _DATA_KEYS))(*_DATA_KEYS)
CONVERSION_TO_VALUE = {
DATA_KEYS.WIND_DIRECTION: float,
DATA_KEYS.WIND_SPEED: float,
DATA_KEYS.TEMPERATURE: float,
DATA_KEYS.HUMIDITY: float,
DATA_KEYS.PRESSURE: float,
DATA_KEYS.RAIN_ACCUMULATION: float,
DATA_KEYS.RAIN_DURATION: float,
DATA_KEYS.RAIN_INTENSITY: float,
DATA_KEYS.RAIN_PEAK_INTENSITY: float,
DATA_KEYS.HAIL_ACCUMULATION: float,
DATA_KEYS.HAIL_DURATION: float,
DATA_KEYS.HAIL_INTENSITY: float,
DATA_KEYS.HAIL_PEAK_INTENSITY: float,
DATA_KEYS.HEATER_TEMPERATURE: float,
DATA_KEYS.HEATER_VOLTAGE: float,
DATA_KEYS.SUPPLY_VOLTAGE: float,
}
CONVERSION_TO_STRING = {
DATA_KEYS.TIMESTAMP: "{0:" + TIMESTAMP_FMT + "}",
DATA_KEYS.WIND_DIRECTION: "{0:.4f}",
DATA_KEYS.WIND_SPEED: "{0:.4f}",
DATA_KEYS.TEMPERATURE: "{0:.4f}",
DATA_KEYS.HUMIDITY: "{0:.4f}",
DATA_KEYS.PRESSURE: "{0:.4f}",
DATA_KEYS.RAIN_ACCUMULATION: None,
DATA_KEYS.RAIN_DURATION: None,
DATA_KEYS.RAIN_INTENSITY: None,
DATA_KEYS.RAIN_PEAK_INTENSITY: None,
DATA_KEYS.HAIL_ACCUMULATION: None,
DATA_KEYS.HAIL_DURATION: None,
DATA_KEYS.HAIL_INTENSITY: None,
DATA_KEYS.HAIL_PEAK_INTENSITY: None,
DATA_KEYS.HEATER_TEMPERATURE: "{0:.4f}",
DATA_KEYS.HEATER_VOLTAGE: "{0:.4f}",
DATA_KEYS.SUPPLY_VOLTAGE: "{0:.4f}",
}
WeatherSample = namedtuple("WeatherSample", DATA_KEYS)
LINE_PATTERN = (
r"0R0,"
r"Dm=(?P<wind_direction>[\d\.]+).,"
r"Sm=(?P<wind_speed>[\d\.]+).,"
r"Ta=(?P<temperature>[\+\-\d\.]+).,"
r"Ua=(?P<humidity>[\d\.]+).,"
r"Pa=(?P<pressure>[\d\.]+).,"
r"Rc=(?P<rain_accumulation>[\d\.]+).,"
r"Rd=(?P<rain_duration>[\d\.]+).,"
r"Ri=(?P<rain_intensity>[\d\.]+).,"
r"Hc=(?P<hail_accumulation>[\d\.]+).,"
r"Hd=(?P<hail_duration>[\d\.]+).,"
r"Hi=(?P<hail_intensity>[\d\.]+).,"
r"Rp=(?P<rain_peak_intensity>[\d\.]+).,"
r"Hp=(?P<hail_peak_intensity>[\d\.]+).,"
r"Th=(?P<heater_temperature>[\+\-\d\.]+).,"
r"Vh=(?P<heater_voltage>[\+\-\d\.]+).,"
r"Vs=(?P<supply_voltage>[\+\-\d\.]+)."
)
def parse_sample(s):
row = re.search(LINE_PATTERN, s).groupdict()
sample = WeatherSample(
timestamp=datetime.now(),
**dict((key, CONVERSION_TO_VALUE[key](value)) for key, value in row.items())
)
return sample
def get_samples(n=12):
logger.debug("Getting {0} samples".format(n))
samples = []
start_time = time()
with powered([GPIO.WXT]):
with closing(Serial(WXT_PORT, WXT_BAUD, timeout=60)) as serial:
while len(samples) < n and time() - start_time < WXT_TIMEOUT:
line = serial.readline()
logger.debug("Read line from vaisala: {0}".format(line))
if re.search(LINE_PATTERN, line):
samples.append(parse_sample(line))
logger.debug("{0} of {1} samples collected".format(len(samples), n))
return samples
def average_samples(samples):
logger.debug("Averaging {0} samples".format(len(samples)))
n = len(samples)
timestamp = average_datetimes([sample.timestamp for sample in samples])
averaged = WeatherSample(
timestamp=timestamp,
**dict(
(key, sum(getattr(sample, key) for sample in samples) / float(n))
for key in DATA_KEYS[1:]
)
)
return averaged
def print_samples(samples):
data.print_samples(samples, CONVERSION_TO_STRING)
@task
def execute():
samples = get_samples(n=WXT_SAMPLES)
average = average_samples(samples)
serialized = serialize(average, CONVERSION_TO_STRING)
log_serialized(serialized, DATA_TAGS.WXT)
queue_sbd(serialized, DATA_TAGS.WXT)
|
[
"bruce.wallin@nsidc.org"
] |
bruce.wallin@nsidc.org
|
a8ee75b513d0a8bea3478ab3b83b92f1085ef5a6
|
85462df28c512100858b15b98190a5e8712a41d2
|
/env/bin/easy_install
|
573be730858fc3781720e110c85349b6da44e724
|
[
"BSD-2-Clause"
] |
permissive
|
adboy316/bynder_unsplash_task
|
e3ff2dcfcc290cce9bbd42145f945a7e1d0830ad
|
08fdb923c823c110e953add3cb1a2bc56926f5ad
|
refs/heads/master
| 2020-07-04T18:45:17.958237
| 2019-08-14T15:38:03
| 2019-08-14T15:38:03
| 202,379,022
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 276
|
#!/Users/arieldelgado/Desktop/unsplash_bynder/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"adboy316@yahoo.com"
] |
adboy316@yahoo.com
|
|
546e940aa043ed0cd00aac18668d4579cc5fa46b
|
6cb56dda8534844791c0efd81f14f193cafa5c29
|
/apps/employers/migrations/0011_listing_charge_id.py
|
99a90fefaeddcf78dadd6839c14136b6c961187a
|
[] |
no_license
|
KristoD/JobScout
|
7ba97b073d64053948182c8a61a82c530d52d1a0
|
817b255a642e86ff93990213fdecef584c0dda41
|
refs/heads/master
| 2021-01-24T16:26:13.555450
| 2018-08-13T10:58:00
| 2018-08-13T10:58:00
| 123,190,298
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 502
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2018-03-01 01:58
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('employers', '0010_auto_20180228_0624'),
]
operations = [
migrations.AddField(
model_name='listing',
name='charge_id',
field=models.CharField(default='', max_length=255),
preserve_default=False,
),
]
|
[
"chrisdedeian@gmail.com"
] |
chrisdedeian@gmail.com
|
a15643e59225b148d20c4da1d1959c67b1701020
|
f4658fa85ff0d90fcda3297f3cc54d1bfb5c1fc2
|
/pattoo/post.py
|
f033ffe32bfb5e64ea1d507e243eeb955c4aa278
|
[
"Apache-2.0"
] |
permissive
|
palisadoes/pattoo-os
|
a3a00744f14d0b86a507819fb493d69d8daeb26b
|
cccf0ddb50a8bb971c0c527b4ea5ef96c6819fac
|
refs/heads/master
| 2022-02-24T15:02:25.962388
| 2019-10-07T04:17:24
| 2019-10-07T04:17:24
| 213,039,355
| 0
| 0
|
Apache-2.0
| 2019-10-05T17:12:07
| 2019-10-05T17:12:07
| null |
UTF-8
|
Python
| false
| false
| 5,500
|
py
|
#!/usr/bin/env python3
"""Pattoo Agent class.
Description:
This script:
1) Processes a variety of information from agents
2) Posts the data using HTTP to a server listed
in the configuration file
"""
# Standard libraries
import os
import socket
import json
from collections import defaultdict
# pip3 libraries
import requests
# Pattoo libraries
from pattoo import log
from pattoo import general
from pattoo import data as pattoo_data
from pattoo import configuration
class Data(object):
"""Pattoo agent that gathers data."""
def __init__(self, _data):
"""Initialize the class.
Args:
_data: ConfigAgent configuration object
agent_name: Name of agent
Returns:
None
"""
# Initialize key variables
self._data = _data
# Get the agent_name
if 'agent_program' in self._data:
self._agent_name = self._data['agent_program']
else:
self._agent_name = ''
# Get the agent ID
config = configuration.ConfigAgent(self._agent_name)
agent_id = pattoo_data.get_agent_id(config)
# Construct URL for server
if config.api_server_https() is True:
prefix = 'https://'
else:
prefix = 'http://'
self._url = (
'{}{}:{}/{}/receive/{}'.format(
prefix, config.api_server_name(),
config.api_server_port(), config.api_server_uri(), agent_id))
# Create the cache directory
self._cache_dir = config.agent_cache_directory()
if os.path.exists(self._cache_dir) is False:
os.mkdir(self._cache_dir)
# All cache files created by this agent will end with this suffix.
devicehash = general.hashstring(self._data['agent_hostname'], sha=1)
self._cache_filename_suffix = '{}_{}.json'.format(agent_id, devicehash)
def post(self, save=True, data=None):
"""Post data to central server.
Args:
save: When True, save data to cache directory if postinf fails
data: Data to post. If None, then uses self._data (For testing)
Returns:
success: True: if successful
"""
# Initialize key variables
success = False
response = False
timestamp = self._data['timestamp']
# Create data to post
if data is None:
data = self._data
# Post data save to cache if this fails
try:
result = requests.post(self._url, json=data)
response = True
except:
if save is True:
# Create a unique very long filename to reduce risk of
filename = '{}/{}_{}'.format(
self._cache_dir, timestamp, self._cache_filename_suffix)
# Save data
with open(filename, 'w') as f_handle:
json.dump(data, f_handle)
# Define success
if response is True:
if result.status_code == 200:
success = True
# Log message
if success is True:
log_message = (
'Agent "{}" successfully contacted server {}'
''.format(self._agent_name, self._url))
log.log2info(1027, log_message)
else:
log_message = (
'Agent "{}" failed to contact server {}'
''.format(self._agent_name, self._url))
log.log2warning(1028, log_message)
# Return
return success
def purge(self):
"""Purge data from cache by posting to central server.
Args:
None
Returns:
success: "True: if successful
"""
# Initialize key variables
agent_id = self._data['agent_id']
# Add files in cache directory to list only if they match the
# cache suffix
all_filenames = [filename for filename in os.listdir(
self._cache_dir) if os.path.isfile(
os.path.join(self._cache_dir, filename))]
filenames = [
filename for filename in all_filenames if filename.endswith(
self._cache_filename_suffix)]
# Read cache file
for filename in filenames:
# Only post files for our own UID value
if agent_id not in filename:
continue
# Get the full filepath for the cache file and post
filepath = os.path.join(self._cache_dir, filename)
with open(filepath, 'r') as f_handle:
try:
data = json.load(f_handle)
except:
# Log removal
log_message = (
'Error reading previously cached agent data file {} '
'for agent {}. May be corrupted.'
''.format(filepath, self._agent_name))
log.log2die(1064, log_message)
# Post file
success = self.post(save=False, data=data)
# Delete file if successful
if success is True:
os.remove(filepath)
# Log removal
log_message = (
'Purging cache file {} after successfully '
'contacting server {}'
''.format(filepath, self._url))
log.log2info(1029, log_message)
|
[
"blackriver@simiya.com"
] |
blackriver@simiya.com
|
d7e179f6b91e6c3f2e74f659376f96638fbc3564
|
574eadde8091c12fb0bb0189d77cb23102321a88
|
/Chapter 3/abcdCallStack.py
|
d3bae82b5c77b9d39d436140e9edbeaf134311f0
|
[] |
no_license
|
kmanago/AutomateTheBoringStuff
|
ae7c3fe14bbd95ef03693566b5a7bdb558e1e6f9
|
94b71d4778f0e246f63762ceed0b7389ca0b40cb
|
refs/heads/master
| 2020-06-05T08:45:36.815486
| 2020-05-19T19:00:11
| 2020-05-19T19:00:11
| 192,381,228
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 278
|
py
|
def a():
print('a() starts')
b()
d()
print('a() returns')
def b():
print('b() starts')
c()
print('b() returns')
def c():
print('c() starts')
print('c() returns')
def d():
print('d() starts')
print('d() returns')
a()
|
[
"martaisa.manago@alithya.com"
] |
martaisa.manago@alithya.com
|
e0f904c27fe02a117392aafbc7b49302eb66c8b4
|
2ee57961747f70474cd67a275b8440af2370e11f
|
/16.py
|
caf30ff3fc6e658f1740f1f099c770ab716ec65a
|
[] |
no_license
|
usha-developer/seleniumtestingpractice
|
5b1fd0eb14469895be0ce1a2159c01429adf6b57
|
1d06c365370a757ec8f56da7b164b2c022390a74
|
refs/heads/main
| 2023-06-22T18:08:49.820354
| 2021-07-27T10:51:17
| 2021-07-27T10:51:17
| 389,946,329
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,010
|
py
|
from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
#chromeOptions=Options()
#chromeOptions.add_experimental_option("prefs",{"download.default_directory": "C:\Downloadedfiles"})
driver=webdriver.Chrome(executable_path="C:\Drivers\chromedriver.exe")
#driver=webdriver.Firefox(executable_path="C:\Drivers\geckodriver.exe")
driver.maximize_window()
driver.get("http://demo.automationtesting.in/FileDownload.html")
#driver.find_element_by_xpath("//*[@id='textbox']").send_keys("welcome to selenium tutorials")
#driver.find_element_by_xpath("//*[@id='createTxt']").click()
#driver.find_element_by_xpath("//*[@id='link-to-download']").click()
driver.find_element_by_xpath("//*[@id='pdfbox']").send_keys("welcome to selenium tutorials")
driver.find_element_by_xpath("//*[@id='createPdf']").click()
driver.find_element_by_xpath("//*[@id='pdf-link-to-download']").click()
|
[
"noreply@github.com"
] |
usha-developer.noreply@github.com
|
877758f85ad31b3384a6c75d08e3a516dc9c78e0
|
24c19c0317e1767f0b1ccb9e6c6e3e229624d877
|
/test/webdnn_test/graph_test/operators_test/concat_test.py
|
0d309686bccc9134aa912e0951fdb7bd27feb30d
|
[
"Zlib",
"MIT"
] |
permissive
|
dolaameng/webdnn
|
6742fc556957845ea0faedd599359da251873f1e
|
c56154709a95706a68343555f926222e1fc0516a
|
refs/heads/master
| 2021-01-24T07:32:32.452789
| 2017-06-04T03:11:07
| 2017-06-04T03:11:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,633
|
py
|
import itertools
from typing import Type
import numpy as np
from nose.tools import raises
from webdnn.graph.axis import Axis
from webdnn.graph.operators.concat import Concat
from webdnn.graph.order import Order, OrderC, OrderNC, OrderCN, OrderNHWC, OrderHWNC, OrderHWCN, OrderCNHW, \
OrderCHWN, OrderNCHW
from webdnn.graph.variable import Variable
def main(order1: Type[Order], order2: Type[Order], concat_axis: Axis):
default_order = {
1: OrderC,
2: OrderNC,
4: OrderNHWC
}
op = Concat(None, axis=concat_axis)
x1 = Variable(np.arange(order1.ndim) + 1, default_order[order1.ndim])
x2 = Variable(np.arange(order2.ndim) + 1, default_order[order2.ndim])
x1.change_order(order1)
x2.change_order(order2)
y, = op(x1, x2)
for axis in y.order.axes:
if axis == concat_axis:
assert y.shape_dict[axis] == x1.shape_dict[axis] + x2.shape_dict[axis]
else:
assert y.shape_dict[axis] == x1.shape_dict[axis]
# FIXME 各orderをテストにわけられないか
def test_every_order():
orders = [OrderC, OrderNC, OrderCN, OrderNHWC, OrderHWNC, OrderHWCN, OrderNCHW, OrderCNHW, OrderCHWN]
axes = [Axis.N, Axis.H, Axis.W, Axis.C]
for order1, order2, axis in itertools.product(orders, orders, axes):
if set(order1.axes) != set(order2.axes) or axis not in order1.axes:
continue
main(order1, order2, axis)
@raises(AssertionError)
def test_invalid_size():
op = Concat(None, axis=Axis.C)
v1 = Variable((2, 3, 4, 5), OrderNHWC)
v2 = Variable((2, 3, 7, 6), OrderNHWC)
v3, = op(v1, v2)
|
[
"y.kikura@gmail.com"
] |
y.kikura@gmail.com
|
c206acf5923a912bf8e43b30901c1781e630b082
|
f1e2e511dcd1b23f5633bd56d7d52b61f0ba8f9b
|
/Python/Lecture9/9-0.py
|
65f6029203b3a16558947cb56db32b6e52b7ff31
|
[] |
no_license
|
hamin7/ITE3035_Python
|
6a9bd7854fcaba2e5381f07dc2a76d4d80abf0d1
|
49d582b6772456bfa03f10500299063de26e8d98
|
refs/heads/master
| 2021-06-25T10:05:46.827177
| 2020-12-28T13:36:15
| 2020-12-28T13:36:15
| 188,181,758
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,070
|
py
|
import random
import time
chance = random.randint(1,2)
print('''
----------인생역전프로그램----------
축하합니다. 당신은 인생역전 프로그램에서 최후의 1인이 되어 마지막 선택을 해야합니다.
당신 앞에는 천사카드와 악마카드가 뒤집어져 있습니다.
천사카드를 고르면 상금 10억원을 타게 될 것이고
악마카드를 고르면 모든것을 잃고 빈손으로 돌아가게 됩니다.
''')
ans = 'yes'
while ans != 'no':
choice = int(input('어떤 카드를 고르실까요? 1,2'))
if choice == chance:
time.sleep(2)
print('''
----------
| 천사 |
----------
''')
print('천사카드. 축하합니다. 100억을 받게 되셨습니다.')
ans = input('다시 선택하시겠습니까? yes or no')
else:
time.sleep(2)
print('''
----------
| 악마 |
----------
''')
print('악마카드. 아쉽다...')
ans = input('다시 선택하시겠습니까? yes or no')
|
[
"ggamini7@gmail.com"
] |
ggamini7@gmail.com
|
69e45ca1b5115c546bb8ab6ec2b5ec62ba9dd980
|
aabdb61eeea7b93076017f264b214161231ff3a5
|
/polls/models.py
|
a8a4c8474c0ce0d0a24477bba8a6755a26c14dfa
|
[] |
no_license
|
scottbenton/django-getting-started
|
644c07b33bf9dc3304a0c55b535d5ade4ac67273
|
d25a34e0edd2d54d505f2f877197d3cb3bbe1ace
|
refs/heads/master
| 2023-03-24T07:14:55.211998
| 2021-03-21T03:27:46
| 2021-03-21T03:27:46
| 349,597,932
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 671
|
py
|
import datetime
from django.db import models
from django.utils import timezone
# Create your models here.
class Question(models.Model):
question_text = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
def was_published_recently(self):
return self.pub_date >= (timezone.now() - datetime.timedelta(days=1))
def __str__(self):
return self.question_text
class Choice(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
def __str__(self):
return self.choice_text
|
[
"scott@scottbenton.dev"
] |
scott@scottbenton.dev
|
57ddc7252c0f15fd814ffcf0c64541feb898d1de
|
a46116d41839b4aa9d0eac6154f67fd9a50142e9
|
/387-First_Unique_Character_in_a_String.py
|
0b20136094c8d8762aa94f96fc45c3ff49d853d1
|
[] |
no_license
|
weichuntsai0217/leetcode
|
85a600385d0d9a5d6d35788b0fccfd325b3e9157
|
f2ea7165d40b392b8f8892ba7152438001900a4d
|
refs/heads/master
| 2020-04-17T23:53:14.151694
| 2016-10-19T09:04:10
| 2016-10-19T09:04:10
| 66,351,875
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 453
|
py
|
class Solution(object):
def firstUniqChar(self, s):
"""
:type s: str
:rtype: int
"""
freq = {}
for i, c in enumerate(s):
if c not in freq:
freq[c] = [i, 1]
else:
freq[c][1] += 1
first = len(s)
for c in freq:
if freq[c][1] == 1 and freq[c][0] < first: first = freq[c][0]
return first if first != len(s) else -1
|
[
"jimmy_tsai@trend.com.tw"
] |
jimmy_tsai@trend.com.tw
|
17709bfd1c3d21385da4ef5150b7684988173639
|
38dd9951c21efc2a60a60a076f5e9a2c4a5cd423
|
/fantasm/state.py
|
8660dd4abfaa9e7d88895ad52cf82016b6a63998
|
[] |
no_license
|
awolf/Foojal
|
9a6886a3a2294e6cf566b67b37d71ee398f442e0
|
0227b9621c7b0d5103ee483cebd2f9e5275dbd0f
|
refs/heads/master
| 2020-05-31T13:55:45.358657
| 2011-10-15T16:04:38
| 2011-10-15T16:04:38
| 2,582,406
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,069
|
py
|
""" Fantasm: A taskqueue-based Finite State Machine for App Engine Python
Docs and examples: http://code.google.com/p/fantasm/
Copyright 2010 VendAsta Technologies Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from google.appengine.api.taskqueue.taskqueue import Task, TaskAlreadyExistsError, TombstonedTaskError
from fantasm import constants
from fantasm.transition import Transition
from fantasm.exceptions import UnknownEventError, InvalidEventNameRuntimeError
from fantasm.utils import knuthHash
from fantasm.lock import RunOnceSemaphore
class State(object):
""" A state object for a machine. """
def __init__(self, name, entryAction, doAction, exitAction, machineName=None,
isFinalState=False, isInitialState=False, isContinuation=False, fanInPeriod=constants.NO_FAN_IN):
"""
@param name: the name of the State instance
@param entryAction: an FSMAction instance
@param doAction: an FSMAction instance
@param exitAction: an FSMAction instance
@param machineName: the name of the machine this State is associated with
@param isFinalState: a boolean indicating this is a terminal state
@param isInitialState: a boolean indicating this is a starting state
@param isContinuation: a boolean indicating this is a continuation State
@param fanInPeriod: integer (seconds) representing how long these states should collect before dispatching
"""
assert not (exitAction and isContinuation) # TODO: revisit this with jcollins, we want to get it right
assert not (exitAction and fanInPeriod > constants.NO_FAN_IN) # TODO: revisit this with jcollins
self.name = name
self.entryAction = entryAction
self.doAction = doAction
self.exitAction = exitAction
self.machineName = machineName # is this really necessary? it is only for logging.
self.isInitialState = isInitialState
self.isFinalState = isFinalState
self.isContinuation = isContinuation
self.isFanIn = fanInPeriod != constants.NO_FAN_IN
self.fanInPeriod = fanInPeriod
self._eventToTransition = {}
def addTransition(self, transition, event):
""" Adds a transition for an event.
@param transition: a Transition instance
@param event: a string event that results in the associated Transition to execute
"""
assert isinstance(transition, Transition)
assert isinstance(event, basestring)
assert not (self.exitAction and transition.target.isContinuation) # TODO: revisit this with jcollins
assert not (self.exitAction and transition.target.isFanIn) # TODO: revisit
self._eventToTransition[event] = transition
def getTransition(self, event):
""" Gets the Transition for a given event.
@param event: a string event
@return: a Transition instance associated with the event
@raise an UnknownEventError if event is unknown (i.e., no transition is bound to it).
"""
try:
return self._eventToTransition[event]
except KeyError:
import logging
logging.critical('Cannot find transition for event "%s". (Machine %s, State %s)',
event, self.machineName, self.name)
raise UnknownEventError(event, self.machineName, self.name)
def dispatch(self, context, event, obj):
""" Fires the transition and executes the next States's entry, do and exit actions.
@param context: an FSMContext instance
@param event: a string event to dispatch to the State
@param obj: an object that the Transition can operate on
@return: the event returned from the next state's main action.
"""
transition = self.getTransition(event)
if context.currentState.exitAction:
try:
context.currentAction = context.currentState.exitAction
context.currentState.exitAction.execute(context, obj)
except Exception:
context.logger.error('Error processing entry action for state. (Machine %s, State %s, exitAction %s)',
context.machineName,
context.currentState.name,
context.currentState.exitAction.__class__)
raise
# join the contexts of a fan-in
contextOrContexts = context
if transition.target.isFanIn:
taskNameBase = context.getTaskName(event, fanIn=True)
contextOrContexts = context.mergeJoinDispatch(event, obj)
if not contextOrContexts:
context.logger.info('Fan-in resulted in 0 contexts. Terminating machine. (Machine %s, State %s)',
context.machineName,
context.currentState.name)
obj[constants.TERMINATED_PARAM] = True
transition.execute(context, obj)
if context.currentState.entryAction:
try:
context.currentAction = context.currentState.entryAction
context.currentState.entryAction.execute(contextOrContexts, obj)
except Exception:
context.logger.error('Error processing entry action for state. (Machine %s, State %s, entryAction %s)',
context.machineName,
context.currentState.name,
context.currentState.entryAction.__class__)
raise
if context.currentState.isContinuation:
try:
token = context.get(constants.CONTINUATION_PARAM, None)
nextToken = context.currentState.doAction.continuation(contextOrContexts, obj, token=token)
if nextToken:
context.continuation(nextToken)
context.pop(constants.CONTINUATION_PARAM, None) # pop this off because it is really long
except Exception:
context.logger.error('Error processing continuation for state. (Machine %s, State %s, continuation %s)',
context.machineName,
context.currentState.name,
context.currentState.doAction.__class__)
raise
# either a fan-in resulted in no contexts, or a continuation was completed
if obj.get(constants.TERMINATED_PARAM):
return None
nextEvent = None
if context.currentState.doAction:
try:
context.currentAction = context.currentState.doAction
nextEvent = context.currentState.doAction.execute(contextOrContexts, obj)
except Exception:
context.logger.error('Error processing action for state. (Machine %s, State %s, Action %s)',
context.machineName,
context.currentState.name,
context.currentState.doAction.__class__)
raise
if transition.target.isFanIn:
# this prevents fan-in from re-counting the data if there is an Exception
# or DeadlineExceeded _after_ doAction.execute(...) succeeds
index = context.get(constants.INDEX_PARAM)
workIndex = '%s-%d' % (taskNameBase, knuthHash(index))
semaphore = RunOnceSemaphore(workIndex, context)
semaphore.writeRunOnceSemaphore(payload=obj.get(constants.TASK_NAME_PARAM))
try:
# at this point we have processed the work items, delete them
task = Task(name=obj.get(constants.TASK_NAME_PARAM, '') + '-cleanup',
url=constants.DEFAULT_CLEANUP_URL,
params={'workIndex': workIndex})
context.Queue(name=constants.DEFAULT_CLEANUP_QUEUE_NAME).add(task)
except (TaskAlreadyExistsError, TombstonedTaskError):
context.logger.info("Fan-in cleanup Task already exists.")
if nextEvent:
if not isinstance(nextEvent, str) or not constants.NAME_RE.match(nextEvent):
raise InvalidEventNameRuntimeError(nextEvent, context.machineName, context.currentState.name,
context.instanceName)
return nextEvent
|
[
"adam.wolf@bluecognition.com"
] |
adam.wolf@bluecognition.com
|
43a9a9c0d9b65b4a0aad5d154adebfbe78b142f0
|
9fd9a5cf4c945af1baffb4f42521a962890ce165
|
/dele.py
|
a94090117c4904a2f668417f96b1e98b1a92af44
|
[] |
no_license
|
UjjwalDash/Quiz_app
|
332053175265eb5677a15969e0c9e945ac0bc91c
|
9a27527c17d4227e0744f5b1f09adb6186066b1f
|
refs/heads/main
| 2023-06-24T04:43:08.730752
| 2021-07-24T06:26:12
| 2021-07-24T06:26:12
| 389,015,053
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,984
|
py
|
from tkinter import *
import sqlite3
import tkinter.messagebox
mem1=Tk()
mem1.title('Quiz')
mem1.maxsize(1000,300)
mem1.configure(bg='Powder Blue')
member=Frame(mem1,bg='Powder Blue',bd=5)
member.pack(side=TOP)
btnfra=Frame(mem1,bg='Powder Blue',bd=5)
btnfra.pack(side=BOTTOM)
mno=StringVar()
def reset():
mno.set('')
def exi():
mem1.destroy()
def delete():
a=mno.get()
crr=sqlite3.connect('QUIZ.db')
c=crr.cursor()
c.execute('select* from quize')
l=c.fetchall()
d={}
for row in l:
d[row[0]]=row[1],row[2],row[3],row[4],row[5]
try:
b=d[a]
A=b[0]
B=b[1]
C=b[2]
D=b[3]
Ans=b[4]
try:
cnx=sqlite3.connect('QUIZ.db')
Cursor=cnx.cursor()
Qry=("DELETE FROM quize WHERE question='%s'"%(a))
Cursor.execute(Qry)
cnx.commit()
Cursor.close()
tkinter.messagebox.showinfo('program',"Question Deleted Successfully........")
reset()
except:
tkinter.messagebox.showinfo('program',"Question does not exist........")
except:
tkinter.messagebox.showinfo("program",'No such No such Question found')
question=Label(member,font=('arial 18 bold'),bg='Powder Blue',text='Question:')
question.grid(row=0,column=0)
questiontxt=Entry(member,font=('arial 16 bold'),textvar=mno,width=50,justify=LEFT,bd=8)
questiontxt.grid(row=0,column=1)
btn=Button(btnfra,text='Delete',font=('arial 16 bold'),command=delete,bd=8,pady=5,
bg='Orange',width=10)
btn.grid(row=4,column=0,sticky='w')
btn1=Button(btnfra,text='Reset',font=('arial 16 bold'),command=reset,bd=8,
pady=5,bg='Yellow',width=10)
btn1.grid(row=4,column=1,sticky='w')
btn2=Button(btnfra,text='Exit',font=('arial 16 bold'),command=exi,bd=8,
pady=5,bg='Red',width=10)
btn2.grid(row=4,column=2,sticky='w')
mem1.mainloop()
|
[
"noreply@github.com"
] |
UjjwalDash.noreply@github.com
|
6b20acbcf552ec7d07c9b06d8609f99fa16c7ac1
|
343bdaddfc66c6316e2cee490e9cedf150e3a5b7
|
/0001_0100/0045/0045.py
|
20c9548c49061dd274fa02cae02fc43c06c24438
|
[] |
no_license
|
dm-alexi/acmp
|
af7f6b4484b78f5922f3b464406a0ba5dea0d738
|
3fa0016d132adfeab7937b3e8c9687a34642c93a
|
refs/heads/master
| 2021-07-09T15:14:25.857086
| 2020-10-20T19:08:54
| 2020-10-20T19:08:54
| 201,908,038
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 356
|
py
|
with open("input.txt", "r") as f, open("output.txt", "w") as q:
n = int(f.read())
s = "1" if n else "10"
if n > 1:
a = [0] * 10
for i in range(9, 1, -1):
while n % i == 0:
n //= i
a[i] += 1
s = "".join((str(i) * a[i]) for i in range(2, 10)) if n == 1 else "-1"
q.write(s)
|
[
"dm2.alexi@gmail.com"
] |
dm2.alexi@gmail.com
|
743a6e1ca84db32348dd91efedde31759a3ada4a
|
0e4860fecfdd34a3255003cc8c8df086c14083dd
|
/python/source_code/AutomatePython/13-pdf-word/addContentToDocx.py
|
e226f5539fac401c7112e21e3072b634221c2153
|
[] |
no_license
|
anzhihe/learning
|
503ab9a58f280227011da5eaa4b14b46c678e6f3
|
66f7f801e1395207778484e1543ea26309d4b354
|
refs/heads/master
| 2023-08-08T11:42:11.983677
| 2023-07-29T09:19:47
| 2023-07-29T09:19:47
| 188,768,643
| 1,443
| 617
| null | 2023-08-24T02:10:34
| 2019-05-27T04:04:10
|
Python
|
UTF-8
|
Python
| false
| false
| 302
|
py
|
import docx
doc = docx.Document()
doc.add_paragraph('Hello world!')
paraObj1 = doc.add_paragraph('This is a second paragraph.')
paraObj2 = doc.add_paragraph('This is a yet another paragraph.')
paraObj1.add_run(' This text is being added to the second paragraph.')
doc.save('multipleParagraphs.docx')
|
[
"anzhihe1218@gmail.com"
] |
anzhihe1218@gmail.com
|
18338717c675007d1f1c14af64e1a6bdf17381ab
|
c069fd1d580ab986c976bf0fe1509bac6338440e
|
/venv/Scripts/django-admin.py
|
0d65b5a4ceba6c2b091549191470943f923bcf89
|
[] |
no_license
|
adamsagas/PythonReq2
|
01b7f653e13b2b59e8acf8770ab30b797f88ef83
|
b2790878658e369c1dc40a135605fcbbb4d84085
|
refs/heads/master
| 2023-06-02T11:26:37.880719
| 2021-06-23T13:37:24
| 2021-06-23T13:37:24
| 379,613,372
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 190
|
py
|
#!C:\Users\AdamSagas_jxgmrac\PycharmProjects\Git\PythonReq2\venv\Scripts\python.exe
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
[
"78692714+adamsagas@users.noreply.github.com"
] |
78692714+adamsagas@users.noreply.github.com
|
82fa358282ffc2c663186533546e78ee84f2c20d
|
c938453d1772d3d8c24a2169dd3aba1862a4d925
|
/config/urls.py
|
00fd1693683027995fde720c5de887b65056069c
|
[
"MIT"
] |
permissive
|
dcopm999/sibdev
|
ab13d880cb91c84a96107345939dd7184a1aa267
|
9dc01ed5d172869d4870c847f01d168602f31be8
|
refs/heads/master
| 2022-11-17T10:59:23.412299
| 2020-07-20T12:52:42
| 2020-07-20T12:52:42
| 279,256,258
| 0
| 0
|
MIT
| 2020-07-20T12:39:28
| 2020-07-13T09:23:34
|
Python
|
UTF-8
|
Python
| false
| false
| 2,101
|
py
|
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.urls import include, path
from django.views import defaults as default_views
from django.views.generic import TemplateView
from rest_framework.authtoken.views import obtain_auth_token
urlpatterns = [
path("", TemplateView.as_view(template_name="pages/home.html"), name="home"),
path(
"about/", TemplateView.as_view(template_name="pages/about.html"), name="about"
),
# Django Admin, use {% url 'admin:index' %}
path(settings.ADMIN_URL, admin.site.urls),
# User management
path("users/", include("sibdev.users.urls", namespace="users")),
path("accounts/", include("allauth.urls")),
# Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# Static file serving when using Gunicorn + Uvicorn for local web socket development
urlpatterns += staticfiles_urlpatterns()
# API URLS
urlpatterns += [
# API base url
path("api/", include("config.api_router")),
# DRF auth token
path("auth-token/", obtain_auth_token),
]
if settings.DEBUG:
# This allows the error pages to be debugged during development, just visit
# these url in browser to see how these error pages look like.
urlpatterns += [
path(
"400/",
default_views.bad_request,
kwargs={"exception": Exception("Bad Request!")},
),
path(
"403/",
default_views.permission_denied,
kwargs={"exception": Exception("Permission Denied")},
),
path(
"404/",
default_views.page_not_found,
kwargs={"exception": Exception("Page not Found")},
),
path("500/", default_views.server_error),
]
if "debug_toolbar" in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [path("__debug__/", include(debug_toolbar.urls))] + urlpatterns
|
[
"dcopm999@gmail.com"
] |
dcopm999@gmail.com
|
b4e044fe4e30acb0804ab1dc73629fab8472c4cf
|
a0d2b0dc48b8d97f5b0a08aab8e4abd07a4ea759
|
/markov-data-boti.py
|
3798d299debd5b13923fbf9f25b1e4909b53e3d3
|
[
"MIT"
] |
permissive
|
saefsten/supermarket
|
12fc6a16c6ee967a11efec5f9c60796a74a2f18a
|
9c324bf78dd1f16acae0da733b2ecb5a7e3b54c5
|
refs/heads/main
| 2023-01-12T06:09:24.096172
| 2020-11-20T17:11:25
| 2020-11-20T17:11:25
| 313,380,853
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,574
|
py
|
#!/usr/bin/env python
# coding: utf-8
"""
Project Markov -
@author: boti
"""
# Import pandas etc
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
def plotsections(df,dayofweek):
totalcustomers = df['customer_no'].nunique()
print(totalcustomers)
print (f'Total number of customers on {dayofweek} : {str(totalcustomers)}')
print (f'Total number of customers at each location on {dayofweek} :')
print(df.groupby('location')['customer_no'].nunique())
df[df['location']=='spices'].resample('1h')['customer_no'].nunique().plot()
df[df['location']=='fruit'].resample('1h')['customer_no'].nunique().plot()
df[df['location']=='drinks'].resample('1h')['customer_no'].nunique().plot()
df[df['location']=='dairy'].resample('1h')['customer_no'].nunique().plot()
filename = 'plots/sections-' + dayofweek + '.jpg'
plt.savefig(filename)
plt.cla()
df[df['location']=='checkout'].resample('1h')['customer_no'].nunique().plot()
filename = 'plots/checkout-' + dayofweek + '.jpg'
plt.savefig(filename)
plt.cla()
finalcheckoutcustomers = totalcustomers - df[df['location']=='checkout']['customer_no'].count()
print(f'An additional {finalcheckoutcustomers} cutomers check out at 22:00 on {dayofweek}.')
def statediagram(df):
totalcustomers = df['customer_no'].nunique()
states = pd.DataFrame(columns=['from','to'])
for i in range (totalcustomers):
route = df[df['customer_no']==i + 1]['location'].values
if route[len(route)-1] != 'checkout':
route = np.append(route, 'checkout')
for j in range(len(route)-1):
states.loc[len(states)] = route[j:j+2]
return states
#os.chdir('/home/boti/Spiced/git-repos/stochastic-sage-student-code/project_08/')
monday = pd.read_csv('./monday.csv',sep=';',index_col='timestamp', parse_dates=True)
tuesday = pd.read_csv('./tuesday.csv',sep=';',index_col='timestamp', parse_dates=True)
wednesday = pd.read_csv('./wednesday.csv',sep=';',index_col='timestamp', parse_dates=True)
thursday = pd.read_csv('./thursday.csv',sep=';',index_col='timestamp', parse_dates=True)
friday = pd.read_csv('./friday.csv',sep=';',index_col='timestamp', parse_dates=True)
plotsections(monday,'Monday')
plotsections(tuesday,'Tuesday')
plotsections(wednesday,'Wednesday')
plotsections(thursday,'Thursday')
plotsections(friday,'Friday')
monstates = statediagram(monday)
print(monstates.groupby(['from'])['to'].value_counts().unstack())
print(pd.crosstab(monstates['from'], monstates['to'],normalize=0))
|
[
"x@vitos.tv"
] |
x@vitos.tv
|
c77fd258ec69e67562ba66cf3d8d9e89497f10d5
|
06fef9d2d307cca6d5143990d3032c2f566654b2
|
/checkpoint_log_parser.py
|
afdbc44171c7c8ce8d36819bb9a4f22ae7cef92a
|
[] |
no_license
|
aditya-wate/checkpoint_log_parser
|
f9a5052cc198bcf2400dbd95df2151c7977f184f
|
9eccf1e0be7b87829b141493660bb373b5e86930
|
refs/heads/master
| 2020-04-06T04:55:32.478223
| 2017-03-21T16:37:51
| 2017-03-21T16:37:51
| 71,390,283
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 704
|
py
|
import re
log_file = "C:\Users\\awate\Documents\Miscellaneous\server.log"
#log_file = "one_line_cp.log"
log_list = list()
def extract_fields_to_dict():
with open(log_file) as ol:
for line in ol:
try:
rx = \
r"([A-Za-z0-9_:;. ]*)(>eth[0-9]{1,2} )([A-Za-z0-9_:;.{}&\- ]*)"
m = re.match(rx,line)
event = m.group(3).split("; ") # content after the interface
fields = dict(el.split(": ") for el in event)
log_list.append(fields)
except:
continue
return log_list
def main():
print extract_fields_to_dict()
if __name__ == '__main__':
main()
|
[
"awate@C02T40WXG8WL.local"
] |
awate@C02T40WXG8WL.local
|
b72973b13e4ff60b549ba28635d34749e129df63
|
fe8c94060b208d686ebb987fbc37c4d175c5c047
|
/peco.py
|
00e0d631726c76a99a175656d8c09ff5d6849e0a
|
[] |
no_license
|
hachibeeDI/peco-chrome
|
aa2cbf42014a8c4536f9620a08aad8c93cfeb6d4
|
fc48238fa28af9d0110f59fa1030a89ed24567a5
|
refs/heads/master
| 2021-01-10T11:06:06.019871
| 2015-06-02T08:47:51
| 2015-06-02T08:47:51
| 36,720,315
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 377
|
py
|
# -*- coding: utf-8 -*-
from __future__ import (print_function, division, absolute_import, unicode_literals, )
from subprocess import Popen, PIPE, STDOUT
class Peco(object):
def filter(self, source):
peco_filterd = Popen(["peco", '--query', '"$LBUFFER"'], stdin=PIPE, stdout=PIPE, stderr=STDOUT, shell=True)
return peco_filterd.communicate(input=source)
|
[
"8hachibee125@gmail.com"
] |
8hachibee125@gmail.com
|
a28a4cb0c2674b62b154309a34f9e712e93ccdb1
|
f754b014aa2af7487f09040d0208a6080719088e
|
/3.18可视化/pandas绘图.py
|
7b7985b04b9a7900d77efc637539c08a44c1d6fd
|
[] |
no_license
|
aallenchen2018/2019.3.20shujufenxi
|
09f39e0dbd20225f537d001981cc380a923d5d57
|
d4cccfb4784598d101f80ceb6b5cd2f147c0c4b2
|
refs/heads/master
| 2020-04-30T06:38:11.289204
| 2019-03-24T04:08:14
| 2019-03-24T04:08:14
| 176,657,675
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,030
|
py
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
data=pd.DataFrame(100*np.abs(np.random.rand(4,4)),index=['one','two','three','four'],columns=['HW','ZTE','ESSS','NOKIA'])
####线状图
data.plot(kind='line',figsize=(10,6),grid=True,ylim=(0,200))
#####柱状图(单个公司)
fig=plt.figure(figsize=(10,10))
ax=fig.subplots(2)
data['ZTE'].plot(kind='bar',ax=ax[0])###竖向柱状图
ax[0].set_xticklabels(labels=data['ZTE'].index,rotation=0)
data['ZTE'].plot(kind='barh',ax=ax[1])####横向柱状图
####柱状图2(所有公司)
fig1=plt.figure(figsize=(10,10))
ax1=fig1.subplots(1,2)
data.plot(kind='bar',ax=ax1[0])
data.T.plot(kind='bar',ax=ax1[1],stacked=True)
plt.show()
#####绘图
comp1 = np.random.normal(0,1,size=20000)
comp2 = np.random.normal(10,2,size=20000)
values = pd.Series(np.concatenate([comp1,comp2]))
values.plot(kind='hist',bins=50,density=True)
values.plot(kind='kde',style='r--')
|
[
"346782257@.com"
] |
346782257@.com
|
2019198f76ce1c4e2f551f82364ead186b64aa98
|
0008bf21bfd8600668a263a2c68542d4ac476e1f
|
/pong-game/main.py
|
8988419ade33b399c7a849146fdcc17eee59110f
|
[] |
no_license
|
gabbar23/python-intermediate-projects_
|
9ff0086c4f5f99d8d597dd82c3d22511d0a12d8c
|
aecc5d4bca93ba80d0a401b2e4974ae0456e299a
|
refs/heads/master
| 2023-05-10T05:40:47.050701
| 2021-06-09T12:47:32
| 2021-06-09T12:47:32
| 368,608,011
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,069
|
py
|
from turtle import Turtle, Screen
from paddle import Paddle
from ball import Ball
from score import Scoreboard
import time
START_CORD1 = ((-340, 0), (-340, 20), (-340, 40))
START_CORD2 = ((340, 0), (340, 20), (340, 40))
ball = Ball()
score_l=Scoreboard((-50,220))
score_r=Scoreboard((50,220))
screen = Screen()
screen.listen()
screen.bgcolor("black")
screen.setup(700, 500)
screen.tracer(0)
r_paddle = Paddle((330, 0))
l_paddle = Paddle((-330, 0))
screen.onkeypress(r_paddle.move_up, 'Up')
screen.onkeypress(r_paddle.move_down, 'Down')
screen.onkeypress(l_paddle.move_up, 'w')
screen.onkeypress(l_paddle.move_down, 's')
while True:
screen.update()
time.sleep(ball.fast)
ball.move()
if ball.ycor() > 235 or ball.ycor() < -235:
ball.bounce()
if (ball.distance(l_paddle) < 50 and ball.xcor() < -310) or (ball.distance(r_paddle) < 50 and ball.xcor() > 310):
ball.paddle_bounce()
if ball.xcor()>360:
score_l.score_add()
ball.reset()
if ball.xcor() < -360:
score_r.score_add()
ball.reset()
|
[
"amansaini842@gmail.com"
] |
amansaini842@gmail.com
|
514105a53d73bf3577e4eb05e8072badc6037bc8
|
fb3bfc4ad9cef735a0a78f64c100796cb3280dab
|
/day19.py
|
645bb2407a89c2b21ed9e9a15530c0093e7d0878
|
[] |
no_license
|
morteako/advent-of-code
|
c50904f294922ff5872bc07db997c3092e3edd6b
|
ece7de49c4e903ba0049bc259cf26f3725c395a7
|
refs/heads/master
| 2020-06-15T19:07:54.928476
| 2017-11-05T17:28:12
| 2017-11-05T17:28:12
| 75,269,420
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 568
|
py
|
import numpy as np
num = 5
# num = 3005290
elves = [[1,"nr"+str(x+1)] for x in range(num)]
length = num
i = 0
while len(elves) > 1:
print(len(elves))
mod = i % len(elves)
mod1 = (i+1) % len(elves)
# print(123)
# if elves[mod][0] == 0:
print(i,elves,mod,mod1,elves[mod])
# asdasd
# del elves[mod]
# length -= 1
# continue
elves[mod][0] += elves[mod1][0]
# print(123)
del elves[mod1]
# print(123)
i += 1
# print(i,length,elves)
input()
print("svar:", elves)
|
[
"Morten Aske Kolstad"
] |
Morten Aske Kolstad
|
a012a1945078592972a0b0eb86bd73c2f1c91307
|
817447bfc083893542a98024e546a23d828c23b2
|
/main.py
|
ceb0e3209e0647804c310bc14285ec801f5d1255
|
[] |
no_license
|
Woofka/tgbot-remote-helper-daemon
|
53bc94361d601cafdec8500af98d8fa197801f80
|
0c6ba76ab3f95da45c24275d2a2981096986e379
|
refs/heads/master
| 2022-11-30T22:09:08.525015
| 2020-08-01T21:52:10
| 2020-08-01T21:52:10
| 282,190,058
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,923
|
py
|
import subprocess
import re
import socket
import logging
import os
import datetime
from logger import setup_logger
from protocol import Protocol
setup_logger()
log = logging.getLogger('logger')
RU = {
'encoding': 'cp866',
'startup_time': 'Время загрузки системы',
'mac': r'.{0,}Физический адрес.{0,}',
'ip': r'.{0,}IPv4-адрес.{0,}'
}
STARTUP_FILE_NAME = 'startup.info'
PROTO_PORT_SERVER = 10788
PROTO_PORT_CLIENT = 10789
def system_startup_time():
if os.path.exists(STARTUP_FILE_NAME):
with open(STARTUP_FILE_NAME, 'r') as f:
return f.read()
else:
return update_startup_time()
def update_startup_time():
with open(STARTUP_FILE_NAME, 'w') as f:
time = datetime.datetime.now().isoformat()
f.write(time)
return time
def handle(data):
packet = Protocol.decode(data)
if packet.code == Protocol.CODE_IFALIVE:
return Protocol(Protocol.CODE_IFALIVE, packet.uid, packet.cid).encode()
elif packet.code == Protocol.CODE_ASKSTARTTIME:
return Protocol(Protocol.CODE_STARTTIME, packet.uid, packet.cid, system_startup_time()).encode()
else:
return None
def main():
log.info('Starting daemon')
log.info('Updating startup time')
update_startup_time()
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.bind(('', PROTO_PORT_SERVER))
while True:
data, addr = sock.recvfrom(1024)
log.debug(f'From {addr[0]}:{addr[1]} {data}')
try:
answ = handle(data)
log.debug(f'Response: {answ}')
if answ is not None:
sock.sendto(answ, (addr[0], PROTO_PORT_CLIENT))
except KeyboardInterrupt:
break
except Exception as err:
log.error(f'Packet: {data}. Error msg: {err}')
if __name__ == '__main__':
main()
|
[
"v.d.sobolev@ya.ru"
] |
v.d.sobolev@ya.ru
|
f237237b67de61db0dbd706d30d17fdb4a1b2095
|
6923f7ea47f0e59c44dfb7ca908352009712d8a6
|
/ch09/ch09_01/store/store_01.py
|
2ce3967c6ecda53a6b4c021f201c3efd1d42452e
|
[
"MIT"
] |
permissive
|
AngelLiang/the-hackers-guide-to-python-3rd-edition
|
489ed40bad8c8ba3b9bfc9b85ab9962beaf2436c
|
4e06c8d58e0d18fff612762e2c57a5050d22cc96
|
refs/heads/master
| 2021-07-08T07:20:09.387450
| 2019-12-28T07:10:48
| 2019-12-28T07:10:48
| 230,555,454
| 0
| 0
|
MIT
| 2021-03-20T02:53:08
| 2019-12-28T03:58:12
|
Python
|
UTF-8
|
Python
| false
| false
| 516
|
py
|
"""
考虑这样一组方法,它们在被调用时检查“用户名”参数
"""
class Store(object):
def __init__(self):
self.storage = {}
def get_food(self, username, food):
if username != 'admin':
raise Exception('This user is not allowed to get food')
return self.storage.get(food)
def put_food(self, username, food):
if username != 'admin':
raise Exception('This user is not allowed to put food')
return self.storage.put(food)
|
[
"pl01665077@163.com"
] |
pl01665077@163.com
|
a780996a48a3597eac28adbdf55912a7bac858d6
|
f61e1a78a630dab7f6ec0bfc67e596c843c4517b
|
/03_functions_as_objects/03_function_decorators_and_closures/06_using_nonlocal_variables_to_calculate_a_running_average.py
|
27bd6b3def8a7473d73912b27c2592c0b41fa3e7
|
[] |
no_license
|
xwzliang/fluent_python
|
0e8879997dca36a19da1127de6d35ad5cf901151
|
9477b544309616f43de4da65e103428f8f8512a6
|
refs/heads/master
| 2021-08-06T15:27:15.896536
| 2020-06-03T05:46:44
| 2020-06-03T05:46:44
| 183,656,945
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,172
|
py
|
# Our previous implementation of make_averager was not efficient. We stored all the values in the historical series and computed their sum every time averager was called. A better implementation would just store the total and the number of items so far, and compute the mean from these two numbers.
def make_average_broken():
"""
>>> avg = make_average_broken()
>>> avg(10) # doctest +ELLIPSIS
Traceback (most recent call last):
...
UnboundLocalError: local variable 'count' referenced before assignment
"""
count = 0
total = 0
def averager(new_value):
# The problem is that the statement count += 1 actually means the same as count = count + 1, when count is a number or any immutable type. So we are actually assigning to count in the body of averager, and that makes it a local variable. The same problem affects the total variable.
# We did not have this problem in previous example because we never assigned to the series name; we only called series.append and invoked sum and len on it. So we took advantage of the fact that lists are mutable.
# But with immutable types like numbers, strings, tuples, etc., all you can do is read, but never update. If you try to rebind them, as in count = count + 1, then you are implicitly creating a local variable count. It is no longer a free variable, and therefore it is not saved in the closure.
count += 1
total += new_value
return total / count
return averager
def make_average():
"""
>>> avg = make_average()
>>> avg(10)
10.0
>>> avg(11)
10.5
>>> avg(12)
11.0
"""
count = 0
total = 0
def averager(new_value):
# The nonlocal declaration was introduced in Python 3. It lets you flag a variable as a free variable even when it is assigned a new value within the function. If a new value is assigned to a nonlocal variable, the binding stored in the closure is changed.
nonlocal count, total
count += 1
total += new_value
return total / count
return averager
if __name__ == "__main__":
import doctest
doctest.testmod()
|
[
"18706703437@163.com"
] |
18706703437@163.com
|
ca22028c3a9cf36604959663a52a6adf5c7b1067
|
b3ab259a24aec7aa0f1f11ec70bf6e33ea094724
|
/airflow_ml_dags/tests/conftest.py
|
8ed2ba77d81e5de4cfc651765423227064c7bec9
|
[] |
no_license
|
made-ml-in-prod-2021/opl242-test
|
56ac054ae43c3f0d496363783b87ffa76f46fecf
|
7e7116ca27b980dcf57caade4a99e94075cbf9fb
|
refs/heads/main
| 2023-06-08T22:00:02.151094
| 2021-06-25T09:46:04
| 2021-06-25T09:46:04
| 353,401,501
| 0
| 0
| null | 2021-06-25T09:46:05
| 2021-03-31T15:20:31
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 146
|
py
|
import pytest
from airflow.models import DagBag
@pytest.fixture()
def dag_bag():
return DagBag(dag_folder="../dags", include_examples=False)
|
[
"alexey.opolchenov@hpe.com"
] |
alexey.opolchenov@hpe.com
|
bb2ccaef367f272790672ad675bf028e70f72b1a
|
6a7ff23548f88f079411fb99192bfa26e5b085da
|
/src/linear_regression.py
|
83b8944a0ad280493220e47428ccfb47384819d3
|
[] |
no_license
|
AlexMontgomerie/ML
|
c399fc12e3b7c304c3939173fa8b1ff5121f586d
|
84406a35f60379a6346755f6cca34ac2def5ca08
|
refs/heads/master
| 2021-01-25T12:42:40.750142
| 2018-03-23T00:09:24
| 2018-03-23T00:09:24
| 123,496,831
| 1
| 0
| null | 2018-03-23T00:09:24
| 2018-03-01T21:54:26
|
Python
|
UTF-8
|
Python
| false
| false
| 9,851
|
py
|
from matplotlib import pyplot as plt
from sklearn import linear_model
from sklearn.metrics import accuracy_score
import numpy as np
import seaborn as sns
from sklearn.svm import SVR
from sklearn.model_selection import KFold
from sklearn.metrics import mean_absolute_error
np.random.seed(0)
NUM_FEATURE = 12
def get_data(red=True):
#get red wine data
red_data = np.genfromtxt('../data/winequality-red.csv',delimiter=';')
red_data = np.delete(red_data,(0),axis=0)
red_feature = np.array([[1 for i in range(red_data.shape[0])]])
red_y = np.array([red_data[:,11]])
red_data = np.append(red_data[:,0:11],red_feature.T,axis=1)
red_data = np.append(red_data,red_y.T,axis=1)
if red:
return red_data
#get white wine data
white_data = np.genfromtxt('../data/winequality-white.csv',delimiter=';')
white_data = np.delete(white_data,(0),axis=0)
white_feature = np.array([[-1 for i in range(white_data.shape[0])]])
white_y = np.array([white_data[:,11]])
white_data = np.append(white_data[:,0:11],white_feature.T,axis=1)
white_data = np.append(white_data,white_y.T,axis=1)
#combine the 2
data = np.append(red_data,white_data,axis=0)
np.random.shuffle(data)
return data
#######################
### Normalise data ###
def normalise(data):
return (data - data.mean())/data.std()
def normalise_all(data):
for i in range(NUM_FEATURE):
data[:,i] = normalise(data[:,i])
return data
######################
### Split Training, Test and Validation sets ###
def split_data(data,train_split=0.8):
#train,test,val
train,test = np.split(data, [int(train_split*data.shape[0])])
train_x,train_y = np.hsplit(train,[NUM_FEATURE])
test_x,test_y = np.hsplit(test, [NUM_FEATURE])
for i in range(NUM_FEATURE):
mean = train_x[:,i].mean()
std = train_x[:,i].std()
#normalise training data
#normalise test data
test[:,i]
return train_x,train_y,test_x,test_y
##### Loss Functions #####
def square_loss(y,pred_y):
return ((y-pred_y)**2).mean()
#N = len(y)
#error_sum = 0
#for i in range(N):
# error_sum += (y[i]-pred_y[i])**2
#return error/N
def mae_loss(y,pred_y):
return (y-pred_y).mean()
def identity_loss(y,pred_y):
N = len(y)
error_sum = 0
for i in range(N):
if y[i] != pred_y[i]:
error_sum += 1
return error_sum/N
##########################
################################################
class lin_regr:
def __init__(self,data,reguliser):
self.data = data
self.train_x = np.array([])
self.train_y = np.array([])
self.test_x = np.array([])
self.test_y = np.array([])
self.weights = np.array([])
self.reguliser = 'ridge'
self.init_data()
def split_data(self,train_split=0.8):
#TODO: randomise data split
train,test = np.split(self.data, [int(train_split*self.data.shape[0])])
#update datasets
self.train_x,self.train_y = np.hsplit(train,[NUM_FEATURE])
self.test_x,self.test_y = np.hsplit(test, [NUM_FEATURE])
def init_data(self):
self.split_data()
#normalise all x
self.train_x = normalise_all(self.train_x)
self.test_x = normalise_all(self.test_x)
#bias all x
self.train_x = self.add_bias(self.train_x)
self.test_x = self.add_bias(self.test_x)
def fit(self,x,y,l=0.001):
if self.reguliser=='ridge':
tmp = np.matmul(x.T,x) + np.diag([l for i in range(NUM_FEATURE+1)])
tmp = np.linalg.pinv(tmp)
tmp = np.matmul(tmp,x.T)
self.weights = np.matmul(tmp,y)
else:
pinv_x = np.linalg.pinv(x)
self.weights = np.matmul(pinv_x,y)
def add_bias(self,x):
ones = np.array([[1] for i in range(len(x[:,0]))])
return np.concatenate((x,ones),axis=1)
def predict(self,x):
y = np.dot(x,self.weights)
return classify(y)
def classify(self,y):
for i in range(len(y)):
for j in range(11):
if y[i] > (j-0.5) and y[i] <= (j+0.5):
y[i] = j
if y[i] < 0:
y[i] = 0
if y[i] > 10:
y[i] = 10
return y
def cross_validation(self,k=10,loss=square_loss):
#split into K folds
data = np.concatenate((self.train_x,self.train_y),axis=1)
kf = KFold(n_splits=k)
error_sum = 0
for train,test in kf.split(data):
train_data = np.array(data)[train]
test_data = np.array(data)[test]
train_x, train_y = np.hsplit(train_data,[NUM_FEATURE+1])
test_x , test_y = np.hsplit(test_data,[NUM_FEATURE+1])
self.fit(train_x,train_y)
pred_y = self.predict(test_x)
pred_y = self.classify(pred_y)
error_sum += loss(test_y,pred_y)
print("cross validation error: ",error_sum/k)
return error_sum/k
### Compute Linear Regression Weights ###
def fit(x,y,ridge=False,l=0.001):
if ridge:
tmp = np.matmul(np.transpose(x),x) + np.diag([l for i in range(NUM_FEATURE+1)])
tmp = np.linalg.pinv(tmp)
tmp = np.matmul(tmp,x.T)
return np.matmul(tmp,y)
else:
pinv_x = np.linalg.pinv(x)
return np.matmul(pinv_x,y)
def add_bias(x):
ones = np.array([[1] for i in range(len(x[:,0]))])
return np.concatenate((x,ones),axis=1)
def my_lin_regr(data):
train_x,train_y,test_x,test_y = split_data(data)
#normalise all x
train_x = normalise_all(train_x)
test_x = normalise_all(test_x)
#train_x = train_x[:,[1,10]]
#test_x = test_x[:,[1,10]]
#bias all x
train_x = add_bias(train_x)
test_x = add_bias(test_x)
#get learned weight
weights = fit(train_x,train_y,ridge=True)
cross_validation(train_x,train_y,weights)
pred_y = predict(train_x,weights)
print("(my) linear regression accuracy: ",accuracy_score(train_y,pred_y))
def bin_classify_y(y,val):
tmp = np.array([[0] for i in range(len(y))])
for i in range(len(y)):
if y[i][0] >= val:
tmp[i][0] = 1
else:
tmp[i][0] = -1
return tmp
def predict(x,weights):
y = np.dot(x,weights)
return classify(y)
def classify(y):
for i in range(len(y)):
for j in range(11):
if y[i] > (j-0.5) and y[i] <= (j+0.5):
y[i] = j
if y[i] < 0:
y[i] = 0
if y[i] > 10:
y[i] = 10
return y
def svd_reduction(data):
u,s,vh = np.linalg.svd(data,full_matrices=False)
data = np.matmul(np.matmul(u,np.diag(s)),vh)
return data
#########################################
def sk_lin_regr(data):
#data = normalise(data)
train_x,train_y,test_x,test_y = split_data(data)
print(train_x[0])
regr = linear_model.LinearRegression()
regr.fit(train_x,train_y)
pred_y = regr.predict(test_x)
print("(sk) linear regression MAE: ",mean_absolute_error(test_y,pred_y))
return regr
def sk_lasso_regr(data,alpha=0.1):
#data = normalise(data)
train_x,train_y,test_x,test_y = split_data(data)
regr = linear_model.Lasso(alpha)
regr.fit(train_x,train_y)
pred_y = regr.predict(test_x)
print("(sk) linear regression Lasso MAE: ",mean_absolute_error(test_y,pred_y))
return regr
def sk_ridge_regr(data,alpha=0.1):
#data = normalise(data)
train_x,train_y,test_x,test_y = split_data(data)
regr = linear_model.Ridge(alpha)
regr.fit(train_x,train_y)
pred_y = regr.predict(train_x)
'''
#tuning hyperparameter
lmbda = 50
prev_err = cross_validation(train_x,train_y,regr);
print("Previous ERROR: ",prev_err)
alpha += lmbda*0.00001
for i in range(200):
regr = linear_model.Ridge(alpha)
regr.fit(train_x,train_y)
curr_err = cross_validation(train_x,train_y,regr);
err_gradient = curr_err - prev_err
print("Error: ",curr_err,", Alpha: ",alpha,", gradient: ",err_gradient)
alpha = alpha - lmbda*err_gradient
'''
pred_y = regr.predict(test_x)
print("(sk) linear regression Ridge MAE: ",mean_absolute_error(test_y,pred_y))
return regr
def sk_elastic_regr(data,alpha=0.1, l1_ratio=0.5):
#data = normalise(data)
train_x,train_y,test_x,test_y = split_data(data)
regr = linear_model.ElasticNet(alpha, l1_ratio)
regr.fit(train_x,train_y)
pred_y = regr.predict(test_x)
pred_y = classify(pred_y)
print("(sk) linear regression Elastic Net MAE: ",mean_absolute_error(test_y,pred_y))
return regr
def sk_svm(data):
#split the data
train_x,train_y,test_x,test_y = split_data(data)
#normalise all x
train_x = normalise_all(train_x)
test_x = normalise_all(test_x)
print(bin_classify_y(test_y,6))
#define SVR
clf = SVR(C=1.0, epsilon=0.2)
clf.fit(train_x,train_y)
#predict y using SVC
pred_y = clf.predict(test_x)
pred_y = classify(pred_y)
print("(sk) support vector machine for regression accuracy: {}",accuracy_score(test_y,pred_y))
return
##### Validation #####
#TODO: create K-fold cross-validation function
def cross_validation(train_x,train_y,model,k=10,loss=square_loss):
#split into K folds
data = np.concatenate((train_x,train_y),axis=1)
kf = KFold(n_splits=k)
error_sum = 0
for train,test in kf.split(data):
train_data = np.array(data)[train]
test_data = np.array(data)[test]
train_x, train_y = np.hsplit(train_data,[NUM_FEATURE])
test_x , test_y = np.hsplit(test_data,[NUM_FEATURE])
model.fit(train_x,train_y)
pred_y = model.predict(test_x)
error_sum += loss(test_y,pred_y)
return error_sum/k
if __name__=="__main__":
data = get_data()
model = sk_lin_regr(data)
#sk_ridge_regr(data,0.05)
'''
for i in range(20):
alpha = pow(10,-(i-5))
print("Alpha: ", alpha)
#sk_lasso_regr(data,alpha)
sk_ridge_regr(data,alpha)
print("\n ////////////////////// \n")
for i in range(20):
for j in range(10):
alpha = pow(10,-i)
l1_ratio = 0.1*(j+1)
print("Alpha: ", alpha,", L1 Ratio: ",l1_ratio)
sk_elastic_regr(data,alpha,l1_ratio)
#my_lin_regr(data)
#sk_svm(data)
#tmp = lin_regr(data,'ridge')
#tmp.cross_validation(loss=identity_loss)
'''
|
[
"AlexMontgomerie@github.com"
] |
AlexMontgomerie@github.com
|
d349920afeff629eaa5fa25892e2b4a8db4469c3
|
db0f2cd9a6eda7bfe05141e707936765a12fc342
|
/Backend/FuzzingManager.py
|
c19ff0e33d1db739cb9b18f62324c421da2d3485
|
[] |
no_license
|
nhrade/software-2-cerberus5
|
b96b063406588595f94b52104e0d1e5a768846ef
|
d2fe157b695e5d4514263f990cbd87397914e493
|
refs/heads/master
| 2020-04-27T10:13:19.845541
| 2019-05-17T15:41:28
| 2019-05-17T15:41:28
| 174,244,955
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 691
|
py
|
from PacketModifier import *
from Fuzzer import *
from scapy.all import *
import sys
import afl
'''
FuzzingManager.py
Manages the fuzzing of packets and is external interface to other subsystems
'''
class FuzzingManager:
def __init__(self):
self.packetModifier = PacketModifier()
self.fuzzer = Fuzzer('afl_input/', 'afl_output/')
afl.init()
def generateFuzzValue(self, field, vrange):
v = self.fuzzer.generateValue()
IP(dst=v[1], src=v[1])
os._exit(0)
def generateFuzzedPacket(self, packet, field, vrange, value):
pass
if __name__ == '__main__':
fm = FuzzingManager()
fv = fm.generateFuzzValue('dst', (1, 10))
|
[
"noahh717@gmail.com"
] |
noahh717@gmail.com
|
12efc0252584a8df85b0a963bb3f768250a40eaf
|
0e69b74bf7a7eee0937a2a8f199ce1538e4e72b4
|
/__init__.py
|
e59b4b940c9ae8eef2e0ae07f9bdfb25adbe78ae
|
[
"MIT"
] |
permissive
|
Zhou-chengy/Pyai
|
5a99059c704a36f5f8761aed6a3a993d947c2e08
|
152aab5ed2b0d87357d5756bfda6c48b310fbbab
|
refs/heads/main
| 2022-12-22T12:30:06.907229
| 2020-10-03T07:51:02
| 2020-10-03T07:51:02
| 300,823,917
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,608
|
py
|
__version__ = '1.0.1'
class robot(object):
def myrobot(self):
print('你好,请问需要什么帮助?')
ji = input()
if ji=='你好':
print('你好')
if ji=='你的名字叫什么':
print('我的名字叫Robin')
if ji=='你会说中文吗':
print('我当然会啊!')
if ji=='你会Pyhon吗?':
print('我想做一条蟒蛇,把你紧紧缠住,一辈子')
if ji=='你会说中文吗?':
print('我说的都是标准中文')
if ji=='圆周率':
print('3.14159265358979')
if ji=='你会编程吗?':
print('当然会啊,有我不会东西吗?哈哈哈')
if ji=='会Java吗?':
print('编程这东西主要在于你的思想,而不是什么语言')
if ji=='你很会说话':
print('我嘴刚抹了蜜')
if ji=='谢谢':
print('不客气,应该的')
if ji=='你会说什么语言':
print('我会中文')
if ji=='诸葛亮是谁':
print(' 诸葛亮(181年~234年10月8日 [1] ),字孔明,号卧龙,琅琊阳都(今山东省沂南县)人 [2] ,三国时期蜀汉丞相,中国古')
print('代杰出的政治家、军事家、文学家。')
if ji=='中华人民共和国哪一年成立?':
print('1949年')
if ji=='1+1等于几':
print('1+1=2')
if ji=='再见':
print('不想说拜拜,好像在和你聊一会儿啊!')
def close(self):
break
def main():
pass
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
Zhou-chengy.noreply@github.com
|
6d4da2ccd59c101b899a3bbe4bc058c17268285a
|
a0aa4fe19b6723bd81da4d96739462dd4d5aa4ec
|
/thebreeze_app/apps.py
|
7d4fb58f7a750f316241f2239bb4aad863e2c152
|
[] |
no_license
|
PickleEric/thebreeze
|
c44c45b7a51db160d0b7e5f5468fa0ec3f7eff6a
|
83cc40a014a88c1b11ec695cdd935630385db34c
|
refs/heads/master
| 2023-05-29T16:18:52.872082
| 2021-06-23T04:08:26
| 2021-06-23T04:08:26
| 371,221,655
| 0
| 0
| null | 2021-06-23T04:08:27
| 2021-05-27T02:19:07
|
Python
|
UTF-8
|
Python
| false
| false
| 100
|
py
|
from django.apps import AppConfig
class ThebreezeAppConfig(AppConfig):
name = 'thebreeze_app'
|
[
"60155510+PickleEric@users.noreply.github.com"
] |
60155510+PickleEric@users.noreply.github.com
|
8932559676ce4581eaab77f86fe87fe17d5d9f15
|
af688fedfc22824e29ecb61aff6d9cf786c26db1
|
/fluidics/valves/tc_commands.py
|
45c9a9eeb6978ddc4fb9547e32f7252119e8b782
|
[] |
no_license
|
keyuxi/fluidics-control
|
f8ee6be456a0b7493004f9567e37987a95f42edf
|
ff7df176a788e08a7f61ece0e252f1c760f51fae
|
refs/heads/master
| 2020-12-07T11:52:34.702159
| 2020-01-16T00:55:30
| 2020-01-16T00:55:30
| 232,716,278
| 0
| 0
| null | 2020-01-09T03:44:00
| 2020-01-09T03:44:00
| null |
UTF-8
|
Python
| false
| false
| 5,953
|
py
|
import serial
def tc_initialize(ser):
cmd_name = ["1. INPUT1 (reads the temperature of the primary thermistor)","2. DESIRED CONTROL VALUE (set value)","3. POWER OUTPUT","4. ALARM STATUS","5. INPUT 2","6. OUTPUT CURRENT COUNTS","7. ALARM TYPE",
"8. SET TYPE DEFINE (the desired control temperature or “set temp” input definition)","9. SENSOR TYPE","10. CONTROL TYPE","11. CONTROL OUTPUT POLARITY","12. POWER ON/OFF",
"13. OUTPUT SHUTDOWN IF ALARM","14. FIXED DESIRED CONTROL SETTING","15. PROPORTIONAL BANDWIDTH","16. INTEGRAL GAIN","17. DERIVATIVE GAIN","18. LOW EXTERNAL SET RANGE",
"19. HIGH EXTERNAL SET RANGE","20. ALARM DEADBAND","21. HIGH ALARM SETTING","22. LOW ALARM SETTING","23. CONTROL DEADBAND SETTING","24. INPUT1 OFFSET","25. INPUT2 OFFSET","26. HEAT MULTIPLIER",
"27. COOL MULTIPLIER","28. OVER CURRENT COUNT COMPARE VALUE","29. ALARM LATCH ENABLE","30. COMMUNICATIONS ADDRESS (reserved command, do not use)","31. ALARM LATCH RESET","32. CHOOSE SENSOR FOR ALARM FUNCTION",
"33. CHOOSE °C or °F TEMPERATURE WORKING UNITS","34. EEPROM WRITE ENABLE","35. OVER CURRENT CONTINUOUS","36. OVER CURRENT RESTART ATTEMPS","37. JP3 DISPLAY ENABLE"]
read_cmd = ["01","03","02","05","06","07","41","42","43","44","45","46","47","50","51","52","53","54","55","56","57","58","59","5a","5b","5c","5d","5e","48",None,None,"4a","4b","4c","4d","5f","4e"]
write_cmd = [None,None,None,None,None,None,"28","29","2a","2b","2c","2d","2e","1c","1d","1e","1f","20","21","22","23","24","25","26","27","0c","0d","0e","2f",None,"33","31","32","34","35","0f","36"]
init_val = [2043,0,0,0,15566,130,0,0,1,2,1,0,1,0,500,0,0,0,10000,100,10000,0,100,0,0,20,20,14,0,None,0,0,1,1,0,300,0]
for cmd_i in range(len(read_cmd)):
if read_cmd[cmd_i] != None:
print('\n\n%s: read %s' %(cmd_name[cmd_i], read_cmd[cmd_i]))
val_back, _ = tc_talk(ser, [c for c in read_cmd[cmd_i]], 0)
if write_cmd[cmd_i] != None and val_back != init_val[cmd_i]:
print("Reset %s from %d to %d." % (cmd_name[cmd_i], val_back, init_val[cmd_i]))
_ = tc_talk(ser, [c for c in write_cmd[cmd_i]], init_val[cmd_i])
val_back_check,_ = tc_talk(ser, [c for c in read_cmd[cmd_i]], 0)
print(val_back_check)
def tc_talk(ser, cmd_address, cmd_val):
"""
Args:
cmd_address - list of 2, specify the type of cmd
cmd_val - decimal value for write
Returns:
val_back - int, converted decimal
buf - list of length 8, original msg form TC
"""
buf = [0,0,0,0,0,0,0,0,0,0,0,0]
msg = tc_encoder(cmd_address, cmd_val)
print('send: %s' % ''.join(msg))
for pn in range(0,16):
ser.write((msg[pn]).encode())
for pn in range(0,12):
buf[pn]=ser.read(1).decode()
#print(buf[pn])
print('back: %s' % ''.join(buf))
val_back = tc_decoder(buf, msg)
return val_back, buf
def tc_encoder(cmd_address, cmd_val):
"""
Args:
cmd_address - list of 2, specify the type of cmd
cmd_val - int, decimal value for write
Returns:
cmd_str - list of char, final command to send to TC3625
"""
# convert value
val_hex = dec2hex(cmd_val)
# device address + command address + hex value
cmd_str_core = ['0', '0'] + cmd_address + val_hex
# check sum
cmd_str_core_ascii = [ord(char) for char in cmd_str_core]
cmd_sum = hex(sum(cmd_str_core_ascii))[2:]
cmd_sum = cmd_sum[-2:].zfill(2)
# complete string
cmd_str = ['*'] + cmd_str_core + [char.lower() for char in cmd_sum] + ['\r']
return cmd_str
def tc_decoder(cmd_back, cmd_str):
"""
Returns:
val_back - int, decimal
"""
if cmd_back == [0,0,0,0,0,0,0,0,0,0,0,0]:
print(cmd_str)
print('Cannot hear from COM.')
val_back = None
elif cmd_back == ['*','X','X','X','X','X','X','X','X','c','0','\r']:
print(cmd_str)
print('Check sum for the input is incorrect.')
val_back = None
else:
back_sum = hex(sum([ord(char) for char in cmd_back[1:-3]]))[2:]
back_sum = back_sum[-2:].zfill(2)
if back_sum != (cmd_back[-3] + cmd_back[-2]):
print('back checksum: %s' % back_sum)
print(cmd_back)
print('Checksum of the command returned from COM is incorrect.')
val_back = None
else:
print(cmd_back[1:9])
val_back = int(''.join(cmd_back[1:9]), 16)
print('value back: %d' % val_back)
return val_back
def hexc2dec(bufp):
"""
Args:
bufp - list of len 8
Returns:
newval - int in decimal
"""
newval=0
divvy=pow(16,7)
#sets the word size to DDDDDDDD
for pn in range (1,7):
vally=ord(bufp[pn])
if(vally < 97):
subby=48
else:
subby=87
# ord() converts the character to the ascii number value
newval += ((ord(bufp[pn])-subby)*divvy)
divvy/=16
if(newval > pow(16,8)/2-1):
newval=newval-pow(16,8)
#distinguishes between positive and negative numbers
return newval
def dec2hex(val):
"""
Args:
val - int, decimal
Returns:
list of len 8, hex value
"""
hexval = hex(val)[2:].zfill(8)
return [char.lower() for char in hexval]
if __name__ == "__main__":
# tc = TC()
# tc.talk()
# tc.listen()
# tc.close()
ser = serial.Serial('com3', 9600, timeout=1)
tc_initialize(ser)
# _ = tc_talk(ser, ['1','c'], 1000)
# set_temp, _ = tc_talk(ser, ['5','0'], 0)
# print(set_temp/100.0)
# val_back, buf = tc_talk(ser, ['0','1'], 0)
# print(val_back/100.0)
ser.close()
print("PORT CLOSED")
|
[
"keyuxi@yeah.net"
] |
keyuxi@yeah.net
|
b98ec8bf3c86e1530a28a16eb01c6257fb3d0e96
|
8dab1c4de820c66b386cf4560ff8c661a1ce6dea
|
/prueba/public/my-project/application.wsgi
|
c05f2d91cc25b3b565a15a8e06b0336d6186304a
|
[] |
no_license
|
danmcode/servicios-telematicos
|
fc83cecac4eb0f87809bbf3ebdcdc10c4f9d787f
|
ab5913a138626aa8ced6cd4ad3bca8375a402833
|
refs/heads/master
| 2023-04-06T04:18:38.760867
| 2021-04-20T02:50:18
| 2021-04-20T02:50:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 106
|
wsgi
|
#!/usr/bin/python
import sys
sys.path.insert(0,"/var/www/my-project/")
from app import app as application
|
[
"danielmuelasrivera483@gmail.com"
] |
danielmuelasrivera483@gmail.com
|
186559069415e595b28a5f470d5ff3dad932a6ae
|
9edaf93c833ba90ae9a903aa3c44c407a7e55198
|
/travelport/models/base_search_rsp_5.py
|
b0dbc89115df2f46fc141656bd0422326d1818ea
|
[] |
no_license
|
tefra/xsdata-samples
|
c50aab4828b8c7c4448dbdab9c67d1ebc519e292
|
ef027fe02e6a075d8ed676c86a80e9647d944571
|
refs/heads/main
| 2023-08-14T10:31:12.152696
| 2023-07-25T18:01:22
| 2023-07-25T18:01:22
| 222,543,692
| 6
| 1
| null | 2023-06-25T07:21:04
| 2019-11-18T21:00:37
|
Python
|
UTF-8
|
Python
| false
| false
| 666
|
py
|
from __future__ import annotations
from dataclasses import dataclass, field
from travelport.models.base_rsp_5 import BaseRsp5
from travelport.models.next_result_reference_5 import NextResultReference5
__NAMESPACE__ = "http://www.travelport.com/schema/common_v37_0"
@dataclass
class BaseSearchRsp5(BaseRsp5):
class Meta:
name = "BaseSearchRsp"
next_result_reference: list[NextResultReference5] = field(
default_factory=list,
metadata={
"name": "NextResultReference",
"type": "Element",
"namespace": "http://www.travelport.com/schema/common_v37_0",
"max_occurs": 999,
}
)
|
[
"chris@komposta.net"
] |
chris@komposta.net
|
b6fe39655b437a466d7d0d92b5db2d795146ce61
|
cf3c6990c7470ad37a3ff509deb5b427baa4f682
|
/Map_Gen/Engine.py
|
f246b7a375e09625b4be78139f669d2067bcc5bb
|
[
"Apache-2.0"
] |
permissive
|
Djsurry/WordRPG
|
350c550929a36ad0f921b1d861887a5fa3ad301f
|
03dec4244530fed43c4e925d1489ad0356c265b0
|
refs/heads/master
| 2020-04-11T14:45:20.859080
| 2018-12-15T02:13:45
| 2018-12-15T02:13:45
| 161,866,734
| 0
| 0
|
Apache-2.0
| 2018-12-15T03:39:11
| 2018-12-15T03:39:11
| null |
UTF-8
|
Python
| false
| false
| 8,258
|
py
|
##-- Import --##
import os
import time
import random
import sqlite3
import pandas
##-- Custom Import --##
#from Map_Gen import Biome
from Map_Gen import Biome
from Map_Gen import SubBiome
##-- GLOBAL VERAIBLES --##
##-- Map Width and Hight --##
##-- I am thinking maybe start with a 50 x 50 map but when all is said and done --##
##-- 1000 x 1000 will be the map size --##
map_width = 100
map_hight = 100
sub_map_width = 10
sub_map_hight = 10
map_seed = []
# coords
x = 1
y = 1
##-- Clears print screen --##
def clear():
if os.name == 'nt':
_ = os.system('cls')
else:
_ = os.system('clear')
##-- This is the blueprint of the tiles or biomes --##
class Tile:
def __init__(self, coords_x, coords_y, abriv, name, rarity, difficulty, enterable, exit_x, exit_y, discovered=False):
self.coords_x = coords_x
self.coords_y = coords_y
self.abriv = abriv
self.name = name
self.rarity = rarity ##-- How often you need to be spawned --##
self.difficulty = difficulty
self.enterable = enterable ##-- True or False --##
self.floor = 0 ##-- in SQL it is tile_level --##
self.exit_x = exit_x
self.exit_y = exit_y
self.discovered = discovered
class SubTile:
def __init__(self, from_x, from_y, coords_x, coords_y, biome_name, place_name, difficulty):
self.from_x = from_x
self.from_y = from_y
self.coords_x = coords_x
self.coords_y = coords_y
self.biome_name = biome_name
self.place_name = place_name
self.difficulty = difficulty
self.floor = 1 ##-- in SQL it is tile_level --##
self.discovered = False
##-- Finds a random biome to be made in a tile --##
def get_random_biome():
biome_keys = Biome.world_biomes.keys()
list_of_biome_abriv = []
for bio in biome_keys:
list_of_biome_abriv.append(bio)
return random.choice(list_of_biome_abriv)
def get_random_sub_biome(key):
biome_keys = SubBiome.sub_biome[key].keys()
list_of_sub_biome_abriv = []
for bio in biome_keys:
list_of_sub_biome_abriv.append(bio)
return random.choice(list_of_sub_biome_abriv)
def set_exit_coords_x():
return random.randint(0, sub_map_width)
def set_exit_coords_y():
return random.randint(0, sub_map_hight)
##-- right now there is only Common Uncommon and Rare, But maybe i will add Epic as well --##
def rare_control():
while True:
r_biome = get_random_biome()
if Biome.world_biomes[r_biome]['rarity'] == 'Rare':
num = random.randint(0,100)
if num == 50:
return r_biome
elif Biome.world_biomes[r_biome]['rarity'] == 'Uncommon':
num = random.randint(0,20)
if num == 10:
return r_biome
elif Biome.world_biomes[r_biome]['rarity'] == 'Common':
return r_biome
##-- Gets a random number to set biome difficulty --##
def ran_diff():
return random.randint(1, 10)
##-- Map is placed into a class here --##
def map_builder():
loading_dots = 0
clear()
print('Loading.')
for spot in range(0, (map_width * map_hight)):
##-- spot is index and coord_x and coord_y is finding the (x, y) coordinates from index (AKA spot) --##
if spot in [10, 100, 500, 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000]:
loading_dots += 1
clear()
print('-------------------')
print('Loading' + '.' * loading_dots)
print('-------------------')
coord_y = int((spot / map_width))
coord_x = spot - (map_width * coord_y)
biome = rare_control()
map_seed.append(biome)
biome_name = Biome.world_biomes[biome]['name']
biome_rarity = Biome.world_biomes[biome]['rarity']
biome_layers = Biome.world_biomes[biome]['enterable']
##-- coords_x, coords_y, name, rarity, difficulty, enterable=False, discovered=False --##
##-- coords_x, coords_y, name, rarity, difficulty, enterable, exit_x, exit_y, discovered=False --##
insert_tile(Tile(coord_x, coord_y, biome, biome_name, biome_rarity, ran_diff(), biome_layers, set_exit_coords_x(), set_exit_coords_y()))
if biome_layers:
for subspot in range(0, (sub_map_width * sub_map_hight)):
sub_coord_y = int((subspot / sub_map_width))
sub_coord_x = subspot - (sub_map_width * sub_coord_y)
# print(f"X: {coord_x}")
# print(f"Y: {coord_y}")
# print(f"SUB X: {sub_coord_x}")
# print(f"SUB Y: {sub_coord_y}")
# input("Enter")
try:
sub_biome_name = SubBiome.sub_biome[biome][get_random_sub_biome(biome)]['name']
except:
continue
insert_subtile(SubTile(coord_x, coord_y, sub_coord_x, sub_coord_y, biome_name, sub_biome_name, ran_diff()))
##-- mapfile is the varible name of the file that the map is stored in --##
mapfile = 'Worldmap.db'
##-- Conn connects to the database --##
conn = sqlite3.connect(mapfile)
##-- c is what lets you write
c = conn.cursor()
def neat_layout():
df1 = pandas.read_sql_query("SELECT * FROM tile ;", conn)
df2 = pandas.read_sql_query("SELECT * FROM subTile ;", conn)
print(df1)
print("\n--------------------------------------------------------------------------------------------------\n")
print(df2)
def make_map_datebase():
with conn:
c.execute("""CREATE TABLE IF NOT EXISTS tile (coords_x int, coords_y int, abriv text, place_name text, rarity text, difficulty int, enterable boolean, tile_level int, exit_x int, exit_y int, discovered boolean)""")
c.execute("""CREATE TABLE IF NOT EXISTS subTile (from_x int, from_y int, coords_x int, coords_y int, biome_name text, place_name text, difficulty int, tile_level int, discovered boolean)""")
def insert_tile(tile):
with conn:
c.execute("INSERT INTO tile VALUES (:coords_x, :coords_y, :abriv, :place_name, :rarity, :difficulty, :enterable, :tile_level, :exit_x, :exit_y, :discovered)",
{'coords_x': tile.coords_x, 'coords_y': tile.coords_y, 'abriv': tile.abriv, 'place_name': tile.name, 'rarity': tile.rarity, 'difficulty': tile.difficulty, 'enterable': tile.enterable, 'tile_level': tile.floor, 'exit_x': tile.exit_x, 'exit_y': tile.exit_y, 'discovered': tile.discovered})
def insert_subtile(subtile):
with conn:
c.execute("INSERT INTO subTile VALUES (:from_x, :from_y, :coords_x, :coords_y, :biome_name, :place_name, :difficulty, :tile_level, :discovered)",
{'from_x': subtile.from_x, 'from_y': subtile.from_y, 'coords_x': subtile.coords_x, 'coords_y': subtile.coords_y, 'biome_name': subtile.biome_name, 'place_name': subtile.place_name, 'difficulty': subtile.difficulty, 'tile_level': subtile.floor, 'discovered': subtile.discovered})
def get_tile(x,y):
c.execute("SELECT * FROM tile WHERE coords_x=:coords_x AND coords_y=:coords_y;", {'coords_x': x, 'coords_y': y})
return c.fetchall()
def get_subTile(from_x, from_y, x, y):
c.execute("SELECT * FROM subTile WHERE from_x=:from_x AND from_y=:from_y AND coords_x=:coords_x AND coords_y=:coords_y;", {'from_x': from_x, 'from_y': from_y, 'coords_x': x, 'coords_y': y})
return c.fetchall()
def get_all():
c.execute("SELECT * FROM map;")
return c.fetchall()
def update_tile(x,y,discovered):
with conn:
c.execute("""UPDATE tile SET discovered=:discovered WHERE coords_x=:coords_x AND coords_y=:coords_y""",
{'coords_x': x, 'coords_y': y, 'discovered': discovered})
def update_subTile(from_x, from_y, x, y, discovered):
with conn:
c.execute("""UPDATE subTile SET discovered=:discovered WHERE from_x=:from_x AND from_y=:from_y AND coords_x=:coords_x AND coords_y=:coords_y""",
{'from_x': from_x, 'from_y': from_y, 'coords_x': x, 'coords_y': y, 'discovered': discovered})
|
[
"43012445+cowboy8625@users.noreply.github.com"
] |
43012445+cowboy8625@users.noreply.github.com
|
108f22444d17c4261db1d0bb0766a7ed666a18ff
|
5244b65ca44d1d66f827d608462186ae3eba0b17
|
/cst042_oblig2/src/src_client.py
|
55c0edfac431d8bd4435007bbd61305cd2c0874a
|
[] |
no_license
|
camillavilla1/inf_2301
|
08d6bf2ff2bc224bb9099b6d8470177907c91c3e
|
db9a8992cdd5f9a7c30bd7083b5de75d58e940ae
|
refs/heads/master
| 2021-08-29T17:13:20.259405
| 2017-12-14T12:07:09
| 2017-12-14T12:07:09
| 114,245,850
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,783
|
py
|
import socket, sys
from Crypto.PublicKey import RSA
from Crypto.Cipher import AES
class Client(object):
def __init__(self, ):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Create a socket object
def rsa_pub_key(self):
"""Generate new RSA-key, send public key to server"""
self.new_key = RSA.generate(2048) #contain public- and private key
self.public_key = self.new_key.publickey()
new_pubkey = open("pem.pem", 'w') #make new pem-file for public key
new_pubkey.write(self.public_key.exportKey("PEM")) #write the public key to the file
new_pubkey.close()
open_pem = open("pem.pem", 'r')
pub_key = open_pem.read()
open_pem.close()
self.socket.send(pub_key)
def get_aeskey(self):
"""Get AES key from server. Decrypt it"""
self.aeskey = self.socket.recv(4096)
self.decr_aes = self.new_key.decrypt(self.aeskey)[:16] #decrypt with pub/sec key
self.iv = self.new_key.decrypt(self.aeskey)[16:]
def new_text(self):
"""Get text from the server and opens new text file for the message"""
self.data = self.socket.recv(4096)
new_txtfile = open("text_client.txt", 'w')
new_txtfile.write(self.data)
new_txtfile.close()
def get_msg(self):
"""Get message from server, decrypt message and put it in new document"""
cipher = AES.new(self.decr_aes, AES.MODE_CFB, self.iv)
dec_msg = cipher.decrypt(self.data)
new_msg = open("text_client.txt", 'a')
new_msg.write(dec_msg)
def run(self, host="localhost", port=8080):
host = sys.argv[1]
port = int(sys.argv[2])
self.socket.connect((host, port))
self.rsa_pub_key()
self.get_aeskey()
self.new_text()
self.get_msg()
if __name__ == '__main__':
client = Client()
print "Client running..."
client.run()
|
[
"dc-milla@hotmail.com"
] |
dc-milla@hotmail.com
|
71b7d904c4b631395ebd6d5e10b3d98b8233d49e
|
3d6159a65ef09aa7f01aa5c53111d918f8b59dee
|
/smssearchreqs/models.py
|
8329592040210fcfb9e2c508af96b0f388ce9e3e
|
[] |
no_license
|
codeht1/foxsearch
|
6e3e5a7de086fefba10c8cab259fa519b06aa7fc
|
2cc706a592949802dc8e6b3f66504ce27bc7d020
|
refs/heads/master
| 2016-09-06T06:19:31.256647
| 2014-10-28T17:37:50
| 2014-10-28T17:37:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 309
|
py
|
from django.db import models
# Create your models here.
class smssearchreq(models.Model):
mobilenum=models.DecimalField(max_digits=10,decimal_places=0)
query = models.CharField(max_length=200)
timestamp=models.DateTimeField(auto_now_add=True)
def __unicode__(self):
return "%s" %(self.mobilenum)
|
[
"praveenyadav1193@yahoo.com"
] |
praveenyadav1193@yahoo.com
|
a8b8e6b8f9a3395df1433080c32261269530e44f
|
73a55f07a5c0bf3b3dbfc696f2f22cb2c9485c5f
|
/lab3/ex3_1.py
|
ef074fba9e7a5293646cddeae992b5f5804d5114
|
[] |
no_license
|
korliakov/infa_2020_Oksanichenko
|
158ec38eae8693f00cf3fe1330845c36730eba93
|
0064c1319fe16e234ded394d7aee3b30a4b88cf9
|
refs/heads/master
| 2022-12-22T11:49:10.569667
| 2020-09-24T07:58:50
| 2020-09-24T07:58:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,108
|
py
|
import pygame
import numpy as np
from pygame.draw import *
pygame.init()
FPS = 30
screen = pygame.display.set_mode((600, 600))
WHT = (255, 255, 255)
YLW = (255, 255, 0)
BLC = (0, 0, 0)
RED = (255, 0, 0)
screen.fill(WHT)
circle(screen, YLW, [300, 300], 200)
circle(screen, BLC, [300, 300], 200, 1)
circle(screen, BLC, [370, 250], 20) # eyes
circle(screen, RED, [370, 250], 10)
circle(screen, BLC, [230, 250], 30)
circle(screen, RED, [230, 250], 10)
x1e1 = 370 - np.sin((np.pi / 180) * 30) * 20
y1e1 = 250 - np.cos((np.pi / 180) * 30) * 20
x2e1 = x1e1 - np.cos((np.pi / 180) * 30) * 40
y2e1 = y1e1 + np.sin((np.pi / 180) * 30) * 40
x3e1 = x2e1 - np.sin((np.pi / 180) * 30) * 10
y3e1 = y2e1 - np.cos((np.pi / 180) * 30) * 10
x4e1 = x3e1 + np.cos((np.pi / 180) * 30) * 80
y4e1 = y3e1 - np.sin((np.pi / 180) * 30) * 80
x5e1 = x4e1 + np.sin((np.pi / 180) * 30) * 10
y5e1 = y4e1 + np.cos((np.pi / 180) * 30) * 10
x6e1 = x5e1 - np.cos((np.pi / 180) * 30) * 40
y6e1 = y5e1 + np.sin((np.pi / 180) * 30) * 40
Eyebrown1 = [(x1e1, y1e1), (x2e1, y2e1), (x3e1, y3e1), (x4e1, y4e1), (x5e1, y5e1), (x6e1, y6e1)]
x1e2 = 230 + np.sin((np.pi / 180) * 30) * 30
y1e2 = 250 - np.cos((np.pi / 180) * 30) * 30
x2e2 = x1e2 + np.cos((np.pi / 180) * 30) * 50
y2e2 = y1e2 + np.sin((np.pi / 180) * 30) * 50
x3e2 = x2e2 + np.sin((np.pi / 180) * 30) * 15
y3e2 = y2e2 - np.cos((np.pi / 180) * 30) * 15
x4e2 = x3e2 - np.cos((np.pi / 180) * 30) * 100
y4e2 = y3e2 - np.sin((np.pi / 180) * 30) * 100
x5e2 = x4e2 - np.sin((np.pi / 180) * 30) * 15
y5e2 = y4e2 + np.cos((np.pi / 180) * 30) * 15
x6e2 = x5e2 + np.cos((np.pi / 180) * 30) * 50
y6e2 = y5e2 + np.sin((np.pi / 180) * 30) * 50
Eyebrown2 = [(x1e2, y1e2), (x2e2, y2e2), (x3e2, y3e2), (x4e2, y4e2), (x5e2, y5e2), (x6e2, y6e2)]
polygon(screen, BLC, Eyebrown1)
polygon(screen, BLC, Eyebrown2)
x1 = 200
y1 = 400
w = 200
h = 20
rect(screen, BLC, (x1, y1, w, h))
pygame.display.update()
clock = pygame.time.Clock()
finished = False
while not finished:
clock.tick(FPS)
for event in pygame.event.get():
if event.type == pygame.QUIT:
finished = True
pygame.quit()
|
[
"okssolotheodor@gmail.com"
] |
okssolotheodor@gmail.com
|
b3b049e2bbef6b4c5bd3b74c884bd4c0cff4e8c8
|
c688d5e3f3e550ffc74e4fcb0f356dc04d864173
|
/chess/dl/models.py
|
3f16a6448bc97fc6dc7c438484f55aba6ea16853
|
[] |
no_license
|
HenryDashwood/chess-engine
|
1fc82b67eb44c9f4c834ac07fc5089455f456335
|
1030656035f56535ab6093dcc4f735dbcf16d812
|
refs/heads/master
| 2021-06-26T06:34:02.887173
| 2020-10-25T23:58:14
| 2020-10-25T23:58:14
| 154,219,729
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,104
|
py
|
import torch
from torch import nn
from torch.nn import functional as F
class Model1(nn.Module):
def __init__(self):
super(Model1, self).__init__()
self.fc1 = nn.Linear(773, 400)
self.bn1 = nn.BatchNorm1d(400)
self.fc2 = nn.Linear(400, 200)
self.bn2 = nn.BatchNorm1d(200)
self.fc3 = nn.Linear(200, 100)
self.bn3 = nn.BatchNorm1d(100)
self.fc4 = nn.Linear(100, 1)
self.bn4 = nn.BatchNorm1d(1)
def forward(self, x):
x = F.leaky_relu(self.bn1(self.fc1(x)))
x = F.leaky_relu(self.bn2(self.fc2(x)))
x = F.leaky_relu(self.bn3(self.fc3(x)))
x = self.bn4(self.fc4(x))
return torch.tanh(x)
class Model2(nn.Module):
def __init__(self):
super(Model2, self).__init__()
self.a1 = nn.Conv2d(12, 16, kernel_size=3, padding=1)
self.a2 = nn.Conv2d(16, 16, kernel_size=3, padding=1)
self.a3 = nn.Conv2d(16, 32, kernel_size=3, stride=2)
self.b1 = nn.Conv2d(32, 32, kernel_size=3, padding=1)
self.b2 = nn.Conv2d(32, 32, kernel_size=3, padding=1)
self.b3 = nn.Conv2d(32, 64, kernel_size=3, stride=2)
self.c1 = nn.Conv2d(64, 64, kernel_size=2, padding=1)
self.c2 = nn.Conv2d(64, 64, kernel_size=2, padding=1)
self.c3 = nn.Conv2d(64, 128, kernel_size=2, stride=2)
self.d1 = nn.Conv2d(128, 128, kernel_size=1)
self.d2 = nn.Conv2d(128, 128, kernel_size=1)
self.d3 = nn.Conv2d(128, 128, kernel_size=1)
self.last = nn.Linear(128, 1)
def forward(self, x):
x = F.relu(self.a1(x))
x = F.relu(self.a2(x))
x = F.relu(self.a3(x))
# 4x4
x = F.relu(self.b1(x))
x = F.relu(self.b2(x))
x = F.relu(self.b3(x))
# 2x2
x = F.relu(self.c1(x))
x = F.relu(self.c2(x))
x = F.relu(self.c3(x))
# 1x128
x = F.relu(self.d1(x))
x = F.relu(self.d2(x))
x = F.relu(self.d3(x))
x = x.view(-1, 128)
x = self.last(x)
# value output
return torch.tanh(x)
|
[
"henry.dashwood@curationcorp.com"
] |
henry.dashwood@curationcorp.com
|
0706579f4674885cfbd07584590fb145738a4ed9
|
bc4554da10f2e1670f13da68c350b7ae8ee91a9e
|
/forms.py
|
d7bc04c504a3ca3b6f09772cc2ac7f4016c2372b
|
[] |
no_license
|
ryan-ormsby/FlaskApp
|
cc470dfac2969d54fcb3beaf3f22c84e04fd341c
|
4960ff5f167ea1f9a96db134711bb9a2479201fc
|
refs/heads/master
| 2021-08-24T02:01:41.520546
| 2017-12-07T15:17:29
| 2017-12-07T15:17:29
| 109,185,343
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 384
|
py
|
from flask_wtf import FlaskForm
from wtforms import StringField, BooleanField
from wtforms.validators import DataRequired
class AddRiskForm(FlaskForm):
orderid = StringField('orderid', validators=[DataRequired()])
class AddReviewForm(FlaskForm):
nickname = StringField('nickname', validators=[DataRequired()])
review = StringField('review', validators=[DataRequired()])
|
[
"ryan.ormsby@shopify.com"
] |
ryan.ormsby@shopify.com
|
274b1254246dc752a8cf053ff8afd8f8684334b5
|
a9e1e7ecbdb8d4f82a0b5e626f1c1ec4e365853d
|
/lib/Stochastic.py
|
626389c2fd44a4e8998306a641078267d3694241
|
[] |
no_license
|
ahrgomez/apTrader
|
6d1df2f52c3c82879ef0575aa0bc4c5ae5b39919
|
879f3af42dd0f7c926f0bc974fd320a7a0ff0dbc
|
refs/heads/master
| 2022-12-16T07:04:12.526346
| 2022-12-08T12:15:28
| 2022-12-08T12:15:28
| 85,666,771
| 0
| 1
| null | 2022-06-21T21:16:40
| 2017-03-21T06:24:06
|
Python
|
UTF-8
|
Python
| false
| false
| 1,130
|
py
|
import pandas as pd
import numpy as np
import SimpleMobileAverage
class Stochastic(object):
periods = 0
smoothing = 0
sma = None
def __init__(self, period, smoothing):
self.periods = period
self.smoothing = smoothing
self.sma = SimpleMobileAverage.SimpleMobileAverage(smoothing)
def Calculate(self, high_prices, low_prices, close_prices):
return self.getSlowStochastic(low_prices, high_prices, close_prices)
def getFastStochastic(self, low_prices, high_prices, close_prices):
low_min = low_prices.rolling(window=self.periods,center=False).min()
high_max = high_prices.rolling(window=self.periods,center=False).max()
k_fast = 100 * (close_prices - low_min)/(high_max - low_min)
k_fast = k_fast.dropna()
d_fast = self.sma.Calculate(k_fast)
return k_fast, d_fast
def getSlowStochastic(self, low_prices, high_prices, close_prices):
k_fast, d_fast = self.getFastStochastic(low_prices, high_prices, close_prices)
k_slow = d_fast
d_slow = self.sma.Calculate(k_slow)
return k_slow, d_slow
|
[
"root@agtemac.local"
] |
root@agtemac.local
|
a24d776dc821a4f9c43ad84683108ed6172c2508
|
f597f371bb2b18ce4d28001b1e9508616bf8e0fa
|
/staging/versions/53e5dd8445f9_.py
|
93da22374527b4f3429b794515cb298f4871ffbe
|
[
"MIT"
] |
permissive
|
farbodab/flatteningthecurve
|
e7b3bb21d8fba9857c5cc2ef41bbae4a7ac863b3
|
692fd9c8d78355e1208ff85a2cd1038da11c392f
|
refs/heads/master
| 2022-12-11T09:26:55.405564
| 2021-07-11T16:35:00
| 2021-07-11T16:35:00
| 249,292,508
| 1
| 3
|
MIT
| 2022-12-08T03:51:54
| 2020-03-22T23:31:49
|
Python
|
UTF-8
|
Python
| false
| false
| 897
|
py
|
"""empty message
Revision ID: 53e5dd8445f9
Revises: 0d0d42ac0c04
Create Date: 2020-05-19 18:56:45.511549
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '53e5dd8445f9'
down_revision = '0d0d42ac0c04'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('viz', sa.Column('text_bottom', sa.String(), nullable=True))
op.add_column('viz', sa.Column('text_top', sa.String(), nullable=True))
op.drop_column('viz', 'text')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('viz', sa.Column('text', sa.VARCHAR(), autoincrement=False, nullable=True))
op.drop_column('viz', 'text_top')
op.drop_column('viz', 'text_bottom')
# ### end Alembic commands ###
|
[
"farbod.ab@hotmail.ca"
] |
farbod.ab@hotmail.ca
|
4ad17512530bf395fbef90133d6f3bfe4cf30fd6
|
21a7462f5c6d7258aa4add66f3961bcced6fcb7e
|
/plugins/check-printer-hp-2600n/check_printer_hp_2600n.py
|
44a34228d174a8d108fde45b945a975f31e1b0cf
|
[] |
no_license
|
titilambert/monitoring-tools
|
dfa7188ab448fa1c9648c5ec403bdd6a7202dd8f
|
ef0331019d5fe999675a697194b4eefe5f8ef1e3
|
refs/heads/master
| 2021-01-18T17:31:36.409890
| 2015-06-04T13:34:23
| 2015-06-04T13:34:33
| 37,618,022
| 1
| 0
| null | 2015-06-17T20:05:51
| 2015-06-17T20:05:51
| null |
UTF-8
|
Python
| false
| false
| 22
|
py
|
check_printer_hp_2600n
|
[
"alexandre.viau@savoirfairelinux.com"
] |
alexandre.viau@savoirfairelinux.com
|
47cf7a841d1897bf19ffa23851159a81a5ff34fc
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/benchmark/startPyquil3185.py
|
e768b8db399f05c768b8bac81015e4c147695c7e
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,946
|
py
|
# qubit number=4
# total number=44
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(3) # number=19
prog += Y(2) # number=36
prog += CZ(0,3) # number=20
prog += H(3) # number=21
prog += CNOT(0,3) # number=14
prog += CNOT(0,3) # number=25
prog += CNOT(0,3) # number=28
prog += X(3) # number=29
prog += CNOT(0,3) # number=30
prog += CNOT(3,1) # number=35
prog += Y(2) # number=34
prog += CNOT(0,3) # number=27
prog += H(3) # number=22
prog += CZ(0,3) # number=23
prog += H(3) # number=24
prog += CNOT(0,3) # number=13
prog += H(3) # number=18
prog += Z(3) # number=10
prog += H(1) # number=2
prog += CNOT(2,0) # number=41
prog += Z(2) # number=42
prog += CNOT(2,0) # number=43
prog += H(2) # number=3
prog += H(3) # number=4
prog += CNOT(3,0) # number=31
prog += CNOT(3,0) # number=37
prog += Z(3) # number=38
prog += CNOT(3,0) # number=39
prog += CNOT(3,0) # number=33
prog += H(0) # number=5
prog += H(1) # number=6
prog += H(2) # number=7
prog += H(3) # number=8
prog += H(0) # number=9
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('4q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil3185.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
db3925664d66c296e75f8e1ca15616bf73b1098e
|
24e004c618421f2dfaf81d746d99b6303d9118f0
|
/john_sinclair/load_albums.py
|
4723d2114cb223535a8308a29bb740c7f9b023d4
|
[
"MIT"
] |
permissive
|
benmaier/drei-fragezeichen-sleeping-beauty
|
34c4f22506a2678f8537ad18df4c2beab2ca6366
|
a43b9d0e0a052b37262bad218be99d79507b7f73
|
refs/heads/main
| 2023-06-24T03:38:08.167569
| 2021-07-28T12:38:24
| 2021-07-28T12:38:24
| 390,154,323
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,185
|
py
|
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
import simplejson as json
import pathlib
from rich import print
with open(pathlib.Path.home()/".spotipy"/"identity.json",'r') as f:
credentials = json.load(f)
sp = spotipy.Spotify(auth_manager=SpotifyClientCredentials(**credentials))
#results = sp.search(q='drei ???', limit=20,type='artist')
#print(results)
#for idx, track in enumerate(results['tracks']['items']):
# print(idx, track['name'])
# found artist uri: spotify:artist:3meJIgRw7YleJrmbpbJK6S
johnsinc_uri = "spotify:artist:6lly2jn9MqaxaWRrkEzOsJ"
results = sp.artist_albums(johnsinc_uri, album_type='album')
albums = results['items']
while results['next']:
results = sp.next(results)
albums.extend(results['items'])
rows = []
for album in albums:
name = album['name']
this_album = {}
this_album['release_date'] = album['release_date']
this_album['total_tracks'] = album['total_tracks']
this_album['uri'] = album['uri']
this_album['name'] = album['name']
rows.append(this_album)
with open('albums.json','w') as f:
rows = sorted(rows,key=lambda x: x['release_date'])
json.dump(rows,f, indent=4)
|
[
"benjaminfrankmaier@gmail.com"
] |
benjaminfrankmaier@gmail.com
|
d482b07206cd0fd67c54015df9dad615eb0158b6
|
5f57fe6ee9e31a135ba50bc0b045f11945edb2ca
|
/tests.py
|
9b71544f987973d5b3d70276f3092aaae82a7053
|
[] |
no_license
|
should-you-contribute/should-you-contribute.github.io
|
1985a4a86c4fb28c20c82ae324a6deab0bb321b9
|
b3c561c5c6b5e698cfc84fb2d77181b667d363ce
|
refs/heads/master
| 2020-06-09T02:08:54.297994
| 2018-10-01T21:35:31
| 2018-10-01T21:35:31
| 37,884,905
| 1
| 1
| null | 2016-01-05T02:27:37
| 2015-06-22T22:56:02
|
HTML
|
UTF-8
|
Python
| false
| false
| 8,389
|
py
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import unittest
import time
class NewVisitorTest(unittest.TestCase):
def setUp(self):
self.browser = webdriver.Firefox()
def tearDown(self):
self.browser.quit()
def test_can_get_information_from_github(self):
# Tux is a newcomer to open source and github. Zie wants to know whether
# the github repository zie's found might be a good project to contribute
# to, and has heard about this site. So zie visits it.
self.browser.get("http://127.0.0.1:8000/index.html")
# Tux notices that the title page and header include the phrase "Should
# I Contribute?"
project_name = "Should You Contribute?"
self.assertIn(project_name, self.browser.title)
self.assertEquals(project_name.upper(),
self.browser.find_element_by_id('project_name').text)
# Tux also notices a checklist on the page. The checklist is not yet filled
# out - instead of #s, there are ? marks in each spot.
self.assertEquals(self.browser.find_element_by_id('checklist_title').text,
"CHECKLIST")
self.assertEquals(6,
len(self.browser.find_elements_by_class_name('fa-question-circle')))
# Tux sees that the main page is prompting hir to enter the name of the
# repository.
self.assertTrue(self.browser.find_element_by_id('repo_name'))
# The first time Tux tries, zie enters the name wrong and sees
# a message asking hir to try again.
repo_input_box = self.browser.find_element_by_id('repo_name')
repo_input_box.send_keys('shaunagm/terrible-idea-for-a-repo-name')
# self.browser.implicitly_wait(6)
repo_input_box.send_keys(Keys.ENTER)
time.sleep(2) # For some reason, webdriver's explicit waiting *or* implicit waiting not working
self.assertIn("That is not a valid, public Github repository.",
self.browser.find_element_by_id('repo_error').text)
# The second time Tux enters the repository name correctly. Zie sees a
# message telling hir that the information was successfully obtained.
repo_input_box.clear()
working_repo = "shaunagm/WelcomeBot"
repo_input_box.send_keys(working_repo)
# self.browser.implicitly_wait(6)
repo_input_box.send_keys(Keys.ENTER)
time.sleep(2) # For some reason, webdriver's explicit waiting *or* implicit waiting not working
self.assertIn("Success!",
self.browser.find_element_by_id('repo_error').text)
# Looking at the checklist, Tux sees that the checklist's header now contains
# the name of the repository.
self.assertIn(working_repo.upper(),
self.browser.find_element_by_id('checklist_title').text)
# Tux also sees that the ? marks have all been replaced by numbers.
# In the first row, which has the prompt "How many commits have been
# made in the last month?" now says something like "There have been X
# commits in the last month".
self.assertIn("commits in the last month.",
self.browser.find_element_by_id('checklist_commits').text)
# The second row, which has the prompt "How many contributors are there
# to this repository?" now says something like "There are X contributors
# to this repository."
self.assertIn("contributors to this repository",
self.browser.find_element_by_id('checklist_contribs').text)
# The third row, which has the prompt "What percentage of issues have replies?"
# now says something like "X% of issues have replies."
self.assertIn("issues get replies. The median number of replies ",
self.browser.find_element_by_id('checklist_issues').text)
# The fourth row, which has the prompt "What percentage of pull requests
# are merged?" now says something like "X% of pull requests have been merged."
self.assertIn("pull requests have been merged.",
self.browser.find_element_by_id('checklist_mergedprs').text)
# The fifth row, which has the prompt "Does the repository have a README?"
# now says something like "The repository has a README."
self.assertIn("The repository has a readme",
self.browser.find_element_by_id('checklist_files').text)
# The sixth row, which has the prompt "Does the issue tracker label issues
# as good for newcomers?" now says something like "The tracker has issues labeled
# X"
self.assertIn("The tracker has issues labeled ",
self.browser.find_element_by_id('checklist_labels').text)
# Tux reads down the list. Zie is confused about what the first item on the list
# means, but sees a button labelled "Learn More".
self.assertIn("Learn More",
self.browser.find_element_by_id('checklist_commits_prompt').text)
# Tux clicks the "Learn More" button. A modal window pops up containing more
# information about that item.
self.assertEqual(False,
self.browser.find_element_by_id('commits-info').is_displayed())
self.browser.find_element_by_css_selector('#padder-row > div > div:nth-child(3) > div.col-lg-6.checklist-item.checklist-description > p > button').click()
time.sleep(1) # For some reason, we need to sleep here to get the following test to work
self.assertEqual('false',
self.browser.find_element_by_id('commits-info').get_attribute("aria-hidden"))
# Enlightened, Tux closes the modal window.
self.browser.find_element_by_css_selector('#commits-info > div > div > div.modal-header > button > span').click()
time.sleep(1) # For some reason, we need to sleep here to get the following test to work
self.assertEqual(False,
self.browser.find_element_by_id('commits-info').is_displayed())
# Tux then clicks the "Learn More" button for the next item.
self.assertEqual(False,
self.browser.find_element_by_id('contribs-info').is_displayed())
self.browser.find_element_by_css_selector('#padder-row > div > div:nth-child(4) > div.col-lg-6.checklist-item.checklist-description > p > button').click()
time.sleep(1) # For some reason, we need to sleep here to get the following test to work
self.assertEqual('false',
self.browser.find_element_by_id('contribs-info').get_attribute("aria-hidden"))
# A different modal window corresponding to the next item pops up. Tux
# closes that too.
self.browser.find_element_by_css_selector('#contribs-info > div > div > div.modal-header > button > span').click()
time.sleep(1) # For some reason, we need to sleep here to get the following test to work
self.assertEqual(False,
self.browser.find_element_by_id('contribs-info').is_displayed())
# Tux clicks on the "Find one!" link. A new window with a guide to finding projects appears.
self.browser.find_element_by_id("find_one").click()
self.browser.switch_to_window(self.browser.window_handles[1])
self.assertIn("Finding a Project",
self.browser.title)
self.browser.switch_to_window(self.browser.window_handles[0])
self.assertIn(project_name,
self.browser.title)
# Tux is so excited zie decides to enter a different github repository.
# When zie does, zie sees "success!" above the search bar.
repo_input_box.clear()
new_repo = "railsbridge/bridge_troll"
repo_input_box.send_keys(new_repo)
repo_input_box.send_keys(Keys.ENTER)
time.sleep(2) # For some reason, webdriver's explicit waiting *or* implicit waiting not working
self.assertIn("Success!",
self.browser.find_element_by_id('repo_error').text)
# Tux also sees the new repository name in the checklist header
# and new information in the checklist data.
self.assertIn(new_repo.upper(),
self.browser.find_element_by_id('checklist_title').text)
self.assertNotIn(working_repo.upper(),
self.browser.find_element_by_id('checklist_title').text)
if __name__ == '__main__':
unittest.main()
|
[
"shaunagm@gmail.com"
] |
shaunagm@gmail.com
|
9131b2f1dacab69bf3da816ef02ec61aed408220
|
895234e39ee4cd7b1d893f172df9979b95ab5279
|
/magenta/models/music_vae/lstm_models.py
|
5bb32c46abda12ecd42e38013af3941fe9869568
|
[
"Apache-2.0"
] |
permissive
|
craffel/magenta
|
039224afb9eb9cd197f4dc4e28029c932390072e
|
4a5e55ef27040b0c963eae9d828162ec6a7c20f9
|
refs/heads/master
| 2021-05-05T05:16:14.626337
| 2018-01-25T01:06:30
| 2018-01-25T01:06:30
| 118,673,643
| 1
| 0
| null | 2018-01-23T21:41:53
| 2018-01-23T21:41:53
| null |
UTF-8
|
Python
| false
| false
| 36,283
|
py
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""LSTM-based encoders and decoders for MusicVAE."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
# internal imports
import tensorflow as tf
from magenta.common import flatten_maybe_padded_sequences
from magenta.common import Nade
from magenta.models.music_vae import base_model
from tensorflow.contrib import rnn
from tensorflow.contrib import seq2seq
from tensorflow.contrib.cudnn_rnn.python.layers import cudnn_rnn
from tensorflow.python.framework import tensor_util
from tensorflow.python.layers import core as layers_core
from tensorflow.python.util import nest
def rnn_cell(rnn_cell_size, dropout_keep_prob, is_training=True):
"""Builds an LSTMBlockCell based on the given parameters."""
dropout_keep_prob = dropout_keep_prob if is_training else 1.0
cells = []
for layer_size in rnn_cell_size:
cell = rnn.LSTMBlockCell(layer_size)
cell = rnn.DropoutWrapper(
cell,
input_keep_prob=dropout_keep_prob)
cells.append(cell)
return rnn.MultiRNNCell(cells)
def cudnn_lstm_layer(layer_sizes, dropout_keep_prob, is_training=True,
name_or_scope='rnn'):
"""Builds a CudnnLSTM Layer based on the given parameters."""
dropout_keep_prob = dropout_keep_prob if is_training else 1.0
for ls in layer_sizes:
if ls != layer_sizes[0]:
raise ValueError(
'CudnnLSTM does not support layers with differing sizes. Got: %s',
layer_sizes)
lstm = cudnn_rnn.CudnnLSTM(
num_layers=len(layer_sizes),
num_units=layer_sizes[0],
direction='unidirectional',
dropout=1.0 - dropout_keep_prob,
name=name_or_scope)
class BackwardCompatibleCudnnLSTMSaveable(
tf.contrib.cudnn_rnn.CudnnLSTMSaveable):
"""Overrides CudnnLSTMSaveable for backward-compatibility."""
def _cudnn_to_tf_biases(self, *cu_biases):
"""Overrides to subtract 1.0 from `forget_bias` (see BasicLSTMCell)."""
(tf_bias,) = (
super(BackwardCompatibleCudnnLSTMSaveable, self)._cudnn_to_tf_biases(
*cu_biases))
i, c, f, o = tf.split(tf_bias, 4)
# Non-Cudnn LSTM cells add 1.0 to the forget bias variable.
return (tf.concat([i, c, f - 1.0, o], axis=0),)
def _tf_to_cudnn_biases(self, *tf_biases):
"""Overrides to add 1.0 to `forget_bias` (see BasicLSTMCell)."""
(tf_bias,) = tf_biases
i, c, f, o = tf.split(tf_bias, 4)
# Non-Cudnn LSTM cells add 1.0 to the forget bias variable.
return (
super(BackwardCompatibleCudnnLSTMSaveable, self)._tf_to_cudnn_biases(
tf.concat([i, c, f + 1.0, o], axis=0)))
def _TFCanonicalNamePrefix(self, layer, is_fwd=True):
"""Overrides for backward-compatible variable names."""
if self._direction == 'unidirectional':
return 'multi_rnn_cell/cell_%d/lstm_cell' % layer
else:
return (
'cell_%d/bidirectional_rnn/%s/multi_rnn_cell/cell_0/lstm_cell'
% (layer, 'fw' if is_fwd else 'bw'))
lstm._saveable_cls = BackwardCompatibleCudnnLSTMSaveable # pylint:disable=protected-access
return lstm
def _cudnn_lstm_state(lstm_cell_state):
"""Convert tuple of LSTMCellStateTuples to CudnnLSTM format."""
h = tf.stack([s.h for s in lstm_cell_state])
c = tf.stack([s.c for s in lstm_cell_state])
return (h, c)
def _get_final(time_major_sequence, sequence_length):
final_index = tf.stack(
[tf.maximum(0, sequence_length - 1),
tf.range(sequence_length.shape[0])],
axis=1)
return tf.gather_nd(time_major_sequence, final_index)
def initial_cell_state_from_embedding(cell, z, name=None):
"""Computes an initial RNN `cell` state from an embedding, `z`."""
flat_state_sizes = nest.flatten(cell.state_size)
return nest.pack_sequence_as(
cell.zero_state(batch_size=z.shape[0], dtype=tf.float32),
tf.split(
tf.layers.dense(
z,
sum(flat_state_sizes),
activation=tf.tanh,
kernel_initializer=tf.random_normal_initializer(stddev=0.001),
name=name),
flat_state_sizes,
axis=1))
def _get_sampling_probability(hparams, is_training):
"""Returns the sampling probability as a tensor based on the hparams.
Supports three sampling schedules (`hparams.sampling_schedule`):
constant: `hparams.sampling_rate` is the sampling probability. Must be in
the interval [0, 1].
exponential: `hparams.sampling_rate` is the base of the decay exponential.
Must be in the interval (0, 1). Larger values imply a slower increase in
sampling.
inverse_sigmoid: `hparams.sampling_rate` is in the interval [1, inf).
Larger values imply a slower increase in sampling.
A constant value of 0 is returned if `hparams.sampling_schedule` is undefined.
If not training and a non-0 sampling schedule is defined, a constant value of
1 is returned since this is assumed to be a test/eval job associated with a
scheduled sampling trainer.
Args:
hparams: An HParams object containing model hyperparameters.
is_training: Whether or not the model is being used for training.
Raises:
ValueError: On an invalid `sampling_schedule` or `sampling_rate` hparam.
"""
if (not hasattr(hparams, 'sampling_schedule') or
not hparams.sampling_schedule or
(hparams.sampling_schedule == 'constant' and hparams.sampling_rate == 0)):
return tf.constant(0.0)
if not is_training:
# This is likely an eval/test job associated with a training job using
# scheduled sampling.
tf.logging.warning(
'Setting non-training sampling schedule from %s:%f to constant:1.0.',
hparams.sampling_schedule, hparams.sampling_rate)
hparams.sampling_schedule = 'constant'
hparams.sampling_rate = 1.0
schedule = hparams.sampling_schedule
rate = hparams.sampling_rate
step = tf.to_float(tf.train.get_global_step())
if schedule == 'constant':
if not 0 <= rate <= 1:
raise ValueError(
'`constant` sampling rate must be in the interval [0, 1]. Got %f.'
% rate)
sampling_probability = tf.constant(rate)
elif schedule == 'inverse_sigmoid':
if rate < 1:
raise ValueError(
'`inverse_sigmoid` sampling rate must be at least 1. Got %f.' % rate)
k = tf.constant(rate)
sampling_probability = 1.0 - k / (k + tf.exp(step / k))
elif schedule == 'exponential':
if not 0 < rate < 1:
raise ValueError(
'`exponential` sampling rate must be in the interval (0, 1). Got %f.'
% hparams.sampling_rate)
k = tf.constant(rate)
sampling_probability = 1.0 - tf.pow(k, step)
else:
raise ValueError('Invalid `sampling_schedule`: %s' % schedule)
tf.summary.scalar('sampling_probability', sampling_probability)
return sampling_probability
class LstmEncoder(base_model.BaseEncoder):
"""Unidirectional LSTM Encoder."""
def build(self, hparams, is_training=True, name_or_scope='encoder'):
self._is_training = is_training
self._name_or_scope = name_or_scope
self._use_cudnn = hparams.use_cudnn
tf.logging.info('\nEncoder Cells (unidirectional):\n'
' units: %s\n',
hparams.enc_rnn_size)
if self._use_cudnn:
self._cudnn_lstm = cudnn_lstm_layer(
hparams.enc_rnn_size,
hparams.dropout_keep_prob,
is_training,
name_or_scope=self._name_or_scope)
else:
self._cell = rnn_cell(
hparams.enc_rnn_size, hparams.dropout_keep_prob, is_training)
def encode(self, sequence, sequence_length):
# Convert to time-major.
sequence = tf.transpose(sequence, [1, 0, 2])
if self._use_cudnn:
outputs, _ = self._cudnn_lstm(
sequence, training=self._is_training)
return _get_final(outputs, sequence_length)
else:
outputs, _ = tf.nn.dynamic_rnn(
self._cell, sequence, sequence_length, dtype=tf.float32,
time_major=True, scope=self._name_or_scope)
return outputs[-1]
class BidirectionalLstmEncoder(base_model.BaseEncoder):
"""Bidirectional LSTM Encoder."""
def build(self, hparams, is_training=True, name_or_scope='encoder'):
self._is_training = is_training
self._name_or_scope = name_or_scope
self._use_cudnn = hparams.use_cudnn
tf.logging.info('\nEncoder Cells (bidirectional):\n'
' units: %s\n',
hparams.enc_rnn_size)
if isinstance(name_or_scope, tf.VariableScope):
name = name_or_scope.name
reuse = name_or_scope.reuse
else:
name = name_or_scope
reuse = None
cells_fw = []
cells_bw = []
for i, layer_size in enumerate(hparams.enc_rnn_size):
if self._use_cudnn:
cells_fw.append(cudnn_lstm_layer(
[layer_size], hparams.dropout_keep_prob, is_training,
name_or_scope=tf.VariableScope(
reuse,
name + '/cell_%d/bidirectional_rnn/fw' % i)))
cells_bw.append(cudnn_lstm_layer(
[layer_size], hparams.dropout_keep_prob, is_training,
name_or_scope=tf.VariableScope(
reuse,
name + '/cell_%d/bidirectional_rnn/bw' % i)))
else:
cells_fw.append(
rnn_cell([layer_size], hparams.dropout_keep_prob, is_training))
cells_bw.append(
rnn_cell([layer_size], hparams.dropout_keep_prob, is_training))
self._cells = (cells_fw, cells_bw)
def encode(self, sequence, sequence_length):
cells_fw, cells_bw = self._cells
if self._use_cudnn:
# Implements stacked bidirectional LSTM for variable-length sequences,
# which are not supported by the CudnnLSTM layer.
inputs_fw = tf.transpose(sequence, [1, 0, 2])
for lstm_fw, lstm_bw in zip(cells_fw, cells_bw):
outputs_fw, _ = lstm_fw(inputs_fw, training=self._is_training)
inputs_bw = tf.reverse_sequence(
inputs_fw, sequence_length, seq_axis=0, batch_axis=1)
outputs_bw, _ = lstm_bw(inputs_bw, training=self._is_training)
outputs_bw = tf.reverse_sequence(
outputs_bw, sequence_length, seq_axis=0, batch_axis=1)
inputs_fw = tf.concat([outputs_fw, outputs_bw], axis=2)
last_h_fw = _get_final(outputs_fw, sequence_length)
# outputs_bw has already been reversed, so we can take the first element.
last_h_bw = outputs_bw[0]
else:
_, states_fw, states_bw = rnn.stack_bidirectional_dynamic_rnn(
cells_fw,
cells_bw,
sequence,
sequence_length=sequence_length,
time_major=False,
dtype=tf.float32,
scope=self._name_or_scope)
# Note we access the outputs (h) from the states since the backward
# ouputs are reversed to the input order in the returned outputs.
last_h_fw = states_fw[-1][-1].h
last_h_bw = states_bw[-1][-1].h
return tf.concat([last_h_fw, last_h_bw], 1)
class BaseLstmDecoder(base_model.BaseDecoder):
"""Abstract LSTM Decoder class.
Implementations must define the following abstract methods:
-`_sample`
-`_flat_reconstruction_loss`
"""
def build(self, hparams, output_depth, is_training=False):
self._is_training = is_training
tf.logging.info('\nDecoder Cells:\n'
' units: %s\n',
hparams.dec_rnn_size)
self._sampling_probability = _get_sampling_probability(
hparams, is_training)
self._output_depth = output_depth
self._output_layer = layers_core.Dense(
output_depth, name='output_projection')
self._dec_cell = rnn_cell(
hparams.dec_rnn_size, hparams.dropout_keep_prob, is_training)
self._cudnn_dec_lstm = cudnn_lstm_layer(
hparams.dec_rnn_size, hparams.dropout_keep_prob, is_training,
name_or_scope='decoder') if hparams.use_cudnn else None
@abc.abstractmethod
def _sample(self, rnn_output, temperature):
"""Core sampling method for a single time step.
Args:
rnn_output: The output from a single timestep of the RNN, sized
`[batch_size, rnn_output_size]`.
temperature: A scalar float specifying a sampling temperature.
Returns:
A batch of samples from the model.
"""
pass
@abc.abstractmethod
def _flat_reconstruction_loss(self, flat_x_target, flat_rnn_output):
"""Core loss calculation method for flattened outputs.
Args:
flat_x_target: The flattened ground truth vectors, sized
`[sum(x_length), self._output_depth]`.
flat_rnn_output: The flattened output from all timeputs of the RNN,
sized `[sum(x_length), rnn_output_size]`.
Returns:
r_loss: The unreduced reconstruction losses, sized `[sum(x_length)]`.
metric_map: A map of metric names to tuples, each of which contain the
pair of (value_tensor, update_op) from a tf.metrics streaming metric.
truths: Ground truth labels.
predictions: Predicted labels.
"""
pass
def _decode(self, z, helper, max_length=None, x_input=None):
"""Decodes the given batch of latent vectors vectors, which may be 0-length.
Args:
z: Batch of latent vectors, sized `[batch_size, z_size]`, where `z_size`
may be 0 for unconditioned decoding.
helper: A seq2seq.Helper to use. If a TrainingHelper is passed and a
CudnnLSTM has previously been defined, it will be used instead.
max_length: (Optinal) The maximum iterations to decode.
x_input: (Optional) The inputs to the decoder for teacher forcing.
Required if CudnnLSTM is to be used.
Returns:
final_output: The final seq2seq.BasicDecoderOutput.
final_state: The final states of the decoder, or None if using Cudnn.
"""
initial_state = initial_cell_state_from_embedding(
self._dec_cell, z, name='decoder/z_to_initial_state')
# CudnnLSTM does not support sampling so it can only replace TrainingHelper.
if self._cudnn_dec_lstm and type(helper) is seq2seq.TrainingHelper: # pylint:disable=unidiomatic-typecheck
rnn_output, _ = self._cudnn_dec_lstm(
tf.transpose(x_input, [1, 0, 2]),
initial_state=_cudnn_lstm_state(initial_state),
training=self._is_training)
with tf.variable_scope('decoder'):
rnn_output = self._output_layer(rnn_output)
final_output = seq2seq.BasicDecoderOutput(
rnn_output=tf.transpose(rnn_output, [1, 0, 2]), sample_id=None)
# TODO(adarob): Return a final state for fixed-length outputs.
final_state = None
else:
if self._cudnn_dec_lstm:
tf.logging.warning(
'CudnnLSTM does not support sampling. Using `dynamic_decode` '
'instead.')
decoder = seq2seq.BasicDecoder(
self._dec_cell,
helper,
initial_state=initial_state,
output_layer=self._output_layer)
final_output, final_state, _ = seq2seq.dynamic_decode(
decoder,
maximum_iterations=max_length,
swap_memory=True,
scope='decoder')
return final_output, final_state
def reconstruction_loss(self, x_input, x_target, x_length, z=None):
"""Reconstruction loss calculation.
Args:
x_input: Batch of decoder input sequences for teacher forcing, sized
`[batch_size, max(x_length), output_depth]`.
x_target: Batch of expected output sequences to compute loss against,
sized `[batch_size, max(x_length), output_depth]`.
x_length: Length of input/output sequences, sized `[batch_size]`.
z: (Optional) Latent vectors. Required if model is conditional. Sized
`[n, z_size]`.
Returns:
r_loss: The reconstruction loss for each sequence in the batch.
metric_map: Map from metric name to tf.metrics return values for logging.
truths: Ground truth labels.
predictions: Predicted labels.
final_state: The final states of the decoder, or None if using Cudnn.
"""
batch_size = x_input.shape[0].value
has_z = z is not None
z = tf.zeros([batch_size, 0]) if z is None else z
repeated_z = tf.tile(
tf.expand_dims(z, axis=1), [1, tf.shape(x_input)[1], 1])
sampling_probability_static = tensor_util.constant_value(
self._sampling_probability)
if sampling_probability_static == 0.0:
# Use teacher forcing.
x_input = tf.concat([x_input, repeated_z], axis=2)
helper = seq2seq.TrainingHelper(x_input, x_length)
else:
# Use scheduled sampling.
helper = seq2seq.ScheduledOutputTrainingHelper(
inputs=x_input,
sequence_length=x_length,
auxiliary_inputs=repeated_z if has_z else None,
sampling_probability=self._sampling_probability,
next_inputs_fn=self._sample)
decoder_outputs, final_state = self._decode(
z, helper=helper, x_input=x_input)
flat_x_target = flatten_maybe_padded_sequences(x_target, x_length)
flat_rnn_output = flatten_maybe_padded_sequences(
decoder_outputs.rnn_output, x_length)
r_loss, metric_map, truths, predictions = self._flat_reconstruction_loss(
flat_x_target, flat_rnn_output)
# Sum loss over sequences.
cum_x_len = tf.concat([(0,), tf.cumsum(x_length)], axis=0)
r_losses = []
for i in range(batch_size):
b, e = cum_x_len[i], cum_x_len[i + 1]
r_losses.append(tf.reduce_sum(r_loss[b:e]))
r_loss = tf.stack(r_losses)
return r_loss, metric_map, truths, predictions, final_state
def sample(self, n, max_length=None, z=None, temperature=1.0,
start_inputs=None, end_fn=None):
"""Sample from decoder with an optional conditional latent vector `z`.
Args:
n: Scalar number of samples to return.
max_length: (Optional) Scalar maximum sample length to return. Required if
data representation does not include end tokens.
z: (Optional) Latent vectors to sample from. Required if model is
conditional. Sized `[n, z_size]`.
temperature: (Optional) The softmax temperature to use when sampling, if
applicable.
start_inputs: (Optional) Initial inputs to use for batch.
Sized `[n, output_depth]`.
end_fn: (Optional) A callable that takes a batch of samples (sized
`[n, output_depth]` and emits a `bool` vector
shaped `[batch_size]` indicating whether each sample is an end token.
Returns:
samples: Sampled sequences. Sized `[n, max_length, output_depth]`.
final_state: The final states of the decoder.
Raises:
ValueError: If `z` is provided and its first dimension does not equal `n`.
"""
if z is not None and z.shape[0].value != n:
raise ValueError(
'`z` must have a first dimension that equals `n` when given. '
'Got: %d vs %d' % (z.shape[0].value, n))
# Use a dummy Z in unconditional case.
z = tf.zeros((n, 0), tf.float32) if z is None else z
# If not given, start with zeros.
start_inputs = start_inputs if start_inputs is not None else tf.zeros(
[n, self._output_depth], dtype=tf.float32)
# In the conditional case, also concatenate the Z.
start_inputs = tf.concat([start_inputs, z], axis=-1)
sample_fn = lambda x: self._sample(x, temperature)
end_fn = end_fn or (lambda x: False)
# In the conditional case, concatenate Z to the sampled value.
next_inputs_fn = lambda x: tf.concat([x, z], axis=-1)
sampler = seq2seq.InferenceHelper(
sample_fn, sample_shape=[self._output_depth], sample_dtype=tf.float32,
start_inputs=start_inputs, end_fn=end_fn, next_inputs_fn=next_inputs_fn)
decoder_outputs, final_state = self._decode(
z, helper=sampler, max_length=max_length)
return decoder_outputs.sample_id, final_state
class CategoricalLstmDecoder(BaseLstmDecoder):
"""LSTM decoder with single categorical output."""
def _flat_reconstruction_loss(self, flat_x_target, flat_rnn_output):
flat_logits = flat_rnn_output
flat_truth = tf.argmax(flat_x_target, axis=1)
flat_predictions = tf.argmax(flat_logits, axis=1)
r_loss = tf.nn.softmax_cross_entropy_with_logits(
labels=flat_x_target, logits=flat_logits)
metric_map = {
'metrics/accuracy':
tf.metrics.accuracy(flat_truth, flat_predictions),
'metrics/mean_per_class_accuracy':
tf.metrics.mean_per_class_accuracy(
flat_truth, flat_predictions, flat_x_target.shape[-1].value),
}
return r_loss, metric_map, flat_truth, flat_predictions
def _sample(self, rnn_output, temperature=1.0):
sampler = tf.contrib.distributions.OneHotCategorical(
logits=rnn_output / temperature, dtype=tf.float32)
return sampler.sample()
def sample(self, n, max_length=None, z=None, temperature=None,
start_inputs=None, beam_width=None, end_token=None):
"""Overrides BaseLstmDecoder `sample` method to add optional beam search.
Args:
n: Scalar number of samples to return.
max_length: (Optional) Scalar maximum sample length to return. Required if
data representation does not include end tokens.
z: (Optional) Latent vectors to sample from. Required if model is
conditional. Sized `[n, z_size]`.
temperature: (Optional) The softmax temperature to use when not doing beam
search. Defaults to 1.0. Ignored when `beam_width` is provided.
start_inputs: (Optional) Initial inputs to use for batch.
Sized `[n, output_depth]`.
beam_width: (Optional) Width of beam to use for beam search. Beam search
is disabled if not provided.
end_token: (Optional) Scalar token signaling the end of the sequence to
use for early stopping.
Returns:
samples: Sampled sequences. Sized `[n, max_length, output_depth]`.
final_state: The final states of the decoder.
Raises:
ValueError: If `z` is provided and its first dimension does not equal `n`.
"""
if beam_width is None:
end_fn = (None if end_token is None else
lambda x: tf.equal(tf.argmax(x, axis=-1), end_token))
return super(CategoricalLstmDecoder, self).sample(
n, max_length, z, temperature, start_inputs, end_fn)
# If `end_token` is not given, use an impossible value.
end_token = self._output_depth if end_token is None else end_token
if z is not None and z.shape[0].value != n:
raise ValueError(
'`z` must have a first dimension that equals `n` when given. '
'Got: %d vs %d' % (z.shape[0].value, n))
if temperature is not None:
tf.logging.warning('`temperature` is ignored when using beam search.')
# Use a dummy Z in unconditional case.
z = tf.zeros((n, 0), tf.float32) if z is None else z
# If not given, start with dummy `-1` token and replace with zero vectors in
# `embedding_fn`.
start_tokens = (
tf.argmax(start_inputs, axis=-1, output_type=tf.int32)
if start_inputs is not None else
-1 * tf.ones([n], dtype=tf.int32))
initial_state = initial_cell_state_from_embedding(
self._dec_cell, z, name='decoder/z_to_initial_state')
beam_initial_state = seq2seq.tile_batch(
initial_state, multiplier=beam_width)
# Tile `z` across beams.
beam_z = tf.tile(tf.expand_dims(z, 1), [1, beam_width, 1])
def embedding_fn(tokens):
# If tokens are the start_tokens (negative), replace with zero vectors.
next_inputs = tf.cond(
tf.less(tokens[0, 0], 0),
lambda: tf.zeros([n, beam_width, self._output_depth]),
lambda: tf.one_hot(tokens, self._output_depth))
# Concatenate `z` to next inputs.
next_inputs = tf.concat([next_inputs, beam_z], axis=-1)
return next_inputs
decoder = seq2seq.BeamSearchDecoder(
self._dec_cell,
embedding_fn,
start_tokens,
end_token,
beam_initial_state,
beam_width,
output_layer=self._output_layer,
length_penalty_weight=0.0)
final_output, final_state, _ = seq2seq.dynamic_decode(
decoder,
maximum_iterations=max_length,
swap_memory=True,
scope='decoder')
# Returns samples and final states from the best beams.
return (tf.one_hot(final_output.predicted_ids[:, :, 0], self._output_depth),
nest.map_structure(lambda x: x[:, 0], final_state.cell_state))
class MultiOutCategoricalLstmDecoder(CategoricalLstmDecoder):
"""LSTM decoder with multiple categorical outputs."""
def __init__(self, output_depths):
self._output_depths = output_depths
def build(self, hparams, output_depth, is_training):
if sum(self._output_depths) != output_depth:
raise ValueError(
'Decoder output depth does not match sum of sub-decoders: %s vs %d',
self._output_depths, output_depth)
super(MultiOutCategoricalLstmDecoder, self).build(
hparams, output_depth, is_training)
def _flat_reconstruction_loss(self, flat_x_target, flat_rnn_output):
split_x_target = tf.split(flat_x_target, self._output_depths, axis=-1)
split_rnn_output = tf.split(
flat_rnn_output, self._output_depths, axis=-1)
losses = []
truths = []
predictions = []
metric_map = {}
for i in range(len(self._output_depths)):
l, m, t, p = (
super(MultiOutCategoricalLstmDecoder, self)._flat_reconstruction_loss(
split_x_target[i], split_rnn_output[i]))
losses.append(l)
truths.append(t)
predictions.append(p)
for k, v in m.items():
metric_map['%s_%d' % (k, i)] = v
return (tf.reduce_sum(losses, axis=0),
metric_map,
tf.stack(truths),
tf.stack(predictions))
def _sample(self, rnn_output, temperature=1.0):
split_logits = tf.split(rnn_output, self._output_depths, axis=-1)
samples = []
for logits, output_depth in zip(split_logits, self._output_depths):
sampler = tf.contrib.distributions.Categorical(
logits=logits / temperature)
sample_label = sampler.sample()
samples.append(tf.one_hot(sample_label, output_depth, dtype=tf.float32))
return tf.concat(samples, axis=-1)
class HierarchicalMultiOutLstmDecoder(base_model.BaseDecoder):
"""Hierarchical LSTM decoder with (optional) multiple categorical outputs."""
def __init__(self, core_decoders, output_depths):
"""Initializer for a HierarchicalMultiOutLstmDecoder.
Args:
core_decoders: The BaseDecoder implementation class(es) to use at the
output layer.
output_depths: A list of output depths for the core decoders.
Raises:
ValueError: If the number of core decoders and output depths are not
equal.
"""
if len(core_decoders) != len(output_depths):
raise ValueError(
'The number of `core_decoders` and `output_depths` provided to a '
'HierarchicalMultiOutLstmDecoder must be equal. Got: %d != %d',
len(core_decoders), len(output_depths))
self._core_decoders = core_decoders
self._output_depths = output_depths
def build(self, hparams, output_depth, is_training):
if sum(self._output_depths) != output_depth:
raise ValueError(
'Decoder output depth does not match sum of sub-decoders: %s vs %d',
self._output_depths, output_depth)
self.hparams = hparams
self._is_training = is_training
for j, (cd, od) in enumerate(zip(self._core_decoders, self._output_depths)):
with tf.variable_scope('core_decoder_%d' % j):
cd.build(hparams, od, is_training)
def _hierarchical_decode(self, z=None):
hparams = self.hparams
batch_size = hparams.batch_size
if z is None:
learned_initial_embedding = tf.get_variable(
'learned_initial_embedding',
shape=hparams.z_size,
initializer=tf.random_normal_initializer(stddev=0.001))
embeddings = [tf.stack([learned_initial_embedding] * batch_size)]
else:
embeddings = [z]
for i, h_size in enumerate(hparams.hierarchical_output_sizes):
if h_size % len(embeddings) != 0:
raise ValueError(
'Each size in `hierarchical_output_sizes` must be evenly divisible '
'by the previous. Got: %d !/ %d', h_size, len(embeddings))
num_steps = h_size // len(embeddings)
all_outputs = []
with tf.variable_scope('hierarchical_layer_%d' % i) as scope:
cell = rnn_cell(
hparams.dec_rnn_size, hparams.dropout_keep_prob, self._is_training)
cudnn_cell = cudnn_lstm_layer(
hparams.dec_rnn_size, hparams.dropout_keep_prob, self._is_training)
for e in embeddings:
e.set_shape([batch_size] + e.shape[1:].as_list())
initial_state = initial_cell_state_from_embedding(
cell, e, name='e_to_initial_state')
if hparams.use_cudnn:
input_ = tf.zeros([num_steps, batch_size, 1])
outputs, _ = cudnn_cell(
input_,
initial_state=_cudnn_lstm_state(initial_state),
training=self._is_training)
outputs = tf.unstack(outputs)
else:
input_ = [tf.zeros([batch_size, 1])] * num_steps
outputs, _ = tf.nn.static_rnn(
cell, input_, initial_state=initial_state)
all_outputs.extend(outputs)
# Reuse layer next time.
scope.reuse_variables()
embeddings = all_outputs
return embeddings
def reconstruction_loss(self, x_input, x_target, x_length, z=None):
embeddings = self._hierarchical_decode(z)
n = len(embeddings)
# TODO(adarob): Support variable length outputs.
with tf.control_dependencies([
tf.assert_equal(
x_length, (x_length[0] // n) * n,
message='HierarchicalMultiOutLstmDecoder requires `x_length` to '
'all be equal and divisible by the final number of embeddings.')]):
x_input = tf.identity(x_input)
# Split sequences into n x M subsequences where M is the number of core
# models.
split_x_input = [
tf.split(x, self._output_depths, axis=-1)
for x in tf.split(x_input, n, axis=1)]
split_x_target = [
tf.split(x, self._output_depths, axis=-1)
for x in tf.split(x_target, n, axis=1)]
loss_outputs = [[] for _ in self._core_decoders]
# Compute reconstruction loss for the n x M split sequences.
for i, e in enumerate(embeddings):
for j, cd in enumerate(self._core_decoders):
with tf.variable_scope('core_decoder_%d' % j, reuse=i > 0):
# TODO(adarob): Sample initial inputs when using scheduled sampling.
loss_outputs[j].append(
cd.reconstruction_loss(
split_x_input[i][j], split_x_target[i][j], x_length // n, e))
# Accumulate the split sequence losses.
all_r_losses = []
all_truth = []
all_predictions = []
metric_map = {}
all_final_states = []
for j, loss_outputs_j in enumerate(loss_outputs):
r_losses, _, truth, predictions, final_states = zip(*loss_outputs_j)
all_r_losses.append(tf.reduce_sum(r_losses, axis=0))
all_truth.append(tf.concat(truth, axis=-1))
all_predictions.append(tf.concat(predictions, axis=-1))
metric_map['metrics/accuracy_%d' % j] = tf.metrics.accuracy(
all_truth[-1], all_predictions[-1])
metric_map['metrics/mean_per_class_accuracy_%d' % j] = (
tf.metrics.mean_per_class_accuracy(
all_truth[-1], all_predictions[-1], self._output_depths[j]))
all_final_states.append(final_states)
return (tf.reduce_sum(all_r_losses, axis=0),
metric_map,
tf.stack(all_truth, axis=-1),
tf.stack(all_predictions, axis=-1),
all_final_states)
def sample(self, n, max_length=None, z=None, temperature=1.0,
**core_sampler_kwargs):
if z is not None and z.shape[0].value != n:
raise ValueError(
'`z` must have a first dimension that equals `n` when given. '
'Got: %d vs %d' % (z.shape[0].value, n))
if max_length is None:
# TODO(adarob): Support variable length outputs.
raise ValueError(
'HierarchicalMultiOutLstmDecoder requires `max_length` be provided '
'during sampling.')
embeddings = self._hierarchical_decode(z)
sample_ids = []
final_states = []
for j, cd in enumerate(self._core_decoders):
sample_ids_j = []
final_states_j = []
with tf.variable_scope('core_decoder_%d' % j) as scope:
for e in embeddings:
sample_ids_j_e, final_states_j_e = cd.sample(
n,
max_length // len(embeddings),
z=e,
temperature=temperature,
start_inputs=(
sample_ids_j[-1][:, -1] if sample_ids_j else None),
**core_sampler_kwargs)
sample_ids_j.append(sample_ids_j_e)
final_states_j.append(final_states_j_e)
scope.reuse_variables()
sample_ids.append(tf.concat(sample_ids_j, axis=1))
final_states.append(final_states_j)
return tf.concat(sample_ids, axis=-1), final_states
class MultiLabelRnnNadeDecoder(BaseLstmDecoder):
"""LSTM decoder with multi-label output provided by a NADE."""
def build(self, hparams, output_depth, is_training=False):
self._nade = Nade(
output_depth, hparams.nade_num_hidden, name='decoder/nade')
super(MultiLabelRnnNadeDecoder, self).build(
hparams, output_depth, is_training)
# Overwrite output layer for NADE parameterization.
self._output_layer = layers_core.Dense(
self._nade.num_hidden + output_depth, name='output_projection')
def _flat_reconstruction_loss(self, flat_x_target, flat_rnn_output):
b_enc, b_dec = tf.split(
flat_rnn_output,
[self._nade.num_hidden, self._output_depth], axis=1)
ll, cond_probs = self._nade.log_prob(
flat_x_target, b_enc=b_enc, b_dec=b_dec)
r_loss = -ll
flat_truth = tf.cast(flat_x_target, tf.bool)
flat_predictions = tf.greater_equal(cond_probs, 0.5)
metric_map = {
'metrics/accuracy':
tf.metrics.mean(
tf.reduce_all(tf.equal(flat_truth, flat_predictions), axis=-1)),
'metrics/recall':
tf.metrics.recall(flat_truth, flat_predictions),
'metrics/precision':
tf.metrics.precision(flat_truth, flat_predictions),
}
return r_loss, metric_map, flat_truth, flat_predictions
def _sample(self, rnn_output, temperature=None):
"""Sample from NADE, returning the argmax if no temperature is provided."""
b_enc, b_dec = tf.split(
rnn_output, [self._nade.num_hidden, self._output_depth], axis=1)
sample, _ = self._nade.sample(
b_enc=b_enc, b_dec=b_dec, temperature=temperature)
return sample
def get_default_hparams():
"""Returns copy of default HParams for LSTM models."""
hparams_map = base_model.get_default_hparams().values()
hparams_map.update({
'conditional': True,
'dec_rnn_size': [512], # Decoder RNN: number of units per layer.
'enc_rnn_size': [256], # Encoder RNN: number of units per layer per dir.
'dropout_keep_prob': 1.0, # Probability all dropout keep.
'sampling_schedule': 'constant', # constant, exponential, inverse_sigmoid
'sampling_rate': 0.0, # Interpretation is based on `sampling_schedule`.
'use_cudnn': False, # Uses faster CudnnLSTM to train. For GPU only.
})
return tf.contrib.training.HParams(**hparams_map)
|
[
"noreply@github.com"
] |
craffel.noreply@github.com
|
7775347dd18c90d2568ca4294082bdaac633f810
|
315a6f349eb7c475ecbb7c8a1a9a79f01198ace2
|
/Exercicios-Python/Basico/Exercicios/exercicio28.py
|
29d66cf493d2f057c6d74727fdc1040a36b501f5
|
[] |
no_license
|
SobrancelhaDoDragao/Exercicio-De-Programacao
|
59447e7fd87621a05f52d21dfed96757be96437b
|
1ee7976acc978274f0314ae7e985c537d78bb462
|
refs/heads/master
| 2020-05-24T04:08:06.160008
| 2019-08-10T22:53:18
| 2019-08-10T22:53:18
| 187,085,332
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 523
|
py
|
# Escreva um programa que faça o computador "pensar" em um número
# inteiro entre 0 e 5 e peça para o usuário tentar descobrir qual
# foi o número escolhido pelo computador.
from random import randint
from time import sleep
numero_sorteado = randint(0, 5)
chute_usuario = int(input("Adivinhe um numero de 1 à 5: "))
print("PROCESSANDO....")
#Esse comando faz o computador adormecer por um tempo.
sleep(1.5)
if chute_usuario == numero_sorteado:
print("Parabéns você acertou!!")
else:
print("Você errou!!")
|
[
"eudson.duraes@gmail.com"
] |
eudson.duraes@gmail.com
|
3b5a6cd1c83b112f9da518dd26ccae02d454cd09
|
cacef898f68f8adec19e8f56e28a021176c33d6b
|
/SDICE.py
|
6c795876ff3c6564bbe4feaa2c76582c0b75907e
|
[] |
no_license
|
CharmiShah26/Code-Chef
|
851f3731ed7d8df460a7de9ee768667f5707b1b1
|
a79347fe147bc8f47997e8a85f431c9fec48f139
|
refs/heads/main
| 2023-05-31T09:41:04.328454
| 2021-06-07T08:36:09
| 2021-06-07T08:36:09
| 345,911,668
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 774
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 3 12:08:04 2021
@author: charm
"""
for i in range(int(input())):
n=int(input())
result=mod=div=0
mod=n%4
div=n//4
if(n>4):
betn_layer=16*(div-1)#top most 4 4 4 4 of 1 row gets subracted since 60 is of subtracted 1's in it
result=(div*60)-betn_layer
if(mod==1):
result+=20
elif(mod==2):
result+=36
elif(mod==3):
result+=51
result=result-(4*mod)
else:
if(mod==0):
result=60
elif(mod==1):
result=20
elif(mod==2):
result=36
elif(mod==3):
result=51
print(result)
|
[
"noreply@github.com"
] |
CharmiShah26.noreply@github.com
|
5193fbf761d9b6189a54c73713e04d10544fd94c
|
f2604c10b9f32feb5935fb29e2a8d798371854e3
|
/Functions/is_prime.py
|
c55958d891db898f5a432611986402fb85cbab3c
|
[] |
no_license
|
dlordtemplar/python-projects
|
622cc25f2de93f01d32e6eabe8ee1df3789be78b
|
d4428cc3b11fe229ce1bc3a0c42176882b153b0f
|
refs/heads/master
| 2021-08-06T15:45:44.411257
| 2017-11-06T11:31:54
| 2017-11-06T11:31:54
| 109,685,569
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 635
|
py
|
'''
Implement a function is_prime(x) that returns True if x is prime, False
otherwise. (1 Point)
>>> is_prime(7)
True
>>> is_prime(15)
False
'''
# ... your code ...
def is_prime(num):
if (num == 1):
return False
isPrime = True
for y in range(2, num - 1):
if (isPrime):
if (num % y == 0):
isPrime = False
return isPrime
if __name__ == '__main__':
print('is_prime')
print('Test1', is_prime(1) == False)
print('Test2', is_prime(2) == True)
print('Test3', is_prime(3) == True)
print('Test4', is_prime(4) == False)
print('Test5', is_prime(5) == True)
|
[
"dlordtemplar@gmail.com"
] |
dlordtemplar@gmail.com
|
e76b6a4c02c2a9399cb5b61415395bea8bac28bd
|
fc51acde1574581ecc116d7c4558790d6489e2ee
|
/shipping/views.py
|
524cc23869871781b6395c18c89328edd9ec83ca
|
[] |
no_license
|
sherylg343/janeric
|
9d2a7c270a4a158376b06c052c4c7aa644ae8c01
|
7c936edaf0cb823fcfd094e7412fc818eec2dc18
|
refs/heads/master
| 2023-02-15T19:47:56.179613
| 2021-01-09T03:58:22
| 2021-01-09T03:58:22
| 317,326,327
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 163
|
py
|
from django.shortcuts import render
def shipping(request):
""" A view to render shipping.html page """
return render(request, 'shipping/shipping.html')
|
[
"sheryl.s.goldberg@gmail.com"
] |
sheryl.s.goldberg@gmail.com
|
d0c81444b3d07eba38ec63fe0377e35b735e9fe5
|
f4136db076face6d5c5b6126c70b25e55f67148c
|
/src/days-2-4-adv/item.py
|
e244fe3d0466471b4a216bbd519272ce8c8bfbb2
|
[] |
no_license
|
Jameson13B/Intro-Python
|
cb22e0505bf63d2d32a1777881c62539b60174b2
|
9bd5d7e94bb68ab5206247dc9f81bbbf4284ef2c
|
refs/heads/master
| 2020-03-27T09:22:19.010909
| 2018-08-31T15:18:43
| 2018-08-31T15:18:43
| 146,335,007
| 0
| 0
| null | 2018-08-27T18:03:24
| 2018-08-27T18:03:23
| null |
UTF-8
|
Python
| false
| false
| 109
|
py
|
class Item:
def __init__(self, name, description):
self.name = name
self.description = description
|
[
"jameson@Jamesons-MacBook-Pro.local"
] |
jameson@Jamesons-MacBook-Pro.local
|
10e1cc18c2b18658d9f17170ea4c128edbd1504f
|
1dbc928c3e3372e89c32d54e901341cdb3348684
|
/raspi/raspi_controller.py
|
4a501d5ee55e721d6af3a4165a28d14399b371bb
|
[] |
no_license
|
RowlandOti/facial-barnacle-backend
|
af83b82c5e1d0c8741259e2e521ba13cc5ba84ca
|
4915bb446eaa87e0fb0e58b2df3b4fc5b66dba1b
|
refs/heads/main
| 2023-04-23T07:43:31.707773
| 2021-05-03T12:31:02
| 2021-05-03T12:31:02
| 359,898,080
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 619
|
py
|
import time
import RPi.GPIO as GPIO
def should_drive_wheels(is_granted_access):
servo_pin = 12
GPIO.setmode(GPIO.BCM)
GPIO.setup(servo_pin, GPIO.OUT)
servo = GPIO.PWM(servo_pin, 50) # GPIO 12 for PWM with 50Hz
if is_granted_access:
servo.start(2.5) # Initialization
set_angle(180, servo)
return True
servo.stop() # Stop
servo.ChangeDutyCycle(0)
return False
def set_angle(angle, servo):
duty = int(angle / 18 + 2)
for i in range(0, duty, 2):
servo.ChangeDutyCycle(i)
time.sleep(1)
time.sleep(1)
servo.ChangeDutyCycle(0)
|
[
"r.otieno@kokonetworks.com"
] |
r.otieno@kokonetworks.com
|
79104c091b2bd910ae0b1582fc58fb639954928b
|
9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb
|
/sdk/eventhub/azure-eventhub/azure/eventhub/_transport/_uamqp_transport.py
|
064ea88b753f0dbfadbc058344d508691cbfa84e
|
[
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
openapi-env-test/azure-sdk-for-python
|
b334a2b65eeabcf9b7673879a621abb9be43b0f6
|
f61090e96094cfd4f43650be1a53425736bd8985
|
refs/heads/main
| 2023-08-30T14:22:14.300080
| 2023-06-08T02:53:04
| 2023-06-08T02:53:04
| 222,384,897
| 1
| 0
|
MIT
| 2023-09-08T08:38:48
| 2019-11-18T07:09:24
|
Python
|
UTF-8
|
Python
| false
| false
| 29,890
|
py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import time
import logging
from typing import Optional, Union, Any, Tuple
try:
from uamqp import (
c_uamqp,
BatchMessage,
constants,
MessageBodyType,
Message,
types,
SendClient,
ReceiveClient,
Source,
utils,
authentication,
AMQPClient,
compat,
errors,
Connection,
__version__,
)
from uamqp.message import (
MessageHeader,
MessageProperties,
)
uamqp_installed = True
except ImportError:
uamqp_installed = False
from ._base import AmqpTransport
from ..amqp._constants import AmqpMessageBodyType
from .._constants import (
NO_RETRY_ERRORS,
PROP_PARTITION_KEY,
UAMQP_LIBRARY,
)
from ..exceptions import (
ConnectError,
OperationTimeoutError,
EventHubError,
AuthenticationError,
ConnectionLostError,
EventDataError,
EventDataSendError,
)
_LOGGER = logging.getLogger(__name__)
if uamqp_installed:
def _error_handler(error):
"""
Called internally when an event has failed to send so we
can parse the error to determine whether we should attempt
to retry sending the event again.
Returns the action to take according to error type.
:param error: The error received in the send attempt.
:type error: Exception
:rtype: ~uamqp.errors.ErrorAction
"""
if error.condition == b"com.microsoft:server-busy":
return errors.ErrorAction(retry=True, backoff=4)
if error.condition == b"com.microsoft:timeout":
return errors.ErrorAction(retry=True, backoff=2)
if error.condition == b"com.microsoft:operation-cancelled":
return errors.ErrorAction(retry=True)
if error.condition == b"com.microsoft:container-close":
return errors.ErrorAction(retry=True, backoff=4)
if error.condition in NO_RETRY_ERRORS:
return errors.ErrorAction(retry=False)
return errors.ErrorAction(retry=True)
class UamqpTransport(AmqpTransport): # pylint: disable=too-many-public-methods
"""
Class which defines uamqp-based methods used by the producer and consumer.
"""
# define constants
MAX_FRAME_SIZE_BYTES = constants.MAX_FRAME_SIZE_BYTES
MAX_MESSAGE_LENGTH_BYTES = constants.MAX_MESSAGE_LENGTH_BYTES
TIMEOUT_FACTOR = 1000
CONNECTION_CLOSING_STATES: Tuple = ( # pylint:disable=protected-access
c_uamqp.ConnectionState.CLOSE_RCVD, # pylint:disable=c-extension-no-member
c_uamqp.ConnectionState.CLOSE_SENT, # pylint:disable=c-extension-no-member
c_uamqp.ConnectionState.DISCARDING, # pylint:disable=c-extension-no-member
c_uamqp.ConnectionState.END, # pylint:disable=c-extension-no-member
)
TRANSPORT_IDENTIFIER = f"{UAMQP_LIBRARY}/{__version__}"
# define symbols
PRODUCT_SYMBOL = types.AMQPSymbol("product")
VERSION_SYMBOL = types.AMQPSymbol("version")
FRAMEWORK_SYMBOL = types.AMQPSymbol("framework")
PLATFORM_SYMBOL = types.AMQPSymbol("platform")
USER_AGENT_SYMBOL = types.AMQPSymbol("user-agent")
PROP_PARTITION_KEY_AMQP_SYMBOL = types.AMQPSymbol(PROP_PARTITION_KEY)
@staticmethod
def build_message(**kwargs):
"""
Creates a uamqp.Message with given arguments.
:rtype: uamqp.Message
"""
return Message(**kwargs)
@staticmethod
def build_batch_message(**kwargs):
"""
Creates a uamqp.BatchMessage with given arguments.
:rtype: uamqp.BatchMessage
"""
return BatchMessage(**kwargs)
@staticmethod
def to_outgoing_amqp_message(annotated_message):
"""
Converts an AmqpAnnotatedMessage into an Amqp Message.
:param AmqpAnnotatedMessage annotated_message: AmqpAnnotatedMessage to convert.
:rtype: uamqp.Message
"""
message_header = None
header_vals = annotated_message.header.values() if annotated_message.header else None
# If header and non-None header values, create outgoing header.
if annotated_message.header and header_vals.count(None) != len(header_vals):
message_header = MessageHeader()
message_header.delivery_count = annotated_message.header.delivery_count
message_header.time_to_live = annotated_message.header.time_to_live
message_header.first_acquirer = annotated_message.header.first_acquirer
message_header.durable = annotated_message.header.durable
message_header.priority = annotated_message.header.priority
message_properties = None
properties_vals = annotated_message.properties.values() if annotated_message.properties else None
# If properties and non-None properties values, create outgoing properties.
if annotated_message.properties and properties_vals.count(None) != len(properties_vals):
message_properties = MessageProperties(
message_id=annotated_message.properties.message_id,
user_id=annotated_message.properties.user_id,
to=annotated_message.properties.to,
subject=annotated_message.properties.subject,
reply_to=annotated_message.properties.reply_to,
correlation_id=annotated_message.properties.correlation_id,
content_type=annotated_message.properties.content_type,
content_encoding=annotated_message.properties.content_encoding,
creation_time=int(annotated_message.properties.creation_time)
if annotated_message.properties.creation_time else None,
absolute_expiry_time=int(annotated_message.properties.absolute_expiry_time)
if annotated_message.properties.absolute_expiry_time else None,
group_id=annotated_message.properties.group_id,
group_sequence=annotated_message.properties.group_sequence,
reply_to_group_id=annotated_message.properties.reply_to_group_id,
encoding=annotated_message._encoding # pylint: disable=protected-access
)
# pylint: disable=protected-access
amqp_body_type = annotated_message.body_type
if amqp_body_type == AmqpMessageBodyType.DATA:
amqp_body_type = MessageBodyType.Data
amqp_body = list(annotated_message._data_body)
elif amqp_body_type == AmqpMessageBodyType.SEQUENCE:
amqp_body_type = MessageBodyType.Sequence
amqp_body = list(annotated_message._sequence_body)
else:
amqp_body_type = MessageBodyType.Value
amqp_body = annotated_message._value_body
return Message(
body=amqp_body,
body_type=amqp_body_type,
header=message_header,
properties=message_properties,
application_properties=annotated_message.application_properties,
annotations=annotated_message.annotations,
delivery_annotations=annotated_message.delivery_annotations,
footer=annotated_message.footer
)
@staticmethod
def update_message_app_properties(message, key, value):
"""
Adds the given key/value to the application properties of the message.
:param pyamqp.Message message: Message.
:param str key: Key to set in application properties.
:param str Value: Value to set for key in application properties.
:rtype: pyamqp.Message
"""
if not message.application_properties:
message.application_properties = {}
message.application_properties.setdefault(key, value)
return message
@staticmethod
def get_batch_message_encoded_size(message):
"""
Gets the batch message encoded size given an underlying Message.
:param uamqp.BatchMessage message: Message to get encoded size of.
:rtype: int
"""
return message.gather()[0].get_message_encoded_size()
@staticmethod
def get_message_encoded_size(message):
"""
Gets the message encoded size given an underlying Message.
:param uamqp.Message message: Message to get encoded size of.
:rtype: int
"""
return message.get_message_encoded_size()
@staticmethod
def get_remote_max_message_size(handler):
"""
Returns max peer message size.
:param AMQPClient handler: Client to get remote max message size on link from.
:rtype: int
"""
return handler.message_handler._link.peer_max_message_size # pylint:disable=protected-access
@staticmethod
def create_retry_policy(config):
"""
Creates the error retry policy.
:param ~azure.eventhub._configuration.Configuration config: Configuration.
"""
return errors.ErrorPolicy(max_retries=config.max_retries, on_error=_error_handler)
@staticmethod
def create_link_properties(link_properties):
"""
Creates and returns the link properties.
:param dict[bytes, int] link_properties: The dict of symbols and corresponding values.
:rtype: dict
"""
return {types.AMQPSymbol(symbol): types.AMQPLong(value) for (symbol, value) in link_properties.items()}
@staticmethod
def create_connection(**kwargs):
"""
Creates and returns the uamqp Connection object.
:keyword str host: The hostname, used by uamqp.
:keyword JWTTokenAuth auth: The auth, used by uamqp.
:keyword str endpoint: The endpoint, used by pyamqp.
:keyword str container_id: Required.
:keyword int max_frame_size: Required.
:keyword int channel_max: Required.
:keyword int idle_timeout: Required.
:keyword Dict properties: Required.
:keyword int remote_idle_timeout_empty_frame_send_ratio: Required.
:keyword error_policy: Required.
:keyword bool debug: Required.
:keyword str encoding: Required.
"""
endpoint = kwargs.pop("endpoint") # pylint:disable=unused-variable
custom_endpoint_address = kwargs.pop("custom_endpoint_address") # pylint:disable=unused-variable
host = kwargs.pop("host")
auth = kwargs.pop("auth")
return Connection(
host,
auth,
**kwargs
)
@staticmethod
def close_connection(connection):
"""
Closes existing connection.
:param connection: uamqp or pyamqp Connection.
"""
connection.destroy()
@staticmethod
def get_connection_state(connection):
"""
Gets connection state.
:param connection: uamqp or pyamqp Connection.
"""
return connection._state # pylint:disable=protected-access
@staticmethod
def create_send_client(*, config, **kwargs): # pylint:disable=unused-argument
"""
Creates and returns the uamqp SendClient.
:param ~azure.eventhub._configuration.Configuration config: The configuration.
:keyword str target: Required. The target.
:keyword JWTTokenAuth auth: Required.
:keyword int idle_timeout: Required.
:keyword network_trace: Required.
:keyword retry_policy: Required.
:keyword keep_alive_interval: Required.
:keyword str client_name: Required.
:keyword dict link_properties: Required.
:keyword properties: Required.
"""
target = kwargs.pop("target")
retry_policy = kwargs.pop("retry_policy")
network_trace = kwargs.pop("network_trace")
return SendClient(
target,
debug=network_trace,
error_policy=retry_policy,
**kwargs
)
@staticmethod
def _set_msg_timeout(producer, timeout_time, last_exception, logger):
if not timeout_time:
return
remaining_time = timeout_time - time.time()
if remaining_time <= 0.0:
if last_exception:
error = last_exception
else:
error = OperationTimeoutError("Send operation timed out")
logger.info("%r send operation timed out. (%r)", producer._name, error) # pylint: disable=protected-access
raise error
producer._handler._msg_timeout = remaining_time * 1000 # type: ignore # pylint: disable=protected-access
@staticmethod
def send_messages(producer, timeout_time, last_exception, logger):
"""
Handles sending of event data messages.
:param ~azure.eventhub._producer.EventHubProducer producer: The producer with handler to send messages.
:param int timeout_time: Timeout time.
:param last_exception: Exception to raise if message timed out. Only used by uamqp transport.
:param logger: Logger.
"""
# pylint: disable=protected-access
producer._open()
producer._unsent_events[0].on_send_complete = producer._on_outcome
UamqpTransport._set_msg_timeout(producer, timeout_time, last_exception, logger)
producer._handler.queue_message(*producer._unsent_events) # type: ignore
producer._handler.wait() # type: ignore
producer._unsent_events = producer._handler.pending_messages # type: ignore
if producer._outcome != constants.MessageSendResult.Ok:
if producer._outcome == constants.MessageSendResult.Timeout:
producer._condition = OperationTimeoutError("Send operation timed out")
if producer._condition:
raise producer._condition
@staticmethod
def set_message_partition_key(message, partition_key, **kwargs): # pylint:disable=unused-argument
# type: (Message, Optional[Union[bytes, str]], Any) -> Message
"""Set the partition key as an annotation on a uamqp message.
:param ~uamqp.Message message: The message to update.
:param str partition_key: The partition key value.
:rtype: Message
"""
if partition_key:
annotations = message.annotations
if annotations is None:
annotations = {}
annotations[
UamqpTransport.PROP_PARTITION_KEY_AMQP_SYMBOL # TODO: see if setting non-amqp symbol is valid
] = partition_key
header = MessageHeader()
header.durable = True
message.annotations = annotations
message.header = header
return message
@staticmethod
def add_batch(event_data_batch, outgoing_event_data, event_data):
"""
Add EventData to the data body of the BatchMessage.
:param event_data_batch: BatchMessage to add data to.
:param outgoing_event_data: Transformed EventData for sending.
:param event_data: EventData to add to internal batch events. uamqp use only.
:rtype: None
"""
# pylint: disable=protected-access
event_data_batch._internal_events.append(event_data)
event_data_batch._message._body_gen.append(
outgoing_event_data._message
)
@staticmethod
def create_source(source, offset, selector):
"""
Creates and returns the Source.
:param str source: Required.
:param int offset: Required.
:param bytes selector: Required.
"""
source = Source(source)
if offset is not None:
source.set_filter(selector)
return source
@staticmethod
def create_receive_client(*, config, **kwargs): # pylint: disable=unused-argument
"""
Creates and returns the receive client.
:param ~azure.eventhub._configuration.Configuration config: The configuration.
:keyword str source: Required. The source.
:keyword str offset: Required.
:keyword str offset_inclusive: Required.
:keyword JWTTokenAuth auth: Required.
:keyword int idle_timeout: Required.
:keyword network_trace: Required.
:keyword retry_policy: Required.
:keyword str client_name: Required.
:keyword dict link_properties: Required.
:keyword properties: Required.
:keyword link_credit: Required. The prefetch.
:keyword keep_alive_interval: Required.
:keyword desired_capabilities: Required.
:keyword streaming_receive: Required.
:keyword message_received_callback: Required.
:keyword timeout: Required.
"""
source = kwargs.pop("source")
symbol_array = kwargs.pop("desired_capabilities")
desired_capabilities = None
if symbol_array:
symbol_array = [types.AMQPSymbol(symbol) for symbol in symbol_array]
desired_capabilities = utils.data_factory(types.AMQPArray(symbol_array))
retry_policy = kwargs.pop("retry_policy")
network_trace = kwargs.pop("network_trace")
link_credit = kwargs.pop("link_credit")
streaming_receive = kwargs.pop("streaming_receive")
message_received_callback = kwargs.pop("message_received_callback")
client = ReceiveClient(
source,
debug=network_trace, # pylint:disable=protected-access
error_policy=retry_policy,
desired_capabilities=desired_capabilities,
prefetch=link_credit,
receive_settle_mode=constants.ReceiverSettleMode.ReceiveAndDelete,
auto_complete=False,
**kwargs
)
# pylint:disable=protected-access
client._streaming_receive = streaming_receive
client._message_received_callback = (message_received_callback)
return client
@staticmethod
def open_receive_client(*, handler, client, auth):
"""
Opens the receive client and returns ready status.
:param ReceiveClient handler: The receive client.
:param ~azure.eventhub.EventHubConsumerClient client: The consumer client.
:param auth: Auth.
:rtype: bool
"""
# pylint:disable=protected-access
handler.open(connection=client._conn_manager.get_connection(
client._address.hostname, auth
))
@staticmethod
def check_link_stolen(consumer, exception):
"""
Checks if link stolen and handles exception.
:param consumer: The EventHubConsumer.
:param exception: Exception to check.
"""
if (
isinstance(exception, errors.LinkDetach)
and exception.condition == constants.ErrorCodes.LinkStolen # pylint: disable=no-member
):
raise consumer._handle_exception(exception) # pylint: disable=protected-access
@staticmethod
def create_token_auth(auth_uri, get_token, token_type, config, **kwargs):
"""
Creates the JWTTokenAuth.
:param str auth_uri: The auth uri to pass to JWTTokenAuth.
:param get_token: The callback function used for getting and refreshing
tokens. It should return a valid jwt token each time it is called.
:param bytes token_type: Token type.
:param ~azure.eventhub._configuration.Configuration config: EH config.
:keyword bool update_token: Required. Whether to update token. If not updating token,
then pass 300 to refresh_window.
"""
update_token = kwargs.pop("update_token")
refresh_window = 300
if update_token:
refresh_window = 0
token_auth = authentication.JWTTokenAuth(
auth_uri,
auth_uri,
get_token,
token_type=token_type,
timeout=config.auth_timeout,
http_proxy=config.http_proxy,
transport_type=config.transport_type,
custom_endpoint_hostname=config.custom_endpoint_hostname,
port=config.connection_port,
verify=config.connection_verify,
refresh_window=refresh_window
)
if update_token:
token_auth.update_token()
return token_auth
@staticmethod
def create_mgmt_client(address, mgmt_auth, config):
"""
Creates and returns the mgmt AMQP client.
:param _Address address: Required. The Address.
:param JWTTokenAuth mgmt_auth: Auth for client.
:param ~azure.eventhub._configuration.Configuration config: The configuration.
"""
mgmt_target = f"amqps://{address.hostname}{address.path}"
return AMQPClient(
mgmt_target,
auth=mgmt_auth,
debug=config.network_tracing
)
@staticmethod
def open_mgmt_client(mgmt_client, conn):
"""
Opens the mgmt AMQP client.
:param AMQPClient mgmt_client: uamqp AMQPClient.
:param conn: Connection.
"""
mgmt_client.open(connection=conn)
@staticmethod
def get_updated_token(mgmt_auth):
"""
Return updated auth token.
:param mgmt_auth: Auth.
"""
return mgmt_auth.token
@staticmethod
def mgmt_client_request(mgmt_client, mgmt_msg, **kwargs):
"""
Send mgmt request.
:param AMQP Client mgmt_client: Client to send request with.
:param str mgmt_msg: Message.
:keyword bytes operation: Operation.
:keyword operation_type: Op type.
:keyword status_code_field: mgmt status code.
:keyword description_fields: mgmt status desc.
"""
operation_type = kwargs.pop("operation_type")
operation = kwargs.pop("operation")
response = mgmt_client.mgmt_request(
mgmt_msg,
operation,
op_type=operation_type,
**kwargs
)
status_code = response.application_properties[kwargs.get("status_code_field")]
description = response.application_properties.get(
kwargs.get("description_fields")
) # type: Optional[Union[str, bytes]]
return status_code, description, response
@staticmethod
def get_error(status_code, description):
"""
Gets error corresponding to status code.
:param status_code: Status code.
:param str description: Description of error.
"""
if status_code in [401]:
return errors.AuthenticationException(
f"Management authentication failed. Status code: {status_code}, Description: {description!r}"
)
if status_code in [404]:
return ConnectError(
f"Management connection failed. Status code: {status_code}, Description: {description!r}"
)
return errors.AMQPConnectionError(
f"Management request error. Status code: {status_code}, Description: {description!r}"
)
@staticmethod
def check_timeout_exception(base, exception):
"""
Checks if timeout exception.
:param base: ClientBase.
:param exception: Exception to check.
"""
if not base.running and isinstance(
exception, compat.TimeoutException
):
exception = errors.AuthenticationException(
"Authorization timeout."
)
return exception
@staticmethod
def _create_eventhub_exception(exception):
if isinstance(exception, errors.AuthenticationException):
error = AuthenticationError(str(exception), exception)
elif isinstance(exception, errors.VendorLinkDetach):
error = ConnectError(str(exception), exception)
elif isinstance(exception, errors.LinkDetach):
error = ConnectionLostError(str(exception), exception)
elif isinstance(exception, errors.ConnectionClose):
error = ConnectionLostError(str(exception), exception)
elif isinstance(exception, errors.MessageHandlerError):
error = ConnectionLostError(str(exception), exception)
elif isinstance(exception, errors.AMQPConnectionError):
error_type = (
AuthenticationError
if str(exception).startswith("Unable to open authentication session")
else ConnectError
)
error = error_type(str(exception), exception)
elif isinstance(exception, compat.TimeoutException):
error = ConnectionLostError(str(exception), exception)
else:
error = EventHubError(str(exception), exception)
return error
@staticmethod
def _handle_exception(
exception, closable, *, is_consumer=False # pylint:disable=unused-argument
): # pylint:disable=too-many-branches, too-many-statements
try: # closable is a producer/consumer object
name = closable._name # pylint: disable=protected-access
except AttributeError: # closable is an client object
name = closable._container_id # pylint: disable=protected-access
if isinstance(exception, KeyboardInterrupt): # pylint:disable=no-else-raise
_LOGGER.info("%r stops due to keyboard interrupt", name)
closable._close_connection() # pylint:disable=protected-access
raise exception
elif isinstance(exception, EventHubError):
closable._close_handler() # pylint:disable=protected-access
raise exception
elif isinstance(
exception,
(
errors.MessageAccepted,
errors.MessageAlreadySettled,
errors.MessageModified,
errors.MessageRejected,
errors.MessageReleased,
errors.MessageContentTooLarge,
),
):
_LOGGER.info("%r Event data error (%r)", name, exception)
error = EventDataError(str(exception), exception)
raise error
elif isinstance(exception, errors.MessageException):
_LOGGER.info("%r Event data send error (%r)", name, exception)
error = EventDataSendError(str(exception), exception)
raise error
else:
if isinstance(exception, errors.AuthenticationException):
if hasattr(closable, "_close_connection"):
closable._close_connection() # pylint:disable=protected-access
elif isinstance(exception, errors.LinkDetach):
if hasattr(closable, "_close_handler"):
closable._close_handler() # pylint:disable=protected-access
elif isinstance(exception, errors.ConnectionClose):
if hasattr(closable, "_close_connection"):
closable._close_connection() # pylint:disable=protected-access
elif isinstance(exception, errors.MessageHandlerError):
if hasattr(closable, "_close_handler"):
closable._close_handler() # pylint:disable=protected-access
else: # errors.AMQPConnectionError, compat.TimeoutException
if hasattr(closable, "_close_connection"):
closable._close_connection() # pylint:disable=protected-access
return UamqpTransport._create_eventhub_exception(exception)
|
[
"noreply@github.com"
] |
openapi-env-test.noreply@github.com
|
c04b226c5c404639c6f664392701c8166de8a5e7
|
6e8581602f34e5d8fd4c927193cbe0b52b8b8302
|
/firebasetri.py
|
8bbec63156468fc38fc367d10aa8e529b94f3c97
|
[] |
no_license
|
akshaynaik797/index
|
5a9563fae6d6f91cea53ad16196c6f4b51a70e86
|
829f03adf27f53c132551cc78b08413fa980471a
|
refs/heads/master
| 2023-05-31T04:07:58.267651
| 2021-06-22T06:32:52
| 2021-06-22T06:32:52
| 309,669,567
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 973
|
py
|
import firebase_admin
from firebase_admin import credentials, messaging
cred = credentials.Certificate("iclaim-918b7-firebase-adminsdk-fpmv7-20c88a4f47.json")
firebase_admin.initialize_app(cred)
def send_to_token():
# [START send_to_token]
# This registration token comes from the client FCM SDKs.
registration_token = 'c8TNUgONQ-myTWY96XVZ7F:APA91bEIRZ2-WM2DcGDFCT7L9r8_i_Y1Us6VMmzmO8FugrsJEokiKsvr9qvwily3IhgeU3qLE44jN7287xEpkKjft2Bj2cSf9NAuO9GQ3E_7Tqepqb5pBsX0LfUpYT3Ac625QzJ2p69Z'
# See documentation on defining a message payload.
message = messaging.Message(
data={6
'score': '850',
'time': '2:45',
},
token=registration_token,
)
# Send a message to the device corresponding to the provided
# registration token.
response = messaging.send(message)
# Response is a message ID string.
print('Successfully sent message:', response)
# [END send_to_token]
send_to_token()
|
[
"akshaynaik797@gmail.com"
] |
akshaynaik797@gmail.com
|
c3d7754f00bdd9a1907ead98375207583f3c1a8c
|
44bb8188bd5256441c0f0bfd0d9c1dec1d4d7468
|
/Prob.py
|
1ae93eb5a00baad6f4f16452c686c3d384972c73
|
[] |
no_license
|
Yout-bit/Dice-sum-prob
|
4c523f5e8f2219db122e8ca31742201af7ce4530
|
cb7cd78a319d9642ccc4293733986e301d5ffed9
|
refs/heads/main
| 2023-06-18T08:04:27.312155
| 2021-07-08T09:38:14
| 2021-07-08T09:38:14
| 316,950,773
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,556
|
py
|
from flask import Flask
app = Flask(__name__)
@app.route("/")
def main():
d1 = int(input("Sides of first dice "))
d2 = int(input("Sides of second dice "))
d = []
dd = []
out = []
prob = []
for i in range (1, (d1+1)):
d.append(i)
for i in range (1, (d2+1)):
dd.append(i)
for i in range (len(d)):
for j in range (len(dd)):
out.append(d[i]+dd[j])
for i in range (2, (d1 + d2 + 1)):
x = (out.count(i))/(d1*d2)
prob.append([i,x])
print("""
------------------
| Ordered by sum |
------------------
""")
for i in range (len(prob)):
print (str(prob[i][0]) + " probability = " + str(prob[i][1]))
while True:
Switches = 0
for i in range(len(prob) - 1):
if prob[i][1] > prob[i + 1][1]:
x = prob[i + 1]
prob[i + 1] = prob[i]
prob[i] = x
Switches += 1
else:
pass
if Switches == 0:
break
print("""
--------------------------
| Ordered by probability |
--------------------------
""")
for i in range (len(prob)):
print (str(prob[i][0]) + " probability = " + str(prob[i][1]))
if __name__ == "__main__":
app.run(debug=True, host="0.0.0.0", port=80)
|
[
"noreply@github.com"
] |
Yout-bit.noreply@github.com
|
167f2c008bc7e27652e62eb02f51f79cf0b391d9
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/AtCoder/abc110/A/4933080.py
|
c8c5fd0fc109d36630a34059e1349e32e3756574
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303
| 2022-01-26T18:53:40
| 2022-01-26T18:53:40
| 211,783,197
| 23
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 105
|
py
|
A = list(map(int,input().split()))
A = sorted(A)
B = str(A[2]) + str(A[1])
B = int(B)
print(B+A[0])
|
[
"kwnafi@yahoo.com"
] |
kwnafi@yahoo.com
|
5da41aa6243ed621c51c11711148bdb0a6b5b087
|
0b05aca64d4e9c455da1a4e7fb2dff748e62b9cf
|
/apps/propositions/migrations/0019_auto_20210821_0530.py
|
3ac81db0e62b94592b7ba5b060686eac1e9141b8
|
[] |
no_license
|
abdulwahed-mansour/modularhistory
|
6215ba45604a1c497ef3a029b6c3e44523a66a75
|
8bbdc8eec3622af22c17214051c34e36bea8e05a
|
refs/heads/main
| 2023-09-03T21:27:17.730946
| 2021-11-05T21:12:39
| 2021-11-05T21:12:39
| 426,674,118
| 1
| 0
| null | 2021-11-10T15:25:24
| 2021-11-10T15:25:23
| null |
UTF-8
|
Python
| false
| false
| 3,575
|
py
|
# Generated by Django 3.1.13 on 2021-08-21 05:30
from typing import TYPE_CHECKING
import django.db.models.deletion
from django.db import migrations, models
import apps.topics.models.taggable
import core.fields.m2m_foreign_key
if TYPE_CHECKING:
from apps.propositions.models.proposition import Proposition as _Proposition
from apps.topics.models.topic import Topic
def forwards_func(apps, schema_editor):
Proposition: type['_Proposition'] = apps.get_model('propositions', 'Proposition')
TopicRelation = apps.get_model('propositions', 'TopicRelation')
p: '_Proposition'
for p in Proposition.objects.all():
tag: 'Topic'
for tag in p.tags.all():
TopicRelation.objects.get_or_create(topic=tag, content_object=p)
class Migration(migrations.Migration):
dependencies = [
('topics', '0003_auto_20210820_2138'),
('propositions', '0018_auto_20210821_0002'),
]
operations = [
migrations.AddField(
model_name='quoterelation',
name='deleted',
field=models.DateTimeField(editable=False, null=True),
),
migrations.AddField(
model_name='quoterelation',
name='verified',
field=models.BooleanField(default=False, verbose_name='verified'),
),
migrations.AlterField(
model_name='proposition',
name='tags',
field=models.ManyToManyField(
blank=True,
related_name='propositions_proposition_set',
to='topics.Topic',
verbose_name='tags',
),
),
migrations.CreateModel(
name='TopicRelation',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
('deleted', models.DateTimeField(editable=False, null=True)),
('verified', models.BooleanField(default=False, verbose_name='verified')),
('position', models.PositiveSmallIntegerField(blank=True, default=0)),
(
'content_object',
core.fields.m2m_foreign_key.ManyToManyForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name='topic_relations',
to='propositions.proposition',
verbose_name='proposition',
),
),
(
'topic',
core.fields.m2m_foreign_key.ManyToManyForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name='propositions_topicrelation_set',
to='topics.topic',
),
),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='proposition',
name='new_tags',
field=apps.topics.models.taggable.TagsField(
blank=True,
related_name='proposition_set',
through='propositions.TopicRelation',
to='topics.Topic',
verbose_name='tags',
),
),
migrations.RunPython(forwards_func, migrations.RunPython.noop),
]
|
[
"noreply@github.com"
] |
abdulwahed-mansour.noreply@github.com
|
76f85c0387e0f3625bf4e82becb43ca5b33cf2be
|
c3b95aa2d66409f549c33b785b1076cd97badd02
|
/api/client/src/pcluster_client/model/image_builder_image_status.py
|
0a1066967bbcbe0c6f06db7297463bec5398dc06
|
[
"Apache-2.0"
] |
permissive
|
sean-smith/aws-parallelcluster
|
40292c19ca7089d8fab2903862c0e6191b7ba4f0
|
2def5e9f0c83cd417ae76971f7be0748dbf9eafa
|
refs/heads/develop
| 2023-03-11T00:33:50.171466
| 2022-05-19T19:42:47
| 2022-05-19T19:45:47
| 157,621,383
| 2
| 0
|
Apache-2.0
| 2023-03-03T01:19:05
| 2018-11-14T22:47:45
|
Python
|
UTF-8
|
Python
| false
| false
| 7,427
|
py
|
"""
ParallelCluster
ParallelCluster API # noqa: E501
The version of the OpenAPI document: 3.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from pcluster_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class ImageBuilderImageStatus(ModelSimple):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('value',): {
'PENDING': "PENDING",
'CREATING': "CREATING",
'BUILDING': "BUILDING",
'TESTING': "TESTING",
'DISTRIBUTING': "DISTRIBUTING",
'INTEGRATING': "INTEGRATING",
'AVAILABLE': "AVAILABLE",
'CANCELLED': "CANCELLED",
'FAILED': "FAILED",
'DEPRECATED': "DEPRECATED",
'DELETED': "DELETED",
},
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'value': (str,),
}
@cached_property
def discriminator():
return None
attribute_map = {}
_composed_schemas = None
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
"""ImageBuilderImageStatus - a model defined in OpenAPI
Note that value can be passed either in args or in kwargs, but not in both.
Args:
args[0] (str):, must be one of ["PENDING", "CREATING", "BUILDING", "TESTING", "DISTRIBUTING", "INTEGRATING", "AVAILABLE", "CANCELLED", "FAILED", "DEPRECATED", "DELETED", ] # noqa: E501
Keyword Args:
value (str):, must be one of ["PENDING", "CREATING", "BUILDING", "TESTING", "DISTRIBUTING", "INTEGRATING", "AVAILABLE", "CANCELLED", "FAILED", "DEPRECATED", "DELETED", ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
# required up here when default value is not given
_path_to_item = kwargs.pop('_path_to_item', ())
if 'value' in kwargs:
value = kwargs.pop('value')
elif args:
args = list(args)
value = args.pop(0)
else:
raise ApiTypeError(
"value is required, but not passed in args or kwargs and doesn't have default",
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.value = value
if kwargs:
raise ApiTypeError(
"Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
kwargs,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
|
[
"charlesg3@gmail.com"
] |
charlesg3@gmail.com
|
752f4ca55382c8effe33b3bf53b17c748dcc7bc4
|
1fe8d4133981e53e88abf633046060b56fae883e
|
/venv/lib/python3.8/site-packages/tensorflow/python/util/all_util.py
|
c8f962d5cc1a185e0522f0a1ae9c6d04cd51f470
|
[] |
no_license
|
Akira331/flask-cifar10
|
6c49db8485038731ce67d23f0972b9574746c7a7
|
283e7a2867c77d4b6aba7aea9013bf241d35d76c
|
refs/heads/master
| 2023-06-14T16:35:06.384755
| 2021-07-05T14:09:15
| 2021-07-05T14:09:15
| 382,864,970
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:a170c3a8c37058bfb766d86fa872bc97a2a9455eaf1276230ed371bbe5d22521
size 4693
|
[
"business030301@gmail.com"
] |
business030301@gmail.com
|
dec0e0d0ae9802f6d1c1999c1944aa061b829a98
|
0c808f0db20c06aa4aa1e1f97225edd4c8d3674b
|
/certificate/asgi.py
|
fff6f2b3cd2937945b3f12173b467f722ef228c3
|
[] |
no_license
|
SSJ26/certificates-code
|
86bec19762056880eae0dc957077a9025e1a5f82
|
85f6b3a4b4e7147dbc9fcb3fec4e7628268f3680
|
refs/heads/main
| 2023-03-28T15:29:48.752769
| 2021-03-29T18:55:57
| 2021-03-29T18:55:57
| 352,591,080
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
"""
ASGI config for certificate project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'certificate.settings')
application = get_asgi_application()
|
[
"38113204+SSJ26@users.noreply.github.com"
] |
38113204+SSJ26@users.noreply.github.com
|
dba7be6cf6851ccc19e3ed8c1c2b5cdef9d72d93
|
cf91b4e409ff41debf0b50ff6c0a944afee7114c
|
/conanfile.py
|
730e47936de47bcd96d3ef6060e358a7710b8b3c
|
[] |
no_license
|
bilke/conan-hello
|
eb835e0be0d0ec29ac444883b99314c0832355e6
|
3d0cd3cb4ca1b1f68ec4e31b1537d19d03aad9cc
|
refs/heads/master
| 2016-08-11T21:02:47.513065
| 2016-03-17T07:38:06
| 2016-03-17T07:38:06
| 54,034,992
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 799
|
py
|
from conans import ConanFile, CMake
class HelloConan(ConanFile):
name = "Hello"
version = "0.2"
generators = "cmake"
settings = "os", "compiler", "build_type", "arch"
exports = ["CMakeLists.txt"]
url="http://github.com/bilke/conan-hello"
license="none"
def source(self):
self.run("git clone https://github.com/memsharded/hello.git")
def build(self):
cmake = CMake(self.settings)
self.run("cmake . %s" % cmake.command_line)
self.run("cmake --build . %s" % cmake.build_config)
def package(self):
self.copy("*.h", dst="include", src="hello")
self.copy("*.lib", dst="lib", src="hello/lib")
self.copy("*.a", dst="lib", src="hello/lib")
def package_info(self):
self.cpp_info.libs = ["hello"]
|
[
"lars.bilke@ufz.de"
] |
lars.bilke@ufz.de
|
f7e1fdc9190476e573cb65b49eccd73e25157333
|
925b818801ef282b4fe65f6ace749304832d8b96
|
/symlearn/recursnn/recursnn_helper.py
|
99404e8086fadb7135e5dc1177afe6ac34e6645b
|
[] |
no_license
|
renewang/Symlearn
|
b5f9a270fe75dfc5ac1710ccded59c2231eae5f0
|
1da4c4eb6af0d6d839080623b61116baa8d2299d
|
refs/heads/master
| 2022-12-26T06:24:32.431991
| 2018-11-20T15:28:56
| 2018-11-20T15:28:56
| 89,684,591
| 0
| 0
| null | 2022-06-21T21:11:23
| 2017-04-28T08:19:26
|
Python
|
UTF-8
|
Python
| false
| false
| 13,437
|
py
|
from itertools import groupby
from io import StringIO
from sklearn.preprocessing import LabelBinarizer
from symlearn.csgraph import adjmatrix
from . import recursnn_utils
from .recursnn_utils import (visit_inodes_inorder,
calculate_spanweight, get_spanrange)
import numpy as np
import scipy as sp
import logging
logger = logging.getLogger(__name__)
def computeNumericalGradient(costfunc, param, keyword=None, eps=1e-4):
"""
A python version of computeNumericalGradient.m from Andrew Ng's Machine
Learning Open Course (PA4).
The description is from the computeNumericalGradient.m file:
%COMPUTENUMERICALGRADIENT Computes the gradient using "finite differences"
%and gives us a numerical estimate of the gradient.
% numgrad = COMPUTENUMERICALGRADIENT(J, theta) computes the numerical
% gradient of the function J around theta. Calling y = J(theta) should
% return the function value at theta.
% Notes: The following code implements numerical gradient checking, and
% returns the numerical gradient.It sets numgrad(i) to (a numerical
% approximation of) the partial derivative of J with respect to the
% i-th input argument, evaluated at theta. (i.e., numgrad(i) should
% be the (approximately) the partial derivative of J with respect
% to theta(i).)
%
@params costfunc is a partial bound functions whose first two arguments
will bind precomputed tree and childrenmat
@params learner is a recursive-auto-encoder instance
potential example code:
func = lambda x, y, score_func: score_func(x, y)
cal_cost_obj = partial(func, X_sim, y_sim)
numgrads = computeNumericalGradient(cal_cost_obj, logloss, classifier.W)
assert(np.all(np.allclose(numgrads, analgrads))==True)
"""
numgrad = np.zeros_like(param)
perturb = np.zeros_like(param)
for i, (p, n) in enumerate(zip(
np.nditer(perturb, order='C', op_flags=['writeonly']),
np.nditer(numgrad, order='C', op_flags=['writeonly']))):
p[...] = eps
# ensure the change is effect
assert(param.ndim == 1 or perturb[i//perturb.shape[1],
i % perturb.shape[1]] == eps)
if keyword:
# calculate cost function with parameter - purturb
loss_rhs = costfunc(**{keyword: (param - perturb)})
# calculate cost function with parameter + purturb
loss_lhs = costfunc(**{keyword: (param + perturb)})
else:
loss_rhs = costfunc((param - perturb))
loss_lhs = costfunc((param + perturb))
# Compute Numerical Gradient
n[...] = (loss_lhs - loss_rhs)/(2*eps)
p[...] = 0.
# ensure the change is effect
assert(param.ndim == 1 or perturb[i//perturb.shape[1],
i % perturb.shape[1]] == 0.)
return(numgrad)
def label_binarize(y_sim, *args, **kwargs):
"""
@param y_sim is y label either in 1D or on-hot-coding
"""
if np.all(np.unique(y_sim) == np.arange(2)): # binary cases
# TODO: change to sklearn.preprocessing.label_binarize, and given
# classes = 2
y = np.vstack([1-y_sim, y_sim]).T
else:
y = LabelBinarizer().fit_transform(y_sim)
return(y)
class Memoize():
"""
Memoize(fn) - an instance which acts like fn but memoizes its arguments
Will only work on functions with non-mutable arguments
Borrow from memorizing recipe: http://code.activestate.com/recipes/52201/
"""
def __init__(self, fn):
self.fn = fn
self.memo = {}
def __call__(self, keyword, *args, index=-1):
key = self._compute_hash_key(*args)
if key not in self.memo:
self.memo[key] = self.fn(*args)
if hasattr(self.memo[key], keyword):
return getattr(self.memo[key], keyword)[index]
else:
return None
def _compute_hash_key(self, *args):
strbuf = StringIO()
for a in args:
strbuf.writelines(str(a))
return(hash(strbuf.getvalue()))
def optimize_wrapper(wrapped_func, newshape):
"""
to return a new function which can take 1D as input and then pass to the
wrapped_func for actual calculation
@param wrapped_func is a function receives reshape arguments by
optimize_wrapper function
@param newshape is a new shape in tuple passing to wrapped_func
"""
def wrapper(param, *args, **kwargs):
# the required transformed here
param_t = param.reshape(newshape)
result = wrapped_func(param_t, *args, **kwargs)
return(result.ravel())
return(wrapper)
def modify_partial(func, *args):
"""
make partial to bind 1st parameter
@param func is the function object whose 1st paramter will be pre-bound
@param args are the rest arguments will be passed into func
"""
def bind_first_arg(x):
res = func(x, *args)
return(res)
return(bind_first_arg)
def activation_gradient(activation, X):
"""
manually calculate f'(x)
@param activation is a function object which serves as activationi function
in neural network. Either is tanh or expit
@param X is the input numpy array
"""
z = 1.0
w = 1.0
if activation.__name__ == 'tanh':
z = 2.0
w = 4.0
elif activation.__name__ != 'expit':
raise NotImplementedError
return(w*sp.special.expit(z*X)*(1-sp.special.expit(z*X)))
def compute_len_stats(len_dist):
"""
return lengths distribution of sentence plus cumulative sum
@param len_dist is a list whose elements are corresponding sentence length
"""
# TODO: consider to replace with numpy.bincount
len_count = np.zeros(len(np.unique(len_dist)) + 1, dtype=np.int)
for i, cur_len in enumerate(np.unique(len_dist)):
len_count[i + 1] = np.sum(len_dist == cur_len)
return(np.unique(len_dist), len_count)
def _iter_samples(word2index, embedding, **kwargs):
"""
prepare sample for recursive trees-based training
@param word2index is a list/OrderedDict/numpy.ndarray which each entry
holds the word order of sentence
@param embedding is a word embedding matrix used for construct input word
vectors
"""
params_name = [
'input_vectors',
'traversal_order',
'inverse_inputs',
'parent_children_mapping',
'true_output',
'span_weight']
params_types = [list] * 6
hyper_params = {'error_weight': 0.1, 'alpha': 0.01,
'classification_threshold': 0.5, 'corruption_level': 0.}
# TODO: improve this code
for param_name in hyper_params.keys():
# TODO: check out argparser in python std
if param_name in kwargs:
logger.debug(
"hyperparameters {} gets updated to {:.3f} (default:"
"{:.3f})".format(param_name,
kwargs[param_name], hyper_params[param_name]))
hyper_params[param_name] = kwargs[param_name]
if 'true_output' not in kwargs:
params_name.remove('true_output')
true_output = None
else:
true_output = kwargs['true_output']
if kwargs.get('trees'):
trees = kwargs['trees']
iter_trees = iter(trees)
else:
trees = None
iter_trees = None
if kwargs.get('max_len'):
max_len = int(kwargs['max_len'])
else:
max_len = np.max(list(map(len, word2index)))
examples = []
index2len = []
iter_over_word2index = groupby(word2index, key=len)
start = 0
for cur_len, word_orders in iter_over_word2index:
if cur_len > max_len:
break
# allocate new list
examples.append({})
tree_size = 2 * cur_len - 1
for key, cls in zip(params_name, params_types):
examples[-1][key] = cls()
for word_order in word_orders:
if not trees and kwargs.get('autoencoder'):
est_gtree, est_cost = recursnn_utils.build_greedy_tree(
word_order, embedding=embedding,
autoencoder=kwargs.get('autoencoder'))
else:
try:
est_gtree = next(iter_trees)
except StopIteration:
if cur_len != max_len:
raise RuntimeError
else:
pass
except:
raise
vocab = recursnn_utils.embed_words(word_order, embedding)
est_gtinfo = recursnn_utils.preprocess_input(est_gtree, vocab)
assert(
np.all(np.linalg.norm(est_gtinfo[2][np.sum(
est_gtinfo[2], axis=1) > 0], 2, axis=1)) == 1.0)
if kwargs.get('span_weight'):
span_weight = est_gtinfo[3]
else:
span_weight = 0.5 * np.ones_like(est_gtinfo[3])
if isinstance(true_output, np.ndarray):
assignments = [est_gtinfo[2], est_gtinfo[0],
np.zeros_like(est_gtinfo[2]), est_gtinfo[1],
true_output[start:start + tree_size],
span_weight]
start += tree_size
else:
assignments = [est_gtinfo[2], est_gtinfo[0],
np.zeros_like(est_gtinfo[2]), est_gtinfo[1],
span_weight]
for key, val in zip(params_name, assignments):
examples[-1][key].append(val)
sample_size = len(examples[-1]['input_vectors'])
for key, val in examples[-1].items():
if isinstance(val, list):
# first, combine the matrices for all samples
examples[-1][key] = np.concatenate(val, axis=0)
assert(examples[-1][key].ndim <= 2)
if key == 'traversal_order':
examples[-1][key] += np.repeat(np.arange(sample_size) *
tree_size, cur_len - 1)
elif key == 'parent_children_mapping':
examples[-1][key] += (
np.repeat(np.arange(sample_size) *
tree_size, tree_size))[:, np.newaxis]
elif key == 'true_output':
if examples[-1][key].ndim == 2:
examples[-1][key] = np.argmax(examples[-1][key], axis=1)
index2len.append(cur_len)
examples[-1].update(hyper_params)
examples[-1]['sample_size'] = sample_size
return((np.array(index2len), examples))
def _combine_sample(examples):
"""
combine sample produce by _iter_sample which are grouped by lengths into
one chunk
"""
combined_example = {}
for example in examples:
for key, value in example.items():
ent = combined_example.setdefault(key, [])
ent.append(value)
assert(np.all(np.asarray(list(map(len, combined_example.values()))) ==
len(examples)))
return(combined_example)
def _iter_matrix_groups(matrix, **kwargs):
if 'embedding' in kwargs and 'vocab_size' in kwargs:
raise RuntimeError(
'embedding and vocab_size cannot be specified at the same time')
elif 'embedding' not in kwargs and 'vocab_size' not in kwargs:
raise RuntimeError(
'embedding and vocab_size cannot be specified at the same time')
embedding = kwargs.get('embedding', None)
if hasattr(embedding, 'components_'):
embedding = kwargs.get('embedding').components_.T
vocab_size = kwargs.get('vocab_size', None) or embedding.shape[0]
if kwargs.get('traverse_func'):
traverse_func = kwargs.get('traverse_func')
else:
traverse_func = visit_inodes_inorder
mapping = recursnn_utils.retrieve_mapping(matrix)
span_range = get_spanrange(matrix)
traversal_order = traverse_func(matrix)
# checking if indexing in mapping are attainable (not heap-ordered)
if np.max(mapping) >= len(mapping):
remap_lookup = np.ma.MaskedArray(np.zeros(matrix.shape[0],
dtype=np.intp), mask=matrix.diagonal() == 0)
remap_lookup[~remap_lookup.mask] = np.arange(np.sum(
[matrix.diagonal() > 0]))
for row in np.nditer(mapping, op_flags=['readwrite'],
flags=['external_loop']):
row[...] = remap_lookup[row]
for cols in np.nditer(traversal_order, op_flags=['readwrite'],
flags=['external_loop']):
cols[...] = remap_lookup[cols]
if kwargs.get('return_weight', False):
span_weight = calculate_spanweight(matrix, span_range)
node_weight = np.nan_to_num(
np.clip(span_weight, 1, np.inf)[mapping] / np.sum(
np.clip(span_weight, 1, np.inf)[mapping], axis=1,
keepdims=True))
else:
node_weight = span_range
if embedding is None:
# return indices
word_mat = recursnn_utils.get_wordindices(matrix, vocab_size,
node_weight)
else:
word_mat = recursnn_utils.get_wordmatrix(matrix, embedding)
return(traversal_order, word_mat, node_weight, mapping)
|
[
"renewang@users.noreply.github.com"
] |
renewang@users.noreply.github.com
|
729d713a868825738180938e1458012ac6cbbb8b
|
200fc6bfe4b6a3004836bf527e62a3dbdc93ff1c
|
/users/apps.py
|
666435576b2933cb56a397ed1c08a38f87b5cdf6
|
[] |
no_license
|
risknet/reference_python_web_standard
|
0e77c08da300c345e22608e68e34ef1c5b665c11
|
e2f30f3efc1448094ce5e85562ac3c27d37f4fc4
|
refs/heads/master
| 2022-12-05T15:09:45.861554
| 2020-02-17T15:03:50
| 2020-02-17T15:03:50
| 241,135,996
| 0
| 0
| null | 2022-11-22T05:19:47
| 2020-02-17T15:04:32
|
Python
|
UTF-8
|
Python
| false
| false
| 161
|
py
|
from django.apps import AppConfig
class UsersConfig(AppConfig):
name = 'users'
# triggering signal
def ready(self):
import users.signals
|
[
"jaelee2009@gmail.com"
] |
jaelee2009@gmail.com
|
53485b533dd5e0253e0861a3134cfd9a2f72ecee
|
8cb5ebd1993d23074f55db2e8aa71eb24dd8ce17
|
/lib/screens/custom_widgets/tasklist.py
|
3dbaa62152ec060c21e08cebd219fb06f8cf780b
|
[] |
no_license
|
JahnsMichael/qtile
|
0f6bda0dbfe8aabe7fe4000b7d6e6d3fd9164416
|
419dc2d0f2cc3b4c63c66c98ccbca1686a5b873d
|
refs/heads/master
| 2023-07-20T22:25:41.063340
| 2021-09-01T15:44:02
| 2021-09-01T15:44:02
| 361,193,091
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 660
|
py
|
from libqtile.widget.tasklist import TaskList
class CustomTaskList(TaskList):
def __init__(self, **config):
super().__init__(**config)
self.add_callbacks({
'Button1': self.select_window,
'Button2': self.kill_window,
'Button3': self.maximize_window,
})
def kill_window(self):
if self.clicked:
window = self.clicked
window.group.focus(window, False)
window.cmd_kill()
def maximize_window(self):
if self.clicked:
window = self.clicked
window.group.focus(window, False)
window.cmd_toggle_maximize()
|
[
"michael.jf.jm@gmail.com"
] |
michael.jf.jm@gmail.com
|
a0ec26a115c3a0897cbf92beda45b68db9012d59
|
63a4d28c5af83dc8df0a2939a5817817658e9117
|
/pet/urls.py
|
900c720099a32d32b12bd0970064ae46e61e8e5f
|
[] |
no_license
|
arv1983/KenziePet
|
12da6c3b94cd9e9be6b7003fea28e8f6b9fad7e6
|
c4a456247c6bf3a141de1bed060c231a37853e10
|
refs/heads/main
| 2023-07-17T00:47:16.405185
| 2021-08-06T17:11:00
| 2021-08-06T17:11:00
| 397,413,149
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 177
|
py
|
from django.urls import path
from .views import GroupView
urlpatterns = [
path('animals/', GroupView.as_view()),
path('animals/<int:animal_id>/', GroupView.as_view())
]
|
[
"ciclista-rs@hotmail.com"
] |
ciclista-rs@hotmail.com
|
2b7c279a57035e546398ab3c7e54d5dccfe603b5
|
62179a165ec620ba967dbc20016e890978fbff50
|
/tests/common/test_builder_state.py
|
2353979abfefdfd4876a61cf0799d1b828e0e4e2
|
[
"Apache-2.0"
] |
permissive
|
openvinotoolkit/nncf
|
91fcf153a96f85da166aacb7a70ca4941e4ba4a4
|
c027c8b43c4865d46b8de01d8350dd338ec5a874
|
refs/heads/develop
| 2023-08-24T11:25:05.704499
| 2023-08-23T14:44:05
| 2023-08-23T14:44:05
| 263,687,600
| 558
| 157
|
Apache-2.0
| 2023-09-14T17:06:41
| 2020-05-13T16:41:05
|
Python
|
UTF-8
|
Python
| false
| false
| 4,436
|
py
|
# Copyright (c) 2023 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, Union
from nncf import NNCFConfig
from nncf.api.compression import CompressionAlgorithmController
from nncf.api.compression import TModel
from nncf.common.composite_compression import CompositeCompressionAlgorithmBuilder
from nncf.common.compression import BaseCompressionAlgorithmBuilder
from nncf.common.graph.transformations.layout import TransformationLayout
from nncf.common.utils.registry import Registry
STATE_ATTR = "state"
DIFF_STATE_ATTR = STATE_ATTR + "__"
class A(BaseCompressionAlgorithmBuilder):
def __init__(self, config: NNCFConfig, should_init: bool = True, state_value: int = 1, name: str = "A"):
setattr(self, Registry.REGISTERED_NAME_ATTR, name)
super().__init__(config, should_init)
self.state_value = state_value
def _load_state_without_name(self, state_without_name: Dict[str, Any]):
self.state_value = state_without_name.get(STATE_ATTR)
def _get_state_without_name(self) -> Dict[str, Any]:
return {STATE_ATTR: self.state_value}
def apply_to(self, model: TModel) -> TModel:
pass
def _build_controller(self, model: TModel) -> CompressionAlgorithmController:
pass
def get_transformation_layout(self, model: TModel) -> TransformationLayout:
pass
def initialize(self, model: TModel) -> None:
pass
class CA(CompositeCompressionAlgorithmBuilder):
@property
def name(self) -> str:
pass
def add(self, child_builder) -> None:
self._child_builders.append(child_builder)
def apply_to(self, model: TModel) -> TModel:
pass
def build_controller(self, model: TModel) -> CompressionAlgorithmController:
pass
def get_transformation_layout(self, model: TModel) -> TransformationLayout:
pass
def initialize(self, model: TModel) -> None:
pass
def _get_mock_config(algo_name: Union[List[str], str]) -> NNCFConfig:
config = NNCFConfig()
config["input_info"] = {"sample_size": [1, 1]}
if isinstance(algo_name, list):
lst = []
for alg_n in algo_name:
lst.append({"algorithm": alg_n})
config["compression"] = lst
else:
assert isinstance(algo_name, str)
config["compression"] = {"algorithm": algo_name}
return config
def test_builder_state_load():
config = _get_mock_config("A")
builder = A(config, True, 1)
builder.state_value += 1
saved_state = builder.get_state()
builder = A(config, True, 1)
builder.load_state(saved_state)
assert builder.state_value == 2
def test_basic_composite_builder_load():
def create_builder():
config = _get_mock_config(["A", "A2"])
c = CA(config, True)
a = A(config, True, 1)
b = A(config, True, 2, "A2")
c.add(a)
c.add(b)
return c, a, b
composite_bldr, bldr1, bldr2 = create_builder()
bldr1.state_value += 1
bldr2.state_value += 2
saved_state = composite_bldr.get_state()
composite_bldr, bldr1, bldr2 = create_builder()
composite_bldr.load_state(saved_state)
assert bldr1.state_value == 2
assert bldr2.state_value == 4
def test_advanced_composite_ctrl_load():
config = _get_mock_config(["A", "A2", "A3"])
composite_builder = CA(config, True)
ctrl1 = A(config, True, 1)
ctrl2 = A(config, True, 2, name="A2")
composite_builder.add(ctrl1)
composite_builder.add(ctrl2)
ctrl1.state_value += 1
ctrl2.state_value += 2
saved_state = composite_builder.get_state()
composite_builder = CA(config, True)
ctrl1 = A(config, True, 1)
ctrl3 = A(config, True, 3, name="A3")
composite_builder.add(ctrl1)
composite_builder.add(ctrl3)
composite_builder.load_state(saved_state)
assert ctrl1.state_value == 2
assert ctrl3.state_value == 3
|
[
"noreply@github.com"
] |
openvinotoolkit.noreply@github.com
|
98b5212ac8776c8d2c2b282ba3d02150f45e9864
|
f6576c6551a1c44a0b6753587a6ce5cff978cc7c
|
/crawler/temp.py
|
0f08e16404990d70f775ad5c759c3c9a438655ba
|
[] |
no_license
|
sharma-anshul/nRelate-Anshul
|
4cf1b6f739c82397c8c471c51d398bdaa5cf4ac7
|
63d8ca30c2e1812940156e69266f474d8eead7d8
|
refs/heads/master
| 2021-01-10T21:37:20.711072
| 2013-07-26T18:23:22
| 2013-07-26T18:23:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,102
|
py
|
import classifier
import distmat
import patternify
def compute_classifier_domain_info():
patterns = set()
# Gets a sample of links from the website starting from the homepage
already_analyzed = set()
links = self._get_all_internal_links(self._start_url)
for i in range(depth - 1):
if len(links) > 3000:
break
sub_links = set()
cpt = 0
for link in links:
print "link %d/%d [%s]" % (cpt, len(links), link)
cpt += 1
if (len(sub_links) > 3000):
break
if not link in already_analyzed:
sub_links = sub_links.union(self._get_all_internal_links(link))
already_analyzed.add(link)
links = links.union(sub_links)
self.log("Finished. Got %d sublinks on %d levels" % (len(links), depth))
# Identifies links pointing to content and generates patterns
ones, zeros = classifier.testSVM(links)
mean, distMat = distmat.getDistanceMatrix(ones)
content = []
for link in distMat:
if sum(distMat[link])/len(distMat) >= mean:
content += [link]
patterns = patternify.getPatterns(content)
return patterns
|
[
"ansharma@seas.upenn.edu"
] |
ansharma@seas.upenn.edu
|
f05cbb3d16420419b9e94c281c67618ca1759539
|
ce76b3ef70b885d7c354b6ddb8447d111548e0f1
|
/get_long_week/large_fact_and_good_year/want_fact/place_or_number.py
|
b65c0ab506ff8f0f45c1acdf99cbe05aa37dd954
|
[] |
no_license
|
JingkaiTang/github-play
|
9bdca4115eee94a7b5e4ae9d3d6052514729ff21
|
51b550425a91a97480714fe9bc63cb5112f6f729
|
refs/heads/master
| 2021-01-20T20:18:21.249162
| 2016-08-19T07:20:12
| 2016-08-19T07:20:12
| 60,834,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 251
|
py
|
#! /usr/bin/env python
def know_public_part_by_high_problem(str_arg):
good_problem(str_arg)
print('government')
def good_problem(str_arg):
print(str_arg)
if __name__ == '__main__':
know_public_part_by_high_problem('different_man')
|
[
"jingkaitang@gmail.com"
] |
jingkaitang@gmail.com
|
9476c7c9346a2c3ce4257c6f31fcd18dbb75b089
|
548259a1cd5a0597a53030e48e9bf8437050d3fd
|
/blockchain_app/urls.py
|
3d20899ae94c366510538e8fefb545db7e7bb107
|
[] |
no_license
|
jaqb8/blockchain-app
|
3de8e7519a773f25a6d546df7f2b773dc736d5dd
|
765e6478676ab16bd812157d2741660673487235
|
refs/heads/main
| 2023-07-10T08:17:56.304102
| 2020-11-19T14:04:15
| 2020-11-19T14:04:15
| 303,366,092
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 800
|
py
|
"""blockchain_app URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('node.urls'))
]
|
[
"qba.anisz@gmail.com"
] |
qba.anisz@gmail.com
|
5f16f0b6165e23db3dcdc7170c7e4f92cd1ec027
|
d1444abdc292806d175e5550d28adfd5237a462b
|
/Python-Refresher/29_imports_in_python/code.py
|
178141af9628d18b66cd6b5d43197498a6dec770
|
[] |
no_license
|
Bjcurty/PycharmProjects
|
946cc1f9c215f48e2fb2e7ea3f683bddceabd00a
|
6e705e445aa207792b596c481d01b27272f0ffd2
|
refs/heads/master
| 2022-09-05T13:24:09.740774
| 2020-05-04T15:15:45
| 2020-05-04T15:15:45
| 257,973,095
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 267
|
py
|
# from mymodule import divide
#
# print(divide(10, 2))
# print(__name__)
# import sys # allows python to find files locally
# # error because it doesn't exist
# # import sysadsf
#
# print(sys.path)
import sys
import mymodule
# print(sys.modules)
# print(mymodule)
|
[
"bradley@haulynx.com"
] |
bradley@haulynx.com
|
446557cbbd515e88900a2ae6dd079d11c860fb8f
|
1a756d45d9e6294ae7945bd018fb0ccebb60806d
|
/Chapter3/3.3/3.3.3/stack.py
|
55270b39e3042169e92a69d6fc306c6e21c01874
|
[] |
no_license
|
WustAnt/Python-Algorithm
|
0a7f32dfa3da7a640eb303c1e2cbe62896df025e
|
5da5236e6c0cd3c8410c3925c5744c97d20bfc26
|
refs/heads/master
| 2022-12-07T22:19:20.130446
| 2020-08-21T09:25:28
| 2020-08-21T09:25:28
| 284,865,740
| 8
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 846
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2020/8/1 9:25
# @Author : WuatAnt
# @File : stack.py
# @Project : Python数据结构与算法分析
"""
Stack()使用列表保存数据,默认将列表的尾部作为栈的顶端
"""
class Stack:
"""
Stack():创建空栈
.push(item):将一个元素添加到栈的顶端
.pop():将栈顶端的元素移除,返回一个参数item
.peek():返回栈顶端的元素
.isEmpty():检查栈是否为空,返回布尔值
.size():返回栈中元素的数目
"""
def __init__(self):
self.items = []
def isEmpty(self):
return self.items == []
def push(self,item):
self.items.append(item)
def pop(self):
return self.items.pop()
def peek(self):
return self.items[-1]
def size(self):
return len(self.items)
|
[
"ghostimage@163.com"
] |
ghostimage@163.com
|
1a4585ae35eeb8dec7cfd5a98936ed5776bca91b
|
ecba2b98fbf547e6a5b48f180f2c837ce3e185e6
|
/Python/AI learns to play FlappyBird/main.py
|
5d4fa830158def924de4b1aae074deaf44f80ba8
|
[] |
no_license
|
vafakaramzadegan/scriptshot.ir
|
0bbce6350fe88563e5584149942d4c11b8528e23
|
c10ac4407be9e94a1ddddd43a9ed8a7163210b35
|
refs/heads/master
| 2023-07-11T14:30:48.815612
| 2021-08-22T03:48:30
| 2021-08-22T03:48:30
| 252,829,910
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,631
|
py
|
'''
FlappyBird AI
---------------------------------
AI plays FlappyBird using Genetic Algorithm, implemented in
Tensorflow and Keras.
Author:
Scriptshot.ir
https://github.com/vafakaramzadegan/scriptshot.ir
'''
import pygame
from functions import functions
import random
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
import numpy as np
'''
init PyGame
'''
pygame.init()
clock = pygame.time.Clock()
WINDOW_WIDTH, WINDOW_HEIGHT = 420, 768
WINDOW = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))
pygame.display.set_caption('FlappyBird AI')
FLOOR_HEIGHT = 150
# tools for loading sprites and drawing on screen
toolkit = functions(WINDOW)
'''
default settings for AI
'''
# number of birds in each generation
BIRDS_COUNT = 50
# number of neurons to mutate in each generation
MUTATION_RATE = .2
# draw the guide lines
SHOW_LINES = False
# the data of the best brain
BEST_BRAIN = None
# the fittness of the best bird
BEST_FITNESS = 0.0
# the index of current generation
GENERATION_INDEX = 1
'''
Pipe object
'''
class Pipe:
img = toolkit.sprites['world']['pipe']
# the gap between two pipes (in pixels)
GAP = 150
# how fast the pipes move
velocity = 5
def __init__(self, x):
self.x = x
self.height = 0
self.top_pipe_top = 0
self.bottom_pipe_top = 0
self.PIPE_BOTTOM = self.img
# we build the top pipe by flipping the bottom pipe
self.PIPE_TOP = pygame.transform.flip(self.img, False, True)
self.passed = False
self.set_height()
def set_height(self):
# random height for pipes
self.height = random.randrange(50, 400)
self.top_pipe_top = self.height - self.PIPE_TOP.get_height()
self.bottom_pipe_top = self.height + self.GAP
def move(self):
self.x -= self.velocity
def draw(self, window):
window.blit(self.PIPE_TOP, (self.x, self.top_pipe_top))
window.blit(self.PIPE_BOTTOM, (self.x, self.bottom_pipe_top))
def collide(self, bird, window):
# we check for collision by using masks
bird_mask = bird.get_mask()
top_mask = pygame.mask.from_surface(self.PIPE_TOP)
bottom_mask = pygame.mask.from_surface(self.PIPE_BOTTOM)
top_offset = (self.x - bird.x, self.top_pipe_top - round(bird.y))
bottom_offset = (self.x - bird.x, self.bottom_pipe_top - round(bird.y))
# check for overlap
b_point = bird_mask.overlap(bottom_mask, bottom_offset)
t_point = bird_mask.overlap(top_mask, top_offset)
# return True if there is a collision
return True if b_point or t_point else False
'''
The Brain of a bird
'''
class Brain:
def __init__(self):
# a Keras.Sequential model is needed for our DNN
self.model = Sequential()
# this is the hidden layer with four neurons.
# we have these three inputs:
# 1) bird's y position
# 2) bird's vertical distance to the top pipe
# 3) bird's vertical distance to the bottom pipe
# so the input_shape for the hidden layer is a tensor with three elements
self.model.add(Dense(units=4, input_shape=(3, ), activation='sigmoid'))
# the output layer has two neurons:
# 1) jump
# 2) no jump
self.model.add(Dense(units=2, activation='softmax'))
def loadFromFile(self, fn):
data = np.load(fn, allow_pickle=True)
self.setData(data)
def saveToFile(self, fn):
np.save(fn, self.model.get_weights())
# set the weights and biases of neurons
def setData(self, brainData):
brain = np.array(brainData)
# mutation happens here
for index, data in enumerate(brain):
if random.uniform(0, 1) <= MUTATION_RATE:
brain[index] = data + random.uniform(-1, 1)
# set mutated weights
self.model.set_weights(brain)
def decide(self, data):
return self.model.predict(data)
def copy(self):
return self.model.get_weights()
'''
Bird object
'''
class Bird:
img = toolkit.sprites['bird']['idle']
crashed = False
# number of frames it takes for the bird to change the position of its wing
FLAPS_ANIMATION_TIME = 2
# how fast the bird rotates
ROTATE_VEL = 15
def __init__(self, x, y):
self.x = x
self.y = y
self.tilt = 0
self.tick = 0
self.frameIndex = 0
self.vel = 0
self.height = self.y
self.brain = Brain()
# indicates how long the bird survived
self.fitness = .0
def jump(self):
self.vel = -8
self.tick = 0
self.height = self.y
def move(self):
self.tick += 1
# the bird has survived for one more frame, so the fitness increases.
# the fitness could be altered in various ways.
self.fitness += .01
# calculate the displacement based on the time of freefall
self.displacement = self.vel*(self.tick) + 1.5*self.tick**2
if self.displacement >= 16: self.displacement = (self.displacement/abs(self.displacement)) * 16
# change the vertical position by displacement value
self.y = self.y + self.displacement
# bird is ascending
if self.displacement < 0 or self.y < self.height + 50:
if self.tilt < 25:
self.tilt = 25
else: # bird is descending
if self.tilt > -90:
self.tilt -= self.ROTATE_VEL
# the bird crashes by moving out of the screen or hitting the floor
if (self.y + self.img.get_height() < 0 or self.y + self.img.get_height() >= WINDOW_HEIGHT - FLOOR_HEIGHT):
self.crashed = True
def draw(self, window):
self.frameIndex += 1
if self.frameIndex < self.FLAPS_ANIMATION_TIME:
self.img = toolkit.sprites['bird']['ascend']
elif self.frameIndex < self.FLAPS_ANIMATION_TIME * 2:
self.img = toolkit.sprites['bird']['idle']
elif self.frameIndex < self.FLAPS_ANIMATION_TIME * 3:
self.img = toolkit.sprites['bird']['descend']
else:
self.frameIndex = 0
rotated_image = pygame.transform.rotate(self.img, self.tilt)
window.blit(rotated_image, (self.x, self.y))
# returns the mask of the bird for detecting collisions
def get_mask(self):
return pygame.mask.from_surface(self.img)
'''
Background object
'''
class Background:
img = toolkit.sprites['world']['background']
width = img.get_width()
height = img.get_height()
# how fast the background moves
velocity = 1
def __init__(self):
self.x1 = 0
self.x2 = self.width
def move(self):
self.x1 -= self.velocity
self.x2 -= self.velocity
if self.x1 + self.width < 0:
self.x1 = self.x2 + self.width
if self.x2 + self.width < 0:
self.x2 = self.x1 + self.width
def draw(self, window):
# the background moves infinitely, so we need a sequence of two images
window.blit(self.img, (self.x1, 0))
window.blit(self.img, (self.x2, 0))
'''
Floor object
'''
class Floor:
img = toolkit.sprites['world']['floor']
width = img.get_width()
height = img.get_height()
# the floor moves as fast as pipes
velocity = 5
def __init__(self):
self.x1 = 0
self.x2 = self.width
def move(self):
self.x1 -= self.velocity
self.x2 -= self.velocity
if self.x1 + self.width < 0:
self.x1 = self.x2 + self.width
if self.x2 + self.width < 0:
self.x2 = self.x1 + self.width
def draw(self, window):
window.blit(self.img, (self.x1, WINDOW_HEIGHT - FLOOR_HEIGHT))
window.blit(self.img, (self.x2, WINDOW_HEIGHT - FLOOR_HEIGHT))
'''
The main menu
'''
mainmenu_active = True
def mainmenu():
WINDOW.blit(toolkit.sprites['titles']['game_title'], (WINDOW_WIDTH / 2 - toolkit.sprites['titles']['game_title'].get_width() / 2, 100))
WINDOW.blit(toolkit.sprites['titles']['scriptshot'], (WINDOW_WIDTH / 2 - toolkit.sprites['titles']['scriptshot'].get_width() / 2, 180))
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
# play button
toolkit.button(toolkit.sprites['buttons']['play'],
WINDOW_WIDTH / 2 - toolkit.sprites['buttons']['play'].get_width() / 2,
300,
close_mainmenu)
pygame.display.update()
clock.tick(30)
def close_mainmenu():
global mainmenu_active
mainmenu_active = False
run()
'''
the main function
'''
def run():
global BEST_BRAIN, BEST_FITNESS, GENERATION_INDEX
# the number of pipes passed in the current generation
score = 0
# objects
background = Background()
floor = Floor()
birds = []
pipes = [Pipe(500)]
# all the birds in a generation have the same random starting position
startPosX = random.randint(20, 100)
startPosY = random.randint(20, WINDOW_HEIGHT - FLOOR_HEIGHT - 40)
for i in range(BIRDS_COUNT):
bird = Bird(startPosX, startPosY)
if BEST_BRAIN is not None: bird.brain.setData(BEST_BRAIN)
birds.append(bird)
while True:
clock.tick(30)
# the last alive bird in a generation is the best one
if len(birds) == 1:
if birds[0].fitness > BEST_FITNESS:
BEST_FITNESS = birds[0].fitness
BEST_BRAIN = birds[0].brain.copy()
# the current generation went extinct, and the next one
# must start
elif len(birds) == 0:
GENERATION_INDEX += 1
run()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
floor.move()
background.move()
pipe_ind = 0
if len(pipes) > 1 and bird.x > pipes[0].x + pipes[0].PIPE_TOP.get_width():
pipe_ind = 1
for x, bird in enumerate(birds):
bird.move()
# bird decides whether to jump or not
res = bird.brain.decide([[bird.y, abs(bird.y - pipes[pipe_ind].height), abs(bird.y - pipes[pipe_ind].bottom_pipe_top)]])
# the first output neuron represents jump action
if np.argmax(res) == 0: bird.jump()
add_pipe = False
pipes_to_remove = []
for pipe in pipes:
pipe.move()
for bird in birds:
if (bird.crashed or pipe.collide(bird, WINDOW)):
# remove crashed birds
birds.pop(birds.index(bird))
# a pipe is destroyed after moving out of screen
if pipe.x + pipe.PIPE_TOP.get_width() < 0:
pipes_to_remove.append(pipe)
# add new pipe if the current one has been passed by the birds
if not pipe.passed and pipe.x < bird.x:
pipe.passed = True
add_pipe = True
for r in pipes_to_remove:
pipes.remove(r)
if add_pipe:
score += 1
pipes.append(Pipe(WINDOW_WIDTH))
toolkit.updateDisplay(background, floor, birds, pipes, score, not mainmenu_active, pipe_ind, GENERATION_INDEX, SHOW_LINES)
if mainmenu_active:
mainmenu()
if __name__ == '__main__':
run()
|
[
"noreply@github.com"
] |
vafakaramzadegan.noreply@github.com
|
154832d1c75a95da042fa771507421a8b8fc5a0d
|
697627ec7a9e7ec1bceaad0c7a0da48e966630ac
|
/Algorithm/plusMinus.py
|
c64e8b15ac5b7447228d32cf0ca7b4f8c8c16efc
|
[] |
no_license
|
jasper2326/HackerRank
|
ab0090cc9906472f29c91225c1cf8f1d3a19113c
|
1e5bad3bc734085fc12ce1cbd126742f2be506da
|
refs/heads/master
| 2021-05-06T09:49:22.907317
| 2018-03-27T08:51:59
| 2018-03-27T08:51:59
| 114,071,619
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 305
|
py
|
import sys
n = int(raw_input().strip())
arr = map(int,raw_input().strip().split(' '))
minus = 0
plus = 0
zero = 0
for num in arr:
if num > 0:
plus += 1
elif num == 0:
zero += 1
elif num < 0:
minus += 1
print plus * 1.0 / n
print minus * 1.0 / n
print zero * 1.0 / n
|
[
"jasper_jiao@hotmail.com"
] |
jasper_jiao@hotmail.com
|
163957526ce8c555c76ddc5827950cbd122cd6ef
|
15447259ae95c3df176d0e1c9a2f16b857c2d36d
|
/data_import_pkg/tweet_parser/read_lid_spaeng.py
|
4760bedf8a1aec8d5214e6f9a90117866864e1fd
|
[] |
no_license
|
FilipePintoReis/PLEI_FEUP
|
4dc0a58f30f66d2304e3903749ded19281532a19
|
73579e8a2786a5430ff31638ff2d84ec27d2eb5f
|
refs/heads/main
| 2023-02-18T10:46:20.152610
| 2021-01-11T16:48:55
| 2021-01-11T16:48:55
| 315,767,969
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,215
|
py
|
'''
Contains LinceFileReader
'''
class LinceFileReader:
'''
This class read files from lince dataset when given a prefix.
The file structure inside the prefix folder is ought to be:
prefix/
train.conll
test.conll
dev.conll
'''
@staticmethod
def train_data(prefix):
'''
Returns training data based on a prefix
'''
train_str = f"{prefix}/train.conll"
train_file = open(train_str, "r", encoding="UTF-8")
train_string = train_file.read()
train_file.close()
return train_string
@staticmethod
def test_data(prefix):
'''
Returns test data based on a prefix
'''
test_str = f"{prefix}/test.conll"
test_file = open(test_str, "r", encoding="UTF-8")
test_string = test_file.read()
test_file.close()
return test_string
@staticmethod
def dev_data(prefix):
'''
Returns validation data based on a prefix
'''
dev_str = f"{prefix}/dev.conll"
dev_file = open(dev_str, "r", encoding="UTF-8")
dev_string = dev_file.read()
dev_file.close()
return dev_string
|
[
"freis@semasio.com"
] |
freis@semasio.com
|
7da99af886cbcc786fce422085ebbf1e0fe50477
|
9d8b8a91b62f416422d74b02bc64a9d6cc20a8fe
|
/Other Codes/login.py
|
afc8a6f2bcdb900488206c59f6629076dc64988b
|
[] |
no_license
|
sakshid22/HealingManagementSystem
|
4e7e06e7bac2acb0f7127d04e50626b04ba6b09f
|
846c1a61ed9f72f66fa022fe928dc9fe35280c3b
|
refs/heads/master
| 2021-05-13T21:02:27.564219
| 2018-01-08T03:50:00
| 2018-01-08T03:50:00
| 116,451,496
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 511
|
py
|
import Tkinter as tk
import tkFileDialog as tkfd
import tkMessageBox as tkmb
#import xlrd
#GUI setup
root = tk.Tk()
root.title("TLA Database Tool")
user_label = tk.Label(root, text = "username").pack()
user = tk.Entry(root, textvariable = tk.StringVar()).pack()
pw_label = tk.Label(root, text = "password").pack()
#pw = tk.Entry(root, show = "*", textvariable = tk.StringVar()).pack()
#login_button = tk.Button(root, text = "Login", command = lambda: logintoDB(user,pw)).pack()
root.mainloop()
|
[
"noreply@github.com"
] |
sakshid22.noreply@github.com
|
20e89b7811131af3ba6bbfd580c9ce135bd77ec0
|
acb8e84e3b9c987fcab341f799f41d5a5ec4d587
|
/langs/2/gro.py
|
fe80ab7e828b5a1f15bb709c847e422fdd054e5c
|
[] |
no_license
|
G4te-Keep3r/HowdyHackers
|
46bfad63eafe5ac515da363e1c75fa6f4b9bca32
|
fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2
|
refs/heads/master
| 2020-08-01T12:08:10.782018
| 2016-11-13T20:45:50
| 2016-11-13T20:45:50
| 73,624,224
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 486
|
py
|
import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'grO':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1])
|
[
"juliettaylorswift@gmail.com"
] |
juliettaylorswift@gmail.com
|
59d265402995ddefa4c085f0fb5aa9cf9e37513d
|
769c5e0f1836eccff49bf3c041a8951c4c0852c3
|
/log_profile.py
|
533b09b6db8e3c9f78e969633330a05e39f3fdef
|
[] |
no_license
|
Ewenwan/upy-profile
|
1ee6515d413a77263ed123ae118be9227720e3e3
|
3c72aa5c720c68359748a77119e89603d1d5b135
|
refs/heads/master
| 2022-04-26T02:42:50.634705
| 2020-04-26T16:01:50
| 2020-04-26T16:01:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,145
|
py
|
import sys
import time
plog_field_count = 7
plog_size = 0
plog_next_idx = 0
plog = None
def alloc_plog(size):
global plog_size, plog
plog_size = size
plog = [[None]*plog_field_count for x in range(size)]
def analize_plog(plog):
frame_stats = {}
func_stats = {}
for op, frame_id, parent_frame_id, filename, lineno, func_name, now_us in plog:
loc_id = (filename, lineno, func_name)
# print(op, arg, id(frame), loc_id)
if op == 'call':
frame_stats[frame_id] = [now_us, 0]
elif op == 'return' or op == 'exception':
start_tm_us, callees_acc_us = frame_stats[frame_id]
duration_us = now_us - start_tm_us
# add total time spent in this frame to the parent frame
if parent_frame_id in frame_stats:
frame_stats[parent_frame_id][1] += duration_us
# subtract time spent in callees from current frame
duration_us -= callees_acc_us
#print(op, arg, frame_id, loc_id, 'parent frame:\t', parent_frame_id, 'acc:\t', callees_acc_us, frame_stats[parent_frame_id][1] if parent_frame_id in frame_stats else 0, duration_us)
del frame_stats[frame_id]
if loc_id in func_stats:
call_count, min_us, max_us, tot_us = func_stats[loc_id]
call_count += 1
min_us = min(min_us, duration_us)
max_us = max(max_us, duration_us)
tot_us += duration_us
else:
call_count, min_us, max_us, tot_us = (1, duration_us, duration_us, duration_us)
func_stats[loc_id] = call_count, min_us, max_us, tot_us
else:
pass
return func_stats
def _log_trace_func(frame, op, arg):
global plog_next_idx
if op == 'line':
return
now_us = time.ticks_us()
if plog_next_idx >= plog_size:
return
code = frame.f_code
# assigning like this `[index][:] = fields` does not replace the item in the outer list
plog[plog_next_idx][:] = op, id(frame), id(frame.f_back), code.co_filename, frame.f_lineno, code.co_name, now_us
plog_next_idx += 1
return _log_trace_func
def test2(a):
#time.sleep(0.1)
pass
def test1(a):
test2(a)
test2(a)
test2(a)
test2(a)
test2(a)
def __main__():
test1('lalala')
test1('qaqaqa')
alloc_plog(1000)
sys.settrace(_log_trace_func)
__main__()
print(analize_plog(plog))
|
[
"dbrignoli@audioscience.com"
] |
dbrignoli@audioscience.com
|
526ac7474db46277883c897afc5178b2f2270a15
|
362e8ab79233f72d0afc6877a59380ea6674c585
|
/EMuChannel/python/PreselectionWithExplicitEleId.py
|
235f81be5efb57eb28c8643d340fdc5146aac7b3
|
[] |
no_license
|
marthaEstefany/DisplacedSUSY
|
792a07cf15a00def25271930d15bb639dbeac515
|
81de5ca0c9e9c53d64b91bbe7db4a700bb5af923
|
refs/heads/Run2
| 2021-12-05T12:03:09.003009
| 2018-12-02T19:40:26
| 2018-12-02T19:40:26
| 127,357,296
| 0
| 0
| null | 2019-01-13T21:12:11
| 2018-03-29T23:21:01
|
Python
|
UTF-8
|
Python
| false
| false
| 4,688
|
py
|
import FWCore.ParameterSet.Config as cms
import copy
import string
from DisplacedSUSY.EMuChannel.CutDefinitions import *
from DisplacedSUSY.StandardAnalysis.ElectronIdCutDefinitions import *
##########################################################################################
#USE THIS CONFIG ONLY FOR ELECTRON TIGHT ID TESTS!
##########################################################################################
PreselectionWithExplicitEleIdBarrel = cms.PSet(
name = cms.string("PreselectionWithExplicitEleIdBarrel"),
triggers = triggersMuonPhoton,
cuts = cms.VPSet()
)
### jet selection (just for plotting purposes, doesn't make event cuts)
PreselectionWithExplicitEleIdBarrel.cuts.extend(atLeastZero_jet_basic_selection_cuts)
### at least one good electron
PreselectionWithExplicitEleIdBarrel.cuts.append(electron_eta_cut)
PreselectionWithExplicitEleIdBarrel.cuts.append(electron_gap_veto)
if os.environ["CMSSW_VERSION"].startswith ("CMSSW_8_0_"):
PreselectionWithExplicitEleIdBarrel.cuts.append(electron_pt_42_cut)
elif os.environ["CMSSW_VERSION"].startswith ("CMSSW_9_4_"):
PreselectionWithExplicitEleIdBarrel.cuts.append(electron_pt_50_cut)
PreselectionWithExplicitEleIdBarrel.cuts.append(electron_isEB_cut)
PreselectionWithExplicitEleIdBarrel.cuts.append(electron_sigmaIetaIetaEB_cut)
PreselectionWithExplicitEleIdBarrel.cuts.append(electron_deltaPhiSuperClusterEB_cut)
PreselectionWithExplicitEleIdBarrel.cuts.append(electron_deltaEtaSuperClusterEB_cut)
PreselectionWithExplicitEleIdBarrel.cuts.append(electron_hadronicOverEmEB_cut)
if os.environ["CMSSW_VERSION"].startswith ("CMSSW_8_0_"):
PreselectionWithExplicitEleIdBarrel.cuts.append(electron_abs_1overE_1overP_cut)
elif os.environ["CMSSW_VERSION"].startswith ("CMSSW_9_4_"):
PreselectionWithExplicitEleIdBarrel.cuts.append(electron_abs_1overE_1overP_EB_cut)
PreselectionWithExplicitEleIdBarrel.cuts.append(electron_missingInnerHits_cut)
PreselectionWithExplicitEleIdBarrel.cuts.append(electron_passConversionVeto_cut)
### at least one good muon
PreselectionWithExplicitEleIdBarrel.cuts.append(muon_eta_cut)
if os.environ["CMSSW_VERSION"].startswith ("CMSSW_8_0_"):
PreselectionWithExplicitEleIdBarrel.cuts.append(muon_pt_40_cut)
elif os.environ["CMSSW_VERSION"].startswith ("CMSSW_9_4_"):
PreselectionWithExplicitEleIdBarrel.cuts.append(muon_pt_50_cut)
PreselectionWithExplicitEleIdBarrel.cuts.append(muon_global_cut)
PreselectionWithExplicitEleIdBarrel.cuts.append(muon_id_cut)
PreselectionWithExplicitEleIdBarrel.cuts.append(muon_iso_cut)
PreselectionWithExplicitEleIdEndcap = cms.PSet(
name = cms.string("PreselectionWithExplicitEleIdEndcap"),
triggers = triggersMuonPhoton,
cuts = cms.VPSet()
)
### jet selection (just for plotting purposes, doesn't make event cuts)
PreselectionWithExplicitEleIdEndcap.cuts.extend(atLeastZero_jet_basic_selection_cuts)
### at least one good electron
PreselectionWithExplicitEleIdEndcap.cuts.append(electron_eta_cut)
PreselectionWithExplicitEleIdEndcap.cuts.append(electron_gap_veto)
if os.environ["CMSSW_VERSION"].startswith ("CMSSW_8_0_"):
PreselectionWithExplicitEleIdEndcap.cuts.append(electron_pt_42_cut)
elif os.environ["CMSSW_VERSION"].startswith ("CMSSW_9_4_"):
PreselectionWithExplicitEleIdEndcap.cuts.append(electron_pt_50_cut)
PreselectionWithExplicitEleIdEndcap.cuts.append(electron_isEE_cut)
PreselectionWithExplicitEleIdEndcap.cuts.append(electron_sigmaIetaIetaEE_cut)
PreselectionWithExplicitEleIdEndcap.cuts.append(electron_deltaPhiSuperClusterEE_cut)
PreselectionWithExplicitEleIdEndcap.cuts.append(electron_deltaEtaSuperClusterEE_cut)
PreselectionWithExplicitEleIdEndcap.cuts.append(electron_hadronicOverEmEE_cut)
if os.environ["CMSSW_VERSION"].startswith ("CMSSW_8_0_"):
PreselectionWithExplicitEleIdEndcap.cuts.append(electron_abs_1overE_1overP_cut)
elif os.environ["CMSSW_VERSION"].startswith ("CMSSW_9_4_"):
PreselectionWithExplicitEleIdEndcap.cuts.append(electron_abs_1overE_1overP_EE_cut)
PreselectionWithExplicitEleIdEndcap.cuts.append(electron_missingInnerHits_cut)
PreselectionWithExplicitEleIdEndcap.cuts.append(electron_passConversionVeto_cut)
### at least one good muon
PreselectionWithExplicitEleIdEndcap.cuts.append(muon_eta_cut)
if os.environ["CMSSW_VERSION"].startswith ("CMSSW_8_0_"):
PreselectionWithExplicitEleIdEndcap.cuts.append(muon_pt_40_cut)
elif os.environ["CMSSW_VERSION"].startswith ("CMSSW_9_4_"):
PreselectionWithExplicitEleIdEndcap.cuts.append(muon_pt_50_cut)
PreselectionWithExplicitEleIdEndcap.cuts.append(muon_global_cut)
PreselectionWithExplicitEleIdEndcap.cuts.append(muon_id_cut)
PreselectionWithExplicitEleIdEndcap.cuts.append(muon_iso_cut)
|
[
"juliette.alimena@cern.ch"
] |
juliette.alimena@cern.ch
|
00be820482646996efd41e15619354b36f008fc3
|
c7b21d0a6ad68733aacd4722dc804bd5943b15b4
|
/pur_beurre/tests/test_urls.py
|
acc4dbce0ea72c8a446abfe004d9b2508abc228d
|
[] |
no_license
|
pythonmentor/benjamin-p11
|
e44cc3ed8a4eb140b08a8b4a1689b8155677dc7d
|
dca43e8ef5952c5eb374f18d955260da03e025f5
|
refs/heads/master
| 2022-10-02T15:46:46.619965
| 2020-06-05T12:08:39
| 2020-06-05T12:08:39
| 268,841,345
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 444
|
py
|
from django.test import TestCase
from django.urls import resolve
from django.shortcuts import reverse
from pur_beurre.views import home, legal_notice
class UrlTestCase(TestCase):
def test_home_url_view(self):
found = resolve(reverse("home"))
self.assertEqual(found.func, home)
def test_legal_notice_url_view(self):
found = resolve(reverse("legal_notice"))
self.assertEqual(found.func, legal_notice)
|
[
"thierry@chappuis.io"
] |
thierry@chappuis.io
|
8b0bfb43ff5a4f396713d6a971a4594348a49c64
|
2ab02a03eb1253d43519ee1be1c11ea14172ac00
|
/cycle/bin/python-config
|
1a59d23ed62ad9deab3112341190f9947eaa1990
|
[] |
no_license
|
archi-007/bicycle-rental
|
1d948a936784ccef9426f571deff2c8660a05d0c
|
0c4dbb5fde969b8ce29a9852efadc482a8459195
|
refs/heads/master
| 2021-09-25T15:35:11.401952
| 2021-04-11T21:30:32
| 2021-04-11T21:30:32
| 239,024,197
| 0
| 2
| null | 2021-09-22T18:32:37
| 2020-02-07T21:17:07
|
Python
|
UTF-8
|
Python
| false
| false
| 2,355
|
#!/home/archisman/Desktop/banchod/cycle/bin/python
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'help']
if sys.version_info >= (3, 2):
valid_opts.insert(-1, 'extension-suffix')
valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
valid_opts.append('configdir')
def exit_with_usage(code=1):
sys.stderr.write("Usage: {0} [{1}]\n".format(
sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
exit_with_usage(code=0)
for opt in opt_flags:
if opt == '--prefix':
print(sysconfig.get_config_var('prefix'))
elif opt == '--exec-prefix':
print(sysconfig.get_config_var('exec_prefix'))
elif opt in ('--includes', '--cflags'):
flags = ['-I' + sysconfig.get_path('include'),
'-I' + sysconfig.get_path('platinclude')]
if opt == '--cflags':
flags.extend(getvar('CFLAGS').split())
print(' '.join(flags))
elif opt in ('--libs', '--ldflags'):
abiflags = getattr(sys, 'abiflags', '')
libs = ['-lpython' + pyver + abiflags]
libs += getvar('LIBS').split()
libs += getvar('SYSLIBS').split()
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == '--ldflags':
if not getvar('Py_ENABLE_SHARED'):
libs.insert(0, '-L' + getvar('LIBPL'))
if not getvar('PYTHONFRAMEWORK'):
libs.extend(getvar('LINKFORSHARED').split())
print(' '.join(libs))
elif opt == '--extension-suffix':
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix is None:
ext_suffix = sysconfig.get_config_var('SO')
print(ext_suffix)
elif opt == '--abiflags':
if not getattr(sys, 'abiflags', None):
exit_with_usage()
print(sys.abiflags)
elif opt == '--configdir':
print(sysconfig.get_config_var('LIBPL'))
|
[
"archiribhu@gmail.com"
] |
archiribhu@gmail.com
|
|
cc6bf9c20b5c44a499abbb56330cd3825302f7b6
|
b6154329bbca55a1a091f61718cef0d05817914b
|
/firstpython.py
|
5744bd0b16bd2045aa9bb69a06009b17651ac71b
|
[] |
no_license
|
97-shuang-chen/testrepo
|
c0d9e89fd453823b08cd244eb889156663718bd8
|
6cd25c1b74a9a71b23de5f73adc03f6b9f0c4e66
|
refs/heads/main
| 2023-03-02T17:20:46.106534
| 2021-02-06T19:22:19
| 2021-02-06T19:22:19
| 336,609,389
| 0
| 0
| null | 2021-02-06T19:22:20
| 2021-02-06T18:37:46
|
Python
|
UTF-8
|
Python
| false
| false
| 44
|
py
|
# Display the output
print('hello, world!')
|
[
"noreply@github.com"
] |
97-shuang-chen.noreply@github.com
|
0f5f966d6bb914a9176917f7154551b72d0f4c53
|
26b931a5f38639838d0b0fcba65a3dd858989f41
|
/auto_ts/utils/colors.py
|
bf44a1acd4349ff55c28ef1cb630266a4cfa6f31
|
[
"Apache-2.0"
] |
permissive
|
AutoViML/Auto_TS
|
7ffd967703b89c74b161386ecef4703393699184
|
9b13d7d629385f34aebc54cfca82fc8cf7506c48
|
refs/heads/master
| 2023-06-07T01:56:55.541110
| 2023-05-17T01:16:05
| 2023-05-17T01:16:05
| 240,736,421
| 624
| 110
|
Apache-2.0
| 2023-09-01T17:23:44
| 2020-02-15T15:23:32
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 246
|
py
|
class colorful:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
|
[
"rsesha2001@yahoo.com"
] |
rsesha2001@yahoo.com
|
bac401bb4b9e15d012dc3864c835da2b4c0e6e96
|
ceb3d82494813cd21e38231964e098bb3efe093b
|
/Segmentation/flood.py
|
6f30aeb52b98062448c12265b2ebc669cd77d382
|
[
"Apache-2.0"
] |
permissive
|
Joevaen/Scikit-image_On_CT
|
0c0a306a9ca18668bd9bb4105e577766b1d5578b
|
e3bf0eeadc50691041b4b7c44a19d07546a85001
|
refs/heads/main
| 2023-03-16T01:28:04.871513
| 2021-03-16T07:53:57
| 2021-03-16T07:53:57
| 344,071,656
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 37
|
py
|
# Mask corresponding to a flood fill.
|
[
"joevaen@126.com"
] |
joevaen@126.com
|
c6bf7f7d10fbca23871af2147d25627883f41608
|
31d48e64439aef62c6b752caaf0210eaf500f99c
|
/stock_price/admin.py
|
7fce80fc9a736d61dc01882fa44c8c551e19d585
|
[] |
no_license
|
RyomaKawata1025/Data_processing_app
|
c907d19de5dabf64c066eac99f02432ef6025226
|
5ae8ae89a7fa57e6533846f0001be0b294fb696b
|
refs/heads/main
| 2023-03-02T14:30:29.466553
| 2021-02-15T10:46:38
| 2021-02-15T10:46:38
| 339,038,300
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 230
|
py
|
from django.contrib import admin
from .models import NIKKEI,SP500,USDJPY,BITCOIN,COMMON
admin.site.register(NIKKEI)
admin.site.register(SP500)
admin.site.register(USDJPY)
admin.site.register(BITCOIN)
admin.site.register(COMMON)
|
[
"kawataryouma@kawadaryoumanoMacBook-Air.local"
] |
kawataryouma@kawadaryoumanoMacBook-Air.local
|
aeef294b4e1f262d5e7de5bcdcb2ef52e9746fd1
|
066120cc7a5d550a2d8c2c1f404d7fa90a89ac9f
|
/devs/migrations/0021_remove_device_client.py
|
27aa693a041ae6a372803b1c23cb27482201c254
|
[] |
no_license
|
dpg3000/iec60870_5_configurator
|
7bfd2ae1947d2c4990a429143314e99ab6d4b614
|
789ed53c735d028050fcc145ac1488ad9aac6858
|
refs/heads/master
| 2022-09-14T04:58:58.835395
| 2020-03-02T12:58:46
| 2020-03-02T12:58:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 325
|
py
|
# Generated by Django 2.1.5 on 2019-12-05 14:05
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('devs', '0020_auto_20191205_1505'),
]
operations = [
migrations.RemoveField(
model_name='device',
name='Client',
),
]
|
[
"dpou18@gmail.com"
] |
dpou18@gmail.com
|
e9a8dde86248aa09ca4a251fa9cfa9038bc21ee6
|
808c852c709f76aa0c36362f060128eb9a8b3b54
|
/torchtimeseries/models/__init__.py
|
004cf2c1bd9ec0b6d58b5e5c874c0a429ef12d42
|
[] |
no_license
|
timeseriesAI/timeseriesAI1
|
fea3a61b89661594026b21cd8fab58687f2186c7
|
8c0ff06737ab0b15938372de3d298b8a6083b120
|
refs/heads/master
| 2021-06-11T01:58:48.739894
| 2020-11-12T10:17:59
| 2020-11-12T10:17:59
| 254,351,793
| 18
| 7
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 135
|
py
|
from .FCN import *
from .ResNet import *
from .ResCNN import *
from .InceptionTime import *
from .layers import *
from .ROCKET import *
|
[
"oguiza@gmail.com"
] |
oguiza@gmail.com
|
597dc9651bdd04a071d74c5c2b3f9e4e5ffba698
|
394ed7b7f7d65a309a9a041a472500f160215f55
|
/marvel_env/bin/alembic
|
354c8cbdf294b2fcc142b9f3a30cb5017c6659b7
|
[] |
no_license
|
allyroo/marvel_project
|
4f5dde0a5e54d3c0525e5ebf43bddaa5d3789829
|
3533f937d8560fcc7e8ebecdc616f39f15375157
|
refs/heads/master
| 2023-06-09T00:15:22.507846
| 2021-06-28T05:19:00
| 2021-06-28T05:19:00
| 380,913,783
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 291
|
#!/Users/Allyse/Documents/coding_temple_rangers_63/week_5/marvel_inventory/marvel_env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from alembic.config import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"allyse.arucan@gmail.com"
] |
allyse.arucan@gmail.com
|
|
762aed1530a3b36da97232a519cf42dab6b4cfa0
|
75ef339fd43451036ea7e3540fe097d3c7df212c
|
/catchphrases_uni.py
|
9ed13ccc53d43a9236ddaef92b6c2d10c679ff69
|
[] |
no_license
|
snake48/PIPHRASE
|
a028ffef652c073fd1c9f4d4b9a82a3af587d135
|
42774bf35c4d02dede7adb081437755ebb244365
|
refs/heads/master
| 2020-05-07T06:03:53.899995
| 2015-09-06T18:01:59
| 2015-09-06T18:01:59
| 42,012,085
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 39,322
|
py
|
import time,random
import unicornhat as uh
cp_boat = [
[[[255, 255, 0], [255, 255, 0], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[255, 255, 0], [255, 255, 0], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 255, 0], [0, 255, 0]], [[0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 255, 0], [0, 255, 0]]],
[[[255, 255, 0], [255, 255, 0], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[255, 255, 0], [255, 255, 0], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 255, 0], [0, 255, 0]], [[0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 255, 0], [0, 255, 0]]],
[[[255, 255, 0], [255, 255, 0], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[255, 255, 0], [255, 255, 0], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[255, 0, 0], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 255, 0], [0, 255, 0]], [[0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 255, 0], [0, 255, 0]]],
[[[255, 255, 0], [255, 255, 0], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[255, 255, 0], [255, 255, 0], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[255, 0, 0], [255, 0, 0], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[255, 0, 0], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 255, 0], [0, 255, 0]], [[0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 255, 0], [0, 255, 0]]],
[[[255, 255, 0], [255, 255, 0], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[255, 255, 0], [255, 255, 0], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[255, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[255, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[255, 0, 0], [255, 0, 0], [255, 0, 0], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[255, 0, 0], [255, 0, 0], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 255, 0], [0, 255, 0]], [[0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 255, 0], [0, 255, 0]]],
[[[255, 255, 0], [255, 255, 0], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[255, 255, 0], [255, 255, 0], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 255, 255], [255, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[255, 255, 255], [255, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[255, 0, 0], [255, 0, 0], [255, 0, 0], [255, 0, 0], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 0, 255], [255, 0, 0], [255, 0, 0], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 255, 0], [0, 255, 0]], [[0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 255, 0], [0, 255, 0]]],
[[[255, 255, 0], [255, 255, 0], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[255, 255, 0], [255, 255, 0], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 255, 255], [0, 255, 255], [255, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 255, 255], [255, 255, 255], [255, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 255, 255], [255, 0, 0], [255, 0, 0], [255, 0, 0], [255, 0, 0], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 0, 255], [0, 0, 255], [255, 0, 0], [255, 0, 0], [0, 0, 255], [0, 0, 255], [0, 255, 0], [0, 255, 0]], [[0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 255, 0], [0, 255, 0]]],
[[[255, 255, 0], [255, 255, 0], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[255, 255, 0], [255, 255, 0], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 255, 255], [0, 255, 255], [0, 255, 255], [255, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 255, 255], [0, 255, 255], [255, 255, 255], [255, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 255, 255], [0, 255, 255], [255, 0, 0], [255, 0, 0], [255, 0, 0], [255, 0, 0], [0, 255, 255], [0, 255, 255]], [[0, 0, 255], [0, 0, 255], [0, 0, 255], [255, 0, 0], [255, 0, 0], [0, 0, 255], [0, 255, 0], [0, 255, 0]], [[0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 255, 0], [0, 255, 0]]],
[[[255, 255, 0], [255, 255, 0], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[255, 255, 0], [255, 255, 0], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [255, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 255, 255], [0, 255, 255], [0, 255, 255], [255, 255, 255], [255, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 255, 255], [0, 255, 255], [0, 255, 255], [255, 0, 0], [255, 0, 0], [255, 0, 0], [255, 0, 0], [0, 255, 255]], [[0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [255, 0, 0], [255, 0, 0], [0, 255, 0], [0, 255, 0]], [[0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 255, 0], [0, 255, 0]]],
]
cp_weather = [
[[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 255, 0], [255, 255, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 255, 0], [255, 255, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 128, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 255, 0], [0, 255, 0], [0, 255, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 128, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]],
[[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 255, 0], [255, 255, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 255, 0], [255, 255, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 128, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 255, 0], [0, 255, 0], [0, 255, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 128, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]],
[[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 255, 0], [255, 255, 0]], [[102, 0, 204], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 255, 0], [255, 255, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 128, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 255, 0], [0, 255, 0], [0, 255, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 128, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]],
[[[102, 0, 204], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 255, 0], [255, 255, 0]], [[102, 0, 204], [102, 0, 204], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 255, 0], [255, 255, 0]], [[102, 0, 204], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 128, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 255, 0], [0, 255, 0], [0, 255, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 128, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]],
[[[102, 0, 204], [102, 0, 204], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 255, 0], [255, 255, 0]], [[102, 0, 204], [102, 0, 204], [102, 0, 204], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 255, 0], [255, 255, 0]], [[102, 0, 204], [102, 0, 204], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 128, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 255, 0], [0, 255, 0], [0, 255, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 128, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]],
[[[102, 0, 204], [102, 0, 204], [102, 0, 204], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 255, 0], [255, 255, 0]], [[102, 0, 204], [102, 0, 204], [102, 0, 204], [102, 0, 204], [0, 0, 0], [0, 0, 0], [255, 255, 0], [255, 255, 0]], [[102, 0, 204], [102, 0, 204], [102, 0, 204], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 128, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 255, 0], [0, 255, 0], [0, 255, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 128, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]],
[[[0, 0, 0], [102, 0, 204], [102, 0, 204], [102, 0, 204], [0, 0, 0], [0, 0, 0], [255, 255, 0], [255, 255, 0]], [[102, 0, 204], [102, 0, 204], [102, 0, 204], [102, 0, 204], [102, 0, 204], [0, 0, 0], [255, 255, 0], [255, 255, 0]], [[0, 0, 0], [102, 0, 204], [102, 0, 204], [102, 0, 204], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 128, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 255, 0], [0, 255, 0], [0, 255, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 128, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]],
[[[0, 0, 0], [102, 0, 204], [102, 0, 204], [102, 0, 204], [0, 0, 0], [0, 0, 0], [255, 255, 0], [255, 255, 0]], [[102, 0, 204], [102, 0, 204], [102, 0, 204], [102, 0, 204], [102, 0, 204], [0, 0, 0], [255, 255, 0], [255, 255, 0]], [[0, 0, 0], [102, 0, 204], [102, 0, 204], [102, 0, 204], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 128, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 255, 0], [0, 255, 0], [0, 255, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 128, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]],
[[[0, 0, 0], [102, 0, 204], [102, 0, 204], [102, 0, 204], [0, 0, 0], [0, 0, 0], [255, 255, 0], [255, 255, 0]], [[102, 0, 204], [102, 0, 204], [102, 0, 204], [102, 0, 204], [102, 0, 204], [0, 0, 0], [255, 255, 0], [255, 255, 0]], [[0, 0, 0], [102, 0, 204], [102, 0, 204], [102, 0, 204], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 128, 0], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 255, 0], [0, 255, 0], [0, 255, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 128, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]],
[[[0, 0, 0], [102, 0, 204], [102, 0, 204], [102, 0, 204], [0, 0, 0], [0, 0, 0], [255, 255, 0], [255, 255, 0]], [[102, 0, 204], [102, 0, 204], [102, 0, 204], [102, 0, 204], [102, 0, 204], [0, 0, 0], [255, 255, 0], [255, 255, 0]], [[0, 0, 0], [102, 0, 204], [102, 0, 204], [102, 0, 204], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 255, 255], [0, 0, 0], [255, 128, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 255, 0], [0, 255, 0], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 128, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]],
[[[0, 0, 0], [102, 0, 204], [102, 0, 204], [102, 0, 204], [0, 0, 0], [0, 0, 0], [255, 255, 0], [255, 255, 0]], [[102, 0, 204], [102, 0, 204], [102, 0, 204], [102, 0, 204], [102, 0, 204], [0, 0, 0], [255, 255, 0], [255, 255, 0]], [[0, 0, 0], [102, 0, 204], [102, 0, 204], [102, 0, 204], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 255, 255], [255, 128, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 255, 255], [0, 255, 0], [0, 255, 0], [0, 255, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 0], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 128, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]],
[[[0, 0, 0], [102, 0, 204], [102, 0, 204], [102, 0, 204], [0, 0, 0], [0, 0, 0], [255, 255, 0], [255, 255, 0]], [[102, 0, 204], [102, 0, 204], [102, 0, 204], [102, 0, 204], [102, 0, 204], [0, 0, 0], [255, 255, 0], [255, 255, 0]], [[0, 0, 0], [102, 0, 204], [102, 0, 204], [102, 0, 204], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 255, 255], [0, 0, 0], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 128, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 255, 255], [0, 255, 0], [0, 255, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 255, 255], [0, 0, 0], [0, 255, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 128, 0], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0]]],
[[[0, 0, 0], [102, 0, 204], [102, 0, 204], [102, 0, 204], [0, 0, 0], [0, 0, 0], [255, 255, 0], [255, 255, 0]], [[102, 0, 204], [102, 0, 204], [102, 0, 204], [102, 0, 204], [102, 0, 204], [0, 0, 0], [255, 255, 0], [255, 255, 0]], [[0, 0, 0], [102, 0, 204], [102, 0, 204], [102, 0, 204], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 255, 255], [0, 0, 0], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 255, 0], [0, 255, 0], [0, 255, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 255, 255], [0, 255, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 255, 255], [0, 0, 0], [255, 128, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]],
[[[0, 0, 0], [102, 0, 204], [102, 0, 204], [102, 0, 204], [0, 0, 0], [0, 0, 0], [255, 255, 0], [255, 255, 0]], [[102, 0, 204], [102, 0, 204], [102, 0, 204], [102, 0, 204], [102, 0, 204], [0, 0, 0], [255, 255, 0], [255, 255, 0]], [[0, 0, 0], [102, 0, 204], [102, 0, 204], [102, 0, 204], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 128, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 255, 255], [0, 255, 0], [0, 255, 255], [0, 255, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 255, 255], [255, 128, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]],
[[[0, 0, 0], [102, 0, 204], [102, 0, 204], [102, 0, 204], [0, 0, 0], [0, 0, 0], [255, 255, 0], [255, 255, 0]], [[102, 0, 204], [102, 0, 204], [102, 0, 204], [102, 0, 204], [102, 0, 204], [0, 0, 0], [255, 255, 0], [255, 255, 0]], [[0, 0, 0], [102, 0, 204], [102, 0, 204], [102, 0, 204], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 128, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 255, 0], [0, 255, 0], [0, 255, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 255, 255], [0, 0, 0], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 255, 255], [255, 128, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]],
[[[0, 0, 0], [102, 0, 204], [102, 0, 204], [102, 0, 204], [0, 0, 0], [0, 0, 0], [255, 255, 0], [255, 255, 0]], [[102, 0, 204], [102, 0, 204], [102, 0, 204], [102, 0, 204], [102, 0, 204], [0, 0, 0], [255, 255, 0], [255, 255, 0]], [[0, 0, 0], [102, 0, 204], [102, 0, 204], [102, 0, 204], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 128, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 255, 0], [0, 255, 0], [0, 255, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 255, 255], [0, 0, 0], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]],
[[[0, 0, 0], [102, 0, 204], [102, 0, 204], [102, 0, 204], [0, 0, 0], [0, 0, 0], [255, 255, 0], [255, 255, 0]], [[102, 0, 204], [102, 0, 204], [102, 0, 204], [102, 0, 204], [102, 0, 204], [0, 0, 0], [255, 255, 0], [255, 255, 0]], [[0, 0, 0], [102, 0, 204], [102, 0, 204], [102, 0, 204], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 128, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 255, 0], [0, 255, 0], [0, 255, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 128, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]],
]
cp_cake = [
[[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[255, 255, 255], [255, 255, 255], [255, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[255, 128, 0], [255, 128, 0], [255, 128, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[255, 128, 0], [255, 128, 0], [255, 128, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]],
[[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[255, 255, 255], [255, 255, 255], [255, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[255, 128, 0], [255, 128, 0], [255, 128, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[255, 128, 0], [255, 128, 0], [255, 128, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]],
[[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 255], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 255], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 255], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 255], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 255], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 255], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0]]],
[[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 255], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 255], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 255], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 255], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 255], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 255], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0]]],
[[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 255, 0], [255, 255, 0], [255, 255, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 255, 0], [255, 255, 0], [255, 255, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 255, 0], [255, 255, 0], [255, 255, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 0]], [[255, 255, 255], [255, 255, 255], [255, 255, 255], [0, 0, 0], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 0]], [[255, 128, 0], [255, 128, 0], [255, 128, 0], [0, 0, 0], [255, 255, 0], [0, 0, 0], [255, 255, 0], [0, 0, 0]], [[255, 128, 0], [255, 128, 0], [255, 128, 0], [0, 0, 0], [255, 255, 0], [0, 0, 0], [255, 255, 0], [0, 0, 0]]],
[[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 255, 0], [255, 255, 0], [255, 255, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 255, 0], [255, 255, 0], [255, 255, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 255, 0], [255, 255, 0], [255, 255, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 0]], [[255, 255, 255], [255, 255, 255], [255, 255, 255], [0, 0, 0], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 0]], [[255, 128, 0], [255, 128, 0], [255, 128, 0], [0, 0, 0], [255, 255, 0], [0, 0, 0], [255, 255, 0], [0, 0, 0]], [[255, 128, 0], [255, 128, 0], [255, 128, 0], [0, 0, 0], [255, 255, 0], [0, 0, 0], [255, 255, 0], [0, 0, 0]]],
[[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 255, 0], [255, 255, 0], [255, 255, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 255, 0], [255, 255, 0], [255, 255, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 255, 0], [255, 255, 0], [255, 255, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 0]], [[255, 255, 255], [255, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 0]], [[255, 128, 0], [255, 128, 0], [0, 0, 0], [0, 0, 0], [255, 255, 0], [0, 0, 0], [255, 255, 0], [0, 0, 0]], [[255, 128, 0], [255, 128, 0], [255, 128, 0], [0, 0, 0], [255, 255, 0], [0, 0, 0], [255, 255, 0], [0, 0, 0]]],
[[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 255, 0], [255, 255, 0], [255, 255, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 255, 0], [255, 255, 0], [255, 255, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 255, 0], [0, 0, 0], [255, 255, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 0]], [[255, 255, 255], [255, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 0]], [[255, 128, 0], [255, 128, 0], [0, 0, 0], [0, 0, 0], [255, 255, 0], [0, 0, 0], [255, 255, 0], [0, 0, 0]], [[255, 128, 0], [255, 128, 0], [255, 128, 0], [0, 0, 0], [255, 255, 0], [0, 0, 0], [255, 255, 0], [0, 0, 0]]],
[[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 255, 0], [255, 255, 0], [255, 255, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 255, 0], [255, 255, 0], [255, 255, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 255, 0], [0, 0, 0], [255, 255, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 0]], [[255, 255, 255], [255, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 0]], [[255, 128, 0], [255, 128, 0], [0, 0, 0], [0, 0, 0], [255, 255, 0], [0, 0, 0], [255, 255, 0], [0, 0, 0]], [[255, 128, 0], [255, 128, 0], [255, 128, 0], [0, 0, 0], [255, 255, 0], [0, 0, 0], [255, 255, 0], [0, 0, 0]]],
[[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 255, 0], [255, 255, 0], [255, 255, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 255, 0], [255, 255, 0], [255, 255, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 255, 0], [255, 255, 0], [255, 255, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 255]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 0]], [[255, 255, 255], [255, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 255], [0, 0, 255], [0, 0, 255], [0, 0, 0]], [[255, 128, 0], [255, 128, 0], [0, 0, 0], [0, 0, 0], [255, 255, 0], [0, 0, 0], [255, 255, 0], [0, 0, 0]], [[255, 128, 0], [255, 128, 0], [255, 128, 0], [0, 0, 0], [255, 255, 0], [0, 0, 0], [255, 255, 0], [0, 0, 0]]],
[[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]],
]
cp_twowrongs = [
[[[255, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 0, 0]], [[0, 0, 0], [255, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [255, 0, 0], [0, 0, 0], [0, 0, 0], [255, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 0, 0], [255, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 0, 0], [255, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [255, 0, 0], [0, 0, 0], [0, 0, 0], [255, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [255, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 0, 0], [0, 0, 0]], [[255, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 0, 0]]],
[[[255, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 0, 0]], [[0, 0, 0], [255, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [255, 0, 0], [0, 0, 0], [0, 0, 0], [255, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 0, 0], [255, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 0, 0], [255, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [255, 0, 0], [0, 0, 0], [0, 0, 0], [255, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [255, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 0, 0], [0, 0, 0]], [[255, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 0, 0]]],
[[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 255], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 255], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 255], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 255], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 255], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 255], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0]]],
[[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 255], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 255], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 255], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 255], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 255], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 255], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0]]],
[[[255, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 0, 0]], [[0, 0, 0], [255, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [255, 0, 0], [0, 0, 0], [0, 0, 0], [255, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 0, 0], [255, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 0, 0], [255, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [255, 0, 0], [0, 0, 0], [0, 0, 0], [255, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [255, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 0, 0], [0, 0, 0]], [[255, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 0, 0]]],
[[[255, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 0, 0]], [[0, 0, 0], [255, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [255, 0, 0], [0, 0, 0], [0, 0, 0], [255, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 0, 0], [255, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 0, 0], [255, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [255, 0, 0], [0, 0, 0], [0, 0, 0], [255, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [255, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 0, 0], [0, 0, 0]], [[255, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 0, 0]]],
[[[0, 0, 0], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 0, 0], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 0, 0], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]],
[[[0, 0, 0], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 0, 0], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 255], [0, 255, 255], [0, 255, 255], [0, 255, 255]], [[0, 0, 0], [0, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]],
[[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 0], [0, 0, 0], [0, 0, 0]], [[0, 255, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 255, 0], [0, 0, 0], [0, 255, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 255, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]],
[[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 0], [0, 0, 0], [0, 0, 0]], [[0, 255, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 255, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 255, 0], [0, 0, 0], [0, 255, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 255, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]],
]
CATCHPHRASES= [cp_boat,cp_cake,cp_twowrongs, cp_weather]
'''ANSWERS = dict ((name, eval(name)) for name in ['cp_weather','cp_boat','cp_twowrongs','cp_cake'])'''
ANSWERS = {"Two wrongs don't make a right": cp_twowrongs, "When the boat comes in":cp_boat,"have your cake and eat it":cp_cake,"under the weather":cp_weather}
'''uh.rotation(90)
def show_catchphrase():
picked = random.choice(ANSWERS.keys())
for i in range(1):
for x in ANSWERS[picked]:
uh.set_pixels(x)
uh.show()
time.sleep(0.25)
time.sleep(1)
return picked
show_catchphrase()'''
|
[
"jasper@Dads-Mac-mini.local"
] |
jasper@Dads-Mac-mini.local
|
520dfeb3d7c17d3b5bee718128491655c2cac4a8
|
79b13102627bcd0bcba2fd784fb0b1aeff74993e
|
/gapps/gspin/forms.py
|
4cb6e7514983201d32b3eca1b29e4dee9a7009af
|
[] |
no_license
|
gibeongideon/gspin_backend
|
c513869ae4cd1c51242fa38fd3024935fe3d4769
|
2333b5c34c24751796c0775ce2210ca6219e6554
|
refs/heads/master
| 2021-06-20T01:58:30.252945
| 2019-06-29T20:55:32
| 2019-06-29T20:55:32
| 194,445,074
| 0
| 0
| null | 2021-03-19T22:29:54
| 2019-06-29T20:16:13
|
Python
|
UTF-8
|
Python
| false
| false
| 1,192
|
py
|
from django import forms
from .models import Account, Balance, Bet, AccountTopUp, BetResults, GameResult, BetTime, CustomUser
class AccountForm(forms.ModelForm):
class Meta:
model = Account
fields = ['account_number', 'is_active', 'custom_user']
class BalanceForm(forms.ModelForm):
class Meta:
model = Balance
fields = ['balance', 'account']
class BetForm(forms.ModelForm):
class Meta:
model = Bet
fields = ['bet_amount', 'is_active', 'bet_choice', 'account', 'bet_time']
class AccountTopUpForm(forms.ModelForm):
class Meta:
model = AccountTopUp
fields = ['topup_amount', 'account_to_topup']
class BetResultsForm(forms.ModelForm):
class Meta:
model = BetResults
fields = ['name', 'bet_result']
class GameResultForm(forms.ModelForm):
class Meta:
model = GameResult
fields = ['game_results', 'bet_time_result']
class BetTimeForm(forms.ModelForm):
class Meta:
model = BetTime
fields = '__all__'
class CustomUserForm(forms.ModelForm):
class Meta:
model = CustomUser
fields = ['first_name', 'last_name', 'custom_user']
|
[
"kipngeno.gibeon@gmail.com"
] |
kipngeno.gibeon@gmail.com
|
77e238f1a0830212c73060732d78d2362591aba1
|
152683f2e06b417b561400e3a1229426268dbdec
|
/nn_semisupervised_resnet_18.py
|
3ed619eef504d6ec1f5adff9e6cfdb7fb48c9e54
|
[] |
no_license
|
bigsnarfdude/icedChainsaw
|
593159a5d013a167549569b4d92d0ad2a8da81a3
|
2158b8eaf0ccc870e444dbb74752dfd5b402e20f
|
refs/heads/master
| 2021-08-23T14:24:40.676524
| 2017-12-05T07:11:14
| 2017-12-05T07:11:14
| 113,145,376
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,877
|
py
|
import os
import sys
from itertools import chain
import numpy as np
import pandas as pd
import torchvision.models
import torch.nn.functional as F
import torch.optim as optim
from torch import nn
from torch.utils.data import DataLoader
from torchsample.callbacks import CSVLogger, LearningRateScheduler
from callbacks import ModelCheckpoint, SemiSupervisedUpdater
import sklearn.model_selection
import paths
import labels
import transforms
from datasets import KaggleAmazonUnsupervisedDataset, KaggleAmazonSemiSupervisedDataset, KaggleAmazonJPGDataset, mlb
from ModuleTrainer import ModuleTrainer
name = os.path.basename(sys.argv[0])[:-3]
def generate_model():
class MyModel(nn.Module):
def __init__(self, pretrained_model):
super(MyModel, self).__init__()
self.pretrained_model = pretrained_model
self.layer1 = pretrained_model.layer1
self.layer2 = pretrained_model.layer2
self.layer3 = pretrained_model.layer3
self.layer4 = pretrained_model.layer4
pretrained_model.avgpool = nn.AvgPool2d(8)
classifier = [
nn.Linear(pretrained_model.fc.in_features, 17),
]
self.classifier = nn.Sequential(*classifier)
pretrained_model.fc = self.classifier
def forward(self, x):
return self.pretrained_model(x)
return MyModel(torchvision.models.resnet18(pretrained=True))
random_state = 1
labels_df = labels.get_labels_df()
unsupervised_dataframe = pd.read_csv(paths.submissions + 'SOTA')
kf = sklearn.model_selection.KFold(n_splits=5, shuffle=True, random_state=random_state)
split = supervised_split = kf.split(labels_df)
unsupervised_split = kf.split(unsupervised_dataframe)
def train_net(train, val, unsupervised, model, name):
unsupervised_initialization = mlb.transform(unsupervised['tags'].str.split()).astype(np.float32)
unsupervised_samples = unsupervised['image_name'].as_matrix()
unsupervised_initialization = unsupervised_initialization[:len(unsupervised_initialization)//2*3]
unsupervised_samples = unsupervised_samples[:len(unsupervised_samples)//2*3]
transformations_train = transforms.apply_chain([
transforms.to_float,
transforms.augment_color(0.1),
transforms.random_fliplr(),
transforms.random_flipud(),
transforms.augment(),
torchvision.transforms.ToTensor()
])
transformations_val = transforms.apply_chain([
transforms.to_float,
torchvision.transforms.ToTensor()
])
dset_train_unsupervised = KaggleAmazonUnsupervisedDataset(
unsupervised_samples,
paths.test_jpg,
'.jpg',
transformations_train,
transformations_val,
unsupervised_initialization
)
dset_train_supervised = KaggleAmazonJPGDataset(train, paths.train_jpg, transformations_train, divide=False)
dset_train = KaggleAmazonSemiSupervisedDataset(dset_train_supervised, dset_train_unsupervised, None, indices=False)
train_loader = DataLoader(dset_train,
batch_size=128,
shuffle=True,
num_workers=10,
pin_memory=True)
dset_val = KaggleAmazonJPGDataset(val, paths.train_jpg, transformations_val, divide=False)
val_loader = DataLoader(dset_val,
batch_size=128,
num_workers=10,
pin_memory=True)
ignored_params = list(map(id, chain(
model.classifier.parameters(),
model.layer1.parameters(),
model.layer2.parameters(),
model.layer3.parameters(),
model.layer4.parameters()
)))
base_params = filter(lambda p: id(p) not in ignored_params,
model.parameters())
optimizer = optim.Adam([
{'params': base_params},
{'params': model.layer1.parameters()},
{'params': model.layer2.parameters()},
{'params': model.layer3.parameters()},
{'params': model.layer4.parameters()},
{'params': model.classifier.parameters()}
], lr=0, weight_decay=0.0005)
trainer = ModuleTrainer(model)
def schedule(current_epoch, current_lrs, **logs):
lrs = [1e-3, 1e-4, 0.5e-4, 1e-5, 0.5e-5]
epochs = [0, 1, 8, 12, 20]
for lr, epoch in zip(lrs, epochs):
if current_epoch >= epoch:
current_lrs[5] = lr
if current_epoch >= 2:
current_lrs[4] = lr * 1.0
current_lrs[3] = lr * 1.0
current_lrs[2] = lr * 1.0
current_lrs[1] = lr * 0.1
current_lrs[0] = lr * 0.05
return current_lrs
trainer.set_callbacks([
ModelCheckpoint(
paths.models,
name,
save_best_only=False,
saving_strategy=lambda epoch: True
),
CSVLogger(paths.logs + name),
LearningRateScheduler(schedule),
SemiSupervisedUpdater(trainer, dset_train_unsupervised, start_epoch=10, momentum=0.25)
])
trainer.compile(loss=nn.BCELoss(),
optimizer=optimizer)
trainer.fit_loader(train_loader,
val_loader,
nb_epoch=35,
verbose=1,
cuda_device=0)
if __name__ == "__main__":
for i, ((train_idx, val_idx), (train_idx_unsupervised, val_idx_unsupervised)) in enumerate(zip(supervised_split, unsupervised_split)):
name = os.path.basename(sys.argv[0])[:-3] + '-split_' + str(i)
train_net(
labels_df.ix[train_idx],
labels_df.ix[val_idx],
unsupervised_dataframe.ix[val_idx_unsupervised],
generate_model(),
name
)
|
[
"ohprecio@gmail.com"
] |
ohprecio@gmail.com
|
0c10142f92ee52bdd943118a422ce3fba97826ec
|
bd00ffb02dd4470447a6ba89c01c5bf57d3eb28f
|
/섹션 4/8. 침몰하는 타이타닉/AA.py
|
92be2cc4919c71837eb15e99390a00525f00f4db
|
[] |
no_license
|
yeong95/python_algorithm_inflearn
|
24930795882e34c31eb4d5e979533c3a578a3d09
|
d500f670253c56a8e62e779dde2e579a57d62266
|
refs/heads/master
| 2023-02-28T06:52:22.886154
| 2021-01-22T15:38:14
| 2021-01-22T15:38:14
| 326,430,497
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 501
|
py
|
import sys
import os
os.chdir(r'C:\Users\CHOYEONGKYU\Desktop\파이썬 알고리즘 문제풀이(코딩테스트 대비)\섹션 4\8. 침몰하는 타이타닉')
sys.stdin = open('input.txt', 'rt')
n,m = map(int, input().split())
a = list(map(int, input().split()))
a.sort(reverse=True)
cnt=0
while a!=[]:
pop = a.pop(0)
tmp=len(a)
for i in range(len(a)):
if pop+a[i] <= m:
cnt+=1
del a[i]
break
if len(a) == tmp:
cnt+=1
print(cnt)
|
[
"cykpig95@gmail.com"
] |
cykpig95@gmail.com
|
2cb43e485765050a7cd4e3a5fecc44b3c7d6801f
|
f2f8a2ba384dbe68861151cc15ab14c72774bb16
|
/evaluate.py
|
e70f24cc371808dc34147c117f9661afa0329a74
|
[
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
zphang/bert
|
06233c65298a97475bd4c0e4a0d649dcc5da8f48
|
12b4afc019d6da5e0c153608c3fd553c5e7466b1
|
refs/heads/master
| 2020-04-05T01:17:18.498275
| 2018-11-14T20:47:26
| 2018-11-14T20:47:26
| 156,430,554
| 0
| 0
| null | 2018-11-06T18:40:47
| 2018-11-06T18:40:46
| null |
UTF-8
|
Python
| false
| false
| 4,093
|
py
|
import argparse
import json
import pandas as pd
from sklearn.metrics import matthews_corrcoef, f1_score
from scipy.stats import pearsonr, spearmanr
import run_classifier
PROCESSORS = {
"cola": run_classifier.ColaProcessor,
"sst": run_classifier.SstProcessor,
"mrpc": run_classifier.MrpcProcessor,
"stsb": run_classifier.StsbProcessor,
"qqp": run_classifier.QqpProcessor,
"mnli": run_classifier.MnliProcessor,
"qnli": run_classifier.QnliProcessor,
"rte": run_classifier.RteProcessor,
"xnli": run_classifier.XnliProcessor,
"snli": run_classifier.SnliProcessor,
"bcs": run_classifier.BcsProcessor,
}
OUTPUT_MODES = {
"cola": "classification",
"sst": "classification",
"mrpc": "classification",
"stsb": "regression",
"qqp": "classification",
"mnli": "classification",
"qnli": "classification",
"rte": "classification",
"xnli": "classification",
"snli": "classification",
"bcs": "classification",
}
def simple_accuracy(pred_srs, label_srs):
return (pred_srs == label_srs).mean()
def acc_and_f1(pred_srs, label_srs):
acc = simple_accuracy(pred_srs, label_srs)
f1 = f1_score(y_true=label_srs, y_pred=pred_srs)
return {
"acc": acc,
"f1": f1,
"acc_and_f1": (acc + f1) / 2,
}
def pearson_and_spearman(pred_srs, label_srs):
pearson_corr = pearsonr(pred_srs, label_srs)[0]
spearman_corr = spearmanr(pred_srs, label_srs)[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def compute_metrics(task_name, pred_srs, label_srs):
assert len(pred_srs) == len(label_srs)
if task_name == "cola":
return {"mcc": matthews_corrcoef(label_srs, pred_srs)}
elif task_name == "sst":
return {"acc": simple_accuracy(pred_srs, label_srs)}
elif task_name == "mrpc":
return acc_and_f1(pred_srs, label_srs)
elif task_name == "stsb":
return pearson_and_spearman(pred_srs, label_srs)
elif task_name == "qqp":
return acc_and_f1(pred_srs, label_srs)
elif task_name == "mnli":
return {"acc": simple_accuracy(pred_srs, label_srs)}
elif task_name == "qnli":
return {"acc": simple_accuracy(pred_srs, label_srs)}
elif task_name == "rte":
return {"acc": simple_accuracy(pred_srs, label_srs)}
else:
raise KeyError(task_name)
def load_labels(task_name, data_dir):
processor = PROCESSORS[task_name]()
examples = processor.get_dev_examples(data_dir)
label2idx = {label: num for (num, label) in enumerate(processor.get_labels())}
label_srs = pd.Series([label2idx[example.label] for example in examples])
return label_srs
def load_preds(task_name, pred_file_path):
pred_df = pd.read_csv(pred_file_path, header=None, sep="\t")
output_mode = OUTPUT_MODES[task_name]
if output_mode == "classification":
pred_srs = pred_df.idxmax(axis=1)
elif output_mode == "regression":
pred_srs = pred_df[:, 0]
else:
raise KeyError(output_mode)
return pred_srs
def compute_metrics_from_paths(task_name, pred_file_path, task_data_dir):
pred_srs = load_preds(task_name, pred_file_path)
label_srs = load_labels(task_name, task_data_dir)
return compute_metrics(task_name, pred_srs, label_srs)
def main():
parser = argparse.ArgumentParser(description='evaluation')
parser.add_argument('--task-name', required=True)
parser.add_argument('--pred-file-path', required=True)
parser.add_argument('--task-data-dir', required=True)
parser.add_argument('--no-print', action="store_true")
parser.add_argument('--output-path', required=False, default=None)
args = parser.parse_args()
metrics = compute_metrics_from_paths(args.task_name, args.pred_file_path, args.task_data_dir)
if not args.no_print:
print(metrics)
if args.output_path is not None:
with open(args.output_path, "w") as f:
f.write(json.dumps(metrics, indent=2))
if __name__ == "__main__":
main()
|
[
"email@jasonphang.com"
] |
email@jasonphang.com
|
a831882f7ba810e8c2bcbb7e4e321ee9f6afae0a
|
b68f83bfa9df1a9d1f5a5beda60e56742c2b9308
|
/dev/backend/database.py
|
f5908982106106bdb356fda138f552f82cdbdf05
|
[] |
no_license
|
amolsurve20/gradscout
|
1a6be2225735473e3e8709b40430337f51870903
|
d424c3ea675f994ff3a260ff81e84c92a3a28b9c
|
refs/heads/master
| 2020-11-29T14:51:41.242670
| 2017-04-06T01:33:20
| 2017-04-06T01:33:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,187
|
py
|
#!/usr/bin/env python
"""Firebase Database Interaction Module."""
from firebase import firebase
class Firebase(object):
"""Class for Accessing Firebase Database."""
def __init__(self):
"""Initialize Firebase Object for Database Access."""
self.firebase = firebase.FirebaseApplication(
'https://gradscout-40fed.firebaseio.com/', None)
def get_detailed_program(self, program_id):
"""Get the Detailed Program Details for a given Program ID.
Args:
program_id (int): Unique ID for Program
Returns:
program_dict (dict): Python Dict of Complete Program Details
"""
endpoint = "/programs/" + str(program_id)
result = self.firebase.get(endpoint, None)
return result
def get_program_count(self):
"""Get the total count of programs available in the database.
Returns:
count (int): Total count of the programs in the database
"""
endpoint = "/programs/"
result = self.firebase.get(endpoint, None)
count = len(result)
return count
def get_program_research(self, program_id):
"""Get the Program Research Details for a given Program ID.
Args:
program_id (int): Unique ID for Program
Returns:
research_dict (dict): Python Dict of Research Details
"""
endpoint = "/programs/" + str(program_id) + '/research'
result = self.firebase.get(endpoint, None)
return result
def get_program_admission_rate(self, program_id):
"""Get the Admission Rate for a given Program ID.
Args:
program_id (int): Unique ID for Program
Returns:
admission_rate (int): Admission Rate in Percentage
"""
endpoint = "/programs/" + str(program_id) + '/admission_rate'
result = self.firebase.get(endpoint, None)
return result
def get_program_fees(self, program_id):
"""Get the Program Fees for a given Program ID.
Args:
program_id (int): Unique ID for Program
Returns:
fees_dict (dict): Python Dict of Program Fees
"""
endpoint = "/programs/" + str(program_id) + '/fees'
result = self.firebase.get(endpoint, None)
return result
def get_program_acad(self, program_id):
"""Get the Academic Requirements for a given Program ID.
Args:
program_id (int): Unique ID for Program
Returns:
acad_dict (dict): Python Dict of Academic Requirement Details
"""
endpoint = "/programs/" + str(program_id) + '/academic_requirements'
result = self.firebase.get(endpoint, None)
return result
def get_program_living(self, program_id):
"""Get the Living Expenses for a given Program ID.
Args:
program_id (int): Unique ID for Program
Returns:
program_dict (dict): Python Dict of Living Expense Details
"""
endpoint = "/programs/" + str(program_id) + '/living_expenditure'
result = self.firebase.get(endpoint, None)
return result
def get_program_location(self, program_id):
"""Get the Location Details of a given Program ID.
Returns:
location_dict (dict): Python Dict of Location Details
"""
endpoint = "/programs/" + str(program_id) + '/location'
result = self.firebase.get(endpoint, None)
count = len(result)
return count
def get_program_ownership(self, program_id):
"""Get the Ownership Details for a given Program ID.
Args:
program_id (int): Unique ID for Program
Returns:
ownership_dict (dict): Python Dict of Ownership Details
"""
endpoint = "/programs/" + str(program_id) + '/ownership'
result = self.firebase.get(endpoint, None)
return result
def get_program_details(self, program_id):
"""Get the Program Details for a given Program ID.
Args:
program_id (int): Unique ID for Program
Returns:
program_dict (dict): Python Dict of Program Details
"""
endpoint = "/programs/" + str(program_id) + '/program'
result = self.firebase.get(endpoint, None)
return result
def get_program_university(self, program_id):
"""Get the University Details for a given Program ID.
Args:
program_id (int): Unique ID for Program
Returns:
program_dict (dict): Python Dict of University Details
"""
endpoint = "/programs/" + str(program_id) + '/university'
result = self.firebase.get(endpoint, None)
return result
def get_program_rank(self, program_id):
"""Get the Rank Details for a given Program ID.
Args:
program_id (int): Unique ID for Program
Returns:
program_dict (dict): Python Dict of Rank Details
"""
endpoint = "/programs/" + str(program_id) + '/rank'
result = self.firebase.get(endpoint, None)
return result
|
[
"b.s.sanathkumar@gmail.com"
] |
b.s.sanathkumar@gmail.com
|
1921b628faebff60e71ddc2dd02c19109b656a35
|
63cc32b3eacb6695449e3568472d33a1605b8e2d
|
/liga_asobal/migrations/0002_auto_20170129_1609.py
|
3bb0470e6d0b04adc4ddc9d6fa79753faffe7dad
|
[] |
no_license
|
i32gamad/liga_asobal
|
5646ba782d25265084aa40d7b468a4714afae797
|
b51c8a8902d78d8b69ac521e912dd8fe0aed97a8
|
refs/heads/master
| 2021-01-23T07:51:12.676304
| 2017-01-31T12:15:00
| 2017-01-31T12:15:00
| 80,517,722
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 453
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-01-29 16:09
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('liga_asobal', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='equipo',
name='escudo',
field=models.ImageField(upload_to='static/img/'),
),
]
|
[
"i32gamad@uco.es"
] |
i32gamad@uco.es
|
a9b548c471b2e055a350b837dc89d441271f681e
|
3597ecf8a014dbd6f7d998ab59919a94aff8011d
|
/api-web/src/www/application/modules/page/components.py
|
634a4e4b5fd6f05c5af1356a2218dfbb8e41d334
|
[] |
no_license
|
duytran92-cse/nas-genomebrowser
|
f42b8ccbb7c5245bde4e52a0feed393f4b5f6bf1
|
d0240ad5edc9cfa8e7f89db52090d7d733d2bb8a
|
refs/heads/master
| 2022-10-24T05:26:01.760241
| 2020-06-14T19:01:35
| 2020-06-14T19:01:35
| 272,264,694
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,554
|
py
|
from application.models import *
class PageHelper(object):
def load_associated_publications(self, id):
page = Page.objects.get(pk=id)
# Get associtaed pubs
publications = []
rows = PageAssociatedPublication.objects.filter(page_id=page.id).all()
if rows:
for r in rows:
publications.append({
'id': r.id,
'pmid': r.pmid,
'doi': r.doi,
'pmc': r.pmc,
'title': r.title,
'authors': r.authors,
'journal': r.journal
})
return publications
def load_associated_genes(self, id):
page = Page.objects.get(pk=id)
# Get associated genes
genes = []
rows = PageAssociatedGene.objects.filter(page_id=page.id).all()
if rows:
for r in rows:
genes.append({
'id': r.id,
'gene_name': r.gene_name,
})
return genes
def load_associated_diseases(self, id):
page = Page.objects.get(pk=id)
# Get associated diseases
diseases = []
rows = PageAssociatedDisease.objects.filter(page_id=page.id).all()
if rows:
for r in rows:
diseases.append({
'id': r.id,
'disease_name': r.disease_name,
})
return diseases
|
[
"thanh.tran@etudiant.univ-lr.fr"
] |
thanh.tran@etudiant.univ-lr.fr
|
12be76ee075152e9b002246390f3443fa75f7f06
|
504d4814971735c3bc84d577521404c4616bca88
|
/multithreading/queue_1.py
|
226192454b0c20bc89fe9cdfcd5e47adb5af1ef7
|
[] |
no_license
|
caaden/python
|
203a6fec07b3993345376c2ab60246765e3d7f32
|
cf89a82331d14abbb376b5d199d546042e1f4f4b
|
refs/heads/master
| 2023-03-17T15:08:28.071076
| 2021-02-28T23:39:35
| 2021-02-28T23:39:35
| 267,114,193
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,368
|
py
|
#!/usr/bin/python3
import queue
import threading
import time
exitFlag = 0
class myThread (threading.Thread):
def __init__(self, threadID, name, q):
threading.Thread.__init__(self)
self.threadID = threadID
self.name = name
self.q = q
def run(self):
print ("Starting " + self.name)
process_data(self.name, self.q)
print ("Exiting " + self.name)
def process_data(threadName, q):
while not exitFlag:
queueLock.acquire()
if not workQueue.empty():
data = q.get()
queueLock.release()
print ("%s processing %s" % (threadName, data))
else:
queueLock.release()
time.sleep(1)
threadList = ["Thread-1", "Thread-2", "Thread-3"]
nameList = ["One", "Two", "Three", "Four", "Five", "Six", "Seven", "Eight","Nine"]
queueLock = threading.Lock()
workQueue = queue.Queue(10)
threads = []
threadID = 1
# Create new threads
for tName in threadList:
thread = myThread(threadID, tName, workQueue)
thread.start()
threads.append(thread)
threadID += 1
# Fill the queue
queueLock.acquire()
for word in nameList:
workQueue.put(word)
queueLock.release()
# Wait for queue to empty
while not workQueue.empty():
pass
# Notify threads it's time to exit
exitFlag = 1
# Wait for all threads to complete
for t in threads:
t.join()
print ("Exiting Main Thread")
|
[
"caden@fringeai.com"
] |
caden@fringeai.com
|
194b6313d4b7c20adfb20c53de2ca19ab668783f
|
91eba4efb54b28722ccbf7f8af8ee37ac4d57fd5
|
/transformer.py
|
d0f92ff2a279eb59d84c87efb230aba2f6c1cd42
|
[] |
no_license
|
h2012c106/create_table_transformer
|
c4537fe80c9a084665a788ba1b98cd8bd1a1c78c
|
7e0e92dee362ba3616f64c8736aaff5f11323b91
|
refs/heads/master
| 2020-12-02T15:18:47.484966
| 2019-12-31T08:10:07
| 2019-12-31T08:10:07
| 231,046,883
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,617
|
py
|
# -*- coding: UTF-8 -*-
import os
import jpype
import generator
import collections
def _get_jars_path(lib_path):
res = []
for file_name in os.listdir(lib_path):
if os.path.splitext(file_name)[1] == '.jar':
res.append(lib_path + file_name)
return [os.path.abspath(path) for path in res]
class JavaEnv:
def __init__(self):
self.java_lib_path = './lib/'
def _pre_transform(self):
jvm_path = jpype.getDefaultJVMPath()
jars_path = _get_jars_path(self.java_lib_path)
jvm_arg = "-Djava.class.path={jars_path}".format(jars_path=":".join(jars_path))
if jpype.isJVMStarted():
jpype.shutdownJVM()
jpype.startJVM(jvm_path, jvm_arg, convertStrings=True)
def _set_init_class(self):
assert False, 'No inherit!'
def _transform(self, create_table):
assert False, 'No inherit!'
def _validate(self, create):
assert False, 'No inherit!'
def _post_transform(self):
jpype.shutdownJVM()
def main(self, create_table_list):
self._pre_transform()
self._set_init_class()
try:
res = []
for create_table in create_table_list:
create, sql = self._transform(create_table)
res.append((sql, self._validate(create)))
return res
finally:
self._post_transform()
class Transformer(JavaEnv):
def __init__(self, config_file):
JavaEnv.__init__(self)
self._config_file = config_file
def _set_init_class(self):
self._statement_parser_class = jpype.JClass('com.alibaba.druid.sql.dialect.mysql.parser.MySqlStatementParser')
self._row_class = jpype.JClass('com.alibaba.druid.sql.ast.statement.SQLColumnDefinition')
self._key_class = jpype.JClass('com.alibaba.druid.sql.dialect.mysql.ast.MySqlKey')
self._row_generator = generator.RowGenerator(self._config_file)
self._key_generator = generator.KeyGenerator(self._config_file)
def _transform(self, create_table):
parser = self._statement_parser_class(create_table)
create = parser.parseCreateTable()
create_detail_list = create.getTableElementList()
# deal with key and row
row_list = []
key_list = []
for create_detail in create_detail_list:
if isinstance(create_detail, self._row_class) \
and create_detail.getNameAsString() not in self._row_generator.get_deprecated_rows():
row_list.append(create_detail)
elif isinstance(create_detail, self._key_class) and self._key_generator.use_old_key():
key_list.append(create_detail)
row_list += self._row_generator.get_additional_rows(create)
key_list += self._key_generator.get_new_keys(create)
# deal with auto increment
self._row_generator.disable_autoincrement(create, row_list)
# deal with pk
key_list = self._key_generator.primary_to_unique(key_list)
create_detail_list.clear()
for row in row_list:
create_detail_list.add(row)
for key in key_list:
create_detail_list.add(key)
# deal with table drop
# choosing either of two generator is okay
if self._row_generator.need_overwrite_table():
sql = u'DROP TABLE IF EXISTS {table_name};\n{create_sql};'.format(table_name=create.getName().toString(),
create_sql=create.toString())
else:
# not my spell error
create.setIfNotExiists(True)
sql = u'{create_sql};'.format(create_sql=create.toString())
return create, sql
def _find_duplicate_list(self, src_list):
counter = dict(collections.Counter(src_list))
return [key for key, value in counter.items() if value > 1]
def _validate_row_duplicate(self, create):
create_detail_list = create.getTableElementList()
row_list = [detail for detail in create_detail_list if isinstance(detail, self._row_class)]
row_name_duplicate_list = self._find_duplicate_list([row.getNameAsString() for row in row_list])
if len(row_name_duplicate_list) == 0:
return True, None
else:
return False, 'duplicate row name: {li}'.format(li=row_name_duplicate_list)
def _validate_key_duplicate(self, create):
create_detail_list = create.getTableElementList()
key_list = [detail for detail in create_detail_list if isinstance(detail, self._key_class)]
key_name_duplicate_list = self._find_duplicate_list(
[key.getName().toString() for key in key_list if key.getName() is not None])
if len(key_name_duplicate_list) == 0:
return True, None
else:
return False, 'duplicate key name: {li}'.format(li=key_name_duplicate_list)
def _validate_row_in_key(self, create):
create_detail_list = create.getTableElementList()
row_name_set = set()
for detail in create_detail_list:
if isinstance(detail, self._row_class):
row_name_set.add(detail.getNameAsString())
err_key_dict = {}
for detail in create_detail_list:
if isinstance(detail, self._key_class):
key_name = detail.getName().toString() if detail.getName() is not None else 'PRIMARY KEY'
relative_row_set = set([row.getExpr().toString() for row in detail.getColumns()])
additional_row_set = relative_row_set - row_name_set
if len(additional_row_set) > 0:
err_key_dict[key_name] = list(additional_row_set)
if len(err_key_dict) == 0:
return True, None
else:
return False, ', '.join(
['key {key} contains nonexistent row: {row_list}'.format(key=key, row_list=row_list) for key, row_list
in
err_key_dict.items()])
def _validate(self, create):
validate_list = [self._validate_row_duplicate, self._validate_key_duplicate, self._validate_row_in_key]
success = True
msg = []
for validate in validate_list:
tmp_success, tmp_msg = validate(create)
success &= tmp_success
if not tmp_success:
msg.append(tmp_msg)
return success, '{table} error: {msg}'.format(table=create.getName().toString(),
msg=' and '.join(msg)), create.getName().toString()
|
[
"hanxianghuang@xiaohongshu.com"
] |
hanxianghuang@xiaohongshu.com
|
06428f4907937b0f95ba7ebdd85921a3835fb9a1
|
d733f3d915dd109f233f49240932e86c5f58b343
|
/Day23/Day23-Part1.py
|
1cee62e61b7e7c59178ed7913120e517eeb774e2
|
[] |
no_license
|
MichaelShoemaker/AdventOfCode-2020
|
90942c55fe91dd025a3dcd5db284aa39ef629f7f
|
311ef62b69c8da820258ca8bf47ed93f406be074
|
refs/heads/main
| 2023-02-03T19:48:45.512523
| 2020-12-23T20:36:37
| 2020-12-23T20:36:37
| 318,268,017
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,154
|
py
|
import time
import typing
def make_cups(file):
cups = open(file,'r').read()
return [int(c) for c in cups if c != '\n']
def play(cups: list, turns: int) -> list:
counter = 0
for x in range(0,turns):
## print(f"-- move {x+1} --")
## print(f"cups: {cups}")
#current cup
if counter == 0:
current_cup = cups[0]
elif (cups.index(current_cup) + 1) >= len(cups):
current_cup = cups[(cups.index(last_cup)+1)%len(cups)]
else:
current_cup = cups[cups.index(last_cup)+1]
## print(f"current cup: {current_cup}")
#pick up next three into a list
if cups.index(current_cup) == len(cups)-1:
pick_up = cups[0:3]
elif cups.index(current_cup) == len(cups)-2:
pick_up = [cups[-1]] + cups[0:2]
elif cups.index(current_cup) == len(cups)-3:
pick_up = cups[-2:] + [cups[0]]
else:
first = cups.index(current_cup)+1
last = first+3
pick_up = cups[first:last]
## print(f"pick up: {pick_up}")
#Remove the picked up cups from the list
for i in pick_up:
cups.remove(i)
next_cup = current_cup -1
if next_cup == 0:
next_cup = max(cups)
#Make sure it isn't a picked up cup
while next_cup in pick_up:
lowest = min(cups)
highest = max(cups)
next_cup -= 1
if next_cup < lowest:
next_cup = highest
break
## print(f"destination: {next_cup}")
#Get the index of the next cup
index = cups.index(next_cup)
for i in range(1,4):
if index +1 > len(cups):
cups.insert(index+i % len(cups)-1, pick_up[i-1])
else:
cups.insert(index+i,pick_up[i-1])
## print(cups)
#time.sleep(1)
last_cup = current_cup
counter += 1
return cups
if __name__=='__main__':
print(play(make_cups('input.txt'), 100))
|
[
"noreply@github.com"
] |
MichaelShoemaker.noreply@github.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.