blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a2ebd1a75d40d5243131985f9e179274ccf490e4 | e3efa07f9d804b7cb65ff79034f5565953f369df | /day12.py | c7105577cb87ead3b6059b6f7b882ff7dfdab838 | [] | no_license | fursovia/adventofcode2020 | decb277b7eedbbf5229e8c2d80dfe74bede4d234 | 8d65174fc6908736cc5609ea3115eda9d9586f63 | refs/heads/main | 2023-01-30T21:35:17.927959 | 2020-12-13T17:33:55 | 2020-12-13T17:33:55 | 317,532,322 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,974 | py | from dataclasses import dataclass
from typing import List, Tuple
from collections import OrderedDict
RAW = """F10
N3
F7
R90
F11"""
CARDINAL_MAPPING = OrderedDict(
{
"N": (0, +1),
"E": (-1, 0),
"S": (0, -1),
"W": (+1, 0),
}
)
DIRECTIONS = list(CARDINAL_MAPPING.values())
@dataclass
class Navigation:
    """State machine for the two ship-navigation modes of AoC 2020 day 12.

    ``move``  — part 1: N/S/E/W translate the ship, L/R rotate its heading,
                F moves along the current heading.
    ``move2`` — part 2: N/S/E/W/L/R manipulate a waypoint relative to the
                ship, F moves the ship toward the waypoint ``value`` times.

    Coordinates use the mirrored axis convention of CARDINAL_MAPPING
    (x grows westward, y northward); see the module-level comment there.
    """

    # Raw instruction strings, e.g. "F10", "R90".
    actions: List[str]
    # Ship starts facing East ((-1, 0) in this file's mirrored convention).
    current_direction: Tuple[int, int] = (-1, 0)
    current_position: Tuple[int, int] = (0, 0)
    # Part-2 waypoint: 10 units east, 1 unit north of the ship.
    waypoint: Tuple[int, int] = (-10, 1)

    def move(self):
        """Execute all actions using the part-1 (heading-based) rules."""
        for action in self.actions:
            value = int(action[1:])
            if action.startswith("R") or action.startswith("L"):
                self.change_direction(action)
                continue
            elif action.startswith("F"):
                # Forward: translate along the current heading.
                direction = self.current_direction
            else:
                # N/S/E/W: translate along that compass heading.
                direction = CARDINAL_MAPPING[action[0]]
            to_step = tuple(di * value for di in direction)
            self.current_position = tuple(map(sum, zip(self.current_position, to_step)))

    def move2(self):
        """Execute all actions using the part-2 (waypoint-based) rules."""
        for action in self.actions:
            value = int(action[1:])
            if action.startswith("F"):
                # Move the ship `value` times the waypoint offset.
                to_step = tuple(di * value for di in self.waypoint)
                self.current_position = tuple(map(sum, zip(self.current_position, to_step)))
            else:
                self.change_waypoint_position(action)

    def change_waypoint_position(self, action: str):
        """Rotate (L/R, multiples of 90°) or translate (N/S/E/W) the waypoint."""
        value = int(action[1:])
        if action.startswith("R"):
            # One 90° step per iteration: (x, y) -> (-y, x).  With this
            # file's mirrored x axis this matches the puzzle's clockwise
            # turn (validated by the sample assertion == 286 below).
            for _ in range(value // 90):
                self.waypoint = -self.waypoint[1], self.waypoint[0]
        elif action.startswith("L"):
            # Inverse rotation: (x, y) -> (y, -x).
            for _ in range(value // 90):
                self.waypoint = self.waypoint[1], -self.waypoint[0]
        else:
            # Translate the waypoint along a compass heading.
            direction = CARDINAL_MAPPING[action[0]]
            to_step = tuple(di * value for di in direction)
            self.waypoint = tuple(map(sum, zip(self.waypoint, to_step)))

    def change_direction(self, action: str):
        """Turn the ship's heading by a multiple of 90° (part-1 L/R actions)."""
        turn, degrees = action[0], int(action[1:])
        assert turn == "L" or turn == "R"
        num_rotations = int(degrees / 90)
        if turn == "L":
            # Left turns step backwards through the clockwise DIRECTIONS list.
            num_rotations = -num_rotations
        current_index = DIRECTIONS.index(self.current_direction)
        self.current_direction = DIRECTIONS[(current_index + num_rotations) % 4]

    def get_distance(self) -> int:
        """Manhattan distance of the ship from the origin (the puzzle answer)."""
        return abs(self.current_position[0]) + abs(self.current_position[1])
# Sanity-check part 1 against the worked sample (expected distance 25).
navigation = Navigation(RAW.split("\n"))
navigation.move()
assert navigation.get_distance() == 25

# Sanity-check part 2 against the worked sample (expected distance 286).
navigation = Navigation(RAW.split("\n"))
navigation.move2()
assert navigation.get_distance() == 286

# Real puzzle input; one instruction per line.
with open("data/day12.txt") as f:
    data = f.read().split("\n")

# Part 1 answer.
navigation = Navigation(data)
navigation.move()
distance = navigation.get_distance()
print(distance)

# Part 2 answer (fresh Navigation so part-1 state doesn't leak in).
navigation = Navigation(data)
navigation.move2()
distance = navigation.get_distance()
print(distance)
| [
"fursov.ia@gmail.com"
] | fursov.ia@gmail.com |
6433092cbee060d537b5cb9919a76a1ec7c5ab85 | 683b73e0c95c755a08e019529aed3ff1a8eb30f8 | /machina/apps/forum_conversation/forum_attachments/admin.py | de1995638c922ddee9447fdc8ec8937ae0ebd484 | [
"BSD-3-Clause"
] | permissive | DrJackilD/django-machina | b3a7be9da22afd457162e0f5a147a7ed5802ade4 | 76858921f2cd247f3c1faf4dc0d9a85ea99be3e1 | refs/heads/master | 2020-12-26T08:19:09.838794 | 2016-03-11T03:55:25 | 2016-03-11T03:55:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 492 | py | # -*- coding: utf-8 -*-
# Standard library imports
# Third party imports
from django.contrib import admin
# Local application / specific library imports
from machina.core.db.models import get_model
# Resolve the (swappable) Attachment model through machina's registry
# rather than importing it directly.
Attachment = get_model('forum_attachments', 'Attachment')


class AttachmentAdmin(admin.ModelAdmin):
    """Django admin configuration for forum post attachments."""

    # Columns shown in the changelist; id/post/comment link to the edit page.
    list_display = ('id', 'post', 'comment', 'file', )
    list_display_links = ('id', 'post', 'comment', )
    # Raw-id widget avoids rendering a huge <select> of posts.
    raw_id_fields = ('post', )


admin.site.register(Attachment, AttachmentAdmin)
| [
"morgan.aubert@zoho.com"
] | morgan.aubert@zoho.com |
bdcc367d50d850b9415469d0b80cd63c73ec7513 | 3a6a211ea0d32405497fbd6486c490bb147e25f9 | /third_party/webtest/webtest/http.py | 890ef96ff90e6543783dd70d2c2b7b0768054a2f | [
"BSD-3-Clause",
"MIT"
] | permissive | catapult-project/catapult | e2cbdd5eb89f3b1492fc8752494e62ea1df4bae0 | 53102de187a48ac2cfc241fef54dcbc29c453a8e | refs/heads/main | 2021-05-25T07:37:22.832505 | 2021-05-24T08:01:49 | 2021-05-25T06:07:38 | 33,947,548 | 2,032 | 742 | BSD-3-Clause | 2022-08-26T16:01:18 | 2015-04-14T17:49:05 | HTML | UTF-8 | Python | false | false | 4,240 | py | # -*- coding: utf-8 -*-
"""
This module contains some helpers to deal with the real http
world.
"""
import threading
import logging
import select
import socket
import time
import os
import six
import webob
from six.moves import http_client
from waitress.server import TcpWSGIServer
def get_free_port():
    """Return a ``(host, port)`` pair usable for a local test server.

    The port is an ephemeral one handed out by the kernel for a throwaway
    bind; the host comes from the ``WEBTEST_SERVER_BIND`` environment
    variable, defaulting to ``127.0.0.1``.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind(('', 0))
    # getsockname() -> (address, port); only the port is kept.
    port = sock.getsockname()[1]
    sock.close()
    host = os.environ.get('WEBTEST_SERVER_BIND', '127.0.0.1')
    return host, port
def check_server(host, port, path_info='/', timeout=3, retries=30):
    """Poll ``host:port`` with GET requests until the server answers.

    Returns the HTTP status code of the first successful response, or 0
    when ``retries`` is negative or every attempt fails.  Sleeps 0.3s
    before the first attempt and after each failed one.
    """
    if retries < 0:
        return 0
    time.sleep(.3)
    for _ in range(retries):
        try:
            conn = http_client.HTTPConnection(host, int(port), timeout=timeout)
            conn.request('GET', path_info)
            return conn.getresponse().status
        except (socket.error, http_client.HTTPException):
            # Server not up yet (refused, reset, bad response): back off.
            time.sleep(.3)
    return 0
class StopableWSGIServer(TcpWSGIServer):
    """StopableWSGIServer is a TcpWSGIServer which run in a separated thread.

    This allow to use tools like casperjs or selenium.

    Server instance have an ``application_url`` attribute formated with the
    server host and port.
    """

    # Class-level flag; set (per instance) by shutdown() so run() can tell
    # an intentional stop apart from a genuine select error.
    was_shutdown = False

    def __init__(self, application, *args, **kwargs):
        # The server serves self.wrapper, which delegates to the real app.
        super(StopableWSGIServer, self).__init__(self.wrapper, *args, **kwargs)
        self.runner = None  # background thread, set by create()
        self.test_app = application
        self.application_url = 'http://%s:%s/' % (self.adj.host, self.adj.port)

    def wrapper(self, environ, start_response):
        """Wrap the wsgi application to override some path:

        ``/__application__``: allow to ping the server.

        ``/__file__?__file__={path}``: serve the file found at ``path``
        """
        if '__file__' in environ['PATH_INFO']:
            req = webob.Request(environ)
            resp = webob.Response()
            resp.content_type = 'text/html; charset=UTF-8'
            filename = req.params.get('__file__')
            if os.path.isfile(filename):
                # NOTE(review): file handle is never closed explicitly;
                # relies on GC.  Rewrite absolute localhost URLs so the
                # served file points back at this server's host.
                body = open(filename, 'rb').read()
                body = body.replace(six.b('http://localhost/'),
                                    six.b('http://%s/' % req.host))
                resp.body = body
            else:
                resp.status = '404 Not Found'
            return resp(environ, start_response)
        elif '__application__' in environ['PATH_INFO']:
            # Health-check endpoint used by wait()/check_server.
            return webob.Response('server started')(environ, start_response)
        return self.test_app(environ, start_response)

    def run(self):
        """Run the server"""
        try:
            self.asyncore.loop(.5, map=self._map)
        except select.error:  # pragma: no cover
            # Expected when shutdown() tears the sockets down under us;
            # anything else is a real error and is re-raised.
            if not self.was_shutdown:
                raise

    def shutdown(self):
        """Shutdown the server"""
        # avoid showing traceback related to asyncore
        self.was_shutdown = True
        self.logger.setLevel(logging.FATAL)
        # Close every registered channel until the asyncore map drains.
        while self._map:
            triggers = list(self._map.values())
            for trigger in triggers:
                trigger.handle_close()
        self.maintenance(0)
        self.task_dispatcher.shutdown()
        return True

    @classmethod
    def create(cls, application, **kwargs):
        """Start a server to serve ``application``. Return a server
        instance."""
        # Caller-supplied host/port win; otherwise grab a free ephemeral one.
        host, port = get_free_port()
        if 'port' not in kwargs:
            kwargs['port'] = port
        if 'host' not in kwargs:
            kwargs['host'] = host
        if 'expose_tracebacks' not in kwargs:
            kwargs['expose_tracebacks'] = True
        server = cls(application, **kwargs)
        # Daemon thread: won't keep the test process alive on exit.
        server.runner = threading.Thread(target=server.run)
        server.runner.daemon = True
        server.runner.start()
        return server

    def wait(self, retries=30):
        """Wait until the server is started"""
        running = check_server(self.adj.host, self.adj.port,
                               '/__application__', retries=retries)
        if running:
            return True
        try:
            self.shutdown()
        finally:
            # NOTE(review): `return` inside `finally` swallows any exception
            # raised by shutdown(); kept as-is to preserve behavior.
            return False
| [
"qyearsley@google.com"
] | qyearsley@google.com |
805e86f1d2bd1c867967768dadacd89d4c06a759 | 8823a9b97780bdf5f92c2ef3bfab28ea5a95abc0 | /problems/strings/tests/test_substrings.py | 434a5384b114cf584fe7f903cd441b2004d71c0d | [] | no_license | castlemilk/puzzles | 1ef2f9b7d0707e821869064b78a57691f2ba7a0a | 4cdc757fd14a444abe48254a0ecc8b04ca4b9552 | refs/heads/master | 2022-12-15T05:19:51.818193 | 2018-03-07T10:32:36 | 2018-03-07T10:32:36 | 122,824,563 | 0 | 0 | null | 2022-12-07T23:46:49 | 2018-02-25T10:07:36 | Python | UTF-8 | Python | false | false | 190 | py | from problems.strings import substrings
def test_permutations1():
    """is_permutation: same character multiset => True, otherwise False."""
    # Different multisets ('abcd' vs 'aaaaa'): not permutations.
    assert substrings.is_permutation('abcd','aaaaa') is False
    # Same characters, different order: a valid permutation.
    assert substrings.is_permutation('abcd', 'dcab') is True
"ben.ebsworth@gmail.com"
] | ben.ebsworth@gmail.com |
e2ded22e10d7c8fdba1a33dd11b5e8581b59afb5 | 16158ba3a3d94569498d51ab03ac19d7444b0349 | /kg/Recommender-System-master/Recommender_System/algorithm/KGCN/gpu_memory_growth.py | 9a808bd2987cfa7e6fb19015613b64193b9534f9 | [
"MIT"
] | permissive | lulihuang/DL_recommendation | c4109346a93336d85eed7bce2acfea143c76012c | 5e8b1b562cb4e6360103b11b6ec5601cd69282a3 | refs/heads/main | 2023-03-22T12:51:08.395165 | 2021-03-11T03:11:59 | 2021-03-11T03:11:59 | 328,589,292 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | """
import此文件后将gpu设置为显存增量模式
"""
from tensorflow import config

# Enumerate GPUs visible to TensorFlow; `physical_devices` is kept as an
# alias of the same list (both names refer to one object).
gpus = physical_devices = config.list_physical_devices('GPU')
if len(gpus) == 0:
    # No GPU detected — enabling memory growth would have no effect.
    # (Message is user-facing Chinese output; left untranslated.)
    print('当前没有检测到gpu,设置显存增量模式无效。')
for gpu in gpus:
    try:
        # Allocate GPU memory incrementally instead of grabbing it all up front.
        config.experimental.set_memory_growth(gpu, True)
    except RuntimeError as e:
        # set_memory_growth must be called before the GPU is initialized;
        # report the failure instead of aborting the importing module.
        print(e)
| [
"727445285@qq.com"
] | 727445285@qq.com |
6e3dd7d9f9580b03b86f84b139624874d5968242 | 1391fe8dcace0742232d11d96f1eca5da02581ba | /test.py | 465cf5bafb6627ab4907e62fc187ca32fd3dbf60 | [] | no_license | ha8sh/test | a0b879d2cc7b8d753aa7f4bfd75703cfa3b02d51 | af18128bae0ad195f9fb064da0807d67fd65180d | refs/heads/master | 2023-02-26T04:33:11.201116 | 2021-01-25T19:10:07 | 2021-01-25T19:10:07 | 332,850,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16 | py | print("hassan")
| [
"root@ibs.ping308.net"
] | root@ibs.ping308.net |
ffa1f1f3a4d948cf970225aaaf6d9dfcbc38315c | d4ce88b6262d30e86f9be2d5635df8544df846e2 | /Web Scrapping/Selenium-Request&Beutifulsoup Detailed/Selenium/selenim_intr.py | 353973c7f248a049a92800b0e4111b37b27abe14 | [] | no_license | ahmetzekiertem/Web-Scrapping | e8d5d3fd2da03f408553c3f6fa1815d79d663af2 | 75a95b8373a84357bdd037cf6fef7461097bfacf | refs/heads/master | 2022-04-19T16:32:28.369369 | 2020-04-19T12:51:42 | 2020-04-19T12:51:42 | 256,998,504 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py |
from selenium import webdriver  # browser automation driver
from time import sleep
from bs4 import BeautifulSoup  # imported for later parsing; unused in this snippet

# Create a webdriver object pointing at a local driver binary.
# NOTE(review): webdriver.Chrome is being handed a *geckodriver* path
# (geckodriver is Firefox's driver) — likely should be webdriver.Firefox
# or a chromedriver path; confirm before use.
driver = webdriver.Chrome('/Users/mac/Desktop/geckodriver')
# open some page using get method - url -- > parameters
# close webdriver object
driver.close()
| [
"40203533+ahmetzekiertem@users.noreply.github.com"
] | 40203533+ahmetzekiertem@users.noreply.github.com |
0e13ea228a661ee0d8e2c5bfce784e4d705a8f66 | 09b0075f56455d1b54d8bf3e60ca3535b8083bdc | /WideResnet.py | 595e4f69f1baa13a9f27f80fdb61e54773195de4 | [] | no_license | YanYan0716/MPL | e02c1ddf036d6019c3596fd932c51c3a14187f5e | 6ad82b050ec1ed81987c779df2dddff95dc1cde5 | refs/heads/master | 2023-04-17T23:05:54.164840 | 2021-05-07T01:14:49 | 2021-05-07T01:14:49 | 331,491,485 | 11 | 6 | null | null | null | null | UTF-8 | Python | false | false | 7,157 | py | import os
from abc import ABC
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import layers
from tensorflow.keras import regularizers
import config
class BasicBlock(layers.Layer):
    """Pre-activation residual block (BN -> ReLU -> Conv, twice) for WideResnet.

    When ``stride != 1`` or the channel count changes, a 1x1 strided
    convolution projects the shortcut so it can be added to the main path.
    LeakyReLU(0.2) is used in place of plain ReLU throughout.
    """

    def __init__(self, in_channels, out_channels, stride, dropout, name, trainable):
        super(BasicBlock, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.stride = stride
        # NOTE(review): self.dropout is first the float *rate* here, then
        # overwritten below with a Dropout *layer* built from that rate.
        self.dropout = dropout
        # name = name
        self.trainable = trainable
        self.bn1 = layers.BatchNormalization(
            # momentum=0.999,
            epsilon=config.BATCH_NORM_EPSILON,
            trainable=self.trainable,
            name=name+'_bn1'
        )
        self.relu1 = layers.LeakyReLU(alpha=0.2)
        # First 3x3 conv carries the block's stride (downsamples if stride > 1).
        self.conv1 = layers.Conv2D(
            filters=self.out_channels,
            kernel_size=3,
            strides=self.stride,
            padding='same',
            use_bias=False,
            kernel_initializer=keras.initializers.HeNormal(),
            kernel_regularizer=regularizers.l2(config.WEIGHT_DECAY),
            trainable=self.trainable,
            name=name+'_conv1',
        )
        self.bn2 = layers.BatchNormalization(
            # momentum=0.999,
            epsilon=config.BATCH_NORM_EPSILON,
            trainable=self.trainable,
            name=name+'_bn2'
        )
        self.relu2 = layers.LeakyReLU(alpha=0.2)
        # NOTE(review): this Dropout layer is built but never applied in
        # call() below — dead sub-layer unless wired in later.
        self.dropout = layers.Dropout(
            rate=self.dropout,
            trainable=self.trainable,
            name=name+'_dropout',
        )
        # Second 3x3 conv always stride 1.
        self.conv2 = layers.Conv2D(
            filters=self.out_channels,
            kernel_size=3,
            strides=1,
            padding='same',
            use_bias=False,
            kernel_initializer=keras.initializers.HeNormal(),
            kernel_regularizer=regularizers.l2(config.WEIGHT_DECAY),
            trainable=self.trainable,
            name=name+'_conv2',
        )
        # Projection shortcut only when the shapes would otherwise mismatch.
        if self.stride != 1 or self.in_channels != self.out_channels:
            self.short_cut_relu = layers.LeakyReLU(alpha=0.2)
            self.short_cut = layers.Conv2D(
                filters=self.out_channels,
                kernel_size=1,
                strides=self.stride,
                padding='same',
                use_bias=False,
                kernel_initializer=keras.initializers.HeNormal(),
                kernel_regularizer=regularizers.l2(config.WEIGHT_DECAY),
                trainable=self.trainable,
                name=name+'_shortcut'
            )
        self.add = layers.Add(name=name+'_add')

    def call(self, inputs, **kwargs):
        """Forward pass: pre-activation residual sum of main and shortcut paths."""
        residual = inputs
        out = self.bn1(inputs)
        if self.stride != 1 or self.in_channels != self.out_channels:
            # Projection variant: shortcut branches off *after* bn1
            # (shares the first normalization with the main path).
            residual = out
        out = self.relu1(out)
        out = self.conv1(out)
        out = self.bn2(out)
        out = self.relu2(out)
        out = self.conv2(out)
        if self.stride != 1 or self.in_channels != self.out_channels:
            residual = self.short_cut_relu(residual)
            residual = self.short_cut(residual)
        # else:
        #     shortcut = out
        out = self.add([residual, out])
        return out
class WideResnet(keras.Model):
    """Wide-ResNet-style classifier for 32x32x3 inputs.

    Architecture: a stem conv, then three groups of four BasicBlocks with
    channel widths ``k[1]``, ``k[2]``, ``k[3]`` (the second and third
    groups downsample with stride 2), followed by BN, LeakyReLU, global
    average pooling, dropout and a ``config.NUM_CLASS``-way dense head
    (logits — no softmax applied).

    Args:
        k: channel widths ``[stem, group1, group2, group3]``.  Defaults to
           ``[16, 32, 64, 128]``.  (The default is materialized inside
           ``__init__`` to avoid the shared-mutable-default pitfall.)
        name: prefix used for all sub-layer names.
    """

    def __init__(self, k=None, name='wider'):
        super(WideResnet, self).__init__(name=name)
        # Fix: a literal list default (`k=[16, 32, 64, 128]`) is a mutable
        # default argument shared across instances; build it per call instead.
        if k is None:
            k = [16, 32, 64, 128]
        self.k = k
        self.dropout = config.DROPOUT
        self.drop = layers.Dropout(
            rate=config.DROPOUT,
            trainable=self.trainable,
            name=name+'_dropout',
        )
        # Stem convolution: 3x3, stride 1, no bias (BN follows in the blocks).
        self.conv1 = layers.Conv2D(
            filters=k[0],
            kernel_size=3,
            strides=1,
            padding='same',
            use_bias=False,
            kernel_initializer=keras.initializers.HeNormal(),
            kernel_regularizer=regularizers.l2(config.WEIGHT_DECAY),
            trainable=self.trainable,
            name=name + '_conv1',
        )
        # Group 1: widen k[0] -> k[1], no downsampling.
        self.Basic1 = BasicBlock(in_channels=k[0], out_channels=k[1], stride=1, dropout=self.dropout, name=name+'_Basic1', trainable=True)
        self.Basic2 = BasicBlock(in_channels=k[1], out_channels=k[1], stride=1, dropout=self.dropout, name=name+'_Basic2', trainable=True)
        self.Basic3 = BasicBlock(in_channels=k[1], out_channels=k[1], stride=1, dropout=self.dropout, name=name+'_Basic3', trainable=True)
        self.Basic4 = BasicBlock(in_channels=k[1], out_channels=k[1], stride=1, dropout=self.dropout, name=name+'_Basic4', trainable=True)
        # Group 2: widen k[1] -> k[2], first block downsamples (stride 2).
        self.Basic5 = BasicBlock(in_channels=k[1], out_channels=k[2], stride=2, dropout=self.dropout, name=name+'_Basic5', trainable=True)
        self.Basic6 = BasicBlock(in_channels=k[2], out_channels=k[2], stride=1, dropout=self.dropout, name=name+'_Basic6', trainable=True)
        self.Basic7 = BasicBlock(in_channels=k[2], out_channels=k[2], stride=1, dropout=self.dropout, name=name+'_Basic7', trainable=True)
        self.Basic8 = BasicBlock(in_channels=k[2], out_channels=k[2], stride=1, dropout=self.dropout, name=name+'_Basic8', trainable=True)
        # Group 3: widen k[2] -> k[3], first block downsamples (stride 2).
        self.Basic9 = BasicBlock(in_channels=k[2], out_channels=k[3], stride=2, dropout=self.dropout, name=name+'_Basic9', trainable=True)
        self.Basic10 = BasicBlock(in_channels=k[3], out_channels=k[3], stride=1, dropout=self.dropout, name=name+'_Basic10', trainable=True)
        self.Basic11 = BasicBlock(in_channels=k[3], out_channels=k[3], stride=1, dropout=self.dropout, name=name+'_Basic11', trainable=True)
        self.Basic12 = BasicBlock(in_channels=k[3], out_channels=k[3], stride=1, dropout=self.dropout, name=name+'_Basic12', trainable=True)
        self.bn1 = layers.BatchNormalization(
            # momentum=0.999,
            epsilon=config.BATCH_NORM_EPSILON,
            trainable=self.trainable,
            name=name+'_bn1'
        )
        self.relu1 = layers.LeakyReLU(alpha=0.2)
        self.avgpool = layers.GlobalAveragePooling2D(name=name+'_avgpool')
        # Classification head: emits raw logits (softmax left to the loss).
        self.dense = layers.Dense(
            units=config.NUM_CLASS,
            # kernel_initializer=keras.initializers.RandomNormal(mean=0., stddev=1.),
            # activation='softmax',
            kernel_regularizer=regularizers.l2(config.WEIGHT_DECAY),
            name=name+'_dense',
        )

    def call(self, inputs, training=None, mask=None):
        """Forward pass: stem -> 12 residual blocks -> BN/ReLU -> pool -> logits."""
        x = self.conv1(inputs)
        x = self.Basic1(x)
        x = self.Basic2(x)
        x = self.Basic3(x)
        x = self.Basic4(x)
        x = self.Basic5(x)
        x = self.Basic6(x)
        x = self.Basic7(x)
        x = self.Basic8(x)
        x = self.Basic9(x)
        x = self.Basic10(x)
        x = self.Basic11(x)
        x = self.Basic12(x)
        x = self.bn1(x)
        x = self.relu1(x)
        x = self.avgpool(x)
        x = self.drop(x)
        out = self.dense(x)
        return out

    def model(self):
        """Return a functional keras.Model wrapping this network (for summary/plotting)."""
        # Renamed local from `input` to avoid shadowing the builtin.
        inputs = keras.Input(shape=(32, 32, 3), dtype=tf.float32)
        return keras.Model(inputs=inputs, outputs=self.call(inputs))
if __name__ == '__main__':
    # Smoke test: build the model and print its layer summary.
    # NOTE(review): `img` is created but never fed to the model — unused.
    img = tf.random.normal([1, 32, 32, 3])
    model = WideResnet().model()
    model.summary()
"yanqian0716@gmail.com"
] | yanqian0716@gmail.com |
450652c8cc5a188c7692244ac62b7cc5f3cd3443 | 23be24e92420af66307121468136570234aceebd | /Problems/What day is it/task.py | 49c6dff7b35a4222d7bf355ce9d79d44fb131b7e | [] | no_license | sanqit/credit_calculator | c309013f6639e886ae2a257b6c37c401d2d7b1fa | 1b2aaab271b985ec2df47fe6409a08e4dd180a0f | refs/heads/master | 2022-11-15T05:30:49.412246 | 2020-07-04T20:50:04 | 2020-07-04T20:50:04 | 277,182,017 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | offset = int(input())
# Fixed reference point; `offset` (read from stdin above) shifts it.
# NOTE(review): semantics appear to be "10:30 on Tuesday plus `offset`
# hours" — a negative total rolls back to Monday, >24 rolls to Wednesday;
# confirm against the original problem statement.
time = 10.5
n = time + offset
if n < 0:
    print("Monday")
elif n > 24:
    print("Wednesday")
else:
    print("Tuesday")
| [
"admin@sanqit.ru"
] | admin@sanqit.ru |
138b1b5eaed3c0e2c160f52a49e9ed9f1a6fd14b | 65331b368b8956c4864e95e21e6b9c2b3c715a2f | /__init__.py | 951540ce32097ac761cc9accb9ab3842ef23d029 | [] | no_license | yamoimeda/diccionario-guna | 83358ded73d79282457ff21984cdbff3028e1a82 | e3041d22721cab9e97e9ebd45d3456d9960aecfe | refs/heads/master | 2021-06-27T08:56:02.247787 | 2021-02-11T23:27:30 | 2021-02-11T23:27:30 | 203,426,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 994 | py | from flask import Flask,redirect,url_for,render_template,request,jsonify
from pymongo import MongoClient
import sqlite3 as sql
app = Flask(__name__)
#cliente = MongoClient("localhost", 27017)
#db = cliente.diccionarios
@app.route('/',methods=['GET','POST'])
def index():
    """Render the dictionary search landing page."""
    return render_template('inicios.html')
@app.route('/resultado', methods=['GET', 'POST'])
def resultados():
    """Look up the meaning of a word submitted from the search form.

    Expects a POST with a ``palabra`` form field.  Returns the stored
    meaning from the ``palabras`` table, or the literal string
    'no hay coincidencia' when the word is not found.  GET requests are
    redirected back to the index page.
    """
    if request.method != 'POST':
        # Fix: previously a GET fell through and returned None, which
        # Flask turns into an HTTP 500.
        return redirect(url_for('index'))
    palabra = request.form['palabra']
    conn = sql.connect("diccionarioguna.db")
    try:
        conn.row_factory = sql.Row
        cur = conn.cursor()
        # Parameterized query — safe against SQL injection.
        cur.execute('''SELECT significado FROM palabras where palabra = ?''', (palabra,))
        signifi = cur.fetchall()
        if len(signifi) == 0:
            significado = 'no hay coincidencia'
        else:
            # Preserve original behavior: if several rows match, the last wins.
            for ab in signifi:
                significado = ab['significado']
    finally:
        # Fix: the connection was never closed, leaking one handle per request.
        conn.close()
    return significado
if __name__ == '__main__':
    # Run the development server (debug off, default host/port).
    app.run()
| [
"noreply@github.com"
] | yamoimeda.noreply@github.com |
d7df85c05060a2aec8d54ca9acc857b79150ca15 | ba466ae34666eb71584cc3a808bdd78f5f555d2f | /exercicios/ex37.py | 2ccae9fb556d61448789994befc172790a367837 | [] | no_license | gilmargjs/cursoEmVideoPython | 8fa30984c00abd3ca331aab3de3d8e017d451e52 | cb15343bf5c884b2045e6a35b949cd02c3e8321c | refs/heads/main | 2023-06-12T22:48:59.398215 | 2021-06-30T20:52:55 | 2021-06-30T20:52:55 | 381,829,438 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 796 | py | """programa que laeia um número intgeiro qualquer e peça
para o usuario escolher qual será a base da conversão:
-1 para binario
-2 para octal
-3 para hexadecimal
"""
# Banner for the base-conversion menu (user-facing text is Portuguese).
print('='*30)
print('SISTEMA DE CONVERÇÃO DE BASES')
print('='*30)
num = int(input("Digite um número: "))
print('''Escolha uma das Bases para Conversão:
[ 1 ] Converter para BINÁRIO
[ 2 ] Converter para OCTAL
[ 3 ] Converter para HEXADECIMAL''')
print('='*30)
opcao = int(input("Sua Opção: "))
# bin()/oct()/hex() return '0b…'/'0o…'/'0x…'; [2:] strips that prefix.
if opcao == 1:
    print(f'{num} convertido para BINÁRIO é igual a {bin(num)[2:]}')
elif opcao == 2:
    print(f'{num} convertido para OCTAL é igual a {oct(num)[2:]}')
elif opcao == 3:
    print(f'{num} convertido para HEXADECIMAL é igual a {hex(num)[2:]}')
else:
    # Any other option: prompt the user to pick a valid one.
    print('escolha uma opção valida')
print('='*30)
| [
"gilmarjose2014@gmail.com"
] | gilmarjose2014@gmail.com |
7f9d646ad4abee72b62035b949da452c7e34ddbd | 35f076d1e8b5bbc3871a622b7808246e24b87f95 | /src/github/__init__.py | 10e03497751ba2b6964ca5e7dd22f5646fd3c7f5 | [] | no_license | elena20ruiz/github_crawler | 379ece12e07dcc5c866ca2aa8604d564d45010ff | dad9484539cc371584eb94ecfc20861e342325ee | refs/heads/master | 2020-12-27T18:17:21.001287 | 2020-02-08T17:35:18 | 2020-02-08T17:35:18 | 238,001,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 68 | py |
GITHUB_URL = 'https://github.com'
__all__ = [
'GITHUB_URL'
] | [
"elena.ruiz.bdn@gmail.com"
] | elena.ruiz.bdn@gmail.com |
2eb73db4561ba05eee120641d58dcfdf2e0c885b | 333632d57adb95541042769fed09b9698996f0b2 | /fullstack/venv_linux_auth0/bin/rst2xml.py | 483278b51727939b4ecb68f5f5a86856858bfb7c | [] | no_license | NginxNinja/Udacity_FullStack | c71f0ef323d794767c59b7316f845744ec9d9773 | ec028682633e4979a5938956ff92d336270a57f3 | refs/heads/master | 2021-05-19T15:12:05.720426 | 2020-05-24T00:31:33 | 2020-05-24T00:31:33 | 251,841,704 | 0 | 0 | null | 2020-04-18T06:18:28 | 2020-03-31T22:55:39 | Python | UTF-8 | Python | false | false | 622 | py | #!/vagrant/venv_linux_auth0/bin/python
# $Id: rst2xml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing Docutils XML.
"""
# Best-effort locale initialization (standard docutils front-end
# boilerplate).  The bare except is deliberate: it must also survive a
# missing/broken locale module, so narrowing it would change behavior.
try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except:
    pass

from docutils.core import publish_cmdline, default_description

description = ('Generates Docutils-native XML from standalone '
               'reStructuredText sources.  ' + default_description)

# Parse argv, read the reST input and emit Docutils-native XML.
publish_cmdline(writer_name='xml', description=description)
| [
"dummy.kenmen2@gmail.com"
] | dummy.kenmen2@gmail.com |
2edce3beb41e028a8b6e04ee2df6b6902f74f0a5 | feee01a7ba1ccc745060cf8233168934c5ef6426 | /apps/courses/migrations/0009_course_teacher.py | 33de7421a3c85d8ba99c73f05edd594c8b124b27 | [] | no_license | carpenter-zhang/muke | bfa7fa320536466de3f12223b4ae98dc635ceba6 | 7529f4e6be5c1919b86194093e45f574a0ffa89a | refs/heads/master | 2021-08-26T08:15:35.907867 | 2017-11-22T13:15:06 | 2017-11-22T13:15:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 633 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-11-08 16:51
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: add an optional ``teacher`` FK from Course to organizations.Teacher."""

    dependencies = [
        ('organizations', '0006_teacher_image'),
        ('courses', '0008_video_video_times'),
    ]

    operations = [
        migrations.AddField(
            model_name='course',
            name='teacher',
            # Nullable so existing Course rows need no backfill.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='organizations.Teacher', verbose_name='教师'),
        ),
    ]
| [
"596864319@qq.com"
] | 596864319@qq.com |
e3c07ab1c0f8f850ba14972383d4621221272771 | f36be49208238248fa155311a8fa115e3e672efb | /faq/migrations/0001_initial.py | 6eb45e075d0be3bdde3f88467367a6da286b3f51 | [
"MIT"
] | permissive | Sejong-Creative-Semester2021/OJ-BE | 71eb3bd88ce6767952227811d922000a9f407867 | cecc511b771f1979ba7a556abdae1cbefa8e17bd | refs/heads/main | 2023-08-14T03:23:54.029155 | 2021-09-17T13:00:45 | 2021-09-17T13:00:45 | 397,147,494 | 0 | 0 | MIT | 2021-08-30T04:03:01 | 2021-08-17T07:12:44 | Python | UTF-8 | Python | false | false | 1,141 | py | # Generated by Django 3.2.6 on 2021-09-17 12:49
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import utils.models
class Migration(migrations.Migration):
    """Auto-generated initial migration: create the ``faq`` table."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='FAQ',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('question', models.TextField()),
                ('answer', utils.models.RichTextField()),
                ('create_time', models.DateTimeField(auto_now_add=True)),
                ('last_update_time', models.DateTimeField(auto_now=True)),
                ('visible', models.BooleanField(default=True)),
                ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'db_table': 'faq',
                # Newest FAQ entries first by default.
                'ordering': ('-create_time',),
            },
        ),
    ]
| [
"mksin00@naver.com"
] | mksin00@naver.com |
ad833abfa7ebc2a0c6d29bf5cc8502e93504ecf0 | 115dfb558763fe51bc6d065435e4181057907dee | /lintcode/空格替换.py | 4bc2be3615ae4a137bf54514b44b36f8585a3a6f | [] | no_license | StSphinx/leetcode | a8f328f9f409ca57d56f2054598e2024c578c0a6 | 699e8ead451b570aed22d3a705a5ecbb7178cea1 | refs/heads/master | 2020-09-22T02:49:27.327193 | 2019-05-22T03:46:12 | 2019-05-22T03:46:12 | 37,531,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 510 | py | # -*- coding:utf-8 -*-
__author__ = 'Muming'
class Solution:
    # @param {char[]} string: An array of Char
    # @param {int} length: The true length of the string
    # @return {int} The true length of new string
    def replaceBlank(self, string, length):
        """Replace every space in *string* (a list of chars) with '%20'.

        Mutates the list in place; each replacement grows the logical
        length by 3 ('%20' is three characters longer than ' ' minus one).
        Returns the updated length together with the mutated list.
        """
        pos = 0
        while pos < len(string):
            if string[pos] == ' ':
                string[pos] = '%20'
                length += 3
            pos += 1
        return length, string
so = Solution()
# Python 2 print statement: shows (new_length, mutated_char_list) for the sample.
print so.replaceBlank(list("abcd efg hij"), 12)
"zhhljdb6014@gmail.com"
] | zhhljdb6014@gmail.com |
b5d716b2740e66732492a580f7db8280232f261e | d3d8acc788bd3a8d7e5f861ad87c4d802723062b | /test/step3_descope200MCHF_HLT.py | c2272355f19530f27df01562b14bf70d1dee3ae4 | [] | no_license | calabria/L1IntegratedMuonTrigger | 27ff0bde46208f84595423ec375080979fbe4c62 | 05a368b8d04f84b675d40445555f2cacfd135e4e | refs/heads/master | 2021-01-24T21:57:42.232290 | 2015-08-11T11:52:35 | 2015-08-11T11:52:35 | 38,485,204 | 0 | 2 | null | 2015-08-11T11:52:35 | 2015-07-03T09:40:57 | Python | UTF-8 | Python | false | false | 4,607 | py | # Auto generated configuration file
# using:
# Revision: 1.20
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: step3_descope200MCHF --fileout file:out_hlt_descope200MCHF.root --mc --eventcontent RECOSIM --step HLT --customise RecoParticleFlow/PandoraTranslator/customizeHGCalPandora_cff.cust_2023HGCalPandoraMuon,Configuration/DataProcessing/Utils.addMonitoring,L1Trigger/L1IntegratedMuonTrigger/phase2DescopingScenarios.descope200MCHF --datatier GEN-SIM-RECO --conditions PH2_1K_FB_V6::All --magField 38T_PostLS1 --filein file:/afs/cern.ch/work/d/dildick/public/GEM/MuonPhaseIIScopeDoc/CMSSW_6_2_0_SLHC26_patch3/src/001B71CC-0F38-E511-BEE2-002618943918.root --geometry Extended2023HGCalMuon,Extended2023HGCalMuonReco --no_exec -n 10
import FWCore.ParameterSet.Config as cms
process = cms.Process('HLT')
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.Geometry.GeometryExtended2023HGCalMuonReco_cff')
process.load('Configuration.StandardSequences.MagneticField_38T_PostLS1_cff')
process.load('HLTrigger.Configuration.HLT_GRun_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(10)
)
# Input source
process.source = cms.Source("PoolSource",
secondaryFileNames = cms.untracked.vstring(),
fileNames = cms.untracked.vstring('file:/afs/cern.ch/work/d/dildick/public/GEM/MuonPhaseIIScopeDoc/CMSSW_6_2_0_SLHC26_patch3/src/001B71CC-0F38-E511-BEE2-002618943918.root')
)
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('$Revision: 1.20 $'),
annotation = cms.untracked.string('step3_descope200MCHF nevts:10'),
name = cms.untracked.string('Applications')
)
# Output definition
process.RECOSIMoutput = cms.OutputModule("PoolOutputModule",
splitLevel = cms.untracked.int32(0),
eventAutoFlushCompressedSize = cms.untracked.int32(5242880),
outputCommands = process.RECOSIMEventContent.outputCommands,
fileName = cms.untracked.string('file:out_hlt_descope200MCHF.root'),
dataset = cms.untracked.PSet(
filterName = cms.untracked.string(''),
dataTier = cms.untracked.string('GEN-SIM-RECO')
)
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'PH2_1K_FB_V6::All', '')
# Path and EndPath definitions
process.endjob_step = cms.EndPath(process.endOfProcess)
process.RECOSIMoutput_step = cms.EndPath(process.RECOSIMoutput)
# Schedule definition
process.schedule = cms.Schedule()
process.schedule.extend(process.HLTSchedule)
process.schedule.extend([process.endjob_step,process.RECOSIMoutput_step])
# customisation of the process.
# Automatic addition of the customisation function from RecoParticleFlow.PandoraTranslator.customizeHGCalPandora_cff
from RecoParticleFlow.PandoraTranslator.customizeHGCalPandora_cff import cust_2023HGCalPandoraMuon
#call to customisation function cust_2023HGCalPandoraMuon imported from RecoParticleFlow.PandoraTranslator.customizeHGCalPandora_cff
process = cust_2023HGCalPandoraMuon(process)
# Automatic addition of the customisation function from HLTrigger.Configuration.customizeHLTforMC
from HLTrigger.Configuration.customizeHLTforMC import customizeHLTforMC
#call to customisation function customizeHLTforMC imported from HLTrigger.Configuration.customizeHLTforMC
process = customizeHLTforMC(process)
# Automatic addition of the customisation function from Configuration.DataProcessing.Utils
from Configuration.DataProcessing.Utils import addMonitoring
#call to customisation function addMonitoring imported from Configuration.DataProcessing.Utils
process = addMonitoring(process)
# Automatic addition of the customisation function from L1Trigger.L1IntegratedMuonTrigger.phase2DescopingScenarios
from L1Trigger.L1IntegratedMuonTrigger.phase2DescopingScenarios import descope200MCHF
#call to customisation function descope200MCHF imported from L1Trigger.L1IntegratedMuonTrigger.phase2DescopingScenarios
process = descope200MCHF(process)
# End of customisation functions
| [
"sven.dildick@cern.ch"
] | sven.dildick@cern.ch |
151d04d5fcd1d701ef91676c6174b1bebb1baf8c | 208db76b669686f782d59ec393efdb11e3d3cbbc | /test.l0.py | 0f6d3d40bd459a45e68347a3ddc466043ba27139 | [] | no_license | vsevolod-oparin/stream-dynamic-components | 149542d1dd754cd3f9a7e7bfdff118612e3fb21a | ef5e40cb07f568205a140b78a77067f32d4f34cd | refs/heads/master | 2021-01-10T01:32:21.968060 | 2016-02-09T16:00:41 | 2016-02-09T16:00:41 | 51,305,791 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,774 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
import random
from algo.l0sample import Rec1 as Rec1
from algo.l0sample import RecS as RecS
from algo.l0sample import RecGeneral as RecGeneral
class TestRec1(unittest.TestCase):
def test_zero(self):
r = Rec1(100, 0.001)
self.assertFalse(r.correct())
def test_one(self):
r = Rec1(100, 0.001)
r.update(17, 42)
self.assertTrue(r.correct())
self.assertEqual(r.recover(), (17, 42))
def test_two(self):
r = Rec1(100, 0.001)
r.update(1, 1)
r.update(2, 2)
self.assertFalse(r.correct())
def test_many(self):
r = Rec1(100, 0.001)
for i in xrange(100):
r.update(i, i + 17)
r.update(17, 42)
for i in xrange(100):
r.update(i, -(i + 17))
self.assertTrue(r.correct())
self.assertEqual(r.recover(), (17, 42))
class TestRecS(unittest.TestCase):
def test_touched(self):
r = RecS(100, 5, 0.01)
self.assertFalse(r.touched())
def test_zero(self):
r = RecS(100, 5, 0.01)
self.assertEqual(r.recover(), dict())
def test_one(self):
r = RecS(100, 5, 0.01)
r.update(17, 42)
for i in xrange(100):
ind, val = random.randint(0, 99), random.randint(1, 25)
r.update(ind, val)
r.update(ind, -val)
self.assertTrue(r.touched())
self.assertEqual(r.recover(), {17: 42})
def test_five(self):
r = RecS(100, 5, 0.01)
updates = dict([(17 + i, 42 + i) for i in xrange(5)])
for k in updates.keys():
r.update(k, updates[k])
for i in xrange(100):
ind, val = random.randint(0, 99), random.randint(1, 25)
r.update(ind, val)
r.update(ind, -val)
self.assertEqual(r.recover(), updates)
class TestRecGeneral(unittest.TestCase):
def test_zero(self):
r = RecGeneral(100, 0.01)
self.assertEqual(r.sample(), (0, 0))
def test_many(self):
size = 20
rs = [RecGeneral(size, 0.01) for i in xrange(10)]
vals = [0 for i in xrange(size)]
for i in xrange(size):
ind, val = random.randint(0, size - 1), random.randint(-5, 5)
vals[ind] += val
for r in rs:
r.update(ind, val)
for i in xrange(size):
if i % 2 == 1 and vals[i] != 0:
for r in rs:
r.update(i, -vals[i])
vals[i] -= vals[i]
answers = [r.sample() for r in rs]
print vals
print answers
for a in answers:
self.assertTrue(a[0] % 2 == 0 and a[1] == vals[a[0]])
if __name__ == '__main__':
unittest.main() | [
"oparin.vsevolod@gmail.com"
] | oparin.vsevolod@gmail.com |
d8229a35567ff7594f50dbb89b7cea36bec123ac | 148125096da896fd93292d2cd408265d159fec28 | /qa/rpc-tests/p2p-acceptblock.py | 2267768dbfeb2685302144171cfdd388f4355b4c | [
"MIT"
] | permissive | lycion/lkcoinse | 7cfbcbdfc1e98f20d9dfc497ea65fd75ca6de25d | 9cf9ed5730217566b44466c22dc255f0134ad1bb | refs/heads/master | 2020-03-30T03:24:44.245148 | 2018-09-28T04:55:30 | 2018-09-28T04:55:30 | 150,548,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,678 | py | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Lkcoinse Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import test_framework.loginit
from test_framework.mininode import *
from test_framework.test_framework import LkcoinseTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase
'''
AcceptBlockTest -- test processing of unrequested blocks.
Since behavior differs when receiving unrequested blocks from whitelisted peers
versus non-whitelisted peers, this tests the behavior of both (effectively two
separate tests running in parallel).
Setup: two nodes, node0 and node1, not connected to each other. Node0 does not
whitelist localhost, but node1 does. They will each be on their own chain for
this test.
We have one NodeConn connection to each, test_node and white_node respectively.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
The tip should advance.
3. Mine a block that forks the previous block, and deliver to each node from
corresponding peer.
Node0 should not process this block (just accept the header), because it is
unrequested and doesn't have more work than the tip.
Node1 should process because this is coming from a whitelisted peer.
4. Send another block that builds on the forking block.
Node0 should process this block but be stuck on the shorter chain, because
it's missing an intermediate block.
Node1 should reorg to this longer chain.
4b.Send 288 more blocks on the longer chain.
Node0 should process all but the last block (too far ahead in height).
Send all headers to Node1, and then send the last block in that chain.
Node1 should accept the block because it's coming from a whitelisted peer.
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
Node0 should figure out that Node0 has the missing height 2 block and send a
getdata.
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
'''
# TestNode: bare-bones "peer". Used mostly as a conduit for a test to sending
# p2p messages to a node, generating the messages in the main testing logic.
class TestNode(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong()
def add_connection(self, conn):
self.connection = conn
# Track the last getdata message we receive (used in the test)
def on_getdata(self, conn, message):
self.last_getdata = message
# Spin until verack message is received from the node.
# We use this to signal that our test can begin. This
# is called from the testing thread, so it needs to acquire
# the global lock.
def wait_for_verack(self):
while True:
with mininode_lock:
if self.verack_received:
return
time.sleep(0.05)
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_pong(self, conn, message):
self.last_pong = message
# Sync up with the node after delivery of a block
def sync_with_ping(self, timeout=30):
self.connection.send_message(msg_ping(nonce=self.ping_counter))
received_pong = False
sleep_time = 0.05
while not received_pong and timeout > 0:
time.sleep(sleep_time)
timeout -= sleep_time
with mininode_lock:
if self.last_pong.nonce == self.ping_counter:
received_pong = True
self.ping_counter += 1
return received_pong
class AcceptBlockTest(LkcoinseTestFramework):
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("LKCOINSED", "lkcoinsed"),
help="lkcoinsed binary to test")
def setup_chain(self):
initialize_chain_clean(self.options.tmpdir, 2)
def setup_network(self):
# Node0 will be used to test behavior of processing unrequested blocks
# from peers which are not whitelisted, while Node1 will be used for
# the whitelisted case.
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"],
binary=self.options.testbinary))
self.nodes.append(start_node(1, self.options.tmpdir,
["-debug", "-whitelist=127.0.0.1"],
binary=self.options.testbinary))
def run_test(self):
# Setup the p2p connections and start up the network thread.
test_node = TestNode() # connects to node0 (not whitelisted)
white_node = TestNode() # connects to node1 (whitelisted)
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node))
test_node.add_connection(connections[0])
white_node.add_connection(connections[1])
NetworkThread().start() # Start up network handling in another thread
# Test logic begins here
test_node.wait_for_verack()
white_node.wait_for_verack()
# 1. Have both nodes mine a block (leave IBD)
[ n.generate(1) for n in self.nodes ]
tips = [ int("0x" + n.getbestblockhash(), 0) for n in self.nodes ]
# 2. Send one block that builds on each tip.
# This should be accepted.
blocks_h2 = [] # the height 2 blocks on each node's chain
block_time = int(time.time()) + 1
for i in range(2):
blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
blocks_h2[i].solve()
block_time += 1
test_node.send_message(msg_block(blocks_h2[0]))
white_node.send_message(msg_block(blocks_h2[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
assert_equal(self.nodes[0].getblockcount(), 2)
assert_equal(self.nodes[1].getblockcount(), 2)
print("First height 2 block accepted by both nodes")
# 3. Send another block that builds on the original tip.
blocks_h2f = [] # Blocks at height 2 that fork off the main chain
for i in range(2):
blocks_h2f.append(create_block(tips[i], create_coinbase(2), blocks_h2[i].nTime+1))
blocks_h2f[i].solve()
test_node.send_message(msg_block(blocks_h2f[0]))
white_node.send_message(msg_block(blocks_h2f[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
for x in self.nodes[0].getchaintips():
if x['hash'] == blocks_h2f[0].hash:
assert_equal(x['status'], "headers-only")
for x in self.nodes[1].getchaintips():
if x['hash'] == blocks_h2f[1].hash:
assert_equal(x['status'], "valid-headers")
print("Second height 2 block accepted only from whitelisted peer")
# 4. Now send another block that builds on the forking chain.
blocks_h3 = []
for i in range(2):
blocks_h3.append(create_block(blocks_h2f[i].sha256, create_coinbase(3), blocks_h2f[i].nTime+1))
blocks_h3[i].solve()
test_node.send_message(msg_block(blocks_h3[0]))
white_node.send_message(msg_block(blocks_h3[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
# Since the earlier block was not processed by node0, the new block
# can't be fully validated.
for x in self.nodes[0].getchaintips():
if x['hash'] == blocks_h3[0].hash:
assert_equal(x['status'], "headers-only")
# But this block should be accepted by node0 since it has more work.
try:
self.nodes[0].getblock(blocks_h3[0].hash)
print("Unrequested more-work block accepted from non-whitelisted peer")
except:
raise AssertionError("Unrequested more work block was not processed")
# Node1 should have accepted and reorged.
assert_equal(self.nodes[1].getblockcount(), 3)
print("Successfully reorged to length 3 chain from whitelisted peer")
# 4b. Now mine 288 more blocks and deliver; all should be processed but
# the last (height-too-high) on node0. Node1 should process the tip if
# we give it the headers chain leading to the tip.
tips = blocks_h3
headers_message = msg_headers()
all_blocks = [] # node0's blocks
for j in range(2):
for i in range(288):
next_block = create_block(tips[j].sha256, create_coinbase(i + 4), tips[j].nTime+1)
next_block.solve()
if j==0:
test_node.send_message(msg_block(next_block))
all_blocks.append(next_block)
else:
headers_message.headers.append(CBlockHeader(next_block))
tips[j] = next_block
test_node.sync_with_ping()
time.sleep(2)
for x in all_blocks:
try:
self.nodes[0].getblock(x.hash)
if x == all_blocks[287]:
raise AssertionError("Unrequested block too far-ahead should have been ignored")
except:
if x == all_blocks[287]:
print("Unrequested block too far-ahead not processed")
else:
raise AssertionError("Unrequested block with more work should have been accepted")
headers_message.headers.pop() # Ensure the last block is unrequested
white_node.send_message(headers_message) # Send headers leading to tip
white_node.send_message(msg_block(tips[1])) # Now deliver the tip
try:
white_node.sync_with_ping()
self.nodes[1].getblock(tips[1].hash)
print("Unrequested block far ahead of tip accepted from whitelisted peer")
except:
raise AssertionError("Unrequested block from whitelisted peer not accepted")
# 5. Test handling of unrequested block on the node that didn't process
# Should still not be processed (even though it has a child that has more
# work).
test_node.send_message(msg_block(blocks_h2f[0]))
# Here, if the sleep is too short, the test could falsely succeed (if the
# node hasn't processed the block by the time the sleep returns, and then
# the node processes it and incorrectly advances the tip).
# But this would be caught later on, when we verify that an inv triggers
# a getdata request for this block.
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
print("Unrequested block that would complete more-work chain was ignored")
# 6. Try to get node to request the missing block.
# Poke the node with an inv for block at height 3 and see if that
# triggers a getdata on block 2 (it should if block 2 is missing).
with mininode_lock:
# Clear state so we can check the getdata request
test_node.last_getdata = None
test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)]))
test_node.sync_with_ping()
with mininode_lock:
getdata = test_node.last_getdata
# Check that the getdata includes the right block
assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256)
print("Inv at tip triggered getdata for unprocessed block")
# 7. Send the missing block for the third time (now it is requested)
test_node.send_message(msg_block(blocks_h2f[0]))
test_node.sync_with_ping()
# Wait for the reorg to complete. It can be slower on some systems.
while self.nodes[0].getblockcount() != 290:
time.sleep(1)
j = j + 1
if (j > 60):
break
assert_equal(self.nodes[0].getblockcount(), 290)
print("Successfully reorged to longer chain from non-whitelisted peer")
[ c.disconnect_node() for c in connections ]
if __name__ == '__main__':
AcceptBlockTest().main()
| [
"lycion@gmail.com"
] | lycion@gmail.com |
74dd88c522c1f43180958ef5d5f77d70bc1a149a | 529b575a77c6c39714704c60e9705eaf52bd48d3 | /tictactoe.py | dea97741580f86f1c1e9f720054c2dca5e7284b3 | [] | no_license | alevi0106/AI | 82151cd5c415f0ab7d852cc8d76f88ae56490835 | c9ab47e63d8f8a0a0f0d5b1fd824a5096f3115a8 | refs/heads/master | 2020-03-22T21:58:33.549768 | 2018-09-27T10:01:02 | 2018-09-27T10:01:02 | 140,725,573 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,120 | py | board=[2]*10
board_copy=[" "]*10
#To draw a board
def draw():
for i in range(1,10):
if board[i]==3:
board_copy[i]="X"
if board[i]==5:
board_copy[i]="O"
#print(board_copy)
print(' {} | {} | {}'.format(board_copy[1],board_copy[2],board_copy[3]))
print(' {} | {} | {}'.format(board_copy[4],board_copy[5],board_copy[6]))
print(' {} | {} | {}'.format(board_copy[7],board_copy[8],board_copy[9]))
print('')
def Go(n,turn):
if turn%2==0:
board[n]=5
else: board[n]=3
#To find blank space on board
def fb(a,b,c):
if board[a]==2:
return a
elif board[b]==2:
return b
elif board[c]==2:
return c
return a
def Make():
if board[5]==2:
return 5
elif board[2]==2:
return 2
elif board[4]==2:
return 4
elif board[6]==2:
return 6
elif board[8]==2:
return 8
def Posswin(p):
if p=="X":
temp=18
elif p=="O":
temp=50
if board[1]*board[2]*board[3]==temp:
return fb(1,2,3)
elif board[4]*board[5]*board[6]==temp:
return fb(4,5,6)
elif board[7]*board[8]*board[9]==temp:
return fb(7,8,9)
elif board[1]*board[5]*board[9]==temp:
return fb(1,5,9)
elif board[3]*board[5]*board[7]==temp:
return fb(3,5,7)
elif board[1]*board[4]*board[7]==temp:
return fb(1,4,7)
elif board[2]*board[5]*board[8]==temp:
return fb(2,5,8)
elif board[3]*board[6]*board[9]==temp:
return fb(3,6,9)
return 0
def iswin(turn):
if(board[1]*board[2]*board[3]==27 or board[4]*board[5]*board[6]==27 or board[7]*board[8]*board[9]==27 or
board[1]*board[5]*board[9]==27 or board[3]*board[5]*board[7]==27 or
board[1]*board[4]*board[7]==27 or board[2]*board[5]*board[8]==27 or board[3]*board[6]*board[9]==27):
print("Winner is X")
return 1
elif(board[1]*board[2]*board[3]==125 or board[4]*board[5]*board[6]==125 or board[7]*board[8]*board[9]==125 or
board[1]*board[5]*board[9]==125 or board[3]*board[5]*board[7]==125 or
board[1]*board[4]*board[7]==125 or board[2]*board[5]*board[8]==125 or board[3]*board[6]*board[9]==125):
print("Winner is O")
return 1
elif turn==9:
print("Match Draw")
return 1
return 0
def isdraw(turn):
posx=posy=0
for i in range(1,10):
if board[i]==3:
if i%2!=0:
posx+=1
elif board[i]==5:
if i%2!=0:
posy+=1
#print(posx,posy)
if(posx==3 and posy==1 and board[5]==3):
return False
elif(posx==3 and posy==2 and board[5]==5):
return False
elif Posswin("O")==0:
return True
return False
val=int(input("Choose 3 for 'X' or 5 for 'O':\n"))
if val==3:
tempvar1=1
tempvar2=0
elif val==5:
tempvar1=0
tempvar2=1
for turn in range(1,10):
if turn%2==tempvar1:
cross=int(input("Where to mark?\n"))
board[cross]=val
draw()
var=iswin(turn)
if var==1:
break
elif turn%2==tempvar2:
print("AI turn")
if turn==1:
Go(1,turn)
draw()
var=iswin(turn)
if var==1:
break
if turn==2:
if board[5]==2:
Go(5,turn)
else:
Go(1,turn)
draw()
var=iswin(turn)
if var==1:
break
if turn==3:
if board[9]==2:
Go(9,turn)
else:
Go(3,turn)
draw()
var=iswin(turn)
if var==1:
break
if turn==4:
if Posswin("X")!=0:
Go(Posswin("X"),turn)
else: Go(Make(),turn)
draw()
var=iswin(turn)
if var==1:
break
if turn==5:
if Posswin("X")!=0:
Go(Posswin("X"),turn)
elif Posswin("O")!=0:
Go(Posswin("O"),turn)
elif board[7]==2:
Go(7,turn)
else: Go(3,turn)
draw()
var=iswin(turn)
if var==1:
break
if turn==6:
if Posswin("O")!=0:
Go(Posswin("O"),turn)
elif Posswin("X")!=0:
Go(Posswin("X"),turn)
else: Go(Make(),turn)
draw()
var=iswin(turn)
if var==1:
break
if turn==7 or turn==9:
if Posswin("X")!=0:
Go(Posswin("X"),turn)
elif Posswin("O")!=0:
Go(Posswin("O"),turn)
else: Go(fb(fb(1,2,3),fb(4,5,6),fb(7,8,9)),turn)
draw()
var=iswin(turn)
if var==1:
break
if turn==8:
if Posswin("O")!=0:
Go(Posswin("O"),turn)
elif Posswin("X")!=0:
Go(Posswin("X"),turn)
else: Go(fb(fb(1,2,3),fb(4,5,6),fb(7,8,9)),turn)
draw()
var=iswin(turn)
if var==1:
break
if turn==5:
if isdraw(turn)==True:
print("Match will be draw")
break
| [
"noreply@github.com"
] | alevi0106.noreply@github.com |
6537118072122509e9adad7738eee5616a1b24dd | fc83fc10fcc509316e612d73bd40a81d3ca0a2e6 | /tests/nd_gaussian_multiprocessing.py | 1f8c698393e3a088d991eb3484785a391dc3c783 | [
"MIT"
] | permissive | DimitriMisiak/mcmc-red | 47dfb7e0664205da55fa463df77851722082e3c3 | caae0ce39d082e578176a5078a9184980b0851c3 | refs/heads/main | 2023-06-19T04:10:42.385862 | 2019-07-05T07:45:01 | 2019-07-05T07:45:01 | 387,757,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,928 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Handy MCMC scripts.
Test for the different fit method (mcmc, ptmcmc, minimizer).
Author:
Dimitri Misiak (misiak@ipnl.in2p3.fr)
"""
import numpy as np
import matplotlib.pyplot as plt
import sys
import scipy.signal as sgl
from os import path
import scipy.optimize as op
import mcmc_red as mcr
# close all plots
plt.close('all')
nsample = 1000
ndim = 4
SCALE = 'log'
### LINEAR SCALE
if SCALE == 'linear':
mu = np.random.uniform(-10, 10, ndim)
sigma = np.random.uniform(0, 10, ndim)
bounds = ((-20, 20),) * ndim
### LOG SCALE
elif SCALE == 'log':
mu_generator = np.random.uniform(-6, 0, ndim)
mu = 10**mu_generator
sigma = mu/10
bounds = ((1e-7, 1e1),) * ndim
else:
raise Exception('SCALE not set properly!')
print("Generating blob at mu={0} and sigma={1}".format(mu, sigma))
blob = np.random.normal(mu, sigma, (nsample, ndim))
print("Checking")
print("mean =", np.mean(blob, axis=0))
print("std =", np.std(blob, axis=0))
def chi2(param):
return mcr.chi2_simple(blob, param, sigma)
#def chi2(param):
# x2 = np.sum( (blob - np.array(param))**2 / np.array(sigma)**2 )
# return x2
condi = None
# XXX MCMC
# save directory
sampler_path = 'mcmc_sampler/autosave'
# extracts the sup bounds and the inf bounds
bounds = list(bounds)
binf = list()
bsup = list()
for b in bounds:
inf, sup = b
binf.append(inf)
bsup.append(sup)
binf = np.array(binf)
bsup = np.array(bsup)
# additionnal constrain as function of the parameters
if condi == None:
condi = lambda p: True
# Loglikelihood function taking into accounts the bounds
def loglike(x):
""" Loglikelihood being -chi2/2.
Take into account the bounds.
"""
cinf = np.sum(x<binf)
csup = np.sum(x>bsup)
if cinf == 0 and csup == 0 and condi(x) == True:
# return -0.5*aux(np.power(10,x))
return -0.5*chi2(x)
else:
return -np.inf
# running the mcmc analysis
sampler = mcr.mcmc_sampler_multi(loglike, bounds, nsteps=1000, path=sampler_path, threads=2, scale=SCALE)
#nwalkers=None
#nsteps=10000
#threads=4
##############################################################################
## extracts the sup bounds and the inf bounds
#bounds = list(bounds)
#binf = list()
#bsup = list()
#for b in bounds:
# inf, sup = b
# binf.append(inf)
# bsup.append(sup)
#binf = np.array(binf)
#bsup = np.array(bsup)
#
#condi = None
## additionnal constrain as function of the parameters
#if condi == None:
# condi = lambda p: True
#
## Loglikelihood function taking into accounts the bounds
#def loglike(x):
# """ Loglikelihood being -chi2/2.
# Take into account the bounds.
# """
# cinf = np.sum(x<binf)
# csup = np.sum(x>bsup)
# if cinf == 0 and csup == 0 and condi(x) == True:
## return -0.5*aux(np.power(10,x))
# return -0.5*chi2(x)
# else:china moon
# return -np.inf
#
## number of parameters/dimensions
#ndim = len(bounds)
#
## default nwalkers
#if nwalkers == None:
# nwalkers = 10 * ndim
#
## walkers are uniformly spread in the parameter space
#pos = list()
#for n in xrange(nwalkers):
# accept = False
# while not accept:
# new_pos = [
# np.random.uniform(low=l, high=h) for l,h in zip(binf, bsup)
# ]
# accept = condi(new_pos)
# pos.append(new_pos)
#
## MCMC analysis
#sampler = emcee.EnsembleSampler(nwalkers, ndim, loglike, threads=threads)
#sampler.run_mcmc(pos, nsteps, rstate0=np.random.get_state())
#############################################################################
# # loading the mcmc results
logd, chain, lnprob, acc = mcr.get_mcmc_sampler(sampler_path)
lab = tuple(['$\mu${}'.format(i) for i in range(ndim)])
dim = int(logd['dim'])
xopt, inf, sup = mcr.mcmc_results(dim, chain, lnprob, acc, lab,
scale=SCALE, savedir=sampler_path)
print(xopt, inf, sup)
| [
"dimitrimisiak@gmail.com"
] | dimitrimisiak@gmail.com |
05c06ff5850ee1f5cbab0d42f5704ce5b0f4acb3 | 57d1580fd540b4819abb67f9db43fdfbba63725f | /hydrogen_notebooks/option_pricing/binomial_european_call_delta_hedging.py | 29f3ca209e1b50cb4571fff0cac52d807c607296 | [] | no_license | glyfish/alpaca | 49edfcb9d80551825dfa4cf071f21aeb95a3502f | 2b5b69bcf50ed081a526742658be503706af94b4 | refs/heads/master | 2023-02-22T00:24:19.293502 | 2022-09-05T17:20:23 | 2022-09-05T17:20:23 | 186,169,438 | 1 | 3 | null | 2023-02-11T00:52:12 | 2019-05-11T18:38:58 | Python | UTF-8 | Python | false | false | 2,302 | py | # %%
%load_ext autoreload
%autoreload 2
import os
import sys
import numpy
from matplotlib import pyplot
from lib import config
from scipy.stats import binom
wd = os.getcwd()
yahoo_root = os.path.join(wd, 'data', 'yahoo')
pyplot.style.use(config.glyfish_style)
# %%
def qrn(U, D, R):
return (R - D) / (U - D)
def qrn1(q, U, R):
return q*(1.0 + U) / (1.0 + R)
def binomial_tail_cdf(l, n, p):
return 1.0 - binom.cdf(l, n, p)
def cutoff(S0, U, D, K, n):
for i in range(0, n + 1):
iU = (1.0 + U)**i
iD = (1.0 + D)**(n - i)
payoff = S0*iU*iD - K
if payoff > 0:
return i
return n + 1
def european_call_payoff(U, D, R, S0, K, n):
l = cutoff(S0, U, D, K, n)
q = qrn(U, D, R)
q1 = qrn1(q, U, R)
Ψq = binomial_tail_cdf(l - 1, n, q)
Ψq1 = binomial_tail_cdf(l - 1, n, q1)
return S0*Ψq1 - K*(1 + R)**(-n)*Ψq
def delta(CU, CD, SU, SD):
return (CU - CD) / (SU - SD)
def init_borrow(S0, C0, x):
return C0 - S0 * x
def borrow(y, R, x1, x2, S):
return y * (1 + R) + (x1 - x2) * S
def portfolio_value(x, S, y):
return x * S + y
# %%
n = 3
U = 0.2
D = -0.1
R = 0.1
S0 = 100.0
K = 105.0
# %%
q = qrn(U, D, R)
q1 = qrn1(q, U, R)
l = cutoff(S0, U, D, K, n)
Ψq = binomial_tail_cdf(l - 1, n, q)
Ψq1 = binomial_tail_cdf(l - 1, n, q1)
q, q1, l, Ψq, Ψq1
binom.cdf(l, n, q)
# %
# t = 0
C0 = european_call_payoff(U, D, R, S0, K, n)
# %%
# Delta hedge
# t = 0
S1U = S0*(1.0 + U)
S1D = S0*(1.0 + D)
C1U = european_call_payoff(U, D, R, S1U, K, n - 1)
C1D = european_call_payoff(U, D, R, S1D, K, n - 1)
x1 = delta(C1U, C1D, S1U, S1D)
y1 = init_borrow(S0, C0, x1)
portfolio_value(x1, S0, y1)
# t = 1
# The price goes up S1 = S0*(1+U)
S1 = S0 * (1 + U)
S2U = S1*(1.0 + U)
S2D = S1*(1.0 + D)
C2U = european_call_payoff(U, D, R, S2U, K, n - 2)
C2D = european_call_payoff(U, D, R, S2D, K, n - 2)
x2 = delta(C2U, C2D, S2U, S2D)
y2 = borrow(y1, R, x1, x2, S1)
portfolio_value(x2, S1, y2)
# t = 2
# The price goes down S1 = S0*(1+U)*(1+D)
S2 = S0 * (1 + U) * (1 + D)
S3U = S2*(1.0 + U)
S3D = S2*(1.0 + D)
C3U = european_call_payoff(U, D, R, S3U, K, n - 3)
C3D = european_call_payoff(U, D, R, S3D, K, n - 3)
x3 = delta(C3U, C3D, S3U, S3D)
y3 = borrow(y2, R, x2, x3, S2)
portfolio_value(x3, S2, y3)
| [
"troy.stribling@gmail.com"
] | troy.stribling@gmail.com |
f1a9d5f8ac93d9af895ae5ffd7c6036d617c5d19 | 6b83e1eb08926bd2437c3a42bf53e262fda81cd3 | /algorithms/envs/flow/envs/ring/lane_change_accel.py | ea40b24414a20e24a7db6c8d2e50716d09bf8c08 | [] | no_license | TerryLiu2k/DMPO | a92d2b96458066cd441293f618aca10bd21fce22 | 060c4135973a1b9bdea0cd26ea8f78a3a7ad5d98 | refs/heads/master | 2023-08-15T16:09:08.202735 | 2021-10-16T04:07:38 | 2021-10-16T04:07:38 | 416,567,453 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,406 | py | """Environments that can train both lane change and acceleration behaviors."""
from algorithms.envs.flow.envs.ring.accel import AccelEnv
from algorithms.envs.flow.core import rewards
from gym.spaces.box import Box
import numpy as np
ADDITIONAL_ENV_PARAMS = {
# maximum acceleration for autonomous vehicles, in m/s^2
"max_accel": 3,
# maximum deceleration for autonomous vehicles, in m/s^2
"max_decel": 3,
# lane change duration for autonomous vehicles, in s. Autonomous vehicles
# reject new lane changing commands for this duration after successfully
# changing lanes.
"lane_change_duration": 5,
# desired velocity for all vehicles in the network, in m/s
"target_velocity": 10,
# specifies whether vehicles are to be sorted by position during a
# simulation step. If set to True, the environment parameter
# self.sorted_ids will return a list of all vehicles sorted in accordance
# with the environment
'sort_vehicles': False
}
class LaneChangeAccelEnv(AccelEnv):
"""Fully observable lane change and acceleration environment.
This environment is used to train autonomous vehicles to improve traffic
flows when lane-change and acceleration actions are permitted by the rl
agent.
Required from env_params:
* max_accel: maximum acceleration for autonomous vehicles, in m/s^2
* max_decel: maximum deceleration for autonomous vehicles, in m/s^2
* lane_change_duration: lane change duration for autonomous vehicles, in s
* target_velocity: desired velocity for all vehicles in the network, in m/s
* sort_vehicles: specifies whether vehicles are to be sorted by position
during a simulation step. If set to True, the environment parameter
self.sorted_ids will return a list of all vehicles sorted in accordance
with the environment
States
The state consists of the velocities, absolute position, and lane index
of all vehicles in the network. This assumes a constant number of
vehicles.
Actions
Actions consist of:
* a (continuous) acceleration from -abs(max_decel) to max_accel,
specified in env_params
* a (continuous) lane-change action from -1 to 1, used to determine the
lateral direction the vehicle will take.
Lane change actions are performed only if the vehicle has not changed
lanes for the lane change duration specified in env_params.
Rewards
The reward function is the two-norm of the distance of the speed of the
vehicles in the network from a desired speed, combined with a penalty
to discourage excess lane changes by the rl vehicle.
Termination
A rollout is terminated if the time horizon is reached or if two
vehicles collide into one another.
"""
def __init__(self, env_params, sim_params, network, simulator='traci'):
for p in ADDITIONAL_ENV_PARAMS.keys():
if p not in env_params.additional_params:
raise KeyError(
'Environment parameter "{}" not supplied'.format(p))
super().__init__(env_params, sim_params, network, simulator)
@property
def action_space(self):
"""See class definition."""
max_decel = self.env_params.additional_params["max_decel"]
max_accel = self.env_params.additional_params["max_accel"]
lb = [-abs(max_decel), -1] * self.initial_vehicles.num_rl_vehicles
ub = [max_accel, 1] * self.initial_vehicles.num_rl_vehicles
return Box(np.array(lb), np.array(ub), dtype=np.float32)
@property
def observation_space(self):
"""See class definition."""
return Box(
low=0,
high=1,
shape=(3 * self.initial_vehicles.num_vehicles, ),
dtype=np.float32)
def compute_reward(self, rl_actions, **kwargs):
"""See class definition."""
# compute the system-level performance of vehicles from a velocity
# perspective
reward = rewards.desired_velocity(self, fail=kwargs["fail"])
# punish excessive lane changes by reducing the reward by a set value
# every time an rl car changes lanes (10% of max reward)
for veh_id in self.k.vehicle.get_rl_ids():
if self.k.vehicle.get_last_lc(veh_id) == self.time_counter:
reward -= 0.1
return reward
def get_state(self):
"""See class definition."""
# normalizers
max_speed = self.k.network.max_speed()
length = self.k.network.length()
max_lanes = max(
self.k.network.num_lanes(edge)
for edge in self.k.network.get_edge_list())
speed = [self.k.vehicle.get_speed(veh_id) / max_speed
for veh_id in self.sorted_ids]
pos = [self.k.vehicle.get_x_by_id(veh_id) / length
for veh_id in self.sorted_ids]
lane = [self.k.vehicle.get_lane(veh_id) / max_lanes
for veh_id in self.sorted_ids]
return np.array(speed + pos + lane)
def _apply_rl_actions(self, actions):
"""See class definition."""
acceleration = actions[::2]
direction = actions[1::2]
# re-arrange actions according to mapping in observation space
sorted_rl_ids = [
veh_id for veh_id in self.sorted_ids
if veh_id in self.k.vehicle.get_rl_ids()
]
# represents vehicles that are allowed to change lanes
non_lane_changing_veh = \
[self.time_counter <=
self.env_params.additional_params["lane_change_duration"]
+ self.k.vehicle.get_last_lc(veh_id)
for veh_id in sorted_rl_ids]
# vehicle that are not allowed to change have their directions set to 0
direction[non_lane_changing_veh] = \
np.array([0] * sum(non_lane_changing_veh))
self.k.vehicle.apply_acceleration(sorted_rl_ids, acc=acceleration)
self.k.vehicle.apply_lane_change(sorted_rl_ids, direction=direction)
def additional_command(self):
"""Define which vehicles are observed for visualization purposes."""
# specify observed vehicles
if self.k.vehicle.num_rl_vehicles > 0:
for veh_id in self.k.vehicle.get_human_ids():
self.k.vehicle.set_observed(veh_id)
class LaneChangeAccelPOEnv(LaneChangeAccelEnv):
"""POMDP version of LaneChangeAccelEnv.
Required from env_params:
* max_accel: maximum acceleration for autonomous vehicles, in m/s^2
* max_decel: maximum deceleration for autonomous vehicles, in m/s^2
* lane_change_duration: lane change duration for autonomous vehicles, in s
* target_velocity: desired velocity for all vehicles in the network, in m/s
States
States are a list of rl vehicles speeds, as well as the speeds and
bumper-to-bumper headways between the rl vehicles and their
leaders/followers in all lanes. There is no assumption on the number of
vehicles in the network, so long as the number of rl vehicles is
static.
Actions
See parent class.
Rewards
See parent class.
Termination
See parent class.
Attributes
----------
num_lanes : int
maximum number of lanes on any edge in the network
visible : list of str
lists of visible vehicles, used for visualization purposes
"""
def __init__(self, env_params, sim_params, network, simulator='traci'):
super().__init__(env_params, sim_params, network, simulator)
self.num_lanes = max(self.k.network.num_lanes(edge)
for edge in self.k.network.get_edge_list())
self.visible = []
@property
def observation_space(self):
"""See class definition."""
return Box(
low=0,
high=1,
shape=(4 * self.initial_vehicles.num_rl_vehicles *
self.num_lanes + self.initial_vehicles.num_rl_vehicles, ),
dtype=np.float32)
def get_state(self):
"""See class definition."""
obs = [
0
for _ in range(4 * self.k.vehicle.num_rl_vehicles * self.num_lanes)
]
self.visible = []
for i, rl_id in enumerate(self.k.vehicle.get_rl_ids()):
# normalizers
max_length = self.k.network.length()
max_speed = self.k.network.max_speed()
# set to 1000 since the absence of a vehicle implies a large
# headway
headway = [1] * self.num_lanes
tailway = [1] * self.num_lanes
vel_in_front = [0] * self.num_lanes
vel_behind = [0] * self.num_lanes
lane_leaders = self.k.vehicle.get_lane_leaders(rl_id)
lane_followers = self.k.vehicle.get_lane_followers(rl_id)
lane_headways = self.k.vehicle.get_lane_headways(rl_id)
lane_tailways = self.k.vehicle.get_lane_tailways(rl_id)
headway[0:len(lane_headways)] = lane_headways
tailway[0:len(lane_tailways)] = lane_tailways
for j, lane_leader in enumerate(lane_leaders):
if lane_leader != '':
lane_headways[j] /= max_length
vel_in_front[j] = self.k.vehicle.get_speed(lane_leader) \
/ max_speed
self.visible.extend([lane_leader])
for j, lane_follower in enumerate(lane_followers):
if lane_follower != '':
lane_headways[j] /= max_length
vel_behind[j] = self.k.vehicle.get_speed(lane_follower) \
/ max_speed
self.visible.extend([lane_follower])
# add the headways, tailways, and speed for all lane leaders
# and followers
obs[4*self.num_lanes*i:4*self.num_lanes*(i+1)] = \
np.concatenate((headway, tailway, vel_in_front, vel_behind))
# add the speed for the ego rl vehicle
obs.append(self.k.vehicle.get_speed(rl_id))
return np.array(obs)
def additional_command(self):
"""Define which vehicles are observed for visualization purposes."""
# specify observed vehicles
for veh_id in self.visible:
self.k.vehicle.set_observed(veh_id)
| [
"terrylyclow@yahoo.com"
] | terrylyclow@yahoo.com |
948f743d6e0bc327c25c496c6faeecf388f602ac | 569970f62eca0ff81e1f3aaca7f14c08021fa1a3 | /example1.py | 459893bf3f7a622ea4c88414a80137553c732951 | [
"MIT"
] | permissive | u9n/enron-modbus | c958c0af7408dcbc6efe3b526b55eba694e63c98 | 68418a07dbbd8b6e4763e4d1cb8eb2915ff040d7 | refs/heads/main | 2023-08-17T01:07:55.522410 | 2023-08-08T12:33:17 | 2023-08-08T12:33:17 | 377,763,911 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 594 | py | from enron_modbus.client import EnronModbusClient
from enron_modbus.transports import SerialTransport
transport = SerialTransport(port="/dev/tty.usbserial", baudrate=9600)
client = EnronModbusClient(transport=transport)
client.connect()
print(client.read_numerics(1, 5160, 6))
print(client.read_numeric(1, 5160))
print(client.read_booleans(1, 1010, 2))
print(client.read_booleans(1, 1010, 33))
print(client.read_boolean(1, 1010))
client.write_boolean(1, 1010, True)
print(client.read_boolean(1, 1010))
client.write_numeric(1, 7001, 46)
print(client.read_numeric(1, 7001))
client.disconnect()
| [
"henrik@pwit.se"
] | henrik@pwit.se |
f44cb8ff8292065f395502b902384d138c5b9281 | f09ef05dbc335a095c3652089bd98c9ba2f0e1de | /project_model/blog/migrations/0004_blogpost_posted_by.py | 442e65869d57a4228993da0add9fc0dbedf1a905 | [] | no_license | Dzinsyah/DJANGO_MVC | be45e2900203717dba51c1221f899d1c54324c36 | 5dea4d592ffecd1918e78c2aae185076a4e21fa4 | refs/heads/master | 2020-04-22T02:50:53.661662 | 2019-02-13T11:58:45 | 2019-02-13T11:58:45 | 170,065,027 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 505 | py | # Generated by Django 2.1.5 on 2019-02-11 07:48
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('blog', '0003_blogpost'),
]
operations = [
migrations.AddField(
model_name='blogpost',
name='posted_by',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='blog.Mentee'),
preserve_default=False,
),
]
| [
"dzinsyah@alphatech.id"
] | dzinsyah@alphatech.id |
552afe3365ed66d2b8652c89879abaa32a139ce6 | 548a195b2bd6e5857f008ef4b5a305983bada183 | /popular-movie-nicer.py | c207732295d01f823104ab24c3592707649b22c2 | [] | no_license | aashray18521/Udemy-Spark_Python | b404801aba3bcb9c896a34fcd3679172ef092caf | 13174529199d581598902dadcf07dcb171cadf33 | refs/heads/master | 2020-03-28T10:55:32.160605 | 2018-09-17T06:44:21 | 2018-09-17T06:44:21 | 148,159,774 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 794 | py | from pyspark import SparkConf, SparkContext
def loadMovieNames():
movieNames = {}
with open("ml-100k/u.item") as f:
for line in f:
fields = line.split('|')
movieNames[int(fields[0])] = fields[1]
return movieNames
conf = SparkConf().setMaster("local").setAppName("nicePopularMovie")
sc = SparkContext(conf = conf)
nameDict = sc.broadcast(loadMovieNames())
rdd = sc.textFile("ml-100k/u.data")
onlyMovieIds = rdd.map(lambda x: (int(x.split()[1]), 1))
countPerMovie = onlyMovieIds.reduceByKey(lambda x,y : x+y)
reverseRdd = countPerMovie.map(lambda (x,y) : (y,x))
sortedMovies = reverseRdd.sortByKey()
sortedMoviesWithNames = sortedMovies.map(lambda (count, movie) : (nameDict.value[movie], count))
results = sortedMoviesWithNames.collect()
for result in results:
print(result)
| [
"noreply@github.com"
] | aashray18521.noreply@github.com |
ce5dade7d36a431e3ec81dade64648f6c22eca35 | 7832e7dc8f1583471af9c08806ce7f1117cd228a | /aliyun-python-sdk-emr/aliyunsdkemr/request/v20160408/RunClusterServiceActionRequest.py | eb1c959505c70fd4e06aa43388665c4d9f9b06a3 | [
"Apache-2.0"
] | permissive | dianplus/aliyun-openapi-python-sdk | d6494850ddf0e66aaf04607322f353df32959725 | 6edf1ed02994245dae1d1b89edc6cce7caa51622 | refs/heads/master | 2023-04-08T11:35:36.216404 | 2017-11-02T12:01:15 | 2017-11-02T12:01:15 | 109,257,597 | 0 | 0 | NOASSERTION | 2023-03-23T17:59:30 | 2017-11-02T11:44:27 | Python | UTF-8 | Python | false | false | 3,508 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class RunClusterServiceActionRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Emr', '2016-04-08', 'RunClusterServiceAction')
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_ClusterId(self):
return self.get_query_params().get('ClusterId')
def set_ClusterId(self,ClusterId):
self.add_query_param('ClusterId',ClusterId)
def get_HostIdList(self):
return self.get_query_params().get('HostIdList')
def set_HostIdList(self,HostIdList):
self.add_query_param('HostIdList',HostIdList)
def get_ServiceName(self):
return self.get_query_params().get('ServiceName')
def set_ServiceName(self,ServiceName):
self.add_query_param('ServiceName',ServiceName)
def get_ServiceActionName(self):
return self.get_query_params().get('ServiceActionName')
def set_ServiceActionName(self,ServiceActionName):
self.add_query_param('ServiceActionName',ServiceActionName)
def get_CustomCommand(self):
return self.get_query_params().get('CustomCommand')
def set_CustomCommand(self,CustomCommand):
self.add_query_param('CustomCommand',CustomCommand)
def get_ComponentNameList(self):
return self.get_query_params().get('ComponentNameList')
def set_ComponentNameList(self,ComponentNameList):
self.add_query_param('ComponentNameList',ComponentNameList)
def get_Comment(self):
return self.get_query_params().get('Comment')
def set_Comment(self,Comment):
self.add_query_param('Comment',Comment)
def get_IsRolling(self):
return self.get_query_params().get('IsRolling')
def set_IsRolling(self,IsRolling):
self.add_query_param('IsRolling',IsRolling)
def get_NodeCountPerBatch(self):
return self.get_query_params().get('NodeCountPerBatch')
def set_NodeCountPerBatch(self,NodeCountPerBatch):
self.add_query_param('NodeCountPerBatch',NodeCountPerBatch)
def get_TotlerateFailCount(self):
return self.get_query_params().get('TotlerateFailCount')
def set_TotlerateFailCount(self,TotlerateFailCount):
self.add_query_param('TotlerateFailCount',TotlerateFailCount)
def get_OnlyRestartStaleConfigNodes(self):
return self.get_query_params().get('OnlyRestartStaleConfigNodes')
def set_OnlyRestartStaleConfigNodes(self,OnlyRestartStaleConfigNodes):
self.add_query_param('OnlyRestartStaleConfigNodes',OnlyRestartStaleConfigNodes)
def get_TurnOnMaintenanceMode(self):
return self.get_query_params().get('TurnOnMaintenanceMode')
def set_TurnOnMaintenanceMode(self,TurnOnMaintenanceMode):
self.add_query_param('TurnOnMaintenanceMode',TurnOnMaintenanceMode) | [
"haowei.yao@alibaba-inc.com"
] | haowei.yao@alibaba-inc.com |
1d0914e57305a093924e35492cdf55af4109608e | 5805bf03876af7f32e75fbc467b257768676ce42 | /compadre-appengine/compadre-server/run.py | 5b81242d833dbdb0fafb4183df8d100cd0b475bd | [] | no_license | mtaziz/Compadre | 53ce8c717674886cadb16c81f2d87bdc7a11fe78 | 83473b404597dcf7140e8a435ac4c1e4d894020f | refs/heads/master | 2021-01-17T15:54:39.716474 | 2016-06-20T16:57:01 | 2016-06-20T16:57:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | import os
import sys
sys.path.insert(1, os.path.join(os.path.abspath('.'), 'lib'))
import application
| [
"yonatano@gmail.com"
] | yonatano@gmail.com |
59394e931356467d27d5d2fb4d1f788d461283b7 | 026c31969fe43e3d14029a8dbf7b4e3e435a06d2 | /users/tests/test_admin.py | 7c8be906cd6030b7fe983ed5bab088036da8a00c | [
"MIT"
] | permissive | victorfsf/github-monitor | db83c9f2ad0e725f6adbfa2c56c299aeadd5bc3a | 7192827d44ca616c0914864770f8f7910cbe55b3 | refs/heads/master | 2020-03-08T00:44:30.341022 | 2018-04-09T23:43:12 | 2018-04-09T23:43:12 | 127,812,813 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 846 | py | from django.test import TestCase
from django.test.client import RequestFactory
from model_mommy import mommy
from common.site import GithubMonitorAdminSite
from users.admin import GithubUserAdmin
class TestGithubUserAdmin(TestCase):
def setUp(self):
factory = RequestFactory()
self.user = mommy.make('users.User', username='test_username')
self.github_user = mommy.make('users.GithubUser', user=self.user)
self.admin = GithubUserAdmin(
self.github_user, GithubMonitorAdminSite()
)
self.request = factory.get('/')
def test_get_username(self):
expected = self.user.username
username = self.admin.get_username(self.github_user)
self.assertEqual(expected, username)
def tearDown(self):
self.github_user.delete()
self.user.delete()
| [
"victorfsf.dev@gmail.com"
] | victorfsf.dev@gmail.com |
b09fdc0bc43f30b2b51c8893afcf2024ef86d619 | 0009c76a25c89a0d61d3bc9e10071da58bdfaa5a | /py/ztools/mtp/mtp_tools.py | 0496f5ae683026478bdcc98faf9cc9c89b3e14a9 | [
"MIT"
] | permissive | julesontheroad/NSC_BUILDER | 84054e70a80b572088b0806a47ceb398302451b5 | e9083e83383281bdd9e167d3141163dcc56b6710 | refs/heads/master | 2023-07-05T05:23:17.114363 | 2021-11-15T19:34:47 | 2021-11-15T19:34:47 | 149,040,416 | 1,249 | 143 | MIT | 2022-12-15T03:19:33 | 2018-09-16T22:18:01 | Python | UTF-8 | Python | false | false | 8,313 | py | import os
from listmanager import folder_to_list
from listmanager import parsetags
from pathlib import Path
import Print
import shutil
from mtp.wpd import is_switch_connected
import sys
import subprocess
from python_pick import pick
from python_pick import Picker
squirrel_dir=os.path.abspath(os.curdir)
NSCB_dir=os.path.abspath('../'+(os.curdir))
if os.path.exists(os.path.join(squirrel_dir,'ztools')):
NSCB_dir=squirrel_dir
zconfig_dir=os.path.join(NSCB_dir, 'zconfig')
ztools_dir=os.path.join(NSCB_dir,'ztools')
squirrel_dir=ztools_dir
elif os.path.exists(os.path.join(NSCB_dir,'ztools')):
squirrel_dir=squirrel_dir
ztools_dir=os.path.join(NSCB_dir, 'ztools')
zconfig_dir=os.path.join(NSCB_dir, 'zconfig')
else:
ztools_dir=os.path.join(NSCB_dir, 'ztools')
zconfig_dir=os.path.join(NSCB_dir, 'zconfig')
testroute1=os.path.join(squirrel_dir, "squirrel.py")
testroute2=os.path.join(squirrel_dir, "squirrel.exe")
urlconfig=os.path.join(zconfig_dir,'NUT_DB_URL.txt')
isExe=False
if os.path.exists(testroute1):
squirrel=testroute1
isExe=False
elif os.path.exists(testroute2):
squirrel=testroute2
isExe=True
bin_folder=os.path.join(ztools_dir, 'bin')
nscb_mtp=os.path.join(bin_folder, 'nscb_mtp.exe')
cachefolder=os.path.join(ztools_dir, '_mtp_cache_')
if not os.path.exists(cachefolder):
os.makedirs(cachefolder)
games_installed_cache=os.path.join(cachefolder, 'games_installed.txt')
autoloader_files_cache=os.path.join(cachefolder, 'autoloader_files.txt')
sd_xci_cache=os.path.join(cachefolder, 'sd_xci.txt')
valid_saves_cache=os.path.join(cachefolder, 'valid_saves.txt')
mtp_source_lib=os.path.join(zconfig_dir,'mtp_source_libraries.txt')
mtp_internal_lib=os.path.join(zconfig_dir,'mtp_SD_libraries.txt')
storage_info=os.path.join(cachefolder, 'storage.csv')
download_lib_file = os.path.join(zconfig_dir, 'mtp_download_libraries.txt')
sx_autoloader_db=os.path.join(zconfig_dir, 'sx_autoloader_db')
def gen_sx_autoloader_files_menu():
print('***********************************************')
print('SX AUTOLOADER GENERATE FILES FROM HDD OR FOLDER')
print('***********************************************')
print('')
folder=input("Input a drive path: ")
if not os.path.exists(folder):
sys.exit("Can't find location")
title = 'Target for autoloader files: '
options = ['HDD','SD']
selected = pick(options, title, min_selection_count=1)
if selected[0]=='HDD':
type='hdd'
else:
type='sd'
title = 'Push files after generation?: '
options = ['YES','NO']
selected = pick(options, title, min_selection_count=1)
if selected[0]=='YES':
push=True
else:
push=False
title = "Ensure files can't colide after transfer?: "
options = ['YES','NO']
selected = pick(options, title, min_selection_count=1)
if selected[0]=='YES':
no_colide=True
else:
no_colide=False
gen_sx_autoloader_files(folder,type=type,push=push,no_colide=no_colide)
def gen_sx_autoloader_files(folder,type='hdd',push=False,no_colide=False):
gamelist=folder_to_list(folder,['xci','xc0'])
if type=='hdd':
SD_folder=os.path.join(sx_autoloader_db, 'hdd')
else:
SD_folder=os.path.join(sx_autoloader_db, 'sd')
if not os.path.exists(sx_autoloader_db):
os.makedirs(sx_autoloader_db)
if not os.path.exists(SD_folder):
os.makedirs(SD_folder)
for f in os.listdir(SD_folder):
fp = os.path.join(SD_folder, f)
try:
shutil.rmtree(fp)
except OSError:
os.remove(fp)
print(' * Generating autoloader files')
try:
for g in gamelist:
try:
fileid,fileversion,cctag,nG,nU,nD,baseid=parsetags(g)
if fileid=='unknown':
continue
tfile=os.path.join(SD_folder,fileid)
fileparts=Path(g).parts
if type=='hdd':
new_path=g.replace(fileparts[0],'"usbhdd:/')
else:
new_path=g.replace(fileparts[0],'"sdmc:/')
new_path=new_path.replace('\\','/')
with open(tfile,'w') as text_file:
text_file.write(new_path)
except:pass
print(' DONE')
if push==True:
if not is_switch_connected():
sys.exit("Can't push files. Switch device isn't connected.\nCheck if mtp responder is running!!!")
print(' * Pushing autoloader files')
if type=='hdd':
destiny="1: External SD Card\\sxos\\titles\\00FF0012656180FF\\cach\\hdd"
else:
destiny="1: External SD Card\\sxos\\titles\\00FF0012656180FF\\cach\\sd"
process=subprocess.Popen([nscb_mtp,"TransferFolder","-ori",SD_folder,"-dst",destiny,"-fbf","true"])
while process.poll()==None:
if process.poll()!=None:
process.terminate();
if no_colide==True:
cleanup_sx_autoloader_files()
except BaseException as e:
Print.error('Exception: ' + str(e))
pass
def cleanup_sx_autoloader_files():
from mtp_game_manager import retrieve_xci_paths
from mtp_game_manager import get_gamelist
try:
for f in os.listdir(cachefolder):
fp = os.path.join(cachefolder, f)
try:
shutil.rmtree(fp)
except OSError:
os.remove(fp)
except:pass
if not is_switch_connected():
sys.exit("Can't push files. Switch device isn't connected.\nCheck if mtp responder is running!!!")
retrieve_xci_paths()
print(" * Retriving autoloader files in device. Please Wait...")
process=subprocess.Popen([nscb_mtp,"Retrieve_autoloader_files","-tfile",autoloader_files_cache,"-show","false"],stdout=subprocess.PIPE,stderr=subprocess.PIPE)
while process.poll()==None:
if process.poll()!=None:
process.terminate();
if os.path.exists(autoloader_files_cache):
print(" Success")
else:
sys.exit("Autoloader files weren't retrieved properly")
gamelist=get_gamelist(file=sd_xci_cache)
autoloader_list=get_gamelist(file=autoloader_files_cache)
sd_xci_ids=[]
for g in gamelist:
try:
fileid,fileversion,cctag,nG,nU,nD,baseid=parsetags(g)
sd_xci_ids.append(fileid)
except:pass
files_to_remove=[]
for f in autoloader_list:
fileparts=Path(f).parts
if 'sdd' in fileparts and not (fileparts[-1] in sd_xci_ids):
files_to_remove.append(f)
elif 'hdd' in fileparts and (fileparts[-1] in sd_xci_ids):
files_to_remove.append(f)
print(" * The following files will be removed")
for f in files_to_remove:
print(" - "+f)
for f in files_to_remove:
process=subprocess.Popen([nscb_mtp,"DeleteFile","-fp",f])
while process.poll()==None:
if process.poll()!=None:
process.terminate();
def push_sx_autoloader_libraries():
if not is_switch_connected():
sys.exit("Can't push files. Switch device isn't connected.\nCheck if mtp responder is running!!!")
title = "Ensure files can't colide after transfer?: "
options = ['YES','NO']
selected = pick(options, title, min_selection_count=1)
if selected[0]=='YES':
no_colide=True
else:
no_colide=False
print(' * Pushing autoloader files in hdd folder')
HDD_folder=os.path.join(sx_autoloader_db, 'hdd')
destiny="1: External SD Card\\sxos\\titles\\00FF0012656180FF\\cach\\hdd"
process=subprocess.Popen([nscb_mtp,"TransferFolder","-ori",HDD_folder,"-dst",destiny,"-fbf","true"])
while process.poll()==None:
if process.poll()!=None:
process.terminate();
print(' * Pushing autoloader files in SD folder')
SD_folder=os.path.join(sx_autoloader_db, 'sd')
destiny="1: External SD Card\\sxos\\titles\\00FF0012656180FF\\cach\\sd"
process=subprocess.Popen([nscb_mtp,"TransferFolder","-ori",SD_folder,"-dst",destiny,"-fbf","true"])
while process.poll()==None:
if process.poll()!=None:
process.terminate();
if no_colide==True:
cleanup_sx_autoloader_files()
def get_nca_ticket(filepath,nca):
import Fs
from binascii import hexlify as hx, unhexlify as uhx
if filepath.endswith('xci') or filepath.endswith('xcz'):
f = Fs.Xci(filepath)
check=False;titleKey=0
for nspF in f.hfs0:
if str(nspF._path)=="secure":
for file in nspF:
if (file._path).endswith('.tik'):
titleKey = file.getTitleKeyBlock().to_bytes(16, byteorder='big')
check=f.verify_key(nca,str(file._path))
if check==True:
break
return check,titleKey
elif filepath.endswith('nsp') or filepath.endswith('nsz'):
f = Fs.Nsp(filepath)
check=False;titleKey=0
for file in f:
if (file._path).endswith('.tik'):
titleKey = file.getTitleKeyBlock().to_bytes(16, byteorder='big')
check=f.verify_key(nca,str(file._path))
if check==True:
break
return check,titleKey | [
"42461174+julesontheroad@users.noreply.github.com"
] | 42461174+julesontheroad@users.noreply.github.com |
84eb65886a58255c2d3c09f27ece27c9be43b5bf | d55deb7b26277a647aff5887cdbe65f002035ac7 | /jobs/models.py | b2cee414ec2ee17dd8d1ae2de7a8ea7258a12eae | [
"MIT"
] | permissive | cyndi088/recruitment | 0af95ef6028c678e49952752e65dd7e58fd9799f | e25e29b8b6724f1dce7b9ed5d9efb409744907c0 | refs/heads/main | 2023-01-22T18:38:55.403025 | 2020-12-03T06:19:16 | 2020-12-03T06:19:16 | 318,094,536 | 0 | 0 | MIT | 2020-12-03T06:19:17 | 2020-12-03T06:13:45 | Python | UTF-8 | Python | false | false | 1,226 | py | from django.db import models
from datetime import datetime
from django.contrib.auth.models import User
# Create your models here.
JobTypes = [
(0, "技术类"),
(1, "产品类"),
(2, "运营类"),
(3, "设计类")
]
Cities = [
(0, "北京"),
(1, "上海"),
(2, "深圳"),
(3, "杭州")
]
class Job(models.Model):
job_type = models.SmallIntegerField(blank=False, choices=JobTypes, verbose_name="职位类别")
job_name = models.CharField(blank=False, max_length=250, verbose_name="职位名称")
job_city = models.SmallIntegerField(blank=False, choices=Cities, verbose_name="工作地点")
job_responsibility = models.TextField(blank=False, max_length=1024, verbose_name="职位职责")
job_requirement = models.TextField(blank=False, max_length=1024, verbose_name="职位要求")
creator = models.ForeignKey(User, verbose_name="创建人", null=True, on_delete=models.SET_NULL)
created_date = models.DateTimeField(verbose_name="创建日期", auto_now_add=True)
modified_date = models.DateTimeField(verbose_name="修改日期", auto_now=True, null=True, blank=True)
class Meta:
verbose_name = '职位'
verbose_name_plural = verbose_name
| [
"cyndi088@163.com"
] | cyndi088@163.com |
e30bf0195bbfc6ed9aa15d5e111172064f3af938 | bc863127e5f44ede4cfe46316ec44ce00cffb2d4 | /config.py | d27612bdaa7edb6acf23a584e67ea2f321b668d9 | [] | no_license | yogeshBsht/FeedbackForm | d33ce048261177badbed07871711da6260b93086 | 8c87505100bda14add08b1bfa3918ffc95a97525 | refs/heads/main | 2023-06-02T15:42:36.984226 | 2021-06-21T17:06:31 | 2021-06-21T17:06:31 | 378,629,987 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 796 | py | import os
from dotenv import load_dotenv
basedir = os.path.abspath(os.path.dirname(__file__))
load_dotenv(os.path.join(basedir, '.env'))
class Config(object):
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'app.db')
SQLALCHEMY_TRACK_MODIFICATIONS = False
SECRET_KEY = 'SECRET_KEY'
# MAIL_SERVER = os.environ.get('MAIL_SERVER')
# MAIL_PORT = int(os.environ.get('MAIL_PORT') or 25)
# MAIL_USE_TLS = os.environ.get('MAIL_USE_TLS') is not None
# MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
# MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
# ADMINS = ['your-email@example.com']
# LANGUAGES = ['en', 'es']
# MS_TRANSLATOR_KEY = os.environ.get('MS_TRANSLATOR_KEY')
# POSTS_PER_PAGE = 25
| [
"ybnsit@gmail.com"
] | ybnsit@gmail.com |
9ef5be1266f315f4969221617ac232fd1647c121 | 1ed17b57788423eb62570020286daf3016749706 | /CountryGroup.py | e88e6d468fe13afca192163b40994c5c0da6a8bc | [] | no_license | yuvapriya/TopCoder | b37749e4afae7fedf5a10881ba2dd2249dc08bc5 | 6dea4cc9bb70ab26ac70a846a2cd46a82061c033 | refs/heads/master | 2021-01-19T06:37:17.735811 | 2015-05-17T21:47:30 | 2015-05-17T21:47:30 | 35,783,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 945 | py | #Problem Statement: http://community.topcoder.com/stat?c=problem_statement&pm=13687
def countryGroup(arr):
countryGrp = {}
prev = None
for i in range(len(arr)):
val = arr[i]
if( val in countryGrp):
countryGrp[val] +=1
if(val != 1):
if prev != None and prev !=val:
return -1
if countryGrp[val] > val:
return -1
prev = val
else:
countryGrp[val] =1
prev = val
total = 0
for key in countryGrp.keys():
if key == 1:
total+= countryGrp[key]
else:
if (countryGrp[key] != key):
return -1
else:
total +=1
return total
print countryGroup([2,2,3,3,3])
print countryGroup([1,1,1,1,1])
print countryGroup([3,3])
print countryGroup([4,4,4,4,1,1,2,2,3,3,3])
print countryGroup([2,1,2,2,1,2])
| [
"m.yuvapriya@gmail.com"
] | m.yuvapriya@gmail.com |
4d251c34bdbf56b6e283315909d38807e090ff38 | 66746ed38e13b2d069829b1e7d963e2a66808f4e | /37 - Estrutura de repetição WHILE - Cria menu.py | 92ebdc45c4e59ce4ccd23fb7fb34b6fe0a53358f | [] | no_license | leanndropx/px-python-logica-de-programacao | 23ea8064381646623556130c29c7ecec4a845eb1 | 72d852abc8b4f85c0963909aab8e6aa0e9278ec1 | refs/heads/master | 2023-06-17T09:22:47.516634 | 2021-07-15T11:42:19 | 2021-07-15T11:42:19 | 385,781,144 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,636 | py |
# - DESCREVENDO O DESAFIO
print('37 - Crie um programa que leia DOIS valores e mostre um menu na tela:')
print('1 - somar')
print('2 - multiplicar')
print('3 - maior')
print('4 - novos números')
print('5 - sair do programa')
print()
print()
# - INICIALIZANDO O PROGRAMA
# IMPORTA BIBLIOTECAS
from time import sleep
# 1 - RECEBE DADOS
n1=int(input('digite o primeiro número: '))
n2=int(input('digite o segundo número: '))
print('O que você gostaria de fazer: ')
opcao=0
while opcao!=5:
print('\033[7m',' ','\033[m')
print(''' [ 1 ] Somar
[ 2 ] Multiplicar
[ 3 ] Maior
[ 4 ] Novos números
[ 5 ] Sair do programa''')
print('\033[7m', ' ', '\033[m')
opcao=int(input('escolha a opção: '))
# 2 - MANIPULA E CRIA NOVOS DADOS
if opcao==1:
soma=n1+n2
print('A soma é {}'.format(soma))
elif opcao==2:
multiplicar=n1*n2
print('O produto é {}'.format(multiplicar))
elif opcao==3:
if n1>n2:
maior=n1
else:
maior=n2
print('O maior número é {}'.format(maior))
elif opcao==4:
n1=int(input('digite o primeiro número: '))
n2=int(input('digite o segundo número: '))
elif opcao==5:
print('Finalizando...')
else:
print('opção inválida')
print()
sleep(1)
print('O programa foi encerrado!')
# 3 - DEVOLVE DAODS
| [
"leanndrompeixoto1@gmail.com"
] | leanndrompeixoto1@gmail.com |
3b02f493286c8655b3bc6b8e6da7a3105dcca05f | a3b46d9b89bf7cf984413845db604ec2c98df8b3 | /code.py | 78d1cca2ec8f0d4db535249a5eb54e431c8c5d2a | [] | no_license | asahazmy/Field-prediction | 3c1b3cd1a057457648237db2933f5608dff6a305 | 6b7ff05371e20ac9da9cd54ca74ac606914c7f70 | refs/heads/master | 2023-01-03T06:14:01.879003 | 2020-10-25T13:08:21 | 2020-10-25T13:08:21 | 275,312,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,972 | py | import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import reduce_sum
from tensorflow.keras.backend import pow
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Conv2D, MaxPool2D, UpSampling2D, Concatenate, Add, Flatten
from tensorflow.keras.losses import binary_crossentropy
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
import os
import cv2
#Configurations
load_pretrained_model = False # load a pre-trained model
save_model = True # save the model after training
train_dir = '' # directory of training images
pretrained_model_path = '' # path of pretrained model
model_save_path = '' # path of model to save
epochs = 25
# batch size for training unet
k_size = 3 # kernel size 3x3
val_size = .20 # split of training set between train and validation set
TRAIN_LENGTH = info.splits['train'].num_examples
BATCH_SIZE = 64
BUFFER_SIZE = 1000
STEPS_PER_EPOCH = TRAIN_LENGTH // BATCH_SIZE
'''input data & mask'''
#Normalisasi data
def normalize(input_image, input_mask):
input_image = tf.cast(input_image, tf.float32) / 255.0
input_mask -= 1
return input_image, input_mask
#input data
def load_image_train(datapoint):
input_image = tf.image.resize(datapoint['image'], (256, 256))
input_mask = tf.image.resize(datapoint['segmentation_mask'], ((256, 256))
if tf.random.uniform()> 0.5:
input_image = tf.image.flip_left_right(input_image)
input_mask = tf.image.flip_left_right(input_mask)
input_image, input_mask = normalize(input_image, input_mask)
return input_image, input_mask
def load_image_test(datapoint):
input_image = tf.image.resize(datapoint['image'], (128, 128))
input_mask = tf.image.resize(datapoint['segmentation_mask'], (128, 128))
input_image, input_mask = normalize(input_image, input_mask)
return input_image, input_mask
train = dataset['train'].map(load_image_train, num_parallel_calls=tf.data.experimental.AUTOTUNE)
test = dataset['test'].map(load_image_test)
train_dataset = train.cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE).repeat()
train_dataset = train_dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
test_dataset = test.batch(BATCH_SIZE)
'''checking the data'''
def display(display_list):
plt.figure(figsize=(15, 15))
title = ['Input Image', 'True Mask', 'Predicted Mask']
for i in range(len(display_list)):
plt.subplot(1, len(display_list), i+1)
plt.title(title[i])
plt.imshow(tf.keras.preprocessing.image.array_to_img(display_list[i]))
plt.axis('off')
plt.show()
for image, mask in train.take(4):
sample_image, sample_mask = image, mask
display([sample_image, sample_mask])
'''ResUnet'''
def bn_act(x, act=True):
'batch normalization layer with an optinal activation layer'
x = tf.keras.layers.BatchNormalization()(x)
if act == True:
x = tf.keras.layers.Activation('relu')(x)
return x
def conv_block(x, filters, kernel_size=3, padding='same', strides=1):
'convolutional layer which always uses the batch normalization layer'
conv = bn_act(x)
conv = Conv2D(filters, kernel_size, padding=padding, strides=strides)(conv)
return conv
def stem(x, filters, kernel_size=3, padding='same', strides=1):
conv = Conv2D(filters, kernel_size, padding=padding, strides=strides)(x)
conv = conv_block(conv, filters, kernel_size, padding, strides)
shortcut = Conv2D(filters, kernel_size=1, padding=padding, strides=strides)(x)
shortcut = bn_act(shortcut, act=False)
output = Add()([conv, shortcut])
return output
def residual_block(x, filters, kernel_size=3, padding='same', strides=1):
res = conv_block(x, filters, k_size, padding, strides)
res = conv_block(res, filters, k_size, padding, 1)
shortcut = Conv2D(filters, kernel_size, padding=padding, strides=strides)(x)
shortcut = bn_act(shortcut, act=False)
output = Add()([shortcut, res])
return output
def upsample_concat_block(x, xskip):
u = UpSampling2D((2,2))(x)
c = Concatenate()([u, xskip])
return c
def ResUNet(img_h, img_w):
f = [16, 32, 64, 128, 256]
inputs = Input((img_h, img_w, 1))
## Encoder
e0 = inputs
e1 = stem(e0, f[0])
e2 = residual_block(e1, f[1], strides=2)
e3 = residual_block(e2, f[2], strides=2)
e4 = residual_block(e3, f[3], strides=2)
e5 = residual_block(e4, f[4], strides=2)
## Bridge
b0 = conv_block(e5, f[4], strides=1)
b1 = conv_block(b0, f[4], strides=1)
## Decoder
u1 = upsample_concat_block(b1, e4)
d1 = residual_block(u1, f[4])
u2 = upsample_concat_block(d1, e3)
d2 = residual_block(u2, f[3])
u3 = upsample_concat_block(d2, e2)
d3 = residual_block(u3, f[2])
u4 = upsample_concat_block(d3, e1)
d4 = residual_block(u4, f[1])
outputs = tf.keras.layers.Conv2D(4, (1, 1), padding="same", activation="sigmoid")(d4)
model = tf.keras.models.Model(inputs, outputs)
return model
'''Loss fuction'''
def dsc(y_true, y_pred):
smooth = 1.
y_true_f = Flatten()(y_true)
y_pred_f = Flatten()(y_pred)
intersection = reduce_sum(y_true_f * y_pred_f)
score = (2. * intersection + smooth) / (reduce_sum(y_true_f) + reduce_sum(y_pred_f) + smooth)
return score
def dice_loss(y_true, y_pred):
loss = 1 - dsc(y_true, y_pred)
return loss
def bce_dice_loss(y_true, y_pred):
loss = binary_crossentropy(y_true, y_pred) + dice_loss(y_true, y_pred)
return loss
# Focal Tversky loss, brought to you by: https://github.com/nabsabraham/focal-tversky-unet
def tversky(y_true, y_pred, smooth=1e-6):
y_true_pos = tf.keras.layers.Flatten()(y_true)
y_pred_pos = tf.keras.layers.Flatten()(y_pred)
true_pos = tf.reduce_sum(y_true_pos * y_pred_pos)
false_neg = tf.reduce_sum(y_true_pos * (1-y_pred_pos))
false_pos = tf.reduce_sum((1-y_true_pos)*y_pred_pos)
alpha = 0.7
return (true_pos + smooth)/(true_pos + alpha*false_neg + (1-alpha)*false_pos + smooth)
def tversky_loss(y_true, y_pred):
return 1 - tversky(y_true,y_pred)
def focal_tversky_loss(y_true,y_pred):
pt_1 = tversky(y_true, y_pred)
gamma = 0.75
return tf.keras.backend.pow((1-pt_1), gamma)
'''Compile & Fit'''
# Build the ResUNet and train it with the focal Tversky loss; the Tversky
# index is tracked as the metric.
model = ResUNet(img_h=img_h, img_w=img_w)
# NOTE(review): ``lr`` is the legacy Keras argument name (newer versions use
# ``learning_rate``), and epsilon=0.1 is unusually large for Adam — confirm
# both against the installed TensorFlow version and training results.
adam = tf.keras.optimizers.Adam(lr = 0.05, epsilon = 0.1)
model.compile(optimizer=adam, loss=focal_tversky_loss, metrics=[tversky])
if load_pretrained_model:
    try:
        model.load_weights(pretrained_model_path)
        print('pre-trained model loaded!')
    except OSError:
        # Missing/unreadable weight file: continue with fresh weights.
        print('You need to run the model and load the trained model')
#history = model.fit_generator(generator=training_generator, validation_data=validation_generator, epochs=epochs, verbose=1)
if save_model:
    model.save(model_save_path)
| [
"noreply@github.com"
] | asahazmy.noreply@github.com |
fa26cbfd0a0af998227fd24745c6f1b50a85ae34 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03046/s367901013.py | bd60026b909a76c85e533b517ac364ab9dac011a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,127 | py | from sys import stdout
# Competitive-programming I/O helpers.
printn = lambda x: stdout.write(str(x))  # print without a trailing newline
inn = lambda : int(input())              # read a single int
inl = lambda: list(map(int, input().split()))  # read a line of ints as a list
inm = lambda: map(int, input().split())  # read a line of ints (map object on Python 3)
ins = lambda : input().strip()           # read a stripped line
DBG = True # and False
BIG = 999999999  # sentinel "infinity"
R = 10**9 + 7    # common contest modulus (unused here)
def ddprint(x):
    """Print *x* only when the module-level DBG flag is enabled."""
    if not DBG:
        return
    print(x)
m,k = inm()
# Construct a sequence of length 2^(m+1) in which every value 0..2^m-1 occurs
# exactly twice and the XOR of the elements strictly between the two copies of
# each value equals k; print -1 when impossible.  (Appears to be AtCoder
# ABC126 F "XOR Matching" — TODO confirm.)
if m==0 and k==0:
    # Only the value 0 exists; "0 0" works (empty in-between XOR is 0 = k).
    print('0 0')
    exit()
if m==0 and k>0:
    print('-1')
    exit()
if m==1 and k==0:
    print('0 0 1 1')
    exit()
if m==1 and k>0:
    # With values {0, 1} no arrangement achieves a nonzero in-between XOR.
    print('-1')
    exit()
if k>=2**m:
    # k itself must be representable in m bits.
    print('-1')
    exit()
if k==0:
    # Trivial case: place each value twice, adjacently (empty span XORs to 0).
    printn('0 0')
    for i in range(1,2**m):
        printn(' {} {}'.format(i,i))
    print('')
    exit()
# Pair each value i with its partner j = i ^ k; `a` keeps one representative
# per pair, excluding the special pair {0, k}.
u = [False]*(2**m)
u[k] = True
a = []
cnt = 0
for i in range(1,2**m):
    j = i^k
    if not u[i] and not u[j]:
        a.append(i)
        u[j] = True
        cnt += 1
        if cnt==2**(m-1)-1:
            # One representative for each of the 2^(m-1)-1 pairs besides {0, k}.
            break
# Mirror construction:
#   a ... 0 k 0 ... reversed(a)  followed by  (a^k) ... k ... reversed(a^k)
s = [x for x in a]
t = [x for x in a]
t.reverse()
s.extend([0,k,0])
s.extend(t)
v = [x^k for x in a]
t = [x for x in v]
t.reverse()
s.extend(v)
s.append(k)
s.extend(t)
# Emit the whole sequence space-separated on one line.
printn(s[0])
for i in range(1,len(s)):
    printn(' ' + str(s[i]))
print("")
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
354f4e8b11fc7deaae648a37d207d137f827d66e | 0aa87ee2e544f56c17c2dde28a3b3feed08daa14 | /apps/users/urls.py | 6dda1d1373eadae3c77476250c17308642600204 | [] | no_license | yanshigou/mxonline | f2cc44724c1511418953e7e06d04661244b29455 | cebc3295734713846828246fc54dd33f8df14f86 | refs/heads/master | 2022-12-09T12:11:05.734326 | 2022-08-17T10:38:13 | 2022-08-17T10:38:13 | 148,120,737 | 0 | 2 | null | 2022-12-08T02:58:15 | 2018-09-10T08:06:10 | Python | UTF-8 | Python | false | false | 1,309 | py | # -*- coding: utf-8 -*-
__author__ = 'dzt'
__date__ = '2018/12/21 23:48'
from django.conf.urls import url
from .views import UserInfoView, UploadImageView, UpdatePwdView, SendEmailCodeView, UpdateEmailView, MyCourses
from .views import MyFavOrgView, MyFavTeacherView, MyFavCourseView, MyMessageView
urlpatterns = [
    # User profile information
    url(r'^info/$', UserInfoView.as_view(), name='user_info'),
    # User avatar upload
    url(r'^image/upload/$', UploadImageView.as_view(), name='image_upload'),
    # Change password from the user's personal center
    url(r'^update/pwd/$', UpdatePwdView.as_view(), name='update_pwd'),
    # Send an email verification code
    url(r'^sendemail_code/$', SendEmailCodeView.as_view(), name='sendemail_code'),
    # Change the account email address
    url(r'^update_email/$', UpdateEmailView.as_view(), name='update_email'),
    # My courses
    url(r'^mycourses/$', MyCourses.as_view(), name='mycourses'),
    # My favorites: live-stream organizations
    url(r'^myfav/org/$', MyFavOrgView.as_view(), name='myfav_org'),
    # My favorites: broadcasters
    url(r'^myfav/teacher/$', MyFavTeacherView.as_view(), name='myfav_teacher'),
    # My favorites: courses
    url(r'^myfav/course/$', MyFavCourseView.as_view(), name='myfav_course'),
    # My messages
    url(r'^mymessage/$', MyMessageView.as_view(), name='mymessage'),
]
"569578851@qq.com"
] | 569578851@qq.com |
506ab3ede97c112af86c4a23956ee39a25c9aecd | 83b1a267809c08a57a3bb16c103d71539502a650 | /job/migrations/0011_apply_created_at.py | c9ebca4b68d4fe3dc9d8d3052bdac004ee5816f8 | [] | no_license | rimatechcampus/django-jobboard-project- | c66933295b4692c7d3cb055dcf0cbaef80424b38 | 8823e1e7db011a4fbaa0fc87f1810bcd5dab08c6 | refs/heads/master | 2022-11-20T16:40:56.495550 | 2020-07-19T16:52:13 | 2020-07-19T16:52:13 | 279,794,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | # Generated by Django 3.0.8 on 2020-07-18 08:13
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add a ``created_at`` timestamp to ``Apply``."""

    dependencies = [
        ('job', '0010_apply_job'),
    ]

    operations = [
        migrations.AddField(
            model_name='apply',
            name='created_at',
            # auto_now=True: the field is refreshed on every save, not only
            # on creation (auto_now_add would be creation-only).
            field=models.DateTimeField(auto_now=True),
        ),
    ]
| [
"riyamtechcampus@gmail.com"
] | riyamtechcampus@gmail.com |
3412b6740ab16c481792133ad7b3581fee11316c | 604756ba3da355fffb1a1cf4b882441de2d75184 | /app/util/py2mongo.py | d6d181192ddcc862daecdedcade7e3838cf2a87d | [] | no_license | gowthamlabs/python-rest-ml | 4e93f64019e28f4436b4c634d275e98b70c98939 | 3aa0a1b6fddd52037bfcdb065a9ae63105fd9f6c | refs/heads/master | 2020-07-29T01:59:28.579650 | 2019-09-30T04:46:39 | 2019-09-30T04:46:39 | 209,625,473 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 890 | py | from pymongo import MongoClient
# pprint library is used to make the output look more pretty
from pprint import pprint
# connect to MongoDB, change the << MONGODB URL >> to reflect your own connection string
# client = MongoClient("localhost:27017") --> this also works
try:
    # MongoClient connects lazily, so this rarely raises even when the
    # server is down; real failures usually surface on the first query.
    client = MongoClient(port=27017)
except Exception as inst:
    # BUG FIX: the original printed "8: " + inst, concatenating str with an
    # Exception object, which raises TypeError inside the handler itself.
    # NOTE(review): when this branch runs, ``client`` stays undefined and the
    # ``db`` assignment below raises NameError — confirm intended behaviour.
    print("Unexpected error in 8 :", "8: " + str(inst))
# Set the db object to point to the myapp database
db = client.myapp
# Showcasing the count() method of find, count the total number of 5 ratings
print('The number of products available:')
# fivestarcount = db.reviews.find({'rating': 5}).count()
#productsCount = db.products.find().count();
#print(productsCount)
def productCount():
    """Return the number of documents in the ``products`` collection as a
    string, or the stringified error when the query fails."""
    try:
        # NOTE(review): Cursor.count() is deprecated/removed in recent
        # pymongo; db.products.count_documents({}) is the modern equivalent —
        # confirm the installed pymongo version before switching.
        productsCount = db.products.find().count()
        return str(productsCount)
    except Exception as inst:
        # BUG FIX: the original printed "23: " + inst, concatenating str with
        # an Exception object — a TypeError raised inside the handler.
        print("Unexpected error in 23:", "23: " + str(inst))
        return str(inst)
| [
"gowtham.venugopalan@cognizant.com"
] | gowtham.venugopalan@cognizant.com |
a25c70b086e30d5453a6b2028947b60a2489d0ec | 333b2e1284be6ea06a9989bcc76fd296f5c4f0a4 | /modules/study.py | 7aff8c48c34019e170d78e05afffc4ecb7954e76 | [] | no_license | luomeng007/MyLife | 567df155a30857e2c5f03049611d83eb0a847c02 | 76447fdfeaa83d7b77964560d56c67ce2cd36905 | refs/heads/main | 2023-01-20T14:17:30.613718 | 2020-11-29T10:46:26 | 2020-11-29T10:46:26 | 309,741,680 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,352 | py | # -*- coding: utf-8 -*-
import os
import time
import speech
class Study:
    """Interactive study-session timer.

    Prompts the user for a session length (30 or 60 minutes), refuses to
    start once today's accumulated study time (persisted in
    ``time_data_study.txt``) reaches 8 hours, runs the timer with spoken
    start/stop cues, and writes the updated daily total back to disk.
    """

    def __init__(self):
        # Prompt until the user picks a valid session length.
        while True:
            print("请您选择,提示:请输入序号1或者2")
            print("1. 学习30分钟")
            print("2. 学习60分钟")
            self.choice = input("您的决定: ")
            print("")
            if self.choice == "1":
                self.total_time = 30 * 60
                break
            elif self.choice == "2":
                self.total_time = 60 * 60
                break
            else:
                print("您的输入值有误,请重新输入!提示:输入数字1或者2")
                continue
        self.start_time = time.time()
        self.flag = True
        # Load today's accumulated hours (0 if there is no record yet).
        if not os.path.exists("./time_data_study.txt"):
            self.time_total_study = 0
        else:
            with open("./time_data_study.txt", "r") as f:
                time_data = f.readline()
                self.time_total_study = float(time_data)
        # judge whether the total time reaches 8 hours
        if self.time_total_study >= 8:
            print("今天学习时间太久了,请做点儿别的事情吧!")
            print("")
            self.flag = False
        # A 60-minute session would overshoot the 8-hour cap when exactly
        # 7.5 hours are already logged (totals move in 0.5h steps).
        if self.choice == "2" and self.time_total_study == 7.5:
            print("今日剩余学习时间30分钟,请重新选择")
            print("")
            self.flag = False

    def main_program(self):
        # Run the session only if __init__ validated it.
        if self.flag:
            self.start_learning()
            self.update_data()

    def start_learning(self):
        """Block until the chosen session length has elapsed, then record it."""
        print("开始学习!")
        speech.say("los geht's")
        # BUG FIX: the original busy-waited with
        #   while round(time.time() - self.start_time) != self.total_time: pass
        # which pins a CPU core at 100% and, if the process is descheduled
        # across the target second, skips past the equality and never stops.
        # Sleep-poll until the elapsed time reaches the target instead.
        while time.time() - self.start_time < self.total_time:
            # Voice interaction could be added here in the future.
            time.sleep(1)
        speech.say("fertig!")
        print("学习完成!")
        if self.choice == "1":
            self.time_total_study += 0.5
        if self.choice == "2":
            self.time_total_study += 1

    def update_data(self):
        # Persist the updated daily total (one float per line, file rewritten).
        with open("./time_data_study.txt", "w+") as f:
            f.write(str(self.time_total_study) + '\n')
if __name__ == "__main__":
    # ML: My Life
    # Interactive entry point: ask for a session length, then run the timer.
    s = Study()
    s.main_program()
"noreply@github.com"
] | luomeng007.noreply@github.com |
dc99e0b0e9ab6f25c323a84c139ce0ec4d9fcdeb | bcd33ba045b68fe6fba6f7a3a8fd95124106ac16 | /tests/test_dates.py | d2c3b3422cc62b44cc924dd40f9e617529822d61 | [
"MIT"
] | permissive | bfontaine/Romme | 43d9ba2f6cd09f7b24f8916b121854521009cdd0 | affdfb23a6bb882c17da95ec3767710d5bebd59a | refs/heads/master | 2021-03-27T12:29:13.329232 | 2017-06-10T19:42:30 | 2017-06-10T19:42:30 | 93,895,352 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | # -*- coding: UTF-8 -*-
import unittest
from romme.dates import RepublicanDate
class TestDates(unittest.TestCase):
    """String formatting of RepublicanDate values."""

    def test_str(self):
        first_day = RepublicanDate(1, 1, 1)
        self.assertEqual("1 Vendémiaire, an I", str(first_day))
| [
"b@ptistefontaine.fr"
] | b@ptistefontaine.fr |
91f7b4d2efaf48ed26bfcc96e2670ac062a664fe | 6515c886cc420539bed05b2250c76e1c6974e5da | /models/mxnet_resnet_50.py | 708dbb07c13c01468c1d3fe4962f17ca8206bfd6 | [] | no_license | yuanmengzhixing/pytorch_deep_metric_learning | a320fd4e8863b9b8c3768b61e46027ccfc2077ee | b57621355a49af89573447c72685694043548434 | refs/heads/master | 2020-03-22T23:10:11.622231 | 2018-03-11T08:02:56 | 2018-03-11T08:02:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35,697 | py | import torch.nn as nn
import torch.nn.functional as F
import torch
import numpy as np
# Module-level cache for the converted weight dictionary.
# NOTE(review): inside the class body the identifier ``__weights_dict`` is
# name-mangled to ``_mxnet_resnet_50__weights_dict``, so the class methods do
# NOT read this particular global — confirm before relying on it.
__weights_dict = dict()
# The assignment below is overridden by the last one; only the final path is
# actually used at import time.
pre_trained_path = '/home/zhengxiawu/project/pytorch_deep_metric_learning/pretrained_models/kit_pytorch.npy'
#pre_trained_path = '/home/zhengxiawu/deep_learning/model/mxnet_2_resnet/mx2pt_resnet_50.npy'
#pre_trained_path = '/home/zhengxiawu/project/pytorch_deep_metric_learning/pretrained_models/resnet_50.npy'
pre_trained_path = '/home/zhengxiawu/deep_learning/model/mxnet_2_resnet/resnet_50_pytorch.npy'
def load_weights():
    """Load the converted weight dictionary from ``pre_trained_path``.

    numpy stores the dict as a 0-d object array, hence ``.item()``.  Weight
    files pickled under Python 2 need ``encoding='bytes'`` to unpickle under
    Python 3, which is what the fallback load handles.

    NOTE(review): recent numpy additionally requires ``allow_pickle=True``
    for object arrays — confirm against the installed numpy version.
    """
    try:
        weights_dict = np.load(pre_trained_path).item()
    except Exception:
        # BUG FIX: the original used a bare ``except:``, which also swallows
        # SystemExit/KeyboardInterrupt; catch Exception instead.
        weights_dict = np.load(pre_trained_path, encoding='bytes').item()
    return weights_dict
class mxnet_resnet_50(nn.Module):
def __init__(self, **kwargs):
super(mxnet_resnet_50, self).__init__()
num_class = kwargs['num_class']
if kwargs['pretrain']:
global __weights_dict
__weights_dict = load_weights()
self.conv1 = self.__conv(2, name='conv1', in_channels=3, out_channels=64, kernel_size=(7L, 7L), stride=(2L, 2L),
groups=1, bias=True)
self.bn_conv1 = self.__batch_normalization(2, 'bn_conv1', num_features=64, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res2a_branch1 = self.__conv(2, name='res2a_branch1', in_channels=64, out_channels=256,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.res2a_branch2a = self.__conv(2, name='res2a_branch2a', in_channels=64, out_channels=64,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn2a_branch1 = self.__batch_normalization(2, 'bn2a_branch1', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.bn2a_branch2a = self.__batch_normalization(2, 'bn2a_branch2a', num_features=64, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res2a_branch2b = self.__conv(2, name='res2a_branch2b', in_channels=64, out_channels=64,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn2a_branch2b = self.__batch_normalization(2, 'bn2a_branch2b', num_features=64, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res2a_branch2c = self.__conv(2, name='res2a_branch2c', in_channels=64, out_channels=256,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn2a_branch2c = self.__batch_normalization(2, 'bn2a_branch2c', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res2b_branch2a = self.__conv(2, name='res2b_branch2a', in_channels=256, out_channels=64,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn2b_branch2a = self.__batch_normalization(2, 'bn2b_branch2a', num_features=64, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res2b_branch2b = self.__conv(2, name='res2b_branch2b', in_channels=64, out_channels=64,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn2b_branch2b = self.__batch_normalization(2, 'bn2b_branch2b', num_features=64, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res2b_branch2c = self.__conv(2, name='res2b_branch2c', in_channels=64, out_channels=256,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn2b_branch2c = self.__batch_normalization(2, 'bn2b_branch2c', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res2c_branch2a = self.__conv(2, name='res2c_branch2a', in_channels=256, out_channels=64,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn2c_branch2a = self.__batch_normalization(2, 'bn2c_branch2a', num_features=64, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res2c_branch2b = self.__conv(2, name='res2c_branch2b', in_channels=64, out_channels=64,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn2c_branch2b = self.__batch_normalization(2, 'bn2c_branch2b', num_features=64, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res2c_branch2c = self.__conv(2, name='res2c_branch2c', in_channels=64, out_channels=256,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn2c_branch2c = self.__batch_normalization(2, 'bn2c_branch2c', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res3a_branch1 = self.__conv(2, name='res3a_branch1', in_channels=256, out_channels=512,
kernel_size=(1L, 1L), stride=(2L, 2L), groups=1, bias=False)
self.res3a_branch2a = self.__conv(2, name='res3a_branch2a', in_channels=256, out_channels=128,
kernel_size=(1L, 1L), stride=(2L, 2L), groups=1, bias=False)
self.bn3a_branch1 = self.__batch_normalization(2, 'bn3a_branch1', num_features=512, eps=9.99999974738e-05,
momentum=0.899999976158)
self.bn3a_branch2a = self.__batch_normalization(2, 'bn3a_branch2a', num_features=128, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res3a_branch2b = self.__conv(2, name='res3a_branch2b', in_channels=128, out_channels=128,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn3a_branch2b = self.__batch_normalization(2, 'bn3a_branch2b', num_features=128, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res3a_branch2c = self.__conv(2, name='res3a_branch2c', in_channels=128, out_channels=512,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn3a_branch2c = self.__batch_normalization(2, 'bn3a_branch2c', num_features=512, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res3b_branch2a = self.__conv(2, name='res3b_branch2a', in_channels=512, out_channels=128,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn3b_branch2a = self.__batch_normalization(2, 'bn3b_branch2a', num_features=128, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res3b_branch2b = self.__conv(2, name='res3b_branch2b', in_channels=128, out_channels=128,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn3b_branch2b = self.__batch_normalization(2, 'bn3b_branch2b', num_features=128, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res3b_branch2c = self.__conv(2, name='res3b_branch2c', in_channels=128, out_channels=512,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn3b_branch2c = self.__batch_normalization(2, 'bn3b_branch2c', num_features=512, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res3c_branch2a = self.__conv(2, name='res3c_branch2a', in_channels=512, out_channels=128,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn3c_branch2a = self.__batch_normalization(2, 'bn3c_branch2a', num_features=128, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res3c_branch2b = self.__conv(2, name='res3c_branch2b', in_channels=128, out_channels=128,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn3c_branch2b = self.__batch_normalization(2, 'bn3c_branch2b', num_features=128, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res3c_branch2c = self.__conv(2, name='res3c_branch2c', in_channels=128, out_channels=512,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn3c_branch2c = self.__batch_normalization(2, 'bn3c_branch2c', num_features=512, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res3d_branch2a = self.__conv(2, name='res3d_branch2a', in_channels=512, out_channels=128,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn3d_branch2a = self.__batch_normalization(2, 'bn3d_branch2a', num_features=128, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res3d_branch2b = self.__conv(2, name='res3d_branch2b', in_channels=128, out_channels=128,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn3d_branch2b = self.__batch_normalization(2, 'bn3d_branch2b', num_features=128, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res3d_branch2c = self.__conv(2, name='res3d_branch2c', in_channels=128, out_channels=512,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn3d_branch2c = self.__batch_normalization(2, 'bn3d_branch2c', num_features=512, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4a_branch1 = self.__conv(2, name='res4a_branch1', in_channels=512, out_channels=1024,
kernel_size=(1L, 1L), stride=(2L, 2L), groups=1, bias=False)
self.res4a_branch2a = self.__conv(2, name='res4a_branch2a', in_channels=512, out_channels=256,
kernel_size=(1L, 1L), stride=(2L, 2L), groups=1, bias=False)
self.bn4a_branch1 = self.__batch_normalization(2, 'bn4a_branch1', num_features=1024, eps=9.99999974738e-05,
momentum=0.899999976158)
self.bn4a_branch2a = self.__batch_normalization(2, 'bn4a_branch2a', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4a_branch2b = self.__conv(2, name='res4a_branch2b', in_channels=256, out_channels=256,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn4a_branch2b = self.__batch_normalization(2, 'bn4a_branch2b', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4a_branch2c = self.__conv(2, name='res4a_branch2c', in_channels=256, out_channels=1024,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn4a_branch2c = self.__batch_normalization(2, 'bn4a_branch2c', num_features=1024, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4b_branch2a = self.__conv(2, name='res4b_branch2a', in_channels=1024, out_channels=256,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn4b_branch2a = self.__batch_normalization(2, 'bn4b_branch2a', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4b_branch2b = self.__conv(2, name='res4b_branch2b', in_channels=256, out_channels=256,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn4b_branch2b = self.__batch_normalization(2, 'bn4b_branch2b', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4b_branch2c = self.__conv(2, name='res4b_branch2c', in_channels=256, out_channels=1024,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn4b_branch2c = self.__batch_normalization(2, 'bn4b_branch2c', num_features=1024, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4c_branch2a = self.__conv(2, name='res4c_branch2a', in_channels=1024, out_channels=256,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn4c_branch2a = self.__batch_normalization(2, 'bn4c_branch2a', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4c_branch2b = self.__conv(2, name='res4c_branch2b', in_channels=256, out_channels=256,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn4c_branch2b = self.__batch_normalization(2, 'bn4c_branch2b', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4c_branch2c = self.__conv(2, name='res4c_branch2c', in_channels=256, out_channels=1024,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn4c_branch2c = self.__batch_normalization(2, 'bn4c_branch2c', num_features=1024, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4d_branch2a = self.__conv(2, name='res4d_branch2a', in_channels=1024, out_channels=256,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn4d_branch2a = self.__batch_normalization(2, 'bn4d_branch2a', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4d_branch2b = self.__conv(2, name='res4d_branch2b', in_channels=256, out_channels=256,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn4d_branch2b = self.__batch_normalization(2, 'bn4d_branch2b', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4d_branch2c = self.__conv(2, name='res4d_branch2c', in_channels=256, out_channels=1024,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn4d_branch2c = self.__batch_normalization(2, 'bn4d_branch2c', num_features=1024, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4e_branch2a = self.__conv(2, name='res4e_branch2a', in_channels=1024, out_channels=256,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn4e_branch2a = self.__batch_normalization(2, 'bn4e_branch2a', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4e_branch2b = self.__conv(2, name='res4e_branch2b', in_channels=256, out_channels=256,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn4e_branch2b = self.__batch_normalization(2, 'bn4e_branch2b', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4e_branch2c = self.__conv(2, name='res4e_branch2c', in_channels=256, out_channels=1024,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn4e_branch2c = self.__batch_normalization(2, 'bn4e_branch2c', num_features=1024, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4f_branch2a = self.__conv(2, name='res4f_branch2a', in_channels=1024, out_channels=256,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn4f_branch2a = self.__batch_normalization(2, 'bn4f_branch2a', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4f_branch2b = self.__conv(2, name='res4f_branch2b', in_channels=256, out_channels=256,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn4f_branch2b = self.__batch_normalization(2, 'bn4f_branch2b', num_features=256, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res4f_branch2c = self.__conv(2, name='res4f_branch2c', in_channels=256, out_channels=1024,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn4f_branch2c = self.__batch_normalization(2, 'bn4f_branch2c', num_features=1024, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res5a_branch1 = self.__conv(2, name='res5a_branch1', in_channels=1024, out_channels=2048,
kernel_size=(1L, 1L), stride=(2L, 2L), groups=1, bias=False)
self.res5a_branch2a = self.__conv(2, name='res5a_branch2a', in_channels=1024, out_channels=512,
kernel_size=(1L, 1L), stride=(2L, 2L), groups=1, bias=False)
self.bn5a_branch1 = self.__batch_normalization(2, 'bn5a_branch1', num_features=2048, eps=9.99999974738e-05,
momentum=0.899999976158)
self.bn5a_branch2a = self.__batch_normalization(2, 'bn5a_branch2a', num_features=512, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res5a_branch2b = self.__conv(2, name='res5a_branch2b', in_channels=512, out_channels=512,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn5a_branch2b = self.__batch_normalization(2, 'bn5a_branch2b', num_features=512, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res5a_branch2c = self.__conv(2, name='res5a_branch2c', in_channels=512, out_channels=2048,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn5a_branch2c = self.__batch_normalization(2, 'bn5a_branch2c', num_features=2048, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res5b_branch2a = self.__conv(2, name='res5b_branch2a', in_channels=2048, out_channels=512,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn5b_branch2a = self.__batch_normalization(2, 'bn5b_branch2a', num_features=512, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res5b_branch2b = self.__conv(2, name='res5b_branch2b', in_channels=512, out_channels=512,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn5b_branch2b = self.__batch_normalization(2, 'bn5b_branch2b', num_features=512, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res5b_branch2c = self.__conv(2, name='res5b_branch2c', in_channels=512, out_channels=2048,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn5b_branch2c = self.__batch_normalization(2, 'bn5b_branch2c', num_features=2048, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res5c_branch2a = self.__conv(2, name='res5c_branch2a', in_channels=2048, out_channels=512,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn5c_branch2a = self.__batch_normalization(2, 'bn5c_branch2a', num_features=512, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res5c_branch2b = self.__conv(2, name='res5c_branch2b', in_channels=512, out_channels=512,
kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.bn5c_branch2b = self.__batch_normalization(2, 'bn5c_branch2b', num_features=512, eps=9.99999974738e-05,
momentum=0.899999976158)
self.res5c_branch2c = self.__conv(2, name='res5c_branch2c', in_channels=512, out_channels=2048,
kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.bn5c_branch2c = self.__batch_normalization(2, 'bn5c_branch2c', num_features=2048, eps=9.99999974738e-05,
momentum=0.899999976158)
self.class_fc = nn.Linear(4096, num_class)
nn.init.xavier_uniform(self.class_fc._parameters['weight'],gain=0.624)
nn.init.constant(self.class_fc._parameters['weight'],0)
def forward(self, x, **kwargs):
conv1_pad = F.pad(x, (3L, 3L, 3L, 3L))
conv1 = self.conv1(conv1_pad)
# conv1_numpy = conv1.data.cpu().numpy()
# param_numpy = self.conv1._parameters['weight'].data.cpu().numpy()
bn_conv1 = self.bn_conv1(conv1)
conv1_relu = F.relu(bn_conv1)
pool1 = F.max_pool2d(conv1_relu, kernel_size=(3L, 3L), stride=(2L, 2L))
res2a_branch1 = self.res2a_branch1(pool1)
res2a_branch2a = self.res2a_branch2a(pool1)
bn2a_branch1 = self.bn2a_branch1(res2a_branch1)
bn2a_branch2a = self.bn2a_branch2a(res2a_branch2a)
res2a_branch2a_relu = F.relu(bn2a_branch2a)
res2a_branch2b_pad = F.pad(res2a_branch2a_relu, (1L, 1L, 1L, 1L))
res2a_branch2b = self.res2a_branch2b(res2a_branch2b_pad)
bn2a_branch2b = self.bn2a_branch2b(res2a_branch2b)
res2a_branch2b_relu = F.relu(bn2a_branch2b)
res2a_branch2c = self.res2a_branch2c(res2a_branch2b_relu)
bn2a_branch2c = self.bn2a_branch2c(res2a_branch2c)
res2a = bn2a_branch1 + bn2a_branch2c
res2a_relu = F.relu(res2a)
res2b_branch2a = self.res2b_branch2a(res2a_relu)
bn2b_branch2a = self.bn2b_branch2a(res2b_branch2a)
res2b_branch2a_relu = F.relu(bn2b_branch2a)
res2b_branch2b_pad = F.pad(res2b_branch2a_relu, (1L, 1L, 1L, 1L))
res2b_branch2b = self.res2b_branch2b(res2b_branch2b_pad)
bn2b_branch2b = self.bn2b_branch2b(res2b_branch2b)
res2b_branch2b_relu = F.relu(bn2b_branch2b)
res2b_branch2c = self.res2b_branch2c(res2b_branch2b_relu)
bn2b_branch2c = self.bn2b_branch2c(res2b_branch2c)
res2b = res2a_relu + bn2b_branch2c
res2b_relu = F.relu(res2b)
res2c_branch2a = self.res2c_branch2a(res2b_relu)
bn2c_branch2a = self.bn2c_branch2a(res2c_branch2a)
res2c_branch2a_relu = F.relu(bn2c_branch2a)
res2c_branch2b_pad = F.pad(res2c_branch2a_relu, (1L, 1L, 1L, 1L))
res2c_branch2b = self.res2c_branch2b(res2c_branch2b_pad)
bn2c_branch2b = self.bn2c_branch2b(res2c_branch2b)
res2c_branch2b_relu = F.relu(bn2c_branch2b)
res2c_branch2c = self.res2c_branch2c(res2c_branch2b_relu)
bn2c_branch2c = self.bn2c_branch2c(res2c_branch2c)
res2c = res2b_relu + bn2c_branch2c
res2c_relu = F.relu(res2c)
res3a_branch1 = self.res3a_branch1(res2c_relu)
res3a_branch2a = self.res3a_branch2a(res2c_relu)
bn3a_branch1 = self.bn3a_branch1(res3a_branch1)
bn3a_branch2a = self.bn3a_branch2a(res3a_branch2a)
res3a_branch2a_relu = F.relu(bn3a_branch2a)
res3a_branch2b_pad = F.pad(res3a_branch2a_relu, (1L, 1L, 1L, 1L))
res3a_branch2b = self.res3a_branch2b(res3a_branch2b_pad)
bn3a_branch2b = self.bn3a_branch2b(res3a_branch2b)
res3a_branch2b_relu = F.relu(bn3a_branch2b)
res3a_branch2c = self.res3a_branch2c(res3a_branch2b_relu)
bn3a_branch2c = self.bn3a_branch2c(res3a_branch2c)
res3a = bn3a_branch1 + bn3a_branch2c
res3a_relu = F.relu(res3a)
res3b_branch2a = self.res3b_branch2a(res3a_relu)
bn3b_branch2a = self.bn3b_branch2a(res3b_branch2a)
res3b_branch2a_relu = F.relu(bn3b_branch2a)
res3b_branch2b_pad = F.pad(res3b_branch2a_relu, (1L, 1L, 1L, 1L))
res3b_branch2b = self.res3b_branch2b(res3b_branch2b_pad)
bn3b_branch2b = self.bn3b_branch2b(res3b_branch2b)
res3b_branch2b_relu = F.relu(bn3b_branch2b)
res3b_branch2c = self.res3b_branch2c(res3b_branch2b_relu)
bn3b_branch2c = self.bn3b_branch2c(res3b_branch2c)
res3b = res3a_relu + bn3b_branch2c
res3b_relu = F.relu(res3b)
res3c_branch2a = self.res3c_branch2a(res3b_relu)
bn3c_branch2a = self.bn3c_branch2a(res3c_branch2a)
res3c_branch2a_relu = F.relu(bn3c_branch2a)
res3c_branch2b_pad = F.pad(res3c_branch2a_relu, (1L, 1L, 1L, 1L))
res3c_branch2b = self.res3c_branch2b(res3c_branch2b_pad)
bn3c_branch2b = self.bn3c_branch2b(res3c_branch2b)
res3c_branch2b_relu = F.relu(bn3c_branch2b)
res3c_branch2c = self.res3c_branch2c(res3c_branch2b_relu)
bn3c_branch2c = self.bn3c_branch2c(res3c_branch2c)
res3c = res3b_relu + bn3c_branch2c
res3c_relu = F.relu(res3c)
res3d_branch2a = self.res3d_branch2a(res3c_relu)
bn3d_branch2a = self.bn3d_branch2a(res3d_branch2a)
res3d_branch2a_relu = F.relu(bn3d_branch2a)
res3d_branch2b_pad = F.pad(res3d_branch2a_relu, (1L, 1L, 1L, 1L))
res3d_branch2b = self.res3d_branch2b(res3d_branch2b_pad)
bn3d_branch2b = self.bn3d_branch2b(res3d_branch2b)
res3d_branch2b_relu = F.relu(bn3d_branch2b)
res3d_branch2c = self.res3d_branch2c(res3d_branch2b_relu)
bn3d_branch2c = self.bn3d_branch2c(res3d_branch2c)
res3d = res3c_relu + bn3d_branch2c
res3d_relu = F.relu(res3d)
res4a_branch1 = self.res4a_branch1(res3d_relu)
res4a_branch2a = self.res4a_branch2a(res3d_relu)
bn4a_branch1 = self.bn4a_branch1(res4a_branch1)
bn4a_branch2a = self.bn4a_branch2a(res4a_branch2a)
res4a_branch2a_relu = F.relu(bn4a_branch2a)
res4a_branch2b_pad = F.pad(res4a_branch2a_relu, (1L, 1L, 1L, 1L))
res4a_branch2b = self.res4a_branch2b(res4a_branch2b_pad)
bn4a_branch2b = self.bn4a_branch2b(res4a_branch2b)
res4a_branch2b_relu = F.relu(bn4a_branch2b)
res4a_branch2c = self.res4a_branch2c(res4a_branch2b_relu)
bn4a_branch2c = self.bn4a_branch2c(res4a_branch2c)
res4a = bn4a_branch1 + bn4a_branch2c
res4a_relu = F.relu(res4a)
res4b_branch2a = self.res4b_branch2a(res4a_relu)
bn4b_branch2a = self.bn4b_branch2a(res4b_branch2a)
res4b_branch2a_relu = F.relu(bn4b_branch2a)
res4b_branch2b_pad = F.pad(res4b_branch2a_relu, (1L, 1L, 1L, 1L))
res4b_branch2b = self.res4b_branch2b(res4b_branch2b_pad)
bn4b_branch2b = self.bn4b_branch2b(res4b_branch2b)
res4b_branch2b_relu = F.relu(bn4b_branch2b)
res4b_branch2c = self.res4b_branch2c(res4b_branch2b_relu)
bn4b_branch2c = self.bn4b_branch2c(res4b_branch2c)
res4b = res4a_relu + bn4b_branch2c
res4b_relu = F.relu(res4b)
res4c_branch2a = self.res4c_branch2a(res4b_relu)
bn4c_branch2a = self.bn4c_branch2a(res4c_branch2a)
res4c_branch2a_relu = F.relu(bn4c_branch2a)
res4c_branch2b_pad = F.pad(res4c_branch2a_relu, (1L, 1L, 1L, 1L))
res4c_branch2b = self.res4c_branch2b(res4c_branch2b_pad)
bn4c_branch2b = self.bn4c_branch2b(res4c_branch2b)
res4c_branch2b_relu = F.relu(bn4c_branch2b)
res4c_branch2c = self.res4c_branch2c(res4c_branch2b_relu)
bn4c_branch2c = self.bn4c_branch2c(res4c_branch2c)
res4c = res4b_relu + bn4c_branch2c
res4c_relu = F.relu(res4c)
res4d_branch2a = self.res4d_branch2a(res4c_relu)
bn4d_branch2a = self.bn4d_branch2a(res4d_branch2a)
res4d_branch2a_relu = F.relu(bn4d_branch2a)
res4d_branch2b_pad = F.pad(res4d_branch2a_relu, (1L, 1L, 1L, 1L))
res4d_branch2b = self.res4d_branch2b(res4d_branch2b_pad)
bn4d_branch2b = self.bn4d_branch2b(res4d_branch2b)
res4d_branch2b_relu = F.relu(bn4d_branch2b)
res4d_branch2c = self.res4d_branch2c(res4d_branch2b_relu)
bn4d_branch2c = self.bn4d_branch2c(res4d_branch2c)
res4d = res4c_relu + bn4d_branch2c
res4d_relu = F.relu(res4d)
res4e_branch2a = self.res4e_branch2a(res4d_relu)
bn4e_branch2a = self.bn4e_branch2a(res4e_branch2a)
res4e_branch2a_relu = F.relu(bn4e_branch2a)
res4e_branch2b_pad = F.pad(res4e_branch2a_relu, (1L, 1L, 1L, 1L))
res4e_branch2b = self.res4e_branch2b(res4e_branch2b_pad)
bn4e_branch2b = self.bn4e_branch2b(res4e_branch2b)
res4e_branch2b_relu = F.relu(bn4e_branch2b)
res4e_branch2c = self.res4e_branch2c(res4e_branch2b_relu)
bn4e_branch2c = self.bn4e_branch2c(res4e_branch2c)
res4e = res4d_relu + bn4e_branch2c
res4e_relu = F.relu(res4e)
res4f_branch2a = self.res4f_branch2a(res4e_relu)
bn4f_branch2a = self.bn4f_branch2a(res4f_branch2a)
res4f_branch2a_relu = F.relu(bn4f_branch2a)
res4f_branch2b_pad = F.pad(res4f_branch2a_relu, (1L, 1L, 1L, 1L))
res4f_branch2b = self.res4f_branch2b(res4f_branch2b_pad)
bn4f_branch2b = self.bn4f_branch2b(res4f_branch2b)
res4f_branch2b_relu = F.relu(bn4f_branch2b)
res4f_branch2c = self.res4f_branch2c(res4f_branch2b_relu)
bn4f_branch2c = self.bn4f_branch2c(res4f_branch2c)
res4f = res4e_relu + bn4f_branch2c
res4f_relu = F.relu(res4f)
res5a_branch1 = self.res5a_branch1(res4f_relu)
res5a_branch2a = self.res5a_branch2a(res4f_relu)
bn5a_branch1 = self.bn5a_branch1(res5a_branch1)
bn5a_branch2a = self.bn5a_branch2a(res5a_branch2a)
res5a_branch2a_relu = F.relu(bn5a_branch2a)
res5a_branch2b_pad = F.pad(res5a_branch2a_relu, (1L, 1L, 1L, 1L))
res5a_branch2b = self.res5a_branch2b(res5a_branch2b_pad)
bn5a_branch2b = self.bn5a_branch2b(res5a_branch2b)
res5a_branch2b_relu = F.relu(bn5a_branch2b)
res5a_branch2c = self.res5a_branch2c(res5a_branch2b_relu)
bn5a_branch2c = self.bn5a_branch2c(res5a_branch2c)
res5a = bn5a_branch1 + bn5a_branch2c
res5a_relu = F.relu(res5a)
res5b_branch2a = self.res5b_branch2a(res5a_relu)
bn5b_branch2a = self.bn5b_branch2a(res5b_branch2a)
res5b_branch2a_relu = F.relu(bn5b_branch2a)
res5b_branch2b_pad = F.pad(res5b_branch2a_relu, (1L, 1L, 1L, 1L))
res5b_branch2b = self.res5b_branch2b(res5b_branch2b_pad)
bn5b_branch2b = self.bn5b_branch2b(res5b_branch2b)
res5b_branch2b_relu = F.relu(bn5b_branch2b)
res5b_branch2c = self.res5b_branch2c(res5b_branch2b_relu)
bn5b_branch2c = self.bn5b_branch2c(res5b_branch2c)
res5b = res5a_relu + bn5b_branch2c
res5b_relu = F.relu(res5b)
res5c_branch2a = self.res5c_branch2a(res5b_relu)
bn5c_branch2a = self.bn5c_branch2a(res5c_branch2a)
res5c_branch2a_relu = F.relu(bn5c_branch2a)
res5c_branch2b_pad = F.pad(res5c_branch2a_relu, (1L, 1L, 1L, 1L))
res5c_branch2b = self.res5c_branch2b(res5c_branch2b_pad)
bn5c_branch2b = self.bn5c_branch2b(res5c_branch2b)
res5c_branch2b_relu = F.relu(bn5c_branch2b)
res5c_branch2c = self.res5c_branch2c(res5c_branch2b_relu)
bn5c_branch2c = self.bn5c_branch2c(res5c_branch2c)
res5c = res5b_relu + bn5c_branch2c
res5c_relu = F.relu(res5c)
if kwargs['scda']:
scda_x = torch.sum(res5c_relu,1,keepdim=True)
mean_x = torch.mean(scda_x.view(scda_x.size(0),-1),1,True)
scda_x = scda_x - mean_x
scda_x = scda_x>0
scda_x = scda_x.float()
res5c_relu = res5c_relu * scda_x
pooling0 = F.max_pool2d(input=res5c_relu, kernel_size=res5c_relu.size()[2:])
pooling1 = F.avg_pool2d(input=res5c_relu, kernel_size=res5c_relu.size()[2:])
flatten0 = pooling0.view(pooling0.size(0), -1)
flatten1 = pooling1.view(pooling1.size(0), -1)
avg_x = F.normalize(flatten1, p=2, dim=1)
max_x = F.normalize(flatten0, p=2, dim=1)
x = torch.cat((avg_x, max_x), dim=1)
# the last fc layer can be treat as distanc
# ree compute
x = x * kwargs['scale']
if kwargs['is_train']:
x = self.class_fc(x)
return x
    @staticmethod
    def __conv(dim, name, **kwargs):
        """Build an nn.Conv{1,2,3}d layer and copy pretrained weights into it.

        dim    -- spatial dimensionality (1, 2 or 3); anything else raises
                  NotImplementedError.
        name   -- key into the global weights dictionary.
        kwargs -- forwarded verbatim to the nn.ConvNd constructor.
        """
        if dim == 1:
            layer = nn.Conv1d(**kwargs)
        elif dim == 2:
            layer = nn.Conv2d(**kwargs)
        elif dim == 3:
            layer = nn.Conv3d(**kwargs)
        else:
            raise NotImplementedError()
        # NOTE(review): `__weights_dict` is name-mangled inside the enclosing
        # class; presumably it resolves to the module-level weights dict of
        # this (apparently MMdnn-style generated) file — confirm.
        layer.state_dict()['weight'].copy_(torch.from_numpy(__weights_dict[name]['weights']))
        if 'bias' in __weights_dict[name]:
            layer.state_dict()['bias'].copy_(torch.from_numpy(__weights_dict[name]['bias']))
        return layer
    @staticmethod
    def __batch_normalization(dim, name, **kwargs):
        """Build an nn.BatchNorm{1,2,3}d layer and load pretrained statistics.

        Missing 'scale'/'bias' entries fall back to the identity affine
        (weight=1, bias=0); 'mean' and 'var' are required.
        """
        if dim == 1:
            layer = nn.BatchNorm1d(**kwargs)
        elif dim == 2:
            layer = nn.BatchNorm2d(**kwargs)
        elif dim == 3:
            layer = nn.BatchNorm3d(**kwargs)
        else:
            raise NotImplementedError()
        if 'scale' in __weights_dict[name]:
            layer.state_dict()['weight'].copy_(torch.from_numpy(__weights_dict[name]['scale']))
        else:
            layer.weight.data.fill_(1)
        if 'bias' in __weights_dict[name]:
            layer.state_dict()['bias'].copy_(torch.from_numpy(__weights_dict[name]['bias']))
        else:
            layer.bias.data.fill_(0)
        # Running statistics are mandatory keys in the weights dict.
        layer.state_dict()['running_mean'].copy_(torch.from_numpy(__weights_dict[name]['mean']))
        layer.state_dict()['running_var'].copy_(torch.from_numpy(__weights_dict[name]['var']))
        return layer
| [
"zhengxiawu@126.com"
] | zhengxiawu@126.com |
0861548899e4e325b8f98626824a5f2a3f40c4a1 | d620b82c57adde1636826601e2b99209689ad2c4 | /model/xgboost/xgboostprocess.py | 8facdb750067deadb04ba3f2ca6276a0d2ee0326 | [] | no_license | weigebushiyao/HFData-PitchSystemModel | 1a75e2da6bef1bdbcae0eee1b1b9519bee03b56c | 58c77cdfcf85e49d7ab1f7163374c906ac0df361 | refs/heads/master | 2022-07-11T22:30:35.794840 | 2020-05-18T06:36:54 | 2020-05-18T06:36:54 | 264,849,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,260 | py | #-*-coding:utf-8-*-
from sklearn.model_selection import RandomizedSearchCV
import pandas as pd
from xgboost.sklearn import XGBRegressor
from model.get_data_path import get_train_data_path,get_test_data_path
from sklearn.model_selection import train_test_split
import os
from util.show_save_result import ShowAndSave
cur_path=os.path.abspath(os.path.dirname(__file__))
datafile = get_train_data_path()
class XgboostModel(ShowAndSave):
    """Train, evaluate and tune an XGBoost regressor on the HFData set.

    Path bookkeeping and plotting/result helpers (init_param,
    show_save_figure, cal_mean, save_result, *_path attributes) come from
    ShowAndSave.
    """

    def __init__(self, params=None, jobname='xgb_model'):
        """
        params  -- optional dict of XGBRegressor hyper-parameters
                   (e.g. {'max_depth': ..., 'n_estimators': ...});
                   None selects built-in defaults.
        jobname -- basename for the saved model file and output figures.
        """
        super().__init__()
        self.job_name = jobname
        self.cur_path = cur_path
        self.init_param()
        self.params = params
        self.model_file = self.model_path + self.job_name

    def xgboostmodel(self):
        """Train on the training CSV, save the model, plot and log results."""
        df = pd.read_csv(datafile, encoding='utf-8', index_col=0)
        print(df.shape)
        traindata = df.iloc[:, :].values
        x = traindata[:, :-1]
        y = traindata[:, -1]  # last column is the regression target
        x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.7)
        # Fix: the original computed `params` but never used it. Honour
        # self.params (e.g. set by params_tuned) and default to the values
        # that were previously hard-coded, so default behaviour is unchanged.
        # (The original also passed silence=False, which is not a recognised
        # XGBoost argument; it has been dropped.)
        if self.params is None:
            params = {'max_depth': 128, 'n_estimators': 768}
        else:
            params = self.params
        raw_model = XGBRegressor(learning_rate=0.01, **params)
        raw_model.fit(x_train, y_train)
        raw_model.save_model(self.model_file)
        pred = raw_model.predict(x_test)
        self.true = y_test
        self.pred = pred
        self.show_save_figure(fig_path=self.fig_path, modelname=self.job_name, detal_idx=500)
        t_mean = self.cal_mean(self.true)
        p_mean = self.cal_mean(self.pred)
        self.save_result(self.result_path, true_mean=t_mean, pred_mean=p_mean)

    def test_model(self, model_file=None):
        """Evaluate a saved model on the fault-test CSV.

        model_file -- None to use the model trained by xgboostmodel(), or an
                      identifier appended to 'model_' under single_model_path.
        """
        if model_file is None:
            modelfile = self.model_file
        else:
            modelfile = self.single_model_path + 'model_' + str(model_file)
        fault_test_file_path = get_test_data_path()
        df = pd.read_csv(fault_test_file_path, encoding='utf-8', index_col=0)
        data = df.iloc[:, :].values
        x = data[:, :-1]
        y = data[:, -1]
        xgb = XGBRegressor()
        # Fix: load_model() loads in place and returns None; the original
        # called .predict() on that None and raised AttributeError.
        xgb.load_model(modelfile)
        pred = xgb.predict(x)
        self.true = y
        self.pred = pred
        self.show_save_figure(fig_path=self.fault_data_test_figure_path, modelname=self.job_name, detal_idx=10)
        t_mean = self.cal_mean(self.true)
        p_mean = self.cal_mean(self.pred)
        self.save_result(self.fault_data_test_result_path, true_mean=t_mean, pred_mean=p_mean)

    def params_tuned(self):
        """Randomised hyper-parameter search; stores the best params on self
        and writes them to params.csv."""
        xgb = XGBRegressor(objective='reg:squarederror')
        params = {'max_depth': [90, 100, 128], 'n_estimators': [768, 800, 850]}
        grid = RandomizedSearchCV(xgb, params, cv=3, scoring='neg_mean_squared_error', n_iter=6)
        df = pd.read_csv(datafile, encoding='utf-8', index_col=0)
        # NOTE(review): hard-coded row window [100000:700000] — presumably to
        # bound the search cost; confirm it matches the data set size.
        traindata = df.iloc[100000:700000, :].values
        x = traindata[:, :-1]
        y = traindata[:, -1]
        grid.fit(x, y)
        print(grid.best_score_)
        print(grid.best_params_)
        self.params = grid.best_params_
        df = pd.DataFrame(list(self.params.items()))
        df.to_csv(self.params_file_path + 'params.csv', encoding='utf-8', index=None, header=None)
# Script entry: train a fresh model on import/run. Uncomment params_tuned()
# to search hyper-parameters first, or test_model() to evaluate a saved model.
# NOTE(review): consider guarding with `if __name__ == '__main__':`.
xgb = XgboostModel()
#xgb.params_tuned()
xgb.xgboostmodel()
#xgb.test_model()
| [
"505456072@qq.com"
] | 505456072@qq.com |
72ec7cf470d37dd39544d24e30dce4db9ee66c02 | 69d4577c856c8352f4b41a83431ca304bae3a8a2 | /model/charcnn.py | 286006f682bb6386485eaa862be0faef17d080aa | [
"Apache-2.0"
] | permissive | tagucci/NCRFpp | ecbdd6d9c6e87505bc200047eb4f2a21c651d2c9 | 3fd65685c26ed0686efde933d262b85daeb02697 | refs/heads/master | 2020-03-11T22:51:23.497036 | 2018-04-19T12:38:44 | 2018-04-19T12:38:44 | 130,304,515 | 0 | 0 | Apache-2.0 | 2018-04-20T03:35:53 | 2018-04-20T03:35:53 | null | UTF-8 | Python | false | false | 2,910 | py | # -*- coding: utf-8 -*-
# @Author: Jie Yang
# @Date: 2017-10-17 16:47:32
# @Last Modified by: Jie Yang, Contact: jieynlp@gmail.com
# @Last Modified time: 2018-03-30 16:18:23
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class CharCNN(nn.Module):
    """Character-level CNN feature extractor (legacy Python 2 / PyTorch code).

    Embeds character ids, applies dropout and a 1-d convolution, and exposes
    either per-character hidden states or a max-pooled word representation.
    """
    def __init__(self, alphabet_size, embedding_dim, hidden_dim, dropout, gpu):
        super(CharCNN, self).__init__()
        print "build char sequence feature extractor: CNN ..."
        self.gpu = gpu
        self.hidden_dim = hidden_dim
        self.char_drop = nn.Dropout(dropout)
        self.char_embeddings = nn.Embedding(alphabet_size, embedding_dim)
        # Initialise embeddings uniformly in [-sqrt(3/dim), +sqrt(3/dim)].
        self.char_embeddings.weight.data.copy_(torch.from_numpy(self.random_embedding(alphabet_size, embedding_dim)))
        # kernel_size=3 with padding=1 keeps the sequence length unchanged.
        self.char_cnn = nn.Conv1d(embedding_dim, self.hidden_dim, kernel_size=3, padding=1)
        if self.gpu:
            self.char_drop = self.char_drop.cuda()
            self.char_embeddings = self.char_embeddings.cuda()
            self.char_cnn = self.char_cnn.cuda()
    def random_embedding(self, vocab_size, embedding_dim):
        """Return a (vocab_size, embedding_dim) uniform random embedding table."""
        pretrain_emb = np.empty([vocab_size, embedding_dim])
        scale = np.sqrt(3.0 / embedding_dim)
        for index in range(vocab_size):
            pretrain_emb[index,:] = np.random.uniform(-scale, scale, [1, embedding_dim])
        return pretrain_emb
    def get_last_hiddens(self, input, seq_lengths):
        """
        input:
            input: Variable(batch_size, word_length)
            seq_lengths: numpy array (batch_size, 1)
        output:
            Variable(batch_size, char_hidden_dim)
        Note it only accepts ordered (length) variable, length size is recorded in seq_lengths
        """
        batch_size = input.size(0)
        char_embeds = self.char_drop(self.char_embeddings(input))
        # (batch, word_len, emb) -> (batch, emb, word_len) for Conv1d.
        char_embeds = char_embeds.transpose(2,1).contiguous()
        char_cnn_out = self.char_cnn(char_embeds)
        # Max-pool over the full character axis -> one vector per word.
        char_cnn_out = F.max_pool1d(char_cnn_out, char_cnn_out.size(2)).view(batch_size, -1)
        return char_cnn_out
    def get_all_hiddens(self, input, seq_lengths):
        """
        input:
            input: Variable(batch_size, word_length)
            seq_lengths: numpy array (batch_size, 1)
        output:
            Variable(batch_size, word_length, char_hidden_dim)
        Note it only accepts ordered (length) variable, length size is recorded in seq_lengths
        """
        batch_size = input.size(0)
        char_embeds = self.char_drop(self.char_embeddings(input))
        char_embeds = char_embeds.transpose(2,1).contiguous()
        # Transpose back so the output is (batch, word_len, hidden_dim).
        char_cnn_out = self.char_cnn(char_embeds).transpose(2,1).contiguous()
        return char_cnn_out
    def forward(self, input, seq_lengths):
        # Default forward returns per-character hidden states.
        return self.get_all_hiddens(input, seq_lengths)
| [
"jie_yang@mymail.sutd.edu.sg"
] | jie_yang@mymail.sutd.edu.sg |
ff0ea4acf2347925603c4adec3f917e249a6c633 | eb0ff0b6979a4cef6b1d8509d10579da9a6aca90 | /main.py | 5c8cb04f647953e8faa71d2b9c1e2fbec3279661 | [] | no_license | SusaOP/Motion-Detection-Security-Camera | 71b44e2b80ddf99f611c85c375b68afa242f41e3 | 33468eb9a6743476e048fd2785c0ad82cf5feb79 | refs/heads/main | 2023-06-27T20:30:12.629899 | 2021-08-06T01:04:39 | 2021-08-06T01:04:39 | 393,207,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,256 | py | import os
import shutil
from datetime import datetime
from operateCamera import videoRecord
from into_frames import toFrames
from compare_frames import compare
from send_email import fromFlaggedToSent
from send_email import establishAttachment
# Pipeline: record a clip, split it into frames, score inter-frame motion,
# then either archive + e-mail flagged clips or discard quiet ones.
local_max = 0
for i in range(2):  # number of record/analyse cycles per run
    saveDir, videoFile = videoRecord()
    print(f'saveDir is {saveDir} and videoFile is {videoFile}')
    toFrames(saveDir, videoFile)
    print(f'dir is {saveDir}')
    # local_max: peak frame-difference score; issueID: presumably the index
    # of the frame where the peak occurred — confirm against compare_frames.
    local_max, issueID = compare(saveDir)
    if (local_max > 5): #significant movement is detected
        print(f'Max is {local_max}, moving to Flagged...')
        os.mkdir(f'./Flagged/{saveDir}')
        os.replace(f'./{saveDir}/{videoFile}', f'./Flagged/{saveDir}/{videoFile}')
        os.replace(f'./{saveDir}/frame_{issueID}.jpg', f'./Flagged/{saveDir}/frame_{issueID}-Detected.jpg')
        shutil.rmtree(f'./{saveDir}')
        attach_path = f'./Flagged/{saveDir}/frame_{issueID}-Detected.jpg'
        establishAttachment(attach_path)
        fromFlaggedToSent(saveDir, videoFile, issueID)
    elif (local_max <= 5): #significant movement is not detected
        print(f'Insignificant max of {local_max} is found. Removing ./{saveDir}')
        shutil.rmtree(f'./{saveDir}')
    local_max = 0  # reset the score before the next cycle
"noreply@github.com"
] | SusaOP.noreply@github.com |
9a6921acf2118b13a365d80619408f90133228ad | 12596a0809bc4ce7eba4f1f32cbf96c8191f5628 | /ProgramFlow/guessinggame.py | cd83bec8fcc24262a2d83ae812b093853753f8c7 | [] | no_license | MichaelAntropov/python-masterclass | e19c34d11eab42b9a3c58d568c866fb747ffe3ff | 037456ff3f67ae5adf16465e8882a42ada0ca6c1 | refs/heads/master | 2023-04-06T11:59:19.402503 | 2021-04-17T13:43:04 | 2021-04-17T13:43:04 | 342,575,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,335 | py | import random
# Number-guessing game: the player narrows in on a random number; entering 0
# gives up and ends the game.
highest = 10
lowest = 1  # 0 is reserved below as the "give up" sentinel, so it cannot
            # also be a valid answer (the original used lowest = 0)

answer = random.randint(lowest, highest)
# (Removed the debug `print(answer)` flagged by its own TODO.)

print("Please guess a number between {} and {}:".format(lowest, highest))

while True:
    guess = int(input())
    if guess == 0:
        print("U gave up :(")
        # Fix: without this break the original kept prompting forever after
        # the player gave up.
        break
    elif guess < lowest or guess > highest:
        print("???")
    elif guess < answer:
        print("Please guess higher: ")
    elif guess > answer:
        print("please guess lower:")
    else:
        print("U got it!")
        break
| [
"mikhael.antropov@gmail.com"
] | mikhael.antropov@gmail.com |
a9f2bfe4189be9732b9d1c4db1fff1baab5cbbd9 | 94b8f8f7241545e614dc51f708c3b5b876f6db0c | /test.py | ce1d52ce890c992dc59565079f5e3c9ef7ba3cca | [
"MIT"
] | permissive | aliyun/The-Blessings-of-Unlabeled-Background-in-Untrimmed-Videos | b369155d11560508a43892da1e84f46cc7ae0852 | aca214c56fc05778a1f9f382c2f634cbeca4d852 | refs/heads/master | 2023-06-23T20:32:16.577804 | 2021-07-27T06:40:45 | 2021-07-27T06:40:45 | 370,694,896 | 30 | 3 | null | null | null | null | UTF-8 | Python | false | false | 7,256 | py | import torch
import torch.nn as nn
import numpy as np
import utils
import os
import os.path as osp
import json
from eval.eval_detection import ANETdetection
from tqdm import tqdm
import sys
def test(net, config, logger, test_loader, test_info, step, model_file=None):
with torch.no_grad():
net.eval()
if model_file is not None:
net.load_state_dict(torch.load(model_file))
final_res = {}
final_res['version'] = 'VERSION 1.3'
final_res['results'] = {}
final_res['external_data'] = {'used': True, 'details': 'Features from I3D Network'}
num_correct = 0.
num_total = 0.
result_store_numpy_path = './WUM_result_numpy'
load_iter = iter(test_loader)
for i in range(len(test_loader.dataset)):
_data, _label, _, vid_name, vid_num_seg = next(load_iter)
_data = _data.cuda()
_label = _label.cuda()
vid_num_seg = vid_num_seg[0].cpu().item()
num_segments = _data.shape[1]
features_div,_,_,_,_ = net(_data)
_,project_num,_ = features_div.shape
score_act = np.load(osp.join(result_store_numpy_path,vid_name[0]+'_score.npy'))
feat_act = np.load(osp.join(result_store_numpy_path,vid_name[0]+'_feat_act.npy'))
feat_bkg = np.load(osp.join(result_store_numpy_path,vid_name[0]+'_feat_bkg.npy'))
features = np.load(osp.join(result_store_numpy_path,vid_name[0]+'_features.npy'))
cas_softmax = np.load(osp.join(result_store_numpy_path,vid_name[0]+'_cas.npy'))
score_act = torch.Tensor(score_act).cuda()
feat_act = torch.Tensor(feat_act).cuda()
feat_bkg = torch.Tensor(feat_bkg).cuda()
features = torch.Tensor(features).cuda()
cas_softmax = torch.Tensor(cas_softmax).cuda()
features_div = features_div[0]
div_max = torch.max(features_div,dim=1,keepdim=True)[0]
div_min = torch.min(features_div,dim=1,keepdim=True)[0]
features_div = (features_div-div_min)/(div_max-div_min)
features_div = features_div.permute(1,0)
features_div_mean = torch.mean(torch.unsqueeze(features_div,dim=0),2,keepdim=True)
feat_magnitudes_act = torch.mean(torch.norm(feat_act, dim=2), dim=1)
feat_magnitudes_bkg = torch.mean(torch.norm(feat_bkg, dim=2), dim=1)
label_np = _label.cpu().data.numpy()
score_np = score_act[0].cpu().data.numpy()
pred_np = np.zeros_like(score_np)
pred_np[np.where(score_np < config.class_thresh)] = 0
pred_np[np.where(score_np >= config.class_thresh)] = 1
correct_pred = np.sum(label_np == pred_np, axis=1)
num_correct += np.sum((correct_pred == config.num_classes).astype(np.float32))
num_total += correct_pred.shape[0]
feat_magnitudes = torch.norm(features, p=2, dim=2)
feat_magnitudes = utils.minmax_norm(feat_magnitudes, max_val=feat_magnitudes_act, min_val=feat_magnitudes_bkg)
feat_magnitudes = feat_magnitudes.repeat((config.num_classes, 1, 1)).permute(1, 2, 0)
cas = utils.minmax_norm(cas_softmax * feat_magnitudes)
#The following two lines is to deploy TS-PCA with WUM.
cas = cas + 0.5*features_div_mean
cas = utils.minmax_norm(cas)
pred = np.where(score_np >= config.class_thresh)[0]
if len(pred) == 0:
pred = np.array([np.argmax(score_np)])
cas_pred = cas[0].cpu().numpy()[:, pred]
cas_pred = np.reshape(cas_pred, (num_segments, -1, 1))
cas_pred = utils.upgrade_resolution(cas_pred, config.scale)
proposal_dict = {}
feat_magnitudes_np = feat_magnitudes[0].cpu().data.numpy()[:, pred]
feat_magnitudes_np = np.reshape(feat_magnitudes_np, (num_segments, -1, 1))
feat_magnitudes_np = utils.upgrade_resolution(feat_magnitudes_np, config.scale)
for i in range(len(config.act_thresh_cas)):
cas_temp = cas_pred.copy()
zero_location = np.where(cas_temp[:, :, 0] < config.act_thresh_cas[i])
cas_temp[zero_location] = 0
seg_list = []
for c in range(len(pred)):
pos = np.where(cas_temp[:, c, 0] > 0)
seg_list.append(pos)
proposals = utils.get_proposal_oic(seg_list, cas_temp, score_np, pred, config.scale, \
vid_num_seg, config.feature_fps, num_segments)
for i in range(len(proposals)):
class_id = proposals[i][0][0]
if class_id not in proposal_dict.keys():
proposal_dict[class_id] = []
proposal_dict[class_id] += proposals[i]
for i in range(len(config.act_thresh_magnitudes)):
cas_temp = cas_pred.copy()
feat_magnitudes_np_temp = feat_magnitudes_np.copy()
zero_location = np.where(feat_magnitudes_np_temp[:, :, 0] < config.act_thresh_magnitudes[i])
feat_magnitudes_np_temp[zero_location] = 0
seg_list = []
for c in range(len(pred)):
pos = np.where(feat_magnitudes_np_temp[:, c, 0] > 0)
seg_list.append(pos)
proposals = utils.get_proposal_oic(seg_list, cas_temp, score_np, pred, config.scale, \
vid_num_seg, config.feature_fps, num_segments)
for i in range(len(proposals)):
class_id = proposals[i][0][0]
if class_id not in proposal_dict.keys():
proposal_dict[class_id] = []
proposal_dict[class_id] += proposals[i]
final_proposals = []
for class_id in proposal_dict.keys():
final_proposals.append(utils.nms(proposal_dict[class_id], 0.6))
final_res['results'][vid_name[0]] = utils.result2json(final_proposals)
test_acc = num_correct / num_total
json_path = os.path.join(config.output_path, 'result.json')
with open(json_path, 'w') as f:
json.dump(final_res, f)
f.close()
tIoU_thresh = np.linspace(0.1, 0.9, 9)
#tIoU_thresh = np.linspace(0.1, 0.7, 7)
anet_detection = ANETdetection(config.gt_path, json_path,
subset='test', tiou_thresholds=tIoU_thresh,
verbose=False, check_status=False)
mAP, average_mAP = anet_detection.evaluate()
logger.log_value('Test accuracy', test_acc, step)
for i in range(tIoU_thresh.shape[0]):
logger.log_value('mAP@{:.1f}'.format(tIoU_thresh[i]), mAP[i], step)
logger.log_value('Average mAP', average_mAP, step)
test_info["step"].append(step)
test_info["test_acc"].append(test_acc)
test_info["average_mAP"].append(average_mAP)
for i in range(tIoU_thresh.shape[0]):
test_info["mAP@{:.1f}".format(tIoU_thresh[i])].append(mAP[i])
| [
"alen.ly@alibaba-inc.com"
] | alen.ly@alibaba-inc.com |
a0754fefb495c8c77a0ceb23a9ff13a8cc1d720f | 0a669c18356f783fdd31ac54519b7c91f2fb3ef7 | /01-Estrutura_Sequencial/08-Salario_hora_simples.py | 8153ac315ed945727e1f63797c0f8bc3d3d8dd4d | [] | no_license | guilhermejcmarinho/Praticas_Python_Elson | 5295bca77785c22c6502c7b35988a89e6e8fba8e | 27145a99dd18c57281736079d94aef468d7276dc | refs/heads/main | 2023-07-28T22:41:58.636331 | 2021-09-10T22:09:37 | 2021-09-10T22:09:37 | 400,654,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 187 | py | sal_hora = float(input('Informe o valor da hora trabalhada: '))
hr_trab = float(input('Informe quantas horas trabalhou: '))
print('Salário total no mês: R$', round(sal_hora*hr_trab, 2)) | [
"gui.the.great@gmail.com"
] | gui.the.great@gmail.com |
7fdb76e70da796bb88882454749b09f5a59d1b45 | ec4586abcc179293656f0afd837b0d521d072a75 | /torchsl/mvsl/__init__.py | d61621ffcfa464dc736802882d9237e957f9b3a7 | [] | no_license | ZDstandup/mvda | e483387e0b7e50c84bc28ffd864d44a724d23762 | 13f854e063f10a9374856d0e2005b233788a645f | refs/heads/master | 2021-01-13T20:42:51.842836 | 2019-12-15T19:16:13 | 2019-12-15T19:16:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | from .mvda import MvDA, MvDAvc, RMvDA, RMvDAvc
from .pcmvda import pcMvDA
from .mvcsda import MvCSDA, MvDAplusCS
from .mvlfda import MvLFDA, MvLFDAvc, RMvLFDA, RMvLFDAvc
from .mvccda import MvCCDA, MvDCCCDA

# Public API of the mvsl package: one entry per estimator re-exported above
# (the MvDA family is imported from .mvda earlier in this module).
__all__ = [
    'MvDA', 'MvDAvc', 'RMvDA', 'RMvDAvc',
    'pcMvDA',
    'MvCSDA', 'MvDAplusCS',
    'MvLFDA', 'MvLFDAvc', 'RMvLFDA', 'RMvLFDAvc',
    'MvCCDA', 'MvDCCCDA'
]
| [
"inspiros.tran@gmail.com"
] | inspiros.tran@gmail.com |
47220864385f35b099736c3ef297a7ae7f1cbe54 | ca08100b33a78c01bf49f097f4e80ed10e4ee9ad | /intrepidboats/apps/owners_portal/utils.py | 605fe7065629b6a2f9983f3de5ed580162b6c11a | [] | no_license | elite0401/intrepidpowerboats | 347eae14b584d1be9a61ca14c014135ab0d14ad0 | d2a475b60d17aa078bf0feb5e0298c927e7362e7 | refs/heads/master | 2021-09-11T01:51:47.615117 | 2018-04-06T02:20:02 | 2018-04-06T02:20:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,654 | py | from django.conf import settings
from django.contrib.sites.models import Site
from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils.translation import gettext as _
def send_report_email(user_boat):
    """E-mail the boat owner a text+HTML build-report notification.

    user_boat -- a UserBoat-like object exposing .user (with .email) and
                 .boat; templates receive the current site domain and the
                 owners-portal dashboard URL.
    """
    context = {
        'user': user_boat.user,
        'user_boat': user_boat,
        'boat': user_boat.boat,
        'site': Site.objects.get_current().domain,
        'dashboard_url': reverse("owners_portal:owners_portal"),
    }
    send_mail(
        subject=_("New boat report - Intrepid Powerboats"),
        message=render_to_string('owners_portal/emails/report_email.txt', context),
        from_email=settings.BUILD_A_BOAT['NO_REPLY_EMAIL_REPORTS'],
        recipient_list=[user_boat.user.email],
        html_message=render_to_string('owners_portal/emails/report_email.html', context),
    )
def send_step_feedback_email(step_feedback):
    """Forward a user's build-step feedback to the configured staff inbox.

    step_feedback -- object exposing .comments, .user and .step (with .title,
                     .phase and .user_boat).
    """
    context = {
        'comments': step_feedback.comments,
        'user': step_feedback.user,
        'step': '{title} (phase: {phase})'.format(title=step_feedback.step.title, phase=step_feedback.step.phase),
        'boat': '{boat} (model: {model})'.format(boat=step_feedback.step.user_boat,
                                                 model=step_feedback.step.user_boat.boat)
    }
    send_mail(
        subject=_("{user} has sent feedback on {step} in Owner's portal - Intrepid Powerboats".format(
            user=context['user'],
            step=context['step'],
        )),
        message=render_to_string('owners_portal/emails/step_feedback_email.txt', context),
        from_email=settings.NO_REPLY_EMAIL,
        recipient_list=settings.TO_EMAIL['OWNERS_PORTAL_FEEDBACK_FORM'],
        html_message=render_to_string('owners_portal/emails/step_feedback_email.html', context),
    )
def send_new_shared_video_uploaded_email(shared_video):
    """Notify every superuser that a shared video was uploaded to Vimeo.

    shared_video -- object exposing .uploader and .pk (used to build the
                    admin change-page link).
    """
    # Imported here rather than at module level — presumably to avoid an
    # import cycle at Django startup; confirm before moving to the top.
    from django.contrib.auth.models import User
    admins = User.objects.filter(is_superuser=True)
    subject = _("New uploaded video to vimeo")
    to = admins.values_list('email', flat=True)
    from_email = settings.NO_REPLY_EMAIL
    site = Site.objects.get_current()
    ctx = {
        'user': shared_video.uploader,
        'site': site.domain,
        'admin_url': reverse("admin:owners_portal_sharedvideo_change", args=[shared_video.pk]),
    }
    message = render_to_string('owners_portal/emails/new_shared_video_email.txt', ctx)
    html_message = render_to_string('owners_portal/emails/new_shared_video_email.html', ctx)
    send_mail(subject=subject, message=message, from_email=from_email, recipient_list=to, html_message=html_message)
| [
"elite.wisdom@gmx.com"
] | elite.wisdom@gmx.com |
88f5d9a2605d4624cc87af7e584ecdf570ac00dc | 6eb302bf3456b5fe700a4e3281ca7bb4597477bf | /student_chatbot/app.py | 646b9b82f4b4c27cc03f005615fed2b793fd3fae | [] | no_license | lekhya19311/Student-Informative-Chat-Bot-System | f39a99d219bef3e534077c2cf2abcd7dd8d4eec1 | 7cd420bb5a21383bddf0f5bea01335d3e8bcbf9e | refs/heads/master | 2021-01-26T13:58:08.538530 | 2020-02-22T09:28:17 | 2020-02-22T09:28:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,486 | py | import os
from flask import send_file
from flask import Flask, session, render_template, request, redirect, url_for, flash, jsonify
from flask_bcrypt import Bcrypt
from flask_session import Session
from database import Base, Attendance, Marks,Accounts, Profile, Feedback
from sqlalchemy import create_engine, exc
from sqlalchemy.orm import scoped_session, sessionmaker
import requests
import re
import pandas as pd
import matplotlib.pyplot as plt
# Application and extension wiring for the student chatbot.
app = Flask(__name__)
bcrypt = Bcrypt(app)
app.secret_key = os.urandom(24)  # random per-process key (overridden in __main__)
# Configure session to use filesystem
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
# Set up database
# check_same_thread=False lets the SQLite connection be shared across Flask
# worker threads; echo=True logs every SQL statement (noisy in production).
engine = create_engine('sqlite:///database.db',connect_args={'check_same_thread': False},echo=True)
Base.metadata.bind = engine
db = scoped_session(sessionmaker(bind=engine))
@app.route("/")
def index():
if 'user' not in session:
return render_template("intro.html")
else:
return redirect(url_for('dashboard'))
# MAIN
@app.route("/dashboard")
def dashboard():
if 'user' not in session:
return redirect(url_for('index'))
else:
return render_template("menu.html")
@app.route("/query", methods=["POST"])
def quer():
if request.method == 'POST':
ss=request.form.get("msg").lower()
profile=db.execute("select sid from student_profile").fetchall()
profile_result=list([profile[i][0] for i in range(len(profile))])
if session['usert']=="Student":
if "show my attendance" in ss:
return redirect(url_for('attendance'))
else:
flash("Wrong! Try Again")
return redirect(url_for('dashboard'))
else:
if "show graph" in ss:
return redirect(url_for('plot_graph'))
if (re.search('attendance', ss) and re.search('75', ss) and (re.search('less than', ss) or re.search('lessthan', ss))) or (re.search('attendance', ss) and re.search('75', ss) and re.search('<', ss)) or re.search('attendance shortage', ss):
result=db.execute("SELECT * FROM attendance WHERE attend < 75 ORDER BY sid").fetchall()
return render_template("quer.html", results=result)
elif (re.search('attendance', ss) and re.search('65', ss) and (re.search('less than', ss) or re.search('lessthan', ss))) or (re.search('attendance', ss) and re.search('65', ss) and re.search('<', ss)) or re.search('detain', ss):
result=db.execute("SELECT * FROM attendance WHERE attend < 65 ORDER BY sid").fetchall()
return render_template("quer.html", results=result)
elif (ss.split()[-1].upper() in profile_result) and re.search('profile', ss):
result=db.execute("SELECT * from student_profile where sid = :s",{"s":ss.split()[-1].upper()})
return render_template("profile.html", results=result)
elif (ss.split()[-1].upper() in profile_result) and re.search('attendance', ss):
result=db.execute("SELECT * from attendance where sid = :s",{"s":ss.split()[-1].upper()})
return render_template("profile.html", results=result)
else:
flash("Wrong! Try Again")
return redirect(url_for('dashboard'))
@app.route("/profile")
def profile():
res=db.execute("SELECT * FROM student_profile WHERE sid = :u", {"u": session['user']}).fetchall()
return render_template("profile.html",results=res)
@app.route("/attendance")
def attendance():
result=db.execute("SELECT * FROM attendance WHERE sid = :u", {"u": session['user']}).fetchall()
return render_template("attendance.html",results=result)
@app.route("/marks")
def marks():
return render_template("marks.html")
@app.route("/attendance_display")
def attendance_update():
return render_template("attendance_form.html")
@app.route("/suggestions", methods=["GET", "POST"])
def Suggestions():
msg1=msg2=""
try:
if request.method == "POST":
sid = request.form.get("sid")
name = request.form.get("name")
subject = request.form.get("subject")
message = request.form.get("message")
result = db.execute("INSERT INTO feedback (name,subject,message,user_id) VALUES (:n,:s,:m,:u)", {"n":name,"s":subject ,"m": message,"u":session['user']})
db.commit()
msg1= "Submitted!"
msg2 = "Thank You for your Feedback"
except exc.IntegrityError:
message = "Roll Number already exists."
db.execute("ROLLBACK")
db.commit()
return render_template("feedback.html",msg1=msg1,msg2=msg2)
# To display all the complaints to the admin
@app.route("/adminfeedbacks")
def adminfeedbacks():
result=db.execute("SELECT * FROM feedback").fetchall()
return render_template('feedback.html',result=result)
@app.route("/graphs")
def plot_graph():
result=db.execute("SELECT sid,attend FROM attendance WHERE attend < 75 ORDER BY sid").fetchall()
x=["sart","ygf"]
y=[]
for i,j in result:
y.append(j)
plt.plot(x,y)
d="sath"
plt.title(d)
plt.xlabel(d, fontsize=18)
plt.ylabel(d, fontsize=16)
plt.savefig('static/graph.png')
return render_template('graphs.html',result=result)
@app.route('/download')
def download_file():
    """Export the whole student_profile table as an Excel attachment.

    NOTE(review): writes to a fixed 'outputt.xlsx' in the working directory,
    so concurrent requests overwrite each other's file.
    """
    s=db.execute("select * from student_profile").fetchall()
    df = pd.DataFrame(list(s))
    writer = pd.ExcelWriter('outputt.xlsx')
    df.to_excel(writer,sheet_name="lkjhgf")
    x=writer.save()
    return send_file('outputt.xlsx', as_attachment=True,mimetype='.xlsx')
# REGISTER
@app.route("/register", methods=["GET", "POST"])
def register():
if 'user' in session:
return redirect(url_for('dashboard'))
message = ""
if request.method == "POST":
try:
usern = request.form.get("username")
name = request.form.get("name").upper()
usert = request.form.get("usertyp")
passw = request.form.get("password")
passw_hash = bcrypt.generate_password_hash(passw).decode('utf-8')
result = db.execute("INSERT INTO accounts (id,name,user_type,password) VALUES (:u,:n,:t,:p)", {"u": usern,"n":name,"t":usert ,"p": passw_hash})
db.commit()
if result.rowcount > 0:
session['user'] = usern
session['namet'] = name
session['usert'] = usert
flash("Your successfully Registrated")
return redirect(url_for('dashboard'))
except exc.IntegrityError:
message = "Roll Number already exists."
db.execute("ROLLBACK")
db.commit()
return render_template("registration.html", message=message)
# Change Pasword
@app.route("/change-password", methods=["GET", "POST"])
def changepass():
if 'user' not in session:
return redirect(url_for('login'))
msg=""
if request.method == "POST":
try:
epswd = request.form.get("epassword")
cpswd = request.form.get("cpassword")
passw_hash = bcrypt.generate_password_hash(cpswd).decode('utf-8')
exist=db.execute("SELECT password FROM accounts WHERE id = :u", {"u": session['user']}).fetchone()
if bcrypt.check_password_hash(exist['password'], epswd) is True:
res=db.execute("UPDATE accounts SET password = :u WHERE id = :v",{"u":passw_hash,"v":session['user']})
db.commit()
if res.rowcount > 0:
return redirect(url_for('dashboard'))
except exc.IntegrityError:
msg = "Unable to process try again"
msg="Existing Not matching"
return render_template("change_password.html",m=msg)
# Reset
@app.route("/reset", methods=["GET", "POST"])
def reset():
msg=""
if session['usert']=="admin":
if request.method == "POST":
rollno = request.form.get("rollno")
passw_hash = bcrypt.generate_password_hash("srit").decode('utf-8')
res=db.execute("UPDATE accounts SET password = :u WHERE id = :v",{"u":passw_hash,"v":rollno})
db.commit()
if res is not None:
return redirect(url_for('dashboard'))
msg=""
return render_template("pswdreset.html",m=msg)
else:
return redirect(url_for('dashboard'))
# LOGOUT
@app.route("/logout")
def logout():
session.pop('user', None)
return redirect(url_for('dashboard'))
# LOGIN
@app.route("/login", methods=["GET", "POST"])
def login():
if 'user' in session:
return redirect(url_for('dashboard'))
message = ""
if request.method == "POST":
usern = request.form.get("username").upper()
passw = request.form.get("password").encode('utf-8')
result = db.execute("SELECT * FROM accounts WHERE id = :u", {"u": usern}).fetchone()
if result is not None:
print(result['password'])
if bcrypt.check_password_hash(result['password'], passw) is True:
session['user'] = usern
session['namet'] = result.name
session['usert'] = result.user_type
flash("Hii "+result.name)
return redirect(url_for('dashboard'))
message = "Username or password is incorrect."
return render_template("login.html", message=message)
# Main
if __name__ == '__main__':
    # NOTE(review): this hard-coded key overrides the os.urandom(24) secret
    # set at import time, and debug=True on 0.0.0.0 exposes the Werkzeug
    # debugger to the network — development settings only.
    app.secret_key = 'super_secret_key'
    app.debug = True
    app.run(host='0.0.0.0', port=5000)
"satheeshgajula22@gmail.com"
] | satheeshgajula22@gmail.com |
2c4cfe1cd667b7a708c96b4978b00325826dfb19 | 0987f31e64bcacb41ba3a1e20054d7b8ac0d7346 | /contests/panasonic2020/a.py | 3c85e5a3a0a4b6b5ab170b052566849aab8ae7bf | [] | no_license | masakiaota/kyoupuro | 81ae52ab3014fb2b1e10472994afa4caa9ea463b | 74915a40ac157f89fe400e3f98e9bf3c10012cd7 | refs/heads/master | 2021-06-27T04:13:52.152582 | 2020-09-20T03:21:17 | 2020-09-20T03:21:17 | 147,049,195 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,905 | py | import sys
sys.setrecursionlimit(1 << 25)
read = sys.stdin.readline
def read_ints():
    """Read one whitespace-separated line of integers from stdin."""
    return [int(token) for token in read().split()]
def read_a_int():
    """Read a single integer from the next stdin line."""
    line = read()
    return int(line)
def read_tuple(H):
    '''
    H is number of rows
    Read H stdin lines, each as a tuple of ints.
    '''
    ret = []
    for _ in range(H):
        ret.append(tuple(map(int, read().split())))
    return ret
def read_col(H, n_cols):
    '''
    H is number of rows
    n_cols is number of cols
    For input given as columns A, B, ... (transposes row-wise input into
    one list per column).
    '''
    ret = [[] for _ in range(n_cols)]
    for _ in range(H):
        tmp = list(map(int, read().split()))
        for col in range(n_cols):
            ret[col].append(tmp[col])
    return ret
def read_matrix(H):
    '''
    H is number of rows
    Read H stdin lines into a list of int lists.
    '''
    ret = []
    for _ in range(H):
        ret.append(list(map(int, read().split())))
    return ret
    # return [list(map(int, read().split())) for _ in range(H)] # comprehensions are slow on PyPy
def read_map(H):
    '''
    H is number of rows
    For reading a board given as strings.
    '''
    # [:-1] strips the trailing newline left by readline — assumes every
    # line ends with '\n' (TODO confirm for the last input line).
    return [read()[:-1] for _ in range(H)]
def read_map_as_int(H):
    '''
    Read '#' as 1 and '.' as 0.
    '''
    ret = []
    for _ in range(H):
        ret.append([1 if s == '#' else 0 for s in read()[:-1]])
        # note: comprehensions are slightly slow on PyPy
        # keeping this form anyway since numpy will probably consume it
    return ret
# default import
from collections import defaultdict, Counter, deque
from operator import itemgetter
from itertools import product, permutations, combinations
from bisect import bisect_left, bisect_right # , insort_left, insort_right
from fractions import gcd
def lcm(a, b):
# 最小公約数
g = gcd(a, b)
return a * b // g
a = [1, 1, 1, 2, 1, 2, 1, 5, 2, 2, 1, 5, 1, 2, 1, 14,
1, 5, 1, 5, 2, 2, 1, 15, 2, 2, 5, 4, 1, 4, 1, 51]
print(a[int(input()) - 1])
| [
"aotamasakimail@gmail.com"
] | aotamasakimail@gmail.com |
b1c5a6fe4a11aa713099d0337893a6259fa2e086 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02973/s301790930.py | 280647a2fd8669a6345ecf3a1ac6c75ef906c3dc | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | py | from sys import stdin
from bisect import bisect
N = int(stdin.readline().rstrip())
A = []
for i in range(N):
A.append(int(input()))
dp = []
for a in A[::-1]:
i = bisect(dp, a)
if i < len(dp):
dp[i] = a
else:
dp.append(a)
print(len(dp)) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
0249930c34da9815a0b78f5701b102bed3daa0b0 | 57a9d84e8bcf505795e7e4f2a57f096edebd0040 | /read_statistics/migrations/0002_readdetail.py | c76c1ec0c7df8243dc8104d4804adc7e95280b51 | [] | no_license | klllllsssss/mysite | 03541240c3d8013da40e3fdcaefbf9cfffabdfe3 | 14fbce7d1cb5097f16d2002da5a7a709cc7953f6 | refs/heads/master | 2022-09-18T13:30:47.399220 | 2020-06-03T11:04:23 | 2020-06-03T11:04:23 | 269,059,986 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 909 | py | # Generated by Django 2.0 on 2020-04-27 11:53
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
('read_statistics', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ReadDetail',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField(default=django.utils.timezone.now)),
('read_num', models.IntegerField(default=0)),
('object_id', models.PositiveIntegerField()),
('content_type', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='contenttypes.ContentType')),
],
),
]
| [
"549284627@qq.com"
] | 549284627@qq.com |
fcf325192bb689fddfa24f58302b76220e0f8f1b | 9708ad482f925fb5a57df285b478602ad2749196 | /lib.py | af36fdf38a4f7b1c906176f91a90afb6c6a5b74c | [] | no_license | cczeus/project-euler | 580b6e559da23554aaab06b82b671ebbf382c26c | 57970c2f0a2b64c5e444050bb437ba3b3620bff1 | refs/heads/master | 2022-06-02T23:25:47.280066 | 2022-05-19T23:30:41 | 2022-05-19T23:30:41 | 68,892,531 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 709 | py | import math
def isPrime(num):
if num == 2:
return True
elif num % 2 == 0:
return False
elif num < 0:
return False
for j in range(3, int(math.sqrt(num)) + 1, 2):
if(num % j == 0):
return False
return True
def getFactors(num):
factors = []
i = 1
while i <= math.sqrt(num):
if num % i == 0:
factors.append(i)
factors.append(num / i)
i += 1
return factors
def getFactorsSum(num):
sum = 1
i = 2
while i <= math.sqrt(num):
if num % i == 0:
sum += i
sum += num / i
if i == num / i:
sum -= i
i += 1
return sum | [
"chriszuis@MacBook-Pro.local"
] | chriszuis@MacBook-Pro.local |
30049a45def159f6d425e056ab47ba6b13055d72 | 3fdf83182664bf1c5c8c5b91186ed1a476cdcae7 | /manage.py | b82fa89d927388c22b2efe834deb746bcbac493a | [] | no_license | gauravdhingra99/Webkiosk-online-Student-portal- | 7a3d47e1bd0e05d1a853685a66e28627ae04eef3 | fa1369e0e616b6688f9f906fd0c5ea42efa06368 | refs/heads/master | 2020-04-06T19:46:05.882112 | 2019-02-27T16:30:13 | 2019-02-27T16:30:13 | 157,748,816 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "webkiosk.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [
"gauravdhingra9999@gmail.com"
] | gauravdhingra9999@gmail.com |
55936ee6e0c535be6c763d5bbe570c3d5d24d065 | 5c7deaef83574a53416681063827cdbcb3004b7c | /PyGameMultiAgent/gameclient.py | 3dc5ddca4fd9eafea0898bdf3db2f38152da05a0 | [] | no_license | guotata1996/baselines | ba53ed2bb3d8015551f8e46dd8398a21b638ea80 | d7e2bee2ce1d98e5f2c511d6ede4e627e1112ad6 | refs/heads/master | 2020-06-22T00:00:09.431445 | 2019-08-07T03:22:39 | 2019-08-07T03:22:39 | 138,416,090 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,635 | py | import pygame
import pygame.locals
import socket
import select
import random
import numpy as np
from baselines.PyGameMultiAgent.staticworld import StaticWorld
class GameClient(object):
def __init__(self, addr="127.0.0.1", serverport=9009):
self.clientport = random.randrange(8000, 8999)
self.conn = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind to localhost - set to external ip to connect from other computers
self.conn.bind(("127.0.0.1", self.clientport))
self.addr = addr
self.serverport = serverport
self.read_list = [self.conn]
self.write_list = []
self.setup_pygame()
def setup_pygame(self):
self.world = StaticWorld('../Maps/map_1.csv')
self.screen = pygame.display.set_mode((self.world.local_width * self.world.zoom, self.world.local_length * self.world.zoom))
pygame.event.set_allowed(None)
pygame.event.set_allowed([pygame.locals.QUIT,
pygame.locals.KEYDOWN])
pygame.key.set_repeat(100, 100) #move faster
def run(self):
running = True
clock = pygame.time.Clock()
tickspeed = 30
try:
# Initialize connection to server
self.conn.sendto("cz".encode('utf-8'), (self.addr, self.serverport))
while running:
clock.tick(tickspeed)
# select on specified file descriptors
readable, writable, exceptional = (
select.select(self.read_list, self.write_list, [], 0)
)
for f in readable:
if f is self.conn:
msg, addr = f.recvfrom(2048)
msg = msg.decode('utf-8') #Coordinates of all players
self_pos = None
AllZombiePose = []
for position in msg.split('|')[:-1]:
x, y, angle, tag = position.split(',')
x = float(x)
y = float(y)
angle = float(angle)
tag = int(tag)
if self_pos is None:
self_pos = (x, y, angle)
AllZombiePose.append((x, y, angle, tag))
self.world.draw_local(self.screen, self_pos, AllZombiePose)
#self.world.draw_global(self.screen)
#self.world.draw_zombie_global(self.screen, (x, y, angle))
for event in pygame.event.get():
if event.type == pygame.QUIT or event.type == pygame.locals.QUIT:
running = False
break
elif event.type == pygame.locals.KEYDOWN:
if event.key == pygame.locals.K_UP:
self.conn.sendto("uu".encode('utf-8'), (self.addr, self.serverport))
elif event.key == pygame.locals.K_LEFT:
self.conn.sendto("ul".encode('utf-8'), (self.addr, self.serverport))
elif event.key == pygame.locals.K_RIGHT:
self.conn.sendto("ur".encode('utf-8'), (self.addr, self.serverport))
pygame.event.clear(pygame.locals.KEYDOWN)
pygame.display.update()
finally:
self.conn.sendto("d".encode('utf-8'), (self.addr, self.serverport))
if __name__ == "__main__":
g = GameClient()
g.run() | [
"jg4006@columbia.edu"
] | jg4006@columbia.edu |
ba04885de6ca2c1a171de58f0649c0d7f07f2428 | a3e48885987e895d0d33b5dd903d51aaf4d21ce6 | /duplicate_strings.py | b6413d4a9b2d69466c9d364f828210a31a44ee9a | [] | no_license | das-amrit/helper_scripts | 2e17c0c585356dd6cf9bd84b5082f6a20dd815f7 | fdca380e55bfaee5cf5fb3365421931513501db7 | refs/heads/master | 2021-03-24T04:23:54.449186 | 2019-02-17T06:44:45 | 2019-02-17T06:44:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | import itertools
text = "aforapple"
text_list = list(text)
new_text = [k for k,g in itertools.groupby(text_list)]
print("".join(new_text)
| [
"noreply@github.com"
] | das-amrit.noreply@github.com |
e924e94bc28ebc9f1f2c03016db85c413511282e | 965ec7b89c996c51579561e944be93f054f94301 | /test1.py | dc6a774ac0e491436d86533342ea435beb02201e | [
"MIT"
] | permissive | nayunhwan/SMaSH_Python | e26b20def4d6ca3ed042087a218150db6bac9d9a | 6e80520f43f6e014be2abc40d6f51f76338e3ff8 | refs/heads/master | 2018-12-27T11:51:39.860739 | 2018-10-24T06:55:32 | 2018-10-24T06:55:32 | 34,496,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | py | #-*- coding: utf-8 -*-
# 예제 ex_A.py
unit_price = input("사과 1개의 가격은 얼마입니까? ")
apple_count = input("사과의 개수는 모두 몇 개 입니까? ")
price = apple_count * unit_price
print "전체 사과의 가격은 ", price, "원 입니다." | [
"kbk9288@gmail.com"
] | kbk9288@gmail.com |
fdb4bdf1a20e33fa178f567d6dfa0aac72099ca5 | c6716e87bde12a870d517ebe64c6916477ef3251 | /tableFormats.py | dfc915f56ec17c0cb2887a8fce4b5c6e7c0c0ed0 | [
"BSD-3-Clause"
] | permissive | adasilva/prettytable | efca75828341319e2962727e55f7cce5519eb4b7 | 899e255a53b257cf392565dc1d9f02bef25c4c4a | refs/heads/master | 2021-01-25T04:01:43.633364 | 2015-08-19T15:26:49 | 2015-08-19T15:26:49 | 40,557,221 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,591 | py | from prettytable import PrettyTable
import abc
class TableString(object):
"""Metaclass for formatted table strings."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def __unicode__(self): return
@abc.abstractmethod
def __str__(self): return
@abc.abstractmethod
def get_string(self,outfile,**kwargs):
'''return the string'''
return
class latexTable(TableString):
"""Construct and export a LaTeX table from a PrettyTable.
latexTableExporter(table,**kwargs)
Required argument:
-----------------
table - an instance of prettytable.PrettyTable
Optional keyword arguments:
--------------------------
caption - string - a caption for the table
label - string - the latex reference ID
"""
def __init__(self,table,caption='',label=''):
self.table = table
self.caption = caption
self.label = label
def __str__(self):
return self.get_string()
def __unicode__(self):
return self.get_string()
def get_string(self,**kwargs):
''' Construct LaTeX string from table'''
options = self.table._get_options(kwargs) #does not work bc of prettytable bug
s = r'\begin{table}' + '\n'
s = s + r'\centering' + '\n'
s = s + r'\caption{%s}\label{%s}' %(self.caption,self.label)
s = s + '\n'
s = s + r'\begin{tabular}{'
s = s + ''.join(['c',]*len(self.table.field_names)) + '}'
s = s + '\n'
s = s + '&'.join(self.table.field_names)+r'\\ \hline'+'\n'
rows = self.table._format_rows(self.table._rows,options)
#print rows
for i in range(len(rows)):
row = [str(itm) for itm in rows[i]]
s = s + '&'.join(row)
if i != len(self.table._rows)-1:
s = s + r'\\'
s = s + '\n'
s = s + r'\end{tabular}' + '\n'
s = s + r'\end{table}'
return s
if __name__ == "__main__":
t = PrettyTable(['a','b','c'])
t.add_row([1,2.0,3.14159])
xt = latexTable(t,caption='Testing formatted table string',label='tab:test')
print '1. Simply print the table:\n'
print xt
print '\n2. Use get_string method:\n'
print xt.get_string()
print '\n3. Format floats to two decimal points: (KNOWN ISSUE)\n'
print xt.get_string(float_format='0.2')
print '\n4. Workaround to format floats:\n'
t.float_format = '0.2'
xt2 = latexTable(t,caption='Floats are formatted to have two decimal places',label='tab:test2')
print xt2
| [
"awesomeashley527@gmail.com"
] | awesomeashley527@gmail.com |
1d1dfcd44cf71fa592df181189c7efe1af6af40d | 7a8560742946bfb95f4a252693264c34d4d0473d | /k2/centroid.py | e09491c999915180b3830fd138110d6e2140551a | [
"MIT"
] | permissive | benmontet/K2-noise | 3781e475ed6d5e2748a7ac3ddd878b8eec334254 | a4b682cdf33f85d2dffc4cef115dcedacfccb4b4 | refs/heads/master | 2016-09-05T13:02:09.051080 | 2014-10-25T14:36:22 | 2014-10-25T14:36:22 | 22,899,258 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 604 | py | # -*- coding: utf-8 -*-
from __future__ import division, print_function
__all__ = ["centroid"]
import numpy as np
from functools import partial
from itertools import izip, imap
from .c3k import find_centroid
def centroid(tpf, **kwargs):
# Load the data.
data = tpf.read()
times = data["TIME"]
images = data["FLUX"]
quality = data["QUALITY"]
# Get rid of the bad times based on quality flags.
m = np.isfinite(times) * (quality == 0)
images[~m, :] = np.nan
f = partial(find_centroid, **kwargs)
return [times] + list(imap(np.array, izip(*(imap(f, images)))))
| [
"danfm@nyu.edu"
] | danfm@nyu.edu |
2b21f9bf32ec2ff92c015f407d8cc4df35ebc205 | 693431e2be60ac6f9d59996589c7023408537603 | /talk/metrics_publisher/publisher.py | 0855e20eee392393c3d46d0806aa5e8dfda83fa9 | [] | no_license | rapyuta-robotics/io_tutorials | deb547590a4519f19923dc9593399cae2e2d6683 | 88cf45629e4c02dff385048ece4b2b344a6100a3 | refs/heads/master | 2023-05-27T15:46:33.673684 | 2023-02-22T08:52:32 | 2023-02-22T08:52:32 | 118,696,902 | 7 | 24 | null | 2023-05-23T03:37:59 | 2018-01-24T02:00:39 | CMake | UTF-8 | Python | false | false | 1,629 | py | #!/usr/bin/env python
import random
import rospy
from std_msgs.msg import String
from ros_monitoring_msgs.msg import MetricList, MetricData, MetricDimension
def get_metric_list(cycle, count):
robot_dimensions = [
MetricDimension(name='cycle', value='cycle' + str(cycle)),
MetricDimension(name='random_tag', value=str(random.choice([0, 1]))),
]
return [
MetricData(
metric_name='robot.battery_charge',
unit=MetricData.UNIT_PERCENTAGE,
value=100 - (count * 10),
dimensions=robot_dimensions,
),
MetricData(
metric_name='robot.distance_traveled',
unit='meters',
value=random.uniform(count * 100.0, (count+1) * 100.0),
dimensions=robot_dimensions,
),
MetricData(
metric_name='edge.connected_robots',
unit=MetricData.UNIT_COUNT,
value=random.randint(1, 100),
),
]
def publish():
pub = rospy.Publisher('/io_metrics', MetricList, queue_size=10)
rospy.init_node('metric_publisher', anonymous=True)
rate = rospy.Rate(0.2)
cycle = 1
count = 1
while not rospy.is_shutdown():
pub.publish(MetricList(get_metric_list(cycle, count)))
rospy.loginfo('published metric list for cycle: %d, count: %d', cycle, count)
rate.sleep()
if count == 10:
cycle = 1 if cycle == 10 else cycle + 1
count = 1 # reset
else:
count += 1
if __name__ == '__main__':
try:
publish()
except rospy.ROSInterruptException:
pass
| [
"noreply@github.com"
] | rapyuta-robotics.noreply@github.com |
02af91d9a068eb13b6123c2f26b025668f5bb79f | 6eaf69ffd454ed6933e3395516246d878cb09781 | /repozeldapapp/tests/functional/test_authentication.py | f998f67ccdc2ccc018c17f9cecb7cb08697d7a58 | [] | no_license | ralphbean/repoze-ldap-app | 0d6658ef13b153736aaed6aa07fbdcaf65cbe1d9 | cc00fe59bcc286fd44d1e22a14c40cfc8419e21d | refs/heads/master | 2021-01-01T05:35:25.069715 | 2011-07-19T15:30:31 | 2011-07-19T15:30:31 | 2,072,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,583 | py | # -*- coding: utf-8 -*-
"""
Integration tests for the :mod:`repoze.who`-powered authentication sub-system.
As repoze-ldap-app grows and the authentication method changes, only these tests
should be updated.
"""
from repozeldapapp.tests import TestController
class TestAuthentication(TestController):
"""Tests for the default authentication setup.
By default in TurboGears 2, :mod:`repoze.who` is configured with the same
plugins specified by repoze.what-quickstart (which are listed in
http://code.gustavonarea.net/repoze.what-quickstart/#repoze.what.plugins.quickstart.setup_sql_auth).
As the settings for those plugins change, or the plugins are replaced,
these tests should be updated.
"""
application_under_test = 'main'
def test_forced_login(self):
"""Anonymous users are forced to login
Test that anonymous users are automatically redirected to the login
form when authorization is denied. Next, upon successful login they
should be redirected to the initially requested page.
"""
# Requesting a protected area
resp = self.app.get('/secc/', status=302)
assert resp.location.startswith('http://localhost/login')
# Getting the login form:
resp = resp.follow(status=200)
form = resp.form
# Submitting the login form:
form['login'] = u'manager'
form['password'] = 'managepass'
post_login = form.submit(status=302)
# Being redirected to the initially requested page:
assert post_login.location.startswith('http://localhost/post_login')
initial_page = post_login.follow(status=302)
assert 'authtkt' in initial_page.request.cookies, \
"Session cookie wasn't defined: %s" % initial_page.request.cookies
assert initial_page.location.startswith('http://localhost/secc/'), \
initial_page.location
def test_voluntary_login(self):
"""Voluntary logins must work correctly"""
# Going to the login form voluntarily:
resp = self.app.get('/login', status=200)
form = resp.form
# Submitting the login form:
form['login'] = u'manager'
form['password'] = 'managepass'
post_login = form.submit(status=302)
# Being redirected to the home page:
assert post_login.location.startswith('http://localhost/post_login')
home_page = post_login.follow(status=302)
assert 'authtkt' in home_page.request.cookies, \
'Session cookie was not defined: %s' % home_page.request.cookies
assert home_page.location == 'http://localhost/'
def test_logout(self):
"""Logouts must work correctly"""
# Logging in voluntarily the quick way:
resp = self.app.get('/login_handler?login=manager&password=managepass',
status=302)
resp = resp.follow(status=302)
assert 'authtkt' in resp.request.cookies, \
'Session cookie was not defined: %s' % resp.request.cookies
# Logging out:
resp = self.app.get('/logout_handler', status=302)
assert resp.location.startswith('http://localhost/post_logout')
# Finally, redirected to the home page:
home_page = resp.follow(status=302)
authtkt = home_page.request.cookies.get('authtkt')
assert not authtkt or authtkt == 'INVALID', \
'Session cookie was not deleted: %s' % home_page.request.cookies
assert home_page.location == 'http://localhost/', home_page.location
| [
"ralph.bean@gmail.com"
] | ralph.bean@gmail.com |
a2de64aec718958e8d3b7c4e7137f9309a3fd152 | b79042eb362a9ba284f0c518854a3b7e6ee39284 | /learning_users/basic_app/forms.py | 7015436716565e7010f8c6798e71463f0e946b09 | [] | no_license | Austin911/django-deployment-example | 2dc2e7d8b777928b7fd6d631978581fff54aa9d6 | eded8b839224b7202902f61a35f058693242ac55 | refs/heads/master | 2020-03-11T16:24:32.888430 | 2018-04-19T01:08:00 | 2018-04-19T01:08:00 | 130,115,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | from django import forms
from django.contrib.auth.models import User
from basic_app.models import UserProfileInfo
class UserForm(forms.ModelForm):
password = forms.CharField(widget= forms.PasswordInput())
class Meta():
model= User
fields = ('username','email','password')
class UserProfileInfoForm(forms.ModelForm):
class Meta():
model= UserProfileInfo
fields = ('portfolio_site','profile_pic')
| [
"austin88yang@gmail.com"
] | austin88yang@gmail.com |
a602ed4d95af34413839a7d25ad1df255e16af0c | e67ae29c22eca0e23a63f871c008c0de3b0cf1df | /Civ4 Reimagined/PublicMaps/not_too_Big_or_Small.py | fddb4814cf8770bde44e7db1b6693f74db53c0c5 | [
"CC-BY-3.0"
] | permissive | NilsBatram/Civ4-Reimagined | 16f9e24174118ee7662723230f101fb563d31b4b | a9bc57908321dd12db3417b89bd569de7b99dea1 | refs/heads/master | 2020-12-08T11:30:46.160054 | 2020-06-18T20:32:55 | 2020-06-18T20:32:55 | 66,165,954 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,732 | py |
## "not too Big or Small". A modified version of "big and small" to scale better with larger maps.
## by Karadoc. version 1.4
from CvPythonExtensions import *
import CvUtil
import CvMapGeneratorUtil
from CvMapGeneratorUtil import FractalWorld
from CvMapGeneratorUtil import TerrainGenerator
from CvMapGeneratorUtil import FeatureGenerator
def getDescription():
return "A modified version of Big and Small, designed to scale better for large maps."
def isAdvancedMap():
"This map should not show up in simple mode"
return 0
def getNumCustomMapOptions():
return 2
def getCustomMapOptionName(argsList):
[iOption] = argsList
option_names = {
0: "TXT_KEY_MAP_SCRIPT_CONTINENTS_SIZE",
1: "TXT_KEY_MAP_SCRIPT_ISLANDS_SIZE"
}
translated_text = unicode(CyTranslator().getText(option_names[iOption], ()))
return translated_text
def getNumCustomMapOptionValues(argsList):
[iOption] = argsList
option_values = {
0: 3,
1: 2
}
return option_values[iOption]
def getCustomMapOptionDescAt(argsList):
[iOption, iSelection] = argsList
selection_names = {
0: {
0: "TXT_KEY_MAP_SCRIPT_MASSIVE_CONTINENTS",
1: "TXT_KEY_MAP_SCRIPT_NORMAL_CONTINENTS",
2: "TXT_KEY_MAP_SCRIPT_SNAKY_CONTINENTS"
},
1: {
0: "TXT_KEY_MAP_SCRIPT_ISLANDS",
1: "TXT_KEY_MAP_SCRIPT_TINY_ISLANDS"
}
}
translated_text = unicode(CyTranslator().getText(selection_names[iOption][iSelection], ()))
return translated_text
def getCustomMapOptionDefault(argsList):
[iOption] = argsList
option_defaults = {
0: 1,
1: 0
}
return option_defaults[iOption]
def minStartingDistanceModifier():
return -12
def beforeGeneration():
#global xShiftRoll
gc = CyGlobalContext()
dice = gc.getGame().getMapRand()
# Binary shift roll (for horizontal shifting if Island Region Separate).
#xShiftRoll = dice.get(2, "Region Shift, Horizontal - Big and Small PYTHON")
#print xShiftRoll
class BnSMultilayeredFractal(CvMapGeneratorUtil.MultilayeredFractal):
def generatePlotsByRegion(self):
# Sirian's MultilayeredFractal class, controlling function.
# You -MUST- customize this function for each use of the class.
#global xShiftRoll
iContinentsGrain = 1 + self.map.getCustomMapOption(0)
iIslandsGrain = 4 + self.map.getCustomMapOption(1)
# Water variables need to differ if Overlap is set. Defining default here.
iWater = 74
iTargetSize = 30 + self.dice.get(min(36, self.iW/3), "zone target size (horiz)")
iHorizontalZones = max(1, (self.iW+iTargetSize/2) / iTargetSize)
iTargetSize = 30 + self.dice.get(min(34, self.iH/2), "zone target size (vert)")
iVerticalZones = max(1, (self.iH+iTargetSize/2) / iTargetSize)
# if iHorizontalZones == 1 and iVerticalZones == 1:
# iHorizontalZones = 1 + self.dice.get(2, "Saving throw vs. Pangaea")
iTotalZones = iHorizontalZones * iVerticalZones
iContinentZones = (iTotalZones+1)/2 + self.dice.get(1+(iTotalZones-1)/2, "number of 'big' zones")
iIslandZones = iTotalZones - iContinentZones
# Add a few random patches of Tiny Islands first. (originaly 1 + r(4))
numTinies = iContinentZones + self.dice.get(2 + iTotalZones, "number of Tiny Islands")
print("Patches of Tiny Islands: ", numTinies)
if numTinies:
for tiny_loop in range(numTinies):
tinyWestLon = 0.01 * self.dice.get(85, "Tiny Longitude - Custom Continents PYTHON")
tinyWestX = int(self.iW * tinyWestLon)
tinySouthLat = 0.01 * self.dice.get(85, "Tiny Latitude - Custom Continents PYTHON")
tinySouthY = int(self.iH * tinyWestLon)
tinyWidth = int(self.iW * 0.15)
tinyHeight = int(self.iH * 0.15)
self.generatePlotsInRegion(80,
tinyWidth, tinyHeight,
tinyWestX, tinySouthY,
4, 3,
0, self.iTerrainFlags,
6, 5,
True, 3,
-1, False,
False
)
zone_types = [0] * iTotalZones
i = 0
while i < iContinentZones:
x = self.dice.get(iTotalZones - i, "zone placement")
j = 0
while j <= x:
if (zone_types[j] == 1):
x = x + 1
j += 1
zone_types[x] = 1
i += 1
iZoneWidth = int(self.iW / iHorizontalZones)
iZoneHeight = int(self.iH / iVerticalZones)
xExp = 6
iMaxOverLap = 5
for i in range(iTotalZones):
iWestX = max(0, (i % iHorizontalZones) * iZoneWidth - self.dice.get(iMaxOverLap, "zone overlap (west)"))
iEastX = min(self.iW - 1, (i % iHorizontalZones + 1) * iZoneWidth + self.dice.get(iMaxOverLap, "zone overlap (east)"))
iSouthY = max(0, max(3, (i / iHorizontalZones) * iZoneHeight) - self.dice.get(iMaxOverLap, "zone overlap (south)"))
iNorthY = min(self.iH - 1, min(self.iH - 4, (i / iHorizontalZones + 1) * iZoneHeight) + self.dice.get(iMaxOverLap, "zone overlap (north)"))
iWidth = iEastX - iWestX + 1
iHeight = iNorthY - iSouthY + 1
if (zone_types[i] == 1):
# continent zone
self.generatePlotsInRegion(iWater,
iWidth, iHeight,
iWestX, iSouthY,
iContinentsGrain, 4,
self.iRoundFlags, self.iTerrainFlags,
xExp, 6,
True, 15,
-1, False,
False
)
else:
# islands zone
self.generatePlotsInRegion(iWater,
iWidth, iHeight,
iWestX, iSouthY,
iIslandsGrain, 5,
self.iRoundFlags, self.iTerrainFlags,
xExp, 6,
True, 15,
-1, False,
False
)
# All regions have been processed. Plot Type generation completed.
return self.wholeworldPlotTypes
'''
Regional Variables Key:
iWaterPercent,
iRegionWidth, iRegionHeight,
iRegionWestX, iRegionSouthY,
iRegionGrain, iRegionHillsGrain,
iRegionPlotFlags, iRegionTerrainFlags,
iRegionFracXExp, iRegionFracYExp,
bShift, iStrip,
rift_grain, has_center_rift,
invert_heights
'''
def generatePlotTypes():
NiTextOut("Setting Plot Types (Python Custom Continents) ...")
fractal_world = BnSMultilayeredFractal()
plotTypes = fractal_world.generatePlotsByRegion()
return plotTypes
def generateTerrainTypes():
NiTextOut("Generating Terrain (Python Custom Continents) ...")
terraingen = TerrainGenerator()
terrainTypes = terraingen.generateTerrain()
return terrainTypes
def addFeatures():
NiTextOut("Adding Features (Python Custom Continents) ...")
featuregen = FeatureGenerator()
featuregen.addFeatures()
return 0
| [
"Nils.Batram@gmx.de"
] | Nils.Batram@gmx.de |
dd203b86fd8abbb163bbe54b4e921223fe92e53f | 1ed65a23ea5d9a135096bc55ea9df9b96625d909 | /core/migrations/0030_userprofile_is_active.py | 905bbf27e4e80966ed1aa6b9747b3c4b8caca345 | [] | no_license | nfishel48/simntx | 367b8323e6b4f433912eb687888a456e0959c228 | 0dc7f6c41adff1c21a52aca6e2712e7fcb3e9a48 | refs/heads/master | 2022-12-31T02:22:28.344922 | 2020-10-16T02:21:23 | 2020-10-16T02:21:23 | 271,566,126 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | # Generated by Django 2.2 on 2020-06-25 03:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0029_auto_20200624_2234'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='is_active',
field=models.BooleanField(default=False),
),
]
| [
"nfishel@emich.edu"
] | nfishel@emich.edu |
c9593dfd5fb0088ce2c4645844975fd74e3c847e | 7b53e120dc4022b09eed0cf87a975482dc1d2056 | /M2/utils.py | 93ee422692d9b63b45deed53160a1d656ec285cf | [] | no_license | YuanKQ/DDI-VAE | 878ba120c2a61e7966bf1638680c5b39a610d690 | fe2c5a699e5294287c0b05b60fd037c21c7fddd1 | refs/heads/master | 2020-03-17T12:34:12.895257 | 2018-05-16T01:50:45 | 2018-05-16T02:11:56 | 133,594,155 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,766 | py | import prettytensor as pt
import tensorflow as tf
import numpy as np
logc = np.log(2.*np.pi)
c = - 0.5 * np.log(2*np.pi)
def tf_normal_logpdf(x, mu, log_sigma_sq):
return ( - 0.5 * logc - log_sigma_sq / 2. - tf.div( tf.square( tf.subtract( x, mu ) ), 2 * tf.exp( log_sigma_sq ) ) )
def tf_stdnormal_logpdf(x):
return ( - 0.5 * ( logc + tf.square( x ) ) )
def tf_gaussian_ent(log_sigma_sq):
return ( - 0.5 * ( logc + 1.0 + log_sigma_sq ) )
def tf_gaussian_marg(mu, log_sigma_sq):
return ( - 0.5 * ( logc + ( tf.square( mu ) + tf.exp( log_sigma_sq ) ) ) )
def tf_binary_xentropy(x, y, const = 1e-10):
return - ( x * tf.log ( tf.clip_by_value( y, const, 1.0 ) ) + \
(1.0 - x) * tf.log( tf.clip_by_value( 1.0 - y, const, 1.0 ) ) )
def feed_numpy_semisupervised(num_lab_batch, num_ulab_batch, x_lab, y, x_ulab):
size = x_lab.shape[0] + x_ulab.shape[0]
batch_size = num_lab_batch + num_ulab_batch
count = int(size / batch_size)
dim = int(x_lab.shape[1])
for i in range(count):
start_lab = int(i * num_lab_batch)
end_lab = int(start_lab + num_lab_batch)
start_ulab = int(i * num_ulab_batch)
end_ulab = int(start_ulab + num_ulab_batch)
yield [ x_lab[start_lab:end_lab,:int(dim/2)], x_lab[start_lab:end_lab,int(dim/2):dim], y[start_lab:end_lab],
x_ulab[start_ulab:end_ulab,:int(dim/2)], x_ulab[start_ulab:end_ulab,int(dim/2):dim] ]
def feed_numpy(batch_size, x):
size = x.shape[0]
count = int(size / batch_size)
dim = x.shape[1]
for i in range(count):
start = i * batch_size
end = start + batch_size
yield x[start:end]
def print_metrics(epoch, *metrics):
print(25*'-')
for metric in metrics:
print('[{}] {} {}: {}'.format(epoch, metric[0],metric[1],metric[2]))
print(25*'-') | [
"kq_yuan@outlook.com"
] | kq_yuan@outlook.com |
9994c856f9f6988cd9021ad10b5aef5d6047c41c | 09e0940849ba15a2179b4418057aac28aac81bac | /app/doyin.py | 97041f90e5e2688edc51ac2ae0f95cabb6dc9784 | [
"MIT"
] | permissive | tqdonkey/app-bot | 75886957114840877adf473e99cf05dcf424f2f5 | df01784b3f2875facfe1e79d4614a906dc8e7a7b | refs/heads/master | 2022-02-28T22:46:52.418077 | 2019-08-20T12:28:59 | 2019-08-20T12:28:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,215 | py | # -*- coding: utf-8 -*-
'''
create by: 小宝
mail: 1435682155@qq.com
create date: 2019.8.11
Purpose: hehe
Desc:just do it~
'''
import sys
import time
import os
import shutil
sys.path.append('./app')
from common.app import app
from common import config
from common import screenshot
from common import comm
# 导入支持平台的action
# from action import dy_like
class doyin(app):
def __init__(self):
super().__init__()
screenshot.check_screenshot()
self.config = config.open_accordant_config('doyin')
self.delay = float(self.config['delay']['value'])
self.retry = int(self.config['retry']['value'])
pass
def run(self):
self.run_cmd()
def run_cmd(self):
try:
key = input(
"\n=========***欢迎使用doyin-bot***=========\n"+
"请输入序号选择要操作的功能:\n"+
"> 1:寻找美女并点赞\n"+
"> 2:取消点赞\n"+
"> 0: 退出程序\n"+
"=======================================\n"
"请输入[1/2/0]:"
)
key = int(key)
if key == 1:
self.search_dest()
elif key == 2:
self.cancel_like()
elif key == 0:
exit('谢谢使用')
except KeyboardInterrupt:
exit('谢谢使用')
def search_dest(self):
from action import do_like
while True:
self.action_schedule('do_like', do_like)
def cancel_like(self):
from action import do_cancel
while True:
self.action_schedule('do_cancel', do_cancel)
def action_schedule(self, action_name, action_file):
actions = action_file.actions
flg = True
while True:
for action in actions:
if action['type'] == 'open':
if not self._open_app(action['main_activity'], self.delay):
flg = False
break
elif action['type'] == 'click':
if not self._click_operate(action['current'], action['x'], action['y'], self.delay, action['expect'], self.retry):
flg = False
break
elif action['type'] == 'custom':
self.handle_custom_operate(action_name, action)
elif action['type'] == 'swipe':
self._swipe_page(action['x1'], action['y1'], action['x2'], action['y2'])
elif action['type'] == 'back':
self.back_expect_page(action['current'], action['expect'], self.delay, self.retry)
else:
exit('未知异常')
if flg:
break
def handle_custom_operate(self, action_name, action):
if action_name == 'do_like':
return self._handle_screenshot(action)
# 滑屏翻页
def _swipe_page(self, x1, y1, x2, y2):
self.swipe_operate(x1, y1, x2, y2, self.delay)
# 截屏及相关操作
def _handle_screenshot(self, action):
# 1.截屏优化图片
time.sleep(1)
self.screen_to_img()
comm.resize_image('./tmp/screen.png', './tmp/optimized.png', 1024*1024)
# 2.调用接口
res = comm.face_detectface()
if res == False:
return False
# 3.判断处理
is_dest = self._is_dest(res['face_list'], (0, 10), (80, 100), (0, 100))
# 4.保存图片
if is_dest != False:
print('是个美人儿~点赞走一波')
# 点赞
x = self.config['like_star']['x']
y = self.config['like_star']['y']
self._click_operate(action['current'], x, y, self.delay, '', self.retry)
self._img_save(is_dest['beauty'])
return True
return False
# 满足条件的图片保存
def _img_save(self, beauty):
# 1.把图存下来
path = time.strftime('%Y-%m-%d', time.localtime(time.time()))
file_path = './tmp/screenshot/' + path + "/"
if not os.path.exists(file_path):
os.mkdir(file_path)
rq = time.strftime('%Y%m%d%H%M%S-{}'.format(beauty), time.localtime(time.time()))
screen_name = file_path + rq + '.png'
shutil.copy('./tmp/screen.png', screen_name)
# 判断是否满足设定的条件
def _is_dest(self, face_list, gender = (0, 100), beauty = (0, 100), age = (0, 100)):
'''
default:
gender: (0, 100)
beauty:(0, 100)
age:(0, 100)
'''
for face in face_list:
if face['gender'] not in range(gender[0], gender[1] + 1): continue
if face['beauty'] not in range(beauty[0], beauty[1] + 1): continue
if face['beauty'] not in range(age[0], age[1] + 1): continue
print("颜值:{}".format(face['beauty']))
return {'beauty': face['beauty']}
return False | [
"root@lbp.localdomain"
] | root@lbp.localdomain |
1b20703b930ae2d775880d83cd617d40c9cdfa18 | ea867a1db2b730964b471e5f198ac74988417fa5 | /steemtools/helpers.py | 5c4e3a5d73bff0aa5310093de2799d44d516835b | [
"MIT"
] | permissive | Denis007138/steemtools | 0b58fa4bb2608c0134752b0855a36464cff9073a | c7f7ad9f482ff1b56e1218ceffbf574c95cf0c1f | refs/heads/master | 2021-01-11T01:34:36.721177 | 2016-10-10T13:58:44 | 2016-10-10T13:58:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,494 | py | import datetime
import re
import time
import dateutil
from dateutil import parser
from funcy import contextmanager, decorator
from werkzeug.contrib.cache import SimpleCache
@contextmanager
def timeit():
    """Context manager that prints the wall-clock time spent in its block."""
    started = time.time()
    yield
    print("Time Elapsed: %.2f" % (time.time() - started))
@decorator
def simple_cache(func, cache_obj, timeout=3600):
    """Cache the wrapped call's result in a werkzeug SimpleCache.

    `func` is funcy's lazily-bound call object: func() invokes the wrapped
    function, while func._func / func._args / func._kwargs expose the call
    for building the cache key.  When cache_obj is not a SimpleCache the
    call is simply passed through uncached.
    """
    if type(cache_obj) is not SimpleCache:
        return func()
    name = "%s_%s_%s" % (func._func.__name__, func._args, func._kwargs)
    cache_value = cache_obj.get(name)
    if cache_value:
        # NOTE(review): falsy cached results (0, '', []) are treated as
        # cache misses and recomputed -- confirm this is acceptable.
        return cache_value
    else:
        out = func()
        cache_obj.set(name, out, timeout=timeout)
        return out
def read_asset(asset_string):
    """Split an asset string such as '3.121 SBD' into value and symbol."""
    pattern = re.compile(r'(?P<number>\d*\.?\d+)\s?(?P<unit>[a-zA-Z]+)')
    match = pattern.match(asset_string)
    return {'value': float(match.group('number')), 'symbol': match.group('unit')}
def parse_payout(payout):
    """Numeric amount of a payout string such as '1.234 SBD'."""
    asset = read_asset(payout)
    return asset['value']
def time_diff(time1, time2):
    """Seconds from time1 to time2; both are naive steemd timestamps."""
    first = parser.parse(time1 + "UTC").timestamp()
    second = parser.parse(time2 + "UTC").timestamp()
    return second - first
def is_comment(item):
    """True when the item's permlink marks it as a reply (comment)."""
    return item['permlink'][:3] == "re-"
def time_elapsed(time1):
    """Seconds elapsed since the given steemd timestamp."""
    created_at = parser.parse(time1 + "UTC").timestamp()
    return time.time() - created_at
def parse_time(block_time):
    """Parse a steemd block timestamp into a timezone-aware UTC datetime."""
    parsed = dateutil.parser.parse(block_time + "UTC")
    return parsed.astimezone(datetime.timezone.utc)
| [
"_@furion.me"
] | _@furion.me |
b1c1eca6d9cd2f7761661a9abe7a38a71c3ffc06 | baec3aca9482e90605ac4e4ecee52b3d6eb44f1f | /21/d21.py | 3cc7219aec0f5c0137007760f688852551e5a018 | [] | no_license | grvn/aoc2017 | 9390d89dbcda7a10352ad65dae71eeec51c930ea | 48d380d8ff7000d38fdba9fb93bcfa99c1f0c447 | refs/heads/master | 2021-10-08T06:06:35.298716 | 2018-12-08T18:52:36 | 2018-12-08T18:52:36 | 112,791,442 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,623 | py | #!/usr/bin/env python3
from sys import argv
# Command-line arguments: rule file, then the number of enhancement rounds.
filename=argv[1]
iterations=int(argv[2])
# rules maps every rotation/reflection of a source grid to its replacement.
rules={}
# Fixed starting pattern:  .#. / ..# / ###
start=((".", "#", "."), (".", ".", "#"), ("#", "#", "#"))
def rotate(sqgr):
    """Rotate a square grid (tuple of row tuples) 90 degrees clockwise."""
    flipped = sqgr[::-1]
    return tuple(tuple(column) for column in zip(*flipped))
# Parse the rule file: each line is "<from> => <to>" with rows separated by
# '/'.  All eight symmetries of the source grid (four rotations, then four
# rotations of the mirror image) are pre-expanded into the rules dict so
# lookups during enhancement are O(1).
with open(filename) as f:
    input = f.readlines()
input = [line.strip() for line in input]
for line in input:
    fro,to=line.split(' => ')
    fro=tuple(map(tuple, fro.split('/')))
    to=tuple(map(tuple, to.split('/')))
    fro0=fro
    while True:
        rules[fro]=to
        fro=rotate(fro)
        if fro==fro0:
            break
    # Mirror the grid and register its four rotations as well.
    fro=tuple(reversed(fro))
    fro0=fro
    while True:
        rules[fro]=to
        fro=rotate(fro)
        if fro==fro0:
            break
# Run the enhancement the requested number of times.
for i in range(iterations):
    size=len(start)
    # Split into 2x2 squares when the size is even, else 3x3 squares.
    if size%2==0:
        rwlen=2
    else:
        rwlen=3
    # Every rwlen-square grows by one row and one column after enhancement.
    tmp=(size//rwlen)*(rwlen+1)
    tmplist=[[0 for _ in range(tmp)] for _ in range(tmp)]
    for j in range(0,size,rwlen):
        for k in range(0,size,rwlen):
            # Extract the sub-square as a tuple-of-tuples rule key.
            if rwlen==2:
                keytup=((start[j][k],start[j][k+1]),(start[j+1][k],start[j+1][k+1]))
            else:
                keytup=((start[j][k],start[j][k+1],start[j][k+2]),(start[j+1][k],start[j+1][k+1],start[j+1][k+2]),(start[j+2][k],start[j+2][k+1],start[j+2][k+2]))
            newpart=rules[keytup]
            # Copy the enhanced square into the enlarged grid.
            offsetx=(j//rwlen)*(rwlen+1)
            offsety=(k//rwlen)*(rwlen+1)
            for l in range(rwlen+1):
                for m in range(rwlen+1):
                    tmplist[offsetx+l][offsety+m]=newpart[l][m]
    start=tuple(tuple(z) for z in tmplist)
# Answer: number of '#' pixels after all iterations.
print(sum(line.count('#') for line in start))
| [
"rickard"
] | rickard |
be1ca56a4c8e33d679fe761dc4faa412b354bfa3 | 61e68e3a4d6cc841da4350dc193315822ca4e354 | /lecture/4_정렬/4_퀵정렬.py | 45420f20a5eaaae9aafb31ff3bea12843c0068c4 | [] | no_license | sswwd95/Algorithm | 34360cd333019d6ded60f967c19aa70f1655e12a | a70bdf02580a39b9a5c282a04b0b2f8c2cb41636 | refs/heads/master | 2023-04-16T21:05:07.293929 | 2021-05-08T10:58:05 | 2021-05-08T10:58:05 | 362,651,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,091 | py | array = [5, 7, 9, 0, 3, 1, 6, 2, 4, 8]
def quick_sort(array, start, end):
    """Sort array[start:end + 1] in place; each slice's first element is the pivot."""
    if start >= end:
        # Zero or one element: already sorted.
        return
    pivot = start
    lo = start + 1
    hi = end
    while lo <= hi:
        # Scan right for an element larger than the pivot.
        while lo <= end and array[lo] <= array[pivot]:
            lo += 1
        # Scan left for an element smaller than the pivot.
        while hi > start and array[hi] >= array[pivot]:
            hi -= 1
        if lo > hi:
            # Scans crossed: drop the pivot into its final slot.
            array[hi], array[pivot] = array[pivot], array[hi]
        else:
            # Swap the out-of-place pair and keep scanning.
            array[lo], array[hi] = array[hi], array[lo]
    # Recurse on the partitions on either side of the pivot's final position.
    quick_sort(array, start, hi - 1)
    quick_sort(array, hi + 1, end)
# Demo: sort the module-level sample list in place and show the result.
quick_sort(array, 0, len(array) - 1)
print(array)
# [0,1,2,3,4,5,6,7,8,9] | [
"sswwd95@gmail.com"
] | sswwd95@gmail.com |
d9504d622907d7e7e0e6a6772e0fc6a072b448be | 276e6b57d182875c3c9276360dcfbd26ba542492 | /main.py | 6cb0596d6d39368e67e622a9d677e1de169562b2 | [
"MIT"
] | permissive | Charlie-kun/Loan | c9bf7ebff209dca6059bfe6141ad67420678b5be | 930835221d9eb28ba7f6a3d6e1b1b72ab280683c | refs/heads/master | 2022-12-09T07:24:40.351771 | 2020-09-21T21:55:06 | 2020-09-21T21:55:06 | 285,209,738 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,155 | py | import os
import sys
import logging
import argparse
import json
import settings
import utils
import data_manager
# Entry point: parse hyper-parameters, then build and run the chosen learner.
if __name__ == '__main__': # add parameters
    parser = argparse.ArgumentParser()
    parser.add_argument('--stock_code', nargs='+') # add stock_code
    parser.add_argument('--ver', choices=['v1', 'v2'], default='v2') # chose policy?
    parser.add_argument('--rl_method',
                        choices=['dqn', 'pg', 'ac', 'a2c', 'a3c'])
    parser.add_argument('--net',
                        choices=['dnn', 'lstm', 'cnn'], default='dnn')
    parser.add_argument('--num_steps', type=int, default=1) # somethings step
    parser.add_argument('--lr', type=float, default=0.01)
    parser.add_argument('--discount_factor', type=float, default=0.9)
    parser.add_argument('--start_epsilon', type=float, default=0)
    parser.add_argument('--balance', type=int, default=10000000)
    parser.add_argument('--num_epoches', type=int, default=100)
    parser.add_argument('--delayed_reward_threshold',
                        type=float, default=0.05)
    parser.add_argument('--backend',
                        choices=['tensorflow', 'plaidml'], default='tensorflow')
    parser.add_argument('--output_name', default=utils.get_time_str())
    parser.add_argument('--value_network_name')
    parser.add_argument('--policy_network_name')
    parser.add_argument('--reuse_models', action='store_true')
    parser.add_argument('--learning', action='store_true')
    parser.add_argument('--start_date', default='20170101')
    parser.add_argument('--end_date', default='20171231')
    args = parser.parse_args()
    # Keras Backend setting
    if args.backend == 'tensorflow':
        os.environ['KERAS_BACKEND'] = 'tensorflow'
    elif args.backend == 'plaidml':
        os.environ['KERAS_BACKEND'] = 'plaidml.keras.backend'
    # output path setting
    output_path = os.path.join(settings.BASE_DIR,
        'output/{}_{}_{}'.format(args.output_name, args.rl_method, args.net))
    if not os.path.isdir(output_path):
        os.makedirs(output_path)
    # Record of parameter.
    with open(os.path.join(output_path, 'params.json'), 'w') as f:
        f.write(json.dumps(vars(args)))
    # log setting
    file_handler = logging.FileHandler(filename=os.path.join(
        output_path, "{}.log".format(args.output_name)), encoding='utf-8')
    stream_handler = logging.StreamHandler(sys.stdout)
    file_handler.setLevel(logging.DEBUG)
    stream_handler.setLevel(logging.INFO)
    logging.basicConfig(format="%(message)s",
        handlers=[file_handler, stream_handler], level=logging.DEBUG)
    # Log, Keras Backend setting first and RLTrader module import.
    # (These imports are deliberately deferred until the backend env vars
    # are set, because Keras picks its backend at import time.)
    from agent import Agent
    from learners import DQNLearner, PolicyGradientLearner, \
        ActorCriticLearner, A2CLearner, A3CLearner
    # ready for model path
    value_network_path = ''
    policy_network_path = ''
    if args.value_network_name is not None: # when No value network name, connect network path
        value_network_path = os.path.join(settings.BASE_DIR,
            'models/{}.h5'.format(args.value_network_name))
    else:
        value_network_path = os.path.join(
            output_path, '{}_{}_value_{}.h5'.format(
                args.rl_method, args.net, args.output_name))
    if args.policy_network_name is not None: # when No policy network name, connect network path
        policy_network_path = os.path.join(settings.BASE_DIR,
            'models/{}.h5'.format(args.policy_network_name))
    else:
        policy_network_path = os.path.join(
            output_path, '{}_{}_policy_{}.h5'.format(
                args.rl_method, args.net, args.output_name))
    common_params = {}
    list_stock_code = []
    list_chart_data = []
    list_training_data = []
    list_min_trading_unit = []
    list_max_trading_unit = []
    # A3C trains one learner over all stocks; the other methods train and
    # run one learner per stock code inside this loop.
    for stock_code in args.stock_code:
        # Chart data, ready for learn data.
        chart_data, training_data = data_manager.load_data(
            os.path.join(settings.BASE_DIR,
            'data/{}/{}.csv'.format(args.ver, stock_code)),
            args.start_date, args.end_date, ver=args.ver)
        # Min /Max trading unit setting
        min_trading_unit = max(int(100000 / chart_data.iloc[-1]['close']), 1)
        max_trading_unit = max(int(1000000 / chart_data.iloc[-1]['close']), 1)
        # common parameter setting.
        common_params = {'rl_method': args.rl_method,
            'delayed_reward_threshold': args.delayed_reward_threshold,
            'net': args.net, 'num_steps': args.num_steps, 'lr': args.lr,
            'output_path': output_path, 'reuse_models': args.reuse_models}
        # Start for reinforce learning
        learner = None
        if args.rl_method != 'a3c':
            common_params.update({'stock_code': stock_code,
                'chart_data': chart_data,
                'training_data': training_data,
                'min_trading_unit': min_trading_unit,
                'max_trading_unit': max_trading_unit})
            if args.rl_method == 'dqn':
                learner = DQNLearner(**{**common_params,
                    'value_network_path': value_network_path})
            elif args.rl_method == 'pg':
                learner = PolicyGradientLearner(**{**common_params,
                    'policy_network_path': policy_network_path})
            elif args.rl_method == 'ac':
                learner = ActorCriticLearner(**{**common_params,
                    'value_network_path': value_network_path,
                    'policy_network_path': policy_network_path})
            elif args.rl_method == 'a2c':
                learner = A2CLearner(**{**common_params,
                    'value_network_path': value_network_path,
                    'policy_network_path': policy_network_path})
            if learner is not None:
                learner.run(balance=args.balance,
                    num_epoches=args.num_epoches,
                    discount_factor=args.discount_factor,
                    start_epsilon=args.start_epsilon,
                    learning=args.learning)
                learner.save_models()
        else:
            # A3C: just collect per-stock data; the learner is built below.
            list_stock_code.append(stock_code)
            list_chart_data.append(chart_data)
            list_training_data.append(training_data)
            list_min_trading_unit.append(min_trading_unit)
            list_max_trading_unit.append(max_trading_unit)
    if args.rl_method == 'a3c':
        learner = A3CLearner(**{
            **common_params,
            'list_stock_code': list_stock_code,
            'list_chart_data': list_chart_data,
            'list_training_data': list_training_data,
            'list_min_trading_unit': list_min_trading_unit,
            'list_max_trading_unit': list_max_trading_unit,
            'value_network_path': value_network_path,
            'policy_network_path': policy_network_path})
        learner.run(balance=args.balance, num_epoches=args.num_epoches,
            discount_factor=args.discount_factor,
            start_epsilon=args.start_epsilon,
            learning=args.learning)
        learner.save_models()
| [
"chlh@daum.net"
] | chlh@daum.net |
b8d49ed51407d17cd6270de28b88938cd694fa87 | 49f875de9e18812b25ead4cd15a562e9f1347256 | /selection_sort.py | 357b07b8ad678f0c637fe113b98915da400095ef | [] | no_license | brandontarney/ds_algo_review | 020499e7e81435b2475fd50acc717f3093fc0f7f | cabd42f01bd8f027db0d0e3de870263adbf9327a | refs/heads/master | 2020-06-23T16:30:50.564707 | 2019-09-09T01:02:17 | 2019-09-09T01:02:17 | 198,680,204 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 848 | py | #!/bin/python
#NOTE - this is pseudocode, not real python code
#Algorithm
#Create sorted sublist (usually in the front of the list)
#Find the next smallest value and swap it into front position of a sorted sublist
#NOTE this is effectively iterating (inner loop) over the unsorted list vs. insertion sort iterating (inner loop) over the sorted sublist
#Performance
#AVG O(n^2)
#BEST O(n^2) - always ~n^2/2 comparisons, regardless of initial order
#WORST O(n^2)
#Form a sorted list starting at the front by simply swapping in the smallest value each iteration
def selection_sort(list):
    """Sort the list in place using selection sort.

    The previous body was C-style pseudocode and not runnable Python;
    this is the same algorithm expressed as real Python.
    """
    for i in range(len(list)):
        # Find the index of the smallest value in the unsorted tail.
        min_val_idx = i
        for j in range(i + 1, len(list)):
            if list[j] < list[min_val_idx]:
                min_val_idx = j
        # We found something smaller: swap it into position i.
        if min_val_idx != i:
            list[i], list[min_val_idx] = list[min_val_idx], list[i]
| [
"brandon.tarney@gmail.com"
] | brandon.tarney@gmail.com |
6c298475750028ddc507024c20d0d2fb7fe96055 | c3e2fd391265f13e9104e2d441d4b046e1b892b5 | /Pattern_TD_Trap.py | d0ce2792f0af14cb63f1c9dd57b1ae33cc87077a | [] | no_license | jamesliu1/The-Book-of-Trading-Strategies | 262f0824ca3b6ba34daea63fba80bca6492c4a6e | 11ecc59db0e0d6ff5ec0685bb6d3dcd26780cacf | refs/heads/main | 2023-08-23T15:28:38.112508 | 2021-10-19T13:28:19 | 2021-10-19T13:28:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,246 | py |
# Base parameters
expected_cost = 0.5 * (lot / 10000)  # spread estimate; assumes `lot` is defined upstream -- confirm
assets = asset_list(1)
window = 1000
# Trading parameters
horizon = 'H1'  # presumably the hourly timeframe -- confirm against mass_import
# Mass imports
my_data = mass_import(0, horizon)
def signal(Data):
    """Mark TD-Trap signals in-place: column 6 = bullish (1), column 7 = bearish (-1).

    Starts at i = 2 so the i-1 / i-2 lookbacks never wrap around to the
    end of the array via negative indexing, which previously produced
    spurious signals on the first two rows.
    """
    # Bullish signal
    for i in range(2, len(Data)):
        if Data[i - 1, 1] < Data[i - 2, 1] and Data[i - 1, 2] > Data[i - 2, 2] and Data[i, 3] > Data[i - 1, 1]:
            Data[i, 6] = 1
    # Bearish signal
    for i in range(2, len(Data)):
        if Data[i - 1, 1] < Data[i - 2, 1] and Data[i - 1, 2] > Data[i - 2, 2] and Data[i, 3] < Data[i - 1, 2]:
            Data[i, 7] = -1
    return Data
############################################################################## 1
# Generate signals, simulate the holding, then plot the equity curve.
my_data = adder(my_data, 10)
my_data = signal(my_data)
if sigchart == True:
    signal_chart_ohlc_color(my_data, assets[0], 3, 6, 7, window = 250)
holding(my_data, 6, 7, 8, 9)
my_data_eq = equity_curve(my_data, 8, expected_cost, lot, investment)
performance(my_data_eq, 8, my_data, assets[0])
plt.plot(my_data_eq[:, 3], linewidth = 1, label = assets[0])
plt.grid()
plt.legend()
plt.axhline(y = investment, color = 'black', linewidth = 1) | [
"noreply@github.com"
] | jamesliu1.noreply@github.com |
b4ebea591ef98eba50becc2628f71215e816a37f | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_84/306.py | 0561a547b612e83a36f4cf677430a4ecdf3d37f6 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,233 | py | import sys, math
from multiprocessing import Pool
def main(data):
    """Redraw each 2x2 block of '#' cells as a forward/back-slash picture.

    data is a tuple (R, C, s) where s is an R x C grid of one-character
    lists.  Returns the redrawn grid joined with newlines, or the string
    "Impossible" when the '#' cells cannot be tiled by 2x2 squares.
    """
    R, C, s = data
    for i in range(R):
        for j in range(C):
            try:
                if s[i][j] == "#":
                    # An unprocessed '#' must be the top-left of a 2x2 block.
                    if s[i][j+1] == "#" and s[i+1][j] == "#" and s[i+1][j+1] == "#":
                        s[i][j] = "/"
                        s[i][j+1] = "\\"
                        s[i+1][j] = "\\"
                        s[i+1][j+1] = "/"
                    else:
                        return "Impossible"
            # The 2x2 block would stick out of the grid.  (Narrowed from a
            # bare except, which also swallowed KeyboardInterrupt and bugs.)
            except IndexError:
                return "Impossible"
    return "\n".join(["".join(l) for l in s])
if __name__ == "__main__":
    # Python 2 script: read the Code Jam input (argv[1], falling back to
    # test.txt) and solve each case -- in parallel when a file was given.
    mode = 0
    if len(sys.argv) > 1:
        f = open(sys.argv[1])
        mode = 1
    else:
        f = open("test.txt")
    T = int(f.readline())
    data = []
    for i in range(T):
        R,C = map(int, f.readline().strip().split())
        s = list()
        for j in range(R):
            s.append(list(f.readline().strip()))
        data.append((R, C, s))
    if mode == 1:
        pool = Pool()
        r = pool.map(main, data)
    else:
        r = map(main, data)
    for i in range(T):
print "Case #%d: \n%s" % (i+1, r[i]) | [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
86595f3567adfa865a0e8806fba2bb2cd8d64109 | ade758c24cd547689012a61b55ccf77e33a2bbf2 | /93/93.py | 0ab93003b896e27ece999a42c10a5d8a9c0eaef6 | [] | no_license | danmedani/euler | 7f7dda0ee295a77eb6faca0a4aa15015850aed72 | eeef3a4d9c188f954842f7c3adc37d58588c4781 | refs/heads/master | 2023-08-17T03:26:36.864451 | 2023-08-08T02:35:46 | 2023-08-08T02:35:46 | 14,157,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,362 | py | import math
import copy
# Sentinel meaning "this operator tree did not yield a whole number".
SENT = -999999
def doOp(num1, num2, op):
    """Apply one arithmetic operator (Python 2 module)."""
    if op == '*':
        return num1 * num2
    elif op == '+':
        return num1 + num2
    elif op == '-':
        return num1 - num2
    elif op == '/':
        if num2 == 0:
            # Arbitrary non-integer marker: division by zero can then never
            # masquerade as a valid whole-number result downstream.
            return 1.9874352345
        else:
            # Force true division under Python 2 integer semantics.
            return 1.0 * num1 / num2
    else:
        print 'oh crap!'
def _int_or_sentinel(value):
    # Whole numbers (within tolerance) collapse to int; anything else
    # becomes the SENT marker.
    if value - int(value) > 0.0001:
        return SENT
    return int(value)

def getTree1(numList, opList):
    """Parenthesisation a ? (b ? (c ? d))."""
    return _int_or_sentinel(doOp(numList[0], doOp(numList[1], doOp(numList[2], numList[3], opList[2]), opList[1]), opList[0]))

def getTree2(numList, opList):
    """Parenthesisation a ? ((b ? c) ? d)."""
    return _int_or_sentinel(doOp(numList[0], doOp(doOp(numList[1], numList[2], opList[2]), numList[3], opList[1]), opList[0]))

def getTree3(numList, opList):
    """Parenthesisation (a ? (b ? c)) ? d."""
    return _int_or_sentinel(doOp(doOp(numList[0], doOp(numList[1], numList[2], opList[2]), opList[1]), numList[3], opList[0]))

def getTree4(numList, opList):
    """Parenthesisation ((a ? b) ? c) ? d."""
    return _int_or_sentinel(doOp(doOp(doOp(numList[0], numList[1], opList[2]), numList[2], opList[1]), numList[3], opList[0]))

def getTree5(numList, opList):
    """Parenthesisation (a ? b) ? (c ? d)."""
    return _int_or_sentinel(doOp(doOp(numList[0], numList[1], opList[1]), doOp(numList[2], numList[3], opList[2]), opList[0]))
# All 64 ordered operator triples, filled in by getOpList below.
opList = []
ops = ['+', '-', '/', '*']
def getOpList(soFar):
    """Recursively enumerate every length-3 operator sequence into opList."""
    global opList
    if len(soFar) == 3:
        opList.append(soFar)
        return
    for op in ops:
        soFarCop = copy.deepcopy(soFar)
        soFarCop.append(op)
        getOpList(soFarCop)
getOpList([])
# Scratch list of digit permutations, rebuilt by getNummySet per call.
fullList = []
def getNummySet(nums):
    """Rebuild the global fullList with every ordering (permutation) of nums."""
    global fullList
    fullList = []
    getFullNumSet(nums, [])
def getFullNumSet(nums, numList):
    """Recursive helper: move elements from nums into numList in every order."""
    global fullList
    if len(nums) == 0:
        fullList.append(numList)
    for i in xrange(len(nums)):
        numsCop = copy.deepcopy(nums)
        numListCop = copy.deepcopy(numList)
        numListCop.append(numsCop[i])
        del(numsCop[i])
        getFullNumSet(numsCop, numListCop)
def getAllNumOps(num):
    """Set of all integers reachable from the four digits (Euler 93).

    `num` is actually the list of four digits; every permutation is combined
    with every operator triple under all five parenthesisations.
    """
    global fullList
    global opList
    getNummySet(num)
    fullListy = set([])
    for nums in fullList:
        for op in opList:
            fullListy.add(getTree1(nums, op))
            fullListy.add(getTree2(nums, op))
            fullListy.add(getTree3(nums, op))
            fullListy.add(getTree4(nums, op))
            fullListy.add(getTree5(nums, op))
    # NOTE(review): assumes 0 is always reachable; set.remove raises otherwise.
    fullListy.remove(0)
    return fullListy
def getOneTo(num):
    """Largest n such that every integer 1..n is attainable from the digit list."""
    allNums = getAllNumOps(num)
    candidate = 1
    while candidate in allNums:
        candidate = candidate + 1
    return candidate - 1
# Enumerate every ordered 4-tuple of distinct digits into digList.
digList = []
digs = range(10)  # Python 2: range() returns a list here
def getNumList(soFar, digs):
    """Recursively build every ordered 4-tuple of distinct digits."""
    global digList
    if len(soFar) == 4:
        digList.append(soFar)
        return
    for op in digs:
        soFarCop = copy.deepcopy(soFar)
        soFarCop.append(op)
        digCop = copy.deepcopy(digs)
        digCop.remove(op)
        getNumList(soFarCop, digCop)
getNumList([], digs)
# Packed digit-set hashes already seen (for de-duplication).
digHashMap = {}
def hashIt(lis):
    """Pack four digits into a single int so sorted digit sets can be de-duplicated."""
    packed = lis[0]
    packed += lis[1] * 100
    packed += lis[2] * 10000
    packed += lis[3] * 1000000
    return packed
# Keep one representative per unordered digit set.
finalDigz = []
for digL in digList:
    digL.sort()
    hh = hashIt(digL)
    if hh not in digHashMap:
        finalDigz.append(digL)
        digHashMap[hh] = True
bigz = 0
# Answer: the digit set with the longest 1..n run of expressible integers.
for fDigz in finalDigz:
    highestNum = getOneTo(fDigz)
    if highestNum > bigz:
        bigz = highestNum
    print highestNum, fDigz
| [
"danmedani@gmail.com"
] | danmedani@gmail.com |
ffc5de43ef8bdec5bcaa803f057de2f9ed1be0f1 | 85be26cd8c2ee8afb3d7ce2495f320e81cb7582f | /pylearn2/minimun_sample/minimum_test.py | 920a1de30bcb8c568241463c2154706aff0468da | [] | no_license | basharbme/deeplearning4windows | 368f9faf5902d943bad053de62dcc6860d20ae79 | 20fea62a2f17ba142e68a349d1cffa582eb6312a | refs/heads/master | 2020-05-24T05:31:28.835187 | 2016-02-03T10:13:04 | 2016-02-03T10:13:04 | 187,117,816 | 1 | 0 | null | 2019-05-17T00:09:27 | 2019-05-17T00:09:26 | null | UTF-8 | Python | false | false | 1,482 | py | # coding: UTF-8
import os,codecs,platform
from pylearn2.config import yaml_parse
from pylearn2.scripts.train import train
import numpy as np
from pylearn2.utils import serial
# pylearn2 resolves ${PYLEARN2_DATA_PATH} inside YAML; point it at the parent dir.
os.environ["PYLEARN2_DATA_PATH"] = os.path.dirname(os.getcwd())
# Theano device selection: CPU on Windows, GPU elsewhere.
if platform.system() == "Windows":
    os.environ['THEANO_FLAGS'] = "floatX=float32,device=cpu"
else:
    os.environ['THEANO_FLAGS'] = "floatX=float32,device=gpu"
def ccc(name):
    """Codec search hook: resolve the Windows 'windows-31j' name to UTF-8."""
    if name.lower() != 'windows-31j':
        return None
    return codecs.lookup('utf-8')
# Register the alias so 'windows-31j' codec lookups resolve to UTF-8.
codecs.register(ccc)
# prepare training data
# topo_view = np.zeros([5,28,28])
topo_view = np.random.randint(0,1,(3,28,28)) # ints in [0, 1) -- i.e. all zeros -- as a 3 x 28 x 28 topological view
m, r, c = topo_view.shape
assert r == 28
assert c == 28
topo_view = topo_view.reshape(m, r, c, 1) # append the channel axis: design-matrix layout expected by pylearn2
serial.save("input.pkl", topo_view)
serial.save("label.pkl", np.array([[0],[1],[2]]))
yaml = open("minimum.yaml", 'r').read()
hyper_params = {'train_stop': 5,
                'valid_stop': 50050,
                'test_stop': 5,
                'batch_size': 3, # must be a multiple of the sample count (otherwise errors out)
                'output_channels_h2': 4,
                'output_channels_h3': 4,
                'max_epochs': 5,
                'save_path': 'result'
                }
yaml = yaml % (hyper_params)
train = yaml_parse.load(yaml)
train.main_loop()
# train("minimum.yaml")
| [
"jgpuauno@gmail.com"
] | jgpuauno@gmail.com |
b7c734fbbfa3614e2f49ef72774e5dfc16bc2550 | 6c12904dde5ee546cb965c6c0af6901c7f89bea7 | /volume.py | 97951333780cd6388510ea93a05350fd9178a8f1 | [] | no_license | ad52825196/simple-file-system | 59eff6e1e628a5b49460b18bc339625b776bb89d | 58256b29ef6ebef6a154c831c619a340644d6b57 | refs/heads/master | 2020-04-06T03:35:16.877249 | 2017-04-25T00:11:05 | 2017-04-25T00:11:05 | 68,357,972 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,261 | py | import drive
import directoryentry
class Volume:
    """A minimal file system on top of a Drive.

    Block 0 is special: its first DRIVE_SIZE characters are the block
    allocation bitmap ('-' free, '+' used) and the remainder holds the
    root directory's packed DirectoryEntry records.  Every other block
    holds either file data or a sub-directory's entries.
    """

    BITMAP_FREE_BLOCK = '-'
    BITMAP_USED_BLOCK = '+'

    def __init__(self, name):
        self.name = name
        self.drive = drive.Drive(name)

    def format(self):
        """Re-initialise the drive: all-free bitmap plus an empty root directory."""
        self.drive.format()
        block = drive.Drive.EMPTY_BLK
        block = Volume.modify_block(block, 0, Volume.BITMAP_FREE_BLOCK * drive.Drive.DRIVE_SIZE)
        entry = str(directoryentry.DirectoryEntry())
        cursor = drive.Drive.DRIVE_SIZE
        flag = True
        while flag:
            try:
                block = Volume.modify_block(block, cursor, entry)
                cursor += directoryentry.DirectoryEntry.ENTRY_LENGTH
            # modify_block raises ValueError once the next entry would run
            # past the end of the block.  (Narrowed from a bare except,
            # which also swallowed KeyboardInterrupt and real bugs.)
            except ValueError:
                flag = False
        self.write_block(0, block)

    def reconnect(self):
        self.drive.reconnect()

    def disconnect(self):
        self.drive.disconnect()

    def ls(self, full_pathname):
        """Return a list of DirectoryEntry objects in the given directory."""
        entry, block_number_list = self.locate(full_pathname, directoryentry.DirectoryEntry.DIRECTORY, show = True)
        if entry is not None:
            block_number_list = entry.get_valid_blocks()
        return self.get_block_number_list_directory_entry(block_number_list)

    def mkfile(self, full_pathname, file_type = directoryentry.DirectoryEntry.FILE):
        """Create an empty file (or directory) at full_pathname."""
        parent_entry, block_number_list, file_name = self.locate(full_pathname, file_type, True)
        empty_entry_list = self.get_block_number_list_directory_entry(block_number_list, True)
        if len(empty_entry_list) > 0:
            entry = empty_entry_list[0]
        elif parent_entry is not None:
            # not root directory: grow the parent with a fresh entry block
            block_number, block = self.allocate_new_directory_block()
            parent_entry.add_new_block(block_number)
            self.write_block(block_number, block)
            parent_entry.file_length += len(block)
            self.write_entry(parent_entry)
            entry = directoryentry.DirectoryEntry(block_number = block_number)
        else:
            raise IOError("no more space in root directory")
        entry.file_type = file_type
        entry.file_name = file_name
        self.write_entry(entry)

    def mkdir(self, full_pathname):
        """Create an empty directory at full_pathname."""
        self.mkfile(full_pathname, directoryentry.DirectoryEntry.DIRECTORY)

    def append(self, full_pathname, data):
        """Append data to an existing file and persist its updated entry."""
        content, entry = self.get_file_content(full_pathname)
        content += data
        entry = self.write_file_content(entry, content)
        self.write_entry(entry)

    def get_file_content(self, full_pathname):
        """Return the file content along with the directory entry of this file."""
        entry, block_number_list = self.locate(full_pathname, directoryentry.DirectoryEntry.FILE)
        return self.get_entry_content(entry), entry

    def delfile(self, full_pathname, file_type = directoryentry.DirectoryEntry.FILE):
        """Delete a file (or an empty directory), releasing all of its blocks."""
        entry, block_number_list = self.locate(full_pathname, file_type)
        block_number_list = entry.get_valid_blocks()
        if file_type == directoryentry.DirectoryEntry.DIRECTORY:
            entry_list = self.get_block_number_list_directory_entry(block_number_list)
            if len(entry_list) > 0:
                raise IOError("directory is not empty")
        for block_number in block_number_list:
            self.write_block(block_number, release = True)
        # Blank out the entry in the parent directory, keeping its slot.
        entry = directoryentry.DirectoryEntry(block_number = entry.block_number, start = entry.start)
        self.write_entry(entry)

    def deldir(self, full_pathname):
        """Delete an empty directory at full_pathname."""
        self.delfile(full_pathname, directoryentry.DirectoryEntry.DIRECTORY)

    @staticmethod
    def modify_block(block, start, data):
        """Return block with data spliced in at start; ValueError on overflow."""
        end = start + len(data)
        if end > len(block):
            raise ValueError("invalid internal data")
        return block[:start] + data + block[end:]

    def write_block(self, n, data = '', release = False):
        """Write (space-padded) data to block n and update the bitmap in block 0."""
        if release:
            data = drive.Drive.EMPTY_BLK
        data += ' ' * (drive.Drive.BLK_SIZE - len(data))
        self.drive.write_block(n, data)
        block = self.drive.read_block(0)
        if release:
            block = Volume.modify_block(block, n, Volume.BITMAP_FREE_BLOCK)
        else:
            block = Volume.modify_block(block, n, Volume.BITMAP_USED_BLOCK)
        self.drive.write_block(0, block)

    @staticmethod
    def get_path_list(full_pathname):
        """Split an absolute pathname into its components; [] for the root."""
        path_list = full_pathname.split('/')
        if path_list[0] != '' or len(path_list) < 2:
            raise ValueError("invalid pathname")
        if len(path_list) == 2 and path_list[-1] == '':
            return []
        else:
            return path_list[1:]

    def get_block_directory_entry(self, n, empty = False):
        """Return a list of DirectoryEntry objects in block n."""
        block = self.drive.read_block(n)
        cursor = 0
        if n == 0:
            # skip bitmap
            cursor += drive.Drive.DRIVE_SIZE
        entry_list = []
        while cursor < drive.Drive.BLK_SIZE:
            entry = directoryentry.DirectoryEntry(block[cursor:cursor + directoryentry.DirectoryEntry.ENTRY_LENGTH], n, cursor)
            cursor += directoryentry.DirectoryEntry.ENTRY_LENGTH
            if (not empty and len(entry.file_name) > 0) or (empty and len(entry.file_name) == 0):
                entry_list.append(entry)
        return entry_list

    def get_block_number_list_directory_entry(self, block_number_list, empty = False):
        """Return a list of DirectoryEntry objects in all blocks given in the list."""
        entry_list = []
        for block_number in block_number_list:
            entry_list += self.get_block_directory_entry(block_number, empty)
        return entry_list

    def locate(self, full_pathname, file_type = directoryentry.DirectoryEntry.FILE, make = False, show = False):
        """Return the DirectoryEntry object of the final file or directory if make is False, otherwise the DirectoryEntry object of the parent directory. Also return a block number list containing all the blocks owned by the parent directory. If this is the root directory, the returning DirectoryEntry object will be None and the block number list will only contain block 0."""
        path_list = Volume.get_path_list(full_pathname)
        entry = None
        block_number_list = [0]
        if len(path_list) == 0:
            # root directory
            if show:
                return entry, block_number_list
            else:
                raise ValueError("no file name specified")
        directory_list = path_list[:-1]
        file_name = path_list[-1]
        if len(file_name) == 0:
            raise ValueError("no file name specified")
        if make and len(file_name) > directoryentry.DirectoryEntry.MAX_FILE_NAME_LENGTH:
            raise ValueError("file name too long")
        if ' ' in file_name:
            raise ValueError("cannot have spaces in file name")
        parent_entry = None
        for directory in directory_list:
            entry_list = self.get_block_number_list_directory_entry(block_number_list)
            # find the directory
            parent_entry = Volume.find_entry_in_entry_list(directoryentry.DirectoryEntry.DIRECTORY, directory, entry_list)
            if parent_entry is None:
                # (typo fix: "dose" -> "does")
                raise ValueError("directory '{}' does not exist".format(directory))
            block_number_list = parent_entry.get_valid_blocks()
        entry_list = self.get_block_number_list_directory_entry(block_number_list)
        entry = Volume.find_entry_in_entry_list(file_type, file_name, entry_list)
        if make and entry is not None:
            raise ValueError("'{}' already exists".format(file_name))
        elif not make and entry is None:
            raise ValueError("'{}' does not exist".format(file_name))
        if make:
            return parent_entry, block_number_list, file_name
        return entry, block_number_list

    def allocate_new_directory_block(self):
        """Find a free block and generate a block filled with directory entries but not write to the disk. Return the free block number and the content of the block."""
        block_number = self.find_free_block()
        block = drive.Drive.EMPTY_BLK
        entry = str(directoryentry.DirectoryEntry())
        cursor = 0
        flag = True
        while flag:
            try:
                block = Volume.modify_block(block, cursor, entry)
                cursor += directoryentry.DirectoryEntry.ENTRY_LENGTH
            # ValueError from modify_block marks the end of the block.
            except ValueError:
                flag = False
        return block_number, block

    def find_free_block(self):
        """Find a free block in the volume."""
        block = self.drive.read_block(0)
        for i in range(drive.Drive.DRIVE_SIZE):
            if block[i] == Volume.BITMAP_FREE_BLOCK:
                return i
        raise IOError("no more space in volume '{}'".format(self.name))

    def write_entry(self, entry):
        """Persist a DirectoryEntry back into its owning directory block."""
        block = self.drive.read_block(entry.block_number)
        block = Volume.modify_block(block, entry.start, str(entry))
        self.write_block(entry.block_number, block)

    @staticmethod
    def find_entry_in_entry_list(file_type, file_name, entry_list):
        """Return the found DirectoryEntry object in the entry_list or None if does not exist."""
        for entry in entry_list:
            if entry.file_type == file_type and entry.file_name == file_name:
                return entry

    def get_entry_content(self, entry):
        """Concatenate the entry's blocks and trim to its recorded length."""
        content = ''
        block_number_list = entry.get_valid_blocks()
        for block_number in block_number_list:
            content += self.drive.read_block(block_number)
        return content[:entry.file_length]

    def write_file_content(self, entry, content):
        """Write content across the entry's blocks, allocating more as needed."""
        entry.file_length = 0
        block_number_list = entry.get_valid_blocks()
        while len(content) > 0:
            if len(block_number_list) > 0:
                block_number = block_number_list.pop(0)
            else:
                block_number = self.find_free_block()
                entry.add_new_block(block_number)
            self.write_block(block_number, content[:drive.Drive.BLK_SIZE])
            entry.file_length += min(drive.Drive.BLK_SIZE, len(content))
            content = content[drive.Drive.BLK_SIZE:]
        return entry
| [
"me@zhen-chen.com"
] | me@zhen-chen.com |
327203d439300f410de4e56199b07bcb7a5b1cb1 | 3ca67d69abd4e74b7145b340cdda65532f90053b | /programmers/난이도별/level01.제일_작은_수_제거하기/Jaewon0702.py | 9574b875696e370e939054a0279eb98293b8defd | [] | no_license | DKU-STUDY/Algorithm | 19549516984b52a1c5cd73e1ed1e58f774d6d30e | 6f78efdbefd8eedab24e43d74c7dae7f95c2893b | refs/heads/master | 2023-02-18T06:48:39.309641 | 2023-02-09T07:16:14 | 2023-02-09T07:16:14 | 258,455,710 | 175 | 49 | null | 2023-02-09T07:16:16 | 2020-04-24T08:42:27 | Python | UTF-8 | Python | false | false | 156 | py | def solution(arr):
arr.remove(min(arr))
return arr if len(arr) else [-1]
print(solution([4, 3, 2, 1]) == [4, 3, 2])
print(solution([10]) == [-1])
| [
"45033215+sangmandu@users.noreply.github.com"
] | 45033215+sangmandu@users.noreply.github.com |
7ea07cb2116811e27e177f7323f15767d451495b | 0045204c130599381ee69c771478ac1609dfe67e | /HW_1/problem_3.py | f841cdde32c6b0fa7f53ae9d6f96339525b533b4 | [] | no_license | muzhts-anton/Differential-geometry | 1dea9003a3450cbe89e5beb220804cff67a5a230 | 56da14cf1b71cd4fca2a9fa46de2ef2dedd39ac1 | refs/heads/main | 2023-06-05T21:53:58.032013 | 2021-06-25T08:34:02 | 2021-06-25T08:34:02 | 360,800,931 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,826 | py | # problem 3 diffGem HW1
from sympy import symbols, diff, sin, cos, sqrt, simplify, Matrix
X_1, X_2, X_3 = symbols('X_1 X_2 X_3', positive=True)
A = Matrix([[1, 0, 0],
[0, sqrt(3)/2, -1/2],
[0, 1/2, sqrt(3)/2]])
X = [X_1,
X_2,
X_3]
x_sphere = [X_1 * sin(X_2) * cos(X_3),
X_1 * sin(X_2) * sin(X_3),
X_1 * cos(X_2)]
Q_mixed = Matrix([[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]])
Jacobian = Matrix([[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]])
for i in range(3):
for k in range(3):
Jacobian[i, k] = diff(x_sphere[i], X[k])
# (1)
for i in range(3):
for k in range(3):
for j in range(3):
Q_mixed[i, k] += A[i, j] * Jacobian[j, k]
# The latex output is too ugly. TODO(Tony): rewrite it
# (2)
r_covar_1 = [Q_mixed[0, 0], Q_mixed[1, 0], Q_mixed[2, 0]]
r_covar_2 = [Q_mixed[0, 1], Q_mixed[1, 1], Q_mixed[2, 1]]
r_covar_3 = [Q_mixed[0, 2], Q_mixed[1, 2], Q_mixed[2, 2]]
r_covar = [r_covar_1, r_covar_2, r_covar_3]
# (3)
g_covar = Matrix([[1, 0, 0],
[0, X_1**2, 0],
[0, 0, X_1**2 * (sin(X_2))**2]])
g_contra = g_covar**(-1)
# (4)
Q_contra = Matrix([[0, 0, 0],
[0, 0, 0],
[0, 0, 0]])
W = Matrix([[0, 0, 0],
[0, 0, 0],
[0, 0, 0]])
for i in range(3):
for m in range(3):
for j in range(3):
Q_contra[i, m] += g_contra[i, j] * Jacobian[m, j]
for i in range(3):
for j in range(3):
for m in range(3):
W[i, j] += A[j, m] * Q_contra[i, m]
# (5)
Christoffel_mixed = [Matrix([[0, 0, 0], [0, 0, 0], [0, 0, 0]]),
Matrix([[0, 0, 0], [0, 0, 0], [0, 0, 0]]),
Matrix([[0, 0, 0], [0, 0, 0], [0, 0, 0]])]
for i in range(3):
for j in range(3):
for m in range(3):
for k in range(3):
Christoffel_mixed[m][i, j] += 1/2 * g_contra[k, m] * (diff(g_covar[k, j], X[i]) + diff(g_covar[i, k], X[j]) - diff(g_covar[i, j], X[k]))
Christoffel_covar = [Matrix([[0, 0, 0], [0, 0, 0], [0, 0, 0]]),
Matrix([[0, 0, 0], [0, 0, 0], [0, 0, 0]]),
Matrix([[0, 0, 0], [0, 0, 0], [0, 0, 0]])]
for k in range(3):
for i in range(3):
for j in range(3):
for m in range(3):
if (i == 0) & (j == 0):
print(Christoffel_mixed[m][i, j] * g_covar[m, k])
Christoffel_covar[i][j, k] += Christoffel_mixed[m][i, j] * g_covar[m, k]
print(Christoffel_covar)
# (6)
H = [0, 0, 0]
for i in range(3):
H[i] = sqrt(g_covar[i, i])
simplify(H[i])
| [
"muzhts.anton@gmail.com"
] | muzhts.anton@gmail.com |
bd9a420a7684d527bcd274c32086f85330ec970b | 2704ad14c83050ac28f403371daa8e3148440e00 | /chiadoge/wallet/did_wallet/did_info.py | 2294be358c05f883b729c58c3c37a27b0b590ce5 | [
"Apache-2.0"
] | permissive | Bgihe/chiadoge-blockchain | d5e01a53c8e15fa17c47b44d9c95e6511aa98b7f | befb179c65ffe42aebbc47c211f78e193a095d2b | refs/heads/main | 2023-06-01T05:31:51.503755 | 2021-07-05T20:47:32 | 2021-07-05T20:47:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 919 | py | from dataclasses import dataclass
from typing import List, Optional, Tuple
from chiadoge.types.blockchain_format.sized_bytes import bytes32
from chiadoge.util.ints import uint64
from chiadoge.util.streamable import streamable, Streamable
from chiadoge.wallet.cc_wallet.ccparent import CCParent
from chiadoge.types.blockchain_format.program import Program
from chiadoge.types.blockchain_format.coin import Coin
@dataclass(frozen=True)
@streamable
class DIDInfo(Streamable):
origin_coin: Optional[Coin] # puzzlehash of this coin is our DID
backup_ids: List[bytes]
num_of_backup_ids_needed: uint64
parent_info: List[Tuple[bytes32, Optional[CCParent]]] # {coin.name(): CCParent}
current_inner: Optional[Program] # represents a Program as bytes
temp_coin: Optional[Coin] # partially recovered wallet uses these to hold info
temp_puzhash: Optional[bytes32]
temp_pubkey: Optional[bytes]
| [
"83430349+lionethan@users.noreply.github.com"
] | 83430349+lionethan@users.noreply.github.com |
e3082e2c45c280050b7ffcd31885765d334863b4 | 9424df118a26170f023a665bdf8b0dc462f91721 | /project_utils.py | a22dd72aa5ba5acfa30a6a98965efbe8875a6bfb | [] | no_license | marcelthebridge/602_ml_Project1 | 65729b4faf8746554815f8e63089a1db33fec603 | 8f4afa9a6b36920e6869673df96d9c219014c2d0 | refs/heads/main | 2022-12-28T15:50:38.187531 | 2020-10-13T20:37:25 | 2020-10-13T20:37:25 | 302,507,432 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,973 | py | # Functions for use with Project_1 notebooks
# Imports:
from matplotlib.colors import ListedColormap
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
def data_plot(hue, data):
for i, col in enumerate(data.columns):
plt.figure(i)
sns.set(rc={'figure.figsize':(20, 5)})
ax = sns.countplot(x=data[col],palette='mako',hue=hue,data=data)
def print_results(classifier, X_test, y_test):
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
print('Results of {} Model: \n'.format(classifier))
print('Accuracy of model {0:.4f}\n'.format(accuracy_score(y_test,classifier.predict(X_test))))
print('Classification Report:\n{}\n'.format(classification_report(y_test,classifier.predict(X_test))))
print('Confusion Matrix:\n{}\n'.format(confusion_matrix(y_test,classifier.predict(X_test))))
def visual_model(title, X, y, classifier, resolution=0.05):
# setup marker generator and color map
markers = ('x', 'o')
colors = ('black','cyan')
cmap = ListedColormap(colors[:len(np.unique(y))])
plt.figure(figsize=(15,10))
#plot surface
x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
np.arange(x2_min, x2_max, resolution))
Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
Z = Z.reshape(xx1.shape)
plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
plt.xlim(xx1.min(), xx1.max())
plt.ylim(xx2.min(), xx2.max())
# plot class examples
for idx, cl in enumerate(np.unique(y)):
plt.scatter(x=X[y == cl, 0],
y=X[y == cl, 1],
alpha=0.8,
c=colors[idx],
marker=markers[idx],
label=cl,
edgecolor='black')
| [
"noreply@github.com"
] | marcelthebridge.noreply@github.com |
c4315cc3d79adaa753717029196b2f1e64a56817 | 6bad224bb4c81facc0ed44d2330922d1826e23fb | /spamfilter.py | 97357ab1a60999ea37c2aaa6a1babdd0a9a6e511 | [] | no_license | zaid98/nlp | af7c83a692ec6e8cba1936a0288efa38d3a5ce26 | 4fbbcf9e35c9b882685abc1557a5e1c8cf78d565 | refs/heads/master | 2020-06-15T03:25:38.232934 | 2019-07-04T07:36:25 | 2019-07-04T07:36:25 | 195,192,157 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,134 | py | import nltk
from nltk.corpus import stopwords
import string
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
messages = pd.read_csv('spam.csv', encoding='latin-1')
messages.drop(['Unnamed: 2','Unnamed: 3','Unnamed: 4'],axis=1,inplace=True)
messages = messages.rename(columns={'v1': 'class','v2': 'text'})
def process_text(text):
np = [char for char in text if char not in string.punctuation]
np = ''.join(np)
cleaned = [word for word in np.split() if word.lower() not in stopwords.words('english')]
return cleaned
mail_train, mail_test, class_train, class_test = train_test_split(messages['text'],messages['class'],test_size=0.2)
pipeline = Pipeline([
('count',CountVectorizer(analyzer=process_text)),
('tfidf',TfidfTransformer()),
('classifier',MultinomialNB())
])
pipeline.fit(mail_train,class_train)
predictions = pipeline.predict(mail_test)
| [
"noreply@github.com"
] | zaid98.noreply@github.com |
d61b6b6aa07912fb0f5b6d10f2b0e4d67c896405 | d79f3e7df0fb9dcf23a9ae1adf3c285dd08a360f | /list.py | c25a37e7d50ab435e4c4d9ee894dae68382d6c4a | [] | no_license | ramkodgreat/python1 | 4930afb1bb7f63798bd4237e753aecf5b2ba072b | 64ee9f210aab3b177fc41d351499106876a1d3fc | refs/heads/master | 2020-11-25T18:15:04.315157 | 2019-12-18T07:51:42 | 2019-12-18T07:51:42 | 228,789,259 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 789 | py | # List can contain strings integer or float points
# Python index begins from zero
a =["string","int",1,2]
#indexing a list
a[2]
print(a[2])
# Returns all the values of a
b = a[:]
print(b)
#Overwriting values in a string
a[1] = "glo"
print(a)
print(b)
# Skipping one item from a list
a =[1,2,3,4,5,6,7,8,9,10]
# Side note first position is always inclusive while the last position is always exclusive
val = a[2:4]
print(val)
# Print subset of zero to four but skip three values while printing
val = a[0:4:3]
print(val)
# Print subset of zero to nine but skip three values while printing
#It skips three intermittently
# n:n-1
val = a[0:10:3]
print(val)
# Negative value prints backward
val = a[-2]
print(val)
# Findout how to use range of negative numbers
#val = a[-1:-3]
print(val) | [
"ramkodgreat@gmail.com"
] | ramkodgreat@gmail.com |
093c9c5f1b37d499d6bb6486317cbdcbb89a838e | 17b63416cf2f66246e1cf655ccfa2eb9a108da3c | /abupy/AlphaBu/ABuPickStockExecute.py | f344c2ed857ae0f8c94dc194d151f49cddb60f57 | [] | no_license | cmy00cmy/qtLearning | 58aec5cf9fccf9d8f14adf1793306b8b8b5ecb7f | 2b5fee7b9bbd832b20ba4e1b508be16b606249e0 | refs/heads/master | 2020-03-20T01:42:19.882639 | 2018-06-12T14:52:00 | 2018-06-12T14:52:00 | 137,085,926 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,835 | py | # -*- encoding:utf-8 -*-
"""
包装选股worker进行,完善前后工作
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from .ABuPickStockWorker import AbuPickStockWorker
from ..CoreBu.ABuEnvProcess import add_process_env_sig
from ..MarketBu.ABuMarket import split_k_market
from ..TradeBu.ABuKLManager import AbuKLManager
from ..CoreBu.ABuFixes import ThreadPoolExecutor
__author__ = '阿布'
__weixin__ = 'abu_quant'
@add_process_env_sig
def do_pick_stock_work(choice_symbols, benchmark, capital, stock_pickers):
"""
包装AbuPickStockWorker进行选股
:param choice_symbols: 初始备选交易对象序列
:param benchmark: 交易基准对象,AbuBenchmark实例对象
:param capital: 资金类AbuCapital实例化对象
:param stock_pickers: 选股因子序列
:return:
"""
kl_pd_manager = AbuKLManager(benchmark, capital)
stock_pick = AbuPickStockWorker(capital, benchmark, kl_pd_manager, choice_symbols=choice_symbols,
stock_pickers=stock_pickers)
stock_pick.fit()
return stock_pick.choice_symbols
@add_process_env_sig
def do_pick_stock_thread_work(choice_symbols, benchmark, capital, stock_pickers, n_thread):
"""包装AbuPickStockWorker启动线程进行选股"""
result = []
def when_thread_done(r):
result.extend(r.result())
with ThreadPoolExecutor(max_workers=n_thread) as pool:
thread_symbols = split_k_market(n_thread, market_symbols=choice_symbols)
for symbols in thread_symbols:
future_result = pool.submit(do_pick_stock_work, symbols, benchmark, capital, stock_pickers)
future_result.add_done_callback(when_thread_done)
return result
| [
"chenmyuan@163.com"
] | chenmyuan@163.com |
c465145274aa05d388e08fcefbcf65c084507859 | 8b9587f5548733ebf8c51af489abfae815355fd0 | /templateCode/kakao_R/kakao/chatroom_analysis.py | 032302f2db92fdc28d57fd2e172e486c310b3189 | [] | no_license | jyp0802/Butherfly | d81b525a01781dc7633eca68d577b0dd9cb1f57d | d80a1eea4103e0f2e4f1aaf91f7f425936003081 | refs/heads/master | 2021-08-20T07:50:55.580648 | 2017-11-28T15:44:49 | 2017-11-28T15:44:49 | 112,354,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,109 | py | #-*- coding: utf-8 -*-
# Revision: 6 (June 3th, 2017 3:10)
# Author: Claude Jin, Sanggyu Nam, Seunghyun Han (DiscoveryChannel)
# Logs
# Revision 1: user anonymization, dateline, firstline, message types(emoticon, photo, video, link)
# Revision 2: chatroomname, # of current participants, invitationline, multiple lines into one message
# Revision 3: Add support for chat log files exported in English and refactor the reader logic using classes.
# Revision 4: Replace calling open/close functions of file object to using a context block. This reduces the memory consumption so that it becomes possible to process large data.
# Revision 5: Add argument parsing and fix UTF-8 BOM issue.
# Revision 6: Add --username argument. It makes a specific user to be distinguished even after anonymization.
# references for regular expressionf
# http://regexr.com/
# http://devanix.tistory.com/296
from abc import ABCMeta, abstractmethod
from datetime import datetime, date, time, timedelta
from os import path
import io
import itertools
import re, sys, os
import glob
import csv
import json
class BaseChatLogReader(metaclass=ABCMeta):
"""Base reader for KakaoTalk chat log files."""
@property
@abstractmethod
def chatroomnameGroup(self):
raise NotImplementedError
@property
@abstractmethod
def chatroomnameIndividual(self):
raise NotImplementedError
@property
@abstractmethod
def dynamicsline(self):
raise NotImplementedError
@property
@abstractmethod
def dateline(self):
raise NotImplementedError
@property
@abstractmethod
def firstline(self):
raise NotImplementedError
def fileobj(self, f):
if not isinstance(f, (io.TextIOBase, str)):
raise TypeError('f should be a text I/O stream or a file name')
return (f if isinstance(f, io.TextIOBase)
else open(f, 'r', encoding='utf-8-sig'))
def readChatroomLog(self, filename, username):
usercounter = 0
dynamics = []
dates = []
messages = []
participants = dict()
msg = None
if username is not 'Empty':
usercounter += 1
participants[username] = "user" + str(usercounter)
with self.fileobj(filename) as f:
first_line = next(f)
m = self.chatroomnameGroup.match(first_line)
if m:
chatroomName = m.group("name")
participantCnt = m.group("current")
else:
m = self.chatroomnameIndividual.match(first_line)
chatroomName = m.group("name")
participantCnt = 2
lines = itertools.islice(f, 4, None)
for line in lines:
# Skip blank lines.
if len(line) <= 1:
continue
m = self.dynamicsline.match(line)
if m:
dynamics.append(m.groupdict())
continue
m = self.dateline.match(line)
if m:
msg = None
dates.append(m.groupdict())
continue
m = self.firstline.match(line)
if m:
if msg is not None:
messages.append(msg)
msg = self.firstline.match(line).groupdict()
# Anonymize users.
if msg["participant"] in participants.keys():
msg["participant"] = participants[msg["participant"]]
else:
usercounter += 1
participants[msg["participant"]] = "user" + str(usercounter)
msg["participant"] = "user" + str(usercounter)
continue
# Encountered a multi-line message.
if msg is None:
print("Multi-line Error")
print(line)
print(dynamics)
exit(1)
msg["message"] += " " + line
return [dates, messages, participants, participantCnt, chatroomName, dynamics]
class KoreanChatLogReader(BaseChatLogReader):
"""Reader for KakaoTalk chat log files exported in Korean."""
@property
def chatroomnameGroup(self):
return re.compile("^(?P<name>.*) \((?P<current>[0-9]*)명\)과 카카오톡 대화-1.txt$")
@property
def chatroomnameIndividual(self):
return re.compile("^(?P<name>.*)님과 카카오톡 대화-1.txt$")
@property
def dynamicsline(self):
return re.compile("^(?P<year>[0-9]{4})\. (?P<month>[0-9]{1,2})\. (?P<date>[0-9]{1,2})\. "
"(?P<meridiem>오전|오후) (?P<hour>[0-9]{1,2}):(?P<minute>[0-9]{1,2}): "
"(.*?)님이 (?:((?:(?:.*?)님(?:, )?(?:과 )?)+)을 초대했습니다|나갔습니다).$")
@property
def dateline(self):
return re.compile("^(?P<year>[0-9]{4})년 (?P<month>[0-9]{1,2})월 (?P<date>[0-9]{1,2})일 (?P<day>.)요일$")
@property
def firstline(self):
return re.compile("^(?P<year>[0-9]{4})\. (?P<month>[0-9]{1,2})\. (?P<date>[0-9]{1,2})\. (?P<meridiem>오전|오후) (?P<hour>[0-9]{1,2}):(?P<minute>[0-9]{1,2}), (?P<participant>.*?) : (?P<message>.*)")
class EnglishChatLogReader(BaseChatLogReader):
"""Reader for KakaoTalk chat log files exported in English."""
@property
def chatroomnameGroup(self):
return re.compile("^KakaoTalk Chats with (?P<name>.*) \((?P<current>[0-9]*) people\)-1.txt$")
@property
def chatroomnameIndividual(self):
return re.compile("^KakaoTalk Chats with (?P<name>.*)-1.txt$")
@property
def dynamicsline(self):
return re.compile("^(?P<month>Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) "
"(?P<date>[0-9]{1,2}), (?P<year>[0-9]{4}), "
"(?P<hour>[0-9]{1,2}):(?P<minute>[0-9]{1,2}) "
"(?P<meridiem>AM|PM): "
"(.*?) (?:invited ((?:(?:.*?)(?:, )?(?: and )?)+)|left this chatroom).$"
)
@property
def dateline(self):
return re.compile("^(?P<day>.{3}).*day, "
"(?P<month>January|February|March|April|May|June|July|August|September|October|November|December) "
"(?P<date>\d{1,2}), (?P<year>\d{4})$")
@property
def firstline(self):
return re.compile("^(?P<month>Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) "
"(?P<date>[0-9]{1,2}), (?P<year>[0-9]{4}), "
"(?P<hour>[0-9]{1,2}):(?P<minute>[0-9]{1,2}) "
"(?P<meridiem>AM|PM), "
"(?P<participant>.*?) : (?P<message>.*)")
class Analyzer:
"""Analyzer for KakaoTalk chat log"""
# message types
emoticon = re.compile("^\((?:emoticon|이모티콘)\) $")
photo = re.compile("^(사진|Photo)$")
video = re.compile("^(동영상|Video)$")
link = re.compile("^https?:\/\/.*")
maxInterval = 24
hour2Sec = 3600
day2Hour = 24
def __init__(self, lang, chatroomLogs, chatroomID):
self.chatroomLogs = chatroomLogs
self.lang = lang
self.dates = self.chatroomLogs[0]
self.messages = self.chatroomLogs[1]
self.participants = self.chatroomLogs[2]
self.participantCnt = self.chatroomLogs[3]
self.chatroomName = self.chatroomLogs[4]
self.dynamics = self.chatroomLogs[5]
self.users = dict()
self.chatroom = dict()
self.chatroom["chatroomID"] = chatroomID
self.chatroom["old"] = self.getOld(self.dates)
self.chatroom["pop"] = int(self.participantCnt)
self.chatroom["activePop"] = 0
self.chatroom["M"] = 0.0
self.chatroom["F"] = 0.0
self.chatroom["avgCharLen"] = 0
self.chatroom["dynamics"] = len(self.dynamics)
self.chatroom["avgInterval"] = self.getIntervalTime()
self.chatroom["avgReactTime"] = 0.0
if len(self.participants) == 2:
self.maxInterval = 24 # hour
else:
self.maxInterval = 8 # hour
for key, user in zip(self.participants.keys(), self.participants.values()):
self.users[user] = dict()
self.users[user]["chatroomID"] = chatroomID
self.users[user]["userID"] = user
self.users[user]["avgSeqMsg"] = []
self.users[user]["maxSeqMsg"] = 0
self.users[user]["reactionTime"] = []
self.users[user]["avgCharLen"] = []
self.users[user]["msgShare"] = 0.0
self.users[user]["msg"] = 0
self.users[user]["normal"] = 0
self.users[user]["photo"] = 0
self.users[user]["video"] = 0
self.users[user]["emoticon"] = 0
self.users[user]["link"] = 0
self.users[user]["activeness"] = 0
self.chatroom["M"] = round(self.chatroom["M"] / len(self.participants.keys()), 4)
self.chatroom["F"] = round(self.chatroom["F"] / len(self.participants.keys()), 4)
# Return all metrics
def getMetrics(self):
days = 54 # You can set the number of days that you want to measure.
self.getSequentialMsgs()
self.getReactionTimes()
self.getCharLen()
self.getActiveParticipants(days, days)
self.cntMsgType(days)
return [self.chatroom, self.users]
# Count the number of active participants for a specific period
def getActiveParticipants(self, period, n): # period : days, # n : times of activity
if self.lang == "kr":
date = "2017-05-24 00:00" # You must set the base date
FMT = '%Y-%m-%d %H:%M'
else :
date = "2017-May-24 00:00"
FMT = '%Y-%b-%d %H:%M'
for msg in reversed(self.messages):
interval = datetime.strptime(date, FMT) - datetime.strptime(self.convertTime(msg), FMT)
if timedelta.total_seconds(interval) > period * self.hour2Sec * self.maxInterval:
break
self.users[msg["participant"]]["activeness"] += 1
for user in self.users:
value = self.users[user]["activeness"]
if value >= n:
self.users[user]["activeness"] = "A"
self.chatroom["activePop"] += 1
else :
self.users[user]["activeness"] = "I"
# Count the number of messages for a specific period
def cntMsgType(self, period):
cntMsg = 0
if self.lang == "kr":
date = "2017-05-24 00:00" # You must set the base date
FMT = '%Y-%m-%d %H:%M'
else :
date = "2017-May-24 00:00"
FMT = '%Y-%b-%d %H:%M'
for msg in reversed(self.messages):
interval = datetime.strptime(date, FMT) - datetime.strptime(self.convertTime(msg), FMT)
if timedelta.total_seconds(interval) > period * self.hour2Sec * self.day2Hour:
break
cntMsg += 1
self.users[msg["participant"]]["msg"] += 1
if self.photo.match(msg["message"]):
self.users[msg["participant"]]["photo"] += 1
elif self.video.match(msg["message"]):
self.users[msg["participant"]]["video"] += 1
elif self.link.match(msg["message"]):
self.users[msg["participant"]]["link"] += 1
elif self.emoticon.match(msg["message"]):
self.users[msg["participant"]]["emoticon"] += 1
else:
self.users[msg["participant"]]["normal"] += 1
for user in self.users:
self.users[user]["msgShare"] = round(self.users[user]["msg"] / cntMsg, 4)
# Get interval from all pairs of users
def getIntervalTime(self):
intervals = []
for prev_msg, msg in zip(self.messages, self.messages[1:]):
interval = self.calculateInterval(prev_msg, msg)
if timedelta.total_seconds(interval) < self.hour2Sec * self.maxInterval:
intervals.append(interval)
if len(intervals) > 1:
avg_interval = timedelta.total_seconds(sum(intervals, timedelta()) / len(intervals))
else:
avg_interval = -1.0
return avg_interval
# Get some information about consecutive message from a user
def getSequentialMsgs(self):
cnt = 0
user = ""
for msg in self.messages:
if cnt is 0 :
cnt += 1
user = msg["participant"]
elif user == msg["participant"]:
cnt += 1
else :
self.users[user]["avgSeqMsg"].append(cnt)
cnt = 1
user = msg["participant"]
for user in self.users:
value = self.users[user]["avgSeqMsg"]
if len(value) > 0:
self.users[user]["avgSeqMsg"] = round(sum(value)/len(value), 4)
self.users[user]["maxSeqMsg"] = max(value)
else:
self.users[user]["avgSeqMsg"] = 0.0
self.users[user]["maxSeqMsg"] = 0
# Get some reaction information of a user from a latest message.
def getReactionTimes(self):
avgReactTime = 0
for prev_msg, msg in zip(self.messages, self.messages[1:]):
if prev_msg["participant"] == msg["participant"]:
continue
else:
interval = self.calculateInterval(prev_msg, msg)
# Skip the interval > 1 day
if timedelta.total_seconds(interval) < self.hour2Sec * self.maxInterval:
self.users[msg["participant"]]["reactionTime"].append(interval)
for user in self.users:
value = self.users[user]["reactionTime"]
if len(value) > 0:
self.users[user]["reactionTime"] = timedelta.total_seconds(sum(value, timedelta()) / len(value))
else:
self.users[user]["reactionTime"] = -1
avgReactTime += int(self.users[user]["reactionTime"])
self.chatroom["avgReactTime"] = round(avgReactTime / float(len(self.users)), 4)
# dynamics : Join / Exit
def getDynamics(self):
return len(self.dynamics)
# Count characters from a specific user
def getCharLen(self):
avgCharLen = 0
for msg in self.messages:
self.users[msg["participant"]]["avgCharLen"].append(len(msg["message"]))
for user in self.users:
avgCharLen += sum(self.users[user]["avgCharLen"])
if len(self.users[user]["avgCharLen"]) > 0:
self.users[user]["avgCharLen"] = round(sum(self.users[user]["avgCharLen"]) / float(len(self.users[user]["avgCharLen"])), 4)
else:
self.users[user]["avgCharLen"] = 0.0
self.chatroom["avgCharLen"] = round(avgCharLen / float(len(self.messages)), 4)
def calculateInterval(self, prev_msg, msg):
prev_time = self.convertTime(prev_msg)
time = self.convertTime(msg)
if self.lang == "kr":
FMT = '%Y-%m-%d %H:%M'
else:
FMT = '%Y-%b-%d %H:%M'
return datetime.strptime(time, FMT) - datetime.strptime(prev_time, FMT)
def convertTime(self, msg):
hour = int(msg['hour'])
if (msg['meridiem'] == "오후" or msg['meridiem'] == "PM") and hour is not 12:
hour = (hour+12)%24
elif (msg['meridiem'] == "오전" or msg['meridiem'] == "AM") and hour is 12:
hour = 0
return '{}-{}-{} {}:{}'.format(msg['year'], msg['month'], msg['date'], hour, msg['minute'])
# Estimate age of specific room
def getOld(self, datelist):
firstDate = datelist[0]
endDate = datelist[-1]
firstDate = '{}-{}-{}'.format(firstDate['year'], firstDate['month'], firstDate['date'])
endDate = '{}-{}-{}'.format(endDate['year'], endDate['month'], endDate['date'])
if self.lang == "kr":
FMT = '%Y-%m-%d'
else:
FMT = '%Y-%B-%d'
return timedelta.total_seconds(datetime.strptime(endDate, FMT) - datetime.strptime(firstDate, FMT))
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('chatlog', help='put your directory path here')
parser.add_argument('-C', '--client-lang',
choices=['kr', 'en'],
default='kr',
help='KakaoTalk client language')
parser.add_argument('-U', '--username',
default='Empty',
help='set specific KakaoTalk user to \'user1\'')
args = parser.parse_args()
ReaderClass = {
'kr': KoreanChatLogReader,
'en': EnglishChatLogReader
}
prefix = ""
cnt = 0
exportCsvDict = dict()
chatroomList = []
userList = []
f_chatroom = open(args.chatlog + "/chatroom.json", "w", encoding='utf-8')
f_user = open(args.chatlog + "/user.json", "w", encoding='utf-8')
for file in glob.glob(args.chatlog+"/*.txt"):
cnt += 1
print (file[len(args.chatlog)+1:])
if file[len(args.chatlog)+1:len(args.chatlog)+10] == "KakaoTalk":
args.client_lang = 'en'
else:
args.client_lang = 'kr'
reader = ReaderClass[args.client_lang]()
chatroomLogs = reader.readChatroomLog(file, args.username)
exportCsvDict.update(chatroomLogs[2]) # update participants
analyzer = Analyzer(args.client_lang, chatroomLogs, prefix + str(cnt))
chatroom, users = analyzer.getMetrics()
chatroomList.append(chatroom)
for user in users:
userList.append(users[user])
json.dump(chatroomList, f_chatroom)
json.dump(userList, f_user)
f_chatroom.close()
f_user.close() | [
"jyp0802@hotmail.com"
] | jyp0802@hotmail.com |
c08a05fcca3a38d83fa5e5c0f599e925d0a2c97b | 56a4d0d73c349aeaca7580ca248caf0cf893a8c5 | /w2/using_find.py | af6a320679d645b836416da8a37d141b0a0c269d | [] | no_license | alejo8591/m101 | 79e62e0110bcc3e6ca82ac02ae3cdcbe13d51c67 | d93d34a161ecede77defb9a6a3db389d4a9b0de8 | refs/heads/master | 2020-05-18T21:42:46.651036 | 2012-12-17T23:36:49 | 2012-12-17T23:36:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | #!/usr/bin/env python
import pymongo
import sys
connect = pymongo.Connection("mongodb://127.0.0.1", safe=True)
db = connect.school
scores = db.scores
def find():
print "Find, reporting for duty"
query = {'type':'exam'}
try:
iter = scores.find(query)
except:
print "Unexpected error:",sys.exc_info()[0]
sanity = 0
for doc in iter:
print doc
sanity+=1
if (sanity > 10):
break
def find_one():
print "find one, reporting for duty"
query = {'student_id':10}
try:
iter = scores.find_one(query)
except:
print "Unexpected error:",sys.exc_info()[0]
print iter
find_one()
find() | [
"alejo8591@gmail.com"
] | alejo8591@gmail.com |
1577628297a846c2742329c8bab3cffaef031e77 | b298e8a971bf51036c61d1a2c4d5d61421fc47c5 | /projects/migrations/0003_project_image.py | 050ce465f6f7210e6eaed3e7571b1d25dc47b5ea | [] | no_license | jrusso0818/my-personal-site | 5fe6dc1111669d5c8429703a304f7c08f6358327 | dc2a179e6affb38303445d7a0c72e48c32ba6a8a | refs/heads/master | 2023-03-07T22:03:58.395368 | 2021-02-06T06:53:32 | 2021-02-06T06:53:32 | 322,690,526 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | # Generated by Django 2.2.17 on 2020-12-18 21:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projects', '0002_remove_project_image'),
]
operations = [
migrations.AddField(
model_name='project',
name='image',
field=models.FilePathField(default=0, path='/img'),
preserve_default=False,
),
]
| [
"jrusso0818@gmail.com"
] | jrusso0818@gmail.com |
41da3a83b961f3970b11aac3c48a97022b4627c8 | 5f9c05b3bee55b0a311e7b0fba452ac13f60eefd | /py/coordinator.py | ae7cc1a7eebf83d2cbecbc980476ba3ad0f82a11 | [] | no_license | Ard1tti/serialdet | d0fb84704239e207009368b0341b6ab974fa7a29 | bc04065f1607ced571c141d59ff35084d144cb27 | refs/heads/master | 2021-01-10T23:13:02.670703 | 2016-10-12T09:03:45 | 2016-10-12T09:03:45 | 70,608,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,370 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Coordinator to help multiple threads stop when requested."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import sys
import threading
import time
import six
from tensorflow.python.framework import errors
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
class Coordinator(object):
"""A coordinator for threads.
This class implements a simple mechanism to coordinate the termination of a
set of threads.
#### Usage:
```python
# Create a coordinator.
coord = Coordinator()
# Start a number of threads, passing the coordinator to each of them.
...start thread 1...(coord, ...)
...start thread N...(coord, ...)
# Wait for all the threads to terminate.
coord.join(threads)
```
Any of the threads can call `coord.request_stop()` to ask for all the threads
to stop. To cooperate with the requests, each thread must check for
`coord.should_stop()` on a regular basis. `coord.should_stop()` returns
`True` as soon as `coord.request_stop()` has been called.
A typical thread running with a coordinator will do something like:
```python
while not coord.should_stop():
...do some work...
```
#### Exception handling:
A thread can report an exception to the coordinator as part of the
`should_stop()` call. The exception will be re-raised from the
`coord.join()` call.
Thread code:
```python
try:
while not coord.should_stop():
...do some work...
except Exception as e:
coord.request_stop(e)
```
Main code:
```python
try:
...
coord = Coordinator()
# Start a number of threads, passing the coordinator to each of them.
...start thread 1...(coord, ...)
...start thread N...(coord, ...)
# Wait for all the threads to terminate.
coord.join(threads)
except Exception as e:
...exception that was passed to coord.request_stop()
```
To simplify the thread implementation, the Coordinator provides a
context handler `stop_on_exception()` that automatically requests a stop if
an exception is raised. Using the context handler the thread code above
can be written as:
```python
with coord.stop_on_exception():
while not coord.should_stop():
...do some work...
```
#### Grace period for stopping:
After a thread has called `coord.request_stop()` the other threads have a
fixed time to stop, this is called the 'stop grace period' and defaults to 2
minutes. If any of the threads is still alive after the grace period expires
`coord.join()` raises a RuntimeException reporting the laggards.
```python
try:
...
coord = Coordinator()
# Start a number of threads, passing the coordinator to each of them.
...start thread 1...(coord, ...)
...start thread N...(coord, ...)
# Wait for all the threads to terminate, give them 10s grace period
coord.join(threads, stop_grace_period_secs=10)
except RuntimeException:
...one of the threads took more than 10s to stop after request_stop()
...was called.
except Exception:
...exception that was passed to coord.request_stop()
```
"""
def __init__(self, clean_stop_exception_types=None):
    """Create a new Coordinator.

    Args:
      clean_stop_exception_types: Optional tuple of Exception types that should
        cause a clean stop of the coordinator. If an exception of one of these
        types is reported to `request_stop(ex)` the coordinator will behave as
        if `request_stop(None)` was called. Defaults to
        `(tf.errors.OutOfRangeError,)` which is used by input queues to signal
        the end of input. When feeding training data from a Python iterator it
        is common to add `StopIteration` to this list.
    """
    if clean_stop_exception_types is None:
        clean_stop_exception_types = (errors.OutOfRangeError,)
    # Normalized to a tuple so isinstance() can consume it directly.
    self._clean_stop_exception_types = tuple(clean_stop_exception_types)
    # Protects all attributes.
    self._lock = threading.Lock()
    # Event set when threads must stop.
    self._stop_event = threading.Event()
    # Python exc_info to report.
    # If not None, it should hold the returned value of sys.exc_info(), which is
    # a tuple containing exception (type, value, traceback).
    self._exc_info_to_raise = None
    # True if we have called join() already.
    self._joined = False
    # Set of threads registered for joining when join() is called. These
    # threads will be joined in addition to the threads passed to the join()
    # call. It's ok if threads are both registered and passed to the join()
    # call.
    self._registered_threads = set()
def _filter_exception(self, ex):
    """Return `ex` unless it is one of the "clean stop" exception types.

    `ex` may be None, an exception instance, or a `sys.exc_info()` tuple;
    for a tuple, the exception value (second element) is what gets
    inspected.  Exceptions whose type appears in
    `_clean_stop_exception_types` are swallowed by returning None, so the
    stop is treated as a clean one.
    """
    actual = ex[1] if isinstance(ex, tuple) else ex
    if isinstance(actual, self._clean_stop_exception_types):
        # Ignore the exception: report a clean stop instead.
        return None
    return ex
def request_stop(self, ex=None):
    """Request that the threads stop.

    After this is called, calls to `should_stop()` will return `True`.

    Note: If an exception is being passed in, in must be in the context of
    handling the exception (i.e. `try: ... except Exception as ex: ...`) and not
    a newly created one.

    Args:
      ex: Optional `Exception`, or Python `exc_info` tuple as returned by
        `sys.exc_info()`. If this is the first call to `request_stop()` the
        corresponding exception is recorded and re-raised from `join()`.
    """
    with self._lock:
        # Clean-stop exception types are mapped to None here.
        ex = self._filter_exception(ex)
        # If we have already joined the coordinator the exception will not have a
        # chance to be reported, so just raise it normally. This can happen if
        # you continue to use a session have having stopped and joined the
        # coordinator threads.
        if self._joined:
            if isinstance(ex, tuple):
                six.reraise(*ex)
            elif ex is not None:
                # NOTE(touts): This is bogus if request_stop() is not called
                # from the exception handler that raised ex.
                six.reraise(*sys.exc_info())
        if not self._stop_event.is_set():
            # Only the FIRST reported exception is recorded; later ones are dropped.
            if ex and self._exc_info_to_raise is None:
                if isinstance(ex, tuple):
                    logging.info("Error reported to Coordinator: %s, %s",
                                 type(ex[1]),
                                 compat.as_str_any(ex[1]))
                    self._exc_info_to_raise = ex
                else:
                    logging.info("Error reported to Coordinator: %s, %s",
                                 type(ex),
                                 compat.as_str_any(ex))
                    self._exc_info_to_raise = sys.exc_info()
                # self._exc_info_to_raise should contain a tuple containing exception
                # (type, value, traceback)
                if (len(self._exc_info_to_raise) != 3 or
                        not self._exc_info_to_raise[0] or
                        not self._exc_info_to_raise[1]):
                    # Raise, catch and record the exception here so that error happens
                    # where expected.
                    try:
                        raise ValueError(
                            "ex must be a tuple or sys.exc_info must return the current "
                            "exception: %s"
                            % self._exc_info_to_raise)
                    except ValueError:
                        # Record this error so it kills the coordinator properly.
                        # NOTE(touts): As above, this is bogus if request_stop() is not
                        # called from the exception handler that raised ex.
                        self._exc_info_to_raise = sys.exc_info()
            self._stop_event.set()
def clear_stop(self):
    """Reset the coordinator so `should_stop()` returns False again.

    Clears any recorded exception and the joined flag as well.
    """
    with self._lock:
        self._joined = False
        self._exc_info_to_raise = None
        # Clearing an already-clear event is a no-op, so no guard is needed.
        self._stop_event.clear()
def should_stop(self):
    """Check if stop was requested.

    Returns:
      True if a stop was requested.
    """
    # Reads the event without taking self._lock; Event.is_set() is atomic.
    return self._stop_event.is_set()
@contextlib.contextmanager
def stop_on_exception(self):
    """Context manager to request stop when an Exception is raised.

    Code that uses a coordinator must catch exceptions and pass
    them to the `request_stop()` method to stop the other threads
    managed by the coordinator.

    This context handler simplifies the exception handling.
    Use it as follows:

    ```python
    with coord.stop_on_exception():
      # Any exception raised in the body of the with
      # clause is reported to the coordinator before terminating
      # the execution of the body.
      ...body...
    ```

    This is completely equivalent to the slightly longer code:

    ```python
    try:
      ...body...
    except Exception as ex:
      coord.request_stop(ex)
    ```

    Yields:
      nothing.
    """
    # The exception is reported to the coordinator and then suppressed
    # (the generator handles it without re-raising).
    # pylint: disable=broad-except
    try:
        yield
    except Exception as ex:
        self.request_stop(ex)
    # pylint: enable=broad-except
def wait_for_stop(self, timeout=None):
    """Wait till the Coordinator is told to stop.

    Args:
      timeout: Float. Sleep for up to that many seconds waiting for
        should_stop() to become True.  None means wait indefinitely.

    Returns:
      True if the Coordinator is told stop, False if the timeout expired.
    """
    return self._stop_event.wait(timeout)
def register_thread(self, thread):
    """Register a thread to join.

    Registered threads are joined (in addition to any passed explicitly)
    the next time `join()` is called.

    Args:
      thread: A Python thread to join.
    """
    with self._lock:
        self._registered_threads.add(thread)
def join(self, threads=None, stop_grace_period_secs=120):
    """Wait for threads to terminate.

    This call blocks until a set of threads have terminated. The set of thread
    is the union of the threads passed in the `threads` argument and the list
    of threads that registered with the coordinator by calling
    `Coordinator.register_thread()`.

    After the threads stop, if an `exc_info` was passed to `request_stop`, that
    exception is re-raised.

    Grace period handling: When `request_stop()` is called, threads are given
    'stop_grace_period_secs' seconds to terminate. If any of them is still
    alive after that period expires, a `RuntimeError` is raised. Note that if
    an `exc_info` was passed to `request_stop()` then it is raised instead of
    that `RuntimeError`.

    Args:
      threads: List of `threading.Threads`. The started threads to join in
        addition to the registered threads.
      stop_grace_period_secs: Number of seconds given to threads to stop after
        `request_stop()` has been called.

    Raises:
      RuntimeError: If any thread is still alive after `request_stop()`
        is called and the grace period expires.
    """
    # Threads registered after this call will not be joined.
    with self._lock:
        if threads is None:
            threads = self._registered_threads
        else:
            threads = self._registered_threads.union(set(threads))
        # Copy the set into a list to avoid race conditions where a new thread
        # is added while we are waiting.
        threads = list(threads)
    # Wait for all threads to stop or for request_stop() to be called.
    while any(t.is_alive() for t in threads) and not self.wait_for_stop(1.0):
        pass
    # If any thread is still alive, wait for the grace period to expire.
    # By the time this check is executed, threads may still be shutting down,
    # so we add a sleep of increasing duration to give them a chance to shut
    # down without loosing too many cycles.
    # The sleep duration is limited to the remaining grace duration.
    stop_wait_secs = 0.001
    while any(t.is_alive() for t in threads) and stop_grace_period_secs >= 0.0:
        time.sleep(stop_wait_secs)
        stop_grace_period_secs -= stop_wait_secs
        stop_wait_secs = 2 * stop_wait_secs
        # Keep the waiting period within sane bounds.
        # The minimum value is to avoid decreasing stop_wait_secs to a value
        # that could cause stop_grace_period_secs to remain unchanged.
        stop_wait_secs = max(min(stop_wait_secs, stop_grace_period_secs), 0.001)
    # List the threads still alive after the grace period.
    stragglers = [t.name for t in threads if t.is_alive()]
    # Terminate with an exception if appropriate.
    with self._lock:
        self._joined = True
        self._registered_threads = set()
        if self._exc_info_to_raise:
            six.reraise(*self._exc_info_to_raise)
        elif stragglers:
            raise RuntimeError(
                "Coordinator stopped with threads still running: %s" %
                " ".join(stragglers))
@property
def joined(self):
    """True once `join()` has completed; reset to False by `clear_stop()`."""
    return self._joined
# Threads for the standard services.
class LooperThread(threading.Thread):
    """A thread that runs code repeatedly, optionally on a timer.

    This thread class is intended to be used with a `Coordinator`. It repeatedly
    runs code specified either as `target` and `args` or by the `run_loop()`
    method.

    Before each run the thread checks if the coordinator has requested stop. In
    that case the looper thread terminates immediately.

    If the code being run raises an exception, that exception is reported to the
    coordinator and the thread terminates. The coordinator will then request all
    the other threads it coordinates to stop.

    You typically pass looper threads to the supervisor `Join()` method.
    """

    def __init__(self, coord, timer_interval_secs, target=None, args=None,
                 kwargs=None):
        """Create a LooperThread.

        Args:
          coord: A Coordinator.
          timer_interval_secs: Time boundaries at which to call Run(), or None
            if it should be called back to back.
          target: Optional callable object that will be executed in the thread.
          args: Optional arguments to pass to `target` when calling it.
          kwargs: Optional keyword arguments to pass to `target` when calling it.

        Raises:
          ValueError: If one of the arguments is invalid.
        """
        if not isinstance(coord, Coordinator):
            raise ValueError("'coord' argument must be a Coordinator: %s" % coord)
        super(LooperThread, self).__init__()
        # Daemon thread: never blocks interpreter shutdown on its own.
        self.daemon = True
        self._coord = coord
        self._timer_interval_secs = timer_interval_secs
        self._target = target
        if self._target:
            self._args = args or ()
            self._kwargs = kwargs or {}
        elif args or kwargs:
            # args/kwargs are meaningless without a target.
            raise ValueError("'args' and 'kwargs' argument require that you also "
                             "pass 'target'")
        # Ensure coord.join() waits for this thread even if it is not passed in.
        self._coord.register_thread(self)

    @staticmethod
    def loop(coord, timer_interval_secs, target, args=None, kwargs=None):
        """Start a LooperThread that calls a function periodically.

        If `timer_interval_secs` is None the thread calls `target(args)`
        repeatedly. Otherwise `target(args)` is called every `timer_interval_secs`
        seconds. The thread terminates when a stop of the coordinator is
        requested.

        Args:
          coord: A Coordinator.
          timer_interval_secs: Number. Time boundaries at which to call `target`.
          target: A callable object.
          args: Optional arguments to pass to `target` when calling it.
          kwargs: Optional keyword arguments to pass to `target` when calling it.

        Returns:
          The started thread.
        """
        looper = LooperThread(coord, timer_interval_secs, target=target, args=args,
                              kwargs=kwargs)
        looper.start()
        return looper

    def run(self):
        # Any exception from the loop body is reported to the coordinator
        # (and suppressed) by stop_on_exception().
        with self._coord.stop_on_exception():
            self.start_loop()
            if self._timer_interval_secs is None:
                # Call back-to-back.
                while not self._coord.should_stop():
                    self.run_loop()
            else:
                # Next time at which to call run_loop(), starts as 'now'.
                next_timer_time = time.time()
                while not self._coord.wait_for_stop(next_timer_time - time.time()):
                    next_timer_time += self._timer_interval_secs
                    self.run_loop()
            self.stop_loop()

    def start_loop(self):
        """Called when the thread starts."""
        pass

    def stop_loop(self):
        """Called when the thread stops."""
        pass
def run_loop(self):
"""Called at 'timer_interval_secs' boundaries."""
if self._target:
self._target(*self._args, **self._kwargs) | [
"blueconet@gmail.com"
] | blueconet@gmail.com |
cbf9caf78e847840f44e9cd9c6f7768baa91eee8 | de6f8466270d80d72fd696ebb8894d8ef02bca1e | /planner/search.py | ba2538bf9c59f687281c18739fd136a78549eba5 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | cocoflan/roundtrip | eac96231d1cc484e07317ef3068a661ee4dd474a | f2a740f7e6bb13acab979c6a7c09244c76466197 | refs/heads/master | 2022-04-30T13:59:21.736591 | 2021-07-01T21:13:26 | 2021-07-01T21:13:26 | 147,789,413 | 0 | 0 | null | 2022-04-22T20:57:11 | 2018-09-07T07:45:44 | JavaScript | UTF-8 | Python | false | false | 5,576 | py | import time
import moment
from splinter import Browser
from planner.models import Flight, FlightPrice, NoFlights
from planner.models import AirBNB
from urllib.parse import quote_plus
from math import ceil
import json
class Searcher:
    """Scrapes flight prices (Google Flights) and Airbnb listings with a
    Splinter-driven Chrome browser, caching results in Django models."""

    # Lazily-created shared Browser instance (class attribute).
    _browser = None

    def browser(self):
        # Create the Chrome browser on first use and reuse it afterwards.
        if self._browser is None:
            self._browser = Browser("chrome")
        return self._browser

    def quit(self):
        # Close the browser if one was ever created.
        if self._browser is not None:
            self._browser.quit()

    def airbnb(self, air, city, adults):
        """Return Airbnb search results (parsed JSON) for `city`/`adults`,
        fetching and caching them in the AirBNB model on first request.

        Note: the `air` parameter is rebound below by get_or_create; its
        incoming value is only used as the `airports` lookup field.
        """
        def airbnburl(l):
            loc = quote_plus(l)
            # NOTE(review): "¤cy" below looks like mojibake for "&currency"
            # — verify against the original source before relying on this URL.
            return "https://api.airbnb.com/v2/search_results?" \
                   "client_id=3092nxybyb0otqw18e8nh5nty&locale=en-US¤cy=EUR&_format=for_search_results&_limit=20&" \
                   "_offset=0&fetch_facets=true&guests=" + str(
                adults) + "&ib=false&ib_add_photo_flow=true&location=" + loc + "&min_bathrooms=0&" \
                   "min_bedrooms=" + str(
                ceil(int(adults) / 2)) + "&min_beds=" + str(adults) + "&min_num_pic_urls=10&" \
                   "price_max=210&price_min=30&sort=1&user_lat=52.370216&user_lng=4.895168"
        airbnbdata = dict()  # NOTE(review): unused; kept for compatibility
        air, created = AirBNB.objects.get_or_create(
            city=city,
            airports=air,
            adults=adults
        )
        if created:
            # Cache miss: fetch the API response through the browser and store it.
            browser = self.browser()
            browser.visit(airbnburl(city))
            air.data = browser.find_by_tag("pre").first.text
            air.save()
        return json.loads(air.data)

    def months(self, origin, destination, date, adults):
        """Return cached FlightPrice rows for the route/date, scraping two
        months of Google Flights calendar prices on a cache miss.

        Returns [] immediately if the route is known to have no flights.
        """
        if len(NoFlights.objects.filter(origin=origin, destination=destination)) > 0:
            return []
        entries = FlightPrice.objects.filter(
            origin=origin,
            destination=destination,
            date__year=moment.date(date).year,
            date__month=moment.date(date).month,
            adults=adults)
        if len(entries) == 0:
            browser = self.browser()
            browser.visit(
                'https://www.google.nl/flights/#search;f=' + origin + ';t=' + destination
                + ';d=' + date + ';tt=o;ti=t0800-2000;px=' + adults+";s=0")
            # Open the calendar view of lowest prices.
            el = browser.find_by_css('.OMOBOQD-G-q')
            el.first.click()
            time.sleep(3)  # wait for the calendar to render
            # First month's calendar table; rows [1:6] are the week rows.
            table = browser.find_by_css('.OMOBOQD-p-j').first
            trs = [tr for tr in table.find_by_css('tr')][1:6]
            count = 0
            for tr in trs:
                for td in tr.find_by_css('td'):
                    sp = td.text.split("\n")
                    # A priced day renders as "day\nprice".
                    if len(sp) == 2:
                        day = sp[0]
                        price = sp[1]
                        price = int(price.strip('€ ').replace('.', ''))
                        fdate = moment.date(date).replace(days=int(day)).strftime("%Y-%m-%d")
                        fp = FlightPrice(origin=origin, destination=destination, date=fdate, adults=adults, price=price)
                        fp.save()
                        count += 1
            # Second month's calendar table.
            fdate = moment.date(date).replace(days=1).add(months=1)
            table = browser.find_by_css('.OMOBOQD-p-o').first
            trs = [tr for tr in table.find_by_css('tr')][1:6]
            for tr in trs:
                for td in tr.find_by_css('td'):
                    sp = td.text.split("\n")
                    if len(sp) == 2:
                        day = sp[0]
                        price = sp[1]
                        price = int(price.strip('€ ').replace('.', ''))
                        fdate = moment.date(fdate).replace(days=int(day)).strftime("%Y-%m-%d")
                        fp = FlightPrice(origin=origin, destination=destination, date=fdate, adults=adults, price=price)
                        fp.save()
                        count += 1
            if count == 0:
                # No priced days found: remember the route has no flights.
                NoFlights(origin=origin, destination=destination).save()
        entries = FlightPrice.objects.filter(origin=origin, destination=destination, date=date, adults=adults)
        return entries
#
# def flight(self, origin, destination, date, adults):
# entries = Flight.objects.filter(origin=origin, destination=destination, date=date, adults=adults)
# flight = entries.first()
# if len(entries) == 0:
# browser = self.browser()
# browser.visit(
# 'https://www.google.nl/flights/#search;f=' + origin + ';t=' + destination
# + ';d=' + date + ';tt=o;ti=t0800-2000;px=' + adults)
#
# result = browser.find_by_css(".gwt-HTML a.EESPNGB-d-W.EESPNGB-d-s").first
# if result:
# url = result['href']
# data = result.text.split("\n")
# price = int(data[0].strip('€ '))
# time = data[2]
# company = data[3]
# duration = data[4]
# info = data[5]
# flight = Flight(
# origin=origin,
# destination=destination,
# date=date, adults=adults,
# price=price,
# time=time,
# company=company,
# duration=duration,
# info=info,
# url=url
# )
# flight.save()
#
# else:
# return None
# return flight
#
| [
"nanne@mycel.nl"
] | nanne@mycel.nl |
42d1a243ee0f6e26eac7dbafad461f06f46e2a6c | ef51e831de7776d273b5384a5ad5b110782ed6f2 | /python script/Linux/update.py | d0917c560b2e2963023a5eb020b1940fd8ffac25 | [] | no_license | sudkumar/Summer-Project-2014 | c6f08d025a17cdb4ed5a9a383e03590368bcb36a | 27f805c5171afc44d61a6b2f877f4428967b5519 | refs/heads/master | 2021-01-10T20:39:29.224258 | 2014-08-02T08:46:19 | 2014-08-02T08:46:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,306 | py | import serial
import MySQLdb
conn = MySQLdb.connect(host="localhost", user="root", passwd="name", db="attendance")
myc = conn.cursor()
# for linux
src = serial.Serial('/dev/ttyACM0', 9600)
# we can find the path by
# but remove the arduino first
# ls /dev/tty*
# now plugIn arduino and run the command again
# if there is any change in the result then that is our port name
ids = []
# first of all take the instructure ID
instructure_id = 113
while 1:
id = src.readline()
print id
try:
int(id)
except ValueError:
x = 0
else:
id = int(id)
if id == instructure_id:
query = "UPDATE `esc101` SET class_conducted = class_conducted + 1"
myc.execute(query)
conn.commit()
print "UPdate"
while 1:
id = src.readline()
try:
int(id)
except ValueError:
x = 0
else:
if id in ids:
continue
ids.append(id)
print id
id = int(id)
query = ("SELECT `roll_no` FROM `student_id` WHERE id = %d ") % (id)
myc.execute(query)
id = myc.fetchone()
query = ("UPDATE `esc101` SET class_attended = class_attended + 1 WHERE id = %s ") % (id)
myc.execute(query)
conn.commit()
query = "UPDATE `esc101` SET percentage = (class_attended/class_conducted)*100 "
myc.execute(query)
conn.commit()
conn.close()
| [
"luckysud4@gmail.com"
] | luckysud4@gmail.com |
63c03bea228aaf2bf3633ee0bc769131005dcc58 | 36a1a414847c29f406416db5c3e54916c647c8c1 | /LinkedList/mergeSortedll.py | b91d2e709627080389085640261b76232b8eb207 | [] | no_license | advaitp/Data-Structures-and-Algorithms | 07c2dd18bb9892dfd4e3ea1e6ab60c6a50bebdf5 | 83567a1dbd92677eb60711865ab08a7b996f3128 | refs/heads/main | 2023-05-15T09:42:16.254654 | 2021-05-28T10:47:43 | 2021-05-28T10:47:43 | 371,667,378 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,300 | py | class Node:
def __init__(self, data):
    # Singly linked list node: payload plus pointer to the next node.
    self.data = data
    self.next = None
def merge(head1, head2):
    """Merge two sorted linked lists and return the merged head.

    Existing nodes are relinked in place; no new nodes are created.
    Both heads must be non-None (same precondition as the original).
    Ties go to the first list, so the merge is stable.
    """
    # Pick the smaller head as the start of the merged list.
    if head1.data <= head2.data:
        merged_head = head1
        head1 = head1.next
    else:
        merged_head = head2
        head2 = head2.next
    tail = merged_head
    # Repeatedly splice the smaller front node onto the tail.
    while head1 is not None and head2 is not None:
        if head1.data <= head2.data:
            tail.next = head1
            head1 = head1.next
        else:
            tail.next = head2
            head2 = head2.next
        tail = tail.next
    # Exactly one list may have nodes left; attach it.
    tail.next = head1 if head1 is not None else head2
    return merged_head
def ll(arr):
    """Build a linked list from a Python list and return its head.

    Returns None for an empty input list.
    """
    head = None
    tail = None
    for value in arr:
        node = Node(value)
        if head is None:
            head = tail = node
        else:
            tail.next = node
            tail = node
    return head
def printll(head):
    """Print the list values on one line, space-separated, ending in a newline."""
    node = head
    while node is not None:
        print(node.data, end=' ')
        node = node.next
    print()
# Read the link list elements including -1
arr1=list(int(i) for i in input().strip().split(' '))
arr2=list(int(i) for i in input().strip().split(' '))
# Create a Linked list after removing -1 from list
l1 = ll(arr1[:-1])
l2 = ll(arr2[:-1])
l = merge(l1, l2)
printll(l)
| [
"advaitpatole@gmail.com"
] | advaitpatole@gmail.com |
8a58387defcb67cfd652d2f25881520516b9f458 | f741f7f070d150cffbb63f13666fec5dceb4c7c4 | /3.massives/5.py | 2c448a2722cdcbb8ded28131ad1b8cfff5321d19 | [] | no_license | mahhets/algorithms-and-data-structures | 7e2359c15a1cfd46200f9f2008a3a4050e2a5f40 | d1d198020e9d1f7f5085188678828520e4da36c8 | refs/heads/main | 2023-05-29T02:38:05.716730 | 2021-06-10T20:26:34 | 2021-06-10T20:26:34 | 367,147,905 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,096 | py | # Ассоциативный массив, словарь (ключ - значение других столбцов, как в SQL)
"""
Пользователь вводит кол-во предприятий, названия, плановую и фактическую прибыль каждого предприятия
Вычислить процент выполнения плана и вывести данные с предварительной фильтрацией
"""
k = int(input('Введите кол-во предприятий: '))
enterprises = {}
for i in range(1, k+1):
name = input('Название предприятия: ')
enterprises[name] = [float(input('Введите плановую прибыль:')),
float(input('Введите фактическую прибыль: '))]
enterprises[name].append(enterprises[name][1]/enterprises[name][0])
for i,item in enterprises.items():
if item[1] > 0:
print(f'Предприятие {i} заработало {item[1]} что составило {item[2] * 100:.2f}%')
| [
"the_mahh@mail.ru"
] | the_mahh@mail.ru |
baf02d6d1f7af369aaccc549e18727ba85123b46 | 9737aa767b5cb2baa4e1ac6af64e3acb614e9265 | /smyt/smyt/urls.py | c5bff7d884d2a379fd6c17cdb564a90188291241 | [] | no_license | leotrubach/smyt | 1b11072a89067370f69c55465b84c5a7099d576f | 8398a60ebf5d6a2b490301276c7d6866def1d9a5 | refs/heads/master | 2016-09-05T16:45:10.698129 | 2012-06-19T23:13:56 | 2012-06-19T23:13:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 768 | py | from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
from dynmodels.views import HomeView, list_models, model_data
# URL routing table (legacy Django patterns() style).
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'smyt.views.home', name='home'),
    # url(r'^smyt/', include('smyt.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),          # Django admin site
    url(r'^$', HomeView.as_view(), name='home'),        # landing page
    url(r'^models/$', list_models, name='get_models'),  # JSON list of dynamic models
    url(r'^modeldata/$', model_data, name='model_data'),  # rows of a dynamic model
)
| [
"leotrubach@gmail.com"
] | leotrubach@gmail.com |
dad02ea4f0608d5be5fe199a38b3181d93ce7718 | dc91e5c22ed3b9128c649392c3dfb04ec74a3976 | /Meter-Distributed/Meter_cfba.py | 27f4d7a28382ad20b632b72672dbbb06fc4e1af1 | [] | no_license | Agrawalayushi/Closeness-Factor-Based-Algorithm-for-Incremental-Clustering-of-Images- | 5744ac2774f0e0d54ba2ffbf6998bdcc80554f79 | 544bda25d861a8ee656afd60e26bfbbde07016fd | refs/heads/main | 2023-04-13T12:38:53.455953 | 2021-04-27T17:04:03 | 2021-04-27T17:04:03 | 361,854,229 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,509 | py | import pandas as pd
import math as mp
import time
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
def visualize(df_basic, df_incremental, df_merge):
    """Show three bar charts of cluster sizes: basic pass, incremental pass,
    and a stacked comparison from the merged frame.  Blocks on plt.show()."""
    # Cluster-size histogram for the basic clustering pass.
    ax = df_basic.groupby(['CNumber'])['CNumber'].count().plot.bar(title = "Basic...")
    ax.set_xlabel('Clusters')
    ax.set_ylabel('Frequency')
    plt.show()
    # Cluster-size histogram for the incremental pass.
    ax = df_incremental.groupby(['CNumber'])['CNumber'].count().plot.bar(title = "Incremental")
    ax.set_xlabel('Clusters')
    ax.set_ylabel('Frequency')
    plt.show()
    # Stacked bars: per cluster, how many rows came from each pass.
    ax = df_merge.groupby(['CNumber','Cluster_Type'])['Cluster_Type'].count().unstack(0).plot.bar(stacked=True, figsize=(8, 6))
    ax.legend(loc = 'center right',bbox_to_anchor = (1.4,0.5),ncol = 1)
    plt.title('iteration 1')
    plt.xlabel('clusters')
    plt.ylabel('No of Records')
    plt.show()
# merging basic and incremental dataset
def mergefile_graph(df_basic, df_incremental):
    """Tag and merge the basic and incremental cluster assignments.

    Adds a 'Cluster_Type' column to each input frame (in place), concatenates
    them, sorts by cluster number, writes the result to record.csv, and
    returns the merged frame.
    """
    df_basic['Cluster_Type'] = 'Basic_cluster'
    df_incremental['Cluster_Type'] = 'Incremental_1'
    # DataFrame.append() was deprecated in pandas 1.4 and removed in 2.0;
    # pd.concat is the supported equivalent.
    merged = pd.concat([df_basic, df_incremental])
    merged = merged.sort_values(by=['CNumber'])
    merged.to_csv('record.csv', index=False)
    print("df_basic length", len(merged))
    return merged
#merging training and test dataset
def mergefile_representative(dftrain, dftest):
    """Merge training and test cluster frames, sorted by CNumber, into record.csv.

    Returns nothing; the merged frame is only written to disk.
    """
    # DataFrame.append() was deprecated in pandas 1.4 and removed in 2.0;
    # pd.concat is the supported equivalent.
    merged = pd.concat([dftrain, dftest])
    merged = merged.sort_values(by=['CNumber'])
    merged.to_csv('record.csv', index=False)
    #(dftrain.groupby(['CNumber'],as_index = False).mean()).to_csv('record.csv')
#basic clustering code using cfba
def basic_cluster_lone(df, df1):
    """Basic (from-scratch) CFBA clustering over features T1, V1, W1.

    For each unassigned row i, computes a closeness score against every later
    unassigned row j; rows with close <= 1 are pulled into a cluster, then
    re-bucketed by fixed closeness thresholds.  Writes assignments to
    record.csv and returns (df1, df).
    (Indentation reconstructed — the source dump stripped leading whitespace.)
    """
    df['row_total'] = df.sum(axis = 1)
    print("after row total",df.head())
    count = 1
    closeness_val= []  # NOTE(review): never used; kept for compatibility
    # Mark every row as not yet assigned to a cluster.
    for i in range(len(df)):
        df.loc[i,'Flag']=False
    c1 = []
    for i in range(len(df)):
        if(df.Flag[i]==False):
            countercheck = []  # NOTE(review): never used; kept for compatibility
            # Row i seeds a new cluster.
            # NOTE(review): `count` is never incremented, so every seed gets 1.
            df1.loc[i,'CNumber'] = count
            df1.loc[i,'Closeness_Value'] = 0
            df.loc[i,'Flag']=True
            df.loc[i,'CNumber'] = count
            for j in range(i+1,len(df)):
                if(df.Flag[j]==False):
                    # Closeness: per-feature standardized difference squared,
                    # weighted by sqrt of the pairwise feature total.
                    c1 = df.row_total[i]/(df.row_total[i]+df.row_total[j])
                    d1 = df.T1[i]+df.T1[j]
                    d2=c1*d1-df.T1[i]
                    d3 = mp.sqrt(d1*c1*(1-c1))
                    prob1 = d2/d3
                    c_square = mp.pow(prob1,2)
                    weight = mp.sqrt(d1)
                    c = c_square * weight
                    #second feature
                    col2 = df.V1[i]+df.V1[j]
                    col21 = (c1*col2-df.V1[i])/mp.sqrt(col2*c1*(1-c1))
                    e2 = mp.pow(col21,2)
                    wei2 = mp.sqrt(col2)
                    c2 = e2 * wei2
                    #third feature
                    col4 = df.W1[i]+df.W1[j]
                    col41 = (c1*col4-df.W1[i])/mp.sqrt(col4*c1*(1-c1))
                    e4 = mp.pow(col41,2)
                    wei4 = mp.sqrt(col4)
                    c4 = e4 * wei4
                    # Weighted average of the three per-feature scores.
                    close1 = c+c2+c4
                    close2 = weight+wei2+wei4
                    close = close1/close2
                    counter = 1
                    if close<=1:
                        df1.loc[j,'CNumber'] = count
                        df1.loc[j,'Closeness_Value']=close
                        df.loc[j,'Flag']=True
                        df.loc[j,'CNumber']=count
                        # Re-bucket the row by fixed closeness thresholds.
                        if(close < 0.00056200894733631):
                            df1.loc[j,'CNumber']=counter
                            df.loc[j,'CNumber']=counter
                        elif(0.0014036781659371 < close < 0.0169289160263237):
                            df1.loc[j,'CNumber']=counter+1
                            df.loc[j,'CNumber']=counter+1
                        elif(0.0169289160263237 < close < 0.0450943423407067):
                            df1.loc[j,'CNumber']=counter+2
                            df.loc[j,'CNumber']=counter+2
                        elif(0.0450943423407067 < close < 0.128604750357539):
                            df1.loc[j,'CNumber']=counter+3
                            df.loc[j,'CNumber']=counter+3
                        elif(0.128604750357539 < close < 0.248836893559896):
                            df1.loc[j,'CNumber']=counter+4
                            df.loc[j,'CNumber']=counter+4
                        elif(0.248836893559896 < close < 0.486936879396661):
                            df1.loc[j,'CNumber']=counter+5
                            df.loc[j,'CNumber']=counter+5
                        elif(0.486936879396661 < close < 0.619630852965444):
                            df1.loc[j,'CNumber']=counter+6
                            df.loc[j,'CNumber']=counter+6
                        else:
                            df1.loc[j,'CNumber']=counter+7
                        # Persist assignments after each absorbed row.
                        df1.to_csv('record.csv')
    df1 = df1.sort_index()
    df1 = df1.sort_values(by = 'CNumber')
    df1.to_csv('record.csv')
    #add name of csv
    df =df.drop(['Flag','row_total'],axis=1)
    return df1,df
# incremental clustering code using cfba
def incremental_cluster(dftest, df2):
    """Incremental CFBA pass: assign new (test) rows to the clusters stored
    in record.csv; rows close to no representative become a new cluster.

    Returns (df2, dftest) and rewrites record.csv.
    (Indentation reconstructed — the source dump stripped leading whitespace.)
    """
    df = pd.read_csv('record.csv')
    print("test data",df.head())
    # Drop the index column; remaining columns are the cluster representatives.
    df_rep = df.iloc[:,1:]
    df_rep['row_total'] = df_rep.sum(axis =1)
    print(df_rep.head())
    whole = []         # indices matched to some cluster
    outlier = []       # test indices that failed the closeness check
    fclose=[]
    outlierclose=[]
    dftest['row_total'] = dftest.sum(axis =1)
    for i in range(len(dftest)):
        dftest.loc[i,'Flag']=False
    c1 = []
    for i in range(len(df_rep)):
        whole.append(i)
        for j in range(len(dftest)):
            if(dftest.Flag[j]==False):
                # Same per-feature closeness computation as basic_cluster_lone,
                # but between representative i and test row j.
                c1 = df_rep.row_total[i]/(df_rep.row_total[i]+dftest.row_total[j])
                d1 = df_rep.T1[i]+dftest.T1[j]
                d2=c1*d1-df_rep.T1[i]
                d3 = mp.sqrt(d1*c1*(1-c1))
                prob1 = d2/d3
                c_square = mp.pow(prob1,2)
                weight = mp.sqrt(d1)
                c = c_square * weight
                # feature - Department
                col2 = df_rep.V1[i]+dftest.V1[j]
                col21 = (c1*col2-df_rep.V1[i])/mp.sqrt(col2*c1*(1-c1))
                e2 = mp.pow(col21,2)
                wei2 = mp.sqrt(col2)
                c2 = e2 * wei2
                #feature
                col4 = df_rep.W1[i]+dftest.W1[j]
                col41 = (c1*col4-df_rep.W1[i])/mp.sqrt(col4*c1*(1-c1))
                e4 = mp.pow(col41,2)
                wei4 = mp.sqrt(col4)
                c4 = e4 * wei4
                close1 = c+c2+c4
                close2 = weight+wei2+wei4
                close = close1/close2
                if close<=1:
                    # Close enough: inherit cluster number from representative i.
                    whole.append(j)
                    df2.loc[j,'CNumber'] = df.CNumber[i]
                    df2.loc[j,'Closeness Value']=close
                    dftest.loc[j,'Flag']=True
                    dftest.loc[j,'CNumber']=df.CNumber[i]
                    #add name of csv of incremental
                    df2.to_csv('record.csv')
                else:
                    outlier.append(j)
                    outlierclose.append(close)
                    fclose.append(0)
    # Test rows never matched by any representative form a new cluster.
    resultant_list = list(set(outlier)-set(whole))
    # NOTE(review): len(...) != None is always True — likely meant "> 0".
    if(len(resultant_list)!=None):
        dftest.loc[resultant_list,'CNumber']=i+2
        dftest.loc[resultant_list,'Flag']=True
        df2.loc[resultant_list,'CNumber']=i+2
    df2 = df2.fillna(-1)
    df2 = df2.sort_index()
    df2 = df2.sort_values(by = 'CNumber')
    #add name of csv
    df2.to_csv('record.csv')
    dftest =dftest.drop(['Flag','row_total'],axis=1)
    return df2,dftest
def scale(pandas_df):
    """Min-max scale features T1/V1/W1 to [0, 10], split 80/20 train/test,
    and return (scaled_train, scaled_test, inverse_train, inverse_test).

    Note: the split is random (no fixed random_state), so results vary per call.
    """
    features = ['T1','V1','W1']
    features_v = pandas_df[features]
    scaler = MinMaxScaler(feature_range = (0,10))
    scaler_features = scaler.fit_transform(features_v)
    print("normalised dataset with MinMaxScaler",scaler_features)
    features_train, features_test = train_test_split(scaler_features, test_size =0.2)
    train1 = pd.DataFrame(features_train,columns = ['T1','V1','W1'])
    test1 = pd.DataFrame(features_test,columns = ['T1','V1','W1'])
    print("length of training and testing data",len(train1),len(test1))
    # Map both splits back to the original feature scale for reporting.
    df_inverse = scaler.inverse_transform(features_train)
    df1 = pd.DataFrame(df_inverse,columns = ['T1','V1','W1'])
    df2 = scaler.inverse_transform(features_test)
    df2 = pd.DataFrame(df2,columns = ['T1','V1','W1'])
    print("length of Inversed data",len(df1),len(df2))
    return train1,test1,df1,df2
| [
"noreply@github.com"
] | Agrawalayushi.noreply@github.com |
49245674d0105a3e9f82730ebdb250b147a85355 | eb685438961de82301a31e0798630ae1844a82f8 | /migrations/versions/4571ea43dd63_.py | 74d7288c4070368728d1028206f97dc6bcdd35f6 | [] | no_license | Peter-White/base_64_test_python | 2fcd28f517f74b7dac6a68622ce1fd8d6d81373e | 302e5aa13260d4fe13b057fec93c3942320fc7ce | refs/heads/master | 2020-07-16T02:43:19.593580 | 2019-09-09T14:36:39 | 2019-09-09T14:36:39 | 205,700,865 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 744 | py | """empty message
Revision ID: 4571ea43dd63
Revises:
Create Date: 2019-08-26 15:47:45.221174
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '4571ea43dd63'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Apply this migration: create the place_holder_image table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('place_holder_image',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('image', sa.LargeBinary(), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###


def downgrade():
    """Revert this migration: drop the place_holder_image table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('place_holder_image')
    # ### end Alembic commands ###
| [
"pwhitedeveloper@gmail.com"
] | pwhitedeveloper@gmail.com |
f45845f3775295ca40384d997d67d25854ded28e | 72a3977adc460ec70d0ebd8177cea1bb22a06cbf | /src/Classes/TennisSet.py | 5a0109b5e881dab41bb21cd34e6c496b92627d7c | [] | no_license | andreistaicu1/TennisScoreSimulator | 0ff6307ee1f1019a42fa70f5557cacb478c1d59e | 62fc0cc21ade84526190b2c840b4dea79bfb32d6 | refs/heads/master | 2023-07-23T05:37:00.521304 | 2021-09-01T20:09:48 | 2021-09-01T20:09:48 | 349,536,172 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,374 | py | from src.Classes.TennisGame import *
from src.Classes.TennisTiebreak import *
class TennisSet:
    """One tennis set: plays games (and an optional final tiebreak) between
    two players and tracks the per-set score and winner."""

    def __init__(self, set_length, player1, player2, ad, serving, will_breaker):
        """
        :param set_length: int - When the set stops (games needed to win)
        :param player1: Players object - player 1
        :param player2: Players object - player 2
        :param ad: Boolean - true if advantage ("ad") scoring is used
        :param serving: Boolean - true if player 1 serves first
        :param will_breaker: Boolean - True if there is a breaker in the last set
        """
        self.set_length = set_length
        self.ad = ad
        self.player_array = [player1, player2]
        self.serving = serving
        self.will_breaker = will_breaker
        self.player1G = 0       # games won by player 1
        self.player2G = 0       # games won by player 2
        self.data = []          # played TennisGame/TennisTiebreak objects, in order
        self.toText = {}        # compiled summary (see compile())
        self.winner = 0         # 0 = undecided, 1 or 2 = winning player
        self.setOver = False
        self.tiebreaker = False  # True while the score forces a tiebreak

    def play_set(self):
        """
        Plays the set on its own
        :return: nothing
        """
        while not self.setOver:
            # A tiebreak is triggered when both players reach set_length games.
            self.tiebreaker = self.player1G == self.player2G and self.player1G == self.set_length
            if self.tiebreaker and self.will_breaker:
                new_tiebreak = TennisTiebreak(self.player_array[0], self.player_array[1], True, 7)
                new_tiebreak.play_breaker()
                self.iterate_set_breaker(new_tiebreak)
            else:
                new_game = TennisGame(self.player_array[0], self.player_array[1], self.serving, self.ad)
                new_game.play_game()
                self.iterate_set_game(new_game)
            # Service alternates every game.
            self.serving = not self.serving

    def iterate_set_game(self, current_game):
        """
        Given a game, updates internal data
        :param current_game: TennisGame object - game to be played
        :return: nothing
        """
        # Regular games are ignored once a tiebreak is in progress.
        if self.tiebreaker and self.will_breaker:
            return
        game_winner = current_game.winner
        if game_winner == 0:
            # Unresolved game ends the set with no winner.
            self.setOver = True
        elif game_winner == 1:
            self.player1G += 1
        else:
            self.player2G += 1
        self.data.append(current_game)
        # Set is won at set_length games with a two-game margin.
        if self.player1G >= self.set_length or self.player2G >= self.set_length:
            if self.player1G - self.player2G > 1:
                self.winner = 1
                self.setOver = True
            elif self.player2G - self.player1G > 1:
                self.winner = 2
                self.setOver = True

    def iterate_set_breaker(self, current_breaker):
        """
        Given a tiebreaker updates internal data
        :param current_breaker: TennisTiebreak object
        :return: nothing
        """
        if self.tiebreaker and self.will_breaker:
            self.winner = current_breaker.winner
            if self.winner == 0:
                self.setOver = True
            elif self.winner == 1:
                self.player1G += 1
            else:
                self.player2G += 1
            self.data.append(current_breaker)
            # A tiebreak always ends the set.
            self.setOver = True

    def compile(self):
        """
        Consolidates internal data in a dictionary that can be easily printed to a text file
        :return: nothing
        """
        # NOTE(review): only these two fields are recorded; scores/winner are not.
        self.toText['serving'] = self.serving
        self.toText['will_breaker'] = self.will_breaker
| [
"55329808+andreistaicu1@users.noreply.github.com"
] | 55329808+andreistaicu1@users.noreply.github.com |
323e6dbcabc39627ea900df0e1e0063631c79908 | 9222f00f40ae22ec71a61cd6627b11ec89958c5b | /v0_2017/lib/common.py | 386dee3834e99a3849fe778b1623f1ff1c74da46 | [] | no_license | ebensh/pinball_cv | 635d051b296dcdfb6c7c001709ef30a710f96549 | e2e67fd3f7e33f9583a46f2a63130c1cec791315 | refs/heads/master | 2021-07-15T16:35:07.878569 | 2018-12-27T05:03:57 | 2018-12-27T05:03:57 | 106,141,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,640 | py | from collections import namedtuple
import inspect
import json
import sys

import cv2
import matplotlib.pyplot as plt
import numpy as np
def eprint(*args, **kwargs):
    """print() to stderr instead of stdout; accepts the same arguments.

    Fix: `sys` was referenced here but never imported anywhere in the module,
    so the first call raised NameError (import added at the top of the file).
    """
    print(*args, file=sys.stderr, **kwargs)
def numpy_print_cols(cols=160):
    """Widen numpy's printed line length.

    The original poked the private attribute np.core.arrayprint._line_width,
    which is not part of numpy's public API and has been removed/relocated in
    newer releases; np.set_printoptions(linewidth=...) is the supported route.
    """
    np.set_printoptions(linewidth=cols)
# http://ipython-books.github.io/featured-01/
def get_data_base(arr):
    """Walk the .base chain of *arr* and return the ndarray that owns the data."""
    owner = arr
    while isinstance(owner.base, np.ndarray):
        owner = owner.base
    return owner
def arrays_share_data(x, y):
    """True when x and y are views over the same underlying data owner."""
    return get_data_base(x) is get_data_base(y)
def print_by_channel(img):
    """Print each channel of an HxWxC image as its own 2-D array.

    Fix: the original used the Python-2-only `xrange`, which raises NameError
    under Python 3 (this module already relies on Python 3 `print(..., file=)`).
    """
    rows, cols, channels = img.shape
    for channel in range(channels):
        print(img[:, :, channel])
def display_image(img, title=None, show=True):
    """Show *img* in a cv2 window, auto-titled with the caller's file:line."""
    if not show:
        return
    if title is None:
        # Identify the call site so repeated debug windows get distinct names
        # without uniquely naming each one by hand.
        caller = inspect.stack()[1]
        title = "{0}:{1}".format(caller[1], caller[2])
    cv2.imshow(title, img)
# This is *INEFFICIENT* and is only intended for quick experimentation.
# http://blog.hackerearth.com/descriptive-statistics-with-Python-NumPy
# Per-frame statistics container. Only the mean survives today: the
# min/max/ptp/median/variance fields were disabled for speed, and the dead
# commented-out code has been removed for readability.
NamedStatistics = namedtuple('NamedStatistics', ['mean'])
def get_named_statistics(planes):
    """Return NamedStatistics computed over axis 0 of *planes* (frame stack).

    The mean is accumulated in float64 and truncated back to uint8.
    """
    return NamedStatistics(
        mean=np.mean(planes, axis=0, dtype=np.float64).astype(np.uint8))
def print_statistics(statistics, printer):
    """Add every field of a NamedStatistics tuple to *printer*, labeled by name."""
    for name in statistics._fields:
        printer.add_image(getattr(statistics, name), name)
class FrameBuffer(object):
    """Fixed-size ring buffer of video frames, kept in both color and grayscale.

    Allocates 2 * num_frames slots so that a contiguous logical window of
    num_frames entries can always be read via numpy take() in 'wrap' mode.
    """
    def __init__(self, num_frames=1, shape=(640, 480, 3), dtype=np.uint8):
        # Create our frame buffers. We don't store them together because while it
        # would make the rolling easier it would also require the gray version to
        # be stored with three channels.
        self._BUFFER_LENGTH = 2 * num_frames # Left here in case we want to increase.
        self._num_frames = num_frames
        self._idx = 0  # start of the current logical window within the ring
        self._shape = shape
        self._frames = np.zeros((self._BUFFER_LENGTH,) + shape, dtype=dtype)
        self._frames_gray = np.zeros((self._BUFFER_LENGTH,) + shape[0:2], dtype=dtype)
    def append(self, frame):
        # New frames land just past the current window; the window then slides
        # forward by one, dropping the oldest frame.
        idx_to_insert = (self._idx + self._num_frames) % self._BUFFER_LENGTH
        self._frames[idx_to_insert] = frame
        self._frames_gray[idx_to_insert] = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        self._idx = (self._idx + 1) % self._BUFFER_LENGTH
    def get_view(self, start, stop, color=True):
        """Read-only view of frames [start, stop) relative to the window start.

        None defaults: start=0, stop=num_frames (the whole window). Indices
        wrap around the ring via mode='wrap'.
        """
        view = None
        if start is None: start = 0
        if stop is None: stop = self._num_frames
        start += self._idx
        stop += self._idx
        if color:
            view = self._frames.take(range(start, stop), axis=0, mode='wrap').view()
        else:
            view = self._frames_gray.take(range(start, stop), axis=0, mode='wrap').view()
        view.setflags(write=False)
        return view
    def get_shape(self, color=True):
        # Per-frame shape: (rows, cols, 3) for color, (rows, cols) for gray.
        if color: return self._shape
        return self._shape[0:2]
    # Useful for debugging.
    def get_buffers(self):
        # Returns the raw ring contents (not rotated to window order).
        return cv2.hconcat(self._frames), cv2.hconcat(self._frames_gray)
class FramePrinter(object):
    """Collects labeled images and concatenates them into one captioned strip."""
    def __init__(self):
        self._images = []  # list of (bgr_image, caption) pairs
    def add_image(self, img, caption):
        """Store *img* (promoted to 3-channel BGR if grayscale) with a caption."""
        if len(img.shape) < 3 or img.shape[2] != 3:
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        self._images.append((img, caption))
    def get_combined_image(self):
        """Return all stored images side by side, each captioned at its bottom."""
        font = cv2.FONT_HERSHEY_SIMPLEX
        space = 10  # pixels of padding between images
        max_rows = 0
        total_cols = 0
        for img, _ in self._images:
            rows, cols = img.shape[0], img.shape[1]
            max_rows = max(max_rows, rows)
            total_cols += cols
        total_cols += (len(self._images) - 1) * space
        # BUG FIX: allocate using max_rows (the tallest image) instead of the
        # `rows` value left over from the last loop iteration, so images of
        # different heights all fit in the canvas.
        combined_image = np.zeros((max_rows, total_cols, 3), dtype=np.uint8)
        current_col = 0
        for img, caption in self._images:
            rows, cols = img.shape[0], img.shape[1]
            combined_image[0:rows, current_col:current_col + cols] = img
            # Caption sits at the bottom-left corner of each image.
            cv2.putText(combined_image, caption, (current_col, rows), font,
                        1, (255, 255, 255), 2, cv2.LINE_AA)
            current_col += cols + space
        return combined_image
def get_region_as_mask(rows, cols, region):
    """Rasterize the convex polygon *region* into a rows x cols uint8 mask."""
    canvas = np.zeros((rows, cols), np.uint8)
    cv2.fillConvexPoly(canvas, region, 255)
    return canvas
def get_perspective_transform(rows, cols, region):
    """Homography mapping the quad in *region* onto the full rows x cols image.

    Uses the first four points of *region* (clockwise from top-left); the
    final point is dropped, matching the original's region[:-1] convention.
    """
    destination = np.array(
        [(0, 0),                # top left
         (cols - 1, 0),         # top right
         (cols - 1, rows - 1),  # bottom right
         (0, rows - 1)],        # bottom left
        dtype=np.float32)
    source = region[:-1].astype(np.float32)
    return cv2.getPerspectiveTransform(source, destination)
# IMPORTANT!!! Subtraction will WRAP with uint8 if it goes negative!
def trim_to_uint8(arr):
    """Clamp *arr* to [0, 255] and cast to uint8 (avoids wraparound)."""
    clamped = np.clip(arr, 0, 255)
    return clamped.astype(np.uint8)
def extrapolate(xy1, xy2):
    """Continue the line through xy1 -> xy2 one more step past xy2."""
    (x1, y1), (x2, y2) = xy1, xy2
    # xy2 + (xy2 - xy1), written in closed form.
    return (2 * x2 - x1, 2 * y2 - y1)
def lerp(xy1, xy2):
    """Midpoint of xy1 and xy2 (linear interpolation at t = 0.5)."""
    (x1, y1), (x2, y2) = xy1, xy2
    return ((x1 + x2) / 2, (y1 + y2) / 2)
def dist(xy1, xy2):
    """Squared Euclidean distance between xy1 and xy2 (no sqrt is taken).

    Callers compare these values against each other, so the monotonic
    squared form is sufficient and cheaper.
    """
    dx = xy2[0] - xy1[0]
    dy = xy2[1] - xy1[1]
    return dx * dx + dy * dy
def in_bounds(rows, cols, xy):
    """True when point xy = (x, y) lies inside a rows x cols image."""
    x, y = xy
    return 0 <= x < cols and 0 <= y < rows
# https://matplotlib.org/users/image_tutorial.html
# http://jakevdp.github.io/mpl_tutorial/tutorial_pages/tut2.html
def p_gray(*args, path=None):
    """Plot any number of grayscale images side by side; optionally save to *path*."""
    images = list(args)
    fig, axes = plt.subplots(1, len(images), squeeze=False)
    fig.set_size_inches(20, 10)
    for image, axis in zip(images, axes[0]):
        axis.imshow(image, cmap='gray')
    if path: plt.savefig(path, bbox_inches='tight')
    plt.show()
def p_bgr(img, path=None):
    """Plot a BGR image (channel order reversed so matplotlib's RGB is correct)."""
    plt.figure(figsize=(20, 10))
    plt.imshow(img[:, :, ::-1])
    if path: plt.savefig(path, bbox_inches='tight')
    plt.show()
def p_heat(img, path=None):
    """Plot *img* normalized to [0, 1] as an inferno heatmap."""
    plt.figure(figsize=(20, 10))
    normalized = 1.0 * img / img.max()
    plt.imshow(normalized, cmap='inferno', interpolation='nearest')
    if path: plt.savefig(path, bbox_inches='tight')
    plt.show()
def p_histogram(img, path=None):
    """Plot a 32-bin histogram of *img*'s values; optionally save to *path*."""
    plt.figure(figsize=(6, 3))
    plt.hist(img, bins=32)
    if path: plt.savefig(path, bbox_inches='tight')
    plt.show()
def load_json_keypoints_as_dict(path):
    """Load a JSON keypoints file into {frame_index: [[x, y, size], ...]}.

    Coordinates are rounded to the nearest int. Frame indices must form a
    dense 0..N-1 range; anything else trips the assert (corrupt file).
    """
    with open(path, 'r') as keypoints_file:
        raw = json.load(keypoints_file)
    frame_to_keypoints = {
        int(frame_str): [[int(round(x)), int(round(y)), int(round(size))]
                         for x, y, size in keypoints]
        for frame_str, keypoints in raw.items()
    }
    assert set(frame_to_keypoints.keys()) == set(range(len(frame_to_keypoints)))
    return frame_to_keypoints
def load_json_keypoints_as_list(path):
    """Same as load_json_keypoints_as_dict, but as a list ordered by frame index."""
    # The dict is guaranteed dense (0..N-1) but possibly iterated out of order,
    # so sort the keys before flattening.
    by_frame = load_json_keypoints_as_dict(path)
    return [by_frame[frame_ix] for frame_ix in sorted(by_frame)]
def get_all_frames_from_video(path):
    """Decode every frame of the video at *path* into a single ndarray."""
    capture = cv2.VideoCapture(path)
    frames = []
    while capture.isOpened():
        ok, frame = capture.read()
        if not ok:
            break
        frames.append(frame)
    capture.release()
    return np.array(frames)
def keypoints_to_mask(rows, cols, keypoints, fixed_radius=None, thickness=-1):
    """Draw keypoints into a rows x cols uint8 mask.

    A radius of 1 becomes a single lit pixel; anything else is drawn as a
    circle. fixed_radius (if truthy) overrides each keypoint's own size.
    """
    mask = np.zeros([rows, cols], np.uint8)
    for x, y, size in keypoints:
        radius = fixed_radius if fixed_radius else size
        if radius == 1:
            mask[y, x] = 255
        else:
            cv2.circle(mask, (x, y), radius, color=255, thickness=thickness)
    return mask
def get_all_keypoint_masks(rows, cols, frame_to_keypoints_list, fixed_radius=None, thickness=-1):
    """Render one keypoint mask per frame; returns an (N, rows, cols) array."""
    return np.array([
        keypoints_to_mask(rows, cols, keypoints, fixed_radius, thickness)
        for keypoints in frame_to_keypoints_list
    ])
def hconcat_ndarray(imgs):
    """Horizontally concatenate a (num, rows, cols) stack into (rows, num*cols)."""
    num_imgs, rows, cols = imgs.shape[:3]
    # Interleave the image axis with the row axis, then flatten row-wise.
    side_by_side = imgs.swapaxes(0, 1)
    return side_by_side.reshape([rows, num_imgs * cols])
def convert_bgr_planes_to_gray(planes):
    """Grayscale a (num, rows, cols, 3) BGR frame stack with one cvtColor call.

    The stack is flattened to one tall image so cv2 does a single pass, then
    reshaped back to (num, rows, cols).
    """
    num, rows, cols, channels = planes.shape
    stacked = planes.reshape((num * rows, cols, channels))
    stacked_gray = cv2.cvtColor(stacked, cv2.COLOR_BGR2GRAY)
    return stacked_gray.reshape((num, rows, cols))
def add_bgr_and_gray(img_color, img_gray):
    """Saturating add of a gray image (promoted to BGR) onto a color image."""
    gray_as_bgr = cv2.cvtColor(img_gray, cv2.COLOR_GRAY2BGR)
    return cv2.add(img_color, gray_as_bgr)
| [
"ebensh@gmail.com"
] | ebensh@gmail.com |
8727efc17619467964719851112b2714b7a40e24 | 3bdad3e626daaf079d316a8ce56b79af095327d4 | /api/migrations/0002_maincycle_user.py | 575c15fda6f25e37b1e083211a8d6d71084f8e60 | [] | no_license | kirussshin/djangoClicker | 3ab2ba798c993d6a42bbb352ca2951b5f094bf82 | 1922abd13289529f0f314cd744d137e84228aaae | refs/heads/master | 2023-06-01T19:34:37.280301 | 2021-06-13T18:54:00 | 2021-06-13T18:54:00 | 376,618,804 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 577 | py | # Generated by Django 3.2.4 on 2021-06-13 17:02
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (3.2.4): adds a required one-to-one
    `user` link from MainCycle to the project's user model."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('api', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='maincycle',
            name='user',
            # default=0 backfills existing rows. NOTE(review): presumably a
            # placeholder pk — confirm it resolves against existing data.
            field=models.OneToOneField(default=0, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"egorka-knopka@mail.ru"
] | egorka-knopka@mail.ru |
42e64f9eabca1da6ef4c05025e9e1c63b6a4cea6 | 1b037639fad280142ee84d10412c4bc1d729148c | /act_complementaryMedicine/db_method/insert.py | fb71ce5dc7599f5a985194c1f37fbb51b23a7644 | [] | no_license | Tohsaka-Rin/act-cm | fb36f5a16638f52646c2834a0d73c1fb1fab1f1d | c99dae527510fc0352fac29df36bd3090d361b89 | refs/heads/master | 2021-01-23T01:55:42.703190 | 2017-03-22T04:27:28 | 2017-03-22T04:27:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,500 | py | # -*- coding:UTF-8 -*-
from act_db.models import DoctorInfo,GroupInfo,PatientGroup,PatientInfo,RelationInfo,OutPatientServiceInfo,EmergCallInfo,InHospitalInfo,Clinic,ESS,MBQ,SGRO,AttachInfo,AccessoryExamination
import time
import datetime
# Create a new doctor account.
def addDoctorInfo(data):
    """Create a DoctorInfo row from a dict of doctor fields.

    Returns True on success, False on any failure (malformed birthday,
    missing key, database error). registerDate is populated by the model.
    """
    fields = ['name', 'sex', 'userName', 'password', 'cellphone', 'weChat',
              'mail', 'title', 'hospital', 'department', 'userGroup']
    try:
        kwargs = {f: data[f] for f in fields}
        # Parse inside the try (the original parsed outside and could raise
        # instead of returning False, unlike the sibling add* helpers).
        kwargs['birthday'] = datetime.datetime.strptime(data['birthday'], "%Y-%m-%d").date()
        DoctorInfo(**kwargs).save()
        return True
    except Exception:  # narrowed from a bare except that trapped SystemExit too
        return False
# Create a new experiment group.
def addExpGroup(D_id, name, info):
    """Create a GroupInfo (experiment group) owned by doctor D_id.

    Returns True on success, False on any failure.
    """
    try:
        GroupInfo(D_id=D_id, name=name, information=info).save()
        return True
    except Exception:  # narrowed from a bare except
        return False
# Add a patient to an experiment group.
def addPatientToExpGroup(G_id, P_id):
    """Link patient P_id into experiment group G_id via PatientGroup.

    NOTE(review): the ids are not validated here despite the original TODO;
    a bad id only surfaces as a False return. Returns True on success.
    """
    try:
        PatientGroup(G_id=G_id, P_id=P_id).save()
        return True
    except Exception:  # narrowed from a bare except
        return False
# Create a new patient and link them to their group.
def addPatientInfo(data):
    """Create a PatientInfo row and its PatientGroup link in one call.

    `data` carries all patient fields plus G_id for the group link.
    Returns True on success, False on any failure (note: if the group link
    fails, the PatientInfo row has already been saved — same as the original).
    """
    fields = ['P_id', 'sign', 'name', 'sex', 'age', 'nation', 'height',
              'weight', 'education', 'career', 'marriage', 'photo',
              'homeAddr', 'birthAddr', 'activityAddr1', 'activityAddr2',
              'actionAddr', 'diastolicPressure', 'systolicPressure',
              'neckCircu', 'payment', 'telephone', 'cellphone',
              'partnerPhone']
    try:
        kwargs = {f: data[f] for f in fields}
        kwargs['birthday'] = datetime.datetime.strptime(data['birthday'], "%Y-%m-%d").date()
        PatientInfo(**kwargs).save()
        PatientGroup(G_id=data['G_id'], P_id=data['P_id']).save()
        return True
    except Exception:  # narrowed from a bare except
        return False
# Create a new family-member record.
def addRelationInfo(data):
    """Create a RelationInfo (family member) record for patient data['P_id'].

    Returns True on success, False on failure.
    """
    fields = ['P_id', 'name', 'sex', 'telephone', 'cellphone', 'weChat',
              'mail', 'homeAddr']
    try:
        RelationInfo(**{f: data[f] for f in fields}).save()
        return True
    except Exception:  # narrowed from a bare except
        return False
# Create an outpatient-visit record.
def addOutPatientServiceInfo(data):
    """Create an OutPatientServiceInfo row. Returns True on success."""
    fields = ['P_id', 'place', 'isStabel', 'symptom', 'physicalExam',
              'breathErr', 'acuteExac', 'disease', 'use_abt', 'useJmzs',
              'hospital', 'airRelate', 'treatMethod', 'medicine']
    try:
        kwargs = {f: data[f] for f in fields}
        # Parse into a local instead of writing back into the caller's dict.
        kwargs['date'] = datetime.datetime.strptime(data['date'], "%Y-%m-%d").date()
        OutPatientServiceInfo(**kwargs).save()
        return True
    except Exception:  # narrowed from a bare except
        return False
# Create an emergency-call record.
def addEmergCallInfo(data):
    """Create an EmergCallInfo row. Returns True on success.

    NOTE(review): ecDate is stored unparsed while date is strptime'd —
    confirm whether ecDate also needs "%Y-%m-%d" parsing.
    """
    fields = ['P_id', 'place', 'symptom', 'acuteExac', 'disease', 'byxCheck',
              'byxResult', 'ycWcTreat', 'useAbt', 'abtType', 'useJmzs',
              'ecMethod', 'ecDate', 'hospital', 'treatMethod', 'airRelate']
    try:
        kwargs = {f: data[f] for f in fields}
        # Parse into a local instead of mutating the caller's dict.
        kwargs['date'] = datetime.datetime.strptime(data['date'], "%Y-%m-%d").date()
        EmergCallInfo(**kwargs).save()
        return True
    except Exception:  # narrowed from a bare except
        return False
# Create an in-hospital (admission) record.
def addInHospitalInfo(data):
    """Create an InHospitalInfo row. Returns True on success."""
    fields = ['P_id', 'place', 'commonIcu', 'symptom', 'acuteExac', 'disease',
              'byxCheck', 'byxResult', 'ycWcTreat', 'useAbt', 'abtType',
              'useJmzs', 'hospitalDays', 'airRelate', 'treatMethod',
              'reason', 'docAdvice']
    try:
        kwargs = {f: data[f] for f in fields}
        # Parse into a local instead of mutating the caller's dict.
        kwargs['date'] = datetime.datetime.strptime(data['date'], "%Y-%m-%d").date()
        InHospitalInfo(**kwargs).save()
        return True
    except Exception:  # narrowed from a bare except
        return False
# Create a clinical-information record.
def addClinicInfo(data):
    """Create a Clinic row from `data`. Returns True on success.

    Every model kwarg name matches its key in `data`, so the field list is
    generated instead of spelling out the ~75 identical `x = data['x']` pairs.
    """
    fields = (['P_id', 'type', 'S_id', 'dangerType']
              + ['smoke%d' % i for i in range(1, 11)]
              + ['powder%d' % i for i in range(1, 4)]
              + ['biology1', 'biology2', 'hAir1', 'hAir2', 'gm1', 'gm2']
              + ['drink%d' % i for i in range(1, 5)]
              + ['lung%d' % i for i in range(1, 8)]
              + ['cure%d' % i for i in range(1, 27)]
              + ['comp%d' % i for i in range(1, 7)])
    try:
        Clinic(**{f: data[f] for f in fields}).save()
        return True
    except Exception:  # narrowed from a bare except
        return False
# Create a questionnaire record.
def addQuestionnaireInfo(type, data):
    """Create a questionnaire record: type 0 -> ESS, 1 -> MBQ, 2 -> SGRO.

    Returns True on success, False on failure or on an unknown type.
    (The parameter name `type` shadows the builtin but is kept for
    backward compatibility with existing callers.)
    """
    mbq_fields = ['q4', 'q5', 'q6', 'q7', 'q8', 'q9', 'q10', 'BMI']
    dispatch = {
        0: (ESS, ['ess4', 'ess5', 'ess6', 'ess7', 'ess8', 'score']),
        1: (MBQ, mbq_fields),
        2: (SGRO, mbq_fields),  # SGRO shares MBQ's field set in this schema
    }
    if type not in dispatch:
        return False
    model, extra = dispatch[type]
    fields = ['P_id', 'type', 'S_id'] + extra
    try:
        model(**{f: data[f] for f in fields}).save()
        return True
    except Exception:  # narrowed from a bare except
        return False
# Create an attachment record.
def addAttachInfo(data):
    """Create an AttachInfo row. Returns True on success.

    TODO (carried over from the original): the img context is not stored yet.
    """
    fields = ['P_id', 'type', 'S_id', 'D_id', 'name', 'information', 'dir']
    try:
        AttachInfo(**{f: data[f] for f in fields}).save()
        return True
    except Exception:  # narrowed from a bare except
        return False
# Create an accessory-examination record.
def addAccessoryExamination(data):
    """Create an AccessoryExamination row. Returns True on success.

    NOTE(review): data['date'] is stored unparsed here, unlike the sibling
    helpers that strptime "%Y-%m-%d" — confirm the model field accepts it.
    """
    fields = ['S_id', 'type', 'date', 'AE_type', 'name', 'description', 'D_id']
    try:
        AccessoryExamination(**{f: data[f] for f in fields}).save()
        return True
    except Exception:  # narrowed from a bare except
        return False
"1021369745@qq.com"
] | 1021369745@qq.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.