Dataset preview: column schema (type and observed value range per column), followed by the preview rows. Each row lists its field values in this column order, separated by "|", with the file text in the "content" field.

- blob_id: string (length 40)
- directory_id: string (length 40)
- path: string (length 2–616)
- content_id: string (length 40)
- detected_licenses: list (length 0–69)
- license_type: string (2 classes)
- repo_name: string (length 5–118)
- snapshot_id: string (length 40)
- revision_id: string (length 40)
- branch_name: string (length 4–63)
- visit_date: timestamp[us]
- revision_date: timestamp[us]
- committer_date: timestamp[us]
- github_id: int64 (2.91k–686M, some null)
- star_events_count: int64 (0–209k)
- fork_events_count: int64 (0–110k)
- gha_license_id: string (23 classes)
- gha_event_created_at: timestamp[us]
- gha_created_at: timestamp[us]
- gha_language: string (213 classes)
- src_encoding: string (30 classes)
- language: string (1 class)
- is_vendor: bool (2 classes)
- is_generated: bool (2 classes)
- length_bytes: int64 (2–10.3M)
- extension: string (246 classes)
- content: string (length 2–10.3M)
- authors: list (length 1)
- author_id: string (length 0–212)
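Before the rows, a minimal sketch of how a consumer might filter records shaped like this schema. It is illustrative only: the sample record and its values are hypothetical (only the field names come from the column list above), and no particular loading library is assumed.

```python
# Minimal sketch: filter rows that follow the schema above.
# The sample record below is hypothetical; only the field names match the column list.

sample_rows = [
    {
        "path": "/src/example.py",
        "repo_name": "someuser/somerepo",   # hypothetical value
        "license_type": "permissive",
        "detected_licenses": ["MIT"],
        "language": "Python",
        "is_vendor": False,
        "is_generated": False,
        "length_bytes": 1234,
        "star_events_count": 19,
        "content": "print('hello')\n",
    },
]

def keep(row):
    # Keep permissive-licensed, human-written files below 1 MB.
    return (
        row["license_type"] == "permissive"
        and not row["is_vendor"]
        and not row["is_generated"]
        and row["length_bytes"] < 1_000_000
    )

kept = [row["path"] for row in sample_rows if keep(row)]
print(kept)  # ['/src/example.py']
```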
c927116099b54a05b03c90959b1cce574e84ed84
|
afc910035c034c6ca01f7c79a4ef15b2ba263e9d
|
/invites/migrations/0002_alter_invites_inviter.py
|
2d4c2866f69184d03883b1da4b9447fe089738bc
|
[] |
no_license
|
HJLG/treasures
|
a2094e10cace9df6f29226324525c6e93df22829
|
e444aa5b352c071f97f70a53f18946c27512e7e3
|
refs/heads/master
| 2023-07-10T19:12:13.268788
| 2021-08-19T16:56:11
| 2021-08-19T16:56:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 600
|
py
|
# Generated by Django 3.2.6 on 2021-08-18 06:36

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('invites', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='invites',
            name='inviter',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='inviter_id', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
[
"huijialegerald@gmail.com"
] |
huijialegerald@gmail.com
|
0072d7dd48d5db4a17994ff2550b4e628a58e0ed
|
f865fdd970f8e37ea2aa5157374af8c4d6ced987
|
/test/test_user_group_list_inner.py
|
16ac636b80338e722fd59c04d034837cc6c1eece
|
[] |
no_license
|
gkeep-openapi/python-sdk
|
7e809448355bff535b3d64e013f001e9196c5e19
|
7c4f3785b47a110386ef10109619654522c95de5
|
refs/heads/master
| 2022-05-28T16:13:06.643958
| 2022-05-13T14:58:39
| 2022-05-13T14:58:39
| 235,536,010
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 859
|
py
|
# coding: utf-8

"""
Gkeep API
Gkeep API # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

from __future__ import absolute_import

import unittest

import swagger_client
from models.user_group_list_inner import UserGroupListInner # noqa: E501
from swagger_client.rest import ApiException


class TestUserGroupListInner(unittest.TestCase):
    """UserGroupListInner unit test stubs"""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testUserGroupListInner(self):
        """Test UserGroupListInner"""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.user_group_list_inner.UserGroupListInner() # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
|
[
"gkeep-ci-jenkins"
] |
gkeep-ci-jenkins
|
3d0a3c8a5c6e5b46f5dd89d596fcc0319355ccc3
|
3b504a983f1807ae7c5af51078bfab8c187fc82d
|
/client/Helpers/cleaner.py
|
c4d4630216831050d22fa945b6021c73c17ec2b3
|
[] |
no_license
|
SEA-group/wowp_scripts
|
7d35fd213db95ea6b3dbd1ec6d3e0f13de86ba58
|
2fe54a44df34f2dcaa6860a23b835dcd8dd21402
|
refs/heads/master
| 2021-09-07T23:10:13.706605
| 2018-03-02T17:23:48
| 2018-03-02T17:23:48
| 117,280,141
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 839
|
py
|
# Embedded file name: scripts/client/Helpers/cleaner.py
from debug_utils import LOG_TRACE, LOG_WARNING, LOG_ERROR
import time
import os
from wofdecorators import noexcept


@noexcept
def deleteOldFiles(dirPath, days, ext):
    if os.path.exists(dirPath):
        allFiles = [ os.path.join(dirPath, f) for f in os.listdir(dirPath) if os.path.isfile(os.path.join(dirPath, f)) and f.endswith(ext) ]
        for t, f in [ (time.time() - os.path.getmtime(f), f) for f in allFiles ]:
            if int(t / 86400) >= days:
                LOG_TRACE('File to remove: %s' % f)
                try:
                    os.remove(f)
                except OSError as e:
                    LOG_WARNING('deleteOldFiles (%s) - %s' % (ext, e.strerror))
    else:
        LOG_WARNING('deleteOldFiles (%s) - directory is not exists: %s' % (ext, dirPath))
|
[
"55k@outlook.com"
] |
55k@outlook.com
|
06aa64922809f256f59a8384624cf0bf5600fefe
|
ace029249e56decdc82770645cbe1904a1af8ec7
|
/django/django_orm/the_wall_p/apps/wall/views.py
|
7e9b9528bba1d7acbec79a6fa30425bbc8a77744
|
[] |
no_license
|
CoraleeZ/Python-Stack-All-Assignments
|
1579e17d447afc0a56be6628db39ba13d6e2e1b3
|
993f4f3ed7ff8a22b8c4275cda037f7f925cb161
|
refs/heads/master
| 2022-11-08T06:17:31.668026
| 2019-03-24T02:59:29
| 2019-03-24T02:59:29
| 177,363,963
| 0
| 1
| null | 2022-10-15T01:50:04
| 2019-03-24T02:55:36
|
Python
|
UTF-8
|
Python
| false
| false
| 4,296
|
py
|
from django.shortcuts import render,redirect
from .models import *
from django.contrib import messages
import bcrypt
import datetime


def index(request):
    pass
    return render(request,'wall/lr.html')


def register(request):
    if request.method=='POST':
        errors=users.objects.basic_validator(request.POST)
        if len(errors)>0:
            for key, value in errors.items():
                messages.error(request,value,extra_tags='red')
            return redirect('/')
        else:
            messages.success(request, "Successfully registered(or log in)!" ,extra_tags='green')
            request.session['fn']=request.POST['fn']
            pwhash=bcrypt.hashpw(request.POST['pw'].encode(), bcrypt.gensalt())
            a=users.objects.create(first_name=request.POST['fn'],last_name=request.POST['ln'],email=request.POST['em'],password=pwhash)
            request.session['id']=a.id
            return redirect('/mainwall')


def mainwall(request):
    if 'id' in request.session and 'fn' in request.session:
        if users.objects.filter(id=request.session['id']):
            a=users.objects.get(id=request.session['id'])
            if a.first_name==request.session['fn']:
                context={
                    'user':users.objects.get(id=request.session['id']),
                    'message':Messages.objects.all()
                }
                return render(request,'wall/mainwall.html',context)
    else:
        messages.error(request,'You are not log in yet!', extra_tags='red')
        return redirect('/')


def logout(request):
    del request.session['fn']
    del request.session['id']
    return redirect('/')


def login(request):
    if request.method=='POST':
        if users.objects.filter(email=request.POST['em']):
            a=users.objects.get(email=request.POST['em'])
            if bcrypt.checkpw(request.POST['pw'].encode(), a.password.encode()):
                request.session['id']=a.id
                request.session['fn']=a.first_name
                messages.success(request, "Successfully registered(or log in)!" ,extra_tags='green')
                return redirect('/mainwall')
        else:
            messages.error(request,'Not valid! Try again!',extra_tags='red')
            return redirect('/')


#wall
def addmessage(request):
    if request.method=='POST':
        a=users.objects.get(id=request.POST['userid'])
        Messages.objects.create(message=request.POST['message'],user=a)
        return redirect('/mainwall')


def wall(request):
    context={
        'user':users.objects.get(id=request.session['id']),
        'postm':Messages.objects.all()
    }
    return render(request,'wall/wall.html',context)


def addcomment(request):
    if request.method=='POST':
        a=users.objects.get(id=request.POST['commentwriter'])
        b=Messages.objects.get(id=request.POST['messageid'],)
        comments.objects.create(comment=request.POST['commnet'],user=a,message=b)
        return redirect('/wall')


def deletemessage(request,messageid):
    a=Messages.objects.get(id=messageid)
    if a.user.id==request.session['id']:
        time=datetime.datetime.now()-a.created_at.replace(tzinfo=None)
        limit=datetime.timedelta(minutes=30)
        if time>limit:
            messages.error(request,'you can not delete message posted 30 minutes ago!',extra_tags='red')
            return redirect('/wall')
        else:
            # b=datetime.datetime.now()
            # c=datetime.datetime.strptime(str(a.created_at),'%Y-%m-%d %H:%m:%S')
            # (b-c).minute
            # if (b-c).minute<30:
            a.delete()
            return redirect('/wall')
            # else:
            #     messages.error(request,'you can not delete message posted 30 minutes ago!',extra_tags='red')
            #     return redirect('/wall')
    else:
        messages.error(request,'you can only delete message belongs to you',extra_tags='red')
        return redirect('/wall')


def deletecomment(request,commentid):
    a=comments.objects.get(id=commentid)
    if a.user.id==request.session['id']:
        a.delete()
        return redirect('/wall')
    else:
        messages.error(request,'you can only delete comment belongs to you',extra_tags='red')
        return redirect('/wall')
# Create your views here.
|
[
"helloqyzhang@gmail.com"
] |
helloqyzhang@gmail.com
|
2c1a94b50ac37c3f75da9cf71ce6ccb720a8887a
|
de826f625e3f4a3f71c2341a3275a3b288ba5062
|
/lab/yun/miniterm.py
|
4787918ee4555d9d3c4f8f3d8bef28863a163261
|
[
"MIT"
] |
permissive
|
sourceperl/pid-analyzer
|
aff4fba41dcd69baa116e5e92fb7afaea17019b3
|
f6637c1da3542f0e88ce29cc9f387ad1931c6959
|
refs/heads/master
| 2020-04-09T23:26:32.006227
| 2019-04-28T17:56:32
| 2019-04-28T17:56:32
| 160,656,825
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 35,436
|
py
|
#!/usr/bin/python
#
# Very simple serial terminal
#
# This file is part of pySerial. https://github.com/pyserial/pyserial
# (C)2002-2017 Chris Liechti <cliechti@gmx.net>
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import absolute_import
import codecs
import os
import sys
import threading
import serial
from serial.tools.list_ports import comports
from serial.tools import hexlify_codec
# pylint: disable=wrong-import-order,wrong-import-position
codecs.register(lambda c: hexlify_codec.getregentry() if c == 'hexlify' else None)
try:
raw_input
except NameError:
# pylint: disable=redefined-builtin,invalid-name
raw_input = input # in python3 it's "raw"
unichr = chr
def key_description(character):
"""generate a readable description for a key"""
ascii_code = ord(character)
if ascii_code < 32:
return 'Ctrl+{:c}'.format(ord('@') + ascii_code)
else:
return repr(character)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class ConsoleBase(object):
"""OS abstraction for console (input/output codec, no echo)"""
def __init__(self):
if sys.version_info >= (3, 0):
self.byte_output = sys.stdout.buffer
else:
self.byte_output = sys.stdout
self.output = sys.stdout
def setup(self):
"""Set console to read single characters, no echo"""
def cleanup(self):
"""Restore default console settings"""
def getkey(self):
"""Read a single key from the console"""
return None
def write_bytes(self, byte_string):
"""Write bytes (already encoded)"""
self.byte_output.write(byte_string)
self.byte_output.flush()
def write(self, text):
"""Write string"""
self.output.write(text)
self.output.flush()
def cancel(self):
"""Cancel getkey operation"""
# - - - - - - - - - - - - - - - - - - - - - - - -
# context manager:
# switch terminal temporary to normal mode (e.g. to get user input)
def __enter__(self):
self.cleanup()
return self
def __exit__(self, *args, **kwargs):
self.setup()
if os.name == 'nt': # noqa
import msvcrt
import ctypes
class Out(object):
"""file-like wrapper that uses os.write"""
def __init__(self, fd):
self.fd = fd
def flush(self):
pass
def write(self, s):
os.write(self.fd, s)
class Console(ConsoleBase):
def __init__(self):
super(Console, self).__init__()
self._saved_ocp = ctypes.windll.kernel32.GetConsoleOutputCP()
self._saved_icp = ctypes.windll.kernel32.GetConsoleCP()
ctypes.windll.kernel32.SetConsoleOutputCP(65001)
ctypes.windll.kernel32.SetConsoleCP(65001)
self.output = codecs.getwriter('UTF-8')(Out(sys.stdout.fileno()), 'replace')
# the change of the code page is not propagated to Python, manually fix it
sys.stderr = codecs.getwriter('UTF-8')(Out(sys.stderr.fileno()), 'replace')
sys.stdout = self.output
self.output.encoding = 'UTF-8' # needed for input
def __del__(self):
ctypes.windll.kernel32.SetConsoleOutputCP(self._saved_ocp)
ctypes.windll.kernel32.SetConsoleCP(self._saved_icp)
def getkey(self):
while True:
z = msvcrt.getwch()
if z == unichr(13):
return unichr(10)
elif z in (unichr(0), unichr(0x0e)): # functions keys, ignore
msvcrt.getwch()
else:
return z
def cancel(self):
# CancelIo, CancelSynchronousIo do not seem to work when using
# getwch, so instead, send a key to the window with the console
hwnd = ctypes.windll.kernel32.GetConsoleWindow()
ctypes.windll.user32.PostMessageA(hwnd, 0x100, 0x0d, 0)
elif os.name == 'posix':
import atexit
import termios
import fcntl
class Console(ConsoleBase):
def __init__(self):
super(Console, self).__init__()
self.fd = sys.stdin.fileno()
self.old = termios.tcgetattr(self.fd)
atexit.register(self.cleanup)
if sys.version_info < (3, 0):
self.enc_stdin = codecs.getreader(sys.stdin.encoding)(sys.stdin)
else:
self.enc_stdin = sys.stdin
def setup(self):
new = termios.tcgetattr(self.fd)
new[3] = new[3] & ~termios.ICANON & ~termios.ECHO & ~termios.ISIG
new[6][termios.VMIN] = 1
new[6][termios.VTIME] = 0
termios.tcsetattr(self.fd, termios.TCSANOW, new)
def getkey(self):
c = self.enc_stdin.read(1)
if c == unichr(0x7f):
c = unichr(8) # map the BS key (which yields DEL) to backspace
return c
def cancel(self):
fcntl.ioctl(self.fd, termios.TIOCSTI, b'\0')
def cleanup(self):
termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old)
else:
raise NotImplementedError(
'Sorry no implementation for your platform ({}) available.'.format(sys.platform))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class Transform(object):
"""do-nothing: forward all data unchanged"""
def rx(self, text):
"""text received from serial port"""
return text
def tx(self, text):
"""text to be sent to serial port"""
return text
def echo(self, text):
"""text to be sent but displayed on console"""
return text
class CRLF(Transform):
"""ENTER sends CR+LF"""
def tx(self, text):
return text.replace('\n', '\r\n')
class CR(Transform):
"""ENTER sends CR"""
def rx(self, text):
return text.replace('\r', '\n')
def tx(self, text):
return text.replace('\n', '\r')
class LF(Transform):
"""ENTER sends LF"""
class NoTerminal(Transform):
"""remove typical terminal control codes from input"""
REPLACEMENT_MAP = dict((x, 0x2400 + x) for x in range(32) if unichr(x) not in '\r\n\b\t')
REPLACEMENT_MAP.update(
{
0x7F: 0x2421, # DEL
0x9B: 0x2425, # CSI
})
def rx(self, text):
return text.translate(self.REPLACEMENT_MAP)
echo = rx
class NoControls(NoTerminal):
"""Remove all control codes, incl. CR+LF"""
REPLACEMENT_MAP = dict((x, 0x2400 + x) for x in range(32))
REPLACEMENT_MAP.update(
{
0x20: 0x2423, # visual space
0x7F: 0x2421, # DEL
0x9B: 0x2425, # CSI
})
class Printable(Transform):
"""Show decimal code for all non-ASCII characters and replace most control codes"""
def rx(self, text):
r = []
for c in text:
if ' ' <= c < '\x7f' or c in '\r\n\b\t':
r.append(c)
elif c < ' ':
r.append(unichr(0x2400 + ord(c)))
else:
r.extend(unichr(0x2080 + ord(d) - 48) for d in '{:d}'.format(ord(c)))
r.append(' ')
return ''.join(r)
echo = rx
class Colorize(Transform):
"""Apply different colors for received and echo"""
def __init__(self):
# XXX make it configurable, use colorama?
self.input_color = '\x1b[37m'
self.echo_color = '\x1b[31m'
def rx(self, text):
return self.input_color + text
def echo(self, text):
return self.echo_color + text
class DebugIO(Transform):
"""Print what is sent and received"""
def rx(self, text):
sys.stderr.write(' [RX:{!r}] '.format(text))
sys.stderr.flush()
return text
def tx(self, text):
sys.stderr.write(' [TX:{!r}] '.format(text))
sys.stderr.flush()
return text
# other ideas:
# - add date/time for each newline
# - insert newline after: a) timeout b) packet end character
EOL_TRANSFORMATIONS = {
'crlf': CRLF,
'cr': CR,
'lf': LF,
}
TRANSFORMATIONS = {
'direct': Transform, # no transformation
'default': NoTerminal,
'nocontrol': NoControls,
'printable': Printable,
'colorize': Colorize,
'debug': DebugIO,
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def ask_for_port():
"""\
Show a list of ports and ask the user for a choice. To make selection
easier on systems with long device names, also allow the input of an
index.
"""
sys.stderr.write('\n--- Available ports:\n')
ports = []
for n, (port, desc, hwid) in enumerate(sorted(comports()), 1):
sys.stderr.write('--- {:2}: {:20} {!r}\n'.format(n, port, desc))
ports.append(port)
while True:
port = raw_input('--- Enter port index or full name: ')
try:
index = int(port) - 1
if not 0 <= index < len(ports):
sys.stderr.write('--- Invalid index!\n')
continue
except ValueError:
pass
else:
port = ports[index]
return port
class Miniterm(object):
"""\
Terminal application. Copy data from serial port to console and vice versa.
Handle special keys from the console to show menu etc.
"""
def __init__(self, serial_instance, echo=False, eol='crlf', filters=()):
self.console = Console()
self.serial = serial_instance
self.echo = echo
self.raw = False
self.input_encoding = 'UTF-8'
self.output_encoding = 'UTF-8'
self.eol = eol
self.filters = filters
self.update_transformations()
self.exit_character = unichr(0x1d) # GS/CTRL+]
self.menu_character = unichr(0x14) # Menu: CTRL+T
self.alive = None
self._reader_alive = None
self.receiver_thread = None
self.rx_decoder = None
self.tx_decoder = None
def _start_reader(self):
"""Start reader thread"""
self._reader_alive = True
# start serial->console thread
self.receiver_thread = threading.Thread(target=self.reader, name='rx')
self.receiver_thread.daemon = True
self.receiver_thread.start()
def _stop_reader(self):
"""Stop reader thread only, wait for clean exit of thread"""
self._reader_alive = False
if hasattr(self.serial, 'cancel_read'):
self.serial.cancel_read()
self.receiver_thread.join()
def start(self):
"""start worker threads"""
self.alive = True
self._start_reader()
# enter console->serial loop
self.transmitter_thread = threading.Thread(target=self.writer, name='tx')
self.transmitter_thread.daemon = True
self.transmitter_thread.start()
self.console.setup()
def stop(self):
"""set flag to stop worker threads"""
self.alive = False
def join(self, transmit_only=False):
"""wait for worker threads to terminate"""
self.transmitter_thread.join()
if not transmit_only:
if hasattr(self.serial, 'cancel_read'):
self.serial.cancel_read()
self.receiver_thread.join()
def close(self):
self.serial.close()
def update_transformations(self):
"""take list of transformation classes and instantiate them for rx and tx"""
transformations = [EOL_TRANSFORMATIONS[self.eol]] + [TRANSFORMATIONS[f]
for f in self.filters]
self.tx_transformations = [t() for t in transformations]
self.rx_transformations = list(reversed(self.tx_transformations))
def set_rx_encoding(self, encoding, errors='replace'):
"""set encoding for received data"""
self.input_encoding = encoding
self.rx_decoder = codecs.getincrementaldecoder(encoding)(errors)
def set_tx_encoding(self, encoding, errors='replace'):
"""set encoding for transmitted data"""
self.output_encoding = encoding
self.tx_encoder = codecs.getincrementalencoder(encoding)(errors)
def dump_port_settings(self):
"""Write current settings to sys.stderr"""
sys.stderr.write("\n--- Settings: {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits}\n".format(
p=self.serial))
sys.stderr.write('--- RTS: {:8} DTR: {:8} BREAK: {:8}\n'.format(
('active' if self.serial.rts else 'inactive'),
('active' if self.serial.dtr else 'inactive'),
('active' if self.serial.break_condition else 'inactive')))
try:
sys.stderr.write('--- CTS: {:8} DSR: {:8} RI: {:8} CD: {:8}\n'.format(
('active' if self.serial.cts else 'inactive'),
('active' if self.serial.dsr else 'inactive'),
('active' if self.serial.ri else 'inactive'),
('active' if self.serial.cd else 'inactive')))
except serial.SerialException:
# on RFC 2217 ports, it can happen if no modem state notification was
# yet received. ignore this error.
pass
sys.stderr.write('--- software flow control: {}\n'.format('active' if self.serial.xonxoff else 'inactive'))
sys.stderr.write('--- hardware flow control: {}\n'.format('active' if self.serial.rtscts else 'inactive'))
sys.stderr.write('--- serial input encoding: {}\n'.format(self.input_encoding))
sys.stderr.write('--- serial output encoding: {}\n'.format(self.output_encoding))
sys.stderr.write('--- EOL: {}\n'.format(self.eol.upper()))
sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters)))
def reader(self):
"""loop and copy serial->console"""
try:
while self.alive and self._reader_alive:
# read all that is there or wait for one byte
data = self.serial.read(self.serial.in_waiting or 1)
if data:
if self.raw:
self.console.write_bytes(data)
else:
text = self.rx_decoder.decode(data)
for transformation in self.rx_transformations:
text = transformation.rx(text)
self.console.write(text)
except serial.SerialException:
self.alive = False
self.console.cancel()
raise # XXX handle instead of re-raise?
def writer(self):
"""\
Loop and copy console->serial until self.exit_character character is
found. When self.menu_character is found, interpret the next key
locally.
"""
menu_active = False
try:
while self.alive:
try:
c = self.console.getkey()
except KeyboardInterrupt:
c = '\x03'
if not self.alive:
break
if menu_active:
self.handle_menu_key(c)
menu_active = False
elif c == self.menu_character:
menu_active = True # next char will be for menu
elif c == self.exit_character:
self.stop() # exit app
break
else:
#~ if self.raw:
text = c
for transformation in self.tx_transformations:
text = transformation.tx(text)
self.serial.write(self.tx_encoder.encode(text))
if self.echo:
echo_text = c
for transformation in self.tx_transformations:
echo_text = transformation.echo(echo_text)
self.console.write(echo_text)
except:
self.alive = False
raise
def handle_menu_key(self, c):
"""Implement a simple menu / settings"""
if c == self.menu_character or c == self.exit_character:
# Menu/exit character again -> send itself
self.serial.write(self.tx_encoder.encode(c))
if self.echo:
self.console.write(c)
elif c == '\x15': # CTRL+U -> upload file
self.upload_file()
elif c in '\x08hH?': # CTRL+H, h, H, ? -> Show help
sys.stderr.write(self.get_help_text())
elif c == '\x12': # CTRL+R -> Toggle RTS
self.serial.rts = not self.serial.rts
sys.stderr.write('--- RTS {} ---\n'.format('active' if self.serial.rts else 'inactive'))
elif c == '\x04': # CTRL+D -> Toggle DTR
self.serial.dtr = not self.serial.dtr
sys.stderr.write('--- DTR {} ---\n'.format('active' if self.serial.dtr else 'inactive'))
elif c == '\x02': # CTRL+B -> toggle BREAK condition
self.serial.break_condition = not self.serial.break_condition
sys.stderr.write('--- BREAK {} ---\n'.format('active' if self.serial.break_condition else 'inactive'))
elif c == '\x05': # CTRL+E -> toggle local echo
self.echo = not self.echo
sys.stderr.write('--- local echo {} ---\n'.format('active' if self.echo else 'inactive'))
elif c == '\x06': # CTRL+F -> edit filters
self.change_filter()
elif c == '\x0c': # CTRL+L -> EOL mode
modes = list(EOL_TRANSFORMATIONS) # keys
eol = modes.index(self.eol) + 1
if eol >= len(modes):
eol = 0
self.eol = modes[eol]
sys.stderr.write('--- EOL: {} ---\n'.format(self.eol.upper()))
self.update_transformations()
elif c == '\x01': # CTRL+A -> set encoding
self.change_encoding()
elif c == '\x09': # CTRL+I -> info
self.dump_port_settings()
#~ elif c == '\x01': # CTRL+A -> cycle escape mode
#~ elif c == '\x0c': # CTRL+L -> cycle linefeed mode
elif c in 'pP': # P -> change port
self.change_port()
elif c in 'sS': # S -> suspend / open port temporarily
self.suspend_port()
elif c in 'bB': # B -> change baudrate
self.change_baudrate()
elif c == '8': # 8 -> change to 8 bits
self.serial.bytesize = serial.EIGHTBITS
self.dump_port_settings()
elif c == '7': # 7 -> change to 8 bits
self.serial.bytesize = serial.SEVENBITS
self.dump_port_settings()
elif c in 'eE': # E -> change to even parity
self.serial.parity = serial.PARITY_EVEN
self.dump_port_settings()
elif c in 'oO': # O -> change to odd parity
self.serial.parity = serial.PARITY_ODD
self.dump_port_settings()
elif c in 'mM': # M -> change to mark parity
self.serial.parity = serial.PARITY_MARK
self.dump_port_settings()
elif c in 'sS': # S -> change to space parity
self.serial.parity = serial.PARITY_SPACE
self.dump_port_settings()
elif c in 'nN': # N -> change to no parity
self.serial.parity = serial.PARITY_NONE
self.dump_port_settings()
elif c == '1': # 1 -> change to 1 stop bits
self.serial.stopbits = serial.STOPBITS_ONE
self.dump_port_settings()
elif c == '2': # 2 -> change to 2 stop bits
self.serial.stopbits = serial.STOPBITS_TWO
self.dump_port_settings()
elif c == '3': # 3 -> change to 1.5 stop bits
self.serial.stopbits = serial.STOPBITS_ONE_POINT_FIVE
self.dump_port_settings()
elif c in 'xX': # X -> change software flow control
self.serial.xonxoff = (c == 'X')
self.dump_port_settings()
elif c in 'rR': # R -> change hardware flow control
self.serial.rtscts = (c == 'R')
self.dump_port_settings()
else:
sys.stderr.write('--- unknown menu character {} --\n'.format(key_description(c)))
def upload_file(self):
"""Ask user for filenname and send its contents"""
sys.stderr.write('\n--- File to upload: ')
sys.stderr.flush()
with self.console:
filename = sys.stdin.readline().rstrip('\r\n')
if filename:
try:
with open(filename, 'rb') as f:
sys.stderr.write('--- Sending file {} ---\n'.format(filename))
while True:
block = f.read(1024)
if not block:
break
self.serial.write(block)
# Wait for output buffer to drain.
self.serial.flush()
sys.stderr.write('.') # Progress indicator.
sys.stderr.write('\n--- File {} sent ---\n'.format(filename))
except IOError as e:
sys.stderr.write('--- ERROR opening file {}: {} ---\n'.format(filename, e))
def change_filter(self):
"""change the i/o transformations"""
sys.stderr.write('\n--- Available Filters:\n')
sys.stderr.write('\n'.join(
'--- {:<10} = {.__doc__}'.format(k, v)
for k, v in sorted(TRANSFORMATIONS.items())))
sys.stderr.write('\n--- Enter new filter name(s) [{}]: '.format(' '.join(self.filters)))
with self.console:
new_filters = sys.stdin.readline().lower().split()
if new_filters:
for f in new_filters:
if f not in TRANSFORMATIONS:
sys.stderr.write('--- unknown filter: {!r}\n'.format(f))
break
else:
self.filters = new_filters
self.update_transformations()
sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters)))
def change_encoding(self):
"""change encoding on the serial port"""
sys.stderr.write('\n--- Enter new encoding name [{}]: '.format(self.input_encoding))
with self.console:
new_encoding = sys.stdin.readline().strip()
if new_encoding:
try:
codecs.lookup(new_encoding)
except LookupError:
sys.stderr.write('--- invalid encoding name: {}\n'.format(new_encoding))
else:
self.set_rx_encoding(new_encoding)
self.set_tx_encoding(new_encoding)
sys.stderr.write('--- serial input encoding: {}\n'.format(self.input_encoding))
sys.stderr.write('--- serial output encoding: {}\n'.format(self.output_encoding))
def change_baudrate(self):
"""change the baudrate"""
sys.stderr.write('\n--- Baudrate: ')
sys.stderr.flush()
with self.console:
backup = self.serial.baudrate
try:
self.serial.baudrate = int(sys.stdin.readline().strip())
except ValueError as e:
sys.stderr.write('--- ERROR setting baudrate: {} ---\n'.format(e))
self.serial.baudrate = backup
else:
self.dump_port_settings()
def change_port(self):
"""Have a conversation with the user to change the serial port"""
with self.console:
try:
port = ask_for_port()
except KeyboardInterrupt:
port = None
if port and port != self.serial.port:
# reader thread needs to be shut down
self._stop_reader()
# save settings
settings = self.serial.getSettingsDict()
try:
new_serial = serial.serial_for_url(port, do_not_open=True)
# restore settings and open
new_serial.applySettingsDict(settings)
new_serial.rts = self.serial.rts
new_serial.dtr = self.serial.dtr
new_serial.open()
new_serial.break_condition = self.serial.break_condition
except Exception as e:
sys.stderr.write('--- ERROR opening new port: {} ---\n'.format(e))
new_serial.close()
else:
self.serial.close()
self.serial = new_serial
sys.stderr.write('--- Port changed to: {} ---\n'.format(self.serial.port))
# and restart the reader thread
self._start_reader()
def suspend_port(self):
"""\
open port temporarily, allow reconnect, exit and port change to get
out of the loop
"""
# reader thread needs to be shut down
self._stop_reader()
self.serial.close()
sys.stderr.write('\n--- Port closed: {} ---\n'.format(self.serial.port))
do_change_port = False
while not self.serial.is_open:
sys.stderr.write('--- Quit: {exit} | p: port change | any other key to reconnect ---\n'.format(
exit=key_description(self.exit_character)))
k = self.console.getkey()
if k == self.exit_character:
self.stop() # exit app
break
elif k in 'pP':
do_change_port = True
break
try:
self.serial.open()
except Exception as e:
sys.stderr.write('--- ERROR opening port: {} ---\n'.format(e))
if do_change_port:
self.change_port()
else:
# and restart the reader thread
self._start_reader()
sys.stderr.write('--- Port opened: {} ---\n'.format(self.serial.port))
def get_help_text(self):
"""return the help text"""
# help text, starts with blank line!
return """
--- pySerial ({version}) - miniterm - help
---
--- {exit:8} Exit program
--- {menu:8} Menu escape key, followed by:
--- Menu keys:
--- {menu:7} Send the menu character itself to remote
--- {exit:7} Send the exit character itself to remote
--- {info:7} Show info
--- {upload:7} Upload file (prompt will be shown)
--- {repr:7} encoding
--- {filter:7} edit filters
--- Toggles:
--- {rts:7} RTS {dtr:7} DTR {brk:7} BREAK
--- {echo:7} echo {eol:7} EOL
---
--- Port settings ({menu} followed by the following):
--- p change port
--- 7 8 set data bits
--- N E O S M change parity (None, Even, Odd, Space, Mark)
--- 1 2 3 set stop bits (1, 2, 1.5)
--- b change baud rate
--- x X disable/enable software flow control
--- r R disable/enable hardware flow control
""".format(version=getattr(serial, 'VERSION', 'unknown version'),
exit=key_description(self.exit_character),
menu=key_description(self.menu_character),
rts=key_description('\x12'),
dtr=key_description('\x04'),
brk=key_description('\x02'),
echo=key_description('\x05'),
info=key_description('\x09'),
upload=key_description('\x15'),
repr=key_description('\x01'),
filter=key_description('\x06'),
eol=key_description('\x0c'))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# default args can be used to override when calling main() from an other script
# e.g to create a miniterm-my-device.py
def main(default_port=None, default_baudrate=9600, default_rts=None, default_dtr=None):
"""Command line tool, entry point"""
import argparse
parser = argparse.ArgumentParser(
description='Miniterm - A simple terminal program for the serial port.')
parser.add_argument(
'port',
nargs='?',
help='serial port name ("-" to show port list)',
default=default_port)
parser.add_argument(
'baudrate',
nargs='?',
type=int,
help='set baud rate, default: %(default)s',
default=default_baudrate)
group = parser.add_argument_group('port settings')
group.add_argument(
'--parity',
choices=['N', 'E', 'O', 'S', 'M'],
type=lambda c: c.upper(),
help='set parity, one of {N E O S M}, default: N',
default='N')
group.add_argument(
'--rtscts',
action='store_true',
help='enable RTS/CTS flow control (default off)',
default=False)
group.add_argument(
'--xonxoff',
action='store_true',
help='enable software flow control (default off)',
default=False)
group.add_argument(
'--rts',
type=int,
help='set initial RTS line state (possible values: 0, 1)',
default=default_rts)
group.add_argument(
'--dtr',
type=int,
help='set initial DTR line state (possible values: 0, 1)',
default=default_dtr)
group.add_argument(
'--non-exclusive',
dest='exclusive',
action='store_false',
help='disable locking for native ports',
default=True)
group.add_argument(
'--ask',
action='store_true',
help='ask again for port when open fails',
default=False)
group = parser.add_argument_group('data handling')
group.add_argument(
'-e', '--echo',
action='store_true',
help='enable local echo (default off)',
default=False)
group.add_argument(
'--encoding',
dest='serial_port_encoding',
metavar='CODEC',
help='set the encoding for the serial port (e.g. hexlify, Latin1, UTF-8), default: %(default)s',
default='UTF-8')
group.add_argument(
'-f', '--filter',
action='append',
metavar='NAME',
help='add text transformation',
default=[])
group.add_argument(
'--eol',
choices=['CR', 'LF', 'CRLF'],
type=lambda c: c.upper(),
help='end of line mode',
default='CRLF')
group.add_argument(
'--raw',
action='store_true',
help='Do no apply any encodings/transformations',
default=False)
group = parser.add_argument_group('hotkeys')
group.add_argument(
'--exit-char',
type=int,
metavar='NUM',
help='Unicode of special character that is used to exit the application, default: %(default)s',
default=0x1d) # GS/CTRL+]
group.add_argument(
'--menu-char',
type=int,
metavar='NUM',
help='Unicode code of special character that is used to control miniterm (menu), default: %(default)s',
default=0x14) # Menu: CTRL+T
group = parser.add_argument_group('diagnostics')
group.add_argument(
'-q', '--quiet',
action='store_true',
help='suppress non-error messages',
default=False)
group.add_argument(
'--develop',
action='store_true',
help='show Python traceback on error',
default=False)
args = parser.parse_args()
if args.menu_char == args.exit_char:
parser.error('--exit-char can not be the same as --menu-char')
if args.filter:
if 'help' in args.filter:
sys.stderr.write('Available filters:\n')
sys.stderr.write('\n'.join(
'{:<10} = {.__doc__}'.format(k, v)
for k, v in sorted(TRANSFORMATIONS.items())))
sys.stderr.write('\n')
sys.exit(1)
filters = args.filter
else:
filters = ['default']
while True:
# no port given on command line -> ask user now
if args.port is None or args.port == '-':
try:
args.port = ask_for_port()
except KeyboardInterrupt:
sys.stderr.write('\n')
parser.error('user aborted and port is not given')
else:
if not args.port:
parser.error('port is not given')
try:
serial_instance = serial.serial_for_url(
args.port,
args.baudrate,
parity=args.parity,
rtscts=args.rtscts,
xonxoff=args.xonxoff,
do_not_open=True)
if not hasattr(serial_instance, 'cancel_read'):
# enable timeout for alive flag polling if cancel_read is not available
serial_instance.timeout = 1
if args.dtr is not None:
if not args.quiet:
sys.stderr.write('--- forcing DTR {}\n'.format('active' if args.dtr else 'inactive'))
serial_instance.dtr = args.dtr
if args.rts is not None:
if not args.quiet:
sys.stderr.write('--- forcing RTS {}\n'.format('active' if args.rts else 'inactive'))
serial_instance.rts = args.rts
if isinstance(serial_instance, serial.Serial):
serial_instance.exclusive = args.exclusive
serial_instance.open()
except serial.SerialException as e:
sys.stderr.write('could not open port {!r}: {}\n'.format(args.port, e))
if args.develop:
raise
if not args.ask:
sys.exit(1)
else:
args.port = '-'
else:
break
miniterm = Miniterm(
serial_instance,
echo=args.echo,
eol=args.eol.lower(),
filters=filters)
miniterm.exit_character = unichr(args.exit_char)
miniterm.menu_character = unichr(args.menu_char)
miniterm.raw = args.raw
miniterm.set_rx_encoding(args.serial_port_encoding)
miniterm.set_tx_encoding(args.serial_port_encoding)
if not args.quiet:
sys.stderr.write('--- Miniterm on {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits} ---\n'.format(
p=miniterm.serial))
sys.stderr.write('--- Quit: {} | Menu: {} | Help: {} followed by {} ---\n'.format(
key_description(miniterm.exit_character),
key_description(miniterm.menu_character),
key_description(miniterm.menu_character),
key_description('\x08')))
miniterm.start()
try:
miniterm.join(True)
except KeyboardInterrupt:
pass
if not args.quiet:
sys.stderr.write('\n--- exit ---\n')
miniterm.join()
miniterm.close()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if __name__ == '__main__':
main()
|
[
"loic.celine@free.fr"
] |
loic.celine@free.fr
|
7fc1c487d6b98fa7925264d42daaf09f8bf42ccc
|
3b27ecb82639545fc564446295a45a5f0fa99920
|
/src/service/dynamo.py
|
85810f83f388b0cc8249512a44172c2581fc6257
|
[] |
no_license
|
suyogmirgal/serverless-user-service
|
2ce6f5449fbbe4caf7b74ec46b367afd51b52f3e
|
f1ece95abeb445ceb0f74defab5c814c331aa51a
|
refs/heads/master
| 2023-01-31T22:17:50.818927
| 2020-12-16T16:18:48
| 2020-12-16T16:18:48
| 321,696,352
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 613
|
py
|
import boto3
import os
import uuid


def create_user(user_data):
    dynamo_client().put_item(
        TableName=os.getenv("USER_TABLE"),
        Item={
            "id": {"S": str(uuid.uuid4())},
            "email": {"S": user_data.get("email")},
            "firstName": {"S": user_data.get("first_name")},
            "lastName": {"S": user_data.get("last_name")},
            "dob": {"S": user_data.get("dob")},
        },
    )


def dynamo_client():
    if os.getenv("IS_OFFLINE") == "true":
        return boto3.client("dynamodb", endpoint_url="http://localhost:8000")
    return boto3.client("dynamodb")
|
[
"suyog.mirgal@arrkgroup.com"
] |
suyog.mirgal@arrkgroup.com
|
d38fd7f0f8dd0b59d615a0c1fa227a31c6f535f9
|
2f27a99188314938eed1093deaa5ac84c29534c8
|
/freedmqtt/freedmqtt/MQTTClientUpdated.py
|
1834b68ff1e60e17e69d9afa5065a9e83e7892e7
|
[] |
no_license
|
FREEDM-DGI/MQTT-Device
|
3f9ea4083355fa9af1dff03e8ece9e77db830351
|
0d8c1085b5656d9e1e38a2f7b68bbdf32f228911
|
refs/heads/master
| 2021-01-10T17:16:53.169156
| 2016-12-20T00:07:14
| 2016-12-20T00:07:14
| 51,873,288
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,185
|
py
|
import os
import sys
import json
from freedmqtt.log import log, datalog, DEBUG, EVENT, WARNING, ERROR, RX, TX
from freedmqtt.DeviceProfile import DeviceProfile
import re
import jsonpickle
import datetime
import time
import paho.mqtt.client as mqtt
import multiprocessing
import threading
# from freedmqtt.jsonhandler import jsonhandler
# from watchdog.observers import Observer
TIME_CONSTANT = 0.1
class MQTTClient_Process(multiprocessing.Process):
def __init__(self, device, ip, port, p_type=1):
super(MQTTClient_Process, self).__init__()
self.ip = str(ip)
self.init_event = multiprocessing.Event()
self.update_event = multiprocessing.Event()
self.init_event.clear()
self.p_type = p_type
self.port = port
self.missed_msg = 0
self.daemon = True
self.json_lock = threading.RLock()
self.init = 0
self.device_name = str(device)
self.device = DeviceProfile(device, "MQTT")
self.profile = ''
self.dev_topic = ''
self.start_time = datetime.datetime.now()
# self.event_handler = JsonHandler(self.update_event)
# self.observer = Observer()
if(self.p_type == 0):
self.profile = jsonpickle.encode(self.device)
self.dev_topic = str(self.device.read_object(
'Dev_Name')) + '/' + str(int(self.device.read_object('Dev_HW_Ver')))
elif(self.p_type == 1 or self.p_type == 2):
temp_topic = re.split('_', str(device))
self.dev_topic = str(temp_topic[0] + '/' + temp_topic[1])
if(sys.platform == 'win32'):
print("Running on Windows")
def on_connect(self, mqttc, obj, flags, rc):
if rc == 0:
print("Connected")
print("rc: " + str(rc))
if(self.p_type == 0): # Current Device's Datapoint Publisher
self.mqttc.subscribe(str(self.dev_topic) + '/AIN/#')
print("Sub AIN")
self.mqttc.subscribe(str(self.dev_topic) + '/DIN/#')
print("Sub DIN")
self.mqttc.subscribe(str(self.dev_topic) + '/ACK')
print("Sub ACK")
if(self.init == 0):
self.mqttc.publish(
'join/' + str(self.dev_topic), 'Connect')
print("Pub Join")
elif(self.p_type == 1):
self.mqttc.subscribe(str(self.dev_topic) + '/AOUT/#')
print("Sub AOUT")
self.mqttc.subscribe(str(self.dev_topic) + '/DOUT/#')
print("Sub DOUT")
self.mqttc.publish(str(self.dev_topic) + '/ACK', 'ACK')
print("Pub ACK")
self.mqttc.subscribe(str(self.dev_topic) + '/JSON/#')
print("Sub JSON")
elif(self.p_type == 2):
self.mqttc.subscribe(str(self.dev_topic) + '/AIN/#')
self.mqttc.subscribe(str(self.dev_topic) + '/DIN/#')
self.mqttc.subscribe(str(self.dev_topic) + '/DOUT/#')
self.mqttc.subscribe(str(self.dev_topic) + '/AOUT/#')
print("Sub to ALL")
self.mqttc.publish(str(self.dev_topic) + '/ACK', 'ACK')
print("Pub ACK")
self.mqttc.subscribe(str(self.dev_topic) + '/JSON/#')
print("Sub JSON")
else:
print("Error in connect")
def on_publish(self, mqttc, obj, mid):
print("mid: " + str(mid))
def on_subscribe(self, mqttc, obj, mid, granted_qos):
print("Subscribed: " + str(mid) + " " + str(granted_qos))
def on_message(self, mqttc, obj, msg):
print(msg.topic + " " + str(msg.qos) + " " + str(msg.payload))
split_topic = re.split('/', msg.topic)
if(self.p_type == 0):
if(split_topic[2] == 'ACK'):
if(msg.payload == 'ACK'):
self.mqttc.publish(
str(self.dev_topic) + '/JSON', str(self.profile))
self.mqttc.publish(
str(self.dev_topic) + '/JSON-DGI', str(jsonpickle.encode(self.device, unpicklable=False)))
if not (self.init_event.is_set()):
self.init_event.set()
self.init = 1
else:
self.read_command(msg)
elif(self.p_type == 1 or self.p_type == 2 ):
if(split_topic[2] == 'JSON'):
file_name = str(split_topic[0]) + \
'_' + str(split_topic[1]) + '.json'
file_name = 'json/' + str(file_name)
f = open(file_name, 'a')
f.seek(0)
f.truncate()
f.write(msg.payload)
f.close()
if not (self.init_event.is_set()):
self.init_event.set()
self.init = 1
else:
self.read_command(msg)
def on_log(self, mqttc, obj, level, string):
print(string)
def send_commands(self):
while True:
obj_topic = self.device.Delete_Change()
log("Object to be sent " + str(obj_topic), DEBUG)
if(obj_topic == 'Error'):
return
sp_topic = re.split('/', obj_topic)
try:
msg = self.device.read(sp_topic[0], int(sp_topic[1]))
except ValueError as e:
print(e)
print(msg)
datalog(msg,obj_topic,self.device_name,TX,(datetime.datetime.now()-self.start_time))
self.mqttc.publish(str(self.dev_topic) + '/' + obj_topic, msg)
def read_command(self, msg):
self.rx_msg = msg
log("Trying to Acquire JSON file", DEBUG,(datetime.datetime.now()-self.start_time))
#self.json_lock.acquire()
sp_msg = re.split('/', self.rx_msg.topic)
try:
log("Write " + str(sp_msg[2]) +
str(sp_msg[3]) + "to JSON File", DEBUG,(datetime.datetime.now()-self.start_time))
datalog(self.rx_msg.payload,str(sp_msg[2])+ '/' + str(sp_msg[3]),self.device_name,RX,(datetime.datetime.now()-self.start_time))
self.device.write(self.rx_msg.payload, sp_msg[2], int(sp_msg[3]))
except ValueError as e:
print(e)
#self.json_lock.release()
def run(self):
self.mqttc = mqtt.Client(
client_id="", clean_session=True, userdata=None, protocol=3)
self.mqttc.on_connect = self.on_connect
self.mqttc.on_publish = self.on_publish
self.mqttc.on_subscribe = self.on_subscribe
self.mqttc.on_message = self.on_message
# Uncomment to enable debug messages
self.mqttc.on_log = self.on_log
self.mqttc.connect(self.ip, self.port)
# mqttc.connect('ts7800-15.ece.ncsu.edu',8883)
self.mqttc.loop_start()
self.init_event.wait()
self.currtime = time.time()
# self.observer.schedule(event_handler, 'json/' , recursive=True)
# self.observer.start()
while True:
time.sleep(1)
try:
# self.update_event.wait()
if((time.time() - self.currtime) > TIME_CONSTANT):
log("Trying to Acquire JSON file", DEBUG,(datetime.datetime.now()-self.start_time))
#self.json_lock.acquire()
try:
log("Check JSON for Update", DEBUG, (datetime.datetime.now()-self.start_time))
self.device.update_object()
except ValueError as e:
print(e)
continue
if(self.device.updated()):
log("Sending Commands", EVENT, (datetime.datetime.now()-self.start_time))
self.send_commands()
self.currtime = time.time()
#self.json_lock.release()
# self.update_event.clear()
except KeyboardInterrupt:
self.mqttc.loop_stop()
self.mqttc.disconnect()
print("Exited Process")
break
|
[
"jcork2@mst.edu"
] |
jcork2@mst.edu
|
0e8912bd96054715c06e4b8c14fbf301e2595ac2
|
f3248eb4020f60590443778df0c2148cad730445
|
/src/entitykb/crypto.py
|
ec6002825a9b66ddc65bc22d99347496d21926e3
|
[
"MIT"
] |
permissive
|
mehdibenamorr/entitykb
|
1b380a94df333253fd9e19653fe1d4f3f9400d1e
|
61cf346a24f52fd8c1edea8827a816284ed6ecaf
|
refs/heads/master
| 2023-06-28T02:31:40.074794
| 2021-07-28T14:35:54
| 2021-07-28T14:35:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,622
|
py
|
import datetime
import secrets
from pathlib import Path

import jwt
from passlib.context import CryptContext
from smart_open import smart_open


# secret
def generate_secret(length=32):
    return secrets.token_hex(length)


# password
one_way_hash = CryptContext(schemes=["bcrypt"], deprecated="auto")

word_set = None


def get_words():
    global word_set
    if word_set is None:
        word_file = Path(__file__).parent / "deps" / "eff-long.txt.gz"
        word_set = smart_open(word_file, "r").read().splitlines()
    return word_set


def generate_password(count=4):
    password = "-".join(secrets.choice(get_words()) for _ in range(count))
    return password


def verify_password(password, hashed_password):
    return one_way_hash.verify(password, hashed_password)


def hash_password(password):
    return one_way_hash.hash(password)


# Javascript Web Tokens (JWT)
ALGORITHM = "HS256"
JWT_EXPIRE = "exp"
JWT_NOT_BEFORE = "nbf"
JWT_SUBJECT = "sub"


def decode_jwt_token(token: str, secret_key: str) -> str:
    payload = jwt.decode(token, secret_key, algorithms=[ALGORITHM])
    return payload[JWT_SUBJECT]


def encode_jwt_token(subject: str, secret_key: str) -> str:
    expires_in = datetime.timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
    expires = (datetime.datetime.utcnow() + expires_in).timestamp()
    payload = {JWT_SUBJECT: subject, JWT_EXPIRE: int(expires)}
    encoded = jwt.encode(payload, secret_key, algorithm=ALGORITHM)
    encoded_str = encoded.decode("utf-8")
    return encoded_str


ACCESS_TOKEN_EXPIRE_DAYS = 14
ACCESS_TOKEN_EXPIRE_MINUTES = (60 * 24) * ACCESS_TOKEN_EXPIRE_DAYS
|
[
"ian@genomoncology.com"
] |
ian@genomoncology.com
|
1d80569ab6308b0a536c1b107e3b293f42597286
|
5d41c8a9c6edd91ca484021f92b78f36e950de8e
|
/chapter_4.4.py
|
df03ab48f8d15c7a6ee190d8e28a907059164370
|
[] |
no_license
|
dorabelme/Python-Programming-for-the-Absolute-Beginner
|
1242d3754d3bfdae519d2eb8486d359038b07fae
|
53a3f2abd76b255908d4a9b39cd89d447df6aef0
|
refs/heads/master
| 2020-05-02T23:12:56.089630
| 2019-03-28T20:11:32
| 2019-03-28T20:11:32
| 178,263,050
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 576
|
py
|
# Word Jumble
#
# The computer picks a random word and then "jumbles" it
# The player has to guess the original word

import random

WORDS = ("python", "jumble", "easy", "difficult", "answer", "xylophone")
word = random.choice(WORDS)
correct = word

print("There are", len(correct), "letters in the word.")

for i in range(5):
    tries_1 = input("Guess a letter: ")
    if tries_1 in correct:
        print("Yes")
    else:
        print("No")

final_guess = input("Now you have to guess the word! ")
if final_guess == correct:
    print("You got this!")
else:
    print("You failed here!")
|
[
"contact.dorabelme@gmail.com"
] |
contact.dorabelme@gmail.com
|
028fa500181f6106d50fa5f983b86b8309a24421
|
892b4d1b7d415d6fbdba57e9e3e7d777b63b3264
|
/doc16.py
|
663fab40e8f7a8c6624c8b41d5ba0213866722d6
|
[] |
no_license
|
mohseenxor/python-example
|
ea613485c8f855ce774c65d4e839ff1a8a220387
|
c39f936c554675eec4c92ffa10947448852da4f1
|
refs/heads/master
| 2022-07-28T09:27:54.721573
| 2020-05-21T05:03:02
| 2020-05-21T05:03:02
| 265,751,402
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 186
|
py
|
import math

num = int(input("please enter the number to cal factorial of:"))
try:
    result=math.factorial(num)
    print(result)
except:
    print("cannot compute the factorial of -ve number")
|
[
"mohseenkhan.pathan@xoriant.com"
] |
mohseenkhan.pathan@xoriant.com
|
3a84080e5495c75c744f14e91f2f7f3500efb62d
|
1c965aeda99fdda2047d80dcffa4f57f0b435ffb
|
/Database/database.py
|
ddcc90ba98f2d7ab816e0225e6aa608962c31d3a
|
[] |
no_license
|
therealcorwin/P5_donnees_publiques_OpenFoodFacts
|
9cd94122060e1935479dd2c8b7354e7c7e054ebd
|
3d94e35572e5cd2aff2066089e18f23b413a2275
|
refs/heads/master
| 2023-04-03T09:59:51.803089
| 2019-03-06T08:31:07
| 2019-03-06T08:31:07
| 174,113,112
| 0
| 0
| null | 2023-03-20T13:17:10
| 2019-03-06T09:23:40
|
Python
|
UTF-8
|
Python
| false
| false
| 7,994
|
py
|
# -*- PipEnv -*-
# -*- coding: Utf-8 -*-
import records as rec
from Config import constants as conf
from Database.database_user import DataBaseUser
from Api.search_category import ApiCollectingData
class DataBaseCreator:
"""
This class has the responsibility of structuring
the database, and inserting the data collection of the API
"""
def __init__(self, db):
""" Connect to Mysql database from the class DataBaseUser() """
self.db = db
self.database = DataBaseUser(self.db)
def drop_tables(self):
""" Delete existing tables, to collect new data """
self.db.query(""" DROP TABLE IF EXISTS
Categories, Categories_summary,
Products, Products_categories_key,
Products_categories_summary_key,
Products_stores, Stores, Favorites;
""")
def create_table_product(self):
""" Create table Products """
self.db.query(""" CREATE TABLE IF NOT EXISTS Products (
barcode BIGINT UNSIGNED UNIQUE PRIMARY KEY,
name_product VARCHAR(150),
grade CHAR(1),
web_site VARCHAR(255));
""")
def create_table_category(self):
""" Create table category """
self.db.query(""" CREATE TABLE IF NOT EXISTS Categories (
id BIGINT UNSIGNED PRIMARY KEY AUTO_INCREMENT,
category VARCHAR(125) UNIQUE);
""")
self.db.query(""" CREATE TABLE IF NOT EXISTS Categories_summary (
id BIGINT UNSIGNED PRIMARY KEY AUTO_INCREMENT,
c_category VARCHAR(125) UNIQUE);
""")
def create_table_store(self):
""" Create table stores """
self.db.query(""" CREATE TABLE IF NOT EXISTS Stores (
id MEDIUMINT UNSIGNED PRIMARY KEY AUTO_INCREMENT,
store VARCHAR(150) UNIQUE);
""")
def create_table_subkey(self):
""" Creating to the associate index table """
self.db.query(""" CREATE TABLE IF NOT EXISTS Products_categories_key (
id MEDIUMINT UNSIGNED PRIMARY KEY AUTO_INCREMENT,
product_id BIGINT REFERENCES Products(barcode),
category_id MEDIUMINT REFERENCES Category(id));
""")
self.db.query(""" CREATE TABLE IF NOT EXISTS
Products_categories_summary_key (
id MEDIUMINT UNSIGNED PRIMARY KEY AUTO_INCREMENT,
product_id BIGINT REFERENCES Products(barcode),
c_category_id MEDIUMINT REFERENCES
Categories_summary(id));
""")
self.db.query(""" CREATE TABLE IF NOT EXISTS Products_stores (
id MEDIUMINT UNSIGNED PRIMARY KEY AUTO_INCREMENT,
product_id BIGINT REFERENCES Products(barcode),
store_id MEDIUMINT REFERENCES Stores(id));
""")
def create_favorites_table(self):
""" Create the favorites table """
self.db.query(""" CREATE TABLE IF NOT EXISTS Favorites (
id_product BIGINT REFERENCES Products(barcode),
id_substitute BIGINT REFERENCES Products(barcode),
PRIMARY KEY (id_product, id_substitute));
""")
def insert_product(self, id, name, grade, url, *args):
""" Insert the product data in the table"""
self.db.query(""" INSERT INTO Products
(barcode, name_product, grade, web_site)
VALUES
(:id, :name, :grade, :url)
ON DUPLICATE KEY UPDATE barcode=:id;
""", id=id, name=name, grade=grade, url=url)
def insert_category(self, id, name, grade, url,
categories, sub_category, stores, *args):
""" Insert the category list data in the table"""
for category in categories:
self.db.query(""" INSERT INTO Categories(category)
VALUES
(:category)
ON DUPLICATE KEY UPDATE category=:category;
""", category=category)
self.db.query(""" INSERT INTO Categories_summary(c_category)
VALUES
(:c_category)
ON DUPLICATE KEY UPDATE c_category=:c_category;
""", c_category=sub_category)
self.db.query(""" INSERT INTO Products_categories_key
(product_id, category_id) VALUES
(:barcode, (SELECT id FROM Categories
WHERE category=:category_id));
""", barcode=id, category_id=category)
self.db.query(""" INSERT INTO Products_categories_summary_key
(product_id, c_category_id) VALUES
(:barcode, (SELECT id FROM Categories_summary
WHERE c_category=:category_id));
""", barcode=id, category_id=sub_category)
def insert_stores(self, id, name, grade, url,
ategories, sub_category, stores, *args):
""" Insert the store list data in the table"""
for store in stores:
self.db.query(""" INSERT INTO Stores(store)
VALUES (:store)
ON DUPLICATE KEY UPDATE store=:store;
""", store=store)
self.db.query(""" INSERT INTO Products_stores
(product_id, store_id) VALUES (:barcode,
(SELECT id FROM Stores WHERE store=:store_id));
""", barcode=id, store_id=store)
def create_tables(self):
""" Execute the creating table """
self.drop_tables()
print('\n', conf.DECO, '\n', conf.SPACE_ADJUST,
"**** Deleting tables success ****",
'\n', conf.DECO, '\n')
self.create_table_product()
self.create_table_category()
self.create_table_store()
self.create_table_subkey()
self.create_favorites_table()
print('\n', conf.DECO, '\n', conf.SPACE_ADJUST,
"**** Creating table success ****",
'\n', conf.DECO, '\n')
return True
def insert_rows(self, products):
""" Completion the data row per row """
for product in products:
self.insert_product(*product)
self.insert_category(*product)
self.insert_stores(*product)
print('\n', conf.DECO, '\n', conf.SPACE_ADJUST,
"**** Insert data success *****",
'\n', conf.DECO, '\n')
return True
def main():
""" Initialize the connection """
db = rec.Database(
f"mysql+mysqlconnector://{conf.USER}:{conf.PASSWORD}@localhost/"
f"{conf.DATABASE}?charset=utf8mb4")
creating = DataBaseCreator(db)
# Load the API class and connecting in the API
downloader = ApiCollectingData()
# Load the API connexion
connect = downloader.connect_and_harvest()
# Harvest OPFF's request
final_products = downloader.format_final_response(connect)
# Creating the necessary tables
creating.create_tables()
# Insert data
creating.insert_rows(final_products)
if __name__ == "__main__":
main()
|
[
"LyssProGm@gmail.com"
] |
LyssProGm@gmail.com
|
01257fc2ed40115317bdb68fea86852b0aa4ef26
|
6bd21a64c5fbeba1682c3e65221f6275a44c4cd5
|
/vega/algorithms/nas/modnas/core/params/torch.py
|
9e51d3a56271c36ac7b805c9045a7e136db28898
|
[
"MIT"
] |
permissive
|
yiziqi/vega
|
e68935475aa207f788c849e26c1e86db23a8a39b
|
52b53582fe7df95d7aacc8425013fd18645d079f
|
refs/heads/master
| 2023-08-28T20:29:16.393685
| 2021-11-18T07:28:22
| 2021-11-18T07:28:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,879
|
py
|
# -*- coding:utf-8 -*-

# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.

"""Torch tensor parameter."""
import torch
from .base import Param
from modnas.registry.params import register
from modnas.core.param_space import ParamSpace
from torch.nn.parameter import Parameter
from typing import Optional, Callable


def _default_tensor_sampler(shape: int, init_ratio: float = 1e-3) -> Parameter:
    return torch.nn.Parameter(init_ratio * torch.randn(shape))


@register
class TorchTensor(Param):
    """Torch tensor parameter class."""

    TYPE = 'T'

    def __init__(
        self, shape: int, sampler: Optional[Callable] = None, name: Optional[str] = None,
        space: Optional[ParamSpace] = None, on_update: Optional[Callable] = None
    ) -> None:
        super().__init__(name, space, on_update)
        self.sample = _default_tensor_sampler if sampler is None else sampler
        self.shape = shape
        self.val = self.sample(self.shape)
        self._length = None

    def extra_repr(self) -> str:
        """Return extra representation string."""
        return 'shape={}'.format(self.shape)

    def is_valid(self, value):
        """Return if the value is valid."""
        return isinstance(value, torch.Tensor)

    def value(self) -> Parameter:
        """Return parameter value."""
        if self.val is None:
            self.val = self.sample(self.shape)
        return self.val

    def set_value(self, value: Parameter) -> None:
        """Set parameter value."""
        self.val = value
|
[
"zhangjiajin@huawei.com"
] |
zhangjiajin@huawei.com
|
0cc7170517859af17328f8430a10f0b600a13273
|
d278e624f22039eada08d46e73554785aafefdc8
|
/svdtest.py
|
4b8f07068592015adf58d7c3d9122038602a8ca6
|
[] |
no_license
|
rohithnamani/Predicting-movies-using-Matrix-factorization-method
|
8d10d934e402d9df4345a4c770f6a16c9f611e99
|
eadb208d9b26afef23789da7811e7998ce581ba4
|
refs/heads/master
| 2020-04-24T00:20:29.685164
| 2019-05-05T17:11:25
| 2019-05-05T17:11:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,543
|
py
|
"""
Test file to check the working of recommender
With 600 principal components preserving 92% variance of data.
"""
import csv
import numpy as np
def SVD(M):
Mt=np.transpose(M)
prd=np.dot(M,Mt)
#Eigen Value Decomposition
eigenvalue,eigenvec=np.linalg.eig(prd)
#Indirect sort on eigenvalue to find out the proper indices, the same can
#be used with corresponding eigenvectors
sortindex=eigenvalue.argsort()[::-1]
#Sort Eigen values
eigenvalue=eigenvalue[sortindex]
#To calculate sigma
sigma=np.sqrt(abs(eigenvalue))
sigma=np.around(sigma,decimals=2)
dim=600
sigma=sigma[0:dim]
#To Calculate U - we had earlier calculated eigenvec for MMt
#Sort and reduce U to nXdim
U=eigenvec[:,sortindex]
U=U[:,0:dim]
U=np.real(U)
U=np.around(U,decimals=2)
#To Calculate V
prd=np.dot(Mt,M)
eigenvalue,eigenvec=np.linalg.eig(prd)
sortindex=eigenvalue.argsort()[::-1]
V=eigenvec[:,sortindex]
V=V[:,0:dim]
V=np.real(V)
V=np.around(V,decimals=2)
return U,sigma,V
def query(q,V):
#find q*v, w
prd=np.dot(q,V)
Vt=np.transpose(V)
other=np.dot(prd,Vt)
return other
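# Minimal illustrative check of the projection performed by query() (toy values, not
# from the MovieLens data): with an orthonormal V the round trip q -> (q V) V^T returns
# q unchanged; with a truncated V it instead fills in estimates for unrated items.
assert np.allclose(query(np.array([5.0, 0.0]), np.eye(2)), [5.0, 0.0])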
#To Prepare list of movies - for recommending
fileh=open('u.item','r')
reader = csv.reader(fileh, delimiter='|')
movienames=list()
# The list of all the movies with movieid-1 as list index
for row in reader:
movienames.append(row[1])
num_users=943
num_movies=1682
#To Prepare matrix M
fp2=open('u.data','r')
reader = csv.reader(fp2, delimiter='\t')
m=list()
for j in range(num_users):
m.append([0]*num_movies)
for row in reader:
m[int(row[0])-1][int(row[1])-1]=float(row[2])
M=np.array(m)
U,sigma,V=SVD(M)
#To predict movies for a user.
uid=int(input("Enter userid"))
q=m[uid-1]
#Get the list of movies watched by userid
watch=m[uid-1]
watched=list()
for i in range(len(watch)):
if(watch[i]==5):
watched.append(i)
print('List of movies User rated-5\n',watched)
#Predict ratings for this user and sort the prediction indices in descending order
predict=query(q,V)
idx=predict.argsort()[::-1]
predicted=predict[idx]
#print(predict[0])
i=0
j=0
wp=list()
while(j<100):
if(predict[idx[j]]>4.5):
wp.append(idx[j])
j+=1
print('List of movies with prediction>4.5')
print(np.sort(wp).tolist())
|
[
"noreply@github.com"
] |
rohithnamani.noreply@github.com
|
076f711788bef03866cb89df6471ef751a2fcb9a
|
643d307a94510783a2de74d80b7a2dc3ebcd93ba
|
/docker-images/bb/bin/dsync
|
fd9062e8c2c415d7da2da676852a14bb3f8db6f1
|
[
"Unlicense"
] |
permissive
|
NodyHub/docker-k8s-resources
|
f7da51847bf5ef30cd87436286d2bc4a467403eb
|
878c0a1d27ed4fbf6e8ad1c8d94c076c1a0699ad
|
refs/heads/master
| 2022-11-25T18:11:33.729341
| 2022-11-14T10:34:10
| 2022-11-14T10:34:10
| 255,587,236
| 19
| 13
| null | 2020-10-18T07:37:12
| 2020-04-14T11:11:56
|
Shell
|
UTF-8
|
Python
| false
| false
| 3,638
|
#!/bin/python
import socket
import ssl
import sys
import select
#context = ssl.create_default_context()
#request = "POST / HTTP/1.1\r\n"
#request += "Host: %s\r\n" % hostname
#request += "Content-Length: 4\r\n"
#request += "Transfer-Encoding: Chunked\r\n"
#request += "\r\n"
#sanitize = "POST / HTTP/1.1\r\n"
#sanitize += "Host: %s\r\n" % hostname
#sanitize += "Connection: Close\r\n"
#sanitize += "Transfer-Encoding: Chunked\r\n"
#sanitize += "\r\n"
def test_clte(hostname, clte, timeout=1):
context = ssl._create_unverified_context()
sock = socket.create_connection((hostname, 443))
ssock = context.wrap_socket(sock, server_hostname=hostname)
request = "POST / HTTP/1.1\r\n"
request += "Host: %s\r\n" % hostname
#request += "Content-Length: 4\r\n"
#request += "Transfer-Encoding: Chunked\r\n"
request += (clte % 4)
request += "\r\n"
ssock.write(request.encode("utf8"))
ssock.write("1\r\nA\r\n".encode("utf8"))
ssock.write("Q".encode("utf8"))
#Waiting for response
r_list,_,_ = select.select([ssock],[],[],timeout)
if len(r_list) == 0:
print ("CLTE Timed Out", escape_clte(clte))
return True # Vulnerable
data = ssock.recv(1024)
#print(data.decode("utf8"))
return False
def test_tecl(hostname, clte,timeout=1):
context = ssl._create_unverified_context()
sock = socket.create_connection((hostname, 443))
ssock = context.wrap_socket(sock, server_hostname=hostname)
request = "POST / HTTP/1.1\r\n"
request += "Host: %s\r\n" % hostname
#request += "Content-Length: 6\r\n"
#request += "Transfer-Encoding: Chunked\r\n"
request += (clte % 6)
request += "\r\n"
ssock.write(request.encode("utf8"))
ssock.write("0\r\n\r\n".encode("utf8"))
ssock.write("Q".encode("utf8"))
#Waiting for response
r_list,_,_ = select.select([ssock],[],[],timeout)
if len(r_list) == 0:
print ("TECL Timed Out", escape_clte(clte))
return True # Vulnerable
data = ssock.recv(1024)
#print(data.decode("utf8"))
return False
white_spaces = ["", " ", "x"] + list(map(chr, range(0x21)))
def escape_clte(s):
for w in white_spaces:
if len(w) == 1 and w != " ":
s = s.replace(w, "\\x%02x" % ord(w))
return s
def replace(s, old, new_list):
s = s.split(old)
for i in range(len(s)-1):
for new in new_list:
yield old.join(s[:i+1]) + new + old.join(s[i+1:])
def replace_white_spaces(s):
for w in white_spaces:
yield w+s
yield s+w
for x in replace(s, " ", white_spaces):
yield x
for x in replace(s, "\r", white_spaces):
yield x
for x in replace(s, "\n", white_spaces):
yield x
def replace_dash(s):
dashes = ["_"]
for w in white_spaces:
dashes += ["-"+w, "_"+w]
for x in replace(s, "-", dashes):
yield x
def cases(s):
yield s.upper()
yield s.lower()
mutations = [
replace_white_spaces,
replace_dash,
]
cltefs = [
"Content-Length: %d\r\nTransfer-Encoding: Chunked\r\n",
"Transfer-Encoding: Chunked\r\nContent-Length: %d\r\n",
"Content-Length: %d\r\nTransfer-Encoding: asd\r\nTransfer-Encoding: Chunked\r\n",
"Content-Length: %d\r\nTransfer-Encoding: Chunked\r\nTransfer-Encoding: asd\r\n",
]
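# Illustrative aside (assumed example, not part of the original tool): each mutation
# yields header variants that a front-end and back-end parser may treat differently,
# e.g. the dash mutation produces an underscore form of the header name.
assert "Transfer_Encoding: Chunked\r\n" in list(replace_dash("Transfer-Encoding: Chunked\r\n"))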
if len(sys.argv) != 2:
print("usage: %s hostname" % sys.argv[0])
sys.exit(1)
hostname = sys.argv[1]
timeout = 5
for cltef in cltefs:
for mut in mutations:
for clte in mut(cltef):
test_clte(hostname, clte, timeout)
test_tecl(hostname, clte, timeout)
|
[
"github@janharrie.de"
] |
github@janharrie.de
|
|
4cc0108ea66142669d75e2f3bcc6ae8b27046567
|
48eb6ec0a25019978f53c4a6ccd5c52ddb848b59
|
/DL_Board_Games/policy_gradient.py
|
35271f6ad5a4596bc37178620dce7a7356aa5c8f
|
[] |
no_license
|
SadiCetinkaya/Deep_Learning_Fundamentals
|
2a7c66a7ce9799b2a450270435bccf0a9cc200f3
|
3dd38c2e446910062896f0cbb9295eb524606204
|
refs/heads/master
| 2020-04-13T05:02:26.720721
| 2018-12-24T11:58:31
| 2018-12-24T11:58:31
| 162,979,910
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,809
|
py
|
import collections
import numpy as np
import tensorflow as tf
from tic_tac_toe import play_game, random_player
HIDDEN_NODES = (100, 100, 100) # number of hidden layer neurons
INPUT_NODES = 3 * 3 # board size
BATCH_SIZE = 100 # every how many games to do a parameter update?
LEARN_RATE = 1e-4
OUTPUT_NODES = INPUT_NODES
PRINT_RESULTS_EVERY_X = 1000 # every how many games to print the results
input_placeholder = tf.placeholder("float", shape=(None, INPUT_NODES))
reward_placeholder = tf.placeholder("float", shape=(None,))
actual_move_placeholder = tf.placeholder("float", shape=(None, OUTPUT_NODES))
hidden_weights_1 = tf.Variable(tf.truncated_normal((INPUT_NODES, HIDDEN_NODES[0]), stddev=1. / np.sqrt(INPUT_NODES)))
hidden_weights_2 = tf.Variable(
tf.truncated_normal((HIDDEN_NODES[0], HIDDEN_NODES[1]), stddev=1. / np.sqrt(HIDDEN_NODES[0])))
hidden_weights_3 = tf.Variable(
tf.truncated_normal((HIDDEN_NODES[1], HIDDEN_NODES[2]), stddev=1. / np.sqrt(HIDDEN_NODES[1])))
output_weights = tf.Variable(tf.truncated_normal((HIDDEN_NODES[-1], OUTPUT_NODES), stddev=1. / np.sqrt(OUTPUT_NODES)))
hidden_layer_1 = tf.nn.relu(
tf.matmul(input_placeholder, hidden_weights_1) + tf.Variable(tf.constant(0.01, shape=(HIDDEN_NODES[0],))))
hidden_layer_2 = tf.nn.relu(
tf.matmul(hidden_layer_1, hidden_weights_2) + tf.Variable(tf.constant(0.01, shape=(HIDDEN_NODES[1],))))
hidden_layer_3 = tf.nn.relu(
tf.matmul(hidden_layer_2, hidden_weights_3) + tf.Variable(tf.constant(0.01, shape=(HIDDEN_NODES[2],))))
output_layer = tf.nn.softmax(
tf.matmul(hidden_layer_3, output_weights) + tf.Variable(tf.constant(0.01, shape=(OUTPUT_NODES,))))
policy_gradient = tf.reduce_sum(tf.reshape(reward_placeholder, (-1, 1)) * actual_move_placeholder * output_layer)
train_step = tf.train.RMSPropOptimizer(LEARN_RATE).minimize(-policy_gradient)
sess = tf.Session()
sess.run(tf.initialize_all_variables())
board_states, actual_moves, rewards = [], [], []
episode_number = 1
results = collections.deque()
def make_move(board_state, side):
board_state_flat = np.ravel(board_state)
board_states.append(board_state_flat)
probability_of_actions = sess.run(output_layer, feed_dict={input_placeholder: [board_state_flat]})[0]
try:
move = np.random.multinomial(1, probability_of_actions)
except ValueError:
# sometimes because of rounding errors we end up with probability_of_actions summing to greater than 1.
# so need to reduce slightly to be a valid value
move = np.random.multinomial(1, probability_of_actions / (sum(probability_of_actions) + 1e-7))
actual_moves.append(move)
move_index = move.argmax()
return move_index // 3, move_index % 3
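# Illustrative aside (toy probabilities, not part of the training code):
# np.random.multinomial(1, p) returns a one-hot draw, so argmax recovers the
# sampled board cell index.
assert np.random.multinomial(1, [0.1, 0.7, 0.2]).sum() == 1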
while True:
reward = play_game(make_move, random_player)
results.append(reward)
if len(results) > PRINT_RESULTS_EVERY_X:
results.popleft()
last_game_length = len(board_states) - len(rewards)
# we scale here so winning quickly is better than winning slowly and losing slowly is better than losing quickly
reward /= float(last_game_length)
rewards += ([reward] * last_game_length)
episode_number += 1
if episode_number % BATCH_SIZE == 0:
normalized_rewards = rewards - np.mean(rewards)
normalized_rewards /= np.std(normalized_rewards)
sess.run(train_step, feed_dict={input_placeholder: board_states,
reward_placeholder: normalized_rewards,
actual_move_placeholder: actual_moves})
# clear batches
del board_states[:]
del actual_moves[:]
del rewards[:]
if episode_number % PRINT_RESULTS_EVERY_X == 0:
print("episode: %s win_rate: %s" % (episode_number, 0.5 + sum(results) / (PRINT_RESULTS_EVERY_X * 2.)))
|
[
"cetinkaya.sadi@gmail.com"
] |
cetinkaya.sadi@gmail.com
|
9649113ecdc56ad32d934da6ba12724b88237bf0
|
fb35dbcbcf0ba811032a6aaf451aa4bdacf7dc24
|
/41.缺失的第一个正数.py
|
bd0f466ce2e4185a91138ae91c3dcc7bace0b59b
|
[] |
no_license
|
wangkai997/leetcode
|
4bf00ac327249319fcbc2350cf1a0734896bb871
|
43d23f8465232182ef95eb4587386aed93847de7
|
refs/heads/master
| 2023-07-28T18:59:12.439360
| 2021-09-12T17:29:54
| 2021-09-12T17:29:54
| 345,692,039
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 882
|
py
|
#
# @lc app=leetcode.cn id=41 lang=python3
#
# [41] First Missing Positive
#
# @lc code=start
from typing import List
class Solution:
def firstMissingPositive(self, nums: List[int]) -> int:
def swap(nums,index1,index2):
nums[index1],nums[index2] = nums[index2],nums[index1]
n = len(nums)
for i in range(n):
while 1 <= nums[i]<=n and nums[i] != nums[nums[i]-1]:
swap(nums,nums[i]-1,i)
for i in range(n):
if i != nums[i]-1:
return i+1
return n+1
# n = len(nums)
# for i in range(n):
# while 1 <= nums[i] <= n and nums[nums[i] - 1] != nums[i]:
# nums[nums[i] - 1], nums[i] = nums[i], nums[nums[i] - 1]
# for i in range(n):
# if nums[i] != i + 1:
# return i + 1
# return n + 1
# @lc code=end
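# Minimal sanity check of the in-place index-swapping approach above (illustrative
# values; runs only when this file is executed directly):
if __name__ == "__main__":
    assert Solution().firstMissingPositive([3, 4, -1, 1]) == 2
    assert Solution().firstMissingPositive([1, 2, 0]) == 3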
|
[
"wk15@mail.ustc.edu.cn"
] |
wk15@mail.ustc.edu.cn
|
8addf74d122966e1108f617dd09bfe39aa164b1f
|
bf434761157d976483ffb69056e8fab26a46cfe0
|
/merge_sort/merge_sort.py
|
3583fd3849f669cccd41953af99cd8095ca04a7c
|
[] |
no_license
|
sshukla31/misc_algos
|
ebfc690417b89b64bfd23aa20e95695163c0e9cb
|
2d95bcb5e4d6308665bac124a2cdf14ead310920
|
refs/heads/master
| 2020-05-18T15:11:42.533461
| 2017-01-07T02:16:08
| 2017-01-07T02:16:08
| 24,486,319
| 0
| 0
| null | 2015-09-04T19:08:21
| 2014-09-26T04:34:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,287
|
py
|
def merge_list(left, right):
"""
Merge the left and right lists by comparing their elements in order.
"""
result = []
i = 0
j = 0
while(i < len(left) or j < len(right)):
# Case 1: left and right list are non-empty
if(i < len(left) and j < len(right)):
if(left[i] <= right[j]):
result.append(left[i])
i += 1
else:
result.append(right[j])
j += 1
# Case 2: left list is non-empty
elif(i < len(left)):
result.append(left[i])
i += 1
# Case 3: right list is non-empty
elif(j < len(right)):
result.append(right[j])
j += 1
return result
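# Quick illustrative check of merge_list on two already-sorted runs (assumed example,
# not part of the original script):
assert merge_list([1, 4, 10], [0, 2, 99]) == [0, 1, 2, 4, 10, 99]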
def merge_sort(table):
"""
Find the midpoint, divide the table into two halves, sort each recursively, and merge.
"""
length_table = len(table)
# Stop Condition
if (length_table <= 1):
return table
else:
# Divide
mid = length_table // 2
# Conquer
sorted_left = merge_sort(table[: mid])
sorted_right = merge_sort(table[mid : ])
# Merge
return merge_list(sorted_left, sorted_right)
if __name__ == '__main__':
table = [5, 1, 4, 2, 10, 99, 0]
print(merge_sort(table=table))
|
[
"sandeepshukla31@gmail.com"
] |
sandeepshukla31@gmail.com
|
a4ba0dfc4b298c001e05f14b51f922ff7771eea3
|
c6feea761c286347a60e3d629ed1a93b8834b3d3
|
/torch/legacy/nn/WeightedEuclidean.py
|
cc71344a46af63d8137557ca251447079ad67fec
|
[
"BSD-2-Clause"
] |
permissive
|
drdarshan/pytorch-dist
|
904d51d29cd4e008cd4a2b46a9126ebdd11fc0f7
|
6b821ece22e4d67dbe2d2f899672a08822769024
|
refs/heads/master
| 2020-04-06T04:00:42.918913
| 2016-11-09T02:50:17
| 2016-11-09T02:50:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,455
|
py
|
import math
import torch
from .Module import Module
class WeightedEuclidean(Module):
def __init__(self, inputSize, outputSize):
super(WeightedEuclidean, self).__init__()
self.weight = torch.Tensor(inputSize, outputSize)
self.gradWeight = torch.Tensor(inputSize, outputSize)
# each template (output dim) has its own diagonal covariance matrix
self.diagCov = torch.Tensor(inputSize, outputSize)
self.gradDiagCov = torch.Tensor(inputSize, outputSize)
self.reset()
self._diagCov = self.output.new()
# TODO: confirm
self.fastBackward = False
self._input = None
self._weight = None
self._expand = None
self._expand2 = None
self._expand3 = None
self._repeat = None
self._repeat2 = None
self._repeat3 = None
self._div = None
self._output = None
self._expand4 = None
self._gradOutput = None
self._sum = None
def reset(self, stdv=None):
if stdv is not None:
stdv = stdv * math.sqrt(3)
else:
stdv = 1. / math.sqrt(self.weight.size(1))
self.weight.uniform_(-stdv, stdv)
self.diagCov.fill_(1)
def _view(self, res, src, *args):
if src.is_contiguous():
res.set_(src.view(*args))
else:
res.set_(src.contiguous().view(*args))
def updateOutput(self, input):
# lazy-initialize
self._diagCov = self._diagCov or self.output.new()
self._input = self._input or input.new()
self._weight = self._weight or self.weight.new()
self._expand = self._expand or self.output.new()
self._expand2 = self._expand2 or self.output.new()
self._expand3 = self._expand3 or self.output.new()
self._repeat = self._repeat or self.output.new()
self._repeat2 = self._repeat2 or self.output.new()
self._repeat3 = self._repeat3 or self.output.new()
inputSize, outputSize = self.weight.size(0), self.weight.size(1)
# y_j = || c_j * (w_j - x) ||
if input.dim() == 1:
self._view(self._input, input, inputSize, 1)
self._expand.expand_as(self._input, self.weight)
self._repeat.resize_as_(self._expand).copy_(self._expand)
self._repeat.add_(-1, self.weight)
self._repeat.mul_(self.diagCov)
torch.norm(self.output, self._repeat, 2, 0)
self.output.resize_(outputSize)
elif input.dim() == 2:
batchSize = input.size(0)
self._view(self._input, input, batchSize, inputSize, 1)
self._expand = self._input.expand(batchSize, inputSize, outputSize)
# make the expanded tensor contiguous (requires lots of memory)
self._repeat.resize_as_(self._expand).copy_(self._expand)
self._weight = self.weight.view(1, inputSize, outputSize)
self._expand2 = self._weight.expand_as(self._repeat)
self._diagCov = self.diagCov.view(1, inputSize, outputSize)
self._expand3 = self._diagCov.expand_as(self._repeat)
if input.type() == 'torch.cuda.FloatTensor':
# TODO: this can be fixed with a custom allocator
# requires lots of memory, but minimizes cudaMallocs and loops
self._repeat2.resize_as_(self._expand2).copy_(self._expand2)
self._repeat.add_(-1, self._repeat2)
self._repeat3.resize_as_(self._expand3).copy_(self._expand3)
self._repeat.mul_(self._repeat3)
else:
self._repeat.add_(-1, self._expand2)
self._repeat.mul_(self._expand3)
torch.norm(self.output, self._repeat, 2, 1)
self.output.resize_(batchSize, outputSize)
else:
raise RuntimeError("1D or 2D input expected")
return self.output
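# Illustrative worked example (assumed values, not part of the module): for a single
# 1-D input x = [1, 2], template w_j = [0, 0] and diagonal covariance c_j = [1, 3],
# the output is ||c_j * (w_j - x)|| = sqrt(1**2 + 6**2) ~= 6.08.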
def updateGradInput(self, input, gradOutput):
if not self.gradInput:
return
self._div = self._div or input.new()
self._output = self._output or self.output.new()
self._expand4 = self._expand4 or input.new()
self._gradOutput = self._gradOutput or input.new()
if not self.fastBackward:
self.updateOutput(input)
inputSize, outputSize = self.weight.size(0), self.weight.size(1)
"""
dy_j -2 * c_j * c_j * (w_j - x) c_j * c_j * (x - w_j)
---- = -------------------------- = ---------------------
dx 2 || c_j * (w_j - x) || y_j
"""
# to prevent div by zero (NaN) bugs
self._output.resize_as_(self.output).copy_(self.output).add_(1e-7)
self._view(self._gradOutput, gradOutput, gradOutput.size())
torch.div(self._div, gradOutput, self._output)
if input.dim() == 1:
self._div.resize_(1, outputSize)
self._expand4 = self._div.expand_as(self.weight)
if input.type() == 'torch.cuda.FloatTensor':
self._repeat2.resize_as_(self._expand4).copy_(self._expand4)
self._repeat2.mul_(self._repeat)
else:
self._repeat2.mul_(self._repeat, self._expand4)
self._repeat2.mul_(self.diagCov)
torch.sum(self.gradInput, self._repeat2, 1)
self.gradInput.resize_as_(input)
elif input.dim() == 2:
batchSize = input.size(0)
self._div.resize_(batchSize, 1, outputSize)
self._expand4 = self._div.expand(batchSize, inputSize, outputSize)
if input.type() == 'torch.cuda.FloatTensor':
self._repeat2.resize_as_(self._expand4).copy_(self._expand4)
self._repeat2.mul_(self._repeat)
self._repeat2.mul_(self._repeat3)
else:
torch.mul(self._repeat2, self._repeat, self._expand4)
self._repeat2.mul_(self._expand3)
torch.sum(self.gradInput, self._repeat2, 2)
self.gradInput.resize_as_(input)
else:
raise RuntimeError("1D or 2D input expected")
return self.gradInput
def accGradParameters(self, input, gradOutput, scale=1):
inputSize, outputSize = self.weight.size(0), self.weight.size(1)
"""
dy_j 2 * c_j * c_j * (w_j - x) c_j * c_j * (w_j - x)
---- = -------------------------- = ---------------------
dw_j 2 || c_j * (w_j - x) || y_j
dy_j 2 * c_j * (w_j - x)^2 c_j * (w_j - x)^2
---- = ----------------------- = -----------------
dc_j 2 || c_j * (w_j - x) || y_j
#"""
# assumes a preceding call to updateGradInput
if input.dim() == 1:
self.gradWeight.add_(-scale, self._repeat2)
self._repeat.div_(self.diagCov)
self._repeat.mul_(self._repeat)
self._repeat.mul_(self.diagCov)
if input.type() == 'torch.cuda.FloatTensor':
self._repeat2.resize_as_(self._expand4).copy_(self._expand4)
self._repeat2.mul_(self._repeat)
else:
torch.mul(self._repeat2, self._repeat, self._expand4)
self.gradDiagCov.add_(self._repeat2)
elif input.dim() == 2:
self._sum = self._sum or input.new()
torch.sum(self._sum, self._repeat2, 0)
self._sum.resize_(inputSize, outputSize)
self.gradWeight.add_(-scale, self._sum)
if input.type() == 'torch.cuda.FloatTensor':
# requires lots of memory, but minimizes cudaMallocs and loops
self._repeat.div_(self._repeat3)
self._repeat.mul_(self._repeat)
self._repeat.mul_(self._repeat3)
self._repeat2.resize_as_(self._expand4).copy_(self._expand4)
self._repeat.mul_(self._repeat2)
else:
self._repeat.div_(self._expand3)
self._repeat.mul_(self._repeat)
self._repeat.mul_(self._expand3)
self._repeat.mul_(self._expand4)
torch.sum(self._sum, self._repeat, 0)
self._sum.resize_(inputSize, outputSize)
self.gradDiagCov.add_(scale, self._sum)
else:
raise RuntimeError("1D or 2D input expected")
def type(self, type=None, tensorCache=None):
if type:
# prevent premature memory allocations
self._input = None
self._output = None
self._gradOutput = None
self._weight = None
self._div = None
self._sum = None
self._expand = None
self._expand2 = None
self._expand3 = None
self._expand4 = None
self._repeat = None
self._repeat2 = None
self._repeat3 = None
return super(WeightedEuclidean, self).type(type, tensorCache)
def parameters(self):
return [self.weight, self.diagCov], [self.gradWeight, self.gradDiagCov]
def accUpdateGradParameters(self, input, gradOutput, lr):
gradWeight = self.gradWeight
gradDiagCov = self.gradDiagCov
self.gradWeight = self.weight
self.gradDiagCov = self.diagCov
self.accGradParameters(input, gradOutput, -lr)
self.gradWeight = gradWeight
self.gradDiagCov = gradDiagCov
|
[
"adam.paszke@gmail.com"
] |
adam.paszke@gmail.com
|
fefdfe40f2aca0d879b50f15b39baebb1bcc82d9
|
4d99350a527a88110b7bdc7d6766fc32cf66f211
|
/OpenGLCffi/GL/EXT/ARB/ES3_2_compatibility.py
|
ceaf8d75612d44872e51b4093a42ac9b70cd120a
|
[
"MIT"
] |
permissive
|
cydenix/OpenGLCffi
|
e790ef67c2f6c9877badd5c38b7d58961c8739cd
|
c78f51ae5e6b655eb2ea98f072771cf69e2197f3
|
refs/heads/master
| 2021-01-11T07:31:10.591188
| 2017-04-17T11:04:55
| 2017-04-17T11:04:55
| 80,312,084
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 209
|
py
|
from OpenGLCffi.GL import params
@params(api='gl', prms=['minX', 'minY', 'minZ', 'minW', 'maxX', 'maxY', 'maxZ', 'maxW'])
def glPrimitiveBoundingBoxARB(minX, minY, minZ, minW, maxX, maxY, maxZ, maxW):
pass
|
[
"cdenizol@gmail.com"
] |
cdenizol@gmail.com
|
dfbe54b21a82693c1f18b608653760f5d404f818
|
d8b68b17502771003293bcefae6ac890b50b1073
|
/Backend/music_Controller/settings.py
|
eff796cddff63c231b5a39e3dd75990d4430de49
|
[] |
no_license
|
Bhavya0020/Party-Music-Controller-Using-React-And-Django
|
5ae1a2412f1da9b56f185708afbfc194d141919b
|
f6e7d33d59df551d2623b4e9e23acfd386bdcc00
|
refs/heads/main
| 2023-04-21T22:17:31.018776
| 2021-05-05T04:28:14
| 2021-05-05T04:28:14
| 362,118,105
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,202
|
py
|
"""
Django settings for music_Controller project.
Generated by 'django-admin startproject' using Django 2.2.20.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'u$ws=cb$i+xzn@tcyiq(@yw!c3^1k9!ij$sh*6(6f2-1$22#gy'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'api.apps.ApiConfig',
'rest_framework',
'frontend.apps.FrontendConfig'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'music_Controller.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'music_Controller.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
|
[
"bhavya.learncode@gmail.com"
] |
bhavya.learncode@gmail.com
|
6900c2fa1ec48b5e89d0c3b028d429e1f9f90a6e
|
74bb9d7f7430092b112fb1872539a418cbb39617
|
/create - コピー.py
|
ffe0c8c355d6e7375f2315b388e0b43771df9072
|
[] |
no_license
|
KenshiroSugiyama/automation_account_create
|
a5f1f76adece10dc618f31688441d9d0ef46ff05
|
e6b07bb22923e00caa25a643d8a8f6ced57b64c5
|
refs/heads/main
| 2023-07-15T19:03:12.939029
| 2021-08-27T10:16:35
| 2021-08-27T10:16:35
| 400,468,371
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,363
|
py
|
# coding: UTF-8
# Import PyAutoGUI
import pyautogui
import time
import webbrowser
# x = pyautogui.position()
# print(x)
#postman
# pyautogui.click(x=1277, y=1054)
#IE
# pyautogui.click(x=993, y=1054)
#personal_info_sheet
# pyautogui.click(x=243, y=22)
# API result sheet
# pyautogui.click(x=590, y=23)
# Launch Postman
pyautogui.click(x=258, y=1058,duration=1)
time.sleep(3)
pyautogui.typewrite(['p', 'o', 's', 't', 'm', 'a', 'n'])
time.sleep(3)
pyautogui.hotkey('enter')
time.sleep(40)
pyautogui.click(x=1225, y=805)
time.sleep(2)
pyautogui.click(x=963, y=408)
time.sleep(2)
for i in range(14):
# Open the personal_information sheet
webbrowser.open("https://docs.google.com/spreadsheets/d/1xLALVzxv9QzsQ7FOLtoEKGb-rYTRXzP-3w7_YS2FXfg/edit?ts=5ed48b49&pli=1#gid=1347913400")
time.sleep(10)
# Open the test result sheet
webbrowser.open("https://docs.google.com/spreadsheets/d/1EKNtThtt4YQXwO5r_T5gsrG8i0Z5K8M-UC5SkjUiIds/edit#gid=395302572")
time.sleep(10)
# Get personal information
pyautogui.click(x=243, y=22)
time.sleep(2)
for i in range(4):
pyautogui.hotkey('right')
time.sleep(0.5)
pyautogui.hotkey('down')
time.sleep(0.5)
pyautogui.hotkey('ctrl', 'shift','down')
time.sleep(0.5)
for i in range(2):
pyautogui.hotkey('ctrl', 'c')
time.sleep(1)
pyautogui.hotkey('down')
# Fill in the API test result sheet
pyautogui.click(x=590, y=23)
pyautogui.keyDown('ctrl')
pyautogui.keyDown('shift')
pyautogui.press('pagedown')
pyautogui.press('pagedown')
pyautogui.keyUp('ctrl')
pyautogui.keyUp('shift')
time.sleep(3)
for i in range(6):
pyautogui.hotkey('right')
time.sleep(0.5)
pyautogui.hotkey('ctrl','down')
time.sleep(0.5)
pyautogui.hotkey('down')
time.sleep(0.5)
x = pyautogui.locateOnScreen('suusiki.png' , confidence=0.7)
pyautogui.click(x)
time.sleep(0.5)
pyautogui.hotkey('ctrl','v')
time.sleep(0.5)
pyautogui.hotkey('enter')
# pyautogui.hotkey('ctrl', 'w')
# time.sleep(3)
# y = pyautogui.locateOnScreen('idou.png' , confidence=0.6)
# time.sleep(0.5)
# pyautogui.doubleClick(y)
# time.sleep(3)
# print("送る内容貼り付け")
# Launch Postman
time.sleep(2)
pyautogui.click(x=1237, y=1049)
time.sleep(1)
pyautogui.doubleClick(x=675, y=656)
time.sleep(2)
pyautogui.hotkey('ctrl','a')
time.sleep(0.6)
pyautogui.hotkey('del')
time.sleep(0.6)
pyautogui.hotkey('ctrl','v')
time.sleep(2)
pyautogui.click(x=1662, y=333)
time.sleep(15)
# Get request_id
x = pyautogui.locateOnScreen('request.png' , confidence=0.7)
pyautogui.tripleClick(x)
pyautogui.hotkey('ctrl', 'c')
time.sleep(0.6)
# pyautogui.click(x=1874, y=15)
# time.sleep(3)
# Launch Notepad and fix request_id
time.sleep(0.6)
pyautogui.click(x=258, y=1058,duration=1)
time.sleep(3)
pyautogui.typewrite(['m', 'e', 'm', 'o'])
time.sleep(3)
pyautogui.hotkey('enter')
time.sleep(5)
pyautogui.hotkey('ctrl', 'v')
time.sleep(0.6)
pyautogui.press('up')
time.sleep(0.6)
for i in range(19):
pyautogui.press('del')
time.sleep(0.6)
pyautogui.hotkey('ctrl', 'a')
time.sleep(0.6)
pyautogui.hotkey('right')
time.sleep(2)
for i in range(2):
pyautogui.hotkey('left')
time.sleep(0.6)
pyautogui.hotkey('del')
time.sleep(0.6)
pyautogui.hotkey('ctrl', 'a')
time.sleep(0.6)
pyautogui.hotkey('ctrl', 'c')
time.sleep(0.6)
pyautogui.hotkey('ctrl', 'w')
time.sleep(0.6)
pyautogui.hotkey('n')
# Paste request_id
time.sleep(1)
pyautogui.click(x=993, y=1054)
time.sleep(1)
pyautogui.click(x=590, y=23)
time.sleep(1)
pyautogui.click(x=590, y=23)
pyautogui.keyDown('ctrl')
pyautogui.keyDown('shift')
pyautogui.press('pageup')
pyautogui.press('pageup')
pyautogui.keyUp('ctrl')
pyautogui.keyUp('shift')
time.sleep(3)
pyautogui.hotkey('down')
time.sleep(0.6)
pyautogui.hotkey('right')
time.sleep(0.6)
pyautogui.hotkey('ctrl','down')
time.sleep(0.6)
pyautogui.hotkey('down')
time.sleep(0.6)
pyautogui.hotkey('ctrl', 'v')
# time.sleep(0.6)
# pyautogui.hotkey('ctrl', 'w')
# time.sleep(3)
# y = pyautogui.locateOnScreen('idou.png' , confidence=0.6)
# time.sleep(0.5)
# pyautogui.doubleClick(y)
time.sleep(3)
print("request_id貼り付け完了")
# Paste email address
pyautogui.click(x=243, y=22)
time.sleep(3)
# pyautogui.press('down')
time.sleep(0.5)
for i in range(2):
pyautogui.hotkey('down')
time.sleep(0.5)
for i in range(4):
pyautogui.hotkey('left')
time.sleep(0.5)
pyautogui.hotkey('ctrl', 'c')
time.sleep(0.5)
# pyautogui.hotkey('ctrl', 'w')
pyautogui.click(x=590, y=23)
time.sleep(1)
for i in range(9):
pyautogui.press('right')
time.sleep(0.5)
pyautogui.hotkey('ctrl','v')
time.sleep(0.5)
pyautogui.hotkey('enter')
time.sleep(0.5)
# pyautogui.hotkey('ctrl', 'w')
# time.sleep(3)
# k = pyautogui.locateOnScreen('idou.png' , confidence=0.6)
# time.sleep(0.5)
# pyautogui.doubleClick(y)
# time.sleep(6)
# After sending, fill in the spreadsheet
# webbrowser.open("https://docs.google.com/spreadsheets/d/1xLALVzxv9QzsQ7FOLtoEKGb-rYTRXzP-3w7_YS2FXfg/edit?ts=5ed48b49&pli=1#gid=1536068967")
# time.sleep(10)
pyautogui.click(x=243, y=22)
pyautogui.keyDown('ctrl')
pyautogui.keyDown('shift')
pyautogui.press('pageup')
pyautogui.keyUp('ctrl')
pyautogui.keyUp('shift')
time.sleep(3)
for i in range(11):
pyautogui.hotkey('right')
time.sleep(0.5)
pyautogui.hotkey('ctrl','down')
pyautogui.hotkey('down')
pyautogui.typewrite('Used')
pyautogui.hotkey('enter')
time.sleep(3)
pyautogui.hotkey('ctrl', 'w')
time.sleep(1)
pyautogui.hotkey('ctrl', 'w')
time.sleep(10)
print("--終了--")
|
[
"ken4600223@gmail.com"
] |
ken4600223@gmail.com
|
29e3dcf28d9875d7504d3ac59cdea11af21fd3bf
|
2845afa035e34c8b476c49caf2b4e2269ce88e69
|
/task_65.py
|
0614eb05daad31d0baa2e693ae283ee9b11e2080
|
[] |
no_license
|
Alexandr-Potapov/brain_strings
|
00cf41cedd04d781e39c31c6504253ee3cdbd99b
|
220fe6ecc0b33a882b70ab8fcceefe76b63c0d54
|
refs/heads/master
| 2022-04-25T14:12:41.592942
| 2020-04-28T16:27:08
| 2020-04-28T16:27:08
| 259,691,766
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 329
|
py
|
"""Дан текст. Найти сумму имеющихся в нем цифр."""
import re
def find_sum(text_from_user):
numbers_as_symbols = re.findall(r'\d', text_from_user)
return sum([int(number) for number in numbers_as_symbols])
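# Quick illustrative check (assumed example, not part of the original task):
assert find_sum('abc 12 def 3') == 6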
from_user = input('Enter text: ')
print(find_sum(from_user))
|
[
"aleksu007@gmail.com"
] |
aleksu007@gmail.com
|
ef86634c7ffd1a3b39bcf60de937866432db17a0
|
edb58186978e8b07a86eebefbd6cfbb69ab85eef
|
/astroimage/utilitywrappers/imagestack.py
|
87512ebbc3506e89e7d68053993be01427194562
|
[] |
no_license
|
jmontgom10/AstroImage
|
2aa8d2dcf4eebaa21f9073c347e85a4f5052e83b
|
799218cfb077b9e105207f2acc023b8409b21c5a
|
refs/heads/master
| 2021-01-19T01:37:54.732617
| 2017-05-21T21:07:27
| 2017-05-21T21:07:27
| 48,810,762
| 1
| 0
| null | 2017-04-25T15:18:20
| 2015-12-30T17:00:34
|
Python
|
UTF-8
|
Python
| false
| false
| 88,792
|
py
|
# Core imports
import copy
import warnings
import psutil
from functools import lru_cache
# Scipy imports
import numpy as np
from scipy import ndimage, signal
# Astropy imports
import astropy.units as u
from astropy.stats import sigma_clipped_stats
from astropy.modeling import models, fitting
from astropy.nddata import NDDataArray, StdDevUncertainty
from photutils import make_source_mask, data_properties
# AstroImage imports
from ..baseimage import BaseImage
from ..raw import RawBias, RawDark, RawFlat, RawScience
from ..reduced import MasterBias, MasterDark, MasterFlat, ReducedScience
from .astrometrysolver import AstrometrySolver
from .inpainter import Inpainter
# Define which functions, classes, objects, etc... will be imported via the command
# >>> from imagestack import *
__all__ = ['ImageStack']
class ImagePairOffsetGetter(object):
"""
A class for computing the offsets between two images.
This is essentially a helper class for the ImageStack alignment methods.
This class assumes that there is no significant rotation between the two
input images.
Properties
----------
image1 A BaseImage (or subclass) instance
image2 A BaseImage (or subclass) instance
Methods
-------
get_cross_correlation_integer_pixel_offset Computes the offset between
two images with integer pixel
accuracy
get_cross_correlation_subpixel_offset Computes the offset between
two images with subpixel
accuracy
"""
def __init__(self, image1, image2):
"""
Constructs the ImagePairOffsetGetter from two supplied images.
"""
if not (issubclass(type(image1), ReducedScience)
and(issubclass(type(image2), ReducedScience))):
raise TypeError('Both images must be `ReducedScience` instances for proper alignment')
# Store two copies of the image data arrays... the rest doesn't matter!
self.image1 = image1.copy()
self.image2 = image2.copy()
##################################
### START OF OTHER METHODS ###
##################################
def _replace_negatives_and_nans_with_medians(self):
"""
Replaces negatives and nans with non-problematic values.
Uses a median-filter to estimate the expected values at the location of
negative/nan pixels.
"""
# Loop through each of the arrays and perform the cleanup
arrayList = [self.image1.data, self.image2.data]
for array in arrayList:
# Find the negative pixels
negPix = np.nan_to_num(array) < 0
# If there are some negative pixels, then replace them with the
# median value of the WHOLE image
if np.sum(negPix.astype(int)) > 0:
# Find the indices of the bad and good pixels
badInds = np.where(negPix)
goodInds = np.where(np.logical_not(negPix))
# Replace the bad pixels with the median of the good pixels
array[badInds] = np.median(np.nan_to_num(array[goodInds]))
# Find the NaN pixels
nanPix = np.logical_not(np.isfinite(array))
# If there are some NaN pixels, then replace them with the local
# median value.
if np.sum(nanPix.astype(int)) > 0:
# Find the indices of the bad and good pixels
badInds = np.where(nanPix)
goodInds = np.where(np.logical_not(nanPix))
# Replace the bad pixels with the median of the good pixels
array[badInds] = np.median(np.nan_to_num(array[goodInds]))
# Compute the median filtered image
medianImage = ndimage.median_filter(array, size=(9,9))
# Replace the bad pixels with their local median
array[badInds] = medianImage[badInds]
# Return the fixed arrays to the user
return tuple(arrayList)
@staticmethod
def _fix_bad_correlation_image_pixels(corrImage):
"""Repairs any deviant pixels in the cross-correlation image"""
# Do a little post-processing to block out bad points in corrImage
# Copy the input for manipulation
outCorrImage = corrImage.copy()
# First filter with the median
medCorr = ndimage.median_filter(corrImage, size=(9,9))
# Compute sigma_clipped_stats of the correlation image
mean, median, stddev = sigma_clipped_stats(corrImage)
# Then check for significant deviations from median.
deviations = (np.abs(corrImage - medCorr) > 2.0*stddev)
# Count the number of masked neighbors for each pixel
neighborCount = np.zeros_like(corrImage, dtype=np.int16)
for dx1 in range(-1,2,1):
for dy1 in range(-1,2,1):
neighborCount += np.roll(np.roll(deviations, dy1, axis=0),
dx1, axis=1).astype(np.int16)
# Find isolated deviant pixels (these are no good!)
deviations = np.logical_and(deviations, neighborCount <= 4)
# If some deviating pixels were found, then replace them with their
# local median
if np.sum(deviations) > 0:
badInds = np.where(deviations)
outCorrImage[badInds] = medCorr[badInds]
return outCorrImage
@staticmethod
def _extract_integer_offset_from_correlation_image(corrImage):
"""
Extracts the image offset values from the cross correlation image
Parameters
----------
corrImage : numpy.ndarray
A clean (defect free) version of the cross correlation image
Returns
-------
dx, dy : int
The image offset values based on the cross-correlation image
"""
# Check for the maximum of the cross-correlation image function
correlationPeak = np.unravel_index(corrImage.argmax(), corrImage.shape)
dy, dx = np.array(correlationPeak) - np.array(corrImage.shape)//2
return int(dx), int(dy)
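# Illustrative note (hypothetical arrays, not part of the pipeline): correlating an
# image with a shifted copy of itself, e.g. corr = signal.fftconvolve(img2, img1[::-1, ::-1],
# mode='same') with img2 = np.roll(img1, (3, -5), axis=(0, 1)), yields a peak displaced
# from the array centre by the applied shift (up to sign convention and a one-pixel
# parity offset), which the peak-minus-centre arithmetic above converts into (dx, dy).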
def get_wcs_integer_pixel_offset(self):
"""
Computes the offset between image1 and image2 using wcs alignment.
Provides integer pixel accuracy.
Returns
-------
dx, dy : int
The offset of self.image1 with respect to self.image2
"""
# Grab the WCS of the first image
refWCS = self.image1.wcs
# Estimate the central location for this image
refX, refY = self.image1.shape[1]//2, self.image1.shape[0]//2
# Convert pixels to sky coordinates
refRA, refDec = refWCS.all_pix2world(refX, refY, 0, ra_dec_order=True)
# Compute the pixel location of the reference RA and Dec
dx, dy = self.image2.wcs.all_world2pix(refRA, refDec, 0)
# Convert these relative offsets into integer values
dx, dy = int(dx - refX), int(dy - refY)
# Return the image offsets
return (dx, dy)
@lru_cache()
def get_cross_correlation_integer_pixel_offset(self):
"""
Computes the offset between image1 and image2 using cross-correlation.
Provides integer pixel accuracy.
Returns
-------
dx, dy : int
The offset of self.image1 with respect to self.image2
"""
# Replace any spurious values with local median values
array1, array2 = self._replace_negatives_and_nans_with_medians()
# Do an array flipped convolution, which is a correlation.
corrImage = signal.fftconvolve(
array2,
array1[::-1, ::-1],
mode='same'
)
# Fix any spurious pixel values
corrImage = self._fix_bad_correlation_image_pixels(corrImage)
# Extract the integer pixel offesets from this correlation image
dx, dy = self._extract_integer_offset_from_correlation_image(corrImage)
return dx, dy
@staticmethod
def _parse_star_cutouts(starCutouts1, starCutouts2):
"""
Decides which cutouts to use for sub-pixel alignment.
The provided star cutouts will be quality checked. Those cutouts which
do not meet the minimum quality criteria will be tossed from the stacks.
Parameters
----------
starCutouts1, starCutouts2 : array_like
A stack of star cutouts, each centered on a star
Returns
-------
outStarCutouts1, outStarCutouts2 : array_like
A quality-cut stack of star cutouts, each centered on a star.
"""
# Start by parsing the properties of ALL the cutouts
cutoutCorrelationCoeffs1 = []
cutoutCorrelationCoeffs2 = []
cutoutElongations1 = []
cutoutElongations2 = []
normalizedStarCutouts1 = []
normalizedStarCutouts2 = []
averageCutoutFlux = []
for starCutout1, starCutout2 in zip(starCutouts1, starCutouts2):
# Measure the properties of this cutout
cutoutProperties1 = data_properties(starCutout1)
cutoutProperties2 = data_properties(starCutout2)
# Grab the elongation property
cutoutElongations1.append(cutoutProperties1.elongation)
cutoutElongations2.append(cutoutProperties2.elongation)
# Grab the correlation coefficient property
correlationCoeff1 = (
cutoutProperties1.covar_sigxy /
(
cutoutProperties1.semimajor_axis_sigma *
cutoutProperties1.semiminor_axis_sigma
)
)
correlationCoeff2 = (
cutoutProperties2.covar_sigxy /
(
cutoutProperties2.semimajor_axis_sigma *
cutoutProperties2.semiminor_axis_sigma
)
)
# Store the correlation coefficients in lists
cutoutCorrelationCoeffs1.append(correlationCoeff1)
cutoutCorrelationCoeffs2.append(correlationCoeff2)
# Compute the total flux of each star
cutoutFlux1 = starCutout1.sum()
cutoutFlux2 = starCutout2.sum()
# Normalize the cutouts to have a total of one
normalizedStarCutouts1.append(starCutout1/cutoutFlux1)
normalizedStarCutouts2.append(starCutout2/cutoutFlux2)
# Compute an average flux for this star and store it
starCutoutFlux = 0.5*(cutoutFlux1 + cutoutFlux2)
averageCutoutFlux.append(starCutoutFlux)
# Convert these to arrays
cutoutCorrelationCoeffs1 = np.array(cutoutCorrelationCoeffs1)
cutoutCorrelationCoeffs2 = np.array(cutoutCorrelationCoeffs2)
cutoutElongations1 = np.array(cutoutElongations1)
cutoutElongations2 = np.array(cutoutElongations2)
normalizedStarCutouts1 = np.array(normalizedStarCutouts1)
normalizedStarCutouts2 = np.array(normalizedStarCutouts2)
averageCutoutFlux = np.array(averageCutoutFlux)
# Sort the cutouts from brightest to dimmest
# Grab the sorting index ordering
sortInds = averageCutoutFlux.argsort()
sortInds = sortInds[::-1]
# Apply the sorting array.
cutoutCorrelationCoeffs1 = cutoutCorrelationCoeffs1[sortInds]
cutoutCorrelationCoeffs2 = cutoutCorrelationCoeffs2[sortInds]
cutoutElongations1 = cutoutElongations1[sortInds]
cutoutElongations2 = cutoutElongations2[sortInds]
normalizedStarCutouts1 = normalizedStarCutouts1[sortInds]
normalizedStarCutouts2 = normalizedStarCutouts2[sortInds]
averageCutoutFlux = averageCutoutFlux[sortInds]
# Find the cutouts with good correlation coefficients
goodCorrelationCoeffs = np.logical_and(
np.abs(cutoutCorrelationCoeffs1 - np.median(cutoutCorrelationCoeffs1)) < 0.1,
np.abs(cutoutCorrelationCoeffs2 - np.median(cutoutCorrelationCoeffs2)) < 0.1
)
# Find the cutouts with good elogation values
goodElongations = np.logical_and(
cutoutElongations1 < 1.4,
cutoutElongations2 < 1.4
)
# Find the cutouts with good everything....
goodCutouts = np.logical_and(
goodCorrelationCoeffs,
goodElongations
)
# Cull the cutouts to only include the good cutouts
goodCutoutInds = np.where(goodCutouts)
return (normalizedStarCutouts1[goodCutoutInds],
normalizedStarCutouts2[goodCutoutInds])
@staticmethod
def _build_star_cutout_mosaic(starCutouts):
"""
Constructs a mosaic of star cutouts.
Parameters
----------
starCutouts : array_like
A list of star cutouts, each centered on a star
Returns
-------
starCutoutMosaic : numpy.ndarray
An array containing the star cutout each of the brightest stars
"""
# Make sure starCutouts can be handled properly
try:
starCutouts = np.array(starCutouts)
except:
raise TypeError('`starCutouts` must be an array-like object')
if starCutouts.ndim != 3:
raise ValueError('`starCutouts` must be a (number of stars x cutout size x cutout size) array')
# Get the number and shape of the remaining star cutouts
numberOfStars, ny, nx = starCutouts.shape
# Cull the list to the brightest square number of stars
if numberOfStars >= 25:
keepStarCount = 25
elif numberOfStars >= 16:
keepStarCount = 16
elif numberOfStars >= 9:
keepStarCount = 9
elif numberOfStars >= 4:
keepStarCount = 4
else:
raise RuntimeError('Fewer than 4 stars found: cannot build star cutout mosaic')
# Chop out the sections around each star, and build a mosaic of cutouts
numZoneSide = np.int(np.round(np.sqrt(keepStarCount)))
cutoutMosaic = np.zeros((numZoneSide*ny, numZoneSide*nx))
# Loop through each star to be placed in the mosaic
for iStar, starCutout in enumerate(starCutouts[0:keepStarCount]):
# Compute the zone for this star
yZone, xZone = np.unravel_index(iStar, (numZoneSide, numZoneSide))
# Establish the pasting boundaries
btPaste = np.int(np.round(ny*yZone))
tpPaste = np.int(np.round(ny*(yZone + 1)))
lfPaste = np.int(np.round(nx*xZone))
rtPaste = np.int(np.round(nx*(xZone + 1)))
# Paste the cutout into the star mosaic
cutoutMosaic[btPaste:tpPaste, lfPaste:rtPaste] = starCutout
return cutoutMosaic
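# Worked example of the mosaic geometry (assumed numbers): with 9 kept cutouts of
# 21 x 21 pixels, numZoneSide is 3, the mosaic is 63 x 63 pixels, and star k is
# pasted into zone (k // 3, k % 3) of that grid.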
@staticmethod
def _extract_subpixel_offset_from_correlation_image(corrImage):
"""
Extracts the subpixel offset from the cross-correlation image
Parameters
----------
corrImage : numpy.ndarray
The cross-correlation image of the starCutoutMosaic images
Returns
-------
dx, dy : float
The subpixel correction to be added to the integer pixel offset of
the two images
"""
# Define a plane fitting function for use within this method only
def planeFit(points):
"""
p, n = planeFit(points)
Given an array, points, of shape (d,...)
representing points in d-dimensional space,
fit an d-dimensional plane to the points.
Return a point, p, on the plane (the point-cloud centroid),
and the normal, n.
"""
points = np.reshape(points, (np.shape(points)[0], -1)) # Collapse trailing dimensions
assert points.shape[0] <= points.shape[1], "There are only {} points in {} dimensions.".format(points.shape[1], points.shape[0])
ctr = points.mean(axis=1)
x = points - ctr[:,np.newaxis]
M = np.dot(x, x.T) # Could also use np.cov(x) here.
return ctr, np.linalg.svd(M)[0][:,-1]
# Check for the maximum of the cross-correlation function
yPeak, xPeak = np.unravel_index(corrImage.argmax(), corrImage.shape)
# Compute the corners of the central region to analyze
peakSz = 6
btCorr = yPeak - peakSz
tpCorr = btCorr + 2*peakSz + 1
lfCorr = xPeak - peakSz
rtCorr = lfCorr + 2*peakSz + 1
# Chop out the central region
corrImagePeak = corrImage[btCorr:tpCorr, lfCorr:rtCorr]
# Get the gradient of the cross-correlation function
Gx = ndimage.sobel(corrImagePeak, axis=1)
Gy = ndimage.sobel(corrImagePeak, axis=0)
# Grab the index of the peak
yPeak, xPeak = np.unravel_index(
corrImagePeak.argmax(),
corrImagePeak.shape
)
# Chop out the central zone and grab the minimum of the gradient
cenSz = 3
bt = yPeak - cenSz//2
tp = bt + cenSz
lf = xPeak - cenSz//2
rt = lf + cenSz
# Grab the region near the minima
yy, xx = np.mgrid[bt:tp, lf:rt]
Gx_plane = Gx[bt:tp, lf:rt]
Gy_plane = Gy[bt:tp, lf:rt]
# Fit planes to the x and y gradients...Gx
px_init = models.Polynomial2D(degree=1)
py_init = models.Polynomial2D(degree=1)
fit_p = fitting.LinearLSQFitter()
px = fit_p(px_init, xx, yy, Gx_plane)
py = fit_p(py_init, xx, yy, Gy_plane)
# TODO: speed this up by getting the plane solutions from the
# planeFit(points) function.
# Solve these equations using NUMPY
# 0 = px.c0_0 + px.c1_0*xx_plane + px.c0_1*yy_plane
# 0 = py.c0_0 + py.c1_0*xx_plane + py.c0_1*yy_plane
#
# This can be reduced to Ax = b, where
#
A = np.matrix([[px.c1_0.value, px.c0_1.value],
[py.c1_0.value, py.c0_1.value]])
b = np.matrix([[-px.c0_0.value],
[-py.c0_0.value]])
# Now we can use the build in numpy linear algebra solver
x_soln = np.linalg.solve(A, b)
# Extract the shape of the corrImage and compute final relative offset
ny, nx = corrImage.shape
# Finally convert back into an absolute image offset
dx1 = lfCorr + (x_soln.item(0) - (ny)//2)
dy1 = btCorr + (x_soln.item(1) - (nx)//2)
return dx1, dy1
def get_cross_correlation_subpixel_offset(self, satLimit=16e3,
cutoutSize=21):
"""
Computes the offset between image1 and image2 using cross-correlation.
Provides subpixel accuracy.
Parameters
----------
satLimit : int or float, optional, default: 16e3
Sources which contain any pixels with more than this number of
counts will not be used to perform cross-correlation alignment.
cutoutSize : int, optional, default: 21
The size of the cutout array to extract for matching PSFs through
cross-correlation. This will also set a limit for the nearest
neighbors allowed at sqrt(2)*cutoutSize.
Returns
-------
dx, dy : float
The precise offset of self.image1 with respect to self.image2
"""
# TODO: Test if rough pixel-level alignment is required
pass
# # Test if a quick WCS integer pixel alignment is possible.
# if self.image1.has_wcs and self.image2.has_wcs:
# # Compute the integer pixel offsets using WCS
# dx, dy = self.get_wcs_integer_pixel_offset()
# else:
# # Compute the integer pixel offsets using cross-correlation
# dx, dy = self.get_cross_correlation_integer_pixel_offset()
#
# # Shift image2 array to approximately match image1
# shiftedImage2 = self.image2.shift(-dx, -dy)
# Compute a combined image and extract stars from that combined image
combinedImage = 0.5*(self.image1 + self.image2)
xStars, yStars = combinedImage.get_sources(
satLimit = satLimit,
crowdLimit = np.sqrt(2)*cutoutSize,
edgeLimit = cutoutSize + 1
)
# Grab the list of star cutouts from image one
starCutouts1 = self.image1.extract_star_cutouts(xStars, yStars,
cutoutSize = cutoutSize)
# Grab the list of star cutouts from shifted image two
starCutouts2 = self.image2.extract_star_cutouts(xStars, yStars,
cutoutSize = cutoutSize)
# Cull any bad cutouts from the cutout list
starCutouts1, starCutouts2 = self._parse_star_cutouts(
starCutouts1,
starCutouts2
)
# Build the square mosaics of cutouts
cutoutMosaic1 = self._build_star_cutout_mosaic(starCutouts1)
cutoutMosaic2 = self._build_star_cutout_mosaic(starCutouts2)
#
# TODO: remove this code block if possible
#
# Construct a NEW ImagePair instance from these two mosaics
mosaicPair = ImagePairOffsetGetter(
ReducedScience(cutoutMosaic1),
ReducedScience(cutoutMosaic2)
)
# Replace any spurious values with local median values
array1, array2 = mosaicPair._replace_negatives_and_nans_with_medians()
# Do an array flipped convolution, which is a correlation.
corrImage = signal.fftconvolve(
array2,
array1[::-1, ::-1],
mode='same'
)
# Fix any spurious pixel values
corrImage = ImagePairOffsetGetter._fix_bad_correlation_image_pixels(corrImage)
# Grab the subpixel precision offsets from the cross correlation image
dx, dy = ImagePairOffsetGetter._extract_subpixel_offset_from_correlation_image(corrImage)
# # Add the integer and subpixel offsets and return them to the user
# dx += dx1
# dy += dy1
return dx, dy
##################################
### END OF OTHER METHODS ###
##################################
class ImageStack(object):
"""
A class for aligning and combining a list of AstroImage objects.
Properties
----------
imageList A list containing all the images in the stack
Methods
-------
add_image Appends the provided image instance to
the end of the `imageList`
pop_image Removes and returns the specified image
from the `imageList`
align_images_with_wcs Aligns the images using the WCS
solutions in the header
align_images_with_cross_correlation Aligns the images using a
cross-correlation technique
combine_images Combines the images to form a single,
average output image
"""
def __init__(self, imageList, gobble=True):
"""
Constructs an `ImageStacks` from a list or tuple of AstroImage
instances. The instances can be of any type as long as they are a
subclass of the BaseImage class and all the images are of the same type.
Parameters
----------
imageList : iterable
An iterable list of AstroImage instances (all of the same type)
gobble : bool, optional, default: True
If True, then `imageList` is *emptied* into the ImageStack storage
attribute so that `imageList` is empty after constructing the
ImageStack. If False, then the contens of `imageList` are simply
coppied into the ImageStack storage content, and `imageList` is
unaffected. The default value is True in order to save memory.
Returns
-------
outStack : `ImageStack`
A new instance containing the data from the input image list.
"""
# Check that a list (or something close to it) was provided
if not hasattr(imageList, '__iter__'):
raise TypeError('`imageList` must be a list or iterable object containing image instances')
# Start by counting the number of images
numberOfImages = len(imageList)
# Catch an empty list
if numberOfImages < 1:
raise ValueError('`imageList` must contain at least one image instance')
# Check that the first element of the list is a subclass of `BaseImage`
thisType = type(imageList[0])
if not issubclass(type(imageList[0]), BaseImage):
raise TypeError('{0} type not a recognized astroimage type'.format(thisType))
# Check if all the images are of the same type
typeList = [type(img) for img in imageList]
if typeList.count(typeList[0]) != numberOfImages:
raise TypeError('All instances in `imageList` must be the same type')
# Check that the binning is all correct
imageBinnings = np.array([img.binning for img in imageList])
dx = imageBinnings[:, 0]
dy = imageBinnings[:, 1]
if ((np.sum(dx == dx[0]) != numberOfImages) or
(np.sum(dy == dy[0]) != numberOfImages)):
raise ValueError('All instances in `imageList` must have the same binning')
# Check that the units have the same dimensions.
unitList = [u.Quantity(1, img.unit).decompose() for img in imageList]
if unitList.count(unitList[0]) != numberOfImages:
raise ValueError('All instances in `imageList` must have the same units')
# Grab the units of the first image and loop through the rest of the
# images to make sure that they also have the same units.
targetUnits = imageList[0].unit
for i in range(numberOfImages):
if imageList[i].unit != targetUnits:
imageList[i].convert_units_to(targetUnits)
# Store an immutable version of the image list
if gobble:
# If the gobble parameter is true, then transfer each element of
# the input list into the tuple.
self.__imageList = tuple()
while len(imageList) > 0:
self.__imageList += (imageList.pop(0),)
elif not gobble:
# If the gobble parameter is false, then just copy the list...
self.__imageList = tuple(imageList)
# Store the image list type
self.__imageType = thisType
# Set an instance variable to indicate whether the images have been aligned
if issubclass(self.imageType, (RawScience, ReducedScience)):
# Assume that science images have not been aligned
self.__aligned = False
else:
# Calibration images do not require alignment
self.__aligned = True
# Initialize a boolean value to indicate that this is NOT a supersky.
# The boolean will be changed to "True" if-and-only-if the
# "produce_supersky" method is executed.
self.__is_supersky = False
# Force all the image shapes to be the same
self.pad_images_to_match_shapes()
##################################
### START OF PROPERTIES ###
##################################
@property
def aligned(self):
"""A boolean flag indicating the image alignment"""
return self.__aligned
@property
def imageList(self):
"""The list of images in this stack"""
return self.__imageList
@property
def imageType(self):
"""The type of images stored in this stack"""
return self.__imageType
@property
def is_supersky(self):
"""Boolean flag indicating if this stack has yielded a supersky"""
return self.__is_supersky
@property
def numberOfImages(self):
"""The number of images currently in this stack"""
return len(self.imageList)
@property
def shape(self):
"""The shape of the image stack (nz, ny, nx)"""
return (self.numberOfImages,) + self.imageList[0].shape
##################################
### END OF PROPERTIES ###
##################################
##################################
### START OF OTHER METHODS ###
##################################
def pad_images_to_match_shapes(self):
"""
Pads all the images in imageList to have the same shape.
The padding is applied to the top and right sides of the images, so
the WCS of those images will be unaffected if they have WCS.
Side Effects
------------
Pads the images in place, so the ImageStack on which this is invoked
will be modified.
Returns
-------
outStack : ImageStack
The SAME image stack on which this method was invoked but with its
images now padded to have the same shapes.
"""
# Force all the images to have the same shape
imageShapes = np.array([img.shape for img in self.imageList])
ny, nx = imageShapes.max(axis=0)
# Loop through each image and add padding if necessary
for ny1nx1 in imageShapes:
ny1, nx1 = ny1nx1
padY = ny - ny1 if ny1 < ny else 0
padX = nx - nx1 if nx1 < nx else 0
# Extract the first image in the imageList
thisImg = self.pop_image(0)
if padX > 0 or padY > 0:
# Pad the image as necessary
thisImg = thisImg.pad(((0, padY), (0, padX)), 'constant')
# Return the image to the imageList (at the END of the list)
self.add_image(thisImg)
# Hand the padded ImageStack back to the user
return self
def add_image(self, image):
"""
Adds an image to the image stack.
Parameters
----------
image : `BaseImage` (or subclass)
The image to be added to the ImageStack instance
Returns
-------
out: None
"""
if type(image) is not self.imageType:
raise TypeError('`image` must be of type {0}'.format(self.imageType))
listBinning = self.imageList[0].binning
if image.binning != listBinning:
raise ValueError('`image` must have binning ({0} x {1})'.format(*listBinning))
self.__imageList = self.imageList + (image,)
return None
def pop_image(self, index=None):
"""
Removes the image at `index` from the imageList and returns it.
Parameters
----------
index : int
The index of the image to be removed and returned
Returns
-------
outImg : BaseImage or subclass
The image stored at `index` in the imageList
"""
# Check if an index was provided, and if not, then grab the final index
if index is None: index = self.numberOfImages-1
# Grab the output image
try:
outImg = self.imageList[index]
except:
raise
# Reconstruct the image list
if index < self.numberOfImages-1:
self.__imageList = self.imageList[:index] + self.imageList[index+1:]
else:
self.__imageList = self.imageList[:index]
return outImg
def get_wcs_offsets(self, subPixel=False):
"""
Computes the relative offsets between `ReducedScience` images in the
ImageStack using the WCS solutions provided in each image. If ANY of the
images do not have a WCS solution, then this method returns an error.
Parameters
----------
subPixel : bool, optional, default: False
If true, then the image offsets are returned with sub-pixel
precision. In general, the WCS solution is not accurate enough to
justify subpixel precision, so the default value is False.
Returns
-------
dx, dy : numpy.ndarray
The horizontal (dx) and vertical (dy) offsets required to align the
images.
"""
# Check that there is at least ONE image for which to compute an offset
if self.numberOfImages < 1:
raise ValueError('Silly rabbit, you need some images to align!')
# Check that the image list is storing `ReducedScience` type images.
if not issubclass(self.imageType, ReducedScience):
raise TypeError('WCS offsets can only be computed for `ReducedScience` type images')
# Check that all the stored images have WCS
imagesHaveWCS = [img.has_wcs for img in self.imageList]
if not all(imagesHaveWCS):
raise ValueError('All `ReducedScience` instances must have WCS solutions')
# Grab the WCS of the first image
refWCS = self.imageList[0].wcs
# Estimate the central location for this image
refX, refY = self.imageList[0].shape[1]//2, self.imageList[0].shape[0]//2
# Convert pixels to sky coordinates
refRA, refDec = refWCS.wcs_pix2world(refX, refY, 0, ra_dec_order=True)
# Loop through all of the images in the list
# Grab the WCS of the alignment image and convert back to pixels
xPos = []
yPos = []
for img in self.imageList:
imgX, imgY = img.wcs.all_world2pix(refRA, refDec, 0)
# Store the relative center pointing
xPos.append(float(imgX))
yPos.append(float(imgY))
# Compute the relative pointings from the median position
dx = np.median(xPos) - np.array(xPos)
dy = np.median(yPos) - np.array(yPos)
if subPixel:
# If sub pixel offsets were requested, then add a small `epsilon` to
# ensure that none of the offsets are exact integer values.
dx, dy = self._force_non_integer_offsets(dx, dy)
else:
# If integer pixel offsets were requested, then round each offset to
# its nearest integer value.
dx = np.round(dx).astype(int)
dy = np.round(dy).astype(int)
# Return the image offsets
return (dx, dy)
#TODO: break this up into two separate methods:
# 1) integer_offsets
# 2) subpixel_offsets
def get_cross_correlation_offsets(self, subPixel=False, satLimit=16e3):
"""
Computes the relative offsets between the images in the ImageStack
Parameters
----------
subPixel : bool, optional, default: False
If true, then the image offsets are returned with sub-pixel
precision. In general, the WCS solution is not accurate enough to
justify subpixel precision, so the default value is False.
satLimit : int or float, optional, default: 16e3
The maximum number of pixel counts permitted for any of the
reference stars to be used for alignment. Any stars containing a
pixel brighter than this amount will be omitted from the list of
permissible reference stars.
Returns
-------
dx, dy : numpy.ndarray
The horizontal (dx) and vertical (dy) offsets required to align
the images.
"""
# Catch the truly trivial case
numberOfImages = self.numberOfImages
if self.numberOfImages <= 1:
return (0, 0)
# Grab the appropriate reference image depending on whether or not the
# image stack has already been aligned...
if self.aligned:
# If the stack has already been aligned (e.g. for a sub-pixel refinement
# pass), then construct a median reference image to align against.
referenceImage = self.build_median_image()
# Initialize lists for storing offsets and shapes, and use the FIRST
# image in the list as the reference image for now.
xPos = []
yPos = []
# Which image should be the FIRST image to align? Start with the 0th
# image because we've constructed a separate `referenceImage`
startInd = 0
else:
referenceImage = self.imageList[0]
# Initialize lists for storing offsets and shapes, and use the FIRST
# image in the list as the reference image for now.
xPos = [0]
yPos = [0]
# Which image should be the FIRST image to align? Skip the 0th image
# in this case because it is serving as the referenc image.
startInd = 1
# Loop through the rest of the images.
# Use cross-correlation to get relative offsets,
# and accumulate image shapes
progressString = 'Aligning image {0} of {1}'
for imgNum, image in enumerate(self.imageList[startInd:]):
# Update the user on the progress
print(progressString.format(imgNum+startInd+1, numberOfImages), end='\r')
# Construct an image pair using the reference image
imgPair = ImagePairOffsetGetter(
referenceImage,
image
)
# Grab subpixel or integer offsets depending on what was requested
if subPixel:
dx, dy = imgPair.get_cross_correlation_subpixel_offset(
satLimit=satLimit)
else:
dx, dy = imgPair.get_cross_correlation_integer_pixel_offset()
# Append cross_correlation values for non-reference image
xPos.append(dx)
yPos.append(dy)
# Print a new line for shell output
print('')
# TODO: delete these lines if everything is working correctly.
# Compute the relative pointings from the median position
dx = np.median(xPos) - np.array(xPos)
dy = np.median(yPos) - np.array(yPos)
if subPixel:
# If sub pixel offsets were requested, then add a small `epsilon` to
# ensure that none of the offsets are exact integer values.
dx, dy = self._force_non_integer_offsets(
np.array(dx),
np.array(dy)
)
else:
# If integer pixel offsets were requested, then round each offset to
# its nearest integer value.
dx = np.round(dx).astype(int)
dy = np.round(dy).astype(int)
return (dx, dy)
@staticmethod
def _force_non_integer_offsets(dx, dy, epsilon=1e-4):
"""
Forces any offset values to be non-integer values by adding epsilon.
Continues to add epsilon to the offsets until NONE of them are integers.
Parameters
----------
dx, dy : array_like
The offset values to force to be non-integer
epsilon: float, optional, default: 1e-4
The amount to add to the offsets in order to make them non-integer
Returns
-------
dx, dy : array_like
Arrays of offset values, none of which will be integers
"""
# Copy the input offsets
outDx = np.array(copy.deepcopy(dx), dtype=float)
outDy = np.array(copy.deepcopy(dy), dtype=float)
# Repeatedly add epsilon to the offsets until none of them are integers
addEpsilon = True
while addEpsilon:
# Check for any perfectly integer shifts
for dx1, dy1 in zip(outDx, outDy):
# If an integer pixel shift is found, then add tiny shift and
# try again.
if dx1.is_integer() or dy1.is_integer():
addEpsilon = True
outDx += epsilon
outDy += epsilon
break
else:
# If the loop completed, then no epsilon addition necessary!
addEpsilon = False
return outDx, outDy
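# Illustrative note (values are hypothetical): given dx = [1.0, 2.3] and dy = [0.0, 4.7],
# the first pass finds the exact integers 1.0 and 0.0 and adds epsilon to every offset,
# giving dx = [1.0001, 2.3001] and dy = [0.0001, 4.7001]; the next pass finds no
# integers and the loop terminates.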
def apply_image_shift_offsets(self, dx, dy, padding=0):
"""
Shifts each image in the stack by the amount specified.
Parameters
----------
dx, dy : int or float
The amount to shift each image along the horizontal (dx) and
vertical (dy) axes.
padding : int or float, optional, default: 0
The value to use for padding the edges of the shifted images.
"""
numberOfOffsets = len(dx)
if numberOfOffsets != len(dy):
raise ValueError('`dx` and `dy` must have the same number of elements')
if numberOfOffsets != self.numberOfImages:
raise ValueError('There must be one (dx, dy) pair for each image')
# Recompute the offsets so that all images are shifted up and right
dx1, dy1 = dx - np.floor(dx.min()), dy - np.floor(dy.min())
# Compute the required padding
padX, padY = np.int(np.ceil(dx1.max())), np.int(np.ceil(dy1.max()))
# Loop through each offset and apply it to the images
for dx11, dy11 in zip(dx1, dy1):
# Extract the first image in the imageList
thisImg = self.pop_image(0)
# Pad this image so that shifts do not delete any data
thisImg = thisImg.pad(
((0, padY), (0, padX)),
mode='constant',
constant_values=padding
)
# Shift the image as necessary
thisImg = thisImg.shift(dx11, dy11, padding=padding)
# Return the image to the imageList (at the END of the list)
self.add_image(thisImg)
def build_median_image(self):
"""
Computes fast median image of an aligned image stack.
Returns
-------
medianImage : `~astroimage.reduced.ReducedScience`
The median of the image stack.
"""
# Check if the image stack has been aligned
if not self.aligned:
raise RuntimeError('ImageStack must be aligned before a median image can be computed')
# Stack the data arrays
dataStack = np.array([img.data for img in self.imageList])
# Compute the median of the data stack
medianData = np.nanmedian(dataStack, axis=0)
# Copy the first image in the image stack and replace its data
medianImage = self.imageList[0].copy()
medianImage.data = medianData
# Store the median image in the medianImage attribute
self.medianImage = medianImage
return medianImage
def align_images_with_wcs(self, subPixel=False, padding=0):
"""
Aligns the whole stack of images using the astrometry in the header.
NOTE: (2016-06-29) This function *DOES NOT* match image PSFs.
Perhaps this functionality will be implemented in future versions.
Parameters
----------
subPixel : bool
If True, then non-integer pixel shifts will be applied. If False,
then all shift amounts will be rounded to the nearest integer pixel.
padding : int or float, optional, default: 0
The value to use for padding the edges of the aligned images.
"""
# Catch the case where imageList has only one image
if self.numberOfImages == 1:
return self.imageList[0]
# If no offsets were supplied, then retrieve them
dx, dy = self.get_wcs_offsets(subPixel=subPixel)
if subPixel == True:
# Make sure there are no integers in the dx, dy list
dx, dy = self._force_non_integer_offsets(dx, dy)
else:
# If non-subpixel alignment was requested, then FORCE all the
# offsets to the nearest integer value.
dx = np.round(dx).astype(int)
dy = np.round(dy).astype(int)
# Apply the shifts to the images in the stack
self.apply_image_shift_offsets(dx, dy, padding=padding)
# Set the alignment flag to True
self.__aligned = True
# Set the is_supersky flag to False (in case it was previously set True)
self.__is_supersky = False
def align_images_with_cross_correlation(self, subPixel=False,
satLimit=16e3, padding=0):
"""
Aligns the whole stack of images using cross-correlation between images.
NOTE: (2016-06-29) This function *DOES NOT* match image PSFs.
Perhaps this functionality will be implemented in future versions.
Parameters
----------
subPixel : bool
If True, then non-integer pixel shifts will be applied. If False,
then all shift amounts will be rounded to the nearest integer pixel.
satLimit : int or float, optional, default: 16e3
Sources which contain any pixels with more than this number of
counts will not be used to perform cross-correlation alignment.
padding : int or float, optional, default: 0
The value to use for padding the edges of the aligned images.
"""
# Catch the case where imageList has only one image
if self.numberOfImages == 1:
return self.imageList[0]
# Check if approximate alignment has already been achieved
if not self.aligned:
# Start by retrieving the integer pixel offsets
print('Aligning images to the integer-pixel level')
dx, dy = self.get_cross_correlation_offsets(subPixel=False,
satLimit=satLimit)
# Align the images to an integer pixel level.
self.apply_image_shift_offsets(dx, dy, padding=padding)
# Set the alignment flag to True
self.__aligned = True
# If approximate alignment has already been achieved, then simply
# proceed to get sub-pixel alignment level.
if subPixel == True:
# Get the sub-pixel corrections to the alignment.
print('Aligning images to the sub-pixel level')
dx, dy = self.get_cross_correlation_offsets(subPixel=True,
satLimit=satLimit)
# Make sure there are no integers in the dx, dy list
dx, dy = self._force_non_integer_offsets(dx, dy)
# Apply the shifts to the images in the stack
self.apply_image_shift_offsets(dx, dy, padding=padding)
# Set the alignment flag to True
self.__aligned = True
# else:
# # If non-subpixel alignment was requested, then FORCE all the
# # offsets to the nearest integer value.
# dx = np.round(dx).astype(int)
# dy = np.round(dy).astype(int)
# Set the is_supersky flag to False (in case it was previously set True)
self.__is_supersky = False
####################################
### START OF COMBINATION HELPERS ###
####################################
def _get_number_of_rows_to_process(self, bitsPerPixel):
"""
Computes the number of rows to process at a given time.
Parameters
----------
bitsPerPixel : int
The number of bits used to store each pixel
Returns
-------
numRows : int
The number of rows to include in each section
numSections : int
The total number of sections in the ImageStack instance
"""
# TODO: do a better job estimating the number of rows to process.
# Compute the number of pixels that fit under the memory limit.
memLimit = (psutil.virtual_memory().available/
(bitsPerPixel*(1024**2)))
memLimit = int(50*np.floor(memLimit/10.0))
numStackPix = memLimit*(1024**2)*8/bitsPerPixel
# Grab the number of images and the shape of those image
numImg, ny, nx = self.shape
# Compute the number of rows to be processed in each chunk
numRows = int(np.floor(numStackPix/(numImg*nx)))
# Catch the case where ALL rows get handled at once
if numRows > ny: numRows = ny
numSections = int(np.ceil(ny/numRows))
# Recompute the number of rows to be evenly spaced
numRows = int(np.ceil(ny/numSections))
return numRows, numSections
def _produce_individual_star_masks(self, dilationWidth=4):
"""
Finds the stars in the image stack and builds masks to protect or omit.
Parameters
----------
dilationWidth : int or float, optional, default: 4
The amount to circularly dilate outward from masked pixels. These
roughly translate to pixel values so that `dilationWidth=4` will
mask any locations within 4 pixels of a crudely identified star
pixel. This actually depends on a number of other factors, such as
the number of crudely identified star pixels in a given star, etc...
Returns
-------
starMasks : numpy.ndarray
A (numberOfImages, ny, nx) array where each slice along the 0th axis
represents the star mask for the image located at the corresponding
index in the imageList attribute.
"""
# TODO: REWRITE THIS METHOD USING THE ASTROPY SEGMENTATION METHODS???
# Yes, I THINK so...
# Grab binning
binX, binY = self.imageList[0].binning
# Compute kernel shape
medianKernShape = (np.int(np.ceil(9.0/binX)), np.int(np.ceil(9.0/binY)))
# Grab the number of images (for user updates)
numImg = self.numberOfImages
# Construct a blank array to populate with masks
starMasks = np.zeros(self.shape, dtype=int)
# Loop through the images and compute individual star masks
for imgNum, img in enumerate(self.imageList):
print('Building star mask for image {0:g} of {1:g}'.format(imgNum + 1, numImg), end='\r')
# Grab the image array
thisData = img.data.copy()
# Replace bad values with zeros
badInds = np.where(np.logical_not(np.isfinite(thisData)))
thisData[badInds] = -1e6
# Filter the image
medImg = ndimage.median_filter(thisData, size = medianKernShape)
# get stddev of image background
mean, median, stddev = img.sigma_clipped_stats()
# Look for deviates from the filter (positive values only)
# starMask1 = np.logical_and(np.abs(thisData - medImg) > 2.0*stddev,
# thisData > 0)
starMask1 = (np.abs(thisData - medImg) > 2.0*stddev)
# Use the scipy ndimage opening and closing to clean the mask
starMask1 = ndimage.binary_opening(starMask1)
starMask1 = ndimage.binary_closing(starMask1)
# Clean out some edge effects.
starMask1[:, -4:-1] = 0
#
# NOTE: This doesn't work when there are nebulae and galaxies in the image!
#
# starMask1 = make_source_mask(
# thisData,
# snr=2,
# npixels=5,
# dilate_size=11,
# mask_value=-1e6
# )
# Try using guassian kernel convolution instead
from astropy.convolution import convolve, convolve_fft, Gaussian2DKernel
# Initialize a dilating kernel
gaussian_2D_kernel = Gaussian2DKernel(10.0)
# Normalize the kernel
gaussian_2D_kernel.normalize()
# If the dilation kernel is larger than 10 pixels, then use FFT
# convolution.
starMask11 = convolve_fft(
starMask1.astype(float),
gaussian_2D_kernel
)
# Mask any pixels above a threshold tied to the smoothing kernel: 1/(200*pi) is
# the peak of a unit-normalized 2D Gaussian with sigma = 10, so the threshold is
# 10x the kernel value at a radius of (dilationWidth + 0.5) pixels; how far the
# mask actually grows depends on how many pixels were flagged in each star.
peakValue = 1/(200*np.pi)
maskThreshold = 10 * peakValue * np.exp(-0.5*((dilationWidth+0.5)/10.0)**2)
starMask1 = (starMask11 > maskThreshold).astype(np.int8)
# TODO: delete this code if convolution works out
#
# # Finally, liberally EXPAND the mask with four dilations
# starMask1 = ndimage.binary_dilation(
# starMask1,
# iterations=starMaskIters
# ).astype(np.int8)
# TODO: delete this code once I verify everything is working
#
# # Count the number of masked neighbors for each pixel
# neighborCount = np.zeros(thisData.shape, dtype=int)
# for dx in range(-1,2,1):
# for dy in range(-1,2,1):
# neighborCount += np.roll(np.roll(starMask1, dy, axis=0),
# dx, axis=1).astype(np.int8)
#
# # Find pixels with more than two masked neighbor (including self)
# # starMask1 = np.logical_and(starMask1, neighborCount > 2)
# starMask1 = (neighborCount > 2).astype(np.int8)
# Place the final mask into its respective slice of the 3D array
starMasks[imgNum, :, :] = starMask1
# Print a newline character to preserve star mask updates
print('')
# Once ALL of the star masks have been computed, return them to the user
return starMasks
def _construct_star_mask(self):
"""
Finds stars in the image stack and builds masks to protect or omit.
Returns
-------
mask : numpy.ndarray or bool
An array of star positions to mask. If no stars were found, then
simply returns False. This output can be used as the mask in a
numpy.ma.core.MaskedArray object.
"""
# Produce a separate star mask for EACH image in the stack
starMasks = self._produce_individual_star_masks()
# Accumulate these pixels into the final star mask
starMask = starMasks.sum(axis=0)
# Cleanup temporary variables
del starMasks
# Compute final star mask based on which pixels were masked more than
# 10% of the time.
numImg = self.numberOfImages
starMask = (starMask > np.ceil(0.1*numImg)).astype(float)
# Check that at least one star was detected (more than 15 pixels masked)
if np.sum(starMask) > 15:
# Now smooth the star mask with a gaussian to dialate it
starMask1 = ndimage.gaussian_filter(starMask, (4, 4))
# Grab any pixels (and indices) above 0.05 value post-smoothing
starMask = (starMask1 > 0.05)
numInStarPix = np.sum(starMask)
# Notify user how many "in-star pixels" were masked
print('\n\nMasked a total of {0} pixels'.format(numInStarPix))
else:
print('\n\nNo pixels masked as "in-star" pixels')
starMask = False
return starMask
def _get_sigma_clip_start_and_steps(self, iters, backgroundClipSigma=5.0,
backgroundClipStep=0.5, starClipSigma=40.0, starClipStep=1.0):
"""
Computes the sigma clipping step sizes
Parameters
----------
iters : int
The number of iterations to be used by the sigma-clipping
backgroundClipSigma : int or float, optional, default: 5.0
The number of standard-deviations from the median value a pixel
located outside of a star can deviate from the median before it is
marked as a bad pixel.
backgroundClipStep : float, optional, default: 0.5
The step-size to use for each iteration of the sigma-clipping in
background (non-stellar) pixels. If this value forces backgroundClipStart below 0.1,
then it is reset to 0.5.
starClipSigma : int or float, optional, default: 40.0
The number of standard-deviations from the median value a pixel
located within a star can deviate from the median before it is
marked as a bad pixel.
starClipStep: float, optional, default: 1.0
The step-size to use for each iteration of the sigma-clipping in
stellar pixels. If this value forces starClipStart
below 30.0, then it is reset to 1.0.
Returns
-------
backgroundClipStart : float
The starting point for sigma-clipping of the background pixels
backgroundClipStep : float
The step-size to use for each iteration of the sigma-clipping in
background (non-stellar) pixels. This is the same as the input value unless it was
remapped to prevent a backgroundClipStart value below 0.1.
starClipStart : float
The starting point for sigma-clipping of the star pixels
starClipStep: float
The step-size to use for each iteration of the sigma-clipping in
stellar pixels. This is the same as the input value
unless it was remapped to prevent a starClipStart value below
30.0.
"""
# Compute how to iterate through sigma clipping
# Compute the expected starting point
backgroundClipStart = backgroundClipSigma - backgroundClipStep*iters
starClipStart = starClipSigma - starClipStep*iters
# Double check that these values are legal
# (otherwise adjust sigmaStep values)
if backgroundClipStart < 0.1:
backgroundClipStart = 0.5
backgroundClipStep = (backgroundClipSigma - backgroundClipStart)/iters
if starClipStart < 30.0:
starClipStart = 30.0
starClipStep = 1.0
return (
backgroundClipStart,
backgroundClipStep,
starClipStart,
starClipStep
)
def _get_start_and_end_rows(self, sectionNumber, numberOfRows):
"""
Compute the start and end rows for a given section number.
Parameters
----------
sectionNumber : int
The section number to extract (starts at 0)
numberOfRows : int
The number of rows to extract from the imageList
Returns
-------
startRow : int
The index of the first row for the selected region
endRow : int
The index of the last row for the selected region
"""
# Test if the input is logical
if not issubclass(type(sectionNumber),
(int, np.int, np.int8, np.int16, np.int32, np.int64)):
raise TypeError('`sectionNumber` must be an int type')
if not issubclass(type(numberOfRows),
(int, np.int, np.int8, np.int16, np.int32, np.int64)):
raise TypeError('`numberOfRows` must be an int type')
# Grab the shape of the image stack
nz, ny, nx = self.shape
# Compute the range of rows to extract
startRow = sectionNumber*numberOfRows
endRow = (sectionNumber + 1)*numberOfRows
# Just to be safe, catch the case where we attempt to index BEYOND
# the last row in the image stack.
if endRow > ny: endRow = ny
return startRow, endRow
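# Illustrative note (numbers are hypothetical): with ny = 1000 rows and
# numberOfRows = 300, sections 0-3 map to the row ranges (0, 300), (300, 600),
# (600, 900), and (900, 1000), the last range being clipped to the stack height.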
def _extract_data_sub_stack(self, startRow, endRow):
"""
Extracts and returns a 3D numpy.ndarray containing the image data.
Parameters
----------
startRow : int
The index of the first row in the selected sub stack
endRow : int
The index of the last row in the selected sub stack
Returns
-------
outData : numpy.ma.ndarray
A masked array containing the data
"""
# Grab the shape of the image stack
nz, ny, nx = self.shape
# Compute the number of rows in this sub stack
numberOfRows = endRow - startRow
# Build an array for storing output
outData = np.zeros((nz, numberOfRows, nx))
# Loop through each image and extract its data
for zInd, img in enumerate(self.imageList):
outData[zInd, :, :] = img.data[startRow:endRow, :]
return np.ma.array(outData)
def _extract_uncert_sub_stack(self, startRow, endRow):
"""
Extracts and returns a 3D numpy.ndarray containing the image uncertainty.
Parameters
----------
startRow : int
The index of the first row in the selected sub stack
endRow : int
The index of the last row in the selected sub stack
Returns
-------
outUncert : numpy.ndarray
An array containing the uncertainty
"""
# Grab the shape of the image stack
nz, ny, nx = self.shape
# Build a list of which of these images have uncertainty
numWithUncert = np.sum([img.has_uncertainty for img in self.imageList])
# If not ALL of the images have uncertainty, then there is no simple
# algebraic means of treating the uncertainties for SOME of the images,
# so simply return None for the uncertainty
if numWithUncert > 0 and numWithUncert < nz:
# Issue a warning so that the user knows this is happening.
warnings.warn(
'Not all images in the ImageStack have associated '
'uncertainties: estimating uncertainty from data variance. '
'This will overestimate the uncertainty in stellar pixels.'
)
if (numWithUncert != nz):
outUncert = None
return outUncert
# If, however, ALL of the images DO have uncertainty, then proceed to
# chop out that uncertainty subStack and return it
# Compute the number of rows in this sub stack
numberOfRows = endRow - startRow
# Build an array for storing output
outUncert = np.zeros((nz, numberOfRows, nx))
# Loop through each image and extract its data
for zInd, img in enumerate(self.imageList):
outUncert[zInd, :, :] = img.uncertainty[startRow:endRow, :]
return outUncert
@staticmethod
def _initalize_mask(dataSubStack):
"""
Initializes an output mask and masks NaNs and Infs in the input array
Parameters
----------
dataSubStack : numpy.ma.ndarray
The data array in which the bad pixels are to be found
Returns
-------
outMask : numpy.ndarray (bool type)
An empty mask in which the final output will be stored
outSubStack: numpy.ndarray
An array containing all the same data as dataSubStack but with any
NaNs or Infs masked
"""
# Initialize an array to store the output mask values
outMask = np.zeros(dataSubStack.shape, dtype=bool)
# Start by masking out NaNs or Infs
NaNsOrInfs = np.logical_not(np.isfinite(dataSubStack.data))
dataSubStack.mask = NaNsOrInfs
return outMask, dataSubStack
@staticmethod
def _increment_sigma_clip_scale(inputClipScale, nextScale,
backgroundClipStep, starClipStep=0, starMask=False):
"""
Builds an array to indicate the sigma-clipping level for each column
Parameters
----------
inputClipScale : numpy.ndarray
An array containing the sigma-clipping scale BEFORE this iteration
nextScale : numpy.ndarray (bool type)
An array indicating which columns should have their sigma-clipping
scale incremented by another step
backgroundClipStep : int or float
The amount to increment the sigma-clipping for background pixels
starClipStep : int or float, optional, default: 0
The amount to increment the sigma-clipping for stellar pixels. If
this is not provided, then `starMask` must also be 0.
starMask : numpy.ndarray (bool type), optional, default: False
An array indicating which pixels are located in stars. True
indicates stellar pixels. If this is provided, then `starClipStep`
must also be provided.
Returns
-------
outputClipScale : numpy.ndarray
An array containing the sigma-clipping scale AFTER this iteration
"""
# If starMask is False, then set starClipStep to zero.
if starMask is False:
starClipStep = 0
# Generate simple arrays to indicate which pixels are background/star
starPix = np.array(starMask).astype(int)
backgroundPix = np.logical_not(starPix).astype(int)
# Convert the nextScale array into an integer array
nextScale = np.array(nextScale).astype(int)
# Copy the input sigma-clipping scale and add the next steps to it.
outputClipScale = inputClipScale
outputClipScale += backgroundClipStep * nextScale * backgroundPix
outputClipScale += starClipStep * nextScale * starPix
return outputClipScale
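# Illustrative note: on the first call (inputClipScale = 0, nextScale = 1) every column
# is raised to its starting level -- the background start value for sky columns and the
# star start value for columns flagged in starMask; on later calls only the columns
# flagged in nextScale are incremented by their respective step sizes.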
@staticmethod
def _process_sigma_clip_iteration(dataSubStack, NaNsOrInfs, startNumMasked,
sigmaClipScale):
"""
Processes the next step in the sigma clip iteration
Parameters
----------
dataSubStack : numpy.ma.ndarray
The 3D data array in which to located bad pixels
NaNsOrInfs : numpy.ndarray
A 2D array indicating the locations of NaNs or Infs in dataSubStack
startNumMasked : numpy.ndarray
A 2D array indicating the number of masked pixels in each column of
the dataSubStack BEFORE this iteration. This should be equal to
numpy.sum(dataSubStack.mask, axis=0)
sigmaClipScale : numpy.ndarray
A 2D array containing the current sigma clipping level of each
column of pixels in dataSubStack
Returns
-------
dataSubStack : numpy.ma.ndarray
The same as the input dataSubStack array, but with its mask updated.
nextScale : numpy.ndarray
A 2D array indicating which columns should continue to the next step
of iteration.
endNumMasked : numpy.ndarray
A 2D array indicating the number of masked pixels in each column of
the datasubStack AFTER this iteration.
"""
# Estimate the median and standard deviation of this subStack
imgEstimate = np.ma.median(dataSubStack, axis = 0).data
stackSigma = np.ma.std(dataSubStack, axis = 0).data
# Build a bool array for marking the outliers of this dataSubStack
outliers = np.zeros(dataSubStack.shape, dtype=bool)
# Loop through the stack, and find the outliers.
for j in range(dataSubStack.shape[0]):
deviation = np.absolute(dataSubStack.data[j,:,:] - imgEstimate)
outliers[j,:,:] = (deviation > sigmaClipScale*stackSigma)
# Save the newly computed outliers to the mask
dataSubStack.mask = np.logical_or(outliers, NaNsOrInfs)
# Count the number of masked points after the iteration
endNumMasked = np.sum(dataSubStack.mask, axis=0)
# Determine which pixel columns experienced a CHANGE in the number of
# masked pixels and mark those for continued iteration
nextScale = endNumMasked != startNumMasked
return dataSubStack, nextScale, endNumMasked
def _construct_sub_stack_bad_pixel_mask(self, dataSubStack, starSubMask,
iters=5, backgroundClipStart=2.5, backgroundClipStep=0.5,
starClipStart=35.0, starClipStep=1.0):
"""
Computes the bad pixels to be masked in the median filtered mean.
Parameters
----------
dataSubStack : numpy.ma.ndarray
The array of values for which to identify outliers and compute masks
to cover those bad pixels.
starSubMask : numpy.ndarray (bool type) or bool
An array of booleans indicating which pixels in the aligned images
are inside stellar PSFs. True values indicate pixels inside a
stellar PSF. A scalar False value indicates there are no stars to
mask.
iters : int, optional, default: 5
The number of sigma-clipping iterations to perform when searching
for bad pixels
backgroundClipStart : int or float, optional, default: 2.5
The sigma-clipping level for background pixels during the first
iteration.
backgroundClipStep : int or float, optional, default: 0.5
The increment by which to increase the sigma-clipping level for
background pixels after each iteration.
starClipStart : int or float, optional, default: 35.0
The sigma-clipping level for stellar pixels during the first
iteration.
starClipStep : int or float, optional, default: 1.0
The increment by which to increase the sigma-clipping level for
stellar pixels after each iteration.
Returns
-------
dataSubStack : numpy.ma.ndarray
The `mask` attribute of this object is a bool type array where True
values indicate bad pixels based on the provided backgroundClipSigma
and starClipSigma values.
"""
# Initialize the output array and cover up any NaNs/Infs in dataSubStack
outMask, dataSubStack = self._initalize_mask(dataSubStack)
# At this stage, only NaNs or Infs are masked, so save that information
# for use when processing the sigma-clipping.
NaNsOrInfs = dataSubStack.mask
# Initialize an array of sigma-clipping values
sigmaClipScale = self._increment_sigma_clip_scale(
0, # Start at zero sigma-clipping
1, # Increment EVERY pixel up to its starting value
backgroundClipStart,
starClipStart,
starSubMask
)
# Compute the starting number of pixels masked in each column
startNumMasked = np.sum(dataSubStack.mask, axis=0)
# This loop will iterate until the mask converges to an
# unchanging state, or until clipSigma is reached.
for iLoop in range(iters):
print('\tProcessing section for (\u03C3(bkg), \u03C3(*)) = ({0:3.2g}, {1:3.2g})'.format(
backgroundClipStart + backgroundClipStep*iLoop,
starClipStart + starClipStep*iLoop))
# Perform the next iteration in the sigma-clipping
dataSubStack, nextScale, startNumMasked = self._process_sigma_clip_iteration(
dataSubStack,
NaNsOrInfs,
startNumMasked,
sigmaClipScale
)
if np.sum(nextScale) == 0:
# If there are no new data included, then break out of loop
break
else:
# Otherwise increment scale where new data are included
sigmaClipScale = self._increment_sigma_clip_scale(
sigmaClipScale, # Start at the original sigma-clipping
nextScale, # Increment only columns with changed masking
backgroundClipStep, # Amount to increment bkg pixels
starClipStep, # Amount to increment star pixels
starSubMask # Array indictating star pixels
)
# When finished processing each sub stack, return the final
return dataSubStack
@staticmethod
def _propagate_masked_uncertainty(uncertainty, mask):
"""Computes the uncertainty in the masked array."""
# Compute the variance of the total quantity
varianceOfTheTotal = np.nansum(uncertainty**2, axis=0)
# Count the number of unmasked pixels in each column of the stack
goodPix = np.logical_and(
np.logical_not(mask),
np.isfinite(uncertainty)
)
numberOfUnmaskedPixels = np.sum(goodPix.astype(np.int16), axis=0)
# Estimate the uncertainty by dividing the variance by the number of
# unmasked pixels in each column, and then taking the square root.
maskedUncertainty = np.sqrt(varianceOfTheTotal)/numberOfUnmaskedPixels
return maskedUncertainty
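# Descriptive note: for a fully unmasked column of N pixels that each carry the same
# uncertainty sigma, the expression above reduces to sqrt(N*sigma**2)/N = sigma/sqrt(N),
# i.e. the standard error of the mean.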
@staticmethod
def _compute_masked_mean_and_uncertainty(maskedData, uncertainty):
"""
Computes the mean and uncertainty in the mean of a masked array.
"""
# Compute the mean of the unmasked pixels
maskedMean = maskedData.mean(axis=0).data
# Compute the masked uncertainty array
if uncertainty is None:
# If not all of the images had uncertainty arrays, then we must
# resort to estimating the uncertainty from the data variance.
maskedUncertainty = maskedData.std(axis=0).data
else:
# If an array of uncertainties was provided, then we can proceed by
# propagating those uncertainties.
maskedUncertainty = ImageStack._propagate_masked_uncertainty(
uncertainty,
maskedData.mask
)
return maskedMean, maskedUncertainty
def _compute_stack_mean_and_uncertainty(self, starMask, iters=5,
backgroundClipSigma=5.0, starClipSigma=40.0):
"""
Computes the mean and uncertainty of the complete stack.
Takes a star mask and treats pixels inside stars with a more forgiving
sigma-clipping than pixels outside stars.
Parameters
----------
starMask : numpy.ndarray (bool type)
An array of booleans indicating which pixels in the aligned images
are inside stellar PSFs. True values indicate pixels inside a
stellar PSF.
iters : int, optional, default: 5
The number of sigma-clipping iterations to perform when searching
for bad pixels
backgroundClipSigma : int or float, optional, default: 5.0
The number of standard-deviations from the median value a pixel
located outside of a star can deviate from the median before it is
marked as a bad pixel.
starClipSigma : int or float, optional, default: 40.0
The number of standard-deviations from the median value a pixel
located within a star can deviate from the median before it is
marked as a bad pixel.
Returns
-------
outMean : numpy.ndarray
The mean image
outUncert : numpy.ndarray
The uncertainty in that mean. If the images in the stack have
associated uncertainties, then this will be a propagated
uncertainty. If they do not have associated uncertainties, then this
will be estimated from the variance in the stack pixel values.
!WARNING! - Estimating uncertainty from the pixel variance will lead
to unreasonably high uncertainties in stellar PSFs. Thus, it is
much better to load the ReducedScience instances with an estimated
detector gain so that a Poisson uncertainty can be used.
"""
# Extract the number of images (nz) and the shape of the images (ny, nx)
nz, ny, nx = self.shape
# Test for the number of bits in each pixel (or just assume 64 bits)
bitsPerPixel = 64
# Compute the number of rows to process at a given time
numberOfRows, numSections = self._get_number_of_rows_to_process(bitsPerPixel)
print('Processing stack in {0} sections of {1} rows'.format(
numSections, numberOfRows))
# Compute the sigma-clipping starting points and increments
tmp = self._get_sigma_clip_start_and_steps(
iters=iters,
backgroundClipSigma=backgroundClipSigma,
starClipSigma=starClipSigma
)
backgroundClipStart, backgroundClipStep, starClipStart, starClipStep = tmp
# Initialize an empty array to hold the output
outMean = np.zeros((ny, nx))
outUncert = np.zeros((ny, nx))
for sectionNumber in range(numSections):
print('Starting section number {0}'.format(sectionNumber + 1))
# Compute the range of rows to extract
startRow, endRow = self._get_start_and_end_rows(
sectionNumber, numberOfRows
)
# Extract the data for this section
dataSubStack = self._extract_data_sub_stack(startRow, endRow)
# Extract the uncertainty for this section
uncertSubStack = self._extract_uncert_sub_stack(startRow, endRow)
# Extract the starSubMask for this section
if issubclass(type(starMask), np.ndarray):
starSubMask = starMask[startRow:endRow, :]
elif issubclass(type(starMask), bool):
starSubMask = starMask
# Build the bad pixel mask for this subStack
dataSubStack = self._construct_sub_stack_bad_pixel_mask(
dataSubStack,
starSubMask,
iters=iters,
backgroundClipStart=backgroundClipStart,
backgroundClipStep=backgroundClipStep,
starClipStart=starClipStart,
starClipStep=starClipStep
)
# Compute the mean and uncertainty of the masked array
mean, uncert = self._compute_masked_mean_and_uncertainty(
dataSubStack, uncertSubStack)
# Store the result in the output
outMean[startRow:endRow, :] = mean
outUncert[startRow:endRow, :] = uncert
return outMean, outUncert
def _parse_stars_according_to_image(self, starClipSigma=40.0):
"""
Builds star mask for ReducedScience and turns off star masking for others
Parameters
----------
starClipSigma : int or float, optional, default: 40.0
The number of standard-deviations from the median value a pixel
located within a star can deviate from the median before it is
marked as a bad pixel.
Returns
-------
starMask : bool or numpy.ndarray (bool type)
An array indicating which pixels are located in stars. True
indicates stellar pixels. If no star clipping should happen (i.e. this
image does not contain stars), then this is simply set to False.
starClipSigma : int or float
This is the same as the input starClipSigma value except that it is
set to 0 if no star clipping should happen (i.e. this image does
not contain stars)
"""
if issubclass(self.imageType, ReducedScience):
# Check if all the images were corrected to Airmass 0.0
if np.sum([img.airmass for img in self.imageList]) > 0:
raise ValueError('All images in the imageList must be corrected to airmass=0.0 before combining')
# Compute the star masks for this image stack.
starMask = self._construct_star_mask()
else:
starMask = False
starClipSigma = 0
return starMask, starClipSigma
def _finalize_output(self, stackMean, stackUncert):
"""
Places mean and uncertainty into an image object and solves astrometry
Only attempts an astrometric solution if the output image is a
ReducedScience instance.
Parameters
----------
stackMean : numpy.ndarray
The resulting median-filtered-mean of the stacked data
stackUncert : numpy.ndarray
The resulting uncertainty in the `stackMean` values
Returns
-------
outImg : ReducedImage (or subclass)
An image instance containing the mean and uncertainty and
astrometric solution if possible.
"""
# Select the type of output image to be built on the basis of the image
# obsType.
outImageClassDict = {
'BIAS': MasterBias,
'DARK': MasterDark,
'FLAT': MasterFlat,
'OBJECT': ReducedScience
}
outImageClass = outImageClassDict[self.imageList[0].obsType]
# TODO: decide if it is a good idea to have an optional uncertainty...
# # Properly handle the uncertainty provided
# if stackUncert is not None:
# outUncert = StdDevUncertainty(stackUncert)
# else:
# outUncert = None
# Return that data to the user in a single AstroImage instance
outImg = outImageClass(
stackMean,
uncertainty=StdDevUncertainty(stackUncert),
header=self.imageList[0].header,
properties={'unit': self.imageList[0].unit}
)
# Clean up any bad pixels in this image using the Inpainter class
inpainter = Inpainter(outImg)
outImg = inpainter.inpaint_nans()
# If the output image is a ReducedScience and is not a supersky image,
# then clear out the old astrometry and solve it anew!
if (outImageClass is ReducedScience) and not self.is_supersky:
# Clear out the old astrometry
outImg.clear_astrometry()
# Initialize an astrometry solver object
astroSolver = AstrometrySolver(outImg)
# Attempt to perform an astrometric solution
temporaryImage, success = astroSolver.run()
# If astrometry solution was successful, then replace the output
if success: outImg = temporaryImage
return outImg
def _compute_supersky(self, starMasks):
"""
Computes the masked median of the unaligned image stack.
Parameters
----------
starMasks : numpy.ndarray
A (numberOfImages, ny, nx) array containing a True value wherever
there are stars and a False value in all the sky pixels.
Returns
-------
supersky : numpy.ndarray
A (ny, nx) array containing the median sky-counts in each pixel
"""
# TODO: break this up into more manageable, bite-sized pieces if necessary.
# Construct a median normalized data stack
dataStack = np.zeros(self.shape, dtype=np.float32)
# Loop through each image, normalize and place in data stack
for imgNum, img in enumerate(self.imageList):
# Copy the data for this image (use a copy so the original image is not modified)
thisData = img.data.copy()
# Mask this image with its starMask
starInds = np.where(starMasks[imgNum, :, :])
thisData[starInds] = np.NaN
# Compute the median of this image
thisMedian = np.nanmedian(thisData)
# Median normalize this image
thisData /= thisMedian
# Place the normalized image in its place
dataStack[imgNum, :, :] = thisData
# Compute the median image (ignore warnings because we'll fix those)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
medianArray = np.nanmedian(dataStack, axis=0)
# Compute uncertainty as standard deviation/sqrt(numOfUnmaskedPixels)
stdArray = np.nanstd(dataStack, axis=0)
numPix = np.sum(np.isfinite(dataStack), axis=0)
uncertArray = stdArray/np.sqrt(numPix - 1)
# Renormalize by this output median
thisMedian = np.nanmedian(medianArray)
medianArray /= thisMedian
uncertArray /= np.abs(thisMedian)
# Return to user
return medianArray, uncertArray
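# Descriptive note (an interpretation; the division step is not shown in this excerpt):
# because the result is median-normalized to 1.0, it is presumably meant to be divided
# into science frames to remove the large-scale sky/illumination pattern without
# changing their overall flux scale.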
####################################
### END OF COMBINATION HELPERS ###
####################################
####################################
### START OF COMBINATION METHODS ###
####################################
def produce_supersky(self, dilationWidth=4):
"""
Computes the median of the unregistered image stack.
Parameters
----------
dilationWidth : int or float, optional, default: 4
The amount to circularly dilate outward from masked pixels. These
roughly translate to pixel values so that `dilationWidth=4` will
mask any locations within 4 pixels of a crudely identified star
pixel. This actually depends on a number of other factors, such as
the number of crudely identified star pixels in a given star, etc...
Returns
-------
outImg : ReducedImage (or subclass)
The average sky image with stars masked.
"""
# Catch if the images have been aligned and raise an error
if self.aligned:
raise RuntimeError('Cannot produce supersky with aligned images')
# Catch if there are enough images to produce a supersky
if self.numberOfImages < 2:
raise RuntimeError('Cannot produce supersky with less than 2 images')
# Produce individual star masks for each image in the ImageStack
starMasks = self._produce_individual_star_masks(
dilationWidth=dilationWidth
)
# Compute the mean and uncertainty given this star mask
stackMedian, stackUncert = self._compute_supersky(starMasks)
# Set the boolean supersky flag
self.__is_supersky = True
# Place the stack average and uncertainty into an array
outImg = self._finalize_output(stackMedian, stackUncert)
# Remove the units from the output image because superskys don't have units
outImg /= (1.0*outImg.unit)
# Return the resulting image to the user
return outImg
def combine_images(self, iters=5, double=False, backgroundClipSigma=5.0,
starClipSigma=40.0):
"""
Computes the median filtered mean of the image stack.
Starts by identifying which pixels are located in stars and applies a
more tolerant sigma-clipping procedure to those pixels.
Parameters
----------
iters : int, optional, default: 5
The number of sigma-clipping iterations to perform when searching
for bad pixels
double : bool, optional, default: False
If True, then the output image will be computed as a 64-bit float.
If False, then the output image will be computed as a 32-bit float.
backgroundClipSigma : int or float, optional, default: 5.0
The number of standard-deviations from the median value a pixel
located outside of a star can deviate from the median before it is
marked as a bad pixel.
starClipSigma : int or float, optional, default: 40.0
The number of standard-deviations from the median value a pixel
located within a star can deviate from the median before it is
marked as a bad pixel.
Returns
-------
outImg : ReducedImage (or subclass)
The average image and uncertainty based on the input image list
"""
# Catch the truly trivial case
if self.numberOfImages <= 1:
return self.imageList[0]
# Catch if the images have not been aligned
if not self.aligned:
raise RuntimeError('This ImageStack has not yet been aligned')
# If this is not an astroimage, then catch it and PREVENT star clipping
tmp = self._parse_stars_according_to_image(starClipSigma)
starMask, starClipSigma = tmp
# Compute the mean and uncertainty given this star mask
stackMean, stackUncert = self._compute_stack_mean_and_uncertainty(
starMask,
iters=iters,
backgroundClipSigma=backgroundClipSigma,
starClipSigma=starClipSigma
)
# Place the stack average and uncertainty into an array
outImg = self._finalize_output(stackMean, stackUncert)
# Return the resulting image to the user
return outImg
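# Hedged usage sketch (assumes an ImageStack instance named `stack` built from
# ReducedScience images elsewhere; the constructor is not shown in this excerpt):
#
# stack.align_images_with_cross_correlation(subPixel=True, satLimit=16e3)
# combinedImage = stack.combine_images(iters=5, backgroundClipSigma=5.0,
#                                      starClipSigma=40.0)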
|
[
"jmontgom.10@gmail.com"
] |
jmontgom.10@gmail.com
|
56ff0c923d0bebe7b7f141de3264c6715b572f0b
|
e56b5d35e502eb9cd6170ce2ff778c3a153e0df1
|
/gru/network.py
|
b4cd0cf20c21e4b8414ee2a305c5c87471c16f14
|
[] |
no_license
|
fankib/STGRU
|
c84f2064cd2c3c66a4055f9b48af2a9f59561447
|
a562fb1b20c79afc54413ea31ce226273ac00bcd
|
refs/heads/master
| 2020-09-22T14:14:46.589655
| 2020-06-03T08:53:15
| 2020-06-03T08:53:15
| 225,234,660
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,146
|
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from enum import Enum
from gru import OwnGRU, OwnLSTM, STGN, STGCN
class GRU(Enum):
PYTORCH_GRU = 0
OWN_GRU = 1
RNN = 2
LSTM = 3
OWN_LSTM = 4
STGN = 5
STGCN = 6
@staticmethod
def from_string(name):
if name == 'pytorch':
return GRU.PYTORCH_GRU
if name == 'own':
return GRU.OWN_GRU
if name == 'rnn':
return GRU.RNN
if name == 'lstm':
return GRU.LSTM
if name == 'ownlstm':
return GRU.OWN_LSTM
if name == 'stgn':
return GRU.STGN
if name == 'stgcn':
return GRU.STGCN
raise ValueError('{} not supported'.format(name))
class GruFactory():
def __init__(self, gru_type_str):
self.gru_type = GRU.from_string(gru_type_str)
def is_lstm(self):
return self.gru_type in [GRU.LSTM, GRU.OWN_LSTM, GRU.STGN, GRU.STGCN]
def is_stgn(self):
return self.gru_type in [GRU.STGN, GRU.STGCN]
def greeter(self):
if self.gru_type == GRU.PYTORCH_GRU:
return 'Use pytorch GRU implementation.'
if self.gru_type == GRU.OWN_GRU:
return 'Use *own* GRU implementation.'
if self.gru_type == GRU.RNN:
return 'Use vanilla pytorch RNN implementation.'
if self.gru_type == GRU.LSTM:
return 'Use pytorch LSTM implementation.'
if self.gru_type == GRU.OWN_LSTM:
return 'Use *own* LSTM implementation.'
if self.gru_type == GRU.STGN:
return 'Use STGN variant.'
if self.gru_type == GRU.STGCN:
return 'Use STG*C*N variant.'
def create(self, hidden_size):
if self.gru_type == GRU.PYTORCH_GRU:
return nn.GRU(hidden_size, hidden_size)
if self.gru_type == GRU.OWN_GRU:
return OwnGRU(hidden_size)
if self.gru_type == GRU.RNN:
return nn.RNN(hidden_size, hidden_size)
if self.gru_type == GRU.LSTM:
return nn.LSTM(hidden_size, hidden_size)
if self.gru_type == GRU.OWN_LSTM:
return OwnLSTM(hidden_size)
if self.gru_type == GRU.STGN:
return STGN(hidden_size)
if self.gru_type == GRU.STGCN:
return STGCN(hidden_size)
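# Hedged usage sketch (the hidden size value is arbitrary):
#
# factory = GruFactory('stgn')
# print(factory.greeter()) # -> 'Use STGN variant.'
# cell = factory.create(64) # STGN recurrent cell with hidden size 64
# factory.is_lstm(), factory.is_stgn() # -> (True, True)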
class RNN(nn.Module):
''' GRU based RNN, using embeddings and one linear output layer '''
def __init__(self, input_size, hidden_size, gru_factory):
super(RNN, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.encoder = nn.Embedding(input_size, hidden_size)
self.gru = gru_factory.create(hidden_size)
self.fc = nn.Linear(hidden_size, hidden_size)
def forward(self, x, h, active_user):
seq_len, user_len = x.size()
x_emb = self.encoder(x)
out, h = self.gru(x_emb, h)
#out, (h, c) = self.gru(x_emb) # lstm hack
y_linear = self.fc(out)
return y_linear, h
class RNN_user(nn.Module):
''' GRU based RNN, with user embeddings and one linear output layer '''
def __init__(self, input_size, user_count, hidden_size, gru_factory):
super(RNN_user, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.user_count = user_count
self.encoder = nn.Embedding(input_size, hidden_size)
self.user_encoder = nn.Embedding(user_count, hidden_size)
self.gru = gru_factory.create(hidden_size)
self.fc = nn.Linear(hidden_size, hidden_size)
def forward(self, x, h, active_user):
seq_len, user_len = x.size()
x_emb = self.encoder(x)
out, h = self.gru(x_emb, h)
y_linear = self.fc(out)
p_u = self.user_encoder(active_user)
p_u = p_u.view(1, user_len, self.hidden_size)
return (y_linear + p_u), h
class RNN_cls(nn.Module):
''' GRU based RNN used for cross entropy loss, using embeddings and one linear output layer '''
def __init__(self, input_size, hidden_size, gru_factory):
super(RNN_cls, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.encoder = nn.Embedding(input_size, hidden_size)
self.gru = gru_factory.create(hidden_size)
self.fc = nn.Linear(hidden_size, input_size) # output dimension equals the number of locations
def forward(self, x, h, active_user):
seq_len, user_len = x.size()
x_emb = self.encoder(x)
out, h = self.gru(x_emb, h)
y_linear = self.fc(out)
return y_linear, h
class RNN_cls_user(nn.Module):
''' GRU based RNN used for cross entropy loss with user embeddings '''
def __init__(self, input_size, user_count, hidden_size, gru_factory):
super(RNN_cls_user, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.user_count = user_count
self.encoder = nn.Embedding(input_size, hidden_size)
self.user_encoder = nn.Embedding(user_count, hidden_size)
self.gru = gru_factory.create(hidden_size)
self.fc = nn.Linear(2*hidden_size, input_size) # output dimension equals the number of locations
def forward(self, x, h, active_user):
seq_len, user_len = x.size()
x_emb = self.encoder(x)
out, h = self.gru(x_emb, h)
p_u = self.user_encoder(active_user)
p_u = p_u.view(user_len, self.hidden_size)
# broadcast on sequence (concat user embeddings):
out_pu = torch.zeros(seq_len, user_len, 2*self.hidden_size, device=x.device)
for i in range(seq_len):
out_pu[i] = torch.cat([out[i], p_u], dim=1)
y_linear = self.fc(out_pu)
return y_linear, h
class RNN_stgn(nn.Module):
''' STGN based RNN used for bpr, using own weights '''
def __init__(self, input_size, hidden_size, gru_factory):
super(RNN_stgn, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.encoder = nn.Embedding(input_size, hidden_size)
self.gru = gru_factory.create(hidden_size)
self.fc = nn.Linear(hidden_size, hidden_size) # output layer used by forward() below; hidden->hidden size assumed, matching the bpr RNN class above
def forward(self, x, delta_t, delta_s, h):
seq_len, user_len = x.size()
x_emb = self.encoder(x)
out, h = self.gru(x_emb, delta_t, delta_s, h)
y_linear = self.fc(out) # use own weights!
return y_linear, h
class RNN_cls_stgn(nn.Module):
''' STGN based RNN used for cross entropy loss and one linear output layer '''
def __init__(self, input_size, hidden_size, gru_factory):
super(RNN_cls_stgn, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.encoder = nn.Embedding(input_size, hidden_size)
self.gru = gru_factory.create(hidden_size)
self.fc = nn.Linear(hidden_size, input_size) # output dimension equals the number of locations
def forward(self, x, delta_t, delta_s, h):
seq_len, user_len = x.size()
x_emb = self.encoder(x)
out, h = self.gru(x_emb, delta_t, delta_s, h)
y_linear = self.fc(out)
return y_linear, h
class RNN_cls_st(nn.Module):
''' GRU based RNN. Applies a weighted average using spatial and temporal data '''
def __init__(self, input_size, hidden_size, f_t, f_s, gru_factory):
super(RNN_cls_st, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.f_t = f_t # function for computing temporal weight
self.f_s = f_s # function for computing spatial weight
self.encoder = nn.Embedding(input_size, hidden_size)
self.gru = gru_factory.create(hidden_size)
self.fc = nn.Linear(hidden_size, input_size) # output dimension equals the number of locations
def forward(self, x, t, s, y_t, y_s, h, active_user):
seq_len, user_len = x.size()
x_emb = self.encoder(x)
out, h = self.gru(x_emb, h)
# compute weights per sequence step
out_w = torch.zeros(seq_len, user_len, self.hidden_size, device=x.device)
for i in range(seq_len):
sum_w = torch.zeros(user_len, 1, device=x.device)
for j in range(i+1):
dist_t = t[i] - t[j]
dist_s = torch.norm(s[i] - s[j], dim=-1)
a_j = self.f_t(dist_t, user_len) # (torch.cos(cummulative_t[j]*2*np.pi / 86400) + 1) / 2 #
b_j = self.f_s(dist_s, user_len)
a_j = a_j.unsqueeze(1)
b_j = b_j.unsqueeze(1)
w_j = a_j*b_j + 1e-10 # small epsilon to have no 0 division
sum_w += w_j
out_w[i] += w_j*out[j] # could be factored out into a matrix!
# normalize according to weights
out_w[i] /= sum_w
y_linear = self.fc(out_w)
return y_linear, h
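# Descriptive note: for each step i the loop above forms the weighted average
# out_w[i] = sum_{j<=i} w_ij * out[j] / sum_{j<=i} w_ij, where
# w_ij = f_t(t_i - t_j) * f_s(||s_i - s_j||) + 1e-10; assuming f_t and f_s are
# decaying weight functions, temporally and spatially closer check-ins dominate.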
class RNN_cls_st_user(nn.Module):
''' GRU based RNN. Applies a weighted average using spatial and temporal data WITH user embeddings '''
def __init__(self, input_size, user_count, hidden_size, f_t, f_s, gru_factory):
super(RNN_cls_st_user, self).__init__()
self.input_size = input_size
self.user_count = user_count
self.hidden_size = hidden_size
self.f_t = f_t # function for computing temporal weight
self.f_s = f_s # function for computing spatial weight
self.encoder = nn.Embedding(input_size, hidden_size)
self.user_encoder = nn.Embedding(user_count, hidden_size)
self.gru = gru_factory.create(hidden_size)
self.fc = nn.Linear(2*hidden_size, input_size) # output dimension equals the number of locations
def forward(self, x, t, s, y_t, y_s, h, active_user):
seq_len, user_len = x.size()
x_emb = self.encoder(x)
out, h = self.gru(x_emb, h)
# compute weights per sequence step
out_w = torch.zeros(seq_len, user_len, self.hidden_size, device=x.device)
for i in range(seq_len):
sum_w = torch.zeros(user_len, 1, device=x.device)
for j in range(i+1):
dist_t = t[i] - t[j]
dist_s = torch.norm(s[i] - s[j], dim=-1)
a_j = self.f_t(dist_t, user_len) # (torch.cos(cummulative_t[j]*2*np.pi / 86400) + 1) / 2 #
b_j = self.f_s(dist_s, user_len)
a_j = a_j.unsqueeze(1)
b_j = b_j.unsqueeze(1)
w_j = a_j*b_j + 1e-10 # small epsilon to have no 0 division
sum_w += w_j
out_w[i] += w_j*out[j] # could be factored out into a matrix!
# normalize according to weights
out_w[i] /= sum_w
# add user embedding:
p_u = self.user_encoder(active_user)
p_u = p_u.view(user_len, self.hidden_size)
out_pu = torch.zeros(seq_len, user_len, 2*self.hidden_size, device=x.device)
for i in range(seq_len):
out_pu[i] = torch.cat([out_w[i], p_u], dim=1)
y_linear = self.fc(out_pu)
return y_linear, h
|
[
"benjamin.fankhauser@bfh.ch"
] |
benjamin.fankhauser@bfh.ch
|
6a2266d3fe5ce2ebea74f20a616326857a830adc
|
0b189ca8e2bda62a816c4f519eec457dfd005830
|
/schema.py
|
275c205c7cad6869b55cbd22e0155425b7cdadd0
|
[] |
no_license
|
katheich/graphene-django-basic-tutorial
|
d1ffaf96fb6a6f24766374281677108d692471e6
|
1bb15a392f0abf61ec273a757707e26637fe69b2
|
refs/heads/master
| 2023-04-08T15:27:12.910761
| 2020-02-08T11:16:50
| 2020-02-08T11:16:50
| 238,943,745
| 0
| 0
| null | 2023-03-15T18:02:44
| 2020-02-07T14:27:58
|
Python
|
UTF-8
|
Python
| false
| false
| 255
|
py
|
import graphene
import ingredients.schema
class Query(ingredients.schema.Query, graphene.ObjectType):
# This class will inherit from multiple Queries
# as we begin to add more apps to our project
pass
schema = graphene.Schema(query=Query)
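# Hedged usage sketch (the `allIngredients` field is assumed to be provided by the
# ingredients app, as in the standard graphene-django tutorial):
#
# result = schema.execute('{ allIngredients { id name } }')
# print(result.data)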
|
[
"kathrin801@gmail.com"
] |
kathrin801@gmail.com
|
f23b24f93a92453fdd1acc05b450f0c269959bd9
|
78c4a0b029ef1af4ac9ef90305eef85ef866ad35
|
/networking/dbus/mpris.py
|
c6cdf510fbed7188d58e45f5eae08f22f5144938
|
[
"MIT"
] |
permissive
|
qeedquan/misc_utilities
|
7727e33e01a9f45275e3efdd165c90053d8ba10a
|
e8319e6572dd92efceebb5a2d52a00cb993492b2
|
refs/heads/master
| 2023-08-08T13:24:18.213741
| 2023-08-07T00:26:03
| 2023-08-07T00:26:03
| 46,625,839
| 10
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,786
|
py
|
#!/usr/bin/env python
"""
https://specifications.freedesktop.org/mpris-spec/2.2/
https://amish.naidu.dev/blog/dbus/
Mainstream media players using this:
audacious
vlc
parole media player
"""
import dbus
import re
player_path = "org.mpris.MediaPlayer2.Player"
bus = dbus.SessionBus()
def test_control(player):
# go to next song
player.Next(dbus_interface=player_path)
# go to previous song
player.Previous(dbus_interface=player_path)
# pause
player.Pause(dbus_interface=player_path)
# play/pause
player.PlayPause(dbus_interface=player_path)
# we can simplify the above by creating an interface
interface = dbus.Interface(player, dbus_interface=player_path)
# all method above applies
interface.Next()
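# Hedged note: properties can also be written through the same Properties interface
# (the MPRIS spec defines Volume as read/write); e.g., assuming the player honors it:
# prop_iface = dbus.Interface(player, dbus_interface='org.freedesktop.DBus.Properties')
# prop_iface.Set('org.mpris.MediaPlayer2.Player', 'Volume', dbus.Double(0.5))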
def get_properties(player):
print(player.Get(player_path, 'Volume', dbus_interface='org.freedesktop.DBus.Properties'))
# can create an interface object to make it simpler
property_interface = dbus.Interface(player, dbus_interface='org.freedesktop.DBus.Properties')
volume = property_interface.Get('org.mpris.MediaPlayer2.Player', 'Volume')
print(volume)
for property, value in property_interface.GetAll('org.mpris.MediaPlayer2.Player').items():
print(property, ':', value)
metadata = player.Get('org.mpris.MediaPlayer2.Player', 'Metadata', dbus_interface='org.freedesktop.DBus.Properties')
for attr, value in metadata.items():
print(attr, '\t', value)
for service in bus.list_names():
if re.match('org.mpris.MediaPlayer2.', service):
mpris_path = "/org/mpris/MediaPlayer2"
# given an object, we can call an interface defined on the object
player = bus.get_object(service, mpris_path)
test_control(player)
get_properties(player)
|
[
"qeed.quan@gmail.com"
] |
qeed.quan@gmail.com
|
aeb89ffab98427974b48d58dd4055f8cab16be78
|
52b40e35d86bf947a180bd960510771e01d4b36d
|
/packages/Python/lldbsuite/test/functionalities/data-formatter/swift-typealias/TestSwiftTypeAliasFormatters.py
|
22ab10c43e4d1778582fc03ae82e99b304fd2a9b
|
[
"NCSA"
] |
permissive
|
k8stone/swift-lldb
|
99a6253959d3610a04c3009e2840860581fc7a93
|
952b49258a3ab0baa66ab02cf176fee0f5dc22ab
|
refs/heads/master
| 2021-01-17T07:00:15.564729
| 2015-12-11T21:01:05
| 2015-12-11T21:01:05
| 47,856,292
| 1
| 0
| null | 2015-12-11T23:36:05
| 2015-12-11T23:36:05
| null |
UTF-8
|
Python
| false
| false
| 3,756
|
py
|
# TestSwiftTypeAliasFormatters.py
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2015 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ------------------------------------------------------------------------------
"""
Test that Swift typealiases get formatted properly
"""
import lldb
from lldbsuite.test.lldbtest import *
import lldbsuite.test.lldbutil as lldbutil
import os
class TestSwiftTypeAliasFormatters(TestBase):
mydir = TestBase.compute_mydir(__file__)
@unittest2.skipUnless(sys.platform.startswith("darwin"), "requires Darwin")
@dsym_test
@swiftTest
def test_with_dsym(self):
"""Test that Swift typealiases get formatted properly"""
self.buildDsym()
self.do_test()
@dwarf_test
@swiftTest
def test_with_dwarf(self):
"""Test that Swift typealiases get formatted properly"""
self.buildDwarf()
self.do_test()
def setUp(self):
TestBase.setUp(self)
self.main_source = "main.swift"
self.main_source_spec = lldb.SBFileSpec (self.main_source)
def do_test(self):
"""Test that Swift typealiases get formatted properly"""
exe_name = "a.out"
exe = os.path.join(os.getcwd(), exe_name)
# Create the target
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# Set the breakpoints
breakpoint = target.BreakpointCreateBySourceRegex('break here', self.main_source_spec)
self.assertTrue(breakpoint.GetNumLocations() > 0, VALID_BREAKPOINT)
# Launch the process, and do not stop at the entry point.
process = target.LaunchSimple(None, None, os.getcwd())
self.assertTrue(process, PROCESS_IS_VALID)
# Frame #0 should be at our breakpoint.
threads = lldbutil.get_threads_stopped_at_breakpoint (process, breakpoint)
self.assertTrue(len(threads) == 1)
self.thread = threads[0]
self.frame = self.thread.frames[0]
self.assertTrue(self.frame, "Frame 0 is valid.")
def cleanup():
self.runCmd('type format clear', check=False)
self.runCmd('type summary clear', check=False)
self.runCmd("settings set target.max-children-count 256", check=False)
self.addTearDownHook(cleanup)
self.expect("frame variable f", substrs=['.Foo) f = 12'])
self.expect("frame variable b", substrs=['.Bar) b = 24'])
self.runCmd('type summary add a.Foo -v -s "hello"')
self.expect("frame variable f", substrs=['.Foo) f = hello'])
self.expect("frame variable b", substrs=['.Bar) b = hello'])
self.runCmd('type summary add a.Bar -v -s "hi"')
self.expect("frame variable f", substrs=['.Foo) f = hello'])
self.expect("frame variable b", substrs=['.Bar) b = hi'])
self.runCmd("type summary delete a.Foo")
self.expect("frame variable f", substrs=['.Foo) f = 12'])
self.expect("frame variable b", substrs=['.Bar) b = hi'])
self.runCmd("type summary delete a.Bar")
self.runCmd("type summary add -C no -v a.Foo -s hello")
self.expect("frame variable f", substrs=['.Foo) f = hello'])
self.expect("frame variable b", substrs=['.Bar) b = 24'])
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
[
"tfiala@apple.com"
] |
tfiala@apple.com
|
3926e85a4e520c8c3d90ab7486085a7e4262c471
|
91efd185e9ac6a8cf0c717471531ed6acb758191
|
/tests.py
|
74d1042aa46fb79fa834c277943a97c980062c38
|
[] |
no_license
|
aromaniello/globant_python_exercises
|
433605e26e23bf3845d35c4beda2aa27374e7db3
|
dc4b22feada6eb931f8083ea3c0ac41231ca23a4
|
refs/heads/master
| 2020-09-11T06:17:18.536639
| 2019-11-21T13:11:03
| 2019-11-21T13:11:03
| 221,968,336
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,506
|
py
|
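# Note (added, not in the original file): these tests assume a Matrix class with
# add, scalar_product, matrix_product, transpose, determinant, get_rows, get_columns
# and __str__ methods is already in scope, e.g. via an import such as
# `from matrix import Matrix` (module name assumed).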
# Test addition
m1 = Matrix([[1,2,3],[1,2,3]])
m2 = Matrix([[3,2,1],[3,2,1]])
result = m1.add(m2)
print("\nAdding " + str(m1.matrix) + " and " + str(m2.matrix) + ", result: " + str(result.matrix))
if result.matrix == [[4,4,4],[4,4,4]]:
print("Addition test successful")
else:
print("Addition test failed")
# Test scalar product
m1 = Matrix([[1,2,3],[4,5,6]])
scalar = 2
result = m1.scalar_product(scalar)
print("\nMultiplying " + str(m1.matrix) + " by scalar " + str(scalar) + ", result: " + str(result.matrix))
if result.matrix == [[2,4,6],[8,10,12]]:
print("Scalar product test successful")
else:
print("Scalar product test failed")
# Test matrix product
m1 = Matrix([[1,2,3],[4,5,6]])
m2 = Matrix([[7,8],[9,10],[11,12]])
result = m1.matrix_product(m2)
print("\nMultiplying " + str(m1.matrix) + " by matrix " + str(m2.matrix) + ", result: " + str(result.matrix))
if result.matrix == [[58,64],[139,154]]:
print("Matrix product test successful")
else:
print("Matrix product test failed")
# Test transpose
m1 = Matrix([[1,2],[3,4],[5,6]])
result = m1.transpose()
print("\nTransposing " + str(m1.matrix) + ", result: " + str(result.matrix))
if result.matrix == [[1,3,5],[2,4,6]]:
print("Transpose test successful")
else:
print("Transpose test failed")
# Test determinant 2x2
m1 = Matrix([[3,8],[4,6]])
result = m1.determinant()
print("\nDeterminant of " + str(m1.matrix) + " is " + str(result))
if result == -14:
print("Determinant 2x2 test successful")
else:
print("Determinant 2x2 test failed")
# Test determinant 3x3
m1 = Matrix([[6,1,1],[4,-2,5],[2,8,7]])
result = m1.determinant()
print("\nDeterminant of " + str(m1.matrix) + " is " + str(result))
if result == -306:
print("Determinant 3x3 test successful")
else:
print("Determinant 3x3 test failed")
# Test get_rows
m1 = Matrix([[1,2,3],[4,5,6],[7,8,9],[10,11,12],[13,14,15]])
result = m1.get_rows(2,4)
print("\nGetting rows 2 and 4 of " + str(m1.matrix) + ", result: " + str(result.matrix))
if result.matrix == [[4,5,6],[10,11,12]]:
print("Rows test successful")
else:
print("Rows test failed")
# Test get_columns
m1 = Matrix([[1,2,3,4,5],[6,7,8,9,10],[11,12,13,14,15]])
result = m1.get_columns(2,4)
print("\nGetting columns 2 and 4 of " + str(m1.matrix) + ", result: " + str(result.matrix))
if result.matrix == [[2,4],[7,9],[12,14]]:
print("Columns test successful")
else:
print("Columns test failed")
# str test
m1 = Matrix([[1,2,3],[4,5,6],[7,8,9]])
print("\nTesting __str__ implementation:")
print(str(m1))
|
[
"agustin@romaniello.com.ar"
] |
agustin@romaniello.com.ar
|
ae6b9987d4be539208bc25cd4cc81bbb43a6618a
|
b1ce15633def9819abd03c167a4bdfe603745a4d
|
/Blog_No3(BMI_Prediction)/visualize.py
|
828b33461957509a0dd6b6784edc44ce8133a57e
|
[] |
no_license
|
onkkul/Blog_Programs
|
808d5b4705197641776c42fb3c51a8e840aa490e
|
313d7b321a423269f72f935a5c39b7a953cf0283
|
refs/heads/master
| 2021-10-19T04:34:34.400996
| 2019-02-18T04:18:30
| 2019-02-18T04:18:30
| 110,731,263
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,560
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 29 16:40:39 2018
@author: NAIFE
"""
from matplotlib import pyplot as plt
# dataset for our model in the form of [height, weight] in inches and pounds respectively
X = [[62, 100], [67, 105], [67, 115],[72, 120], [72, 135],[76, 145], [76, 150], [58, 91], [58, 115], [63, 107], [63, 135], [68, 125], [68, 158], [73, 144], [73, 182], [76, 156], [76, 197], [58, 119], [58, 138], [63, 141], [63, 163], [68, 164],[68, 190], [73, 189], [73, 219], [76, 205], [76, 238], [58, 143], [58, 186], [63, 169], [63, 220], [68, 197],[68, 256], [73, 227], [73, 295], [76, 246], [76, 320]]
#labels for the above dataset
Y = ['underweight', 'underweight', 'underweight', 'underweight', 'underweight', 'underweight', 'underweight', 'normal','normal', 'normal', 'normal', 'normal', 'normal', 'normal', 'normal', 'normal', 'normal', 'overweight', 'overweight', 'overweight', 'overweight', 'overweight', 'overweight', 'overweight', 'overweight', 'overweight', 'overweight', 'obese', 'obese', 'obese', 'obese', 'obese', 'obese', 'obese', 'obese', 'obese', 'obese']
def visual():
    for red in range(0, 7):  # indices 0-6 are labelled 'underweight'
x = X[red][0]
y = X[red][1]
plt.plot(x, y, 'r+')
    for blue in range(7, 17):  # indices 7-16 are labelled 'normal'
x = X[blue][0]
y = X[blue][1]
plt.plot(x, y, 'bo')
    for green in range(17, 27):  # indices 17-26 are labelled 'overweight'
x = X[green][0]
y = X[green][1]
plt.plot(x, y, 'gx')
    for black in range(27, 37):  # indices 27-36 are labelled 'obese'
x = X[black][0]
y = X[black][1]
plt.plot(x,y,'k^')
plt.show()
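# Illustrative note (added, not in the original file): visual() is only defined above;
# a typical way to render the scatter plot when running this module directly would be:
#
#   if __name__ == '__main__':
#       visual()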
|
[
"noreply@github.com"
] |
onkkul.noreply@github.com
|
c747f4aa592960b9ec52d42f96aefd7e187160d0
|
e603596e7750177b92eb9f0a70e0cb952177e52a
|
/utils.py
|
3a90e8bd49c940fe0102248dca2eda89515b3423
|
[] |
no_license
|
XiaXuehai/Learning_FasterRCNN
|
30f90ef0e1d2f45fc6fe807e29f8bd4199a82818
|
7c2146cd7b5c548efa1d6cddf6ce2e8695e60fa3
|
refs/heads/master
| 2020-04-09T12:04:09.084777
| 2019-03-06T07:25:01
| 2019-03-06T07:25:01
| 160,334,661
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,739
|
py
|
# coding:utf-8
import numpy as np
def generate_anchor(side_lenth=16, strides=16, ratios=[0.5,1,2], scales=[0.5,1,2]):
py = side_lenth / 2
px = side_lenth / 2
anchor_base = np.zeros((len(ratios)*len(scales), 4), dtype=np.float32)
for i in range(len(ratios)):
for j in range(len(scales)):
h = side_lenth * strides * scales[j] * np.sqrt(ratios[i])
w = side_lenth * strides * scales[j] * np.sqrt(1/ratios[i])
index = i * len(scales) + j
anchor_base[index, 0] = px - 0.5 * w
anchor_base[index, 1] = py - 0.5 * h
anchor_base[index, 2] = px + 0.5 * w
anchor_base[index, 3] = py + 0.5 * h
return anchor_base
def get_anchor(anchor_base, stride, h, w):
grid_x = np.arange(w) * stride
grid_y = np.arange(h) * stride
x, y = np.meshgrid(grid_x, grid_y)
shift = np.stack((x.ravel(), y.ravel(), x.ravel(), y.ravel()), axis=1)
# coordinate
co = np.repeat(shift, len(anchor_base), axis=0)
# anchors
an = np.tile(anchor_base, [len(shift), 1])
anchors = co + an
anchors = anchors.astype(np.float32)
return anchors
def transform_locs(anchors, rpn_locs):
w_a = anchors[:, 2] - anchors[:, 0]
h_a = anchors[:, 3] - anchors[:, 1]
x_a = anchors[:, 0] + 0.5 * w_a
y_a = anchors[:, 1] + 0.5 * h_a
tx = rpn_locs[:, 0]
ty = rpn_locs[:, 1]
tw = rpn_locs[:, 2]
th = rpn_locs[:, 3]
dx = tx * w_a + x_a
dy = ty * h_a + y_a
dw = np.exp(tw) * w_a
dh = np.exp(th) * h_a
dst = np.zeros(rpn_locs.shape, dtype=rpn_locs.dtype)
dst[:, 0] = dx - 0.5 * dw
dst[:, 1] = dy - 0.5 * dh
dst[:, 2] = dx + 0.5 * dw
dst[:, 3] = dy + 0.5 * dh
return dst
def nms(rois, nms_thresh):
x1 = rois[:, 0]
y1 = rois[:, 1]
x2 = rois[:, 2]
y2 = rois[:, 3]
area = (x2 - x1 + 1) * (y2 - y1 + 1)
n = len(rois)
order = np.arange(n)
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0., xx2 - xx1 + 1)
h = np.maximum(0., yy2 - yy1 + 1)
in_area = w * h
iou = in_area / (area[i] + area[order[1:]] - in_area)
# keep the iou less than thresh
# update the order
idx = np.where(iou <= nms_thresh)[0]
order = order[idx+1]
return keep
def iou(abox, bbox):
# top-left and bottom-right
# broadcast
tl = np.maximum(abox[:, None, :2], bbox[:, :2])
br = np.minimum(abox[:, None, 2:], bbox[:, 2:])
wh = br - tl
wh[wh<0] = 0
inter = wh[:, :, 0] * wh[:, :, 1]
a_area = np.prod(abox[:, 2:] - abox[:, :2], axis=1)
b_area = np.prod(bbox[:, 2:] - bbox[:, :2], axis=1)
# broadcast
return inter / (a_area[:, None] + b_area - inter)
def bbox2loc(src, dst):
src_w = src[:, 2] - src[:, 0]
src_h = src[:, 3] - src[:, 1]
src_x = src[:, 0] + 0.5 * src_w
src_y = src[:, 1] + 0.5 * src_h
dst_w = dst[:, 2] - dst[:, 0]
dst_h = dst[:, 3] - dst[:, 1]
dst_x = dst[:, 0] + 0.5 * dst_w
dst_y = dst[:, 1] + 0.5 * dst_h
eps = np.finfo(src_h.dtype).eps
src_h = np.maximum(src_h, eps)
src_w = np.maximum(src_w, eps)
tx = (dst_x - src_x) / src_w
ty = (dst_y - src_y) / src_h
tw = np.log(dst_w / src_w)
th = np.log(dst_h / src_h)
loc = np.vstack((tx, ty, tw, th)).transpose()
return loc
def loc2bbox(src, loc):
if src.shape[0] == 0:
return np.zeros((0, 4), dtype=loc.dtype)
src_w = src[:, 2] - src[:, 0]
src_h = src[:, 3] - src[:, 1]
src_x = src[:, 0] + 0.5 * src_w
src_y = src[:, 1] + 0.5 * src_h
tx = loc[:, 0]
ty = loc[:, 1]
tw = loc[:, 2]
th = loc[:, 3]
dx = tx * src_w + src_x
dy = ty * src_h + src_y
dw = np.exp(tw) * src_w
dh = np.exp(th) * src_h
dst = np.zeros(loc.shape, dtype=loc.dtype)
dst[:, 0] = dx - 0.5 * dw
dst[:, 1] = dy - 0.5 * dh
dst[:, 2] = dx + 0.5 * dw
dst[:, 3] = dy + 0.5 * dh
return dst
class anchor_target(object):
'''get the ground truth for rpn loss'''
def __init__(self, n_sample=256, iou_pos=0.7, iou_neg = 0.3, pos_ratio=0.5):
self.n_sample = n_sample
self.iou_pos = iou_pos
self.iou_neg = iou_neg
self.pos_ratio = pos_ratio
def __call__(self, boxes, anchors, img_size):
boxes = boxes.numpy()
n_anchor = len(anchors)
h, w = img_size
index_inside = np.where(
(anchors[:, 0] >= 0) &
(anchors[:, 1] >= 0) &
(anchors[:, 2] <= w) &
(anchors[:, 3] <= h)
)[0]
anchors = anchors[index_inside]
argmax_ious, label = self.create_label(anchors, boxes)
loc = bbox2loc(anchors, boxes[argmax_ious])
gt_rpn_scores = self._unmap(label, n_anchor, index_inside, fill=-1)
gt_rpn_loc = self._unmap(loc, n_anchor, index_inside, fill=0)
return gt_rpn_loc, gt_rpn_scores
def create_label(self, anchor, boxes):
label = np.empty((len(anchor),), dtype=np.int)
label.fill(-1)
ious = iou(anchor, boxes)
argmax_ious = ious.argmax(axis=1)
max_ious = ious[np.arange(len(anchor)), argmax_ious]
gt_argmax_ious = ious.argmax(axis=0)
gt_max_ious = ious[gt_argmax_ious, np.arange(ious.shape[1])]
gt_argmax_ious = np.where(ious==gt_max_ious)[0] # more than before
label[max_ious < self.iou_neg] = 0
label[max_ious >= self.iou_pos] = 1
label[gt_argmax_ious] = 1
n_pos = int(self.n_sample * self.pos_ratio)
pos_index = np.where(label==1)[0]
if len(pos_index) > n_pos:
disable_index = np.random.choice(pos_index, size=len(pos_index)-n_pos, replace=False)
label[disable_index] = -1
n_neg = self.n_sample - np.sum(label==1)
neg_index = np.where(label==0)[0]
if len(neg_index)>n_neg:
disable_index = np.random.choice(neg_index, size=len(neg_index) - n_neg, replace=False)
label[disable_index] = -1
return argmax_ious, label
def _unmap(self, data, count, index, fill=0):
if len(data.shape) == 1:
ret = np.empty((count,), dtype=data.dtype)
ret.fill(fill)
ret[index] = data
else:
ret = np.empty((count,) + data.shape[1:], dtype=data.dtype)
ret.fill(fill)
ret[index, :] = data
return ret
class proposal_target(object):
def __init__(self):
self.n_sample = 128
self.pos_ratio = 0.25
self.iou_pos = 0.5
self.iou_neg_h = 0.5
self.iou_neg_l = 0.1
def __call__(self, rois, boxes, labels,
loc_mean=(0., 0., 0., 0.), loc_std=(0.1, 0.1, 0.2, 0.2)):
n_box, _ = boxes.shape
boxes = boxes.numpy()
labels = labels.numpy()
# to guarantee the ground-truth in samples-rois
rois = np.concatenate((rois, boxes), axis=0)
n_pos_roi = int(self.n_sample * self.pos_ratio)
ious = iou(rois, boxes)
max_iou = ious.max(axis=1)
argmax_iou = ious.argmax(axis=1)
# 0 is background
iou_label = labels[argmax_iou] + 1
pos_index = np.where(max_iou>=self.iou_pos)[0]
n_pos_roi = min(n_pos_roi, len(pos_index))
if len(pos_index) > n_pos_roi:
pos_index = np.random.choice(pos_index, size=n_pos_roi, replace=False)
neg_index = np.where((max_iou<self.iou_neg_h) & (max_iou>=self.iou_neg_l))[0]
n_neg_roi = self.n_sample - n_pos_roi
n_neg_roi = min(n_neg_roi, len(neg_index))
if len(neg_index) > n_neg_roi:
neg_index = np.random.choice(neg_index, size=n_neg_roi, replace=False)
keep = np.append(pos_index, neg_index)
gt_roi_label = iou_label[keep]
gt_roi_label[n_pos_roi:] = 0
sample_roi = rois[keep]
gt_roi_loc = bbox2loc(sample_roi, boxes[argmax_iou[keep]])
gt_roi_loc = (gt_roi_loc - np.array(loc_mean, dtype=np.float32)) / np.array(loc_std, dtype=np.float32)
return sample_roi, gt_roi_loc, gt_roi_label
def init_normal(layer, mean, std):
layer.weight.data.normal_(mean, std)
layer.bias.data.zero_()
if __name__ == '__main__':
a = generate_anchor()
print(a)
ans = get_anchor(a, 16, 37, 50)
print(ans.shape)
    # transform_locs expects per-anchor offsets with shape (N, 4)
    rpn_locs = np.random.rand(len(ans), 4).astype(np.float32)
    transform_locs(ans, rpn_locs)
    # anchor_target expects ground-truth boxes as a torch tensor
    import torch
    box = torch.tensor([[10., 10., 100., 100.], [200., 150., 400., 300.]])
    at = anchor_target()
    at(box, ans, (600, 800))
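    # Illustrative addition (not in the original file): a quick nms() sanity check.
    # nms() assumes the rois are already sorted by score in descending order; the second
    # box overlaps the first heavily (IoU ~0.7), so with a 0.5 threshold it is suppressed
    # and the expected output is [0, 2].
    demo_rois = np.array([[0, 0, 10, 10], [1, 1, 11, 11], [20, 20, 30, 30]], dtype=np.float32)
    print(nms(demo_rois, 0.5))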
|
[
"609632889@qq.com"
] |
609632889@qq.com
|
0a3269003082b9be357f4fa26660d672cf495707
|
3fef5bcfab73fa25d04ab796df419e9a17cccf73
|
/shapely/settings/testing.py
|
11f6d496e15f2856510cf60a517e87869acdea57
|
[
"MIT"
] |
permissive
|
mhotwagner/shapely
|
e0d0dee722a93a24153740d64afff5dda550bdff
|
e23ad452b6afaf9c20898887f206aaf309d0ab12
|
refs/heads/master
| 2021-04-28T13:34:36.418729
| 2018-02-22T03:24:24
| 2018-02-22T03:24:24
| 122,108,441
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 430
|
py
|
from .base import *
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'shapely-testing',
'USER': '',
'PASSWORD': '',
'HOST': 'localhost',
'PORT': '',
}
}
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
|
[
"mhotwagner@gmail.com"
] |
mhotwagner@gmail.com
|
8a48edd00838db3d482490bf620be9f0c0c3bc0a
|
c805a7a8cb0e9a1d625b54683b133d1be018f406
|
/test/ar_viewer/src/ar_parking_ws.py
|
f6fb81bab84d149716ae413fb21add9f37e6f48a
|
[] |
no_license
|
cjw1090/teamD
|
8acb581ca23be81e62e318428e59a4636d8cb6da
|
57a91b50c8eef1b403cc76ba6f14fdc1773ee2e3
|
refs/heads/master
| 2023-09-01T04:01:10.178809
| 2021-09-26T16:25:25
| 2021-09-26T16:25:25
| 397,126,125
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,013
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import rospy, time, cv2, math
import numpy as np
from xycar_msgs.msg import xycar_motor
from ar_track_alvar_msgs.msg import AlvarMarkers
from tf.transformations import euler_from_quaternion
class AR_PARKING:
arData = {"DX":0.0, "DY":0.0, "DZ":0.0, "AX":0.0, "AY":0.0, "AZ":0.0, "AW":0.0}
motor_pub = rospy.Publisher("/xycar_motor", xycar_motor, queue_size=1)
xycar_msg = xycar_motor()
def __init__(self):
rospy.init_node('ar_parking', anonymous=False)
rospy.Subscriber('ar_pose_marker', AlvarMarkers, self.callback, queue_size=10)
self.r=rospy.Rate(10)
self.id=0
def callback(self, msg):
for i in msg.markers:
self.id=i.id
#print('id', self.id)
self.arData["DX"] = i.pose.pose.position.x+0.1 if i.pose.pose.position.x>0 else i.pose.pose.position.x-0.1
self.arData["DY"] = i.pose.pose.position.z
#self.arData["DZ"] = i.pose.pose.position.z
self.arData["AX"] = i.pose.pose.orientation.x
self.arData["AY"] = i.pose.pose.orientation.y
self.arData["AZ"] = i.pose.pose.orientation.z
self.arData["AW"] = i.pose.pose.orientation.w
def back_drive(self):
yaw=math.degrees(euler_from_quaternion((self.arData["AX"], self.arData["AY"], self.arData["AZ"], self.arData["AW"]))[1])
back=-yaw
cnt=0
while abs(yaw)>5.0:
cnt+=1
#print('first', yaw)
self.xycar_msg.angle=back
#print(self.id, self.xycar_msg.angle)
self.xycar_msg.speed=-30
self.motor_pub.publish(self.xycar_msg)
yaw=math.degrees(euler_from_quaternion((self.arData["AX"], self.arData["AY"], self.arData["AZ"], self.arData["AW"]))[1])
self.r.sleep()
print('finish first')
        for _ in range(int(cnt // 1.5)):
self.xycar_msg.angle=back
self.xycar_msg.speed=-30
self.motor_pub.publish(self.xycar_msg)
self.r.sleep()
print('finish second')
yaw=math.degrees(euler_from_quaternion((self.arData["AX"], self.arData["AY"], self.arData["AZ"], self.arData["AW"]))[1])
#print('second', yaw)
while abs(yaw)>2.0:
#print('second', yaw)
self.xycar_msg.angle=-back
self.xycar_msg.speed=-30
self.motor_pub.publish(self.xycar_msg)
yaw=math.degrees(euler_from_quaternion((self.arData["AX"], self.arData["AY"], self.arData["AZ"], self.arData["AW"]))[1])
self.r.sleep()
print('finish third')
def ar_parking(self):
(roll, pitch, yaw)=euler_from_quaternion((self.arData["AX"], self.arData["AY"], self.arData["AZ"], self.arData["AW"]))
#print(math.degrees(roll), math.degrees(pitch), math.degrees(yaw))
#roll = math.degrees(roll)
#pitch = math.degrees(pitch)
yaw = math.degrees(pitch)
distance = math.sqrt(pow(self.arData["DX"],2) + pow(self.arData["DY"],2))
#print(self.arData["DX"], self.arData["DY"])
#print(distance)
atan = math.degrees(math.atan2(self.arData["DX"], self.arData["DY"]))
#print('front', 'atan', atan, 'yaw', yaw, 'distance', distance)
speed=30
angle=atan
if self.id==0:
if distance>1.8:
speed=15
elif distance>1.2:
speed=15
elif distance>0.8:
speed=15
elif distance>0.5:
speed=10
else:
speed=0
            # needs debugging
#print(yaw, atan, distance)
if abs(yaw)>8 and abs(atan)>8:
print('back')
self.back_drive()
else:
angle=0
elif self.id==9:
if distance>1.8:
speed=15
elif distance>1.2:
speed=15
elif distance>0.8:
speed=15
elif distance>0.33:
speed=10
else:
speed=0
            # needs debugging
#print(yaw, atan, distance)
if abs(yaw)>8 and abs(atan)>8:
print('back')
self.back_drive()
else:
angle=0
def left_drive(ang,cnt):
global xycar_msg, motor_pub
global ar_drive_flag
global angle, speed
global arData
#print("go left--------------------------")
while (arData["DZ"] > 0.5):
if(arData["DX"] < 0):
xycar_msg.angle = -20
xycar_msg.speed = 10
motor_pub.publish(xycar_msg)
else:
xycar_msg.angle = 20
xycar_msg.speed = 10
motor_pub.publish(xycar_msg)
for i in range(30):
xycar_msg.angle = 0
xycar_msg.speed = 0
motor_pub.publish(xycar_msg)
print("stop")
while (arData["DZ"] < 2):
if(arData["DX"] < 0):
xycar_msg.angle = -20
xycar_msg.speed = -30
motor_pub.publish(xycar_msg)
print("back1")
else:
xycar_msg.angle = 20
xycar_msg.speed = -30
motor_pub.publish(xycar_msg)
print("back2")
"""
for cnt in range(cnt):
xycar_msg.angle = -ang
xycar_msg.speed = -20
motor_pub.publish(xycar_msg)
time.sleep(0.1)
if not ar_drive_flag:
xycar_msg.angle = 0
xycar_msg.speed = 0
motor_pub.publish(xycar_msg)
break
"""
self.xycar_msg.angle=angle
self.xycar_msg.speed=speed
self.motor_pub.publish(self.xycar_msg)
self.r.sleep()
if __name__ == '__main__':
parking = AR_PARKING()
while not rospy.is_shutdown():
parking.ar_parking()
|
[
"jaewon5359@gmail.com"
] |
jaewon5359@gmail.com
|
d4f260b817c0f2a7aede086832374c46bc4a7e24
|
124033d85589734e7f96d01b06e6f5b46b09171f
|
/smartShop/smartShop/urls.py
|
a368378fe3c2bbb61b45200c9d6af93a34fe4c0a
|
[] |
no_license
|
jimzers/smart-shop
|
e4a6ed66880b9a7b20b6bb1c73c2522f68bdece8
|
b6d4bf8356f7d84d1b024ab9944f1770a5238e69
|
refs/heads/master
| 2020-03-21T16:28:53.874191
| 2018-06-26T17:47:44
| 2018-06-26T17:47:44
| 138,772,469
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 961
|
py
|
"""smartShop URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path('admin/', admin.site.urls),
path('home/', include('home.urls')),
path('clothing/', include('clothing.urls')),
path('user/', include('user.urls')),
path('search/', include('search.urls')),
]
|
[
"noreply@github.com"
] |
jimzers.noreply@github.com
|
4803551477ac4de75cd92437530060e3939647eb
|
5b78602dd41bb226bc97e0acc60d2ec7dff10da4
|
/Temur_online_magazini/django_project/store/views.py
|
eee7e8aa18136dbae7c2e7d4278b708df4a94789
|
[] |
no_license
|
developeryuldashev/Rest_Api_boshlang-ich
|
fac742823442d052930526b60dc613853e9a9773
|
a2f1b5cc104d53504a694d26ba8f492f0743e67e
|
refs/heads/main
| 2023-08-18T23:52:40.441378
| 2021-09-13T03:32:29
| 2021-09-13T03:32:29
| 393,358,090
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,150
|
py
|
from django.shortcuts import render
from rest_framework import viewsets, status
from rest_framework.response import Response
from .models import *
import json
from .serializer import *
class CustomerView(viewsets.ModelViewSet):
queryset = Customer.objects.all()
serializer_class = CustomerSerializer
class CategoriesView(viewsets.ModelViewSet):
queryset = Categories.objects.all()
serializer_class = CategoriesSerializer
class ProductsView(viewsets.ModelViewSet):
queryset = Products.objects.all()
serializer_class = ProductsSerializer
class ProductByCategoryView(viewsets.ModelViewSet):
queryset = Products.objects.all()
serializer_class = ProductsSerializer
def retrieve(self, request, *args, **kwargs):
category_id=kwargs['pk']
products=self.queryset.filter(category_id=category_id)
serializers = self.get_serializer(products, many=True)
return Response(serializers.data)
class OrdersView(viewsets.ModelViewSet):
queryset = Orders.objects.all()
serializer_class = OrdersSerializer
class OrderDetailsView(viewsets.ModelViewSet):
queryset = Order_details.objects.all()
serializer_class = OrderDetailsSerializer
class OrderDetailsByOrdersView(viewsets.ModelViewSet):
queryset = Order_details.objects.all()
serializer_class = OrderDetailsSerializer
def retrieve(self, request, *args, **kwargs):
order_id=kwargs['pk']
order_details=self.queryset.filter(order_id=order_id)
serializers=self.get_serializer(order_details, many=True)
return Response(serializers.data)
class OrderDetailsActionsView(viewsets.ModelViewSet):
queryset = Order_details.objects.all()
serializer_class = OrderDetailsSerializer
    def update(self, request, *args, **kwargs):  # the request comes in as a PUT, so we hand it to update()
od_id=kwargs['pk']
        # we need to read the incoming data
data=json.loads(request.body)
order_detail=self.queryset.get(id=od_id)
order_detail.action(data)
# print(request.body)
return Response({'quantity':order_detail.quantity})
# Create your views here.
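# Illustrative sketch (added, not part of the original file): ModelViewSets like these
# are typically exposed through a DRF router in urls.py, along these lines
# (route names assumed):
#
#   from django.urls import path, include
#   from rest_framework import routers
#   router = routers.DefaultRouter()
#   router.register('customers', CustomerView)
#   router.register('products', ProductsView)
#   urlpatterns = [path('', include(router.urls))]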
|
[
"81365808+developeryuldashev@users.noreply.github.com"
] |
81365808+developeryuldashev@users.noreply.github.com
|
c6b20abb49f52dddb7535d92c669b6e97f8a32f6
|
3344c2cd2cf23ba9bf10a3fc2b753d4d1b0407f4
|
/bisection recur.py
|
e1d9aab0911196f62cad0372d171d39bce8297e5
|
[] |
no_license
|
Scunnered/Algorithms
|
d9072f3a21ad60303ab3ba389dd911aad6e30ed6
|
6e42cce3754676527eebfe8571df0ebc123ac12c
|
refs/heads/master
| 2022-11-15T14:08:09.041412
| 2020-07-08T12:36:55
| 2020-07-08T12:36:55
| 276,674,466
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 619
|
py
|
def bisection_recur(n, arr, start, stop):
if start > stop:
return f"{n} not found in list"
else:
mid = (start + stop)//2
if n == arr[mid]:
return f"{n} found at index: {mid}"
elif n > arr[mid]:
return bisection_recur(n, arr, mid+1, stop)
else:
return bisection_recur(n, arr, start, mid-1)
def create_list(max_val):
arr = []
for num in range(1, max_val+1):
arr.append(num)
return arr
l = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
#idx 0 1 2 3 4 5 6 7 8 9
for num in range(16):
print(bisection_recur(num, l, 0, len(l)-1))
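# Illustrative note (added, not in the original file): create_list() is defined above
# but unused; it can generate the sorted input for larger searches, e.g.:
#
#   big = create_list(1000)
#   print(bisection_recur(500, big, 0, len(big) - 1))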
|
[
"44169316+Scunnered@users.noreply.github.com"
] |
44169316+Scunnered@users.noreply.github.com
|
3502ebb6f5c529c64734685213e4edf5dc9c5122
|
7ca5d7ba135854ad49add173412da1550b7b9383
|
/utils/asyncRequest.py
|
d78a8e2e5f3e18ae21541df725c54e3be2456321
|
[] |
no_license
|
13699808294/CityWeatherService
|
74eb09ce2dae7567906b26183066896bd52c4308
|
999c5ebcc59981b082bc2757ed432864a24a3f6b
|
refs/heads/master
| 2021-01-01T23:50:24.474273
| 2020-02-10T00:45:24
| 2020-02-10T00:45:24
| 239,398,686
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 895
|
py
|
import json
from json import JSONDecodeError
from tornado import gen, httpclient
from tornado.httputil import url_concat
from utils.my_json import json_dumps
@gen.coroutine
def asyncTornadoRequest(
url:str,
params:dict=None,
body:dict=None,
method:str='GET',
headers:dict=None,
allow_nonstandard_methods:bool=False
):
if body:
body = json_dumps(body)
if params:
url = url_concat(url, params)
request = httpclient.HTTPRequest(
url=url,
headers=headers,
method=method,
body=body,
allow_nonstandard_methods=allow_nonstandard_methods,
)
http = httpclient.AsyncHTTPClient()
try:
resp = yield http.fetch(request)
result = json.loads(resp.body.decode())
except Exception as e:
result = { "status": 1, "errmsg": e }
return result
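# Illustrative usage sketch (added, not part of the original module); the URL and
# parameters below are placeholders. Inside another coroutine the helper is awaited
# with `yield`, e.g.:
#
#   @gen.coroutine
#   def fetch_status():
#       result = yield asyncTornadoRequest(
#           'http://example.com/api/status',  # assumed endpoint
#           params={'city': 'beijing'},
#           method='GET',
#       )
#       return result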
|
[
"pcl5662828@yeah.net"
] |
pcl5662828@yeah.net
|
c6bd163f0f2a94c7198fc64414ae5763cd95662a
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/shortes_20200507175803.py
|
a9ff073099d18faa3c318c80098dbeee4aa276df
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 163
|
py
|
def find_short(s):
    length = []
    for word in s.split():
        length.append(len(word))
    print(sorted(length)[0])
find_short("geek for geeks")
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
170abad341cd1da0093a8830aada909a78ca01fd
|
6713b3538360f5b96842140e723c17bf12327420
|
/python/IO/StringIO和BytesIO/StringIO.py
|
40e183eae167c2031400bfe887c698e2c8435711
|
[] |
no_license
|
1065672881/Demo
|
9fec96be252881806e0d01e410635df67093cf07
|
44907288e04916dbfaf9e7f0a45aeaa344294dfa
|
refs/heads/master
| 2021-06-20T03:50:07.310099
| 2021-04-25T04:28:59
| 2021-04-25T04:28:59
| 207,975,453
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 593
|
py
|
# In many cases, data does not have to be read from or written to a file;
# it can also be read and written entirely in memory.
#
# StringIO, as the name suggests, reads and writes str in memory.
#
# To write a str into a StringIO, first create a StringIO object, then write to it just like a file.
from io import StringIO
f = StringIO()
print(f.write('hello'))
# The getvalue() method returns everything that has been written so far
print(f.getvalue())
# To read from a StringIO, initialize it with a str, then read it like a file
f1 = StringIO('Hello!\nhi!\nGoodBye!')
while True:
s = f1.readline()
if s == '':
break
print(s.strip())
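# Added illustration (not in the original file): BytesIO is the binary counterpart of
# StringIO and reads/writes bytes in memory in the same way.
from io import BytesIO
f2 = BytesIO()
f2.write('hello'.encode('utf-8'))
print(f2.getvalue())
# Like StringIO, a BytesIO can be initialized with existing bytes and read like a file.
f3 = BytesIO(b'hello')
print(f3.read())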
|
[
"1539211463@qq.com"
] |
1539211463@qq.com
|
99c578f487e2bd0037e536cfefcca6c42c0b7b76
|
e46aceea32ffde8d4ce936539500639a57de50ae
|
/backend/file_service/app/api/models/file_request.py
|
ef734b1a6170dd6beb9a344e729f63464c311faf
|
[] |
no_license
|
person0709/mongo-file-storage
|
653e109875888eda5ae8b5f4231247c2b88d922b
|
7c9775ba654b0053f5cd0b51d8fe484d8fd27dcc
|
refs/heads/master
| 2023-08-08T01:18:41.039737
| 2021-04-15T20:21:41
| 2021-04-15T20:21:41
| 351,797,595
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,294
|
py
|
from typing import Optional
from fastapi import UploadFile, File
from pydantic import BaseModel, conint
class BaseFileRequest(BaseModel):
pass
class ReadFileInfoRequest(BaseFileRequest):
user_id: Optional[str]
filename: str
class UploadFileRequest(BaseFileRequest):
user_id: Optional[str]
file: UploadFile = File(...)
class DownloadFileRequest(BaseFileRequest):
user_id: Optional[str]
filename: str
class DeleteFileRequest(BaseFileRequest):
user_id: Optional[str]
filename: str
class ListFileInfoRequest(BaseFileRequest):
user_id: Optional[str]
offset: conint(ge=0, le=100) = 0
limit: conint(ge=0, le=100) = 100
sort_by: Optional[str] = "uploadDate"
desc: Optional[bool] = True
def convert_sort_by(self):
"""
Convert sort_by values to the field names that are used in DB
"""
if self.sort_by == "uploaded_at":
self.sort_by = "uploadDate"
elif self.sort_by == "size":
self.sort_by = "length"
class SearchFileInfoRequest(BaseFileRequest):
user_id: Optional[str]
pattern: str
limit: conint(le=30) = 10
class ReadFileCountRequest(BaseFileRequest):
user_id: Optional[str]
class ReadUsageRequest(BaseFileRequest):
user_id: Optional[str]
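# Illustrative usage sketch (added, not part of the original module): convert_sort_by()
# maps the API-facing sort field onto the GridFS field name used by the database layer:
#
#   req = ListFileInfoRequest(user_id="u1", sort_by="size")
#   req.convert_sort_by()
#   assert req.sort_by == "length"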
|
[
"person0709@gmail.com"
] |
person0709@gmail.com
|
0fc588a170ae240fba84f25f932460125c67de1b
|
48523e13b15451542aa51ece14d2b3c6c4bb1b99
|
/Django_apps/teacher/migrations/0009_auto_20181123_0628.py
|
c93e5ce5b6d06afd8659042de9aa52c532c32d35
|
[] |
no_license
|
ss-8y0/SchoolSystem
|
c7c64695b5ed7979e2d530941f4fbd178d95ca64
|
63298fa495a98c45175904da2820bcfce4a582a4
|
refs/heads/master
| 2023-03-15T11:07:38.665758
| 2019-05-27T02:52:56
| 2019-05-27T02:52:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 580
|
py
|
# Generated by Django 2.1.1 on 2018-11-23 06:28
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('teacher', '0008_auto_20181123_0627'),
]
operations = [
migrations.AlterField(
model_name='classtoteacher',
name='stu_class',
field=models.ForeignKey(limit_choices_to={'school_id__in': ['115']}, on_delete=django.db.models.deletion.CASCADE, related_name='handle_class', to='school.StuClass', verbose_name='班级'),
),
]
|
[
"414804000@qq.com"
] |
414804000@qq.com
|
df1a5bbd20a618ff7e7927d732d4debac8e84041
|
9c29ee2d4670f9c208402dac9a4e5a890c968eec
|
/amona_python/user.py
|
528886e89d52b6d001981fb343c05ff97cbca354
|
[] |
no_license
|
thelazysolutions/amona-beaucoup
|
d235378af8e07718e4feebd1208006e731ed8af6
|
455496b965979db935218b1067e6661ef9e60b90
|
refs/heads/main
| 2023-03-15T09:08:09.052333
| 2021-03-16T05:57:08
| 2021-03-16T05:57:08
| 348,226,522
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,702
|
py
|
from flask import Flask, Blueprint, request
from db.database import User, connection, select, delete, insert, update, metadata, and_
import inspect
user = Blueprint('user', __name__, template_folder='templates')
def list_to_json(list):
"""[summary]
Appends the Column headers as Keys
and returns a JSON with the values
Args:
list ([type]): [description]
Returns:
JSON
[type]: [description]
"""
print(inspect.stack()[1][3])
op = {}
for (a, b) in zip((User.c.keys()), list):
op[a] = str(b).replace('user.', '')
return op
@user.route('/test/', methods=["GET", "POST"])
def viewTableAll():
print(inspect.stack()[1][3])
obj = {}
for key in User.c.keys():
obj[key] = '1'
return obj
@user.route('/login', methods=["GET", "POST"])
def login():
print(inspect.stack()[1][3])
req_data = request.get_json()
print(req_data)
json_data = {}
for req in req_data:
if (req in User.c.keys()):
json_data[req] = req_data[req]
print(json_data)
if('email' in json_data and 'password' in json_data):
# check for User_type
query = select([User]).where(and_(User.columns.email ==
json_data['email'], User.columns.password == json_data['password']))
ResultProxy = connection.execute(query)
ResultSet = ResultProxy.fetchone()
if(not ResultSet):
print('Unable to find the user for Login')
return {'error': 'Unable to find the user for Login'}
else:
print(list_to_json(ResultSet))
return {'success': ' User logs in', 'user_id': list_to_json(ResultSet)}
print('Cannot login')
return {'error': 'Cannot Login'}
@user.route('/addOne', methods=["PUT"])
def addOne():
"""[summary]
TESTED - FOUND OK
Add the Users's Data to an entry
Returns:
users data in a String (Do in JSON)
OR
Empty string Message
[type]: [description]
"""
# read data from the API call
print(inspect.stack()[1][3])
req_data = request.get_json()
print(req_data)
json_data = {}
for req in req_data:
if (req in User.c.keys()):
json_data[req] = req_data[req]
query = (
insert(User).
values(json_data)
)
ResultProxy = connection.execute(query)
if(not ResultProxy):
print("Unable to Add Users")
return {'error': 'Unable to Add the given users'}
print("Add Succesful")
return {'status': "Adding Succesful"}
@user.route('/', methods=["GET", "POST"])
def viewAll():
"""[summary]
TESTED - FOUND OK
View all the Userss Data
Returns:
users data in a String (Do in JSON)
[type]: [description]
"""
print(inspect.stack()[1][3])
query = select([User])
ResultProxy = connection.execute(query)
ResultSet = ResultProxy.fetchall()
res = []
for rs in ResultSet:
print(rs)
res.append(list_to_json(rs))
return dict(enumerate(res))
@user.route('/<id>', methods=["GET", "POST"])
def viewOne(id):
"""[summary]
TESTED - FOUND OK
View the Users's Data with a specific id
Returns:
users data in a String (Do in JSON)
OR
Empty string Message
[type]: [description]
"""
print(inspect.stack()[1][3])
query = select([User]).where(User.columns.id == id)
ResultProxy = connection.execute(query)
ResultSet = ResultProxy.fetchone()
if(not ResultSet):
return {'error': 'Unable to find the given users'}
print(ResultSet)
return list_to_json(ResultSet)
@user.route('/<id>', methods=["DELETE"])
def deleteOne(id):
"""[summary]
TESTED - FOUND OK
Delete the Users's Data with a specific id
Returns:
Success Message
OR
Empty ID Message
[type]: [description]
"""
print(inspect.stack()[1][3])
query = User.delete().where(User.columns.id == id)
ResultProxy = connection.execute(query)
if(not ResultProxy):
print('Unable to find the given user')
return {'error': 'Unable to find the given user'}
print("Delete Succesful for ID: " + str(id))
return {'status': "Delete Succesful for ID: " + str(id)}
@user.route('/<id>', methods=["PUT"])
def updateOne(id):
"""
[summary]
TESTED - FOUND OK
Update the Users's Data with a specific id
Returns:
users data in a String (Do in JSON)
OR
Empty string Message
[type]: [description]
"""
# read data from the API call
print(inspect.stack()[1][3])
req_data = request.get_json()
print(req_data)
query = select([User]).where(User.columns.id == id)
ResultProxy = connection.execute(query)
ResultSet = ResultProxy.fetchone()
if(not ResultSet):
print('Unable to find the given users')
return {'error': 'Unable to Find the given users'}
# Update the URL
json_data = {}
for req in req_data:
if (req in User.c.keys()):
json_data[req] = req_data[req]
query = (
update(User).
where(User.columns.id == id).
values(json_data)
)
ResultProxy = connection.execute(query)
if(not ResultProxy):
print("unable to update users")
return {'error': 'Unable to Update the given user'}
print("Update Succesful")
return {'status': "Update Succesful"}
|
[
"noreply@github.com"
] |
thelazysolutions.noreply@github.com
|
9e9a3a91d5637cb3fd7c408d679d60168748c261
|
e0ef7aaa2def3df2540d5a28c776e952e67b7ba7
|
/Tutorials/DSA/Stacks and Queues/bfs-and-dfs.py
|
b4119641761cd2c8e0ed032b86866f73016189a0
|
[] |
no_license
|
stevenxchung/Python-Repo
|
0c92c3a6ce5da420b114ccf04d1010b9bf6b2296
|
e0a5196ff298381b35cabc548c93ec6aadbd42bd
|
refs/heads/master
| 2023-07-19T01:59:07.122859
| 2023-07-09T13:32:21
| 2023-07-09T13:32:21
| 130,524,578
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,505
|
py
|
class Node(object):
def __init__(self, data=None, left=None, right=None):
self.data = data
self.left = left
self.right = right
def printTreeBFS(self):
node = self
queue = []
queue.append(node)
print('BFS: ', end='')
while len(queue) > 0:
# Since we are using a queue pop off the first element in the queue and set to node
print(queue[0].data, end=' ')
node = queue.pop(0)
if node.left is not None:
queue.append(node.left)
if node.right is not None:
queue.append(node.right)
print()
# Alternative BFS without a queue
def printTreeBFSNoQueue(self):
head = self
level = [head]
print('BFS: ', end='')
while head and level:
nextLevel = []
for node in level:
print(node.data, end=' ')
if node.left:
nextLevel.append(node.left)
if node.right:
nextLevel.append(node.right)
level = nextLevel
print()
# Using preorder DFS
def printTreePreorder(self):
node = self
stack = []
valueStack = []
print('DFS Iterative Preorder: ', end='')
while True:
if node:
stack.append(node)
valueStack.append(node.data)
print(node.data, end=' ')
node = node.left
elif stack:
# We pick off the top of the stack to reset to previous node
node = stack.pop()
valueStack.pop()
node = node.right
else:
break
print()
# Using inorder DFS
def printTreeInorder(self):
node = self
stack = []
print('DFS Iterative Inorder: ', end='')
while True:
if node:
stack.append(node)
node = node.left
elif stack:
node = stack.pop()
print(node.data, end=' ')
node = node.right
else:
break
print()
node4 = Node(4)
node5 = Node(5)
node6 = Node(6)
node7 = Node(7)
node2 = Node(2, node4, node5)
node3 = Node(3, node6, node7)
head = Node(1, node2, node3)
head.printTreeInorder() # 4, 2, 5, 1, 6, 3, 7
head.printTreePreorder() # 1, 2, 4, 5, 3, 6, 7
head.printTreeBFS() # 1, 2, 3, 4, 5, 6, 7
|
[
"stevenxchung@gmail.com"
] |
stevenxchung@gmail.com
|
0012bf8f5f48a7557827f2b393302e9e42cb6f0e
|
e5a3cfa21f78259dfcf9d991f6f67f0840d9c93c
|
/python/4_3.py
|
2e1c396667bb466d9c470125402ded87a9e1e6a2
|
[] |
no_license
|
Glittering/pythonLearn
|
ac91ed73cb615e3604d1c4bfde3692cf4efef6e3
|
0097b8fd5fc587a69f6c1bad95b08fe42481bf7c
|
refs/heads/master
| 2021-01-13T05:46:24.974340
| 2017-04-17T12:58:59
| 2017-04-17T12:58:59
| 77,099,698
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 55
|
py
|
l = [95.5, 85, 59]
print(l[-1])
print(l[-2])
print(l[-3])
|
[
"zhaoshichengzhao@sina.com"
] |
zhaoshichengzhao@sina.com
|
eea1fff018b210546b23829922b2d1d7ba4daff2
|
ad5c21d677d4c477418bb1362ad08db11af6a5de
|
/neurodiffeq/neurodiffeq/solvers.py
|
9588a350add657f05973d0c6c8917e5dcb7db859
|
[
"MIT"
] |
permissive
|
jpe17/DeepRANSProject
|
89daef2c4e8da6a2fec85d8e2288d936c041cfb1
|
37e4be94917d6cf9c4684da7bcb19136efd500c2
|
refs/heads/main
| 2023-05-07T21:39:26.300458
| 2021-06-04T16:21:28
| 2021-06-04T16:21:28
| 373,889,831
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 50,237
|
py
|
import torch
import warnings
import torch.nn as nn
import inspect
from inspect import signature
from abc import ABC, abstractmethod
from itertools import chain
from copy import deepcopy
from torch.optim import Adam
from neurodiffeq.networks import FCNN
from neurodiffeq._version_utils import deprecated_alias
from neurodiffeq.generators import GeneratorSpherical
from neurodiffeq.generators import SamplerGenerator
from neurodiffeq.generators import Generator1D
from neurodiffeq.generators import Generator2D
from neurodiffeq.function_basis import RealSphericalHarmonics
def _requires_closure(optimizer):
return inspect.signature(optimizer.step).parameters.get('closure').default == inspect._empty
class BaseSolver(ABC):
r"""A class for solving ODE/PDE systems.
:param diff_eqs:
The differential equation system to solve, which maps a tuple of coordinates to a tuple of ODE/PDE residuals.
Both the coordinates and ODE/PDE residuals must have shape (-1, 1).
:type diff_eqs: callable
:param conditions:
List of boundary conditions for each target function.
:type conditions: list[`neurodiffeq.conditions.BaseCondition`]
:param nets:
List of neural networks for parameterized solution.
If provided, length must equal that of conditions.
:type nets: list[`torch.nn.Module`], optional
:param train_generator:
A generator for sampling training points.
It must provide a `.get_examples()` method and a `.size` field.
:type train_generator: `neurodiffeq.generators.BaseGenerator`, required
:param valid_generator:
A generator for sampling validation points.
It must provide a `.get_examples()` method and a `.size` field.
:type valid_generator: `neurodiffeq.generators.BaseGenerator`, required
:param analytic_solutions:
**[DEPRECATED]** Pass ``metrics`` instead.
The analytical solutions to be compared with neural net solutions.
It maps a tuple of three coordinates to a tuple of function values.
The output shape should match that of networks.
:type analytic_solutions: callable, optional
:param optimizer:
The optimizer to be used for training.
:type optimizer: `torch.nn.optim.Optimizer`, optional
:param criterion:
A function that maps a PDE residual vector (torch tensor with shape (-1, 1)) to a scalar loss.
:type criterion: callable, optional
:param n_batches_train:
Number of batches to train in every epoch, where batch-size equals ``train_generator.size``.
Defaults to 1.
:type n_batches_train: int, optional
:param n_batches_valid:
Number of batches to validate in every epoch, where batch-size equals ``valid_generator.size``.
Defaults to 4.
:type n_batches_valid: int, optional
:param metrics:
Additional metrics to be logged (besides loss). ``metrics`` should be a dict where
- Keys are metric names (e.g. 'analytic_mse');
- Values are functions (callables) that computes the metric value.
These functions must accept the same input as the differential equation ``diff_eq``.
:type metrics: dict, optional
:param n_input_units:
Number of input units for each neural network. Ignored if ``nets`` is specified.
:type n_input_units: int, required
:param n_output_units:
Number of output units for each neural network. Ignored if ``nets`` is specified.
:type n_output_units: int, required
:param batch_size:
**[DEPRECATED and IGNORED]**
Each batch will use all samples generated.
Please specify ``n_batches_train`` and ``n_batches_valid`` instead.
:type batch_size: int
:param shuffle:
**[DEPRECATED and IGNORED]**
Shuffling should be performed by generators.
:type shuffle: bool
"""
def __init__(self, diff_eqs, conditions,
nets=None, train_generator=None, valid_generator=None, analytic_solutions=None,
optimizer=None, criterion=None, n_batches_train=1, n_batches_valid=4,
metrics=None, n_input_units=None, n_output_units=None,
# deprecated arguments are listed below
shuffle=None, batch_size=None):
# deprecate argument `shuffle`
if shuffle:
warnings.warn(
"param `shuffle` is deprecated and ignored; shuffling should be performed by generators",
FutureWarning,
)
# deprecate argument `batch_size`
if batch_size is not None:
warnings.warn(
"param `batch_size` is deprecated and ignored; specify n_batches_train and n_batches_valid instead",
FutureWarning,
)
self.diff_eqs = diff_eqs
self.conditions = conditions
self.n_funcs = len(conditions)
if nets is None:
self.nets = [
FCNN(n_input_units=n_input_units, n_output_units=n_output_units, hidden_units=(32, 32), actv=nn.Tanh)
for _ in range(self.n_funcs)
]
else:
self.nets = nets
if train_generator is None:
raise ValueError("train_generator must be specified")
if valid_generator is None:
raise ValueError("valid_generator must be specified")
self.metrics_fn = metrics if metrics else {}
# For backward compatibility with the legacy `analytic_solutions` argument
if analytic_solutions:
warnings.warn(
'The `analytic_solutions` argument is deprecated and could lead to unstable behavior. '
'Pass a `metrics` dict instead.',
FutureWarning,
)
def analytic_mse(*args):
x = args[-n_input_units:]
u_hat = analytic_solutions(*x)
u = args[:-n_input_units]
u, u_hat = torch.stack(u), torch.stack(u_hat)
return ((u - u_hat) ** 2).mean()
if 'analytic_mse' in self.metrics_fn:
warnings.warn(
"Ignoring `analytic_solutions` in presence of key 'analytic_mse' in `metrics`",
FutureWarning,
)
else:
self.metrics_fn['analytic_mse'] = analytic_mse
# metric history, keys will be train_loss, valid_loss, train__<metric_name>, valid__<metric_name>.
# For compatibility with ode.py and pde.py,
# double underscore are used between 'train'/'valid' and custom metric names.
self.metrics_history = {}
self.metrics_history.update({'train_loss': [], 'valid_loss': []})
self.metrics_history.update({'train__' + name: [] for name in self.metrics_fn})
self.metrics_history.update({'valid__' + name: [] for name in self.metrics_fn})
self.optimizer = optimizer if optimizer else Adam(chain.from_iterable(n.parameters() for n in self.nets))
if criterion is None:
self.criterion = lambda r: (r ** 2).mean()
elif isinstance(criterion, nn.modules.loss._Loss):
self.criterion = lambda r: criterion(r, torch.zeros_like(r))
else:
self.criterion = criterion
def make_pair_dict(train=None, valid=None):
return {'train': train, 'valid': valid}
self.generator = make_pair_dict(
train=SamplerGenerator(train_generator),
valid=SamplerGenerator(valid_generator),
)
# number of batches for training / validation;
self.n_batches = make_pair_dict(train=n_batches_train, valid=n_batches_valid)
# current batch of samples, kept for additional_loss term to use
self._batch_examples = make_pair_dict()
# current network with lowest loss
self.best_nets = None
# current lowest loss
self.lowest_loss = None
# local epoch in a `.fit` call, should only be modified inside self.fit()
self.local_epoch = 0
# maximum local epochs to run in a `.fit()` call, should only set by inside self.fit()
self._max_local_epoch = 0
# controls early stopping, should be set to False at the beginning of a `.fit()` call
# and optionally set to False by `callbacks` in `.fit()` to support early stopping
self._stop_training = False
# the _phase variable is registered for callback functions to access
self._phase = None
@property
def global_epoch(self):
r"""Global epoch count, always equal to the length of train loss history.
:return: Number of training epochs that have been run.
:rtype: int
"""
return len(self.metrics_history['train_loss'])
def compute_func_val(self, net, cond, *coordinates):
r"""Compute the function value evaluated on the points specified by ``coordinates``.
:param net: The network to be parameterized and evaluated.
:type net: torch.nn.Module
:param cond: The condition (a.k.a. parameterization) for the network.
:type cond: `neurodiffeq.conditions.BaseCondition`
:param coordinates: A tuple of coordinate components, each with shape = (-1, 1).
:type coordinates: tuple[torch.Tensor]
:return: Function values at the sampled points.
:rtype: torch.Tensor
"""
return cond.enforce(net, *coordinates)
def _update_history(self, value, metric_type, key):
r"""Append a value to corresponding history list.
:param value: Value to be appended.
:type value: float
:param metric_type: Name of the metric. Must be 'loss' or present in ``self.metrics``.
:type metric_type: str
:param key: {'train', 'valid'}. Phase of the process.
:type key: str
"""
self._phase = key
if metric_type == 'loss':
self.metrics_history[f'{key}_{metric_type}'].append(value)
elif metric_type in self.metrics_fn:
self.metrics_history[f'{key}__{metric_type}'].append(value)
else:
raise KeyError(f"metric '{metric_type}' not specified")
def _update_train_history(self, value, metric_type):
r"""Append a value to corresponding training history list."""
self._update_history(value, metric_type, key='train')
def _update_valid_history(self, value, metric_type):
r"""Append a value to corresponding validation history list."""
self._update_history(value, metric_type, key='valid')
def _generate_batch(self, key):
r"""Generate the next batch, register in self._batch_examples and return the batch.
:param key:
{'train', 'valid'};
Dict key in ``self._examples``, ``self._batch_examples``, or ``self._batch_start``
:type key: str
:return: The generated batch of points.
:type: List[`torch.Tensor`]
"""
# the following side effects are helpful for future extension,
# especially for additional loss term that depends on the coordinates
self._phase = key
self._batch_examples[key] = [v.reshape(-1, 1) for v in self.generator[key].get_examples()]
return self._batch_examples[key]
def _generate_train_batch(self):
r"""Generate the next training batch, register in ``self._batch_examples`` and return."""
return self._generate_batch('train')
def _generate_valid_batch(self):
r"""Generate the next validation batch, register in ``self._batch_examples`` and return."""
return self._generate_batch('valid')
def _do_optimizer_step(self, closure=None):
r"""Optimization procedures after gradients have been computed. Usually ``self.optimizer.step()`` is sufficient.
At times, users can overwrite this method to perform gradient clipping, etc. Here is an example::
import itertools
            class MySolver(Solver):
                def _do_optimizer_step(self, closure=None):
                    nn.utils.clip_grad_norm_(itertools.chain.from_iterable(net.parameters() for net in self.nets), 1.0, 'inf')
self.optimizer.step(closure=closure)
"""
self.optimizer.step(closure=closure)
def _run_epoch(self, key):
r"""Run an epoch on train/valid points, update history, and perform an optimization step if key=='train'.
:param key: {'train', 'valid'}; phase of the epoch
:type key: str
.. note::
The optimization step is only performed after all batches are run.
"""
if self.n_batches[key] <= 0:
# XXX maybe we should append NaN to metric history?
return
self._phase = key
epoch_loss = 0.0
batch_loss = 0.0
metric_values = {name: 0.0 for name in self.metrics_fn}
# Zero the gradient only once, before running the batches. Gradients of different batches are accumulated.
if key == 'train' and not _requires_closure(self.optimizer):
self.optimizer.zero_grad()
# perform forward pass for all batches: a single graph is created and release in every iteration
# see https://discuss.pytorch.org/t/why-do-we-need-to-set-the-gradients-manually-to-zero-in-pytorch/4903/17
for batch_id in range(self.n_batches[key]):
batch = self._generate_batch(key)
def closure(zero_grad=True):
nonlocal batch_loss
if key == 'train' and zero_grad:
self.optimizer.zero_grad()
funcs = [
self.compute_func_val(n, c, *batch) for n, c in zip(self.nets, self.conditions)
]
for name in self.metrics_fn:
value = self.metrics_fn[name](*funcs, *batch).item()
metric_values[name] += value
residuals = self.diff_eqs(*funcs, *batch)
residuals = torch.cat(residuals, dim=1)
self.residuals = residuals
loss = self.criterion(residuals) + self.additional_loss(funcs, key)
# accumulate gradients before the current graph is collected as garbage
if key == 'train':
loss.backward()
batch_loss = loss.item()
return loss
if key == 'train':
if _requires_closure(self.optimizer):
# If `closure` is required by `optimizer.step()`, perform a step for every batch
self._do_optimizer_step(closure=closure)
else:
# Otherwise, only perform backward propagation.
# Optimizer step will be performed only once outside the for-loop (i.e. after all batches).
closure(zero_grad=False)
epoch_loss += batch_loss
else:
epoch_loss += closure().item()
# calculate mean loss of all batches and register to history
self._update_history(epoch_loss / self.n_batches[key], 'loss', key)
# perform the optimizer step after all batches are run (if optimizer.step doesn't require `closure`)
if key == 'train' and not _requires_closure(self.optimizer):
self._do_optimizer_step()
if key == 'valid':
self._update_best()
# calculate average metrics across batches and register to history
for name in self.metrics_fn:
self._update_history(
metric_values[name] / self.n_batches[key], name, key)
def run_train_epoch(self):
r"""Run a training epoch, update history, and perform gradient descent."""
self._run_epoch('train')
def run_valid_epoch(self):
r"""Run a validation epoch and update history."""
self._run_epoch('valid')
def _update_best(self):
r"""Update ``self.lowest_loss`` and ``self.best_nets``
if current validation loss is lower than ``self.lowest_loss``
"""
current_loss = self.metrics_history['valid_loss'][-1]
if (self.lowest_loss is None) or current_loss < self.lowest_loss:
self.lowest_loss = current_loss
self.best_nets = deepcopy(self.nets)
def fit(self, max_epochs, callbacks=(), **kwargs):
r"""Run multiple epochs of training and validation, update best loss at the end of each epoch.
If ``callbacks`` is passed, callbacks are run, one at a time,
after training and validating and updating best model but before monitor checking
:param max_epochs: Number of epochs to run.
:type max_epochs: int
:param callbacks:
A list of callback functions.
Each function should accept the ``solver`` instance itself as its **only** argument.
        :type callbacks: list[callable]
.. note::
1. This method does not return solution, which is done in the ``.get_solution()`` method.
2. A callback function `cb(solver)` can set ``solver._stop_training`` to True to perform early stopping.
"""
self._stop_training = False
self._max_local_epoch = max_epochs
monitor = kwargs.pop('monitor', None)
if monitor:
warnings.warn("Passing `monitor` is deprecated, "
"use a MonitorCallback and pass a list of callbacks instead")
callbacks = [monitor.to_callback()] + list(callbacks)
if kwargs:
raise ValueError(f'Unknown keyword argument(s): {list(kwargs.keys())}')
for local_epoch in range(max_epochs):
# stop training if self._stop_training is set to True by a callback
if self._stop_training:
break
# register local epoch (starting from 1 instead of 0) so it can be accessed by callbacks
self.local_epoch = local_epoch + 1
self.run_train_epoch()
self.run_valid_epoch()
for cb in callbacks:
cb(self)
@abstractmethod
def get_solution(self, copy=True, best=True):
r"""Get a (callable) solution object. See this usage example:
.. code-block:: python3
solution = solver.get_solution()
point_coords = train_generator.get_examples()
value_at_points = solution(point_coords)
:param copy:
Whether to make a copy of the networks so that subsequent training doesn't affect the solution;
Defaults to True.
:type copy: bool
:param best:
Whether to return the solution with lowest loss instead of the solution after the last epoch.
Defaults to True.
:type best: bool
:return:
A solution object which can be called.
To evaluate the solution on certain points,
you should pass the coordinates vector(s) to the returned solution.
:rtype: BaseSolution
"""
pass # pragma: no cover
def _get_internal_variables(self):
r"""Get a dict of all available internal variables.
:return:
All available internal variables,
where keys are variable names and values are the corresponding variables.
:rtype: dict
.. note::
Children classes should inherit all items and optionally include new ones.
"""
return {
"metrics": self.metrics_fn,
"n_batches": self.n_batches,
"best_nets": self.best_nets,
"criterion": self.criterion,
"conditions": self.conditions,
"global_epoch": self.global_epoch,
"lowest_loss": self.lowest_loss,
"n_funcs": self.n_funcs,
"nets": self.nets,
"optimizer": self.optimizer,
"diff_eqs": self.diff_eqs,
"generator": self.generator,
"train_generator": self.generator['train'],
"valid_generator": self.generator['valid'],
}
@deprecated_alias(param_names='var_names')
def get_internals(self, var_names=None, return_type='list'):
r"""Return internal variable(s) of the solver
- If var_names == 'all', return all internal variables as a dict.
- If var_names is single str, return the corresponding variables.
- If var_names is a list and return_type == 'list', return corresponding internal variables as a list.
- If var_names is a list and return_type == 'dict', return a dict with keys in var_names.
:param var_names: An internal variable name or a list of internal variable names.
:type var_names: str or list[str]
:param return_type: {'list', 'dict'}; Ignored if ``var_names`` is a string.
:type return_type: str
:return: A single variable, or a list/dict of internal variables as indicated above.
:rtype: list or dict or any
"""
available_variables = self._get_internal_variables()
if var_names == "all" or var_names is None:
return available_variables
if isinstance(var_names, str):
return available_variables[var_names]
if return_type == 'list':
return [available_variables[name] for name in var_names]
elif return_type == "dict":
return {name: available_variables[name] for name in var_names}
else:
raise ValueError(f"unrecognized return_type = {return_type}")
def additional_loss(self, funcs, key):
r"""Additional loss terms for training. This method is to be overridden by subclasses.
This method can use any of the internal variables: the current batch, the nets, the conditions, etc.
:param funcs: Outputs of the networks after parameterization.
:type funcs: list[torch.Tensor]
:param key: {'train', 'valid'}; Phase of the epoch, used to access the sample batch, etc.
:type key: str
:return: Additional loss. Must be a ``torch.Tensor`` of empty shape (scalar).
:rtype: torch.Tensor
"""
return 0.0
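# Illustrative sketch (not part of the library): subclasses can override
# ``additional_loss`` to add penalty terms; the weight 0.01 below is an arbitrary example.
#
#     class RegularizedSolver1D(Solver1D):
#         def additional_loss(self, funcs, key):
#             # penalise large network outputs on top of the residual loss
#             return 0.01 * sum((f ** 2).mean() for f in funcs)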
class BaseSolution(ABC):
r"""A solution to a PDE/ODE (system).
:param nets:
The neural networks that approximate the PDE/ODE solution.
- If ``nets`` is a list of ``torch.nn.Module``, it should have the same length with ``conditions``
- If ``nets`` is a single ``torch.nn.Module``, it should have as many output units as length of ``conditions``
:type nets: list[`torch.nn.Module`] or `torch.nn.Module`
:param conditions:
A list of conditions that should be enforced on the PDE/ODE solution.
``conditions`` should have a length equal to the number of dependent variables in the ODE/PDE system.
:type conditions: list[`neurodiffeq.conditions.BaseCondition`]
"""
def __init__(self, nets, conditions):
if isinstance(nets, nn.Module):
# This is for backward compatibility with the `single_net` option
# The same torch.nn.Module instance is repeated to form a list of the same length as `conditions`
self.nets = [nets] * len(conditions)
else:
self.nets = nets
self.conditions = conditions
@abstractmethod
def _compute_u(self, net, condition, *coords):
pass # pragma: no cover
@deprecated_alias(as_type='to_numpy')
def __call__(self, *coords, to_numpy=False):
r"""Evaluate the solution at certain points.
:param coords: tuple of coordinate tensors, each of shape (n_samples, 1)
:type coords: Tuple[`torch.Tensor`]
:param to_numpy:
If set to True, the call returns a ``numpy.ndarray`` instead of ``torch.Tensor``.
Defaults to False.
:type to_numpy: bool
:return: Dependent variables evaluated at given points.
:rtype: list[`torch.Tensor` or `numpy.array`] or `torch.Tensor` or `numpy.array`
"""
coords = [c if isinstance(c, torch.Tensor) else torch.tensor(c) for c in coords]
original_shape = coords[0].shape
coords = [c.reshape(-1, 1) for c in coords]
if isinstance(to_numpy, str):
# Why did we allow `tf` as an option >_<
# We should phase this out as soon as possible
if to_numpy == 'tf' or to_numpy == 'torch':
to_numpy = True
elif to_numpy == 'np':
to_numpy = True
else:
raise ValueError(f"Unrecognized `as_type` option: '{to_numpy}'")
us = [
self._compute_u(net, con, *coords).reshape(original_shape)
for con, net in zip(self.conditions, self.nets)
]
if to_numpy:
us = [u.detach().cpu().numpy() for u in us]
return us if len(self.nets) > 1 else us[0]
class SolverSpherical(BaseSolver):
r"""A solver class for solving PDEs in spherical coordinates
:param pde_system:
The PDE system to solve, which maps a tuple of three coordinates to a tuple of PDE residuals,
both the coordinates and PDE residuals must have shape (n_samples, 1).
:type pde_system: callable
:param conditions:
List of boundary conditions for each target function.
:type conditions: list[`neurodiffeq.conditions.BaseCondition`]
:param r_min:
Radius for inner boundary (:math:`r_0>0`).
Ignored if ``train_generator`` and ``valid_generator`` are both set.
:type r_min: float, optional
:param r_max:
Radius for outer boundary (:math:`r_1>r_0`).
Ignored if ``train_generator`` and ``valid_generator`` are both set.
:type r_max: float, optional
:param nets:
List of neural networks for parameterized solution.
If provided, length of ``nets`` must equal that of ``conditions``
:type nets: list[torch.nn.Module], optional
:param train_generator:
Generator for sampling training points,
which must provide a ``.get_examples()`` method and a ``.size`` field.
``train_generator`` must be specified if ``r_min`` and ``r_max`` are not set.
:type train_generator: `neurodiffeq.generators.BaseGenerator`, optional
:param valid_generator:
Generator for sampling validation points,
which must provide a ``.get_examples()`` method and a ``.size`` field.
``valid_generator`` must be specified if ``r_min`` and ``r_max`` are not set.
:type valid_generator: `neurodiffeq.generators.BaseGenerator`, optional
:param analytic_solutions:
Analytical solutions to be compared with neural net solutions.
It maps a tuple of three coordinates to a tuple of function values.
Output shape should match that of ``nets``.
:type analytic_solutions: callable, optional
:param optimizer:
Optimizer to be used for training.
Defaults to a ``torch.optim.Adam`` instance that trains on all parameters of ``nets``.
:type optimizer: ``torch.nn.optim.Optimizer``, optional
:param criterion:
Function that maps a PDE residual tensor (of shape (-1, 1)) to a scalar loss.
:type criterion: callable, optional
:param n_batches_train:
Number of batches to train in every epoch, where batch-size equals ``train_generator.size``.
Defaults to 1.
:type n_batches_train: int, optional
:param n_batches_valid:
Number of batches to validate in every epoch, where batch-size equals ``valid_generator.size``.
Defaults to 4.
:type n_batches_valid: int, optional
:param enforcer:
A function of signature
``enforcer(net: nn.Module, cond: neurodiffeq.conditions.BaseCondition,
coords: Tuple[torch.Tensor]) -> torch.Tensor``
that returns the dependent variable value evaluated on the batch.
:type enforcer: callable
:param n_output_units:
Number of output units for each neural network.
Ignored if ``nets`` is specified.
Defaults to 1.
:type n_output_units: int, optional
:param batch_size:
**[DEPRECATED and IGNORED]**
Each batch will use all samples generated.
Please specify ``n_batches_train`` and ``n_batches_valid`` instead.
:type batch_size: int
:param shuffle:
**[DEPRECATED and IGNORED]**
Shuffling should be performed by generators.
:type shuffle: bool
"""
def __init__(self, pde_system, conditions, r_min=None, r_max=None,
nets=None, train_generator=None, valid_generator=None, analytic_solutions=None,
optimizer=None, criterion=None, n_batches_train=1, n_batches_valid=4, enforcer=None,
n_output_units=1,
# deprecated arguments are listed below
shuffle=None, batch_size=None):
if train_generator is None or valid_generator is None:
if r_min is None or r_max is None:
raise ValueError(f"Either generator is not provided, r_min and r_max should be both provided: "
f"got r_min={r_min}, r_max={r_max}, train_generator={train_generator}, "
f"valid_generator={valid_generator}")
if train_generator is None:
train_generator = GeneratorSpherical(512, r_min, r_max, method='equally-spaced-noisy')
if valid_generator is None:
valid_generator = GeneratorSpherical(512, r_min, r_max, method='equally-spaced-noisy')
self.r_min, self.r_max = r_min, r_max
self.enforcer = enforcer
super(SolverSpherical, self).__init__(
diff_eqs=pde_system,
conditions=conditions,
nets=nets,
train_generator=train_generator,
valid_generator=valid_generator,
analytic_solutions=analytic_solutions,
optimizer=optimizer,
criterion=criterion,
n_batches_train=n_batches_train,
n_batches_valid=n_batches_valid,
n_input_units=3,
n_output_units=n_output_units,
shuffle=shuffle,
batch_size=batch_size,
)
def _auto_enforce(self, net, cond, *coordinates):
r"""Enforce condition on network with inputs. If self.enforcer is set, use it.
Otherwise, fill cond.enforce() with as many arguments as needed.
:param net: Network for parameterized solution.
:type net: torch.nn.Module
:param cond: Condition (a.k.a. parameterization) for the network.
:type cond: `neurodiffeq.conditions.BaseCondition`
:param coordinates: A tuple of vectors, each with shape = (-1, 1).
:type coordinates: tuple[torch.Tensor]
:return: Function values at sampled points.
:rtype: torch.Tensor
"""
if self.enforcer:
return self.enforcer(net, cond, coordinates)
n_params = len(signature(cond.enforce).parameters)
coordinates = coordinates[:n_params - 1]
return cond.enforce(net, *coordinates)
def compute_func_val(self, net, cond, *coordinates):
r"""Enforce condition on network with inputs. If self.enforcer is set, use it.
Otherwise, fill cond.enforce() with as many arguments as needed.
:param net: Network for parameterized solution.
:type net: torch.nn.Module
:param cond: Condition (a.k.a. parameterization) for the network.
:type cond: `neurodiffeq.conditions.BaseCondition`
:param coordinates: A tuple of vectors, each with shape = (-1, 1).
:type coordinates: tuple[torch.Tensor]
:return: Function values at sampled points.
:rtype: torch.Tensor
"""
return self._auto_enforce(net, cond, *coordinates)
def get_solution(self, copy=True, best=True, harmonics_fn=None):
r"""Get a (callable) solution object. See this usage example:
.. code-block:: python3
solution = solver.get_solution()
point_coords = train_generator.get_examples()
value_at_points = solution(point_coords)
:param copy:
Whether to make a copy of the networks so that subsequent training doesn't affect the solution;
Defaults to True.
:type copy: bool
:param best:
Whether to return the solution with lowest loss instead of the solution after the last epoch.
Defaults to True.
:type best: bool
:param harmonics_fn:
If set, use it as function basis for returned solution.
:type harmonics_fn: callable
:return: The solution after training.
:rtype: ``neurodiffeq.solvers.BaseSolution``
"""
nets = self.best_nets if best else self.nets
conditions = self.conditions
if copy:
nets = deepcopy(nets)
conditions = deepcopy(conditions)
if harmonics_fn:
return SolutionSphericalHarmonics(nets, conditions, harmonics_fn=harmonics_fn)
else:
return SolutionSpherical(nets, conditions)
def _get_internal_variables(self):
available_variables = super(SolverSpherical, self)._get_internal_variables()
available_variables.update({
'r_min': self.r_min,
'r_max': self.r_max,
'enforcer': self.enforcer,
})
return available_variables
class SolutionSpherical(BaseSolution):
def _compute_u(self, net, condition, rs, thetas, phis):
return condition.enforce(net, rs, thetas, phis)
class SolutionSphericalHarmonics(SolutionSpherical):
r"""A solution to a PDE (system) in spherical coordinates.
:param nets: List of networks that takes in radius tensor and outputs the coefficients of spherical harmonics.
:type nets: list[`torch.nn.Module`]
:param conditions: List of conditions to be enforced on each nets; must be of the same length as nets.
:type conditions: list[`neurodiffeq.conditions.BaseCondition`]
:param harmonics_fn: Mapping from :math:`\theta` and :math:`\phi` to basis functions, e.g., spherical harmonics.
:type harmonics_fn: callable
    :param max_degree: **DEPRECATED and SUPERSEDED** by ``harmonics_fn``. Highest degree used for the harmonic basis.
:type max_degree: int
"""
def __init__(self, nets, conditions, max_degree=None, harmonics_fn=None):
super(SolutionSphericalHarmonics, self).__init__(nets, conditions)
if (harmonics_fn is None) and (max_degree is None):
raise ValueError("harmonics_fn should be specified")
if max_degree is not None:
warnings.warn("`max_degree` is DEPRECATED; pass `harmonics_fn` instead, which takes precedence")
self.harmonics_fn = RealSphericalHarmonics(max_degree=max_degree)
if harmonics_fn is not None:
self.harmonics_fn = harmonics_fn
def _compute_u(self, net, condition, rs, thetas, phis):
products = condition.enforce(net, rs) * self.harmonics_fn(thetas, phis)
return torch.sum(products, dim=1)
class Solution1D(BaseSolution):
def _compute_u(self, net, condition, ts):
return condition.enforce(net, ts)
class Solver1D(BaseSolver):
r"""A solver class for solving ODEs (single-input differential equations)
:param ode_system:
The ODE system to solve, which maps a torch.Tensor to a tuple of ODE residuals,
both the input and output must have shape (n_samples, 1).
:type ode_system: callable
:param conditions:
List of conditions for each target function.
:type conditions: list[`neurodiffeq.conditions.BaseCondition`]
:param t_min:
Lower bound of input (start time).
Ignored if ``train_generator`` and ``valid_generator`` are both set.
:type t_min: float, optional
:param t_max:
        Upper bound of input (end time).
Ignored if ``train_generator`` and ``valid_generator`` are both set.
:type t_max: float, optional
:param nets:
List of neural networks for parameterized solution.
If provided, length of ``nets`` must equal that of ``conditions``
:type nets: list[torch.nn.Module], optional
:param train_generator:
Generator for sampling training points,
which must provide a ``.get_examples()`` method and a ``.size`` field.
``train_generator`` must be specified if ``t_min`` and ``t_max`` are not set.
:type train_generator: `neurodiffeq.generators.BaseGenerator`, optional
:param valid_generator:
Generator for sampling validation points,
which must provide a ``.get_examples()`` method and a ``.size`` field.
``valid_generator`` must be specified if ``t_min`` and ``t_max`` are not set.
:type valid_generator: `neurodiffeq.generators.BaseGenerator`, optional
:param analytic_solutions:
Analytical solutions to be compared with neural net solutions.
It maps a torch.Tensor to a tuple of function values.
Output shape should match that of ``nets``.
:type analytic_solutions: callable, optional
:param optimizer:
Optimizer to be used for training.
Defaults to a ``torch.optim.Adam`` instance that trains on all parameters of ``nets``.
:type optimizer: ``torch.nn.optim.Optimizer``, optional
:param criterion:
Function that maps a ODE residual tensor (of shape (-1, 1)) to a scalar loss.
:type criterion: callable, optional
:param n_batches_train:
Number of batches to train in every epoch, where batch-size equals ``train_generator.size``.
Defaults to 1.
:type n_batches_train: int, optional
:param n_batches_valid:
Number of batches to validate in every epoch, where batch-size equals ``valid_generator.size``.
Defaults to 4.
:type n_batches_valid: int, optional
:param metrics:
Additional metrics to be logged (besides loss). ``metrics`` should be a dict where
- Keys are metric names (e.g. 'analytic_mse');
        - Values are functions (callables) that compute the metric value.
These functions must accept the same input as the differential equation ``ode_system``.
:type metrics: dict[str, callable], optional
:param n_output_units:
Number of output units for each neural network.
Ignored if ``nets`` is specified.
Defaults to 1.
:type n_output_units: int, optional
:param batch_size:
**[DEPRECATED and IGNORED]**
Each batch will use all samples generated.
Please specify ``n_batches_train`` and ``n_batches_valid`` instead.
:type batch_size: int
:param shuffle:
**[DEPRECATED and IGNORED]**
Shuffling should be performed by generators.
:type shuffle: bool
"""
def __init__(self, ode_system, conditions, t_min, t_max,
nets=None, train_generator=None, valid_generator=None, analytic_solutions=None, optimizer=None,
criterion=None, n_batches_train=1, n_batches_valid=4, metrics=None, n_output_units=1,
# deprecated arguments are listed below
batch_size=None, shuffle=None):
if train_generator is None or valid_generator is None:
if t_min is None or t_max is None:
raise ValueError(f"Either generator is not provided, t_min and t_max should be both provided: \n"
f"got t_min={t_min}, t_max={t_max}, "
f"train_generator={train_generator}, valid_generator={valid_generator}")
if train_generator is None:
train_generator = Generator1D(32, t_min=t_min, t_max=t_max, method='equally-spaced-noisy')
if valid_generator is None:
valid_generator = Generator1D(32, t_min=t_min, t_max=t_max, method='equally-spaced')
self.t_min, self.t_max = t_min, t_max
super(Solver1D, self).__init__(
diff_eqs=ode_system,
conditions=conditions,
nets=nets,
train_generator=train_generator,
valid_generator=valid_generator,
analytic_solutions=analytic_solutions,
optimizer=optimizer,
criterion=criterion,
n_batches_train=n_batches_train,
n_batches_valid=n_batches_valid,
metrics=metrics,
n_input_units=1,
n_output_units=n_output_units,
shuffle=shuffle,
batch_size=batch_size,
)
def get_solution(self, copy=True, best=True):
r"""Get a (callable) solution object. See this usage example:
.. code-block:: python3
solution = solver.get_solution()
point_coords = train_generator.get_examples()
value_at_points = solution(point_coords)
:param copy:
Whether to make a copy of the networks so that subsequent training doesn't affect the solution;
Defaults to True.
:type copy: bool
:param best:
Whether to return the solution with lowest loss instead of the solution after the last epoch.
Defaults to True.
:type best: bool
:return:
A solution object which can be called.
To evaluate the solution on certain points,
you should pass the coordinates vector(s) to the returned solution.
:rtype: BaseSolution
"""
nets = self.best_nets if best else self.nets
conditions = self.conditions
if copy:
nets = deepcopy(nets)
conditions = deepcopy(conditions)
return Solution1D(nets, conditions)
def _get_internal_variables(self):
available_variables = super(Solver1D, self)._get_internal_variables()
available_variables.update({
't_min': self.t_min,
't_max': self.t_max,
})
return available_variables
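# Usage sketch (illustrative only; the ODE, condition, and epoch count below are
# example choices, not defaults of this module):
#
#     from neurodiffeq import diff
#     from neurodiffeq.conditions import IVP
#
#     # solve u' + u = 0 with u(0) = 1 on t in [0, 2]
#     solver = Solver1D(
#         ode_system=lambda u, t: [diff(u, t) + u],
#         conditions=[IVP(t_0=0.0, u_0=1.0)],
#         t_min=0.0, t_max=2.0,
#     )
#     solver.fit(max_epochs=100)
#     solution = solver.get_solution()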
class Solution2D(BaseSolution):
def _compute_u(self, net, condition, xs, ys):
return condition.enforce(net, xs, ys)
class Solver2D(BaseSolver):
r"""A solver class for solving PDEs in 2 dimensions.
:param pde_system:
The PDE system to solve, which maps two ``torch.Tensor``s to PDE residuals (``tuple[torch.Tensor]``),
both the input and output must have shape (n_samples, 1).
:type pde_system: callable
:param conditions:
List of conditions for each target function.
:type conditions: list[`neurodiffeq.conditions.BaseCondition`]
:param xy_min:
The lower bound of 2 dimensions.
If we only care about :math:`x \geq x_0` and :math:`y \geq y_0`,
then `xy_min` is `(x_0, y_0)`.
Only needed when train_generator or valid_generator are not specified.
Defaults to None
:type xy_min: tuple[float, float], optional
:param xy_max:
The upper bound of 2 dimensions.
        If we only care about :math:`x \leq x_1` and :math:`y \leq y_1`, then `xy_max` is `(x_1, y_1)`.
Only needed when train_generator or valid_generator are not specified.
Defaults to None
:type xy_max: tuple[float, float], optional
:param nets:
List of neural networks for parameterized solution.
If provided, length of ``nets`` must equal that of ``conditions``
:type nets: list[torch.nn.Module], optional
:param train_generator:
Generator for sampling training points,
which must provide a ``.get_examples()`` method and a ``.size`` field.
        ``train_generator`` must be specified if ``xy_min`` and ``xy_max`` are not set.
:type train_generator: `neurodiffeq.generators.BaseGenerator`, optional
:param valid_generator:
Generator for sampling validation points,
which must provide a ``.get_examples()`` method and a ``.size`` field.
        ``valid_generator`` must be specified if ``xy_min`` and ``xy_max`` are not set.
:type valid_generator: `neurodiffeq.generators.BaseGenerator`, optional
:param analytic_solutions:
Analytical solutions to be compared with neural net solutions.
It maps a torch.Tensor to a tuple of function values.
Output shape should match that of ``nets``.
:type analytic_solutions: callable, optional
:param optimizer:
Optimizer to be used for training.
Defaults to a ``torch.optim.Adam`` instance that trains on all parameters of ``nets``.
:type optimizer: ``torch.nn.optim.Optimizer``, optional
:param criterion:
Function that maps a PDE residual tensor (of shape (-1, 1)) to a scalar loss.
:type criterion: callable, optional
:param n_batches_train:
Number of batches to train in every epoch, where batch-size equals ``train_generator.size``.
Defaults to 1.
:type n_batches_train: int, optional
:param n_batches_valid:
Number of batches to validate in every epoch, where batch-size equals ``valid_generator.size``.
Defaults to 4.
:type n_batches_valid: int, optional
:param metrics:
Additional metrics to be logged (besides loss). ``metrics`` should be a dict where
- Keys are metric names (e.g. 'analytic_mse');
        - Values are functions (callables) that compute the metric value.
        These functions must accept the same input as the differential equation ``pde_system``.
:type metrics: dict[str, callable], optional
:param n_output_units:
Number of output units for each neural network.
Ignored if ``nets`` is specified.
Defaults to 1.
:type n_output_units: int, optional
:param batch_size:
**[DEPRECATED and IGNORED]**
Each batch will use all samples generated.
Please specify ``n_batches_train`` and ``n_batches_valid`` instead.
:type batch_size: int
:param shuffle:
**[DEPRECATED and IGNORED]**
Shuffling should be performed by generators.
:type shuffle: bool
"""
def __init__(self, pde_system, conditions, xy_min, xy_max,
nets=None, train_generator=None, valid_generator=None, analytic_solutions=None, optimizer=None,
criterion=None, n_batches_train=1, n_batches_valid=4, metrics=None, n_output_units=1,
# deprecated arguments are listed below
batch_size=None, shuffle=None):
if train_generator is None or valid_generator is None:
if xy_min is None or xy_max is None:
raise ValueError(f"Either generator is not provided, xy_min and xy_max should be both provided: \n"
f"got xy_min={xy_min}, xy_max={xy_max}, "
f"train_generator={train_generator}, valid_generator={valid_generator}")
if train_generator is None:
train_generator = Generator2D((32, 32), xy_min=xy_min, xy_max=xy_max, method='equally-spaced-noisy')
if valid_generator is None:
valid_generator = Generator2D((32, 32), xy_min=xy_min, xy_max=xy_max, method='equally-spaced')
self.xy_min, self.xy_max = xy_min, xy_max
super(Solver2D, self).__init__(
diff_eqs=pde_system,
conditions=conditions,
nets=nets,
train_generator=train_generator,
valid_generator=valid_generator,
analytic_solutions=analytic_solutions,
optimizer=optimizer,
criterion=criterion,
n_batches_train=n_batches_train,
n_batches_valid=n_batches_valid,
metrics=metrics,
n_input_units=2,
n_output_units=n_output_units,
shuffle=shuffle,
batch_size=batch_size,
)
def get_solution(self, copy=True, best=True):
r"""Get a (callable) solution object. See this usage example:
.. code-block:: python3
solution = solver.get_solution()
point_coords = train_generator.get_examples()
value_at_points = solution(point_coords)
:param copy:
Whether to make a copy of the networks so that subsequent training doesn't affect the solution;
Defaults to True.
:type copy: bool
:param best:
Whether to return the solution with lowest loss instead of the solution after the last epoch.
Defaults to True.
:type best: bool
:return:
A solution object which can be called.
To evaluate the solution on certain points,
you should pass the coordinates vector(s) to the returned solution.
:rtype: BaseSolution
"""
nets = self.best_nets if best else self.nets
conditions = self.conditions
if copy:
nets = deepcopy(nets)
conditions = deepcopy(conditions)
return Solution2D(nets, conditions)
def _get_internal_variables(self):
available_variables = super(Solver2D, self)._get_internal_variables()
available_variables.update({
'xy_min': self.xy_min,
'xy_max': self.xy_max,
})
return available_variables
|
[
"noreply@github.com"
] |
jpe17.noreply@github.com
|
07d2df4f3477240a51978f07113b474e92912098
|
c9259e697bd21e948e8e171bf1654d8792d7ff1e
|
/config.py
|
2d28a3f8cfe5f7db2655eb6449ac7e98147ddb1d
|
[] |
no_license
|
jakov96/imageAPI
|
0db07c452d884984fbab2a15fecb6de8e4ffe565
|
968bbc19f910ee19f8a3d7b2cb8e451e3eadbdcd
|
refs/heads/master
| 2023-05-24T04:43:06.481845
| 2021-06-22T07:24:15
| 2021-06-22T07:24:15
| 377,863,073
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 705
|
py
|
import os
basedir = os.path.abspath(os.path.dirname(__file__))
ALLOWED_EXTENSIONS = ['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif']
POSTGRES_USER = os.environ.get('POSTGRES_DB_USER')
POSTGRES_PASSWORD = os.environ.get('POSTGRES_DB_PASSWORD')
POSTGRES_HOST = os.environ.get('POSTGRES_DB_HOST')
POSTGRES_NAME = os.environ.get('POSTGRES_DB_NAME')
class Config:
SECRET_KEY = "\xda\xb2\x95\xd72\xd5\xf5\t\x96\xfd\x1d'\xb4\xe8h\x0cO\x0b(N)>\x88\xde"
SQLALCHEMY_ECHO = False
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://{0}:{1}@{2}:5432/{3}'.format(
POSTGRES_USER, POSTGRES_PASSWORD, POSTGRES_HOST, POSTGRES_NAME)
SQLALCHEMY_TRACK_MODIFICATIONS = False
UPLOAD_FOLDER = 'uploads'
|
[
"sharygin-1996@"
] |
sharygin-1996@
|
227d12ff393b70e128e6323523a7876c55abd958
|
88304cd64bd8de02d88a891f5ba6ed0e0a97b3a8
|
/rNN/vanillaRNN.py
|
1d4fbc6443d650d89779e01662be22aaa3d2b80b
|
[] |
no_license
|
hyanique/ai4good-19
|
ce90fb34f2df8c2e12d4d75b11a86a83e6c9258e
|
6af9222761b818cc99d49b2bf9b9ea1fafd0b99e
|
refs/heads/master
| 2020-05-22T18:31:54.288404
| 2019-06-13T17:41:54
| 2019-06-13T17:41:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,916
|
py
|
"""
A vanilla recurrent neural network, created following the official PyTorch tutorial.
------
Tasks:
1. Prepare and explore the data
2. Load the data
3. Create the network
4. Split the data
5. Train
6. Evaluate
7. Predict
------
Future TODOs:
- modify the sampling strategy: separate training and testing sets; "enlarge" a small dataset
"""
############################################
# Import modules
############################################
import torch # for tensor class
import os # for file search
import re # regex
import unicodedata # dealing with utf8
import string # working with string
import matplotlib.pyplot as plt # for ploting
import matplotlib.ticker as ticker # for force ticker display
from datetime import datetime # for timestamp
import numpy as np # for ndarray
import torch.nn as nn # PyTorch neural net module
import random  # random shuffling of samples
############################################
# Declare global variables: module non-public
############################################
ALL_LETTERS = string.ascii_letters + " .,;'"
PATH = '..\\datasets\\names'
############################################
# Prepare data
############################################
def list_by_extension(path, extension=r".*(\.txt)"):
# retrieve files with given extension in the given dir
dirlist = os.listdir(path)
pattern = re.compile(extension)
filtered = filter(pattern.match, dirlist)
files = list(filtered)
return files
def utf8_to_ascii(utf8_string):
""" convert utf8 strings to ascii, stripping away accents rather than translate them.
------
Code adopted from https://pytorch.org/tutorials/intermediate/char_rnn_classification_tutorial.html"""
return ''.join(
char for char in unicodedata.normalize('NFD', utf8_string)
if unicodedata.category(char) != 'Mn'
and char in ALL_LETTERS
)
def read_lines_from_file(file_path):
lines = open(file_path, encoding='utf-8').read().strip().split('\n')
return [utf8_to_ascii(l) for l in lines]
def create_lang_name_dict(files, path):
name_dict = {}
for file in files:
file_path = os.path.join(path, file)
lang = os.path.splitext(os.path.basename(file))[0]
names = read_lines_from_file(file_path)
name_dict[lang] = names
return name_dict
############################################
# Load data
############################################
def _one_hot_char_tensor(char, n_char=len(ALL_LETTERS)):
# one-hot encode a char
tensor = torch.zeros(1, n_char)
char_index = ALL_LETTERS.find(char)
tensor[0][char_index] = 1
return tensor
def _one_hot_word_tensor(word, n_char=len(ALL_LETTERS)):
# one-hot encode a word (string type!)
assert (len(word) >= 1)
    chars = list(word)  # convert string to a list of chars
char = chars.pop(0)
tensor = _one_hot_char_tensor(char, n_char)
while len(chars) > 0:
char = chars.pop(0)
t = _one_hot_char_tensor(char, n_char)
tensor = torch.cat([tensor, t])
assert (tensor.shape[0] == len(word))
return tensor
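# e.g. _one_hot_word_tensor("Ann") has shape (3, 57): one row per character,
# one column per symbol in ALL_LETTERS (52 ASCII letters plus " .,;'").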
# def one_hot_dict_tensor(dict, n_char=len(ALL_LETTERS)):
# # one-hot the dictionary
# pass
def map_output_to_category(out, categories):
# translate numerical values in support to categories in dictionary
top_values, top_indices = out.topk(1)
category_index = top_indices[0].item()
return category_index, categories[category_index]
############################################
# Visualize data
############################################
def _get_value_count(dict, keys):
# helper function for show_distr_dict
counts = []
for key in keys:
count = len(dict[key])
counts.append(count)
return counts
def show_distr_dict(dict, key_name='key', value_name='value', savefig=False):
keys = list(dict.keys())
counts = _get_value_count(dict, keys)
plt.bar(keys, counts, alpha=0.75, color="slateblue")
plt.title('Distribution of the {}-{} dictionary'.format(key_name, value_name))
plt.xlabel(key_name)
plt.tick_params(axis='x', rotation=70)
plt.ylabel("{} count".format(value_name))
if savefig:
plt.savefig(
'{} - data distribution.png'.format(datetime.now().strftime('%Y%m%d-%H%M')))
plt.show(block=False)
plt.pause(2)
plt.close()
def show_confusion_matrix_old(confusion_matrix, classes, savefig=False):
fig = plt.figure()
ax = plt.gca()
cmat = ax.matshow(confusion_matrix.numpy())
fig.colorbar(cmat) # color bar
ax.set_xticklabels([''] + classes, rotation=70) # x axis
ax.set_yticklabels([''] + classes) # y axis
if savefig:
plt.savefig(
'{} - confusion matrix'.format(datetime.now().strftime('%Y%m%d-%H%M')))
plt.show(block=False)
plt.pause(2)
plt.close()
def show_confusion_matrix(confusion_matrix, classes, savefig=False):
fig = plt.figure()
ax = plt.gca()
cmat = ax.imshow(confusion_matrix.numpy(), cmap='copper')
fig.colorbar(cmat) # color bar
ax.set_xticklabels([''] + classes, rotation=70) # x axis
ax.set_yticklabels([''] + classes) # y axis
    # force showing tick labels
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
if savefig:
plt.savefig(
'{} - confusion matrix'.format(datetime.now().strftime('%Y%m%d-%H%M')))
plt.show(block=False)
plt.pause(2)
plt.close()
############################################
# Data Sampling
############################################
def _get_entry_from_dict(index, dict, verbose=False):
keys = list(dict.keys())
counts = _get_value_count(dict, keys)
while index >= counts[0]:
keys.pop(0)
index -= counts.pop(0)
key = keys.pop(0)
if verbose:
print("key={}, count={}, index={}".format(key, counts[0], index))
value = dict[key][index]
return key, value
def random_dict_samples(n_samples, data_dict, verbose=False):
keys = list(data_dict.keys())
counts = _get_value_count(data_dict, keys)
count_datapoints = sum(counts)
# generate random indicis, stored in a numpy array of shape (n_samples,)
rand_indices = np.random.randint(0, count_datapoints, n_samples)
samples = []
for i in range(n_samples):
index = rand_indices[i]
lang, name = _get_entry_from_dict(index, data_dict)
name_tensor = _one_hot_word_tensor(name)
lang_tensor = torch.tensor([keys.index(lang)], dtype=torch.long)
samples.append((name_tensor, lang_tensor, name, lang))
if verbose:
print(lang, name)
return samples
def train_test_split(data_dict, ratio, verbose=False):
n_samples = sum(_get_value_count(data_dict, list(data_dict.keys())))
total_samples = random_dict_samples(n_samples, data_dict, verbose=False)
random.shuffle(total_samples)
n_train = int(np.ceil(n_samples * ratio))
print("n_train={} of type {}".format(n_train, type(n_train)))
return total_samples[:n_train], total_samples[n_train:]
############################################
# Define recurrent neural net
############################################
class recNN(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super(recNN, self).__init__()
self.hidden_size = hidden_size
self.in_to_hid = nn.Linear(input_size + hidden_size, hidden_size)
self.in_to_out = nn.Linear(input_size + hidden_size, output_size)
self.softmax = nn.LogSoftmax(dim=1)
def forward(self, input, hidden):
# print(input.shape, hidden.shape)
combined = torch.cat((input, hidden), 1)
hidden = self.in_to_hid(combined)
output = self.in_to_out(combined)
output = self.softmax(output)
return output, hidden
def initHidden(self):
return torch.zeros(1, self.hidden_size)
############################################
# Train & Evaluate & Predict
############################################
def train(rnn, category_tensor, word_tensor, criterion, lr):
# init
hidden = rnn.initHidden()
rnn.zero_grad()
for i in range(word_tensor.size()[0]):
output, hidden = rnn(word_tensor[i].view(1, -1), hidden)
loss = criterion(output, category_tensor)
loss.backward()
for p in rnn.parameters():
p.data.add_(-lr, p.grad.data)
return output, loss.item()
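# Note: the manual in-place update in train() (p.data.add_(-lr, p.grad.data)) is plain
# stochastic gradient descent; it is equivalent to calling .step() on a
# torch.optim.SGD(rnn.parameters(), lr=lr) optimizer after loss.backward().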
def fit_model(rnn, trainning_set, n_samples, support, criterion, lr, print_every, verbose=True, savefig=False):
total_loss = 0
iter_counter = 1
for sample in trainning_set:
(name_tensor, lang_tensor, name, lang) = sample
output, loss = train(rnn, lang_tensor, name_tensor, criterion, lr)
total_loss += loss
if verbose and iter_counter % print_every == 0:
pred_index, pred_lang = map_output_to_category(output, support)
correct = '√' if pred_lang == lang else '× {}'.format(lang)
print('{:>7} {:>3}% {:.5f} {:>12} | {:10} {}'.format(
iter_counter, int(iter_counter / n_samples * 100), loss, name, pred_lang, correct))
iter_counter += 1
return total_loss
def evaluate(rnn, criterion, lr, dataset, categories, plot=True, savefig=False):
total_loss = 0
correct = 0
n_categories = len(categories)
confusion = torch.zeros(n_categories, n_categories)
for sample in dataset:
(name_tensor, lang_tensor, name, lang) = sample
output, loss = train(rnn, lang_tensor, name_tensor, criterion, lr)
pred_index, pred_lang = map_output_to_category(output, categories)
lang_index = categories.index(lang)
confusion[lang_index][pred_index] += 1
total_loss += loss
if pred_lang == lang:
correct += 1
_normalize_confusion_matrix(confusion)
if plot:
show_confusion_matrix(confusion, categories, savefig=savefig)
    avg_loss = total_loss / len(dataset)
    print('\nAverage loss: {:.4f}, Accuracy: {}/{} ({:.3f}%)\n'.format(
        avg_loss, correct, len(dataset),
        100. * correct / len(dataset)))
return None
def _normalize_confusion_matrix(matrix):
for i in range(matrix.size()[0]):
matrix[i] = matrix[i] / matrix[i].sum()
def predict(name, rnn, criterion, lr, categories, n_predictions=3):
print('\nInput name: {}'.format(name))
name_tensor = _one_hot_word_tensor(name)
with torch.no_grad():
hidden = rnn.initHidden()
rnn.zero_grad()
for index in range(name_tensor.size()[0]):
output, hidden = rnn(name_tensor[index].view(1, -1), hidden)
# Get top N categories
topv, topi = output.topk(n_predictions, 1, True)
predictions = []
for i in range(n_predictions):
value = topv[0][i].item()
lang_index = topi[0][i].item()
print('(%.2f) %s' % (value, categories[lang_index]))
predictions.append([value, categories[lang_index]])
############################################
# Main
############################################
def main(phase):
# 1. Prepare the data
# - From the text files, retrieve data to create a lang-names dictionary
# - To reduce the headache brought by UTF-8, convert the chars to ASCII
# - Display a histogram for this dataset
path = PATH
files = list_by_extension(path)
lang_name_dict = create_lang_name_dict(files, path)
categories = list(lang_name_dict.keys())
n_categories = len(categories)
n_char = len(ALL_LETTERS)
if phase == 1:
print("\nFile search result:\n\t{}".format(files))
print("\nLanguages:\n\t{}".format(categories))
print("\nSome numerical facts:\n\t{} language categories, {} letters in the character set".format(
n_categories, n_char))
show_distr_dict(lang_name_dict, 'lang', 'name', savefig=True)
# 2. Load the data
# - convert dictionary entry to tensor with 1-hot encoding
# - translating support: numercial output <-> category
if phase == 2:
name = lang_name_dict['Spanish'][0]
print(_one_hot_word_tensor(name).shape)
print(_one_hot_char_tensor(list(name).pop()).shape)
foo = np.array([-2.8523, -2.7800, -2.9394, -2.8962, -2.9287, -2.8165, -2.8406, -2.7723, -
3.0290, -2.9533, -2.8288, -2.9262, -2.9352, -2.8949, -2.8554, -2.9956, -2.9283, -2.8957])
tt = torch.from_numpy(foo.reshape(1, 18))
print(map_output_to_category(tt, categories))
    # 3. Create the network
n_hidden = 128
rnn = recNN(n_char, n_hidden, n_categories)
if phase == 3:
word = _one_hot_word_tensor('Albert')
hidden = torch.zeros(1, n_hidden)
out, next_hidden = rnn(word[0].view(1, -1), hidden)
print(out)
# 4. Splitting the datasets: get the training samples
if phase == 4:
n_samples = 10
trainning_set = random_dict_samples(
n_samples, lang_name_dict, verbose=True)
# 5. Train
criterion = nn.NLLLoss()
lr = 0.005
if phase == 5:
n_samples = 10
assert (n_samples < sum(_get_value_count(
lang_name_dict, list(lang_name_dict.keys()))))
trainning_set = random_dict_samples(n_samples, lang_name_dict)
fit_model(rnn, trainning_set, n_samples,
categories, criterion, lr, print_every=2, verbose=True)
exit() # end of phase 5 testing
n_train_samples = 20000
assert (n_train_samples < sum(_get_value_count(lang_name_dict, list(
lang_name_dict.keys())))), "training set should be smaller than the orignial dataset"
trainning_set = random_dict_samples(n_train_samples, lang_name_dict)
train_loss = fit_model(rnn, trainning_set, n_train_samples,
categories, criterion, lr, print_every=500, verbose=True)
# 6. Evaluation on training set
if phase == 6:
evaluate(rnn, criterion, lr, trainning_set,
categories, plot=True, savefig=True)
# 7. prediction
if phase == 7:
predict('Dovesky', rnn, criterion, lr, categories, n_predictions=3)
predict('Hashimoto', rnn, criterion, lr, categories, n_predictions=3)
predict('Jackson', rnn, criterion, lr, categories, n_predictions=3)
a_name = input('Type in a name: ')
predict(a_name, rnn, criterion, lr, categories, n_predictions=3)
return 0
if __name__ == '__main__':
    phase = input('Key in the phase to run:\n\t0 for nothing\t1 for data exploration\n\t2 for data loading\t3 for network creation\n\t4 for training-set generation\t5 for training\n\t6 for evaluation\t7 for prediction\n')
main(int(phase))
|
[
"49214182+hqy06@users.noreply.github.com"
] |
49214182+hqy06@users.noreply.github.com
|
14e8e1c947e28696678c840d8513bceb939f5193
|
743d1815f98f0172916b33b497f2e708a2d6d56e
|
/paymongo/payment.py
|
6c83195095288c6e49413982768a0335f270c4d7
|
[
"MIT"
] |
permissive
|
rodneymandap/paymongo
|
f8093d9de0f382b3991cd05353335a0695223a1f
|
2e2b590f852abd5660eeb9ead88a66c72fc599e5
|
refs/heads/master
| 2022-04-22T09:14:08.642966
| 2020-04-12T14:25:26
| 2020-04-12T14:25:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 672
|
py
|
import requests
import json
from .urls import payment_url
from .constants import headers
class Payment(object):
def __init__(self, secret_key):
self.secret_key = secret_key
def create(self, payload):
response = requests.post(payment_url, data=json.dumps(payload), headers=headers,
auth=(self.secret_key, ''))
return response.json()
def retrieve(self, key):
response = requests.get(payment_url + '/' + key, auth=(self.secret_key, ''))
return response.json()
def list(self):
response = requests.get(payment_url, auth=(self.secret_key, ''))
return response.json()
|
[
"ccfiel@gmail.com"
] |
ccfiel@gmail.com
|
fd0153746cf04c8bada79511dd974aa203f9cc73
|
cbd865bdba079069ba52e4bf78dd1395acb99d5b
|
/1.py
|
a79655453e5982db056825dd11ff0150eebfdfe4
|
[] |
no_license
|
anhnguyendepocen/100-pythonExercises
|
52e72c214885e993207241b28124382365f28126
|
1f69184ba819b1a9d3880530aa349ae677dc1254
|
refs/heads/master
| 2022-01-17T14:00:16.390389
| 2019-03-23T19:22:13
| 2019-03-23T19:22:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 138
|
py
|
# Exercise No.1
# What will be printed?
a = 2
a = 4
a = 6
print(a + a + a)
# 18, because a was reassigned from 2 to 4 and finally to 6
|
[
"eduardessc0@hotmail.com"
] |
eduardessc0@hotmail.com
|
f254adfa0ee3ac7fb35d5d7f39c94bc39b5ccb0c
|
b659defb8d31cf43a17e8a7c9af29c7ba6eba87a
|
/object/app01/migrations/0002_content.py
|
dfe6fc06842b4ed1fa7fedec267424a1355d2f3c
|
[] |
no_license
|
tretea/python
|
40470ccbb943536152ae555b3781e164eb57814d
|
f7e89eeff2a1bb15be7fe20c6ae84a59a2a81226
|
refs/heads/master
| 2020-04-16T09:16:06.483724
| 2019-03-26T04:54:44
| 2019-03-26T04:54:44
| 165,457,705
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 534
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2019-02-16 14:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app01', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='content',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('content', models.CharField(max_length=5000)),
],
),
]
|
[
"2433469529@qq.com"
] |
2433469529@qq.com
|
55254bdd3ffae02f34f881723d3ce6181d83bace
|
a8e07b79a962562c5db420acc306df4e786c931d
|
/Practica VI/conecta.py
|
c7916559b4016657dc3195af610fe82a6d8ca328
|
[] |
no_license
|
m-vega/IA
|
e280e0457d302fb075723504a718c20b6e0ad401
|
c922c6b597ba985ac97e16fcf51da2e7dee9165b
|
refs/heads/master
| 2020-04-09T14:56:23.814133
| 2016-12-01T07:23:48
| 2016-12-01T07:23:48
| 68,161,195
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,254
|
py
|
#!/usr/bin/python
def ruta(a11,a12,b21,b22,lista,caracter):
    if (a11==b21): # If start and end are in the same row
        if (a12<b22): # If the starting column is to the left of the final column
            lista[a11][a12]=caracter # Place the character at the current position
            ruta(a11,a12+1,b21,b22,lista,caracter)
        if (a12>b22): # If the starting column is to the right of the final column
            lista[a11][a12]=caracter # Place the character at the current position
            ruta(a11,a12-1,b21,b22,lista,caracter)
        else: # If the starting column is at the same position as (or to the left of) the final column
            lista[a11][a12]=caracter # Place the character at the current position
    elif (a11<b21): # If the starting row is above the final row
        lista[a11][a12]=caracter # Place the character at the current position
        ruta(a11+1,a12,b21,b22,lista,caracter)
    elif (a11>b21): # If the starting row is below the final row
        lista[a11][a12]=caracter # Place the character at the current position
        ruta(a11-1,a12,b21,b22,lista,caracter)
    return lista
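# Example: ruta(0, 0, 2, 3, lista, 'A') first moves down column 0 from row 0 to row 2,
# then moves right along row 2 to column 3, marking every visited cell with 'A'.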
lista=[]
renglones, columna = 10, 10
for x in range(renglones):
columnas=[]
for y in range(columna):
columnas.append(" ")
lista.append(columnas)
print "Imprimiendo tablero"
for x in range(renglones): #Por estetica, imprimimos las matrices por filas.
print lista[x]
numero_puntos = int(raw_input('Ingrese el numero de pares de coordenadas que utilizara:'))
i = 0
while i < numero_puntos :
caracter = raw_input('Ingrese el caracter del par de puntos: ')
A11 = int(raw_input('Ingrese el renglon de la primer coordenada: '))
A12 = int(raw_input('Ingrese la columna de la primer coordenada: '))
A21 = int(raw_input('Ingrese el renglon de la segunda coordenada: '))
A22 = int(raw_input('Ingrese la columna de la segunda coordenada: '))
print ""
solucion=ruta(A11,A12,A21,A22,lista, caracter) #Buscamos el Camino de A1 a A2
print ""
print "Generando una Ruta desde el punto " + caracter + "1(" + str(A11) + "," + str(A12) + ") a " + caracter + "2(" + str(A21) + "," + str(A22) +")"
print ""
print "Ruta encontrada"
for x in range(renglones): #Por estetica imprimimos el resultado de la ruta, por filas.
print solucion[x]
i = i + 1
if i is numero_puntos:
break
|
[
"manuelvega15@hotmail.com"
] |
manuelvega15@hotmail.com
|
4082985dbcd8f9a297eb590053254d9cef876f20
|
d3c5fbc501a47eedd786b0ad8f0cad48b8f58cf8
|
/virtual/bin/pyhtmlizer
|
d57e1dfac662f49130d32d531eb429fef7be4a4c
|
[] |
no_license
|
lorrainekamanda/hooliganeccomerce
|
89b2ddde08d266289c6f1d8d50bb028b155253ac
|
12de98ef17454d582e216c5ffe4e826328d43f5e
|
refs/heads/master
| 2023-02-21T06:04:22.645091
| 2021-01-21T19:07:32
| 2021-01-21T19:07:32
| 308,988,273
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 272
|
#!/home/user/Documents/HOOLIGAN_LLC/Ecommerce/virtual/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from twisted.scripts.htmlizer import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run())
|
[
"user@localhost.localdomain"
] |
user@localhost.localdomain
|
|
267fe31e60e27caf1c10d47f5f02c42af1d37746
|
168739573a5072603363c762857c0f04139d3cb8
|
/python/game_states.py
|
ee6a85eb7b704a73df6b09b09cd5aaef52f11f11
|
[] |
no_license
|
paulomenezes/roguelike
|
bc0c084241d868f8d096ee76a9a6dac4d5fba39b
|
b9d513d9400b0f743477db80774940a242bf4788
|
refs/heads/master
| 2020-03-21T06:53:58.867378
| 2018-09-05T21:32:44
| 2018-09-05T21:32:44
| 138,248,277
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 158
|
py
|
from enum import Enum
class GameStates(Enum):
PLAYERS_TURN = 1
ENEMY_TURN = 2
PLAYER_DEAD = 3
SHOW_INVENTORY = 4
DROP_INVENTORY = 5
TARGETING = 6
|
[
"paulo.hgmenezes@gmail.com"
] |
paulo.hgmenezes@gmail.com
|
ae236f56ade8d3cef370de04f7956f885c2f084a
|
03991faf3f732d6d2b1caf106f2a835d4ded34da
|
/venv/Scripts/pip-script.py
|
80d9361ee6b7974e9c52251c0e58a13ca1c7014a
|
[] |
no_license
|
18502992073/regist
|
ce1b262c11216f16c91c49a6e328b347cfae50a3
|
b92ed44aec13a2eee39649c85c280a1ed6f0066c
|
refs/heads/master
| 2020-05-29T20:17:07.977696
| 2019-06-15T14:23:11
| 2019-06-15T14:23:11
| 189,123,573
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 410
|
py
|
#!C:\Users\Administrator\Desktop\regist\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
)
|
[
"18502992073@163.com"
] |
18502992073@163.com
|
eed59157b37eb14987ac2bf739a3e58a8d7d999c
|
a9a90eae727590f0ccffaa255ffeaa194309fbe9
|
/Codekata/vowel.py
|
e1dde24493390e35b649a4648c51b3f9a51750ac
|
[] |
no_license
|
dhanuskarthikeyan/guvi
|
18c39674d3ee8e0012caef781d7905e541792174
|
671d64189f6039ffad8d91cab13942aafa87bf29
|
refs/heads/master
| 2020-06-03T00:07:45.041170
| 2019-07-08T17:39:33
| 2019-07-08T17:39:33
| 191,355,054
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 142
|
py
|
ch=raw_input()
vo=['a','e','i','o','u']
if ch in vo:
print "Vowel"
else:
    if ch.isalpha():  # call isalpha(); without parentheses the bare method is always truthy
print "Consonant"
else:
print "invalid"
|
[
"noreply@github.com"
] |
dhanuskarthikeyan.noreply@github.com
|
fea248a38681abfda0a97fb1517b74aa0a16559e
|
766a67fff47209d383ac1024f7a18139717a0a02
|
/extra/pro-호텔 방 배정.py
|
8d9a1a1ffbe5c4b1c6b62758ac4ea5500936ae21
|
[] |
no_license
|
yusokk/algorithm
|
7ac53393a3c29255331ecc8b57615b3c0a736279
|
86a6f07669ac6d5405763a3631e4e1756b663272
|
refs/heads/main
| 2023-08-07T07:27:23.940004
| 2021-10-05T14:30:17
| 2021-10-05T14:30:17
| 338,821,996
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 484
|
py
|
import sys
sys.setrecursionlimit(10 ** 8)
def find_room(room, next_dict):
empty_room = room
if next_dict.get(room, 0):
empty_room = find_room(next_dict[room], next_dict)
next_dict[room] = empty_room
else:
next_dict[room] = room + 1
return empty_room
def solution(k, room_number):
answer = []
next_dict = dict()
for room in room_number:
my_room = find_room(room, next_dict)
answer.append(my_room)
return answer
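# Example: solution(10, [1, 3, 4, 1, 3, 1]) returns [1, 3, 4, 2, 5, 6] -- each guest gets
# the requested room if it is still empty, otherwise the next free room reached by
# following (and compressing) the chain stored in next_dict.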
|
[
"13chakbo@naver.com"
] |
13chakbo@naver.com
|
35a73a27d7873d30fc601b958c8b0283ca9d69bb
|
236545daa64d5e4368b4d9c977ee99d47fb1a241
|
/utils/log.py
|
e921b0ff00a459eb5292be4bc13154ddb2262f3e
|
[] |
no_license
|
yishuangxi/maicheren
|
ad890a842cb8d6be38338f3cf7d996fe5a44eae7
|
cd68b30dd394fa376a925c3edc18953140cede94
|
refs/heads/master
| 2021-05-03T16:55:37.220023
| 2016-10-31T23:32:34
| 2016-10-31T23:32:34
| 71,998,261
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,816
|
py
|
# coding: utf-8
import logging
import logging.handlers  # needed below for TimedRotatingFileHandler
from tornado.options import options
class Log(object):
def __init__(self, name=None):
self.log = None
# log format
log_format = "[%(levelname)1.1s %(asctime)s] %(message)s"
formatter = logging.Formatter(log_format, '%y%m%d %H:%M:%S')
# console log handler
console = logging.StreamHandler()
console.setLevel(logging.WARN)
console.setFormatter(formatter)
logging.getLogger().addHandler(console)
# file log handler
if options.log_file_prefix and name is not None:
log = logging.getLogger(name)
channel = logging.handlers.TimedRotatingFileHandler(
filename=options.log_file_prefix,
when='midnight',
backupCount=options.log_file_num_backups
)
channel.setFormatter(formatter)
log.addHandler(channel)
if options.debug:
log.setLevel(logging.DEBUG)
else:
log.setLevel(logging.INFO)
self.log = log
def format(self, *args):
_ = ' '.join('%s' for _ in xrange(len(args)))
return _ % (args)
def info(self, *args):
if self.log:
self.log.info(self.format(*args))
else:
print self.format(*args)
def warn(self, *args):
if self.log:
self.log.warn(self.format(*args))
else:
print self.format(*args)
def debug(self, *args):
if self.log:
self.log.debug(self.format(*args))
else:
print self.format(*args)
def error(self, *args):
if self.log:
self.log.error(self.format(*args))
else:
print self.format(*args)
log = Log('maicheren-api')
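# Usage sketch: log.info('user', 42) joins the arguments with '%s' placeholders and
# logs (or prints) the message "user 42".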
|
[
"2546618112@qq.com"
] |
2546618112@qq.com
|
eb7913935ba08ee8b2b313a5674f2c99aa869c51
|
ec62137be045f13fa410c6b336f621069a28def5
|
/units_03_python/PyPoll/PyPoll.py
|
ff7e37eceab4269e95cd75cb4505d30002b3e404
|
[] |
no_license
|
TGray77/SMU_Homework
|
ac21c01200d814487ad226f260223a2a6cc15eb9
|
2c821d5621f68cd2b27c0c8b334f829bc8ea37d0
|
refs/heads/master
| 2020-07-06T04:40:30.341090
| 2019-10-24T01:28:12
| 2019-10-24T01:28:12
| 202,895,188
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,850
|
py
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# Import Dependencies
import pandas as pd
# In[2]:
# File to Load
pyPoll = "election_data.csv"
#Create Variable for dataframe
Poll_df = pd.read_csv(pyPoll)
Poll_df.head()
# In[3]:
# Explore the election data DataFrame
print(Poll_df.columns)
print(Poll_df.info())
print(Poll_df.describe())
print(Poll_df.head())
# In[5]:
# calculate total # of votes cast
votes = len(Poll_df["Voter ID"].unique())
print(votes)
# In[7]:
#complete list of candidates
candidates = Poll_df["Candidate"].unique()
print(candidates)
# candidate names appear in the output lines below
# In[21]:
# Total number of votes per candidate
count = Poll_df["Candidate"].value_counts()
print(count)
# In[22]:
maxVotes = count.max()
maxCand = count.idxmax()
print(str(maxCand) + " won the election with " + str(maxVotes) + " votes.")
# In[15]:
#Percentage of vote per candidate
percentage = (count / votes)*100
print(round(percentage))
# In[19]:
#Winner based on popular vote
candidates = Poll_df["Candidate"].max()
# In[31]:
# style output
output = (
f"\nElection Results: \n"
f"------------------- \n"
f"Total Votes: {votes} \n"
f"------------------- \n"
f"{list(percentage.index)[0]} {str(round(percentage[0]))}% ({str(count[0])})\n"
f"{list(percentage.index)[1]} {str(round(percentage[1]))}% ({str(count[1])})\n"
f"{list(percentage.index)[2]} {str(round(percentage[2]))}% ({str(count[2])})\n"
f"{list(percentage.index)[3]} {str(round(percentage[3]))}% ({str(count[3])})\n"
f"------------------- \n"
f"Winner : {maxCand} won the election with {maxVotes} votes.\n"
f"------------------- \n")
# print results
print(output)
# In[ ]:
# In[1]:
text_file = open("Output.txt", "w")
text_file.write(output)
text_file.close()
# In[ ]:
|
[
"terrence.gray@gmail.com"
] |
terrence.gray@gmail.com
|
bbf932846b07c11cfdebb182c170c83e0a647590
|
525c2f4c423e7c1dc358d4df57dabb8d4350fde0
|
/config.py
|
4fed0c1772b4963b6b8465ab784e96830efcd2f9
|
[] |
no_license
|
dgquintero/testing_webapp
|
4470fcafeae8dc947b33b22789f3ec49d7fcd951
|
73835072a809e22a4780e8cdb0dc28fb0ecadb68
|
refs/heads/master
| 2020-12-26T20:21:26.849542
| 2020-05-10T23:29:03
| 2020-05-10T23:29:03
| 237,630,266
| 0
| 0
| null | 2020-05-10T23:29:56
| 2020-02-01T14:55:57
|
Python
|
UTF-8
|
Python
| false
| false
| 264
|
py
|
class Config:
SECRET_KEY = 'codigofacilito'
class DevelopmentConfig(Config):
DEBUG = True
SQLALCHEMY_DATABASE_URI = 'mysql://root:root@localhost/project_we_facilito'
config = {
'development': DevelopmentConfig,
'default': DevelopmentConfig
}
|
[
"dgquintero02@hotmail.com"
] |
dgquintero02@hotmail.com
|
ea4ff949b96b77e5609b1483493095cfa93f54a1
|
92ff597d9880232841bb19f3160c71680939aef9
|
/paper/core/functions.py
|
93944e75cc16ed0f706ae38d8e6687a79028ac90
|
[] |
no_license
|
LinkZ-J/ransomware
|
53416c1882abaa176bdab004a626d8fb573f68e3
|
c8eb5ebb34c9624adc1c5e9a34f854bca03dfaa5
|
refs/heads/main
| 2023-03-28T22:08:13.287250
| 2021-04-16T14:53:04
| 2021-04-16T14:53:04
| 351,756,069
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 990
|
py
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
from datetime import datetime
import time
import pandas as pd
# In[2]:
def diff_day(t1, t2):
dt1 = datetime.utcfromtimestamp(t1)
dt2 = datetime.utcfromtimestamp(t2)
return (dt2 - dt1).days
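# e.g. diff_day(0, 86400) == 1: exactly one day between the two UTC timestamps.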
# In[3]:
# [days between latest input and earliest output, lifetime, number of active days, max transactions per day, length of the input-time span, length of the output-time span]
# In[4]:
def analysize_timestamps(ts0, ts1, ts2):
result = []
result.append(diff_day(max(ts1), min(ts2)))
_all = ts1 + ts2
_all.sort()
result.append(diff_day(_all[0], _all[-1]))
day = lambda ts : datetime.utcfromtimestamp(ts).date()
days = [day(ts) for ts in _all]
day_num = pd.value_counts(days)
for ts in ts0:
day_num[day(ts)] -= 1
result.append(len(day_num))
result.append(day_num.max())
result.append(diff_day(min(ts1), max(ts1)))
result.append(diff_day(min(ts2), max(ts2)))
return result
|
[
"linke.ljy@alibaba-inc.com"
] |
linke.ljy@alibaba-inc.com
|
46ab3209317636f67b60a2f4597a5f163291ba2f
|
cf72ee10a251ea145d8d144495387f463f985c6b
|
/assignment1/python files/ibm2_random_convergence_aer.py
|
999026e9ee8e1e2ff3ac2ab5a2a6c611bcd30e73
|
[
"MIT"
] |
permissive
|
Kaleidophon/NLP2
|
8ea1d8b96dad3aadfbfab709ea2a270ceba2d5d6
|
c0fb89dda54867f043ca773a5d1ba265bc01e366
|
refs/heads/master
| 2020-03-18T15:02:53.368705
| 2018-05-25T15:35:44
| 2018-05-25T15:35:44
| 134,883,819
| 1
| 0
| null | 2018-05-25T17:04:17
| 2018-05-25T17:04:17
| null |
UTF-8
|
Python
| false
| false
| 10,886
|
py
|
import aer
from collections import defaultdict, Counter
import json
from math import log2
import numpy as np
from random import randint
import random
import progressbar
import pickle
import matplotlib.pyplot as plt
from scipy.special import digamma, gammaln
import os
def read_corpus(file_name, source_language):
"""
Reads the corpus and saves each sentence in a list.
"""
corpus = []
with open(file_name, "r", encoding='utf8') as f:
for line in f:
line = line.replace("\n", "")
sentence = line.split()
if source_language:
sentence.insert(0, "null")
corpus.append(sentence)
return corpus
def reduce_corpus(corpus):
"""
Reduces the vocabulary of the corpus by replacing each word that only
occurs once in the vocabulary by -LOW- in the corpus.
"""
flat_corpus = [word for sentence in corpus for word in sentence]
word_counts = Counter(flat_corpus)
small_corpus = []
for sentence in corpus:
small_sentence = []
for word in sentence:
if word_counts[word] != 1:
small_sentence.append(word)
else:
small_sentence.append("-LOW-")
small_corpus.append(small_sentence)
return small_corpus
def load_parameters(file_path):
"""
Loads the parameters that obtained the highest validation AER
from a Pickle file.
"""
f = open(file_path, "rb")
parameters = pickle.load(f)
f.close()
return parameters
def get_best_aer():
"""
Finds the file that had the highest AER score and returns the file path.
"""
dir_path = os.path.dirname(os.path.realpath("__file__"))
files = [f for f in os.listdir(dir_path) if f.endswith(".pkl")]
return files[0]
def initialise_parameters(source_corpus, target_corpus, method):
"""
Initialises the conditional probability of generating a source
word from a target word for all possible pairs of words in the source
and target sentences to 5 and then normalises the parameters such that
the initialisation is uniform.
"""
if method == "uniform":
vocabulary = set([word for sentence in source_corpus for word in sentence])
theta0 = 1/len(vocabulary)
return defaultdict(lambda: defaultdict(lambda: theta0))
elif method == "random":
file_path = get_best_aer()
initial_params = load_parameters(file_path)
parameters = {source_word: {target_word: np.random.uniform(0.001, 1) for
target_word, _ in target_words.items()} for
source_word, target_words in initial_params.items()}
return parameters
elif method == "ibm1":
file_path = get_best_aer()
parameters = load_parameters(file_path)
return parameters
def get_best_alignment(source_corpus, target_corpus, parameters, q):
"""
Gets the best alignment for each sentence and saves the alignment
in a list of lists that holds tuples for each position in the sentence
and looks as follows:
(sentence_index, target_word_index, source_word_index).
"""
alignments = []
print("Getting alignments...")
with progressbar.ProgressBar(max_value=len(target_corpus)) as bar:
for n in range(len(source_corpus)):
source_sentence = source_corpus[n]
target_sentence = target_corpus[n]
alignment = []
l = len(source_sentence)
m = len(target_sentence)
for i, target_word in enumerate(target_sentence):
best_prob = 0
best_j = 0
for j, source_word in enumerate(source_sentence):
# If a word does not occur in the training data, assign probability 0
prob = parameters[source_word].get(target_word, 0)
try:
prob = prob*q[j].get(i, 0).get(l, 0).get(m, 0)
except AttributeError:
prob = 0
if prob > best_prob:
best_prob = prob
best_j = j
if best_j != 0:
alignment.append((n, best_j, i+1))
alignments.append(alignment)
bar.update(n)
return alignments
def compute_aer(predictions, file_path):
"""
Computes the Alignment Error Rate.
"""
gold_sets = aer.read_naacl_alignments(file_path)
metric = aer.AERSufficientStatistics()
for gold, prediction in zip(gold_sets, predictions):
prediction = set([(alignment[1], alignment[2]) for alignment in prediction])
metric.update(sure=gold[0], probable=gold[1], predicted=prediction)
print(metric.aer())
return metric.aer()
def expectation_maximisation2(source_corpus, target_corpus, val_source,
val_target, parameters, num_iterations,
min_perplexity_change, model, file_path):
"""
Runs the EM algorithm for IBM Model 2.
"""
q = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: 0.1))))
old_perplexity = -100000
perplexities = []
aers = []
best_aer = 1
# with open(file_path, "a") as f:
for k in range(0, num_iterations):
print("Iteration #" + str(k), "out of", num_iterations - 1)
counts_pairs = defaultdict(lambda: defaultdict(lambda: 0))
counts_alignments = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: 0))))
counts_single = defaultdict(lambda: 0)
counts_pairs, counts_single, counts_alignments = e_step2(source_corpus, target_corpus,
counts_pairs, counts_single,
counts_alignments, q)
parameters, q = m_step2(parameters, q, counts_alignments, counts_pairs, counts_single)
perplexity = compute_perplexity2(parameters, q, source_corpus, target_corpus)
alignments = get_best_alignment(val_source, val_target, parameters, q)
val_aer = compute_aer(alignments, "validation/dev.wa.nonullalign")
perplexities.append(perplexity)
aers.append(val_aer)
# Convergence in terms of training log likelihood
if model == "likelihood":
if abs(perplexity - old_perplexity) < min_perplexity_change:
return perplexities, aers, parameters, q
else:
old_perplexity = perplexity
# Convergence in terms of best AER on validation data
elif model == "aer":
if val_aer < best_aer:
best_aer = val_aer
else:
return perplexities, aers, parameters, q
# f.close()
return perplexities, aers, parameters, q
def e_step2(source_corpus, target_corpus, counts_pairs, counts_single, counts_alignments, q):
"""
Does the E-step for IBM Model 2.
"""
print("Doing E-step...")
with progressbar.ProgressBar(max_value=len(source_corpus)) as bar:
for n in range(len(source_corpus)):
source_sentence = source_corpus[n]
target_sentence = target_corpus[n]
l = len(source_sentence)
m = len(target_sentence)
for i, target_word in enumerate(target_sentence):
delta_denominator = sum([q[j_k][i][l][m]*parameters[source_sentence[j_k]][target_word]
for j_k in range(l)])
for j, source_word in enumerate(source_sentence):
delta = (q[j][i][l][m]*parameters[source_word][target_word])/delta_denominator
counts_pairs[source_word][target_word] += delta
counts_single[source_word] += delta
counts_alignments[l][m][i][j] += delta
bar.update(n)
return counts_pairs, counts_single, counts_alignments
def m_step2(parameters, q, counts_alignments, counts_pairs, counts_single):
"""
Does the M-step for IBM Model 2.
"""
print("Doing M-step...")
for j in q.keys():
for i in q[j].keys():
for l in q[j][i].keys():
for m in q[j][i][l].keys():
q[j][i][l][m] = counts_alignments[l][m][i][j]/sum(counts_alignments[l][m][i].values())
for source_word, target_words in parameters.items():
for target_word in target_words:
parameters[source_word][target_word] = \
counts_pairs[source_word][target_word]/counts_single[source_word]
return parameters, q
def compute_perplexity2(parameters, q, source_corpus, target_corpus):
"""
Computes the perplexity of the corpus for IBM Model 2.
"""
perplexity = 0
print("Calculating perplexity...")
with progressbar.ProgressBar(max_value=len(source_corpus)) as bar:
for n in range(len(source_corpus)):
source_sentence = source_corpus[n]
target_sentence = target_corpus[n]
log_sentence = 0
l = len(source_sentence)
m = len(target_sentence)
for i, target_word in enumerate(target_sentence):
log_sum = []
for j, source_word in enumerate(source_sentence):
log_sum.append(parameters[source_word][target_word]*q[j][i][l][m])
log_sentence += np.log(np.sum(log_sum))
perplexity += log_sentence
bar.update(n)
print(perplexity)
return perplexity
if __name__ == '__main__':
train_source = read_corpus("training/hansards.36.2.e", True)
train_source = reduce_corpus(train_source)
train_target = read_corpus("training/hansards.36.2.f", False)
train_target = reduce_corpus(train_target)
val_source = read_corpus("validation/dev.e", True)
val_target = read_corpus("validation/dev.f", False)
test_source = read_corpus("testing/test/test.e", True)
test_target = read_corpus("testing/test/test.f", False)
model = "aer"
initial = "random"
parameters = initialise_parameters(train_source, train_target, initial)
file_path = "ibm2_" + model + "_initial_" + initial + ".txt"
perplexities, aers, parameters, q = expectation_maximisation2(train_source, train_target,
val_source, val_target,
parameters, 10, 1000, model,
file_path)
alignments = get_best_alignment(test_source, test_target, parameters, q)
test_aer = compute_aer(alignments, "testing/answers/test.wa.nonullalign")
with open("ibm2_results.txt", "a") as f:
        result_line = initial + " " + model + " " + str(test_aer) + "\n"
        f.write(result_line)
f.close()
|
[
"tirza.soute@student.uva.nl"
] |
tirza.soute@student.uva.nl
|
5285d9f523e97f90f7f50cb7bb87a905c0a11d68
|
603a3ffca8cb2376dde71dfeedcbbc6738f9c7c0
|
/api/migrations/0039_update_question_type_choices.py
|
faffc7693b4b878409f376ef1d89d49b41460a38
|
[] |
no_license
|
Sarma38/know-your-planet
|
e7029e53a03106d87609b1faaddac1ca31a15021
|
9d891a21554257e963a52cc5dc1b71f39eae7c35
|
refs/heads/master
| 2023-02-04T06:02:27.604631
| 2020-12-25T09:41:43
| 2020-12-25T09:41:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 791
|
py
|
# Generated by Django 3.0.4 on 2020-06-24 07:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("api", "0038_question_hint"),
]
operations = [
migrations.AlterField(
model_name="question",
name="type",
field=models.CharField(
choices=[
("QCM", "Questionnaire à choix multiples"),
(
"QCM-RM",
"Questionnaire à choix multiples avec réponses multiples",
),
("VF", "Vrai ou Faux"),
],
help_text="Le type de question (QCM, V/F, ...)",
max_length=50,
),
),
]
|
[
"raphael.odini@protonmail.com"
] |
raphael.odini@protonmail.com
|
82cedccfaa35b19ded1347aec999489310a990d2
|
32a6f56e68762a9234dc79c3554b9dcf89eab404
|
/utilities/plug_and_play_DSP.py
|
39e46017592d6eaa88c40b54cbb3f86ad20dd1b7
|
[
"MIT"
] |
permissive
|
davidrower/arduinoAudioModule
|
7a0345701c78445dd07825b0c4b2b0c94046e07d
|
d83286353d62a8799e2df8fa1f9b356756e22fdc
|
refs/heads/master
| 2020-04-20T18:15:03.177465
| 2019-02-05T06:16:15
| 2019-02-05T06:16:15
| 169,014,290
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,226
|
py
|
#!/usr/bin/python3
"""
Purpose: Provide a simple plug-and-play environment to test and debug DSP
algorithms.
Author: David Rower
Date: February 4, 2019
"""
print(__doc__ + "="*80)
import numpy as np
import matplotlib.pyplot as plt
def input_signal(t):
frequency = 440 # Hz
w = frequency * 2. * np.pi # angular frequency, rad/s
A = 5 # Volts, similar to a Eurorack audio signal.
return A * np.sin(w*t)
def output_signal(t, history):
#return history[0] # a simple follower
return history[0] - history[1] # difference
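# A possible alternative block to plug in (a sketch; the function name below is
# new and not part of the original script): a short moving average over the
# history buffer acts as a crude low-pass filter, whereas the difference above
# behaves like a discrete differentiator (high-pass).
def output_signal_moving_average(t, history):
    return np.mean(history)  # average of the last n_history samples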
def main():
# Sampling Parameters and Input Signal
total_sample_time = 0.01 # s
sample_rate_khz = 38.5 # kHz
sample_dt = 1./(1000*sample_rate_khz) # seconds
    sample_times = np.linspace(0, total_sample_time, int(total_sample_time/sample_dt))
input_samples = input_signal(sample_times)
output_samples = np.zeros_like(input_samples)
print("Will be sampling for %fms" % (total_sample_time*1000))
print("Sample Rate: %fkHz" % sample_rate_khz)
print("Corresponding dt: %fms" % (sample_dt*1000))
print("Sampled %d points." % len(input_samples))
# Keep buffer of most recent input samples, ignore starting effects
n_history = 4 # keep track of the last 4 samples
buff = np.zeros(n_history) # [s(t), s(t-1), s(t-2), s(t-3)]
# Let's get jiggy wit it
for index, input_signal_sample in enumerate(input_samples):
# most recent samples in buffer
buff = np.roll(buff, 1); buff[0] = input_signal_sample
        # DSP algorithms lose a lot of cool properties when they depend on t
t = sample_times[index] # current time
output_samples[index] = output_signal(t, buff)
# Plot our samples
fig = plt.figure()
plt.xlim([sample_times[0],sample_times[-1]])
plt.ylim([-6,6]) # Eurorack goes between -5 and 5 V
plt.plot(sample_times, input_samples, label="Input")
plt.plot(sample_times, output_samples, label="Output")
plt.xlabel("Time (s)")
plt.ylabel("Signal Level (V)")
plt.title("Input and Output Signals, Sample Rate: %.1fkHz" % sample_rate_khz)
plt.legend()
plt.tight_layout()
plt.show()
if __name__ == '__main__':
main()
|
[
"ki6pmp@gmail.com"
] |
ki6pmp@gmail.com
|
a7c889a4588922e550faecf5e144b2959ce311c1
|
3c4bdf17482c15f2d559a90033fecf9450a6cbf0
|
/I0320091_Exercise3.10.py
|
408286d409fa705d4908f1b979d2aa70106382f7
|
[] |
no_license
|
sasareginaa/Salsabila-Putri-Regina_I0320091_Wildan_Tugas3
|
e5ecb85f5299c808cc8b09c07e8b379d484da19d
|
fd58b0116c8a58d44ef287d2a9372c578a4c04ab
|
refs/heads/main
| 2023-03-20T10:05:21.839456
| 2021-03-20T00:15:02
| 2021-03-20T00:15:02
| 349,176,368
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 247
|
py
|
#Exercise 3.10
tup = ('fisika', 'kimia', 1993, 2017)
print(tup)
# delete the tuple with the del statement
del tup
# then create a new tuple with the desired elements
tup = ('Bahasa', 'Literasi', 2020)
print("Setelah menghapus tuple :", tup)
|
[
"sasaregina06@gmail.com"
] |
sasaregina06@gmail.com
|
cd5af7ea7817ee9550ec01f854cb03eb30d76fde
|
0d914c4c8bd4b502c02f47f8a8d858082a678504
|
/Basic/validation.py
|
c86a9b57e18832d6388fd00bba6b15fe296fd4d7
|
[] |
no_license
|
himalghimire68/python
|
dbdf42bb76b10a91430fedea71f7acb19bb46150
|
19313888c75fd66cf77b80ccf228f18c816a90eb
|
refs/heads/master
| 2020-04-19T08:34:14.970820
| 2019-05-06T04:29:44
| 2019-05-06T04:29:44
| 168,081,640
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 168
|
py
|
name = input("Enter your name:")
while name.isalpha() == False or len(name)<3:
print("Invalid name")
name= input("Enter your name again:")
print("Welcome",name)
|
[
"ghimirehimal68@gmail.com"
] |
ghimirehimal68@gmail.com
|
c0af3f56b1468d7c5b50f6ce9e1ce0ba4c78be78
|
37a50cbf73b0a3dde3de0b26f306f9c962bbb912
|
/hiro/_version.py
|
b1a15980e3aef18f3fbdaa0555eee7b544b0322b
|
[
"MIT"
] |
permissive
|
alisaifee/hiro
|
d5ced281396f9839607a255d54e661f257fd0365
|
f21b512d6d1867bcf7948f9c36a8ccfc20ebb353
|
refs/heads/master
| 2023-07-07T06:42:11.303543
| 2023-01-11T18:59:52
| 2023-01-11T18:59:52
| 14,812,997
| 7
| 4
|
MIT
| 2020-05-26T02:20:58
| 2013-11-30T00:55:10
|
Python
|
UTF-8
|
Python
| false
| false
| 23,718
|
py
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain.
# Generated by versioneer-0.28
# https://github.com/python-versioneer/python-versioneer
"""Git implementation of _version.py."""
import errno
import functools
import os
import re
import subprocess
import sys
from typing import Callable, Dict
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440-pre"
cfg.tag_prefix = ""
cfg.parentdir_prefix = "hiro-"
cfg.versionfile_source = "hiro/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY: Dict[str, str] = {}
HANDLERS: Dict[str, Dict[str, Callable]] = {}
def register_vcs_handler(vcs, method): # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
process = None
popen_kwargs = {}
if sys.platform == "win32":
# This hides the console window if pythonw.exe is used
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
popen_kwargs["startupinfo"] = startupinfo
for command in commands:
try:
dispcmd = str([command] + args)
# remember shell=False, so use git.cmd on windows, not just git
process = subprocess.Popen(
[command] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None),
**popen_kwargs,
)
break
except OSError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = process.communicate()[0].strip().decode()
if process.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, process.returncode
return stdout, process.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for _ in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {
"version": dirname[len(parentdir_prefix) :],
"full-revisionid": None,
"dirty": False,
"error": None,
"date": None,
}
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print(
"Tried directories %s but none started with prefix %s"
% (str(rootdirs), parentdir_prefix)
)
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
with open(versionfile_abs, "r") as fobj:
for line in fobj:
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
except OSError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if "refnames" not in keywords:
raise NotThisMethod("Short version file found")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = {r.strip() for r in refnames.strip("()").split(",")}
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = {r[len(TAG) :] for r in refs if r.startswith(TAG)}
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = {r for r in refs if re.search(r"\d", r)}
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix) :]
# Filter out refs that exactly match prefix or that don't start
# with a number once the prefix is stripped (mostly a concern
# when prefix is '')
if not re.match(r"\d", r):
continue
if verbose:
print("picking %s" % r)
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date,
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None,
}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# GIT_DIR can interfere with correct operation of Versioneer.
# It may be intended to be passed to the Versioneer-versioned project,
# but that should not change where we get our version from.
env = os.environ.copy()
env.pop("GIT_DIR", None)
runner = functools.partial(runner, env=env)
_, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=not verbose)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = runner(
GITS,
[
"describe",
"--tags",
"--dirty",
"--always",
"--long",
"--match",
f"{tag_prefix}[[:digit:]]*",
],
cwd=root,
)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], cwd=root)
# --abbrev-ref was added in git-1.6.3
if rc != 0 or branch_name is None:
raise NotThisMethod("'git rev-parse --abbrev-ref' returned error")
branch_name = branch_name.strip()
if branch_name == "HEAD":
# If we aren't exactly on a branch, pick a branch which represents
# the current commit. If all else fails, we are on a branchless
# commit.
branches, rc = runner(GITS, ["branch", "--contains"], cwd=root)
# --contains was added in git-1.5.4
if rc != 0 or branches is None:
raise NotThisMethod("'git branch --contains' returned error")
branches = branches.split("\n")
# Remove the first line if we're running detached
if "(" in branches[0]:
branches.pop(0)
# Strip off the leading "* " from the list of branches.
branches = [branch[2:] for branch in branches]
if "master" in branches:
branch_name = "master"
elif not branches:
branch_name = None
else:
# Pick the first branch that is returned. Good or bad.
branch_name = branches[0]
pieces["branch"] = branch_name
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparsable. Maybe git-describe is misbehaving?
pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
full_tag,
tag_prefix,
)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
out, rc = runner(GITS, ["rev-list", "HEAD", "--left-right"], cwd=root)
pieces["distance"] = len(out.split()) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_branch(pieces):
"""TAG[[.dev0]+DISTANCE.gHEX[.dirty]] .
The ".dev0" means not master branch. Note that .dev0 sorts backwards
(a feature branch will appear "older" than the master branch).
Exceptions:
1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0"
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def pep440_split_post(ver):
"""Split pep440 version string at the post-release segment.
Returns the release segments before the post-release and the
post-release version number (or -1 if no post-release segment is present).
"""
vc = str.split(ver, ".post")
return vc[0], int(vc[1] or 0) if len(vc) == 2 else None
def render_pep440_pre(pieces):
"""TAG[.postN.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces["closest-tag"]:
if pieces["distance"]:
# update the post release segment
tag_version, post_version = pep440_split_post(pieces["closest-tag"])
rendered = tag_version
if post_version is not None:
rendered += ".post%d.dev%d" % (post_version + 1, pieces["distance"])
else:
rendered += ".post0.dev%d" % (pieces["distance"])
else:
# no commits, use the tag as the version
rendered = pieces["closest-tag"]
else:
# exception #1
rendered = "0.post0.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_post_branch(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] .
The ".dev0" means not master branch.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always -long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None,
}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-branch":
rendered = render_pep440_branch(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-post-branch":
rendered = render_pep440_post_branch(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {
"version": rendered,
"full-revisionid": pieces["long"],
"dirty": pieces["dirty"],
"error": None,
"date": pieces.get("date"),
}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for _ in cfg.versionfile_source.split("/"):
root = os.path.dirname(root)
except NameError:
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None,
}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to compute version",
"date": None,
}
|
[
"ali@indydevs.org"
] |
ali@indydevs.org
|
b75c3ed4caad4d5ce9288bdf4d7bfe94b5cbf4b7
|
338741479e6fa31c904e193c2534f7f2c83ebb03
|
/Item.py
|
7e8185c56c6e9e50f7f7b0acc47ebff3d15324aa
|
[] |
no_license
|
S4more/manacube-sell-bot
|
2e8711b4015bc756840b4409b33d7ef53e2ae761
|
4855b7ee1042cf5500b9b61869b7cb2b19f94ba4
|
refs/heads/main
| 2023-07-31T18:13:53.151082
| 2021-08-25T14:42:42
| 2021-08-25T14:42:42
| 399,627,957
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 193
|
py
|
from enum import Enum
class Item(Enum):
BLAZEROD = "blaze.png"
ANVIL = "anvil.png"
BLAZEROD_PACK = "small.png"
FIRECHARGE_PACK = "fire_charge.png"
ACCEPT = "green.png"
|
[
"guimsc@hotmail.com"
] |
guimsc@hotmail.com
|
fa629d06253c59fec027cbe6e8328cec5f46c1e9
|
8c5b359bea4029c9bed27f7fe12953b119715efd
|
/src/svo_util/linearalg.py
|
24bda75f039a5467e065da0527276828831ea628
|
[
"Unlicense"
] |
permissive
|
svohara/svo_util
|
05f6340a909a0241f9e89a3ff4cfa4c9ccfc612a
|
5c2f3b4558cfce296bead1133cbcc5a0996040b4
|
refs/heads/master
| 2021-01-16T23:06:30.475517
| 2014-04-01T22:36:58
| 2014-04-01T22:36:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,424
|
py
|
'''
Created on Apr 10, 2013
@author: Stephen O'Hara
Utility functions relating to linear algebra
computations.
'''
import scipy.linalg as LA
import scipy as sp
def nullspace(A, atol=1e-13, rtol=0):
'''Compute an approximate basis for the nullspace of A.
The algorithm used by this function is based on the singular value
decomposition of `A`. This implementation was copied
from the scipy cookbook: http://www.scipy.org/Cookbook/RankNullspace
@param A: ndarray
A should be at most 2-D. A 1-D array with length k will be treated
as a 2-D with shape (1, k)
@param atol : float
The absolute tolerance for a zero singular value. Singular values
smaller than `atol` are considered to be zero.
@param rtol : float
The relative tolerance. Singular values less than rtol*smax are
considered to be zero, where smax is the largest singular value.
@note: If both `atol` and `rtol` are positive, the combined tolerance is the
maximum of the two; that is::
tol = max(atol, rtol * smax)
Singular values smaller than `tol` are considered to be zero.
@return: ns ndarray
If `A` is an array with shape (m, k), then `ns` will be an array
with shape (k, n), where n is the estimated dimension of the
nullspace of `A`. The columns of `ns` are a basis for the
nullspace; each element in numpy.dot(A, ns) will be approximately
zero.
'''
A = sp.atleast_2d(A)
_u, s, vh = LA.svd(A)
tol = max(atol, rtol * s[0])
nnz = (s >= tol).sum()
ns = vh[nnz:].conj().T
return ns
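# Illustrative check (values assumed for demonstration only):
# >>> A = sp.array([[1.0, 2.0, 3.0], [2.0, 4.0, 6.0]])  # rank 1, nullspace dim 2
# >>> ns = nullspace(A)
# >>> ns.shape
# (3, 2)
# >>> abs(sp.dot(A, ns)).max() < 1e-12
# True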
def closestOrthogonal(A):
'''
Uses SVD to compute the closest orthogonal
matrix to input matrix A
'''
U,_,Vt = LA.svd(A, full_matrices=False)
return sp.dot(U,Vt)
def isOrthogonal(A, tol=1e-13):
'''
Test whether matrix A is orthogonal, upto
numerical tolerance. If A is orthogonal then
sp.dot(A.T,A) will be the identity matrix.
'''
(_,p) = A.shape
Ix = sp.dot( A.T, A) - sp.eye(p)
return not sp.any(Ix > tol)
def isSameGrassmannPoint(M1,M2, tol=1e-13):
'''
Are matrices M1 and M2 two different matrix
representations of the same point, M, on
the Grassmann? Assume M in Gr(n,k),
M1 and M2 are orthogonal with dimensions (nxk)
True if M1 = sp.dot(M2,X) and sp.dot(X.T,X)==I (kxk)
'''
assert isOrthogonal(M1, tol=tol)
assert isOrthogonal(M2, tol=tol)
Xinv = sp.dot(M1.T,M2)
#if Xinv is orthogonal, then so is X
return isOrthogonal(Xinv, tol=tol)
def chordal_dist(M1, M2, already_orthogonal=False):
'''
The chordal distance is based on the canonical angles
between subspaces. This function computes the chordal
distance between two matrices.
@param M1: A 2D array (matrix) with rows >= cols.
@param M2: A 2D array (matrix) with rows >= cols.
@param already_orthogonal: Specify True if M1 and M2
are already orthogonal matrices, which will save on
unnecessary computation. Otherwise, an SVD will be
used to get an orthogonal representation of each matrix.
'''
(r,c) = M1.shape
assert( r >= c)
(r,c) = M2.shape
assert( r >= c)
if already_orthogonal:
Q1 = M1
Q2 = M2
else:
#get orthonormal bases
#NOTE: in scipy.linalg, using the thin svd to get the orthonormal bases is MUCH FASTER
# than using either the LA.orth(A) function or "economy" mode of QR decomposition!
(Q1,_,_) = LA.svd(M1, full_matrices=False)
(Q2,_,_) = LA.svd(M2, full_matrices=False)
#canonical angles between subspaces
X = sp.dot(Q1.T,Q2)
S = LA.svdvals( X )
#S = cos(Theta)
Theta = sp.arccos(S)
#chordal distance is ||sin(Theta)||_2
return LA.norm( sp.sin(Theta) )
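# Illustrative example (orthonormal bases assumed for demonstration only):
# identical subspaces give distance ~0, while orthogonal planes in R^4 give the
# maximum sqrt(2), since both canonical angles are pi/2.
# >>> Q1 = sp.eye(4)[:, :2]   # span of e1, e2
# >>> Q2 = sp.eye(4)[:, 2:]   # span of e3, e4
# >>> chordal_dist(Q1, Q1, already_orthogonal=True)   # ~0.0
# >>> chordal_dist(Q1, Q2, already_orthogonal=True)   # ~1.4142 (= sqrt(2))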
def grassmann_expmap(tA,p, tol=1e-13):
'''
Computes the manifold exp-map of a point, tA, in the tangent space T,
of grassmann manifold M, with T centered at point p.
@param tA: The point from TpM to be mapped to the manifold
@param p: The "pole", or the point where the tangent space is
incident to the manifold.
@param tol: Numerical tolerance for assuming a number is zero
'''
U, s, Vt = LA.svd(tA, full_matrices=False)
s[ s < tol ]= 0 #set extremely small values to zero
cosTheta = sp.cos(s)
sinTheta = sp.sin(s)
V = Vt.T
exp_tA = sp.dot(p, sp.dot(V, sp.diag(cosTheta))) + sp.dot(U, sp.diag(sinTheta))
return exp_tA
def grassmann_logmap(A,p, tol=1e-13, skip_orthog_check=False):
'''
Computes the manifold log-map of (nxk) orthogonal matrix A,
centered at the point p (i.e. the "pole"), which is also an
(nxk) orthogonal matrix.
The log-map takes a point on the manifold and maps it to the
tangent space which is centered at a given pole.
The dimension of the tangent space is k(n-k),
and points A,p are on Gr(n,k).
@param A: The orthogonal matrix A, representing a point on
the grassmann manifold.
@param p: An orthogonal matrix p, representing a point on
the grassmann manifold where the tangent space will be formed.
Also called the "pole".
@param tol: Numerical tolerance used to set singular values
to exactly zero when within this tolerance of zero.
@param skip_orthog_check: Set to True if you can guarantee
that the inputs are already orthogonal matrices. Otherwise,
this function will check, and if A and/or p are not orthogonal,
the closest orthogonal matrix to A (or p) will be used.
@return: A tuple (log_p(A), ||log_p(A)|| ), representing
the tangent-space mapping of A, and the distance from the
mapping of A to the pole in the tangent space.
'''
#check that A and p are orthogonal, if
# not, then compute orthogonal representations and
# send back a warning message.
if not skip_orthog_check:
if not isOrthogonal(A):
print "WARNING: You are calling grassmann_logmap function on non-orthogonal input matrix A"
print "(This function will compute an orthogonal representation for A using an SVD.)"
A = closestOrthogonal(A)
if not isOrthogonal(p):
print "WARNING: You are calling grassmann_logmap function on non-orthogonal pole p."
print "(This function will compute an orthogonal representation for p using an SVD.)"
p = closestOrthogonal(p)
#p_perp is the orthogonal complement to p, = null(p.T)
p_perp = nullspace(p.T)
#compute p_perp * p_perp.T * A * inv(p.T * A)
T = sp.dot(p.T,A)
try:
Tinv = LA.inv(T)
except(LA.LinAlgError):
Tinv = LA.pinv(T)
X = sp.dot( sp.dot( sp.dot(p_perp,p_perp.T), A), Tinv )
u, s, vh = LA.svd(X, full_matrices=False)
s[ s < tol ]= 0 #set extremely small values to zero
theta = sp.diag( sp.arctan(s) )
logA = sp.dot(u, sp.dot( theta,vh))
normA = sp.trace( sp.dot(logA.T, logA) )
return logA, normA
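# Round-trip sketch (example values assumed, not from the original author):
# mapping a point to the tangent space at pole p and back should recover a
# representative of the same subspace, up to numerical tolerance.
# >>> p = sp.eye(4)[:, :2]
# >>> A = closestOrthogonal(sp.array([[1., 0.], [0., 1.], [1., 1.], [0., 1.]]))
# >>> tA, _ = grassmann_logmap(A, p)
# >>> isSameGrassmannPoint(grassmann_expmap(tA, p), A, tol=1e-8)  # expected True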
def grassmann_frobenius_mean(point_set, ctol=1e-5, max_iter=100):
'''
Computes the mean of a set of orthogonal matrices (of same size)
using iterations to find a matrix that minimizes the frobenius
norm distance to all the points. The code is structured much like
the Karcher mean, but without the tangent space round-trip mapping,
and thus may be used to compare whether the karcher mean returns
better results (perhaps because it better takes into account the
geometry of the space).
'''
for M in point_set:
if not isOrthogonal(M):
raise ValueError("Non-orthogonal point found in input point set.")
N = len(point_set)
pole = point_set[0]
i=0
#step_shrinkage = float(step)/max_iter
print "Iterating to find frobenius mean of %d points..."%N
while (i<max_iter):
accum = sp.zeros_like(pole)
ds = []
#compute distance from pole to each point
for M in point_set:
ds.append( LA.norm(M-pole) ) #frobenius norm
ds = sp.array(ds)
#normalize distances to find weights
ws = list( ds / sp.sum(ds) )
#compute new pole as closest orthogonal matrix to weighted sum
for M,w in zip(point_set,ws):
accum = accum + w*M
prev_pole = pole
pole = closestOrthogonal(accum)
i += 1
delta = LA.norm( pole-prev_pole )
if i % 10 == 0: print "Iter %d: %3.8f"%(i,delta)
if delta <= ctol:
print ""
print "Converged within tolerance after %d steps."%i
break
print ""
return pole
def grassmann_karcher_mean( point_set, weights=None, step=1, ctol=1e-5, max_iter=100, verbose=True):
'''
Compute the karcher mean of a set of points on
a grassmann manifold.
@param point_set: A list of orthogonal matrices of same size, representing
points on a grassmann manifold, nxk
@param weights: If None, an unweighted mean is returned. Otherwise weights
is a list of sample length as point_set, used at each iteration.
@param step: Generally set to 1. Smaller step sizes may converge slower, but
with more iterations may yield a slightly better answer.
@param ctol: Convergence tolerance.
@param max_iter: The maximum number of iterations, at which point the computation
is stopped, even if the convergence tolerance has not yet been achieved.
@return: M the karcher mean.
'''
step = float(step)
#check that all points in point_set are orthogonal
for M in point_set:
if not isOrthogonal(M):
raise ValueError("Non-orthogonal point found in input point set.")
N = len(point_set)
#initialize pole
#pole = point_set[0]
#closest orthogonal matrix to entry-wise mean
em = sp.zeros_like( point_set[0])
for M in point_set:
em += M
em = (1.0/len(point_set))*em
pole = closestOrthogonal(em)
i=0
print "Iterating to find Karcher Mean of %d points..."%N
while (i<max_iter):
accum = sp.zeros_like(pole)
#ds = []
logMs = []
#compute tangent distance from each point to pole
for M in point_set:
logM, _d = grassmann_logmap(M, pole, skip_orthog_check=True)
#ds.append(d)
logMs.append(logM)
#normalize distances to get weights
ws = [1.0/N]*N if weights is None else list( sp.array(weights, dtype=float) / sp.sum(weights) )
#compute new pole as (weighted) sum in tangent space,
# mapped back to grassmann space
for lM,w in zip(logMs,ws):
accum = accum + w*step*lM
prev_pole = pole
pole = grassmann_expmap(accum, prev_pole)
i += 1
_, delta = grassmann_logmap(prev_pole, pole, skip_orthog_check=True)
if verbose:
if i % 10 == 0: print "Iter %d: %3.8f"%(i,delta)
#if isSameGrassmannPoint(pole, pole_old, tol=1e-8):
if delta <= ctol:
print ""
print "Converged within tolerance after %d steps."%i
break
print ""
return pole
if __name__ == '__main__':
pass
|
[
"svohara@gmail.com"
] |
svohara@gmail.com
|
3d1b1418108a42450539841ea0815e19a0d6ce3a
|
cb0195d3d13b4a34daf8f4b3309f18abd91d2699
|
/tailorswift/messages_server.py
|
f9a3f24c6cd8132eff2cc4b49acb075cd1fc5d07
|
[] |
no_license
|
tentacool9/tailor
|
fac85434b04b76585e6a9692cc6bfb318dad4ed1
|
5a0c20e0b9d66ef00bb5332a0dcc3328a9eb30d2
|
refs/heads/master
| 2020-08-06T20:07:58.050751
| 2019-10-06T14:36:29
| 2019-10-06T14:36:29
| 213,135,234
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 689
|
py
|
import socket
import sys
import time
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connect the socket to the port where the server is listening
server_address = ('localhost', 14000)
print('connecting to %s port %s' % server_address, file=sys.stderr)
sock.connect(server_address)
try:
# Send data
message = 'This is the message. It will be repeated.'
f = open("/home/david/Downloads/text", "r")
message = f.read()
sock.sendall(message.encode('UTF-8'))
# Look for the response
amount_received = 0
amount_expected = len(message)
print(amount_expected)
finally:
    print('closing socket', file=sys.stderr)
sock.close()
|
[
"root@freeipa.kifarunix.com"
] |
root@freeipa.kifarunix.com
|
5916642504160ce3ecf54cac78484b24fcc097dc
|
022a62cd4adbb40aa7abd314c80289d71fb3fd40
|
/scraping_scripts/check_file_type.py
|
63a5e8ea046565332cc8216f260be8706bd1d558
|
[] |
no_license
|
linbug/GeoDB
|
cdafd348e67fef7efd7d3cf506b41ab0a98de9db
|
f918e420cd7e12514067c2bd9de5f59494a48fd2
|
refs/heads/master
| 2021-01-21T13:44:14.660924
| 2016-05-16T12:55:16
| 2016-05-16T12:55:16
| 49,030,140
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 784
|
py
|
import subprocess
TIFF_DIR = "scraped_tiffs"
def is_data(file_path):
return "image data" not in subprocess.check_output(["file", "scraped_tiffs/" + file_path])
def get_file_paths():
return subprocess.check_output(["ls", "scraped_tiffs/"]).split('\n')
# correct files look like this --> 2009-01-01.tiff: TIFF image data, big-endian
# corrupted files look like this --> 2001-12-08.tiff: data
if __name__ == '__main__':
#make sure to run this file from /GeoDB directory
print("filtering dates")
dates = filter(is_data, get_file_paths())
print("dates filtered")
for index,i in enumerate(dates):
print("cleaning a date")
dates[index] = i[:-5]
print("about to scrape")
subprocess.check_output(["python", "nasa_scrape.py"] + dates)
|
[
"taylorxlin@gmail.com"
] |
taylorxlin@gmail.com
|
885f7d667aa02938316728198454aa4cfbbda645
|
223b40970d12fb47064005b87c16e9a0dbdaf1b1
|
/1.intro_to_python/26-players_bmi.py
|
38bc273db848eb1bdb5a33439dad096b68a955eb
|
[] |
no_license
|
Ann-Geo/Datacamp-exercises
|
8cd9f1979cf3f90da923d7727208d389232abae8
|
da90f8654e535d7c8b949e33dd8a8f381deaae67
|
refs/heads/master
| 2020-07-07T17:10:36.262904
| 2019-11-18T01:16:40
| 2019-11-18T01:16:40
| 203,417,094
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 781
|
py
|
'''
Instructions
Create a numpy array from the weight_lb list with the correct units. Multiply by 0.453592 to go from pounds to kilograms. Store the resulting numpy array as np_weight_kg.
Use np_height_m and np_weight_kg to calculate the BMI of each player. Use the following equation:
BMI=weight(kg)/height(m)^2
Save the resulting numpy array as bmi.
Print out bmi.
'''
# height and weight are available as regular lists
# Import numpy
import numpy as np
# Create array from height_in with metric units: np_height_m
np_height_m = np.array(height_in) * 0.0254
# Create array from weight_lb with metric units: np_weight_kg
np_weight_kg = np.array(weight_lb)*0.453592
# Calculate the BMI: bmi
bmi = np_weight_kg/np_height_m**2
# Print out bmi
print(bmi)
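# Self-contained illustration with sample values (the real height_in/weight_lb
# lists come from the DataCamp environment and are not defined in this snippet):
# height_in = [74, 78], weight_lb = [180, 215]
# np_height_m  -> [1.8796, 1.9812]
# np_weight_kg -> [81.65, 97.52]   (approximately)
# bmi          -> [23.11, 24.85]   (approximately)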
|
[
"noreply@github.com"
] |
Ann-Geo.noreply@github.com
|
98461f10d55277fa564d9a25974ea9dd4935d7d5
|
344b654cbb8b13d683bcd2cacf522c983287a5fe
|
/Tuple.py
|
37ca53a3f59541bc34d459b6b10a780acc815daa
|
[] |
no_license
|
tchaitanya2288/pyproject01
|
d869522584ab498008e67e81c209472ab20685c2
|
565660b73039db6f0e9ed986504c2f96ba674f9c
|
refs/heads/master
| 2020-03-15T13:18:21.480443
| 2018-06-19T18:44:47
| 2018-06-19T18:44:47
| 132,163,324
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 559
|
py
|
#!/usr/bin/python
tup1 = 3, 5, 2, 5, 6, 78, 4
tup2 = 'street', 'fatbob', 'iron883', 'night'
tup3 = "superman", 1990, "spiderman", 1992, "batman"
tup4 = ()
tup5 = (4,)
print(tup1,type(tup1),id(tup1),len(tup1))
print(tup2,type(tup2),id(tup2),len(tup2))
print(tup3,type(tup3),id(tup3),len(tup3))
print(tup4,type(tup4),id(tup4),len(tup4))
print(tup5,type(tup5),id(tup5),len(tup5))
print(tup1[3])
print(tup2[-1])
print(tup3[:3]) # End-1 : 3-1 : 2 (0,1,2)
# We are not allowed to modify the tuples
#Below line will generate error
#tup1[3] = 100
print (tup1)
|
[
"tchaitanya.2288@gmail.com"
] |
tchaitanya.2288@gmail.com
|
452a418c611316535cbae514d3a9150eed7847a6
|
e79772dc420306e42189f0b352caafe885f82539
|
/bot_toast.py
|
d40d1bd76e5980ef8d8ddc8086c0e4e422b3b794
|
[] |
no_license
|
rima9s/toastytoastpy
|
42dff69ae2800071e4299bd947bb9b417fdc60a4
|
1e7961096888b3e7bd2729484cd93a24794fffae
|
refs/heads/master
| 2020-04-07T04:35:32.609071
| 2018-11-28T07:50:07
| 2018-11-28T07:50:07
| 158,063,306
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,230
|
py
|
#python3.6 required
import random
import asyncio
import aiohttp
import json
import requests
from discord import Game
from discord.ext.commands import Bot
from bot_token import imtoken
TOKEN = imtoken
#client = discord.Client()
BOT_PREFIX = ("?", "!")
client = Bot(command_prefix=BOT_PREFIX)
@client.event
async def on_message(message):
# prevent bot replying to itself
if message.author == client.user:
return
if message.content.startswith('!hello'):
msg = 'hello {0.author.mention}'.format(message)
await client.send_message(message.channel, msg)
elif message.content.startswith('!bot'):
await client.send_message(message.channel, "You summoned?")
#make sure to include with on_message use
await client.process_commands(message)
@client.command(name='8ball',
description="Answers a yes/no question.",
brief="Answers from the beyond.",
aliases=['eight_ball', 'eightball', '8-ball'],
pass_context=True)
async def eight_ball(context):
possible_responses = [
'That. Is a no.',
'Mmm. Really not looking likely mate',
'Wee too hard to say',
'It is a possibility',
'Most certainly so',
]
await client.say(random.choice(possible_responses) + ", " + context.message.author.mention)
@client.command()
async def square(num):
squared_value = int(num) * int(num)
await client.say(str(num) + " squared is " + str(squared_value))
@client.event
async def on_ready():
await client.change_presence(game=Game(name="with humans"))
print("Logged in as: " + client.user.name + ", ID: " + client.user.id)
print('-------')
@client.command()
async def bitcoin():
url = 'https://api.coindesk.com/v1/bpi/currentprice/BTC.json'
response = requests.get(url)
value = response.json()['bpi']['USD']['rate']
await client.say("Bitcoin price: $" + value)
async def list_servers():
await client.wait_until_ready()
while not client.is_closed:
print("Current servers:")
for server in client.servers:
print(server.name)
await asyncio.sleep(600)
client.loop.create_task(list_servers())
client.run(TOKEN)
|
[
"33207744+rima9s@users.noreply.github.com"
] |
33207744+rima9s@users.noreply.github.com
|
ac0a158cccf53d526bb123b1e6c2758fc44413d0
|
80831d77ef6fc3b485be80501b73ccb30ce5e444
|
/networkapi/api_ip/v4/tasks/ipv6.py
|
4a8764bc352393c3efb984933cf873b73e444bb9
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] |
permissive
|
globocom/GloboNetworkAPI
|
e2fdf5a9e6070359e90801bf3e45c2d499f199c5
|
eb27e1d977a1c4bb1fee8fb51b8d8050c64696d9
|
refs/heads/master
| 2023-06-25T21:34:04.923940
| 2023-05-29T12:07:20
| 2023-05-29T12:07:20
| 22,734,387
| 86
| 74
|
Apache-2.0
| 2023-05-29T12:07:21
| 2014-08-07T19:47:43
|
Python
|
UTF-8
|
Python
| false
| false
| 2,286
|
py
|
# -*- coding: utf-8 -*-
from celery.utils.log import get_task_logger
from networkapi import celery_app
from networkapi.api_ip import facade as facade_v3
from networkapi.api_task.classes import BaseTask
from networkapi.usuario.models import Usuario
logger = get_task_logger(__name__)
@celery_app.task(bind=True, base=BaseTask)
def create_ipv6(self, ip_dict, user_id):
msg = {
'object_type': 'ipv6',
'action': 'allocate',
}
self.update_state(
state='PROGRESS',
meta=msg
)
user = Usuario.objects.get(id=user_id)
try:
ip = facade_v3.create_ipv6(ip_dict, user)
except Exception, exception:
        msg['message'] = 'IPv6 {} was not allocated.'.format(ip_dict)
msg['reason'] = str(exception)
raise Exception(msg)
else:
msg['message'] = 'IPv6 {} was allocated with success.'.format(ip)
msg['object_id'] = ip.id
return msg
@celery_app.task(bind=True, base=BaseTask)
def update_ipv6(self, ip_dict, user_id):
msg = {
'object_type': 'ipv6',
'action': 'update',
'object_id': ip_dict.get('id')
}
self.update_state(
state='PROGRESS',
meta=msg
)
ip_obj = facade_v3.get_ipv6_by_id(ip_dict.get('id'))
user = Usuario.objects.get(id=user_id)
try:
facade_v3.update_ipv6(ip_dict, user)
except Exception, exception:
msg['message'] = 'IPv6 {} was not updated.'.format(ip_obj)
msg['reason'] = str(exception)
raise Exception(msg)
else:
msg['message'] = 'IPv6 {} was updated with success.'.format(ip_obj)
return msg
@celery_app.task(bind=True, base=BaseTask)
def delete_ipv6(self, ip_id, user_id):
msg = {
'object_type': 'ipv6',
'action': 'deallocate',
'object_id': ip_id
}
self.update_state(
state='PROGRESS',
meta=msg
)
ip_obj = facade_v3.get_ipv6_by_id(ip_id)
try:
facade_v3.delete_ipv6(ip_id)
except Exception, exception:
msg['message'] = 'IPv6 {} was not deallocated.'.format(ip_obj)
msg['reason'] = str(exception)
raise Exception(msg)
else:
msg['message'] = 'IPv6 {} was deallocated with success.'.format(ip_obj)
return msg
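# Typical invocation sketch (assumed usage): these are Celery tasks and are
# normally queued rather than called directly, e.g.
#   create_ipv6.delay(ip_dict, user_id)
# or create_ipv6.apply_async(args=[ip_dict, user_id]).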
|
[
"juan.augusto@ufrj.br"
] |
juan.augusto@ufrj.br
|
6b9f54811604c518ff13778f4bc7366cb3432c46
|
236a75f55f8583e964161b2b9f49458d9db660e7
|
/Mundo 03 - Estruturas Compostas/Aula 16/075.py
|
7cacac447659658d91679b49b2269ef40bdeda3f
|
[] |
no_license
|
phdfreitas/python
|
963d7f95e4b43920999efc5381b4288ea316484e
|
acf69fc6c002d8345906dce7d463445c1abf17b9
|
refs/heads/master
| 2020-12-01T13:11:07.004553
| 2020-01-20T16:51:03
| 2020-01-20T16:51:03
| 230,636,342
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 616
|
py
|
#
# Created by Pedro Freitas on 14/01/2020.
#
n1 = int(input('\033[1;37mDigite um número: '))
n2 = int(input('\033[1;37mDigite um número: '))
n3 = int(input('\033[1;37mDigite um número: '))
n4 = int(input('\033[1;37mDigite um número: '))
tupla = (n1, n2, n3, n4)
print(f'O número 9 aparece um total de {tupla.count(9)} vezes.')
if tupla.count(3) != 0:
print(f'O primeiro número 3 está na {tupla.index(3)}º posição.')
else:
print(f'Não existe qualquer valor 3 na tupla.')
par = 0
for pos, val in enumerate(tupla):
if val % 2 == 0:
par += 1
print(f'Foi digitado um total de {par} números pares.')
|
[
"pedro.hnq.fraga@gmail.com"
] |
pedro.hnq.fraga@gmail.com
|
0125a633ee97cbec684aefd541baaca392c938e0
|
1b9bd441c500e79042c48570035071dc20bfaf44
|
/sources/shita mekubetzet/index_nedarim.py
|
886890850472a0ed2e8f3d84c6e20ea913e91dd5
|
[] |
no_license
|
Sefaria/Sefaria-Data
|
ad2d1d38442fd68943535ebf79e2603be1d15b2b
|
25bf5a05bf52a344aae18075fba7d1d50eb0713a
|
refs/heads/master
| 2023-09-05T00:08:17.502329
| 2023-08-29T08:53:40
| 2023-08-29T08:53:40
| 5,502,765
| 51
| 52
| null | 2023-08-29T11:42:31
| 2012-08-22T00:18:38
| null |
UTF-8
|
Python
| false
| false
| 1,101
|
py
|
# -*- coding: utf-8 -*-
import urllib
import urllib2
from urllib2 import URLError, HTTPError
import json
import pdb
import os
import sys
from bs4 import BeautifulSoup
import re
p = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, p)
os.environ['DJANGO_SETTINGS_MODULE'] = "sefaria.settings"
from local_settings import *
from functions import *
sys.path.insert(0, SEFARIA_PROJECT_PATH)
from sefaria.model import *
from sefaria.model.schema import AddressTalmud
root=JaggedArrayNode()
root.add_title(u"Shita Mekubetzet on Nedarim", "en", primary=True)
root.add_title(u"שיטה מקובצת על נדרים", "he", primary=True)
root.key = 'shita'
root.sectionNames = ["Daf", "Comment"]
root.depth = 2
root.addressTypes = ["Talmud","Integer"]
root.validate()
'''
"categories" : [
"Commentary2",
"Talmud",
"Bavli",
Index().load({"title":masechet}).categories[2],
"%s" % masechet
'''
index = {
"title": "Shita Mekubetzet on Nedarim",
"categories": ["Commentary2", "Talmud", "Shita Mekubetzet"],
"schema": root.serialize()
}
post_index(index)
|
[
"skaplan@brandeis.edu"
] |
skaplan@brandeis.edu
|
1e637bdb62825237f72095f09903089644ecd5df
|
6d69b249a81e076d79787dd08eb8957908052052
|
/projects/inflection/scripts/lib/download.py
|
576356c64eb4e3b9a2a10e984f8b1efa594a7b14
|
[] |
no_license
|
2vitalik/wiktionary
|
02ee1f1327c3b82fc7b4d7da12083b1431b1eb8b
|
8edae2f7dcf9089084c5ce7033c4fb0b454f4dfa
|
refs/heads/master
| 2023-02-06T11:28:41.554604
| 2023-02-05T22:49:01
| 2023-02-05T22:49:01
| 121,025,447
| 7
| 2
| null | 2021-10-13T17:36:32
| 2018-02-10T15:06:24
|
Lua
|
UTF-8
|
Python
| false
| false
| 2,216
|
py
|
from os.path import exists
from pywikibot import NoPage
from libs.utils.io import write, read
from libs.utils.wikibot import load_page
from projects.inflection.scripts.lib.compare_dir import compare_dir
from projects.inflection.scripts.lib.files import declension_files, \
get_module_title, testcases_files, get_docs_title, tpl_files, get_tpl_title
from projects.inflection.scripts.lib.paths import get_path
def download_page(title, path):
print(f'- {title}', end='')
try:
content = load_page(title) + '\n'
except NoPage:
print(' - No page')
return
content = content.replace("\n-- dev_prefix = 'User:Vitalik/'",
"\ndev_prefix = 'User:Vitalik/'")
if exists(path):
old_content = read(path)
        if old_content != content:
            write(path, content)
            print(' - OK')
        else:
            print(' - Not changed')
else:
write(path, content)
print(' - NEW')
def download_lua(dev, testcases=False):
if not compare_dir(dev, 'lua'):
print('Ошибка: папки `lua` не синхронизированы.')
return
path = get_path(dev, 'lua', '', root=True)
print(f'Скачиваю lua-модули в папку:\n {path}\nМодули:')
files = testcases_files if testcases else declension_files
for file in files:
title = get_module_title(file, dev)
filename = get_path(dev, 'lua', file, '')
download_page(title, filename)
def download_docs(dev):
path = get_path(dev, 'docs', '', root=True)
print(f'Скачиваю документацию в папку:\n {path}\nШаблоны:')
files = declension_files + testcases_files
for file in files:
title = get_docs_title(file, dev)
filename = get_path(dev, 'docs', file, '')
download_page(title, filename)
def download_tpls(dev):
path = get_path(dev, 'tpl', '', root=True)
print(f'Скачиваю шаблоны в папку:\n {path}\nШаблоны:')
for file in tpl_files:
title = get_tpl_title(file, dev)
filename = get_path(dev, 'tpl', file, '')
download_page(title, filename)
|
[
"vitaliy.lyapota@anvileight.com"
] |
vitaliy.lyapota@anvileight.com
|
911a34f2e202d9bb8138efc60668d386070310ec
|
cf3d198a7dc70861e912922e5fb377a02c724af8
|
/dashboard/migrations/0004_apitoken.py
|
24b5767c10cbff3043900aa524d6606f0df3f89e
|
[] |
no_license
|
jgasteiz/comics
|
197052a74a3367a9f3010205039b3c916692d77d
|
400b8e057278f3089c66b611c17cb32fb93daff4
|
refs/heads/master
| 2020-12-30T13:29:30.994563
| 2017-09-06T23:45:42
| 2017-09-06T23:45:42
| 91,228,219
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 652
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-12 21:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dashboard', '0003_auto_20170511_0923'),
]
operations = [
migrations.CreateModel(
name='APIToken',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('token', models.CharField(max_length=128)),
('created', models.DateTimeField(auto_now_add=True)),
],
),
]
|
[
"javi.manzano.oller@gmail.com"
] |
javi.manzano.oller@gmail.com
|
886c4b096d518fc341d5b8fbdd488b1605272998
|
e6bb9eb0c11df4ef85f592ef2fe81207c8793128
|
/sns-sqs-cdk/sns_sqs_cdk/sns_sqs_cdk_stack.py
|
e883e079924b56a1a2e1da3a17d0faed758eefed
|
[
"MIT-0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
wisolee/serverless-patterns
|
0e43d3ba0d1de243b4ee1cb56b975d109c04ea8a
|
fc660f2572d689705098cd2c6bf5fb8099a86749
|
refs/heads/main
| 2023-09-05T15:29:00.091050
| 2021-11-20T12:09:57
| 2021-11-20T12:09:57
| 430,963,227
| 1
| 0
|
NOASSERTION
| 2021-11-23T04:52:46
| 2021-11-23T04:52:45
| null |
UTF-8
|
Python
| false
| false
| 2,083
|
py
|
from aws_cdk import aws_iam as iam
from aws_cdk import aws_sns as sns
from aws_cdk import aws_sns_subscriptions as snssubs
# For consistency with other languages, `cdk` is the preferred import name for
# the CDK's core module. The following line also imports it as `core` for use
# with examples from the CDK Developer's Guide, which are in the process of
# being updated to use `cdk`. You may delete this import if you don't need it.
from aws_cdk import aws_sqs as sqs
from aws_cdk import core as cdk
class SnsSqsCdkStack(cdk.Stack):
def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None:
super().__init__(scope, construct_id, **kwargs)
# Create the queue
MySqsQueue = sqs.Queue(self, "MySqsQueue")
# Create the Topic
MySnsTopic = sns.Topic(self, "MySnsTopic")
# Create an SQS topic subscription object
sqsSubscription = snssubs.SqsSubscription(MySqsQueue)
# Add the SQS subscription to the sns topic
MySnsTopic.add_subscription(sqsSubscription)
# Add policy statement to SQS Policy that is created as part of the new queue
        # Attach the statement to the queue's resource policy; the ArnEquals
        # condition must map the condition key to the topic ARN.
        MySqsQueue.add_to_resource_policy(
            iam.PolicyStatement(actions=['SQS:SendMessage'],
                                effect=iam.Effect.ALLOW,
                                conditions={'ArnEquals': {'aws:SourceArn': MySnsTopic.topic_arn}},
                                resources=[MySqsQueue.queue_arn],
                                principals=[
                                    iam.ServicePrincipal('sns.amazonaws.com')
                                ]))
cdk.CfnOutput(self, "SQS queue name", description="SQS queue name", value=MySqsQueue.queue_name)
cdk.CfnOutput(self, "SQS queue ARN", description="SQS queue arn", value=MySqsQueue.queue_arn)
cdk.CfnOutput(self, "SQS queue URL", description="SQS queue URL", value=MySqsQueue.queue_url)
cdk.CfnOutput(self, "SNS topic name", description="SNS topic name", value=MySnsTopic.topic_name)
cdk.CfnOutput(self, "SNS topic ARN", description="SNS topic ARN", value=MySnsTopic.topic_arn)
|
[
"illarso@amazon.com"
] |
illarso@amazon.com
|
0e5ce1618d98874fa2dc26aa318be0bb43482317
|
8c6867a4019ca8e622df216dc0e2566c51b3d396
|
/utils/__init__.py
|
12c551a4c3abe2ce6beed6641729c9d65151627d
|
[] |
no_license
|
deone/apply
|
3cd36233bc191393b05ef32073fdaa4a3b56fb2e
|
724e024c1455cd193901e2f7f5a8377806ffe974
|
refs/heads/master
| 2021-01-21T04:41:33.605076
| 2019-05-22T12:29:13
| 2019-05-22T12:29:13
| 54,662,123
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,790
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db.models.fields import SlugField
from django.utils.encoding import smart_text
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
from django import forms
class AutoSlugField(SlugField):
"""
Auto populates itself from another field.
It behaves like a regular SlugField.
When populate_from is provided it'll populate itself on creation,
only if a slug was not provided.
"""
def __init__(self, *args, **kwargs):
self.populate_from = kwargs.pop('populate_from', None)
super(AutoSlugField, self).__init__(*args, **kwargs)
def pre_save(self, instance, add):
default = super(AutoSlugField, self).pre_save(instance, add)
if default or not add or not self.populate_from:
return default
inst = instance
for attr in self.populate_from.split('.'):
value = getattr(inst, attr)
inst = value
if value is None:
return default
slug = slugify(smart_text(value))
slug = slug[:self.max_length].strip('-')
# Update the model’s attribute
setattr(instance, self.attname, slug)
return slug
def deconstruct(self):
name, path, args, kwargs = super(AutoSlugField, self).deconstruct()
if self.populate_from is not None:
kwargs['populate_from'] = self.populate_from
return name, path, args, kwargs
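# Added usage sketch (illustrative only; `Article` and its fields are
# hypothetical and a `from django.db import models` import is assumed):
# the slug is copied from `populate_from` the first time the instance is
# saved, unless a slug value was supplied explicitly.
#
# class Article(models.Model):
#     title = models.CharField(max_length=200)
#     slug = AutoSlugField(max_length=200, populate_from='title', blank=True)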
def validate_phone_number(number):
if not number.startswith('+'):
raise forms.ValidationError(_('Prefix phone number with country code'), code='incorrect-number-format')
try:
int(number)
except (ValueError, TypeError):
raise forms.ValidationError(_('Enter a valid phone number'), code='invalid-phone-number')
|
[
"alwaysdeone@gmail.com"
] |
alwaysdeone@gmail.com
|
8c4747c1ee5882480cfa2b64f66505200514615a
|
cb0c06f92659ca997f752a849057fc9a6b37b325
|
/PangrammerHelper.roboFontExt/lib/PangrammerHelper.py
|
d407cf9c99c68f6997a959a67a937baff8893b68
|
[] |
no_license
|
frankrolf/PangrammerHelper
|
9130672e506484afc8df79d8788938ca6be553f8
|
0f2661a7fe2d9bfba4af534a95824ce333f616a6
|
refs/heads/master
| 2021-05-23T13:36:05.053677
| 2020-04-14T17:47:01
| 2020-04-14T17:47:01
| 253,314,509
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,827
|
py
|
'''
Pangrammer Helper
By Mark Simonson
Thanks to Frederik Berlaen for help with hooking up to Space Center
Simple Python version of a Flash/ActionScript "app" I first did in 2003:
http://www.ms-studio.com/Animation/pangrammerhelper.html
Purpose is to help you write pangrams--sentences that contain all the letters
of the alphabet.
Type in the window. The alphabet will shrink as you use up the letters.
Current pangram length displayed at bottom. If you have a Space Center window
open, its current text will be used and updated as you compose your pangram.
Note:
It doesn't do anything with spelled out glyph names (e.g., "/space" or
"/exclam"). It's only designed to work with literal text you can type directly.
Non-alphabetic characters are not included in the count for pangram length.
Small update by Frank Grießhammer on June 12, 2014:
- Add support for mixed-case pangrams
- Add support for non-ASCII characters
'''
import vanilla
import string
from AppKit import NSFont
from mojo.UI import CurrentSpaceCenter
alphabetSetMixed = string.ascii_letters
alphabetSetLower = string.ascii_lowercase
class PangrammerHelper(object):
def __init__(self):
self.alphabetSet = alphabetSetLower
# set up window
self.w = vanilla.Window((420, 150), "Pangrammer Helper")
# set up remaining letters display
self.w.alphabet = vanilla.TextBox((15, 15, -15, 20), self.alphabetSet)
# set up text field, inserting Space Center text if available
if CurrentSpaceCenter() is None:
pangram = "Type your pangram here"
else:
sp = CurrentSpaceCenter()
pangram = sp.getRaw()
self.w.pangramEditor = vanilla.TextEditor(
(15, 40, -15, 70), pangram, callback=self.textEditorCallback)
self.w.counter = vanilla.TextBox(
(-250, 112, -15, 20), "Pangram length: 0", alignment='right')
self.w.checkBox = vanilla.CheckBox(
(15, 110, -15, 20), "",
callback=self.checkBoxCallback, value=False)
self.w.checkBoxLabel = vanilla.TextBox(
# don’t know how to access the NSText of a Vanilla check box label
(32, 112, -15, 20), "Mixed case")
# set the editor font to be monospaced, and the rest to be system font
monospace_font = NSFont.userFixedPitchFontOfSize_(12)
system_font = NSFont.systemFontOfSize_(12)
self.w.pangramEditor.getNSTextView().setFont_(monospace_font)
self.w.alphabet.getNSTextField().setFont_(system_font)
self.w.counter.getNSTextField().setFont_(system_font)
self.w.checkBoxLabel.getNSTextField().setFont_(system_font)
self.w.open()
# set remaining letters and counter to reflect contents of text field
self.textEditorCallback(self)
def checkBoxCallback(self, sender):
if sender.get() == 1:
self.alphabetSet = alphabetSetMixed
else:
self.alphabetSet = alphabetSetLower
self.w.alphabet.set(
self.getRemainingLetters(self.w.pangramEditor.get()))
def textEditorCallback(self, sender):
pangram = self.w.pangramEditor.get()
self.w.alphabet.set(self.getRemainingLetters(pangram))
# determine and display pangram length
self.w.counter.set("Pangram length: %d" % len(pangram))
# update Space Center text
if CurrentSpaceCenter() is not None:
sp = CurrentSpaceCenter()
sp.setRaw(pangram)
def getRemainingLetters(self, pangram):
# determine and update list of unused letters
remainingLettersList = list(set(self.alphabetSet) - set(pangram))
remainingLettersList.sort()
remainingLetters = ''.join(remainingLettersList)
return remainingLetters
PangrammerHelper()
|
[
"frankrolf@gmail.com"
] |
frankrolf@gmail.com
|
7d566fd8b91834ae4874200fd8946801052ad9c4
|
5c39594af519fa0ba9c93fb74eb036f873f322dd
|
/P02/software/BIN/P02b.py
|
04dbba81974379d92cf889a1ab5776f5b696f512
|
[] |
no_license
|
DobleRodriguez/Metaheuristicas
|
575eed653e0199921145ff4861855f04a9244c03
|
e2465fa2c419c6509fa3494ee4406a8bf7fc2a9f
|
refs/heads/master
| 2022-11-11T07:24:29.692921
| 2020-07-05T00:18:33
| 2020-07-05T00:18:33
| 243,312,453
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,325
|
py
|
# Práctica 02.b
# Técnicas de Búsqueda basadas en Poblaciones para el Problema del Agrupamiento con Restricciones
# Metaheurísticas (MH) - Grupo A1
# Universidad de Granada - Grado en Ingeniería Informática - Curso 2019/2020
# Javier Rodríguez Rodríguez - @doblerodriguez
import pathlib as pl
import time
import numpy as np
def init_data(set_name, const_percent):
data = np.loadtxt(pl.Path(__file__).parent /
f"Instancias y Tablas PAR 2019-20/{set_name}_set.dat", delimiter=',')
const = np.loadtxt(pl.Path(__file__).parent /
f"Instancias y Tablas PAR 2019-20/{set_name}_set_const_{const_percent}.const", delimiter=',')
data_distances = np.empty(0)
for i in np.nditer(np.arange(len(data))):
data_distances = np.append(data_distances, np.linalg.norm(data[i] - data[i+1:], axis=1))
# Techo de la distancia máxima (lambda) entre cantidad de restricciones (solo consideramos
# la diagonal superior y sin contar la diagonal)
scale_factor = np.ceil(np.amax(data_distances)) / np.count_nonzero(np.triu(const, 1))
ml_const = np.argwhere(np.tril(const, -1) == 1)
cl_const = np.argwhere(np.tril(const, -1) == -1)
return data, ml_const, cl_const, scale_factor
##############################################################################################
# ALGORITMOS GENÉTICOS (AG)
def evaluation(data, ml_const, cl_const, population, ncluster, scale_factor):
centroids = np.zeros([population.shape[0], ncluster, data.shape[1]])
general_desviation = np.zeros(population.shape[0])
infeasibility = np.empty(population.shape[0])
objective = np.empty(population.shape[0])
for i in np.arange(population.shape[0]):
for j in np.arange(ncluster):
centroids[i,j] = np.mean(data[population[i] == j], axis=0)
for j in np.arange(ncluster):
general_desviation[i] += np.mean(np.linalg.norm(centroids[i,j] - data[population[i] == j], axis=1))/ncluster
infeasibility[i] = np.count_nonzero(population[i, ml_const[:,0]] != population[i, ml_const[:,1]]) + \
np.count_nonzero(population[i, cl_const[:,0]] == population[i, cl_const[:,1]])
objective[i] = general_desviation[i] + (infeasibility[i] * scale_factor)
return general_desviation, infeasibility, objective
# ESTACIONARIO
def stationary_genetic(data, ml_const, cl_const,ncluster, scale_factor, randgen, uniform=True, max_evals=100000,
population_size=50, mutation_odds=0.001):
# Generación de la población
# Matriz de tpoblacion x tdato (tcromosoma)
evals = 0
population = np.empty([population_size, data.shape[0]])
population = randgen.integers(ncluster, size=population.shape)
for i in np.arange(population_size):
ordering = np.bincount(population[i], minlength=ncluster)
check = np.flatnonzero(ordering == 0)
if check.size > 0:
single_elem_clusters = np.flatnonzero(ordering == 1)
changed = randgen.choice(np.flatnonzero(np.isin(population[i], single_elem_clusters, invert=True)), check.size, replace=False)
population[i, changed] = check
general_desviation, infeasibility, objective = evaluation(data, ml_const, cl_const, population, ncluster, scale_factor)
evals += population_size
chromo_size = population.shape[1]
# Mutación a nivel de cromosoma
mutations = mutation_odds * chromo_size
while evals < max_evals:
#print(np.amin(infeasibility))
#print(evals)
# TORNEO
# Utilizamos este mecanismo para permitir que un mismo dato participe en más de un torneo
# pero nunca compita contra sí mismo
parents = np.empty(2)
for j in np.arange(2):
tournament = randgen.choice(population_size, 2, replace=False)
parents[j] = tournament[np.argmin(objective[tournament])]
#print(np.sort(objective))
# CRUCE
# Hacemos los cruces
genex = np.copy(population[parents.astype(int)])
for j in np.arange(2):
segment_size = randgen.integers(chromo_size) * int(not uniform)
segment_start = randgen.integers(chromo_size)
# Copiar segmento
# AQUí CAMBIA SEGÚN SI ES UNIFORME O SEGMENTO FIJO
# Selección de la mitad de índices de elementos del cromosoma NO PERTENECIENTES al segmento fijo, aleatorios sin reemplazo.
segment = np.mod(np.arange(segment_start, segment_start+segment_size), chromo_size)
valid_points = np.flatnonzero(np.isin(np.arange(chromo_size), segment, invert=True))
crossing_points = randgen.choice(valid_points, int(np.rint(valid_points.size/2)), replace=False)
# Creación del hijo donde nos quedamos con el gen de cada padre respectivamente según si está o no en los puntos de cruce
genex[j, crossing_points] = population[parents[int(not j)].astype(int), crossing_points]
# REPARAR HIJOS
check = np.isin(np.arange(ncluster), genex[j].astype(int), invert=True)
empty_clusters = np.flatnonzero(check)
if (empty_clusters.size > 0):
cluster_amount = np.bincount(genex[j].astype(int), minlength=ncluster)
single_elem_clusters = np.flatnonzero(cluster_amount == 1)
# No quiero dejar otro cluster vacío
changed = randgen.choice(np.flatnonzero(np.isin(genex[j], single_elem_clusters, invert=True)), empty_clusters.size, replace=False)
genex[j, changed] = empty_clusters
# MUTACIÓN
dicerolls = randgen.random(2)
#print(dicerolls)
#print(mutations)
mutated = np.flatnonzero(dicerolls < mutations)
#input(mutated)
for j in mutated:
#input("Muta")
cluster_amount = np.bincount(genex[int(j)].astype(int), minlength=ncluster)
single_elem_clusters = np.flatnonzero(cluster_amount == 1)
possible_elements = np.flatnonzero(np.isin(genex[int(j)], single_elem_clusters, invert=True))
gen = randgen.choice(possible_elements)
possible_mutations = np.flatnonzero(np.isin(np.arange(ncluster), genex[int(j), int(gen)] ,invert=True))
genex[int(j), int(gen)] = randgen.choice(possible_mutations)
# COMPETENCIA HIJOS
#print(genex)
new_gd, new_infeas, new_objective = evaluation(data, ml_const, cl_const, genex, ncluster, scale_factor)
#print(new_objective)
evals += 2
#print(objective)
weakest = np.argpartition(objective, -2)[-2:]
#print(objective[weakest])
#print(weakest)
contestants = np.concatenate((genex, population[weakest]))
m_obj = np.append(new_objective, objective[weakest])
m_infeas = np.append(new_infeas, infeasibility[weakest])
m_gd = np.append(new_gd, general_desviation[weakest])
#print(m_obj)
#print(m_infeas)
#print(m_gd)
winners = np.argpartition(m_obj, 2)[:2]
#print(genex)
#print()
#print(contestants)
#print()
#print(population[weakest])
#print("\n")
#print(winners)
#print(weakest)
#input(np.argpartition(m_obj, 2))
#print(objective[weakest])
#print(m_obj)
population[weakest] = contestants[winners]
infeasibility[weakest] = m_infeas[winners]
general_desviation[weakest] = m_gd[winners]
objective[weakest] = m_obj[winners]
#print(objective[weakest])
#input()
best_solution = np.argmin(objective)
return general_desviation[best_solution], infeasibility[best_solution], objective[best_solution]
# GENERACIONAL CON ELITISMO
def generational_genetic(data, ml_const, cl_const,ncluster, scale_factor, randgen, uniform=True, max_evals=100000,
population_size=50, crossover_odds=0.7, mutation_odds=0.001):
# Generación de la población
# Matriz de tpoblacion x tdato (tcromosoma)
evals = 0
population = np.empty([population_size, data.shape[0]])
population = randgen.integers(ncluster, size=population.shape)
for i in np.arange(population_size):
ordering = np.bincount(population[i], minlength=ncluster)
check = np.flatnonzero(ordering == 0)
if check.size > 0:
single_elem_clusters = np.flatnonzero(ordering == 1)
changed = randgen.choice(np.flatnonzero(np.isin(population[i], single_elem_clusters, invert=True)), check.size, replace=False)
population[i, changed] = check
general_desviation, infeasibility, objective = evaluation(data, ml_const, cl_const, population, ncluster, scale_factor)
evals += population_size
crossovers = np.rint(crossover_odds * population_size/2)
mutations = np.rint(mutation_odds * population_size * population.shape[1])
while evals < max_evals:
#print(evals)
# TORNEO
# Utilizamos este mecanismo para permitir que un mismo dato participe en más de un torneo
# pero nunca compita contra sí mismo
parents = np.empty(population_size)
for j in np.arange(population_size):
tournament = randgen.choice(population_size, 2, replace=False)
parents[j] = tournament[np.argmin(objective[tournament])]
# CRUCE
genex = np.copy(population[parents.astype(int)])
# Hacemos los cruces
chromo_size = population.shape[1]
for j in np.arange(crossovers):
for k in np.arange(2):
segment_size = randgen.integers(chromo_size) * int(not uniform)
segment_start = randgen.integers(chromo_size)
# Copiar segmento
# AQUí CAMBIA SEGÚN SI ES UNIFORME O SEGMENTO FIJO
# Selección de la mitad de índices de elementos del cromosoma NO PERTENECIENTES al segmento fijo, aleatorios sin reemplazo.
segment = np.mod(np.arange(segment_start, segment_start+segment_size), chromo_size)
valid_points = np.flatnonzero(np.isin(np.arange(chromo_size), segment, invert=True))
crossing_points = randgen.choice(valid_points, int(np.rint(valid_points.size/2)), replace=False)
# Creación del hijo donde nos quedamos con el gen de cada padre respectivamente según si está o no en los puntos de cruce
genex[int(2*j + k), crossing_points] = population[parents[int(2*j + (not k))].astype(int), crossing_points]
# REPARAR HIJOS
check = np.isin(np.arange(ncluster), genex[int(2*j + k)].astype(int), invert=True)
empty_clusters = np.flatnonzero(check)
if (empty_clusters.size > 0):
cluster_amount = np.bincount(genex[int(2*j + k)].astype(int), minlength=ncluster)
single_elem_clusters = np.flatnonzero(cluster_amount == 1)
# No quiero dejar otro cluster vacío
changed = randgen.choice(np.flatnonzero(np.isin(genex[int(2*j + k)], single_elem_clusters, invert=True)), empty_clusters.size, replace=False)
genex[int(2*j + k), changed] = empty_clusters
# MUTACIÓN
mutated = randgen.choice(population_size, size=int(mutations))
for j in mutated:
cluster_amount = np.bincount(genex[int(j)].astype(int), minlength=ncluster)
single_elem_clusters = np.flatnonzero(cluster_amount == 1)
possible_elements = np.flatnonzero(np.isin(genex[int(j)], single_elem_clusters, invert=True))
gen = randgen.choice(possible_elements)
prev = np.copy(genex[int(j), int(gen)])
possible_mutations = np.flatnonzero(np.isin(np.arange(ncluster), genex[int(j), int(gen)] ,invert=True))
genex[int(j), int(gen)] = randgen.choice(possible_mutations)
if (prev == genex[int(j), int(gen)]):
print("Fuck")
# ELITISMO
new_gd, new_infeas, new_objective = evaluation(data, ml_const, cl_const, genex, ncluster, scale_factor)
evals += population_size
champion = np.argmin(objective)
if (not np.any(np.equal(population[champion], genex).all(axis=1))):
weakest = np.argmax(new_objective)
genex[weakest] = population[champion]
new_gd[weakest] = general_desviation[champion]
new_infeas[weakest] = infeasibility[champion]
new_objective[weakest] = objective[champion]
population = genex
general_desviation = new_gd
infeasibility = new_infeas
objective = new_objective
best_solution = np.argmin(objective)
return general_desviation[best_solution], infeasibility[best_solution], objective[best_solution]
#################################################################################################
#####################################################################################################
np.seterr(all='raise')
info_names = ["Algoritmo", "Dataset", "% Restricciones", "Semilla", "N° clústeres", "Desviación general",
"Infeasibility", "Función objetivo", "Tiempo de ejecución (s)"]
data, ml_const, cl_const, scale_factor = init_data("iris", 10)
stationary_genetic(data, ml_const, cl_const, 3, scale_factor, np.random.default_rng(1))
sets = np.array(["iris", "rand", "ecoli", "newthyroid"])
nclusters = np.array([3, 3, 8, 3])
percents = np.array([10, 20])
seeds = np.array([1, 112, 241, 27, 472])
values = np.stack(np.meshgrid(percents, sets, seeds), -1).reshape(-1,3)
sets, set_repeats = np.unique(values[:,1], return_counts=True)
set_repeats = np.repeat(nclusters, set_repeats)
values = np.concatenate((values, np.array([set_repeats]).T), axis=-1)
with open(pl.Path(__file__).parent / f"solutions_P02b.txt", 'w+') as sol_file:
sol_file.write(
f"{info_names[0]:>14} {info_names[1]:>10} {info_names[2]:>15} {info_names[3]:>7} {info_names[4]:>14} {info_names[5]:>20} {info_names[6]:>13} {info_names[7]:>20} {info_names[8]:>23}\n"
)
for percent,dataset,seed,ncluster in values:
data, ml_const, cl_const, scale_factor = init_data(dataset, percent)
randgen = np.random.default_rng(int(seed))
tic = time.perf_counter()
general_desviation, infeasibility, objective = stationary_genetic(data, ml_const, cl_const, int(ncluster), scale_factor, randgen, uniform=True)
toc = time.perf_counter()
func_name = "AGE-UN"
with open(pl.Path(__file__).parent / f"solutions_P02b.txt",'a+') as sol_file:
sol_file.write(
f"{func_name:>14} {dataset:>10} {percent:>15} {seed:>7} {ncluster:>14} {general_desviation:>20} {infeasibility:>13} {objective:>20} {toc - tic:>23.4f}\n"
)
tic = time.perf_counter()
general_desviation, infeasibility, objective = stationary_genetic(data, ml_const, cl_const, int(ncluster), scale_factor, randgen, uniform=False)
toc = time.perf_counter()
func_name = "AGE-SF"
with open(pl.Path(__file__).parent / f"solutions_P02b.txt",'a+') as sol_file:
sol_file.write(
f"{func_name:>14} {dataset:>10} {percent:>15} {seed:>7} {ncluster:>14} {general_desviation:>20} {infeasibility:>13} {objective:>20} {toc - tic:>23.4f}\n"
)
|
[
"javs.doblerodriguez@gmail.com"
] |
javs.doblerodriguez@gmail.com
|
4e084a47d41a74049654f3690ad886fc9791658b
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02955/s581217127.py
|
41731962f67c5d041874d01e9d777477cc96b20a
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 644
|
py
|
import numpy as np
# 約数の列挙
def make_divisors(n):
divisors = set()
for i in range(1, int(n ** 0.5) + 1):
if n % i == 0:
divisors.add(i)
divisors.add(n // i)
return sorted(list(divisors))
N, K, *A = map(int, open(0).read().split())
A = np.array(A, np.int64)
d = make_divisors(A.sum())
for i in d[::-1]:
r = A % i
r = r[r.nonzero()]
l = r.size
if l == 0:
print(i)
exit()
else:
n = np.arange(l - 1, 0, -1)
r.sort()
np.cumsum(r, out=r)
if np.any((r[:-1] == i * n - (r[-1] - r[:-1])) & (r[:-1] <= K)):
print(i)
exit()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
384c0bd120757e033265b8db5355587e95f62cca
|
0d10f30ec6f22fa3557a406a6226d91eca3c0280
|
/frontier/feincms/module/page/migrations/0002_auto_20180120_1209.py
|
92ea4955da3b7059d7599152e0bf481e58b24a0f
|
[
"MIT"
] |
permissive
|
mcmaxwell/frontier
|
11b1f1defe170806856504a3022ccf5acb558b3d
|
d1f59154108566c652965a43c4b999de33c05c58
|
refs/heads/master
| 2021-05-06T06:20:02.736302
| 2018-02-12T09:29:22
| 2018-02-12T09:29:22
| 113,828,607
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 531
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2018-01-20 12:09
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('page', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='applicationcontent',
name='urlconf_path',
field=models.CharField(choices=[(b'news.urls', b'News application')], max_length=100, verbose_name='application'),
),
]
|
[
"alexander@Alexanders-iMac.local"
] |
alexander@Alexanders-iMac.local
|
dfe951ebaa9f997957def347f68dca2c8ff8797b
|
f84a0d65297a0ca5f6f1ed1b9424386b3b837816
|
/pikachu.py
|
611de1c3853571cfe044f870694b4b152207222f
|
[] |
no_license
|
hmgtech/Draw-Pikachu
|
1df9239b86fcecab652876b8f6fc7fbcd22ed3bf
|
04f7fd2f2aea96848618a69e62aaabae403456db
|
refs/heads/main
| 2023-01-14T07:46:56.101100
| 2020-11-21T11:36:35
| 2020-11-21T11:36:35
| 314,796,721
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,511
|
py
|
import turtle
def getPosition(x, y):
turtle.setx(x)
turtle.sety(y)
print(x, y)
class Pikachu:
def __init__(self):
self.t = turtle.Turtle()
t = self.t
t.pensize(3)
t.speed(9)
t.ondrag(getPosition)
def noTrace_goto(self, x, y):
self.t.penup()
self.t.goto(x, y)
self.t.pendown()
def leftEye(self, x, y):
self.noTrace_goto(x, y)
t = self.t
t.seth(0)
t.fillcolor('#333333')
t.begin_fill()
t.circle(22)
t.end_fill()
self.noTrace_goto(x, y+10)
t.fillcolor('#000000')
t.begin_fill()
t.circle(10)
t.end_fill()
self.noTrace_goto(x+6, y + 22)
t.fillcolor('#ffffff')
t.begin_fill()
t.circle(10)
t.end_fill()
def rightEye(self, x, y):
self.noTrace_goto(x, y)
t = self.t
t.seth(0)
t.fillcolor('#333333')
t.begin_fill()
t.circle(22)
t.end_fill()
self.noTrace_goto(x, y+10)
t.fillcolor('#000000')
t.begin_fill()
t.circle(10)
t.end_fill()
self.noTrace_goto(x-6, y + 22)
t.fillcolor('#ffffff')
t.begin_fill()
t.circle(10)
t.end_fill()
def mouth(self, x, y):
self.noTrace_goto(x, y)
t = self.t
t.fillcolor('#88141D')
t.begin_fill()
l1 = []
l2 = []
t.seth(190)
a = 0.7
for i in range(28):
a += 0.1
t.right(3)
t.fd(a)
l1.append(t.position())
self.noTrace_goto(x, y)
t.seth(10)
a = 0.7
for i in range(28):
a += 0.1
t.left(3)
t.fd(a)
l2.append(t.position())
t.seth(10)
t.circle(50, 15)
t.left(180)
t.circle(-50, 15)
t.circle(-50, 40)
t.seth(233)
t.circle(-50, 55)
t.left(180)
t.circle(50, 12.1)
t.end_fill()
self.noTrace_goto(17, 54)
t.fillcolor('#DD716F')
t.begin_fill()
t.seth(145)
t.circle(40, 86)
t.penup()
for pos in reversed(l1[:20]):
t.goto(pos[0], pos[1]+1.5)
for pos in l2[:20]:
t.goto(pos[0], pos[1]+1.5)
t.pendown()
t.end_fill()
self.noTrace_goto(-17, 94)
t.seth(8)
t.fd(4)
t.back(8)
def leftCheek(self, x, y):
turtle.tracer(False)
t = self.t
self.noTrace_goto(x, y)
t.seth(300)
t.fillcolor('#DD4D28')
t.begin_fill()
a = 2.3
for i in range(120):
if 0 <= i < 30 or 60 <= i < 90:
a -= 0.05
t.lt(3)
t.fd(a)
else:
a += 0.05
t.lt(3)
t.fd(a)
t.end_fill()
turtle.tracer(True)
def rightCheek(self, x, y):
t = self.t
turtle.tracer(False)
self.noTrace_goto(x, y)
t.seth(60)
t.fillcolor('#DD4D28')
t.begin_fill()
a = 2.3
for i in range(120):
if 0 <= i < 30 or 60 <= i < 90:
a -= 0.05
t.lt(3)
t.fd(a)
else:
a += 0.05
t.lt(3)
t.fd(a)
t.end_fill()
turtle.tracer(True)
def colorLeftEar(self, x, y):
t = self.t
self.noTrace_goto(x, y)
t.fillcolor('#000000')
t.begin_fill()
t.seth(330)
t.circle(100, 35)
t.seth(219)
t.circle(-300, 19)
t.seth(110)
t.circle(-30, 50)
t.circle(-300, 10)
t.end_fill()
def colorRightEar(self, x, y):
t = self.t
self.noTrace_goto(x, y)
t.fillcolor('#000000')
t.begin_fill()
t.seth(300)
t.circle(-100, 30)
t.seth(35)
t.circle(300, 15)
t.circle(30, 50)
t.seth(190)
t.circle(300, 17)
t.end_fill()
def body(self):
t = self.t
t.fillcolor('#F6D02F')
t.begin_fill()
t.penup()
t.circle(130, 40)
t.pendown()
t.circle(100, 105)
t.left(180)
t.circle(-100, 5)
t.seth(20)
t.circle(300, 30)
t.circle(30, 50)
t.seth(190)
t.circle(300, 36)
t.seth(150)
t.circle(150, 70)
t.seth(200)
t.circle(300, 40)
t.circle(30, 50)
t.seth(20)
t.circle(300, 35)
#print(t.pos())
t.seth(240)
t.circle(105, 95)
t.left(180)
t.circle(-105, 5)
t.seth(210)
t.circle(500, 18)
t.seth(200)
t.fd(10)
t.seth(280)
t.fd(7)
t.seth(210)
t.fd(10)
t.seth(300)
t.circle(10, 80)
t.seth(220)
t.fd(10)
t.seth(300)
t.circle(10, 80)
t.seth(240)
t.fd(12)
t.seth(0)
t.fd(13)
t.seth(240)
t.circle(10, 70)
t.seth(10)
t.circle(10, 70)
t.seth(10)
t.circle(300, 18)
t.seth(75)
t.circle(500, 8)
t.left(180)
t.circle(-500, 15)
t.seth(250)
t.circle(100, 65)
# 左脚
t.seth(320)
t.circle(100, 5)
t.left(180)
t.circle(-100, 5)
t.seth(220)
t.circle(200, 20)
t.circle(20, 70)
t.seth(60)
t.circle(-100, 20)
t.left(180)
t.circle(100, 20)
t.seth(300)
t.circle(10, 70)
t.seth(60)
t.circle(-100, 20)
t.left(180)
t.circle(100, 20)
t.seth(10)
t.circle(100, 60)
# 横向
t.seth(180)
t.circle(-100, 10)
t.left(180)
t.circle(100, 10)
t.seth(5)
t.circle(100, 10)
t.circle(-100, 40)
t.circle(100, 35)
t.left(180)
t.circle(-100, 10)
t.seth(290)
t.circle(100, 55)
t.circle(10, 50)
t.seth(120)
t.circle(100, 20)
t.left(180)
t.circle(-100, 20)
t.seth(0)
t.circle(10, 50)
t.seth(110)
t.circle(100, 20)
t.left(180)
t.circle(-100, 20)
t.seth(30)
t.circle(20, 50)
t.seth(100)
t.circle(100, 40)
t.seth(200)
t.circle(-100, 5)
t.left(180)
t.circle(100, 5)
t.left(30)
t.circle(100, 75)
t.right(15)
t.circle(-300, 21)
t.left(180)
t.circle(300, 3)
t.seth(43)
t.circle(200, 60)
t.right(10)
t.fd(10)
t.circle(5, 160)
t.seth(90)
t.circle(5, 160)
t.seth(90)
t.fd(10)
t.seth(90)
t.circle(5, 180)
t.fd(10)
t.left(180)
t.left(20)
t.fd(10)
t.circle(5, 170)
t.fd(10)
t.seth(240)
t.circle(50, 30)
t.end_fill()
self.noTrace_goto(130, 125)
t.seth(-20)
t.fd(5)
t.circle(-5, 160)
t.fd(5)
self.noTrace_goto(166, 130)
t.seth(-90)
t.fd(3)
t.circle(-4, 180)
t.fd(3)
t.seth(-90)
t.fd(3)
t.circle(-4, 180)
t.fd(3)
self.noTrace_goto(168, 134)
t.fillcolor('#F6D02F')
t.begin_fill()
t.seth(40)
t.fd(200)
t.seth(-80)
t.fd(150)
t.seth(210)
t.fd(150)
t.left(90)
t.fd(100)
t.right(95)
t.fd(100)
t.left(110)
t.fd(70)
t.right(110)
t.fd(80)
t.left(110)
t.fd(30)
t.right(110)
t.fd(32)
t.right(106)
t.circle(100, 25)
t.right(15)
t.circle(-300, 2)
##############
#print(t.pos())
t.seth(30)
t.fd(40)
t.left(100)
t.fd(70)
t.right(100)
t.fd(80)
t.left(100)
t.fd(46)
t.seth(66)
t.circle(200, 38)
t.right(10)
t.fd(10)
t.end_fill()
t.fillcolor('#923E24')
self.noTrace_goto(126.82, -156.84)
t.begin_fill()
t.seth(30)
t.fd(40)
t.left(100)
t.fd(40)
t.pencolor('#923e24')
t.seth(-30)
t.fd(30)
t.left(140)
t.fd(20)
t.right(150)
t.fd(20)
t.left(150)
t.fd(20)
t.right(150)
t.fd(20)
t.left(130)
t.fd(18)
t.pencolor('#000000')
t.seth(-45)
t.fd(67)
t.right(110)
t.fd(80)
t.left(110)
t.fd(30)
t.right(110)
t.fd(32)
t.right(106)
t.circle(100, 25)
t.right(15)
t.circle(-300, 2)
t.end_fill()
self.cap(-134.07, 147.81)
self.mouth(-5, 25)
self.leftCheek(-126, 32)
self.rightCheek(107, 63)
self.colorLeftEar(-250, 100)
self.colorRightEar(140, 270)
self.leftEye(-85, 90)
self.rightEye(50, 110)
t.hideturtle()
def cap(self, x, y):
self.noTrace_goto(x, y)
t = self.t
t.fillcolor('#CD0000')
t.begin_fill()
t.seth(200)
t.circle(400, 7)
t.left(180)
t.circle(-400, 30)
t.circle(30, 60)
t.fd(50)
t.circle(30, 45)
t.fd(60)
t.left(5)
t.circle(30, 70)
t.right(20)
t.circle(200, 70)
t.circle(30, 60)
t.fd(70)
# print(t.pos())
t.right(35)
t.fd(50)
t.circle(8, 100)
t.end_fill()
self.noTrace_goto(-168.47, 185.52)
t.seth(36)
t.circle(-270, 54)
t.left(180)
t.circle(270, 27)
t.circle(-80, 98)
t.fillcolor('#444444')
t.begin_fill()
t.left(180)
t.circle(80, 197)
t.left(58)
t.circle(200, 45)
t.end_fill()
self.noTrace_goto(-58, 270)
t.pencolor('#228B22')
t.dot(35)
self.noTrace_goto(-30, 280)
t.fillcolor('#228B22')
t.begin_fill()
t.seth(100)
t.circle(30, 180)
t.seth(190)
t.fd(15)
t.seth(100)
t.circle(-45, 180)
t.right(90)
t.fd(15)
t.end_fill()
t.pencolor('#000000')
def start(self):
self.body()
def main():
print('Painting the Pikachu... ')
turtle.screensize(800, 700)
turtle.title('Pikachu')
pikachu = Pikachu()
pikachu.start()
turtle.mainloop()
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
hmgtech.noreply@github.com
|
368902af6f99e1499622839e81971e00c35af28c
|
58eaf5b25b73a5451d28158d199de56addbcdd3d
|
/tubers/settings.py
|
f03abafc60c65383299beba18685c8bcd08bb91b
|
[] |
no_license
|
vanshika2900/ytubers
|
aba36ad8897993de1c955d7b1bfdc8cd415e2686
|
eb349eb2a6ba470543bef0d2f81ec4f4f98cc931
|
refs/heads/main
| 2023-07-15T08:41:41.987159
| 2021-09-01T04:38:22
| 2021-09-01T04:38:22
| 401,345,328
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,057
|
py
|
"""
Django settings for tubers project.
Generated by 'django-admin startproject' using Django 3.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-xn5s7c@mhd79aqj*op(!(ufxr+j()5krp%7!1a=jj(k9=5n-g&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['127.0.0.1','middletuber.netlify.app']
LOGIN_REDIRECT_URL = 'dashboard'
#USE_X_FORWARDED_HOST=True
#SECURE_PROXY_SSL_HEADER=('HTTP_X_FORWARDED_PROTO','https')
# Application definition
INSTALLED_APPS = [
'hiretubers.apps.HiretubersConfig',
'accounts.apps.AccountsConfig',
'youtubers.apps.YoutubersConfig',
'webpages.apps.WebpagesConfig',
'djangocms_admin_style',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'ckeditor',
'django.contrib.sites',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'allauth.socialaccount.providers.facebook',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tubers.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'tubers.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'lcotubers',
'USER' : 'postgres',
'PASSWORD' : 'qwerty',
'HOST' : 'localhost'
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT=os.path.join(BASE_DIR,'static')
STATICFILES_DIRS= [
os.path.join(BASE_DIR,'tubers/static')
]
SITE_ID = 1
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[
"vanshikagupta2900@gmail.com"
] |
vanshikagupta2900@gmail.com
|
29ebf8a71c177e98b8e7907300a0df0530c60284
|
acbbc13858038a7bfb2a18e7b477b775722e595b
|
/pe573.py
|
fc4548491a02a3e87f0b0c0eb2c984b88d8f2c89
|
[] |
no_license
|
spbgithub/projecteuler
|
584f49a40b1fc8ea00e629e1832ade49ccdbd950
|
ee8dafe7ffae534a5657b816c5623bc8e5c62e94
|
refs/heads/master
| 2021-01-02T23:00:34.809458
| 2017-08-07T22:36:30
| 2017-08-07T22:36:30
| 99,438,111
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,522
|
py
|
'''Unfair race
Problem 573
n runners in very different training states want to compete in a race. Each one of them is given a different starting number k (1 <= k <= n) according to his (constant) individual racing speed being v_k=k/n.
In order to give the slower runners a chance to win the race, n different starting positions are chosen randomly (with uniform distribution) and independently from each other within the racing track of length 1. After this, the starting position nearest to the goal is assigned to runner 1, the next nearest starting position to runner 2 and so on, until finally the starting position furthest away from the goal is assigned to runner n. The winner of the race is the runner who reaches the goal first.
Interestingly, the expected running time for the winner is 1/2, independently of the number of runners. Moreover, while it can be shown that all runners will have the same expected running time of n/(n+1), the race is still unfair, since the winning chances may differ significantly for different starting numbers:
Let P_{n,k} be the probability for runner k to win a race with n runners and E_n = \sum_{k=1}^{n} k * P_{n,k} be the expected starting number of the winner in that race. It can be shown that, for example, P_{3,1} = 4/9, P_{3,2} = 2/9, P_{3,3} = 1/3 and E_3 = 17/9 for a race with 3 runners.
You are given that E_4=2.21875, E_5=2.5104 and E_10=3.66021568.
Find E_1000000 rounded to 4 digits after the decimal point.'''
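# Added illustrative sketch (not part of the original solution): a quick Monte
# Carlo check of the small case quoted above. Runner k gets the k-th nearest
# random start d_k and needs time d_k / (k/n) = d_k * n / k, so the winner is
# the runner minimising that quantity; for n = 3 the average winning number
# should land near E_3 = 17/9 ~ 1.8889. The helper name `estimate_E` and the
# trial count are arbitrary; uncomment to run.
#
# import random
# def estimate_E(n, trials=200000):
#     total = 0
#     for _ in range(trials):
#         d = sorted(random.random() for _ in range(n))
#         total += min(range(1, n + 1), key=lambda k: d[k - 1] * n / k)
#     return total / trials
# print(estimate_E(3))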
import math
def stirlingest(n,k):
floatn = float(n)
floatk = float(k)
return math.sqrt(floatn/(2.0*math.pi*floatk*(floatn - floatk)))
def prodfrac(n,k):
nkn = n - 2*k + 1
prodval = 1.0
nminkovern = float(n-k)/float(n)
for j in range(1,k):
prodval *= float((n-j)*k*(n-k))/float((k-j)*n*n)
while prodval > 1.0:
prodval *= nminkovern
nkn -= 1
while nkn > 0:
prodval *= nminkovern
nkn -= 1
return prodval
def nextterm(lastterm, n, k):
if k == 1: return math.exp((n-1)*(math.log(n-1) - math.log(n)))
v1 = float(k-1)/float(k)
v2 = float(n-k+1)/float(n-k)
return lastterm / math.exp(float(k-1)*math.log(v1) + float(n-k)*math.log(v2))
nn = 1000000
kk = 1
kkmax = nn/2
#nextval = prodfrac(nn,kk)
nextval = math.exp((nn-1)*(math.log(nn-1) - math.log(nn)))
print(nextval)
totalval = 1 + 2.0*nextval
kk += 1
while kk < kkmax:
#if kk % 10000 == 0: print(kk)
nextval = nextterm(nextval, nn, kk)
totalval += 2.0*nextval
kk += 1
nextval = nextterm(nextval, nn, kk)
totalval += nextval
if nn % 2 != 0:
totalval += nextval
print(totalval)
|
[
"seanpboyd1968@gmail.com"
] |
seanpboyd1968@gmail.com
|
005d99e2d1cb9d0b4460f6e3fa4a93214d587df2
|
143f2e6af2d318152bbf5134d37fa21c4000fe0e
|
/test_11.py
|
12511cd6a847b453f66030b4c0a75f040e27e6b8
|
[] |
no_license
|
zengzhgzz/pythontest
|
e5421d36826d11b7abecee7d60dcd69b5fb34d29
|
a3cc459744aa915d071eedb4884360bee585b66f
|
refs/heads/master
| 2021-06-10T14:18:21.008218
| 2017-02-07T16:34:14
| 2017-02-07T16:34:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
py
|
# -*- coding: utf-8 -*-
height = 1.75
weight = 80.5
bmi =weight/height**2
if bmi<18.5:
print("过轻")
elif bmi<25:
print("正常")
elif bmi<28:
print("过重")
elif bmi<32:
print("肥胖")
else:
print("严重肥胖")
print("BMI:%.2f%" % bmi)
|
[
"increasezhg@gmai.com"
] |
increasezhg@gmai.com
|
a0369de1729865f39cb4ee3552c69ab24c3a9398
|
4ba4a21fa8dfb51c62d788a45ccdf41d0460155b
|
/src/submain.py
|
b01ffcc843506498bc13799ae493941ff7db1a23
|
[] |
no_license
|
vaig-2019/HackthonGrandFinal
|
ec655983e26143a50938ece639908dd422c376a5
|
87c56d3bfa27ce45b927c69b98849ba4b1ccd40d
|
refs/heads/master
| 2020-07-26T19:41:59.716407
| 2019-08-25T14:59:34
| 2019-08-25T14:59:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,609
|
py
|
import eventlet
import os
import json
from flask import Flask, render_template
from flask_socketio import SocketIO
import numpy as np
import io
from OpenSSL import SSL, crypto
from PIL import Image
#######################################
# from chatbot import chatbot
# from chatbot.chatbot import response,classify
#######################################
from cv2 import cv2
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import time
from random import randrange
#######################################
# from facedetect_folder.facedetect import detect_face,reset_keras
# from keras.models import load_model
# from statistics import mode
# from facedetect_folder.utils.datasets import get_labels
# from facedetect_folder.utils.inference import detect_faces
# from facedetect_folder.utils.inference import draw_text
# from facedetect_folder.utils.inference import draw_bounding_box
# from facedetect_folder.utils.inference import apply_offsets
# from facedetect_folder.utils.inference import load_detection_model
# from facedetect_folder.utils.preprocessor import preprocess_input
# import face_recognition
# import code_tfpose
# from code_tfpose import *
# from keras.backend.tensorflow_backend import set_session
# from keras.backend.tensorflow_backend import clear_session
# from keras.backend.tensorflow_backend import get_session
# import tensorflow
# import gc
#######################################
eventlet.monkey_patch()
app = Flask(__name__)
socketio = SocketIO(app)
@app.route('/')
def index(): # set index
return render_template('index.html')
##############################################ACTIVE DETECT FACE##################################################
@socketio.on('active_face2')
def activez_face(json_str):
data = json_str
if data=='active_face':
# #sua lai phan active face known_face_encodings,known_face_names
# global emotion_model_path
# emotion_model_path = 'facedetect_folder/models/emotion_model.hdf5'
# global emotion_labels
# emotion_labels = get_labels('fer2013')
# global emotion_classifier
# emotion_classifier = load_model(emotion_model_path)
# global kn_image
# kn_image = face_recognition.load_image_file("facedetect_folder/faceknow/test.jpg")
# global kn_face_encoding
# kn_face_encoding = face_recognition.face_encodings(kn_image)[0]
# global known_face_encodings
# known_face_encodings = [kn_face_encoding]
# global known_face_names
# known_face_names = ["An"]
print(8*'active_face')
repliesmess="done_active"
socketio.emit('done_active_face', data=repliesmess)
##############################################ACTIVE DETECT FACE##################################################
@socketio.on('publish') # send mess
def handle_publish(json_str):
data = json_str
# ##################### (nhớ mở những dòng ở giữa đây)
# image_data = cv2.imdecode(np.frombuffer(data, np.uint8), -1)
# #####################
# #####################
# file_send,name,emotion_text=detect_face(image_data,known_face_names,known_face_encodings,emotion_classifier,emotion_labels)
# #####################
# #####################
socketio.emit('mqtt_message', data=data)
# # #####################
# if (name != 'Unknown'):
# check_time=time.time()
# socketio.emit('check_time', data=check_time)
# socketio.emit('mqtt_message_name', data=name)
# socketio.emit('mqtt_message_emotion', data=emotion_text)
# #####################
# #****************************#test
# if data == 'newblob':
# for i in range(1000):
# if i == 900:
# check_time=time.time()
# socketio.emit('check_time', data=check_time)
# socketio.emit('mqtt_message_name', data='name')
# socketio.emit('mqtt_message_emotion', data='emotion_text')
##############################################DEACTIVE DETECT FACE##################################################
@socketio.on('deactive_face')
def deactive_face(json_str):
data = json_str
if data=='deactive_face':
######################
# sess = get_session()
# clear_session()
# sess.close()
# sess = get_session()
# try:
# del emotion_model_path
# del emotion_labels
# del emotion_classifier
# del kn_image
# del kn_face_encoding
# del known_face_encodings
# del known_face_names
# except:
# pass
# print(gc.collect()) # if it's done something you should see a number being outputted
# # use the same config as you used to create the session
# config = tensorflow.ConfigProto()
# config.gpu_options.per_process_gpu_memory_fraction = 1
# config.gpu_options.visible_device_list = "0"
# set_session(tensorflow.Session(config=config))
# config = tf.ConfigProto()
# config.gpu_options.allow_growth=True
# sess = tf.Session(config=config)
print(60*'@')
######################
repliesmess="done_deactive"
print(repliesmess)
socketio.emit('done_deactive_face', data=repliesmess)
##############################################DEACTIVE DETECT FACE##################################################
##############################################ACTIVE TRAIN##################################################
@socketio.on('active_train')
# action_labels= ['jump','kick','punch','run','sit','squat','stand','walk','wave']
######## BAI 1 ###################
# action_labels_1 = ['none','none','none','none','sit','sit','stand','none','none']
######## BAI 2 ###################
# action_labels_2 = ['none','none','none','none','none','squat','stand','none','none']
# ######## BAI 3 ###################
# action_labels_3 = ['jump','none','none','none','none','none','none','none','none']
# ######## BAI 4 ###################
# action_labels_4= ['none','none','none','run','none','none','none','none','none']
def active_train_zz(json_str):
data = json_str
# global action_labels
# global my_detector
# global multipeople_classifier,multiperson_tracker
# if data=='active_train1':
# ######################
# action_labels = ['none','none','none','none','sit','sit','stand','none','none']
# my_detector = SkeletonDetector(OpenPose_MODEL, image_size)
# multipeople_classifier,multiperson_tracker=multi(LOAD_MODEL_PATH,action_labels)
# print(80*'*')
# ######################
# repliesmess="done_active"
# print(repliesmess)
# socketio.emit('done_active_train', data=repliesmess)
# socketio.emit('done_select_td', data='1')
# if data=='active_train2':
# ######################
# action_labels = ['none','none','none','none','none','squat','stand','none','none']
# my_detector = SkeletonDetector(OpenPose_MODEL, image_size)
# multipeople_classifier,multiperson_tracker=multi(LOAD_MODEL_PATH,action_labels)
# print(80*'*')
# ######################
# repliesmess="done_active"
# print(repliesmess)
# socketio.emit('done_active_train', data=repliesmess)
# socketio.emit('done_select_td', data='2')
# if data=='active_train3':
# ######################
# action_labels = ['jump','none','none','none','none','none','none','none','none']
# my_detector = SkeletonDetector(OpenPose_MODEL, image_size)
# multipeople_classifier,multiperson_tracker=multi(LOAD_MODEL_PATH,action_labels)
# print(80*'*')
# ######################
# repliesmess="done_active"
# print(repliesmess)
# socketio.emit('done_active_train', data=repliesmess)
# socketio.emit('done_select_td', data='3')
# if data=='active_train4':
# ######################
# action_labels = ['none','none','none','run','none','none','none','none','none']
# my_detector = SkeletonDetector(OpenPose_MODEL, image_size)
# multipeople_classifier,multiperson_tracker=multi(LOAD_MODEL_PATH,action_labels)
# print(80*'*')
# ######################
# repliesmess="done_active"
# print(repliesmess)
print(80*'*')
socketio.emit('done_active_train', data='done_active')
socketio.emit('done_select_td', data='4')
# @socketio.on('select_td')
# def select_thd(json_str):
# data = json_str
# if data=='1':
# print('bai the duc 1')
# socketio.emit('done_active_train', data='done_active')
# socketio.emit('done_select_td', data='1')
# if data=='2':
# print('bai the duc 2')
# socketio.emit('done_active_train', data='done_active')
# socketio.emit('done_select_td', data='2')
# if data=='3':
# print('bai the duc 3')
# socketio.emit('done_active_train', data='done_active')
# socketio.emit('done_select_td', data='3')
# if data=='4':
# print('bai the duc 4')
# socketio.emit('done_active_train', data='done_active')
# socketio.emit('done_select_td', data='4')
@socketio.on('publish_train') # send mess
def handle_publish_train(json_str):
data = json_str
######################
# image_data = cv2.imdecode(np.frombuffer(data, np.uint8), -1)
# ######################
# #*******************************************************************************************************#
# #*******************************************************************************************************#
# ######################
# count_human,label,file_send=code_main(my_detector,multipeople_classifier,multiperson_tracker,image_data)
# ######################
# #*******************************************************************************************************#
# ######################
# socketio.emit('mqtt_message', data=file_send)
# print(2*'count_human',count_human)
# if count_human!=0:
# if label!='none':
# socketio.emit('label_human', data=label)
# check_time=time.time()
# socketio.emit('check_time_train', data=check_time)
# if count_human=='none':
# # time.sleep(20)
# check_time=time.time()
# socketio.emit('check_time_canhbao', data=check_time)
# print('GUI CANH BAO KHONG THAY NGUOI GUI CANH BAO')
######################
if data == 'newblob':
for i in range(1000):
if i == 900:
check_time=time.time()
socketio.emit('label_human', data=str(randrange(10)))
socketio.emit('check_time_train', data=check_time)
##############################################ACTIVE TRAIN##################################################
##############################################DEACTIVE DETECT TRAIN##################################################
@socketio.on('de_active_train')
def deactive_train(json_str):
data = json_str
if data=='deactive_train':
# ######################
# sess = get_session()
# clear_session()
# sess.close()
# sess = get_session()
# try:
# del my_detector
# del multipeople_classifier
# del multiperson_tracker
# except:
# pass
# print(gc.collect()) # if it's done something you should see a number being outputted
# # use the same config as you used to create the session
# config = tensorflow.ConfigProto()
# config.gpu_options.per_process_gpu_memory_fraction = 1
# config.gpu_options.visible_device_list = "0"
# set_session(tensorflow.Session(config=config))
# config = tf.ConfigProto()
# config.gpu_options.allow_growth=True
# sess = tf.Session(config=config)
######################
print(60*'k')
repliesmess="done_deactive"
print(repliesmess)
socketio.emit('done_deactive_train', data=repliesmess)
##############################################DEACTIVE DETECT TRAIN##################################################
# ######################
@socketio.on('sendmess')
def handle_sendmess(json_str):
repliesmess=""
data = json_str
print("+++++++++")
print(data,type(data))
# repliesmess=response(data)
repliesmess=data
socketio.emit('replieszzz', data=repliesmess)
######################
######################
if __name__ == '__main__':
#socketio.run(app, host='127.0.0.1', port=5000, use_reloader=True, debug=True)
socketio.run(app, host='0.0.0.0',port=5000,use_reloader=False, debug = True,certfile="cert.pem", keyfile="key.pem")
|
[
"noreply@github.com"
] |
vaig-2019.noreply@github.com
|
d2977a5b549d836f6ba15a1a155910eff02249ba
|
ae6c2a6fa37613ac31b2bd3537b3276c9b333632
|
/server/plugins/messages/messages.py
|
15551365d17c42cb1824ca8e9a72fa09fd3e1319
|
[
"Apache-2.0"
] |
permissive
|
salopensource/sal
|
435a31904eb83048c02c9fbff02bbf832835d1b4
|
0895106c6729d5465da5e21a810e967a73ed6e24
|
refs/heads/main
| 2023-08-03T06:53:40.142752
| 2023-07-28T15:51:08
| 2023-07-28T15:51:08
| 35,883,375
| 227
| 94
|
Apache-2.0
| 2023-07-28T15:51:10
| 2015-05-19T13:21:57
|
Python
|
UTF-8
|
Python
| false
| false
| 2,512
|
py
|
"""Dashboard plugin to collate common messages.
This plugin displays a table of messages, sorted by message type, and
counts of their occurance, with links to list views of affected machines.
This plugin takes two configuration settings, set by using the Sal
admin page https://<sal_root>/admin/server/salsetting to add settings.
Settings:
MessagesPluginThreshold: Integer value. Messages must appear on this
number of machines (filtered for this view) to appear in the
plugin. Defaults to 5.
MessagesPluginLevel: One of the allowed `Message.MESSAGE_TYPES`
values. This setting controls which levels of messages are
considered for display. Messages with the configured level and
above are displayed. Default is "ERROR".
e.g. "WARNING" would show messages of WARNING level and above, so
"WARNING" and "ERROR".
"""
import urllib.parse
from django.db.models import Count
import sal.plugin
from server.models import Message
from server.utils import get_setting
DEFAULT_THRESHOLD = 5
class Messages(sal.plugin.Widget):
description = 'List of common errors and warnings.'
supported_os_families = [sal.plugin.OSFamilies.darwin]
def get_context(self, queryset, **kwargs):
context = self.super_get_context(queryset, **kwargs)
try:
count_threshold = int(get_setting('MessagesPluginThreshold', DEFAULT_THRESHOLD))
except ValueError:
count_threshold = DEFAULT_THRESHOLD
messages = (
Message
.objects
.filter(machine__in=queryset, message_type__in=self._get_status_levels())
.values('text', 'message_type')
.annotate(count=Count('text'))
.filter(count__gte=count_threshold)
.order_by('message_type', 'count'))
context['data'] = messages
return context
def filter(self, machines, data):
unquoted = urllib.parse.unquote(data)
machines = machines.filter(messages__text=unquoted)
        return machines, f'Machines with message "{unquoted}"'
def _get_status_levels(self):
message_values = list(zip(*Message.MESSAGE_TYPES))[0]
# Default to using only the highest severity message.
status_setting = get_setting('MessagesPluginLevel', message_values[0])
if status_setting.upper() not in message_values:
status_setting = message_values[0]
return message_values[:message_values.index(status_setting) + 1]
|
[
"sheagcraig@gmail.com"
] |
sheagcraig@gmail.com
|
f844a5a6bd55f0597c3fbaae79edb94062663d06
|
32aa592fc3b7376b8fb36c0ac2245e6571fb7bdd
|
/MachineLearning/BargraphWithCSV.py
|
ffff26b270329284d6c3e263a8de26457df180fe
|
[] |
no_license
|
1234567890boo/ywviktor
|
00063a1c58b392cb4230791a9cffced6d2864889
|
12b18887243e9b64fb08db4ad440c7144bdf8cbb
|
refs/heads/master
| 2022-05-14T12:43:43.422329
| 2022-04-30T04:24:05
| 2022-04-30T04:24:05
| 57,740,866
| 0
| 0
| null | 2020-06-29T00:22:12
| 2016-05-01T18:48:27
|
Python
|
UTF-8
|
Python
| false
| false
| 647
|
py
|
import matplotlib.pyplot as plt
import numpy as np

# Each CSV row holds the category names in its first half and their integer heights in the second half.
with open("BarGraphPlots.csv", "r") as file:
    for i in file:
        row = i.strip().split(",")   # renamed from `list` to avoid shadowing the built-in
        listlen = int(len(row) / 2)
        xlen = row[0:listlen]        # xlen is names
        ylen = row[listlen:]         # ylen is nums
        ylen2 = []                   # for the ints so the graph works
        for u in ylen:
            ylen2.append(int(u))
        bar_indexes = np.arange(len(xlen))
        bar_heights = np.array(ylen2)
        plt.bar(bar_indexes, bar_heights)
        plt.yticks(bar_heights, ylen2)
        plt.xticks(bar_indexes, xlen)
        plt.show()
'''pie_slices=[50,90,20,14]
pie_names=['Python','Java','C#','C']
explode1=[0.2,0,0,0]
plt.pie(pie_slices,labels=pie_names, autopct="%1.1f%%",explode=explode1,shadow=True)
'''
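
# Editor's illustrative sketch (not part of the original script): the row layout the loop above
# assumes -- category names in the first half, matching integer heights in the second half.
# The sample row below is made up for illustration.
_sample_row = "Python,Java,C#,50,90,20".split(",")
_half = int(len(_sample_row) / 2)
assert _sample_row[:_half] == ["Python", "Java", "C#"]
assert [int(v) for v in _sample_row[_half:]] == [50, 90, 20]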
|
[
"vgmeydbray@gmail.com"
] |
vgmeydbray@gmail.com
|
2b91501ec33673680694a4ecf563a07415e8d74b
|
70069681bb0ec3958cf2cdd501b995828a1c9041
|
/datacenter/datacenter/settings.py
|
ed6956d80d7e5bba3e73ebba8da0c52add2c4c8b
|
[] |
no_license
|
ramsdjango/djangobatch6
|
116a796fc72fab5f4afd7203fb58e63c7f7ecee0
|
b00d659a261af001f717611a6f709920aa3cf1b9
|
refs/heads/master
| 2020-03-22T04:54:31.498818
| 2019-11-09T10:07:47
| 2019-11-09T10:07:47
| 139,528,978
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,140
|
py
|
"""
Django settings for datacenter project.
Generated by 'django-admin startproject' using Django 1.11.14.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '5fz18f5*ato0-lt=(+vq#o*+rrdnx62v&kpixwk(w)!&(vi_fb'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'cooling1',
'cooling',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'datacenter.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'datacenter.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
|
[
"ramsanangi@gmail.com"
] |
ramsanangi@gmail.com
|
1fc85b6a17c2f0902770e5174f6f9b7f9e4181af
|
c2ad8d51d315ae74ab9eb99ba730846576c95783
|
/app.py
|
e6e64bf6958208d2a5f4cfe60f67e26d1de9dd53
|
[] |
no_license
|
poojarjoshi/flask
|
eca042f2cdaad124343dff821cda8a5dbf922470
|
676a47400272d5095a135d09856a2ef2044ca413
|
refs/heads/master
| 2020-05-03T04:15:02.214262
| 2019-03-29T14:04:51
| 2019-03-29T14:04:51
| 178,416,515
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,491
|
py
|
from random import randint
from time import strftime
from flask import Flask, render_template, flash, request
from wtforms import Form, TextField, TextAreaField, validators, StringField, SubmitField
DEBUG = True
app = Flask(__name__)
app.config.from_object(__name__)
app.config['SECRET_KEY'] = 'SjdnUends821Jsdlkvxh391ksdODnejdDw'
class ReusableForm(Form):
name = TextField('Name:', validators=[validators.required()])
surname = TextField('Surname:', validators=[validators.required()])
def get_time():
time = strftime("%Y-%m-%dT%H:%M")
return time
# Uncommented so the call in the POST handler below works; appends each submission to file.log.
def write_to_disk(name, surname, email):
    data = open('file.log', 'a')
    timestamp = get_time()
    data.write('DateStamp={}, Name={}, Surname={}, Email={} \n'.format(timestamp, name, surname, email))
    data.close()
@app.route("/", methods=['GET', 'POST'])
def hello():
form = ReusableForm(request.form)
#print(form.errors)
if request.method == 'POST':
name=request.form['name']
surname=request.form['surname']
email=request.form['email']
password=request.form['password']
if form.validate():
write_to_disk(name, surname, email)
flash('Hello: {} {}'.format(name, surname))
else:
flash('Error: All Fields are Required')
return render_template('index.html', form=form)
if __name__ == "__main__":
#app.run(host='0.0.0.0',port='8095')
app.run()
|
[
"root@POJOSHI2D1.ptcnet.ptc.com"
] |
root@POJOSHI2D1.ptcnet.ptc.com
|
86ffdf1f99389fe30a1e8e4f8852f5b47f2c374d
|
954814448bdf0a556c5fe60f1b5b5d8bbff6ea7f
|
/models/Fournisseur.py
|
da51c4976526acb857040cd0a7101bf62a419898
|
[] |
no_license
|
jameln/GC-TJARA
|
da665c1d2b8a6522285576bb9656417d4723eb5d
|
d694f455049e373fa0143e161eff20576354c0d3
|
refs/heads/master
| 2021-01-19T22:52:37.834672
| 2017-06-15T12:16:46
| 2017-06-15T12:16:46
| 88,878,538
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,281
|
py
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api
class Fournisseur(models.Model):
_name = 'gctjara.fournisseur'
_rec_name = 'name'
_inherit='mail.thread'
name = fields.Char('Nom', required=True)
matriculefiscal = fields.Char('Matricule fiscale', required=True)
company_name = fields.Char('Company Name')
image = fields.Binary("Image",
attachment=True,
help="This field holds the image used as avatar for this contact, limited to 1024x1024px",
)
email = fields.Char('Email')
phone = fields.Char('Telephone')
fax = fields.Char('Fax')
mobile = fields.Char('Portable')
adresse = fields.Char('Adresse')
street = fields.Char()
zip = fields.Char(change_default=True)
city = fields.Char()
active = fields.Boolean(default=True)
# Agree = fields.Boolean(string='Fournisseur Agréé' ,default=False)
state = fields.Selection(string="Etat", required=True, selection=[
('Agree', 'Agrée'),
('NonAgree', 'Non Agrée')
    ], default='NonAgree')  # default must be one of the selection keys defined above
state_bool = fields.Boolean(compute="_state_bool")
@api.one
def toggle_state(self):
if self.state == 'Agree':
self.state = 'NonAgree'
else:
self.state = 'Agree'
return True
@api.depends("state")
def _state_bool(self):
for v in self:
v.state_bool = (v.state != 'NonAgree')
commande_id = fields.One2many(
string="Commande",
ondelete='restrict',
comodel_name='gctjara.cmdfournisseur',
inverse_name='fournisseur_id',
)
facture_id = fields.One2many(
string="Factures",
ondelete='restrict',
comodel_name='gctjara.factureachat',
inverse_name='fournisseur_id',
)
# produit_id = fields.Many2one(
# string='Prosuits',
# comodel_name='gctjara.produits'
# )
#
nature_relation = fields.Selection(
string='Nature de la relation',
default='',
selection=[
('fr', 'Fournisseur'),
('cn', 'Concurent'),
('ag', 'Agence'),
]
)
relation = fields.Selection(
string='Relation fournisseur',
default='',
selection=[
('exclusif', 'Régulier exclusif'),
('potentiel', 'Potentiel'),
('regulier', 'Régulier'),
('occacionnel', 'Occacionnel'),
('Pas', 'Pas de relation actuel')
]
)
type_de_relation = fields.Selection(
string='Type de relation',
default='',
selection=[
('Intense', 'Intense'),
('Confiante', 'Confiante'),
('Inexistante', 'Inexistante'),
('Réserve', 'Réservée'),
('Negative', 'Negative'),
('Limite', 'Limitée')
]
)
necessite_fournisseur = fields.Selection(
string='Type de relation',
default='',
selection=[
('Intense', 'Prioritaire'),
('Confiante', 'Contact interessant'),
('Inexistante', 'Structuration Engagée'),
('Réserve', 'Compangnes périodiques'),
('Negative', 'Non categorisé')
]
)
|
[
"djameloter@gmail.com"
] |
djameloter@gmail.com
|
a137cdc63868b4f0e028f8bdf53d7d97ff9d10de
|
cf1ab1af581f210f0c2cbb2712ad5da8fb7904c6
|
/ppgan/metric/test_fid_score.py
|
e8abccaaf3e8c4bda5a5c51e7621014b12a0664d
|
[] |
no_license
|
yxhpy/PaddleGAN
|
264314900ad6e3af89a80963cfaf510ddeac3cc4
|
ddbd89f8517aa3d826aaadf484bd8bd7edc335cc
|
refs/heads/master
| 2022-12-05T04:31:45.426468
| 2020-08-20T06:52:45
| 2020-08-20T06:52:45
| 289,302,768
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,111
|
py
|
#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import argparse
from compute_fid import *
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--image_data_path1',
type=str,
default='./real',
help='path of image data')
parser.add_argument('--image_data_path2',
type=str,
default='./fake',
help='path of image data')
parser.add_argument('--inference_model',
type=str,
default='./pretrained/params_inceptionV3',
help='path of inference_model.')
parser.add_argument('--use_gpu',
type=bool,
default=True,
help='default use gpu.')
parser.add_argument('--batch_size',
type=int,
default=1,
help='sample number in a batch for inference.')
args = parser.parse_args()
return args
def main():
args = parse_args()
path1 = args.image_data_path1
path2 = args.image_data_path2
paths = (path1, path2)
inference_model_path = args.inference_model
batch_size = args.batch_size
with fluid.dygraph.guard():
fid_value = calculate_fid_given_paths(paths, inference_model_path,
batch_size, args.use_gpu, 2048)
print('FID: ', fid_value)
if __name__ == "__main__":
main()
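
# Editor's illustrative sketch (not part of the original script): a typical command line, with
# made-up paths; the flags mirror the argparse options defined above.
#   python test_fid_score.py --image_data_path1 ./real --image_data_path2 ./fake \
#       --inference_model ./pretrained/params_inceptionV3 --batch_size 1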
|
[
"jslict09@gmail.com"
] |
jslict09@gmail.com
|
a66eb973ca566acdab39b7c50d501ddd46a8e84c
|
ec41b6d112afb14733e213a4a2eecac10bbe082a
|
/pydifact/segments.py
|
6f8d7811104b6316574d27299b2fc17fea81aee4
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
qwyk/pydifact
|
a8450080143f8d577f87bf4cee2be727bbb12115
|
ccb8907e443764c3ae981b4642ef1db3b0c81c7f
|
refs/heads/master
| 2020-07-22T21:12:51.338251
| 2019-09-09T15:01:01
| 2019-09-09T15:01:01
| 207,329,465
| 0
| 0
|
MIT
| 2019-09-09T14:30:49
| 2019-09-09T14:30:49
| null |
UTF-8
|
Python
| false
| false
| 2,727
|
py
|
# Pydifact - a python edifact library
#
# Copyright (c) 2019 Christian González
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from pydifact.control import Characters
class Segment:
"""Represent a segment of an EDI message."""
def __init__(self, tag: str, *elements):
"""Create a new instance.
:param str tag: The code/tag of the segment.
:param list elements: The data elements for this segment, as list.
"""
assert type(tag) == str, "%s is not a str, it is %s" % (tag, type(tag))
self.tag = tag
"""The data elements for this segment.
this is converted to a list (due to the fact that python creates a tuple
when passing a variable arguments list to a method)
"""
self.elements = list(elements)
def __str__(self) -> str:
"""Returns the Segment in Python list printout"""
return "'{tag}' EDI segment: {elements}".format(
tag=self.tag, elements=str(self.elements)
)
def __repr__(self) -> str:
return "{} segment: {}".format(self.tag, str(self.elements))
def __eq__(self, other) -> bool:
return type(self) == type(other) and list(self.elements) == list(other.elements)
class SegmentFactory:
"""Factory for producing segments."""
@staticmethod
def create_segment(characters: Characters, name: str, *elements: list) -> Segment:
"""Create a new instance of the relevant class type.
:param characters: The control characters
:param name: The name of the segment
:param elements: The data elements for this segment
"""
# FIXME: characters is not used!
return Segment(name, *elements)
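
# Editor's illustrative sketch (not part of the library): constructing and comparing segments
# with the Segment class defined above. The tag and element values are made up for illustration.
if __name__ == "__main__":
    seg = Segment("QTY", ["21", "8"])
    print(seg)                                  # 'QTY' EDI segment: [['21', '8']]
    assert seg == Segment("QTY", ["21", "8"])   # note: __eq__ compares the element lists only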
|
[
"christian.gonzalez@nerdocs.at"
] |
christian.gonzalez@nerdocs.at
|
09051a8356c9f1c65f71e2ecc275947c8a977296
|
9f42793a562a19c61d6d999a2e88e786a6098621
|
/python.py
|
f8ab0d9add567d989d3a2317d6812af5c344935e
|
[] |
no_license
|
devpatel917/MasterMind
|
4071fa33ceb0c41773c47332e9497ec1de3f5472
|
5f63d523d7dca8721c8d4ea421c18057ac170d68
|
refs/heads/main
| 2023-05-29T06:40:41.892285
| 2021-06-19T02:57:41
| 2021-06-19T02:57:41
| 378,310,101
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,900
|
py
|
GamesWon=0 #Sets the initial amount of games won to 0. This is because I plan on creating the ability to play multiple games.
GamesLost=0 #Same purpose as above but setting the initial amount of games lost to 0
PlayerOneGamesWon=0 #purpose is to allow multiple games on two player (this is for the 1st player)
PlayerTwoGamesWon=0 # purpose is to allow multiple games on two player (this is for the 2nd player)
gameType=input("Do you want to play one player or two player. Type one or two") #get user input to play one or two player, so I can use this information to differntiate what to do when it's two player and one player
#this function checks the user input. the reason that this function is present is because if the user doesn't make a proper input, then the game won't play and the program will result in an error. the function checks the length of the guess and the presecne of commas
def checkUserInput(userGuess,properInput):
if len(userGuess) != 7 or userGuess[1] != "," or userGuess[3] != "," or userGuess[5] != ",":
properInput = False
return(properInput)
#function actually creates the computer guess. first, 4 random numbers are generated. Those 4 random numbers are associated with a random position in the colors array. The 4 random colors are appended to another array (computer guess)
def createComputerGuess(computerGuess,colors):
import random
for z in range(0, 4):
computerGuess.append(colors[random.randrange(0, len(colors))])
return(computerGuess)
#calculates black pegs. for the same position, it compares every letter of the user guess to computer guess and if they are the same (in the same position), then it adds 1 black peg
def calculateBlackPegs(userGuess,computerGuess,blackPegs):
for i in range(0, 4):
if userGuess[i] == computerGuess[i]:
blackPegs = blackPegs + 1
return(blackPegs)
#for white pegs there are two criteria: that it is in both guesses and they are not in the same position, therefore, since there are only two options for each criteria, i used booleans
#i used a nested for loop to compare each letter of user guess to computer guess. if the letters do match, then the boolean is true otherwise it is already default to false
#because x and i are both positions, i used them to check if the letters are in the same position or not
#for duplicates, i knew that just doing the basic 2 criteria for white pegs wasn't going to give me purely white pegs, but rather white pegs that are double counting the black pegs as well.
#therefore, the logic was that a peg can either be a white peg or a black peg, so i differentiated between pure black pegs and pure white pegs
#the final if statement combines all the criteria for a truly pure white peg that works with duplicates. i then return the value
def calculateWhitePegs(userGuess,computerGuess,whitePegs):
for x in range(0, len(userGuess)):
InBothArrays = False
NotInSamePosition = False
for i in range(0, len(computerGuess)):
if (userGuess[x] == computerGuess[i]):
InBothArrays = True
if x != i:
NotInSamePosition = True
BlackPeg = False
if userGuess[x] == computerGuess[x]:
BlackPeg = True
if (InBothArrays == True and NotInSamePosition == True and BlackPeg == False):
whitePegs = whitePegs + 1
return(whitePegs)
#another method i could've used to calculate white pegs is that i could've only done white pegs and only done black pegs, then subtracted black pegs from white pegs
#that method would've been shorter in lines but this was my original idea, so i used this: the idea that if it's a white peg, it can't be black and if it's black, it can't be white
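
# Editor's illustrative sketch (not part of the original game): a worked example of the two peg
# counters defined above, with made-up guesses. Secret r,g,b,y vs guess r,b,g,o gives 1 exact
# match (r) and 2 correct colors in the wrong position (b and g).
_example_secret = ["r", "g", "b", "y"]
_example_guess = ["r", "b", "g", "o"]
assert calculateBlackPegs(_example_guess, _example_secret, 0) == 1
assert calculateWhitePegs(_example_guess, _example_secret, 0) == 2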
#this function allows to make changes according to easy, medium, or hard levels. if it's easy, then there is nothing to do, therefore, nothing is in this function for easy
#for medium and hard, more colors are added, so the computer has a wider range of colors to randomly select, making the game harder
#the amount played also increases per difficulty level
def AdjustmentsPerDifficultyAmountPlayed(difficulty,MaxAmountPlayed):
if difficulty=="medium":
MaxAmountPlayed=7
if difficulty=="hard":
MaxAmountPlayed=6
return(MaxAmountPlayed)
def AdjustmentsPerDifficultyColors(difficulty,colors):
if difficulty == "medium":
colors.append("v")
colors.append("i")
if difficulty == "hard":
colors.append("v")
colors.append("i")
colors.append("t")
return(colors)
def MasterMind():
global GamesLost
global GamesWon
global gameType
global PlayerOneGamesWon
global PlayerTwoGamesWon
# I need to make these variables global in order to allow multiple games for the user to play, so i keep track of the games won throughout those multiple games
bestScore=0 #this is necessary because i need to provide a starting point for this variable before i actually apply it within the next lines
MaxAmountPlayed=8#this is the default for the easy level
AmountPlayed = 0
blackPegs = 0
whitePegs = 0
#above variables are self explanatory
if gameType=="one":
difficulty = input("What difficulty level do you want to play: Type easy, medium, or hard")#difficulty level
colors = ["r", "g", "b", "y", "o", "p", "g", "m"] #the colors in which the computer picks in order to make the comp's guess or choice
colors=AdjustmentsPerDifficultyColors(difficulty, colors)
MaxAmountPlayed=AdjustmentsPerDifficultyAmountPlayed(difficulty,MaxAmountPlayed)
# t=turqois, i=indigo,v=violet
# r=red,g=green,b=blue,y=yellow,o=orange,p=purple,g=gray,m=magenta
computerGuess = [] #initially starts out as blank but colors get appended. this is necessary because i can't just introduce a new array in a new function. it has to have a value before
    # rather than making an entire separate function or another part for two player, which would be redundant because the comparison code and much of the code will be the same
    # in order to abstract that concept, i decided to merely add if statements to my existing one player game, but now the computer guess is player 1's choice and the user guess is player 2's guess
if gameType=="one":
print(createComputerGuess(computerGuess, colors))
if gameType=="two":
computerGuess=input("Player 1, type in a 4 color sequence separated by commas that you want player 2 to guess")
computerGuess=computerGuess.split(",")
#because in one player, the computer guess was already an array, i had to split this computer guess (player 1's choice) so that it will also become an array
# the while loop plays the game until 8 rounds are done or when there are 4 black pegs
hintChoice="no"
#this is the default option that the user won't pick a hint. this is also necessary because i need to mention that variable because i apply it in the while loop
while (blackPegs < 4 and AmountPlayed < MaxAmountPlayed):#this while loop allows the game to go on until either black pegs reaches 4 or max amount played is reached
blackPegs = 0
whitePegs = 0
if gameType=="one":
if hintChoice=="no": #this is from the previous round. if they decided to use a hint last time, they can't do it this time
hintChoice = input("You get 1 hint per game, so use it carefully. Do you want a hint?Type yes or no")
if (hintChoice == "yes"):
print("Your hint is that: ", computerGuess[3], "is a part of the answer")
print(colors)
userGuess = input(
"What Guess would you like. You need to make 4 choices. Enter abbreviations and in commas. Choose between the above colors: ") #purpose of this to help the user out when picking the colors (giving them options)
if gameType=="two":
userGuess=input("player 2, what is your guess as to what the sequence is. sepearate your guess by commas")
properInput=True #assumes that the user input is true and the next lines calls a function to check that
checkUserInput(userGuess,properInput)
        while (checkUserInput(userGuess,properInput)==False): #this while loop only stops when the user gives a correct input. this section is also necessary in order to make sure that an improper guess doesn't count as an attempt in the game
userGuess=input("Type again. Make sure you type only 4 colors each seperated by 1 comma")
checkUserInput(userGuess, properInput)
userGuess = userGuess.split(",") #removes the commas and it becomes an array so that it is easier to compare to the computer guess
AmountPlayed = AmountPlayed + 1 #adds one to each time you guess
blackPegs=calculateBlackPegs(userGuess,computerGuess,blackPegs)
print("Black Pegs: ", blackPegs)
#this if statement calculates the best score. it compares the previous black pegs score to the best score, if it is greater, then it becomes the new best score
if blackPegs>bestScore:
bestScore = blackPegs
print("White Pegs: ", calculateWhitePegs(userGuess, computerGuess, whitePegs))
print("Best Score: ", bestScore)
#two ways to decide the end of the game: either you get 4 black pegs or you play until you are out of attempts
if blackPegs == 4:
#differntiation between one player and two player(what to print)
if gameType=="two":
PlayerTwoGamesWon=PlayerTwoGamesWon+1 #keeps track of games won
print("Player 1, you lost! Player 2, you won!")
print("Player 1 Games Won: ",PlayerOneGamesWon)
print("Player 2 Games Won: ",PlayerTwoGamesWon)
if gameType=="one":
GamesWon = GamesWon + 1 #keeps track of games won
print("You Win! Games Won: ", GamesWon)
print("Games Lost: ", GamesLost)
choice=input("Do you want to play again? Type yes or no")
if choice == "yes":
MasterMind()
if AmountPlayed == MaxAmountPlayed:
print("The answer is: ", computerGuess)
#differentiation between one player and two player (what to type when lost)
if gameType=="two":
PlayerOneGamesWon=PlayerOneGamesWon+1
print("Player 1, you won! Player 2, you lost!")
print("Player 1 Games Won: ", PlayerOneGamesWon)
print("Player 2 Games Won: ", PlayerTwoGamesWon)
#player 1 won 1 game, player 2 lost 1 game
if gameType=="one":
GamesLost = GamesLost + 1 #keeps track of games lost
print("You Lose! Games Lost: ", GamesLost)
print("Games Won: ", GamesWon)
choice=input("Do you want to play again? Type yes or no")
if choice=="yes":
MasterMind() #calling the actual game to start but this is with all of the information like games lost and won
MasterMind() #calling the actual game to start (initial start)
|
[
"noreply@github.com"
] |
devpatel917.noreply@github.com
|
1676af968a59569190bcd0fe7ca485fd047bc031
|
193cad84f85aeca8070b716044cde5b2ec4ad92f
|
/app/user/serializers.py
|
d6bf8fc5370ed9ef2bdb04429a907b3fac68d3d8
|
[
"MIT"
] |
permissive
|
pratishjage/instagram-api-clone
|
c0515475e1835f2b9b58a3d75f5b2d5d8dc15e68
|
cc8de53e1b304fc68bec27bb969e614a53b785f2
|
refs/heads/master
| 2022-11-30T06:57:09.854944
| 2020-08-09T07:23:16
| 2020-08-09T07:23:16
| 286,050,968
| 0
| 0
|
MIT
| 2020-08-08T16:05:34
| 2020-08-08T13:44:02
|
Python
|
UTF-8
|
Python
| false
| false
| 3,363
|
py
|
from django.contrib.auth import get_user_model
from rest_framework import serializers
from core.models import Post, Comment
from django.core.paginator import Paginator
from rest_framework.settings import api_settings
class RegisterUserSerializer(serializers.ModelSerializer):
"""Serializer for creating a new user account"""
class Meta:
model = get_user_model()
fields = ('id', 'email', 'fullname', 'username', 'password')
extra_kwargs = {'password': {'write_only': True,
'min_length': 5},
'username': {'min_length': 3}}
def create(self, validated_data):
"""Create a new user with encrypted password and return it"""
return get_user_model().objects.create_user(**validated_data)
class UserInfoSerializer(serializers.ModelSerializer):
"""Serializer for the user settings objects"""
class Meta:
model = get_user_model()
fields = ('id', 'email', 'username', 'password',
'fullname', 'bio', 'profile_pic')
extra_kwargs = {'password': {'write_only': True,
'min_length': 5},
'username': {'min_length': 3}}
def update(self, instance, validated_data):
"""Update a user, setting the password correctly and return it"""
password = validated_data.pop('password', None)
user = super().update(instance, validated_data)
if password:
user.set_password(password)
user.save()
return user
class UserPostsSerializer(serializers.ModelSerializer):
"""Serializer for viewing a user profile"""
number_of_comments = serializers.SerializerMethodField()
class Meta:
model = Post
fields = ('id', 'photo', 'text', 'location', 'number_of_likes',
'number_of_comments', 'posted_on')
def get_number_of_comments(self, obj):
return Comment.objects.filter(post=obj).count()
class UserProfileSerializer(serializers.ModelSerializer):
"""Serializer for viewing a user posts"""
number_of_posts = serializers.SerializerMethodField()
followed_by_req_user = serializers.SerializerMethodField()
user_posts = serializers.SerializerMethodField('paginated_user_posts')
class Meta:
model = get_user_model()
fields = ('id', 'username', 'fullname',
'bio', 'profile_pic', 'number_of_followers',
'number_of_following', 'number_of_posts',
'user_posts', 'followed_by_req_user')
def get_number_of_posts(self, obj):
return Post.objects.filter(author=obj).count()
def paginated_user_posts(self, obj):
page_size = api_settings.PAGE_SIZE
paginator = Paginator(obj.user_posts.all(), page_size)
page = self.context['request'].query_params.get('page') or 1
user_posts = paginator.page(page)
serializer = UserPostsSerializer(user_posts, many=True)
return serializer.data
def get_followed_by_req_user(self, obj):
user = self.context['request'].user
return user in obj.followers.all()
class FollowSerializer(serializers.ModelSerializer):
"""Serializer for listing all followers"""
class Meta:
model = get_user_model()
fields = ('username', 'profile_pic')
|
[
"konradgalczynski07@gmail.com"
] |
konradgalczynski07@gmail.com
|
a68abc0e74f6406a38be4cf0e2f4e37c3e52825a
|
3c5cea6c077a778d7088a22452c6b5d9bf62566f
|
/awd-lstm-lm/locked_dropout.py
|
75a0b104beb42de20c99b90230196587b19b2596
|
[
"BSD-3-Clause"
] |
permissive
|
LGirrbach/JoeyNMT-QRNN
|
6961c6daf0022ea0f349ced08a1d5459ea378a4f
|
281dbf58282564328ea25b2553d978614881e746
|
refs/heads/main
| 2023-07-26T00:08:04.088332
| 2021-09-10T10:32:58
| 2021-09-10T10:32:58
| 405,043,067
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 387
|
py
|
import torch
import torch.nn as nn
class LockedDropout(nn.Module):
def __init__(self):
super().__init__()
    def forward(self, x, dropout=0.5):
        # Identity at eval time or when dropout is disabled.
        if not self.training or not dropout:
            return x
        # Sample one Bernoulli keep-mask of shape (1, batch, features) and expand it across
        # the first (time) dimension, so the same dropout pattern is reused at every timestep
        # ("locked"/variational dropout); dividing by the keep probability keeps activations unbiased.
        m = x.data.new(1, x.size(1), x.size(2)).bernoulli_(1 - dropout)
        mask = m / (1 - dropout)
        mask = mask.expand_as(x)
        return mask * x
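
# Editor's illustrative sketch (not part of the original module): applying LockedDropout to a
# (seq_len, batch, features) tensor. The shapes and values below are made up for illustration.
if __name__ == "__main__":
    drop = LockedDropout()          # a fresh nn.Module starts in training mode
    x = torch.ones(5, 2, 4)         # (time, batch, features)
    y = drop(x, dropout=0.5)
    assert torch.equal(y[0], y[4])  # identical mask at every timestep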
|
[
"girrbach@cl.uni-heidelberg.de"
] |
girrbach@cl.uni-heidelberg.de
|
194c11aa6fdf3b79321ff5fcf7d39296f08faa0e
|
6b16458a0c80613a66c251511462e7a7d440970e
|
/packages/pyright-internal/src/tests/samples/genericTypes70.py
|
1129c1c2d934c4772b7f5fb75d150e24927d72e2
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
ikamensh/pyright
|
3bbbb2cf1a1bdbbecb89ef389036756f47ef7114
|
5ea620ad2008de57dcac720a84674bdb712bffc4
|
refs/heads/main
| 2023-08-26T05:54:43.660282
| 2021-10-30T16:35:06
| 2021-10-30T16:35:06
| 422,952,836
| 0
| 0
|
NOASSERTION
| 2021-10-30T17:52:03
| 2021-10-30T17:52:02
| null |
UTF-8
|
Python
| false
| false
| 851
|
py
|
# This sample tests the case where a generic class has a constructor that
# supplies the type arguments via a callable which is itself generic.
from typing import Callable, Generic, Literal, Sequence, TypeVar
T = TypeVar("T")
V = TypeVar("V", bound=object)
V_co = TypeVar("V_co", covariant=True)
U = TypeVar("U", bound=object)
class Result(Generic[V]):
pass
ParseFn = Callable[[Sequence[T], int, int], Result[V]]
class Parser(Generic[T, V_co]):
def fmap1(self, fn: Callable[[V_co], U]) -> "Parser[T, U]":
def fmap2(stream: Sequence[T], pos: int, bt: int) -> Result[U]:
raise NotImplementedError()
t1: Literal["FnParser[T@Parser, U@fmap1]"] = reveal_type(FnParser(fmap2))
return FnParser(fmap2)
class FnParser(Parser[T, V_co]):
def __init__(self, fn: ParseFn[T, V_co]):
self._fn = fn
|
[
"erictr@microsoft.com"
] |
erictr@microsoft.com
|
310c7d75a510f1deace2c9b811ef8d148fb15f85
|
c9fde4576216a22e8d5711bbe97adda1aafa2f08
|
/model-optimizer/mo/front/kaldi/extractors/reshape.py
|
cf47f67aba1d0db321e7d41c4f288aadffedfec0
|
[
"Apache-2.0"
] |
permissive
|
dliang0406/dldt
|
c703d6a837de3f996528fc8a9543f9530b23342c
|
d9b10abcebafe8b10ba81e09e433de7a366c072c
|
refs/heads/2018
| 2020-04-03T08:24:47.723353
| 2018-10-29T07:58:05
| 2018-10-29T07:58:05
| 155,132,108
| 3
| 1
|
Apache-2.0
| 2019-10-10T08:39:46
| 2018-10-29T01:03:54
|
C++
|
UTF-8
|
Python
| false
| false
| 2,856
|
py
|
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from mo.front.common.partial_infer.reshape import tf_reshape_shape_infer
from mo.front.common.partial_infer.utils import int64_array
from mo.front.extractor import FrontExtractorOp
from mo.graph.graph import Node
from mo.ops.op import Op
from mo.ops.reshape import Reshape
class ReshapeFrontExtractor(FrontExtractorOp):
op = 'reshape'
enabled = True
@staticmethod
def extract(node):
mapping_rule = {
'dim': node.pb.dim if hasattr(node.pb, 'dim') else None,
'axis': node.pb.axis,
'num_axes': node.pb.num_axes,
'infer': ReshapeFrontExtractor.infer
}
Op.get_op_class_by_name('Reshape').update_node_stat(node, mapping_rule)
return __class__.enabled
@staticmethod
def infer(node: Node):
in_node = node.in_node().in_node() # prev_layer_node -> data -> this_node
input_shape = node.in_node().shape
        # Kaldi Reshape hugely depends on the layers that precede or succeed it
        # (Convolution/Pooling layers). Therefore there are 4 cases with different
        # partial inference.
batch = input_shape[0]
if in_node.type == 'Convolution' or in_node.type == 'Pooling':
output_spatial = int64_array([batch, np.prod(input_shape[1:])])
return ReshapeFrontExtractor.set_shape_and_dim(node, output_spatial)
# Supports ONLY NCHW and NH layouts
spatial_shape = input_shape[1]
if input_shape.shape == (4,):
spatial_shape = input_shape[2:3]
out_node = node.out_node().out_node()
if out_node.type == 'Convolution':
output_spatial = int64_array([batch, int(np.ceil(spatial_shape / out_node.patch_stride)), 1, out_node.patch_stride])
return ReshapeFrontExtractor.set_shape_and_dim(node, output_spatial)
elif out_node.type == 'Pooling':
output_spatial = int64_array([batch, out_node.pool_stride, 1, int(np.ceil(spatial_shape / out_node.pool_stride))])
return ReshapeFrontExtractor.set_shape_and_dim(node, output_spatial)
@staticmethod
def set_shape_and_dim(node: Node, reshape_dim):
Reshape.update_node_stat(node, {'dim': reshape_dim})
node.out_node().shape = reshape_dim
|
[
"openvino_pushbot@intel.com"
] |
openvino_pushbot@intel.com
|