Dataset columns (name: dtype, observed range or class count):

- blob_id: string, length 40
- directory_id: string, length 40
- path: string, length 2–616
- content_id: string, length 40
- detected_licenses: list, length 0–69
- license_type: string, 2 classes
- repo_name: string, length 5–118
- snapshot_id: string, length 40
- revision_id: string, length 40
- branch_name: string, length 4–63
- visit_date: timestamp[us]
- revision_date: timestamp[us]
- committer_date: timestamp[us]
- github_id: int64, 2.91k–686M, nullable (⌀)
- star_events_count: int64, 0–209k
- fork_events_count: int64, 0–110k
- gha_license_id: string, 23 classes
- gha_event_created_at: timestamp[us]
- gha_created_at: timestamp[us]
- gha_language: string, 213 classes
- src_encoding: string, 30 classes
- language: string, 1 class
- is_vendor: bool, 2 classes
- is_generated: bool, 2 classes
- length_bytes: int64, 2–10.3M
- extension: string, 246 classes
- content: string, length 2–10.3M
- authors: list, length 1
- author_id: string, length 0–212
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
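Each row below is one file record in the column order above. As a minimal sketch of how such records might be streamed and inspected (assuming this dump follows the Hugging Face `datasets` viewer convention; `"org/dataset-name"` is a placeholder, not taken from this file):

```python
# Hypothetical loading example -- the dataset path is a placeholder.
from datasets import load_dataset

ds = load_dataset("org/dataset-name", split="train", streaming=True)
for row in ds.take(2):
    # Per-file provenance metadata, then the stored file content itself.
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
    print(row["content"][:120])  # first characters of the stored source file
```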
be1f066283f5fc6153faa4a33622e7bcf627724c
|
0bb474290e13814c2498c086780da5096453da05
|
/code-festival-2018-final/E/main.py
|
dbd827d56fd4197001f45e30c3085466ba045326
|
[] |
no_license
|
ddtkra/atcoder
|
49b6205bf1bf6a50106b4ae94d2206a324f278e0
|
eb57c144b5c2dbdd4abc432ecd8b1b3386244e30
|
refs/heads/master
| 2022-01-25T15:38:10.415959
| 2020-03-18T09:22:08
| 2020-03-18T09:22:08
| 208,825,724
| 1
| 0
| null | 2022-01-21T20:10:20
| 2019-09-16T14:51:01
|
Python
|
UTF-8
|
Python
| false
| false
| 516
|
py
|
#!/usr/bin/env python3
import sys
sys.setrecursionlimit(10000000)
INF = 1<<32
def solve(N: int, K: int, A: "List[int]"):
    return
def main():
    def iterate_tokens():
        for line in sys.stdin:
            for word in line.split():
                yield word
    tokens = iterate_tokens()
    N = int(next(tokens))  # type: int
    K = int(next(tokens))  # type: int
    A = [int(next(tokens)) for _ in range(N - 1 - 0 + 1)]  # type: "List[int]"
    solve(N, K, A)
if __name__ == '__main__':
    main()
|
[
"deritefully@gmail.com"
] |
deritefully@gmail.com
|
b865161161cec01eff6c0fb80ba8e1ae7bf8b6a4
|
5d35e0adfb6316535900fd9aeb21fc349913dd6a
|
/venv/bin/easy_install
|
411bd80da8591e098ed08e8c4d1e930da531e1d7
|
[] |
no_license
|
coma2441/appnp_tf2
|
3e2ae0f3d073b5edbbd589a760f408732ff22da8
|
4ded26555e2e66869383b8d460016083941ea0f0
|
refs/heads/master
| 2020-12-19T14:06:52.943448
| 2020-01-23T12:34:13
| 2020-01-23T12:34:13
| 235,756,525
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 446
|
#!/Users/changkyu/Documents/GitHub/appnp_tf2/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
    )
|
[
"changkyu.choi@uit.no"
] |
changkyu.choi@uit.no
|
|
00836a0c635a9d150cd9e3ede019f8918f922dd2
|
b56fbf5308a5494c4064315767ee003771444f42
|
/Algorithm/0418_greedyicecream.py
|
9b222672c48c4565cb6ba6cf88193137d60503ea
|
[] |
no_license
|
Universe-Liu/Python
|
9a25888581a3d395e5d83d3e7eb164d7d5c3e0e8
|
26f54f63be03896de9004b4451b6af211210154d
|
refs/heads/master
| 2023-04-27T17:57:36.640407
| 2021-05-14T02:40:31
| 2021-05-14T02:40:31
| 284,634,098
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 329
|
py
|
costs=[1,6,3,1,2,5]
coins=20
def maxIceCream(costs, coins) -> int:
    ans = 0
    s = 0
    costs.sort()
    if costs[0] > coins:
        return ans
    for i in range(len(costs)):
        if costs[i] + s <= coins:
            ans += 1
            s += costs[i]
        else:
            break
    return ans
print(maxIceCream(costs,coins))
|
[
"842124824@qq.com"
] |
842124824@qq.com
|
108d08d36e676c7be42ed12baef6c44c616f1a23
|
cd2cb47552a5ee914898bb27e5a7eb02362c4e0d
|
/Restaurantapp/views.py
|
36a6ebe0d499d842bc71ab679841c705f83788fc
|
[] |
no_license
|
Jap9/Django_Restaurant
|
02bcfb46d24bd83ea10bbc4c9db6c9b17a851396
|
9f9e0d36a1368f8fb7d9faf7a3be0c1586e69544
|
refs/heads/master
| 2021-01-10T16:33:50.811446
| 2016-04-30T12:44:07
| 2016-04-30T12:44:07
| 54,205,064
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,331
|
py
|
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render, render_to_response
from django.core.urlresolvers import reverse
from django.utils import timezone
from django.shortcuts import redirect
from .models import Restaurant
from forms import *
def mainpage(request):
    rests = Restaurant.objects.order_by('price')
    return render(request, 'Restaurantapp/base.html', {'rests': rests})
def new_restaurant(request):
    if request.method == "POST":
        form = PostForm(request.POST)
        if form.is_valid():
            post = form.save(commit=False)
            post.published_date = timezone.now()
            post.save()
            return redirect('http://127.0.0.1:8000/')
    else:
        form = PostForm()
    return render(request, 'Restaurantapp/new_restaurant.html', {'form': form})
def delete_restaurant(request, rest_pk):
    delRest = Restaurant.objects.get(pk=rest_pk)
    delRest.delete()
    return redirect('http://127.0.0.1:8000/')
def reservar_restaurant(request):
    if request.method == "POST":
        form = PostForm_reserva(request.POST)
        if form.is_valid():
            return redirect('http://127.0.0.1:8000/')
    else:
        form = PostForm_reserva()
    return render(request, 'Restaurantapp/reservar_restaurant.html', {'form': form})
|
[
"josepalbertpifarre@gmail.com"
] |
josepalbertpifarre@gmail.com
|
cf2843748fee2cf8c6e7fc097c62279e7a0f3867
|
6d348a21b8345ed7ce9a85a1a3859dbf5b1cf429
|
/conanfile.py
|
b122302a1a90c5bad32d7f6fb237551d77144f4d
|
[
"MIT"
] |
permissive
|
lckroom/voxel-fluid
|
de535081177c86458223ce8254fff44621b10a13
|
3fd833629199cc0b31f0209b46b786aa9028a137
|
refs/heads/master
| 2023-03-17T09:22:58.117790
| 2018-11-17T23:22:35
| 2018-11-17T23:22:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 564
|
py
|
from conans import ConanFile, CMake
class FluidConan(ConanFile):
    name = "Fluid"
    version = "0.1"
    settings = "os", "compiler", "build_type", "arch"
    options = {}
    requires = (
        "gtest/1.8.0@lasote/stable",
    )
    default_options = (
        "gtest:shared=False",
    )
    generators = "cmake"
    def build(self):
        cmake = CMake(self)
        cmake.definitions.update(
            { "BUILD_TESTS": True
            , "USE_CONAN": True
            })
        cmake.configure(source_dir=self.source_folder)
        cmake.build()
|
[
"zoltan.gilian@gmail.com"
] |
zoltan.gilian@gmail.com
|
dd2a5d08559fdd5c85eaf3cf498ca2a3e338c55d
|
f0e7e46e8eb1ade59e006f3fa4e0c7967f1750f1
|
/RPIServers/restartServers.py
|
8bde480b1990b61601c16ea594eed3e540a666dc
|
[] |
no_license
|
flavioipp/RPIxmlrpc
|
3eec378e194804245c125b9c33f82d9545930056
|
1ad76cd08a4c0c2f0861e02c1cd740479d6bd05b
|
refs/heads/master
| 2021-09-10T06:28:58.039843
| 2018-03-21T15:15:27
| 2018-03-21T15:15:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,048
|
py
|
#!/usr/bin/env python3
import os
import sys
import time
import ctypes
import logging
import logging.config
from ansicolors import *
from pexpect import pxssh
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR + '/..')
from RPIServers import rpiConst
userID=rpiConst.USER['id']
userPWD=rpiConst.USER['pwd']
# init logging
logging.config.fileConfig(BASE_DIR + '/logging.conf')
logger = logging.getLogger('rpiServer')
def Mbox(title, text, style):
    return ctypes.windll.user32.MessageBoxW(0, text, title, style)
if __name__ == '__main__':
    logger.info(ANSI_info('Updating GIT Repositories...'))
    for rpi in rpiConst.SERVER_LIST:
        logger.info('RPI-%s : %s'%(str(rpi['id']),str(rpi['ip'])))
        try:
            s = pxssh.pxssh()
            if not s.login(str(rpi['ip']),userID,userPWD):
                logger.error('SSH Session failed on login')
                logger.error(str(s))
            else:
                logger.info('SSH Session established')
                logger.info('Restarting servergpio service...')
                s.sendline('sudo service servergpio restart')
                while not s.prompt(): time.sleep(1)
                time.sleep(1)
                s.sendline('sudo service servergpio status')
                while not s.prompt(): time.sleep(1)
                logger.info(s.before.decode("utf-8"))
                logger.info('Done!!')
                s.logout()
        except Exception as xxx:
            logger.error(str(xxx))
            logger.error(ANSI_fail('RPI-%s : %s'%(str(rpi['id']),str(rpi['ip']))))
    logger.info(ANSI_info('...Done!!'))
|
[
"pi@raspberry.com"
] |
pi@raspberry.com
|
af1869fbca418280a9fc8f31dec79b970cfeddaf
|
0141361f7c4d276f471ac278580479fa15bc4296
|
/arrayAndString/removeComments.py
|
710e9312eb976f20595fec8727a33ee4ad64cd2c
|
[] |
no_license
|
tr1503/LeetCode
|
a7f2f1801c9424aa96d3cde497290ac1f7992f58
|
6d361cad2821248350f1d8432fdfef86895ca281
|
refs/heads/master
| 2021-06-24T19:03:08.681432
| 2020-10-09T23:53:22
| 2020-10-09T23:53:22
| 146,689,171
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,093
|
py
|
class Solution:
    def removeComments(self, source):
        """
        :type source: List[str]
        :rtype: List[str]
        """
        res = []
        comment = False
        out = ""
        for line in source:
            i = 0
            while i < len(line):
                if not comment:
                    if i == len(line) - 1:
                        out += line[i]
                    else:
                        temp = line[i:i+2]
                        if temp == "/*":
                            comment = True
                            i += 1
                        elif temp == "//":
                            break
                        else:
                            out += line[i]
                else:
                    if i < len(line) - 1:
                        temp = line[i:i+2]
                        if temp == "*/":
                            comment = False
                            i += 1
                i += 1
            if out != "" and not comment:
                res.append(out)
                out = ""
        return res
|
[
"noreply@github.com"
] |
tr1503.noreply@github.com
|
78c05de618fa12839d79cf4cfc563337b93b099a
|
844b8a9d8ec1a2c93cc338deedc53eaf6426b2e0
|
/belote/client.py
|
668249ddd3f8bd387030e62a090333f90c0d34eb
|
[] |
no_license
|
FlorianDenis/Belote
|
57e42625999d7e286f4d3cfa5a6ad71c9c11116b
|
b72e40c304249cbd2fbb659dbfe7a8796d823dd4
|
refs/heads/master
| 2023-03-20T03:03:10.997610
| 2021-03-01T21:52:27
| 2021-03-01T21:52:27
| 250,744,944
| 1
| 0
| null | 2020-04-12T15:32:55
| 2020-03-28T08:14:52
|
Python
|
UTF-8
|
Python
| false
| false
| 2,900
|
py
|
#
# Copyright (C) Florian Denis - All Rights Reserved
# Unauthorized copying of this file, via any medium is strictly prohibited.
# Proprietary and confidential.
#
import logging
import random
import socket
import os
from . import constants
from . import packet
from . import player
from . import transport
from . import gui
from . import game
log = logging.getLogger(__name__)
class Client:
    def __init__(self, host, port, name, windowed, backend):
        # Server info
        self._host = host
        self._port = port
        # GUI info
        self._windowed = windowed
        self._backend = backend
        # Local player instance
        identifier = "{:x}".format(random.getrandbits(32))
        self._player = player.Player(identifier, name)
    def run(self):
        # Connect to server
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock.connect((self._host, self._port))
            log.info("Connection established with server {}:{}"
                .format(self._host, self._port))
        except:
            log.error("Unable to establish connection to server {}:{}"
                .format(self._host, self._port))
            sock.close()
            os._exit(0)
        # Create transport
        self._transport = transport.Transport(sock)
        self._transport.on_recv = self.__recv
        self._transport.on_drop = self.__drop
        self._transport.run()
        # Create GUI
        self._gui = gui.GUI(self._windowed, self._backend)
        self._gui.on_trump_picked = self._pick_trump
        self._gui.on_card_picked = self._play_card
        # Register as a new player
        self._register()
        self._gui.run()
    def _perform(self, opcode, *args):
        tx_packet = packet.Packet(constants.MessageType.COMMAND, opcode, *args)
        self._transport.send(tx_packet)
    def __drop(self, transport):
        log.error("Connection dropped with server")
        os._exit(0)
    def _register(self):
        self._perform(
            constants.CommandOpcode.CREATE_PLAYER,
            self._player.identifier,
            self._player.name)
    def _pick_trump(self, trump):
        self._perform(constants.CommandOpcode.PICK_TRUMP, trump)
    def _play_card(self, card):
        self._perform(constants.CommandOpcode.PLAY_CARD, card.code)
    def _handle_new_proxy(self, proxy):
        self._gui.set_game(proxy)
    def __recv(self, transport, rx_packet):
        """
        Receive packet from server
        """
        if rx_packet.msg_type != constants.MessageType.NOTIF:
            log.warn("Cannot handle incoming message {}", rx_packet)
            return
        if rx_packet.opcode == constants.NotifOpcode.GAME_STATUS:
            proxy = game.from_args(rx_packet.args)
            self._handle_new_proxy(proxy)
    def stop(self):
        self._transport.stop()
|
[
"me@floriandenis.net"
] |
me@floriandenis.net
|
f41a4712e424658046a063682bfa65d6740679ef
|
c19165ac2929a937265f456c339bfdecb899ca15
|
/Solucion/Obstaculo.py
|
d0601c5eb137e01bbc841778eb6d48051b391644
|
[] |
no_license
|
kazp058/AlgoritmosGeneticos
|
41fd163d9e721ddb9259e97fcd9020567e6a85ae
|
c27773d0d33317a8f5b2296d7164207ac438e42f
|
refs/heads/master
| 2022-09-04T19:23:51.253673
| 2020-06-01T01:12:16
| 2020-06-01T01:12:16
| 268,134,725
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,236
|
py
|
import pygame
class Obstaculo(pygame.sprite.Sprite):
    def __init__(self, color, x, y, ancho, alto, izq):
        pygame.sprite.Sprite.__init__(self)
        self.ancho = ancho
        self.alto = alto
        self.izq = izq
        self.velocidad = 5
        self.superficie = pygame.Surface((self.ancho, self.alto), pygame.SRCALPHA)
        pygame.draw.circle(self.superficie, color, (ancho//2, alto//2), ancho//2)
        self.rect = pygame.Rect(x, y, self.ancho, self.alto)
    def update(self, bordes, poblacion):
        if self.izq:
            self.rect.x -= self.velocidad
        else:
            self.rect.x += self.velocidad
        self.colisionar(bordes, poblacion)
    def colisionar(self, bordes, poblacion):
        blocks_hit_list = pygame.sprite.spritecollide(self, bordes, False)
        if len(blocks_hit_list) > 0:
            if self.izq:
                self.rect.x = blocks_hit_list[0].rect.right
            else:
                self.rect.x = blocks_hit_list[0].rect.left - self.ancho
            self.izq = not self.izq
        blocks_hit_list = pygame.sprite.spritecollide(self, poblacion, False)
        for elem in blocks_hit_list:
            elem.muerto = True
|
[
"noreply@github.com"
] |
kazp058.noreply@github.com
|
c7487eb0e9a609d76bda84a0c104a6502dc38533
|
a8e66df1408a9c44b0771c6a6085d6d6540e138d
|
/bookclubapp/migrations/0003_auto_20180530_1757.py
|
ffd99fca39972a0fb6f9651e620e99b59ee92afd
|
[] |
no_license
|
Kiptim54/BookClub
|
6a1cabee174ce7286135ebd54b405f79851a02df
|
c0a28bc5a0d5a382da00cdd5b4cbbb431dc6991a
|
refs/heads/master
| 2020-03-19T01:18:51.419857
| 2018-06-04T14:50:16
| 2018-06-04T14:50:16
| 135,536,759
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 759
|
py
|
# Generated by Django 2.0 on 2018-05-30 14:57
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
        ('bookclubapp', '0002_auto_20180530_1730'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='comment',
            options={'ordering': ['-time_posted']},
        ),
        migrations.AlterModelOptions(
            name='review',
            options={'ordering': ['-time_posted']},
        ),
        migrations.RemoveField(
            model_name='groups',
            name='books',
        ),
        migrations.AddField(
            model_name='groups',
            name='books',
            field=models.ManyToManyField(to='bookclubapp.Books'),
        ),
    ]
|
[
"kiptim54@gmail.com"
] |
kiptim54@gmail.com
|
8f709f9485507eee939356065d8ac417c13155df
|
8f42a498f02abbfe05c8a99be45c4574f1eab8b4
|
/airline0/flights/migrations/0001_initial.py
|
e75f5c664b0914a1e886eec6e9f21af76d89f83e
|
[] |
no_license
|
Noshi96/AirlineDjango
|
b20528d720f919035fcfad719a34657155b1bb24
|
bd6a2fd954187d1a8bf463a6f5f873fdbdd2f0e8
|
refs/heads/master
| 2023-08-30T20:23:51.592659
| 2021-10-26T19:40:16
| 2021-10-26T19:40:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,141
|
py
|
# Generated by Django 3.2.7 on 2021-09-16 14:48
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Airport',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.CharField(max_length=4)),
                ('city', models.CharField(max_length=64)),
            ],
        ),
        migrations.CreateModel(
            name='Flight',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('duration', models.IntegerField()),
                ('destination', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='arrivals', to='flights.airport')),
                ('origin', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='departues', to='flights.airport')),
            ],
        ),
    ]
|
[
"2spawel2@gmail.com"
] |
2spawel2@gmail.com
|
27b40f018874059501cc285337c05b7abc54e896
|
3aec6f08678f13bf3878bce77007f74cfe637773
|
/versions/1.1.0/lib/node_modules/slc/node_modules/sls-sample-app/node_modules/loopback-connector-oracle/node_modules/oracle/build/config.gypi
|
9deb7aba9050bc108a69eb3a04eb5960439c7489
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
lulinqing/sls-cartridge
|
93a32a1f1035901cf2a6699988c8b28b46ddad27
|
a0d704f69ad313a05825aa2be360c1d0dc4e6c79
|
refs/heads/master
| 2021-01-13T02:08:09.340281
| 2013-11-03T14:32:26
| 2013-11-03T14:32:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,968
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 0,
"gcc_version": 44,
"host_arch": "x64",
"node_install_npm": "true",
"node_prefix": "",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_unsafe_optimizations": 0,
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_systemtap": "false",
"python": "/usr/bin/python",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "true",
"nodedir": "/root/.node-gyp/0.10.16",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"save_dev": "",
"browser": "",
"viewer": "man",
"rollback": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"shell": "/bin/bash",
"parseable": "",
"shrinkwrap": "true",
"userignorefile": "/root/.npmignore",
"cache_max": "null",
"init_author_email": "",
"sign_git_tag": "",
"ignore": "",
"long": "",
"registry": "https://registry.npmjs.org/",
"fetch_retries": "2",
"npat": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"cache_lock_retries": "10",
"fetch_retry_mintimeout": "10000",
"proprietary_attribs": "true",
"coverage": "",
"json": "",
"pre": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/root/.npm-init.js",
"userconfig": "/root/.npmrc",
"npaturl": "http://npat.npmjs.org/",
"node_version": "v0.10.16",
"user": "",
"editor": "vi",
"save": "",
"tag": "latest",
"global": "",
"optional": "true",
"username": "",
"bin_links": "true",
"force": "",
"searchopts": "",
"depth": "null",
"rebuild_bundle": "true",
"searchsort": "name",
"unicode": "true",
"yes": "",
"fetch_retry_maxtimeout": "60000",
"strict_ssl": "true",
"dev": "",
"fetch_retry_factor": "10",
"group": "",
"cache_lock_stale": "60000",
"version": "",
"cache_min": "10",
"cache": "/root/.npm",
"searchexclude": "",
"color": "true",
"save_optional": "",
"user_agent": "node/v0.10.16 linux x64",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"init_version": "0.0.0",
"umask": "18",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/root/tmp",
"unsafe_perm": "",
"link": "",
"prefix": "/usr/local"
}
}
|
[
"lulinqing@gmail.com"
] |
lulinqing@gmail.com
|
ef11ce0b204e77697cad987aa597887b7411b96f
|
bfaf76a40c9ec192aa53332f66460375e89eccfe
|
/profil3r/modules/email/email.py
|
0ae1cc031a3bc60325eef8e623956a017d5145b1
|
[
"MIT"
] |
permissive
|
utopikkad/Profil3r
|
2e8b50063ed9051feff93be0865c8c5057d22d85
|
c76e04f9da9122926ac2ac077ec54245c3283a67
|
refs/heads/main
| 2023-04-13T20:02:09.913294
| 2021-04-15T12:38:53
| 2021-04-15T12:38:53
| 358,276,341
| 0
| 0
|
MIT
| 2021-04-15T13:51:54
| 2021-04-15T13:51:45
| null |
UTF-8
|
Python
| false
| false
| 1,808
|
py
|
import time
import pwnedpasswords
from profil3r.colors import Colors
class Email:
    def __init__(self, config, permutations_list):
        # Have I Been Pwned API rate limit (1500 ms)
        self.delay = DELAY = config['plateform']['email']['rate_limit'] / 1000
        # The 20 most common email domains, you can add more if you wish (in the config.json file)
        # The more domains you add, the longer it takes, of course
        self.domains = config['plateform']['email']['domains']
        # {username}@{domain}
        self.format = config['plateform']['email']['format']
        # email addresses are not case sensitive
        self.permutations_list = [perm.lower() for perm in permutations_list]
    # Generate all potential addresses
    def possible_emails(self):
        possible_emails = []
        for domain in self.domains:
            for permutation in self.permutations_list:
                possible_emails.append(self.format.format(
                    permutation = permutation,
                    domain = domain
                ))
        return possible_emails
    # We use the Have I Been Pwned API to search for breached emails
    def search(self):
        emails_usernames = {
            "accounts": []
        }
        possible_emails_list = self.possible_emails()
        for possible_email in possible_emails_list:
            pwned = pwnedpasswords.check(possible_email)
            # Not breached email
            if not pwned:
                emails_usernames["accounts"].append({"value": possible_email, "breached": False})
            # Breached emails
            else:
                emails_usernames["accounts"].append({"value": possible_email, "breached": True})
            time.sleep(self.delay)
        return emails_usernames
|
[
"r0g3r5@protonmail.com"
] |
r0g3r5@protonmail.com
|
d9e1cc2298c99c803b828a8b40aa0eb55c015a49
|
fc7828cdb69f4c0fbd137d989607e6c4972406de
|
/teaching/jordan/assignments/a4/a4.py
|
e4348e4b1599ded900c8ac86e1d5e4f2214cb56a
|
[] |
no_license
|
KWMalik/organization
|
398167eb7b641df1bbc7f8c07d873baf77728440
|
977dcf68f9f07a36dd1cfbdeb907594b75a58ce4
|
refs/heads/master
| 2021-01-13T01:55:26.651655
| 2010-02-26T20:19:05
| 2010-02-26T20:19:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,053
|
py
|
class Empty:
    pass
class List:
    def __init__(self, head, tail):
        self.hd = head
        self.tl = tail
def List(L):
    if L == Empty:
        return Empty
    else:
        return List(L.hd + L.tl)
# Returns a List containing all natural numbers by which "n" is divisible.
# List begins from "m" and counts down until 2.
def divisors(m, n):
    if n % m - 1 == 0:
        return List(divisors(n/m - 1))
    else:
        pass
    if m <= 2:
        return "finished"
# Returns True if a given number is Prime.
def is_prime(n):
    def divisors(m, n):
        pass
# Will return items in a list up to "n".
def take(L, n):
    return List(L.hd*n), take(L.tl)
    if n >= L:
        return List
# Will return all numbers within a list, excluding those up to "n."
def drop(L, n):
    return L[:n]
# Will return a List excluding the "Nth" element.
# n >= 0
def exclude(L, n):
    return List and not (L.hd + n - 1)
|
[
"="
] |
=
|
028c708347c597b72970133746f8e13bd7df4c9f
|
eaba0e6a50db2ec435904e72b019b0fd1c2f4562
|
/rospy_markerutils/__init__.py
|
56ac5fe28b17cd5df92995ae480ccfebedc5d925
|
[
"BSD-3-Clause"
] |
permissive
|
dimatura/rospy_markerutils
|
bb1474b3d09993b828494eba7e5eb9bcfb6b1b77
|
0f8d8f1f397612fd1bb6ef8d176286a33c425ab5
|
refs/heads/master
| 2020-03-23T23:12:03.654289
| 2018-08-28T07:46:05
| 2018-08-28T07:46:05
| 142,221,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 635
|
py
|
#
# @author Daniel Maturana
# @year 2015
#
# @attention Copyright (c) 2015
# @attention Carnegie Mellon University
# @attention All rights reserved.
#
# @=
from make_markers import make_marker
from make_markers import make_cube_marker
from make_markers import MarkerWrapper
from make_markers import CubeMarker
from make_markers import RectangleMarker
from make_markers import ArrowMarker
from make_markers import WireframeBoxMarker
from make_markers import PathMarker
from make_markers import TextMarker
from interactive import make_6dof_marker
from interactive import InteractiveMarkerManager
from helper import MarkerHelper
|
[
"dimatura@cmu.edu"
] |
dimatura@cmu.edu
|
f7d5cf15a51e28c62772043a155ecd084d090450
|
ae2f576bdc72e0a7d7578ced35987b9be56aeaaa
|
/cowin_notifier.py
|
6bfba0ca2a0fade5f2fdec059623fb7020377c83
|
[] |
no_license
|
raghurao4/Cowin_Notifier
|
cbbc6da24f773c55b13bab35e4a099f98863b1a3
|
b7936e89c2c3726f0080e742d71bac1bed12b745
|
refs/heads/main
| 2023-04-28T11:12:54.488426
| 2021-05-24T11:06:51
| 2021-05-24T11:06:51
| 369,985,604
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,192
|
py
|
#!/usr/bin/python
import sys
import cowin_switcher
import cowin_utility
def main():
    text = '\n######################################################################'
    text = text + '\n# Hello, Welcome to CoWin query notification autobot by distId! #'
    text = text + '\n######################################################################\n\n'
    colored_text = cowin_utility.colored(0, 255, 255, text)
    print(colored_text)
    s = cowin_switcher.Switcher()
    choice = ''
    while choice != 'q':
        print(cowin_utility.colored(0, 0, 255, "\n[0] Enter 0 to set preference to Mail or Desktop notification."))
        print(cowin_utility.colored(0, 0, 255, "[1] Enter 1 to get state id list."))
        print(cowin_utility.colored(0, 0, 255, "[2] Enter 2 to get district id list."))
        print(cowin_utility.colored(0, 0, 255, "[3] Enter 3 to create cowin query by dist id."))
        print(cowin_utility.colored(0, 0, 255, "[4] Enter 4 to create cowin query by pincode."))
        print(cowin_utility.colored(0, 0, 255, "[5] Enter 5 to print current query."))
        print(cowin_utility.colored(0, 0, 255, "[6] Enter 6 to delete current query."))
        print(cowin_utility.colored(0, 0, 255, "[7] Enter 7 to run created queries until interrupted."))
        print(cowin_utility.colored(0, 0, 255, "[8] Enter 8 to update buffer sleep time between query, default is 60sec, min 30sec."))
        print(cowin_utility.colored(0, 0, 255, "[9] Enter 9 for help with less secure apps access on google account."))
        print(cowin_utility.colored(255, 0, 0, "[q] Enter q to quit gracefully ^.^"))
        print(cowin_utility.colored(255, 0, 0, "[Ctrl+c] Press Ctrl+c to Exit the application."))
        try:
            choice = input("\nWhat would you like to do? ")
            print(cowin_utility.colored(0, 255, 0, s.indirect(choice)))
        except KeyboardInterrupt:
            print(cowin_utility.colored(0, 255, 0, "\n\nKeyboard interrupt exception caught, Exiting ..\n\n"))
            sys.exit(0)
    print(cowin_utility.colored(0, 255, 255, '\nLoop ended. Thanks!\n'))
if __name__ == "__main__":
    main()
|
[
"noreply@github.com"
] |
raghurao4.noreply@github.com
|
340720e639b9ccf4197d1bb3b318ec99db33c1e1
|
c7d2c6fdf4911b92c36b6d33debeb5b7cbb2c9d8
|
/Pacote download/Desafios/90. Dicionário em Python.py
|
3e5a6fcb3fb2ef9d44a60ad5df473cf43855752a
|
[] |
no_license
|
luhpazos/Exercicios-Python
|
8a744f749796d1ac14a9c208c88bb21af0e3d0e2
|
c42d095f50b7fd7085dc11c15decb82df7db38be
|
refs/heads/main
| 2023-07-27T09:54:32.293220
| 2021-09-08T17:22:45
| 2021-09-08T17:22:45
| 404,376,989
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 375
|
py
|
dados = dict()
dados['nome'] = str(input('Nome:')).strip().capitalize()
dados['media'] = float(input(f'Média de {dados["nome"]}:'))
if dados['media'] >= 7:
    dados['situaçao'] = 'Aprovado'
elif dados['media'] <= 5:
    dados['situaçao'] = 'Reprovado'
else:
    dados['situaçao'] = 'Recuperação'
for k, v in dados.items():
    print(f"{k.capitalize()} é igual a {v}")
|
[
"pazosluh@gmail.com"
] |
pazosluh@gmail.com
|
72f1f4c207a2f65c6bf9049fa6b03b36df605ef9
|
2fd7686218d3a8aa86dbeabe17d63d35b05d2472
|
/tf_models/tpu/models/official/efficientnet/count_spikes.py
|
1c42c6ed31decfe8d3f9a0b4a2c9d8f005d3899e
|
[
"Apache-2.0"
] |
permissive
|
christophstoeckl/FS-neurons
|
42d14bf1e42ba4cf0e08db703de683687d2f04bb
|
899af4e2bdf915001ef8610373482679887a80c9
|
refs/heads/master
| 2023-04-13T21:07:30.727368
| 2021-04-12T07:21:12
| 2021-04-12T07:21:12
| 257,293,154
| 38
| 15
| null | 2022-12-08T11:22:53
| 2020-04-20T13:40:59
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 335
|
py
|
import numpy as np
import sys
if len(sys.argv) != 2:
    print("Usage: python3 count_spikes.py log_file_name.txt")
    quit()
with open(sys.argv[1], "r") as file:
    log = file.read()
log = log.split("\n")
a = []
for i, line in enumerate(log):
    if line.startswith("["):
        a.append(eval(line)[0])
print(np.sum(a) / 10000)
|
[
"christoph.stoeckl@hotmail.com"
] |
christoph.stoeckl@hotmail.com
|
1fc1c8485ec1ef2c4f1c0cbc330304f0c2838563
|
59628a6ddef19c6e7493eb2a9f669339dcb014ae
|
/demo/views.py
|
5fa41dbbb35cc5a59b1b96cd7e5fdfb3e47ea82c
|
[] |
no_license
|
tzangms/django-bootstrap-form-demo
|
e5efbf995bd59d18a9d57681dd29155bec661486
|
8e16b7f002a17b4caf426366ec3547f90531aea7
|
refs/heads/master
| 2021-01-22T22:53:45.779273
| 2013-12-08T16:05:13
| 2013-12-08T16:05:13
| 15,019,093
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 585
|
py
|
from django.shortcuts import render
from demo.forms import ExampleForm
def index(request):
    form = ExampleForm()
    return render(request, 'index.html', {'form': form})
def basic(request):
    form = ExampleForm(request.POST or None)
    if form.is_valid():
        return render(request, 'basic_done.html')
    return render(request, 'basic.html', {'form': form})
def horizontal(request):
    form = ExampleForm(request.POST or None)
    if form.is_valid():
        return render(request, 'basic_done.html')
    return render(request, 'horizontal.html', {'form': form})
|
[
"tzangms@gmail.com"
] |
tzangms@gmail.com
|
7d2f19c42b8c5c0ec4c1fc8ad6c8dc5d08d16533
|
854f9c1101772b30baa3999fa5461b62e75f1754
|
/cobiv.py
|
04057ace17d8f6580f88f5ade735ef2ec14dd6fd
|
[
"MIT"
] |
permissive
|
gokudomatic/cobiv
|
fd27aa46be8389909827eee5fcb838142c3511d6
|
c095eda704fab319fccc04d43d8099f1e8327734
|
refs/heads/master
| 2020-04-05T10:14:35.361695
| 2018-04-11T16:46:04
| 2018-04-11T16:46:04
| 81,542,555
| 4
| 1
| null | 2017-10-01T20:18:38
| 2017-02-10T08:10:10
|
Python
|
UTF-8
|
Python
| false
| false
| 78
|
py
|
from cobiv.MainApp import Cobiv
if __name__ == '__main__':
    Cobiv().run()
|
[
"gourry.gabrief@gmail.com"
] |
gourry.gabrief@gmail.com
|
7752a9dec6ae1571706fd1b98c1f9d017c4ad33e
|
e8a9719b1af70cd1269d85b5c05faebf50b9997e
|
/codes/balanced_bracket
|
b6776801f1f75fc850757ff786e650ddeb5c287b
|
[] |
no_license
|
nirushanayak/datastructure_and_algorithms_code
|
2d8df881e88d4fd734292e7d8927eb2d2f7b233c
|
561d19006f063cf8f4cb0b89863150c2e30c6e75
|
refs/heads/master
| 2023-02-13T04:36:50.503000
| 2021-01-04T07:32:08
| 2021-01-04T07:32:08
| 326,603,051
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 613
|
#!/bin/python3
import math
import os
import random
import re
import sys
from queue import LifoQueue
# Complete the isBalanced function below.
def isBalanced(s):
    open_bra = ['{', '(', '[']
    closed_bra = ['}', ')', ']']
    stack = LifoQueue()
    for i in range(len(s)):
        if s[i] in open_bra:
            stack.put(s[i])
        elif s[i] in closed_bra:
            top = stack.get()
            if(open_bra.index(top) != closed_bra.index(s[i])):
                return "NO"
    if stack.empty():
        return "YES"
    else:
        return "NO"
if __name__ == '__main__':
    result = isBalanced('[{}]')
|
[
"nirunaya@cisco.com"
] |
nirunaya@cisco.com
|
|
225174a0152774e11e4a07f205ae153687681cfa
|
055d5a4dccb2296a73039ae1301dd3f442a590c3
|
/Course/admin.py
|
060722a8d45c6e599d83c885a55518415ed8dbce
|
[] |
no_license
|
IMRAN104/LeadManager
|
aa677cd6ae7ea6881b6dec504ca90c88c627e841
|
1d01a2c55542a162210626b382435a9a71cd11c7
|
refs/heads/master
| 2021-09-28T01:39:32.717616
| 2020-03-25T15:57:25
| 2020-03-25T15:57:25
| 250,023,646
| 0
| 0
| null | 2021-09-22T18:56:48
| 2020-03-25T15:56:34
|
Python
|
UTF-8
|
Python
| false
| false
| 291
|
py
|
from django.contrib import admin
from .models import Course, Step
# Register your models here.
class StepInline(admin.StackedInline):
    model = Step
class CourseAdmin(admin.ModelAdmin):
    inlines = [StepInline, ]
admin.site.register(Course, CourseAdmin)
admin.site.register(Step)
|
[
"omeca13@gmail.com"
] |
omeca13@gmail.com
|
a77ac77ef1eec43199e15ab11683f52bd42110f3
|
6860411d0dc3acea9994e288f7a524bac2be4894
|
/1/1c.py
|
fcd8baa3ebc7c28b2736c601d4efd38d19a82e11
|
[] |
no_license
|
mario-laserna/Apps.co-Python
|
3a2abdeb05c38bb961105b79e3594cb36a94e31b
|
d1adaf0559844f443312f210344139440bc77b88
|
refs/heads/master
| 2016-09-06T03:26:56.853441
| 2013-03-28T22:07:23
| 2013-03-28T22:07:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 312
|
py
|
# math operators
print(3+4+2)
print(3-4-2)
print(3*4)
print(25/4)
print(25%4)
# logical operators
print (5 < 4)
print (5 < 6)
print (5 <= 5)
# comparison expressions
print(3 == 4)
print(2 == 2)
print('a' == 'a')
print('a' == 'aa')
print(6 != 7)
print(1 == 1 and 2 == 3)
print(1 == 1 or 2 == 3)
|
[
"laserna.mario@gmail.com"
] |
laserna.mario@gmail.com
|
b533c2d9b13257193f37d8f1f24ff20f8efe3e97
|
1cbc5e586beaf110edbab505d0d87d7cfac5628b
|
/Allen-Cahn equation.py
|
6752c4267fc00eaf53f8bb4f5b434ce67dee1a7a
|
[] |
no_license
|
lightunifly/repeat-the-result
|
1f14233eb6d337a95a57a2868d7bf89c1ba5e8fa
|
98dcc45b4211ee774dde3be3e30bd70fd6494e6a
|
refs/heads/master
| 2020-07-05T13:47:03.656561
| 2019-08-16T06:20:16
| 2019-08-16T06:20:16
| 202,664,789
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,373
|
py
|
import tensorflow as tf
import numpy as np
dims=100
T=0.3
N=20
deltaT=T/N
std=0.0  # standard deviation
x=tf.zeros([dims],dtype=tf.float64)
u0=tf.Variable(1.0,dtype=tf.float64)
deltau0=tf.Variable(tf.random_normal([dims],stddev=std,dtype=tf.float64))
W1=tf.Variable(tf.random_normal([N-1,dims,dims+10],stddev=std,dtype=tf.float64))
W2=tf.Variable(tf.random_normal([N-1,dims+10,dims+10],stddev=std,dtype=tf.float64))
W3=tf.Variable(tf.random_normal([N-1,dims+10,dims],stddev=std,dtype=tf.float64))
B=tf.Variable(tf.random_normal([N-1,dims],stddev=std,dtype=tf.float64))
batch_size=64
deltaW=tf.random_normal([batch_size,N,dims],stddev=np.sqrt(0.3/20),dtype=tf.float64)
# Time stepping: an Euler-Maruyama update of u for the Allen-Cahn nonlinearity,
# followed by a per-step three-layer network (weights W1[n], W2[n], W3[n],
# bias B[n]) that produces the gradient estimate deltau used at the next step.
u = u0
deltau = deltau0
for n in range(N):
    u = u - (u - u**3.0)*deltaT + np.sqrt(2.0)*tf.reduce_sum(tf.multiply(deltau, deltaW[:, n, :]), axis=1)
    x = x + np.sqrt(2.0)*deltaW[:, n, :]
    if n < N - 1:  # no network after the final step
        xx = tf.reshape(x, [batch_size, dims])
        y1 = tf.nn.relu(tf.layers.batch_normalization(tf.matmul(xx, W1[n]), axis=-1, training=True))
        y2 = tf.nn.relu(tf.layers.batch_normalization(tf.matmul(y1, W2[n]), axis=-1, training=True))
        deltau = tf.matmul(y2, W3[n]) + B[n]
u20 = u
y_=1.0/(2.0+0.4*tf.reduce_sum(tf.square(x),axis=1))
cost=tf.losses.mean_squared_error(u20,y_)
optimizer=tf.train.AdamOptimizer(0.005).minimize(cost)
init=tf.global_variables_initializer()
sess=tf.Session()
sess.run(init)
loss=np.array([])
ans=np.array([])
for i in range(2000):
    sess.run(optimizer)
    ans = np.append(ans, sess.run(u0))
    loss = np.append(loss, sess.run(cost))
    print(i, sess.run(u0))
|
[
"noreply@github.com"
] |
lightunifly.noreply@github.com
|
0ab7a1eaccd4af1af23d2d07510fced51c3cbd2a
|
41fd80f9ccc72a17c2db16b7019312a87d3181e8
|
/zhang_local/pdep/network4029_1.py
|
73ffd608834052497471ddced44db683940ef5a5
|
[] |
no_license
|
aberdeendinius/n-heptane
|
1510e6704d87283043357aec36317fdb4a2a0c34
|
1806622607f74495477ef3fd772908d94cff04d9
|
refs/heads/master
| 2020-05-26T02:06:49.084015
| 2019-07-01T15:12:44
| 2019-07-01T15:12:44
| 188,069,618
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 35,122
|
py
|
species(
label = '[CH]C(=C)O[C]=O(17928)',
structure = SMILES('[CH]C(=C)O[C]=O'),
E0 = (231.142,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,350,440,435,1725,1855,455,950,253.416,253.814,253.894,253.897,254.577],'cm^-1')),
HinderedRotor(inertia=(1.09053,'amu*angstrom^2'), symmetry=1, barrier=(49.7265,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(1.08901,'amu*angstrom^2'), symmetry=1, barrier=(49.7281,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(1.09646,'amu*angstrom^2'), symmetry=1, barrier=(49.7263,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 4,
opticalIsomers = 1,
molecularWeight = (83.0654,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.70558,0.0505719,-4.64063e-05,2.24562e-08,-4.48387e-12,27882.5,19.1046], Tmin=(100,'K'), Tmax=(1181.08,'K')), NASAPolynomial(coeffs=[10.1716,0.0218995,-9.99155e-06,1.90157e-09,-1.33026e-13,25882.7,-23.148], Tmin=(1181.08,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(231.142,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(195.39,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-O2d)(Cds-Cd)) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsOs) + group(Cds-CdsHH) + group(Cds-OdOsH) + radical((O)CJOC) + radical(AllylJ2_triplet)"""),
)
species(
label = 'O=C=O(1731)',
structure = SMILES('O=C=O'),
E0 = (-403.087,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([459.923,1087.69,1087.69,2296.71],'cm^-1')),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (44.0095,'amu'),
collisionModel = TransportData(shapeIndex=1, epsilon=(2028.74,'J/mol'), sigma=(3.763,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(2.65,'angstroms^3'), rotrelaxcollnum=2.1, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.27862,0.0027414,7.16119e-06,-1.08033e-08,4.14308e-12,-48470.3,5.97933], Tmin=(100,'K'), Tmax=(988.876,'K')), NASAPolynomial(coeffs=[4.54605,0.0029192,-1.15488e-06,2.27663e-10,-1.70918e-14,-48980.3,-1.43251], Tmin=(988.876,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-403.087,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(62.3585,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cdd-OdOd)"""),
)
species(
label = 'C#C[CH2](17441)',
structure = SMILES('C#C[CH2]'),
E0 = (328.481,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3000,3100,440,815,1455,1000,2175,525,1131.03,1132.16,1135.9],'cm^-1')),
HinderedRotor(inertia=(0.154206,'amu*angstrom^2'), symmetry=1, barrier=(3.5455,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (39.0559,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(2095.25,'J/mol'), sigma=(4.76,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=1.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.32026,0.0108736,8.62061e-06,-1.82973e-08,7.68649e-12,39535.3,8.27851], Tmin=(100,'K'), Tmax=(960.555,'K')), NASAPolynomial(coeffs=[6.38511,0.00814486,-2.78734e-06,4.95348e-10,-3.50148e-14,38483.6,-8.79383], Tmin=(960.555,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(328.481,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(128.874,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CtHHH) + group(Ct-CtCs) + group(Ct-CtH) + radical(Propargyl)"""),
)
species(
label = '[CH]C1([CH2])OC1=O(19601)',
structure = SMILES('[CH]C1([CH2])OC1=O'),
E0 = (411.268,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (83.0654,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.55069,0.0431752,-1.70979e-05,-2.14333e-08,1.51444e-11,49562.2,18.2809], Tmin=(100,'K'), Tmax=(913.918,'K')), NASAPolynomial(coeffs=[17.0031,0.00400308,4.85122e-07,-1.86498e-10,1.18364e-14,45549.3,-61.3793], Tmin=(913.918,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(411.268,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(199.547,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-Cs(Cds-O2d)) + group(Cs-(Cds-O2d)CsCsOs) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-OdCsOs) + ring(cyclopropanone) + radical(CCJ2_triplet) + radical(CJC(C)OC)"""),
)
species(
label = '[CH][C]=C(18825)',
structure = SMILES('[CH][C]=C'),
E0 = (614.65,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,1685,370,228.264,228.889,229.07],'cm^-1')),
HinderedRotor(inertia=(1.35219,'amu*angstrom^2'), symmetry=1, barrier=(50.6528,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 4,
opticalIsomers = 1,
molecularWeight = (39.0559,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.27541,0.0127954,9.49515e-06,-1.56026e-08,5.42938e-12,73954,11.3502], Tmin=(100,'K'), Tmax=(1063.31,'K')), NASAPolynomial(coeffs=[4.18965,0.0168435,-6.77763e-06,1.22218e-09,-8.33556e-14,73336.3,4.89309], Tmin=(1063.31,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(614.65,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(128.874,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Cds_S) + radical(AllylJ2_triplet)"""),
)
species(
label = '[CH]C(=[CH])OC=O(19602)',
structure = SMILES('[CH]C(=[CH])OC=O'),
E0 = (281.79,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2782.5,750,1395,475,1775,1000,350,440,435,1725,3120,650,792.5,1650,322.533,322.534,322.534,322.54],'cm^-1')),
HinderedRotor(inertia=(0.674457,'amu*angstrom^2'), symmetry=1, barrier=(49.7881,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.674454,'amu*angstrom^2'), symmetry=1, barrier=(49.7881,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.674455,'amu*angstrom^2'), symmetry=1, barrier=(49.7881,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 4,
opticalIsomers = 1,
molecularWeight = (83.0654,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.76281,0.0472576,-3.81268e-05,1.54296e-08,-2.55087e-12,33973.4,19.9803], Tmin=(100,'K'), Tmax=(1407.06,'K')), NASAPolynomial(coeffs=[11.2805,0.0202001,-9.28174e-06,1.76259e-09,-1.22541e-13,31295,-29.1875], Tmin=(1407.06,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(281.79,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(195.39,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-O2d)(Cds-Cd)) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsOs) + group(Cds-CdsHH) + group(Cds-OdOsH) + radical(AllylJ2_triplet) + radical(Cds_P)"""),
)
species(
label = '[O][C]=O(2059)',
structure = SMILES('[O][C]=O'),
E0 = (33.3014,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1855,455,950],'cm^-1')),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (44.0095,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.81048,-0.00025715,1.76446e-05,-2.38747e-08,9.15883e-12,4016.03,8.55818], Tmin=(100,'K'), Tmax=(975.962,'K')), NASAPolynomial(coeffs=[6.50409,-1.44217e-05,-6.90664e-08,7.0435e-11,-9.1126e-15,2952.93,-7.12421], Tmin=(975.962,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(33.3014,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-O2d)H) + group(Cds-OdOsH) + radical(OJC=O) + radical((O)CJOH)"""),
)
species(
label = 'H(8)',
structure = SMILES('[H]'),
E0 = (211.805,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (1.00794,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1205.6,'J/mol'), sigma=(2.05,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,9.24385e-15,-1.3678e-17,6.66185e-21,-1.00107e-24,25474.2,-0.444973], Tmin=(100,'K'), Tmax=(3459.6,'K')), NASAPolynomial(coeffs=[2.5,9.20456e-12,-3.58608e-15,6.15199e-19,-3.92042e-23,25474.2,-0.444973], Tmin=(3459.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(211.805,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""H""", comment="""Thermo library: primaryThermoLibrary"""),
)
species(
label = '[CH]C(=[CH])O[C]=O(19603)',
structure = SMILES('[CH]C(=[CH])O[C]=O'),
E0 = (478.238,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3120,650,792.5,1650,350,440,435,1725,1855,455,950,180,180,180,180],'cm^-1')),
HinderedRotor(inertia=(2.1616,'amu*angstrom^2'), symmetry=1, barrier=(49.6993,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(2.16097,'amu*angstrom^2'), symmetry=1, barrier=(49.685,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(2.15569,'amu*angstrom^2'), symmetry=1, barrier=(49.5636,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 5,
opticalIsomers = 1,
molecularWeight = (82.0575,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.65503,0.0537334,-6.14597e-05,3.7411e-08,-9.26639e-12,57601.4,19.0027], Tmin=(100,'K'), Tmax=(973.301,'K')), NASAPolynomial(coeffs=[9.85542,0.0200329,-9.5238e-06,1.83822e-09,-1.29497e-13,56005.1,-20.3377], Tmin=(973.301,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(478.238,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(170.447,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-O2d)(Cds-Cd)) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsOs) + group(Cds-CdsHH) + group(Cds-OdOsH) + radical(AllylJ2_triplet) + radical((O)CJOC) + radical(Cds_P)"""),
)
species(
label = '[CH][C]1CC(=O)O1(19591)',
structure = SMILES('[CH][C]1CC(=O)O1'),
E0 = (315.543,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (83.0654,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.56875,0.0233919,1.50877e-05,-3.89869e-08,1.80102e-11,38010.1,20.3515], Tmin=(100,'K'), Tmax=(906.688,'K')), NASAPolynomial(coeffs=[9.74141,0.0131061,-3.22915e-06,4.60835e-10,-3.00357e-14,35831.6,-18.391], Tmin=(906.688,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(315.543,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(203.705,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-Cs(Cds-O2d)) + group(Cs-CsCsOsH) + group(Cs-(Cds-O2d)CsHH) + group(Cs-CsHHH) + group(Cds-OdCsOs) + ring(Beta-Propiolactone) + radical(CCJ2_triplet) + radical(C2CsJOC(O))"""),
)
species(
label = '[C-]#[O+](374)',
structure = SMILES('[C-]#[O+]'),
E0 = (299.89,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([180],'cm^-1')),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (28.0101,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.33667,0.00896487,-2.66756e-05,3.61071e-08,-1.57199e-11,36069.2,-1.20266], Tmin=(100,'K'), Tmax=(865.594,'K')), NASAPolynomial(coeffs=[-0.394107,0.0117562,-6.47408e-06,1.26375e-09,-8.67562e-14,37256.3,19.3844], Tmin=(865.594,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(299.89,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(37.4151,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(CsJ2_singlet-CsH)"""),
)
species(
label = '[CH]C(=C)[O](9170)',
structure = SMILES('[CH]C(=C)[O]'),
E0 = (300.054,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,350,440,435,1725,331.346,331.402,331.725,331.956],'cm^-1')),
HinderedRotor(inertia=(0.651703,'amu*angstrom^2'), symmetry=1, barrier=(50.8394,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 4,
opticalIsomers = 1,
molecularWeight = (55.0553,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.68812,0.0228014,4.60461e-06,-2.33052e-08,1.11762e-11,36141,14.5418], Tmin=(100,'K'), Tmax=(929.513,'K')), NASAPolynomial(coeffs=[8.56019,0.0128616,-4.09323e-06,6.75825e-10,-4.57393e-14,34387.1,-16.9206], Tmin=(929.513,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(300.054,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(153.818,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-Cd)H) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsOs) + group(Cds-CdsHH) + radical(AllylJ2_triplet) + radical(C=C(C)OJ)"""),
)
species(
label = '[C]=O(1149)',
structure = SMILES('[C]=O'),
E0 = (440.031,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([4000],'cm^-1')),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (28.0101,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[4.66064,-0.00539267,9.3647e-06,-6.04676e-09,1.10218e-12,52863.3,2.60381], Tmin=(100,'K'), Tmax=(2084.48,'K')), NASAPolynomial(coeffs=[9.43361,-0.00191483,-2.23152e-06,5.70335e-10,-4.024e-14,48128.1,-30.5142], Tmin=(2084.48,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(440.031,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(37.4151,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cds-OdHH) + radical(CdCdJ2_triplet)"""),
)
species(
label = 'CH2(T)(28)',
structure = SMILES('[CH2]'),
E0 = (381.37,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1066.91,2790.99,3622.37],'cm^-1')),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (14.0266,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[4.01192,-0.000154979,3.26298e-06,-2.40422e-09,5.69497e-13,45867.7,0.5332], Tmin=(100,'K'), Tmax=(1104.58,'K')), NASAPolynomial(coeffs=[3.14983,0.00296674,-9.76056e-07,1.54115e-10,-9.50338e-15,46058.1,4.77808], Tmin=(1104.58,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(381.37,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label="""CH2(T)""", comment="""Thermo library: primaryThermoLibrary"""),
)
species(
label = 'C#CO[C]=O(5349)',
structure = SMILES('C#CO[C]=O'),
E0 = (131.211,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([750,770,3400,2100,2175,525,1855,455,950,180],'cm^-1')),
HinderedRotor(inertia=(1.83092,'amu*angstrom^2'), symmetry=1, barrier=(42.0964,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(1.8282,'amu*angstrom^2'), symmetry=1, barrier=(42.0339,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (69.0388,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.31179,0.0326921,-3.23757e-05,1.50433e-08,-2.70077e-12,15845.5,12.7313], Tmin=(100,'K'), Tmax=(1361.98,'K')), NASAPolynomial(coeffs=[11.4718,0.00579012,-2.74751e-06,5.40776e-10,-3.87437e-14,13350.3,-34.2901], Tmin=(1361.98,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(131.211,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(124.717,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-Cd)(Cds-Cd)) + group(Cds-OdOsH) + group(Ct-CtOs) + group(Ct-CtH) + radical((O)CJOC)"""),
)
species(
label = 'O=[C]OC1=CC1(19604)',
structure = SMILES('O=[C]OC1=CC1'),
E0 = (128.985,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (83.0654,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.73551,0.0427185,-3.18624e-05,6.25935e-09,1.59846e-12,15601,17.4763], Tmin=(100,'K'), Tmax=(1036.41,'K')), NASAPolynomial(coeffs=[13.5178,0.0104609,-4.30358e-06,8.3603e-10,-6.12252e-14,12449,-43.2123], Tmin=(1036.41,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(128.985,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(199.547,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-O2d)(Cds-Cd)) + group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cds-CdsCsOs) + group(Cds-CdsCsH) + group(Cds-OdOsH) + ring(Cyclopropene) + radical((O)CJOC)"""),
)
species(
label = '[CH]=C1CC(=O)O1(19605)',
structure = SMILES('[CH]=C1CC(=O)O1'),
E0 = (2.30512,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (83.0654,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.26118,0.0226626,3.60229e-05,-7.13234e-08,3.12331e-11,354.191,18.6105], Tmin=(100,'K'), Tmax=(934.734,'K')), NASAPolynomial(coeffs=[15.536,0.00557223,-2.86267e-07,2.94975e-11,-8.27935e-15,-3862.57,-53.8181], Tmin=(934.734,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(2.30512,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(207.862,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-O2d)(Cds-Cd)) + group(Cs-(Cds-O2d)(Cds-Cds)HH) + group(Cds-CdsCsOs) + group(Cds-OdCsOs) + group(Cds-CdsHH) + ring(4-Methylene-2-oxetanone) + radical(Cds_P)"""),
)
species(
label = 'C=C1[CH]C(=O)O1(19606)',
structure = SMILES('C=C1[CH]C(=O)O1'),
E0 = (-127.874,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (83.0654,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.11743,0.0230835,4.49843e-05,-8.49572e-08,3.68107e-11,-15295.1,15.9225], Tmin=(100,'K'), Tmax=(937.096,'K')), NASAPolynomial(coeffs=[17.4167,0.0044006,2.6209e-07,-4.94605e-11,-4.94814e-15,-20209.6,-67.8162], Tmin=(937.096,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-127.874,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(207.862,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-O2d)(Cds-Cd)) + group(Cs-(Cds-O2d)(Cds-Cds)HH) + group(Cds-CdsCsOs) + group(Cds-OdCsOs) + group(Cds-CdsHH) + ring(4-Methylene-2-oxetanone) + radical(C=CCJCO)"""),
)
species(
label = '[CH]=[C]O[C]=O(5351)',
structure = SMILES('[CH]=[C]O[C]=O'),
E0 = (389.089,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1685,370,1855,455,950,3120,650,792.5,1650,180],'cm^-1')),
HinderedRotor(inertia=(1.29047,'amu*angstrom^2'), symmetry=1, barrier=(29.6704,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(1.28863,'amu*angstrom^2'), symmetry=1, barrier=(29.628,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 4,
opticalIsomers = 1,
molecularWeight = (69.0388,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.3273,0.038823,-5.66659e-05,4.13181e-08,-1.18076e-11,46855.1,17.2948], Tmin=(100,'K'), Tmax=(860.907,'K')), NASAPolynomial(coeffs=[8.78148,0.00883684,-4.42245e-06,8.64197e-10,-6.08282e-14,45743.7,-12.8766], Tmin=(860.907,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(389.089,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(124.717,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-O2d)(Cds-Cd)) + group(Cds-CdsOsH) + group(Cds-CdsHH) + group(Cds-OdOsH) + radical((O)CJOC) + radical(C=CJO) + radical(Cds_P)"""),
)
species(
label = '[C]C(=C)O[C]=O(19607)',
structure = SMILES('[C]C(=C)O[C]=O'),
E0 = (529.935,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,350,440,435,1725,1855,455,950,180,180,180],'cm^-1')),
HinderedRotor(inertia=(1.23851,'amu*angstrom^2'), symmetry=1, barrier=(28.4757,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(1.23683,'amu*angstrom^2'), symmetry=1, barrier=(28.4371,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 5,
opticalIsomers = 1,
molecularWeight = (82.0575,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.63705,0.0528991,-6.7204e-05,4.15722e-08,-1.00424e-11,63820.9,16.6989], Tmin=(100,'K'), Tmax=(1015.13,'K')), NASAPolynomial(coeffs=[12.2149,0.0112183,-5.61452e-06,1.12455e-09,-8.12294e-14,61673.3,-34.492], Tmin=(1015.13,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(529.935,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(174.604,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-O2d)(Cds-Cd)) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsOs) + group(Cds-CdsHH) + group(Cds-OdOsH) + radical(CJ3) + radical((O)CJOC)"""),
)
species(
label = 'N2',
structure = SMILES('N#N'),
E0 = (-8.64289,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (28.0135,'amu'),
collisionModel = TransportData(shapeIndex=1, epsilon=(810.913,'J/mol'), sigma=(3.621,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(1.76,'angstroms^3'), rotrelaxcollnum=4.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.53101,-0.000123661,-5.02999e-07,2.43531e-09,-1.40881e-12,-1046.98,2.96747], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.95258,0.0013969,-4.92632e-07,7.8601e-11,-4.60755e-15,-923.949,5.87189], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-8.64289,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(37.4151,'J/(mol*K)'), label="""N2""", comment="""Thermo library: primaryThermoLibrary"""),
)
species(
label = 'Ne',
structure = SMILES('[Ne]'),
E0 = (-6.19738,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (20.1797,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1235.53,'J/mol'), sigma=(3.758e-10,'m'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with fixed Lennard Jones Parameters. This is the fallback method! Try improving transport databases!"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-6.19738,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""Ne""", comment="""Thermo library: primaryThermoLibrary"""),
)
species(
label = 'He',
structure = SMILES('[He]'),
E0 = (-6.19738,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (4.0026,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(84.8076,'J/mol'), sigma=(2.576,'angstroms'), dipoleMoment=(0,'De'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""NOx2018"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,0.928724], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,0.928724], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-6.19738,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""He""", comment="""Thermo library: primaryThermoLibrary"""),
)
species(
label = 'Ar',
structure = SMILES('[Ar]'),
E0 = (-6.19738,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (39.348,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1134.93,'J/mol'), sigma=(3.33,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,4.37967], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,4.37967], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-6.19738,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""Ar""", comment="""Thermo library: primaryThermoLibrary"""),
)
transitionState(
label = 'TS1',
E0 = (231.142,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS2',
E0 = (411.268,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS3',
E0 = (241.446,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS4',
E0 = (326.098,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS5',
E0 = (647.951,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS6',
E0 = (690.501,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS7',
E0 = (357.378,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS8',
E0 = (612.497,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS9',
E0 = (745.252,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS10',
E0 = (529.351,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS11',
E0 = (239.231,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS12',
E0 = (239.426,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS13',
E0 = (239.426,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS14',
E0 = (804.775,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS15',
E0 = (741.74,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
reaction(
label = 'reaction1',
reactants = ['[CH]C(=C)O[C]=O(17928)'],
products = ['O=C=O(1731)', 'C#C[CH2](17441)'],
transitionState = 'TS1',
kinetics = Arrhenius(A=(5e+12,'s^-1'), n=0, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Exact match found for rate rule [RJJ]
Euclidian distance = 0
family: 1,4_Linear_birad_scission"""),
)
reaction(
label = 'reaction2',
reactants = ['[CH]C(=C)O[C]=O(17928)'],
products = ['[CH]C1([CH2])OC1=O(19601)'],
transitionState = 'TS2',
kinetics = Arrhenius(A=(3.33596e+11,'s^-1'), n=-0.0500183, Ea=(180.126,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4_S_D;doublebond_intra_2H;radadd_intra] for rate rule [R4_S_D;doublebond_intra_2H;radadd_intra_CO]
Euclidian distance = 1.0
family: Intra_R_Add_Exocyclic
Ea raised from 178.6 to 180.1 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction3',
reactants = ['O=C=O(1731)', '[CH][C]=C(18825)'],
products = ['[CH]C(=C)O[C]=O(17928)'],
transitionState = 'TS3',
kinetics = Arrhenius(A=(23.3993,'m^3/(mol*s)'), n=2.021, Ea=(29.883,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Od_R;YJ] for rate rule [Od_Cdd-O2d;CJ]
Euclidian distance = 2.2360679775
Multiplied by reaction path degeneracy 2.0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction4',
reactants = ['[CH]C(=[CH])OC=O(19602)'],
products = ['[CH]C(=C)O[C]=O(17928)'],
transitionState = 'TS4',
kinetics = Arrhenius(A=(74200,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4H_DSS;Cd_rad_out_singleH;XH_out] for rate rule [R4H_DSS;Cd_rad_out_singleH;CO_H_out]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction5',
reactants = ['[O][C]=O(2059)', '[CH][C]=C(18825)'],
products = ['[CH]C(=C)O[C]=O(17928)'],
transitionState = 'TS5',
kinetics = Arrhenius(A=(3.9578e+07,'m^3/(mol*s)'), n=-0.126319, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Y_rad;Y_rad]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: R_Recombination
Ea raised from -15.6 to -15.6 kJ/mol.
Ea raised from -15.6 to 0 kJ/mol."""),
)
reaction(
label = 'reaction6',
reactants = ['H(8)', '[CH]C(=[CH])O[C]=O(19603)'],
products = ['[CH]C(=C)O[C]=O(17928)'],
transitionState = 'TS6',
kinetics = Arrhenius(A=(1.15742e+08,'m^3/(mol*s)'), n=0.0433333, Ea=(0.458029,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Cd_rad;H_rad]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: R_Recombination"""),
)
reaction(
label = 'reaction7',
reactants = ['[CH]C(=C)O[C]=O(17928)'],
products = ['[CH][C]1CC(=O)O1(19591)'],
transitionState = 'TS7',
kinetics = Arrhenius(A=(1.03419e+08,'s^-1'), n=1.06803, Ea=(126.236,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4_S_D;doublebond_intra;radadd_intra] for rate rule [R4_S_D;doublebond_intra;radadd_intra_CO]
Euclidian distance = 1.0
family: Intra_R_Add_Endocyclic"""),
)
reaction(
label = 'reaction8',
reactants = ['[C-]#[O+](374)', '[CH]C(=C)[O](9170)'],
products = ['[CH]C(=C)O[C]=O(17928)'],
transitionState = 'TS8',
kinetics = Arrhenius(A=(3.41e+07,'cm^3/(mol*s)'), n=0, Ea=(12.552,'kJ/mol'), T0=(1,'K'), Tmin=(250,'K'), Tmax=(2500,'K'), comment="""Estimated using template [COm;O_sec_rad] for rate rule [COm;O_rad/OneDe]
Euclidian distance = 1.0
family: R_Addition_COm"""),
)
reaction(
label = 'reaction9',
reactants = ['[C]=O(1149)', '[CH]C(=C)[O](9170)'],
products = ['[CH]C(=C)O[C]=O(17928)'],
transitionState = 'TS9',
kinetics = Arrhenius(A=(43.5839,'m^3/(mol*s)'), n=1.88017, Ea=(5.1666,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [O_sec_rad;Birad] for rate rule [O_rad/OneDe;Birad]
Euclidian distance = 1.0
family: Birad_R_Recombination"""),
)
reaction(
label = 'reaction10',
reactants = ['CH2(T)(28)', 'C#CO[C]=O(5349)'],
products = ['[CH]C(=C)O[C]=O(17928)'],
transitionState = 'TS10',
kinetics = Arrhenius(A=(72.9469,'m^3/(mol*s)'), n=1.66457, Ea=(16.77,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Ct_Ct;YJ] for rate rule [Ct-O_Ct;CH2_triplet]
Euclidian distance = 2.2360679775
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction11',
reactants = ['[CH]C(=C)O[C]=O(17928)'],
products = ['O=[C]OC1=CC1(19604)'],
transitionState = 'TS11',
kinetics = Arrhenius(A=(7.38971e+10,'s^-1'), n=0.0476667, Ea=(8.08907,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Rn;C_rad_out_2H;Ypri_rad_out] for rate rule [R3_SD;C_rad_out_2H;CdsinglepriH_rad_out]
Euclidian distance = 2.82842712475
family: Birad_recombination"""),
)
reaction(
label = 'reaction12',
reactants = ['[CH]C(=C)O[C]=O(17928)'],
products = ['[CH]=C1CC(=O)O1(19605)'],
transitionState = 'TS12',
kinetics = Arrhenius(A=(1.62e+12,'s^-1'), n=-0.305, Ea=(8.28432,'kJ/mol'), T0=(1,'K'), Tmin=(600,'K'), Tmax=(2000,'K'), comment="""Estimated using an average for rate rule [R4_SSS;Y_rad_out;Cpri_rad_out_2H]
Euclidian distance = 0
family: Birad_recombination"""),
)
reaction(
label = 'reaction13',
reactants = ['[CH]C(=C)O[C]=O(17928)'],
products = ['C=C1[CH]C(=O)O1(19606)'],
transitionState = 'TS13',
kinetics = Arrhenius(A=(1.62e+12,'s^-1'), n=-0.305, Ea=(8.28432,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4;Y_rad_out;Ypri_rad_out] for rate rule [R4_SSD;Y_rad_out;CdsinglepriH_rad_out]
Euclidian distance = 2.2360679775
family: Birad_recombination"""),
)
reaction(
label = 'reaction14',
reactants = ['CH2(T)(28)', '[CH]=[C]O[C]=O(5351)'],
products = ['[CH]C(=C)O[C]=O(17928)'],
transitionState = 'TS14',
kinetics = Arrhenius(A=(1.14854e+06,'m^3/(mol*s)'), n=0.575199, Ea=(34.3157,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Y_rad;Birad]
Euclidian distance = 0
family: Birad_R_Recombination"""),
)
reaction(
label = 'reaction15',
reactants = ['H(8)', '[C]C(=C)O[C]=O(19607)'],
products = ['[CH]C(=C)O[C]=O(17928)'],
transitionState = 'TS15',
kinetics = Arrhenius(A=(1e+07,'m^3/(mol*s)'), n=0, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [H_rad;Birad]
Euclidian distance = 0
family: Birad_R_Recombination"""),
)
network(
label = '4029',
isomers = [
'[CH]C(=C)O[C]=O(17928)',
],
reactants = [
('O=C=O(1731)', 'C#C[CH2](17441)'),
],
bathGas = {
'N2': 0.25,
'Ne': 0.25,
'He': 0.25,
'Ar': 0.25,
},
)
pressureDependence(
label = '4029',
Tmin = (1200,'K'),
Tmax = (1500,'K'),
Tcount = 10,
Tlist = ([1201.48,1213.22,1236.21,1269.31,1310.55,1356.92,1404.16,1447.02,1479.84,1497.7],'K'),
Pmin = (1,'atm'),
Pmax = (10,'atm'),
Pcount = 10,
Plist = ([1.02771,1.14872,1.41959,1.89986,2.67608,3.83649,5.40396,7.23219,8.93758,9.98989],'bar'),
maximumGrainSize = (0.5,'kcal/mol'),
minimumGrainCount = 250,
method = 'modified strong collision',
interpolationModel = ('Chebyshev', 6, 4),
activeKRotor = True,
activeJRotor = True,
rmgmode = True,
)
|
[
"dinius.ab@husky.neu.edu"
] |
dinius.ab@husky.neu.edu
|
f0a4e25f5f04b19c48edef0b28dd0e78b0ea160e
|
c254fa15d9ac2a7e94bd1bba406a55f84db3a363
|
/ChaosClient/Tools/GM/depend/setuptools-20.1.1/setuptools/tests/test_test.py
|
a1d49ecff23651297772b20f46cd6a681ec5d5e9
|
[] |
no_license
|
atom-chen/ChaosGame
|
a1e7589ac084bba40f0d8cd937681a50c16a2f29
|
ec12208475435696e52c045ffd2f6bdfda45f275
|
refs/heads/master
| 2021-03-04T02:10:55.613286
| 2016-12-08T03:31:24
| 2016-12-08T03:31:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,465
|
py
|
# -*- coding: UTF-8 -*-
from __future__ import unicode_literals
import os
import site
from distutils.errors import DistutilsError
import pytest
from setuptools.command.test import test
from setuptools.dist import Distribution
from .textwrap import DALS
from . import contexts
SETUP_PY = DALS("""
from setuptools import setup
setup(name='foo',
packages=['name', 'name.space', 'name.space.tests'],
namespace_packages=['name'],
test_suite='name.space.tests.test_suite',
)
""")
NS_INIT = DALS("""
# -*- coding: Latin-1 -*-
# Söme Arbiträry Ünicode to test Distribute Issüé 310
try:
__import__('pkg_resources').declare_namespace(__name__)
except ImportError:
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
""")
TEST_PY = DALS("""
import unittest
class TestTest(unittest.TestCase):
def test_test(self):
print "Foo" # Should fail under Python 3 unless 2to3 is used
test_suite = unittest.makeSuite(TestTest)
""")
@pytest.fixture
def sample_test(tmpdir_cwd):
os.makedirs('name/space/tests')
# setup.py
with open('setup.py', 'wt') as f:
f.write(SETUP_PY)
# name/__init__.py
with open('name/__init__.py', 'wb') as f:
f.write(NS_INIT.encode('Latin-1'))
# name/space/__init__.py
with open('name/space/__init__.py', 'wt') as f:
f.write('#empty\n')
# name/space/tests/__init__.py
with open('name/space/tests/__init__.py', 'wt') as f:
f.write(TEST_PY)
@pytest.mark.skipif('hasattr(sys, "real_prefix")')
@pytest.mark.usefixtures('user_override')
@pytest.mark.usefixtures('sample_test')
class TestTestTest:
def test_test(self):
params = dict(
name='foo',
packages=['name', 'name.space', 'name.space.tests'],
namespace_packages=['name'],
test_suite='name.space.tests.test_suite',
use_2to3=True,
)
dist = Distribution(params)
dist.script_name = 'setup.py'
cmd = test(dist)
cmd.user = 1
cmd.ensure_finalized()
cmd.install_dir = site.USER_SITE
cmd.user = 1
with contexts.quiet():
# The test runner calls sys.exit
with contexts.suppress_exceptions(SystemExit):
cmd.run()
|
[
"evence_chen@live.com"
] |
evence_chen@live.com
|
906c192f7f18cafdb3c34e483501bbca940f8490
|
fb2cc597f319380d228fc15c4008760a82203687
|
/var/spack/repos/builtin/packages/r-scdblfinder/package.py
|
2dcd41c2578611979e0248ab7443c65d58f20a1a
|
[
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"LGPL-2.1-only"
] |
permissive
|
JayjeetAtGithub/spack
|
c41b5debcbe139abb2eab626210505b7f930d637
|
6c2df00443a2cd092446c7d84431ae37e64e4296
|
refs/heads/develop
| 2023-03-21T02:35:58.391230
| 2022-10-08T22:57:45
| 2022-10-08T22:57:45
| 205,764,532
| 0
| 0
|
MIT
| 2019-09-02T02:44:48
| 2019-09-02T02:44:47
| null |
UTF-8
|
Python
| false
| false
| 1,967
|
py
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class RScdblfinder(RPackage):
"""The scDblFinder package gathers various methods for the detection and
handling of doublets/multiplets in single-cell sequencing data (i.e.
multiple cells captured within the same droplet or reaction volume). It
includes methods formerly found in the scran package, the new fast and
comprehensive scDblFinder method, and a reimplementation of the Amulet
detection method for single-cell ATAC-seq."""
bioc = "scDblFinder"
version("1.10.0", commit="03512cad0cdfe3cddbef66ec5e330b53661eccfc")
depends_on("r@4.0:", type=("build", "run"))
depends_on("r-igraph", type=("build", "run"))
depends_on("r-matrix", type=("build", "run"))
depends_on("r-biocgenerics", type=("build", "run"))
depends_on("r-biocparallel", type=("build", "run"))
depends_on("r-biocneighbors", type=("build", "run"))
depends_on("r-biocsingular", type=("build", "run"))
depends_on("r-s4vectors", type=("build", "run"))
depends_on("r-summarizedexperiment", type=("build", "run"))
depends_on("r-singlecellexperiment", type=("build", "run"))
depends_on("r-scran", type=("build", "run"))
depends_on("r-scater", type=("build", "run"))
depends_on("r-scuttle", type=("build", "run"))
depends_on("r-bluster", type=("build", "run"))
depends_on("r-delayedarray", type=("build", "run"))
depends_on("r-xgboost", type=("build", "run"))
depends_on("r-mass", type=("build", "run"))
depends_on("r-iranges", type=("build", "run"))
depends_on("r-genomicranges", type=("build", "run"))
depends_on("r-genomeinfodb", type=("build", "run"))
depends_on("r-rsamtools", type=("build", "run"))
depends_on("r-rtracklayer", type=("build", "run"))
|
[
"noreply@github.com"
] |
JayjeetAtGithub.noreply@github.com
|
e0584cb81912975614de8c2f2686e676763337e5
|
4bd53a28fdd810d66b2742606728e2adc9e421e5
|
/opengl_render.py
|
7c313aafbe993cd4874c600dacbc511f0fef8d7b
|
[] |
no_license
|
jake32321/OpenGL_Python_Render
|
12fd9235d1f04f0728ae830a50a391122cc8e988
|
547cee39976aca1907e4ebca1058c656512be768
|
refs/heads/master
| 2021-01-10T10:35:42.755722
| 2015-11-07T03:13:25
| 2015-11-07T03:13:25
| 45,586,905
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,355
|
py
|
import pygame
from pygame.locals import *
from OpenGL.GL import *
from OpenGL.GLU import *
vertices = ( #Diffrent Nodes for vertices given in three dimensional coordinates
(1, -1, -1),
(1, 1, -1),
(-1, 1, -1),
(-1, -1, -1),
(1, -1, 1),
(1, 1, 1),
(-1, -1, 1),
(-1, 1, 1)
)
edges = ( #Nodes for the twelve edges of the cube to be rendered
(0, 1),
(0, 3),
(0, 4),
(2, 1),
(2, 3),
(2, 7),
(6, 3),
(6, 4),
(6, 7),
(5, 1),
(5, 4),
(5, 7)
)
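#Each pair above indexes into vertices; e.g. edge (0, 1) joins (1, -1, -1) and (1, 1, -1)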
surfaces = ( #Nodes for the various surfaces to be rendered
(0, 1, 2, 3),
(3, 2, 7, 6),
(6, 7, 5, 4),
(4, 5, 1, 0),
(1, 5, 7, 2),
(4, 0, 3, 6)
)
colors = ( #Sets a list of colors
(1, 0, 0),
(0, 1, 0),
(0, 0, 1),
(0, 0, 0),
(1, 1, 1),
(0, 1, 1),
(1, 0, 0),
(0, 1, 0),
(0, 0, 1),
(0, 0, 0),
(1, 1, 1),
(0, 1, 1),
)
def Cube():
    glBegin(GL_QUADS) #Draws the cube's surfaces as quads
for surface in surfaces:
x = 0
for vertex in surface:
x+=1
glColor3fv(colors[x])
glVertex3fv(vertices[vertex])
glEnd()
glBegin(GL_LINES) #Draws lines between the nodes
for edge in edges:
for vertex in edge:
glVertex3fv(vertices[vertex])
glEnd()
def main():
pygame.init()
display = (800, 600)
pygame.display.set_mode(display, DOUBLEBUF|OPENGL) #Makes working with your monitor's refresh rate more friendly using DOUBLEBUF
gluPerspective(45, (display[0]/display[1]), 0.1, 50.0) #Sets FOV, aspect ratio and clipping plane
glTranslatef(0.0, 0.0, -10) #Moves about the object
glRotatef(0, 0, 0, 0) # Rotates object
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
                pygame.quit()
quit()
if event.type == pygame.KEYDOWN: #Moves object in 3D plane
if event.key == pygame.K_LEFT: #Translate left
glTranslatef(-0.1,0,0)
if event.key == pygame.K_RIGHT: #Translate right
glTranslatef(0.1,0,0)
if event.key == pygame.K_UP: #Translate up
glTranslatef(0,0.1,0)
if event.key == pygame.K_DOWN: #Translate down
glTranslatef(0,-0.1,0)
                if event.key == pygame.K_w: #w is the key for panning in
glTranslatef(0,0,-0.1)
                if event.key == pygame.K_s: #s is the key for panning out
glTranslatef(0,0,0.1)
#glRotatef(1, 3, 1, 1) #Rotates the cube
glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT) #What needs to be cleared
Cube()
pygame.display.flip()
pygame.time.wait(10)
main()
|
[
"jake32321@yahoo.com"
] |
jake32321@yahoo.com
|
e6e26aecc3f4fb7442bb069ffb237b2bfda27146
|
4bcc9806152542ab43fc2cf47c499424f200896c
|
/tensorflow/python/autograph/operators/variables_test.py
|
59ff90120f0d71246433a4830682d9d8ded100d5
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] |
permissive
|
tensorflow/tensorflow
|
906276dbafcc70a941026aa5dc50425ef71ee282
|
a7f3934a67900720af3d3b15389551483bee50b8
|
refs/heads/master
| 2023-08-25T04:24:41.611870
| 2023-08-25T04:06:24
| 2023-08-25T04:14:08
| 45,717,250
| 208,740
| 109,943
|
Apache-2.0
| 2023-09-14T20:55:50
| 2015-11-07T01:19:20
|
C++
|
UTF-8
|
Python
| false
| false
| 1,864
|
py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for python_lang_utils module."""
from tensorflow.python.autograph.operators import variables
from tensorflow.python.platform import test
class SpecialValuesTest(test.TestCase):
def test_undefined(self):
undefined_symbol = variables.Undefined('name')
undefined_symbol2 = variables.Undefined('name')
self.assertEqual(undefined_symbol.symbol_name, 'name')
self.assertEqual(undefined_symbol2.symbol_name, 'name')
self.assertNotEqual(undefined_symbol, undefined_symbol2)
def test_undefined_operations(self):
undefined_symbol = variables.Undefined('name')
self.assertIsInstance(undefined_symbol.foo, variables.Undefined)
self.assertIsInstance(undefined_symbol[0], variables.Undefined)
self.assertNotIsInstance(undefined_symbol.__class__, variables.Undefined)
def test_read(self):
self.assertEqual(variables.ld(1), 1)
o = object()
self.assertEqual(variables.ld(o), o)
self.assertIsNone(variables.ld(None))
def test_read_undefined(self):
with self.assertRaisesRegex(UnboundLocalError, 'used before assignment'):
variables.ld(variables.Undefined('a'))
if __name__ == '__main__':
test.main()
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
ed818f77be185c01325b67e695e6679b9e3c3c1e
|
1f3f0dc8799dac1e7974b1b05211c2bb863db787
|
/junki/Chapter1/train-unigram.py
|
21c1f543c1bbf93d6530d0689905eed5bb87da60
|
[] |
no_license
|
m-note/100knock2015
|
665bb27bc84a0eacaa795523b5e65a5b64c426ac
|
84cd1d0617b0b5c15f64e593dd2e0ae21a4dcef7
|
refs/heads/master
| 2021-01-18T19:41:54.111994
| 2015-07-28T16:15:53
| 2015-07-28T16:15:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 344
|
py
|
#!/usr/bin/python
#-*-coding:utf-8-*-
import sys
my_dict = {}
total_count = 0
for line in open(sys.argv[1], "r"):
    words = line.strip().split(" ")
    words.append("</s>")
    for word in words:
        my_dict[word] = my_dict.get(word, 0) + 1
        total_count += 1
for word, count in my_dict.items():
    probability = float(count) / total_count
    print "%s %f" % (word, probability)
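# Sample output for a hypothetical one-line corpus "a b a":
#   a 0.500000
#   b 0.250000
#   </s> 0.250000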
|
[
"junkimatsuo@Junki-no-MacBook-Air.local"
] |
junkimatsuo@Junki-no-MacBook-Air.local
|
92a24ce38c35186254b552e74be00a5c60872181
|
c7036e204574380adea3d8ea8c0e7a92a1dbc8e6
|
/utils/Utils/MyRangeList.py
|
c806d151804aa26bae34be76d01d101b0fc562d7
|
[] |
no_license
|
thegisexpert/someutilities
|
32a8b0dbd93f41584260652270fc88f52d205f28
|
18b4d1424fc5aa73ea4f6694f48c786b081bddf0
|
refs/heads/master
| 2020-12-11T13:28:17.873038
| 2020-01-14T14:56:16
| 2020-01-14T14:56:16
| 233,861,084
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,204
|
py
|
#https://docs.qgis.org/testing/pdf/en/QGIS-testing-PyQGISDeveloperCookbook-en.pdf p 35
from qgis.core import *
from PyQt4 import QtGui  # QtGui.QColor is used below (QGIS 2.x API)
myVectorLayer = QgsVectorLayer(myVectorPath, myName, 'ogr')  # myVectorPath and myName must be defined first
myTargetField = 'target_field'
myRangeList = []
myOpacity = 1
# Make our first symbol and range...
myMin = 0.0
myMax = 50.0
myLabel = 'Group 1'
myColour = QtGui.QColor('#ffee00')
mySymbol1 = QgsSymbolV2.defaultSymbol(myVectorLayer.geometryType())
mySymbol1.setColor(myColour)
mySymbol1.setAlpha(myOpacity)
myRange1 = QgsRendererRangeV2(myMin, myMax, mySymbol1, myLabel)
myRangeList.append(myRange1)
#now make another symbol and range...
myMin = 50.1
myMax = 100
myLabel = 'Group 2'
myColour = QtGui.QColor('#00eeff')
mySymbol2 = QgsSymbolV2.defaultSymbol(
myVectorLayer.geometryType())
mySymbol2.setColor(myColour)
mySymbol2.setAlpha(myOpacity)
myRange2 = QgsRendererRangeV2(myMin, myMax, mySymbol2, myLabel)
myRangeList.append(myRange2)
myRenderer = QgsGraduatedSymbolRendererV2('', myRangeList)
myRenderer.setMode(QgsGraduatedSymbolRendererV2.EqualInterval)
myRenderer.setClassAttribute(myTargetField)
myVectorLayer.setRendererV2(myRenderer)
QgsMapLayerRegistry.instance().addMapLayer(myVectorLayer)
|
[
"noreply@github.com"
] |
thegisexpert.noreply@github.com
|
1c1c6ec77538e78dcf7f601fc87c8e36bbed9efb
|
4bccab04a435e4fe42e5a5fa8f053d18d7f9567c
|
/and/xgb_online.py
|
816c43cbf102405367de5079659f131bc411b433
|
[] |
no_license
|
thousandface/jdd_c
|
224ac90cbb8a819633a009196b35c4ae37c86d3d
|
cf28941075b6d303767384d7093c9005e2f86f3d
|
refs/heads/master
| 2021-05-11T01:51:30.644317
| 2018-01-21T14:21:30
| 2018-01-21T14:21:30
| 118,341,510
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,389
|
py
|
import xgboost
import numpy as np
import pandas as pd
train = pd.read_csv('../newdata/online_train_1126.csv')
test = pd.read_csv('../newdata/online_test_1126.csv')
orig = ['history_sum_mean', 'history_sum_median', 'history_sum_max', 'history_sum_min']
train = train.drop('dt',axis = 1)
test = test.drop('dt', axis = 1)
feature = [x for x in train.columns if x not in ['label','shop_id']]
# manual z-score normalization of each feature column
for col in train[feature].columns:
train[col] = (train[col] - train[col].mean()) / train[col].std(ddof=0)
for col in test[feature].columns:
test[col] = (test[col] - test[col].mean()) / test[col].std(ddof=0)
xgbTrain = xgboost.DMatrix(train[feature], label=train['label'])
xgbVal = xgboost.DMatrix(test[feature])
def fair_obj(preds, dtrain):
"""y = c * abs(x) - c * np.log(abs(abs(x) + c))"""
x = preds - dtrain.get_label()
c = 100000
den = abs(x) + c
grad = (c * x) / den
hess = (c * c) / (den * den)
return grad, hess
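# A quick sanity check of the custom objective above (math only, no new API):
# for the fair loss L(x) = c^2 * (|x|/c - log(1 + |x|/c)),
#   dL/dx   = c*x / (|x| + c)
#   d2L/dx2 = c^2 / (|x| + c)^2
# which is exactly the (grad, hess) pair that fair_obj returns.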
param = {}
param['eta'] = 0.01
param['max_depth'] = 2
param['min_child_weight'] = 4
param['subsample'] = 0.8
param['colsample_bytree'] = 0.3
param['silent'] = 1
num_round = 123
model = xgboost.train(param, xgbTrain, num_round, obj=fair_obj)
preds = model.predict(xgbVal)
preds = pd.DataFrame(preds)
result = pd.concat([test['shop_id'], preds], axis=1)
result.to_csv('../newdata/result1126_6.csv', index=False,header=None)
|
[
"thousandface@yahoo.com"
] |
thousandface@yahoo.com
|
8005abbe7fa52a78761f3f422c67487cd25bbc66
|
80216241654e9ecc368aee1c7465622753b053b5
|
/demo_运动传感器.py
|
a993e6819ce482ff0dd8e36aa6a838fc2e7f379c
|
[] |
no_license
|
tanglang1990/blog
|
e5b76b918e61ee62d16093703410fd58f668033f
|
4cab27ecefcf0d72988c3f20e0c209fbf101be2f
|
refs/heads/master
| 2020-06-23T09:33:51.478728
| 2019-10-31T06:11:07
| 2019-10-31T06:11:07
| 170,971,128
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,477
|
py
|
import datetime
import cv2 # pip install opencv-python
import numpy as np
import easygui
from wx_notice import send_msg
camera = cv2.VideoCapture(0)  # open the camera with cv2; 0 is the default camera
if not camera.isOpened():
    easygui.msgbox('please turn on your camera or switch to a functional one')
'''
Get a structuring element for the morphological dilation below; MORPH_ELLIPSE is an ellipse.
'''
es = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 4))
# kernel = np.ones((5, 5), np.uint8)  # alternative array kernel
background = None  # background starts as None; the first frame is captured inside the loop
has_sended_msg = False  # send the notification only once
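# Per-frame pipeline (summary): grayscale -> Gaussian blur -> absdiff against the
# first (background) frame -> threshold -> dilate -> findContours -> box large blobs.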
while True:  # run the capture loop forever
    has_some_one_in = False  # motion-detection flag for this frame
    grabbed, frame_lwpCV = camera.read()  # grab the current frame from the camera
    '''
    Convert to a grayscale (single-channel) image.
    A typical image has three channels (RGB) with no necessary relation between
    them; processing three channels costs more computation and more complex code
    than a single channel, so grayscale is preferred here.
    '''
    # print(frame_lwpCV)
    gray_lwpCV = cv2.cvtColor(frame_lwpCV, cv2.COLOR_BGR2GRAY)  # to grayscale (single channel)
# print(gray_lwpCV)
    '''
    Frame differencing compares frames, and noise also differs between frames, so
    apply a Gaussian blur first. A Gaussian blur is a low-pass filter: it removes
    the high-frequency part of the image, i.e. the noise.
    Arguments: the grayscale image, a (25, 25) Gaussian kernel, and sigma = 3
    (sigma controls the spread of the Gaussian and thus how strongly it smooths).
    '''
    gray_lwpCV = cv2.GaussianBlur(gray_lwpCV, (25, 25), 3)  # Gaussian blur to remove noise
    '''
    Use the first frame as the background image for the differencing, e.g. the
    moment the camera switches on in an otherwise static scene (a museum, say).
    '''
    # use the first frame captured by the camera as the background image
if background is None:
background = gray_lwpCV
continue
    '''
    cv2.absdiff: a digital image is made of many pixels, each with a numeric
    value; this method takes the per-pixel difference between the background
    frame and the current frame and returns its absolute value.
    '''
    # apply frame differencing
diff = cv2.absdiff(background, gray_lwpCV)
    '''
    Apply a threshold: it decides how much difference counts as "different".
    Pixels whose difference exceeds 50 are treated as changed and shown as 255 (white).
    '''
diff = cv2.threshold(diff, 50, 255, cv2.THRESH_BINARY)[1]
    '''
    Differencing is done; now morphological dilation. Dilation grows the white
    (changed) region slightly beyond the moving object, so small, fragmented
    differences are merged into one connected blob.
    '''
diff = cv2.dilate(diff, es, iterations=3)
    '''
    Contours (separate objects). findContours, a core OpenCV method, finds how
    many connected objects there are in the image.
    RETR_EXTERNAL      # external contours only
    CHAIN_APPROX_SIMPLE # compress contour segments
    '''
    # use findContours to find all connected objects in the image
image, contours, hierarchy = cv2.findContours(diff.copy(),
cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
    for c in contours:
        if cv2.contourArea(c) < 8000:  # ignore objects that are too small (a thrown pen, say); a head is big enough to show
            continue
        (x, y, w, h) = cv2.boundingRect(c)  # top-left corner of the contour plus its width and height
        # draw a box on the frame: top-left, bottom-right, color, line thickness
        cv2.rectangle(frame_lwpCV, (x, y), (x + w, y + h), (0, 255, 0), 1)
        has_some_one_in = True
    # the main detection logic is now complete
    # next comes the notification
if has_some_one_in and not has_sended_msg:
send_msg()
has_sended_msg = True
cv2.imshow('contours', frame_lwpCV)
cv2.imshow('dis', diff)
    key = cv2.waitKey(1) & 0xFF  # mask the key code to the low byte
if key == ord('q'):
break
camera.release()
cv2.destroyAllWindows()
|
[
"noreply@github.com"
] |
tanglang1990.noreply@github.com
|
10348f2b2d3cb371fbf0cc7dfddd63b0dbc6ae3d
|
697e75455b17370abfe1c1af577dba5e6cea174d
|
/backend/test_sketch_15062/urls.py
|
1f80daaca68da0867f5ed4d6062700ac36b9b594
|
[] |
no_license
|
crowdbotics-apps/test-sketch-15062
|
5eda58eb4ab04a3e75d5428c8d58bede86e69043
|
1c6331195bbe17d76218a52e184a9c6c5088e245
|
refs/heads/master
| 2023-01-30T21:38:38.475884
| 2020-03-25T16:41:50
| 2020-03-25T16:41:50
| 250,034,521
| 0
| 0
| null | 2023-01-26T16:17:52
| 2020-03-25T16:41:01
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,932
|
py
|
"""test_sketch_15062 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "Test Sketch"
admin.site.site_title = "Test Sketch Admin Portal"
admin.site.index_title = "Test Sketch Admin"
# swagger
schema_view = get_schema_view(
openapi.Info(
title="Test Sketch API",
default_version="v1",
description="API documentation for Test Sketch App",
),
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
2c13944446f7380430dd943b1632885da99a8784
|
e9b923f661c770c56136f59dbed786cd46924664
|
/crafted-pkt/tls-handshake-fragments.py
|
ad35dfeabc6e647ef920167cd5123efdb5bfc12b
|
[] |
no_license
|
Lekensteyn/wireshark-notes
|
47852566358e27778be83980a6d4219fe063e036
|
278ca860093c3a1c5c9d9171e8c2532f94b20902
|
refs/heads/master
| 2023-03-15T22:50:24.131652
| 2023-03-11T15:20:24
| 2023-03-11T15:26:29
| 227,219,034
| 5
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,553
|
py
|
#!/usr/bin/env python3
import argparse
import random
from scapy.all import *
# msg_type: 1 (Client Hello)
# length: 47
# client_version: 1.2
# random: 32 bytes
# session_id: empty
# cipher_suite[1]: 0x002F (TLS_RSA_WITH_AES_128_CBC_SHA)
# compression_method[1]: null
# extensions[1]: 0xAAAA (GREASE) with two values (will be used as identifier)
clientHelloMsg = bytes([
0x01,
0x00, 0x00, 0x31,
0x03, 0x03,
]) + 32 * b'3' + bytes([
0x00,
0x00, 0x02, 0x00, 0x2f,
0x01, 0x00,
0x00, 0x06, 0xaa, 0xaa, 0x00, 0x02,
0x00, 0x00
])
assert len(clientHelloMsg) == 53
clientHelloMsgBase = clientHelloMsg[:-2]
def CH(num : int):
'''Returns a Client Hello message with some identifier.'''
return clientHelloMsgBase + num.to_bytes(2, 'big')
def TLSRecord(data):
# Handshake (22), TLSv1.0
return b'\x16\x03\x01' + len(data).to_bytes(2, 'big') + data
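# For illustration: TLSRecord(b'\x01\x02') == b'\x16\x03\x01\x00\x02\x01\x02'
# (content type 22 = handshake, record version 3.1, two length bytes, payload).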
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int)
parser.add_argument('--count', type=int, default=256, help='Streams count')
parser.add_argument('output_file')
args = parser.parse_args()
if args.seed is not None:
random.seed(args.seed)
# Pick a number of messages per stream such that at least the case is triggered
# where a record contains the end of a message, a full message and the start of
# another message. A lot more than three per record will likely not be useful
# since it does not trigger reassembly.
hsPerStream = 10
maxRecordSize = len(clientHelloMsg) * 4
packets = []
for i in range(args.count):
hs = b''.join(CH(hsPerStream * i + j + 1) for j in range(hsPerStream))
seq = 0x1000
records = b''
# Fragment handshake message over TLS records.
while hs:
# Does not matter that n > maxRecordSize, it is capped anyway.
n = random.randint(1, maxRecordSize)
recordData, hs = hs[:n], hs[n:]
records += TLSRecord(recordData)
# Fragment TLS records over TCP segments.
while records:
n = random.randint(1, maxRecordSize)
seg, records = records[:n], records[n:]
pkt = IP()/TCP(flags='A', seq=seq, sport=0xc000 + i, dport=443)/seg
packets.append(pkt)
seq += len(seg)
wrpcap(args.output_file, packets)
r"""
Test:
tshark -r hs-frag.pcapng -Tfields -Y tls.handshake.extension.data -e tls.handshake.extension.data | tr , '\n'
Expected result: for a given number of streams (--count), expect the hexadecimal numbers 0001 up
to and including 10*count. E.g. for --count=10 the output should match:
printf '%004x\n' {1..100}
"""
|
[
"peter@lekensteyn.nl"
] |
peter@lekensteyn.nl
|
32b75511f5f67bc0830183000def055d8e2a9796
|
4a71d8deae938c4b0b551419d3604e3e3f71b347
|
/Heroes.py
|
613b857820237861a9d2d2b74881b725a3f42776
|
[] |
no_license
|
zankrus/rpg2.0
|
041ff57822adbc005815a1f14a31b3c05bbaf51f
|
3449a5d3af68edcb374c9fa080b3e415bec9dda6
|
refs/heads/master
| 2022-11-15T16:27:53.544870
| 2020-07-01T12:35:22
| 2020-07-01T12:35:22
| 274,862,746
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,039
|
py
|
from abc import ABC, abstractmethod
import random
import Items
class Hero(ABC):
"""Abstract hero class"""
name = ''
hp = 30
weapons_list = {'sword': 10}
summary_arrows = 2
monster_dead = 0
save_hp = None
save_weapons_list = None
save_summary_arrows = 0
save_monster_dead = 0
game_saved = False
    def take_weapon(self, weapon):
        """take a new weapon"""
        while True:
            choice = input('Enter 1 to pick up the sword '
                           'or 2 to walk past: ')
            if choice == '1':
                new_weapons_list = self.weapons_list
                new_weapons_list[weapon[0]] = weapon[1]
                self.weapons_list = new_weapons_list
                break
            elif choice == '2':
                print('You walked past')
                break
            else:
                print('Invalid input. Enter 1 or 2')
def heal(self, healing_points: int):
"""restore some hp"""
self.hp = self.hp + healing_points
    def hero_attack(self, weapon_list=None):
        # avoid a shared mutable default: fall back to the instance's weapons at call time
        if weapon_list is None:
            weapon_list = self.weapons_list
        temp = {}
        for i in range(len(weapon_list)):
            print('Press {0} to choose {1}'.format(i + 1, list(weapon_list)[i]))
            temp[i + 1] = list(weapon_list)[i]
        choice = input('Enter the weapon number: ')
        while True:
            if 0 < int(choice) < len(temp) + 1:
                if temp[int(choice)] == 'bow':
                    if self.summary_arrows > 0:
                        self.summary_arrows -= 1
                        print('You chose {}'.format(temp[int(choice)]))
                        print('You have {} arrows left'.format(self.summary_arrows))
                        print(weapon_list[temp[int(choice)]])
                        return weapon_list[temp[int(choice)]]
                    else:
                        print('You have no arrows')
                        choice = input('Enter the weapon number: ')
                        continue
                print('You chose {}'.format(temp[int(choice)]))
                print(weapon_list[temp[int(choice)]])
                return weapon_list[temp[int(choice)]]
            else:
                choice = input('Enter the weapon number: ')
def save_game(self):
self.game_saved = True
self.save_hp = self.hp
self.save_monster_dead = self.monster_dead
self.save_weapons_list = self.weapons_list
self.save_summary_arrows = self.summary_arrows
def load_game(self):
self.hp = self.save_hp
self.monster_dead = self.save_monster_dead
self.weapons_list = self.save_weapons_list
self.summary_arrows = self.save_summary_arrows
    @abstractmethod
    def special_class_skill(self, enemy_attack):
        pass
class Warrior(Hero):
"""Warrior class"""
    def special_class_skill(self, enemy_attack: tuple):
        if enemy_attack[1] == 'melee':
            luck = random.choice([True, False])
            if luck:
                print('You blocked the melee attack')
                return 0
            else:
                print('You failed to block the attack')
                return enemy_attack[0]
        else:
            print('The special ability is inactive against this enemy')
            return enemy_attack[0]
class Archer(Hero):
"""Archer class"""
    def special_class_skill(self, enemy_attack: tuple):
        if enemy_attack[1] == 'range':
            luck = random.choice([True, False])
            if luck:
                print('You blocked the ranged attack')
                return 0
            else:
                print('You failed to block the attack')
                return enemy_attack[0]
        else:
            print('The special ability is inactive against this enemy')
            return enemy_attack[0]
class Mage(Hero):
"""mage class"""
    def special_class_skill(self, enemy_attack: tuple):
        if enemy_attack[1] == 'spell':
            luck = random.choice([True, False])
            if luck:
                print('You blocked the magic attack')
                return 0
            else:
                print('You failed to block the attack')
                return enemy_attack[0]
        else:
            print('The special ability is inactive against this enemy')
            return enemy_attack[0]
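# Minimal usage sketch (values assumed for illustration): an enemy attack is a
# (damage, kind) tuple, so a Warrior blocks melee damage about half the time.
# hero = Warrior()
# damage_taken = hero.special_class_skill((7, 'melee'))  # returns 0 or 7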
|
[
"sealthepirate@gmail.com"
] |
sealthepirate@gmail.com
|
90784936f134554fe7973fa9a4916265586e31d6
|
a2dd34c194d53af9aeba7ffc43054a4443fa3485
|
/style_stripper/model/content_panel.py
|
3c3c70fca07dbeb70ab3c76a3665ade81aacad83
|
[] |
no_license
|
gre7g/style-stripper
|
c8ba544e9d43df1bdabd0fafe97312533860300c
|
07c76a22e96d1b34f1c5aa135f94121ce5613814
|
refs/heads/master
| 2022-11-16T19:16:23.652160
| 2022-11-15T01:07:59
| 2022-11-15T01:07:59
| 206,849,502
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 894
|
py
|
import wx
from style_stripper.data.enums import PanelType
try:
from style_stripper.model.main_app import StyleStripperApp
except ImportError:
StyleStripperApp = None
class ContentPanel(wx.Panel):
app: StyleStripperApp
PANEL_TYPE: PanelType
def __init__(self, *args, **kwargs):
super(ContentPanel, self).__init__(*args, **kwargs)
self.app = wx.GetApp()
def is_current_panel(self) -> bool:
return self.PANEL_TYPE == self.app.book.current_panel
def refresh_contents(self):
"""Move contents from Book to UI"""
self.Show(self.is_current_panel())
def apply(self):
pass
def grab_contents(self):
"""Move contents from UI to Book"""
pass
def new_dimensions(self):
"""Adapt to the current window size"""
pass
def book_loaded(self, is_loaded: bool = True):
pass
|
[
"greg.luterman@synapsewireless.com"
] |
greg.luterman@synapsewireless.com
|
558e2450afbfe1fed19b1389fbb7a74f64d381df
|
5cc70f94f73be01bcbe7275ed21b4c087e60c684
|
/example/views/application_user_messages.py
|
344436e22c8d2b744d77b519a6d14e1415c8af06
|
[] |
no_license
|
lloubiere/pyramid_rest
|
dfced8b37893f0072e6dfb8550291c5973656ad3
|
fcabb94d9a79580de11163715af8ba35ca937c38
|
refs/heads/master
| 2021-01-17T22:04:22.759942
| 2014-04-16T18:18:09
| 2014-04-16T18:18:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 186
|
py
|
# -*- coding: utf-8 -*-
from pyramid_rest.mongo import CollectionView
from example.model import Message
class ApplicationUserMessagesView(CollectionView):
model_class = Message
|
[
"hadrien@ectobal.com"
] |
hadrien@ectobal.com
|
94bad560eac184705536a6ca53035cc708e72f98
|
ab835f1ca110389079c81fff71bd073dcf303c1d
|
/abstract_factory.py
|
99a7e74dda94ea8e5ead571e51354c4f48712c32
|
[] |
no_license
|
mndimitrov92/Python_Scripts
|
333d375c6316af60277b4be81a2a94b1eb20144a
|
3ea7397c81f26d694c564148ee9660203efaf156
|
refs/heads/master
| 2023-05-10T17:32:09.827488
| 2023-04-29T15:26:39
| 2023-04-29T15:26:39
| 137,658,192
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,479
|
py
|
import abc
import sys
import urllib2
from BeautifulSoup import BeautifulStoneSoup
class AbstractFactory(object):
""" Abstract factory interface which will provide methods to implement in its subclasses """
__metaclass__ = abc.ABCMeta
def __init__(self, is_secure):
""" if is_secure is True, factory tries to make connection secure, otherwise not. """
self.is_secure = is_secure
@abc.abstractmethod
def create_protocol(self):
pass
@abc.abstractmethod
def create_port(self):
pass
@abc.abstractmethod
def create_parser(self):
pass
#HTTP Factory class
class HTTPFactory(AbstractFactory):
""" Concrete factory for building http connections """
def create_protocol(self):
if self.is_secure:
return "https"
return "http"
def create_port(self):
if self.is_secure:
return HTTPSecurePort()
return HTTPPort()
def create_parser(self):
return HTTPParser()
# FTP Factory class
class FTPFactory(AbstractFactory):
""" Concrete factory for building FTP connections """
def create_protocol(self):
return "ftp"
def create_port(self):
return FTPPort()
def create_parser(self):
return FTPParser()
class Port(object):
__metaclass__ = abc.ABCMeta
""" An abstract product which reporesents the port to connect. """
@abc.abstractmethod
def __str__(self):
pass
class HTTPPort(Port):
""" A concrete class which reporesents the http port """
def __str__(self):
return '80'
class HTTPSecurePort(Port):
""" A concrete class which represents the https port """
def __str__(self):
return '443'
class FTPPort(Port):
""" A concrete class which represents the ftp port """
def __str__(self):
return '21'
class Parser(object):
""" An abstract product ,reporesents parser to parse web content """
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def __call__(self, content):
pass
class HTTPParser(Parser):
def __call__(self, content):
filenames = []
soup = BeautifulStoneSoup(content)
links = soup.table.findAll('a')
for link in links:
            filenames.append(link['href'])  # keep the href string so join() below gets strings, not Tag objects
return '\n'.join(filenames)
class FTPParser(Parser):
def __call__(self, content):
lines = content.split('\n')
filenames = []
for line in lines:
split_line = line.split(None, 8)
if len(split_line) == 9:
filenames.append(split_line[-1])
return "\n".join(filenames)
class Connector(object):
def __init__(self, factory):
self.protocol = factory.create_protocol()
self.port = factory.create_port()
self.parse = factory.create_parser()
def read(self, host, path):
url = self.protocol + "://" + host + ":" + str(self.port) + path
print "Connecting to: ", url
return urllib2.urlopen(url, timeout=2).read()
    # parse is supplied per-instance by the factory in __init__, so no stub is needed here
if __name__ == "__main__":
domain = "ftp.freebsd.org"
path = "/pub/FreeBSD"
protocol = input("Connecting to {}. Which protocol to use? (0 - http; 1 - ftp):".format(domain))
if protocol == 0:
is_secure = bool(input("Use secure connection? (1- Yes; 0 - No)"))
factory = HTTPFactory(is_secure)
elif protocol == 1:
is_secure = False
factory = FTPFactory(is_secure)
    else:
        print "Sorry, could not connect"
        sys.exit(1)
connector = Connector(factory)
try:
content = connector.read(domain, path)
except urllib2.URLError,e:
print "Cannot access resource"
else:
print connector.parse(content)
|
[
"=marin.dimitrov92@gmail.com"
] |
=marin.dimitrov92@gmail.com
|
0ddeecabdafba350249119eb97782aa41fba8852
|
94743a85befdf16892cd28f771cd96373ddc995f
|
/GRPH.py
|
fa5d40c3dc735e24e129d3324b265d03f0966ec0
|
[] |
no_license
|
doelling/Rosalind
|
caa0201d792bd2aa06d76d53916e75858a3ca67a
|
ddefdab630fa9712cb7a69b540c2c4921d34f5d9
|
refs/heads/master
| 2020-06-03T13:46:12.055127
| 2019-07-03T16:33:25
| 2019-07-03T16:33:25
| 191,591,438
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 476
|
py
|
from Bio import SeqIO
import itertools
def overlapThree(strA, strB):
return strA[-3:] == strB[:3]
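# e.g. overlapThree('AAATAAA', 'AAATTTT') -> True: the 3-suffix 'AAA' of the
# first strand equals the 3-prefix of the second (the GRPH overlap condition).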
def main():
dnaStrands = {str(i.id): str(i.seq) for i in SeqIO.parse('grph.fna', 'fasta')}
for j, k in itertools.combinations(dnaStrands.keys(), 2):
jStrand, kStrand = dnaStrands[j], dnaStrands[k]
        if overlapThree(jStrand, kStrand):
            print(j + ' ' + k)
        if overlapThree(kStrand, jStrand):
            print(k + ' ' + j)
main()
|
[
"45838137+doelling@users.noreply.github.com"
] |
45838137+doelling@users.noreply.github.com
|
766bf2bbcc44602bb399c557f76f7d7532a2f02b
|
4d558925161df4a4ccf2b148a66d709baf32c46c
|
/Crypto/triple_prime/secret.py
|
bdb65aa8d65bf6b7d0adffd5b2f9c25494215f84
|
[] |
no_license
|
scist-tw/109-mini-CTF
|
41dc95ae4b403feb111425f1eb352125ae5f8c98
|
ffb6d071d1e3a2d39d2bf63b5819e24415a5672a
|
refs/heads/main
| 2023-02-22T13:16:33.827354
| 2021-01-27T15:34:34
| 2021-01-27T15:34:34
| 325,246,706
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 40
|
py
|
flag = b'SCIST{twin_prime==vykp_rtkog}'
|
[
"tsguan11@gmail.com"
] |
tsguan11@gmail.com
|
61328a76583eb2214a98f089697310c1ec6b10b2
|
b2acf2dab56a42c22304b0e236b154d453175d6d
|
/tests/BackTests.py
|
683ad5ee4a1bfa7b8deda39e87690f728d898f5c
|
[
"MIT"
] |
permissive
|
bitsalt/bitbot
|
8cd14da5015598e18e65d41843ab7ecb7d3347da
|
24dcc037fcc2778929cb1091f2c87f38359e50c1
|
refs/heads/main
| 2023-03-29T22:07:01.746273
| 2021-04-13T20:42:25
| 2021-04-13T20:42:25
| 357,523,571
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,748
|
py
|
import unittest
import configparser
from oanda_backtest import Backtest
class UnitTests(unittest.TestCase):
def test_canBacktestOanda(self):
config = configparser.ConfigParser()
config.read('../oanda.cfg')
# api = opy.API(
# environment='practice',
# access_token=config['oanda']['access_token']
bt = Backtest(
access_token=config['oanda']['access_token'],
environment='practice'
)
params = {
"granularity": "H1", # 1 hour candlesticks (default=S5)
"count": 5000 # 5000 candlesticks (default=500, maximum=5000)
}
bt.candles('USD_CAD', params)
fast_ma = bt.ema(period=12)
slow_ma = bt.ema(period=30)
exit_ma = bt.ema(period=5)
# bt.candles('EUR_USD', params)
# fast_ma = bt.ema(period=9)
# slow_ma = bt.ema(period=30)
# exit_ma = bt.ema(period=5)
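        # crossover semantics (a note, not new behavior): an entry fires on the
        # bar where the fast EMA crosses the slow EMA (above for buys, below for
        # sells); exits fire where the close crosses back over the exit EMA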
bt.buy_entry = (fast_ma > slow_ma) & (fast_ma.shift() <= slow_ma.shift())
bt.sell_entry = (fast_ma < slow_ma) & (fast_ma.shift() >= slow_ma.shift())
bt.buy_exit = (bt.C < exit_ma) & (bt.C.shift() >= exit_ma.shift())
bt.sell_exit = (bt.C > exit_ma) & (bt.C.shift() <= exit_ma.shift())
bt.initial_deposit = 1000 # default=0
bt.units = 18000 # currency unit (default=10000)
bt.stop_loss = 30 # stop loss pips (default=0)
bt.take_profit = 80
print(bt.run())
bt.plot()
self.assertTrue(True)
if __name__ == '__main__':
unittest.main()
'''
Best runs...
bt.candles('USD_CAD', params)
fast_ma = bt.ema(period=12)
slow_ma = bt.ema(period=30)
exit_ma = bt.ema(period=5)
'''
|
[
"jeff@bitsalt.com"
] |
jeff@bitsalt.com
|
53f61304c2c224d35c58274d8e1443ff13f481ef
|
8588cff59e11fd3d39b59f1666d2aa72d92c1784
|
/demo/previews.py
|
279e7b0bbf32245b2501d88fb65035488af484be
|
[] |
no_license
|
amygwong/csc59866
|
54f566d46a0f6bbd12d36602aaf6a1856d612d12
|
17bde501b9da11764712ffdcf2c30bd3dcf361ff
|
refs/heads/master
| 2021-01-17T18:35:30.341289
| 2017-05-21T03:59:47
| 2017-05-21T03:59:47
| 69,483,710
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,740
|
py
|
from os import system, path
from speech_to_text import getUserInput
import subprocess
# opens a file in Preview by name
def findOpenImage(name):
    # get all paths of files whose filename contains name
fpath = subprocess.check_output("mdfind -name " + name, shell=True)
fpath = fpath.decode("utf-8")
flist = fpath.split()
# no files found
if len(flist) == 0:
return -1
dist = len(path.splitext(path.basename(flist[0]))[0])
curfp = flist[0]
for fp in flist:
fpName = len(path.splitext(path.basename(fp))[0])
# open file with the exact name
if fpName == len(name):
findcmd = "open -a Preview " + fp
system(findcmd)
return 1
# get the path of file that has the closest match with name
# file with smallest name length
else:
if fpName < dist:
                dist = fpName
curfp = fp
findcmd = "open -a Preview " + curfp
system(findcmd)
return 1
# calls this when user asks to open an image
def openImage():
found = -1
# get name of image
inp = getUserInput("What is the name of the image")
while inp == "" or inp == -1 or found == -1:
inp = getUserInput("Try Again")
if inp != "" and inp != -1:
found = findOpenImage(inp)
if inp == "quit" or found == 1:
break
def openFile():
found = -1
# get name of image
inp = getUserInput("What is the name of the file")
while inp == "" or inp == -1 or found == -1:
inp = getUserInput("Try Again")
if inp != "" and inp != -1:
found = findOpenImage(inp)
if inp == "quit" or found == 1:
break
#openImage()
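# Example (assumes macOS, since mdfind and Preview are Spotlight/macOS tools;
# the image name "vacation" is just an illustration):
#   findOpenImage("vacation")   # opens the closest-named match in Preview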
|
[
"amygwlife123@gmail.com"
] |
amygwlife123@gmail.com
|
a376fdc691415fb9aa423aecba5b5ec02d31694e
|
72de9fe0f0252f965a7d791a7dbda245b1a48bbc
|
/webserver.py
|
a481d46757321ee7cc76273ce69f97f3e258028e
|
[] |
no_license
|
tianhao0211/web-server
|
3b6dca57255744099176bf8fee7dd743935240d7
|
bec92c0b5c288cbffa5788db5a9c7799bff3196d
|
refs/heads/main
| 2023-03-18T09:48:11.346539
| 2021-03-05T21:27:15
| 2021-03-05T21:27:15
| 338,962,547
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,904
|
py
|
import os
from os import curdir
from os.path import join as pjoin
import json
from datetime import datetime
#from http.server import BaseHTTPRequestHandler, HTTPServer
from BaseHTTPServer import BaseHTTPRequestHandler
from BaseHTTPServer import HTTPServer
PORT = 8080
FILE = 'store.json'
# initialize the storage file if it does not exist yet (this creates the storage file on first run)
if os.path.exists(FILE) == 0:
with open(FILE, 'w') as f:
f.write('{}')
class StoreHandler(BaseHTTPRequestHandler):
store_path = pjoin(curdir, FILE)
# define the GET action
def do_GET(self):
if self.path.find("/conversations/") != -1:
# get last variable, which is the key - conversation_id
key = self.path.split("/conversations/")[-1]
# open the store.json file and read content under the key (conversation_id)
with open(self.store_path, 'r') as fh:
data = json.loads(fh.read())
result = data.get(key, [])
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
# pretty printing the output
self.wfile.write(json.dumps({'id':key,'messages':result},sort_keys=True, indent=4).encode())
else:
# return 404 error if the curl url is invalid
self.send_response(404)
# define the POST action
def do_POST(self):
# with or without the forward slash, both situations are considered to be valid
if self.path == '/messages' or self.path == '/messages/':
length = self.headers['content-length']
data = json.loads(self.rfile.read(int(length)))
# add the created time in the POST data, use specific format as instructed
time = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3]+'Z'
data['created'] = time
            # write the new data into the storage file, checking whether the conversation_id already exists in the chat history
key = data["conversation_id"]
del data["conversation_id"]
with open(self.store_path, 'r') as fh:
db = json.loads(fh.read())
with open(self.store_path, 'w') as fh:
                # if the conversation_id does not exist yet, start a new list for it
if key not in db:
db[key] = [data]
                # otherwise, append the message to the existing conversation
else:
db[key].append(data)
fh.write(json.dumps(db)+'\n')
self.send_response(200)
else:
# return 404 error if the curl url is invalid
self.send_response(404)
server = HTTPServer(('', PORT), StoreHandler)
print 'Starting httpd on port: ', PORT
print 'Use curl command to test GET and POST actions'
server.serve_forever()
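# Quick sanity check with curl, assuming the server runs on localhost:8080
# (the "conversation_id" field is required by do_POST above; "text" is just
# an illustrative payload field):
#   curl -X POST -d '{"conversation_id": "abc123", "text": "hello"}' http://localhost:8080/messages
#   curl http://localhost:8080/conversations/abc123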
|
[
"noreply@github.com"
] |
tianhao0211.noreply@github.com
|
c1b177946e9f4e9be47868c4aa13bc7bb8632722
|
d032e376d1632cad38c767a866f8d75817f40231
|
/exercises/exc_02_04.py
|
001a09ca5663189e021e82ee5965d6edbdae1b12
|
[
"MIT"
] |
permissive
|
ali4413/MCL-DSCI-511-programming-in-python
|
2918d10195ede2432bd452657ff4a1c457da23a6
|
723422f3b57ad4290e82b1b07d33c4f2e2ec3ea2
|
refs/heads/master
| 2022-06-08T02:09:41.134527
| 2020-05-08T19:08:18
| 2020-05-08T19:08:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 208
|
py
|
import pandas as pd
# Read in the data from the text file using the full pathway
# Use the pokemon name as the index
____ = ____(____,
____,
____)
# Display the first 10 rows
____
|
[
"hayleyfboyce@hotmail.com"
] |
hayleyfboyce@hotmail.com
|
61deaaae3c82727d6ab5332fabcdd07c9532859b
|
b953a0f99042c43c07332b705bef86bf4769a32b
|
/polls/migrations/0005_auto_20151122_1023.py
|
147232772fb8dddc3dc197b5d41852b6e4084193
|
[] |
no_license
|
eshantewari/MBHS_SGA
|
a108d5d9d9f98b3feb14b03683b3eb69b61d37c1
|
38bc7eb38d61d8c13bd97f64f988d118519c923a
|
refs/heads/master
| 2020-05-30T20:17:55.724918
| 2016-08-14T01:20:48
| 2016-08-14T01:20:48
| 42,009,100
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 439
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('polls', '0004_auto_20151108_1939'),
]
operations = [
migrations.AlterField(
model_name='candidate',
name='image',
field=models.ImageField(upload_to='candidate_pics', blank=True, null=True),
),
]
|
[
"eshan.tewari@gmail.com"
] |
eshan.tewari@gmail.com
|
e6d325ff856ba2b28e77783b99918a918a6ab823
|
7cfe8583ae7819799b18b88566eeff3e8cb4ba9e
|
/5_1_BuildingARecurrentNeuralNetwork-tf/data_utils.py
|
e4a28219d4df1a887d4923bd016fac7893b55fe4
|
[] |
no_license
|
IHNF262/DeepLearningPractice
|
5cb33e73625c947661314be86d9ae810605c2540
|
80ea499d24cffcf0ed3e16d73995d274594b4609
|
refs/heads/main
| 2023-06-25T01:22:07.042995
| 2021-07-23T09:30:45
| 2021-07-23T09:30:45
| 376,475,455
| 0
| 0
| null | 2021-06-13T08:24:56
| 2021-06-13T07:59:12
| null |
UTF-8
|
Python
| false
| false
| 6,107
|
py
|
from music_utils import *
from preprocess import *
from keras.utils import to_categorical
chords, abstract_grammars = get_musical_data('../data/original_metheny.mid')
corpus, tones, tones_indices, indices_tones = get_corpus_data(abstract_grammars)
N_tones = len(set(corpus))
n_a = 64
x_initializer = np.zeros((1, 1, 78))
a_initializer = np.zeros((1, n_a))
c_initializer = np.zeros((1, n_a))
def load_music_utils():
chords, abstract_grammars = get_musical_data('../data/original_metheny.mid')
corpus, tones, tones_indices, indices_tones = get_corpus_data(abstract_grammars)
N_tones = len(set(corpus))
X, Y, N_tones = data_processing(corpus, tones_indices, 60, 30)
return (X, Y, N_tones, indices_tones)
def generate_music(inference_model, corpus = corpus, abstract_grammars = abstract_grammars, tones = tones, tones_indices = tones_indices, indices_tones = indices_tones, T_y = 10, max_tries = 1000, diversity = 0.5):
"""
Generates music using a model trained to learn musical patterns of a jazz soloist. Creates an audio stream
to save the music and play it.
Arguments:
model -- Keras model Instance, output of djmodel()
corpus -- musical corpus, list of 193 tones as strings (ex: 'C,0.333,<P1,d-5>')
    abstract_grammars -- list of grammars, one element can be: 'S,0.250,<m2,P-4> C,0.250,<P4,m-2> A,0.250,<P4,m-2>'
tones -- set of unique tones, ex: 'A,0.250,<M2,d-4>' is one element of the set.
tones_indices -- a python dictionary mapping unique tone (ex: A,0.250,< m2,P-4 >) into their corresponding indices (0-77)
indices_tones -- a python dictionary mapping indices (0-77) into their corresponding unique tone (ex: A,0.250,< m2,P-4 >)
Tx -- integer, number of time-steps used at training time
temperature -- scalar value, defines how conservative/creative the model is when generating music
Returns:
predicted_tones -- python list containing predicted tones
"""
# set up audio stream
out_stream = stream.Stream()
# Initialize chord variables
curr_offset = 0.0 # variable used to write sounds to the Stream.
num_chords = int(len(chords) / 3) # number of different set of chords
print("Predicting new values for different set of chords.")
    # Loop over all 18 sets of chords. At each iteration generate a sequence of tones
# and use the current chords to convert it into actual sounds
for i in range(1, num_chords):
# Retrieve current chord from stream
curr_chords = stream.Voice()
# Loop over the chords of the current set of chords
for j in chords[i]:
# Add chord to the current chords with the adequate offset, no need to understand this
curr_chords.insert((j.offset % 4), j)
# Generate a sequence of tones using the model
_, indices = predict_and_sample(inference_model)
indices = list(indices.squeeze())
pred = [indices_tones[p] for p in indices]
predicted_tones = 'C,0.25 '
for k in range(len(pred) - 1):
predicted_tones += pred[k] + ' '
predicted_tones += pred[-1]
#### POST PROCESSING OF THE PREDICTED TONES ####
# We will consider "A" and "X" as "C" tones. It is a common choice.
predicted_tones = predicted_tones.replace(' A',' C').replace(' X',' C')
# Pruning #1: smoothing measure
predicted_tones = prune_grammar(predicted_tones)
# Use predicted tones and current chords to generate sounds
sounds = unparse_grammar(predicted_tones, curr_chords)
# Pruning #2: removing repeated and too close together sounds
sounds = prune_notes(sounds)
# Quality assurance: clean up sounds
sounds = clean_up_notes(sounds)
# Print number of tones/notes in sounds
print('Generated %s sounds using the predicted values for the set of chords ("%s") and after pruning' % (len([k for k in sounds if isinstance(k, note.Note)]), i))
# Insert sounds into the output stream
for m in sounds:
out_stream.insert(curr_offset + m.offset, m)
for mc in curr_chords:
out_stream.insert(curr_offset + mc.offset, mc)
curr_offset += 4.0
    # Initialize tempo of the output stream with 130 beats per minute
out_stream.insert(0.0, tempo.MetronomeMark(number=130))
    # Save audio stream to file
mf = midi.translate.streamToMidiFile(out_stream)
mf.open("output/my_music.midi", 'wb')
mf.write()
print("Your generated music is saved in output/my_music.midi")
mf.close()
# Play the final stream through output (see 'play' lambda function above)
# play = lambda x: midi.realtime.StreamPlayer(x).play()
# play(out_stream)
return out_stream
def predict_and_sample(inference_model, x_initializer = x_initializer, a_initializer = a_initializer,
c_initializer = c_initializer):
"""
Predicts the next value of values using the inference model.
Arguments:
inference_model -- Keras model instance for inference time
x_initializer -- numpy array of shape (1, 1, 78), one-hot vector initializing the values generation
a_initializer -- numpy array of shape (1, n_a), initializing the hidden state of the LSTM_cell
    c_initializer -- numpy array of shape (1, n_a), initializing the cell state of the LSTM_cell
Ty -- length of the sequence you'd like to generate.
Returns:
results -- numpy-array of shape (Ty, 78), matrix of one-hot vectors representing the values generated
indices -- numpy-array of shape (Ty, 1), matrix of indices representing the values generated
"""
### START CODE HERE ###
pred = inference_model.predict([x_initializer, a_initializer, c_initializer])
indices = np.argmax(pred, axis = -1)
results = to_categorical(indices, num_classes=78)
### END CODE HERE ###
return results, indices
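# Minimal usage sketch, assuming an `inference_model` built elsewhere in the
# accompanying notebook with matching input shapes (not part of this module):
#   results, indices = predict_and_sample(inference_model, x_initializer,
#                                         a_initializer, c_initializer)
#   out_stream = generate_music(inference_model)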
|
[
"noreply@github.com"
] |
IHNF262.noreply@github.com
|
8886c1dd47cd18565ecd6229a7357158d3105b71
|
407097d7c2058160e313e15a3bbd3a695b07ad4a
|
/composante_detection_CG_flux.py
|
aaf807a8dc26d68a13529086e7902a80a41df7ee
|
[] |
no_license
|
JouvinLea/multi_analysis
|
0427aa84d6184ef531733959914e30a0f871e9dd
|
1b086b19e3481e28976329cc108e1ee660a227e7
|
refs/heads/master
| 2020-07-23T00:32:48.899142
| 2017-06-14T15:35:41
| 2017-06-14T15:35:41
| 94,346,658
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,526
|
py
|
import numpy as np
from astropy.table import Table
from matplotlib import pyplot as plt
from gammapy.utils.energy import EnergyBounds, Energy
from gammapy.image import SkyImageList
from astropy.wcs.utils import pixel_to_skycoord, skycoord_to_pixel
import yaml
import sys
from method_fit import *
input_param=yaml.load(open(sys.argv[1]))
#Input param fit and source configuration
freeze_bkg=input_param["param_fit_morpho"]["freeze_bkg"]
param_fit = input_param["param_fit_morpho"]
source_name=input_param["general"]["source_name"]
name_method_fond = input_param["general"]["name_method_fond"]
image_size= input_param["general"]["image_size"]
for_integral_flux=input_param["exposure"]["for_integral_flux"]
if freeze_bkg:
name="_bkg_fix"
else:
name="_bkg_free"
if param_fit["Em_gal"]:
name += "_Em_gal"
if param_fit["gauss_SgrA"]["fit"]:
name += "_SgrA"
if param_fit["gauss_G0p9"]["fit"]:
name += "_G0p9"
# If LS and CS are inverted, it means both components are present!
if param_fit["invert_CS_LS"]:
if param_fit["invert_CC_LS"]:
name += "_CS__central_gauss_LS"
else:
name += "_CS__LS_central_gauss"
else:
if param_fit["Large scale"]["fit"]:
name += "_LS"
if param_fit["Gauss_to_CS"]["fit"]:
name += "_CS"
if param_fit["central_gauss"]["fit"]:
name += "_central_gauss"
if param_fit["arc source"]["fit"]:
name += "_arcsource"
if not param_fit["arc source"]["xpos_frozen"]:
name += "_pos_free"
if param_fit["SgrB2"]["fit"]:
name += "_SgrB2"
#if param_fit["Large scale"]["fwhm_min"]:
name += "_LS_fwhm_min_"+str(param_fit["Large scale"]["fwhm_min"])+"_init_"+str(param_fit["Large scale"]["fwhm_init"])
#if param_fit["Gauss_to_CS"]["fwhm_min"]:
name += "_CS_fwhm_min_"+str(param_fit["Gauss_to_CS"]["fwhm_min"])+"_init_"+str(param_fit["Gauss_to_CS"]["fwhm_init"])
name += "_GC_source_fwhm_"+str(param_fit["gauss_SgrA"]["fwhm_init"])
if param_fit["Large scale"]["ellip_frozen"]:
name += "_eLS_"+str(param_fit["Large scale"]["ellip_init"])
if param_fit["Large scale"]["fwhm_frozen"]:
name += "_fwhmLS_"+str(param_fit["Large scale"]["fwhm_init"])
if param_fit["Gauss_to_CS"]["fwhm_frozen"]:
name += "_fwhmCS_"+str(param_fit["Gauss_to_CS"]["fwhm_init"])
if param_fit["central_gauss"]["fwhm_frozen"]:
name += "_fwhmCC_"+str(param_fit["central_gauss"]["fwhm_init"])
config_name = input_param["general"]["config_name"]
energy_reco=[Energy(input_param["energy binning"]["Emin"],"TeV"),Energy(input_param["energy binning"]["Emax"],"TeV"), input_param["energy binning"]["nbin"]]
outdir_data = make_outdir_data(source_name, name_method_fond,config_name,image_size,for_integral_flux=False,ereco=energy_reco)
directory = make_outdir_filesresult(source_name, name_method_fond,config_name,image_size,for_integral_flux=False,ereco=energy_reco)
energy_bins=EnergyBounds.equal_log_spacing(0.5,100,1,"TeV")
pix_to_deg=0.02
for i, E in enumerate(energy_bins[:-1]):
E1=energy_bins[i].value
E2=energy_bins[i+1].value
print("Energy band:"+str("%.2f" % E1)+"-"+str("%.2f" % E2)+" TeV")
filename=directory+"/morphology_et_flux_fit_result_"+name+"_"+str("%.2f" % E1)+"_"+str("%.2f" % E2)+"_TeV.txt"
filename_err=directory+"/morphology_et_flux_fit_covar_"+name+"_"+str("%.2f" % E1)+"_"+str("%.2f" % E2)+"_TeV.txt"
t=Table.read(filename, format="ascii")
t_err=Table.read(filename_err, format="ascii")
for istep,step in enumerate(t["step"][:-1]):
sigma=np.sqrt(np.fabs((t["statval"][istep+1]-t["statval"][istep])))
print("step: "+str(step)+", detection a "+str(sigma)+" sigma")
ifinal_step=len(t)-1
CS_fwwhm=t[ifinal_step]["Gauss*Templ_CS.fwhm"]*pix_to_deg/2.35
CS_fwwhm_err=t_err[ifinal_step]["Gauss*Templ_CS.fwhm_min"]*pix_to_deg/2.35
LS_fwwhm=t[ifinal_step]["Asym Large Scale.fwhm"]*pix_to_deg/2.35
LS_fwwhm_err=t_err[ifinal_step]["Asym Large Scale.fwhm_min"]*pix_to_deg/2.35
#LS_ellip=0.8
#LS_ellip_err=0
LS_ellip=t[ifinal_step]["Asym Large Scale.ellip"]
LS_ellip_err=t_err[ifinal_step]["Asym Large Scale.ellip_min"]
sigma_x=LS_ellip*LS_fwwhm
sigma_y=(1-LS_ellip)*LS_fwwhm
    #Error propagation for correlated variables: Var(f(x,y))=(df/dx*dx)**2+(df/dy*dy)**2+2*df/dx*df/dy*dx*dy
sigma_x_err=np.sqrt(((LS_ellip)*LS_fwwhm_err)**2+(LS_fwwhm*LS_ellip_err)**2+2*(LS_fwwhm*(1-LS_ellip)*LS_fwwhm_err*LS_ellip_err))
sigma_y_err=np.sqrt(((1-LS_ellip)*LS_fwwhm_err)**2+(LS_fwwhm*LS_ellip_err)**2+2*(LS_fwwhm*(1-LS_ellip)*LS_fwwhm_err*LS_ellip_err))
CC_fwwhm=t[ifinal_step]["Central Component.fwhm"]*pix_to_deg/2.35
CC_fwwhm_err=t_err[ifinal_step]["Central Component.fwhm_min"]*pix_to_deg/2.35
print("Gauss*Templ_CS.fwhm=",CS_fwwhm," +/- ",CS_fwwhm_err)
print("LS sigma=",LS_fwwhm," +/- ",LS_fwwhm_err)
print("LS ellipticity=",LS_ellip," +/- ",LS_ellip_err)
print("LS sigmax=",sigma_x," +/- ",sigma_x_err)
print("LS sigmay=",sigma_y," +/- ",sigma_y_err)
print("Central Component =",CC_fwwhm," +/- ",CC_fwwhm_err)
flux_factor=1e-12
GC_ampl=t[ifinal_step]["GC source.ampl"]*flux_factor
G0p9_ampl=t[ifinal_step]["G0.9.ampl"]*flux_factor
CS_ampl=t[ifinal_step]["CS.ampl"]*flux_factor
LS_ampl=t[ifinal_step]["Asym Large Scale.ampl"]*flux_factor
CC_ampl=t[ifinal_step]["Central Component.ampl"]*flux_factor
arc_source_ampl=t[ifinal_step]["Arc source.ampl"]*flux_factor
GC_ampl_err=t_err[ifinal_step]["GC source.ampl_min"]*flux_factor
G0p9_ampl_err=t_err[ifinal_step]["G0.9.ampl_min"]*flux_factor
CS_ampl_err=t_err[ifinal_step]["CS.ampl_min"]*flux_factor
LS_ampl_err=t_err[ifinal_step]["Asym Large Scale.ampl_min"]*flux_factor
CC_ampl_err=t_err[ifinal_step]["Central Component.ampl_min"]*flux_factor
arc_source_ampl_err=t_err[ifinal_step]["Arc source.ampl_min"]*flux_factor
print ("Flux 1 Tev (en 1e-12, cm-2 TeV-1 s-1)")
print("GC source :",GC_ampl," +/- ",GC_ampl_err)
print("G0.9 :",G0p9_ampl," +/- ",G0p9_ampl_err)
print("Gauss*Templ_CS: ",CS_ampl," +/- ",CS_ampl_err)
print("LS :",LS_ampl," +/- ",LS_ampl_err)
print("Central Component:",CC_ampl," +/- ",CC_ampl_err)
print("Arc source :",arc_source_ampl," +/- ",arc_source_ampl_err)
if 'Arc source.xpos' in t.colnames:
on = SkyImageList.read(outdir_data + "/fov_bg_maps" + str(E1) + "_" + str(E2) + "_TeV.fits")["counts"]
coord_arc_source=pixel_to_skycoord(t["Arc source.xpos"][ifinal_step],t["Arc source.ypos"][ifinal_step],on.wcs)
        print(coord_arc_source)
|
[
"lea.jouvin@gmail.com"
] |
lea.jouvin@gmail.com
|
5bab08f19961fec6da631384fc26abe23e29a028
|
6b10d023db347810c35298f8166af42de5dd13d3
|
/Udemi/Section 7/Script1.py
|
cbd66a13306fda0bef8f1e0a2eacd14795f9df53
|
[] |
no_license
|
alex89n/Py_Exercise
|
6bdf02ebedf4998a1135ca44ac3c60d99e1d3fce
|
bb5c9b0c967b2957230d398477193980ed12ea15
|
refs/heads/master
| 2020-07-23T06:25:26.759261
| 2017-09-06T10:53:01
| 2017-09-06T10:53:01
| 94,354,462
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 255
|
py
|
import random, string
def generator():
letter1 = random.choice(string.ascii_lowercase)
letter2 = random.choice(string.ascii_lowercase)
letter3 = random.choice(string.ascii_lowercase)
return letter1 + letter2 + letter3
print(generator())
|
[
"aleksandar.nikolic@rt-rk.com"
] |
aleksandar.nikolic@rt-rk.com
|
442030f5246fb8e61aaec6289d70093b01c5a6d5
|
f9576955010b4bd5512bd5af1f42dbef16009922
|
/exerise-2-9-number-of-negitive-numbers.py
|
3bcb91e2bd724273ffb9761efc9a9c24c7c248d7
|
[] |
no_license
|
den01-python-programming-exercises/exercise-2-9-number-of-negative-numbers-rangorstormbron
|
4ee5cfad4c0c6ec9c4f02525c13d22fc68d0c543
|
ae68f71d2b17b852a70a4b84a38122d7341f31f0
|
refs/heads/master
| 2023-02-25T14:00:44.161452
| 2021-02-08T15:57:41
| 2021-02-08T15:57:41
| 337,127,402
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 388
|
py
|
count = 0
while True:
    number = int(input("Please enter a number (counting stops when 0 is entered): "))
    if number == 0:
        print("total negative numbers =", count)
        break
    if number < 0:
        count = count + 1
|
[
"noreply@github.com"
] |
den01-python-programming-exercises.noreply@github.com
|
e85a77066d6a4f8022f19aecd6eed15b41dd31b5
|
82fbd036b8f8f636ed38d859d52343cbe2dc6ecf
|
/test/loader_tests.py
|
4936d741e1de9dbefb18e5ea3d5f835d5e1c235c
|
[] |
no_license
|
dhpollack/bytenet.pytorch
|
9db425b080c7ba41cd0e7cbde9056621526856b2
|
9aba0efe5139d886079c789e6420ce46f3f20683
|
refs/heads/master
| 2021-09-15T22:58:56.481145
| 2018-06-12T08:47:11
| 2018-06-12T08:47:11
| 115,529,747
| 9
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 723
|
py
|
import unittest
import json
import time
from data.enwik8_loader import *
from data.wmt_loader import *
class Loaders_Test(unittest.TestCase):
config = json.load(open("config.json"))
print(config)
def test_1_enwik8(self):
ds = WIKIPEDIA(self.config["HUTTER_DIR"])
for i, (src, tgt) in enumerate(ds):
print(len(src), len(tgt))
print(src[:20], tgt[:20])
if i > 0:
break
def test_2_wmtnews(self):
ds = WMT(self.config["WMT_DIR"])
for i, (src, tgt) in enumerate(ds):
print(len(src), len(tgt))
print(src, tgt)
if i > 0:
break
if __name__ == '__main__':
unittest.main()
|
[
"david@da3.net"
] |
david@da3.net
|
34e35985b36457bd4c5d814e0b138d08647c99c3
|
bb3c62c4d1726ab0b3a71753a17e6b7d25ad5b50
|
/automateScript/post.py
|
1b55840b8da204ad0657e0f02f1feac9bfe18d39
|
[] |
no_license
|
JasonCodeIT/g12code
|
b984ab7a2273c3ba2c26e737595af447b72d74ea
|
da343c24b610e289f46313d790f4161ed6b7226c
|
refs/heads/master
| 2021-06-01T13:10:09.487643
| 2018-12-03T01:58:57
| 2018-12-03T01:58:57
| 32,397,549
| 0
| 0
| null | 2015-04-01T14:09:04
| 2015-03-17T14:15:37
|
Python
|
UTF-8
|
Python
| false
| false
| 161
|
py
|
import requests
def post(url, data=None, cookies=None):
r = requests.post(url, data=data, cookies=cookies, verify=False)
return r.text.encode('utf-8')
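if __name__ == '__main__':
    # Minimal smoke test, assuming network access; httpbin.org just echoes
    # the posted form data back, so this only exercises the wrapper.
    print(post('https://httpbin.org/post', data={'key': 'value'}))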
|
[
"dzy0451@gmail.com"
] |
dzy0451@gmail.com
|
beb573f485521049d08a6feec29654c350245495
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_258/ch34_2019_03_19_16_59_24_061231.py
|
d6a923627829377948c28cf477c1fda1f09a90dd
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 210
|
py
|
a = float(input('What is the initial deposit amount? '))
b = float(input('What is the interest rate? '))
n = 1
while n <= 24:
    # assuming b is the rate as a fraction (e.g. 0.05), the balance after
    # n periods is a*(1+b)**n and d is the accumulated interest
    c = a*(1+b)**n
    d = c-a
    n += 1
    print("{0:.2f}".format(c))
    print("{0:.2f}".format(d))
|
[
"you@example.com"
] |
you@example.com
|
781f986fa5d85dd9546502e8ae29c6135ff9e1fc
|
bd3e8e7bbb85a1c99b392bac5298f38f87b6a739
|
/resources/user.py
|
162da241272955b1ef94a1273bf6df6ac99dda24
|
[] |
no_license
|
DimitriMichel/Flask-RESTful-for-Heroku
|
0a08c5508c71dbc65e4b44a725b66cb5a4d6010d
|
a5947ed622b7551d08f9fb760d8586f8f5c5ed7b
|
refs/heads/master
| 2020-08-16T17:05:37.902220
| 2019-10-16T17:13:03
| 2019-10-16T17:13:03
| 215,528,881
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,006
|
py
|
from flask_restful import Resource, reqparse
from models.user import UserModel
class UserRegister(Resource):
    # important to require arguments so user fields can't be empty
parser = reqparse.RequestParser()
parser.add_argument('username',
type=str,
required=True,
help="This field cannot be blank."
)
parser.add_argument('password',
type=str,
required=True,
help="This field cannot be blank."
)
def post(self):
data = UserRegister.parser.parse_args()
if UserModel.find_by_username(data['username']):
return {"message": "A user with that username already exists"}, 400 # 400 Bad Request
user = UserModel(data['username'], data['password'])
user.save_to_db()
return {"message": "User created successfully."}, 201 # 201 created
|
[
"dimitripl@Dimitris-MBP.fios-router.home"
] |
dimitripl@Dimitris-MBP.fios-router.home
|
f00f0ec16d5c23510d81f61420f4c02d8948e730
|
9bb8f046743021611d110b69521b04ebf5a2a254
|
/income/migrations/0003_auto_20201226_2137.py
|
d4df2290418f2292d58c64ea88a20b1453a21dc9
|
[] |
no_license
|
Sukhvsin2/Expense-Income-Api
|
89b8ca8f517fcabe9c0b1c5406dde68858af928e
|
6f11fe8a64783bbd332a6c93a5bd10997f16a6c6
|
refs/heads/master
| 2023-04-01T13:12:21.362974
| 2021-03-09T21:54:02
| 2021-03-09T21:54:02
| 324,355,505
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 536
|
py
|
# Generated by Django 3.1.4 on 2020-12-26 16:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('income', '0002_auto_20201226_2136'),
]
operations = [
migrations.AlterField(
model_name='income',
name='source',
field=models.CharField(choices=[('ONLINE_SERVICE', 'ONLINE_SERVICE'), ('RENT', 'RENT'), ('SALARY', 'SALARY'), ('TRAVEL', 'TRAVEL'), ('FOOD', 'FOOD'), ('OTHERS', 'OTHERS')], max_length=255),
),
]
|
[
"sukhvsingh2026@gmail.com"
] |
sukhvsingh2026@gmail.com
|
a067cc38f5853f9457b486b101c755817cddf438
|
b111524b4bbec6e8d5ad42e6969a42b060fc9e24
|
/qa/rpc-tests/test_framework/util.py
|
3928e67feb218eae56aa5e5811b2c0a7b398e4fc
|
[
"MIT"
] |
permissive
|
lamhuuvan/octoin
|
9c2c39865503bb00ad17d3cd08e26bfc792d9fb4
|
f406ea4b9c4ea76d12866682e47c1f782f742787
|
refs/heads/master
| 2020-03-18T18:25:44.133335
| 2018-02-20T12:55:54
| 2018-02-20T12:55:54
| 135,092,121
| 1
| 0
|
MIT
| 2018-05-28T00:35:58
| 2018-05-28T00:35:57
| null |
UTF-8
|
Python
| false
| false
| 22,954
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
import os
import sys
from binascii import hexlify, unhexlify
from base64 import b64encode
from decimal import Decimal, ROUND_DOWN
import json
import http.client
import random
import shutil
import subprocess
import time
import re
import errno
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
COVERAGE_DIR = None
# The maximum number of nodes a single test can spawn
MAX_NODES = 8
# Don't assign rpc or p2p ports lower than this
PORT_MIN = 11000
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
BITCOIND_PROC_WAIT_TIMEOUT = 60
class PortSeed:
# Must be initialized with a unique integer for each process
n = None
#Set Mocktime default to OFF.
#MOCKTIME is only needed for scripts that use the
#cached version of the blockchain. If the cached
#version of the blockchain is used without MOCKTIME
#then the mempools will not sync due to IBD.
MOCKTIME = 0
def enable_mocktime():
    #For backward compatibility of the python scripts
#with previous versions of the cache, set MOCKTIME
#to Jan 1, 2014 + (201 * 10 * 60)
global MOCKTIME
MOCKTIME = 1388534400 + (201 * 10 * 60)
def disable_mocktime():
global MOCKTIME
MOCKTIME = 0
def get_mocktime():
return MOCKTIME
def enable_coverage(dirname):
"""Maintain a log of which RPC calls are made during testing."""
global COVERAGE_DIR
COVERAGE_DIR = dirname
def get_rpc_proxy(url, node_number, timeout=None):
"""
Args:
url (str): URL of the RPC server to call
node_number (int): the node number (or id) that this calls to
Kwargs:
timeout (int): HTTP timeout in seconds
Returns:
AuthServiceProxy. convenience object for making RPC calls.
"""
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = timeout
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(
COVERAGE_DIR, node_number) if COVERAGE_DIR else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
assert(n <= MAX_NODES)
return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_port(n):
return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
return hexlify(byte_str).decode('ascii')
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
def sync_blocks(rpc_connections, wait=1, timeout=60):
"""
Wait until everybody has the same tip
"""
while timeout > 0:
tips = [ x.getbestblockhash() for x in rpc_connections ]
if tips == [ tips[0] ]*len(tips):
return True
time.sleep(wait)
timeout -= wait
raise AssertionError("Block sync failed")
def sync_mempools(rpc_connections, wait=1, timeout=60):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while timeout > 0:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
return True
time.sleep(wait)
timeout -= wait
raise AssertionError("Mempool sync failed")
bitcoind_processes = {}
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node"+str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
rpc_u, rpc_p = rpc_auth_pair(n)
with open(os.path.join(datadir, "octoin.conf"), 'w') as f:
f.write("regtest=1\n")
f.write("rpcuser=" + rpc_u + "\n")
f.write("rpcpassword=" + rpc_p + "\n")
f.write("port="+str(p2p_port(n))+"\n")
f.write("rpcport="+str(rpc_port(n))+"\n")
f.write("listenonion=0\n")
return datadir
def rpc_auth_pair(n):
return 'rpcuser💻' + str(n), 'rpcpass🔑' + str(n)
def rpc_url(i, rpchost=None):
rpc_u, rpc_p = rpc_auth_pair(i)
return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, rpchost or '127.0.0.1', rpc_port(i))
def wait_for_bitcoind_start(process, url, i):
'''
Wait for bitcoind to start. This means that RPC is accessible and fully initialized.
Raise an exception if bitcoind exits during initialization.
'''
while True:
if process.poll() is not None:
raise Exception('bitcoind exited with status %i during initialization' % process.returncode)
try:
rpc = get_rpc_proxy(url, i)
blocks = rpc.getblockcount()
break # break out of loop on success
except IOError as e:
if e.errno != errno.ECONNREFUSED: # Port not yet open?
raise # unknown IO error
except JSONRPCException as e: # Initialization phase
if e.error['code'] != -28: # RPC in warmup?
                raise # unknown JSON RPC exception
time.sleep(0.25)
def initialize_chain(test_dir, num_nodes):
"""
Create a cache of a 200-block-long chain (with wallet) for MAX_NODES
Afterward, create num_nodes copies from the cache
"""
assert num_nodes <= MAX_NODES
create_cache = False
for i in range(MAX_NODES):
if not os.path.isdir(os.path.join('cache', 'node'+str(i))):
create_cache = True
break
if create_cache:
#find and delete old cache directories if any exist
for i in range(MAX_NODES):
if os.path.isdir(os.path.join("cache","node"+str(i))):
shutil.rmtree(os.path.join("cache","node"+str(i)))
# Create cache directories, run bitcoinds:
for i in range(MAX_NODES):
datadir=initialize_datadir("cache", i)
args = [ os.getenv("BITCOIND", "octoind"), "-server", "-keypool=1", "-datadir="+datadir, "-discover=0", "-reindex", "-crowdsale=0" ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
bitcoind_processes[i] = subprocess.Popen(args)
if os.getenv("PYTHON_DEBUG", ""):
print("initialize_chain: bitcoind started, waiting for RPC to come up")
wait_for_bitcoind_start(bitcoind_processes[i], rpc_url(i), i)
if os.getenv("PYTHON_DEBUG", ""):
print("initialize_chain: RPC succesfully started")
rpcs = []
for i in range(MAX_NODES):
try:
rpcs.append(get_rpc_proxy(rpc_url(i), i))
except:
sys.stderr.write("Error connecting to "+url+"\n")
sys.exit(1)
# Create a 200-block-long chain; each of the 4 first nodes
# gets 25 mature blocks and 25 immature.
# Note: To preserve compatibility with older versions of
# initialize_chain, only 4 nodes will generate coins.
#
# blocks are created with timestamps 10 minutes apart
# starting from 2010 minutes in the past
enable_mocktime()
block_time = get_mocktime() - (201 * 10 * 60)
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(rpcs, block_time)
rpcs[peer].generate(1)
block_time += 10*60
# Must sync before next peer starts generating blocks
sync_blocks(rpcs)
# Shut them down, and clean up cache directories:
stop_nodes(rpcs)
wait_bitcoinds()
disable_mocktime()
for i in range(MAX_NODES):
os.remove(log_filename("cache", i, "debug.log"))
os.remove(log_filename("cache", i, "db.log"))
os.remove(log_filename("cache", i, "peers.dat"))
os.remove(log_filename("cache", i, "fee_estimates.dat"))
for i in range(num_nodes):
from_dir = os.path.join("cache", "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in bitcoin.conf
def initialize_chain_clean(test_dir, num_nodes):
"""
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization.
"""
for i in range(num_nodes):
datadir=initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
"""
Start a bitcoind and return RPC connection to it
"""
datadir = os.path.join(dirname, "node"+str(i))
if binary is None:
binary = os.getenv("BITCOIND", "octoind")
args = [ binary, "-datadir="+datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-mocktime="+str(get_mocktime()), "-crowdsale=0" ]
if extra_args is not None: args.extend(extra_args)
bitcoind_processes[i] = subprocess.Popen(args)
if os.getenv("PYTHON_DEBUG", ""):
print("start_node: bitcoind started, waiting for RPC to come up")
url = rpc_url(i, rpchost)
wait_for_bitcoind_start(bitcoind_processes[i], url, i)
if os.getenv("PYTHON_DEBUG", ""):
print("start_node: RPC succesfully started")
proxy = get_rpc_proxy(url, i, timeout=timewait)
if COVERAGE_DIR:
coverage.write_all_rpc_commands(COVERAGE_DIR, proxy)
return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, binary=None):
"""
Start multiple bitcoinds, return RPC connections to them
"""
if extra_args is None: extra_args = [ None for _ in range(num_nodes) ]
if binary is None: binary = [ None for _ in range(num_nodes) ]
rpcs = []
try:
for i in range(num_nodes):
rpcs.append(start_node(i, dirname, extra_args[i], rpchost, binary=binary[i]))
except: # If one node failed to start, stop the others
stop_nodes(rpcs)
raise
return rpcs
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
def stop_node(node, i):
try:
node.stop()
except http.client.CannotSendRequest as e:
print("WARN: Unable to stop node: " + repr(e))
bitcoind_processes[i].wait(timeout=BITCOIND_PROC_WAIT_TIMEOUT)
del bitcoind_processes[i]
def stop_nodes(nodes):
for node in nodes:
try:
node.stop()
except http.client.CannotSendRequest as e:
print("WARN: Unable to stop node: " + repr(e))
del nodes[:] # Emptying array closes connections as a side effect
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def wait_bitcoinds():
# Wait for all bitcoinds to cleanly exit
for bitcoind in bitcoind_processes.values():
bitcoind.wait(timeout=BITCOIND_PROC_WAIT_TIMEOUT)
bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >=0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out+fee
change = amount_in - amount
if change > amount*2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
"""
Create&broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
Ensures transaction is zero-priority by first creating a send-to-self,
then using its output
"""
# Create a send-to-self with confirmed inputs:
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount+fee*2)
outputs = make_change(from_node, total_in, amount+fee, fee)
outputs[self_address] = float(amount+fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount+fee)
# Now immediately spend the output to create a 1-input, 1-output
# zero-priority transaction:
inputs = [ { "txid" : self_txid, "vout" : vout } ]
outputs = { to_node.getnewaddress() : float(amount) }
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random zero-priority transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount+fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def assert_fee_amount(fee, tx_size, fee_per_kB):
"""Assert the fee was in range"""
target_fee = tx_size * fee_per_kB / 1000
if fee < target_fee:
raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)"%(str(fee), str(target_fee)))
# allow the wallet's estimation to be at most 2 bytes off
if fee > (tx_size + 2) * fee_per_kB / 1000:
raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)"%(str(fee), str(target_fee)))
def assert_equal(thing1, thing2):
if thing1 != thing2:
raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
try:
fun(*args, **kwds)
except exc:
pass
except Exception as e:
raise AssertionError("Unexpected exception raised: "+type(e).__name__)
else:
raise AssertionError("No exception raised")
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
raise AssertionError(
"Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
if not isinstance(string, str):
raise AssertionError("Expected a string, got type %r" % type(string))
elif length and len(string) != length:
raise AssertionError(
"String of length %d expected; got %d" % (length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
raise AssertionError(
"String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find = False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found
in object_array
"""
if should_not_find == True:
assert_equal(expected, { })
num_matched = 0
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
elif should_not_find == True:
num_matched = num_matched+1
for key,value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
num_matched = num_matched+1
if num_matched == 0 and should_not_find != True:
raise AssertionError("No objects matched %s"%(str(to_match)))
if num_matched > 0 and should_not_find == True:
raise AssertionError("Objects were found %s"%(str(to_match)))
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
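# e.g. satoshi_round(Decimal("0.1") / 3) == Decimal("0.03333333"):
# amounts are truncated (ROUND_DOWN) to 8 decimal places, the smallest
# on-chain unit.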
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
node.generate(int(0.5*count)+101)
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
for i in range(iterations):
t = utxos.pop()
inputs = []
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr1] = satoshi_round(send_value/2)
outputs[addr2] = satoshi_round(send_value/2)
raw_tx = node.createrawtransaction(inputs, outputs)
signed_tx = node.signrawtransaction(raw_tx)["hex"]
txid = node.sendrawtransaction(signed_tx)
while (node.getmempoolinfo()['size'] > 0):
node.generate(1)
utxos = node.listunspent()
assert(len(utxos) >= count)
return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
# Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
# So we have big transactions (and therefore can't fit very many into each block)
# create one script_pubkey
script_pubkey = "6a4d0200" #OP_RETURN OP_PUSH2 512 bytes
for i in range (512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
txouts = "81"
for k in range(128):
# add txout value
txouts = txouts + "0000000000000000"
# add length of script_pubkey
txouts = txouts + "fd0402"
# add script_pubkey
txouts = txouts + script_pubkey
return txouts
def create_tx(node, coinbase, to_address, amount):
inputs = [{ "txid" : coinbase, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, fee):
addr = node.getnewaddress()
txids = []
for i in range(len(utxos)):
t = utxos.pop()
inputs = []
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr] = satoshi_round(send_value)
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + txouts
newtx = newtx + rawtx[94:]
signresult = node.signrawtransaction(newtx, None, None, "NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
txids.append(txid)
return txids
def get_bip9_status(node, key):
info = node.getblockchaininfo()
return info['bip9_softforks'][key]
|
[
"root@build3.occ"
] |
root@build3.occ
|
32406dd2aa2de9ae977c1df924adc8b5b501ba3f
|
fe317eff7427e24db4b905d3e2ac395b25bb4c9f
|
/CoviHelp/forms.py
|
36bfcc1c2c41e50911d51fc6db0564cfb709a634
|
[] |
no_license
|
ishivanshgoel/Combatting-Covid
|
71e99fa91ee32c3cf6876ffd845cf8d8094c268b
|
2abb2665eed3f6a978b463a2475610ebe6136e91
|
refs/heads/master
| 2023-04-25T19:39:01.926190
| 2021-05-21T08:02:49
| 2021-05-21T08:02:49
| 360,958,587
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 628
|
py
|
from django import forms
## login forms
class UserSignIn(forms.Form):
email = forms.CharField(label='Email', max_length=20)
password = forms.CharField(label='Password', max_length=20)
class OxygenUser(UserSignIn):
pass
class PharmaUser(UserSignIn):
pass
class HospitalUser(UserSignIn):
pass
## registration forms
class User(forms.Form):
company = forms.CharField(label='Company Name', max_length=100)
name = forms.CharField(label='Contact Person Name', max_length=100)
phone = forms.CharField(label='Phone Number', max_length=100)
email = forms.CharField(label='Email', max_length=100)
|
[
"ishivanshgoel@gmail.com"
] |
ishivanshgoel@gmail.com
|
657ab173695eee56f3c94d50c887b2dde25fec3b
|
7fd6c2e12894060b7e3c74cba8302f1acece9db3
|
/functions/functionsdemo.py
|
2da9177b201e57431d58b007ef6c73f72f2cda68
|
[] |
no_license
|
karthikapresannan/karthikapresannan
|
c664ae5cd8d33df0e8b4c2e57c29e1e2787dad81
|
5d0d55d3d64186659d6eebee391aeca4fc609c22
|
refs/heads/master
| 2023-03-29T15:38:03.559918
| 2021-04-08T15:45:55
| 2021-04-08T15:45:55
| 335,202,361
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 241
|
py
|
def add(num1,num2):
    result=num1+num2
    print(result)
add(100,200)
def mul(num1,num2):
res=num1*num2
print(res)
mul(100,200)
def sub(num1,num2):
res=num1-num2
print(res)
sub(100,200) # TODO: combine these into one calculator program
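# A minimal sketch of the combined calculator the comment above asks for
# (the function name `calc` and the operation strings are assumptions):
def calc(num1, num2, op):
    if op == 'add':
        print(num1 + num2)
    elif op == 'mul':
        print(num1 * num2)
    elif op == 'sub':
        print(num1 - num2)
calc(100, 200, 'add')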
|
[
"karthikapresannan97@gmail.com"
] |
karthikapresannan97@gmail.com
|
389eabd5e691f8a8709ba52c6936a0ad41ce47fb
|
103ee7cb409ec7495b1a1d28d1fbb849c69180a9
|
/utils/transducer/train_transducer.py
|
b5a491f85266151c29cee5ce7562df49d0ba7df3
|
[] |
no_license
|
m-wiesner/GKT
|
2777081d28c02f496c61556518e53a17de9fdb79
|
fa977494322863f6607849e2e094853c02f4e581
|
refs/heads/master
| 2020-09-25T19:35:45.457465
| 2016-09-01T19:41:20
| 2016-09-01T19:41:20
| 66,901,793
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,551
|
py
|
#!/usr/local/bin/pypy
from __future__ import print_function
import pickle
import sys
import os
from Transducer import Grapheme, Utterance
import argparse
import json
import mlf
DEF_MIN_LL = -999.0
def usage():
print(" Usage: ./make_transducer.py [opts] <REF_ALI> <AUD_ALI> <OUTPUT> <EMISSIONS>")
print(" --iter : specify the number of training iterations. Default = 8")
print(" --train_size : specify what fraction of input data to train on. Default = 1.0")
print(" --add_1_smooth: specify the add val parameter for smoothing HMM arc counts. Default = 0.01")
def main():
if len(sys.argv[1:]) == 0:
usage()
sys.exit(1)
# Parser Arguments
parser = argparse.ArgumentParser()
parser.add_argument("input1", help="rspecifier alignment format such as "
"the output of ali-to-phones in kaldi.")
parser.add_argument("input2", help="rspecifier alignment format such as "
"the output of ali-phones in kaldi.")
parser.add_argument("output", help="wspecifier pickled list of trained "
"Grapheme HMM objects.")
parser.add_argument("emissions", help="rspecifier distributions p(aud | graph) "
"stored in json format.")
parser.add_argument("-I","--iters", action="store", help="Number of iterations of training.",
type=int, default=8)
parser.add_argument("-T","--train_size", action="store", help="Fraction of the matched "
"input files over which to train.", type=float, default=1.0)
parser.add_argument("-L","--add_1_smooth", action="store", help="Smoothing "
"value for HMM arcs", type=float, default = 0.01)
args = parser.parse_args()
EMISSIONS = args.emissions
REF_ALI = args.input1
RES_ALI = args.input2
OUTFILE = args.output
ITERS = args.iters
TRAINING_SIZE = args.train_size
LAM = args.add_1_smooth
    # Get training and testing sets
ref_utterances = mlf.ali2dict(REF_ALI)
train_utterances = mlf.ali2dict(RES_ALI)
train_utterances = {u:train_utterances[u] for u in ref_utterances.iterkeys()}
emissions_dict = json.load(open(EMISSIONS,"rb"))
graphemes = {name: Grapheme(name,emissions,lam=LAM) for name,emissions in emissions_dict.iteritems()}
num_utts = len(ref_utterances.keys())
train_keys = sorted(train_utterances.keys())[0:int(num_utts*TRAINING_SIZE)]
num_train = len(train_keys)
LL_old = DEF_MIN_LL
for i in range(ITERS):
print("Iteration ", i )
LL = 0.0
num_frames = 0.0
for i_u,u in enumerate(train_keys,start=1):
sys.stdout.write("Created utterance %d of %d\r" % (i_u,num_train) )
sys.stdout.flush()
utt = Utterance(u,ref_utterances[u],graphemes)
seq = train_utterances[u].split(" ")
num_frames += len(seq)
LL += utt.train_sequence(seq)
LL /= num_frames
if LL >= LL_old:
LL_old = LL
else:
print("ERROR")
print("LL: %f, LL_old: %f" % (LL, LL_old))
print(" ")
print("LL: %f" % LL )
for g in graphemes.itervalues():
g.update()
pickle.dump(graphemes, open( OUTFILE, "wb" ) )
# Initialize inputs from unigram distribution of grapheme level alignments
if __name__ == "__main__":
main()
|
[
"wiesner@jhu.edu"
] |
wiesner@jhu.edu
|
f6d690fcbc0cef5954203798eebf583e0922e61f
|
6c7ec742e9b92a54de54e32b242057ab105f5399
|
/scienv/bin/rst2odt.py
|
33a437123869e84aca776a9abba58fc13cd0c14b
|
[] |
no_license
|
sdnnet3/scienceprobs
|
0a72a59755ede896f97a973657ff83c820ae0af6
|
07c0adc68a171427bf7eb9c83b0fddfd0fbb9341
|
refs/heads/master
| 2020-06-04T23:51:06.227553
| 2019-06-16T20:58:56
| 2019-06-16T20:58:56
| 161,008,514
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 793
|
py
|
#!/home/clayton/scienceprobs/scienv/bin/python3
# $Id: rst2odt.py 5839 2009-01-07 19:09:28Z dkuhlman $
# Author: Dave Kuhlman <dkuhlman@rexx.com>
# Copyright: This module has been placed in the public domain.
"""
A front end to the Docutils Publisher, producing OpenOffice documents.
"""
import sys
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline_to_binary, default_description
from docutils.writers.odf_odt import Writer, Reader
description = ('Generates OpenDocument/OpenOffice/ODF documents from '
'standalone reStructuredText sources. ' + default_description)
writer = Writer()
reader = Reader()
output = publish_cmdline_to_binary(reader=reader, writer=writer,
description=description)
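# Typical invocation, following the usual docutils front-end convention:
#   rst2odt.py [options] source.rst destination.odt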
|
[
"clayton.hutton@gmail.com"
] |
clayton.hutton@gmail.com
|
a17b2ddfb0202a5bbf9947270096fe596200604d
|
71a93e99a9e2c15c8a1727e823c20c6007d6a787
|
/desc.py
|
fa045d10b28c77d2bdfc97938a64257962f8dfb0
|
[] |
no_license
|
7erduk/tor
|
d5334b40211b3f2509d7a0d562146f694c2e862e
|
631bc6b29d92e00b5bb0b56f1217fe77b601613b
|
refs/heads/master
| 2021-01-19T00:40:51.109871
| 2018-10-21T07:43:44
| 2018-10-21T07:43:44
| 87,199,784
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,023
|
py
|
import re
import stem.descriptor
from stem.exit_policy import ExitPolicy
import StringIO
import marshal
import random
class Descriptors:
def __init__(self,routers={}):
self.routers=routers
def loadTextDesc(self,fname):
f=open(fname,'rb')
desc=stem.descriptor.parse_file(f,'server-descriptor 1.0')
self.routers={}
for d in desc:
router={}
router['nickname']=d.nickname
router['address']=d.address
router['or_port']=d.or_port
router['exit_policy']=str(d.exit_policy)
router['onion_key']=d.onion_key
router['signing_key']=d.signing_key
router['dir_port']=d.dir_port
self.routers[d.fingerprint]=router
return self.routers
def getExitPoliceRouters(self,ip,port):
routers={}
for r in self.routers:
e=ExitPolicy(self.routers[r]['exit_policy'])
if e.can_exit_to(ip,port):
routers[r]=self.routers[r]
return routers
def saveDesc(self,fname):
open(fname,'wb+').write(marshal.dumps(self.routers))
def loadDesc(self,fname):
self.routers=marshal.loads(open(fname,'rb').read())
return self.routers
def randRouter(self,ip=None,port=None):
keys=self.routers.keys()
random.shuffle(keys)
routers={}
for r in keys:
if ip and port:
e=ExitPolicy(self.routers[r]['exit_policy'])
if e.can_exit_to(ip,port):
return self.routers[r]
else:
return self.routers[r]
return None
if __name__=="__main__":
d=Descriptors()
#d.loadTextDesc('all.z')
#d.saveDesc('all.m')
d.loadDesc('all.m')
r=d.randRouter('8.8.8.8',4444)
print r['nickname']
r=d.randRouter('8.8.8.8',4444)
print r['nickname']
|
[
"7erduk@gmail.com"
] |
7erduk@gmail.com
|
d1444d69323c69bb381deecdeeaef7eedd2c783a
|
72c3c34f9eb47c9490f5461adaf28693eca27ebd
|
/test_coronavirus_reader.py
|
19ea00d6536f85a085ac5849e053d4baacb4f8be
|
[] |
no_license
|
JasonMTarka/Coronavirus-Tracker
|
ba88ca327b76eed2945a4f65788aca48d70ffff9
|
90ff2e12713dad777cbcdd99499490b74e179994
|
refs/heads/main
| 2023-08-18T00:17:23.581131
| 2021-10-08T13:07:46
| 2021-10-08T13:07:46
| 341,175,289
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 523
|
py
|
import unittest
from coronavirus_reader import COVID19_Reader
class TestReader(unittest.TestCase):
@classmethod
def setUpClass(cls):
cvd_reader = COVID19_Reader()
cls.data = cvd_reader.data_reader()
def test_dataislist(self):
self.assertEqual(type(self.data), type([]))
def test_regex(self):
date = self.data[-1][0]
regex = r"(?P<day>\d{2})-(?P<month>\d{2})-(?P<year>\d\d?)"
self.assertRegex(date, regex)
if __name__ == "__main__":
unittest.main()
|
[
"jasonmtarka@gmail.com"
] |
jasonmtarka@gmail.com
|
420f7ad5777496c03d255f050c43577174561aa9
|
5c96fae506378a1ff0618e82e56fb89038df260c
|
/stockExchangePrediction/newsInput.py
|
7462750b287b9a2c5c602e78ed4152e2fb284a61
|
[] |
no_license
|
Skyraider150/stock_market_pred
|
6c89f34ac9802b0f1ca85533e161872051d9ad45
|
1f68eec6612f8eefdf178bbac9d0742ca5af3a58
|
refs/heads/master
| 2020-04-01T08:04:36.084798
| 2018-12-09T20:37:15
| 2018-12-09T20:37:15
| 153,016,664
| 0
| 0
| null | 2018-12-09T12:08:54
| 2018-10-14T20:36:47
|
Python
|
UTF-8
|
Python
| false
| false
| 4,451
|
py
|
from stockPreprocessing import *
# Let's check how our news data files look
#For this purpose we load a list with the names of the files
FilesOfNews=['data/news/abcnews-date-text.csv','data/news/articles1.csv','data/news/articles2.csv','data/news/articles3.csv','data/news/RedditNews.csv','data/news/Combined_News_DJIA.csv','data/news/data.csv']
#During iteration we write out the first two and the last line of each file, with its length and name
import os  # used for the shell checks below (may also arrive via the wildcard import above)
for filePath in []: #FilesOfNews:
    os.system("wc -l {}".format(filePath))
    os.system("head -n 2 {}".format(filePath))
    os.system("tail -n 1 {}".format(filePath))
print()
print()
# Loading in data
import csv
import sys
#For such a big file we have to set a new limit for csv
csv.field_size_limit(sys.maxsize)
#We read in the data and print it out with its length as a sanity check
NewsContent=[]
file='data/news/abcnews-date-text.csv'
counter=0
DatabaseContent=[]
with open(file,'r') as NewsFile:
NewsReader=csv.DictReader(NewsFile)
for line in NewsReader:
if line != "":
NewsContent.append(line)
print(NewsContent[0])
print(len(NewsContent))
# using nltk it is much easier to create the word vectors
import nltk
nltk.download('punkt')
#WordCountVectors is the dictionary where we store every word that occurs and its number of occurrences
WordCountVectors={}
for name in ['apple', 'amazon', 'facebook', 'google']:
WordCountVectors[name]={}
for line in NewsContent:
if(name in line['headline_text']):
words=nltk.word_tokenize(line['headline_text'])
for word in words:
if(word in WordCountVectors[name].keys()):
WordCountVectors[name][word]=WordCountVectors[name][word]+1
else:
WordCountVectors[name][word]=1
# as a check we are printing the word vector and the count of the words relevant for amazon
print(WordCountVectors['amazon'])
total = 0  # avoid shadowing the built-in sum()
for wordC in WordCountVectors['amazon'].values():
    total += wordC
print(total)
#We have to reformat the date so we have the same format as the stock data
# datetime.strptime(x[0], '%Y-%m-%d').timestamp()
for line in NewsContent:
line['date']=datetime.strptime(line.pop('publish_date'), '%Y%m%d').timestamp()
print(NewsContent[0])
# After this we discard the irrelevant words
top_words = 300
from collections import Counter
word_counter = Counter((' '.join([i['headline_text'] for i in NewsContent])).split(' '))
keymap = {item[0]: i+1 for i, item in enumerate(word_counter.most_common(top_words))}
#We collect the words to have them in a per day format
news_content_parsed2 = [[i['date'], [keymap[j] for j in i['headline_text'].split(' ') if j in keymap.keys()]] for i in NewsContent]
#lets check the format
news_content_parsed2[-1]
# Then we create a dictionary so we can combine the words belonging to the same date into one big list
wordsByDate={}
for wordOfNewWithDate in news_content_parsed2:
if wordOfNewWithDate[0] not in wordsByDate:
wordsByDate[wordOfNewWithDate[0]]=[]
wordsByDate[wordOfNewWithDate[0]].extend(wordOfNewWithDate[1])
#for date in wordsByDate.keys():
# wordsByDate[date]=sorted(wordsByDate[date])
# +++
# We eliminate all data whose date is out of the time range of the stock data
stock_dates=stock_dict['AAPL'].values[:, 0]
daily_news = {}
for k, v in wordsByDate.items():
if min(stock_dates) < k < max(stock_dates):
daily_news[int(k/86400)*86400] = daily_news.get(int(k/86400)*86400, []) + v
#Lets see how many words we have on each day so we can decide what size of input we should have for the network
a = []
for words in wordsByDate.values():
a.append(len(words))
#print sizes
print(min(a), np.mean(a), max(a))
# word vectors max. (kind of...) length
review_length = 600
from pandas import DataFrame
from pandas import concat
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
# put it all together
agg = concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
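# Minimal usage sketch (illustrative; toy values rather than project data):
# each output row pairs var1(t-1) with its successor var1(t): (1,2), (2,3), (3,4)
print(series_to_supervised([[1], [2], [3], [4]], n_in=1, n_out=1))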
|
[
"holloszaboakos.1996@gmail.com"
] |
holloszaboakos.1996@gmail.com
|
10db0ee4df7314f6e57d235041f86412e0e3397f
|
3869cbd5ee40e2bab5ca08b80b48115a7b4c1d5a
|
/Python-3/basic_examples/strings/string_contains.py
|
91c957a2f8af0439feffe2bf2eb5c39d78bec717
|
[
"MIT"
] |
permissive
|
Tecmax/journaldev
|
0774c441078816f22edfd68286621493dd271803
|
322caa8e88d98cfe7c71393bcd2a67cf77368884
|
refs/heads/master
| 2020-07-08T04:05:03.028015
| 2019-08-12T09:17:48
| 2019-08-12T09:17:48
| 203,559,030
| 0
| 1
|
MIT
| 2019-08-21T10:13:47
| 2019-08-21T10:13:47
| null |
UTF-8
|
Python
| false
| false
| 426
|
py
|
s = 'abc'
print('s contains a =', s.__contains__('a'))
print('s contains A =', s.__contains__('A'))
print('s contains X =', s.__contains__('X'))
print(str.__contains__('ABC', 'A'))
print(str.__contains__('ABC', 'D'))
input_str1 = input('Please enter first input string\n')
input_str2 = input('Please enter second input string\n')
print('First Input String Contains Second String? ', input_str1.__contains__(input_str2))
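# Note: the idiomatic membership test is the `in` operator, which calls
# __contains__ under the hood:
print('a' in s)  # True, equivalent to s.__contains__('a')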
|
[
"pankaj.0323@gmail.com"
] |
pankaj.0323@gmail.com
|
f8d7d22ef6d96ecd64064d87736a65402390b602
|
ee63710a782fc75491a92b7e7860f433d803701d
|
/Problems/How many nuts will be left after division/task.py
|
b515069005cdb92cd82fe6c48282cdb49eefa108
|
[] |
no_license
|
kudrinsky/jba_simple_chatty_bot
|
b252cc32c8701d8e18229ea1137ee9b82c915b1e
|
064e73ba58e929464f6f243f9a4602fddb602b27
|
refs/heads/master
| 2022-11-07T12:30:01.947207
| 2020-06-16T09:41:14
| 2020-06-16T09:41:14
| 272,668,740
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 78
|
py
|
squirrels = abs(int(input()))
nuts = abs(int(input()))
print(nuts % squirrels)
|
[
"66451886+kudrinsky@users.noreply.github.com"
] |
66451886+kudrinsky@users.noreply.github.com
|
0affee18b4eb2f0d6b168699a616474a7dd1bd50
|
368465d522ccfcb7292405943c153c8488bd3292
|
/blog/migrations/0002_comment.py
|
ec43cc40385aaf5c354e8551ebaa02af62a8f40d
|
[] |
no_license
|
Yejin6911/piro12-Django
|
d83d4fae68c6396e65aa3d1a9ce3ab34a39bbc41
|
70368a9a9c38b1921ecf7449c200153462774d21
|
refs/heads/master
| 2020-12-12T20:13:33.168241
| 2020-01-21T08:20:07
| 2020-01-21T08:20:07
| 234,218,096
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 629
|
py
|
# Generated by Django 2.2.9 on 2020-01-17 14:23
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('message', models.TextField()),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Post')),
],
),
]
|
[
"cdjin6911@gmail.com"
] |
cdjin6911@gmail.com
|
c3c54dc37a631e08fb78b5443f7372563c1875f3
|
9237a661640afa4b755ae2fc69a86dbdc75f019f
|
/analysis/splicing/splicing_differential_genesis_helpers.py
|
aa503a63c04976a31f6672e73dd1fbcb9bd5aaef
|
[
"MIT"
] |
permissive
|
TrentBrick/genesis
|
06ca7e665b0713d8dee9b9a966e3ff97fdb0eae3
|
d80725b51b4b97fb5cddde7b7f0dc1362c11b26b
|
refs/heads/master
| 2021-03-10T04:49:49.960116
| 2019-12-03T22:51:56
| 2019-12-03T22:51:56
| 246,420,721
| 0
| 1
|
MIT
| 2020-03-10T22:24:05
| 2020-03-10T22:24:04
| null |
UTF-8
|
Python
| false
| false
| 9,310
|
py
|
import isolearn.keras as iso
import numpy as np
import pandas as pd
import scipy.sparse as sp
import scipy.io as spio
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec  # required by gridspec.GridSpec below
import seaborn as sns
def plot_logo_w_hexamer_scores(pwm, iso_pred, cut_pred, hexamer_scores, cse_start_pos=70, annotate_peaks=None, sequence_template=None, figsize=(12, 3), width_ratios=[1, 7], logo_height=1.0, usage_unit='log', plot_start=0, plot_end=164, save_figs=False, fig_name=None, fig_dpi=300) :
n_samples = pwm.shape[0]
#Slice according to seq trim index
pwm = pwm[:, plot_start: plot_end, :]
cut_pred = cut_pred[:, plot_start: plot_end]
sequence_template = sequence_template[plot_start: plot_end]
iso_pred = np.mean(iso_pred, axis=0)
cut_pred = np.mean(cut_pred, axis=0)
pwm = np.sum(pwm, axis=0)
pwm += 0.0001
for j in range(0, pwm.shape[0]) :
pwm[j, :] /= np.sum(pwm[j, :])
entropy = np.zeros(pwm.shape)
entropy[pwm > 0] = pwm[pwm > 0] * -np.log2(pwm[pwm > 0])
entropy = np.sum(entropy, axis=1)
conservation = 2 - entropy
fig = plt.figure(figsize=figsize)
gs = gridspec.GridSpec(2, 2, width_ratios=[width_ratios[0], width_ratios[-1]], height_ratios=[1, 1])
ax0 = plt.subplot(gs[0, 0])
ax1 = plt.subplot(gs[0, 1])
ax2 = plt.subplot(gs[1, 0])
ax3 = plt.subplot(gs[1, 1])
plt.sca(ax0)
plt.axis('off')
plt.sca(ax2)
plt.axis('off')
annot_text = 'Samples = ' + str(int(n_samples))
if usage_unit == 'log' :
annot_text += '\nUsage = ' + str(round(np.log(iso_pred[0] / (1. - iso_pred[0])), 4))
else :
annot_text += '\nUsage = ' + str(round(iso_pred[0], 4))
ax2.text(0.99, 0.5, annot_text, horizontalalignment='right', verticalalignment='center', transform=ax2.transAxes, color='black', fontsize=12, weight="bold")
l2, = ax1.plot(np.arange(plot_end - plot_start), cut_pred, linewidth=3, linestyle='-', label='Predicted', color='red', alpha=0.7)
if annotate_peaks is not None :
objective_pos = 0
if annotate_peaks == 'max' :
objective_pos = np.argmax(cut_pred)
else :
objective_pos = annotate_peaks - plot_start
text_x, text_y, ha = -30, -5, 'right'
if objective_pos < 30 :
text_x, text_y, ha = 30, -5, 'left'
annot_text = '(CSE+' + str(objective_pos + plot_start - (cse_start_pos + 6) + 0) + ') ' + str(int(round(cut_pred[objective_pos] * 100, 0))) + '% Cleavage'
ax1.annotate(annot_text, xy=(objective_pos, cut_pred[objective_pos]), xycoords='data', xytext=(text_x, text_y), ha=ha, fontsize=10, weight="bold", color='black', textcoords='offset points', arrowprops=dict(connectionstyle="arc3,rad=-.1", headlength=8, headwidth=8, shrink=0.15, width=1.5, color='black'))
plt.sca(ax1)
plt.xlim((0, plot_end - plot_start))
#plt.ylim((0, 2))
plt.xticks([], [])
plt.yticks([], [])
plt.legend(handles=[l2], fontsize=12, prop=dict(weight='bold'), frameon=False)
plt.axis('off')
height_base = (1.0 - logo_height) / 2.
for j in range(0, pwm.shape[0]) :
sort_index = np.argsort(pwm[j, :])
for ii in range(0, 4) :
i = sort_index[ii]
nt_prob = pwm[j, i] * conservation[j]
nt = ''
if i == 0 :
nt = 'A'
elif i == 1 :
nt = 'C'
elif i == 2 :
nt = 'G'
elif i == 3 :
nt = 'T'
color = None
if sequence_template[j] != 'N' :
color = 'black'
if ii == 0 :
letterAt(nt, j + 0.5, height_base, nt_prob * logo_height, ax3, color=color)
else :
prev_prob = np.sum(pwm[j, sort_index[:ii]] * conservation[j]) * logo_height
letterAt(nt, j + 0.5, height_base + prev_prob, nt_prob * logo_height, ax3, color=color)
if j < len(hexamer_scores) :
#annot_text = str(hexamer_scores[j][0]) + " = " + str(round(hexamer_scores[j][1], 2))
annot_text = str(hexamer_scores[j][1])
ax3.text(j + 0.25, 2.0, annot_text, size=50, rotation=90., ha="left", va="bottom", fontsize=8, weight="bold", color='black')
plt.sca(ax3)
plt.xlim((0, plot_end - plot_start))
plt.ylim((0, 2.5))
plt.xticks([], [])
plt.yticks([], [])
plt.axis('off')
ax3.axhline(y=0.01 + height_base, color='black', linestyle='-', linewidth=2)
for axis in fig.axes :
axis.get_xaxis().set_visible(False)
axis.get_yaxis().set_visible(False)
plt.tight_layout()
if save_figs :
plt.savefig(fig_name + '.png', transparent=True, dpi=fig_dpi)
plt.savefig(fig_name + '.svg')
plt.savefig(fig_name + '.eps')
plt.show()
#plt.close()
mer6_dict = {}
i = 0
for b1 in ['A', 'C', 'G', 'T'] :
for b2 in ['A', 'C', 'G', 'T'] :
for b3 in ['A', 'C', 'G', 'T'] :
for b4 in ['A', 'C', 'G', 'T'] :
for b5 in ['A', 'C', 'G', 'T'] :
for b6 in ['A', 'C', 'G', 'T'] :
mer6_dict[b1 + b2 + b3 + b4 + b5 + b6] = i
i += 1
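# Sanity check (illustrative): the nested loops index hexamers in base-4
# (A, C, G, T) order, giving 4**6 = 4096 entries with indices 0..4095.
assert mer6_dict['AAAAAA'] == 0
assert mer6_dict['TTTTTT'] == 4095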
def get_hexamer_pred_both_regions(seq) :
preds = []
for cell_line, w, w_0 in zip(['hek', 'hela', 'mcf7', 'cho'], [hek_both_regions_w, hela_both_regions_w, mcf7_both_regions_w, cho_both_regions_w], [hek_both_regions_w_0, hela_both_regions_w_0, mcf7_both_regions_w_0, cho_both_regions_w_0]) :
pred = w_0
region_1 = seq[5: 40]
for j in range(0, len(region_1) - 5) :
pred += w[mer6_dict[region_1[j: j+6]]]
region_2 = seq[48: 83]
for j in range(0, len(region_2) - 5) :
pred += w[4096 + mer6_dict[region_2[j: j+6]]]
preds.append(1. / (1. + np.exp(-pred)))
return np.array(preds)
def get_hexamer_diff_scores_both_regions(seq, cell_1, cell_2) :
scores = []
w_dict = {
'hek' : hek_both_regions_w,
'hela' : hela_both_regions_w,
'mcf7' : mcf7_both_regions_w,
'cho' : cho_both_regions_w
}
w_cell_1 = w_dict[cell_1]
w_cell_2 = w_dict[cell_2]
scores.extend([('_', ''), ('_', ''), ('_', ''), ('_', ''), ('_', '')])
region_1 = seq[5: 40]
for j in range(0, len(region_1) - 5) :
hexamer_score_cell_1 = w_cell_1[mer6_dict[region_1[j: j+6]]]
hexamer_score_cell_2 = w_cell_2[mer6_dict[region_1[j: j+6]]]
scores.append((region_1[j: j+6], str(round(hexamer_score_cell_1 - hexamer_score_cell_2, 2))))
scores.extend([('_', ''), ('_', ''), ('_', ''), ('_', ''), ('_', ''), ('_', ''), ('_', ''), ('_', ''), ('_', ''), ('_', ''), ('_', ''), ('_', ''), ('_', '')])
region_2 = seq[48: 83]
for j in range(0, len(region_2) - 5) :
hexamer_score_cell_1 = w_cell_1[4096 + mer6_dict[region_2[j: j+6]]]
hexamer_score_cell_2 = w_cell_2[4096 + mer6_dict[region_2[j: j+6]]]
scores.append((region_2[j: j+6], str(round(hexamer_score_cell_1 - hexamer_score_cell_2, 2))))
return scores
def get_hexamer_preds_both_regions(seqs) :
preds = np.zeros((len(seqs), 4))
for i, seq in enumerate(seqs) :
preds[i, :] = get_hexamer_pred_both_regions(seq)
return preds
def get_hexamer_pred(seq) :
preds = []
for cell_line, w, w_0 in zip(['hek', 'hela', 'mcf7', 'cho'], [hek_w, hela_w, mcf7_w, cho_w], [hek_w_0, hela_w_0, mcf7_w_0, cho_w_0]) :
pred = w_0
region_1 = seq[5: 40]
for j in range(0, len(region_1) - 5) :
pred += w[mer6_dict[region_1[j: j+6]]]
preds.append(1. / (1. + np.exp(-pred)))
return np.array(preds)
def get_hexamer_diff_scores(seq, cell_1, cell_2) :
scores = []
w_dict = {
'hek' : hek_w,
'hela' : hela_w,
'mcf7' : mcf7_w,
'cho' : cho_w
}
w_cell_1 = w_dict[cell_1]
w_cell_2 = w_dict[cell_2]
scores.extend([('_', ''), ('_', ''), ('_', ''), ('_', ''), ('_', '')])
region_1 = seq[5: 40]
for j in range(0, len(region_1) - 5) :
hexamer_score_cell_1 = w_cell_1[mer6_dict[region_1[j: j+6]]]
hexamer_score_cell_2 = w_cell_2[mer6_dict[region_1[j: j+6]]]
scores.append((region_1[j: j+6], str(round(hexamer_score_cell_1 - hexamer_score_cell_2, 2))))
return scores
def get_hexamer_preds(seqs) :
preds = np.zeros((len(seqs), 4))
for i, seq in enumerate(seqs) :
preds[i, :] = get_hexamer_pred(seq)
return preds
def decode_onehot_consensus(onehot) :
seq = ''
for i in range(onehot.shape[0]) :
max_j = np.argmax(onehot[i, :])
if max_j == 0 :
seq += 'A'
elif max_j == 1 :
seq += 'C'
elif max_j == 2 :
seq += 'G'
elif max_j == 3 :
seq += 'T'
return seq
def decode_onehots_consensus(onehots) :
seqs = [
decode_onehot_consensus(onehots[i, :, :, 0]) for i in range(onehots.shape[0])
]
return seqs
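# Minimal usage sketch (illustrative; not part of the original analysis):
# demo = np.zeros((1, 4, 4, 1))
# demo[0, [0, 1, 2, 3], [0, 1, 2, 3], 0] = 1.0  # one-hot for the sequence 'ACGT'
# decode_onehots_consensus(demo)                # -> ['ACGT']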
|
[
"jlinder2@cs.washington.edu"
] |
jlinder2@cs.washington.edu
|
28f39f597186477ec91bf7e05985629bf67e5cc3
|
d45af79b136fe6f43ec039061dcba323e69fa3d1
|
/larb/models.py
|
c8468212b5e95e26c9b8a96d489608776a7d7d66
|
[] |
no_license
|
claireellul/allauth_tests
|
9111c552cb44e93725a087ffaf721fb4febbbcee
|
8f0fdbd390f6ebcae04ac4efb2063efa47a64b66
|
refs/heads/master
| 2021-01-09T20:33:42.123935
| 2016-08-07T09:32:24
| 2016-08-07T09:32:24
| 65,121,976
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,415
|
py
|
from django.contrib.auth.models import User
from django.db import models
from allauth.account.models import EmailAddress
from allauth.socialaccount.models import SocialAccount
import hashlib
class UserProfile(models.Model):
user = models.OneToOneField(User, related_name='profile')
about_me = models.TextField(null=True, blank=True)
def __unicode__(self):
return "{}'s profile".format(self.user.username)
class Meta:
db_table = 'user_profile'
def profile_image_url(self):
"""
Return the URL for the user's Facebook icon if the user is logged in via Facebook,
otherwise return the user's Gravatar URL
"""
fb_uid = SocialAccount.objects.filter(user_id=self.user.id, provider='facebook')
if len(fb_uid):
return "http://graph.facebook.com/{}/picture?width=40&height=40".format(fb_uid[0].uid)
return "http://www.gravatar.com/avatar/{}?s=40".format(
hashlib.md5(self.user.email).hexdigest())
def account_verified(self):
"""
If the user is logged in and has verified his or her email address, return True,
otherwise return False
"""
result = EmailAddress.objects.filter(email=self.user.email)
if len(result):
return result[0].verified
return False
User.profile = property(lambda u: UserProfile.objects.get_or_create(user=u)[0])
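# With the property above, views can simply do (illustrative):
# request.user.profile.account_verified()
# and the UserProfile row is created lazily on first access.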
|
[
"c.ellul@ucl.ac.uk"
] |
c.ellul@ucl.ac.uk
|
99d0f95105b52e4fc4cfacbe6ec49fbd64f80a9d
|
8afb5afd38548c631f6f9536846039ef6cb297b9
|
/MY_REPOS/INTERVIEW-PREP-COMPLETE/Leetcode/170.py
|
b238a6610bd16f36ca650f8091f32d871ece572a
|
[
"MIT"
] |
permissive
|
bgoonz/UsefulResourceRepo2.0
|
d87588ffd668bb498f7787b896cc7b20d83ce0ad
|
2cb4b45dd14a230aa0e800042e893f8dfb23beda
|
refs/heads/master
| 2023-03-17T01:22:05.254751
| 2022-08-11T03:18:22
| 2022-08-11T03:18:22
| 382,628,698
| 10
| 12
|
MIT
| 2022-10-10T14:13:54
| 2021-07-03T13:58:52
| null |
UTF-8
|
Python
| false
| false
| 865
|
py
|
class TwoSum:
def __init__(self):
"""
Initialize your data structure here.
"""
self.d = {}
def add(self, number: int) -> None:
"""
Add the number to an internal data structure..
"""
if number in self.d:
self.d[number] += 1
else:
self.d[number] = 1
def find(self, value: int) -> bool:
"""
Find if there exists any pair of numbers which sum is equal to the value.
"""
result = False
for n in self.d:
if value - n in self.d:
if value - n == n:
result = self.d[n] > 1
else:
return True
return result
# Your TwoSum object will be instantiated and called as such:
# obj = TwoSum()
# obj.add(number)
# param_2 = obj.find(value)
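# Illustrative check (hypothetical values):
# obj = TwoSum(); obj.add(1); obj.add(3); obj.add(5)
# obj.find(4) -> True (1 + 3), obj.find(6) -> True (1 + 5), obj.find(7) -> False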
|
[
"bryan.guner@gmail.com"
] |
bryan.guner@gmail.com
|
f94aab8232101484b98aba33e99cbdde14146796
|
cddfc1e27c32e0913c144694727d78d1e9961490
|
/locallibreria/noticias/migrations/0040_auto_20201128_1826.py
|
27231fd7ef3078c1115d7fa4a3e9a06384dc472c
|
[] |
no_license
|
jimfox27/Fase3BarrigaCabrera001
|
faf47502365603974721881d4e6d810286325369
|
149504becd4552a80f1c9b4fe8f48cb837ea4dfa
|
refs/heads/main
| 2023-01-23T11:03:08.589478
| 2020-11-30T02:57:42
| 2020-11-30T02:57:42
| 317,052,644
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 693
|
py
|
# Generated by Django 3.1.2 on 2020-11-28 21:26
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('noticias', '0039_profile'),
]
operations = [
migrations.RenameModel(
old_name='Profile',
new_name='Perfil',
),
migrations.RemoveField(
model_name='analisis',
name='autor',
),
migrations.RemoveField(
model_name='post',
name='autor',
),
migrations.DeleteModel(
name='User',
),
]
|
[
"jim.cabrera@alumnos.duoc.cl"
] |
jim.cabrera@alumnos.duoc.cl
|
b0fd358140c1977de4c6b967445752375d5b320d
|
0841643267b9fc1478f6e3d21bfccb17aba67af6
|
/gs_quant/test/timeseries/multi_measure/test_commod.py
|
62cf3e27ac706122e335c093c15aad79882e93ec
|
[
"Apache-2.0"
] |
permissive
|
goldmansachs/gs-quant
|
55618e0e4e961d4ee50b7393f27c258e2647a957
|
4cf8ec75c4d85b16ec08371c46cc1a9ede9d72a2
|
refs/heads/master
| 2023-08-20T00:55:43.324547
| 2023-08-16T16:55:22
| 2023-08-16T16:55:22
| 161,840,815
| 2,088
| 596
|
Apache-2.0
| 2023-08-16T16:55:23
| 2018-12-14T21:10:40
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,325
|
py
|
"""
Copyright 2021 Goldman Sachs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import datetime
import pandas as pd
import pytest
from gs_quant.timeseries import USE_DISPLAY_NAME
from pandas.testing import assert_series_equal
from testfixtures import Replacer
from gs_quant.api.gs.data import MarketDataResponseFrame
from gs_quant.data import DataContext
from gs_quant.errors import MqError
from gs_quant.markets.securities import CommodityNaturalGasHub, Cross
from gs_quant import timeseries as tm
_test_datasets = ('TEST_DATASET',)
@pytest.mark.skipif(not USE_DISPLAY_NAME, reason="requires a certain env var to run")
def test_forward_price():
# Tests for US NG assets
def mock_natgas_forward_price(_cls, _q, ignore_errors=False):
d = {
'forwardPrice': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_natgas_forward_price)
mock = CommodityNaturalGasHub('MA001', 'AGT')
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
actual = pd.Series(tm.forward_price(mock, price_method='GDD', contract_range='F21'))
expected = pd.Series([2.880], index=[datetime.date(2019, 1, 2)], name='price')
assert_series_equal(expected, actual)
with pytest.raises(MqError):
tm.forward_price(Cross('MA002', 'USD/EUR'), price_method='GDD', contract_range='F21')
replace.restore()
if __name__ == "__main__":
pytest.main(args=["test_commod.py"])
|
[
"noreply@github.com"
] |
goldmansachs.noreply@github.com
|
670d6399b2a39e759e420646da725fcdb2bf9977
|
93ad9efb1576207fe53d2d46051088d507aaae82
|
/Day05/regex3.py
|
c5ba0a293b4d7611743cd30d28a303db3be3d274
|
[] |
no_license
|
akkiaakriti/FSDP_2019
|
0e18653122ef283b4fa917edea9b644db3132ff7
|
6768d57642e441377dfc01f18f73240caeb97b34
|
refs/heads/master
| 2020-06-04T04:51:24.274917
| 2019-06-14T07:08:35
| 2019-06-14T07:08:35
| 191,879,133
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 413
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 7 13:21:14 2019
@author: Aakriti
"""
card=[]
n=int(input("enter the no.of credit cards>>"))
for i in range(n):
st=input("enter the credit card no. \n")
card.append(st)
print(card)
import re
# check whether each card number is valid: 16 digits starting with 4, 5 or 6
for num in card:
    if re.search(r'^[456][0-9]{15}$', num):
print(True)
else:
print(False)
|
[
"aakritiya2001@gmail.com"
] |
aakritiya2001@gmail.com
|
97a4c72c7fcccc9e7f1ef779b3c122d0659d0abc
|
d37c6cb24324325ce54306bfe538c7582fae1dcd
|
/testapp/eventhandler/migrations/0004_remove_event_author.py
|
6b065e4d33e7b46290c814ba15688074b8462a0d
|
[] |
no_license
|
lazysuperturtle/webPortfolio
|
fa996e7e7179197b3fc203d081131442869d0a32
|
7e50fac890e7c3b03cf965b86e6aa46b90612c9d
|
refs/heads/master
| 2022-11-18T09:34:08.217802
| 2020-07-09T18:39:40
| 2020-07-09T18:39:40
| 278,443,352
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 332
|
py
|
# Generated by Django 2.2.7 on 2020-05-02 12:03
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('eventhandler', '0003_auto_20200502_1144'),
]
operations = [
migrations.RemoveField(
model_name='event',
name='author',
),
]
|
[
"denisbereziuc@gmail.com"
] |
denisbereziuc@gmail.com
|
74f7f62634d459c2674276c9979443d8848b6d3c
|
54e5ef4f91e271ad8ae4582bac30257e52095aff
|
/leetcode/Compress_the_string.py
|
2bfcfd7e697601b2ca77385d055a82d1a50449a2
|
[] |
no_license
|
Rahuly-adav/leetcode
|
71e1fa638f689f45283e0022b34bbd58be4e6d5f
|
c2f345e3bd8353e01fc09ee1308f946dbc027c90
|
refs/heads/main
| 2023-02-25T10:42:36.189639
| 2021-02-02T13:28:56
| 2021-02-02T13:28:56
| 335,296,597
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 264
|
py
|
a="1213412346453876587654653425345343334343"
b=[]
i=0
while i<len(a):
co=1
d=1
for j in range(i+1,len(a)):
if a[i]==a[j]:
co+=1
d+=1
else:
break
b.append([a[i],co])
i+=d
print(b)
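# e.g. for a = "112" the loop yields [['1', 2], ['2', 1]]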
|
[
"noreply@github.com"
] |
Rahuly-adav.noreply@github.com
|
e59776c112e433c63c93c9b9c5ec31a490ebdcd0
|
6241a486ce4969187d9d1939bda063168b41927b
|
/SDL/occurance.py
|
d30cb93fe1ec49584f13b9a660e3cefc315f1fe5
|
[] |
no_license
|
AadityaDeshpande/TE-Assignments
|
0b9de8d691f26b6056360b8ce727ea0893bc628e
|
730480624d7a6952f2c23213c4f2711477e5eeaa
|
refs/heads/master
| 2022-03-07T15:12:06.603440
| 2019-09-25T13:34:51
| 2019-09-25T13:34:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 344
|
py
|
#input a word to search in a given string
para="this is a string to test the program hello hello is "
print(para)
para=para.split(" ")
count=0
z=input("Enter a word to search : ")
for i in para:
if(i==z):
count=count+1
print("***************************OUTPUT*****************************")
print("occurance of "+z+" is :")
print(count)
|
[
"noreply@github.com"
] |
AadityaDeshpande.noreply@github.com
|
cc3955239bdf5d9f39a4dce8866b9062fe174133
|
cbd5be8ec36d68dce7ebbfd78277ffe80bf0e50d
|
/fibo.py
|
e8eaed97ab1f5c13e3dd9caf1b2150da92af656f
|
[] |
no_license
|
Gaya3balu0509/program
|
56afa3451da71d7e434b3a1c10d22cef71e63acb
|
9b1c5c42b9f57368ecc2ee274e56d2ee094a58c8
|
refs/heads/master
| 2020-05-28T03:19:43.368488
| 2019-08-12T11:35:30
| 2019-08-12T11:35:30
| 188,865,443
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 116
|
py
|
numb1=int(input())
a,b=0,1
print(b,end=" ")
for i in range(1,numb1):
c=a+b
print(c,end=" ")
a=b
b=c
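# e.g. numb1 = 5 prints: 1 1 2 3 5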
|
[
"noreply@github.com"
] |
Gaya3balu0509.noreply@github.com
|
3d770c7e452d235ccfc7f3f73b99e99c26666f20
|
a5ce0be27c22f3db1a9829c5c30142a7db21daeb
|
/Sifter2-master/Fargate/Orchestrator/register_task.py
|
1dddd3e6fcd3815006c0771eb5fc6d512781356f
|
[] |
no_license
|
naveen-10d/backend-shift
|
eaa6fa49d48c55ddb194bd99ba9d45014a2d4f3a
|
fc7b9fd57525b5752ceddefd9933e2feeff3655c
|
refs/heads/master
| 2020-03-29T16:29:49.833605
| 2018-09-24T14:34:04
| 2018-09-24T14:34:04
| 150,116,190
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 916
|
py
|
import boto3
client = boto3.client('ecs')
response = client.register_task_definition(
family='test',
networkMode= 'awsvpc',
cpu='256',
memory='512',
taskRoleArn='arn:aws:iam::967636366309:role/ecsExecutionRole',
executionRoleArn='arn:aws:iam::967636366309:role/ecsExecutionRole',
requiresCompatibilities=[
'FARGATE'
],
containerDefinitions=[
{
'name': 'testhttp',
'command': [
'sleep',
'360',
],
#'cpu': 256,
'essential': True,
"image": "967636366309.dkr.ecr.us-east-1.amazonaws.com/geppetto/httpd:latest",
#'memory': 512,
'portMappings': [
{
"hostPort": 80,
"protocol": "tcp",
"containerPort": 80
}
],
},
],
)
print(response)
#taskRoleArn='',
|
[
"navyn10d@gmail.com"
] |
navyn10d@gmail.com
|
c07ae0324a8d1e763a658f7ca5d998829eaab954
|
2ea690a9cd4560aeaaa78dd54a88cc1f1a28b6fc
|
/fooddelivery/dbinsert.py
|
5eff11bd717f0af16dbf31de1eb4eb02a4601793
|
[
"MIT"
] |
permissive
|
rasika-chavan/Food-Delivery-System
|
56efb6c7864ff364984773a7cf268cf2d9f4fd22
|
9306c49ab26973a8e209f976ce027ac7e675f7b3
|
refs/heads/master
| 2021-04-23T18:13:13.029676
| 2020-04-24T05:29:11
| 2020-04-24T05:29:11
| 249,962,433
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,426
|
py
|
import sqlite3
conn = sqlite3.connect('C:/Users/Admin/Desktop/foodizz/fooddelivery/site1.db')
print("DB connected sucessfully")
cursor = conn.cursor()
print("Connected to SQLite")
sqlite_insert_query = """insert into course(coid,catgry_id,coname)values(30,0,"Desserts") """
count=cursor.execute(sqlite_insert_query)
conn.commit()
print("Inserted sucessfully")
cursor.close()
if(conn):
conn.close()
print("Connection closed")
'''
def convertToBinaryData(filename):
#Convert digital data to binary format
with open(filename, 'rb') as file:
blobData = file.read()
return blobData
def insertBLOB(pid, name, cost, details, category_id,category_name, course_id, course_name, restro_id, image_file1,quantity,food_type,iaa,ibb,icc,idd,iee,iff):
try:
conn = sqlite3.connect('C:/Users/Admin/Desktop/Foodizz/fooddelivery/site1.db')
cursor = conn.cursor()
print("Connected to SQLite")
sqlite_insert_blob_query = """INSERT INTO product(pid, name, cost, details, category_id, category_name,course_id,course_name,restro_id, image_file1,quantity,food_type,iaa,ibb,icc,idd,iee,iff) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?,?,?,?,?,?,?,?,?)"""
if image_file1:
img1 = convertToBinaryData(image_file1)
else:
img1=None
# Convert data into tuple format
data_tuple = (pid, name, cost, details, category_id,category_name, course_id, course_name, restro_id, img1,quantity,food_type,iaa,ibb,icc,idd,iee,iff)
cursor.execute(sqlite_insert_blob_query, data_tuple)
conn.commit()
print("Image and file inserted successfully as a BLOB into a table")
cursor.close()
except sqlite3.Error as error:
print("Failed to insert blob data into sqlite table", error)
finally:
if (conn):
conn.close()
print("the sqlite connection is closed")
insertBLOB(630012,"WINE CAKE",365,
"Wine cake, known in Spanish as torta envinada, is a cake made with wine in Colombian cuisine.Torta negra Colombiana (Colombian black cake) and Bizcocho Negro are similar cakes with varying ingredients (raisins, candied fruit, and rum)",
6, "CAKE AND PASTRIES",30,"Desserts",00, "C:/Users/Admin/Desktop/Foodizz/fooddelivery/static/c12.jpg"
,"500 GM","Non Veg","Energy : 470 Cal",
"Proteins : 7g",
"Carbohydrates : 59g",
"Fat : 13g",
"Fiber : 0.5g",
"Cholestrol : 10mg"
)
'''
|
[
"shreyadj5@gmail.com"
] |
shreyadj5@gmail.com
|
2b09786d97063f6e780eec0529832c7d10ea7f7c
|
aa771dcaaa255c49c36a7d29d6321932c584134f
|
/transformer_lm.py
|
7c802066440e2c8b4b9da85cf831f5bb94c1862c
|
[] |
no_license
|
svwriting/Anytime-Auto-Regressive-Model
|
88a81860913fefe93e7cc90117ee563addee77b9
|
9ee5a833e3614a20d98b64243e902f7995b90e26
|
refs/heads/master
| 2023-06-13T04:52:29.056698
| 2021-06-23T01:37:24
| 2021-06-23T01:37:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,131
|
py
|
from fairseq import options, utils
from fairseq.models import (
FairseqLanguageModel,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import (
Embedding,
TransformerDecoder,
)
from fairseq.modules import (
AdaptiveInput,
CharacterTokenEmbedder,
)
DEFAULT_MAX_TARGET_POSITIONS = 1024
#@register_model('transformer_lm')
class TransformerLanguageModel(FairseqLanguageModel):
@classmethod
def hub_models(cls):
def moses_fastbpe(path):
return {
'path': path,
'tokenizer': 'moses',
'bpe': 'fastbpe',
}
return {
'transformer_lm.gbw.adaptive_huge': 'https://dl.fbaipublicfiles.com/fairseq/models/lm/adaptive_lm_gbw_huge.tar.bz2',
'transformer_lm.wiki103.adaptive': 'https://dl.fbaipublicfiles.com/fairseq/models/lm/adaptive_lm_wiki103.tar.bz2',
'transformer_lm.wmt19.en': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt19.en.tar.bz2'),
'transformer_lm.wmt19.de': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt19.de.tar.bz2'),
'transformer_lm.wmt19.ru': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt19.ru.tar.bz2'),
}
def __init__(self, decoder):
super().__init__(decoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--activation-fn',
choices=utils.get_available_activation_fns(),
help='activation function to use')
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
parser.add_argument('--attention-dropout', type=float, metavar='D',
help='dropout probability for attention weights')
parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D',
help='dropout probability after activation in FFN.')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--decoder-output-dim', type=int, metavar='N',
help='decoder output dimension')
parser.add_argument('--decoder-input-dim', type=int, metavar='N',
help='decoder input dimension')
parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
help='decoder embedding dimension for FFN')
parser.add_argument('--decoder-layers', type=int, metavar='N',
help='num decoder layers')
parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
help='num decoder attention heads')
parser.add_argument('--no-decoder-final-norm', action='store_true',
help='don\'t add an extra layernorm after the last decoder block')
parser.add_argument('--share-decoder-input-output-embed', action='store_true',
help='share decoder input and output embeddings')
parser.add_argument('--decoder-learned-pos', action='store_true',
help='use learned positional embeddings in the decoder')
# args for "Reducing Transformer Depth on Demand with Structured Dropout" (Fan et al., 2019)
parser.add_argument('--layernorm-embedding', action='store_true',
help='add layernorm to embedding')
# fmt: on
@classmethod
def build_model(cls, args, vocab_size):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_lm_architecture(args)
embed_tokens = Embedding(vocab_size, args.decoder_input_dim)
decoder = TransformerDecoder(
args, None, embed_tokens, no_encoder_attn=True,
)
return TransformerLanguageModel(decoder)
#@register_model_architecture('transformer_lm', 'transformer_lm')
def base_lm_architecture(args):
# backward compatibility for older model checkpoints
if hasattr(args, 'decoder_final_norm'):
args.no_decoder_final_norm = not args.decoder_final_norm
args.dropout = 0.1
args.attention_dropout = 0.0
args.activation_dropout = 0.0
args.decoder_embed_dim = 512
args.decoder_ffn_embed_dim = 2048
args.decoder_layers = 6
args.decoder_attention_heads = 8
args.decoder_learned_pos = False
args.activation_fn = 'relu'
args.add_bos_token = False
args.no_token_positional_embeddings = False
args.share_decoder_input_output_embed = True
args.decoder_output_dim = 512
args.decoder_input_dim = 512
# Model training is not stable without this
args.decoder_normalize_before = True
args.no_decoder_final_norm = False
args.no_scale_embedding = False
args.layernorm_embedding = False
args.max_target_positions = 100
|
[
"aaronxu@DNab421a90.stanford.edu"
] |
aaronxu@DNab421a90.stanford.edu
|
96cfb7c483f708319a7c00e984a8af71fdb83c5e
|
121e16003427287b14a93fe7c3e915715cb16ff8
|
/config.py
|
4c2e1f1158a15987fd0d28e9da87046685a4deb2
|
[] |
no_license
|
sandsmark/desman
|
6eac2ffb83259acbc45b78cefc1cf15c366d5790
|
cb138159772a238a1bae3ba02a357c504d23f1b5
|
refs/heads/master
| 2021-03-13T00:12:01.574273
| 2014-06-13T21:37:48
| 2014-06-13T21:37:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 666
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Configuration - the python way
------------------------------
Fuck.
"""
#### ####
# Generic Configuration #
# Path to where Lollercoaster is installed:
PATH = "/home/cassarossa/itk/sandsmark/prosjekter/lollercoaster"
# Key used to authenticate communication:
KEY = "L%Fdfffd¤T¤%&//¤#RQWERFWE%T har har har, I'm an incredib"
#### ####
# Tester configuration #
# Local tests to run:
LOCAL_TESTS = ('apache_local', 'ssh_local')
# Port for the tester to listen on:
TESTER_PORT = 65000
# Max. number of threads:
MAX_CHILDREN = 10
# Path to tests:
TESTS_PATH = PATH + "/tests/"
|
[
"martin.sandsmark@kde.org"
] |
martin.sandsmark@kde.org
|
8134fcc62254ea05f408dee156c66462980f38c1
|
0b0f29a6c02c800a3efe6c832438356df1faf39a
|
/tensorflow/lite/experimental/examples/lstm/unidirectional_sequence_rnn_test.py
|
6f9e2dd9498f03665b52e423db43ce38d5401eb1
|
[
"Apache-2.0"
] |
permissive
|
minminsun/tensorflow
|
aa6ad47307e72489836015b116630efea89303ce
|
0f2192d6439bf6826d71f2ca46dbe44d585883af
|
refs/heads/master
| 2021-05-10T11:54:07.972749
| 2019-01-31T08:15:57
| 2019-01-31T08:15:57
| 168,342,618
| 1
| 0
|
Apache-2.0
| 2019-01-30T14:49:22
| 2019-01-30T12:53:21
|
C++
|
UTF-8
|
Python
| false
| false
| 7,155
|
py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow import flags
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.lite.experimental.examples.lstm.tflite_rnn import TfLiteRNNCell
from tensorflow.lite.python.op_hint import convert_op_hints_to_stubs
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
from tensorflow.python.tools import optimize_for_inference_lib
FLAGS = flags.FLAGS
# Number of steps to train model.
TRAIN_STEPS = 1
CONFIG = tf.ConfigProto(device_count={"GPU": 0})
class UnidirectionalSequenceRnnTest(test_util.TensorFlowTestCase):
def __init__(self, *args, **kwargs):
super(UnidirectionalSequenceRnnTest, self).__init__(*args, **kwargs)
# Define constants
# Unrolled through 28 time steps
self.time_steps = 28
# Rows of 28 pixels
self.n_input = 28
# Learning rate for Adam optimizer
self.learning_rate = 0.001
# MNIST is meant to be classified in 10 classes(0-9).
self.n_classes = 10
# Batch size
self.batch_size = 16
# Rnn Units.
self.num_units = 16
def setUp(self):
super(UnidirectionalSequenceRnnTest, self).setUp()
# Import MNIST dataset
data_dir = tempfile.mkdtemp(dir=FLAGS.test_tmpdir)
self.mnist = input_data.read_data_sets(data_dir, one_hot=True)
def buildRnnLayer(self):
return tf.nn.rnn_cell.MultiRNNCell([
TfLiteRNNCell(self.num_units, name="rnn1"),
TfLiteRNNCell(self.num_units, name="rnn2")
])
def buildModel(self, rnn_layer):
# Weights and biases for output softmax layer.
out_weights = tf.Variable(
tf.random_normal([self.num_units, self.n_classes]))
out_bias = tf.Variable(tf.random_normal([self.n_classes]))
# input image placeholder
x = tf.placeholder(
"float", [None, self.time_steps, self.n_input], name="INPUT_IMAGE")
# x is shaped [batch_size,time_steps,num_inputs]
rnn_input = tf.unstack(x, self.time_steps, 1)
outputs, _ = tf.nn.static_rnn(rnn_layer, rnn_input, dtype="float32")
# Compute logits by multiplying outputs[-1] of shape [batch_size,num_units]
# by the softmax layer's out_weight of shape [num_units,n_classes]
# plus out_bias
prediction = tf.matmul(outputs[-1], out_weights) + out_bias
output_class = tf.nn.softmax(prediction, name="OUTPUT_CLASS")
return x, prediction, output_class
def trainModel(self, x, prediction, output_class, sess):
# input label placeholder
y = tf.placeholder("float", [None, self.n_classes])
# Loss function
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
# Optimization
opt = tf.train.AdamOptimizer(
learning_rate=self.learning_rate).minimize(loss)
# Initialize variables
sess.run(tf.global_variables_initializer())
for _ in range(TRAIN_STEPS):
batch_x, batch_y = self.mnist.train.next_batch(
batch_size=self.batch_size, shuffle=False)
batch_x = batch_x.reshape((self.batch_size, self.time_steps,
self.n_input))
sess.run(opt, feed_dict={x: batch_x, y: batch_y})
def saveAndRestoreModel(self, rnn_layer, sess, saver):
"""Saves and restores the model to mimic the most common use case.
Args:
rnn_layer: The rnn layer either a single rnn cell or a multi rnn cell.
sess: Old session.
saver: saver created by tf.train.Saver()
Returns:
A tuple containing:
- Input tensor of the restored model.
- Prediction tensor of the restored model.
- Output tensor, which is the softwmax result of the prediction tensor.
- new session of the restored model.
"""
model_dir = tempfile.mkdtemp(dir=FLAGS.test_tmpdir)
saver.save(sess, model_dir)
# Reset the graph.
tf.reset_default_graph()
x, prediction, output_class = self.buildModel(rnn_layer)
new_sess = tf.Session(config=CONFIG)
saver = tf.train.Saver()
saver.restore(new_sess, model_dir)
return x, prediction, output_class, new_sess
def getInferenceResult(self, x, output_class, sess):
b1, _ = self.mnist.train.next_batch(batch_size=1)
sample_input = np.reshape(b1, (1, self.time_steps, self.n_input))
expected_output = sess.run(output_class, feed_dict={x: sample_input})
frozen_graph = tf.graph_util.convert_variables_to_constants(
sess, sess.graph_def, [output_class.op.name])
return sample_input, expected_output, frozen_graph
def tfliteInvoke(self, graph, test_inputs, outputs):
tf.reset_default_graph()
# Turn the input into placeholder of shape 1
tflite_input = tf.placeholder(
"float", [1, self.time_steps, self.n_input], name="INPUT_IMAGE_LITE")
tf.import_graph_def(graph, name="", input_map={"INPUT_IMAGE": tflite_input})
with tf.Session() as sess:
curr = sess.graph_def
curr = convert_op_hints_to_stubs(graph_def=curr)
curr = optimize_for_inference_lib.optimize_for_inference(
curr, ["INPUT_IMAGE_LITE"], ["OUTPUT_CLASS"],
[tf.float32.as_datatype_enum])
tflite = tf.lite.toco_convert(
curr, [tflite_input], [outputs], allow_custom_ops=False)
interpreter = tf.lite.Interpreter(model_content=tflite)
interpreter.allocate_tensors()
input_index = interpreter.get_input_details()[0]["index"]
interpreter.set_tensor(input_index, test_inputs)
interpreter.invoke()
output_index = interpreter.get_output_details()[0]["index"]
result = interpreter.get_tensor(output_index)
# Reset all variables so it will not pollute other inferences.
interpreter.reset_all_variables()
return result
def testStaticRnnMultiRnnCell(self):
sess = tf.Session(config=CONFIG)
x, prediction, output_class = self.buildModel(self.buildRnnLayer())
self.trainModel(x, prediction, output_class, sess)
saver = tf.train.Saver()
x, prediction, output_class, new_sess = self.saveAndRestoreModel(
self.buildRnnLayer(), sess, saver)
test_inputs, expected_output, frozen_graph = self.getInferenceResult(
x, output_class, new_sess)
result = self.tfliteInvoke(frozen_graph, test_inputs, output_class)
self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))
if __name__ == "__main__":
test.main()
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
8d6fdd93b74d1e23f7bc5d19b6e87989d4225dd9
|
60a107093402d7a50b4a16d495d043b9b71757f9
|
/model_builder.py
|
b888cf3702cf12bcf1b809a839732b526572130e
|
[
"MIT"
] |
permissive
|
AtriSaxena/ssd-detection-app
|
9428b8b6fe9e7f500f224922cc08e63b660fe600
|
2662d93c8daf0ed2226c4d956037ce14e092d94f
|
refs/heads/master
| 2020-12-26T23:52:40.653368
| 2020-02-02T00:43:37
| 2020-02-02T00:43:37
| 237,694,205
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,080
|
py
|
# ssds part
#from lib.modeling.ssds import ssd
#from lib.modeling.ssds import ssd_lite
import rfb
#from lib.modeling.ssds import rfb_lite
#from lib.modeling.ssds import fssd
#from lib.modeling.ssds import fssd_lite
#from lib.modeling.ssds import yolo
ssds_map = {
'rfb': rfb.build_rfb,
}
# nets part
#from lib.modeling.nets import vgg
import resnet
#from lib.modeling.nets import mobilenet
#from lib.modeling.nets import darknet
networks_map = {
'resnet_18': resnet.resnet_18,
'resnet_34': resnet.resnet_34,
'resnet_50': resnet.resnet_50,
'resnet_101': resnet.resnet_101
}
from prior_box import PriorBox
import torch
def _forward_features_size(model, img_size):
model.eval()
x = torch.rand(1, 3, img_size[0], img_size[1])
x = torch.autograd.Variable(x, volatile=True) #.cuda()
feature_maps = model(x, phase='feature')
return [(o.size()[2], o.size()[3]) for o in feature_maps]
def create_model():
'''
'''
ASPECT_RATIOS = [[2,3], [2, 3], [2, 3], [2, 3], [2], [2]]
IMAGE_SIZE = [300, 300]
FEATURE_LAYER = [[22, 34, 'S', 'S', '', ''], [512, 1024, 512, 256, 256, 256]]
NUM_CLASSES = 21
SIZES = [0.2, 0.95]
STEPS = []
CLIP = True
#
base = networks_map['resnet_50']
number_box= [2*len(aspect_ratios) if isinstance(aspect_ratios[0], int) else len(aspect_ratios) for aspect_ratios in ASPECT_RATIOS]
model = ssds_map['rfb'](base=base, feature_layer=FEATURE_LAYER, mbox=number_box, num_classes= NUM_CLASSES)
#
print(model)
feature_maps = _forward_features_size(model, IMAGE_SIZE)
print('==>Feature map size:')
print(feature_maps)
#
priorbox = PriorBox(image_size=IMAGE_SIZE, feature_maps=feature_maps, aspect_ratios=ASPECT_RATIOS,
scale=SIZES, archor_stride=STEPS, clip=CLIP)
# priors = Variable(priorbox.forward(), volatile=True)
return model, priorbox
|
[
"noreply@github.com"
] |
AtriSaxena.noreply@github.com
|
14fc007e35c67323a39194333b4fc33d88ef3611
|
bcfb257c25fda93175613ec17d0261fa9d8cebfc
|
/DownWeb/OpenStack/apps.py
|
0d604a888fdd64fcf74a27c83af66aebd15f0b38
|
[] |
no_license
|
downtiser/DownWeb
|
cca02e532c6d584a6e4e33f694d9e034d526e660
|
3c5c86a971a0c79af6e61b9b4d45edb8ce350b82
|
refs/heads/master
| 2020-03-28T14:29:55.013783
| 2018-09-14T08:20:54
| 2018-09-14T08:20:54
| 148,492,871
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 98
|
py
|
from django.apps import AppConfig
class OpenstackConfig(AppConfig):
name = 'OpenStack'
|
[
"noreply@github.com"
] |
downtiser.noreply@github.com
|
cc6bf435aa7d12ea3ebbb2f45ae5a21a4c5deb36
|
512ac468047cf3b8bf2580ac99c80fb8f0f938d3
|
/daemon/lib/raw_parsers/document_raw_parser.py
|
0a8a03ea4acfec3f846f1249c2f931b4d3644f66
|
[] |
no_license
|
secaglobal/Document-Generator
|
033b6d7a4eac7a761ba4209a16bf735592f2cc1e
|
bd80258cfd72703f126eedc06be53176e6168d27
|
refs/heads/master
| 2016-09-05T09:13:04.486702
| 2012-03-18T18:59:01
| 2012-03-18T18:59:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 541
|
py
|
import logging
from Exceptions import FileBroken
from json_raw_parser import JSONRawParser

class DocumentRawParser(object):
    """Interface for parsing document raw files (pattern Bridge)"""
    def __init__(self, filepath):
        self.filepath = filepath
        self.parser_impl = None

    def __getParser(self):
        """Returns the parser implementation, creating it lazily"""
        if not self.parser_impl:
            fp = open(self.filepath, 'r')
            line = fp.readline()
            if line == 'json0':
                self.parser_impl = JSONRawParser(fp)
            else:
                raise FileBroken()
        return self.parser_impl
|
[
"levandovskiy.s@gmail.com"
] |
levandovskiy.s@gmail.com
|
e937b8b73e73ecfc56fbada1397ff4fb0189da70
|
a925f08a313f3e74e02a1793db7679a37edf26ec
|
/homework, 2-1.py
|
8733dc0c8beb4c0abc18583de09ccd8bcaec5069
|
[] |
no_license
|
oynovichkova/python-learning
|
b1a5d476bdcc566bdf7b7d789eaa4ec78c96ea5a
|
a6727824fefa4c2f5cc6667da426549689263d26
|
refs/heads/master
| 2021-01-20T06:11:44.819623
| 2017-05-29T15:44:45
| 2017-05-29T15:44:45
| 89,849,716
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 396
|
py
|
# 1. Write a function that takes a number as input and returns
#    the Fibonacci sequence up to that number.
from __future__ import division  # __future__ imports must be the first statements in a file
import math

n = int(input())

def fib(n):
    # Binet's closed-form formula for the n-th Fibonacci number
    SQRT5 = math.sqrt(5)
    PHI = (SQRT5 + 1) / 2
    return int(PHI ** n / SQRT5 + 0.5)

print(fib(n))
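# e.g. n = 10 -> 55; the float-based formula is exact only for moderately small n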
|
[
"noreply@github.com"
] |
oynovichkova.noreply@github.com
|
d9684aba570e5e88fb207cccb8de9c32dddb5451
|
3345eb032afa159a5b4b32fe0b62e4435dac523f
|
/dingtalk/api/rest/OapiEduFamilyChildGetRequest.py
|
42eedcee8bcf1a1220f5d83407a7417d041548e1
|
[] |
no_license
|
xududu/dingDingApi
|
ada6c17e5c6e25fd00bdc4b444171b99bc9ebad7
|
b1e7c384eb8fb7f79f6f5a6879faadfa95d3eda3
|
refs/heads/master
| 2023-08-31T03:04:48.438185
| 2021-10-13T08:52:13
| 2021-10-13T08:52:13
| 416,558,271
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 348
|
py
|
'''
Created by auto_sdk on 2021.01.20
'''
from dingtalk.api.base import RestApi
class OapiEduFamilyChildGetRequest(RestApi):
def __init__(self,url=None):
RestApi.__init__(self,url)
self.child_userid = None
self.op_userid = None
def getHttpMethod(self):
return 'POST'
def getapiname(self):
return 'dingtalk.oapi.edu.family.child.get'
|
[
"739893589@qq.com"
] |
739893589@qq.com
|
def48508b78949edd5005daa6c518427f898c0c1
|
0f2c8dcd9b54e1c2723c18f1207a9b3b668e20be
|
/hints.py
|
035607b5c5ac301348e7358568c9e634ca345cb9
|
[] |
no_license
|
DiWorm/py-selenium-learning
|
68b68e11caf4f167c71d22715664688d11a69894
|
ba3fccfbbaf8e4318a13b327ef5aff02be5c5f9a
|
refs/heads/master
| 2020-04-26T23:56:54.201935
| 2019-04-02T10:32:51
| 2019-04-02T10:32:51
| 173,919,200
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,535
|
py
|
# does an element exist?
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
def is_element_present(driver, *args):
try:
driver.find_element(*args)
return True
except NoSuchElementException:
return False
is_element_present(driver, By.NAME, "q")
###
def are_elements_present(driver, *args):
return len(driver.find_elements(*args)) > 0
are_elements_present(driver, By.NAME, "q")
####################################################################
#wait element
# set up implicit waits
driver.implicitly_wait(10)
element = driver.find_element_by_name("q")
# explicit wait for an element to appear
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
wait = WebDriverWait(driver, 10) # seconds
element = wait.until(EC.presence_of_element_located((By.NAME, "q")))
# note that the locator is passed as a tuple!
####################################################################
# find elements via JS (execute_script) (nice with jQuery)
links = driver.execute_script("return $$('a:contains(WebDriver)')")
####################################################################
#send keys
from selenium.webdriver.common.keys import Keys
search_field.send_keys("selenium" + Keys.ENTER)
# if the field has an input mask, move to the beginning before typing
date_field.send_keys(Keys.HOME + "01.01.2001")
####################################################################
#drag n drop
from selenium.webdriver.common.action_chains import ActionChains
ActionChains(driver).move_to_element(drag).click_and_hold().move_to_element(drop).release().perform()
####################################################################
#wait element
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
wait = WebDriverWait(driver, 10) # seconds
# note that the locator is passed as a tuple!
element = wait.until(EC.presence_of_element_located((By.NAME, "q")))
element2 = wait.until(lambda d: d.find_element_by_name("q"))
####################################################################
#refresh element
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
wait = WebDriverWait(driver, 10) # seconds
driver.refresh()
wait.until(EC.staleness_of(element))
####################################################################
#wait visible
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
wait = WebDriverWait(driver, 10) # seconds
wait.until(EC.visibility_of(element))
####################################################################
#dismiss alerts
alert = driver.switch_to_alert()
alert_text = alert.text
alert.accept()
# or alert.dismiss()
####################################################################
#new tabs
main_window = driver.current_window_handle
old_windows = driver.window_handles
link.click() # opens a new window
# waiting for a new window whose handle is absent
# from the old_windows list is left
# as an exercise for the reader
new_window = wait.until(there_is_window_other_than(old_windows))
driver.switch_to_window(new_window)
# ...
driver.close()
driver.switch_to_window(main_window)
####################################################################
#switch to frame
driver.switch_to_frame(driver.find_element_by_tag_name("iframe"))
driver.switch_to_default_content()
####################################################################
# window size management
driver.set_window_size(800, 600)
driver.maximize_window()
####################################################################
#work with server
driver = webdriver.Remote("http://localhost:4444/wd/hub", desired_capabilities={"browserName": "chrome"})
####################################################################
## event-firing WebDriver listener
from selenium.webdriver.support.events import EventFiringWebDriver, AbstractEventListener
class MyListener(AbstractEventListener):
def before_find(self, by, value, driver):
print(by, value)
def after_find(self, by, value, driver):
print(by, value, "found")
def on_exception(self, exception, driver):
print(exception)
wd = EventFiringWebDriver(webdriver.Chrome(), MyListener())
####################################################################
#get screenshot
driver.get_screenshot_as_file('screen.png')
####################################################################
#get browser logs
for l in driver.get_log("browser"):
print(l)
####################################################################
#proxy traffic
driver = webdriver.Chrome(desired_capabilities={"proxy": {"proxyType": "MANUAL", "httpProxy": "localhost:8888"}})
####################################################################
'''
BrowserMobProxy (Java):
https://github.com/lightbody/browsermob-proxy
BrowserMobProxy, wrapper for C#:
https://github.com/AutomatedTester/AutomatedTester.BrowserMob
Usage example:
http://automatedtester.science/load-testing-with-selenium-and-browsermob-proxy/
Another wrapper for C#:
https://github.com/tmulkern/RemoteBrowserMobProxy
BrowserMobProxy, wrapper for Python:
https://github.com/AutomatedTester/browsermob-proxy-py
BrowserMobProxy, wrapper for Ruby:
https://github.com/jarib/browsermob-proxy-rb
BrowserMobProxy, wrapper for JavaScript:
https://github.com/zzo/browsermob-node
What a proxy lets you do:
http://selenium2.ru/articles/106-selenium-i-browsermobproxy-vmeste-veselee.html
Titanium: embeddable proxy for .Net:
https://github.com/justcoding121/Titanium-Web-Proxy
FiddlerCore: embeddable proxy for .Net:
http://www.telerik.com/fiddler/fiddlercore
mitmproxy: embeddable proxy for Python:
https://mitmproxy.org/
'''
|
[
"dyworm@gmail.com"
] |
dyworm@gmail.com
|
99324ee02481ac62b51faf12124967285ab469b3
|
f516b7561b93f640bcb376766a7ecc3440dcbb99
|
/contests/codeforces/ed-59-div-2-rated/a.py
|
72f069f62c4b5f6ac44f9b6d8c8c6372abc9c36c
|
[
"Apache-2.0"
] |
permissive
|
vtemian/interviews-prep
|
c41e1399cdaac9653c76d09598612f7450e6d302
|
ddef96b5ecc699a590376a892a804c143fe18034
|
refs/heads/master
| 2020-04-30T15:44:42.116286
| 2019-09-10T19:41:41
| 2019-09-10T19:41:41
| 176,928,167
| 8
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 321
|
py
|
n = int(input())
def solve():
no_digits = int(input())
seq = input()
if len(seq) < 2:
print('NO')
return
first = seq[0]
if not int(first) < int(seq[1:]):
print('NO')
return
print('YES')
print('2')
print(first, seq[1:])
while n:
solve()
n -= 1
|
[
"vladtemian@gmail.com"
] |
vladtemian@gmail.com
|
38740b98576b55d5cd8c85e332533a2421918824
|
c24ad19b65992dd2be3d3210b889d970e43b9cdc
|
/class/phase1/day14/exercise01.py
|
8005be1464a334e50856aaca74d8b3cb1914041a
|
[] |
no_license
|
ivoryli/myproject
|
23f39449a0bd23abcc3058c08149cebbfd787d12
|
cebfa2594198060d3f8f439c971864e0639bbf7e
|
refs/heads/master
| 2020-05-30T06:25:41.345645
| 2019-05-31T11:15:55
| 2019-05-31T11:15:55
| 189,578,491
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 274
|
py
|
def get_score():
    while True:
        try:
            n = int(input("Please enter a score: "))
        except ValueError:
            print("Invalid input")
            continue
        if 0 < n < 101:
            return n
        print("Score is out of range")

get_score()
|
[
"2712455490@qq.com"
] |
2712455490@qq.com
|
117b993bbae896721a3b122edc3855657b696d81
|
85cb87ea9f1faa780b14ca5acbc8ebd96feb498d
|
/leetcode_python/pic45.py
|
b26ce3b880a2fa3622c1d080df6b04161de06a74
|
[] |
no_license
|
Bxyz/learnPython
|
0225f0a70297eff68b1d77a447e95a35c24ba7f0
|
dfd698ed8237b47b0162d4840184c5d7c22f7c6c
|
refs/heads/master
| 2021-05-22T16:55:13.362339
| 2018-10-22T04:33:08
| 2018-10-22T04:33:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 88
|
py
|
a = 77
b = a & 3
print(b)
b = a & 0
print(b)
# bitwise AND: 0&0=0; 0&1=0; 1&0=0; 1&1=1
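# worked example: 77 = 0b1001101, so 77 & 3 = 0b01 = 1 and 77 & 0 = 0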
|
[
"645112728@qq.com"
] |
645112728@qq.com
|
e65a43c45593bea9823add36d55d59cbf9ee83a9
|
ae7849ced646132c235f1244856ef8f08108a294
|
/4sqdungeoncrawl.com/passenger_wsgi.py
|
0c744ef715fffe524428fe5f7b65e3fff81f80c9
|
[] |
no_license
|
sarvenna/dungeoncrawl
|
81726547c6e78846a9cf4fd2fa7dec7ba76137d5
|
f5849a849fce90297efe57a1b6d9adcb7b98170e
|
refs/heads/master
| 2021-01-10T00:54:42.031590
| 2011-02-20T01:12:47
| 2011-02-20T01:12:47
| 1,386,708
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 194
|
py
|
import sys, os
sys.path.append(os.getcwd())
os.environ['DJANGO_SETTINGS_MODULE'] = "dungeoncrawl.settings"
import django.core.handlers.wsgi
application = django.core.handlers.wsgi.WSGIHandler()
|
[
"dungeoncrawl@alcor.dreamhost.com"
] |
dungeoncrawl@alcor.dreamhost.com
|
ff8eaaceca06ce5bda3a8176aefdf72bb1f77635
|
f42a10b970c4e0c91f5430ce206297fa0f54f451
|
/application.py
|
3a0c0efb22f1b9df35f296b4b9fd08dd212a3831
|
[] |
no_license
|
williamssanders/generateMappings
|
1914cbbcb09e206d63690ac4315dc34f98351dff
|
22f11bb401a80f545d26174ab75112732970d924
|
refs/heads/master
| 2021-01-06T00:47:44.379656
| 2020-02-17T18:53:07
| 2020-02-17T18:53:07
| 241,181,690
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,768
|
py
|
class Application:
def __init__(self,key,name,rate,perturbedRate,computeName,mapping):
self.key = key
self.name = name
self.rate = rate
self.perturbedRate = perturbedRate
self.computeName = computeName
self.mapping = mapping
def get_key(self):
return self.key
def set_key(self,key):
self.key = key
def get_name(self):
return self.name
def set_name(self,name):
self.name = name
def get_rate(self):
return self.rate
def set_rate(self,rate):
self.rate = rate
def get_perturbedRate(self):
return self.perturbedRate
def set_perturbedRate(self,perturbedRate):
self.perturbedRate = perturbedRate
def get_computeName(self):
return self.computeName
def set_computeName(self,computeName):
self.computeName = computeName
def get_mapping(self):
return self.mapping
def set_mapping(self,mapping):
self.mapping = mapping
def print_values(self):
#print(str(self.key) + "," + self.name + "," + str(self.rate) + "," + str(self.perturbedRate) + "," + self.computeName + "," + self.mapping)
return(str(self.key) + "," + self.name + "," + str(self.rate) + "," + str(self.perturbedRate) + "," + self.computeName + "," + self.mapping)
def definition(self):
#print(self.name + " = (" + self.computeName + ", infty)." + self.name + ";")
return(self.name + " = (" + self.computeName + ", infty)." + self.name + ";")
def string_rate(self):
return("r" + str(self.key) + " = " + str(self.rate) + ";")
def string_perturbedRate(self):
return("p" + str(self.key) + " = " + str(self.perturbedRate) + ";")
|
[
"william.s.sanders@gmail.com"
] |
william.s.sanders@gmail.com
|
bb9f0c7c0157706622f6063db74fe839cbfb853b
|
7c80dad63f56f7c83fd9d1d970804da2c8c79a9c
|
/apps/blog/templatetags/my_func.py
|
397f876f9dd3dec69a3c01ee3948461962b09503
|
[] |
no_license
|
zzy0371/Django-demo1
|
d7115ff9b236f9b21e12e2f259e850b28b905fd0
|
25f32035d38a69681a3eafb02fda1b46436b16f5
|
refs/heads/master
| 2022-12-07T18:33:53.805005
| 2019-08-23T02:30:18
| 2019-08-23T02:30:18
| 203,908,229
| 0
| 0
| null | 2022-11-22T04:12:26
| 2019-08-23T02:23:24
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 818
|
py
|
"""
自定义模板表达式
扩展Django原有功能
"""
from django.template import library
register = library.Library()
from blog.models import Article,Category,Tag,Ads
@register.simple_tag
def getads():
return Ads.objects.all()
@register.simple_tag
def getlatestarticles(num=3):
return Article.objects.order_by("-create_time")[:num]
@register.simple_tag
def gettimes():
times = Article.objects.dates("create_time","month","DESC")
return times
@register.simple_tag
def getcategorys():
return Category.objects.all()
@register.simple_tag
def gettags():
return Tag.objects.all()
@register.filter
def mylower(value):
return value.lower()
@register.filter
def myjoin(value,spl):
return spl.join(value)
@register.filter
def myjoin2(value,spl,s):
return spl.join(value) + s
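# Assuming the module is importable as blog.templatetags.my_func (as the file
# path suggests) and Django settings are configured, the filters can be
# smoke-tested outside a template:
from blog.templatetags.my_func import mylower, myjoin, myjoin2

assert mylower("HeLLo") == "hello"
assert myjoin(["a", "b", "c"], "-") == "a-b-c"
assert myjoin2(["a", "b"], "-", "!") == "a-b!"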
|
[
"496575233@qq.com"
] |
496575233@qq.com
|
26dcbfa26bb2658c145592501ec56fe06211e560
|
340fd9a6345a1fd6ea5c494fd1e25ef1a15cd83d
|
/my_blog/article/migrations/0002_articlepost_author.py
|
4e223e50fc8e55a2de6e430d3361f5dab52f13d0
|
[] |
no_license
|
ltfred/blog
|
48024992922d570db7042d56855f6c28b7c581f8
|
9e7c9253a4576cbc443edea8fdebed92733d6f9f
|
refs/heads/master
| 2020-07-26T15:24:24.215702
| 2019-09-23T02:18:55
| 2019-09-23T02:18:55
| 208,689,787
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 686
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2019-09-16 06:07
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('article', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='articlepost',
name='author',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='文章作者'),
),
]
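# For context, a model field that would generate an AddField like the one
# above looks roughly like this (a sketch, not the project's actual models.py):
from django.conf import settings
from django.db import models

class ArticlePost(models.Model):
    author = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
        verbose_name='文章作者',  # "article author"
    )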
|
[
"liutao@Freddembp.local"
] |
liutao@Freddembp.local
|
d7e3a9a23c6de743ce2aa6d5ceebc803276c5541
|
50be0c9ffbb8e019c723add6447efcfc70e9b250
|
/faceNet/get_emb_serving/pre_whiten.py
|
b470dfae122d90cd70196c941c8ff349c9a2d721
|
[] |
no_license
|
wkkyle/based_on_facenet
|
4df2fdcd20c83fe9a8e77e8f493794587fe962e0
|
1f9cb914d86934afd24697342e488b068bb813dc
|
refs/heads/master
| 2020-06-12T16:02:08.065407
| 2019-06-28T08:46:36
| 2019-06-28T08:46:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,475
|
py
|
import numpy as np
from scipy import misc
import os
import cv2
def load_data(image_paths, image_size):
images = np.zeros((image_size, image_size, 3))
img = misc.imread(image_paths)
    # print('pre-whiten', img)
if img.ndim == 2:
img = to_rgb(img)
img = prewhiten(img)
images[:,:,:] = img
images = misc.imresize(images,(160, 160))
    # print('after-whiten', images)
return images
def prewhiten(x):
mean = np.mean(x)
std = np.std(x)
std_adj = np.maximum(std, 1.0/np.sqrt(x.size))
y = np.multiply(np.subtract(x, mean), 1/std_adj)
return y
def to_rgb(img):
w, h = img.shape
ret = np.empty((w, h, 3), dtype=np.uint8)
ret[:, :, 0] = ret[:, :, 1] = ret[:, :, 2] = img
return ret
def main():
train_data_dir = 'F:/baidu_crop'
folders = os.listdir(train_data_dir)
for i in folders:
for j in os.listdir(os.path.join(train_data_dir, i)):
img_path = train_data_dir + '/' + i + '/' + j
images = load_data(img_path, 182)
            dst_img_dir = train_data_dir + '_whiten_160/' + i
if not os.path.exists(dst_img_dir):
os.makedirs(dst_img_dir)
dst_img_path = dst_img_dir + '/' + j
misc.imsave(dst_img_path, images)
# cv2.imwrite(dst_img_path, images.convertTo(images, CV_8UC3, 255.0))
            # cv2.imshow('prewhiten', images)
# cv2.waitKey()
if __name__ == '__main__':
main()
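# prewhiten() above is per-image standardization with the std clamped away
# from zero; a tiny numeric check of that invariant:
import numpy as np

x = np.array([[0., 128.], [255., 64.]])
std_adj = max(x.std(), 1.0 / np.sqrt(x.size))
y = (x - x.mean()) / std_adj
print(abs(y.mean()) < 1e-9, abs(y.std() - 1.0) < 1e-9)  # True True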
|
[
"zhushuaisjtu@qq.com"
] |
zhushuaisjtu@qq.com
|
07ff42ba2fc6571a35e448bad26b012e1fccf8d7
|
9a70f9d53466db9bd6c772505a7084668ad16a2c
|
/fbone/config.py
|
c497c0f25e0c341482262071058190b1d1ea2da9
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
butfriendly/flaskbone
|
14a27a52a83e094196cddb76190804e7c514836e
|
b3e0316eb53892423e2b5f24a2627184ba2bcb91
|
refs/heads/master
| 2021-01-18T12:06:32.536036
| 2012-12-06T09:33:06
| 2012-12-06T09:33:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,426
|
py
|
# -*- coding: utf-8 -*-
PROJECT = "fbone"
class BaseConfig(object):
DEBUG = False
TESTING = False
# os.urandom(24)
SECRET_KEY = 'secret key'
class DefaultConfig(BaseConfig):
DEBUG = True
SQLALCHEMY_ECHO = True
# Sqlite
# Use a tmp database, change to anywhere to suit yourself.
SQLALCHEMY_DATABASE_URI = 'sqlite:////tmp/%s.sqlite' % PROJECT
# Mysql:
#SQLALCHEMY_DATABASE_URI = 'mysql://dbusername:dbpassword@dbhost/dbname'
# To create log folder.
# $ sudo mkdir -p /var/log/<PROJECT>
# $ sudo chown $USER /var/log/<PROJECT>
DEBUG_LOG = '/var/log/%s/debug.log' % PROJECT
ACCEPT_LANGUAGES = ['zh']
BABEL_DEFAULT_LOCALE = 'en'
CACHE_TYPE = 'simple'
CACHE_DEFAULT_TIMEOUT = 60
# Should be imported from env var.
# https://bitbucket.org/danjac/flask-mail/issue/3/problem-with-gmails-smtp-server
MAIL_DEBUG = DEBUG
MAIL_SERVER = 'smtp.gmail.com'
MAIL_USE_TLS = True
MAIL_USE_SSL = False
MAIL_USERNAME = 'gmail_username'
MAIL_PASSWORD = 'gmail_password'
DEFAULT_MAIL_SENDER = '%s@gmail.com' % MAIL_USERNAME
# Should be imported from env var.
# export FBONE_APP_CONFIG=/home/wilson/.fbone.cfg
USER_IMG_UPLOAD_PATH = "/path/to/fbone/static/img/users"
class TestConfig(BaseConfig):
TESTING = True
CSRF_ENABLED = False
SQLALCHEMY_ECHO = False
SQLALCHEMY_DATABASE_URI = 'sqlite://'
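# A sketch of how these config classes are typically consumed in a Flask app
# factory (the factory itself is assumed, not part of this file):
from flask import Flask

def create_app(config_object='fbone.config.DefaultConfig'):
    app = Flask(__name__)
    app.config.from_object(config_object)  # DefaultConfig for dev, TestConfig for tests
    return app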
|
[
"im.wilson.xu@gmail.com"
] |
im.wilson.xu@gmail.com
|
198c93de09e3177371ddbe742a4b5e083ea10b82
|
9b58789397b939438d6016a13334b32de58f458f
|
/psdaq/psdaq/control_gui/CGWMainTabExpert.py
|
1af1af7b97771df392a36211575f77930c5ed68b
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
AntoineDujardin/lcls2
|
8a07e73ebf2d583e6a3ff518147b1908cce9e17b
|
8b9d2815497fbbabb4d37800fd86a7be1728b552
|
refs/heads/master
| 2020-07-07T19:22:05.176926
| 2019-08-20T20:46:52
| 2019-08-20T20:48:46
| 203,452,948
| 0
| 0
|
NOASSERTION
| 2019-08-20T20:53:56
| 2019-08-20T20:53:55
| null |
UTF-8
|
Python
| false
| false
| 3,826
|
py
|
"""
Class :py:class:`CGWMainTabExpert` is a QWidget for the "Expert" tab of the DAQ control GUI
=======================================================================
Usage ::
import sys
from PyQt5.QtWidgets import QApplication
from psdaq.control_gui.CGWMainTabExpert import CGWMainTabExpert
app = QApplication(sys.argv)
w = CGWMainTabExpert(None, app)
w.show()
app.exec_()
See:
- :class:`CGWMainTabExpert`
- :class:`CGWMainPartition`
- `lcls2 on github <https://github.com/slac-lcls/lcls2/psdaq/psdaq/control_gui>`_.
Created on 2019-05-07 by Mikhail Dubrovin
"""
#------------------------------
import logging
logger = logging.getLogger(__name__)
#------------------------------
from time import time
from PyQt5.QtWidgets import QWidget, QHBoxLayout, QVBoxLayout, QSplitter, QTextEdit, QSizePolicy
from PyQt5.QtCore import Qt, QSize
from psdaq.control_gui.CGWMainPartition import CGWMainPartition
from psdaq.control_gui.CGWMainControl import CGWMainControl
#------------------------------
class CGWMainTabExpert(QWidget) :
_name = 'CGWMainTabExpert'
def __init__(self, **kwargs) :
parent = kwargs.get('parent', None)
parent_ctrl = kwargs.get('parent_ctrl', None)
QWidget.__init__(self, parent=None)
logger.debug('In %s' % self._name)
self.wpart = CGWMainPartition()
parent_ctrl.wpart = self.wpart
parent_ctrl.wcoll = self.wpart.wcoll
self.wctrl = CGWMainControl(parent, parent_ctrl)
parent_ctrl.wctrl = self.wctrl
#self.wpart = QTextEdit('Txt 1')
#self.wctrl = QTextEdit('Txt 2')
self.vspl = QSplitter(Qt.Vertical)
self.vspl.addWidget(self.wpart)
self.vspl.addWidget(self.wctrl)
self.mbox = QHBoxLayout()
self.mbox.addWidget(self.vspl)
self.setLayout(self.mbox)
self.set_style()
#------------------------------
def set_tool_tips(self) :
pass
#self.butStop.setToolTip('Not implemented yet...')
#--------------------
def sizeHint(self):
return QSize(300, 280)
#--------------------
def set_style(self) :
self.setMinimumSize(280, 260)
self.layout().setContentsMargins(0,0,0,0)
self.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Preferred)
#--------------------
def closeEvent(self, e) :
logger.debug('%s.closeEvent' % self._name)
try :
pass
#self.wpart.close()
#self.wctrl.close()
except Exception as ex:
print('Exception: %s' % ex)
#--------------------
if __name__ == "__main__" :
def resizeEvent(self, e):
#logger.debug('resizeEvent', self._name)
print('CGWMainTabExpert.resizeEvent: %s' % str(self.size()))
#def moveEvent(self, e) :
#logger.debug('moveEvent', self._name)
#self.position = self.mapToGlobal(self.pos())
#self.position = self.pos()
#logger.debug('moveEvent - pos:' + str(self.position), __name__)
#logger.info('CGWMainTabExpert.moveEvent - move window to x,y: ', str(self.mapToGlobal(QPoint(0,0))))
#self.wimg.move(self.pos() + QPoint(self.width()+5, 0))
#pass
#--------------------
if __name__ == "__main__" :
from psdaq.control_gui.CGDaqControl import daq_control, DaqControlEmulator, Emulator
daq_control.set_daq_control(DaqControlEmulator())
import sys
logging.basicConfig(format='%(asctime)s %(name)s %(levelname)s: %(message)s', datefmt='%H:%M:%S', level=logging.DEBUG)
from PyQt5.QtWidgets import QApplication
app = QApplication(sys.argv)
kwargs = {'parent':None, 'parent_ctrl':Emulator()}
w = CGWMainTabExpert(**kwargs)
w.show()
app.exec_()
del w
del app
#------------------------------
|
[
"msdubrovin@gmail.com"
] |
msdubrovin@gmail.com
|
2c994000c850714168262903315cb27d77c69372
|
144cfa74610b439d0fdb867b72f4bba82990e86f
|
/Ejercicios Realizados/librerias/datetime_ejercicio_tres.py
|
040ae641cae57658b17893714aecfbb3bb3e436b
|
[] |
no_license
|
rbo93/mi_primer_programa
|
2d2ea089f5cabbb491fc9ea38f1c711d15caaf4f
|
1f65b9dea507696944975c829e17d5b4bfde79ba
|
refs/heads/master
| 2020-07-31T07:13:18.148156
| 2019-10-28T04:15:22
| 2019-10-28T04:15:22
| 210,527,004
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,335
|
py
|
"""
Ejercicio 3
Crea un programa que te diga, introduciendo cualquier fecha, cuantas horas han pasado desde ese momento.
"""
import datetime
hoy = datetime.datetime.now()
print("Fecha actual {} {}:{}".format(hoy.strftime("%d-%m-%y"), hoy.hour, hoy.minute))
print("Dime una fecha y te dire cuantas horas han pasado de ese momento")
day = int(input("Dime el dia: "))
month = int(input("Dime el mes: "))
year = int(input("Dime el año: "))
hour = hoy.hour
user_date = datetime.datetime(year=year, month=month, day=day, hour=hour)
if year <= hoy.year:
if month < hoy.month or day < hoy.day:
time_calculated = datetime.datetime.now() - user_date
print("Han pasado {} horas hasta llegar a hoy: {}".format(int(time_calculated.total_seconds() / 3600),
hoy.strftime("%d-%m-%y")))
else:
time_calculated = user_date - datetime.datetime.now()
print("Recien vuelvo del futuru y te informo que faltan {} horas para que se produzca la fecha indicada.".format(
int(time_calculated.total_seconds() / 3600)))
else:
time_calculated = user_date - datetime.datetime.now()
print("Recien vuelvo del futuru y te informo que faltan {} horas para que se produzca la fecha indicada.".format(int(time_calculated.total_seconds() / 3600)))
|
[
"rbo.93@hotmail.com"
] |
rbo.93@hotmail.com
|
b1055682fe506ed63bbfba2ce4bc1c423a6661f1
|
35825218a8e032dc44abd216788be92f8c518d1c
|
/event_extraction/model.py
|
61944fa2a3387b07f35e09d7c3a8e94d0810b5d7
|
[] |
no_license
|
qfzxhy/EventExtraction_system
|
31deca268e1d05eeee0ebaa3656762803bee9558
|
ad03d24a030ac6ea2467cf1d9d5a97621fa98399
|
refs/heads/master
| 2020-03-19T01:05:01.160144
| 2018-05-31T02:32:30
| 2018-05-31T02:32:30
| 135,521,338
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 41,293
|
py
|
# -*- coding:utf-8 -*-
__author__ = 'qianf'
import helper
import tensorflow as tf
import codecs
# from tensorflow.models.rnn import rnn
import math
import os
import numpy as np
import heapq
import MDAtt as mdatt
# from voc import Tri_Tag_Index
# tri_num_class = 4
# print(tri_num_class)
def bidirectional_LSTM(input, hidden_state_dimension, initializer, sequence_length=None, output_sequence=True):
with tf.variable_scope("bidirectional_LSTM"):
if sequence_length == None:
batch_size = 1
sequence_length = tf.shape(input)[1]
sequence_length = tf.expand_dims(sequence_length, axis=0, name='sequence_length')
else:
batch_size = tf.shape(sequence_length)[0]
lstm_cell = {}
initial_state = {}
for direction in ["forward", "backward"]:
with tf.variable_scope(direction):
# LSTM cell
lstm_cell[direction] = tf.contrib.rnn.CoupledInputForgetGateLSTMCell(hidden_state_dimension,
forget_bias=1.0,
initializer=initializer,
state_is_tuple=True)
# initial state: http://stackoverflow.com/questions/38441589/tensorflow-rnn-initial-state
initial_cell_state = tf.get_variable("initial_cell_state", shape=[1, hidden_state_dimension],
dtype=tf.float32, initializer=initializer)
initial_output_state = tf.get_variable("initial_output_state", shape=[1, hidden_state_dimension],
dtype=tf.float32, initializer=initializer)
c_states = tf.tile(initial_cell_state, tf.stack([batch_size, 1]))
h_states = tf.tile(initial_output_state, tf.stack([batch_size, 1]))
initial_state[direction] = tf.contrib.rnn.LSTMStateTuple(c_states, h_states)
# sequence_length must be provided for tf.nn.bidirectional_dynamic_rnn due to internal bug
outputs, final_states = tf.nn.bidirectional_dynamic_rnn(lstm_cell["forward"],
lstm_cell["backward"],
input,
dtype=tf.float32,
sequence_length=sequence_length,
initial_state_fw=initial_state["forward"],
initial_state_bw=initial_state["backward"])
if output_sequence == True:
outputs_forward, outputs_backward = outputs
output = tf.concat([outputs_forward, outputs_backward], axis=2, name='output_sequence')
else:
final_states_forward, final_states_backward = final_states
output = tf.concat([final_states_forward[1], final_states_backward[1]], axis=1, name='output')
return output
class BILSTM(object):
def __init__(self, FLAGs):
self.FLAGS = FLAGs
# self.patients = FLAGs.patients
self.task1_num_classess = FLAGs.task1_num_class
self.task2_num_classess = FLAGs.task2_num_class
self.num_corpus = FLAGs.num_corpus
self.num_words = FLAGs.num_word
self.token_emb_dim = FLAGs.token_edim
self.num_postags = FLAGs.num_postag
self.postag_emb_dim = FLAGs.postag_emb_dim
self.num_hiddens = FLAGs.num_hidden
self.token_max_len = FLAGs.token_max_len
self.num_layers = 1
self.pretrain_emb = FLAGs.pretrain_emb
# self.is_crf = FLAGs.use_crf
self.num_units = 100
self.if_l2 = False
self.learning_rate = FLAGs.init_lr
self.max_f1 = -1.0
# self.sess = sess
self.sou_global_step = tf.Variable(0, name='sou_global_step', trainable=False)
self.tri_global_step = tf.Variable(0, name='tri_global_step', trainable=False)
self.batch_size = tf.placeholder(tf.int32, name='batch_size')
self.input_token_indices = tf.placeholder(tf.int32, [None, None], name="input_token_indices")
self.input_token_character_indices = tf.placeholder(tf.int32, [None, None, self.token_max_len],name="input_token_character_indices")
self.input_postag_indices = tf.placeholder(tf.int32, [None, None], name="input_postag_indices")
self.input_suffix_indices = tf.placeholder(tf.int32, [None, None], name="input_suffix_indices")
self.y_entitys = tf.placeholder(tf.int32, [None, None], name='y_entitys')
# two class
self.y_entitys_tc = tf.placeholder(tf.int32, [None, None], name='y_entitys_tc')
# self.y_targets = tf.placeholder(tf.int32, [None, None], name='y_targets')
# #two class
# self.y_targets_tc = tf.placeholder(tf.int32, [None, None], name='y_targets_tc')
self.y_triggers = tf.placeholder(tf.int32, [None, None], name='y_triggers')
self.y_triggers_tc = tf.placeholder(tf.int32, [None, None], name='y_triggers_tc')
self.keep_dropout = tf.placeholder(dtype=tf.float32, name='keep_dropout')
self.initializer = tf.contrib.layers.xavier_initializer(seed=0)
self.length = tf.reduce_sum(tf.sign(self.input_token_indices), 1)
self.length = tf.to_int32(self.length)
max_seq_len = tf.shape(self.input_token_indices)[1]
#
def _entity_model(input_data, seq_len):
# batchsize, step, dim
# print(input_data.get_shape()[-1].value)
with tf.variable_scope("bilstm_layer_1"):
lstm_outputs_flat = bidirectional_LSTM(input_data, self.num_hiddens, self.initializer, sequence_length=seq_len, output_sequence=True)
# batchsize*step , 2*dim
lstm_outputs = tf.reshape(lstm_outputs_flat, shape=[-1, lstm_outputs_flat.get_shape()[-1].value])
with tf.variable_scope("feedforward_tc"):
scores_tc = _common_layer(lstm_outputs, 2)
with tf.variable_scope("bilstm_layer_2"):
lstm_outputs = bidirectional_LSTM(lstm_outputs_flat, self.num_hiddens, self.initializer, sequence_length=seq_len, output_sequence=True)
return lstm_outputs,scores_tc
def _trigger_model(input_data, seq_len):
# batchsize, step, dim
# print(input_data.get_shape()[-1].value)
with tf.variable_scope("bilstm_layer_1"):
lstm_outputs_flat = bidirectional_LSTM(input_data, self.num_hiddens, self.initializer, sequence_length=seq_len, output_sequence=True)
# batchsize*step , 2*dim
lstm_outputs = tf.reshape(lstm_outputs_flat, shape=[-1, lstm_outputs_flat.get_shape()[-1].value])
with tf.variable_scope("feedforward_tc"):
scores_tc = _common_layer(lstm_outputs, 2)
with tf.variable_scope("bilstm_layer_2"):
lstm_outputs = bidirectional_LSTM(lstm_outputs_flat, self.num_hiddens, self.initializer, sequence_length=seq_len, output_sequence=True)
return lstm_outputs,scores_tc
def _target_model(input_data, seq_len):
# if step:
# input_data = tf.concat([input_data, share_outputs], axis=-1)
with tf.variable_scope("bilstm_layer_1"):
lstm_outputs_flat = bidirectional_LSTM(input_data, self.num_hiddens, self.initializer, sequence_length=seq_len, output_sequence=True)
lstm_outputs = tf.reshape(lstm_outputs_flat, shape=[-1, lstm_outputs_flat.get_shape()[-1].value])
with tf.variable_scope("feedforward_tc"):
scores_tc = _common_layer(lstm_outputs, 2)
with tf.variable_scope("bilstm_layer_2"):
lstm_outputs = bidirectional_LSTM(lstm_outputs_flat, self.num_hiddens, self.initializer, sequence_length=seq_len, output_sequence=True)
return lstm_outputs,scores_tc
def _common_layer(input_data, output_size, activity=None):
# print(input_data.get_shape().as_list()[-1])
W = tf.get_variable(
"W",
shape=[input_data.get_shape().as_list()[-1], output_size],
initializer=self.initializer)
b = tf.Variable(tf.constant(0.0, shape=[output_size]), name="bias")
# batchsize*step, 1 * hiddens
outputs = tf.nn.xw_plus_b(input_data, W, b)
if activity is not None:
outputs = activity(outputs, name="activity")
return outputs
def _no_interact_layer(lstm_outputs_ent,lstm_outputs_tri):
lstm_outputs_ent = tf.reshape(lstm_outputs_ent,shape=[-1, lstm_outputs_ent.get_shape()[-1].value])
lstm_outputs_tri = tf.reshape(lstm_outputs_tri, shape=[-1, lstm_outputs_tri.get_shape()[-1].value])
# lstm_outputs_tar = tf.reshape(lstm_outputs_tar,shape=[-1, lstm_outputs_tar.get_shape()[-1].value])
with tf.variable_scope("source_feedforward_after_lstm"):
outputs_sou = _common_layer(lstm_outputs_ent, 2 * self.num_hiddens, activity=tf.nn.tanh)
with tf.variable_scope("trigger_feedforward_after_lstm"):
outputs_tri = _common_layer(lstm_outputs_tri, 2 * self.num_hiddens, activity=tf.nn.tanh)
# with tf.variable_scope("target_feedforward_after_lstm"):
# outputs_tar = _common_layer(lstm_outputs_tar, 2 * self.num_hiddens, activity=tf.nn.tanh)
return outputs_sou,outputs_tri
def _interact_layer(lstm_outputs_ent,lstm_outputs_tri,sou_prob,tri_prob):
with tf.variable_scope('interact'):
left_values_ent, right_values_ent, _ = _compute_interact_(lstm_outputs_ent, sou_prob)
left_values_tri, right_values_tri, _ = _compute_interact_(lstm_outputs_tri, tri_prob)
# left_values_tar, right_values_tar, _ = _compute_interact_(lstm_outputs_tar, tar_prob)
lstm_outputs_ent_new = tf.concat([lstm_outputs_ent, left_values_tri, right_values_tri], axis=-1)
# lstm_outputs_tar_new = tf.concat([lstm_outputs_tar, left_values_tri, right_values_tri], axis=-1)
lstm_outputs_tri_new = tf.concat([lstm_outputs_tri, left_values_ent, right_values_ent], axis=-1)
lstm_outputs_ent_new = tf.reshape(lstm_outputs_ent_new, shape=[-1, lstm_outputs_ent_new.get_shape()[-1].value])
# lstm_outputs_tar_new = tf.reshape(lstm_outputs_tar_new,shape=[-1, lstm_outputs_tar_new.get_shape()[-1].value])
lstm_outputs_tri_new = tf.reshape(lstm_outputs_tri_new,shape=[-1, lstm_outputs_tri_new.get_shape()[-1].value])
with tf.variable_scope("source_feedforward_after_lstm"):
outputs_ent = _common_layer(lstm_outputs_ent_new, 2 * self.num_hiddens, activity=tf.nn.tanh)
with tf.variable_scope("trigger_feedforward_after_lstm"):
outputs_tri = _common_layer(lstm_outputs_tri_new, 2 * self.num_hiddens, activity=tf.nn.tanh)
# with tf.variable_scope("target_feedforward_after_lstm"):
# outputs_tar = _common_layer(lstm_outputs_tar_new, 2 * self.num_hiddens, activity=tf.nn.tanh)
return outputs_ent,outputs_tri
def _inter_att_layer(lstm_outputs_OP,prob_OP,lstm_outputs_TA,prob_TA,use_prob=True):
#query
#keys
#values
with tf.variable_scope('interact'):
left_TA_values, right_TA_values, cur_values = _compute_interact_(lstm_outputs_TA,prob_TA)
# lstm_outputs_s_p_flat = tf.concat([lstm_outputs_s,left_values,right_values],axis=-1)
left_OP_values, right_OP_values, cur_values = _compute_interact_(lstm_outputs_OP,prob_OP)
# lstm_outputs_p_s_flat = tf.concat([lstm_outputs_p,left_values,right_values],axis=-1)
window = 3
# if use_prob:
# lstm_outputs_prob_OP = lstm_outputs_OP * prob_OP
# lstm_outputs_prob_TA = lstm_outputs_TA * prob_TA
with tf.variable_scope('attention_project'):
op_proj_w = tf.get_variable('op_proj_w',shape=[lstm_outputs_OP.get_shape()[-1],self.num_units])
lstm_outputs_proj_OP = tf.tensordot(lstm_outputs_OP,op_proj_w,axes=1)
ta_proj_w = tf.get_variable('ta_proj_w', shape=[lstm_outputs_TA.get_shape()[-1], self.num_units])
lstm_outputs_proj_TA = tf.tensordot(lstm_outputs_TA, ta_proj_w, axes=1)
b = tf.shape(lstm_outputs_OP)[0]
num_step = tf.shape(lstm_outputs_OP)[1]
with tf.variable_scope('att_OP'):
input_ta = tf.TensorArray(size=num_step,dtype=tf.float32)
output_ta = tf.TensorArray(size=num_step,dtype=tf.float32)
lstm_op_trans = tf.transpose(lstm_outputs_proj_OP,[1,0,2])
input_ta = input_ta.unstack(lstm_op_trans)
def _body(time,output_ta_t):
uid1 = tf.cond(time < window,lambda : tf.constant(0),lambda : time - window)
uid2 = tf.cond(time+window>=num_step-1,lambda :num_step-1,lambda : time+ window)
tmp = [0] * 3
# tmp[0] = tf.zeros([b, uid1], tf.float32)
tmp[0] = tf.ones([b, time], tf.float32)
tmp[1] = tf.zeros([b, 1], tf.float32)
tmp[2] = tf.ones([b, num_step - 1 - time], tf.float32)
# tmp[4] = tf.zeros([b, num_step - 1 - uid2], tf.float32)
score_mask = tf.concat(tmp, axis=1)
s,context = _att_layer(input_ta.read(time), lstm_outputs_proj_TA, lstm_outputs_TA, score_mask)
att_re = tf.reduce_sum(context, axis=1)
output_ta_t = output_ta_t.write(time,att_re)
return time + 1,output_ta_t
with tf.variable_scope('attention'):
v_att = tf.get_variable('v_att',shape=[self.num_units],dtype=tf.float32)
def _condition(time,output_ta_t):
return time < num_step
time = tf.constant(0)
final_step,output_ta_final = tf.while_loop(
cond=_condition,
body=_body,
loop_vars=(time,output_ta)
)
output_final = output_ta_final.stack()
output_final = tf.transpose(output_final,[1,0,2])
lstm_outputs_s_p_flat = tf.concat([lstm_outputs_OP, output_final,left_TA_values,right_TA_values], axis=-1)
with tf.variable_scope('att_TA'):
input_ta = tf.TensorArray(size=num_step,dtype=tf.float32)
output_ta = tf.TensorArray(size=num_step,dtype=tf.float32)
lstm_ta_trans = tf.transpose(lstm_outputs_proj_TA,[1,0,2])
input_ta = input_ta.unstack(lstm_ta_trans)
def _body(time,output_ta_t):
uid1 = tf.cond(time < window,lambda : tf.constant(0),lambda : time - window)
uid2 = tf.cond(time+window>=num_step-1,lambda :num_step-1,lambda : time+ window)
tmp = [0] * 3
# tmp[0] = tf.zeros([b, uid1], tf.float32)
tmp[0] = tf.ones([b, time], tf.float32)
tmp[1] = tf.zeros([b, 1], tf.float32)
tmp[2] = tf.ones([b, num_step - 1 - time], tf.float32)
# tmp[4] = tf.zeros([b, num_step - 1 - uid2], tf.float32)
score_mask = tf.concat(tmp, axis=1)
s,context = _att_layer(input_ta.read(time), lstm_outputs_proj_OP, lstm_outputs_OP, score_mask)
att_re = tf.reduce_sum(context, axis=1)
output_ta_t = output_ta_t.write(time,att_re)
return time + 1,output_ta_t
with tf.variable_scope('attention'):
v_att = tf.get_variable('v_att',shape=[self.num_units],dtype=tf.float32)
def _condition(time,output_ta_t):
return time < num_step
time = tf.constant(0)
final_step,output_ta_final = tf.while_loop(
cond=_condition,
body=_body,
loop_vars=(time,output_ta)
)
output_final = output_ta_final.stack()
output_final = tf.transpose(output_final, [1, 0, 2])
lstm_outputs_p_s_flat = tf.concat([lstm_outputs_TA, output_final,left_OP_values,right_OP_values], axis=-1)
lstm_outputs_s_p = tf.reshape(lstm_outputs_s_p_flat, shape=[-1, 4 * lstm_outputs_OP.get_shape()[-1].value])
lstm_outputs_p_s = tf.reshape(lstm_outputs_p_s_flat, shape=[-1, 4 * lstm_outputs_TA.get_shape()[-1].value])
with tf.variable_scope("sp_feedforward_after_lstm"):
outputs_s_p = _common_layer(lstm_outputs_s_p, 2 * self.num_hiddens, activity=tf.nn.tanh)
with tf.variable_scope("ps_feedforward_after_lstm"):
outputs_p_s = _common_layer(lstm_outputs_p_s, 2 * self.num_hiddens, activity=tf.nn.tanh)
return outputs_s_p, outputs_p_s
def _crf_interact(Op_score,Ta_score):
ta_soft = tf.nn.softmax(Ta_score)
op_soft = tf.nn.softmax(Op_score)
ta_o_score = tf.split(ta_soft,self.num_classess,axis=2)[0]
op_o_score = tf.split(op_soft,self.num_classess,axis=2)[0]
op_score = ta_o_score * Op_score
ta_score = op_o_score * Ta_score
return op_score,ta_score
def _relation_layer(lstm_outputs_s,opword_prob,lstm_outputs_p,target_prob):
with tf.variable_scope('relation'):
target_left_values, _, target_cur_values = _compute_interact_(lstm_outputs_p, target_prob, windows=1)
opinion_left_values, _, opinion_cur_values = _compute_interact_(lstm_outputs_s, opword_prob, windows=1)
lstm_outputs_OP_flat = tf.concat([target_left_values, opinion_cur_values], axis=-1)
lstm_outputs_AS_flat = tf.concat([opinion_left_values, target_cur_values], axis=-1)
lstm_outputs_OP = tf.reshape(lstm_outputs_OP_flat,shape=[-1, lstm_outputs_OP_flat.get_shape()[-1].value])
lstm_outputs_AS = tf.reshape(lstm_outputs_AS_flat,shape=[-1, lstm_outputs_AS_flat.get_shape()[-1].value])
with tf.variable_scope("feedforward_layer_OP"):
scores_OP = _common_layer(lstm_outputs_OP, 2)
with tf.variable_scope("feedforward_layer_AS"):
scores_AS = _common_layer(lstm_outputs_AS, 2)
return scores_OP,scores_AS
pass
def _att_layer(query,keys,memory,scores_mask):
#compute attention energies using a feed forward
#query: [batchsize,dim]
#keys: [batchsize,step.dim]
FLOAT_MIN = -1.e9
with tf.variable_scope('attention',reuse=True):
v_att = tf.get_variable('v_att',shape=[self.num_units],dtype=tf.float32)
energies = tf.reduce_sum(v_att * tf.tanh(keys + tf.expand_dims(query,1)),[2])
num_scores = tf.shape(energies)[1]
energies = energies * scores_mask + (1.0 - scores_mask) * FLOAT_MIN
energies = energies - tf.reduce_max(energies,axis=1,keep_dims=True)
unnormalized_scores = tf.exp(energies) * scores_mask + 0.0001
normalization = tf.reduce_sum(unnormalized_scores,axis=1,keep_dims=True)
normalized_scores = unnormalized_scores / normalization
context = tf.expand_dims(normalized_scores,2) * memory
return normalized_scores,context
def _compute_interact_(keys,probs,flag=True,windows = 3):
num_step = keys.get_shape().as_list()[1]
values = keys * probs
values = tf.transpose(values,[1,0,2])
append1 = tf.tile(tf.zeros_like(values[:1],dtype=tf.float32),[windows,1,1])
append2 = tf.zeros_like(values[:1],dtype=tf.float32)
left_values = 0
l = []
l.append(tf.concat([values,append1],axis=0))
for i in range(1,windows + 1):
l.append(tf.concat([append2,l[i-1][:-1]],axis=0))
left_values += l[i]
left_values = tf.transpose(left_values[:-windows],[1,0,2])
right_values = 0
r = []
r.append(tf.concat([append1,values],axis=0))
for i in range(1, windows + 1):
r.append(tf.concat([r[i-1][1:],append2],axis=0))
right_values += r[i]
right_values = tf.transpose(right_values[windows:],[1,0,2])
cur_values = tf.transpose(values,[1,0,2])
return left_values,right_values,cur_values
with tf.variable_scope('embedding_layer') as vs:
if self.pretrain_emb is not None:
self.token_embedding_weights = tf.Variable(self.pretrain_emb, trainable=True,
name='token_embedding_weights', dtype=tf.float32)
else:
self.token_embedding_weights = tf.get_variable('token_embedding_weights',
[self.num_words, self.token_emb_dim])
self.postag_embdding_weights = tf.get_variable('postag_embdding_weights',
[self.num_postags, self.postag_emb_dim])
self.suffix_embdding_weights = tf.get_variable('suffix_embdding_weights',
[FLAGs.num_suffix, FLAGs.suffix_emb_dim])
embedded_tokens = tf.nn.embedding_lookup(self.token_embedding_weights, self.input_token_indices)
embedded_postags = tf.nn.embedding_lookup(self.postag_embdding_weights, self.input_postag_indices)
embedded_suffixs = tf.nn.embedding_lookup(self.suffix_embdding_weights, self.input_suffix_indices)
if FLAGs.use_character_lstm:
# Character-level LSTM
# Idea: reshape so that we have a tensor [number_of_token, max_token_length, token_embeddings_size], which we pass to the LSTM
# Character embedding layer
with tf.variable_scope("character_embedding"):
self.character_embedding_weights = tf.get_variable(
"character_embedding_weights",
shape=[FLAGs.alphabet_size, FLAGs.character_embedding_dimension],
initializer=self.initializer)
input_character_indices = tf.reshape(self.input_token_character_indices,shape=[-1, self.token_max_len])
embedded_characters = tf.nn.embedding_lookup(self.character_embedding_weights,
input_character_indices,
name='embedded_characters')
self.input_token_lengths = tf.reduce_sum(tf.sign(input_character_indices), 1)
self.input_token_lengths = tf.cast(self.input_token_lengths, tf.int32)
# Character LSTM layer
with tf.variable_scope('character_lstm') as vs:
character_lstm_output = bidirectional_LSTM(embedded_characters,
FLAGs.character_lstm_hidden_state_dimension,
self.initializer,
sequence_length=self.input_token_lengths,
output_sequence=False)
character_lstm_output = tf.reshape(character_lstm_output,shape=[self.batch_size,-1,2 * FLAGs.character_lstm_hidden_state_dimension])
# suffix_embs = tf.one_hot(self.input_suffix_indices,depth=7,dtype=tf.float32)
# Concatenate character LSTM outputs and token embeddings
# print(FLAGs.use_character_lstm)
tmp = [embedded_tokens]
if FLAGs.use_character_lstm:
tmp.append(character_lstm_output)
if FLAGs.use_postag:
tmp.append(embedded_postags)
if FLAGs.use_suffix:
tmp.append(embedded_suffixs)
token_lstm_input = tf.concat(tmp, axis=2,name='token_lstm_input')
with tf.variable_scope('add_dropout') as vs:
# batchsize, step, dim_n
token_lstm_input_drop = tf.nn.dropout(token_lstm_input, self.keep_dropout, name='token_lstm_input_drop')
with tf.variable_scope("entity_moudle"):
# 经过共享层句子的表示
lstm_outputs_source, scores_tc_source = _entity_model(token_lstm_input_drop, self.length)
soft_scores = tf.nn.softmax(scores_tc_source)
_, source_prob = tf.split(soft_scores, 2, axis=1)
source_prob = tf.reshape(source_prob, shape=[self.batch_size, -1, 1])
        with tf.variable_scope('trigger_module'):
# batch,step,dim # batch*step,2
lstm_outputs_trigger, scores_tc_trigger = _trigger_model(token_lstm_input_drop, self.length)
soft_scores = tf.nn.softmax(scores_tc_trigger)
_, trigger_prob = tf.split(soft_scores, 2, axis=1)
trigger_prob = tf.reshape(trigger_prob, shape=[self.batch_size, -1, 1])
outputs_sou = None
outputs_tri = None
# outputs_tar = None
if not self.FLAGS.is_interact:
outputs_sou, outputs_tri = _no_interact_layer(lstm_outputs_source, lstm_outputs_trigger)
if self.FLAGS.is_interact:
outputs_sou, outputs_tri = _interact_layer(lstm_outputs_source, lstm_outputs_trigger,source_prob,trigger_prob)
with tf.variable_scope('source_feedforward_before_crf'):
source_scores = _common_layer(outputs_sou, self.task1_num_classess)
with tf.variable_scope('trigger_feedforward_before_crf'):
trigger_scores = _common_layer(outputs_tri, self.task2_num_classess)
# batchsize*step, num_classess
source_unary_scores = tf.reshape(source_scores,shape=[tf.shape(self.input_token_indices)[0], -1, self.task1_num_classess])
trigger_unary_scores = tf.reshape(trigger_scores, shape=[tf.shape(self.input_token_indices)[0], -1, self.task2_num_classess])
# target_unary_scores = tf.reshape(target_scores,shape=[tf.shape(self.input_token_indices)[0], -1, self.num_classess])
# if self.FLAGS.crf_interact:
# sp_scores_interact,ps_scores_interact = _crf_interact(sp_unary_scores,ps_unary_scores)
with tf.variable_scope('source_crf'):
log_likelihood1, self.transition_params1, self.unary_scores1 = self._crf_layer(source_unary_scores, self.length, self.y_entitys,self.task1_num_classess)
self.source_loss = tf.reduce_mean(-log_likelihood1)
with tf.variable_scope('trigger_crf'):
log_likelihood2, self.transition_params2, self.unary_scores2 = self._crf_layer(trigger_unary_scores, self.length, self.y_triggers,self.task2_num_classess)
self.trigger_loss = tf.reduce_mean(-log_likelihood2)
with tf.variable_scope('loss'):
            # binary-classification auxiliary loss
self.union_loss = self.source_loss + self.trigger_loss
if self.FLAGS.is_interact:
y_sources_tc = tf.reshape(self.y_entitys_tc, shape=[-1])
y_triggers_tc = tf.reshape(self.y_triggers_tc, shape=[-1])
# y_targets_tc = tf.reshape(self.y_targets_tc, shape=[-1])
source_tc_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y_sources_tc,
logits=scores_tc_source,
name='source_tc_loss')
trigger_tc_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y_triggers_tc,
logits=scores_tc_trigger,
name='trigger_tc_loss')
# target_tc_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y_targets_tc,
# logits=scores_tc_target,
# name='target_tc_loss')
source_tc_loss = tf.reduce_mean(source_tc_loss)
trigger_tc_loss = tf.reduce_mean(trigger_tc_loss)
# target_tc_loss = tf.reduce_mean(target_tc_loss)
self.source_loss += source_tc_loss
self.trigger_loss += trigger_tc_loss
self.union_loss = self.union_loss + source_tc_loss + trigger_tc_loss
self.train_op_sou = self.define_training_procedure(self.source_loss, self.sou_global_step)
self.train_op_tri = self.define_training_procedure(self.trigger_loss, self.tri_global_step)
self.train_op_uon = self.define_training_procedure(self.union_loss,self.sou_global_step)
def _crf_layer(self, unary_scores, seq_len, y, num_class):
small_score = -1000.0
large_score = 0.0
# batchsize , step , dim
# self.unary_scores = tf.reshape(self.unary_scores, shape=[tf.shape(self.input_token_indices)[0], -1, self.num_classess])
#
# # num_steps
# sequence_length = tf.reduce_sum(tf.sign(self.input_token_indices), 1)
# sequence_length = tf.cast(sequence_length, tf.int32)
# # batchsize, num_steps ,num_classes + 2
_batchsize = tf.shape(self.input_token_indices)[0]
batch_max_step = tf.shape(self.input_token_indices)[1]
unary_scores_with_start_and_end = tf.concat(
[unary_scores, tf.tile(tf.constant(small_score, shape=[1, 1, 2]), [_batchsize, batch_max_step, 1])],
-1)
# num_classes + 2
start_unary_scores = tf.constant([[small_score] * num_class + [large_score, small_score]],
shape=[1, 1, num_class + 2])
start_unary_scores = tf.tile(start_unary_scores, [_batchsize, 1, 1])
# num_classes + 2
end_unary_scores = tf.constant([[small_score] * num_class + [small_score, large_score]],
shape=[1, 1, num_class + 2])
end_unary_scores = tf.tile(end_unary_scores, [_batchsize, 1, 1])
# batchsize, seq + 2, num_classes + 2
unary_scores = tf.concat([start_unary_scores, unary_scores_with_start_and_end, end_unary_scores], 1)
start_index = num_class
end_index = num_class + 1
input_label_indices_flat_with_start_and_end = tf.concat(
[tf.tile(tf.constant(start_index, shape=[1, 1]), [_batchsize, 1]), y,
tf.tile(tf.constant(end_index, shape=[1, 1]), [_batchsize, 1])], 1)
# Apply CRF layer
# sequence_length = tf.shape(self.unary_scores)[0]
# sequence_lengths = tf.expand_dims(sequence_length, axis=0, name='sequence_lengths')
transition_parameters = tf.get_variable(
"transitions",
shape=[num_class + 2, num_class + 2],
initializer=self.initializer)
# self.unary_scores_expanded = tf.expand_dims(self.unary_scores,axis=0,name='unary_scores_expanded')
# input_label_indices_flat_batch = tf.expand_dims(input_label_indices_flat_with_start_and_end,axis=0,name='targets_expanded')
log_likelihood, _ = tf.contrib.crf.crf_log_likelihood(
unary_scores, input_label_indices_flat_with_start_and_end, seq_len,
transition_params=transition_parameters)
return log_likelihood, transition_parameters, unary_scores
def define_training_procedure(self, loss, global_step):
# Define training procedure
# self.global_step = tf.Variable(0, name='global_step', trainable=False)
if self.FLAGS.optimizer == 'adam':
self.optimizer = tf.train.AdamOptimizer(self.FLAGS.init_lr)
elif self.FLAGS.optimizer == 'sgd':
self.optimizer = tf.train.GradientDescentOptimizer(self.FLAGS.init_lr)
elif self.FLAGS.optimizer == 'adadelta':
self.optimizer = tf.train.AdadeltaOptimizer(self.FLAGS.init_lr)
else:
raise ValueError('The lr_method parameter must be either adadelta, adam or sgd.')
grads_and_vars = self.optimizer.compute_gradients(loss)
if self.FLAGS.gradient_clipping_value:
for i, (grad, var) in enumerate(grads_and_vars):
if grad is not None:
grads_and_vars[i] = (
tf.clip_by_value(grad, -self.FLAGS.gradient_clipping_value, self.FLAGS.gradient_clipping_value),
var)
# By defining a global_step variable and passing it to the optimizer we allow TensorFlow handle the counting of training steps for us.
# The global step will be automatically incremented by one every time you execute train_op.
train_op = self.optimizer.apply_gradients(grads_and_vars, global_step=global_step)
return train_op
def train_model_source(self,sess,x_batch,y_batch):
feed_dict = {
self.input_token_indices: x_batch[0],
self.input_postag_indices: x_batch[1],
self.input_suffix_indices:x_batch[2],
self.input_token_character_indices: x_batch[3],
self.y_entitys: y_batch[0],
# self.y_triggers: y_batch[1],
# self.y_targets:y_batch[2],
self.y_entitys_tc: y_batch[2],
# self.y_triggers_tc: y_batch[3],
# self.y_targets_tc: y_batch[5],
self.keep_dropout: 0.5}
feed_dict[self.batch_size] = len(x_batch[0])
_, loss_train, global_step \
= sess.run([
self.train_op_sou,
self.source_loss,
self.sou_global_step
],
feed_dict=feed_dict)
return global_step, loss_train
pass
def train_model_trigger(self,sess,x_batch,y_batch):
token_indices_train_batch = x_batch
# tys_sou, tys_tri, tys_tar = y_batch
feed_dict = {
self.input_token_indices: x_batch[0],
self.input_postag_indices: x_batch[1],
self.input_suffix_indices: x_batch[2],
self.input_token_character_indices: x_batch[3],
# self.y_entitys: y_batch[0],
self.y_triggers: y_batch[1],
# self.y_targets:y_batch[2],
# self.y_entitys_tc: y_batch[2],
self.y_triggers_tc: y_batch[3],
# self.y_targets_tc: y_batch[5],
self.keep_dropout: 0.5}
feed_dict[self.batch_size] = len(x_batch[0])
_, loss_train, global_step \
= sess.run([
self.train_op_tri,
self.trigger_loss,
self.tri_global_step
],
feed_dict=feed_dict)
return global_step, loss_train
pass
def train_model_union(self, sess, x_batch, y_batch, task_op, loss):
# token_indices_train_batch = x_batch
# tys_sou, tys_tri, tys_tar = y_batch
feed_dict = {
self.input_token_indices: x_batch[0],
self.input_postag_indices: x_batch[1],
self.input_suffix_indices: x_batch[2],
self.input_token_character_indices: x_batch[3],
self.y_entitys:y_batch[0],
self.y_triggers:y_batch[1],
# self.y_targets:y_batch[2],
self.y_entitys_tc: y_batch[2],
self.y_triggers_tc: y_batch[3],
# self.y_targets_tc: y_batch[5],
self.keep_dropout: 0.5}
feed_dict[self.batch_size] = len(x_batch[0])
_, loss_train, global_step \
= sess.run([
task_op,
loss,
self.sou_global_step
],
feed_dict=feed_dict)
return global_step, loss_train
def inference(self, sess, x_eval,y_eval):
source_res = []
target_res = []
trigger_res = []
all_loss = []
for sample_index in range(len(x_eval[0])):
x_eval_batch = [wxs[sample_index] for wxs in x_eval]
y_eval_batch = [tys[sample_index] for tys in y_eval]
feed_dict = {
self.input_token_indices: [x_eval_batch[0]],
self.input_postag_indices: [x_eval_batch[1]],
self.input_suffix_indices: [x_eval_batch[2]],
self.y_entitys: [y_eval_batch[0]],
self.y_triggers: [y_eval_batch[1]],
# self.y_targets: [y_eval_batch[2]],
self.y_entitys_tc: [y_eval_batch[2]],
self.y_triggers_tc: [y_eval_batch[3]],
# self.y_targets_tc: [y_eval_batch[5]],
self.keep_dropout: 1}
feed_dict[self.batch_size] = 1
unary_score1, unary_score2,test_seq_len, transMatrix1, transMatrix2,loss = sess.run(
[self.unary_scores1,
self.unary_scores2,
# self.unary_scores3,
self.length,
self.transition_params1,
self.transition_params2,
# self.transition_params3,
self.union_loss],
feed_dict=feed_dict
)
source_res.extend(self.viterbi_decode_batch(unary_score1, test_seq_len, transMatrix1))
trigger_res.extend(self.viterbi_decode_batch(unary_score2, test_seq_len, transMatrix2))
# target_res.extend(self.viterbi_decode_batch(unary_score3, test_seq_len, transMatrix3))
all_loss.append(loss)
return source_res,trigger_res, np.mean(np.array(all_loss))
def inference_single(self, sess, x,y ):
source_res = []
target_res = []
trigger_res = []
x_list = [wxs for wxs in x]
y_list = [tys for tys in y]
feed_dict = {
self.input_token_indices: [x_list[0]],
self.input_postag_indices: [x_list[1]],
self.input_suffix_indices: [x_list[2]],
self.y_entitys: [y_list[0]],
self.y_triggers: [y_list[1]],
# self.y_targets: [y_eval_batch[2]],
            self.y_entitys_tc: [y_list[2]],
            self.y_triggers_tc: [y_list[3]],
# self.y_targets_tc: [y_eval_batch[5]],
self.keep_dropout: 1}
feed_dict[self.batch_size] = 1
unary_score1, unary_score2,test_seq_len, transMatrix1, transMatrix2,loss = sess.run(
[self.unary_scores1,
self.unary_scores2,
# self.unary_scores3,
self.length,
self.transition_params1,
self.transition_params2,
# self.transition_params3,
self.union_loss],
feed_dict=feed_dict
)
source_res.extend(self.viterbi_decode_batch(unary_score1, test_seq_len, transMatrix1))
trigger_res.extend(self.viterbi_decode_batch(unary_score2, test_seq_len, transMatrix2))
# target_res.extend(self.viterbi_decode_batch(unary_score3, test_seq_len, transMatrix3))
        return source_res, trigger_res, loss
def nn_decode_batch(self, unary_scores, test_seq_len):
# unary_scores = [batch_size,num_steps,num_classes]
# return list: [batch_size,seq_len]
y_preds = []
for tf_unary_scores_, seq_len_ in zip(unary_scores, test_seq_len):
tf_unary_scores_ = tf_unary_scores_[:seq_len_]
y_pred = []
for j in range(len(tf_unary_scores_)):
id = np.where(tf_unary_scores_[j] == np.max(tf_unary_scores_[j]))
y_pred.append(id[0][0])
y_preds.append(y_pred)
return y_preds
def viterbi_decode_batch(self, unary_scores, test_seq_len, transMatrix):
# unary_scores = [batch_size,num_steps,num_classes]
# return list: [batch_size,seq_len]
y_pred = []
for tf_unary_scores_, seq_len_ in zip(unary_scores, test_seq_len):
# tf_unary_scores_ = tf_unary_scores_[:seq_len_]
# viterbi_sequence = [num_steps]
viterbi_sequence, _ = tf.contrib.crf.viterbi_decode(
tf_unary_scores_, transMatrix)
y_pred.append(viterbi_sequence[1:-1])
# y_gold.append(y_)
return y_pred
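# viterbi_decode_batch() above leans on tf.contrib.crf.viterbi_decode, which
# in the TF 1.x line this file targets is a plain NumPy dynamic program; a
# minimal standalone example:
import numpy as np
import tensorflow as tf

unary = np.array([[4., 1.], [1., 3.], [2., 2.]])  # 3 steps, 2 tags
trans = np.array([[0.5, -0.5], [-0.5, 0.5]])      # tag-to-tag transition scores
seq, score = tf.contrib.crf.viterbi_decode(unary, trans)
print(seq, score)  # [0, 1, 1] 9.0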
|
[
"qfzxhy@qianfangdeMacBook-Pro.local"
] |
qfzxhy@qianfangdeMacBook-Pro.local
|
91df19419892d18e2f7d515207f89d1433bf1a48
|
0bf2955e12abc2ccd4c074e1d75cc421a0c25270
|
/my_neighbors/settings.py
|
ed93256f2e4321b04c43363279b497aa1743de46
|
[] |
no_license
|
Kamiran79/my_neighbors_server
|
dbde22dc52cf95402f07a9f9bb209ce79fc29b83
|
70c5943b250ae7f81faf8a45a8941abf385f327c
|
refs/heads/main
| 2023-03-26T10:47:46.150514
| 2021-03-28T22:16:06
| 2021-03-28T22:16:06
| 345,204,364
| 0
| 0
| null | 2021-03-28T22:16:06
| 2021-03-06T22:01:28
|
Python
|
UTF-8
|
Python
| false
| false
| 3,818
|
py
|
"""
Django settings for my_neighbors project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'pn@m0z8ee7c#y7%rx7no&!)&e7$@147@p_p51o=&1*f$m@)2qn'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'corsheaders',
'my_neighbors_api',
]
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAuthenticated',
],
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'PAGE_SIZE': 10
}
CORS_ORIGIN_WHITELIST = (
'http://localhost:3001',
'http://127.0.0.1:3001'
)
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'my_neighbors.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'my_neighbors.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
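# MEDIA_URL/MEDIA_ROOT only take effect if the URLconf serves them in
# development; the usual companion snippet (a sketch, the project's urls.py
# is not shown in this record):
from django.conf import settings
from django.conf.urls.static import static

urlpatterns = [
    # ... project routes ...
]
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)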
|
[
"kamiran.ibrahim@gmail.com"
] |
kamiran.ibrahim@gmail.com
|