| Column | Type | Range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2–616 |
| content_id | string | length 40 |
| detected_licenses | list | 0–69 items |
| license_type | string | 2 classes |
| repo_name | string | length 5–118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4–63 |
| visit_date | timestamp[us] |  |
| revision_date | timestamp[us] |  |
| committer_date | timestamp[us] |  |
| github_id | int64 | 2.91k–686M, nulls (⌀) |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] |  |
| gha_created_at | timestamp[us] |  |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 246 classes |
| content | string | length 2–10.3M |
| authors | list | 1 item |
| author_id | string | length 0–212 |
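Each row below pairs the repository metadata described in the schema with the full text of one Python source file. As a minimal sketch of how such a table can be inspected, assuming it is published as a Hugging Face dataset (the dataset path used here is a placeholder, not the real one):

```python
from datasets import load_dataset

# Hypothetical dataset path; substitute the repository that actually hosts this table.
ds = load_dataset("org/python-source-files", split="train", streaming=True)

# Each record carries the columns listed in the schema above.
row = next(iter(ds))
print(row["repo_name"], row["path"], row["length_bytes"])
print(row["content"][:200])  # first 200 characters of the source file
```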
80eec8664ce238ba64458a1eab5e43600de6ac96
|
6af7d27d52844240fef42978fe4e739b23c80fa9
|
/mock-hsm/hsm_listener.py
|
65d570a4880e989bfb59a6043fb83390d9d02905
|
[] |
no_license
|
rkbalgi/python-scripts
|
52342a5c1559978998c5807299d02c858a2b005d
|
7a2e2dcb2131a86e5825a781ddf07a260e3cb40a
|
refs/heads/master
| 2021-01-22T10:32:23.256187
| 2017-05-29T11:04:11
| 2017-05-29T11:04:11
| 53,861,038
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,380
|
py
|
import threading
import socket
import hsm_client_session_handler
import sys
import logging
import mock_hsm_ui
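# Listener thread for the mock HSM: binds a TCP socket on the configured port
# and hands every accepted client connection to a
# hsm_client_session_handler.ClientSessionHandlerThread until the UI flag
# mock_hsm_ui.hsm_started is cleared.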
class HsmListenerThread(threading.Thread):
def __init__(self, port):
threading.Thread.__init__(self, name='Thread - Hsm-Listener')
self.port = port
self.log = logging.getLogger('HsmListener::')
self.log.setLevel(logging.DEBUG)
self.ssock = None
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.DEBUG)
self.log.addHandler(stream_handler)
def run(self):
self.log.info('starting mock hsm at port -' + str(self.port))
self.ssock = socket.socket()
self.ssock.bind(('localhost', self.port))
self.ssock.listen(1)
try:
while mock_hsm_ui.hsm_started:
(csock, addr) = self.ssock.accept()
self.log.info('client connection accepted from - ' + str(addr))
hsm_client_session_handler.ClientSessionHandlerThread(csock).start()
self.log.info('hsm stopped listening ..')
mock_hsm_ui.hsm_started = 0
except Exception:
self.log.error('error while running hsm: %s', str(sys.exc_info()))
mock_hsm_ui.hsm_started = 0
def stop_hsm(self):
self.ssock.shutdown(socket.SHUT_RDWR)
self.ssock.close()
mock_hsm_ui.hsm_started = 0
|
[
"pinkcat17"
] |
pinkcat17
|
05f9847aeb2b22b0db42d2ff89f2abe32e34b431
|
26dd1f6d64227035d947b94405c291b4311c1ba7
|
/django_app/bilateral_data.py
|
f6368e779fd2eadd7f93d2deb4b9ccb412d6be64
|
[] |
no_license
|
hayleefay/hayleecodes
|
3a700219781fd1bcd55e5d00ddf3c652735e601a
|
ab4e7eb9cd660b9fbcf3a7106a89f159cf239df3
|
refs/heads/master
| 2021-01-19T14:42:28.621825
| 2019-06-05T14:21:43
| 2019-06-05T14:21:43
| 88,184,661
| 0
| 1
| null | 2017-04-27T20:22:31
| 2017-04-13T16:24:53
|
Python
|
UTF-8
|
Python
| false
| false
| 1,470
|
py
|
# Creating a script that inputs the bilateral csv into my django psqldb
import os
import django
import pandas as pd
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hayleecodes.settings')
django.setup()
from mysite.models import Bilateral
bilateral_data = pd.read_csv('django_app/bilateral_flow.csv', low_memory=False)
for index, row in bilateral_data.iterrows():
region_orig = row[0]
region_orig_id = row[1]
region_dest = row[2]
region_dest_id = row[3]
country_orig = row[4]
country_orig_id = row[5]
country_dest = row[6]
country_dest_id = row[7]
regionflow_1990 = row[8]
regionflow_1995 = row[9]
regionflow_2000 = row[10]
regionflow_2005 = row[11]
countryflow_1990 = row[12]
countryflow_1995 = row[13]
countryflow_2000 = row[14]
countryflow_2005 = row[15]
metadata = row[16]
q = Bilateral(region_orig = region_orig, region_orig_id = region_orig_id, region_dest = region_dest, \
region_dest_id = region_dest_id, country_orig = country_orig, country_orig_id = country_orig_id, \
country_dest = country_dest, country_dest_id = country_dest_id, regionflow_1990 = regionflow_1990, \
regionflow_1995 = regionflow_1995, regionflow_2000 = regionflow_2000, regionflow_2005 = regionflow_2005, \
countryflow_1990 = countryflow_1990, countryflow_1995 = countryflow_1995, countryflow_2000 = countryflow_2000, \
countryflow_2005 = countryflow_2005, metadata = metadata)
q.save()
|
[
"hayleeham@gmail.com"
] |
hayleeham@gmail.com
|
b18cf7ab8a7089ef90a6f7a75b3c13911e42ba76
|
24b7f29f8a3b59165dea16b94bdf973196d14b4e
|
/backend/dating/admin.py
|
7ec91e27ab611a2676a20848fbe2ec9db8a2ea7b
|
[] |
no_license
|
crowdbotics-apps/pumpkin-29320
|
eb5c10c72341378dbef3a160b8e6f6e7c151f0a7
|
764677ffa078076d8041caea2f9c74afc8490b49
|
refs/heads/master
| 2023-06-28T02:04:55.191999
| 2021-08-01T17:23:54
| 2021-08-01T17:23:54
| 391,691,873
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 339
|
py
|
from django.contrib import admin
from .models import Setting, Profile, Inbox, Dislike, Match, UserPhoto, Like
admin.site.register(UserPhoto)
admin.site.register(Like)
admin.site.register(Profile)
admin.site.register(Match)
admin.site.register(Inbox)
admin.site.register(Setting)
admin.site.register(Dislike)
# Register your models here.
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
942fbf0ecdb26e480bf20310aca128665a9d7794
|
e75ff80e3d69c28a409e6527c25627d45bd12eab
|
/test/LowerMinisterNamesTest.py
|
6436396c1a1bceb2af564327ecc9253c41780deb
|
[] |
no_license
|
rabriol/naive_bayes_classification
|
1b44fcc4df5c26b6de9e5b30d2f485107a6a8014
|
e13d123847ec20250b30dfc4490f15399750b963
|
refs/heads/master
| 2021-01-13T16:56:18.875394
| 2017-01-16T02:57:16
| 2017-01-16T02:57:16
| 79,077,245
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,325
|
py
|
# coding=utf-8
__author__ = 'brito'
if __name__ == '__main__':
minister_names = ('Adalício Nogueira',
'Adaucto Cardoso',
'Alberto Torres',
'Aldir Passarinho',
'Alencar Araripe',
'Alfredo Buzaid',
'Alfredo Pinto',
'Aliomar Baleeiro',
'Amaral Santos',
'Amaro Cavalcanti',
'Americo Braziliense',
'Americo Lobo',
'Amphilophio',
'Andrade Pinto',
'André Cavalcanti',
'Annibal Freire',
'Antonio Neder',
'Aquino e Castro',
'Armando de Alencar',
'Arthur Ribeiro',
'Ary Franco',
'Ataulpho de Paiva',
'Augusto Olyntho',
'Ayres Britto',
'Barão de Pereira Franco',
'Barão de Lucena',
'Barão de Sobral',
'Barata Ribeiro',
'Barros Barreto',
'Barros Monteiro',
'Barros Pimentel',
'Bento de Faria',
'Bento Lisboa',
'Bernardino Ferreira',
'Bilac Pinto',
'Candido Motta',
'Canuto Saraiva',
'Cardoso de Castro',
'Cardoso Ribeiro',
'Carlos Madeira',
'Carlos Maximiliano',
'Carlos Medeiros',
'Carlos Velloso',
'Cármen Lúcia',
'Carvalho Mourão',
'Castro Nunes',
'Célio Borja',
'Celso de Mello',
'Cezar Peluso',
'Clóvis Ramalhete',
'Coelho e Campos',
'Cordeiro Guerra',
'Costa Barradas',
'Costa Manso',
'Cunha Mello',
'Cunha Peixoto',
'Décio Miranda',
'Dias Toffoli',
'Djaci Falcão',
'Edgard Costa',
'Edmundo Lins',
'Edson Fachin',
'Eduardo Espinola',
'Ellen Gracie',
'Eloy da Rocha',
'Enéas Galvão',
'Epitacio Pessôa',
'Eros Grau',
'Evandro Lins',
'Faria',
'Faria Lemos',
'Ferreira de Resende',
'Figueiredo Junior',
'Firmino Paz',
'Firmino Whitaker',
'Francisco Rezek',
'Freitas Henriques',
'Geminiano da Franca',
'Gilmar Mendes',
'Godofredo Cunha',
'Gonçalves de Carvalho',
'Gonçalves de Oliveira',
'Goulart de Oliveira',
'Guimarães Natal',
'Hahnemann Guimarães',
'Heitor de Sousa',
'Herculano de Freitas',
'Hermenegildo de Barros',
'Hermes Lima',
'Herminio do Espirito Santo',
'Ilmar Galvão',
'João Barbalho',
'João Luiz Alves',
'João Mendes',
'João Pedro',
'Joaquim Barbosa',
'José Hygino',
'José Linhares',
'Lafayette de Andrada',
'Laudo de Camargo',
'Leitão de Abreu',
'Leoni Ramos',
'Lucio de Mendonça',
'Luiz Fux',
'Luiz Gallotti',
'Luiz Osorio',
'Macedo Soares',
'Manoel Espinola',
'Manoel Murtinho',
'Marco Aurélio',
'Mário Guimarães',
'Maurício Corrêa',
'Mendonça Uchôa',
'Menezes Direito',
'Moreira Alves',
'Muniz Barreto',
'Nelson Hungria',
'Nelson Jobim',
'Néri da Silveira',
'Octavio Gallotti',
'Octavio Kelly',
'Oliveira Figueiredo',
'Oliveira Ribeiro',
'Orozimbo Nonato',
'Oscar Corrêa',
'Oswaldo Trigueiro',
'Paulo Brossard',
'Pedro Chaves',
'Pedro dos Santos',
'Pedro Lessa',
'Pedro Mibieli',
'Philadelpho e Azevedo',
'Pindahiba de Mattos',
'Pires e Albuquerque',
'Piza e Almeida',
'Plínio Casado',
'Prado Kelly',
'Queiroz Barros',
'Rafael Mayer',
'Ribeiro da Costa',
'Ribeiro de Almeida',
'Ricardo Lewandowski',
'Roberto Barroso',
'Rocha Lagôa',
'Rodrigo Octavio',
'Rodrigues Alckmin',
'Rosa Weber',
'Sebastião Lacerda',
'Sepúlveda Pertence',
'Soares Muñoz',
'Soriano de Souza',
'Souza Martins',
'Souza Mendes',
'Sydney Sanches',
'Teori Zavascki',
'Themístocles Cavalcanti',
'Thompson Flores',
'Trigo de Loureiro',
'Ubaldino do Amaral',
'Victor Nunes',
'Vilas Boas',
'Visconde de Sabará',
'Viveiros de Castro',
'Waldemar Falcão',
'Washington Oliveira',
'Xavier de Albuquerque')
print '('
for name in minister_names:
print "'" + name.lower() + "',"
print ')'
|
[
"rabriol@Admins-MacBook.local"
] |
rabriol@Admins-MacBook.local
|
7bf5c4ef4883730ab765d40a24eac81b6858e456
|
f1a66e50ed8423de48411186ab205a6e7ac4c29b
|
/bookmarks/bookmarks/urls.py
|
36e00afdf607727dbb3f4d7dd61129d676834ee9
|
[] |
no_license
|
Zorro-Lin-7/DjangoByExample
|
57ec916528b262495b43997a4fe56cdeaaa5256c
|
0432ad9f45a6e9777fcc6a7948cd34c41d7ad999
|
refs/heads/master
| 2021-01-25T11:02:47.566715
| 2017-07-17T09:38:35
| 2017-07-17T09:38:35
| 93,905,625
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,134
|
py
|
"""bookmarks URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^account/', include('account.urls')),
#url('social-auth/',include('social.apps.django_app.urls', namespace='social')),
url(r'^images/', include('images.urls', namespace='images')),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"linzhun7@gmail.com"
] |
linzhun7@gmail.com
|
ad25cd071ce59b69379abe51bd402b5d6ba2967b
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_1/87.py
|
d7cf4af0fea932c17348dece3e0217dfe2a7bd93
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 745
|
py
|
import sys
from array import array
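# Reads N test cases; each gives S search-engine names followed by Q queries.
# dp_prev[i] holds the fewest switches needed to serve the queries processed so
# far (taken in reverse order) when engine i is the one currently active; a
# query for engine i forces a switch from the cheapest other engine j.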
def main():
N = int(sys.stdin.readline())
for case in xrange(N):
S = int(sys.stdin.readline())
engine = dict([(sys.stdin.readline(), i) for i in xrange(S)])
Q = int(sys.stdin.readline())
queries = [engine[sys.stdin.readline()] for i in xrange(Q)]
dp_prev = array('i', [0 for x in xrange(S)])
dp_next = array('i', [0 for x in xrange(S)])
queries.reverse()
for q in queries:
for i in xrange(S):
dp_next[i] = dp_prev[i] if i != q else min([dp_prev[j] for j in xrange(S) if j != i]) + 1
dp_prev, dp_next = dp_next, dp_prev
print 'Case #%d: %d' % (case + 1, min(dp_prev))
main()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
ec3c13e38a130b81855f34290c24718a9a265513
|
7f5e39adc33b71e226a425c8e05012f9c449d834
|
/bots/verivox-de/helpers.py
|
16ad0517ffe0d706bb07d7f854ad201954ca8fd2
|
[] |
no_license
|
nzzdev/st-methods
|
a87eb4967b64bd1079a1a42e280fcf7679b6eb7d
|
fa12656afa043303ec99374b51c5e12762f20a35
|
refs/heads/master
| 2023-08-17T03:32:33.553268
| 2023-08-16T19:22:08
| 2023-08-16T19:22:08
| 107,977,306
| 52
| 8
| null | 2022-11-24T16:20:14
| 2017-10-23T12:12:52
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,210
|
py
|
import pandas as pd
import json
import requests
from requests.adapters import HTTPAdapter, Retry
import logging
def download_data(url, headers=""):
logging.basicConfig(level=logging.INFO)
s = requests.Session()
retries = Retry(total=10, backoff_factor=1,
status_forcelist=[502, 503, 504])
s.mount('https://', HTTPAdapter(max_retries=retries))
return s.get(url, headers=headers)
def update_chart(id, title="", subtitle="", notes="", data=pd.DataFrame(), options="", files=[]): # Q helper function
# read qConfig file
json_file = open('./q.config.json')
qConfig = json.load(json_file)
# update chart properties
for item in qConfig.get('items'):
for environment in item.get('environments'):
if environment.get('id') == id:
if title != '':
item.get('item').update({'title': title})
if subtitle != '':
item.get('item').update({'subtitle': subtitle})
if notes != '':
item.get('item').update({'notes': notes})
if data.size > 0:
# reset_index() and T (for transpose) are used to bring column names into the first row
transformed_data = data.applymap(str).reset_index(
drop=False).T.reset_index().T.apply(list, axis=1).to_list()
if 'table' in item.get('item').get('data'):
item.get('item').get('data').update(
{'table': transformed_data})
else:
item.get('item').update({'data': transformed_data})
if len(files) > 0:
item['item']['files'] = files
print('Successfully updated item with id', id,
'on', environment.get('name'), 'environment')
if options != '':
item.get('item').update({'options': options})
# write qConfig file
with open('./q.config.json', 'w', encoding='utf-8') as json_file:
json.dump(qConfig, json_file, ensure_ascii=False,
indent=1, default=str)
json_file.close()
|
[
"31181952+somm-doe@users.noreply.github.com"
] |
31181952+somm-doe@users.noreply.github.com
|
d9b459037cbbf593d289eda101e458bd82c16e30
|
3c5b38954f5cb1e09b08b721610ab0f04ed69fa8
|
/excel_log.py
|
cd2d0885fee95e65d8e3b5fbff69076ffef30de4
|
[] |
no_license
|
nicolas1805961/amundi_scripts
|
e15eff64643f948aee1c8252e479076b6698a62b
|
98df6dab98a59491938c06fea97c0019c1d4ad7e
|
refs/heads/master
| 2020-07-24T20:31:26.334407
| 2019-12-19T14:50:52
| 2019-12-19T14:50:52
| 208,040,437
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,362
|
py
|
import csv
import xlsxwriter
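# Reformat a CSV access log into an .xlsx workbook: columns whose header is not
# listed in `values` are hidden, rows with an empty User_Name are hidden, and
# the header row is rendered in bold. Files without "vm" in their name also
# keep the Virtual_IP and Access_Policy_Result columns.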
values = ["log_sid", "Address", "User_Name", "time_stamp", "date_time", "session_id", "Access_Profile", "Client_Hostname"]
csv.register_dialect('myDialect', delimiter = ',', quoting=csv.QUOTE_MINIMAL)
col = []
file = input("Please enter the file name to format:")
file = file + ".csv"
out_file = input("Please enter the name of the output file:")
out_file = out_file + ".xlsx"
if "vm" not in file:
values.append("Virtual_IP")
values.append("Access_Policy_Result")
workbook = xlsxwriter.Workbook(out_file)
worksheet = workbook.add_worksheet()
bold = workbook.add_format({'bold': True})
with open(file, "r") as file_in:
reader = csv.reader(file_in, dialect = "myDialect")
data = list(reader)
new_data = [x for x in data if x != []]
index_user = new_data[0].index("User_Name")
for i, j in enumerate(new_data):
for u, k in enumerate(j):
if i == 0 and new_data[i][u] not in values:
col.append(u)
continue
if u == index_user and new_data[i][u] == "":
worksheet.set_row(i, None, None, {"hidden": 1})
worksheet.write(i, u, k)
for col_index in col:
worksheet.set_column(col_index, col_index, None, None, {"hidden": 1})
worksheet.set_column(0, 30, 20)
worksheet.set_row(0, None, bold)
workbook.close()
|
[
"nicolas.portal@amundi.com"
] |
nicolas.portal@amundi.com
|
a87dc8f3080ad1af54ea9204960917049462444e
|
bd45e8359651aa56592e223f9b903f4fbb011dae
|
/UI/src/tkinter_t2.py
|
03724d6dab1f243c6648df67992ca5e143f4b5b0
|
[] |
no_license
|
siroikisetu/eclipse-workspace
|
ac11e76dfc9db068a23c7e834c1e1eae7e081531
|
ec537401b1f3bdc79d9461474ebfbe2a62e4b4f6
|
refs/heads/master
| 2020-03-19T19:58:52.843800
| 2018-06-11T06:16:28
| 2018-06-11T06:16:28
| 136,882,561
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 674
|
py
|
#coding=utf-8
from tkinter import * # import the Tkinter library
root = Tk() # create the root window object
# create two lists
li = ['C','python','php','html','SQL','java']
movie = ['CSS','jQuery','Bootstrap']
listb = Listbox(root) # create two Listbox widgets
listb2 = Listbox(root)
for item in li: # insert data into the first widget
listb.insert(0,item)
for item in movie: # insert data into the second widget
listb2.insert(0,item)
listb.pack() # place the widgets in the main window
listb2.pack()
root.mainloop() # enter the main event loop
|
[
"34596651+siroikisetu@users.noreply.github.com"
] |
34596651+siroikisetu@users.noreply.github.com
|
7b2ebd64fecfce81d2d79080991d9d1198da040a
|
b02a2c1e8cf778f8f810897c478abcec720b7220
|
/ds_algos_primer/python/linked_lists_part_2.py
|
c0c87d766c2ad39b46a856faeb85ef430e5d63a7
|
[] |
no_license
|
naraekwon/CodingInterviewMastery
|
d8596a4decb52086ea2eefa32ebccd4a25c6181a
|
c14ceaa19649269467160a5bf53e4a3d927e97a5
|
refs/heads/main
| 2023-09-05T16:28:25.253482
| 2021-11-04T18:56:19
| 2021-11-04T18:56:19
| 428,470,905
| 0
| 0
| null | 2022-02-13T19:54:09
| 2021-11-16T00:58:46
| null |
UTF-8
|
Python
| false
| false
| 4,394
|
py
|
"""
Title: Linked List Solutions Part 2
This file contains the template for Exercise Sets #2-5 of the Linked List
exercises in the DS & Algos Primer. Fill in the exercises here and refer to
linked_list_solutions_part_2.py for the complete code samples.
Execution: python linked_lists_part_2.py
"""
"""
A simple singly-linked node class (copied from Part 1)
"""
class SinglyLinkedListNode:
def __init__(self, val=0):
self.val = val
self.next = None
"""
A simple doubly-linked node class (copied from Part 1)
"""
class DoublyLinkedListNode:
def __init__(self, val=0):
self.val = val
self.prev = None
self.next = None
"""
Exercise 2.1: Write a function that swaps two nodes in a doubly-linked
list
Time Complexity:
Space Complexity:
"""
def swap_nodes(l: DoublyLinkedListNode, n: int, m: int):
# INSERT YOUR CODE HERE
pass
"""
Exercise 2.2: Write a function that removes the odd-indexed values from a
singly-linked list
Time Complexity:
Space Complexity:
"""
def remove_odd(l: SinglyLinkedListNode):
# INSERT YOUR CODE HERE
pass
"""
Exercise 2.3: Write a function that de-interleaves the even and odd indices in a
singly-linked list. Your resulting list should have all the even indices first
followed by all the odd indices
Time Complexity:
Space Complexity:
"""
def deinterleave(l: SinglyLinkedListNode):
# INSERT YOUR CODE HERE
pass
"""
Exercise 2.4: Write a function that reverses a singly-linked list
Time Complexity:
Space Complexity:
"""
def reverse(l: SinglyLinkedListNode) -> SinglyLinkedListNode:
# INSERT YOUR CODE HERE
pass
"""
Exercise 3.1: Write a function that compares 2 singly-linked lists and returns
true if the two lists are identical
Time Complexity:
Space Complexity:
"""
def are_equal(l1: SinglyLinkedListNode, l2: SinglyLinkedListNode) -> bool:
# INSERT YOUR CODE HERE
pass
"""
Exercise 3.2: Write a function that returns the nth-to-last value in a
singly-linked list
Time Complexity:
Space Complexity:
"""
def nth_to_last(l: SinglyLinkedListNode, n: int) -> SinglyLinkedListNode:
# INSERT YOUR CODE HERE
pass
"""
Exercise 3.3: Write a function that returns the value at the midpoint of a
singly-linked list. You can assume the length of the list is odd.
Time Complexity:
Space Complexity:
"""
def midpoint(l: SinglyLinkedListNode) -> SinglyLinkedListNode:
# INSERT YOUR CODE HERE
pass
"""
Exercise 4.1: Remove all occurrences of n from a singly-linked list
Time Complexity:
Space Complexity:
"""
def remove_all(l: SinglyLinkedListNode, n: int) -> SinglyLinkedListNode:
# INSERT YOUR CODE HERE
pass
"""
Exercise 5.1: Given a singly-linked list, determine if the list contains a
cycle. DO NOT use Floyd’s algorithm. FInd some other method for identifying a
cycle
Time Complexity:
Space Complexity:
"""
def has_cycle_naive(l: SinglyLinkedListNode) -> bool:
# INSERT YOUR CODE HERE
pass
"""
Exercise 5.2: Given a singly-linked list, determine if the list contains a cycle
using Floyd's algorithm
Time Complexity:
Space Complexity:
"""
def has_cycle(l: SinglyLinkedListNode) -> bool:
# INSERT YOUR CODE HERE
pass
"""
We've included some helper methods below that you can use for your tests
"""
"""
Test method to generate singly linked list with n items
"""
def single_generator(n: int) -> SinglyLinkedListNode:
head = SinglyLinkedListNode(1)
curr = head
for i in range(2, n+1):
curr.next = SinglyLinkedListNode(i)
curr = curr.next
return head
"""
Test method to generate doubly linked list with n items
"""
def double_generator(n: int) -> DoublyLinkedListNode:
head = DoublyLinkedListNode(1)
curr = head
for i in range(2, n+1):
curr.next = DoublyLinkedListNode(i)
curr.next.prev = curr
curr = curr.next
return head
"""
Test method to print singly linked list
"""
def print_single(n: SinglyLinkedListNode):
curr = n
string = []
while curr:
string.append(str(curr.val) + " -> ")
curr = curr.next
string.append("null")
print(''.join(string))
"""
Test method to print doubly linked list
"""
def print_double(n: DoublyLinkedListNode):
if not n:
print("null")
curr = n
string = []
while curr:
string.append(str(curr.val) + " -> ")
curr = curr.next
string.append("null")
print(''.join(string))
if __name__ == '__main__':
# ADD YOUR TEST CASES HERE
pass
|
[
"sam@byte-by-byte.com"
] |
sam@byte-by-byte.com
|
cb9dc25d7bf21bb48137dd82b2ed0480c6ab4710
|
2dfbd1328e4dd6dc9852d52e90cd627b402059e1
|
/OOPALL/Hakrrank_problem/maps.py
|
28d019687100f1ad9574de624ed7de3829190ac5
|
[] |
no_license
|
Bappy200/Python
|
2817bcb32bf751033fadc960694704a546055e47
|
0ad3d53cc204af361890182585cdf9bd3ec9a199
|
refs/heads/main
| 2023-03-01T05:29:32.994940
| 2021-02-12T23:08:36
| 2021-02-12T23:08:36
| 338,268,623
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 174
|
py
|
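# Read n test cases; for each one read n1 integers and print their sum.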
n = int(input())
while (n > 0):
n1 = int(input())
s = 0
while (n1 > 0):
a = int(input())
s = s + a
n1 = n1 - 1
print(s)
n = n - 1
|
[
"sbappyi200@gmail.com"
] |
sbappyi200@gmail.com
|
e0ceae55d4e631d23c39dffa689763972b6f1b35
|
c4f55a244a72449da32eb95c28f1498483bdb40b
|
/Defference of any number.py
|
5e7f6072a3b334e5529ff7bbe39f9e8479e05221
|
[] |
no_license
|
chavitodionisio/Differences-of-any-number
|
76d83e7da75b54a0d046a8a641b2a2aa8e08f556
|
b801d394b938d5f5ff749dcb5ec47308593949ac
|
refs/heads/master
| 2021-02-18T05:03:00.837561
| 2020-03-05T13:00:37
| 2020-03-05T13:00:37
| 245,163,543
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 187
|
py
|
num1 = int(input("Enter first Number:\n"))
num2 = int(input("Enter Second number:\n"))
answer = num1 - num2
print()
print("The Diffrence between first and second number is: ", answer)
|
[
"noreply@github.com"
] |
chavitodionisio.noreply@github.com
|
16acd034ab01fe86fb480e059e14cb74d58d4c10
|
6bbba511254c1cd9ece47c8c1558a79746e9998d
|
/kriging/stations_v1.py
|
ccb6fa82d24c566993ea96b90335d4ee7b328e53
|
[] |
no_license
|
profcarlos/MSG
|
1e99292653878465db9aa888f55e029c29344601
|
3ab5b478777622f8abfe461a6450fec1f0a3cc0f
|
refs/heads/master
| 2021-01-22T19:55:18.205573
| 2018-09-25T11:13:30
| 2018-09-25T11:13:30
| 100,710,099
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,838
|
py
|
import sys
sys.path.insert(0, 'C:\\Users\\carlos\\Dropbox\\newPython\\utils')
sys.path.insert(0, 'C:\\Users\\carlos.silveira\\Dropbox\\newPython\\utils')
import numpy
import sys
import subprocess
import os
import glob
from utils import *
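# Build daily precipitation rasters from INMET station logs: sum PREC per
# station per day into a combined CSV, then for every date write a
# grid_stations.csv (LONG, LAT, PREC) and interpolate it with gdal_grid
# (inverse-distance weighting), cropping and masking the result with
# gdalwarp/gdal_translate into daily _prec.tif files.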
inputDir = 'z:\\OUTROS\\INMET\\'
subfolder = 'BCK_ANO_year\\'
filename = inputDir + subfolder + 'BCK_ANO_year_EST_station.txt'
outfile = inputDir + 'data_all_year_all_stations.csv'
stat_csv = inputDir + 'stations_goias.csv'
outputDir = 'z:\\OUTROS\\PREC_DAILY\\'
grid_stations ='C:\\Users\\carlos.silveira\\Dropbox\\newPython\\kriging\\grid_stations.csv'
exeDir = 'C:\\osgeo4w\\bin\\'
fileDataRetriever = os.getcwd() + '\\shapes\\dataRetrieverShape.shp'
fileMask = os.getcwd() + '\\shapes\\limite_go_df_WGS84.shp'
def calcDoy(Y,M,D):
""" given year, month, day return day of year
Astronomical Algorithms, Jean Meeus, 2d ed, 1998, chap 7 """
if is_leap_year(Y):
K = 1
else:
K = 2
N = int((275 * M) / 9.0) - K * int((M + 9) / 12.0) + D - 30
return N
def is_leap_year(year):
""" if year is a leap year return True
else return False """
if year % 100 == 0:
return year % 400 == 0
return year % 4 == 0
def calcMonth(Y,N):
""" given year = Y and day of year = N, return year, month, day
Astronomical Algorithms, Jean Meeus, 2d ed, 1998, chap 7 """
if is_leap_year(Y):
K = 1
else:
K = 2
M = int((9 * (K + N)) / 275.0 + 0.98)
if N < 32:
M = 1
D = N - int((275 * M) / 9.0) + K * int((M + 9) / 12.0) + 30
return M
#return Y, M, D
def calcDay(Y, N):
""" given year = Y and day of year = N, return year, month, day
Astronomical Algorithms, Jean Meeus, 2d ed, 1998, chap 7 """
if is_leap_year(Y):
K = 1
else:
K = 2
M = int((9 * (K + N)) / 275.0 + 0.98)
if N < 32:
M = 1
D = N - int((275 * M) / 9.0) + K * int((M + 9) / 12.0) + 30
return D
#return Y, M, D
class main():
#STATIONS = [NAME, LONG, LAT]
STATIONS = numpy.loadtxt(stat_csv, delimiter = ',', skiprows = 1)
n_stations = len(STATIONS)
# HEADER has all variables
HEADER = ['STATION','YEAR','MON','DAY','OBS.','BAT','TEMP_CPU','AIR_INST.','AIR_MAX','AIR_MIN','UMID_INST','UMID_MAX','UMID_MIN', 'DP_INST','DP_MAX','DP_MIN','PRES_INST', 'PRES_MAX','PRES_MIN', 'WIND_Speed','WIND_Dir','WIND_Gust','RAD','PREC', 'CLOUD_TOT','CLOUD_CODE','CLOUD_BASE','CLOUD_VISIB']
# VARS has variables to get
VARS = ['YEAR','MON','DAY','OBS.','PREC']
data_header = ['STAT','YEAR', 'MON','DAY','PREC']
usecols = []
for i, var in zip(range(len(HEADER)), HEADER):
if var in VARS:
usecols.append(i)
print('n_stations: %d usecols of data: %s' %(n_stations, usecols))
if(not os.path.isfile(outfile)):
DATA = numpy.zeros((366*3*n_stations,len(VARS)))
n_data = 0
for year in range(2013, 2016, 1):
for x in range(n_stations):
file_station = filename
file_station = file_station.replace('year', str(year))
file_station = file_station.replace('station', 'A' + str('%03d'%int(STATIONS[x][0])))
print('. reading file: %s'%(file_station))
TAB = numpy.genfromtxt(file_station, delimiter = ' ', usecols = usecols, invalid_raise=False, unpack=True, missing_values=['//////', '/////', '//', '/', '=', ''])
#TAB = numpy.transpose(TAB)
#print(TAB)
for i in range(len(VARS)):
globals()[VARS[i]] = TAB[i]
for month in range(1,13,1):
for day in range(1,32,1):
paser = numpy.logical_and(MON==month,DAY==day)
if (numpy.sum(paser) > 0):
DATA[n_data] = [STATIONS[x][0], year, month, day,numpy.sum(PREC[paser])]
#print(DATA[n_data])
n_data = n_data + 1
header = 'STAT'
for var in VARS:
if(var != 'OBS.'):
header = header + ' ' + str(var)
print(header)
numpy.savetxt(outfile, DATA[:n_data], delimiter=" ", header = header, comments = '', fmt='%.1f')
else:
DATA = numpy.loadtxt(outfile, delimiter = ' ', skiprows = 1, unpack = True)
for i in range(len(VARS)):
globals()[VARS[i]] = None
for i in range(len(data_header)):
globals()[data_header[i]] = DATA[i]
for year in range(2013, 2016,1):
for month in range(1,13,1):
for day in range(1,32,1):
# Verify if file exist
outputSubfolder = outputDir + str('%04d'%int(year)) + '\\' + str('%02d'%int(month)) + '\\'
if(not os.path.isdir(outputSubfolder)):
os.makedirs(outputSubfolder)
outfile = outputSubfolder + str('%04d'%int(year)) + str('%02d'%int(month)) + str('%02d'%int(day)) + '_prec.tif'
tmpfile1 = outfile.replace('.tif', '_tmpfile1.tif')
tmpfile2 = outfile.replace('.tif', '_tmpfile2.tif')
tmpfile3 = outfile.replace('.tif', '_tmpfile3.tif')
if(os.path.isfile(outfile)):
#print('file exist: %s'%(outfile))
TEST = readFileBand(outfile, 1)
has_nan = numpy.any(numpy.isnan(TEST))
if(has_nan):
os.remove(outfile)
print('...REMOVING [has nan] file %s'%(outfile))
else:
continue
print('process file: %s'%(outfile))
print('...create grid stations file')
if(os.path.isfile(grid_stations)):
os.remove(grid_stations)
#print('deleting grid_stations file %s'%(grid_stations))
try:
file = open(grid_stations,'w')
except:
print('... Error to open grid_stations file')
sys.exit()
file.write('LONG,LAT,PREC\n')
if(os.path.isfile(outfile)):
os.remove(outfile)
n_error = 0
for i, stat in zip(range(n_stations), numpy.transpose(STATIONS)[0]):
#print('search in %d of %d %d %d'%(stat, year, month, day))
#print(STAT[0])
#print(YEAR[0])
#print(MON[0])
paser = numpy.logical_and(STAT==stat, numpy.logical_and(numpy.logical_and(YEAR==year, MON==month), DAY==day))
if(numpy.sum(paser) == 0):
n_error = n_error + 1
#print('error...')
else:
prec_station = numpy.sum(PREC[paser])
if(not numpy.isnan(prec_station) or prec_station < 0):
prec_day = str('%.6f,%.6f,%.2f'%(STATIONS[i][1], STATIONS[i][2], prec_station))
print(prec_day)
if(i != n_stations-1):
prec_day = prec_day + '\n'
file.write(prec_day)
else:
n_error = n_error + 1
file.close()
if(n_error > 0.1*n_stations):
print('...error to generate file !! [n_error: %d]'%(n_error))
continue
print('...process create image')
print(exeDir + 'gdal_grid -a invdist:power=2.0:smoothing=1.0 -zfield PREC -l grid_stations grid_stations.vrt ' + ' ' + tmpfile1)
subprocess.call(exeDir + 'gdal_grid -a invdist:power=2.0:smoothing=1.0 -zfield PREC -l grid_stations grid_stations.vrt '+ ' ' + tmpfile1)
#print(exeDir + 'gdalwarp.exe -t_srs "+proj=latlong +datum=WGS84" ' + tmpfile1 + ' ' + tmpfile2)
#subprocess.call(exeDir + 'gdalwarp.exe -t_srs "+proj=latlong +datum=WGS84" ' + tmpfile1 + ' ' + tmpfile2)
print(exeDir + 'gdalwarp -tr 0.034840758326260 0.034840758326260 -r "cubic" -cutline ' + fileDataRetriever + ' -crop_to_cutline ' + tmpfile1 + ' ' + tmpfile2)
subprocess.call(exeDir + 'gdalwarp -tr 0.034840758326260 0.034840758326260 -r "bilinear" -cutline ' + fileDataRetriever + ' -crop_to_cutline ' + tmpfile1 + ' ' + tmpfile2)
print(exeDir + 'gdal_translate.exe -projwin -53.2507227 -12.3947856 -45.8993227 -19.5023003 -of GTiff ' + tmpfile2 + ' ' + tmpfile3)
subprocess.call(exeDir + 'gdal_translate.exe -projwin -53.2507227 -12.3947856 -45.8993227 -19.5023003 -of GTiff ' + tmpfile2 + ' ' + tmpfile3)
print(exeDir + 'gdalwarp.exe -cutline ' + fileMask + ' ' + tmpfile3 + ' ' + outfile)
subprocess.call(exeDir + 'gdalwarp.exe -cutline ' + fileMask + ' ' + tmpfile3 + ' ' + outfile)
#sys.exit()
delFile(tmpfile1)
delFile(tmpfile2)
delFile(tmpfile3)
#sys.exit()
|
[
"noreply@github.com"
] |
profcarlos.noreply@github.com
|
c914a8b2559ac7573d889ba0c0485586ee917073
|
607758c00b94a0a0d04143b2e1bce50cad510d25
|
/apps/context_processors.py
|
75b6e91b21ba6a4995f093737cb7d1729c5260ab
|
[] |
no_license
|
kailIII/demo
|
bfe767c93bfcd2fe8ec8e5a127d4d7761795b86c
|
676e5f4f51eafbd15bc0e7195cbbe6e1ed9545db
|
refs/heads/master
| 2021-01-15T13:00:54.277594
| 2014-09-17T16:44:13
| 2014-09-17T16:44:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 472
|
py
|
__author__ = 'root'
import django
from apps.home.models import userProfile
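# Template context processors: expose the running Django version and the
# current user's profile photo (falling back to a default image when no
# userProfile exists) to every template.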
def user_image(request):
try:
image = None
user = request.user
up = userProfile.objects.get(user=user)
image = '/%s'%up.photo
except:
image = '/static/img/user.gif'
return image
def my_processor(request):
context ={
'django_version':django.get_version(),
'get_image_profile':user_image(request),
}
return context
|
[
"jc.fie.umich@gmail.com"
] |
jc.fie.umich@gmail.com
|
727e16449c31d0c1b22b78350eca61a16af0476f
|
789eb9836ef1b4082a0658b2c98d8c6b45294e8f
|
/Django_app1/models.py
|
9e12ea46c52284de93060e1bfbfa05b7f3597e45
|
[] |
no_license
|
view1994/djago_proj1
|
fcd067461d7cbaf5b54913b0123bc4053b90cac7
|
81b16579c5a1aba49cc5abacb737426391744cea
|
refs/heads/master
| 2020-04-29T03:22:37.787928
| 2019-05-30T10:02:51
| 2019-05-30T10:02:51
| 175,807,067
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 702
|
py
|
from django.db import models
from mongoengine import *
from mongoengine import connect
connect('wbsite', host='127.0.0.1', port=27017)
# ORM
# Create your models here.
class AccountInfo(Document):
# account information database schema
login_flag = BooleanField( default= False)
usr_name = StringField(primary_key= True)
password = StringField( min_length = 6 )
regis_date = DateField( )
facial_count = IntField( default= 0 )
meta = {'collection': 'AccountInfo'}
class FacialFeature(Document):
name = StringField()
facial_feature = ListField(ListField())
class RoomInfo(Document):
url = StringField()
price = IntField()
meta = {'collection': 'RoomInfo'}
|
[
"379144671@qq.com"
] |
379144671@qq.com
|
e709d51744615b258a4f725ad7bd8c4f43704cb7
|
fe882b0e0c2c95e476d7ab1d7afeade5e04ed310
|
/okno.py
|
0403388145c3db7c07f98050da8e0c23c4c09603
|
[] |
no_license
|
trofimov87/python
|
0ce731f88342493dc911d0298ae6c32896742e89
|
ea8498202c3b630b2dbf3b9f7c7e625fb5dd4f44
|
refs/heads/master
| 2020-06-15T16:29:12.585509
| 2019-07-09T05:49:21
| 2019-07-09T05:49:21
| 195,342,215
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,147
|
py
|
from tkinter import *
from tkinter import messagebox
from datetime import date
class okno:
def proverka(self):
log = self.ent1.get()
p = self.ent2.get()
if log == "admin" and p == "admin":
messagebox.showinfo("Отчёт!", "Логин и пароль введены верно!")
else:
messagebox.showinfo("Отчёт!", "Логин и пароль указаны неправильно!")
def __init__(self):
self.but = Button(root, text="OK", command=self.proverka)
self.lab1 = Label(root, text="Login")
self.ent1 = Entry(root, width=20, bd=3)
self.lab2 = Label(root, text="Password")
self.ent2 = Entry(root, width=20, bd=3)
self.date = Label(root, text=date.today())
self.date.grid(row=0, column=0)
self.lab1.grid(row=1, column=0, sticky="w")
self.ent1. grid(row=1, column=1, sticky="w")
self.lab2.grid(row=2, column=0, sticky="w")
self.ent2.grid(row=2, column=1, sticky="w")
self.but.grid(row=3, column=1, sticky="n")
root = Tk()
root.title("OKNO")
obj = okno()
root.mainloop()
|
[
"trofimov-egor-87@yandex.ru"
] |
trofimov-egor-87@yandex.ru
|
0ce2d25ddc74d179b2a58c323d5412be17f0ea72
|
a01348fc9b634dcc73ddb209ef64e261b633e51d
|
/sanity/Sprint-5-test/connectMinio.py
|
4c37080e719b6881883c9660ed556ec41488f232
|
[] |
no_license
|
00mjk/Deduplicating-Cloud-functions
|
65db59ca69c1c35e447c1cd84f7fc669cffb356e
|
fe893dcbbc6b182f842c42d7b057ef0b909c0a2a
|
refs/heads/master
| 2023-03-20T06:00:46.680981
| 2019-05-04T19:01:47
| 2019-05-04T19:01:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 781
|
py
|
from minio import Minio
from checksum import calculate_checksum
from minio.error import ResponseError
from connectCouchdb import connect_couchdb,addFunctionIfNotExist
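# Helpers around a MinIO object store: build a client for a fixed endpoint,
# stream an object from a bucket into a local file named "testImg", and
# create a bucket if it does not already exist.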
def connect_minio():
mc = Minio('52.116.33.131:9000',
access_key='sanity',
secret_key='CloudforAll!',
secure=False)
return mc
def getObject(mc,fromkafka,bucket):
data = mc.get_object(bucket, fromkafka)
obj = "testImg"
with open(obj, 'wb') as file_data:
for d in data.stream(32 * 1024):
file_data.write(d)
return obj
def createBucket(mc,bucket):
try:
if not mc.bucket_exists(bucket):
mc.make_bucket(bucket,location="sanity-local")
except ResponseError as err:
print(err)
|
[
"asupat12@gmail.com"
] |
asupat12@gmail.com
|
dfad328393c8ffd2e9dce2259caa4f0b4ed8b721
|
6d21002d0a6ba249e2b9772b2edf097514f3109b
|
/ppo2/ppo2_agent_custom.py
|
4719a4e7a8e52ae6245cbd614f01d1c4f9447d3d
|
[
"MIT"
] |
permissive
|
gardenermike/openai-retro-contest-experiments
|
4069a6635e1eac6c5e923933187220c7b0bd96ea
|
2a6bfb00ba9c03b73ec1547143e653636fa368d3
|
refs/heads/master
| 2020-03-19T11:50:27.462937
| 2018-06-11T14:23:28
| 2018-06-11T14:23:28
| 136,480,133
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,618
|
py
|
#!/usr/bin/env python
"""
Train an agent on Sonic using PPO2 from OpenAI Baselines.
"""
import tensorflow as tf
from baselines.common.vec_env.dummy_vec_env import DummyVecEnv
import baselines.ppo2.ppo2 as ppo2
#import ppo2 as ppo2
import baselines.ppo2.policies as policies
from baselines.a2c.utils import conv, fc, conv_to_fc, batch_to_seq, seq_to_batch, lstm, lnlstm
from baselines.common.distributions import make_pdtype
from baselines import logger
import gym_remote.exceptions as gre
import math
from keras.layers import Conv2D, Dropout, Flatten, Dense
from sonic_util import make_env
#import retro
#from sonic_util_train import AllowBacktracking, SonicDiscretizer, RewardScaler, FrameStack, WarpFrame, make_env
import random
import csv
import sys
def get_training_envs():
envs = []
with open('./sonic-train.csv', 'r') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
game, state = row
if game == 'game':
continue
else:
envs.append(row)
return envs
def make_training_env(game, state, stack=True, scale_rew=True):
"""
Create an environment with some standard wrappers.
"""
env = retro.make(game=game, state=state)
env = SonicDiscretizer(env)
if scale_rew:
env = RewardScaler(env)
env = WarpFrame(env)
if stack:
env = FrameStack(env, 4)
return env
def custom_cnn(obs_batch, **conv_kwargs):
x = obs_batch
# normalize
x = (tf.cast(x, tf.float32) / 255.) - 0.5
#x = tf.concat(x, axis=-1)
initializer = tf.orthogonal_initializer(gain=math.sqrt(2))
# activation=WaveNet_activation
activation = 'relu'
y = Conv2D(32, kernel_size=8, strides=4, activation=activation, kernel_initializer=initializer, name='layer_1')(x)
y = Dropout(0.2)(y)
y = Conv2D(64, kernel_size=4, strides=2, activation=activation, kernel_initializer=initializer, name='layer_2')(y)
y = Dropout(0.1)(y)
y = Conv2D(64, kernel_size=3, strides=1, activation=activation, kernel_initializer=initializer, name='layer_3')(y)
y = Dropout(0.1)(y)
y = Flatten(name='flatten')(y)
y = Dense(512, activation='relu', kernel_initializer=initializer, name='dense1')(y)
return y
class CustomCnnPolicy(object):
def __init__(self, sess, ob_space, ac_space, nbatch, nsteps, reuse=False, **conv_kwargs): #pylint: disable=W0613
nh, nw, nc = ob_space.shape
ob_shape = (nbatch, nh, nw, nc)
self.pdtype = make_pdtype(ac_space)
X = tf.placeholder(tf.uint8, ob_shape) #obs
with tf.variable_scope("model", reuse=reuse):
#h = custom_cnn(X, **conv_kwargs)
#print(conv_kwargs)
h = policies.nature_cnn(X, **conv_kwargs)
vf = fc(h, 'v', 1)[:,0]
self.pd, self.pi = self.pdtype.pdfromlatent(h, init_scale=0.01)
a0 = self.pd.sample()
neglogp0 = self.pd.neglogp(a0)
self.initial_state = None
def step(ob, *_args, **_kwargs):
a, v, neglogp = sess.run([a0, vf, neglogp0], {X:ob})
return a, v, self.initial_state, neglogp
def value(ob, *_args, **_kwargs):
return sess.run(vf, {X:ob})
self.X = X
self.vf = vf
self.step = step
self.value = value
#[<tf.Variable 'model/c1/w:0' shape=(8, 8, 4, 32) dtype=float32_ref>, <tf.Variable 'model/c1/b:0' shape=(1, 32, 1, 1) dtype=float32_ref>, <tf.Variable 'model/c2/w:0' shape=(4, 4, 32, 64) dtype=float32_ref>, <tf.Variable 'model/c2/b:0' shape=(1, 64, 1, 1) dtype=float32_ref>, <tf.Variable 'model/c3/w:0' shape=(3, 3, 64, 64) dtype=float32_ref>, <tf.Variable 'model/c3/b:0' shape=(1, 64, 1, 1) dtype=float32_ref>, <tf.Variable 'model/fc1/w:0' shape=(3136, 512) dtype=float32_ref>, <tf.Variable 'model/fc1/b:0' shape=(512,) dtype=float32_ref>, <tf.Variable 'model/v/w:0' shape=(512, 1) dtype=float32_ref>, <tf.Variable 'model/v/b:0' shape=(1,) dtype=float32_ref>, <tf.Variable 'model/pi/w:0' shape=(512, 7) dtype=float32_ref>, <tf.Variable 'model/pi/b:0' shape=(7,) dtype=float32_ref>]
#<tf.Variable 'model/c1/w:0' shape=(8, 8, 1, 32) dtype=float32_ref>, <tf.Variable 'model/c1/b:0' shape=(1, 32, 1, 1) dtype=float32_ref>, <tf.Variable 'model/c2/w:0' shape=(4, 4, 32, 64) dtype=float32_ref>, <tf.Variable 'model/c2/b:0' shape=(1, 64, 1, 1) dtype=float32_ref>, <tf.Variable 'model/c3/w:0' shape=(3, 3, 64, 64) dtype=float32_ref>, <tf.Variable 'model/c3/b:0' shape=(1, 64, 1, 1) dtype=float32_ref>, <tf.Variable 'model/fc1/w:0' shape=(3136, 512) dtype=float32_ref>, <tf.Variable 'model/fc1/b:0' shape=(512,) dtype=float32_ref>
class MultigameEnvWrapper():
def __init__(self):
self.envs = get_training_envs()
self.make_env()
self.envs = get_training_envs()
self.nsteps = 0
self.switch_after_steps = 10000
def make_env(self):
game, state = random.choice(self.envs)
self.env = make_training_env(game, state, stack=True, scale_rew=True)
def step(self, *args):
self.nsteps += 1
if self.nsteps % self.switch_after_steps == 0:
self.env.close()
self.make_env()
self.env.reset()
return self.env.step(*args)
def __getattr__(self, attr):
return getattr(self.env, attr)
def main():
"""Run PPO until the environment throws an exception."""
config = tf.ConfigProto()
config.gpu_options.allow_growth = True # pylint: disable=E1101
with tf.Session(config=config):
env = make_env
#env = MultigameEnvWrapper
#load_path = '/root/compo/trained_on_images_nature_cnn.joblib'
load_path = './saved_weights_cnn.joblib'
#load_path = './saved_weights.joblib'
#logger.configure(dir='./logs', format_strs=['stdout', 'tensorboard'])
# Take more timesteps than we need to be sure that
# we stop due to an exception.
ppo2.learn(policy=CustomCnnPolicy,
env=DummyVecEnv([env]),
nsteps=4096,
nminibatches=8,
lam=0.95,
gamma=0.99,
noptepochs=3,
log_interval=1,
ent_coef=0.01, #0.2,
lr=lambda _: 2e-4,
cliprange=lambda i: 0.1, #1e-3,
total_timesteps=int(1e7),
load_path=load_path,
save_interval=500)
if __name__ == '__main__':
try:
main()
except gre.GymRemoteError as exc:
print('exception', exc)
|
[
"mike.challis@ryedigital.com"
] |
mike.challis@ryedigital.com
|
bfaf2638b2d779abc676e6ed5fdc62a1205d8d56
|
f56260df9a0db96d4cc215bca9d2f679ac443526
|
/psf.py
|
da4a506aa53f804b2d6ae65a228655263a1d900a
|
[] |
no_license
|
augucarv/PSF-photometry
|
24c7831464ff1b9c7057e0ca7a4b05d06a465f68
|
e6b5cd2029ec902c79f023af15f0334eee233d50
|
refs/heads/master
| 2022-12-25T18:48:52.163276
| 2020-10-06T21:09:41
| 2020-10-06T21:09:41
| 235,566,771
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,823
|
py
|
# This code uses the catalogs made with match_py and performs PSF photometry
# by using photutils's DAOPhotPSFPhotometry.
#
# INPUTS: (1) Catalog obtained from match_cat.py
# (2) FITS image of the region you want to evaluate
# (3) Catalog or table with the sources chosen to create the PSF
# OUTPUTS: (1) Table with photometric results (fluxes)
# (2) Table with data from the residual image
# (3) The residual image itself
# Prepping the data
import matplotlib.pyplot as plt
import numpy as np
from astropy.table import Table
from astropy.io import fits
# Importing the catalog from match_cat.py
catalog = Table.read('/etc/YOUR_CATALOG.txt',
format = 'ascii')
x_0=catalog['X_IMAGE_DBL']
y_0=catalog['Y_IMAGE_DBL']
flux_0 = catalog['FLUX_APER']
# Importing data from the image
image_data = fits.getdata('/etc/YOUR_FITS_IMAGE.fits')
# Creating PRF from image data
psf_reg = Table.read('/etc/YOUR_PSF_SOURCES.reg',
format = 'ascii') # Importing the catalog of chosen sources
from photutils.psf.sandbox import DiscretePRF
mask = np.isfinite(image_data)
psf = DiscretePRF.create_from_image(image_data,
psf_reg,
size=15,
mask=np.logical_not(mask),
mode='median',
subsampling=1)
# Performing Photometry
psf.x_0.fixed = True
psf.y_0.fixed = True
pos = Table(names=['x_0', 'y_0'], data=[catalog['X_IMAGE_DBL'],catalog['Y_IMAGE_DBL']]) # Using the initial positions
from photutils.psf import DAOPhotPSFPhotometry
from astropy.modeling.fitting import LevMarLSQFitter # fitter passed below was otherwise unimported
photometry= DAOPhotPSFPhotometry(crit_separation=19, # The higher the crit_separation, the higher the computational cost
threshold=1.0,
fwhm=3.0,
psf_model=psf,
fitshape=9,
sigma=3.0,
ratio=1.0,
theta=0.0,
sigma_radius=1.5,
sharplo=0.2,
sharphi=1.0,
roundlo=-1.0,
roundhi=1.0,
fitter=LevMarLSQFitter(),
niters=3,
aperture_radius=5)
import timeit
tic=timeit.default_timer()
phot_results = photometry(image_data,init_guesses=pos)
residual = photometry.get_residual_image()
toc=timeit.default_timer()
print((toc-tic)/60)
# Plotting the images
import matplotlib
from matplotlib.colors import LogNorm
fig = plt.figure()
matplotlib.rc('xtick', labelsize=20)
matplotlib.rc('ytick', labelsize=20)
fig.suptitle("NGC1600 PSF (GMOS, i-band)", fontsize=25)
plt.subplot(1, 2, 1)
plt.imshow(image_data, cmap='gray_r', aspect=1,interpolation='nearest', origin='lower',norm=LogNorm())
plt.title('Image data',fontsize=22)
plt.xlabel('px', fontsize=20)
plt.ylabel('px', fontsize=20)
plt.colorbar(orientation='horizontal')
plt.subplot(1 ,2, 2)
plt.imshow(residual, cmap='gray_r', aspect=1,interpolation='nearest', origin='lower',norm=LogNorm())
plt.title('Residual Image',fontsize=22)
plt.xlabel('px', fontsize=20)
plt.ylabel('px', fontsize=20)
plt.colorbar(orientation='horizontal')
# Writing the results
from astropy.io import ascii
ascii.write(phot_results, 'phot_results.txt', overwrite=True)
ascii.write(residual, 'residual.txt', overwrite=True)
# Writing the FITS file of the residual image
hdu = fits.PrimaryHDU(residual)
hdulist = fits.HDUList([hdu])
hdulist.writeto('residual.fits',overwrite=True)
|
[
"noreply@github.com"
] |
augucarv.noreply@github.com
|
777fa31ed54f0883f564c29385341735844ca14c
|
227d5f958b6d7bf8a5bf91479c599d9c4bd800c5
|
/tests/unit/customizations/test_assumerole.py
|
08d51fce09a6d85ad282680ad2e22832c35ba2d9
|
[
"Apache-2.0"
] |
permissive
|
defconcepts/aws-cli
|
81c7c0d678f23d5dbcfa5dfbae3d8ff42afb1ab8
|
b9984c9b21221fe34db4b7e01739797c4fed01d2
|
refs/heads/develop
| 2020-12-11T08:08:05.576167
| 2015-11-13T23:07:54
| 2015-11-13T23:07:54
| 46,190,524
| 1
| 0
| null | 2015-11-14T20:17:47
| 2015-11-14T20:17:47
| null |
UTF-8
|
Python
| false
| false
| 16,156
|
py
|
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import shutil
import tempfile
import os
import platform
from datetime import datetime, timedelta
import mock
from botocore.hooks import HierarchicalEmitter
from botocore.exceptions import PartialCredentialsError
from dateutil.tz import tzlocal
from awscli.testutils import unittest, skip_if_windows
from awscli.customizations import assumerole
class TestAssumeRolePlugin(unittest.TestCase):
def test_assume_role_provider_injected(self):
session = mock.Mock()
assumerole.inject_assume_role_provider(
session, event_name='building-command-table.foo')
session.get_component.assert_called_with('credential_provider')
credential_provider = session.get_component.return_value
call_args = credential_provider.insert_before.call_args[0]
self.assertEqual(call_args[0], 'shared-credentials-file')
self.assertIsInstance(call_args[1], assumerole.AssumeRoleProvider)
def test_assume_role_provider_registration(self):
event_handlers = HierarchicalEmitter()
assumerole.register_assume_role_provider(event_handlers)
session = mock.Mock()
event_handlers.emit('session-initialized', session=session)
# Just verifying that anything on the session was called ensures
# that our handler was called, as it's the only thing that should
# be registered.
session.get_component.assert_called_with('credential_provider')
def test_provider_not_registered_on_error(self):
session = mock.Mock()
session.get_component.side_effect = Exception(
"Couldn't get credential_provider.")
assumerole.inject_assume_role_provider(
session, event_name='building-command-table.foo')
self.assertFalse(
session.get_component.return_value.insert_before.called)
class TestAssumeRoleCredentialProvider(unittest.TestCase):
maxDiff = None
def setUp(self):
self.fake_config = {
'profiles': {
'development': {
'role_arn': 'myrole',
'source_profile': 'longterm',
},
'longterm': {
'aws_access_key_id': 'akid',
'aws_secret_access_key': 'skid',
}
}
}
def create_config_loader(self, with_config=None):
if with_config is None:
with_config = self.fake_config
load_config = mock.Mock()
load_config.return_value = with_config
return load_config
def create_client_creator(self, with_response):
# Create a mock sts client that returns a specific response
# for assume_role.
client = mock.Mock()
client.assume_role.return_value = with_response
return mock.Mock(return_value=client)
def test_assume_role_with_no_cache(self):
response = {
'Credentials': {
'AccessKeyId': 'foo',
'SecretAccessKey': 'bar',
'SessionToken': 'baz',
'Expiration': datetime.now(tzlocal()).isoformat()
},
}
client_creator = self.create_client_creator(with_response=response)
provider = assumerole.AssumeRoleProvider(
self.create_config_loader(),
client_creator, cache={}, profile_name='development')
credentials = provider.load()
self.assertEqual(credentials.access_key, 'foo')
self.assertEqual(credentials.secret_key, 'bar')
self.assertEqual(credentials.token, 'baz')
def test_assume_role_retrieves_from_cache(self):
date_in_future = datetime.utcnow() + timedelta(seconds=1000)
utc_timestamp = date_in_future.isoformat() + 'Z'
self.fake_config['profiles']['development']['role_arn'] = 'myrole'
cache = {
'development--myrole': {
'Credentials': {
'AccessKeyId': 'foo-cached',
'SecretAccessKey': 'bar-cached',
'SessionToken': 'baz-cached',
'Expiration': utc_timestamp,
}
}
}
provider = assumerole.AssumeRoleProvider(
self.create_config_loader(), mock.Mock(),
cache=cache, profile_name='development')
credentials = provider.load()
self.assertEqual(credentials.access_key, 'foo-cached')
self.assertEqual(credentials.secret_key, 'bar-cached')
self.assertEqual(credentials.token, 'baz-cached')
def test_cache_key_is_windows_safe(self):
response = {
'Credentials': {
'AccessKeyId': 'foo',
'SecretAccessKey': 'bar',
'SessionToken': 'baz',
'Expiration': datetime.now(tzlocal()).isoformat()
},
}
cache = {}
self.fake_config['profiles']['development']['role_arn'] = (
'arn:aws:iam::foo-role')
client_creator = self.create_client_creator(with_response=response)
provider = assumerole.AssumeRoleProvider(
self.create_config_loader(),
client_creator, cache=cache, profile_name='development')
provider.load()
# On Windows, you cannot use a ':' in the filename, so
# we need to do some small transformations on the filename
# to replace any ':' that come up.
self.assertEqual(cache['development--arn_aws_iam__foo-role'],
response)
def test_cache_key_with_role_session_name(self):
response = {
'Credentials': {
'AccessKeyId': 'foo',
'SecretAccessKey': 'bar',
'SessionToken': 'baz',
'Expiration': datetime.now(tzlocal()).isoformat()
},
}
cache = {}
self.fake_config['profiles']['development']['role_arn'] = (
'arn:aws:iam::foo-role')
self.fake_config['profiles']['development']['role_session_name'] = (
'foo_role_session_name')
client_creator = self.create_client_creator(with_response=response)
provider = assumerole.AssumeRoleProvider(
self.create_config_loader(),
client_creator, cache=cache, profile_name='development')
provider.load()
self.assertEqual(cache['development--arn_aws_iam__foo-role--foo_role_session_name'],
response)
def test_assume_role_in_cache_but_expired(self):
expired_creds = datetime.utcnow()
utc_timestamp = expired_creds.isoformat() + 'Z'
response = {
'Credentials': {
'AccessKeyId': 'foo',
'SecretAccessKey': 'bar',
'SessionToken': 'baz',
'Expiration': utc_timestamp,
},
}
client_creator = self.create_client_creator(with_response=response)
cache = {
'development--myrole': {
'Credentials': {
'AccessKeyId': 'foo-cached',
'SecretAccessKey': 'bar-cached',
'SessionToken': 'baz-cached',
'Expiration': utc_timestamp,
}
}
}
provider = assumerole.AssumeRoleProvider(
self.create_config_loader(), client_creator,
cache=cache, profile_name='development')
credentials = provider.load()
self.assertEqual(credentials.access_key, 'foo')
self.assertEqual(credentials.secret_key, 'bar')
self.assertEqual(credentials.token, 'baz')
def test_role_session_name_provided(self):
self.fake_config['profiles']['development']['role_session_name'] = 'myname'
response = {
'Credentials': {
'AccessKeyId': 'foo',
'SecretAccessKey': 'bar',
'SessionToken': 'baz',
'Expiration': datetime.now(tzlocal()).isoformat(),
},
}
client_creator = self.create_client_creator(with_response=response)
provider = assumerole.AssumeRoleProvider(
self.create_config_loader(),
client_creator, cache={}, profile_name='development')
provider.load()
client = client_creator.return_value
client.assume_role.assert_called_with(
RoleArn='myrole', RoleSessionName='myname')
def test_external_id_provided(self):
self.fake_config['profiles']['development']['external_id'] = 'myid'
response = {
'Credentials': {
'AccessKeyId': 'foo',
'SecretAccessKey': 'bar',
'SessionToken': 'baz',
'Expiration': datetime.now(tzlocal()).isoformat(),
},
}
client_creator = self.create_client_creator(with_response=response)
provider = assumerole.AssumeRoleProvider(
self.create_config_loader(),
client_creator, cache={}, profile_name='development')
provider.load()
client = client_creator.return_value
client.assume_role.assert_called_with(
RoleArn='myrole', ExternalId='myid', RoleSessionName=mock.ANY)
def test_assume_role_with_mfa(self):
self.fake_config['profiles']['development']['mfa_serial'] = 'mfa'
response = {
'Credentials': {
'AccessKeyId': 'foo',
'SecretAccessKey': 'bar',
'SessionToken': 'baz',
'Expiration': datetime.now(tzlocal()).isoformat(),
},
}
client_creator = self.create_client_creator(with_response=response)
prompter = mock.Mock(return_value='token-code')
provider = assumerole.AssumeRoleProvider(
self.create_config_loader(), client_creator,
cache={}, profile_name='development', prompter=prompter)
provider.load()
client = client_creator.return_value
# In addition to the normal assume role args, we should also
# inject the serial number from the config as well as the
# token code that comes from prompting the user (the prompter
# object).
client.assume_role.assert_called_with(
RoleArn='myrole', RoleSessionName=mock.ANY, SerialNumber='mfa',
TokenCode='token-code')
def test_assume_role_mfa_cannot_refresh_credentials(self):
# Note: we should look into supporting optional behavior
# in the future that allows for reprompting for credentials.
# But for now, if we get temp creds with MFA then when those
# creds expire, we can't refresh the credentials.
self.fake_config['profiles']['development']['mfa_serial'] = 'mfa'
response = {
'Credentials': {
'AccessKeyId': 'foo',
'SecretAccessKey': 'bar',
'SessionToken': 'baz',
# We're creating an expiry time in the past so as
# soon as we try to access the credentials, the
# refresh behavior will be triggered.
'Expiration': (
datetime.now(tzlocal()) -
timedelta(seconds=100)).isoformat(),
},
}
client_creator = self.create_client_creator(with_response=response)
provider = assumerole.AssumeRoleProvider(
self.create_config_loader(), client_creator,
cache={}, profile_name='development',
prompter=mock.Mock(return_value='token-code'))
creds = provider.load()
with self.assertRaises(assumerole.RefreshWithMFAUnsupportedError):
# access_key is a property that will refresh credentials
# if they're expired. Because we set the expiry time to
# something in the past, this will trigger the refresh
# behavior, which with MFA will currently raise an exception.
creds.access_key
def test_no_config_is_noop(self):
self.fake_config['profiles']['development'] = {
'aws_access_key_id': 'foo',
'aws_secret_access_key': 'bar',
}
provider = assumerole.AssumeRoleProvider(
self.create_config_loader(),
mock.Mock(), cache={}, profile_name='development')
# Because a role_arn was not specified, the AssumeRoleProvider
# is a noop and will not return credentials (which means we
# move on to the next provider).
credentials = provider.load()
self.assertIsNone(credentials)
def test_source_profile_not_provided(self):
del self.fake_config['profiles']['development']['source_profile']
provider = assumerole.AssumeRoleProvider(
self.create_config_loader(),
mock.Mock(), cache={}, profile_name='development')
# source_profile is required, so we should get an error.
with self.assertRaises(PartialCredentialsError):
provider.load()
def test_source_profile_does_not_exist(self):
dev_profile = self.fake_config['profiles']['development']
dev_profile['source_profile'] = 'does-not-exist'
provider = assumerole.AssumeRoleProvider(
self.create_config_loader(),
mock.Mock(), cache={}, profile_name='development')
# source_profile is required, so we should get an error.
with self.assertRaises(assumerole.InvalidConfigError):
provider.load()
class TestJSONCache(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp()
self.cache = assumerole.JSONFileCache(self.tempdir)
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_supports_contains_check(self):
# By default the cache is empty because we're
        # using a new temp dir every time.
self.assertTrue('mykey' not in self.cache)
def test_add_key_and_contains_check(self):
self.cache['mykey'] = {'foo': 'bar'}
self.assertTrue('mykey' in self.cache)
def test_added_key_can_be_retrieved(self):
self.cache['mykey'] = {'foo': 'bar'}
self.assertEqual(self.cache['mykey'], {'foo': 'bar'})
def test_only_accepts_json_serializable_data(self):
with self.assertRaises(ValueError):
            # set()'s cannot be serialized to a JSON string.
self.cache['mykey'] = set()
def test_can_override_existing_values(self):
self.cache['mykey'] = {'foo': 'bar'}
self.cache['mykey'] = {'baz': 'newvalue'}
self.assertEqual(self.cache['mykey'], {'baz': 'newvalue'})
def test_can_add_multiple_keys(self):
self.cache['mykey'] = {'foo': 'bar'}
self.cache['mykey2'] = {'baz': 'qux'}
self.assertEqual(self.cache['mykey'], {'foo': 'bar'})
self.assertEqual(self.cache['mykey2'], {'baz': 'qux'})
def test_working_dir_does_not_exist(self):
working_dir = os.path.join(self.tempdir, 'foo')
cache = assumerole.JSONFileCache(working_dir)
cache['foo'] = {'bar': 'baz'}
self.assertEqual(cache['foo'], {'bar': 'baz'})
def test_key_error_raised_when_cache_key_does_not_exist(self):
with self.assertRaises(KeyError):
self.cache['foo']
@skip_if_windows('File permissions tests not supported on Windows.')
def test_permissions_for_file_restricted(self):
self.cache['mykey'] = {'foo': 'bar'}
filename = os.path.join(self.tempdir, 'mykey.json')
self.assertEqual(os.stat(filename).st_mode & 0xFFF, 0o600)
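# Illustrative round-trip of the cache behavior exercised above (a sketch only,
# assuming a writable directory; not part of the original test module):
#   cache = assumerole.JSONFileCache('/tmp/aws-cli-cache')
#   cache['dev--myrole'] = {'Credentials': {'AccessKeyId': 'foo'}}
#   assert 'dev--myrole' in cache
#   assert cache['dev--myrole'] == {'Credentials': {'AccessKeyId': 'foo'}}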
|
[
"js@jamesls.com"
] |
js@jamesls.com
|
6773476a091ee16de4a729e1fd6a17925b4f75bd
|
9c974edd8b0b733fb0eceeca6929bfc7053016e3
|
/medium/29.py
|
c360430dbde8b94bbca7321e276b0dda976630e3
|
[] |
no_license
|
MaoYuwei/leetcode
|
df8ce2fdf73dffe737624f8d37a0f9263df87770
|
69478ccbf3323689cd22c3e64f39da642fe137d6
|
refs/heads/master
| 2020-03-22T07:40:35.530814
| 2019-08-02T03:12:31
| 2019-08-02T03:12:31
| 139,717,041
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 617
|
py
|
# Divide Two Integers
class Solution(object):
def divide(self, dividend, divisor):
"""
:type dividend: int
:type divisor: int
:rtype: int
"""
ret = 0
a = abs(dividend)
b = abs(divisor)
while a >= b:
count = 1
sum = b
while sum + sum < a:
sum += sum
count += count
ret += count
a -= sum
if (dividend > 0 and divisor < 0) or (dividend < 0 and divisor > 0):
ret = -ret
return min(max(-2147483648, ret), 2147483647)
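# Quick sanity checks (illustrative, not part of the original submission):
#   Solution().divide(10, 3)            -> 3
#   Solution().divide(7, -3)            -> -2  (truncates toward zero)
#   Solution().divide(-2147483648, -1)  -> 2147483647  (clamped to the 32-bit max)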
|
[
"maoyuwei@nuaa.edu.cn"
] |
maoyuwei@nuaa.edu.cn
|
08d92cbf11ee4861a7e748e92822518f1b1a8e23
|
b5a9d42f7ea5e26cd82b3be2b26c324d5da79ba1
|
/tensorflow/contrib/rnn/python/kernel_tests/rnn_test.py
|
7a50d68d5b9dd0a4091b3b5ef2e4445e5a0ce8dc
|
[
"Apache-2.0"
] |
permissive
|
uve/tensorflow
|
e48cb29f39ed24ee27e81afd1687960682e1fbef
|
e08079463bf43e5963acc41da1f57e95603f8080
|
refs/heads/master
| 2020-11-29T11:30:40.391232
| 2020-01-11T13:43:10
| 2020-01-11T13:43:10
| 230,088,347
| 0
| 0
|
Apache-2.0
| 2019-12-25T10:49:15
| 2019-12-25T10:49:14
| null |
UTF-8
|
Python
| false
| false
| 20,079
|
py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for rnn module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.contrib.rnn.python.ops import rnn as contrib_rnn
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
class StackBidirectionalRNNTest(test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
def _createStackBidirectionalRNN(self,
use_gpu,
use_shape,
use_sequence_length,
initial_states_fw=None,
initial_states_bw=None,
scope=None):
self.layers = [2, 3]
input_size = 5
batch_size = 2
max_length = 8
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
sequence_length = array_ops.placeholder(
dtypes.int64) if use_sequence_length else None
self.cells_fw = [
rnn_cell.LSTMCell(
num_units,
input_size,
initializer=initializer,
state_is_tuple=False) for num_units in self.layers
]
self.cells_bw = [
rnn_cell.LSTMCell(
num_units,
input_size,
initializer=initializer,
state_is_tuple=False) for num_units in self.layers
]
inputs = max_length * [
array_ops.placeholder(
dtypes.float32,
shape=(batch_size, input_size) if use_shape else (None, input_size))
]
outputs, state_fw, state_bw = contrib_rnn.stack_bidirectional_rnn(
self.cells_fw,
self.cells_bw,
inputs,
initial_states_fw,
initial_states_bw,
dtype=dtypes.float32,
sequence_length=sequence_length,
scope=scope)
self.assertEqual(len(outputs), len(inputs))
for out in outputs:
self.assertAlmostEqual(
out.get_shape().as_list(),
[batch_size if use_shape else None, 2 * self.layers[-1]])
input_value = np.random.randn(batch_size, input_size)
outputs = array_ops.stack(outputs)
return input_value, inputs, outputs, state_fw, state_bw, sequence_length
def _testStackBidirectionalRNN(self, use_gpu, use_shape):
with self.session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
input_value, inputs, outputs, state_fw, state_bw, sequence_length = (
self._createStackBidirectionalRNN(use_gpu, use_shape, True))
variables.global_variables_initializer().run()
# Run with pre-specified sequence lengths of 2, 3.
out, s_fw, s_bw = sess.run(
[outputs, state_fw, state_bw],
feed_dict={inputs[0]: input_value,
sequence_length: [2, 3]})
# Since the forward and backward LSTM cells were initialized with the
# same parameters, the forward and backward states of the first layer
# must be the same.
# For the next layers, since the input is a concat of forward and backward
# outputs of the previous layers the symmetry is broken and the following
# states and outputs differ.
# We cannot access the intermediate values between layers but we can
# check that the forward and backward states of the first layer match.
self.assertAllClose(s_fw[0], s_bw[0])
# If outputs are not concat between layers the output of the forward
# and backward would be the same but symmetric.
# Check that it is not the case.
# Due to depth concatenation (as num_units=3 for both RNNs):
# - forward output: out[][][depth] for 0 <= depth < 3
      # - backward output: out[][][depth] for 3 <= depth < 6
# First sequence in batch is length=2
# Check that the time=0 forward output is not equal to time=1 backward.
self.assertNotEqual(out[0][0][0], out[1][0][3])
self.assertNotEqual(out[0][0][1], out[1][0][4])
self.assertNotEqual(out[0][0][2], out[1][0][5])
# Check that the time=1 forward output is not equal to time=0 backward.
self.assertNotEqual(out[1][0][0], out[0][0][3])
self.assertNotEqual(out[1][0][1], out[0][0][4])
self.assertNotEqual(out[1][0][2], out[0][0][5])
# Second sequence in batch is length=3
# Check that the time=0 forward output is not equal to time=2 backward.
self.assertNotEqual(out[0][1][0], out[2][1][3])
self.assertNotEqual(out[0][1][1], out[2][1][4])
self.assertNotEqual(out[0][1][2], out[2][1][5])
# Check that the time=1 forward output is not equal to time=1 backward.
self.assertNotEqual(out[1][1][0], out[1][1][3])
self.assertNotEqual(out[1][1][1], out[1][1][4])
self.assertNotEqual(out[1][1][2], out[1][1][5])
# Check that the time=2 forward output is not equal to time=0 backward.
self.assertNotEqual(out[2][1][0], out[0][1][3])
self.assertNotEqual(out[2][1][1], out[0][1][4])
self.assertNotEqual(out[2][1][2], out[0][1][5])
def _testStackBidirectionalRNNStates(self, use_gpu):
# Check that the states are correctly initialized.
# - Create a net and iterate for 3 states. Keep the state (state_3).
# - Reset states, and iterate for 5 steps. Last state is state_5.
# - Reset the sets to state_3 and iterate for 2 more steps,
# last state will be state_5'.
# - Check that the state_5 and state_5' (forward and backward) are the
# same for the first layer (it does not apply for the second layer since
# it has forward-backward dependencies).
with self.session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
batch_size = 2
# Create states placeholders.
initial_states_fw = [
array_ops.placeholder(
dtypes.float32, shape=(batch_size, layer * 2))
for layer in self.layers
]
initial_states_bw = [
array_ops.placeholder(
dtypes.float32, shape=(batch_size, layer * 2))
for layer in self.layers
]
# Create the net
input_value, inputs, outputs, state_fw, state_bw, sequence_length = (
self._createStackBidirectionalRNN(use_gpu, True, True,
initial_states_fw,
initial_states_bw))
variables.global_variables_initializer().run()
# Run 3 steps.
feed_dict = {inputs[0]: input_value, sequence_length: [3, 2]}
# Initialize to empty state.
for i, layer in enumerate(self.layers):
feed_dict[initial_states_fw[i]] = np.zeros(
(batch_size, layer * 2), dtype=np.float32)
feed_dict[initial_states_bw[i]] = np.zeros(
(batch_size, layer * 2), dtype=np.float32)
_, st_3_fw, st_3_bw = sess.run([outputs, state_fw, state_bw],
feed_dict=feed_dict)
# Reset the net and run 5 steps.
feed_dict = {inputs[0]: input_value, sequence_length: [5, 3]}
for i, layer in enumerate(self.layers):
feed_dict[initial_states_fw[i]] = np.zeros(
(batch_size, layer * 2), dtype=np.float32)
feed_dict[initial_states_bw[i]] = np.zeros(
(batch_size, layer * 2), dtype=np.float32)
_, st_5_fw, st_5_bw = sess.run([outputs, state_fw, state_bw],
feed_dict=feed_dict)
# Reset the net to state_3 and run 2 more steps.
feed_dict = {inputs[0]: input_value, sequence_length: [2, 1]}
for i, _ in enumerate(self.layers):
feed_dict[initial_states_fw[i]] = st_3_fw[i]
feed_dict[initial_states_bw[i]] = st_3_bw[i]
out_5p, st_5p_fw, st_5p_bw = sess.run([outputs, state_fw, state_bw],
feed_dict=feed_dict)
      # Check that the first-layer states after 3+2 steps match those after 5 steps.
self.assertAllEqual(st_5_fw[0], st_5p_fw[0])
self.assertAllEqual(st_5_bw[0], st_5p_bw[0])
def testStackBidirectionalRNN(self):
self._testStackBidirectionalRNN(use_gpu=False, use_shape=False)
self._testStackBidirectionalRNN(use_gpu=True, use_shape=False)
self._testStackBidirectionalRNN(use_gpu=False, use_shape=True)
self._testStackBidirectionalRNN(use_gpu=True, use_shape=True)
self._testStackBidirectionalRNNStates(use_gpu=False)
self._testStackBidirectionalRNNStates(use_gpu=True)
def _createStackBidirectionalDynamicRNN(self,
use_gpu,
use_shape,
use_state_tuple,
initial_states_fw=None,
initial_states_bw=None,
scope=None):
self.layers = [2, 3]
input_size = 5
batch_size = 2
max_length = 8
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=self._seed)
sequence_length = array_ops.placeholder(dtypes.int64)
self.cells_fw = [
rnn_cell.LSTMCell(
num_units,
input_size,
initializer=initializer,
state_is_tuple=False) for num_units in self.layers
]
self.cells_bw = [
rnn_cell.LSTMCell(
num_units,
input_size,
initializer=initializer,
state_is_tuple=False) for num_units in self.layers
]
inputs = max_length * [
array_ops.placeholder(
dtypes.float32,
shape=(batch_size, input_size) if use_shape else (None, input_size))
]
inputs_c = array_ops.stack(inputs)
inputs_c = array_ops.transpose(inputs_c, [1, 0, 2])
outputs, st_fw, st_bw = contrib_rnn.stack_bidirectional_dynamic_rnn(
self.cells_fw,
self.cells_bw,
inputs_c,
initial_states_fw=initial_states_fw,
initial_states_bw=initial_states_bw,
dtype=dtypes.float32,
sequence_length=sequence_length,
scope=scope)
    # Outputs has shape (batch_size, max_length, 2 * layers[-1]).
output_shape = [None, max_length, 2 * self.layers[-1]]
if use_shape:
output_shape[0] = batch_size
self.assertAllEqual(outputs.get_shape().as_list(), output_shape)
input_value = np.random.randn(batch_size, input_size)
return input_value, inputs, outputs, st_fw, st_bw, sequence_length
def _testStackBidirectionalDynamicRNN(self, use_gpu, use_shape,
use_state_tuple):
with self.session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
input_value, inputs, outputs, state_fw, state_bw, sequence_length = (
self._createStackBidirectionalDynamicRNN(use_gpu, use_shape,
use_state_tuple))
variables.global_variables_initializer().run()
# Run with pre-specified sequence length of 2, 3
out, s_fw, s_bw = sess.run(
[outputs, state_fw, state_bw],
feed_dict={inputs[0]: input_value,
sequence_length: [2, 3]})
# Since the forward and backward LSTM cells were initialized with the
      # same parameters, the forward and backward states of the first layer
      # have to be the same.
# For the next layers, since the input is a concat of forward and backward
# outputs of the previous layers the symmetry is broken and the following
# states and outputs differ.
# We cannot access the intermediate values between layers but we can
# check that the forward and backward states of the first layer match.
self.assertAllClose(s_fw[0], s_bw[0])
out = np.swapaxes(out, 0, 1)
# If outputs are not concat between layers the output of the forward
# and backward would be the same but symmetric.
      # Check that it is not the case.
# Due to depth concatenation (as num_units=3 for both RNNs):
# - forward output: out[][][depth] for 0 <= depth < 3
      # - backward output: out[][][depth] for 3 <= depth < 6
# First sequence in batch is length=2
# Check that the time=0 forward output is not equal to time=1 backward.
self.assertNotEqual(out[0][0][0], out[1][0][3])
self.assertNotEqual(out[0][0][1], out[1][0][4])
self.assertNotEqual(out[0][0][2], out[1][0][5])
# Check that the time=1 forward output is not equal to time=0 backward.
self.assertNotEqual(out[1][0][0], out[0][0][3])
self.assertNotEqual(out[1][0][1], out[0][0][4])
self.assertNotEqual(out[1][0][2], out[0][0][5])
# Second sequence in batch is length=3
# Check that the time=0 forward output is not equal to time=2 backward.
self.assertNotEqual(out[0][1][0], out[2][1][3])
self.assertNotEqual(out[0][1][1], out[2][1][4])
self.assertNotEqual(out[0][1][2], out[2][1][5])
# Check that the time=1 forward output is not equal to time=1 backward.
self.assertNotEqual(out[1][1][0], out[1][1][3])
self.assertNotEqual(out[1][1][1], out[1][1][4])
self.assertNotEqual(out[1][1][2], out[1][1][5])
# Check that the time=2 forward output is not equal to time=0 backward.
self.assertNotEqual(out[2][1][0], out[0][1][3])
self.assertNotEqual(out[2][1][1], out[0][1][4])
self.assertNotEqual(out[2][1][2], out[0][1][5])
def _testStackBidirectionalDynamicRNNStates(self, use_gpu):
# Check that the states are correctly initialized.
# - Create a net and iterate for 3 states. Keep the state (state_3).
# - Reset states, and iterate for 5 steps. Last state is state_5.
# - Reset the sets to state_3 and iterate for 2 more steps,
# last state will be state_5'.
# - Check that the state_5 and state_5' (forward and backward) are the
# same for the first layer (it does not apply for the second layer since
# it has forward-backward dependencies).
with self.session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
batch_size = 2
# Create states placeholders.
initial_states_fw = [
array_ops.placeholder(
dtypes.float32, shape=(batch_size, layer * 2))
for layer in self.layers
]
initial_states_bw = [
array_ops.placeholder(
dtypes.float32, shape=(batch_size, layer * 2))
for layer in self.layers
]
# Create the net
input_value, inputs, outputs, state_fw, state_bw, sequence_length = (
self._createStackBidirectionalDynamicRNN(
use_gpu,
use_shape=True,
use_state_tuple=False,
initial_states_fw=initial_states_fw,
initial_states_bw=initial_states_bw))
variables.global_variables_initializer().run()
# Run 3 steps.
feed_dict = {inputs[0]: input_value, sequence_length: [3, 2]}
# Initialize to empty state.
for i, layer in enumerate(self.layers):
feed_dict[initial_states_fw[i]] = np.zeros(
(batch_size, layer * 2), dtype=np.float32)
feed_dict[initial_states_bw[i]] = np.zeros(
(batch_size, layer * 2), dtype=np.float32)
_, st_3_fw, st_3_bw = sess.run([outputs, state_fw, state_bw],
feed_dict=feed_dict)
# Reset the net and run 5 steps.
feed_dict = {inputs[0]: input_value, sequence_length: [5, 3]}
for i, layer in enumerate(self.layers):
feed_dict[initial_states_fw[i]] = np.zeros(
(batch_size, layer * 2), dtype=np.float32)
feed_dict[initial_states_bw[i]] = np.zeros(
(batch_size, layer * 2), dtype=np.float32)
_, st_5_fw, st_5_bw = sess.run([outputs, state_fw, state_bw],
feed_dict=feed_dict)
# Reset the net to state_3 and run 2 more steps.
feed_dict = {inputs[0]: input_value, sequence_length: [2, 1]}
for i, _ in enumerate(self.layers):
feed_dict[initial_states_fw[i]] = st_3_fw[i]
feed_dict[initial_states_bw[i]] = st_3_bw[i]
out_5p, st_5p_fw, st_5p_bw = sess.run([outputs, state_fw, state_bw],
feed_dict=feed_dict)
      # Check that the first-layer states after 3+2 steps match those after 5 steps.
self.assertAllEqual(st_5_fw[0], st_5p_fw[0])
self.assertAllEqual(st_5_bw[0], st_5p_bw[0])
def testBidirectionalRNN(self):
# Generate 2^3 option values
# from [True, True, True] to [False, False, False]
options = itertools.product([True, False], repeat=3)
for option in options:
self._testStackBidirectionalDynamicRNN(
use_gpu=option[0], use_shape=option[1], use_state_tuple=option[2])
# Check States.
self._testStackBidirectionalDynamicRNNStates(use_gpu=False)
self._testStackBidirectionalDynamicRNNStates(use_gpu=True)
def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
# REMARKS: factory(scope) is a function accepting a scope
    # as an argument; the scope can be None, a string,
# or a VariableScope instance.
with self.session(use_gpu=True, graph=ops.Graph()):
if use_outer_scope:
with variable_scope.variable_scope(prefix) as scope:
factory(scope)
else:
factory(prefix)
      # Check that all the variable names start with the proper scope.
variables.global_variables_initializer()
all_vars = variables.global_variables()
prefix = prefix or "stack_bidirectional_rnn"
scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
tf_logging.info("StackRNN with scope: %s (%s)" %
(prefix, "scope" if use_outer_scope else "str"))
for v in scope_vars:
tf_logging.info(v.name)
self.assertEqual(len(scope_vars), len(all_vars))
def testStackBidirectionalRNNScope(self):
def factory(scope):
return self._createStackBidirectionalRNN(
use_gpu=True, use_shape=True, use_sequence_length=True, scope=scope)
self._testScope(factory, use_outer_scope=True)
self._testScope(factory, use_outer_scope=False)
self._testScope(factory, prefix=None, use_outer_scope=False)
def testBidirectionalDynamicRNNScope(self):
def factory(scope):
return self._createStackBidirectionalDynamicRNN(
use_gpu=True, use_shape=True, use_state_tuple=True, scope=scope)
self._testScope(factory, use_outer_scope=True)
self._testScope(factory, use_outer_scope=False)
self._testScope(factory, prefix=None, use_outer_scope=False)
if __name__ == "__main__":
test.main()
|
[
"v-grniki@microsoft.com"
] |
v-grniki@microsoft.com
|
26e51cc0dae9c24e7ff06f11b954400c926013a1
|
f3897bbf525cb86e79d536e10aaf9d56ea32823f
|
/Chess-ELO-Mongo-Pipelines/mongo_aggregator/mongo_aggregators.py
|
83e2d6936757af0fd37478334183394a126e1bbe
|
[] |
no_license
|
amanagarwal03/Chess-ELO-Predictor
|
e3f3e87f2b0475e80e7a377c2249c9f26270cab7
|
562ca14a7a0170b0edbf8767bd2ec6f5d550bd94
|
refs/heads/master
| 2022-12-08T12:08:18.157465
| 2020-03-04T00:20:12
| 2020-03-04T00:20:12
| 244,763,716
| 0
| 0
| null | 2022-12-08T07:31:30
| 2020-03-03T23:30:42
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,125
|
py
|
__author__ = 'Mayank Tiwari'
import errno
import os
from datetime import datetime
now = datetime.now()
import logging.config
from models.game import *
from util import *
# ROOT_DIR = os.path.abspath(os.path.join(os.path.split(__file__)[0], '..'))
# sys.path.append(ROOT_DIR)
'''
Loading the Logging library here
'''
# logging.basicConfig(level=logging.INFO, filename='app.log', filemode='w', format='%(asctime)s - %(levelname)s - %(message)s')
with open('../logging_config.yaml', 'r') as f:
config = yaml.safe_load(f.read())
logging.config.dictConfig(config)
logger = logging.getLogger(__name__)
'''
Connecting to Mongo DB
'''
config = load_config('../elo_config.yaml')
profile = config["profile"]
logger.info(f'Loaded configuration for Profile: {profile}')
init_database(profile, config)
def writeOutput(file, line):
logging.info(line)
file.write(line)
file.write("\n")
# SEPARATOR = ", "
SEPARATOR = "|"
timestampString = now.strftime("%m_%d_%Y_%H_%M_%S")
outputFileName = f"dump/game_analysis_{timestampString}.psv"
if not os.path.exists(os.path.dirname(outputFileName)):
try:
os.makedirs(os.path.dirname(outputFileName))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
outF = open(outputFileName, "w")
writeOutput(
outF,
'Event' + SEPARATOR + 'Is Draw' + SEPARATOR + 'Has Black Won' + SEPARATOR + 'Opening' + SEPARATOR + 'White ELO' + SEPARATOR + 'Black ELO' + SEPARATOR + 'Time Control'
+ SEPARATOR + 'Termination' + SEPARATOR + 'Total White Score' + SEPARATOR + 'Total Black Score' + SEPARATOR + 'No. White Moves' + SEPARATOR + 'No. Black Moves' + SEPARATOR +
'White Avg. Score' + SEPARATOR + 'Black Avg. Score'
)
logging.info('Beginning to process data from MongoDB...')
for game in Game.objects.all():
isDraw = game.result == "1/2-1/2"
hasBlackWon = False
if not isDraw:
hasBlackWon = game.result == "0-1"
white_score_sum = 0
black_score_sum = 0
total_white_games = 0
total_black_games = 0
for move in game.moves:
if move.turn:
total_white_games = total_white_games + 1
white_score_sum = white_score_sum + move.score
else:
total_black_games = total_black_games + 1
black_score_sum = black_score_sum + move.score
white_avg_score = 0 if total_white_games == 0 else round(white_score_sum / total_white_games, 2)
black_avg_score = 0 if total_black_games == 0 else round(black_score_sum / total_black_games, 2)
writeOutput(
outF,
game.event + SEPARATOR + str(isDraw) + SEPARATOR + str(hasBlackWon) + SEPARATOR +
game.opening + SEPARATOR + game.whiteElo + SEPARATOR + game.blackElo + SEPARATOR + game.timeControl + SEPARATOR + game.termination + SEPARATOR \
+ str(white_score_sum) + SEPARATOR + str(black_score_sum) + SEPARATOR + str(total_white_games) + SEPARATOR + str(total_black_games) + SEPARATOR \
+ str(white_avg_score) + SEPARATOR + str(black_avg_score)
)
outF.close()
logging.info('Finished processing data from MongoDB!')
|
[
"amanagarwal03@MacBook-Pro.local"
] |
amanagarwal03@MacBook-Pro.local
|
7c7c986ec8247f700d45abb685745ff743b71d29
|
981be8c453a98801950efbdeaea7b57c27854176
|
/venv/Lib/site-packages/wsgidav/error_printer.py
|
52b19d58723b814c6a3045dcd17667d0ac318618
|
[] |
no_license
|
overburdeni/SpiderMan
|
04ee7427d69ce08bf1751f8bc9cc5a9a323a3719
|
7a16151ac59accd7f7b95acf54c6a9fa191abccd
|
refs/heads/master
| 2022-12-01T23:38:33.199517
| 2020-08-06T15:54:13
| 2020-08-06T15:54:13
| 285,609,134
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,129
|
py
|
# -*- coding: utf-8 -*-
# (c) 2009-2020 Martin Wendt and contributors; see WsgiDAV https://github.com/mar10/wsgidav
# Original PyFileServer (c) 2005 Ho Chun Wei.
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license.php
"""
WSGI middleware to catch application thrown DAVErrors and return proper
responses.
"""
from wsgidav import util
from wsgidav.dav_error import (
as_DAVError,
DAVError,
get_http_status_string,
HTTP_INTERNAL_ERROR,
HTTP_NO_CONTENT,
HTTP_NOT_MODIFIED,
)
from wsgidav.middleware import BaseMiddleware
import traceback
__docformat__ = "reStructuredText"
_logger = util.get_module_logger(__name__)
# ========================================================================
# ErrorPrinter
# ========================================================================
class ErrorPrinter(BaseMiddleware):
def __init__(self, wsgidav_app, next_app, config):
super(ErrorPrinter, self).__init__(wsgidav_app, next_app, config)
self.err_config = config.get("error_printer", {})
self.catch_all_exceptions = self.err_config.get("catch_all", False)
def is_disabled(self):
return self.err_config.get("enable") is False
def __call__(self, environ, start_response):
# Intercept start_response
sub_app_start_response = util.SubAppStartResponse()
try:
try:
# request_server app may be a generator (for example the GET handler)
# So we must iterate - not return self.next_app(..)!
                # Otherwise we could not catch exceptions here.
response_started = False
app_iter = self.next_app(environ, sub_app_start_response)
for v in app_iter:
# Start response (the first time)
if not response_started:
# Success!
start_response(
sub_app_start_response.status,
sub_app_start_response.response_headers,
sub_app_start_response.exc_info,
)
response_started = True
yield v
# Close out iterator
if hasattr(app_iter, "close"):
app_iter.close()
# Start response (if it hasn't been done yet)
if not response_started:
# Success!
start_response(
sub_app_start_response.status,
sub_app_start_response.response_headers,
sub_app_start_response.exc_info,
)
return
except DAVError as e:
_logger.debug("re-raising {}".format(e))
raise
except Exception as e:
# Caught a non-DAVError
if self.catch_all_exceptions:
# Catch all exceptions to return as 500 Internal Error
# traceback.print_exc(10, environ.get("wsgi.errors") or sys.stderr)
_logger.error("{}".format(traceback.format_exc(10)))
raise as_DAVError(e)
else:
_logger.error(
"Caught Exception\n{}".format(traceback.format_exc(10))
)
# traceback.print_exc(10, sys.stderr)
raise
except DAVError as e:
_logger.debug("caught {}".format(e))
status = get_http_status_string(e)
# Dump internal errors to console
if e.value == HTTP_INTERNAL_ERROR:
tb = traceback.format_exc(10)
_logger.error(
"Caught HTTPRequestException(HTTP_INTERNAL_ERROR)\n{}".format(tb)
)
# traceback.print_exc(10, environ.get("wsgi.errors") or sys.stdout)
_logger.error("e.src_exception:\n{}".format(e.src_exception))
elif e.value in (HTTP_NOT_MODIFIED, HTTP_NO_CONTENT):
# _logger.warning("Forcing empty error response for {}".format(e.value))
                # See paste.lint: these status codes must not have content
start_response(
status, [("Content-Length", "0"), ("Date", util.get_rfc1123_time())]
)
yield b""
return
# If exception has pre-/post-condition: return as XML response,
# else return as HTML
content_type, body = e.get_response_page()
# TODO: provide exc_info=sys.exc_info()?
start_response(
status,
[
("Content-Type", content_type),
("Content-Length", str(len(body))),
("Date", util.get_rfc1123_time()),
],
)
yield body
return
|
[
"1678458207@qq.com"
] |
1678458207@qq.com
|
994658cd4a34e6ab2d1d8577cba58f08f1193a90
|
0e76f9a3db8392050fd07c22cf8668672f377162
|
/blog.py
|
e505dc79f97cb5036d52a93b32fc9cc2308a528d
|
[] |
no_license
|
alvmillan/flask-blog
|
2e6bd0d11ca34d5feb760b527d9cb11425defcfb
|
3fd355e9467a3b61f0bfbce71e49f3f0046bbb2e
|
refs/heads/master
| 2021-01-12T17:22:46.172242
| 2016-10-22T12:14:37
| 2016-10-22T12:14:37
| 71,553,675
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,043
|
py
|
#blog.py - controller
#imports
from flask import Flask, render_template, request, session, \
flash, redirect, url_for, g
import sqlite3
from functools import wraps
#configuration
DATABASE = 'blog.db'
USERNAME = 'admin'
PASSWORD = 'admin'
SECRET_KEY = 'hard_to_guess'
app = Flask(__name__)
# pulls in app configuration by looking for UPPERCASE variables
app.config.from_object(__name__)
# function used for connection to the database
def connect_db():
return sqlite3.connect(app.config['DATABASE'])
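# Assumed schema for blog.db (not included in this file; a minimal sketch that is
# consistent with the SELECT and INSERT statements below):
#   CREATE TABLE posts (title TEXT NOT NULL, post TEXT NOT NULL);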
def login_required(test):
@wraps(test)
def wrap(*args, **kwargs):
if 'logged_in' in session:
return test(*args, **kwargs)
else:
flash('You need to login first.')
return redirect(url_for('login'))
return wrap
@app.route('/', methods=['GET','POST'])
def login():
error = None
if request.method == 'POST':
if request.form['username'] != app.config['USERNAME'] or request.form['password'] != app.config['PASSWORD']:
error = 'Invalid Credentials. Please try again.'
else:
session['logged_in'] = True
return redirect(url_for('main'))
return render_template('login.html', error = error)
@app.route('/main')
@login_required
def main():
g.db = connect_db()
cur = g.db.execute('SELECT * FROM posts')
posts = [dict(title=row[0], post=row[1]) for row in cur.fetchall()]
g.db.close()
return render_template('main.html', posts = posts)
@app.route('/logout')
def logout():
session.pop('logged_in', None)
flash('You were logged out')
return redirect(url_for('login'))
@app.route('/add', methods=['POST'])
@login_required
def add():
title = request.form['title']
post = request.form['post']
if not title or not post:
flash("All fields are required. Please try again.")
return redirect(url_for('main'))
else:
g.db = connect_db()
g.db.execute('INSERT INTO posts (title,post) values (?,?)',[request.form['title'], request.form['post']])
g.db.commit()
g.db.close()
flash('New entry was successfully posted!')
return redirect(url_for('main'))
if __name__ == '__main__':
app.run(debug=True)
|
[
"amillan@protonmail.com"
] |
amillan@protonmail.com
|
0f419fc26d221b44ddcc1eef9896202cd8814fdc
|
fb612ae638e6d25f30fd930b7e9ddd9af6553b6f
|
/multi_gpu_main.py
|
914efc2dbcddc6858d75781f918fcb0bda250d20
|
[] |
no_license
|
jxz542189/atrous_crf
|
5fe1c42fde71859cf51265aa2cce5e256b674145
|
ae5614cc77c772d2b1d1bc86aca02f2e5b4a2f7a
|
refs/heads/master
| 2020-04-11T17:50:04.244094
| 2018-12-16T06:55:22
| 2018-12-16T06:55:22
| 161,975,885
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,211
|
py
|
import tensorflow as tf
from tensorflow import data
from datetime import datetime
import multiprocessing
import json
import shutil
import os
from model.model import Model
from utils.data_processor import DataProcessor, get_seg_features
import numpy as np
from utils import tf_metrics
tf.reset_default_graph()
path = os.path.dirname(os.path.realpath(__file__))
print(path)
config_path = os.path.join(path, 'config')
params_path = os.path.join(config_path, 'params.json')
with open(params_path) as param:
params_dict = json.load(param)
config = tf.contrib.training.HParams(**params_dict)
os.environ['CUDA_VISIBLE_DEVICES'] = config.cuda_visible_devices
model_dir = 'trained_models/{}'.format(config.model_name)
total_steps = int((config.train_size/config.batch_size) * config.num_epochs)
test_steps = int((config.num_test_size/config.batch_size))
target_labels = ['O','B-PER','I-PER','B-ORG','I-ORG','I-LOC','B-LOC']
PAD_WORD = '<pad>'
UNK = '<unk>'
N_WORDS = 4467
EVAL_AFTER_SEC = 60
RESUME_TRAINING = False
VOCAB_LIST_FILE = os.path.join(path, "data", "vocab.txt")
label_dict = {}
for i, label in enumerate(target_labels):
label_dict[label] = i
word_dict = {}
k = 0
with open(VOCAB_LIST_FILE) as f:
for word in f:
word = word.strip()
word_dict[word] = k
k += 1
def input_fn(filename, mode=tf.estimator.ModeKeys.EVAL,
num_epochs=1,
batch_size=32):
labels, lines = DataProcessor().read_data(filename)
shuffle = True if mode == tf.estimator.ModeKeys.TRAIN else False
num_threads = multiprocessing.cpu_count()
buffer_size = 2 * batch_size + 1
print("")
print("* data input_fn:")
print("================")
print("Batch size: {}".format(batch_size))
print("Epoch Count: {}".format(num_epochs))
print("Mode: {}".format(mode))
print("Thread Count: {}".format(num_threads))
print("Shuffle: {}".format(shuffle))
print("================")
print("")
max_seq_length = config.max_seq_length
labels_id = []
for label in labels:
label_list = []
for ch in label:
if ch in label_dict:
label_list.append(label_dict[ch])
else:
label_list.append(label_dict['O'])
label_list = label_list[:max_seq_length] if len(label_list) >= max_seq_length else label_list + [label_dict['O']] * (
max_seq_length - len(label_list))
labels_id.append(np.array(label_list, dtype=np.int32))
words_id = []
segs_id = []
lengths = []
for line in lines:
seg_id = get_seg_features(line)
word_id = []
for word in line:
if word in word_dict:
word_id.append(word_dict[word])
else:
word_id.append(word_dict[UNK])
lengths.append(len(word_id))
seg_id = seg_id[:max_seq_length] if len(seg_id) >= max_seq_length else seg_id + [2] * (max_seq_length - len(seg_id))
word_id = word_id[:max_seq_length] if len(word_id) >= max_seq_length else word_id + [word_dict[PAD_WORD]] * (max_seq_length - len(word_id))
segs_id.append(np.array(seg_id, dtype=np.int32))
words_id.append(np.array(word_id, dtype=np.int32))
assert len(seg_id) == len(word_id)
assert len(words_id) == len(labels_id)
#words_id:(None,max_seq_length) segs_id:(None, max_seq_length) lengths:(None)
res = np.concatenate([np.array(words_id), np.array(segs_id), np.reshape(np.array(lengths), (-1, 1))], axis=-1)
dataset = tf.data.Dataset.from_tensor_slices(({"instances":res}, np.array(labels_id, dtype=np.int32)))
if shuffle:
dataset = dataset.shuffle(buffer_size)
dataset = dataset.batch(batch_size)
if mode == tf.estimator.ModeKeys.TRAIN:
dataset = dataset.repeat(None)
else:
dataset = dataset.repeat(1)
dataset = dataset.prefetch(buffer_size)
return dataset
def model_fn(features, labels, mode, params):
max_seq_length = params.max_seq_length
words_id, segs_id, lengths = tf.split(features['instances'], axis=-1, num_or_size_splits=[max_seq_length, max_seq_length, 1])
lengths = tf.squeeze(lengths)
model = Model(words_id, segs_id, labels, lengths, params)
if mode == tf.estimator.ModeKeys.PREDICT:
paths = model.decode(model.logits, model.lengths, model.trans)
predictions = {
'predictions': paths
}
export_outputs = {
'prediction': tf.estimator.export.PredictOutput(predictions)
}
return tf.estimator.EstimatorSpec(mode,
predictions=predictions,
export_outputs=export_outputs)
model.get_loss(model.logits, labels, lengths)
tf.summary.scalar('loss', model.loss)
if mode == tf.estimator.ModeKeys.TRAIN:
return tf.estimator.EstimatorSpec(mode=mode,
loss=model.loss,
train_op=model.train_op)
if mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(label_ids, logits, trans):
pred_ids = model.decode(model.logits, model.lengths, model.trans)
pred_ids = tf.cast(pred_ids, tf.int32)
precision = tf_metrics.precision(label_ids, pred_ids, len(target_labels), [1, 2, 3, 4, 5, 6])
recall = tf_metrics.recall(label_ids, pred_ids, len(target_labels), [1, 2, 3, 4, 5, 6])
f = tf_metrics.f1(label_ids, pred_ids, len(target_labels), [1, 2, 3, 4, 5, 6])
return {
"eval_precision": precision,
"eval_recall": recall,
"eval_f": f,
# "eval_loss": loss,
}
eval_metrics = metric_fn(labels, model.logits, model.trans)
return tf.estimator.EstimatorSpec(
mode=mode,
loss=model.loss,
eval_metric_ops=eval_metrics)
def create_estimator(run_config, hparams):
estimator = tf.estimator.Estimator(model_fn=model_fn,
params=hparams,
config=run_config)
print("")
print("Estimator Type: {}".format(type(estimator)))
print("")
return estimator
def serving_input_fn():
receiver_tensor = {
'instances': tf.placeholder(tf.int32, [None, None])
}
features = {
key: tensor
for key, tensor in receiver_tensor.items()
}
return tf.estimator.export.ServingInputReceiver(
features, receiver_tensor)
if __name__ == '__main__':
    # ============== Alternative training approach ===============
if not RESUME_TRAINING:
print("Removing previous artifacts...")
shutil.rmtree(model_dir, ignore_errors=True)
else:
print("Resuming training...")
distribution = tf.contrib.distribute.MirroredStrategy(num_gpus=4)
# run_config = tf.estimator.RunConfig(log_step_count_steps=config.train['log_step_count_steps'],
# tf_random_seed=config.train['tf_random_seed'],
# model_dir=model_dir,
# )
run_config = tf.estimator.RunConfig(log_step_count_steps=config.log_step_count_steps,
tf_random_seed=config.tf_random_seed,
model_dir=model_dir,
session_config=tf.ConfigProto(allow_soft_placement=True,
log_device_placement=True),
train_distribute=distribution)
estimator = create_estimator(run_config, config)
train_spec = tf.estimator.TrainSpec(
input_fn=lambda: input_fn(os.path.join(path, 'data', 'example.train'),
mode=tf.estimator.ModeKeys.TRAIN,
num_epochs=config.num_epochs,
batch_size=config.batch_size),
max_steps=total_steps,
hooks=None
)
eval_spec = tf.estimator.EvalSpec(
input_fn=lambda: input_fn(os.path.join(path, 'data', 'example.dev'),
mode=tf.estimator.ModeKeys.EVAL,
batch_size=config.batch_size),
exporters=[tf.estimator.LatestExporter(name="predict",
serving_input_receiver_fn=serving_input_fn,
exports_to_keep=1,
as_text=True)],
steps=test_steps,
throttle_secs=EVAL_AFTER_SEC
)
# eval_spec = tf.estimator.EvalSpec(
# input_fn=lambda: input_fn(os.path.join(path, 'data', 'example.dev'),
# mode=tf.estimator.ModeKeys.EVAL,
# batch_size=config.batch_size),
# steps=None,
# throttle_secs=EVAL_AFTER_SEC
# )
tf.estimator.train_and_evaluate(estimator=estimator,
train_spec=train_spec,
eval_spec=eval_spec)
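# For reference, a minimal sketch of what config/params.json is assumed to contain,
# based only on the keys read above (all values are illustrative, not from the repo):
# {
#   "cuda_visible_devices": "0",
#   "model_name": "atrous_crf",
#   "train_size": 20864,
#   "num_test_size": 4636,
#   "batch_size": 32,
#   "num_epochs": 10,
#   "max_seq_length": 100,
#   "log_step_count_steps": 100,
#   "tf_random_seed": 42
# }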
|
[
"1318394945@qq.com"
] |
1318394945@qq.com
|
8fa80e0d3e25278dde5c59bfe9d1f68560057a19
|
48f297637ae79f0242e4cbc0944b68e5ae331a13
|
/0273_integer-to-english-words.py
|
f42bd331af3aa9d0b62aa78d2f8203bd3f4ed008
|
[] |
no_license
|
bolatov/leetcode
|
a1ff46be79ccb46e7c8e8b754ef35579ce4bd863
|
8da24f993535cf22ae1af5b8a98fd9dada22b498
|
refs/heads/master
| 2020-04-17T08:59:23.516987
| 2019-12-19T14:50:15
| 2019-12-19T14:50:15
| 65,895,687
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,631
|
py
|
digits = ['', 'One', 'Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine']
teens = ['Ten', 'Eleven', 'Twelve', 'Thirteen', 'Fourteen', 'Fifteen', 'Sixteen', 'Seventeen', 'Eighteen', 'Nineteen']
tens = ['', '', 'Twenty', 'Thirty', 'Forty', 'Fifty', 'Sixty', 'Seventy', 'Eighty', 'Ninety']
class Solution:
def toWord(self, num):
tokens = []
if num >= 1_000_000_000:
quotient, num = divmod(num, 1_000_000_000)
tokens.append(digits[quotient])
tokens.append('Billion')
if num >= 1_000_000:
quotient, num = divmod(num, 1_000_000)
_tokens = self.toWord(quotient)
tokens.extend(_tokens)
tokens.append('Million')
if num >= 1000:
quotient, num = divmod(num, 1000)
_tokens = self.toWord(quotient)
tokens.extend(_tokens)
tokens.append('Thousand')
if num >= 100:
quotient, num = divmod(num, 100)
tokens.append(digits[quotient])
tokens.append('Hundred')
if num >= 20:
quotient, num = divmod(num, 10)
tokens.append(tens[quotient])
tokens.append(digits[num])
elif num >= 10:
quotient, num = divmod(num, 10)
tokens.append(teens[num])
else:
tokens.append(digits[num])
return tokens
def numberToWords(self, num: int) -> str:
if num == 0:
return 'Zero'
tokens = self.toWord(num)
return ' '.join([t for t in tokens if t])
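# Quick sanity checks (illustrative, not from the original submission):
#   Solution().numberToWords(0)      -> 'Zero'
#   Solution().numberToWords(12345)  -> 'Twelve Thousand Three Hundred Forty Five'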
|
[
"noreply@github.com"
] |
bolatov.noreply@github.com
|
113ed67923dd0c3f1bb09d16b1bbcf84ce4e7f7e
|
4a6b4d54c4a1e99cb46c704a6c9ef24d9725317e
|
/enemies.py
|
509eac7b1b5c8b2cb70f5db4c430582f08d2a6fd
|
[] |
no_license
|
NCavaliere1991/space-invaders
|
c3ca38911cafed6ddedb06d3d85c095f589c40b8
|
c16de1f4943622399db25b0eac86a4b852895ffb
|
refs/heads/main
| 2023-04-03T08:00:30.840478
| 2021-04-09T03:33:19
| 2021-04-09T03:33:19
| 340,783,375
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,162
|
py
|
from turtle import Turtle
import random
class Enemies(Turtle):
def __init__(self):
super().__init__()
self.enemy_list = []
self.make_enemies()
self.moving_right = True
self.enemy_bullet = None
self.move_speed = 0.1
def make_enemies(self):
x, y = -250, 250
for i in range(5):
for j in range(11):
enemy = Turtle()
enemy.shape("sprites/New Piskel.gif")
enemy.penup()
enemy.goto(x, y)
self.enemy_list.append(enemy)
x += 40
x -= 440
y -= 30
def move(self):
sorted_list = sorted(self.enemy_list, key=lambda x: x.xcor(), reverse=True)
last_enemy = sorted_list[0]
first_enemy = sorted_list[-1]
if self.moving_right:
if last_enemy.xcor() < 270:
for enemy in self.enemy_list:
enemy.forward(3)
else:
for enemy in self.enemy_list:
enemy.sety(enemy.ycor() - 3)
self.moving_right = False
else:
if first_enemy.xcor() > -275:
for enemy in self.enemy_list:
enemy.backward(3)
else:
for enemy in self.enemy_list:
enemy.sety(enemy.ycor() - 3)
self.moving_right = True
def kill(self, dead):
dead.hideturtle()
self.enemy_list.remove(dead)
def enemy_shoot(self):
if not self.enemy_bullet:
self.enemy_bullet = Turtle(shape='square')
self.enemy_bullet.color('red')
self.enemy_bullet.shapesize(stretch_len=.5, stretch_wid=.1)
self.enemy_bullet.setheading(270)
self.enemy_bullet.penup()
random_enemy = random.choice(self.enemy_list)
self.enemy_bullet.goto(random_enemy.xcor(), random_enemy.ycor())
def enemy_bullet_move(self):
self.enemy_bullet.forward(15)
def destroy_enemy_bullet(self):
self.enemy_bullet.goto(2000, 2000)
self.enemy_bullet = None
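# Rough usage sketch (an assumption; the Screen setup and game loop live elsewhere in the repo):
#   enemies = Enemies()
#   each frame: enemies.move(), occasionally enemies.enemy_shoot(),
#   and while enemies.enemy_bullet exists: enemies.enemy_bullet_move()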
|
[
"noreply@github.com"
] |
NCavaliere1991.noreply@github.com
|
3b5d2409423e635de41b65955729f39ee28a7878
|
7367236c0b9cc395f041867d78ae2f576e772274
|
/flaskapp.py
|
03d55dc5ec613e76182efe6ecb88488bbb2e70f3
|
[
"Apache-2.0"
] |
permissive
|
stackparty/hiring-engineers
|
a0a21e889bae0552f4810900803d382c8dbccc6e
|
f5bb4dc88d0d3c6ad7611c6eda0de1123d1c7170
|
refs/heads/master
| 2021-04-15T13:25:39.657721
| 2018-03-24T12:37:09
| 2018-03-24T12:37:09
| 126,187,967
| 0
| 0
|
Apache-2.0
| 2018-03-21T14:04:38
| 2018-03-21T14:04:37
| null |
UTF-8
|
Python
| false
| false
| 973
|
py
|
from flask import Flask
import logging
import sys
# imported ddtrace modules
from ddtrace import tracer
from ddtrace.contrib.flask import TraceMiddleware
tracer.configure(hostname='127.0.0.1') # configured for localhost as lazy
# Have flask use stdout as the logger
main_logger = logging.getLogger()
main_logger.setLevel(logging.DEBUG)
c = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
c.setFormatter(formatter)
main_logger.addHandler(c)
app = Flask(__name__)
traced_app = TraceMiddleware(app, tracer, service='AndyTestApp') ## added this with an app name
@app.route('/')
def api_entry():
return 'Entrypoint to the Application'
@app.route('/api/apm')
def apm_endpoint():
return 'Getting APM Started'
@app.route('/api/trace')
def trace_endpoint():
return 'Posting Traces'
if __name__ == '__main__':
    app.run(port=9999) ## changed this port number due to a conflict with another port
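# Rough local run (an assumption, not from the original repo docs):
#   python flaskapp.py                      # serves on http://127.0.0.1:9999
#   curl http://127.0.0.1:9999/api/apm
# with the Datadog agent listening on 127.0.0.1 so ddtrace can submit the traces.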
|
[
"noreply@github.com"
] |
stackparty.noreply@github.com
|
97be4605ce53217537e3575cdcfee5cde7088155
|
b7eed26cf8a0042a61f555eed1e9bf0a3227d490
|
/students/synowiec_krzysztof/lesson_02_flow_control/the_number_of_zeros.py
|
1e52b6e558a734c741f88e015c19612c4194c9bb
|
[] |
no_license
|
jedzej/tietopythontraining-basic
|
e8f1ac5bee5094c608a2584ab19ba14060c36dbe
|
a68fa29ce11942cd7de9c6bbea08fef5541afa0f
|
refs/heads/master
| 2021-05-11T11:10:05.110242
| 2018-08-20T12:34:55
| 2018-08-20T12:34:55
| 118,122,178
| 14
| 84
| null | 2018-08-24T15:53:04
| 2018-01-19T12:23:02
|
Python
|
UTF-8
|
Python
| false
| false
| 209
|
py
|
def main():
    n = int(input())
    number_of_zeros = 0
    for _ in range(n):
        if int(input()) == 0:
            number_of_zeros += 1
    print(number_of_zeros)
if __name__ == '__main__':
main()
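# Example run (illustrative): given the input lines "4", "0", "7", "0", "3",
# the loop reads four values after n=4 and the program prints 2.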
|
[
"synulewar@gmail.com"
] |
synulewar@gmail.com
|
44c0e522433573ee5bf9d1aa518fc2bca5ad04d7
|
9fb51d04354b18144234ce11e0aec3f2b62ebe5f
|
/pages/migrations/0004_auto_20200407_1910.py
|
908fa5bc9e60a53f38cdbbbaf5cb51371eaeb4fc
|
[] |
no_license
|
madrix01/Posty
|
ce64797c80f94ceb35c009e36ebc6765988fa767
|
70e6a0f1a019176273df3ca4df58acd678fbbdc3
|
refs/heads/master
| 2021-05-25T16:07:52.402938
| 2020-04-07T14:40:05
| 2020-04-07T14:40:05
| 253,819,679
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 537
|
py
|
# Generated by Django 3.0.4 on 2020-04-07 13:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pages', '0003_auto_20200407_1655'),
]
operations = [
migrations.AlterField(
model_name='post',
name='description',
field=models.TextField(max_length=69),
),
migrations.AlterField(
model_name='post',
name='title',
field=models.TextField(max_length=20),
),
]
|
[
"51414879+madrix01@users.noreply.github.com"
] |
51414879+madrix01@users.noreply.github.com
|
c994f3b51493b3cbea3626fe6ac85e3aea76467f
|
4aa7a4d0525095725eb99843c83827ba4806ceb1
|
/tf/tf20_rnn1.py
|
bb6180d40fd285e1019eb214504f5f0962ba6ca1
|
[] |
no_license
|
seonukim/Study
|
65a70f5bdfad68f643abc3086d5c7484bb2439d4
|
a5f2538f9ae8b5fc93b5149dd51704e8881f0a80
|
refs/heads/master
| 2022-12-04T17:04:31.489771
| 2020-08-21T00:35:15
| 2020-08-21T00:35:15
| 260,144,755
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,536
|
py
|
## Tensorflow - RNN
import numpy as np
import tensorflow as tf
# data : hihello
idx2char = ['e', 'h', 'i', 'l', 'o']
_data = np.array([['h', 'i', 'h', 'e', 'l', 'l', 'o']], dtype = np.str).reshape(-1, 1)
print(_data)
print(_data.shape) # (7, 1)
print(type(_data)) # <class 'numpy.ndarray'>
# onehotencoding
from sklearn.preprocessing import OneHotEncoder
enc = OneHotEncoder()
enc.fit(_data)
_data = enc.transform(_data).toarray()
print("=" * 40)
print(_data)
print(type(_data)) # <class 'numpy.ndarray'>
print(_data.dtype) # float64
'''
Of "h i h e l l o",
1) h through l is taken as x
2) i through o is taken as y.
'''
x_data = _data[:6, ] # hihell
y_data = _data[1:, ] # ihello
print("=" * 40)
print("=" * 40)
print(f'x_data : \n{x_data}')
print("=" * 40)
print(f'y_data : \n{y_data}')
print("=" * 40)
'''
x_data :
[[0. 1. 0. 0. 0.]
[0. 0. 1. 0. 0.]
[0. 1. 0. 0. 0.]
[1. 0. 0. 0. 0.]
[0. 0. 0. 1. 0.]
[0. 0. 0. 1. 0.]]
========================================
y_data :
[[0. 0. 1. 0. 0.]
[0. 1. 0. 0. 0.]
[1. 0. 0. 0. 0.]
[0. 0. 0. 1. 0.]
[0. 0. 0. 1. 0.]
[0. 0. 0. 0. 1.]]
'''
y_data = np.argmax(y_data, axis = 1)
print("=" * 40)
print(f'y_data_argmax : \n{y_data}')
print(y_data.shape)
'''
y_data_argmax :
[2 1 0 3 3 4]
(6,)
'''
x_data = x_data.reshape(1, 6, 5)
y_data = y_data.reshape(1, 6)
print(f'x_data.shape : {x_data.shape}') # x_data.shape : (1, 6, 5)
print(f'y_data.shape : {y_data.shape}') # y_data.shape : (1, 6)
sequence_length = 6
input_dim = 5
output = 5
batch_size = 1 # total number of rows
X = tf.compat.v1.placeholder(tf.float32, shape = (None, sequence_length, input_dim))
Y = tf.compat.v1.placeholder(tf.int64, shape = (None, sequence_length))
print(X) # Tensor("Placeholder:0", shape=(?, 6, 5), dtype=float32)
print(Y) # Tensor("Placeholder_1:0", shape=(?, 6), dtype=int64)
# 2. Build the model
# Keras style
# model = tf.keras.models.Sequential([
# tf.keras.layers.LSTM(output, input_shape = (6, 5))
# ])
# TensorFlow 1.14
# _lstm = tf.nn.rnn_cell.BasicLSTMCell(output) # an RNN runs this computation twice
_lstm = tf.keras.layers.LSTMCell(output)
hypothesis, _states = tf.nn.dynamic_rnn(_lstm, X, dtype = tf.float32) # dynamic RNN
# model.add(LSTM): the two lines above are equivalent to Keras's model.add(LSTM)
print(hypothesis) # Tensor("rnn/transpose_1:0", shape=(?, 6, 5), dtype=float32)
# 3. Compile
# set up the loss: the LSTM loss (hypothesis vs. y)
weights = tf.ones([batch_size, sequence_length]) # same shape as Y
sequence_loss = tf.contrib.seq2seq.sequence_loss(
    logits = hypothesis, targets = Y, weights = weights) # targets must be int32 or int64
cost = tf.compat.v1.reduce_mean(sequence_loss)
train = tf.compat.v1.train.AdamOptimizer(learning_rate = 0.1).minimize(cost)
prediction = tf.math.argmax(hypothesis, axis = 2)
# 3-2. Train
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
for i in range(401):
loss, _ = sess.run([cost, train], feed_dict = {X: x_data, Y: y_data})
result = sess.run(prediction, feed_dict = {X: x_data})
print(f'\nEpoch : {i}, Prediction : {result}, true Y : {y_data}')
    result_str = [idx2char[c] for c in np.squeeze(result)] # np.squeeze(): look this up
    print(f"\nPrediction : {''.join(result_str)}") # ''.join(): joins the characters; look this up
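# Expected behavior (based on the data setup above): as training progresses the
# predicted string should converge to "ihello", i.e. match y_data.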
|
[
"92.seoonooo@gmail.com"
] |
92.seoonooo@gmail.com
|
0777e6b1e3a96ca28ed9d2a1e5077e6ed5062469
|
8eaf88fb487e075ecbb66a23a92d269c10f40a6f
|
/DAndB/d_and_b/d_and_b.py
|
cbfb61fa2d9b7f9372ccfe95c2fba62a94cdd0a4
|
[] |
no_license
|
smallliang/Boxereum
|
933135964ed93f1d0c8186dcb64fc93e28a0e443
|
340e848bb0329bc04c45d53f46332ccf5420c1bb
|
refs/heads/master
| 2020-03-22T17:01:32.231180
| 2018-07-20T09:27:07
| 2018-07-20T09:27:07
| 140,367,420
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,110
|
py
|
# -*- coding: UTF-8 -*-
import json, time
from .game import *
from .player import *
class DotsAndBoxes:
def __init__(self, window_controller=None):
self._current_game = None
self._history = None
self._current_step = None
self._red_player = None
self._blue_player = None
self._window_controller = window_controller
self._update_time = time.time()
@property
def current_game(self):
return self._current_game
@property
def show_board(self):
return self._current_game.board.pieces
@property
def history(self):
return self._history.copy()
@property
def last_move(self):
if (self._current_game == None or self._current_step == 0):
return None
return self._history[self._current_step-1]
@property
def red_player(self):
return self._red_player
@red_player.setter
def red_player(self, value):
if (value.color != Color.red):
raise DBError("Invalid players", value)
if (self._current_game != None):
if (not self._current_game.is_end):
raise DBError("Current game is not over")
self._red_player = value
@property
def blue_player(self):
return self._blue_player
@blue_player.setter
def blue_player(self, value):
if (value.color != Color.blue):
raise DBError("Invalid players", value)
if (self._current_game != None):
if (not self._current_game.is_end):
raise DBError("Current game is not over")
self._blue_player = value
@property
def current_player(self):
return (self._red_player if self._current_game.current_player_color == Color.red else self._blue_player)
@property
def current_step(self):
        # int: returns the current step number
return self._current_step
    # Not a @property: it takes an argument, so it has to stay a regular method.
    def need_update(self, last_update_time):
return self._update_time > last_update_time
def _update(self):
self._update_time = time.time()
if (self._window_controller != None):
self._window_controller.update()
if self.current_game.is_end:
self.red_player._game_is_over(self.current_game.winner == Color.red)
self.blue_player._game_is_over(self.current_game.winner == Color.blue)
else:
print(isinstance(self.red_player, AIPlayer))
if isinstance(self.red_player, AIPlayer):
self.red_player.last_move(self.last_move, self._current_game.board, self.current_player.color)
if isinstance(self.blue_player, AIPlayer):
self.blue_player.last_move(self.last_move, self._current_game.board, self.current_player.color)
def new_game(self):
if (self._current_game != None):
if (not self._current_game.is_end):
raise DBError("Current game is not over")
if (self._red_player == None or self._blue_player == None):
raise DBError("Lack of player")
self._new_game()
self._update()
def _new_game(self):
self._current_game = Game(self._red_player, self._blue_player)
self._history = []
self._current_step = 0
def end_game(self):
if (self._current_game == None):
raise DBError("Do not have current game")
self._current_game = None
self._history = None
self._current_step = None
def _move(self, piece):
self._current_game.move(piece)
        if (self._current_step < len(self._history)): # when a new move is played directly from an earlier step in the history (checking first avoids indexing _history out of range)
            if (piece != self._history[self._current_step]): # if the new move differs from the next move recorded in the history
                while (self._current_step < len(self._history)): # first delete the data after this historical step
self._history.pop()
self._history.append(piece)
else:
self._history.append(piece)
self._current_step = self._current_step + 1
def move(self, piece):
if (self._current_game == None):
raise DBError("Do not have current game")
if (piece.color != self._current_game.current_player_color):
raise MoveError("Player color is wrong")
self._move(piece)
self._update()
def move_with_str(self, input_str):
(color, user_coordinate) = self._str_to_coordinate(input_str)
if (color != self._current_game.current_player_color):
raise MoveError("Player color is wrong")
self.move(Piece(color, user_coordinate))
def _str_to_coordinate(self, input_str):
color = x = y = type = None
try:
if (input_str[0] == 'r' or input_str[0] == 'R'):
color = Color.red
elif (input_str[0] == 'b' or input_str[0] == 'B'):
color = Color.blue
else:
raise ValueError()
if (input_str[2] == 'a' or input_str[2] == 'A'):
x = 'a'
elif (input_str[2] == 'b' or input_str[2] == 'B'):
x = 'b'
elif (input_str[2] == 'c' or input_str[2] == 'C'):
x = 'c'
elif (input_str[2] == 'd' or input_str[2] == 'D'):
x = 'd'
elif (input_str[2] == 'e' or input_str[2] == 'E'):
x = 'e'
elif (input_str[2] == 'f' or input_str[2] == 'F'):
x = 'f'
else:
raise ValueError()
y = int(input_str[3])
if (y < 0 or y > 6):
raise ValueError()
if (input_str[5] == 'v' or input_str[5] == 'V'):
type = 'v'
elif (input_str[5] == 'h' or input_str[5] == 'H'):
type = 'h'
else:
raise ValueError
except (IndexError, ValueError, TypeError):
raise DBError("Invalid input", input_str)
return (color, (x, str(y), type))
def _back(self):
self._current_game.back()
self._current_step = self._current_step - 1
def back(self):
if (self._current_game == None):
raise DBError("Do not have current game")
if (self._current_step == 0):
raise DBError("Do not have step")
self._back()
self._update()
def turn_to_step(self, step_num):
if (self._current_game == None):
raise DBError("Do not have current game")
if (step_num < 0 or step_num > len(self._history) or step_num == self._current_step):
raise DBError("Invalid step num")
while (self._current_step > step_num):
self._back()
while (self._current_step < step_num):
self._move(self._history[self._current_step])
self._update()
def _data_as_dict(self):
if (self._current_game == None):
raise DBError("Do not have current game")
if (self._current_step == 0):
raise DBError("Do not have step data")
pieces = []
for piece in self._current_game.history:
piece_dict = {"timestamp": piece.datetime.timestamp(),
"player": "r" if piece.color == Color.red else "b",
"coordinate": "".join(piece.user_coordinate)}
if piece.annotation != "":
piece_dict["annotation"] = piece.annotation
pieces.append(piece_dict)
dict = {"R": self._red_player.name,
"B": self._blue_player.name,
"is_end": self._current_game.is_end,
"timestamp": self._current_game.datetime.timestamp(),
"pieces": pieces}
if (self._current_game.is_end):
dict["winner"] = "R" if self._current_game.winner == Color.red else "B"
return dict
def save_to_file(self, file_path, mode=1, event=None):
dict = self._data_as_dict()
#'''
        if (mode == 0): # very dumb mode
if (not self._current_game.is_end):
raise DBError("Current game is not over")
if (event == None):
raise DBError("Invalid event")
pieces_arr = []
for piece in self._current_game.history:
piece_str = ""
if (piece.color == Color.red):
piece_str = piece_str + "r"
else:
piece_str = piece_str + "b"
piece_str = piece_str + "(" + "".join(piece.user_coordinate[0:2]) + "," + "".join(piece.user_coordinate[2]) + ")"
piece_dict = {"piece": piece_str}
if piece.annotation != "":
piece_dict["annotation"] = piece.annotation
pieces_arr.append(piece_dict)
dict = {"R": self._red_player.name,
"B": self._blue_player.name,
"winner": "R" if self._current_game.winner == Color.red else "B",
"RScore": self._red_player.score,
"BScore": self._blue_player.score,
"Date": self._current_game.datetime.strftime("%Y-%m-%d"),
"Event": event,
"game": pieces_arr}
file_path = file_path + "DB:" + self._red_player.name + " vs " + self._blue_player.name + ":"
file_path = file_path + ("先手胜" if self._current_game.winner == Color.red else "后手胜")
file_path = file_path + ".txt"#'''
f = open(file_path, 'w')
f.write(json.dumps(dict))
f.close()
return True
def load_from_file(self, file_path, mode=1):
f = open(file_path, 'r')
file_data = f.read()
f.close()
        if (mode == 0): # very dumb mode
data = json.loads(file_data)
self._red_player = HumanPlayer(Color.red, data['R'], self)
self._blue_player = HumanPlayer(Color.blue, data['B'], self)
self._new_game()
for step in data['game']:
self.move_with_str(step["piece"])
else:
data = json.loads(file_data)
self._red_player = HumanPlayer(Color.red, data['R'], self)
self._blue_player = HumanPlayer(Color.blue, data['B'], self)
self._new_game()
for step_data in data['pieces']:
piece = Piece(Color.red if step_data['player'] == 'r' else Color.blue, (step_data['coordinate'][0], step_data['coordinate'][1], step_data['coordinate'][2]))
self.move(piece)
def set_piece_annotation(self, step_num, annotation):
if (self._current_game == None):
raise DBError("Do not have current game")
if (step_num < 0 or step_num > len(self._history)):
raise DBError("Invalid step num")
self._history[step_num].annotation = annotation
class DBError(DBException):
def __init__(self, *args, **kwargs):
super(DBError, self).__init__(args, kwargs)
|
[
"gafei_han@163.com"
] |
gafei_han@163.com
|
91466eeda4cf8c699e0377984a6ca5727ab863cd
|
b2d9d48585748adaacda73c59eb8de129f519632
|
/vxi11cmd_custom.py
|
ddefc43e0d7b995168a2ed6c45ad0d0366b862b8
|
[] |
no_license
|
W5Jinlon/mo-han-toolbox
|
aff18f6f30abe4c3852bbde2fc0b876c033383e1
|
b386864cd8c3dd52113742c19e5d53774d7b46da
|
refs/heads/master
| 2022-07-17T01:22:20.939722
| 2020-05-21T02:43:05
| 2020-05-21T02:43:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,778
|
py
|
#!/usr/bin/env python3
# encoding=utf8
import vxi11cmd
class VXI11CmdCustom(vxi11cmd.VXI11Cmd):
DATA_ITEMS_TRIPLE = ['urms', 'umn', 'irms', 's', 'p', 'lamb', 'fu', ]
DATA_ITEMS_SIGMA = ['urms', 'umn', 'irms', 's', 'p', 'lamb', ]
DATA_ITEMS = [i + ',1' for i in DATA_ITEMS_TRIPLE] + \
[i + ',2' for i in DATA_ITEMS_TRIPLE] + \
[i + ',3' for i in DATA_ITEMS_TRIPLE] + \
[i + ',sigma' for i in DATA_ITEMS_SIGMA]
DATA_ITEMS_NUM = len(DATA_ITEMS)
DATA_ITEMS_CMD = ':num:num {}'.format(DATA_ITEMS_NUM)
for i in range(DATA_ITEMS_NUM):
DATA_ITEMS_CMD += ';:num:item{} {}'.format(i + 1, DATA_ITEMS[i])
# def do_cmd(self, line=None):
    #     command = line or input('Command: ')
# r = self.send_command(command)
# if r:
# if ';' in r:
# r = '\r\n'.join(r.split(';'))
# elif ',' in r:
# r = '\r\n'.join(r.split(','))
# print(r)
# return r
def do_setdata(self, *args):
self.do_remote()
self.do_cmd(':wir v3a3')
self.do_cmd(self.DATA_ITEMS_CMD)
def do_getdata(self, *args):
str2float = self.str2float
data = self.do_cmd(':num:val?')
return ','.join([str(str2float(s)) for s in data.split(',')])
def do_ct(self, line=None):
msg = ':scal:ct'
if line:
# msg = '{0} {1};{0}?'.format(msg, line)
msg += ' ' + line
else:
msg += '?'
return self.do_cmd(msg)
def do_vt(self, line=None):
msg = ':scal:vt'
if line:
# msg = '{0} {1};{0}?'.format(msg, line)
            msg += ' ' + line  # bug fix: the original "msg = ' ' + line" dropped the ':scal:vt' prefix (cf. do_ct)
else:
msg += '?'
return self.do_cmd(msg)
|
[
"zmhungrown@gmail.com"
] |
zmhungrown@gmail.com
|
bf74e55be99a9dee4af75cc81b3b169667e7704f
|
e12070bd51f1b2b3ba308b74325d4ff3fc3639b4
|
/prep_data.py
|
442d2531c29e2486aea724f8fed5c0d85c9e7ed1
|
[
"Apache-2.0"
] |
permissive
|
achbogga/achunet
|
e058c45d105879a6290b16b09f8e09ff36bd260e
|
e8b27b0840929e823a5109d43302afbf721d41d2
|
refs/heads/master
| 2021-01-19T01:54:57.750169
| 2017-04-05T02:41:30
| 2017-04-05T02:41:30
| 87,256,793
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,050
|
py
|
import numpy as np
from scipy.ndimage.interpolation import zoom
def downsample_video(video, output_frames):
from scipy.ndimage.interpolation import zoom
output_vid = zoom(video, [(float(output_frames)/video.shape[0]),1,1,1])
return output_vid
def read_video_to_frames(filename = 'train_001.avi', output_resolution = (150,150)):
import imageio
vid = imageio.get_reader(filename, 'ffmpeg')
vid2 = imageio.get_reader(filename, 'ffmpeg')
frames = []
no_of_frames = vid.get_length()
input_resolution = np.array(vid2.get_data(0)).shape
#frame_jumpsize = int(no_of_frames/n_frames)
#print frame_jumpsize
for i in range(0,no_of_frames,1):
frames.append(zoom(np.array(vid.get_data(i)), [(output_resolution[0]/float(input_resolution[0])),(output_resolution[1]/float(input_resolution[1])),1]))
frames = np.array(frames)
    print(frames.shape)
return frames
# returns a numpy array pretty much ready for training as X_train
def get_video_ready_for_training(i=0, root = 'Left', output_resolution = (150,150)):
import os
import cv2
face_detection_xml ="opencv2_data/haarcascades/haarcascade_frontalface_default.xml"
faceDetectClassifier = cv2.CascadeClassifier(face_detection_xml)
samples = os.listdir(root)
Frames = read_video_to_frames(filename = os.path.join(root, samples[i]), output_resolution = output_resolution)
Frames = downsample_video(Frames, 2400)
x_train = np.zeros(shape = Frames.shape)
i = 0
for frame in Frames:
facePoints = faceDetectClassifier.detectMultiScale(frame[:,:,0])
if (len(facePoints)<1):
x_train[i] = frame
i += 1
continue
x,y,w,h = facePoints[0]
cropped = frame[y:y+h, x:x+w, :]
face = zoom(cropped, [float(output_resolution[0])/cropped.shape[0],float(output_resolution[1])/cropped.shape[1], 1])
#face_3d = face[:, :, None] * np.ones(3, dtype=int)[None, None, :]
x_train[i] = face
i += 1
x_train /= 255
return x_train
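# --- Hedged usage sketch (editor addition, not part of the original file) ---
# Assumes a folder named 'Left' (the function's default) containing at least one
# .avi clip, plus the Haar cascade file at the hard-coded path above.
if __name__ == '__main__':
    x_train = get_video_ready_for_training(i=0, root='Left', output_resolution=(150, 150))
    print(x_train.shape)  # expected: (2400, 150, 150, 3), pixel values scaled to [0, 1]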
|
[
"achbogga@indiana.edu"
] |
achbogga@indiana.edu
|
3acd554dc2ba13468602c99b2c04b21b6bdeb1f8
|
f3b903183d0438b293817ce564e9772dc707fd32
|
/roadmap/migrations/0001_initial.py
|
2fdbdd48f97ac8587b8eea6dfb52287b78196c62
|
[] |
no_license
|
RathiRohit/Collaborative-Education-Platform
|
f40bd1337224f9f7383855b4a806120b9f8abe85
|
4e16ce61532ff638bc17256544b71a83bc8de6e4
|
refs/heads/master
| 2020-03-13T15:31:42.162510
| 2018-04-24T14:55:05
| 2018-04-24T14:55:05
| 131,178,726
| 1
| 0
| null | 2018-04-26T15:52:14
| 2018-04-26T15:52:14
| null |
UTF-8
|
Python
| false
| false
| 809
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-04-15 14:52
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Roadmap',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('r_title', models.CharField(max_length=50)),
('author', models.CharField(max_length=50)),
('upvotes', models.IntegerField(default=0)),
('downvotes', models.IntegerField(default=0)),
('published_date', models.DateTimeField(blank=True)),
],
),
]
|
[
"onkarsathe27@gmail.com"
] |
onkarsathe27@gmail.com
|
1b36a9a66459082bdd8db0595a6759ab6a28e64f
|
5b2c9386534dab0afc006ae44f3f19b527c3dae5
|
/source/classifiers/ir_relevance.py
|
f777104243f8557988cf4dc2075f3d792c3147e0
|
[] |
no_license
|
saurabhc123/unsupervised
|
d1cd2820fa7ee6269acb6d176e99799475337947
|
6a36fcfacea9d788e5274b4cde763104d2231789
|
refs/heads/master
| 2020-03-18T15:38:53.519559
| 2018-07-30T02:16:47
| 2018-07-30T02:16:47
| 134,920,402
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,553
|
py
|
from tensorflow.contrib.rnn import BasicLSTMCell
from tensorflow.python.ops.rnn import bidirectional_dynamic_rnn as bi_rnn
import time
from torch.utils.data import DataLoader
from classifiers.prepare_data import *
# Hyperparameter
from data_processors.noise_remover import NoiseRemover
from data_processors.tweet_basic_dataset import TweetBasicDataSet
from data_processors.tweet_dataset import TweetDataSet
from data_processors.tweet_lda_dataset import TweetLDADataSet
from entities.tweet import Tweet
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import confusion_matrix
from parameters import Parameters
import os
import csv
MAX_DOCUMENT_LENGTH = 10
EMBEDDING_SIZE = 2
HIDDEN_SIZE = 4
ATTENTION_SIZE = 3
lr = 1e-3
BATCH_SIZE = 256
KEEP_PROB = 0.5
LAMBDA = 0.0001
MAX_LABEL = 2
epochs = 100
#dbpedia = tf.contrib.learn.datasets.load_dataset('dbpedia')
parameters = Parameters()
parameters.add_parameter("MAX_LABEL",MAX_LABEL)
# load data
x_train, y_train = ([],[])#load_data("data/classification_data/Training Data/train.csv", names=["Label", "clean_text", "tweet_text"])
x_test, y_test = ([],[])#load_data("data/classification_data/Training Data/test.csv")
datafolder = 'data/classification_data/Training Data/1045'
exports_folder = 'data/exports/'
training_fileName = 'training_large_top50_clusters.csv'
test_fileName = 'test.csv'
parameters.add_parameter("Data Folder", datafolder)
parameters.add_parameter("Training filename", training_fileName)
parameters.add_parameter("Test filename", test_fileName)
pre_processor = NoiseRemover()
training_dataset = TweetBasicDataSet(datafolder, training_fileName, transformer=pre_processor)
training_dataloader = DataLoader(training_dataset, batch_size=len(training_dataset.data))
test_dataset = TweetBasicDataSet(datafolder, test_fileName, transformer=pre_processor)
test_dataloader = DataLoader(test_dataset, batch_size=len(test_dataset.data))
training_tweets = []
tweets_dict = set()
sentence_vectors = []
test_tweets = []
X_TEST = []
TEST_TWEETS = []
Y_TEST = []
def to_one_hot(y, n_class):
return np.eye(n_class)[y]
for data in training_dataloader:
x_train = data['clean_text']
y_train = to_one_hot(data['cluster_label'],MAX_LABEL)
break
for data in test_dataloader:
X_TEST = x_test = data['clean_text']
TEST_TWEETS = test_tweets = data['text']
Y_TEST = data['cluster_label']
y_test = to_one_hot(data['cluster_label'],MAX_LABEL)
break
# data preprocessing
# x_train, x_test, vocab, vocab_size = \
# data_preprocessing(x_train, x_test, MAX_DOCUMENT_LENGTH)
# print(vocab_size)
#Create set out of (only positive) training data
positive_set = set()
for training_instance in x_train:
#training_instance = training_instance.tolist()
if training_instance not in positive_set:
positive_set.add(training_instance)
#If the tweet words are in the training set, mark the class as positive, else negative
predictions = []
for test_instance in x_test:
if test_instance in positive_set:
predictions.append(1)
else:
predictions.append(0)
f1_predictions = np.array(predictions)
#print(f1_predictions.shape)
f1_truelabels = np.argmax(y_test, 1)
#print(f1_truelabels.shape)
f1score = f1_score(f1_truelabels, f1_predictions, average='macro')
precision = precision_score(f1_truelabels, f1_predictions, average='macro')
recall = recall_score(f1_truelabels, f1_predictions, average='macro')
print("Test Precision:{:.2} Recall:{:.2} F1:{:.2}".format(precision, recall, f1score))
cnf_matrix = confusion_matrix(f1_truelabels, f1_predictions)
print(cnf_matrix)
parameters.add_parameter("Test Statistics", "Precision:{:.2} Recall:{:.2} F1:{:.2}".format(precision, recall, f1score))
parameters.add_parameter("Test Confusion matrix", cnf_matrix)
exports_folder = 'data/exports/'
timestamp = time.strftime("%Y%m%d-%H%M%S")
parameters.write_parameters(exports_folder, timestamp+"_TestF1_{:.2}".format(f1score))
print("Identifier:{}".format(timestamp))
results_filename = "classification_results_" + timestamp + "_TestF1_{:.2}".format(f1score)+".csv"
filepath = os.path.join(exports_folder, results_filename)
with open(filepath,'w') as out:
csv_out=csv.writer(out, delimiter = ',')
csv_out.writerow(['Predicted' , 'Truth' ,'Text'])
for i in range(len(predictions)):
#print([f1_predictions[i], f1_truelabels[i], test_tweets[i]])
csv_out.writerow([f1_predictions[i], f1_truelabels[i] , test_tweets[i]])
|
[
"saurabc@yahoo.com"
] |
saurabc@yahoo.com
|
e2e5f556f421cbb5df0de52853da00ef2563b495
|
3ae70c39236906bdbf3e5a4e3c4a9054595ef341
|
/classify/cmp.py
|
c900de2412c24ddc196b2e66a9e8b5892146e764
|
[] |
no_license
|
pcschneider/eroML
|
75e3b48d6ec738199c92ceb86fbe686c0cb4636f
|
444fc9412e324d51019e98c8adb35383fa602fcf
|
refs/heads/master
| 2023-05-01T00:42:46.229076
| 2021-03-25T11:31:09
| 2021-03-25T11:31:09
| 370,687,928
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 602
|
py
|
import numpy as np
from astropy.io import fits as pyfits
fn = "major_eFEDS_classified.fits"
ff = pyfits.open(fn)
fd = ff[1].data
gi = np.where(fd["category"] == 0)[0]
s_names = fd["original_srcID"][gi]
print(s_names, len(s_names))
fn1 = "../ero_data/efeds_c001_V3_main_HamStar_internal.fits"
ff1 = pyfits.open(fn1)
fd1 = ff1[1].data
gi = np.where(fd1["p_stellar"] > 0.735)[0]
print(len(gi))
#b_names = fd1["ero_NAME"][gi]
b_names = np.array([str("ML%05i" % int(d)) for d in fd1["ero_ID"][gi]])
print(b_names)
ovl = np.in1d(s_names, b_names) # s_name in b_names
print(ovl, len(ovl), np.sum(ovl))
|
[
"astro@pcschneider.eu"
] |
astro@pcschneider.eu
|
6f3673227637351a7965acc9379c4b76317ddca5
|
9e26b28f74585a4f95476847f6e886ecf3bff0a1
|
/mywork/stat/core/eventype.py
|
c26470fd2d9ab45ebb863b9128c86dc221a8d7c9
|
[] |
no_license
|
wanziforever/tools
|
28e4aa30377395fd9966407051e92f83efd23481
|
49f8de6e3dd18846c8b5f187507513312778a7ed
|
refs/heads/master
| 2016-09-06T10:30:28.303092
| 2015-06-04T01:53:18
| 2015-06-04T01:53:18
| 6,934,211
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 197
|
py
|
#!/usr/bin/env python
''' define all the event types; currently only timer and socket
message events are supported '''
class EVENTYPE(object):
INVALID = 0
TIMEREXPIRE = 1
NORMALMSG = 2
|
[
"wanziforever@gmail.com"
] |
wanziforever@gmail.com
|
21296bb60e05156762f7d9539dbfee8c6ea02205
|
c5116fa01aab6d1c0a68b1e6f0b27ea453a714be
|
/tests/test_20_main.py
|
773eeab9680a6f2e0f2686ecabb5c4b7d4dc8990
|
[
"Apache-2.0"
] |
permissive
|
ecmwf/cfgrib
|
1848ed6be566ecd05f31212dd41618d2e85cc5f8
|
00e936e4677cf2ff434f928c1f3740dcb3581ec9
|
refs/heads/master
| 2023-08-08T23:39:02.128983
| 2023-06-02T13:52:20
| 2023-06-02T13:52:20
| 141,110,423
| 366
| 68
|
Apache-2.0
| 2023-09-06T12:35:59
| 2018-07-16T08:34:34
|
Python
|
UTF-8
|
Python
| false
| false
| 375
|
py
|
import click.testing
from cfgrib import __main__
def test_cfgrib_cli_selfcheck() -> None:
runner = click.testing.CliRunner()
res = runner.invoke(__main__.cfgrib_cli, ["selfcheck"])
assert res.exit_code == 0
assert "Your system is ready." in res.output
res = runner.invoke(__main__.cfgrib_cli, ["non-existent-command"])
assert res.exit_code == 2
|
[
"a.amici@bopen.eu"
] |
a.amici@bopen.eu
|
350d498add8f9c0bd0aacea3c76b8f490284b7d6
|
e1c9ba82c0d68c1c3170bb49428274246aeaba20
|
/src/assets/scene_config/pandemic/5/solution.py
|
1e0100e5107fa1f2300d7c0f72d9d83ad115b9ee
|
[
"Apache-2.0"
] |
permissive
|
zheka2304/kiddo
|
032c7505d7362341dda2d4a2c7f859aac74f9523
|
58fc41bd8d1ee93c9af464179fe6e3c0d6150b50
|
refs/heads/master
| 2023-07-24T20:27:29.205146
| 2020-11-16T12:27:55
| 2020-11-16T12:27:55
| 384,953,198
| 1
| 0
|
Apache-2.0
| 2021-07-11T13:19:31
| 2021-07-11T13:19:31
| null |
UTF-8
|
Python
| false
| false
| 199
|
py
|
import player
player.put_on_mask()
player.go_right(4)
player.go_down(4)
player.go_right()
player.get_products()
player.go_left(4)
player.go_up()
player.go_left()
player.go_up(4)
player.wash_hands()
|
[
"denis.tsyplakov@onsolve.com"
] |
denis.tsyplakov@onsolve.com
|
ae5b78d881838cb1b4a291f55be0ef3b54f4fe10
|
32544e36cf43e0ff41bdec6aab4a9c7e2d6506fd
|
/11_subseq/maxseq.py
|
87b9ca2c3a361de9fedb141d04f57930da620cbb
|
[] |
no_license
|
luispabreu/p3ws
|
9374bfadcbda85b874c9dd441caf962fbcc8eea2
|
e7ef18d36a222a2d4f2ef0b506a45a1d896f8273
|
refs/heads/master
| 2023-02-05T21:33:13.523410
| 2020-12-18T18:27:50
| 2020-12-18T18:27:50
| 319,834,565
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 388
|
py
|
def maxSeq(list):
if len(list) < 2:
return len(list)
length_now = 1
length_max = 1
for i in range(1,len(list)):
if list[i] > list[i-1]:
length_now += 1
else:
length_now = 1
pass
if length_now > length_max:
length_max = length_now
pass
pass
return length_max
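# --- Hedged usage examples (editor addition, not part of the original file) ---
# maxSeq returns the length of the longest strictly increasing contiguous run.
if __name__ == '__main__':
    print(maxSeq([]))            # 0 (empty list)
    print(maxSeq([7]))           # 1 (single element)
    print(maxSeq([1, 2, 2, 3]))  # 2 (runs [1, 2] and [2, 3])
    print(maxSeq([3, 1, 2, 4]))  # 3 (run [1, 2, 4])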
|
[
"lep43@duke.edu"
] |
lep43@duke.edu
|
6080bf13c35035ee28bb2bd1fa4394aa388aaa81
|
175978fc145804d2a348f25fc39d7c6050d23b06
|
/338. Counting Bits.py
|
706181271dcd18f2fe90fa86830f4c155d109584
|
[] |
no_license
|
daxiangpanda/leetcode
|
4738c9a5b274686f5af0ab0a00b55d9abd92969e
|
e00db2936db64dc200ad62a18d0bc7af4599b013
|
refs/heads/master
| 2021-01-09T21:46:45.360041
| 2019-01-27T10:18:57
| 2019-01-27T10:18:57
| 49,489,483
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 585
|
py
|
#!/usr/bin/env python
# encoding: utf-8
# The algorithm is quite neat; not sure how it was originally thought up.
# Idea: lst[i] = lst[i - milestone] + 1, where milestone is the largest power of two <= i,
# i.e. drop the highest set bit and add one to its popcount.
class Solution(object):
def countBits(self, num):
lst = [0] * (num + 1)
milestone = 0
nextMilestone = 2
for i in range(1, num + 1):
if i == nextMilestone:
milestone = nextMilestone
nextMilestone = nextMilestone * 2
# print milestone
lst[i] = lst[i - milestone] + 1
# print i
# print lst
# print bin(i)
return lst
a = Solution()
a.countBits(100)
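# --- Hedged check (editor addition, not part of the original file) ---
# For a small input the recurrence reproduces the expected popcounts:
print(a.countBits(5))  # [0, 1, 1, 2, 1, 2]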
|
[
"313514820@qq.com"
] |
313514820@qq.com
|
d4355072527465d76b91d5813414594d7050b896
|
1f56491c9f009295fe443cce59fb3ecebbe0bd45
|
/weather.py
|
3d71e5c15bdd9c6b79f71f210377aa52a417b19b
|
[] |
no_license
|
MechOrigin/dialogflow-webhook-weather
|
d95400d00982c2f8dad8091ff73797bc54c70054
|
e895d55d7222d5dcf5f35930ba65a01ea1e9fc00
|
refs/heads/master
| 2021-01-26T09:00:23.757962
| 2020-03-26T15:20:16
| 2020-03-26T15:20:16
| 243,395,122
| 0
| 0
| null | 2020-02-27T00:27:06
| 2020-02-27T00:27:05
| null |
UTF-8
|
Python
| false
| false
| 2,156
|
py
|
from flask import Flask,request,make_response
import os,json
import pyowm
app = Flask(__name__)
# NOTE: the original line was owmapikey=os.environ.get('9648b1...846'), i.e. the API key
# itself was used as the environment-variable *name*, so the lookup normally returns None.
# Reading a named variable with the key as a fallback is the likely intent; the variable
# name OWM_API_KEY below is an assumption, not part of the original project.
owmapikey = os.environ.get('OWM_API_KEY', '9648b1724d6c1b3023a40af748a3b846')  # or provide your key here
owm = pyowm.OWM(owmapikey)
# getting and sending the response to Dialogflow
@app.route('/webhook', methods=['POST'])
def webhook():
req = request.get_json(silent=True, force=True)
print("Request:")
print(json.dumps(req, indent=4))
res = processRequest(req)
res = json.dumps(res, indent=4)
print(res)
r = make_response(res)
r.headers['Content-Type'] = 'application/json'
return r
#processing the request from dialogflow
def processRequest(req):
result = req.get("result")
parameters = result.get("parameters")
city = parameters.get("geo-city")
observation = owm.weather_at_place(city)
w = observation.get_weather()
latlon_res = observation.get_location()
lat=str(latlon_res.get_lat())
lon=str(latlon_res.get_lon())
wind_res=w.get_wind()
wind_speed=str(wind_res.get('speed'))
humidity=str(w.get_humidity())
celsius_result=w.get_temperature('celsius')
temp_min_celsius=str(celsius_result.get('temp_min'))
temp_max_celsius=str(celsius_result.get('temp_max'))
fahrenheit_result=w.get_temperature('fahrenheit')
temp_min_fahrenheit=str(fahrenheit_result.get('temp_min'))
temp_max_fahrenheit=str(fahrenheit_result.get('temp_max'))
speech = "Today the weather in " + city + ": \n" + "Temperature in Celsius:\nMax temp :"+temp_max_celsius+".\nMin Temp :"+temp_min_celsius+".\nTemperature in Fahrenheit:\nMax temp :"+temp_max_fahrenheit+".\nMin Temp :"+temp_min_fahrenheit+".\nHumidity :"+humidity+".\nWind Speed :"+wind_speed+"\nLatitude :"+lat+".\n Longitude :"+lon
return {
"speech": speech,
"displayText": speech,
"source": "dialogflow-weather-by-satheshrgs"
}
if __name__ == '__main__':
port = int(os.getenv('PORT', 5000))
print("Starting app on port %d" % port)
app.run(debug=False, port=port, host='0.0.0.0')
|
[
"noreply@github.com"
] |
MechOrigin.noreply@github.com
|
8e5245dcebd5cadbdeaa80b6aab19e6bfad12ecc
|
bc6d580c3a28f0c19e12176db7b4fac31848f6ea
|
/temp.py
|
ccad1df251f2de864ec1536f0996c6563cb6665c
|
[] |
no_license
|
soumalipal/BTP
|
1f2ae1067319367fa029eb263c4b581c2713f9c8
|
376c7cdbda589ef1cf066e82f2a152051e4f2372
|
refs/heads/master
| 2020-04-26T09:12:04.787029
| 2019-03-01T15:17:02
| 2019-03-01T15:17:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,240
|
py
|
import tkinter as tk
import serial
import time
import os
from PIL import ImageTk
#import RPi.GPIO as GPIO
#Function Definitions
#Find Connected Arduinos
def find_dev():
global left_ar,right_ar,rpm_ar,cabin_ar,back_ar
devices=os.listdir("/dev")
for dev in devices:
if dev[0:6]=="ttyUSB" or dev[0:6]=="ttyACM":
arduino=serial.Serial("/dev/" + str(dev),9600)
assign=arduino.readline()
assign=arduino.readline()
para=str(assign).split()
#print(para)
            if para[0] == "b'Distancelf:":  # bug fix: original compared para[0][2:] against a string that still contains "b'", which can never match
left_ar=arduino
elif para[0]=="b'Distancerf:":
right_ar=arduino
elif para[0]=="b'RPM:":
rpm_ar=arduino
elif para[0]=="b'cabin":
cabin_ar=arduino
elif para[0]=="b'Distancebl":
back_ar=arduino
#Update Ultrasonic Sensor Labels-Left
def update_left():
global left_ar
data=left_ar.readline()
data_split=str(data).split()
label_lf.config(text=data_split[1])
label_lb.config(text=data_split[3])
if int(data_split[1])<50:
label_lf.place(x=((screen_width-picture_width)/2)+50,y=((screen_height-picture_height)/2)+250)
else:
label_lf.place(x=screen_width+1,y=screen_height+1)
if int(data_split[3])<50:
label_lb.place(x=((screen_width-picture_width)/2)+50,y=((screen_height+picture_height)/2)-250)
else:
label_lb.place(x=screen_width+1,y=screen_height+1)
#Update Ultrasonic Sensor Labels-Right
def update_right():
global right_ar
data=right_ar.readline()
data_split=str(data).split()
label_rf.config(text=data_split[1])
label_rb.config(text=data_split[3])
if int(data_split[1])<50:
label_rf.place(x=((screen_width+picture_width)/2)+20,y=((screen_height-picture_height)/2)+30)
else:
label_rf.place(x=screen_width+1,y=screen_height+1)
if int(data_split[3])<50:
label_rb.place(x=((screen_width+picture_width)/2)+20,y=((screen_height+picture_height)/2)-30)
else:
label_rb.place(x=screen_width+1,y=screen_height+1)
#Update Ultrasonic Sensor Labels-Back
def update_back():
global back_ar
data=back_ar.readline()
data_split=str(data).split()
label_bl.config(text=data_split[1])
label_br.config(text=data_split[3])
if int(data_split[1])<50:
label_bl.place(x=((screen_width-picture_width)/2)+20,y=((screen_height+picture_height)/2)+30)
else:
label_bl.place(x=screen_width+1,y=screen_height+1)
if int(data_split[3])<50:
label_br.place(x=((screen_width+picture_width)/2)-20,y=((screen_height+picture_height)/2)+30)
else:
label_br.place(x=screen_width+1,y=screen_height+1)
#Update Time
def update_time():
global startTime
label_Time.config(text="Time of Operation " + str(int((time.time()-startTime)/3600))+" Hours "+str(int((time.time()-startTime)/60))+" Minutes",fg="black",bg="yellow")
label_Time.place(x=0,y=0)
#Relay Control Button Toggle
def relay_control():
global flag
if flag==0:
#GPIO.output(40, GPIO.HIGH)
relay_button.config(text="Turn Off",command=relay_control)
flag=1
elif flag==1:
#GPIO.output(40, GPIO.LOW)
relay_button.config(text="Turn On",command=relay_control)
flag=0
#Update
def update():
update_left()
#update_right()
#update_back()
#update_rpm()
update_time()
#Recursion for each update
root.after(1000, update)
#Initialize Time
startTime=time.time()
#Initialize RPi for GPIO
##GPIO.setmode(GPIO.BOARD)
##GPIO.setup(40,GPIO.OUT,initial=GPIO.LOW)
flag = 0 #for Relay Control
#Tkinter for GUI
root=tk.Tk()
#Fullscreen
root.attributes('-zoomed', True)
root.wm_attributes('-alpha', 0.7)
root.configure(bg='white')
#Getting Screen Resolution
screen_width = root.winfo_screenwidth()
screen_height = root.winfo_screenheight()
#Center Image
photo_vehicle=ImageTk.PhotoImage(file="/home/ritwick/Documents/Arduino/US/car.png")
center_img=tk.Label(root,image=photo_vehicle,relief='flat',bg='white')
picture_height=photo_vehicle.height()
picture_width=photo_vehicle.width()
center_img.place(x=(screen_width-picture_width)/2,y=(screen_height-picture_height)/2)
#Labels for Ultrasonic Sensor
label_lf= tk.Label(root,text="lf")
label_lf.config(bg='white',font=("Courier", 32),fg='black')
label_lb= tk.Label(root,text="lb")
label_lb.config(bg='white',font=("Courier", 32),fg='black')
label_rf= tk.Label(root,text="rf")
label_rb= tk.Label(root,text="rb")
label_bl= tk.Label(root,text="bl")
label_br= tk.Label(root,text="br")
#Label for RPM Sensor
label_rpm=tk.Label(root,text="RPM",bg="yellow",fg="black")
label_rpm.place(x=(screen_width/2)-10,y=(screen_height/2)-10)
#Label for Time
label_Time=tk.Label(root,text=startTime)
#Button to Control Relay
relay_button=tk.Button(root,text="Turn On",command=relay_control)
relay_button.place(x=(screen_width/2)-10,y=((screen_height+picture_height)/2)+60)
#Find and Assign Connected Arduinos
find_dev()
#Update Each Component
update()
#Mainloop
root.mainloop()
|
[
"verma1997@gmail.com"
] |
verma1997@gmail.com
|
c2b28cb187efe12273a5b0dac1b810439e8b15c0
|
09d349155446f2f32519cfc7deb7f79b1138a158
|
/bluegriffon/actions.py
|
b7ad185f9ae865049030bba33396598bc95bda1b
|
[] |
no_license
|
marcin-/pardususer.de
|
632d7fb4c5a9252dbcf82711a5da126523d3b8e8
|
1d4bb1d1f9da113cf2b8cbcc6b544ec9b9616862
|
refs/heads/master
| 2016-09-05T23:22:38.726769
| 2012-10-08T20:40:39
| 2012-10-08T20:40:39
| 6,114,809
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 497
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
#Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/copyleft/gpl.txt.
from pisi.actionsapi import get
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
def setup():
shelltools.system("tar -jxvf %s-%s-Linux-%s.tar.bz2" % (get.srcNAME(), get.srcVERSION(), get.ARCH().replace("i6", "x")))
def install():
pisitools.insinto("/opt/%s" % get.srcNAME(), "%s/*" % get.srcNAME())
|
[
"marcin.bojara@gmail.com"
] |
marcin.bojara@gmail.com
|
79f03d9d426e4d5f9502ae84952938e4ad0411aa
|
6cb23c2937bfbf3a91bacb661677e6425399eb7f
|
/Appium/demo/zhuye.py
|
744aa112a0a0ea2582a062c62ff48fabefb9d356
|
[] |
no_license
|
jinyaxuan1119/test
|
d65c2a578ab48ed3dddb658610579be629fb7c98
|
e61163308d0071d8f0a1b0658f2e0642c1c3be97
|
refs/heads/master
| 2023-05-26T21:21:36.851829
| 2023-05-10T10:32:57
| 2023-05-10T10:32:57
| 241,112,873
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 180
|
py
|
from selenium.webdriver.common.by import By
from Appium.demo.hexin import HeXin
class Zhuye(HeXin):
def click_sousuo(self):
return self._driver.find_element(By.ID,"")
|
[
"jinyaxuan_test@sina.com"
] |
jinyaxuan_test@sina.com
|
20ce49a41a21f7f150a97dc9ae000af2a1a46c30
|
9953156013e0d572f01715ce5e0716918fb4e7c1
|
/polls/models.py
|
92f41fa8822682aa9c06492c577633951858e666
|
[] |
no_license
|
cohki0305/django-tutorial
|
20a6681afcae52a3153ed07e40456c96f7c22712
|
99d685453c2ed6496eaee8e512705ca06bb447e5
|
refs/heads/master
| 2021-08-31T21:28:41.688548
| 2017-12-23T01:03:43
| 2017-12-23T01:03:43
| 114,948,161
| 0
| 0
| null | 2017-12-23T01:03:45
| 2017-12-21T01:30:01
|
Python
|
UTF-8
|
Python
| false
| false
| 550
|
py
|
from django.db import models
# Create your models here.
class Question(models.Model): # models.Model: the base class being inherited; the O/R mapper?
    question_text = models.CharField(max_length=200) # why is only this one in CamelCase?
    pub_date = models.DateTimeField('date published') # optionally specify a human-readable field name
class Choice(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
|
[
"cohki0305@gmail.com"
] |
cohki0305@gmail.com
|
e622e6f9dcd6b08fdbcf2d8614ca4f86f2da6d2a
|
7305127e14995404d6ca5653b50c43370b6af815
|
/bookwyrm/models/readthrough.py
|
61cac7e6ab4dc5378d474a80549a3317f23a5393
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
nornagon/bookwyrm
|
2cdda17b1fffd09c932cf09bfdc48021f24fbfb6
|
53e34cf7de47e988386c80933b59326667f7aa4b
|
refs/heads/main
| 2023-02-10T04:24:24.528971
| 2021-01-07T00:05:46
| 2021-01-07T00:05:46
| 327,500,772
| 2
| 0
|
NOASSERTION
| 2021-01-07T04:16:36
| 2021-01-07T04:16:35
| null |
UTF-8
|
Python
| false
| false
| 780
|
py
|
''' progress in a book '''
from django.db import models
from django.utils import timezone
from .base_model import BookWyrmModel
class ReadThrough(BookWyrmModel):
''' Store progress through a book in the database. '''
user = models.ForeignKey('User', on_delete=models.PROTECT)
book = models.ForeignKey('Edition', on_delete=models.PROTECT)
pages_read = models.IntegerField(
null=True,
blank=True)
start_date = models.DateTimeField(
blank=True,
null=True)
finish_date = models.DateTimeField(
blank=True,
null=True)
def save(self, *args, **kwargs):
''' update user active time '''
self.user.last_active_date = timezone.now()
self.user.save()
super().save(*args, **kwargs)
|
[
"mousereeve@riseup.net"
] |
mousereeve@riseup.net
|
371048d4f86718481ae9a1618e377c44396fff3b
|
7805134ab326271dfceccdfe29cdbc2f85a40e85
|
/ncarrara/budgeted_rl/tools/utils_run.py
|
c1117dce7fe7ebd81ad8c00b7f296816c1177861
|
[] |
no_license
|
ncarrara/budgeted-rl
|
9248c9a206bfa2c6c588e9cde0219f443922e3f7
|
b588361a263022eb624fe83e8b16abac4e68e33e
|
refs/heads/master
| 2020-08-18T18:35:11.731139
| 2019-10-17T15:35:08
| 2019-10-17T15:35:08
| 215,821,809
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,451
|
py
|
from collections import Iterable
import numpy as np
import ncarrara.budgeted_rl.bftq.pytorch_budgeted_fittedq as pbf
from ncarrara.budgeted_rl.tools.policies import policy_factory
from ncarrara.utils.datastructure import merge_two_dicts
from ncarrara.utils.math_utils import set_seed
from ncarrara.utils.os import makedirs
from ncarrara.utils_rl.environments import envs_factory
from ncarrara.utils_rl.transition.transition import Transition
import logging
logger = logging.getLogger(__name__)
def datas_to_transitions(datas, env, feature, lambda_, normalize_reward):
logger.info("data to transition ... ")
max_r_ftq = 0
max_r_bftq = 0
e = env
nbNone = 0
for data in datas:
reward_ftq = data.r_ - (lambda_ * data.info["c_"])
max_r_ftq = np.abs(reward_ftq) if np.abs(reward_ftq) > max_r_ftq else max_r_ftq
max_r_bftq = np.abs(data.r_) if np.abs(data.r_) > max_r_bftq else max_r_bftq
if data.s_ is None: nbNone += 1
logger.info("max_r_ftq : {:.2f} max_r_bftq : {:.2f}".format(max_r_ftq, max_r_bftq))
transitions_ftq = []
transitions_bftq = []
for data in datas:
# if not data.a in 'hello()':
r_ = data.r_
beta = data.info.get("beta", None)
s = feature(data.s, e)
if data.done:
s_ = None
else:
s_ = feature(data.s_, e)
a = data.a # e.action_space().index(data.a)
c_ = data.info["c_"]
reward_ftq = r_ - (lambda_ * c_)
reward_bftq = r_
if normalize_reward:
reward_ftq /= (1. if max_r_ftq == 0. else max_r_ftq)
reward_bftq /= (1. if max_r_bftq == 0. else max_r_bftq)
t_ftq = Transition(s, a, reward_ftq, s_)
t_bftq = pbf.TransitionBFTQ(s, a, reward_bftq, s_, c_, beta, None)
transitions_ftq.append(t_ftq)
transitions_bftq.append(t_bftq)
logger.info("nbdialogues : {}".format(nbNone))
logger.info("data to transition ... done")
return transitions_ftq, transitions_bftq
def format_results(results):
N = len(results)
rew_r, rew_c, ret_r, ret_c = np.mean(results, axis=0)
std_rew_r, std_rew_c, std_ret_r, std_ret_c = np.std(results, axis=0)
p = "R={:.2f}+/-{:.2f} C={:.2f}+/-{:.2f} , return : R={:.2f}+/-{:.2f} C={:.2f}+/-{:.2f}".format(
rew_r, std_rew_r, rew_c, std_rew_c, ret_r, std_ret_r, ret_c, std_ret_c)
confidence_r = 1.96 * (std_rew_r / np.sqrt(N))
confidence_r_str = "[{:.2f};{:.2f}]".format(rew_r - confidence_r, rew_r + confidence_r)
confidence_c = 1.96 * (std_rew_c / np.sqrt(N))
confidence_c_str = "[{:.2f};{:.2f}]".format(rew_c - confidence_c, rew_c + confidence_c)
pp = "R=" + confidence_r_str + " C=" + confidence_c_str
return (pp + " " + p)
def execute_policy_one_trajectory(env, pi, gamma_r=1.0, gamma_c=1.0, beta=None):
trajectory = []
pi.reset()
if hasattr(env, "ID") and env.ID == "gym_pydial":
s = env.reset()
a = env.action_space_str.index('hello')
rew_r, rew_c, ret_r, ret_c = 0., 0., 0., 0.
i = 0
s_, r_, end, info_env = env.step(a)
transition = (s, a, r_, s_, end, info_env)
trajectory.append(transition)
info_env = {}
info_pi = {"beta": beta}
i += 1
else:
s_ = env.reset()
rew_r, rew_c, ret_r, ret_c = 0., 0., 0., 0.
i = 0
info_env = {}
info_pi = {"beta": beta}
end = False
while not end:
s = s_
action_mask = get_action_mask(env)
beta = info_pi["beta"]
info_pi = merge_two_dicts(info_pi, info_env)
a, is_master_action, info_pi = pi.execute(s, action_mask, info_pi)
if hasattr(env, "ID") and env.ID == "gym_pydial":
s_, r_, end, info_env = env.step(a, is_master_act=is_master_action)
else:
s_, r_, end, info_env = env.step(a)
c_ = info_env["c_"]
info = info_env.copy()
info["beta"] = beta
transition = (s, a if type(a) is str else int(a), r_, s_, end, info)
rew_r += r_
rew_c += c_
ret_r += r_ * (gamma_r ** i)
ret_c += c_ * (gamma_c ** i)
trajectory.append(transition)
i += 1
return trajectory, rew_r, rew_c, ret_r, ret_c
def get_action_mask(env):
action_mask = np.zeros(env.action_space.n)
if hasattr(env, "action_space_executable"):
raise Exception("Remove this expection please")
actions = env.action_space_executable()
action_mask[actions] = 1
elif hasattr(env, "get_available_actions"):
actions = env.get_available_actions()
action_mask = np.ones(env.action_space.n)
action_mask[actions] = 0
return action_mask
def execute_policy(env, pi,
gamma_r=1.0,
gamma_c=1.0,
n_trajectories=10,
beta=1.,
epsilon_schedule=None,
save_path=None):
"""
Execute a policy on an environment for several trajectories.
:param env: environment
:param pi: policy
:param gamma_r: reward discount factor
:param gamma_c: constraint discount factor
:param n_trajectories: number of trajectories to generate
:param beta: constraint threshold. Either a float, or an array of size n_trajectories for beta scheduling
:param epsilon_schedule: array of size n_trajectories: schedule of epsilon to use for EpsilonGreedy policies
:param save_path: if not none, results will be saved to that path
:return: list of trajectories, array of [total reward,
total constraint,
discounted total reward,
discounted total constraint]
"""
trajectories = []
results = np.zeros((n_trajectories, 4))
for d in range(n_trajectories):
# Beta schedule
if isinstance(beta, Iterable):
traj_beta = beta[d]
else:
traj_beta = beta
# Epsilon schedule
if epsilon_schedule is not None:
pi.epsilon = epsilon_schedule[d]
# Execution
trajectory, rew_r, rew_c, ret_r, ret_c = execute_policy_one_trajectory(env, pi, gamma_r, gamma_c, traj_beta)
trajectories.append(trajectory)
results[d] = np.array([rew_r, rew_c, ret_r, ret_c])
logger.info("[execute_policy] mean length : {}".format(np.mean([len(t) for t in trajectories])))
if save_path is not None:
logger.info("[execute_policy] saving results at : {}".format(save_path))
with open(save_path, 'ab') as f:
np.savetxt(f, results)
return trajectories, results
def execute_policy_from_config(generate_envs,
policy_config,
seed=None,
gamma_r=1.0,
gamma_c=1.0,
n_trajectories=10,
beta=1.,
epsilon_schedule=None,
save_path=None,
logging_config={}):
"""
Generate an environment and a policy from configurations, and collect trajectories.
:param generate_envs: environment config
:param policy_config: policy config
:param seed: to seed the environment before execution
:param gamma_r: see execute_policy()
:param gamma_c: see execute_policy()
:param n_trajectories: see execute_policy()
:param beta: see execute_policy()
:param epsilon_schedule: see execute_policy()
:param save_path: see execute_policy()
:param logging_config: the logging config of the process
:return: the collected trajectories
"""
if logging_config:
import logging.config as config
config.dictConfig(logging_config)
envs, params = envs_factory.generate_envs(**generate_envs)
env = envs[0]
set_seed(seed, env)
policy_config["env"] = env
pi = policy_factory(policy_config)
return execute_policy(env, pi, gamma_r, gamma_c, n_trajectories, beta, epsilon_schedule, save_path)
if __name__ == '__main__':
envs = {
"envs_str": "highway-v0",
"envs_params": {
"lanes_count": [2],
"initial_spacing": [2],
"vehicles_count": [5],
"duration": [20],
"other_vehicles_type": ["highway_env.vehicle.behavior.IDMVehicle"],
"centering_position": [[0.3, 0.5]],
"collision_reward": [0]
}
}
n_trajectories = 10
from ncarrara.budgeted_rl.tools.policies import RandomBudgetedPolicy
trajs, res = execute_policy_from_config(envs,
policy_config={"__class__": repr(RandomBudgetedPolicy)},
seed=0,
gamma_r=1,
gamma_c=1,
n_trajectories=n_trajectories,
beta=np.linspace(0, 1, n_trajectories),
epsilon_schedule= 1 - np.linspace(0, 1, n_trajectories),
save_path=None,
logging_config=None)
print(len(trajs), "trajectories")
print(res)
|
[
"nicolas.carrara1u@gmail.com"
] |
nicolas.carrara1u@gmail.com
|
58cbb3a8e9131a30feccda6177ed9c6a44a48c25
|
f9237f8db69af2590b90b4c40f8ed1ddabe7c703
|
/test/test_standard.py
|
6322d7f9100d896954f7d39477809b05f343edec
|
[
"MIT"
] |
permissive
|
Vaziria/stattest
|
d1ca89b6b090a40abf32c6071b988894ed39585f
|
82fd47a93a7b6f5c54a61080498dbf71413ce5bd
|
refs/heads/master
| 2023-08-28T04:30:20.430307
| 2021-10-29T22:40:33
| 2021-10-29T22:40:33
| 351,990,791
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 167
|
py
|
import time
from test_server import io_loop, test
def test_using_distributed(benchmark):
def run():
io_loop.run_until_complete(test())
benchmark(run)
|
[
"manorder123@gmail.com"
] |
manorder123@gmail.com
|
603ce5a4b3d247a939d7be67cce624b99f22c876
|
5dea5b2aee34962d6ecff89da9d4b2f7499c85ce
|
/rasputin/labs/migrations/0001_initial.py
|
527c304c7ecfd219e96476c293d37e693c15513a
|
[
"BSD-3-Clause"
] |
permissive
|
goldhand/terpene
|
3a691fde0258d9a40dd505b10f6ea7fbb5245815
|
6cd1d8839abb8eacabde32140489463099d982fc
|
refs/heads/master
| 2023-04-01T05:29:17.196607
| 2014-11-19T20:49:42
| 2014-11-19T20:49:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,259
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Lab',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255)),
('domain', models.URLField()),
('active', models.BooleanField(default=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='LabCustomer',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('customer_id', models.CharField(max_length=255)),
('lab', models.ForeignKey(to='labs.Lab')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
]
|
[
"will@django.nu"
] |
will@django.nu
|
090f9208b53e74dcd17d537b19804743a8aa1995
|
66ed253c19ac712cb5f7c6e3e33d6c6e45e1cadb
|
/test_hw1.py
|
56991535778f6255850978c7bedcf082c0143d60
|
[] |
no_license
|
GeorgeNekws/2017sum_era1_kol3
|
a05b3d77bbd54061eb1676cbadee8c96dab616f5
|
1e993b16fbe454da0443ef8bf5cb5889298738f2
|
refs/heads/master
| 2021-01-19T03:01:17.212308
| 2017-04-25T15:47:07
| 2017-04-25T15:47:07
| 87,303,476
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,896
|
py
|
# GeorgeNekws
import unittest
from matrix_module import Matrix
import math
class test_matrix_operations(unittest.TestCase):
def setUp(self):
self.obj = Matrix([1, 1, 1, 1] , [2, 2, 2, 2])
def test_add_between_matrices(self):
self.assertEqual([3, 3, 3, 3] , self.obj.addition([1, 1, 1, 1] , [2, 2, 2, 2]) )
def test_add_between_matrix_and_scalar(self):
self.assertEqual([6, 6, 6, 6] , self.obj.addition([1, 1, 1, 1] , [5]) )
def test_add_between_scalar_and_matrix(self):
self.assertEqual([6, 6, 6, 6] , self.obj.addition( [5], [1, 1, 1, 1]) )
def test_add_between_negative_scalar_and_matrix(self):
self.assertEqual([-4, -4, -4, -4] , self.obj.addition( [-5], [1, 1, 1, 1]) )
def test_subtract_between_matrices(self):
self.assertEqual([1, 1, 1, 1] , self.obj.subtraction([2, 2, 2, 2] , [1, 1, 1, 1]))
def test_subtract_between_matrix_and_scalar(self):
self.assertEqual([4, 4, 4, 4] , self.obj.subtraction([5] , [1, 1, 1, 1]))
def test_subtract_between_scalar_and_matrix(self):
self.assertEqual([-4, -4, -4, -4] , self.obj.subtraction([1, 1, 1, 1] , [5]))
def test_multiplication_between_matrices(self):
self.assertEqual([4, 4, 4, 4] , self.obj.multiplication([1, 1, 1, 1] , [2, 2, 2, 2]))
def test_multiplication_between_negative_matrices(self):
self.assertEqual([4, 4, 4, 4] , self.obj.multiplication([-1, -1, -1, -1] , [-2, -2, -2, -2]))
def test_multiplication_between_negative_and_positive_matrices(self):
self.assertEqual([-4, -4, -4, -4] , self.obj.multiplication([1, 1, 1, 1] , [-2, -2, -2, -2]))
def test_multiplication_between_matrix_and_scalar(self):
self.assertEqual([5, 5, 5, 5] , self.obj.multiplication([5] , [1, 1, 1, 1]))
def test_multiplication_between_scalar_and_matrix(self):
self.assertEqual([5, 5, 5, 5] , self.obj.multiplication([1, 1, 1, 1] , [5]))
if __name__ == '__main__':
unittest.main()
|
[
"gtsikriteas@uth.gr"
] |
gtsikriteas@uth.gr
|
b310a26be8babcba616413af5ffda081b6b8d4a0
|
70205da2c939cdee904420f73eb76df05e9a0d80
|
/String/214.py
|
dc602cd45a94b19da56fdca78177cc4269389c43
|
[] |
no_license
|
ShangruZhong/leetcode
|
4f9a1ae7d71a73bbd8fc42a55c6c12d61f47589e
|
6582b0f138a19f9d4a005eda298ecb1488eb0d2e
|
refs/heads/master
| 2020-04-12T01:29:52.990418
| 2017-06-25T13:48:35
| 2017-06-25T13:48:35
| 42,118,075
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 906
|
py
|
"""
214. Shortest Palindrome
Brute force solution, why not KMP?
@date: 2017/01/24
"""
class Solution(object):
def shortestPalindrome(self, s):
"""
:type s: str
:rtype: str
"""
reverse = s[::-1]
for i in xrange(len(s) + 1):
if s.startswith(reverse[i:]):
# if self.startwith(s, reverse[i:]):
return reverse[:i] + s
def startwith(self, s, prefix):
"""
Implement string.startswith(prefix)
but O(|prefix|)
if prefix == None,
also return True
"""
if len(s) < len(prefix):
return False
l, r = 0, len(prefix) - 1
while l <= r:
if s[l] != prefix[l] or s[r] != prefix[r]:
return False
else:
l += 1
r -= 1
return True
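# --- Hedged usage examples (editor addition, not part of the original file) ---
# shortestPalindrome prepends the minimal prefix that turns s into a palindrome.
solution = Solution()
print(solution.shortestPalindrome("aacecaaa"))  # "aaacecaaa"
print(solution.shortestPalindrome("abcd"))      # "dcbabcd"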
|
[
"draco.mystack@gmail.com"
] |
draco.mystack@gmail.com
|
24b34b4e2935a0873c6ed9f8dd9f18757c6170cc
|
6d9d6c52ad77c92de5fe194468eb86ea6ac1ea72
|
/Startcamp/dayone/lotto.py
|
96799c4f6ddf3657664dbe5b0346865ebd30aac1
|
[] |
no_license
|
Wonsang-Joo/TIL
|
80e91d0027309da8af1409cfdf7d62419e97895a
|
5b095b49c5ba7b1ec9ddd6eedf0126c523d8b8a3
|
refs/heads/master
| 2023-04-08T16:54:01.638298
| 2021-04-01T14:48:23
| 2021-04-01T14:48:23
| 329,817,832
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 205
|
py
|
# import statements go at the very top of the script file
import random
numbers = range(1, 46)
# print(numbers)
lucky_numbers = random.sample(numbers, 6)
# print(lucky_numbers)
# sort
print(sorted(lucky_numbers))
|
[
"nigagaraha@naver.com"
] |
nigagaraha@naver.com
|
196ac8aa67084770b50f05d2fc6acd8a17d1aa71
|
080919d33b7b016a854a6bb8de5927fc66a58b74
|
/JokeRecommender/lib/python3.6/_bootlocale.py
|
69455e67e024913c30c31f5dbda668c80c3b4ab4
|
[] |
no_license
|
anjaliverma96/Project_test
|
d6872103912804d0ddcd72d6cd00d2d9344c12ca
|
38a301973326b5d24c78514c76f8ef05ca247b5a
|
refs/heads/master
| 2022-12-11T00:05:45.727270
| 2019-06-13T06:55:30
| 2019-06-13T06:55:30
| 191,632,055
| 0
| 0
| null | 2022-12-08T01:05:05
| 2019-06-12T19:26:24
|
Python
|
UTF-8
|
Python
| false
| false
| 57
|
py
|
/Users/anjaliverma/anaconda3/lib/python3.6/_bootlocale.py
|
[
"anjaliverma2020@u.northwestern.edu"
] |
anjaliverma2020@u.northwestern.edu
|
929a196f65d8e6bc7b0b5b6553c806a5e7cf401b
|
9569292f00f459a77cb61250f32ce7bbc45181f5
|
/main.py
|
7ec7277a2decf0d49e2ed1844851e4646104a842
|
[] |
no_license
|
Sasayaki7/A-Star
|
a8cf2dc6a145179dd80504a7b06629e50f1a4eaa
|
510ef781a3e3cedf5dea546f8d008e1bdd32d678
|
refs/heads/master
| 2023-07-19T21:49:47.409115
| 2021-08-29T05:50:08
| 2021-08-29T05:50:08
| 362,492,978
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 57
|
py
|
import astargui
gui = astargui.AStarApplet()
gui.run()
|
[
"50351133+Sasayaki7@users.noreply.github.com"
] |
50351133+Sasayaki7@users.noreply.github.com
|
c850902c967838dc029860db23de8f5c1cfcb0f5
|
a51795ac9ec30826d1dd049e5a9a10a5fcfcf332
|
/cart/serializers.py
|
9d3e6a804ac8764b5f8e50f7d1b2225415175aa8
|
[] |
no_license
|
GurungAman/Khaja-khaja
|
71efb2870ea4e5c20e5a40f545a8ac7f94985b45
|
075025491e0d7dae36f98053a5571c93157f0700
|
refs/heads/master
| 2023-09-05T14:28:22.455462
| 2021-10-20T05:55:02
| 2021-10-20T05:55:02
| 389,515,986
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 782
|
py
|
from django.contrib.auth import get_user_model
from rest_framework import serializers
from .models import OrderItem
from restaurant.models import FoodItems
from user.models import Customer
User = get_user_model()
class OrderItemSerializer(serializers.ModelSerializer):
class Meta:
model = OrderItem
fields = '__all__'
def save(self, validated_data):
food_item = FoodItems.objects.get(id=validated_data['food_item'])
customer = Customer.objects.get(id=validated_data['user'])
order_item, _ = OrderItem.objects.get_or_create(
user=customer,
food_item=food_item,
ordered=False
)
order_item.quantity = validated_data['quantity']
order_item.save()
return order_item
|
[
"amenag30@gmail.com"
] |
amenag30@gmail.com
|
b94818c0353b6a951d62578237c10906cd914980
|
03c083f3d817e054f9490e8eb6ddc2c780833674
|
/FileManager.py
|
a1b1940ce83d3080ca9327fa661b89f1c0daae30
|
[] |
no_license
|
mapic91/FileManager
|
c2a7640da52ae9a6f94ead3b039798164151a91b
|
194bc4f22354f799be7cc81acdd3d342afb5233c
|
refs/heads/master
| 2021-06-26T04:36:40.433356
| 2020-09-19T11:08:05
| 2020-09-19T11:08:05
| 128,938,232
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,964
|
py
|
from flask import Flask, url_for, request, render_template, redirect, send_from_directory
from werkzeug.utils import secure_filename
import os
import re
import mimetypes
import shutil
import base64
from urllib import parse
root_path = os.getenv("FileManager_Root_Path")
passwod = os.getenv("FileManager_Login_Password")
aria2_path = os.getenv("aria2_path")
download_server = os.getenv("download_server")
password_error_count = 0
max_password_error_count = 5
app = Flask(__name__)
app.config['UPLOAD_FOLODER'] = root_path
def tryint(s):
try:
return int(s)
except:
return s
def alphanum_key(s):
""" Turn a string into a list of string and number chunks.
"z23a" -> ["z", 23, "a"]
"""
return [tryint(c) for c in re.split('([0-9]+)', s.name)]
def sort_names(l):
""" Sort the given list in the way that humans expect.
"""
l.sort(key=alphanum_key)
def decodestr(strdata):
return parse.unquote(base64.b64decode(strdata.encode(encoding='ascii')).decode(encoding='ascii'))
class DirEntryInfo:
def __init__(self, name, size='-', mimetype='-'):
self.name = name
self.size = size
self.mimetype = mimetype is None and 'unknown' or mimetype
def get_dirs_files(path):
dirs = []
files = []
try:
for entry in os.scandir(path):
if entry.is_dir():
dirs.append(DirEntryInfo(entry.name))
elif entry.is_file():
files.append(DirEntryInfo(entry.name, entry.stat().st_size,
mimetypes.guess_type(os.path.join(root_path, entry.name))[0]))
except FileNotFoundError:
pass
return dirs, files
def get_path_parts(path):
parts = [{'part': 'root', 'path': ''}] # first root path
combined = ''
for part in path.split(sep='/'):
if part != '':
combined = combined + part + '/'
parts.append({'part': part, 'path': combined})
return parts
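# --- Hedged example (editor addition, not part of the original file) ---
# get_path_parts('a/b/') yields breadcrumb entries:
# [{'part': 'root', 'path': ''}, {'part': 'a', 'path': 'a/'}, {'part': 'b', 'path': 'a/b/'}]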
def delete_all_content_in_folder(path):
for entry in os.scandir(path):
if entry.is_dir():
shutil.rmtree(os.path.join(path, entry.name), ignore_errors=True)
elif entry.is_file():
os.remove(os.path.join(path, entry.name))
def get_all_download_link(serveraddr, dirpath, links):
for r, d, f in os.walk(dirpath):
for fn in f:
links.append(serveraddr + os.path.relpath(os.path.join(r, fn), root_path))
@app.route('/login', methods=['GET', 'POST'])
def login():
msg = None
global password_error_count
if request.method == 'POST':
if password_error_count < max_password_error_count and request.form.get('pwd', '') == passwod:
redirect_to_index = redirect(request.args.get('next', '/'))
response = app.make_response(redirect_to_index)
response.set_cookie('pwd', value=passwod, max_age=99999999)
return response
else:
password_error_count += 1
msg = "Password not correct."
return render_template('login.html', msg=msg)
def not_login():
return request.cookies.get('pwd') != passwod
def to_login(current_path):
return redirect(url_for('login', next=current_path))
@app.route('/', methods=['GET'])
def index():
if not_login():
return to_login(request.full_path)
request_path = decodestr(request.args.get('path', ''))
if request.args.get('delete', '') == '1':
if request.args.get('dir', '') == '1':
shutil.rmtree(os.path.join(root_path, request_path), ignore_errors=True)
return 'OK'
else:
os.remove(os.path.join(root_path, request_path))
return 'OK'
elif request.args.get('deleteall', '') == '1':
delete_all_content_in_folder(os.path.join(root_path, request_path))
return 'OK'
elif request.args.get('rename', '') == '1':
oldname = decodestr(request.args.get('oldname', ''))
newname = decodestr(request.args.get('newname', ''))
if oldname != '' and newname != '':
os.rename(os.path.join(root_path, request_path, oldname),
os.path.join(root_path, request_path, newname))
return 'OK'
elif request.args.get('move', '') == "1":
try:
shutil.move(request_path, root_path)
except Exception as e:
return str(e)
return 'OK'
else:
if request_path == "/":
abs_path = "/"
else:
abs_path = os.path.join(root_path, request_path)
dirs, files = get_dirs_files(abs_path)
sort_names(dirs)
sort_names(files)
space = shutil.disk_usage(root_path)
usage_str = str.format("{0:.2f}/{1:.2f}GB[{2:.0%}]>>Free:{3:.2f}GB",
space.used / 1024 / 1024 / 1024,
space.total / 1024 / 1024 / 1024,
space.used / space.total,
space.free / 1024 / 1024 / 1024, )
return render_template('index.html', path_parts=get_path_parts(request_path), path=request_path, dirs=dirs,
files=files, usage_str=usage_str, download_server=download_server)
@app.route('/deletselections', methods=['POST'])
def deletselections():
if not_login():
return to_login(request.full_path)
else:
paths = request.get_json()
if paths is not None:
for item in paths:
if item['type'] == 'floder':
shutil.rmtree(os.path.join(root_path, item['value']), ignore_errors=True)
elif item['type'] == 'file':
os.remove(os.path.join(root_path, item['value']))
return 'OK'
@app.route('/getdownloadurl', methods=['POST'])
def getdownloadurl():
if not_login():
return to_login(request.full_path)
else:
paths = request.get_json()
links = []
host = request.host.split(sep=':')[0]
downloadserver = download_server + '/'
if paths is not None:
for item in paths:
if item['type'] == 'floder':
get_all_download_link(downloadserver, os.path.join(root_path, item['value']), links)
elif item['type'] == 'file':
links.append(downloadserver + item['value'])
return "\n".join(links)
@app.route('/play', defaults={'filename': None}, methods=['GET'])
@app.route('/play/<path:filename>', methods=['GET'])
def play(filename):
if not_login():
return to_login(request.full_path)
host = request.host.split(sep=':')[0]
filepath = download_server + '/' + filename
return render_template('play.html', filepath=filepath)
@app.route('/upload', methods=['GET', 'POST'])
def upload():
if not_login():
return to_login(request.full_path)
if request.method == 'POST':
if 'file' not in request.files:
return redirect(request.url)
file = request.files['file']
if file.filename == '':
return redirect(request.url)
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLODER'], filename))
return redirect(request.url)
return '''
<!doctype html>
<title>Upload new File</title>
<h1>Upload new File</h1>
<form method=post enctype=multipart/form-data>
<p><input type=file name=file>
<input type=submit value=Upload>
</form>
'''
@app.route('/aria2/', defaults={'filename': None}, methods=['GET'])
@app.route('/aria2/<path:filename>', methods=['GET'])
def aria2(filename):
if not_login():
return to_login(request.full_path)
filename = filename or 'index.html'
return send_from_directory(aria2_path, filename=filename)
if __name__ == "__main__":
app.run(host='0.0.0.0', debug=True, ssl_context=('server.crt','server.key'))
|
[
"mapic91@gmail.com"
] |
mapic91@gmail.com
|
c9df2a1953a51ae76421896271d2507d8f35d214
|
2259aac861af506d6189c7dc1a9af8a2c3f25b7f
|
/Calendar.py
|
8f3571bf91b79b1ac46af40d7948b0b5ce6faa00
|
[] |
no_license
|
kteel620/Python-Files
|
e19085975b9dbbc6ad833b2ce41fa27a2cd48e30
|
eaf5712ad34c15982ac4f72b1c7d6b212be97c0b
|
refs/heads/master
| 2021-01-15T21:34:06.661720
| 2019-06-22T02:29:10
| 2019-06-22T02:29:10
| 99,876,909
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,452
|
py
|
'''Program that lets a user interact with a calendar-like app.'''
from time import sleep, strftime
const_var = 'Keshav'
calendar = {}
def welcome():
print("Welcome to the calendar, " + const_var)
sleep(1.0)
print(strftime("%A, %B %d %Y"))
print(strftime("%I:%M:%S %p"))
sleep(1)
print("What would you like to do? ")
def start_calendar():
welcome()
start = True
while (start):
user_choice = input("Enter 'A' to Add, 'U' to Update, 'V' to View, 'D' to Delete, or 'X' to Exit ")
if (user_choice == 'V' or user_choice == 'v'):
if (len(calendar.keys()) < 1):
print('The calendar is empty.')
else:
print(calendar)
elif (user_choice == 'U' or user_choice == 'u'):
date = input("What date?")
update = input("Enter the update: ")
calendar[date] = update
print("Update successful.")
print(calendar)
elif (user_choice == 'A' or user_choice == 'a'):
event = input("Enter event: ")
date = input("Enter date (MM/DD/YYYY): ")
year = int(date[6:10])
year1 = int(strftime("%Y"))
if (len(date) > 10 or year < year1):
print("Invalid date entered. ")
try_again = input("Would you like to try again? 'Y' for Yes or 'N' for No. ")
try_again = try_again.upper()
if (try_again == 'Y'):
continue
else:
start = False
else:
calendar[date] = event
print("Calendar successfully updated.")
print(calendar)
elif (user_choice == 'D' or user_choice == 'd'):
if (len(calendar.keys()) < 1):
print("The calendar is empty already.")
else:
event = input("What event?")
for date in list(calendar):
if (event == calendar[date]):
del calendar[date]
print("The event has been deleted.")
print(calendar)
else:
print("An incorrect event was specified.")
elif (user_choice == 'X' or user_choice =='x'):
start = False
else:
print("You entered garbage. Byeee!")
start = False
start_calendar()
|
[
"noreply@github.com"
] |
kteel620.noreply@github.com
|
575e4e0842798328d732603296abdcd280b6eed1
|
e4767f88a8e1bbfc5661d774aedd772f746119d3
|
/Contest Questions/CCC2018J1JG.PY
|
b69a2d6b84f863ceb42894e3a350a6ee3a7e05f6
|
[] |
no_license
|
jaimangledhill/Year9DesignCS-PythonJG
|
8c45f120f24d10376e989885487fcfbcf7b0c724
|
f93a8ed7d63a0c1835dfb67aabbcdd9b26d3565f
|
refs/heads/master
| 2020-03-28T12:58:04.165143
| 2018-11-28T23:59:05
| 2018-11-28T23:59:05
| 148,352,682
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 34
|
py
|
print ("Welcome to the CCC2018J1")
|
[
"jaiman.gledhill@Gledhill.local"
] |
jaiman.gledhill@Gledhill.local
|
deba6641d4c3c9e73e4fe824920baee96c8765a3
|
41b1a27b59dca48469ecbbca5c36a93753dde316
|
/lithops/config.py
|
e171f45dce14c07f408750ded67e747ef65e4fb6
|
[
"Apache-2.0"
] |
permissive
|
963086989/lithops
|
269a4b09117980d748d92018841d4130f9a541a6
|
44ef83e3511c40b49cab86356e2434b688e2a3f2
|
refs/heads/master
| 2023-07-30T00:03:07.539008
| 2021-09-14T03:17:09
| 2021-09-14T03:17:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,421
|
py
|
#
# (C) Copyright IBM Corp. 2021
# (C) Copyright Cloudlab URV 2021
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import copy
import json
import importlib
import logging
import multiprocessing as mp
from lithops import constants
from lithops.version import __version__
from lithops.utils import verify_runtime_name, get_mode, get_default_backend
from builtins import FileNotFoundError
logger = logging.getLogger(__name__)
os.makedirs(constants.LITHOPS_TEMP_DIR, exist_ok=True)
os.makedirs(constants.JOBS_DIR, exist_ok=True)
os.makedirs(constants.LOGS_DIR, exist_ok=True)
CPU_COUNT = mp.cpu_count()
def load_yaml_config(config_filename):
import yaml
try:
with open(config_filename, 'r') as config_file:
data = yaml.safe_load(config_file)
except FileNotFoundError:
data = {}
return data
def dump_yaml_config(config_filename, data):
import yaml
if not os.path.exists(os.path.dirname(config_filename)):
os.makedirs(os.path.dirname(config_filename))
with open(config_filename, "w") as config_file:
yaml.dump(data, config_file, default_flow_style=False)
def get_default_config_filename():
"""
    First checks the LITHOPS_CONFIG_FILE environment variable,
    then ./.lithops_config,
    then ~/.lithops/config
"""
if 'LITHOPS_CONFIG_FILE' in os.environ:
config_filename = os.environ['LITHOPS_CONFIG_FILE']
elif os.path.exists(".lithops_config"):
config_filename = os.path.abspath('.lithops_config')
else:
config_filename = constants.CONFIG_FILE
if not os.path.exists(config_filename):
return None
return config_filename
def load_config(log=True):
""" Load the configuration """
config_data = None
if 'LITHOPS_CONFIG' in os.environ:
if log:
logger.debug("Loading configuration from env LITHOPS_CONFIG")
config_data = json.loads(os.environ.get('LITHOPS_CONFIG'))
else:
config_filename = get_default_config_filename()
if config_filename:
if log:
logger.debug("Loading configuration from {}".format(config_filename))
config_data = load_yaml_config(config_filename)
if not config_data:
# Set to Localhost mode
if log:
logger.debug("Config file not found")
config_data = {'lithops': {'mode': constants.LOCALHOST,
'backend': constants.LOCALHOST,
'storage': constants.LOCALHOST}}
return config_data
def get_log_info(config_data=None):
""" Return lithops logging information set in configuration """
config_data = copy.deepcopy(config_data) or load_config(log=False)
if 'lithops' not in config_data or not config_data['lithops']:
config_data['lithops'] = {}
cl = config_data['lithops']
if 'log_level' not in cl:
cl['log_level'] = constants.LOGGER_LEVEL
if 'log_format' not in cl:
cl['log_format'] = constants.LOGGER_FORMAT
if 'log_stream' not in cl:
cl['log_stream'] = constants.LOGGER_STREAM
if 'log_filename' not in cl:
cl['log_filename'] = None
return cl['log_level'], cl['log_format'], cl['log_stream'], cl['log_filename']
def default_config(config_data=None, config_overwrite={}, load_storage_config=True):
"""
First checks .lithops_config
then checks LITHOPS_CONFIG_FILE environment variable
then ~/.lithops/config
"""
logger.info('Lithops v{}'.format(__version__))
config_data = copy.deepcopy(config_data) or load_config()
if 'lithops' not in config_data or not config_data['lithops']:
config_data['lithops'] = {}
# overwrite values provided by the user
if 'lithops' in config_overwrite:
config_data['lithops'].update(config_overwrite['lithops'])
backend = config_data['lithops'].get('backend')
mode = config_data['lithops'].get('mode')
if mode and not backend:
if mode in config_data and 'backend' in config_data[mode]:
config_data['lithops']['backend'] = config_data[mode]['backend']
else:
config_data['lithops']['backend'] = get_default_backend(mode)
elif backend:
config_data['lithops']['mode'] = get_mode(backend)
elif not backend and not mode:
mode = config_data['lithops']['mode'] = constants.MODE_DEFAULT
config_data['lithops']['backend'] = get_default_backend(mode)
backend = config_data['lithops'].get('backend')
mode = config_data['lithops'].get('mode')
if mode == constants.LOCALHOST:
logger.debug("Loading compute backend module: localhost")
config_data['lithops']['workers'] = 1
if 'storage' not in config_data['lithops']:
config_data['lithops']['storage'] = constants.LOCALHOST
if 'worker_processes' not in config_data['lithops']:
config_data['lithops']['worker_processes'] = CPU_COUNT
if constants.LOCALHOST not in config_data or \
config_data[constants.LOCALHOST] is None:
config_data[constants.LOCALHOST] = {}
if 'runtime' in config_overwrite:
config_data[constants.LOCALHOST]['runtime'] = config_overwrite['runtime']
if 'runtime' not in config_data[constants.LOCALHOST]:
config_data[constants.LOCALHOST]['runtime'] = constants.LOCALHOST_RUNTIME_DEFAULT
verify_runtime_name(config_data[constants.LOCALHOST]['runtime'])
elif mode == constants.SERVERLESS:
if constants.SERVERLESS not in config_data or \
config_data[constants.SERVERLESS] is None:
config_data[constants.SERVERLESS] = {}
if 'runtime' in config_overwrite:
config_data[backend]['runtime'] = config_overwrite['runtime']
logger.debug("Loading Serverless backend module: {}".format(backend))
cb_config = importlib.import_module('lithops.serverless.backends.{}.config'.format(backend))
cb_config.load_config(config_data)
if 'runtime' in config_overwrite:
config_data[backend]['runtime'] = config_overwrite['runtime']
if 'runtime_memory' in config_overwrite:
config_data[backend]['runtime_memory'] = config_overwrite['runtime_memory']
if 'remote_invoker' in config_overwrite:
config_data[constants.SERVERLESS]['remote_invoker'] = config_overwrite['remote_invoker']
verify_runtime_name(config_data[backend]['runtime'])
elif mode == constants.STANDALONE:
if constants.STANDALONE not in config_data or \
config_data[constants.STANDALONE] is None:
config_data[constants.STANDALONE] = {}
if 'auto_dismantle' not in config_data[constants.STANDALONE]:
config_data[constants.STANDALONE]['auto_dismantle'] = constants.STANDALONE_AUTO_DISMANTLE_DEFAULT
if 'soft_dismantle_timeout' not in config_data[constants.STANDALONE]:
config_data[constants.STANDALONE]['soft_dismantle_timeout'] = constants.STANDALONE_SOFT_DISMANTLE_TIMEOUT_DEFAULT
if 'hard_dismantle_timeout' not in config_data[constants.STANDALONE]:
config_data[constants.STANDALONE]['hard_dismantle_timeout'] = constants.STANDALONE_HARD_DISMANTLE_TIMEOUT_DEFAULT
logger.debug("Loading Standalone backend module: {}".format(backend))
sb_config = importlib.import_module('lithops.standalone.backends.{}.config'.format(backend))
sb_config.load_config(config_data)
if 'runtime' in config_overwrite:
config_data[constants.STANDALONE]['runtime'] = config_overwrite['runtime']
if 'runtime' not in config_data[constants.STANDALONE]:
config_data[constants.STANDALONE]['runtime'] = constants.STANDALONE_RUNTIME_DEFAULT
verify_runtime_name(config_data[constants.STANDALONE]['runtime'])
if 'execution_timeout' not in config_data['lithops']:
config_data['lithops']['execution_timeout'] = constants.EXECUTION_TIMEOUT_DEFAULT
if 'worker_processes' not in config_data['lithops']:
config_data['lithops']['worker_processes'] = constants.WORKER_PROCESSES_DEFAULT
if 'chunksize' not in config_data['lithops']:
config_data['lithops']['chunksize'] = config_data['lithops']['worker_processes']
if 'monitoring' not in config_data['lithops']:
config_data['lithops']['monitoring'] = constants.MONITORING_DEFAULT
if load_storage_config:
config_data = default_storage_config(config_data)
if config_data['lithops']['storage'] == constants.LOCALHOST and mode != constants.LOCALHOST:
raise Exception('Localhost storage backend cannot be used in {} mode'.format(mode))
return config_data
def default_storage_config(config_data=None, backend=None):
""" Function to load default storage config """
config_data = copy.deepcopy(config_data) or load_config()
if 'lithops' not in config_data or not config_data['lithops']:
config_data['lithops'] = {}
if 'storage' not in config_data['lithops']:
config_data['lithops']['storage'] = constants.STORAGE_BACKEND_DEFAULT
if backend:
config_data['lithops']['storage'] = backend
sb = config_data['lithops']['storage']
logger.debug("Loading Storage backend module: {}".format(sb))
sb_config = importlib.import_module('lithops.storage.backends.{}.config'.format(sb))
sb_config.load_config(config_data)
if 'storage_bucket' not in config_data['lithops']:
raise Exception("storage_bucket is mandatory in "
"lithops section of the configuration")
return config_data
def extract_storage_config(config):
storage_config = {}
sb = config['lithops']['storage']
storage_config['backend'] = sb
storage_config['bucket'] = config['lithops']['storage_bucket']
storage_config[sb] = config[sb]
storage_config[sb]['user_agent'] = 'lithops/{}'.format(__version__)
if 'storage_region' in config['lithops']:
storage_config[sb]['region'] = config['lithops']['storage_region']
return storage_config
def extract_localhost_config(config):
localhost_config = config[constants.LOCALHOST].copy()
return localhost_config
def extract_serverless_config(config):
sl_config = config[constants.SERVERLESS].copy()
sb = config['lithops']['backend']
sl_config['backend'] = sb
sl_config[sb] = config[sb] if sb in config and config[sb] else {}
sl_config[sb]['user_agent'] = 'lithops/{}'.format(__version__)
return sl_config
def extract_standalone_config(config):
sa_config = config[constants.STANDALONE].copy()
sb = config['lithops']['backend']
sa_config[sb] = config[sb] if sb in config and config[sb] else {}
sa_config[sb]['runtime'] = sa_config['runtime']
sa_config[sb]['user_agent'] = 'lithops/{}'.format(__version__)
return sa_config
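if __name__ == '__main__':
    # Hedged usage sketch (added for illustration, not part of the original
    # module; assumes the lithops package and PyYAML are installed, and uses
    # the hypothetical path /tmp/lithops_config_example.yaml):
    # resolve the effective logging settings, then round-trip a minimal config
    # through the YAML helpers defined above.
    print(get_log_info())
    dump_yaml_config('/tmp/lithops_config_example.yaml',
                     {'lithops': {'backend': 'localhost', 'storage': 'localhost'}})
    print(load_yaml_config('/tmp/lithops_config_example.yaml'))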
|
[
"josep.sampe@gmail.com"
] |
josep.sampe@gmail.com
|
438f66724fca30c63a1c3a9e16e4d7e4d1621a92
|
533ec52d57d4ea55307ec6245a31acf91b76240f
|
/telegrambot/simple.py
|
8649c9ffa3c6cad24a9d864a2e37aafd9b4e65d9
|
[] |
no_license
|
priteshgudge/telegrambot
|
d10759437b497f8e507189c73db07e4fbe91764b
|
91860c0fef060d821d282c513436beee60857dc7
|
refs/heads/master
| 2020-06-13T02:30:14.029408
| 2016-12-03T08:16:41
| 2016-12-03T08:16:41
| 75,461,377
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 360
|
py
|
import telebot
TOKEN = "298441962:AAES7xnrWjy2eppOMX5YvfDd1flXSOkxJ6M"
bot = telebot.TeleBot(TOKEN)
@bot.message_handler(commands=['start', 'help'])
def send_welcome(message):
bot.reply_to(message, "Howdy, how are you doing?")
@bot.message_handler(func=lambda message: True)
def echo_all(message):
bot.reply_to(message, message.text)
bot.polling()
|
[
"pritesh.gudge@agrostar.in"
] |
pritesh.gudge@agrostar.in
|
07363b4e0d5d306973bb0914451ce518775d9dad
|
892238666cc505d563d3f716876b5f192787fb1e
|
/Others/movefiles.py
|
d1908fc5a7f098b3e9f89c02de2ee47fd84b3936
|
[] |
no_license
|
LucasWangZH/Auxiliary_Scripts
|
c0055c77d8e89e9a4841a53133da4629919fe532
|
b3ba1591c94414158bbf8c69ca87d79145a80f5b
|
refs/heads/master
| 2020-09-14T11:01:02.790994
| 2019-11-21T07:16:29
| 2019-11-21T07:16:29
| 223,109,621
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 567
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 14 11:06:59 2019
@author: slkj
"""
import os, random, shutil
def movefile(dir):
pathDir = os.listdir(dir)
    # num is the number of images to pick at random
num = 130
sample = random.sample(pathDir,num)
print(sample)
for name in sample:
        # to copy instead of moving, change shutil.move to shutil.copyfile
shutil.move(fileDir+name, tarDir+name)
if __name__ == '__main__':
fileDir = "D:/project/openposelab/rawimages_train/"
tarDir = "D:/project/openposelab/rawimages_test/"
movefile(fileDir)
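# Hedged variant sketch (added for illustration, not part of the original
# script): copying instead of moving, as the comment inside movefile()
# suggests, using shutil.copyfile and os.path.join instead of string
# concatenation.
def copy_random_files(src_dir, dst_dir, num=130):
    for name in random.sample(os.listdir(src_dir), num):
        shutil.copyfile(os.path.join(src_dir, name), os.path.join(dst_dir, name))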
|
[
"wangzh@deepblueai.com"
] |
wangzh@deepblueai.com
|
e8ad1e80765394c19fc8f08d99cbdd4bb83036fe
|
d2fc3b933468f462153d2390724d7a134f5bd0d1
|
/Service_2/application/routes.py
|
a8da1fb6a3295fc9259da52f950e1413b549737f
|
[] |
no_license
|
Shana12345/LatestDocker
|
2350f74efe6bf68521a19ae998ddd33b9bd1711e
|
dfb789ffc95a5ee783feb8bf1656894ce801273f
|
refs/heads/master
| 2023-04-04T08:52:15.818331
| 2020-11-15T14:01:01
| 2020-11-15T14:01:01
| 254,642,472
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 202
|
py
|
from application import app
import random
@app.route('/randomFS', methods=['GET'])
def beginning():
list = ['Riley','Jordan','Angel','Spencer','Hayden','Phoenix']
return list[random.randrange(6)]
|
[
"shana_charlery@msn.com"
] |
shana_charlery@msn.com
|
566c6a43df5d4c9261bf3f643a8bf8c76be190a1
|
7aff02a15c6309f0e6938109be78062eded5c0ec
|
/fetch_viruses/merge_viruses.py
|
1a887211cd25f140f6a4cfdab41dfa1350297d5e
|
[] |
no_license
|
sridhar0605/picobio
|
1ee4373f56155aaa6f5d6e7d3c90f8d9b4039197
|
ba6ac0bf1112edd32ef5c7cb28d409c3c11dc5d6
|
refs/heads/master
| 2021-01-20T06:18:02.329566
| 2017-02-09T18:05:37
| 2017-02-09T18:05:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,227
|
py
|
# Prepares merged FASTA files to use for BLAST databases
#
# v000 - proteins only
# v001 - date based filename
# - CDS nuc sequences too
# v002 - Use BLAST friendly names
# v003 - multiple sets of viruses
# v004 - fixed missing | in fna names
# v005 - Handle feature extraction via Biopython
# - Tested under Python 3
import os
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
date_stamp = "20131114"
tables = {"NC_008956": 1, "NC_008954": 1, "NC_008949": 1, "NC_008948": 1,
          "NC_011452": 1}
def dedup(input_fasta, output_fasta):
"""Merge identical FASTA entries using NCBI NR Ctrl+A style"""
# Try it in memory...
print("Deduplicating %s..." % input_fasta)
by_seq = dict()
total = 0
for record in SeqIO.parse(input_fasta, "fasta"):
total += 1
s = str(record.seq).upper()
t = record.description
try:
by_seq[s].append(t)
except KeyError:
by_seq[s] = [t]
print("Reduced %s from %i to %i records as %s (%0.1f%%)"
% (input_fasta, total, len(by_seq), output_fasta,
len(by_seq) * 100.0 / float(total)))
handle = open(output_fasta, "w")
for s in by_seq:
titles = by_seq[s]
# chr(1) = CTRL+A
handle.write(">%s\n%s\n" % (chr(1).join(sorted(titles)), s))
handle.close()
def get_nuc(seq, loc_string):
reverse = False
if loc_string.startswith("complement("):
assert loc_string[-1] == ")"
loc_string = loc_string[11:-1]
reverse = True
start, end = [int(x.strip("<").strip(">")) for x in loc_string.split("..")]
nuc = seq[start - 1:end]
if reverse:
return nuc.reverse_complement()
else:
return nuc
def make_db(fasta, protein=False):
stem = os.path.splitext(fasta)[0]
if protein:
t = "prot"
if os.path.isfile(stem + ".pin"):
return
else:
t = "nucl"
if os.path.isfile(stem + ".nin"):
return
cmd = "makeblastdb -in %s -dbtype %s -out %s -parse_seqids" % (
fasta, t, stem)
print(cmd)
rc = os.system(cmd)
if rc:
raise RuntimeError("Return code %i from:\n%s" % (rc, cmd))
def make_merged_genomes(genomes_file, names):
count = 0
handle = open(genomes_file, "w")
for name in names:
acc = (name + ".").split(".")[0]
record = SeqIO.read("GenBank/%s.gbk" % acc, "gb")
gi = record.annotations["gi"]
# Convert to NCBI style FASTA identifier...
record.id = "gi|%s|ref|%s" % (gi, record.id)
count += SeqIO.write(record, handle, "fasta")
handle.close()
return count
for group in ["dsDnaViruses",
"ssDnaViruses",
"dsRnaViruses",
"ssRnaViruses",
"allViruses"]:
print("=" * len(group))
print(group)
print("=" * len(group))
names = open("GenBank/%s.txt" % group).read().split("\n")
genomes_file = "%s_%s_genomes.fna" % (group, date_stamp)
genomes_nr = "%s_%s_genomes_NR.fna" % (group, date_stamp)
protein_file = "%s_%s_proteins.faa" % (group, date_stamp)
protein_nr = "%s_%s_proteins_NR.faa" % (group, date_stamp)
nuc_file = "%s_%s_genes.ffn" % (group, date_stamp)
nuc_nr = "%s_%s_genes_NR.ffn" % (group, date_stamp)
print("Looking at %i %s" % (len(names), group))
if os.path.isfile(genomes_file):
print("Got %s" % genomes_file)
else:
print("Writing %s..." % genomes_file)
count = make_merged_genomes(genomes_file, names)
print("%i records in %s" % (count, genomes_file))
if os.path.isfile(genomes_nr):
print("Got %s" % genomes_nr)
else:
dedup(genomes_file, genomes_nr)
make_db(genomes_nr, protein=False)
if os.path.isfile(protein_file):
print("Got %s" % protein_file)
else:
handle = open(protein_file, "w")
bad = 0
count = 0
for index, name in enumerate(names):
name = name.split(".", 1)[0]
filename = "GenBank/%s.gbk" % name
parent = None
for record in SeqIO.parse(open(filename), "genbank-cds"):
if "pseudo" in record.annotations:
continue
if "pseudogene" in record.annotations:
continue
count += 1
try:
protein_id = record.annotations["protein_id"]
except KeyError:
print(filename)
print(record)
assert False
gi = None
for xref in record.dbxrefs:
if xref.lower().startswith("gi:"):
gi = xref[3:]
break
assert gi and protein_id, str(record)
record.id = "gi|%s|ref|%s" % (gi, record.id)
if record.description == "<unknown description>":
if "product" in record.annotations:
record.description = record.annotations["product"]
elif "note" in record.annotations:
record.description = record.annotations["note"]
if record.seq is None:
bad += 1
print("%s %s" %
(filename, record.annotations["raw_location"]))
if parent is None:
parent = SeqIO.read(open(filename), "gb")
nuc = get_nuc(parent.seq, record.annotations[
"raw_location"])
if "transl_table" in record.annotations:
table = int(record.annotations["transl_table"])
else:
table = tables[name]
pro = nuc.translate(table)
assert pro.endswith("*") and pro.count("*") == 1
record.seq = pro[:-1] # remove stop
SeqIO.write([record], handle, "fasta")
# print("%i: %i in %s" % (index+1, count, name))
handle.close()
print("Done")
print("%i proteins" % count)
print("%i missing provided translation" % bad)
if os.path.isfile(protein_nr):
print("Got %s" % protein_nr)
else:
dedup(protein_file, protein_nr)
make_db(protein_nr, protein=True)
if os.path.isfile(nuc_file):
print("Got %s" % nuc_file)
else:
handle = open(nuc_file, "w")
count = 0
for index, name in enumerate(names):
name = name.split(".", 1)[0]
filename = "GenBank/%s.gbk" % name
# print(name)
parent = SeqIO.read(open(filename), "genbank")
for f in parent.features:
if f.type != "CDS":
continue
if "pseudo" in f.qualifiers:
continue
if "pseudogene" in f.qualifiers:
continue
nuc = f.extract(parent.seq)
protein_id = f.qualifiers["protein_id"][0]
gi = None
pro = nuc.translate(tables.get(name, 1))
if not (pro.endswith("*") and pro.count("*") == 1):
print("%s %s lacks stop codon" % (name, protein_id))
for xref in f.qualifiers["db_xref"]:
if xref.lower().startswith("gi:"):
gi = xref[3:]
break
if not (gi and protein_id):
print(f)
assert False
# Bit of a hack, we are using the protein's ID here!
record = SeqRecord(nuc, id="gi|%s|ref|%s" % (gi, protein_id),
description="; ".join(f.qualifiers.get("note", [])))
SeqIO.write([record], handle, "fasta")
count += 1
# print "%i: %i in %s" % (index+1, count, name)
handle.close()
print("Done")
print("%i genes" % count)
if os.path.isfile(nuc_nr):
print("Got %s" % nuc_nr)
else:
dedup(nuc_file, nuc_nr)
make_db(nuc_nr, protein=False)
|
[
"p.j.a.cock@googlemail.com"
] |
p.j.a.cock@googlemail.com
|
153deac2f217521b1526f12edc90ff24f03b488f
|
49aa1aa5eeb2a49d2f38a5c53344107eb339d606
|
/oddeven.py
|
ce9521e28d2212114745759842a2ce1ba2c67f5d
|
[
"MIT"
] |
permissive
|
ISE2012/ch5
|
7394fd5e6579a7ed22be4efa9bc960ccb70a1b56
|
cce470d418419bff0c9d37233755ee39e1362a27
|
refs/heads/main
| 2022-12-24T22:49:34.660291
| 2020-10-14T09:55:31
| 2020-10-14T09:55:31
| 303,972,031
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 300
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 14 07:14:51 2020
@author: ucobiz
"""
for num in range(1,11):
print(num)
if (num % 2): # will be 1 (True)
print("The number", num ,"is odd")
else: # divides cleanly into 2
print("The number", num,"is even")
|
[
"noreply@github.com"
] |
ISE2012.noreply@github.com
|
b381fca8415ae5b9a92e8edaee8fe9658b27599c
|
1af36fcd4ac2a303870f9602ba980abd58830be8
|
/user.py
|
5bcfb684ea6faed7b596d24cd4a9d889daa99ab7
|
[] |
no_license
|
arash-rasouli/SAD_Final_CA5
|
355b4cfa8f029fa5befb0b2aab28a0d78ab6e2cd
|
7075c75a5a8342c728df4c232f9a5db1ca5e88b9
|
refs/heads/master
| 2023-06-16T04:22:41.181240
| 2021-06-25T15:08:39
| 2021-06-25T15:08:39
| 380,273,703
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
from datetime import *
class User:
def __init__(self, userId, name):
self.name = name
self.userId = userId
|
[
"arash3908@gmail.com"
] |
arash3908@gmail.com
|
2c3fd44d91c0bcc6b6c08ec140620817fb6b324f
|
7cb20a102caca2d87395978d3daf2af19facfd8c
|
/SapFlask/models.py
|
cb3e1ba980b45ee6bee155fcd9d49255ed18477e
|
[] |
no_license
|
mecomontes/Flask
|
8426216fc0eccf55393f70779f0dcf8f5c940384
|
214b5f9592d0254b631cc51dd3e82a074b72b5ef
|
refs/heads/main
| 2023-06-18T09:26:15.466768
| 2021-07-15T22:32:19
| 2021-07-15T22:32:19
| 386,441,127
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 428
|
py
|
from app import db
class Persona(db.Model):
id = db.Column(db.Integer, primary_key=True)
nombre = db.Column(db.String(250))
apellido = db.Column(db.String(250))
email = db.Column(db.String(250))
def __str__(self):
return (
f'Id: {self.id}, '
f'Nombre: {self.nombre}, '
f'Apellido: {self.apellido}, '
f'Email: {self.email}'
)
|
[
"1574@holbertonschool.com"
] |
1574@holbertonschool.com
|
8a36a2e30de9202bf868ea60648ddcb795903c12
|
82bb063c2b5e70b0e2ab308b6446822352fadd74
|
/test/Akuna1.py
|
6d947efb21f029c35464f62b051b55e1990652fd
|
[] |
no_license
|
yaolinxia/leetcode_study
|
dfc0ff04eff0a4c2fd6efb20bf3525c7397461db
|
c2250f2c7365976a6767e3c12760474f7a6618eb
|
refs/heads/master
| 2021-04-03T08:32:10.782215
| 2020-09-10T23:39:39
| 2020-09-10T23:39:39
| 124,657,229
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,250
|
py
|
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
def find_minima(n, m):
# print(m[0][1])
# print(len(m[0]))
# row = len(m)
# col = len(m[0])
l = []
for i in range(0, n):
for j in range(0, n):
            # consider the four corner cells separately (superseded by the general condition below)
# if i == 0 and j == 0:
# if m[i][j] < m[i+1][j] and m[i][j]<m[i][j+1]:
# l.append(m[i][j])
# if i == n-1 and j == 0:
# if m[i][j] < m[i-1][j] and m[i][j]<m[i][j+1]:
# l.append(m[i][j])
# if i == 0 and j == n-1:
# if m[i][j] < m[i+1][j] and m[i][j]<m[i][j-1]:
# l.append(m[i][j])
# if i == n-1 and j == n-1:
# if m[i][j] < m[i-1][j] and m[i][j]<m[i][j-1]:
# l.append(m[i][j])
# if i == 0 and j != 0:
if (i==0 or m[i][j]<m[i-1][j]) \
and (i+1>n-1 or m[i][j] < m[i+1][j]) \
and (j==0 or m[i][j]<m[i][j-1]) \
and (j+1>n-1 or m[i][j] < m[i][j+1]):
l.append(float(m[i][j]))
l.sort()
print(l)
if __name__ == '__main__':
n = 3
m = [[1,2,3],
[4,1,6],
[7,0,9]]
find_minima(n, m)
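# Hedged extra check (added for illustration, not part of the original file):
# in this second matrix only the centre value 1 is smaller than all of its
# in-bounds neighbours, so find_minima prints [1.0].
if __name__ == '__main__':
    find_minima(3, [[5, 4, 3],
                    [6, 1, 2],
                    [7, 8, 9]])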
|
[
"18860976931@163.com"
] |
18860976931@163.com
|
4e05a75483d1f170b9cea614ab19f774a8429930
|
305237c7969ddaa3c45fc155e61f976eb7cac167
|
/docs/conf.py
|
339b1e68d5af6549d6ed7b48fb0e975ba24b9088
|
[
"MIT"
] |
permissive
|
yuyang0809/led_tester
|
e6c437f76f9d1fca93e6532bb07c8f6ba4568137
|
b5a3c4e86a370ac71606f7aaecdea8fd6d5d7c64
|
refs/heads/master
| 2022-07-31T06:52:50.119277
| 2018-03-09T17:43:02
| 2018-03-09T17:43:02
| 123,455,000
| 0
| 0
|
MIT
| 2021-06-01T21:57:14
| 2018-03-01T15:37:53
|
Python
|
UTF-8
|
Python
| false
| false
| 9,146
|
py
|
# -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import inspect
import shutil
__location__ = os.path.join(os.getcwd(), os.path.dirname(
inspect.getfile(inspect.currentframe())))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.join(__location__, '../src'))
# -- Run sphinx-apidoc ------------------------------------------------------
# This hack is necessary since RTD does not issue `sphinx-apidoc` before running
# `sphinx-build -b html . _build/html`. See Issue:
# https://github.com/rtfd/readthedocs.org/issues/1139
# DON'T FORGET: Check the box "Install your project inside a virtualenv using
# setup.py install" in the RTD Advanced Settings.
# Additionally it helps us to avoid running apidoc manually
try: # for Sphinx >= 1.7
from sphinx.ext import apidoc
except ImportError:
from sphinx import apidoc
output_dir = os.path.join(__location__, "api")
module_dir = os.path.join(__location__, "../src/assignment3_led")
try:
shutil.rmtree(output_dir)
except FileNotFoundError:
pass
try:
import sphinx
from distutils.version import LooseVersion
cmd_line_template = "sphinx-apidoc -f -o {outputdir} {moduledir}"
cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir)
args = cmd_line.split(" ")
if LooseVersion(sphinx.__version__) >= LooseVersion('1.7'):
args = args[1:]
apidoc.main(args)
except Exception as e:
print("Running `sphinx-apidoc` failed!\n{}".format(e))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.autosummary', 'sphinx.ext.viewcode', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'sphinx.ext.ifconfig', 'sphinx.ext.mathjax',
'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'assignment3_LED'
copyright = u'2018, yuyang0809'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '' # Is set by calling `setup.py docs`
# The full version, including alpha/beta/rc tags.
release = '' # Is set by calling `setup.py docs`
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
try:
from assignment3_led import __version__ as version
except ImportError:
pass
else:
release = version
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'assignment3_led-doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'user_guide.tex', u'assignment3_LED Documentation',
u'yuyang0809', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = ""
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- External mapping ------------------------------------------------------------
python_version = '.'.join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
'sphinx': ('http://www.sphinx-doc.org/en/stable', None),
'python': ('https://docs.python.org/' + python_version, None),
'matplotlib': ('https://matplotlib.org', None),
'numpy': ('https://docs.scipy.org/doc/numpy', None),
'sklearn': ('http://scikit-learn.org/stable', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
}
|
[
"yu.yang@ucdconnect.ie"
] |
yu.yang@ucdconnect.ie
|
82d4033851b3c6f9e9fdb3f26a0dc8847a06ff54
|
1feb1c79c7f05f3ec040eed1415beccf90435299
|
/Stats_Analysis/additional_analysis/bin_FSM_IDACI/helper_files.py
|
200376fb3f6e2eab5d8312dbab3b411fe98f9b7d
|
[] |
no_license
|
KeepFloyding/IP-research
|
5221b95f56d91472817568b15cdc7bb294647bc4
|
8b9ab874141b560c24ad56bd83e34b1a86020d31
|
refs/heads/master
| 2021-09-05T01:55:55.614248
| 2018-01-23T16:13:44
| 2018-01-23T16:13:44
| 110,840,267
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,896
|
py
|
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
def test_correlation(df, x_array, y_array, corrType):
store_array = []
for item_x in x_array:
for item_y in y_array:
score, pval = corrType(df[item_x],df[item_y])
store_array.append([item_x, item_y, score,pval])
df = pd.DataFrame(store_array,columns=['x','y','Correlation','P Value'])
return df
# Function for univariate linear regression
# Inputs
# df: dataframe of values
# x_array: key names for dataframe
# y_array: key names for dataframe
# Outputs
# df_out: dataframe of key results
# fig: multiple subplots
def checkLinearFit(df,x_array,y_array,figsize=(10,10),n_cols=2,n_rows=2,alpha=0.2,log=[0,0]):
# Creating storage arrays
x_choose = []
y_choose = []
coef_array = [];
intercept_array = [];
R_array = []
plt.subplots(n_rows,n_cols,figsize=figsize)
# Cycling through each dependent variable
count = 1;
for item_x in x_array:
for item_y in y_array:
if item_y != item_x:
# Grabbing X and Y values
if log[0]=='yes':
X = np.log(df[item_x].values.reshape(-1,1) + 1)
else:
X = df[item_x].values.reshape(-1,1)
if log[1] == 'yes':
Y = np.log(df[item_y].values.reshape(-1,1) + 1)
else:
Y = df[item_y].values.reshape(-1,1)
# Training the model
clf = LinearRegression()
clf.fit(X,Y)
Y_pred = clf.predict(X)
# Storing important values in a dataframe
x_choose.append(item_x)
y_choose.append(item_y)
coef_array.append(clf.coef_[0][0])
intercept_array.append(clf.intercept_[0])
R_array.append(r2_score(Y, Y_pred))
# Plotting results
plt.subplot(n_rows,n_cols,count)
plt.scatter(X,Y,alpha=alpha)
plt.xlabel(item_x)
plt.ylabel(item_y)
plt.plot(X,Y_pred)
count += 1
# Storing results in a dataframe
df_out = pd.DataFrame({'X':x_choose,'Y':y_choose,'Coef':coef_array,'intercept':intercept_array,'R^2':R_array})
return df_out
# Functions that help with classifiers
"""
General function to test any classifier
Inputs include:
- X: values of a dataframe (should be scaled)
- y: list of labels
- clf_class: machine learning classifier
- n_fold: Number of kfolds to iterate over
- **kwargs: Number of additional parameters that are passed to the classifier
    Outputs include:
        - df_score: dataframe containing accuracy, precision and recall for each KFold iteration
        - y_checks: array of cross-validated predictions for X, assembled from the test folds
        - clf: the classifier fitted on the final fold
"""
from sklearn.metrics import recall_score, precision_score, accuracy_score
from sklearn.model_selection import KFold, train_test_split
def test_classifier(X,y, clf_class,n_fold,**kwargs):
# Construct a kfolds object
kf = KFold(n_splits=n_fold,shuffle=True)
y_checks = np.zeros(len(y))
# Iterate through folds
score = [];
for train_index, test_index in kf.split(y):
# Training classifier
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
clf = clf_class(**kwargs)
clf.fit(X_train,y_train)
# Predicting values and testing
y_pred = clf.predict(X_test)
score.append([accuracy_score(y_test, y_pred),precision_score(y_test, y_pred),
recall_score(y_test,y_pred)])
# Predicted values from cross-validation
y_checks[test_index] = y_pred
df_score = pd.DataFrame(score, columns=['Accuracy', 'Precision','Recall'])
df_score.loc['mean'] = df_score.mean()
return df_score, y_checks, clf
"""
Function that trains a random forest classifier and returns the feature importance
Inputs are:
- X, y input variables
- n_estimators: number of trees for random forest classifier
- keys: feature labels
"""
from sklearn.ensemble import RandomForestClassifier as RF
def return_feature_importance(X,y,keys,n_estimators = 100):
# Using the random forest classifier, find out what are the main features that predict whether a user is likely to churn or not
randomForest = RF(n_estimators)
randomForest.fit(X,y)
importances = randomForest.feature_importances_
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(X.shape[1]):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]),keys[indices[f]])
# ----------------------------------------------------------------------------------------------------
# Binning and sensitivity test
# ----------------------------------------------------------------------------------------------------
from sklearn.feature_selection import f_classif
# Bin the schools into seperate categories
def bin_groups(df, feature, bins, group_names):
categories = pd.cut(df[feature],bins, labels=group_names)
return categories
# Sensitivity test
def sensitivity_on_bin(df, feature_to_bin, features_to_evaluate, bins, group_names, cut_off_array):
store= [];
group_0 = group_names[0]
group_1 = group_names[1]
group_2 = group_names[2]
for item in cut_off_array:
# Updating bins
bins[2] = item
# Binning the groups
df_test = df
df_test['categories'] = bin_groups(df_test,feature_to_bin, bins,group_names)
num_active = df_test['categories'].value_counts().loc[group_2]
df_test = df_test[df_test['categories']!= group_1]
# Performing an ANOVA test
X = df_test[features_to_evaluate]
y = [1 if item == group_2 else 0 for item in df_test['categories']]
F, pval = f_classif(X, y)
# Determining difference in means between active and inactive groups
mu_active_inactive = df_test[df_test['categories']==group_2].filter(regex='exam_improv').mean() - df_test[df_test['categories']==group_0][features_to_evaluate].mean()
# Storing the array as a Dataframe
df_score = pd.DataFrame({'Key':X.keys(),'F score':F,'p values':pval,'Cut off':np.ones(len(X.keys()))*item, 'Num active':num_active, 'Group_2 - Group_0':mu_active_inactive[X.keys()]})
store.append(df_score)
df_score = pd.concat(store)
return df_score
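if __name__ == '__main__':
    # Hedged usage sketch (added for illustration, not part of the original
    # module): exercise test_classifier() on a small synthetic binary problem
    # using the RandomForestClassifier imported above as RF.
    rng = np.random.RandomState(0)
    X_demo = rng.rand(100, 4)
    y_demo = (X_demo[:, 0] > 0.5).astype(int)
    scores, y_cv, fitted = test_classifier(X_demo, y_demo, RF, n_fold=5, n_estimators=20)
    print(scores)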
|
[
"andris.piebalgs@outlook.com"
] |
andris.piebalgs@outlook.com
|
143f894e3dc1b6349d616866a30f02042fedef89
|
1906c642d582a2a43a9627e9d46fa93c5bfd3e52
|
/data_generator.py
|
3a7e75b0486f58d7bd774fea80b3cf372a878d8f
|
[] |
no_license
|
conwayok/SelfHealth
|
c2af0c9f52231517716282b9c587b571be726f09
|
b7d964155ba390549310a79a27bf14da966e0f1d
|
refs/heads/master
| 2021-10-09T12:25:19.154736
| 2018-12-28T06:27:40
| 2018-12-28T06:27:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 801
|
py
|
import datetime
import random
days_to_generate = 3650
today = datetime.datetime.now().date()
file = open('selfHealth_%s.json' % today, 'w+')
file.write('[')
for i in range(days_to_generate):
file.write('{')
file.write('"dataDate":"%s"' % today)
file.write(',')
file.write('"hoursOfSleep":"%s"' % str(random.randint(2, 10)))
file.write(',')
file.write('"hoursPhoneUse":"%s"' % str(random.randint(1, 6)))
file.write(',')
file.write('"steps":"%s"' % str(random.randint(1000, 20000)))
file.write(',')
file.write('"userId":"id"')
file.write(',')
file.write('"waterCC":"%s"' % str(random.randint(500, 10000)))
file.write('}')
if i != days_to_generate - 1:
file.write(',')
today -= datetime.timedelta(days=1)
file.write(']')
file.close()
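# Hedged alternative sketch (added for illustration, not part of the original
# script): the same record shape built with the json module, which avoids
# hand-writing separators; the output file name below is an assumption.
import json
records = []
day = datetime.datetime.now().date()
for _ in range(3):
    records.append({
        "dataDate": str(day),
        "hoursOfSleep": str(random.randint(2, 10)),
        "hoursPhoneUse": str(random.randint(1, 6)),
        "steps": str(random.randint(1000, 20000)),
        "userId": "id",
        "waterCC": str(random.randint(500, 10000)),
    })
    day -= datetime.timedelta(days=1)
with open('selfHealth_example.json', 'w') as f:
    json.dump(records, f)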
|
[
"kable2580@gmail.com"
] |
kable2580@gmail.com
|
de4f12747776cd7e0245e3b437a89aeec88d6a30
|
d92330be8ea281bdfefff5d17039b1a6d44057dc
|
/src/stiamro/google/analytics/browser.py
|
c692f160eb0d72abbc4950e0171977d888ce7702
|
[] |
no_license
|
avoinea/stiamro
|
2af6f2329abafb59b7e6b54abacb95c8f6b3d697
|
5ee6ec3b754a776cb87a9fa452e21cb2afbf38f9
|
refs/heads/master
| 2021-01-18T14:38:49.669592
| 2011-03-11T10:39:44
| 2011-03-11T10:39:44
| 1,397,488
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,893
|
py
|
""" Analytics browser pages
"""
import operator
import logging
from zope.component import getUtility, queryUtility
from content import AnalyticsReport
from interfaces import IAnalytics, IGoogleAnalyticsConnection, IAnalyticsReport, IXMLParser
from zope.publisher.browser import BrowserPage
logger = logging.getLogger('stiamro.google')
#
# Register service
#
class AnalyticsRegisterPage(BrowserPage):
""" Register token
"""
def _redirect(self, msg=""):
return msg
def __call__(self, **kwargs):
if self.request:
kwargs.update(self.request.form)
token = kwargs.get('token', '') or ''
utility = getUtility(IGoogleAnalyticsConnection)
        # Reset token
if not token:
conn = utility(self.context.token)
response = conn.request(scope='/accounts/AuthSubRevokeToken')
self.context._token = token
if response:
return self._redirect('Token unregistered successfully')
else:
return self._redirect('Token removed, but you have to manually unregister it at '
'https://www.google.com/accounts/IssuedAuthSubTokens')
# Update token
conn = utility(token)
# Replace single call token with a session one
token = conn.token2session()
if not token:
return self._redirect(("An error occured during registration process. "
"Please check the log file"))
self.context._token = token
return self._redirect('Successfully registered with Google.')
class AnalyticsViewPage(BrowserPage):
""" View Google Analytics connection information
"""
@property
def status_error(self):
if not self.context.token:
return {
'status': 404,
'error': 'Not initialized'
}
utility = getUtility(IGoogleAnalyticsConnection)
conn = utility(self.context.token)
status, error = conn.status
return {
'status': status,
'error': error
}
def __call__(self, **kwargs):
return self.index()
class ReportViewPage(BrowserPage):
""" Index xml
"""
def __init__(self, context, request):
self.context = context
self.request = request
util = queryUtility(IAnalytics)
self.token = util and util.token or ''
def error_xml(self, query):
res = ['<?xml version="1.0" ?>']
res.append('<error>')
res.append('<query><![CDATA[%s]]></query>' % query)
res.append('</error>')
return '\n'.join(res)
def xml(self, **kwargs):
if self.request:
kwargs.update(self.request.form)
scope = '/analytics/feeds/data'
dimensions = ','.join(self.context.dimensions)
metrics = ','.join(self.context.metrics)
query = {
'ids': self.context.table,
'dimensions': dimensions,
'metrics': metrics,
'filters': self.context.filters,
'sort': self.context.sort,
'start-date': str(self.context.start_date),
'end-date': str(self.context.end_date),
'start-index': self.context.start_index,
'max-results': self.context.max_results,
}
# Filter None parameters
query = dict((key, value) for key, value in query.items() if value)
utility = getUtility(IGoogleAnalyticsConnection)
conn = utility(self.token)
response = conn.request(scope=scope, data=query, method='GET')
content_type = kwargs.get('content_type', 'text/xml')
if self.request and content_type:
self.request.response.setHeader('content-type', content_type)
if not response:
return self.error_xml(query)
return response.read()
def table(self, **kwargs):
""" Return a table generator
"""
parser = getUtility(IXMLParser)
return parser(self.xml(content_type=None))
def brains(self):
brains = {}
        for dimensions, metrics in self.table():
            path = dimensions.get('ga:pagePath', '')
views = metrics.get('ga:pageviews', 0)
if not (path or views):
continue
if path.startswith('/ajax'):
path = path.replace('/ajax', '', 1)
elif path.startswith('/exit'):
path = path.replace('/exit', '', 1)
brains.setdefault(path, 0)
brains[path] += int(views)
items = brains.items()
items.sort(key=operator.itemgetter(1), reverse=True)
for path, count in items:
yield path, count
def __call__(self, **kwargs):
for brain, count in self.brains():
print brain, count
return 'Done'
|
[
"alin@serenity.(none)"
] |
alin@serenity.(none)
|
487e0c33c65897ca4b50123a2888585b84e4b51b
|
20e97864e2330c386a8385bd12982cddb140a55d
|
/Motivator/main/urls.py
|
e1e7a1b81eaa39742a8b504b2a629f9bc5965f20
|
[] |
no_license
|
nnbzh/Motivator
|
6624d75c281118bfc23f8a5ed5350d23c8149a83
|
86a4bb4f08524ef54ef4d3f88935b46e3c816e58
|
refs/heads/main
| 2023-04-30T04:05:21.472519
| 2021-05-13T14:55:52
| 2021-05-13T14:55:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 538
|
py
|
from django.urls import path, include
from rest_framework_jwt.views import obtain_jwt_token
from main.views import ProfileViewSet
urlpatterns = [
path('profile/', ProfileViewSet.as_view({'get': 'list', 'put': 'nullify'})),
path('profile/<int:pk>/', ProfileViewSet.as_view({'get': 'profile_detail',
'put': 'update',
'delete': 'destroy'})),
path('profile/myprofile/', ProfileViewSet.as_view({'get': 'my_profile'}))
]
|
[
"zhumanrakhat01@gmail,com"
] |
zhumanrakhat01@gmail,com
|
09fde6504d7896ffd9a4db03c813edbf6647aac8
|
eb5783ab8a9fe673abe1caf66e7e55908d8c53fc
|
/image_holder.py
|
c60f1e2b3f306b93b9da5970853c662102cc26b6
|
[] |
no_license
|
romka-u/space-gravity
|
704bb4195cfdb6700d8948e7ad0a53008895b34e
|
2a30e24d8843e0a0fb2a82cd0a7af2d83ba1d06e
|
refs/heads/master
| 2021-01-23T15:42:38.361284
| 2012-08-13T08:48:34
| 2012-08-13T08:48:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 489
|
py
|
import pygame
import os
class ImageHolder(object):
def __init__(self):
self.spaceship = {"red": self.load("my_spaceship_red.png"),
"blue": self.load("my_spaceship_blue.png")}
planets_names = ["planet2.gif", "planet6.png",
"planet3.gif", "planet4.gif", "planet5.png"]
self.planets = [self.load(pl) for pl in planets_names]
def load(self, filename):
return pygame.image.load(os.path.join("img", filename))
|
[
"mailromka@gmail.com"
] |
mailromka@gmail.com
|
43ac35f1a33abcfc28501852aea767ff85e56cbf
|
c7800138a8486bfc76243e5c26d40fdc6980fa24
|
/rnns/mlp_dist/rnn_forecast.py
|
d1bb7668ed9637c84716d0ad7672cb09a1fed475
|
[] |
no_license
|
mlap/neon4cast-aquatics
|
fac5a9a8fd8db36cec864e36fb364d37e057a41c
|
9fdd15a5e074bd91d56bbcdec88e734af599bb54
|
refs/heads/master
| 2023-06-23T23:12:06.128009
| 2021-07-21T16:18:41
| 2021-07-21T16:18:41
| 332,877,284
| 0
| 1
| null | 2021-07-23T15:31:20
| 2021-01-25T20:40:14
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,983
|
py
|
import torch
from sklearn.preprocessing import MinMaxScaler
from utils import *
import argparse
# Argument parsing block; to get help on this from the CLI run `python rnn_forecast.py -h`
parser = argparse.ArgumentParser()
parser.add_argument(
"--png-name",
type=str,
default="trash_forecast",
help="Name of the png that will be saved in `rnns/`",
)
parser.add_argument(
"--model-name",
type=str,
default="trash_model_dist",
help="Name of model to load from `models/`",
)
parser.add_argument(
"--start-date",
type=str,
default="2021-05-01",
help="Specify the YYYY-MM-DD that the forecast will start",
)
parser.add_argument(
"--end-date",
type=str,
default="2021-05-07",
help="Specify the YYYY-MM-DD that the forecast will end",
)
parser.add_argument(
"--predict-window",
type=int,
default=7,
help="How long of a forecast to make",
)
args = parser.parse_args()
def main():
params_etcs = load_etcs(args.model_name)
df = get_data(params_etcs["csv_name"])
variables = get_variables(params_etcs)
data = df[variables]
# Normalizing data to -1, 1 scale; this improves performance of neural nets
scaler = MinMaxScaler(feature_range=(-1, 1))
data_scaled = scaler.fit_transform(data)
condition_seq = create_sequence(
data_scaled[: -params_etcs["train_window"]],
params_etcs["train_window"],
)
# Indexing the appropriate data
evaluation_data = data_scaled[-params_etcs["train_window"] :]
# Evaluating the data
model = torch.load(f"models/{args.model_name}.pkl")
means, stds = evaluate(
evaluation_data, condition_seq, args, scaler, params_etcs, model
)
make_forecast(args, params_etcs, means, stds)
data_len = len(evaluation_data)
start_idx = data_len + 1
end_idx = data_len + 1 + args.predict_window
plot(evaluation_data, means, stds, args, params_etcs, start_idx, end_idx)
if __name__ == "__main__":
main()
|
[
"marcuslapeyrolerie@me.com"
] |
marcuslapeyrolerie@me.com
|
72eeb848f907ec2810966c49410de30a3fa39571
|
ce777e3af08dd7391ea9755647d5ad3b37ee5a0d
|
/kannada_digit_recognition.py
|
3bef5244144b5ae14aba47964bada2780f0da021
|
[] |
no_license
|
darshanja/kannada-MNIST
|
23c7d4be65baeac1fdf005fe113ed0eb79b0f69c
|
04cc3d3459782a1cd0df2a6f13fe070cb19f00d3
|
refs/heads/master
| 2021-01-04T00:26:21.896460
| 2020-02-13T16:04:32
| 2020-02-13T16:04:32
| 240,301,197
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,901
|
py
|
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
from keras.optimizers import Adam, RMSprop
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ReduceLROnPlateau
def init_model():
### Model Definition
model = Sequential()
model.add(Conv2D(filters=32, kernel_size=(5, 5), padding='Valid', activation='relu', input_shape=(28, 28, 1)))
model.add(Conv2D(filters=32, kernel_size=(3, 3), padding='Same', activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(filters=64, kernel_size=(5, 5), padding='Valid', activation='relu'))
model.add(Conv2D(filters=64, kernel_size=(3, 3), padding='Same', activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(519, activation="relu")) # [[521,0.9962,70],[519,0.9969,51]
model.add(Dropout(0.5))
model.add(Dense(10, activation="softmax"))
model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=1e-3), metrics=["accuracy"])
annealer = ReduceLROnPlateau(monitor='val_acc', patience=1, verbose=2, factor=0.5, min_lr=0.0000001) #patience=2
datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=10,
zoom_range=0.1,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False) # , preprocessing_function=random_add_or_erase_spot)
return model, annealer, datagen
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import lib.model as md
from sklearn.model_selection import ShuffleSplit
from sklearn.metrics import accuracy_score, confusion_matrix
from keras.utils.np_utils import to_categorical
import time
start_time = time.time()
# Load the data
train = pd.read_csv("train.csv")
print(train.shape)
#Prepare dataset
y = train["label"]
X = train.drop("label", axis = 1)
print(y.value_counts().to_dict())
y = to_categorical(y, num_classes = 10)
del train
X = X / 255.0
X = X.values.reshape(-1,28,28,1)
# Shuffle Split Train and Test from original dataset
seed=2
train_index, valid_index = ShuffleSplit(n_splits=1,
train_size=0.9,
test_size=None,
random_state=seed).split(X).__next__()
x_train = X[train_index]
Y_train = y[train_index]
x_test = X[valid_index]
Y_test = y[valid_index]
# Parameters
epochs = 30
batch_size = 64
validation_steps = 10000
# initialize Model, Annealer and Datagen
model, annealer, datagen = md.init_model()
# Start training
train_generator = datagen.flow(x_train, Y_train, batch_size=batch_size)
test_generator = datagen.flow(x_test, Y_test, batch_size=batch_size)
history = model.fit_generator(train_generator,
steps_per_epoch=x_train.shape[0]//batch_size,
epochs=epochs,
validation_data=test_generator,
validation_steps=validation_steps//batch_size,
callbacks=[annealer])
score = model.evaluate(x_test, Y_test)
print('Test accuracy: ', score[1])
# Saving Model for future API
model.save('Digits-1.3.0.h5')
print("Saved model to disk")
# summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='lower right')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper right')
plt.show()
## Model predict
test = pd.read_csv("./input/test.csv")
print(test.shape)
test = test / 255
test = test.values.reshape(-1, 28, 28, 1)
p = np.argmax(model.predict(test), axis=1)
print('Base model scores:')
valid_loss, valid_acc = model.evaluate(x_test, Y_test, verbose=0)
valid_p = np.argmax(model.predict(x_test), axis=1)
target = np.argmax(Y_test, axis=1)
cm = confusion_matrix(target, valid_p)
print(cm)
## Preparing file for submission
submission = pd.DataFrame(pd.Series(range(1, p.shape[0] + 1), name='ImageId'))
submission['Label'] = p
filename="keras-cnn-{0}.csv".format(str(int(score[1]*10000)))
submission.to_csv(filename, index=False)
elapsed_time = time.time() - start_time
print("Elapsed time: {0}".format(time.strftime("%H:%M:%S", time.gmtime(elapsed_time))))
|
[
"noreply@github.com"
] |
darshanja.noreply@github.com
|
3a3541f8ed55a568db3eb577e181365595254391
|
a2262c5abcd242bbcc8b37f16c815950113123a8
|
/src/mi/client/modules/actions/installrpm.py
|
7eed798f8ecdf86b0654cac44ff4aa30ac6f1c3c
|
[] |
no_license
|
MagicGroup/magicinstaller2
|
9945db3a0dc18cec395fb493492c93a561ad181f
|
7c543ca38343c44179eeaf5eaaebe178248971a3
|
refs/heads/master
| 2021-10-27T22:01:54.728592
| 2013-06-06T07:19:11
| 2013-06-06T07:19:11
| 2,591,373
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,984
|
py
|
import os, time
from mi.client.utils import logger, CF, _
'''
We use callback methods to interact with tackactions.py (the GUI side of the install action):
self.cb0_insert_next_disc   installation moved past the current disc; ask the GUI to insert the next disc.
self.cb0_install_pkg_err    a package failed to install; ask the GUI whether to retry or abort.
self.cb0_install_pkg_end    a package finished installing; give the GUI its (disc No., package No., size) information.
self.cb0_install_end        all packages are installed; simply notify the GUI.
'''
class MiAction_InstallRpm():
def __init__(self, rootobj):
self.rootobj = rootobj
#self.actlist.append( (_('Install Package'), self.act_start_instpkg, self.act_end_instpkg) )
#(_('Setup Keyboard'), self.act_start_keyboard, None)]
self.add_action = self.rootobj.tm.add_action
self.cb0_insert_next_disc = self.rootobj.cb0_insert_next_disc
self.cb0_install_pkg_err = self.rootobj.cb0_install_pkg_err
self.cb0_install_pkg_end = self.rootobj.cb0_install_pkg_end
self.cb0_install_end = self.rootobj.cb0_install_end
self.cb0_fatal_err = self.rootobj.cb0_fatal_err
self.disc_first_pkgs = []
self.cur_disc_no = -1 # current installing disc number
self.totalsize = 0 # sum packages size.
self.totalpkg = 0 # packages count.
self.prepared = False
def prepare(self):
self.arch = self.rootobj.tm.actserver.get_arch()
logger.i('Detected Arch: %s\n' % str(self.arch))
self.archcompat_list = CF.G.arch_map[self.arch]
self._calc_instpkg_map()
self.prepared = True
def get_package_size(self):
if not self.prepared:
raise Exception('Not Prepared', 'Please call prepare first.')
return self.totalsize
def get_package_count(self):
if not self.prepared:
raise Exception('Not Prepared', 'Please call prepare first.')
return self.totalpkg
def _calc_instpkg_map(self):
'''
RPM: calculate self.totalpkg, self.totalsize
'''
archsize_map = CF.G.archsize_map
pkgpos_map = CF.G.pkgpos_map
toplevelgrp_map = CF.G.toplevelgrp_map
datanode = self.rootobj.srh_data_node(self.rootobj.values, 'package.choosedgroup')
choosed_list = []
self.totalsize = 0
self.install_allpkg = None
self.disc_map = {}
self.instpkg_map = {}
for selnode in datanode.getElementsByTagName('select'):
thisid = selnode.getAttribute('id')
if thisid == 'ALL':
self.install_allpkg = 1
self.totalsize = archsize_map[self.arch]
break
choosed_list.append(thisid)
if self.install_allpkg:
self.totalpkg = len(pkgpos_map.keys())
return
for grp in ['lock'] + choosed_list:
if not toplevelgrp_map.has_key(grp):
# Omit the invalid group name.
continue
for pkg in toplevelgrp_map[grp].keys():
if not self.instpkg_map.has_key(pkg):
(apkg, aarch, asize) = self.pkg2archpkg(pkg)
self.disc_map[pkgpos_map[pkg][0]] = 'y'
self.instpkg_map[pkg] = 'y'
self.totalsize = self.totalsize + asize
self.totalpkg = len(self.instpkg_map.keys())
def start(self):
for disc_no in range(len(CF.G.arrangement)):
self.disc_first_pkgs.append(CF.G.arrangement[disc_no][0][1])
(pafile, dev, fstype, reldir, bootiso_relpath) = CF.G.choosed_patuple
logger.i('disc_first_pkgs: %s\n' % str(self.disc_first_pkgs))
self.add_action(_('Search packages'),
self.act_install_prepare, None,
'probe_all_disc', dev, fstype, bootiso_relpath, reldir, self.disc_first_pkgs)
def act_install_prepare(self, tdata, data):
'''
invoke install_prep(server) to mount target system partitions.
'''
self.probe_all_disc_result = tdata
logger.i('probe_all_disc_result: %s\n' % str(self.probe_all_disc_result))
(pafile, dev, fstype, reldir, bootiso_relpath) = CF.G.choosed_patuple
self.add_action(None,
self.act_rpm_pre_install, 0,
'install_prep', (dev, fstype, bootiso_relpath, reldir),
CF.G.mount_all_list, # partition list [(mntpoint, devfn, fstype), ...] , be filled at partition step.
)
def act_rpm_pre_install(self, tdata, disc_no):
if tdata != 0:
### TODO: Only reboot, because install_prep can not mount target system partitions, so we can not install packages in it.
#### occur error, tdata is the error msg
self.cb0_fatal_err(str(tdata))
return
self.add_action(None,
self.act_install_disc_start, 0,
'rpm_pre_install', None)
def act_install_disc_start(self, tdata, disc_no):
'''
mount each ISO, and install it.
install_disc_prep(server) mount ISO.
'''
(pafile, dev, fstype, reldir, bootiso_relpath) = CF.G.choosed_patuple
while disc_no < len(CF.G.arrangement):
logger.i("act_install_disc_start probe_all_disc_result: %s" % self.probe_all_disc_result)
if not self.install_allpkg and not self.disc_map.has_key(disc_no):
# Skip the disc which is not needed.
disc_no = disc_no + 1
continue
if disc_no >= len(self.probe_all_disc_result) \
or not self.probe_all_disc_result[disc_no]: ## TODO multi disc will always have only one probe_all_disc_result. please fix it.
                # pkgarr.py says there is a next disc (CF.G.arrangement comes from pkgarr.py),
                # but no disc with this disc_no was found in probe_all_disc_result, so we assume another disc exists and ask the user to insert it.
self.cb0_insert_next_disc(disc_no + 1, self.retry_clicked, self.abort_clicked) ## This call back will active next install action.
return
self.cur_disc_no = disc_no
bootiso_relpath = self.probe_all_disc_result[disc_no][0]
self.add_action(None,
self.act_install_pkg_start, (disc_no, 0),
'install_disc_prep', dev, fstype, bootiso_relpath, reldir)
# add install current disc action, return now, and wait next disc action after current disc install action finished.
return
### Install all disc finished
self.add_action(_('Last operations for package installation'),
self.nextop, None,
'rpm_post_install', None)
def act_install_pkg_start(self, tdata, data):
if tdata != 0:
### TODO install_disc_prep ERROR, because package source on disc and can not access, error should exit.
pass
(disc_no, pkg_no) = data
while pkg_no < len(CF.G.arrangement[disc_no]):
pkgtuple = CF.G.arrangement[disc_no][pkg_no]
noscripts = pkgtuple[6]
if self.install_allpkg or self.instpkg_map.has_key(pkgtuple[1]):
archpkg = self.pkg2archpkg(pkgtuple[1])
if not archpkg:
data = (disc_no, pkg_no, 0, False)
msg = _("Target System Arch %s is not Compatible with package arch %s" % (self.arch, pkgtuple[1]))
self.cb0_install_pkg_err(msg, self.rpmerr_retry_clicked, self.rpmerr_skip_clicked, data)
return
(apkg, aarch, asize) = archpkg
apkg = os.path.basename(apkg)
# Debug
# self.add_action(apkg, self.act_install_pkg_end, (disc_no, pkg_no, asize, False),
# 'sleep', 0)
self.add_action(apkg,
self.act_install_pkg_end, (disc_no, pkg_no, asize, False),
'rpm_install_pkg', apkg, self.probe_all_disc_result[disc_no][1], noscripts)
return
pkg_no = pkg_no + 1
(pafile, dev, fstype, reldir, bootiso_relpath) = CF.G.choosed_patuple
# Install current disc finished, add action to start next disc installation.
self.add_action(None,
self.act_install_disc_start, disc_no + 1,
'install_disc_post', dev, fstype, self.probe_all_disc_result[disc_no][0], reldir)
def rpmerr_retry_clicked(self, data):
(disc_no, pkg_no, asize, is_skip) = data
self.act_install_pkg_start(-1, (disc_no, pkg_no))
def rpmerr_skip_clicked(self, data):
(disc_no, pkg_no, asize, is_skip) = data
self.act_install_pkg_end(-1, (disc_no, pkg_no, asize, True))
def act_install_pkg_end(self, tdata, data):
#--- FIXME ---
# It doesn't check the result of package_install now, but it should...
        # It should pop up a dialog to let the user choose to 'continue' or
        # 'abandon' the installation.
(disc_no, pkg_no, asize, is_skip) = data
if not is_skip and tdata != 0: # This package can not skip and have an install error.
self.cb0_install_pkg_err(tdata, self.rpmerr_retry_clicked, self.rpmerr_skip_clicked, data)
return
if self.cb0_install_pkg_end: # tell GUI to calculate package size
self.cb0_install_pkg_end(disc_no, pkg_no, asize)
self.act_install_pkg_start(tdata, (disc_no, pkg_no + 1))
def act_install_end(self, tdata, data):
'''
will be invoked at the end of installation.
'''
if self.cb0_install_end:
self.cb0_install_end()
def nextop(self, tdata, data):
(pafile, dev, fstype, reldir, bootiso_relpath) = CF.G.choosed_patuple
self.add_action(None, self.act_install_end, None,
"install_post", (dev, fstype, bootiso_relpath, reldir), CF.G.mount_all_list)
    ################################################################################
def pkg2archpkg(self, pkg):
(disc_no, pkg_no) = CF.G.pkgpos_map[pkg]
pkgarchmap = {}
pkgpathes = CF.G.arrangement[disc_no][pkg_no][4] # pkgpublic.pathes == 4.
for (apkg, aarch, asize) in pkgpathes:
if aarch == 'noarch':
return (apkg, aarch, asize)
pkgarchmap[aarch] = (apkg, aarch, asize)
for a in self.archcompat_list:
if pkgarchmap.has_key(a):
return pkgarchmap[a]
# It is an error to reach here!!!
logger.e('Unresolved package: pkg = %s, pkgarchmap = %s, archcompat_list = %s\n' % (pkg, str(pkgarchmap), str(self.archcompat_list)))
############################ Graphic Button Action ###################################
def retry_clicked(self):
'''
This is the DiscDialog retry button click callback.
DiscDialog alert indicate that package installation pause, we should active it by reprobe disc.(If user has changed it)
'''
(pafile, dev, fstype, reldir, bootiso_relpath) = CF.G.choosed_patuple
self.add_action(_('Researching packages...'),
self.retry_install, None,
'probe_all_disc', dev, fstype, bootiso_relpath, reldir, self.disc_first_pkgs)
def retry_install(self, tdata, data):
self.probe_all_disc_result = tdata
self.act_install_disc_start(None, self.cur_disc_no + 1)
# def reprobe_all_disc_0(self, tdata, data):
# (pafile, dev, fstype, reldir, bootiso_relpath) = CF.G.choosed_patuple
# self.add_action(_('Researching packages...'),
# self.reprobe_all_disc_1, None,
# 'probe_all_disc', dev, fstype, bootiso_relpath, reldir, self.disc_first_pkgs)
# def reprobe_all_disc_1(self, tdata, data):
# self.probe_all_disc_result = tdata
# logger.i('probe_all_disc_result: %s\n' % str(self.probe_all_disc_result))
# (pafile, dev, fstype, reldir, bootiso_relpath) = CF.G.choosed_patuple
# self.add_action(_('Researching packages...'),
# self.act_install_disc_start, self.cur_disc_no + 1,
# 'install_prep', (dev, fstype, bootiso_relpath, reldir), CF.G.mount_all_list)
def abort_clicked(self):
self.add_action(_('Aborting...'),
self.nextop, None,
'rpm_post_install', None)
|
[
"zy.netsec@gmail.com"
] |
zy.netsec@gmail.com
|
d3846f6490834dd0e965029972e889ece8282130
|
2501e59588cb38f00c0e1ef4c99936e1a809e4af
|
/venv/Scripts/runxlrd.py
|
7aeed4004a3b6a8630e3787ad48138d19ebb57f7
|
[] |
no_license
|
jasnPackage/ChenweiManager
|
97cebc5e268a490fbb11bdcf830f2fc6081d3f18
|
8dfb5decf1891374395210475148daa07755f8d9
|
refs/heads/master
| 2022-12-03T17:14:47.202809
| 2020-08-25T09:20:46
| 2020-08-25T09:20:46
| 290,368,256
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,346
|
py
|
#!C:\Users\Administrator\PycharmProjects\zhandian_jiekou\venv\Scripts\python.exe
# Copyright (c) 2005-2012 Stephen John Machin, Lingfo Pty Ltd
# This script is part of the xlrd package, which is released under a
# BSD-style licence.
from __future__ import print_function
cmd_doc = """
Commands:
2rows Print the contents of first and last row in each sheet
3rows Print the contents of first, second and last row in each sheet
bench Same as "show", but doesn't print -- for profiling
biff_count[1] Print a count of each type of BIFF record in the file
biff_dump[1] Print a dump (char and hex) of the BIFF records in the file
fonts hdr + print a dump of all font objects
hdr Mini-overview of file (no per-sheet information)
hotshot Do a hotshot profile run e.g. ... -f1 hotshot bench bigfile*.xls
labels Dump of sheet.col_label_ranges and ...row... for each sheet
name_dump Dump of each object in book.name_obj_list
names Print brief information for each NAME record
ov Overview of file
profile Like "hotshot", but uses cProfile
show Print the contents of all rows in each sheet
version[0] Print versions of xlrd and Python and exit
xfc Print "XF counts" and cell-type counts -- see code for details
[0] means no file arg
[1] means only one file arg i.e. no glob.glob pattern
"""
options = None
if __name__ == "__main__":
PSYCO = 0
import xlrd
import sys
import time
import glob
import traceback
import gc
from xlrd.timemachine import xrange, REPR
class LogHandler(object):
def __init__(self, logfileobj):
self.logfileobj = logfileobj
self.fileheading = None
self.shown = 0
def setfileheading(self, fileheading):
self.fileheading = fileheading
self.shown = 0
def write(self, text):
if self.fileheading and not self.shown:
self.logfileobj.write(self.fileheading)
self.shown = 1
self.logfileobj.write(text)
null_cell = xlrd.empty_cell
def show_row(bk, sh, rowx, colrange, printit):
if bk.ragged_rows:
colrange = range(sh.row_len(rowx))
if not colrange: return
if printit: print()
if bk.formatting_info:
for colx, ty, val, cxfx in get_row_data(bk, sh, rowx, colrange):
if printit:
print("cell %s%d: type=%d, data: %r, xfx: %s"
% (xlrd.colname(colx), rowx+1, ty, val, cxfx))
else:
for colx, ty, val, _unused in get_row_data(bk, sh, rowx, colrange):
if printit:
print("cell %s%d: type=%d, data: %r" % (xlrd.colname(colx), rowx+1, ty, val))
def get_row_data(bk, sh, rowx, colrange):
result = []
dmode = bk.datemode
ctys = sh.row_types(rowx)
cvals = sh.row_values(rowx)
for colx in colrange:
cty = ctys[colx]
cval = cvals[colx]
if bk.formatting_info:
cxfx = str(sh.cell_xf_index(rowx, colx))
else:
cxfx = ''
if cty == xlrd.XL_CELL_DATE:
try:
showval = xlrd.xldate_as_tuple(cval, dmode)
except xlrd.XLDateError as e:
showval = "%s:%s" % (type(e).__name__, e)
cty = xlrd.XL_CELL_ERROR
elif cty == xlrd.XL_CELL_ERROR:
showval = xlrd.error_text_from_code.get(cval, '<Unknown error code 0x%02x>' % cval)
else:
showval = cval
result.append((colx, cty, showval, cxfx))
return result
def bk_header(bk):
print()
print("BIFF version: %s; datemode: %s"
% (xlrd.biff_text_from_num[bk.biff_version], bk.datemode))
print("codepage: %r (encoding: %s); countries: %r"
% (bk.codepage, bk.encoding, bk.countries))
print("Last saved by: %r" % bk.user_name)
print("Number of data sheets: %d" % bk.nsheets)
print("Use mmap: %d; Formatting: %d; On demand: %d"
% (bk.use_mmap, bk.formatting_info, bk.on_demand))
print("Ragged rows: %d" % bk.ragged_rows)
if bk.formatting_info:
print("FORMATs: %d, FONTs: %d, XFs: %d"
% (len(bk.format_list), len(bk.font_list), len(bk.xf_list)))
if not options.suppress_timing:
print("Load time: %.2f seconds (stage 1) %.2f seconds (stage 2)"
% (bk.load_time_stage_1, bk.load_time_stage_2))
print()
def show_fonts(bk):
print("Fonts:")
for x in xrange(len(bk.font_list)):
font = bk.font_list[x]
font.dump(header='== Index %d ==' % x, indent=4)
def show_names(bk, dump=0):
bk_header(bk)
if bk.biff_version < 50:
print("Names not extracted in this BIFF version")
return
nlist = bk.name_obj_list
print("Name list: %d entries" % len(nlist))
for nobj in nlist:
if dump:
nobj.dump(sys.stdout,
header="\n=== Dump of name_obj_list[%d] ===" % nobj.name_index)
else:
print("[%d]\tName:%r macro:%r scope:%d\n\tresult:%r\n"
% (nobj.name_index, nobj.name, nobj.macro, nobj.scope, nobj.result))
def print_labels(sh, labs, title):
if not labs:return
for rlo, rhi, clo, chi in labs:
print("%s label range %s:%s contains:"
% (title, xlrd.cellname(rlo, clo), xlrd.cellname(rhi-1, chi-1)))
for rx in xrange(rlo, rhi):
for cx in xrange(clo, chi):
print(" %s: %r" % (xlrd.cellname(rx, cx), sh.cell_value(rx, cx)))
def show_labels(bk):
# bk_header(bk)
hdr = 0
for shx in range(bk.nsheets):
sh = bk.sheet_by_index(shx)
clabs = sh.col_label_ranges
rlabs = sh.row_label_ranges
if clabs or rlabs:
if not hdr:
bk_header(bk)
hdr = 1
print("sheet %d: name = %r; nrows = %d; ncols = %d" %
(shx, sh.name, sh.nrows, sh.ncols))
print_labels(sh, clabs, 'Col')
print_labels(sh, rlabs, 'Row')
if bk.on_demand: bk.unload_sheet(shx)
def show(bk, nshow=65535, printit=1):
bk_header(bk)
if 0:
rclist = xlrd.sheet.rc_stats.items()
rclist = sorted(rclist)
print("rc stats")
for k, v in rclist:
print("0x%04x %7d" % (k, v))
if options.onesheet:
try:
shx = int(options.onesheet)
except ValueError:
shx = bk.sheet_by_name(options.onesheet).number
shxrange = [shx]
else:
shxrange = range(bk.nsheets)
# print("shxrange", list(shxrange))
for shx in shxrange:
sh = bk.sheet_by_index(shx)
nrows, ncols = sh.nrows, sh.ncols
colrange = range(ncols)
anshow = min(nshow, nrows)
print("sheet %d: name = %s; nrows = %d; ncols = %d" %
(shx, REPR(sh.name), sh.nrows, sh.ncols))
if nrows and ncols:
# Beat the bounds
for rowx in xrange(nrows):
nc = sh.row_len(rowx)
if nc:
sh.row_types(rowx)[nc-1]
sh.row_values(rowx)[nc-1]
sh.cell(rowx, nc-1)
for rowx in xrange(anshow-1):
if not printit and rowx % 10000 == 1 and rowx > 1:
print("done %d rows" % (rowx-1,))
show_row(bk, sh, rowx, colrange, printit)
if anshow and nrows:
show_row(bk, sh, nrows-1, colrange, printit)
print()
if bk.on_demand: bk.unload_sheet(shx)
def count_xfs(bk):
bk_header(bk)
for shx in range(bk.nsheets):
sh = bk.sheet_by_index(shx)
nrows = sh.nrows
print("sheet %d: name = %r; nrows = %d; ncols = %d" %
(shx, sh.name, sh.nrows, sh.ncols))
# Access all xfindexes to force gathering stats
type_stats = [0, 0, 0, 0, 0, 0, 0]
for rowx in xrange(nrows):
for colx in xrange(sh.row_len(rowx)):
xfx = sh.cell_xf_index(rowx, colx)
assert xfx >= 0
cty = sh.cell_type(rowx, colx)
type_stats[cty] += 1
print("XF stats", sh._xf_index_stats)
print("type stats", type_stats)
print()
if bk.on_demand: bk.unload_sheet(shx)
def main(cmd_args):
import optparse
global options, PSYCO
usage = "\n%prog [options] command [input-file-patterns]\n" + cmd_doc
oparser = optparse.OptionParser(usage)
oparser.add_option(
"-l", "--logfilename",
default="",
help="contains error messages")
oparser.add_option(
"-v", "--verbosity",
type="int", default=0,
help="level of information and diagnostics provided")
oparser.add_option(
"-m", "--mmap",
type="int", default=-1,
help="1: use mmap; 0: don't use mmap; -1: accept heuristic")
oparser.add_option(
"-e", "--encoding",
default="",
help="encoding override")
oparser.add_option(
"-f", "--formatting",
type="int", default=0,
help="0 (default): no fmt info\n"
"1: fmt info (all cells)\n",
)
oparser.add_option(
"-g", "--gc",
type="int", default=0,
help="0: auto gc enabled; 1: auto gc disabled, manual collect after each file; 2: no gc")
oparser.add_option(
"-s", "--onesheet",
default="",
help="restrict output to this sheet (name or index)")
oparser.add_option(
"-u", "--unnumbered",
action="store_true", default=0,
help="omit line numbers or offsets in biff_dump")
oparser.add_option(
"-d", "--on-demand",
action="store_true", default=0,
help="load sheets on demand instead of all at once")
oparser.add_option(
"-t", "--suppress-timing",
action="store_true", default=0,
help="don't print timings (diffs are less messy)")
oparser.add_option(
"-r", "--ragged-rows",
action="store_true", default=0,
help="open_workbook(..., ragged_rows=True)")
options, args = oparser.parse_args(cmd_args)
if len(args) == 1 and args[0] in ("version", ):
pass
elif len(args) < 2:
oparser.error("Expected at least 2 args, found %d" % len(args))
cmd = args[0]
xlrd_version = getattr(xlrd, "__VERSION__", "unknown; before 0.5")
if cmd == 'biff_dump':
xlrd.dump(args[1], unnumbered=options.unnumbered)
sys.exit(0)
if cmd == 'biff_count':
xlrd.count_records(args[1])
sys.exit(0)
if cmd == 'version':
print("xlrd: %s, from %s" % (xlrd_version, xlrd.__file__))
print("Python:", sys.version)
sys.exit(0)
if options.logfilename:
logfile = LogHandler(open(options.logfilename, 'w'))
else:
logfile = sys.stdout
mmap_opt = options.mmap
mmap_arg = xlrd.USE_MMAP
if mmap_opt in (1, 0):
mmap_arg = mmap_opt
elif mmap_opt != -1:
print('Unexpected value (%r) for mmap option -- assuming default' % mmap_opt)
fmt_opt = options.formatting | (cmd in ('xfc', ))
gc_mode = options.gc
if gc_mode:
gc.disable()
for pattern in args[1:]:
for fname in glob.glob(pattern):
print("\n=== File: %s ===" % fname)
if logfile != sys.stdout:
logfile.setfileheading("\n=== File: %s ===\n" % fname)
if gc_mode == 1:
n_unreachable = gc.collect()
if n_unreachable:
print("GC before open:", n_unreachable, "unreachable objects")
if PSYCO:
import psyco
psyco.full()
PSYCO = 0
try:
t0 = time.time()
bk = xlrd.open_workbook(
fname,
verbosity=options.verbosity, logfile=logfile,
use_mmap=mmap_arg,
encoding_override=options.encoding,
formatting_info=fmt_opt,
on_demand=options.on_demand,
ragged_rows=options.ragged_rows,
)
t1 = time.time()
if not options.suppress_timing:
print("Open took %.2f seconds" % (t1-t0,))
except xlrd.XLRDError as e:
print("*** Open failed: %s: %s" % (type(e).__name__, e))
continue
except KeyboardInterrupt:
print("*** KeyboardInterrupt ***")
traceback.print_exc(file=sys.stdout)
sys.exit(1)
except BaseException as e:
print("*** Open failed: %s: %s" % (type(e).__name__, e))
traceback.print_exc(file=sys.stdout)
continue
t0 = time.time()
if cmd == 'hdr':
bk_header(bk)
elif cmd == 'ov': # OverView
show(bk, 0)
elif cmd == 'show': # all rows
show(bk)
elif cmd == '2rows': # first row and last row
show(bk, 2)
elif cmd == '3rows': # first row, 2nd row and last row
show(bk, 3)
elif cmd == 'bench':
show(bk, printit=0)
elif cmd == 'fonts':
bk_header(bk)
show_fonts(bk)
elif cmd == 'names': # named reference list
show_names(bk)
elif cmd == 'name_dump': # named reference list
show_names(bk, dump=1)
elif cmd == 'labels':
show_labels(bk)
elif cmd == 'xfc':
count_xfs(bk)
else:
print("*** Unknown command <%s>" % cmd)
sys.exit(1)
del bk
if gc_mode == 1:
n_unreachable = gc.collect()
if n_unreachable:
print("GC post cmd:", fname, "->", n_unreachable, "unreachable objects")
if not options.suppress_timing:
t1 = time.time()
print("\ncommand took %.2f seconds\n" % (t1-t0,))
return None
av = sys.argv[1:]
if not av:
main(av)
firstarg = av[0].lower()
if firstarg == "hotshot":
import hotshot
import hotshot.stats
av = av[1:]
prof_log_name = "XXXX.prof"
prof = hotshot.Profile(prof_log_name)
# benchtime, result = prof.runcall(main, *av)
result = prof.runcall(main, *(av, ))
print("result", repr(result))
prof.close()
stats = hotshot.stats.load(prof_log_name)
stats.strip_dirs()
stats.sort_stats('time', 'calls')
stats.print_stats(20)
elif firstarg == "profile":
import cProfile
av = av[1:]
cProfile.run('main(av)', 'YYYY.prof')
import pstats
p = pstats.Stats('YYYY.prof')
p.strip_dirs().sort_stats('cumulative').print_stats(30)
elif firstarg == "psyco":
PSYCO = 1
main(av[1:])
else:
main(av)
|
[
"amazinglala@163.com"
] |
amazinglala@163.com
|
f2c9f6c41b7100c977f20edceab2a62490d65917
|
a8f222e132fb4d235d2e52069a0bd8820b0a69ef
|
/flaskapp.py
|
e69e8dbd10be63a8fa06c9669d8c281e78a22490
|
[] |
no_license
|
virtis16/Data_visulaization_AWS_RDS
|
b1227d452623afbe3e53e2a3f1d3a36762545ec4
|
d0c3ba8418d3c5cfceef96c3f69ed0ba300b1de8
|
refs/heads/master
| 2021-04-09T11:30:58.245135
| 2018-10-09T05:11:09
| 2018-10-09T05:11:09
| 125,573,282
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,760
|
py
|
import os, re, time, memcache
from flask import Flask, render_template, request, redirect, session
from random import randint
from datetime import datetime
import sys, csv
import pymysql
ACCESS_KEY_ID = '######'
ACCESS_SECRET_KEY = '##############'
BUCKET_NAME = '####'
hostname = '####################3'
username = '#####'
password = '#######'
database = '#####'
Conn = pymysql.connect( host=hostname, user=username, passwd=password, db=database, autocommit = True, cursorclass=pymysql.cursors.DictCursor, local_infile=True)
application = Flask(__name__)
app = application
@app.route("/")
def hello():
return render_template("file1.html")
@app.route('/plotbarchart', methods=['POST'])
def plotbarchart():
variable = request.form['limit']
query1 = "select count(county),state from USZipcodes group by state having count(distinct county) <"+str(variable)
result1 = []
with Conn.cursor() as curs:
curs.execute(query1)
for row in curs:
result1.append(row)
x1 = [x['count(distinct county)'] for x in result1]
x2 = [x['state'] for x in result1]
result2 = []
print x1
print x2
print result2
for p in x2:
result2.append(p)
print(result2)
return render_template("index.html",zipped_data= x1,x2=result2)
@app.route('/plothorizontalchart', methods=['POST'])
def plothorizontalchart():
variable = request.form['limit']
query1 = "############"
result1 = []
with Conn.cursor() as curs:
curs.execute(query1)
for row in curs:
result1.append(row)
x1 = [x['count(distinct county)'] for x in result1]
x2 = [x['state'] for x in result1]
result2 = []
print x1
print x2
print result2
for p in x2:
result2.append(p)
print(result2)
return render_template("multiBarHorizontalChart.html",zipped_data= x1,x2=result2)
@app.route('/plotpiechart', methods=['POST'])
def plotpiechart():
variable = request.form['limit']
query1 = "##########"
result1 = []
with Conn.cursor() as curs:
curs.execute(query1)
for row in curs:
result1.append(row)
x1 = [x['count(distinct county)'] for x in result1]
x2 = [x['state'] for x in result1]
result2 = []
print x1
print x2
print result2
for p in x2:
result2.append(p)
return render_template("PieChart.html",zipped_data= x1,x2=result2)
# Adding extra functions
@app.route('/createtablevoting', methods=['POST'])
def createtablevoting():
cursor = Conn.cursor()
file_name2 = 'StateVotingClean.csv'
# file_name = 'home/ubuntu/flaskapp/data.csv'
droptbl = "DROP TABLE IF EXISTS vbsdatabase.StateVotingClean;"
cursor.execute(droptbl)
with open(file_name2, 'rb') as csvfile:
reader = csv.reader(csvfile, quotechar='`')
headers = reader.next()
print len(headers)
start_time = time.time()
print 'ttt'
sqlcreate = "create table if not exists StateVotingClean("
for i in range(0, len(headers)):
sqlcreate += headers[i] + " varchar(100),"
sqlcreate += "Idautonum int AUTO_INCREMENT PRIMARY KEY)"
cursor.execute(sqlcreate)
print sqlcreate
uploadqry = """LOAD DATA LOCAL INFILE 'StateVotingClean.csv'
INTO TABLE StateVotingClean FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '"' LINES TERMINATED BY '\r' IGNORE 1 ROWS;"""
cursor.execute(uploadqry)
Conn.commit()
print uploadqry
end_time = time.time()
time_diff = end_time - start_time
print time_diff
cursor.close()
return render_template('file1.html', crteducation=time_diff)
@app.route('/query1', methods=['POST'])
def query1():
State = request.form['State']
q1 = "#######"
qq1 = "#######"
print (q1)
print (qq1)
with Conn.cursor() as curs:
curs.execute(q1)
rows = curs.fetchall()
curs.execute(qq1)
res = curs.fetchall()
curs.close()
return render_template('file1.html', answer1=rows, answer2=res[0]['count(*)'])
@app.route('/query4', methods=['GET', 'POST'])
def query4():
logi1 = request.form['val1']
logi2 = request.form['val2']
lati1 = request.form['val3']
lati2 = request.form['val4']
locquery = "########"
print (locquery)
starttime = time.time()
result1=[]
with Conn.cursor() as cursor:
cursor.execute(locquery)
for row in cursor:
result1.append(row)
x1 = [x['count(*)'] for x in result1]
x2 = [x['longitude'] for x in result1]
x3 = [x['latitude'] for x in result1]
endtime = time.time()
totalsqltime = endtime - starttime
result2=[]
result3=[]
for p in x2:
result2.append(p)
for p1 in x3:
result3.append(p1)
return render_template('lineChart.html',zipped_data= x1,x2=result2,x3=result3)
if __name__ == '__main__':
app.run(host='127.0.0.1', debug=True)
|
[
"virtibipin.sanghavi@mavs.uta.edu"
] |
virtibipin.sanghavi@mavs.uta.edu
|
a02b1b4faab9a1399cfe1621d15f118c5bbd2a1d
|
8cd06b330687331cc03974a0d6477b303e2def7b
|
/backend/todo/admin.py
|
e26b1b9def871eaf41554b7ec882bc560597adcb
|
[] |
no_license
|
justinformentin/fullstack-base
|
b0c0dcfc45f6e7843ff8d53f06e8a542a058f270
|
3407a86552fb311ece9114ea83a485add190ca68
|
refs/heads/master
| 2023-01-05T09:23:17.228075
| 2019-11-30T23:04:24
| 2019-11-30T23:04:24
| 224,877,445
| 1
| 1
| null | 2023-01-04T13:25:08
| 2019-11-29T15:18:50
|
Python
|
UTF-8
|
Python
| false
| false
| 728
|
py
|
# todo/admin.py
from django.contrib import admin
from .models import (Todo, Customer, Snippet)
class TodoAdmin(admin.ModelAdmin):
list_display = ('title', 'description', 'completed')
class CustomerAdmin(admin.ModelAdmin):
list_display = (
'first_name',
'last_name',
'email',
'phone',
'address',
'description',
'created_at'
)
class SnippetAdmin(admin.ModelAdmin):
list_display = (
'created',
'title',
'code',
'linenos',
'language',
'style',
'owner',
'highlighted'
)
# Register your models here.
admin.site.register(Todo, TodoAdmin)
admin.site.register(Customer, CustomerAdmin)
admin.site.register(Snippet, SnippetAdmin)
|
[
"justin.formentin@gmail.com"
] |
justin.formentin@gmail.com
|
7e02f53f868875096ec81111f8ae33d6515129ed
|
4f9257cc5fc94d6947373ece3921a9a34ff60cb2
|
/pkg/query/qry.py
|
04302d465ab4f57a106e1c089520d63ef89396fe
|
[] |
no_license
|
harpreet1302/testing533
|
6b09465872a5d799682ea627866c8fdf11296d23
|
526ce400e16c6e234264c1a95a1d889117abe25d
|
refs/heads/master
| 2020-04-11T02:10:46.905308
| 2018-12-12T05:45:56
| 2018-12-12T05:45:56
| 161,437,052
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 521
|
py
|
# coding: utf-8
# In[ ]:
import pandas as pd
def count(df):
try :
if len(df) != 0 :
return (len(df))
except :
print("Data Frame is empty")
def fltr(df,agel,ageu,sall,salu):
try :
if len(df) != 0 :
return df[(df.age > int(agel)) & (df.age < int(ageu)) & (df.salary > int(sall)) & (df.salary < int(salu))]
else :
print("Data Frame is empty")
except :
print("Please verify parameters being transfered")
|
[
"harpreet1302"
] |
harpreet1302
|
312fe181545ad7384059df1ba7cad7856e2da5e4
|
f6ae370247bc72c6101a6b150ad2ba90880c0c66
|
/assignment3/cs231n/rnn_layers.py
|
88022784b12680f2b1cfaf76bfd0e18108d0ba4c
|
[] |
no_license
|
douyh/cs231n_assignment
|
60a75b8a8fd5282a521569c816ef530391aa55ec
|
c490d85cd5d4470d819c6cde2acfc52185268b43
|
refs/heads/master
| 2021-09-04T00:51:40.180435
| 2018-01-13T16:09:29
| 2018-01-13T16:09:29
| 115,477,542
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,848
|
py
|
from __future__ import print_function, division
from builtins import range
import numpy as np
"""
This file defines layer types that are commonly used for recurrent neural
networks.
"""
def rnn_step_forward(x, prev_h, Wx, Wh, b):
"""
Run the forward pass for a single timestep of a vanilla RNN that uses a tanh
activation function.
The input data has dimension D, the hidden state has dimension H, and we use
a minibatch size of N.
Inputs:
- x: Input data for this timestep, of shape (N, D).
- prev_h: Hidden state from previous timestep, of shape (N, H)
- Wx: Weight matrix for input-to-hidden connections, of shape (D, H)
- Wh: Weight matrix for hidden-to-hidden connections, of shape (H, H)
- b: Biases of shape (H,)
Returns a tuple of:
- next_h: Next hidden state, of shape (N, H)
- cache: Tuple of values needed for the backward pass.
"""
next_h, cache = None, None
##############################################################################
# TODO: [get]Implement a single forward step for the vanilla RNN. Store the next #
# hidden state and any values you need for the backward pass in the next_h #
# and cache variables respectively. #
##############################################################################
next_h = np.tanh(np.dot(x, Wx) + np.dot(prev_h, Wh) + b)
cache = Wx, Wh, next_h, prev_h, x#cache
##############################################################################
# END OF YOUR CODE #
##############################################################################
return next_h, cache
def rnn_step_backward(dnext_h, cache):
"""
Backward pass for a single timestep of a vanilla RNN.
Inputs:
- dnext_h: Gradient of loss with respect to next hidden state
- cache: Cache object from the forward pass
Returns a tuple of:
- dx: Gradients of input data, of shape (N, D)
- dprev_h: Gradients of previous hidden state, of shape (N, H)
- dWx: Gradients of input-to-hidden weights, of shape (D, H)
- dWh: Gradients of hidden-to-hidden weights, of shape (H, H)
- db: Gradients of bias vector, of shape (H,)
"""
dx, dprev_h, dWx, dWh, db = None, None, None, None, None
##############################################################################
# TODO: [get]Implement the backward pass for a single step of a vanilla RNN. #
# #
# HINT: For the tanh function, you can compute the local derivative in terms #
# of the output value from tanh. #
##############################################################################
Wx, Wh, next_h, prev_h, x = cache
dtanh = dnext_h * (1 - next_h ** 2)# if y = tanh(x) then y' = 1 - y * y next_h(N,H)
dWx = np.dot(x.T, dtanh)#(D,H) x(N,D) dtanh(N, H)
dx = np.dot(dtanh, Wx.T) #dx(N,D) dtanh(N, H) Wx(D, H)
dWh = np.dot(prev_h.T, dtanh) #dWh(H, H) dtanh(N, H) prev_h(N, H) or np.dot(prev_h, dtanh.T)
dprev_h = np.dot(dtanh, Wh.T)#dprev_h(N, H) dtanh(N, H) Wh(H, H) notice T ,like dx
db = dtanh.sum(axis = 0)#db(H,) dtanh(N, H)
##############################################################################
# END OF YOUR CODE #
##############################################################################
return dx, dprev_h, dWx, dWh, db
def rnn_forward(x, h0, Wx, Wh, b):
"""
Run a vanilla RNN forward on an entire sequence of data. We assume an input
sequence composed of T vectors, each of dimension D. The RNN uses a hidden
size of H, and we work over a minibatch containing N sequences. After running
the RNN forward, we return the hidden states for all timesteps.
Inputs:
- x: Input data for the entire timeseries, of shape (N, T, D).
- h0: Initial hidden state, of shape (N, H)
- Wx: Weight matrix for input-to-hidden connections, of shape (D, H)
- Wh: Weight matrix for hidden-to-hidden connections, of shape (H, H)
- b: Biases of shape (H,)
Returns a tuple of:
- h: Hidden states for the entire timeseries, of shape (N, T, H).
- cache: Values needed in the backward pass
"""
h, cache = None, None
##############################################################################
# TODO: [get]Implement forward pass for a vanilla RNN running on a sequence of #
# input data. You should use the rnn_step_forward function that you defined #
# above. You can use a for loop to help compute the forward pass. #
##############################################################################
#h0? use dict?
N, T, H = x.shape[0], x.shape[1], h0.shape[1]
cache = {}
prev_h = h0
h = np.zeros((N, T, H))
for i in range(T):
if i == 0:
h[:, i, :], cache[i] = rnn_step_forward(x[:, i, :], prev_h, Wx, Wh, b)
else:
h[:, i, :], cache[i] = rnn_step_forward(x[:, i, :], h[:, i - 1, :], Wx, Wh, b)
##############################################################################
# END OF YOUR CODE #
##############################################################################
return h, cache
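# Quick shape sanity check for rnn_forward (toy sizes, purely illustrative):
#   N, T, D, H = 2, 3, 4, 5
#   h, _ = rnn_forward(np.random.randn(N, T, D), np.random.randn(N, H),
#                      np.random.randn(D, H), np.random.randn(H, H), np.random.randn(H))
#   h.shape  # -> (2, 3, 5)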
def rnn_backward(dh, cache):
"""
Compute the backward pass for a vanilla RNN over an entire sequence of data.
Inputs:
- dh: Upstream gradients of all hidden states, of shape (N, T, H)
Returns a tuple of:
- dx: Gradient of inputs, of shape (N, T, D)
- dh0: Gradient of initial hidden state, of shape (N, H)
- dWx: Gradient of input-to-hidden weights, of shape (D, H)
- dWh: Gradient of hidden-to-hidden weights, of shape (H, H)
- db: Gradient of biases, of shape (H,)
"""
dx, dh0, dWx, dWh, db = None, None, None, None, None
##############################################################################
# TODO: [get]Implement the backward pass for a vanilla RNN running an entire #
# sequence of data. You should use the rnn_step_backward function that you #
# defined above. You can use a for loop to help compute the backward pass. #
##############################################################################
N, T, H = dh.shape
    # we also need D = Wx.shape[0]; Wx is stored as the first element of every per-step cache
    # (in the forward step, cache = Wx, Wh, next_h, prev_h, x)
D = cache[0][0].shape[0]
dWx = np.zeros((D, H))#or dWx = np.zeros(cache[0][0].shape)
dWh = np.zeros((H, H))#or dWh = np.zeros(cache[0][1].shape)
    db = np.zeros(H,)  # the forward cache doesn't include b, so build its gradient buffer from H
dx = np.zeros((N, T, D))#or dx = np.zeros(cache[0][4].shape)
dprev_h = np.zeros((N, H))
for i in range(T, 0, -1):
#dx is corresponding to every x
dx[:, i - 1, :], dprev_h, dWx_step, dWh_step, db_step = rnn_step_backward(dh[:, i - 1, :] + dprev_h, cache[i - 1])#+dprev_h important
#from h[:,0,:] to h[:,-1,:] we share Wx Wh and b. so we must accumulate their gradients
dWx += dWx_step
dWh += dWh_step
db += db_step
dh0 = dprev_h#dh0 is the last one, we don't need to accumulate it.
##############################################################################
# END OF YOUR CODE #
##############################################################################
return dx, dh0, dWx, dWh, db
def word_embedding_forward(x, W):
"""
Forward pass for word embeddings. We operate on minibatches of size N where
each sequence has length T. We assume a vocabulary of V words, assigning each
to a vector of dimension D.
Inputs:
- x: Integer array of shape (N, T) giving indices of words. Each element idx
      of x must be in the range 0 <= idx < V.
- W: Weight matrix of shape (V, D) giving word vectors for all words.
Returns a tuple of:
- out: Array of shape (N, T, D) giving word vectors for all input words.
- cache: Values needed for the backward pass
"""
out, cache = None, None
##############################################################################
# TODO: [get]Implement the forward pass for word embeddings. #
# #
# HINT: This can be done in one line using NumPy's array indexing. #
##############################################################################
out = W[x]#out = W[x,:]
cache = x, W
##############################################################################
# END OF YOUR CODE #
##############################################################################
return out, cache
def word_embedding_backward(dout, cache):
"""
Backward pass for word embeddings. We cannot back-propagate into the words
since they are integers, so we only return gradient for the word embedding
matrix.
HINT: Look up the function np.add.at
Inputs:
- dout: Upstream gradients of shape (N, T, D)
- cache: Values from the forward pass
Returns:
- dW: Gradient of word embedding matrix, of shape (V, D).
"""
dW = None
##############################################################################
# TODO: [get]Implement the backward pass for word embeddings. #
# #
# Note that Words can appear more than once in a sequence. #
# HINT: Look up the function np.add.at #
##############################################################################
x, W = cache
dW = np.zeros(W.shape)
np.add.at(dW, x, dout)#dW(V, D) x(N, T) dout(N, T, D)
##############################################################################
# END OF YOUR CODE #
##############################################################################
return dW
def sigmoid(x):
"""
A numerically stable version of the logistic sigmoid function.
"""
pos_mask = (x >= 0)
neg_mask = (x < 0)
z = np.zeros_like(x)
z[pos_mask] = np.exp(-x[pos_mask])
z[neg_mask] = np.exp(x[neg_mask])
top = np.ones_like(x)
top[neg_mask] = z[neg_mask]
return top / (1 + z)
def lstm_step_forward(x, prev_h, prev_c, Wx, Wh, b):
"""
Forward pass for a single timestep of an LSTM.
The input data has dimension D, the hidden state has dimension H, and we use
a minibatch size of N.
Inputs:
- x: Input data, of shape (N, D)
- prev_h: Previous hidden state, of shape (N, H)
- prev_c: previous cell state, of shape (N, H)
- Wx: Input-to-hidden weights, of shape (D, 4H)
- Wh: Hidden-to-hidden weights, of shape (H, 4H)
- b: Biases, of shape (4H,)
Returns a tuple of:
- next_h: Next hidden state, of shape (N, H)
- next_c: Next cell state, of shape (N, H)
- cache: Tuple of values needed for backward pass.
"""
next_h, next_c, cache = None, None, None
#############################################################################
# TODO: [get]Implement the forward pass for a single timestep of an LSTM. #
# You may want to use the numerically stable sigmoid implementation above. #
#############################################################################
H = prev_h.shape[1]
Wl = np.dot(x, Wx) + np.dot(prev_h, Wh) + b#(N, 4H)
i = sigmoid(Wl[:, 0: H])#(N, H)
f = sigmoid(Wl[:, H: 2 * H])
o = sigmoid(Wl[:, 2 * H: 3 * H])
g = np.tanh(Wl[:, 3 * H: 4 * H])
next_c = f * prev_c + i * g
next_h = o * np.tanh(next_c)
cache = i, f, o, g, x, next_c, prev_c, prev_h, Wx, Wh
##############################################################################
# END OF YOUR CODE #
##############################################################################
return next_h, next_c, cache
def lstm_step_backward(dnext_h, dnext_c, cache):
"""
Backward pass for a single timestep of an LSTM.
Inputs:
- dnext_h: Gradients of next hidden state, of shape (N, H)
- dnext_c: Gradients of next cell state, of shape (N, H)
- cache: Values from the forward pass
Returns a tuple of:
- dx: Gradient of input data, of shape (N, D)
- dprev_h: Gradient of previous hidden state, of shape (N, H)
- dprev_c: Gradient of previous cell state, of shape (N, H)
- dWx: Gradient of input-to-hidden weights, of shape (D, 4H)
- dWh: Gradient of hidden-to-hidden weights, of shape (H, 4H)
- db: Gradient of biases, of shape (4H,)
"""
dx, dh, dc, dWx, dWh, db = None, None, None, None, None, None
#############################################################################
# TODO: [get]Implement the backward pass for a single timestep of an LSTM. #
# #
# HINT: For sigmoid and tanh you can compute local derivatives in terms of #
# the output value from the nonlinearity. #
#############################################################################
i, f, o, g, x, next_c, prev_c, prev_h, Wx, Wh = cache
dnext_c += o * (1 - np.tanh(next_c) ** 2) * dnext_h#update dnext_c it's zero at the initial state, we must update it
do = dnext_h * np.tanh(next_c) # (N, H)
dprev_c = dnext_c * f#(N, H) dnext_c(N, H) f (N, H)
df = dnext_c * prev_c#(N, H)
di = dnext_c * g#(N, H)
dg = dnext_c * i#(N, H)
# y = sigmoid(x) y' = y (1 - y)
dWl = np.column_stack((di * i * (1 - i), df * f * (1 - f), do * o * (1 - o), dg * (1 - g ** 2))) #(N, 4H) or hstackm
dWh = np.dot(prev_h.T, dWl)#dWh(H, 4H) dWl(N, 4H) prev_h(N, H)
dprev_h = np.dot(dWl, Wh.T)#dprev_h(N,H) dWl(N, 4H) Wh(H, 4H)
dWx = np.dot(x.T, dWl)#dWx(D, 4H) dWl(N, 4H) x(N, D)
dx = np.dot(dWl, Wx.T)#dx(N, D) dWl(N, 4H) Wx(D, 4H)
db = np.sum(dWl, axis = 0)#db(4H,) dWl(N, 4H)
##############################################################################
# END OF YOUR CODE #
##############################################################################
return dx, dprev_h, dprev_c, dWx, dWh, db
def lstm_forward(x, h0, Wx, Wh, b):
"""
Forward pass for an LSTM over an entire sequence of data. We assume an input
sequence composed of T vectors, each of dimension D. The LSTM uses a hidden
size of H, and we work over a minibatch containing N sequences. After running
the LSTM forward, we return the hidden states for all timesteps.
Note that the initial cell state is passed as input, but the initial cell
state is set to zero. Also note that the cell state is not returned; it is
an internal variable to the LSTM and is not accessed from outside.
Inputs:
- x: Input data of shape (N, T, D)
- h0: Initial hidden state of shape (N, H)
- Wx: Weights for input-to-hidden connections, of shape (D, 4H)
- Wh: Weights for hidden-to-hidden connections, of shape (H, 4H)
- b: Biases of shape (4H,)
Returns a tuple of:
- h: Hidden states for all timesteps of all sequences, of shape (N, T, H)
- cache: Values needed for the backward pass.
"""
h, cache = None, None
#############################################################################
# TODO: [get]Implement the forward pass for an LSTM over an entire timeseries. #
# You should use the lstm_step_forward function that you just defined. #
#############################################################################
N, T, H = x.shape[0], x.shape[1], h0.shape[1]
h = np.zeros((N, T, H))
cell, cache = {}, {}
prev_h = h0
prev_c = np.zeros(h0.shape)
for i in range(T):
if i == 0:
h[:, i, :], cell[i], cache[i] = lstm_step_forward(x[:, i, :], prev_h, prev_c, Wx, Wh, b)
else:
h[:, i, :], cell[i], cache[i] = lstm_step_forward(x[:, i, :], h[:, i - 1, :], cell[i - 1], Wx, Wh, b)
##############################################################################
# END OF YOUR CODE #
##############################################################################
return h, cache
def lstm_backward(dh, cache):
"""
    Backward pass for an LSTM over an entire sequence of data.
Inputs:
- dh: Upstream gradients of hidden states, of shape (N, T, H)
- cache: Values from the forward pass
Returns a tuple of:
- dx: Gradient of input data of shape (N, T, D)
- dh0: Gradient of initial hidden state of shape (N, H)
- dWx: Gradient of input-to-hidden weight matrix of shape (D, 4H)
- dWh: Gradient of hidden-to-hidden weight matrix of shape (H, 4H)
- db: Gradient of biases, of shape (4H,)
"""
dx, dh0, dWx, dWh, db = None, None, None, None, None
#############################################################################
# TODO: [get]Implement the backward pass for an LSTM over an entire timeseries. #
# You should use the lstm_step_backward function that you just defined. #
#############################################################################
N, T, H = dh.shape
#in lstm step forward cache = i, f, o, g, x, next_c, prev_c, prev_h, Wx, Wh
D = cache[0][8].shape[0]
dx = np.zeros((N, T, D))
dWx = np.zeros((D, 4 * H))
dWh = np.zeros((H, 4 * H))
db = np.zeros((4 * H,))
dprev_c = np.zeros((N, H))#it's set to 0 and updated with i
dprev_h = np.zeros((N, H))
for i in range(T, 0, -1):
dx[:, i - 1, :], dprev_h, dprev_c, dWx_step, dWh_step, db_step = lstm_step_backward(dh[:, i - 1, :] + dprev_h, dprev_c,cache[i - 1])
dWx += dWx_step
dWh += dWh_step
db += db_step
dh0 = dprev_h
##############################################################################
# END OF YOUR CODE #
##############################################################################
return dx, dh0, dWx, dWh, db
def temporal_affine_forward(x, w, b):
"""
Forward pass for a temporal affine layer. The input is a set of D-dimensional
vectors arranged into a minibatch of N timeseries, each of length T. We use
an affine function to transform each of those vectors into a new vector of
dimension M.
Inputs:
- x: Input data of shape (N, T, D)
- w: Weights of shape (D, M)
- b: Biases of shape (M,)
Returns a tuple of:
- out: Output data of shape (N, T, M)
- cache: Values needed for the backward pass
"""
N, T, D = x.shape
M = b.shape[0]
out = x.reshape(N * T, D).dot(w).reshape(N, T, M) + b
cache = x, w, b, out
return out, cache
def temporal_affine_backward(dout, cache):
"""
Backward pass for temporal affine layer.
Input:
- dout: Upstream gradients of shape (N, T, M)
- cache: Values from forward pass
Returns a tuple of:
- dx: Gradient of input, of shape (N, T, D)
- dw: Gradient of weights, of shape (D, M)
- db: Gradient of biases, of shape (M,)
"""
x, w, b, out = cache
N, T, D = x.shape
M = b.shape[0]
dx = dout.reshape(N * T, M).dot(w.T).reshape(N, T, D)
dw = dout.reshape(N * T, M).T.dot(x.reshape(N * T, D)).T
db = dout.sum(axis=(0, 1))
return dx, dw, db
def temporal_softmax_loss(x, y, mask, verbose=False):
"""
A temporal version of softmax loss for use in RNNs. We assume that we are
making predictions over a vocabulary of size V for each timestep of a
timeseries of length T, over a minibatch of size N. The input x gives scores
for all vocabulary elements at all timesteps, and y gives the indices of the
ground-truth element at each timestep. We use a cross-entropy loss at each
timestep, summing the loss over all timesteps and averaging across the
minibatch.
As an additional complication, we may want to ignore the model output at some
timesteps, since sequences of different length may have been combined into a
minibatch and padded with NULL tokens. The optional mask argument tells us
which elements should contribute to the loss.
Inputs:
- x: Input scores, of shape (N, T, V)
- y: Ground-truth indices, of shape (N, T) where each element is in the range
0 <= y[i, t] < V
- mask: Boolean array of shape (N, T) where mask[i, t] tells whether or not
the scores at x[i, t] should contribute to the loss.
Returns a tuple of:
- loss: Scalar giving loss
- dx: Gradient of loss with respect to scores x.
"""
N, T, V = x.shape
x_flat = x.reshape(N * T, V)
y_flat = y.reshape(N * T)
mask_flat = mask.reshape(N * T)
probs = np.exp(x_flat - np.max(x_flat, axis=1, keepdims=True))
probs /= np.sum(probs, axis=1, keepdims=True)
loss = -np.sum(mask_flat * np.log(probs[np.arange(N * T), y_flat])) / N
dx_flat = probs.copy()
dx_flat[np.arange(N * T), y_flat] -= 1
dx_flat /= N
dx_flat *= mask_flat[:, None]
if verbose: print('dx_flat: ', dx_flat.shape)
dx = dx_flat.reshape(N, T, V)
return loss, dx
|
[
"noreply@github.com"
] |
douyh.noreply@github.com
|
a8421853195f74811a37464d18aae3b0494b4b76
|
ebae48030f2c4dc61a2721a742cce6e8be0b1a4c
|
/backend/heir/migrations/0002_heir_user.py
|
1da85f51f13d9626935ca809e6d114783d046847
|
[] |
no_license
|
TestamentSoar/testament
|
f751f285451a6e16dc4a846f581af33417af629e
|
e29a8c366f20312cdf42ed1bddd27da1d832bc62
|
refs/heads/master
| 2023-01-09T23:05:16.169195
| 2019-11-10T06:51:28
| 2019-11-10T06:51:28
| 220,591,069
| 0
| 0
| null | 2022-12-11T12:38:12
| 2019-11-09T04:27:17
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 579
|
py
|
# Generated by Django 2.2.7 on 2019-11-09 12:38
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('heir', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.AddField(
model_name='heir',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
[
"asuhacoder@gmail.com"
] |
asuhacoder@gmail.com
|
f87ee193e778f285703837298bdc7b1351873aaf
|
713deead16f883ea2987b135d212de89de650e06
|
/DAY4/romeo.py
|
9c9dec734add15855e2dc047566a500313825321
|
[] |
no_license
|
paratropper/starting
|
a0ce9bd159ab6c100c96ae4020c69bf3af09af76
|
6724c098664748a60f076695bc127278f9c2a647
|
refs/heads/master
| 2020-06-04T07:19:09.296278
| 2019-06-14T10:39:57
| 2019-06-14T10:39:57
| 191,920,714
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 440
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 6 16:33:52 2019
@author: HP
"""
character = {}
file = open("romeo.txt", mode = "rt")
with open ("romeo.txt", 'rt'):
for line in file:
file.readline().split(" ")
for word in file:
if word in character.keys():
character[word] = character[word] + 1
else:
character[word]=1
print(character)
file.close()
|
[
"raghavparashar2105@gmail.com"
] |
raghavparashar2105@gmail.com
|
62d9ce930c9ad1aa3a0d9988ea318b7e9d503152
|
f92c1be1627642ebdb507ad13aba44ac5a52586f
|
/groupBY.py
|
cf8ff2933edcaf2f7dd05baa9c18d7c124769369
|
[] |
no_license
|
naveenrajut/MYfirst
|
bb035f55e9fcb93121897bd95cc413ccb947e66b
|
24c9e14ab3491ab40308e9b2b19782e2d05a0161
|
refs/heads/master
| 2021-05-09T19:00:07.117564
| 2018-08-03T03:07:40
| 2018-08-03T03:07:40
| 119,179,365
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 199
|
py
|
"""
from __future__ import print_function
from itertools import *
for i,j in groupby(map(int,list(raw_input()))):
print(tuple([len(list(j)), i]) ,end = " ")
"""
from itertools import groupby
|
[
"noreply@github.com"
] |
naveenrajut.noreply@github.com
|
7fcf54c6ab50c34fa058b3055a4d7cdd27efe590
|
ad2729cc3daf640d5d307ea661997845e1892722
|
/Question4/Question4.py
|
9feb015c0c0fd2ab9dec22abff2502369259138f
|
[] |
no_license
|
NdukuCarole/Day-6
|
06d79bea9738ecc71b1821bc2310b4e30a99382a
|
89b08e739b1c3330362b89f087f9efc7eadc50ec
|
refs/heads/main
| 2023-06-06T16:03:43.888636
| 2021-06-30T06:55:41
| 2021-06-30T06:55:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 183
|
py
|
import csv
# use your own csv
info = csv.DictReader(open("covid.csv"))
print("CSV file as a dictionary:\n")
#display data of the csv file
for column in info:
print(column)
|
[
"noreply@github.com"
] |
NdukuCarole.noreply@github.com
|
835347cdcd02e23a7a22f743827346c501f182c4
|
c47a9d1b857ceb5927ef5ea2143dbb798a9e38c0
|
/lessons/lesson-1/solutions/p2.py
|
17bea734b0be0cba86748384171a083a7b8d8382
|
[] |
no_license
|
ScottRMalley/intro-programming
|
3f74390c3fd62e12bb7d60024ad7878458bcf0f4
|
a13d5eee4dce646e69143566dd9a367d8358a167
|
refs/heads/master
| 2023-02-24T07:31:05.010763
| 2021-01-31T14:19:52
| 2021-01-31T14:19:52
| 299,049,406
| 0
| 1
| null | 2021-01-31T14:19:53
| 2020-09-27T14:26:54
|
Python
|
UTF-8
|
Python
| false
| false
| 358
|
py
|
import sys
if(len(sys.argv) < 2):
print('Please input a word')
else:
word = sys.argv[1]
is_palindrome = True
for i in range(len(word)//2):
if not (word[i] == word[len(word)-i-1]):
is_palindrome = False
if is_palindrome:
print(word + ' is a palindrome')
else:
print(word + ' is not a palindrome')
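# Run from the command line, e.g.:
#   python p2.py racecar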
|
[
"srmalley-sphereon@github.com"
] |
srmalley-sphereon@github.com
|
92865cb0988244782e03d7e6649c67dd04e8b3fc
|
93b22689089e32ce4e7b69f1c9f91e1c5908f4d9
|
/ble_modules/ble_load_data.py
|
aae1b69fea1a0fbf5843c3b9322125c3ec54be83
|
[] |
no_license
|
y411186312/vsi_ble_bool
|
0b58d2d76fe293f61f7dd10cd70e80c10fd5486b
|
bed3006ec1e10c8a497bb6eef7f78797e4cd2165
|
refs/heads/master
| 2020-03-29T18:02:51.158225
| 2018-11-14T08:12:42
| 2018-11-14T08:12:42
| 150,192,042
| 0
| 0
| null | 2018-11-14T08:12:43
| 2018-09-25T01:45:25
|
Python
|
UTF-8
|
Python
| false
| false
| 8,253
|
py
|
import sys,os,time,binascii,json
#sys.path.append(os.getcwd())
import includes.ble_common_class as comm_cls
import ble_modules.ble_cmd_buffer as buf_Cls
#name OGF
event_file_list = [
"HCI_Events.data"
]
EVENT_DATA_SUFFIX = "_Return_Parameter.data"
packetType = [
" ",
"Cmd Packet",
"ACL Data Packet",
"Sync Packet",
"Event Packet"
]
################################################################################## new start ###########
def line_is_empty(line_str):
if line_str.find("{", 0) < 0:
return False
else:
if line_str.find("}", 1) < 0:
return False
else:
return True
#return [name_x,name_y,name_z],[size_x, size_y,size_z],[1, 1, 1]
def parse_line_cmd_parameter(para_str, count):
name_list,size_list, fixFlag_List = [], [], []
err_out = [None, None, None]
if count <= 0:
return err_out
header=tail=0
for i in range(count):
#name
header = para_str.find("\"", tail)
tail = para_str.find("\"", header + 1)
if tail < header:
return err_out
content = para_str[header+1: tail]
name = content
#size
header = para_str.find(",", tail)
tail = para_str.find(",", header + 1)
if tail < header:
return None
content = para_str[header+1: tail]
#print "content:",content
try:
size = int(content)
except:
print "Error %s size." % (name)
return err_out
#fixLen
header = para_str.find(",", tail)
tail = para_str.find(",", header + 1)
if tail < header:
return None
content = para_str[header+1: tail].strip('}') #skip all tag '}'
#print "content:",content
try:
fixFlag = int(content)
except:
print "Error %s size." % (name)
return err_out
name_list.append(name)
size_list.append(size)
fixFlag_List.append(fixFlag)
return [name_list, size_list, fixFlag_List]
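# A command spec line is expected to look roughly like this (illustrative example only,
# not taken from the actual .data files):
#   {"HCI_Disconnect", 0x06, 2, {"Connection_Handle", 2, 1}, {"Reason", 1, 1}}
# i.e. command name, OCF, parameter count, then one {name, size, fixed-length flag}
# triple per parameter, which is what parse_line_para/parse_line_cmd_parameter extract.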
def parse_line_para(org_str, ogf):
name = sys._getframe().f_code.co_name
hci_cmd_obj = comm_cls.HCI_SPEC_CLASS()
hci_cmd_obj._ogf = ogf
header=tail=0
#1. find cmd name
header = org_str.find("\"", tail)
tail = org_str.find("\"", header + 1)
if tail <= header:
return None
content = org_str[header+1: tail]
#print "name:",content
hci_cmd_obj._name = content.lower()
#print "cmd:",hci_cmd_obj._cmd
#2. find ocf
header = org_str.find(",", tail)
tail = org_str.find(",", header + 1)
if tail < header:
return None
content = org_str[header+1: tail].strip('}')
#print "content:",content
try:
hci_cmd_obj._ocf = int(content, 16)
#print "ocf:",hci_cmd_obj._ocf
except:
print "Error ocf format, cmd:", hci_cmd_obj._name
return None
hci_cmd_obj._oprCode = (hci_cmd_obj._ocf & 0x3ff) | ((hci_cmd_obj._ogf & 0x3f) << 10)
#3. find paramtere counts
header = org_str.find(",", tail)
tail = org_str.find(",", header + 1)
if tail < header:
return None
content = org_str[header+1: tail].strip('}')
try:
hci_cmd_obj._paraCounts = int(content)
except:
print "Error parameters size, cmd:", hci_cmd_obj._name
return None
if hci_cmd_obj._paraCounts == 0:
return hci_cmd_obj
#4. parse paramteres
name_list, size_list, fixFlag_list = parse_line_cmd_parameter(org_str[tail:] ,hci_cmd_obj._paraCounts)
if name_list != None and size_list != None and fixFlag_list != None:
hci_cmd_obj._paraNameLists = name_list
hci_cmd_obj._paraSizeLists = size_list
hci_cmd_obj._paraFixLenFlagLists = fixFlag_list
else:
print "Error org_str:",org_str
return None
return hci_cmd_obj
def load_para_from_file(path, ogf):
cmd_lists_array = []
try:
file = open(path)
except:
print ("Error to open :", path)
return None
lines = file.readlines() #read all lines
if len(lines) > 0:
for line in lines:
if line_is_empty(line) == False:
continue
line = line.strip('\n') # remove the '\n'
para_lists = parse_line_para(line, ogf)
if para_lists == None:
print "Error on line:", line
continue
cmd_lists_array.append(para_lists)
return cmd_lists_array
#cmd file to OGF
g_cmdFileToOgfArray = [
["LinkControlCommands.data", 0x1],
["Link_Policy_Commands.data", 0x2],
["Controller_Baseband_Commands.data", 0x3],
["Informational_Parameters_Commands.data", 0x4],
["Status_Parameters_Commands.data", 0x05],
["Testing_Commands.data", 0x6],
["Vendor_Commands.data", 0x3f],
["LE_Commands.data", 0x8],
]
"""
g_paraFileToOgfArray = [
["LinkControlCommands.data", 0x1],
["Link_Policy_Commands.data", 0x2],
["Controller_Baseband_Commands.data", 0x3],
["Informational_Parameters_Commands.data", 0x4],
["Status_Parameters_Commands.data", 0x05],
["Testing_Commands.data", 0x6],
["Vendor_Commands.data", 0x3f],
["LE_Commands.data", 0x8],
]
"""
class Ble_LoadCmdClass:
def __init__(self, cmdSpecFolder, bleSubEventCodeJsonFilePath, cmdDefaultFilePath):
self._cmdsList = []
self._returnParaList = []
self._eventsList = []
#self._cmdBufferDefault_dic = {}
self._subEventJsonObj = None
self._cmdSpecFolder = cmdSpecFolder
self._subEvtJsonFilePath = bleSubEventCodeJsonFilePath
self._cmdDefaultFilePath = cmdDefaultFilePath
self._cmdBufClsObj = None
self._loadInit()
#self._defaultValueComment = ''
def _loadInit(self):
self._loadSubEventCode()
self._cmdBufClsObj = buf_Cls.cmdBufferOprClass(self._cmdDefaultFilePath)
self._load_spec_parameters()
def _cmdInputSave(self, name, value_list):
self._cmdBufClsObj._cmd_buf_add(name, value_list)
def _loadClose(self):
print "enter _loadClose"
self._cmdBufClsObj._cmd_buf_close()
def _loadAddDefault(self, name, value_list): #for add default value
return self._cmdBufClsObj. _cmd_buf_add(name, value_list)
def _loadSubEventCode(self):
try:
f = open(self._subEvtJsonFilePath)
self._subEventJsonObj = json.load(f)
except:
print "error............"
return
def _getCmdList(self):
return self._cmdsList
def _getEventList(self):
return self._eventsList
def _getReturnParaList(self):
return self._returnParaList
def _printCmdParaList(self):
for i in range(len(self._cmdsList)):
print "name: %s, oprcode:%x" % (self._cmdsList[i]._name, self._cmdsList[i]._oprCode)
def _printReturnParaList(self):
for i in range(len(self._returnParaList)):
print "name: %s, oprcode:%x" % (self._returnParaList[i]._name, self._returnParaList[i]._oprCode)
#def _loadClose(self):
# buf_Cls
def _load_spec_parameters(self):
try:
fileList = os.listdir(self._cmdSpecFolder)
except:
return
for file in fileList:
ogf=0
isCmd = False
isEvent = False
if event_file_list[0] == file:
#parse event list
#print "find event file............"
isEvent = True
for item in g_cmdFileToOgfArray:
if item[0] == file:
isCmd = True
ogf = item[1]
break
else:
compareStr = item[0].split('.')[0]
if compareStr == file[0:len(compareStr)]:
ogf = item[1]
break
fullPath = self._cmdSpecFolder + "\\" + file
curFileParaList = load_para_from_file(fullPath, ogf)
if curFileParaList == None:
print "Error format on file:", file
continue
for para in curFileParaList:
para._classStr = file
if isCmd == False and isEvent == False:
para._isCmd = False
self._returnParaList.append(para)
else:
if isEvent == True:
                        # events reuse the command parsing function, so some fields need to be re-written here
para._isEvent = True
para._eventCode = para._ocf
para._ocf = 0
if self._subEventJsonObj != None:
#print "para._name:",para._name
#print "self._subEventJsonObj[para._name]:",self._subEventJsonObj[para._name]
#print "::::::::::",para._name
if self._subEventJsonObj.get(para._name) != None:
para._subEventCode = int(self._subEventJsonObj[para._name], 16)
#print "get subevent code [%s]....:%d"%(para._name, para._subEventCode)
self._eventsList.append(para)
else:
para._isCmd = True
#add default value
if self._cmdBufClsObj != None:
para._defaultValueList = self._cmdBufClsObj._cmd_buf_get_list(para._name)
self._cmdsList.append(para)
def _loadPrintDefaultValue(self):
if self._cmdBufClsObj != None:
self._cmdBufClsObj._cmd_buf_print_all()
for item in self._cmdsList:
print "name: %s, default value: %s" % (item._name, item._defaultValueList)
|
[
"jianjun.yang@verisilicon.com"
] |
jianjun.yang@verisilicon.com
|
ad54650741f2e89368f0bc7e5da5295a2146878e
|
4722a265adcece16fd13e214cc3181a487bd7915
|
/category.py
|
46d5988dc1e80bffb96b08abd1310504eda88daa
|
[] |
no_license
|
stradtkt/game_api
|
8a7adf48b4780e5ce4f1fe6455e60bce928d49ea
|
624040bd6c7b6aed01828b9d84802a01a2f8a4bf
|
refs/heads/master
| 2020-08-06T09:43:48.495943
| 2019-10-05T06:03:20
| 2019-10-05T06:03:20
| 212,929,710
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,940
|
py
|
import sqlite3
from flask_restful import Resource, reqparse
class Category(Resource):
    # request-body parser used by put(); expects a JSON/form field called "name"
    parser = reqparse.RequestParser()
    parser.add_argument('name', type=str, required=True, help='This field cannot be blank')
def get(self, name):
        category = self.find_by_name(name)
        if category:
            return category
        return {"message": "Category not found"}
@classmethod
def find_by_name(cls, name):
conn = sqlite3.connect('games.db')
cursor = conn.cursor()
query = "SELECT * FROM categories WHERE name=?"
result = cursor.execute(query, (name,))
row = result.fetchone()
conn.close()
if row:
return {"category": {"name": row[0]}}
@classmethod
def find_by_id(cls, id):
conn = sqlite3.connect('games.db')
cursor = conn.cursor()
query = "SELECT * FROM categories WHERE id=?"
result = cursor.execute(query, (id,))
row = result.fetchone()
conn.close()
if row:
return {"category": {"name": row[0]}}
@classmethod
def insert(cls, cat):
conn = sqlite3.connect('games.db')
cursor = conn.cursor()
query = "INSERT INTO categories VALUES (?,?,?,?,?,?,?)"
cursor.execute(query, (cat['name']))
conn.commit()
conn.close()
@classmethod
def update(cls, cat):
conn = sqlite3.connect('games.db')
cursor = conn.cursor()
query = "UPDATE categories SET name=? WHERE name=?"
cursor.execute(query, (cat['name']))
conn.commit()
conn.close()
    def post(self, name):
        # the category name comes from the URL, so no request body is parsed here
        category = {"name": name}
try:
self.insert(category)
except:
return {"message": "An error occurred inserting the platform."}, 500
return category, 201
def delete(self, id):
conn = sqlite3.connect('games.db')
cursor = conn.cursor()
query = "DELETE FROM categories WHERE id=?"
cursor.execute(query, (id,))
conn.commit()
conn.close()
return {"message": "Platform deleted"}
def put(self, id):
data = Category.parser.parse_args()
category = self.find_by_id(id)
updated_category = {"name": data['name']}
if category is None:
try:
self.insert(updated_category)
except:
return {"message": "An error occurred inserting the Category."}
else:
try:
self.update(updated_category)
except:
return {"message": "An error occurred inserting the Category."}
return updated_category
class CategoryList(Resource):
def get(self):
conn = sqlite3.connect('games.db')
cursor = conn.cursor()
query = "SELECT * FROM categories"
result = cursor.execute(query)
categories = []
for row in result:
categories.append({"name": row[0]})
conn.close()
return {"categories": categories}
|
[
"stradtkt22@gmail.com"
] |
stradtkt22@gmail.com
|
8ca8f2c3e5ea24a215316ed5900f17d04a6072fb
|
3d5642fd6ec2e89ed87438ebd3c55ef97c04eadf
|
/nfnets/utils.py
|
a082b98b534dad65a36233a27bc9e527c161b491
|
[] |
no_license
|
peternara/Transformer-Recommender
|
725218fbd1be32cdd7246ee4b3c831927cd44619
|
f5b9bb01f6ca9be282233a1458b06a70c4642dcd
|
refs/heads/main
| 2023-03-22T02:43:45.041901
| 2021-02-28T15:53:29
| 2021-02-28T15:53:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,328
|
py
|
import torch
from torch import nn
from nfnets import WSConv2d
def replace_conv(module: nn.Module, conv_class=WSConv2d):
"""Recursively replaces every convolution with WSConv2d.
Usage: replace_conv(model) #(In-line replacement)
Args:
        module (nn.Module): target model whose convolutions must be replaced.
conv_class (Class): Class of Conv(WSConv2d or ScaledStdConv2d)
"""
for name, mod in module.named_children():
target_mod = getattr(module, name)
if type(mod) == torch.nn.Conv2d:
            # the conv constructor expects bias as a bool; target_mod.bias is a tensor (or None)
            setattr(module, name, conv_class(target_mod.in_channels, target_mod.out_channels, target_mod.kernel_size,
                                             target_mod.stride, target_mod.padding, target_mod.dilation, target_mod.groups,
                                             target_mod.bias is not None))
if type(mod) == torch.nn.BatchNorm2d:
setattr(module, name, torch.nn.Identity())
for name, mod in module.named_children():
        replace_conv(mod, conv_class)  # keep using the chosen conv class in nested modules
def unitwise_norm(x: torch.Tensor):
if x.ndim <= 1:
dim = 0
keepdim = False
elif x.ndim in [2, 3]:
dim = 0
keepdim = True
elif x.ndim == 4:
dim = [1, 2, 3]
keepdim = True
else:
raise ValueError('Wrong input dimensions')
return torch.sum(x**2, dim=dim, keepdim=keepdim) ** 0.5
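# --- Hedged usage sketch (not part of the original file): swaps every Conv2d in
# a model for WSConv2d and every BatchNorm2d for Identity, in place. torchvision's
# resnet18 is only an illustrative choice and is assumed to be installed.
if __name__ == '__main__':
    from torchvision.models import resnet18
    model = resnet18()
    replace_conv(model, WSConv2d)
    print(model.conv1)  # now a WSConv2d with the same shape hyper-parameters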
|
[
"noreply@github.com"
] |
peternara.noreply@github.com
|
b5a44008058029d6497dabbfbf18ebed04eba4fd
|
cd7e0fa02276f289a883ec4999d613ce144e4b3c
|
/cv_cw2/proto/corner_harris.py
|
e3214fe94d4b95bdbaa849bf3b7f56421989f39d
|
[] |
no_license
|
eubinecto/cv-cw2
|
337ce026c7b757af9fbcf11e4cc4bf333c303998
|
a4f8c315a69be8a054c7428436d5cd8c49810446
|
refs/heads/main
| 2023-05-15T04:21:02.214018
| 2021-05-27T05:51:29
| 2021-05-27T05:51:29
| 369,805,825
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 740
|
py
|
import numpy as np
import cv2
from cv_cw2.paths import BERNIE_JPEG
import matplotlib.pyplot as plt
from scipy.ndimage import gaussian_filter
def main():
# read bernie as a gray image
bernie: np.array = cv2.imread(BERNIE_JPEG, cv2.IMREAD_GRAYSCALE)
bernie = gaussian_filter(bernie, 0.5)
kps = cv2.cornerHarris(bernie, blockSize=7, ksize=3, k=0.04)
kps = np.argwhere(kps > 0.001 * kps.max()) # thresholding occurs here.
kps = [cv2.KeyPoint(x=float(pt[1]), y=float(pt[0]), _size=1) for pt in kps] # instantiating the key points
drawn = cv2.drawKeypoints(bernie, kps, None, color=(255, 0, 0)) # draw the key points here.
plt.imshow(drawn, cmap="gray")
plt.show()
if __name__ == '__main__':
main()
|
[
"tlrndk123@gmail.com"
] |
tlrndk123@gmail.com
|
db390961179380ef85d957a425ab19eb1f46d760
|
d6f40f5c3e4f1e9fcbf8d5fb495f72ed11844e76
|
/P48/test.py
|
d95031ea554c9a3aa4bfc825340cd8de5b52189b
|
[] |
no_license
|
sakamoto2019/python-99
|
019a9414c9e30c4bb225a277b361cde988330efd
|
45a0b4054e1881ca9872b01b0b33efec09f53bab
|
refs/heads/master
| 2020-07-02T03:06:14.897472
| 2019-08-19T04:07:37
| 2019-08-19T04:07:37
| 201,396,592
| 0
| 0
| null | 2019-08-19T04:07:38
| 2019-08-09T05:31:14
|
Python
|
UTF-8
|
Python
| false
| false
| 1,164
|
py
|
import sys
import unittest
sys.path.append('../')
from main import table
from P46.main import AND, OR, EQ
class Test(unittest.TestCase):
def test_size2(self):
self.assertEqual(table(lambda a, b: AND(a, OR(a, b))),
[
[True, True, True],
[True, False, True],
[False, True, False],
[False, False, False]
])
def test_size3(self):
self.assertEqual(table(lambda a, b, c: EQ(AND(a, OR(b, c)), OR(AND(a, b), AND(a, c)))),
[
[True, True, True, True],
[True, True, False, True],
[True, False, True, True],
[True, False, False, True],
[False, True, True, True],
[False, True, False, True],
[False, False, True, True],
[False, False, False, True]
])
if __name__ == "__main__":
unittest.main()
|
[
"daisuke.develop@gmail.com"
] |
daisuke.develop@gmail.com
|
93efaa4bb153fd02946d5bfa88b60510b117e571
|
0d483b997d4e0a818f3c388e3997db9a20378926
|
/accounts/urls.py
|
ec9db4785d1879de0354afc0fa8fcc1cf52f4067
|
[] |
no_license
|
guptabhishek8/cse4
|
7676b4240e610a9c5d7c729e53bad0c132c5867c
|
ab11c2e3aaa5de4dbddc7d3ec619ffff727353f4
|
refs/heads/master
| 2021-09-29T05:43:27.763759
| 2020-02-01T18:19:13
| 2020-02-01T18:19:13
| 237,559,441
| 0
| 0
| null | 2021-09-22T18:30:14
| 2020-02-01T04:05:59
|
HTML
|
UTF-8
|
Python
| false
| false
| 655
|
py
|
from django.urls import path
from django.conf.urls import url
from . import views
from django.contrib.auth.views import LogoutView
urlpatterns = [
path('', views.index, name='index'),
url(r'^signin/', views.signin, name="signin"),
url(r'^postsignin/', views.postsignin, name="postsignin"),
url(r'^termspage/', views.termspage, name="termspage"),
url(r'^signup/', views.signup, name="signup"),
url(r'^contact/', views.contact, name="contact"),
url(r'^home/', views.home, name="home"),
url(r'^signout/', LogoutView.as_view(next_page='index'), name="logout"),
url(r'^domain/', views.domain, name="domain"),
]
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
256dc3949b5884d61d38068d042134c5851a1e4a
|
b556e612ce736a137c1fd6e820675c2b05a2dd62
|
/posts/serializers.py
|
8a36e34e08522b44052e7020b516663a13ee8b12
|
[] |
no_license
|
adhiman1999/djangoBlogAPI
|
a1990248e4dc348c88e969be73755b9627c36bb4
|
52acda8be88561d829ce0023f70e848c39dba077
|
refs/heads/main
| 2023-06-09T12:11:53.280729
| 2021-07-04T11:43:01
| 2021-07-04T11:43:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 221
|
py
|
from rest_framework import serializers
from .models import Post
class PostSerializer(serializers.ModelSerializer):
class Meta:
model = Post
fields = ('id', 'author', 'title', 'body', 'created_at',)
|
[
"adhiman1999@gmail.com"
] |
adhiman1999@gmail.com
|
d1b2971e1a82f56173d3c5463430dd1325cefc8f
|
44f1973af95503e8dcddb4eddd04270908172018
|
/CS-121/Assignment 1/c/TwoGramFrequencyCounter.py
|
de629caf732f43cb3a9f11f904373f8927771dcf
|
[] |
no_license
|
donglil/UCI
|
e997f1cdd05be1af52cd495da89d5d21451dce9d
|
2738d4edde8ea0e9ebc4068d0f691dac10829584
|
refs/heads/master
| 2020-12-11T07:45:36.229893
| 2015-04-21T01:50:07
| 2015-04-21T01:50:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 598
|
py
|
__author__ = 'Viral_Shah'
from b.WordFrequencyCounter import compute_word_frequencies
import itertools
def compute_two_gram_frequencies(words: list):
"""
:param words: takes in a list of strings
:return: returns a list of frequency objects
    Uses Python's itertools to form consecutive word pairs (2-grams) and count their frequencies using WordFrequencyCounter
"""
if not words:
return []
a, b = itertools.tee(words)
next(b, None)
pairs = zip(a, b)
two_grams = [" ".join(p) for p in pairs]
return compute_word_frequencies(two_grams)
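# --- Hedged usage example (not part of the original file): for this input the
# 2-grams counted are "a rose" (twice), "rose is" and "is a"; the exact shape of
# the returned frequency objects is defined in b/WordFrequencyCounter.py.
if __name__ == '__main__':
    print(compute_two_gram_frequencies(["a", "rose", "is", "a", "rose"]))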
|
[
"vrshah@uci.edu"
] |
vrshah@uci.edu
|
c774923c4adcd8df1a711581705bcb2009cd8c5c
|
38ec04c9a1941f1524c2fde59f8cb06074a3be8e
|
/src/expenses_tracker/expenses_tracker/expenses/views.py
|
1d1ddd40de085cfb07960acbc2c4587d5c2c1ea3
|
[] |
no_license
|
ivelinakaraivanova/SoftUniPythonWebBasics
|
7fec8ac6e3cb4248f3c8286f2194a30c34ac20ac
|
0a49c5f5e1c36240a8888ce92e2a48016c84571c
|
refs/heads/main
| 2023-06-09T12:42:30.764565
| 2021-07-05T10:17:02
| 2021-07-05T10:17:02
| 373,289,104
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,867
|
py
|
from django.shortcuts import render, redirect
from expenses_tracker.core.profile_utils import get_profile
from expenses_tracker.expenses.forms import CreateExpenseForm, EditExpenseForm, DeleteExpenseForm
from expenses_tracker.expenses.models import Expenses
def home(request):
profile = get_profile()
if not profile:
return redirect('create profile')
expenses = Expenses.objects.all()
context = {
'expenses': expenses,
'budget': profile.budget,
'budget_left': profile.budget_left,
}
return render(request, 'home-with-profile.html', context)
def create_expense(request):
if request.method == 'POST':
        form = CreateExpenseForm(request.POST)  # take the data from the request body and fill it into the form
if form.is_valid():
form.save()
return redirect('home')
else:
form = CreateExpenseForm()
context = {
'form': form
}
return render(request, 'expense-create.html', context)
def edit_expense(request, pk):
expense = Expenses.objects.get(pk=pk)
if request.method == 'POST':
form = EditExpenseForm(request.POST, instance=expense) # to load the data of the edited expense
if form.is_valid():
form.save()
return redirect('home')
else:
form = EditExpenseForm(instance=expense)
context = {
'expense': expense,
'form': form
}
return render(request, 'expense-edit.html', context)
def delete_expense(request, pk):
expense = Expenses.objects.get(pk=pk)
if request.method == 'POST':
expense.delete()
return redirect('home')
else:
form = DeleteExpenseForm(instance=expense)
context = {
'expense': expense,
'form': form
}
return render(request, 'expense-delete.html', context)
|
[
"73067985+ivelinakaraivanova@users.noreply.github.com"
] |
73067985+ivelinakaraivanova@users.noreply.github.com
|
d3b1d9f3f0d4403423097a07d210622c11a907f2
|
24d8bc5f09f8b8e484ab76aa317c5436a7ddf43c
|
/Code-it/Greedy Algorithm - 수강신청.py
|
4e0a1d30c94c1ed2bf035e3dd6ec30c27a7e0b67
|
[] |
no_license
|
GroundP/Algorithm
|
c00de37256a193c42051eb58aa8157d0d1882f67
|
1dd4d22141a43be99a9aaff6ba00e04b66e4cccb
|
refs/heads/master
| 2020-07-27T09:21:28.814675
| 2019-10-04T16:07:13
| 2019-10-04T16:07:13
| 209,044,115
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,974
|
py
|
"""
이번 학기 코드잇 대학교의 수업 리스트가 나왔습니다.
[(4, 7), (2, 5), (1, 3), (8, 10), (5, 9), (2, 6), (13, 16), (9, 11), (1, 8)]
리스트에 담겨있는 튜플들은 각각 하나의 수업을 나타냅니다. 각 튜플의 0번째 항목은 해당 수업의 시작 시간, 그리고 1 번 항목은 해당 수업이 끝나는 시간입니다. 예를 들어서 0번 인덱스에 있는 튜플값은 (4, 7)이니까, 해당 수업은 4교시에 시작해서 7교시에 끝나는 거죠.
(2, 5)를 듣는다고 가정합시다. (4, 7) 수업은 (2, 5)가 끝나기 전에 시작하기 때문에, 두 수업은 같이 들을 수 없습니다. 반면, 수업 (1, 3)과 (4, 7)은 시간이 겹치지 않기 때문에 동시에 들을 수 있습니다.
열정이 불타오르는 신입생 지웅이는 최대한 많은 수업을 들을 수 있는 수업 조합을 찾아주는 함수 course_selection 함수를 작성하려고 합니다.
course_selection은 파라미터로 전체 수업 리스트를 받고 가능한 많은 수업을 담은 리스트를 리턴합니다.
탬플릿
def course_selection(course_list):
# 코드를 쓰세요
# 테스트
print(course_selection([(4, 7), (2, 5), (1, 3), (8, 10), (5, 9), (2, 6), (13, 16), (9, 11), (1, 8)]))
[(1, 3), (4, 7), (8, 10), (13, 16)]
"""
def course_selection(course_list):
def endSort(tu):
return tu[1]
    sorted_list = sorted(course_list, key=endSort)
# start = sorted_list[0][0]
end = sorted_list[0][1]
sel_list = [sorted_list[0]]
for tup in sorted_list:
if tup[0] > end:
sel_list.append(tup)
end = tup[1]
return sel_list
# Test
print(course_selection([(6, 10), (2, 3), (4, 5), (1, 7), (6, 8), (9, 10)]))
print(course_selection([(1, 2), (3, 4), (0, 6), (5, 7), (8, 9), (5, 9)]))
print(course_selection([(4, 7), (2, 5), (1, 3), (8, 10), (5, 9), (2, 5), (13, 16), (9, 11), (1, 8)]))
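# Sanity check (added for illustration, not part of the original solution file):
# verifies the example given in the problem statement above.
assert course_selection([(4, 7), (2, 5), (1, 3), (8, 10), (5, 9), (2, 6), (13, 16), (9, 11), (1, 8)]) == \
    [(1, 3), (4, 7), (8, 10), (13, 16)]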
|
[
"bloom0819@naver.com"
] |
bloom0819@naver.com
|
24d94894ab51cc872088cfbec9ddf99e63313312
|
4cb77681b2a007d7c83ea0e540f619c3a1faacae
|
/unit/gather.py
|
c06209de1d97073816ca7d564e248e46f6353068
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
odeke-em/mocollab
|
35cb9d5ccd2440528dad6ee4167eb7ba4b7fd7c8
|
53785e5371b304a0571db627d8e7c2afc31355b4
|
refs/heads/master
| 2020-05-20T05:55:10.876429
| 2015-03-17T05:10:07
| 2015-03-17T05:10:07
| 31,869,237
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,903
|
py
|
def gather(transport, unit_list, value):
"""
Finds the maximum possible "value" of troops that can be loaded
in to the remaining space of a transport.
Input:
transport - The transport unit to be loaded.
unit_list - The list of units that can be loaded onto transport.
You may assume that these units are non-transport ground units
that are already on the same team as the transport, have not
moved this turn, and can reach the transport with a single move.
value - a function that maps a unit to some value
Output:
A list of units from unit_list. Do NOT load them into the transport
here, just compute the list of units with maximum possible value whose
total size is at most the remaining capacity of the transport.
The calling function from gui.py will take care of loading them.
Target Complexity:
It is possible to implement this method to run in time
O(n * C) where n is the number of units in unit_list and C
is the remaining capacity in the transport. Remember, the capacity
of a transport and the sizes of the units are all integers.
"""
gathered = list()
# the unused capacity of the transport
remain = transport.capacity
# Just a greedy algorithm that looks at each unit
# and selects them if they will fit (ignoring their value).
#
# This will NOT always find the optimum solution.
# (e.g. running this on the cluster of troops in the lower-left corner
# of the level many.lvl will find a sub-optimal solution
    # when value(unit) is the remaining health of the unit)
for u in unit_list:
if u.unit_size <= remain:
gathered.append(u)
remain -= u.unit_size
# return the list of units from an optimal solution
# note, in particular, that we have not actually loaded them here
return gathered
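# --- Hedged sketch (not part of the original stub): a 0/1-knapsack dynamic
# program over the remaining capacity, i.e. the O(n * C) approach that the
# "Target Complexity" note in the docstring above alludes to.
def gather_optimal(transport, unit_list, value):
    capacity = transport.capacity
    # best[c] = (best total value, units chosen) with total size at most c
    best = [(0, [])] * (capacity + 1)
    for u in unit_list:
        size, val = u.unit_size, value(u)
        # iterate capacities downwards so each unit is used at most once
        for c in range(capacity, size - 1, -1):
            candidate = best[c - size][0] + val
            if candidate > best[c][0]:
                best[c] = (candidate, best[c - size][1] + [u])
    return best[capacity][1]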
|
[
"brennen.mcphee@gmail.com"
] |
brennen.mcphee@gmail.com
|