blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b2df3ae98d1a7f952e4f192ca17d0aa494cf4859 | 5c5fb808dd5f4a6eaa78f2d1bf7b0a6812e41b41 | /database/mymysql.py | 819fcf32a7ea589acae0514b79a183bff1102a45 | [] | no_license | bugdude01/databaseTester | aa4d204ac7cfbc7bbacdc517249c04aaea9c294b | 82d9817b1d0ebc52376a7a5e162f614b1b354f96 | refs/heads/master | 2020-12-02T22:36:33.306029 | 2018-01-10T15:40:12 | 2018-01-10T15:40:12 | 96,155,503 | 0 | 1 | null | 2017-07-04T07:57:55 | 2017-07-03T22:39:04 | Python | UTF-8 | Python | false | false | 2,078 | py | import mysql.connector as _mysql
class MySQLDatabase(object):
    """
    Thin driver wrapper around a mysql.connector connection.

    Connects when the object is constructed, closes the connection when the
    object is destroyed, and offers small helpers for inspecting the schema.

    Fix: the original used Python-2-only syntax (`print "..."` statements and
    `except Error, e:`), which is a SyntaxError on Python 3 — the version the
    mysql.connector package actually targets.  Converted to version-neutral
    `print(...)` calls and `except ... as e`.
    """
    def __init__(self, database_name, username,
                 password, host='localhost'):
        """
        Try to connect with the given credentials; on failure the error is
        printed (not re-raised), leaving the instance without a `db` attribute.
        """
        try:
            self.db = _mysql.connect(db=database_name, host=host,
                                     user=username, passwd=password)
            self.database_name = database_name
            print("Connected to MySQL!")
        except _mysql.Error as e:
            print(e)

    def __del__(self):
        """
        Close the connection, but only if __init__ managed to open one
        (hence the hasattr guard).
        """
        if hasattr(self, 'db'):
            self.db.close()
            print("MySQL Connection Closed")

    def get_available_tables(self):
        """
        Return all table names in the connected database.
        The result is also cached on self.tables.
        """
        cursor = self.db.cursor()
        cursor.execute("SHOW TABLES;")
        self.tables = cursor.fetchall()
        cursor.close()
        return self.columns if False else self.tables

    def get_columns_for_table(self, table_name):
        """
        Return the column metadata of *table_name* (cached on self.columns).

        NOTE(review): *table_name* is interpolated into the SQL statement —
        identifiers cannot be bound as parameters, so only pass trusted names.
        """
        cursor = self.db.cursor()
        cursor.execute("SHOW COLUMNS FROM `%s`" % table_name)
        self.columns = cursor.fetchall()
        cursor.close()
        return self.columns
"rafrhodes@yahoo.co.uk"
] | rafrhodes@yahoo.co.uk |
511e48ff9cf4124d93300673c578b39bc6d7f334 | 0bd1a778c5cf9d908a2eee61945f86aa61b686ff | /Circular_Array/test_circular_array.py | 536932be7132b394a22f41d6b561d5f928fc8b29 | [] | no_license | baubrun/HackerRank | 81f652bde608d79ec9d678e21afa9d174fe850b1 | f9d5def5b5f9f4cf4d365ccb2e51053de72e482a | refs/heads/master | 2021-07-06T18:02:35.585945 | 2020-07-19T14:47:55 | 2020-07-19T14:47:55 | 133,244,723 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | import pytest
from .circular_array import circular_array_rotation as cr
# Each case: rotations k, source array a, queried indices, expected answers.
ROTATION_CASES = [
    (2, [1, 2, 3], [0, 0, 0], [2, 2, 2]),
    (3, [99, 10, 2, 3, 672], [0, 2, 3], [2, 672, 99]),
    (10, [], [], []),
    (1, [1, 2], [-1], [1]),
]


@pytest.mark.parametrize("k, a, queries, expected", ROTATION_CASES)
def test_circular_array_rotation(k, a, queries, expected):
    """Each queried index must yield the element after k right-rotations."""
    assert cr(a, k, queries) == expected
| [
"noreply@github.com"
] | noreply@github.com |
e6599d6bb43f7f48a41f3ba72d4313610e4c2643 | 07c427042a83fb823c78646fcd50d5522afec797 | /url_manager.py | 5ff908eebb479be7a9bab451f1bab4f18f8c24b6 | [] | no_license | ZXShwan/Python_Web_Crawler_BaiduBaike | ebc7b2a6cc9cc7bd012bcaf2bf6ffbd75a509143 | c534f6284af2e041308dde3d4be8217bca4c2f69 | refs/heads/master | 2021-01-23T10:19:38.865100 | 2017-06-01T10:58:44 | 2017-06-01T10:58:44 | 93,044,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 704 | py | # -*- coding: utf-8 -*-
__author__ = 'zx'
__date__ = '6/1/17 12:14'
class UrlManager(object):
    """Crawl frontier: URLs waiting to be fetched vs. URLs already processed.

    A URL is accepted only once — adding a URL that is pending or already
    processed is a no-op, so the crawler never revisits a page.

    Improvements over the original:
    - `add_new_urls` used `len(urls) == 0`, which raises TypeError for
      generators; the truthiness test below accepts any iterable (for a
      generator, iteration simply yields nothing when it is empty).
    - `has_new_url` returned `len(...) != 0`; `bool(...)` is the idiomatic,
      equivalent form.
    """
    def __init__(self):
        self.new_urls = set()   # URLs queued for crawling
        self.old_urls = set()   # URLs already handed out via get_new_url()

    def add_new_url(self, url):
        """Queue *url* unless it is None or has been seen before."""
        if url is None:
            return
        if url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    def add_new_urls(self, urls):
        """Queue every URL in *urls*; None/empty inputs are ignored."""
        if not urls:
            return
        for url in urls:
            self.add_new_url(url)

    def has_new_url(self):
        """Return True while at least one URL is still pending."""
        return bool(self.new_urls)

    def get_new_url(self):
        """Pop an arbitrary pending URL and mark it as processed."""
        new_url = self.new_urls.pop()
        self.old_urls.add(new_url)
        return new_url
| [
"xz1661@nyu.edu"
] | xz1661@nyu.edu |
d55793fb609d3a3e4998700541371e7831f39f4d | a978460896c2d81f009dd96f7601403b01e1cb50 | /Code-Fun-Do-18-tried-django/yt_comment.py | 222ff59587a55a30b6e14a427552b98196b68b23 | [] | no_license | ankusht/Django_project | 717b6483db6d212292a56079a8888a58dfdf3ff6 | 2153edc5753af2ff119c4db97aa54a542e73f575 | refs/heads/master | 2021-04-06T18:05:52.108748 | 2018-03-15T10:33:20 | 2018-03-15T10:33:20 | 125,348,538 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | import yt_comment_api as yt_api
# # # # #
ITERATION_CALL = 10
# # # # #
def get_videoId(url):
    """Extract the video id: the value of the first query parameter of *url*."""
    query = url.split("?")[1]
    return query.split("=")[1]
def process(video_id):
    """Fetch the raw comment response for *video_id* via the YouTube API helper."""
    return yt_api.init(video_id)
def init(url):
    """Entry point: resolve the video id embedded in *url* and fetch its comments."""
    return process(get_videoId(url))
"noreply@github.com"
] | noreply@github.com |
d7d20ba482b4a4e2eb22989f1522607e4cf5c0db | 80cd47d959c2d17672cc4a79eed6a476c4dc5e6f | /binding.gyp | 505430e66ca3202fb6d2f3b69b2e2b57cff357a6 | [] | no_license | KTXSoftware/kit-git | 5ad520a94e681a18431de48a21f343903e3cb1c8 | 622266935528a0d1cd9bf517906481c6a5a346f5 | refs/heads/master | 2021-03-12T19:53:44.133880 | 2014-02-06T02:44:32 | 2014-02-06T02:44:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 928 | gyp | {
"targets": [
{
"target_name": "git",
"include_dirs": ["libgit2/src", "libgit2/include", "libgit2/deps/http-parser", "libgit2/deps/regex", "libgit2/deps/zlib"],
"sources": [
'<!@(ls -1 libgit2/src/*.c)',
'libgit2/deps/http-parser/http_parser.c',
'libgit2/deps/regex/regex.c',
'<!@(ls -1 libgit2/deps/zlib/*.c)',
'<!@(ls -1 libgit2/src/transports/*.c)',
'<!@(ls -1 libgit2/src/xdiff/*.c)',
'node_git.cpp'
],
'conditions': [
['OS=="linux"', {
}],
['OS=="win"', {
"defines": ["WIN32_SHA1", "GIT_WINHTTP"],
"sources": ['libgit2/src/hash/hash_win32.c', '<!@(ls -1 libgit2/src/win32/*.c)']
}, { # OS != "win",
"defines": ["STDC"],
"sources": ['libgit2/src/hash/hash_generic.c', '<!@(ls -1 libgit2/src/unix/*.c)']
}]
]
}
]
}
| [
"robert@ktx-software.com"
] | robert@ktx-software.com |
a3e864dbe6345a2de83ebc2a959dd972fad8e4c0 | 7785cc3baee39fb4065538ea96513f181f5b2260 | /modules/BettingAnalyzer.py | b5ad5913c58b8b217338a8a9d8412a3f5c0988b9 | [] | no_license | AgborFrank/soccer-draws-bettor | 18d71fad5ef6e90e1f2a85d5a35c9faf8c552964 | 59c9c02b1fbfe18a57e3079966a1df6e3e524afb | refs/heads/master | 2022-02-20T09:52:26.752692 | 2018-12-26T16:53:09 | 2018-12-26T16:53:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,995 | py | """
BettingAnalyzer.py
"""
from Logger import Logger
from SystemParameters import *
import time
class BettingAnalyzer(object):
    """
    Keeps bet history, provides related statistics.

    Tracks the account balance (in BTC), the current rung of the betting
    progression (indexing BET_VALUE_GUIDE, capped at MAX_BET_TIER — both
    from SystemParameters), and writes a session summary file when the
    instance is destroyed.
    """
    def __init__(self):
        self.current_balance = 0.0
        self.current_betting_level = 1
        self.start_time = int(time.time())
        self.start_balance = None  # captured by the first set_balance() call
        self.end_time = None
        self.end_balance = None
        Logger.logn('Initialized BettingAnalyzer at ' + str(self.start_time))
    def set_balance(self, new_balance):
        """
        Set account balance (BTC).  The first balance ever seen is also
        recorded as the session's starting balance.
        """
        self.current_balance = float(new_balance)
        if self.start_balance is None:
            self.start_balance = self.current_balance
            Logger.logn('Starting balance recorded as ' + str(self.start_balance) + ' BTC')
    def reset_betting_level(self):
        """
        Reset current betting level to 1.
        """
        self.current_betting_level = 1
        Logger.logn('Bet level reset to ' + str(self.current_betting_level))
    def progress_betting_level(self):
        """
        Progress current betting level by 1.

        Raises RuntimeError once the level exceeds MAX_BET_TIER; note the
        level has already been incremented at that point.
        """
        Logger.logn('Progressing bet level...')
        self.current_betting_level += 1
        if self.current_betting_level > MAX_BET_TIER:
            Logger.logn('Max betting level exceeded')
            raise RuntimeError('Max betting level exceeded')
        Logger.logn('Bet level is at ' + str(self.current_betting_level))
    def get_current_bet_amount(self):
        """
        Get the appropriate amount in Bitcoin to bet at our current level
        Returns:
            (float)
        """
        return BET_VALUE_GUIDE[self.current_betting_level]
    def continue_betting(self):
        """
        Evaluates whether betting can and should continue, i.e. whether the
        next stake still fits inside the current balance.
        Returns:
            (bool)
        """
        if BET_VALUE_GUIDE[self.current_betting_level] > self.current_balance:
            return False
        return True
    def write_analysis_file(self):
        """
        Writes an analytics file about this betting session
        """
        out = 'Start time: ' + str(self.start_time)
        out += '\n' + 'Start balance: ' + str(self.start_balance) + ' BTC'
        out += '\n' + 'End time: ' + str(self.end_time)
        out += '\n' + 'End balance: ' + str(self.end_balance) + ' BTC'
        # TODO implement the following...
        out += '\n' + 'Total profit: XX BTC'
        out += '\n' + 'Mean profit per day: XX BTC'
        out += '\n' + 'Mean profit per hour: XX BTC'
        out += '\n' + 'Total risked: XX BTC'
        out += '\n' + 'Average number of bets before a win: XX'
        out += '\n' + 'Average profit per win: XX BTC'
        with open(OUTPUT_PATH + ANALYTICS_FILE_NAME, 'w') as file:
            file.write(out)
    def __del__(self):
        # NOTE(review): persisting from __del__ is fragile — finalizers are
        # not guaranteed to run cleanly at interpreter shutdown; an explicit
        # close() or context manager would be safer.
        self.end_time = int(time.time())
        self.end_balance = self.current_balance
        self.write_analysis_file()
| [
"rjg26247@gmail.com"
] | rjg26247@gmail.com |
6c3fd9da7b87347b3492d331dcb2e31a479bf9b3 | f1c21afcd4816411568e6697ad3925126269d1fc | /tests.py | 0d2fc9556a688cb20025594bb7a22da4d4be10e8 | [
"Apache-2.0"
] | permissive | alphabet5/yamlarg | 22acbd34daf48a9585f27c15162bdb91aea0d821 | 22ae5886f7ef2171d777c17cb0c436ada1672904 | refs/heads/master | 2023-07-10T12:33:24.691796 | 2021-08-18T05:12:32 | 2021-08-18T05:12:32 | 285,971,870 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py | import yamlarg
if __name__ == '__main__':
    # Smoke test: parse arguments.yaml with no CLI overrides and verify the
    # defaults documented there come back exactly as expected.
    args = yamlarg.parse('arguments.yaml', description="This is an example description.\nPlace an overview here.")
    print(args)
    assert args == {'string': 'default string',
                    'bool_false': False,
                    'bool_true': True,
                    'list': ''}
| [
"alphabet5@users.noreply.github.com"
] | alphabet5@users.noreply.github.com |
ed36ee78b965fc7dec1022adbadd0b25044a33e8 | b2e57842d156ef97ce843a30bcd53d64cae1714f | /manage.py | 91ca64ac90352be6943720d97020ebd39c171397 | [] | no_license | jauharibill/datascience-pertanian-web-django | 2daeb85e3cd519af5fb5e77f6075b77ef6f1ee7c | be451c27340a40dffc2bf9fc01a2f3a5fd594c20 | refs/heads/master | 2020-04-05T21:36:20.620363 | 2018-11-24T23:25:34 | 2018-11-24T23:25:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django management entry point for the SKRIPSI project
    # (runserver, migrate, etc. are dispatched by execute_from_command_line).
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "SKRIPSI.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)
| [
"xcodebill@gmail.com"
] | xcodebill@gmail.com |
7b3cf3ffe24d71c33b95a9fdea478716907e5d9a | 7e2978d164305adfafd1f856817750fc54e161a8 | /exp/tools/exp2-2.py | 07ce08a32f6b6a61cfd43c94f8367af88345199a | [] | no_license | ZoharAndroid/plot-script | 6d88a99bef776eff3f03300bcd9e6db83d70f66e | 064b853c321fe2d3648d32d5257ec7674ad11419 | refs/heads/master | 2021-01-13T23:26:32.079141 | 2020-02-23T14:27:07 | 2020-02-23T14:27:07 | 242,527,452 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,091 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Author: zzh
# @Date: 2020/2/7 2:25
import numpy as np
from utils import Utils
from plot import *
from font import *
import sys
# Read the command-line arguments.
csv_filename = sys.argv[1] # csv file name
platform = sys.argv[2] # platform type (used as a suffix of the output pdf)
# Read the contents of the csv file.
data = Utils.read_csv_row(csv_filename)
xtick_label = data.iloc[0: 10, 1]
# Extract each algorithm's series from the csv: rows come in consecutive
# blocks of len(xtick_label) per algorithm (poptrie, sail, art, dxr), and
# columns 2.. of every row hold repeated runs that are averaged here.
poptrie = []
y_max = 0
for i in np.arange(0, len(xtick_label), 1):
    meam = np.mean(data.loc[i, 2:].tolist())
    poptrie.append(meam)
sail = []
for i in np.arange(len(xtick_label), len(xtick_label) * 2, 1):
    meam = np.mean(data.loc[i, 2:].tolist())
    sail.append(meam)
art = []
for i in np.arange(len(xtick_label) * 2, len(xtick_label) * 3, 1):
    meam = np.mean(data.loc[i, 2:].tolist())
    art.append(meam)
dxr = []
for i in np.arange(len(xtick_label) * 3, len(xtick_label) * 4, 1):
    meam = np.mean(data.loc[i, 2:].tolist())
    dxr.append(meam)
# Drop the first locality point of every series before plotting.
art=art[1:]
sail=sail[1:]
poptrie=poptrie[1:]
dxr=dxr[1:]
y_max = max(art + sail + poptrie + dxr)
Plot.create_figure((2.5, 2.))
Plot.plot_setting(111)
line1, = Plot.plot(np.arange(0, len(art), 1), art, color=Color.red_color[0])
line2, = Plot.plot(np.arange(0, len(sail), 1), sail, linestyle='--', color=Color.dark_color[0])
line3, = Plot.plot(np.arange(0, len(poptrie), 1), poptrie, color=Color.green_color[0], marker='x', markersize=6)
line4, = Plot.plot(np.arange(0, len(dxr), 1), dxr, color=Color.color[0], marker='o', markersize=6)
Plot.plot_xlable('locality', font_size=13)
Plot.plot_ylabel('speed(MLPS)', font_size=13)
# Leave 25% headroom above the tallest series.
Plot.plot_ylim(0, y_max * 5 / 4)
Plot.plot_setYticksLabel(12)
ticks=['2','','8','','','64','','','512']
Plot.plot_xticks(np.arange(0, len(art), 1), ticks, font_size=12)
Plot.plot_grid()
Plot.plot_legend([line1, line2, line3, line4], ['Art', 'Sail', 'Poptrie', 'DxR'], ncol=2, bbox_to_anchor=(0.38, 1.08),
                 loc='upper center', columnspacing=0.5,
                 font_size=12, labelspacing=0.1)
Save.save_to_pdf('exp2-2' + '-' + platform)
| [
"1048132071@qq.com"
] | 1048132071@qq.com |
ec7acf98f9484508ac4aef0ff75457eae8bd99f0 | f05acf7451fe58b64ec11744e8afddf142014efa | /crowdsorter/views/_session.py | dd75bd5c8d2a06dddfad4e0d7a5a23f7570d19a0 | [
"MIT"
] | permissive | iCodeIN/crowdsorter | 899ac58b1df43ca134d3f966dcf2ec1c4a49e0df | 1c847f1f0284fc810ec1f2dd501acb4dbfa16bbb | refs/heads/master | 2023-03-26T16:09:04.914897 | 2020-11-11T18:25:55 | 2020-11-11T18:25:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,337 | py | from flask import session
VERSION = 2 # increment when session logic changes to clear sessions
VOTED_NAMES = f"voted-names:{VERSION}:"
SKIPPED_NAMES = f"skipped-names:{VERSION}:"
VIEWED_PAIRS = f"viewed-pairs:{VERSION}:"
def get_voted_names(code):
    """Return the names this session has voted on in collection *code*."""
    return _get(VOTED_NAMES, code)
def set_voted_names(code, names):
    """Overwrite the session's voted-name list for collection *code*."""
    _set(VOTED_NAMES, code, names)
def add_voted_name(code, name):
    """Record *name* as voted on in *code* (no-op if already recorded)."""
    names = get_voted_names(code)
    if name not in names:
        names.append(name)
        set_voted_names(code, names)
def get_skipped_names(code):
    """Return the names this session has skipped in collection *code*."""
    return _get(SKIPPED_NAMES, code)
def set_skipped_names(code, names):
    """Overwrite the session's skipped-name list for collection *code*."""
    _set(SKIPPED_NAMES, code, names)
def add_skipped_name(code, name):
    """Record *name* as skipped in *code* (no-op if already recorded)."""
    names = get_skipped_names(code)
    if name not in names:
        names.append(name)
        set_skipped_names(code, names)
def get_viewed_pairs(code):
    """Return the comparison pairs this session has viewed in *code*."""
    return _get(VIEWED_PAIRS, code)
def set_viewed_pairs(code, pairs):
    """Overwrite the session's viewed-pair list for collection *code*."""
    _set(VIEWED_PAIRS, code, pairs)
def add_viewed_pair(code, pair):
    """Record *pair* as viewed in *code* (no-op if already recorded)."""
    pairs = get_viewed_pairs(code)
    if pair not in pairs:
        pairs.append(pair)
        set_viewed_pairs(code, pairs)
def _get(prefix, code):
    """Read the session-stored list under *prefix*+*code*; default to []."""
    return session.get(prefix + code) or []
def _set(prefix, code, value):
    """Store *value* under *prefix*+*code* and mark the session permanent."""
    session[prefix + code] = value
    session.permanent = True
| [
"jacebrowning@gmail.com"
] | jacebrowning@gmail.com |
c1d4e5f26be2344dfa2accb7a8e7e99f3bdd7265 | f659d3bf1a963705e1d1b14891e6e3162d0a704b | /homework_3/all_tasks.py | 051e8053c8dcef6997c8b29c9db3ee29009abdeb | [] | no_license | alexvinyar/python_homeworks_2017 | 178f84677d242d27c1fd890a6fd8cf8206b42c59 | adc87d7b54b37781dd974fb84ea534e97fe3e355 | refs/heads/master | 2021-09-09T16:18:30.862569 | 2018-03-17T21:11:06 | 2018-03-17T21:11:06 | 103,245,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,460 | py | from pattern.web import Wikipedia, plaintext
from nltk.util import ngrams
from collections import Counter
import string
import re
import numpy as np
class WikiParser:
    """Downloads the plain text of every article linked from a seed page."""
    def __init__(self):
        pass
    def get_articles(self, start):
        """Return a list of cleaned article texts, one per link on *start*.

        Every linked article is fetched over the network (slow), converted
        to plain text, lower-cased and stripped of most punctuation — but
        not . ! ?, which later serve as sentence boundaries.
        """
        start_article = Wikipedia().article(start)
        links = start_article.links
        list_of_strings = []
        for l in links:
            raw = Wikipedia().article(l)
            text = plaintext(raw.source)
            text = text.lower().split()
            text = ' '.join([x.strip('"#$%&\'()*+,-/:;<=>@[\\]^_`{|}~') for x in text])
            list_of_strings.append(text)
        return list_of_strings
class TextStatistics:
    """Frequency statistics (character 3-grams and words) over a list of texts.

    *articles* is a list of plain-text strings.  With ``use_idf`` the raw
    frequency is weighted by log(N / df), where N is the number of sentences
    (3-grams) or articles (words) and df the number containing the item.

    Fix: the original kept ``filter(None, re.split(...))`` lazy; on Python 3
    the for-loop exhausts that iterator and ``len(sentences)`` raises
    TypeError.  The sentence list is now materialized, which is equivalent
    on Python 2 as well.
    """

    def __init__(self, articles):
        self.articles = articles

    def get_top_3grams(self, n, use_idf):
        """Return (grams, scores): the *n* highest-scoring character 3-grams."""
        in_sentences = {}   # trigram -> number of sentences containing it
        tg = []             # every trigram occurrence, in order
        super_article = '. '.join(self.articles)
        super_article = super_article.lower().split()
        super_article = ' '.join([x.strip('"#$%&\'()*+,-/:;<=>@[\\]^_`{|}~') for x in super_article])
        # Materialized list: len() is needed below for the idf weighting.
        sentences = [s for s in re.split('[.!?]', super_article) if s]
        for sentence in sentences:
            stg = [''.join(x) for x in ngrams(sentence, 3)]
            tg += stg
            # Count each trigram at most once per sentence (document frequency).
            for gram in set(stg):
                in_sentences[gram] = in_sentences.get(gram, 0) + 1
        count = Counter(tg)
        if use_idf:
            count = [(ngram, freq * np.log(float(len(sentences)) / in_sentences[ngram]))
                     for ngram, freq in count.items()]
        else:
            count = count.items()
        top = sorted(count, key=lambda x: x[1], reverse=True)[:n]
        top_grams = [gram for gram, _ in top]
        top_scores = [score for _, score in top]
        return (top_grams, top_scores)

    def get_top_words(self, n, use_idf):
        """Return (words, scores): the *n* highest-scoring non-stop words."""
        stop = ['aboard','about','above','across','after','against','along','alongside','amid','among','amongst','around',
                'as','aside','astride','at','atop','barring','before','behind','below','beneath','beside','besides','between',
                'beyond','but','by','circa','concerning','considering','despite','down','during','except','excepting','excluding',
                'failing','following','for','from','in','including','inside','into','like','minus','near','nearby','next',
                'notwithstanding','of','off','on','onto','opposite','outside','over','past','per','plus','regarding','round',
                'save','since','than','through','throughout','till','times','to','toward','towards','under','underneath','unlike',
                'until','unto','up','upon','versus','via','with','within','without','worth','a','an','the']
        in_articles = {}    # word -> number of articles containing it
        all_words = []
        for article in self.articles:
            article = re.sub('[0-9]+ ', '', article)   # drop standalone numbers
            words = article.lower().split()
            # NOTE(review): the stop-word test runs before punctuation is
            # stripped, so e.g. "the," survives — behavior kept as-is.
            words = [x.strip(string.punctuation) for x in words if x not in stop]
            all_words += words
            for word in set(words):
                in_articles[word] = in_articles.get(word, 0) + 1
        count = Counter(all_words)
        if use_idf:
            count = [(word, freq * np.log(float(len(self.articles)) / in_articles[word]))
                     for word, freq in count.items()]
        else:
            count = count.items()
        top = sorted(count, key=lambda x: x[1], reverse=True)[:n]
        top_words = [word for word, _ in top]
        top_scores = [score for _, score in top]
        return (top_words, top_scores)
class Experiment:
    """Runs the full pipeline: crawl articles linked from the NLP seed page
    and print the top-20 idf-weighted character 3-grams and words."""
    def show_results(self):
        # Network-bound: downloads every article linked from the seed page.
        p = WikiParser()
        articles = p.get_articles('Natural language processing')
        ts = TextStatistics(articles)
        ngr = ts.get_top_3grams(20,True)
        nw = ts.get_top_words(20,True)
        for g,freq in zip(ngr[0],ngr[1]):
            print(''.join(g)+': '+str(freq))
        print('\n')
        for word,freq in zip(nw[0],nw[1]):
            print(word+': '+str(freq))
## th: 49756.5389305
##the: 46323.7340107
##he : 40711.3938865
##ion: 37665.2329342
##tio: 35442.7981008
##ing: 34591.92297
## in: 34510.8592322
##on : 33593.4842788
##ati: 32348.2453289
## of: 32113.9048147
##ng : 32100.5164577
##of : 31294.0387485
## an: 31112.6643377
##ed : 31061.058146
##al : 30635.1173816
## co: 30489.4033129
##es : 29457.9769901
##and: 29107.4160881
##nd : 29004.3348625
##ent: 27980.6500252
##
##
##displaystyle: 2310.10666486
##turing: 1675.6819155
##arabic: 1088.58984025
##x: 849.586594457
##eu: 815.361355212
##tone: 811.861393169
##learning: 800.008421613
##chomsky: 799.302357237
##german: 797.592521573
##european: 778.368830252
##languages: 776.191835293
##english: 748.83102793
##turkish: 734.268297915
##retrieved: 720.344457971
##union: 708.95976536
##spanish: 682.142723497
##verbs: 673.23420318
##speech: 664.372230413
##dialects: 656.71754684
##i: 650.243980334
| [
"alexvinyar@yandex.ru"
] | alexvinyar@yandex.ru |
e04bf0edd831ae16bfc02fa43b3fe311a800cda5 | 90e88e14e77e5689bfaf3dfcbebf87da3e244e08 | /packages/urls.py | 1d4c96ecf518dc513750ba2062f4eb240e8dca4b | [] | no_license | Code-Institute-Submissions/Fullstack-milestone-project-2 | 5309978a080ae9dd834f52b6931636003c189e68 | cbf1b60b570a93950e04870900ec1424c4f37785 | refs/heads/master | 2022-12-11T08:56:20.897445 | 2020-08-28T14:52:32 | 2020-08-28T14:52:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 118 | py | from django.urls import path
from . import views
urlpatterns = [
    # App root: list all packages.
    path('', views.all_packages, name="packages")
]
| [
"02bburrell@live.co.uk"
] | 02bburrell@live.co.uk |
c1db4e92b7a964a33d0b63902ca1030ea5153541 | 44d0256cf2b74fb48f2787ce5b32b61103eb8e81 | /forest_type.py | 904456349336c9fdf6d0331331ee974f43a2b4bd | [] | no_license | KongHag/Danslesbois | c0fa3ac72e52d864e1776b5c903985adc37b0b02 | da7d98eb7138125831a9fdb080919fb80d342ace | refs/heads/master | 2020-03-28T09:44:49.188270 | 2018-09-16T03:30:47 | 2018-09-16T03:30:47 | 148,057,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 299 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Sep 9 19:13:47 2018
@author: hugof
"""
import pandas as pd
import numpy as np
# NOTE(review): absolute Windows paths — this only runs on the author's machine.
train = pd.read_csv(r'C:\Users\hugof\OneDrive\Documents\Kaggle\all\train.csv',sep=',')
test = pd.read_csv(r'C:\Users\hugof\OneDrive\Documents\Kaggle\all\test.csv',sep=',')
| [
"39108853+hugof38@users.noreply.github.com"
] | 39108853+hugof38@users.noreply.github.com |
20be70f63cae206591407f2d022ea4038bbcf037 | 17782c846f362582772c53e3a3a0e5ddf95d54b2 | /1course/P/warrior.py | 5d6eb3c24efe4bb24da9388d935e6a315748941c | [] | no_license | youngdanon/InfoLessons2021-2 | 07e3cf4d15c6c3a5630d264c5b5336a276ec9793 | f4e4c970e9c7eda161b21d20a642f517913e01e8 | refs/heads/main | 2023-07-23T22:52:39.714760 | 2021-09-03T07:31:26 | 2021-09-03T07:31:26 | 338,988,399 | 0 | 0 | null | 2021-02-21T20:40:04 | 2021-02-15T06:38:23 | null | UTF-8 | Python | false | false | 4,226 | py | from abc import ABCMeta, abstractmethod
from random import randint as ri
units_classes = {"MAGICIAN": "MAGICAL", "WARRIOR": "SIMPLE"}
class Unit:
    """Abstract base class for all combat units.

    Subclasses configure max_health, _max_damage and _block_chance.
    _block_chance is a (lo, hi, match) triple: a strike LANDS when
    randint(lo, hi) == match; otherwise the defender blocks it.
    """
    max_health = 0
    __metaclass__ = ABCMeta  # NOTE(review): Python-2 idiom; has no effect on Python 3
    _max_damage = 0
    _block_chance = (0, 0, 0)  # (lo, hi, match): hit lands when randint(lo, hi) == match
    @abstractmethod
    def __init__(self, name, health=max_health):
        self.name = name
        self.health = health
    @abstractmethod
    def hit(self, other):
        """Roll 1.._max_damage against *other*, applying it if the chance roll lands.

        The printed damage type is units_classes keyed by the second word of
        the attacker's name (e.g. "[team] WARRIOR 3" -> "SIMPLE").  The rolled
        damage is returned even when the strike was blocked.
        """
        damage = ri(1, self._max_damage)
        if ri(self._block_chance[0], self._block_chance[1]) == self._block_chance[2]:
            other.health -= damage
            print(f"Action:{other.name} took {damage} {units_classes[self.name.split()[1]]} damage from {self.name}")
        else:
            print(f"Action:{other.name} BLOCKED {damage} {units_classes[self.name.split()[1]]} damage from {self.name}")
        return damage
class Warrior(Unit):
    """Baseline unit: full health pool, full damage range, 1-in-3 hit roll."""
    max_health = 100
    _max_damage = 100
    _block_chance = (0, 2, 0)

    def __init__(self, name, health=max_health):
        super().__init__(name, health)

    def hit(self, other):
        # Propagate the rolled damage that Unit.hit returns
        # (the original silently discarded it).
        return super().hit(other)
class Magician(Unit):
    """Magical unit: smaller health pool and damage cap, but a 1-in-2 hit roll
    (the original comment claimed "large damage"; _max_damage is actually
    half the Warrior's — the advantage is the better hit chance)."""
    max_health = 90
    _max_damage = 50
    _block_chance = (0, 1, 0)

    def __init__(self, name, health=max_health):
        super().__init__(name, health)

    def hit(self, other):
        # Propagate the rolled damage that Unit.hit returns
        # (the original silently discarded it).
        return super().hit(other)
class KlirikBeginner(Unit):
    """Cleric: with probability 1/51 one-shots the target, otherwise heals it
    back to full health."""
    max_health = 200

    def __init__(self, name, health=max_health):
        super().__init__(name, health)

    def hit(self, other):
        if ri(0, 50) == 1:
            # One-shot: remove exactly the target's remaining health.
            damage = other.health
        else:
            # Negative "damage" tops the target up to max_health.
            damage = other.health - other.max_health
        other.health -= damage
        if damage > 0:
            print(f"Action: {other.name} took {damage} KLIRIKAL damage from {self.name}")
        else:
            print(f"Action:{other.name} HEALED to {-damage} by {self.name}")
def team_generator(quantity, team_name):
    """Build a team of exactly *quantity* random units for *team_name*.

    Fix: the original used strict comparisons (1 < roll < 20, 20 < roll < 30,
    30 < roll < 100) on randint(1, 100), so rolls of 1, 20, 30 and 100
    produced NO unit and teams came back smaller than *quantity* (a small
    team could even end up empty, crashing Game.battle).  The ranges below
    partition 1..100 completely: ~19% magicians, ~10% kliriks, rest warriors.
    """
    players = []
    for i in range(quantity):
        roll = ri(1, 100)
        if roll < 20:
            players.append(Magician(f"[{team_name}] MAGICIAN {i}"))
        elif roll < 30:
            players.append(KlirikBeginner(f"[{team_name}] KLIRIK {i}"))
        else:
            players.append(Warrior(f"[{team_name}] WARRIOR {i}"))
    return players
def death_checker(inst):
    """Return True when *inst* has no health left (zero or negative)."""
    is_dead = inst.health <= 0
    return is_dead
class Game:
    """Runs a free-for-all battle between several teams of units."""
    def __init__(self, teams_list):
        # teams_list: list of teams; each team is a non-empty list of units.
        self.teams_list = teams_list
    def battle(self):
        """Fight until one team remains; return the surviving team's tag.

        Each round picks two distinct random teams and one random player from
        each; player1 strikes first and player2 retaliates only if it
        survived, so at most one player dies (and at most one team empties)
        per round — which is why the two team pops below can never both fire
        and invalidate each other's index.
        """
        while len(self.teams_list) > 1:
            t1_index, t2_index = ri(0, len(self.teams_list) - 1), ri(0, len(self.teams_list) - 1)
            if t1_index != t2_index:
                team1, team2 = self.teams_list[t1_index], self.teams_list[t2_index]
                p1_t1_index, p2_t2_index = ri(0, len(team1) - 1), ri(0, len(team2) - 1)
                player1, player2 = team1[p1_t1_index], team2[p2_t2_index]
                player1.hit(player2)
                if death_checker(player2):
                    print(f"######### Player {player2.name} DEAD #########")
                    self.teams_list[t2_index].pop(p2_t2_index)
                else:
                    player2.hit(player1)
                    if death_checker(player1):
                        print(f"######### Player {player1.name} DEAD #########")
                        self.teams_list[t1_index].pop(p1_t1_index)
                if not self.teams_list[t1_index]:
                    self.teams_list.pop(t1_index)
                elif not self.teams_list[t2_index]:
                    self.teams_list.pop(t2_index)
        # Player names look like "[TEAM] CLASS i"; return the "[TEAM]" tag.
        return self.teams_list[0][0].name.split()[0]
if __name__ == "__main__":
    # Demo: four 100-player teams battle until one team tag remains.
    team1 = team_generator(100, "GaYs")
    team2 = team_generator(100, "SlAvEs")
    team3 = team_generator(100, "MuRaT")
    team4 = team_generator(100, "kFu")
    teams = [team1, team2, team3, team4]
    # fight = Game([team1, team2, team3, team4])
    # teams = [team_generator(2, i) for i in range(100)] # not working with (1, i)
    fight = Game(teams)
    print(f"Winner: {fight.battle()}")
| [
"Danil4693"
] | Danil4693 |
9cc030bbca81a79c26e69685afe4616aebc404df | 0031bd210e25f9602a8ee3cf581c44e8e8f3a00f | /Junior/COSC0023-Py/Exercise/Queue at the School.py | 2a28b8f0c6fd269fc7ef8532c0aa0e06a1a0726b | [
"MIT"
# "Queue at the School": a line of n students is given as a string of
# 'B' (boys) and 'G' (girls); every second each boy standing directly in
# front of a girl swaps places with her.  Print the line after t seconds.
# Fix: the first line was parsed with eval() on raw stdin — unsafe and
# unnecessary; plain int parsing gives the same (n, t) for valid input.
n, t = map(int, input().split())
s = input()
for _ in range(t):
    s = s.replace("BG", "GB")
print(s)
| [
"TiffanyChou21@163.com"
] | TiffanyChou21@163.com |
66c5c64fb88018971d1bb341151a3e4ec51d0498 | 8f782f962ba2501a75d40fca88500d884e8229be | /Desafios/Desafio_079.py | b69a000240ee6d9aaa8fc8fff654d0c0a668dca3 | [] | no_license | HenriqueNO/Python-cursoemvideo | f6ec1284a9cbf7e86a12d6a541e45b4ad6290cf6 | 089f85fbfd88672cc1d9a89b075a40dd96354f40 | refs/heads/main | 2023-03-12T19:49:22.540090 | 2021-02-28T15:42:19 | 2021-02-28T15:42:19 | 343,133,567 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 564 | py |
lista = []
p = ''
# Read numbers until the user answers 'n' ("no") to the continue prompt.
while p != 'n':
    n = int(input('Digite um número: '))
    # Only unique values are kept; duplicates are reported and discarded.
    if n not in lista:
        lista.append(n)
    else:
        print('Valor duplicado, número não adicionado')
    p = str(input('Deseja continuar? [S/N] ')).lower().strip()[0]
    # Re-prompt until the answer is a valid 's' (yes) or 'n' (no).
    while True:
        if p != 's' and p != 'n':
            p = input('Deseja continuar? [S/N] ').lower().strip()[0]
        else:
            break
# Show the collected numbers in ascending order.
lista.sort()
print(f'{"-=-"*13}')
print(f'Você digitou os números: {lista}')
| [
"henrique.nunes478@gmail.com"
] | henrique.nunes478@gmail.com |
d20e606c613d78c5971e9e9c8e93448c465bcbe1 | 68aa9bf99d62a5b991dc5aaa3d794f4bcd6e355a | /Programiranje/gui/Capital_Cities.py | 24cfdaf80e27e2162949498aef012db6a6261742 | [] | no_license | dujodujo/lemur | 82c9e695459597ab1b3430e566bc375af84d563c | 1e6350b33f86f89f89c5bddbd3924364f027160e | refs/heads/master | 2021-01-01T16:49:35.386172 | 2013-11-06T09:59:12 | 2013-11-06T09:59:12 | 14,150,163 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,022 | py | import sys, os, random
from PyQt4.QtCore import *
from PyQt4.QtGui import *
class Form(QDialog):
    """Capital-cities quiz dialog (PyQt4).

    Shows a country name and compares the typed answer against the
    country->capital mapping loaded from imena.txt.  Feedback strings are
    Slovene: "Pravilno" = correct, "Nepravilno, pravilni odgovor je ..." =
    wrong, the correct answer is ...
    """
    def __init__(self,parent = None):
        """Build the widget grid and wire Enter in the line edit to update_ui."""
        super(Form, self).__init__(parent)
        self.get_data()
        self.answers = 0  # total questions answered
        self.count = 0    # correct answers
        self.countryLabel = QLabel("Country:")
        self.fromCountryLabel = QLabel()
        # The first question is hard-coded; subsequent ones come from select().
        self.fromCountryLabel.setText("Slovenija")
        self.capitalLabel = QLabel("Capital:")
        self.fromLineEdit = QLineEdit()
        self.countLabel = QLabel()
        self.resultLabel = QLabel()
        grid = QGridLayout()
        grid.addWidget(self.countryLabel,0,0)
        grid.addWidget(self.fromCountryLabel,0,1)
        grid.addWidget(self.capitalLabel,1,0)
        grid.addWidget(self.fromLineEdit,1,1)
        grid.addWidget(self.countLabel,2,0)
        grid.addWidget(self.resultLabel,2,1)
        self.setLayout(grid)
        self.connect(self.fromLineEdit, SIGNAL("returnPressed()"), self.update_ui)
    def select(self):
        """Pick a random country from the loaded mapping as the next question."""
        self.fromCountryLabel.setText(random.choice([x for x in self.capitals.keys()]))
    def update_ui(self):
        """Grade the typed capital, update the score display, ask a new question."""
        capitals = self.capitals
        country = self.fromCountryLabel.text()
        name = self.fromLineEdit.text()
        if name == capitals[country]:
            self.resultLabel.setText("Pravilno")
            self.count +=1
        else:
            self.resultLabel.setText("Nepravilno, pravilni odgovor je " + capitals[country] )
        self.answers +=1
        self.countLabel.setText("{}/{}".format(self.count,self.answers))
        self.fromLineEdit.clear()
        self.select()
    def get_data(self):
        """Load "Country, Capital" lines from imena.txt into self.capitals.

        NOTE(review): the existence check uses a Windows-style "\\imena.txt"
        path join — portable only on Windows; missing files leave the
        mapping empty.
        """
        self.capitals = {}
        if os.path.exists(os.getcwd() + "\\imena.txt"):
            for line in open("imena.txt", "rt"):
                line = line.strip()
                data = line.split(", ")
                country = data[0]
                capital = data[1]
                self.capitals[country] = capital
# Script bootstrap: create the Qt application and run the quiz dialog.
app = QApplication(sys.argv)
form = Form()
form.show()
app.exec_()
| [
"avsic.ziga@gmail.com"
] | avsic.ziga@gmail.com |
12bb374cf8180e49e825035a2663e222b3cc0907 | adf092e31ce156b45a03f0411ab18d0a567e4a39 | /showdata/admin.py | acc54fa885a69d5669d9a05810f26ac170103ef7 | [] | no_license | jaggi-mohit/Q-Think- | b8280e8014872bcdfa6f90b21aca028d1bc28e5e | 01ed7ebac4a55c955b3eabdf3028f058203b8fc7 | refs/heads/main | 2023-07-31T18:59:41.565946 | 2021-09-07T13:17:06 | 2021-09-07T13:17:06 | 403,983,696 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | from django.contrib import admin
from .models import showdata,ans
# Make both models manageable through the Django admin site.
admin.site.register(showdata)
admin.site.register(ans)
# Register your models here.
| [
"jaggimohit01@gmail.com"
] | jaggimohit01@gmail.com |
ccf1c9f6ef537d82ccc07b23265965dbb726f316 | 9aac8ad0ca3687536c4015b5f8e4035d3f600399 | /myenv/bin/pip | 015d5ca1221dc8b813fc27bf0d6620120a47d387 | [] | no_license | yespk/tweetme-2 | 9207f72c2099ba52e985a5f52a6c82d91944a18b | 5c2657483b9f3ac7b332127dd14e9f4becc346c5 | refs/heads/main | 2023-01-07T06:08:32.188335 | 2020-11-06T09:57:05 | 2020-11-06T09:57:05 | 310,548,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | #!/home/satya/clone/tweetme-2/myenv/bin/python3
# -*- coding: utf-8 -*-
# Auto-generated pip console-script shim for this virtualenv.
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
    # Strip Windows launcher suffixes ("-script.pyw"/".exe") from argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"ksatyaprakash143@gmail.com"
] | ksatyaprakash143@gmail.com | |
14a22648976b34b515b577929448b3bc0e537ab8 | 7703e4b37056ddb8ab5154caa200747b6af398de | /shiv.py | 8e223b44d2d8c2e90960eca9c0e91d3cadf3ab30 | [] | no_license | svk1998/Learning_rep | 27ab54c3e70e5a465aa8840512b408ea32c6a3de | 127df1a1890dd61bb6bab2671d0d33bf82c04070 | refs/heads/master | 2022-12-01T08:19:09.922847 | 2020-08-17T15:29:46 | 2020-08-17T15:29:46 | 288,235,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26 | py | print("jkasdhfk")
ghfghfgh | [
"svk19998@gmail.com"
] | svk19998@gmail.com |
69aa022e185b5ec3bb7d2f6da610e01aedc92957 | fba1ae1672a770846eb219e4e092ba4c162aec40 | /2.py | 01fbf8c10fb3569d0961749560f345f35e124f7d | [] | no_license | peterbe/optimize-titles.json | 9272ad39d7565c448dce2b22a3d844ef0e7524d6 | ff7f8a01a5a742906ebb350c55cc963ca3b85e73 | refs/heads/master | 2022-11-23T05:33:05.004619 | 2020-07-15T18:18:48 | 2020-07-15T18:18:48 | 279,936,544 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 275 | py | import json
import csv

# Convert the "titles" mapping in 0.json into a pipe-delimited CSV
# (id | title | popularity), one row per entry.
with open('0.json') as f:
    data = json.load(f)

# newline='' lets the csv module control line endings itself (avoids blank
# rows on Windows); the duplicate "import csv" the script carried was removed.
with open('2.csv', 'w', newline='') as f:
    writer = csv.writer(f, delimiter="|")
    for each, value in data['titles'].items():
        writer.writerow([each, value['title'], value['popularity']])
| [
"mail@peterbe.com"
] | mail@peterbe.com |
aa5b9abcf3f29620ae6f009d8186a34c7ef26686 | 3587930bb01a3935137a8ca5f83d93e88756f62c | /old/test/app.py | 18e5fd396e24b2989555dd8ce9c71843db9826bd | [] | no_license | gomi-kuzu/npb_bot | 2be518d8cbaa8be4833711fee43d278e074f1210 | dd1ae227650581231460a0822445f8fdeadf5584 | refs/heads/master | 2022-01-05T19:13:58.063004 | 2019-07-21T10:11:09 | 2019-07-21T10:11:09 | 198,041,289 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 282 | py | from flask import Flask
app = Flask(__name__)
@app.route('/')
def hello():
    """Root endpoint: returns the plain string 'Hello World'."""
    name = "Hello World"
    return name
@app.route('/good')
def good():
    """/good endpoint: returns the plain string 'Good'."""
    name = "Good"
    return name
## boilerplate entry point (original comment: Japanese "omajinai" = incantation)
if __name__ == "__main__":
    # Listen on all interfaces, port 5000, debug disabled.
    app.run(debug=False, host='0.0.0.0', port=5000)
"syookiti@gmail.com"
] | syookiti@gmail.com |
93739bdfaa56d59c46f716b0f96d1eb1e9bd5a03 | e8a59da64d0c7cb31b7b692a4b99664a24040b63 | /google.py | fcf3e0f3a26493095f8dc722e8a93e3e0c9be450 | [] | no_license | geroalonso/emailalgo | f3ab3de0e1093052fe100984a26b0698f08d7773 | c9a984954f2e47a28726026df16ba53daea7b290 | refs/heads/main | 2023-03-07T09:01:13.033947 | 2021-02-28T22:12:42 | 2021-02-28T22:12:42 | 321,402,414 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,182 | py | import requests
from bs4 import BeautifulSoup
import re
import urllib.parse
from urllib.parse import urlparse
#IMPORTANTE
#SOLO VA A LA PRIMER PAGINA
#TENGO QUE REACOMODARLO
def googleSearch(query):
    """Scrape one Google results page for *query* and return result domains.

    Fetches the first results page only, extracts http(s) hrefs, drops
    google.com links, then reduces every collected URL to its netloc and
    deduplicates.

    NOTE(review): although full URLs are appended first, the post-processing
    loop replaces each entry with its domain, so the return value is a list
    of domains, not URLs — confirm this is intended.
    """
    g_clean = []
    url = 'https://www.google.com/search?client=ubuntu&channel=fs&q={}&ie=utf-8&oe=utf-8'.format(query)
    html = requests.get(url)
    if html.status_code==200:
        soup = BeautifulSoup(html.text, 'lxml')
        a = soup.find_all('a')
        for i in a:
            k = i.get('href')
            try:
                # Pull the first http(s) URL out of the href, then drop
                # everything after the first '&' (tracking parameters).
                m = re.search(r"(?P<url>https?://[^\s]+)", k)
                n = m.group(0)
                rul = n.split('&')[0]
                domain = urlparse(rul)
                if(re.search('google.com', domain.netloc)):
                    continue
                else:
                    g_clean.append(rul)
            except:
                # Bare except: anchors without a usable href (k is None or
                # no URL match) are silently skipped.
                continue
    # the block below removes duplicates (original comment in Spanish)
    n = 0
    for link in g_clean:
        cortado = urlparse(link.replace("www.","")).netloc
        g_clean[n] = cortado
        n += 1
    g_clean = list(set(g_clean))
    print(g_clean)
    return g_clean
googleSearch('plantas miami')
| [
"geronimoalonso@icloud.com"
] | geronimoalonso@icloud.com |
ebff6e8a294d8b73e377247afd49c509be231ebf | a52e2dc412e81de22e1a56d22dbc2f4dce708ccc | /Systems/DT.py | 54926f136ac71af6859a77666110ba83150fb844 | [] | no_license | Team293/NoamsCode | 26f0858b32d7f7595db6d4dd803ef0e5ac4bc9d4 | 3052698e2373c13ebd86b8d6369c20467392e586 | refs/heads/master | 2021-01-22T11:55:12.126057 | 2015-10-19T19:30:35 | 2015-10-19T19:30:35 | 19,043,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 43 | py | def main():
print("Oh yah driving \n")
| [
"noam999@veryspeedy.net"
] | noam999@veryspeedy.net |
3e7c227a882f2cd39cdaf02c0f17a021690effc5 | 40a04920dea94179878e25a0804ce4a6b459aca9 | /Python/Django/Portfolio/apps/first_app/urls.py | 8e117530ce911208aad1a83f1f376ca1c35e005b | [] | no_license | Kryptonian92/pythonAssignments | 5c7dd9140d07c94b19816ebbcaba579338479136 | 06355e0481307a77e5acd53b86b1fc144e98302a | refs/heads/master | 2021-01-20T15:50:46.355224 | 2017-10-28T19:02:52 | 2017-10-28T19:02:52 | 90,771,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 202 | py | from django.conf.urls import url
from . import views # This line is new!
urlpatterns = [
url(r'^$', views.index), # This line has changed!
url(r'^testimonials$', views.show)
]
| [
"ausar_mcgruder@yahoo.com"
] | ausar_mcgruder@yahoo.com |
6105c024e51b9d2ded47f81f602e2f39c1080597 | aa9afcb895193a34e7622ae0b3e5efcd270f6f58 | /Pacico/0000/0000.py | 047a868714a24738ddda4c334f04fca2463b85a0 | [] | no_license | Pacico/python | f8542eba6e7cef91abeb0ca5e5adba9039095756 | d2df49c423cf7ec69c972a34db5f06a1dd0ec79d | refs/heads/master | 2020-05-29T11:46:32.416852 | 2015-12-02T12:35:39 | 2015-12-02T12:35:39 | 47,053,922 | 0 | 0 | null | 2015-11-29T07:17:21 | 2015-11-29T07:17:21 | null | UTF-8 | Python | false | false | 965 | py | # -*- coding:cp936 -*-
def imgaddingtext(filepath,text,fonttype='arial.ttf',color='black'):
    """Draw *text* onto the image at *filepath* and save a copy named new.jpg
    in the same directory as the original.

    filepath -- path to the source image
    text     -- string to draw, anchored at the image midpoint
    fonttype -- truetype font file name (default 'arial.ttf')
    color    -- fill colour understood by PIL (default 'black')
    """
    from PIL import Image, ImageDraw, ImageFont
    import os
    img=Image.open(filepath)
    x,y=img.size
    draw=ImageDraw.Draw(img)
    # Font size scales with the smaller image dimension.
    font=ImageFont.truetype(fonttype,(min(img.size))/5)
    draw.text((x/2,y/2),text,fill=color,font=font)
    # os.path.join instead of the hard-coded '\\' separator so the output
    # path is correct on every platform, not only Windows (same result there).
    img.save(os.path.join(os.path.dirname(filepath), 'new.jpg'))
# Set random default values for the variables (original comment in Chinese).
# NOTE: this script is Python 2 (print statements, raw_input).
import random
listfont=['arial.ttf']
listcolor=['red','black','brown','white','pink','yellow']
filepath=raw_input('enter exact path for you file(picture):')
text=raw_input('enter your text:')
fonttype=raw_input('enter the font:')
if fonttype not in listfont:
    # Fall back to a random known font when the choice is unavailable.
    print 'The fonttype you choose does not exit'
    fonttype=random.choice(listfont)
color=raw_input('enter the color:')
if color not in listcolor:
    # Same fallback for an unknown colour.
    print 'The color you choose does not exit'
    color=random.choice(listcolor)
imgaddingtext(filepath,text,fonttype,color)
| [
"trin.lai@qq.com"
] | trin.lai@qq.com |
6f1f44af6ed076ab8a8925a0c8cb7ecb6222c034 | bac3be1d6321b903157965f33a48d7d616176c14 | /laba_1-main/13.py | fb3e8f7ab345a7faa06577944e450ba138a946e3 | [] | no_license | IlyaZuna/Python_2_course | a7ceb622c17a49cfbd70837fafc8fa2e0bce86f8 | 83e98301df3e187d11af73478b598c881315d70b | refs/heads/main | 2023-05-04T09:58:49.729157 | 2021-05-28T18:25:49 | 2021-05-28T18:25:49 | 371,779,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | def extra_enumerate(someArray, start):
    st = start   # running index, starts at *start*
    cum = 0      # running sum of the values yielded so far
    for elem in someArray:
        yield st, elem # yields the (index, element) pair, like enumerate()
        st += 1
        cum = cum + elem
        # After each yield: print (index, value, cumulative sum, 10% of sum).
        print('(',st,', ',elem,', ',cum,', ',cum*0.1,')')
x = [1,3,4,2]
print ('(id,num,sum,10%)')
for i in extra_enumerate(x,0):
    print()
| [
"noreply@github.com"
] | noreply@github.com |
c2c72dd6a4037f0038e162dee8fcec72aba8097c | 68f6e727e8f95391d6381669c72857f8bc4ac142 | /source/demo_dqn_multi_new/dqfd_multi_largebasic_one_rocket_hate_exp036.py | ae1a5e8972266c05c9fe4b18f4b4c0a6df9c049f | [] | no_license | ushitora/vizdoom_experiments | db6b6279ce0b248c1e933340d518ce90189be37d | fcc6febcbd9039b0f0f0aea1d472b2226b5395e5 | refs/heads/master | 2020-04-28T09:40:56.209848 | 2019-03-12T10:00:28 | 2019-03-12T10:00:28 | 175,175,368 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 71,570 | py | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
import multiprocessing
import skimage.color, skimage.transform
from vizdoom import *
import os, time, random, threading, h5py, math,pickle
import tensorflow as tf
import numpy as np
from game_instance_basic import GameInstanceBasic, GameInstanceSimpleDeathmatch
from global_constants import *
from datetime import datetime, timedelta, timezone
from PIL import Image
import matplotlib.pyplot as plt
from replay_memory import ReplayMemory
import pandas as pd
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
from collections import defaultdict
# %matplotlib inline
# In[ ]:
# --- run/output configuration: all artefact paths are timestamped per run ---
JST = timezone(timedelta(hours=+9),'JST')
DATETIME = datetime.now(JST)
LOGDIR = "../data/demo_dqn/logs/log_"+DATETIME.strftime("%Y-%m-%d-%H-%M-%S")+"/"
CSV_DIR = "../data/demo_dqn/logs_csv/log_"+DATETIME.strftime("%Y-%m-%d-%H-%M-%S")+"/"
MODEL_PATH = "../data/demo_dqn/models/model_"+DATETIME.strftime("%Y-%m-%d-%H-%M-%S")+"/model.ckpt"
POSITION_DATA_PATH = "../data/demo_dqn/position_data/positiondata_"+DATETIME.strftime("%Y-%m-%d-%H-%M-%S")+".csv"
CONFIG_FILE_PATH = "./config/large_basic_rocket_hate.cfg"
PLAY_LOGDIR = "../data/demo_dqn/playlogs/playlog_"+DATETIME.strftime("%Y-%m-%d-%H-%M-%S")+"/"
DEMO_PATH = ["../demonstration/largebasic_rocket_hate/demo_largebasic_rocket_hate01.hdf5"]
run_mode = "learning_imitation"
# run_mode = "learning_async"
# run_mode = "test"
# --- game/agent hyper-parameters ---
N_ACTION = 6                # number of binary game buttons
N_AGENT_ACTION = 2**6       # every button combination is one discrete agent action
BOTS_NUM = 1
N_WORKERS = 5               # parallel Environment workers
REWARDS = {'living':-1.0, 'healthloss':0.0, 'medkit':0.0, 'ammo':0.0, 'frag':0.0, 'dist':0.0, 'suicide':0.0, 'kill':100.0,'death':-100.0,'enemysight':0.0, 'ammoloss':0.0}
LSTM_SIZE = 1024
N_ADV = 5                   # n-step return horizon used by make_advantage_data
N_SEQ = 5
LAMBDA_ONE = 1.0            # weight of the 1-step TD loss
LAMBDA1 = 1.0               # weight of the n-step TD loss
LAMBDA2 = 1.0               # weight of the supervised (margin) demo loss
# LAMBDA3 = 0.00001
LAMBDA3 = 0.0001            # weight of the L2 regularisation loss
RESOLUTION = (120,120,3)    # network input (H, W, C)
MERGIN_VALUE = 0.02
INTERVAL_BATCH_LEARNING = 10
INTERVAL_UPDATE_NETWORK = 10
INTERVAL_PULL_PARAMS = 1
N_BATCH = 64
INTERVAL_UPDATE_ORIGIN = 10
USED_GPU = "0"
BETA_MIN = 0.0              # prioritized-replay importance-sampling beta range
BETA_MAX = 0.4
EPS_MAX = 0.9               # see Agent.calc_eps: "eps" is the exploit probability here
EPS_MIN = 0.85
N_STEPS = int(200000/N_WORKERS)
IMIT_MODEL_PATH = "../data/demo_dqn/models/model_2019-02-01-03-28-37/model.ckpt"
SAVE_DATA = True
# In[ ]:
# Per-experiment overrides. NOTE(review): rebinding __name__ to run_mode is
# deliberate — it drives the `__name__ == "..."` dispatch later in the file.
from exp_condition.condition_exp036 import *
__name__ = run_mode
print(EXPERIMENT_NAME)
# In[ ]:
# Create this run's output directories (only when saving is enabled).
if SAVE_DATA == True:
    if not os.path.exists(LOGDIR):
        os.mkdir(LOGDIR)
    if not os.path.exists(os.path.dirname(MODEL_PATH)):
        os.mkdir(os.path.dirname(MODEL_PATH))
    if not os.path.exists(PLAY_LOGDIR):
        os.mkdir(PLAY_LOGDIR)
    if not os.path.exists(CSV_DIR):
        os.mkdir(CSV_DIR)
# In[ ]:
class Environment(object):
    """One ViZDoom worker: plays the game, fills the shared replay memory,
    and/or trains the shared network (DQfD-style: 1-step + n-step TD losses,
    supervised margin loss on demonstrations, prioritized replay).

    The run_* methods are thread entry points driven by a tf.train.Coordinator;
    `progress` in [0, 1] is derived either from step count (n_step) or wall
    clock (start_time/end_time).
    """
    def __init__(self,sess, name, game_instance, network, agent, start_time=None, end_time=None, n_step=None, random_seed=0, position_data=None):
    # def __init__(self,sess, name, start_time, end_time, parameter_server):
        """Configure the game instance and zero all bookkeeping state.

        position_data, times_act, times_update, rewards_detail and log_server
        are optional collectors; features guarded on them are skipped when None.
        """
        self.name = name
        self.sess = sess
        self.game = game_instance
        self.game.game.set_seed(random_seed)
        self.game.game.set_render_weapon(True)
        self.game.game.set_render_crosshair(True)
        self.game.game.set_episode_timeout(500)
        self.game.game.init()
        self.network = network
        self.agent = agent
        self.clear_obs()
        self.clear_batch()
        self.start_time = start_time
        self.end_time = end_time
        self.n_step = n_step
        self.progress = 0.0
        self.log_server = None
        self.replay_memory = None
        self.step = 0
        self.model_gen_count = 0
        self.times_act = None
        self.times_update = None
        self.count_update = 0
        self.rewards_detail = None
        self.position_data_buff = position_data
        self.record_action = []
        self.record_treeidx = []
        # NOTE(review): relies on a module-level `replaymemory` object with a
        # .tree.tree array (sum-tree) existing at construction time — confirm.
        self.count_idx = np.zeros_like(replaymemory.tree.tree, dtype=np.int32)
        print(self.name," initialized...")
    def run_learning(self, coordinator):
        """Thread loop: act + learn via learning_step until the coordinator stops."""
        print(self.name + " start learning")
        self.network.pull_parameter_server(self.sess)
        self.network.copy_network_learning2target(self.sess)
        self.game.new_episode()
        try:
            while not coordinator.should_stop():
                self.learning_step()
                if self.n_step is not None:
                    self.progress = self.step/self.n_step
                else:
                    self.progress = (datetime.now().timestamp() - self.start_time)/(self.end_time - self.start_time)
                # if self.progress >= 1.0:
                #     break
        except Exception as e:
            print(e)
        print(self.name," ended")
        # if self.log_server is not None:
        #     coordinator.request_stop()
        return 0
    def run_prelearning(self, coordinator):
        """Thread loop: train from the (demo-filled) replay memory only, no play."""
        assert self.replay_memory is not None
        self.network.pull_parameter_server(self.sess)
        self.network.copy_network_learning2target(self.sess)
        try:
            while not coordinator.should_stop():
                loss_values = self.prelearning_step()
                if self.n_step is not None:
                    self.progress = self.step/self.n_step
                else:
                    self.progress = (datetime.now().timestamp() - self.start_time)/(self.end_time - self.start_time)
        except Exception as e:
            coordinator.request_stop(e)
        coordinator.request_stop()
        return 0
    def run_exploring(self, coordinator):
        """Thread loop: act and store transitions only (no parameter updates)."""
        print(self.name + " start exploring")
        self.network.pull_parameter_server(self.sess)
        self.network.copy_network_learning2target(self.sess)
        self.game.new_episode()
        try:
            while not coordinator.should_stop():
                self.exploring_step()
                if self.n_step is not None:
                    if self.step % 1000 == 0:
                        print(self.name,":", self.step)
                    self.progress = self.step/self.n_step
                else:
                    self.progress = (datetime.now().timestamp() - self.start_time)/(self.end_time - self.start_time)
                if self.progress >= 1.0:
                    break
        except Exception as e:
            coordinator.request_stop(e)
        if self.log_server is not None:
            coordinator.request_stop()
        return 0
    def run_test(self, coordinator):
        """Thread loop: evaluate greedily, log scores, and periodically save models."""
        self.network.pull_parameter_server(self.sess)
        self.network.copy_network_learning2target(self.sess)
        try:
            while not coordinator.should_stop():
                play_log = []
                reward,frag, death,kill,total_detail,steps = self.test_agent(reward_buff =play_log)
                if SAVE_DATA == True:
                    with open(os.path.join(PLAY_LOGDIR, "playlog_step%02d.txt"%int(self.progress*100)), 'wb') as f:
                        pickle.dump(play_log, f)
                if self.rewards_detail is not None:
                    self.rewards_detail.append(total_detail)
                # print("----------TEST at %.1f ---------"%(self.progress*100))
                # print("FRAG:",frag,"KILL:",kill, "DEATH:",death,"STEP:",steps)
                # print("REWARD:",reward)
                # print("REWARD_DETAIL", total_detail)
                if self.log_server is not None:
                    # An episode with no kill is scored as the full 100 steps.
                    if kill <= 0:
                        steps = 100
                    self.log_server.write_score(self.sess,self.step, reward, frag, death ,kill, steps)
                    # Save a checkpoint roughly every 1/12th of total progress.
                    if self.progress >= self.model_gen_count/12:
                        self.model_gen_count += 1
                        if SAVE_DATA == True:
                            self.log_server.save_model(sess=self.sess, model_path=MODEL_PATH, step=self.model_gen_count+1)
                self.step += 1
                if self.n_step is not None:
                    self.progress = self.step/self.n_step
                else:
                    self.progress = (datetime.now().timestamp() - self.start_time)/(self.end_time - self.start_time)
                if self.progress >= 1.0:
                    break
        except Exception as e:
            print(self.name, "killed ")
            # coordinator.request_stop(e)
    def learning_step(self):
        """One act-store-train iteration; resets the episode when it has ended.

        Returns the loss values of the batch update when one happened this
        step, otherwise an empty list.
        """
        if self.step % INTERVAL_PULL_PARAMS == 0:
            self.network.pull_parameter_server(self.sess)
            # self.network.push_parameter_server(self.sess)
        loss_values = []
        if not self.game.is_episode_finished() and self.game.get_screen_buff() is not None:
            if self.times_act is not None:
                start_time = datetime.now().timestamp()
            s1_ = self.preprocess(self.game.get_screen_buff())
            self.push_obs(s1_)
            agent_action_idx = self.agent.act_eps_greedy(self.sess, self.obs['s1'], self.progress)
            self.record_action.append(agent_action_idx)
            # Optional per-step position logging (enemy label + player pose).
            if self.position_data_buff is not None:
                enemy_label = self.game.get_label("Cacodemon")
                if enemy_label is not None:
                    center_x = enemy_label.x + enemy_label.width/2
                    center_y = enemy_label.y + enemy_label.height/2
                    # enemy_position_class = 2
                else:
                    center_x = 0
                    center_y = 0
                    # enemy_position_class = 0
                player_position_x = self.game.get_pos_x()
                player_position_y = self.game.get_pos_y()
                player_angle = self.game.get_angle()
                self.position_data_buff.append([center_x, center_y, player_position_x, player_position_y, player_angle])
            # engin_action = self.convert_action_agent2engine_simple(agent_action_idx)
            engin_action = self.convert_action_agent2engine(agent_action_idx)
            r,r_detail = self.game.make_action(self.step,engin_action , FRAME_REPEAT)
            isterminal = self.game.is_episode_finished()
            if isterminal:
                s2_ = np.zeros(RESOLUTION)
            else:
                s2_ = self.preprocess(self.game.get_screen_buff())
            self.push_batch( self.obs['s1'], agent_action_idx, s2_, r , isterminal, False)
            if self.times_act is not None:
                self.times_act.append(datetime.now().timestamp() - start_time)
            # Flush the local n-step buffer into the shared replay memory.
            if len(self.memory) >= N_ADV or isterminal:
                batch = self.make_advantage_data()
                self.clear_batch()
                for i,b in enumerate(batch):
                    if len(b) == 8:
                        self.replay_memory.store(b)
            self.step += 1
            if self.step % INTERVAL_UPDATE_NETWORK == 0:
                self.network.copy_network_learning2target(self.sess)
            if self.times_update is not None:
                start_time = datetime.now().timestamp()
            if self.step % INTERVAL_BATCH_LEARNING == 0 and len(self.replay_memory) >= N_BATCH:
                s1, actions, r_one, r_adv, isdemo, is_weight, tree_idx = self.make_batch()
                self.record_treeidx.append(tree_idx)
                if self.log_server is not None:
                    self.count_idx[tree_idx] += 1
                loss_values = self.network.update_parameter_server(self.sess, s1, actions, r_one, r_adv, isdemo, is_weight)
                self.count_update += 1
                tderror = loss_values[4]
                l_one, l_n, l_m, l_l = loss_values[:-1]
                # self.replay_memory.batch_update(tree_idx, tderror)
                # self.replay_memory.batch_update_new(tree_idx, tderror,np.array(r_adv)>0)
                if self.log_server is not None:
                    self.log_server.write_loss(self.sess,self.step ,np.mean(l_one), np.mean(l_n), np.mean(l_m), l_l)
                    # self.log_server.write_img(self.sess, self.step, s1[0:1])
                    self.log_server.write_weights(self.sess, self.step)
                # Priorities are refreshed from the 1-step loss here.
                self.replay_memory.batch_update_new(tree_idx, np.copy(l_one),np.array(r_adv)>0)
            if self.times_update is not None:
                self.times_update.append(datetime.now().timestamp() - start_time)
        else:
            self.game.new_episode()
            self.clear_batch()
            self.clear_obs()
        return loss_values
    def prelearning_step(self):
        """One replay-only training iteration (used during demo pre-training)."""
        self.network.pull_parameter_server(self.sess)
        # self.network.push_parameter_server(self.sess)
        s1, actions, r_one, r_adv, isdemo, is_weight, tree_idx = self.make_batch()
        loss_values = self.network.update_parameter_server(self.sess, s1, actions, r_one, r_adv, isdemo, is_weight)
        tderror = loss_values[4]
        l_one, l_n, l_m, l_l = loss_values[:-1]
        self.replay_memory.batch_update(tree_idx, tderror)
        if self.step % INTERVAL_UPDATE_NETWORK == 0:
            self.network.copy_network_learning2target(self.sess)
        if self.log_server is not None:
            if self.step % 10 == 0:
                self.log_server.write_loss(self.sess, self.step, np.mean(l_one), np.mean(l_n), np.mean(l_m), l_l)
                # self.log_server.write_img(self.sess, self.step, s1[0:1])
                self.log_server.write_weights(self.sess, self.step)
        self.step += 1
        return loss_values
    def test_agent(self, gif_buff=None, reward_buff=None, sample_imgs=None):
        """Play one greedy episode; returns (total_reward, frags, deaths,
        kills, reward-detail totals, step count). Optional buffers collect
        raw frames (gif_buff), per-step rewards, and preprocessed samples.
        """
        self.game.new_episode()
        self.network.pull_parameter_server(self.sess)
        step = 0
        gif_img = []
        total_reward = 0
        total_detail = {}
        self.clear_obs()
        while not self.game.is_episode_finished():
            s1_row = self.game.get_screen_buff()
            s1 = self.preprocess(s1_row)
            if sample_imgs is not None:
                sample_imgs.append(s1)
            if gif_buff is not None:
                gif_img.append(s1_row.transpose(1,2,0))
            self.push_obs(s1)
            action = self.agent.act_greedy(self.sess,self.obs['s1'])
            # engine_action = self.convert_action_agent2engine_simple(action)
            engine_action = self.convert_action_agent2engine(action)
            reward,reward_detail = self.game.make_action(step,engine_action,FRAME_REPEAT)
            isterminal = self.game.is_episode_finished()
            total_reward += reward
            # Accumulate the per-component reward breakdown.
            for k in reward_detail.keys():
                if not k in total_detail.keys():
                    total_detail[k] = reward_detail[k]
                else:
                    total_detail[k] += reward_detail[k]
            step += 1
            if reward_buff is not None:
                reward_buff.append((engine_action, reward_detail))
        save_img = []
        if gif_buff is not None:
            for i in range(len(gif_img)):
                save_img.append(Image.fromarray(np.uint8(gif_img[i])))
            gif_buff += save_img
        return total_reward, self.game.get_frag_count(), self.game.get_death_count(), self.game.get_kill_count(), total_detail, step
    def convert_action_engine2agent(self,engine_action):
        """Pack a list of N_ACTION button bits into a single action index."""
        # return engine_action.index(1)
        assert type(engine_action) == type(list()), print("type: ", type(engine_action))
        ans = 0
        for i, e_a in enumerate(engine_action):
            ans += e_a * 2**i
        return ans
    def convert_action_agent2engine(self,agent_action):
        """Unpack an action index into its N_ACTION button bits (LSB first)."""
        assert type(agent_action) == type(int()) or type(agent_action) == type(np.int64()), print("type(agent_action)=",type(agent_action))
        ans = []
        for i in range(N_ACTION):
            ans.append(agent_action%2)
            agent_action = int(agent_action / 2)
        return ans
    def convert_action_agent2engine_simple(self, agent_action):
        """One-hot encode an action index over all N_AGENT_ACTION slots."""
        assert type(agent_action) == type(int()) or type(agent_action) == type(np.int64()), print("type(agent_action)=",type(agent_action))
        ans = np.zeros((N_AGENT_ACTION,))
        ans[agent_action] = 1
        return ans.tolist()
    def preprocess(self,img):
        """Transpose CHW->HWC if needed and resize to RESOLUTION as float32."""
        if len(img.shape) == 3 and img.shape[0]==3:
            img = img.transpose(1,2,0)
        img = skimage.transform.resize(img, RESOLUTION, mode="constant")
        img = img.astype(np.float32)
        # img = (img)/255.0
        return img
    def push_obs(self, s1):
        # Store the current frame as the agent's observation.
        self.obs['s1'] = s1
    def clear_obs(self):
        # Reset the observation to an all-zero frame.
        self.obs = {}
        self.obs['s1'] = np.zeros(RESOLUTION, dtype=np.float32)
    def push_batch(self, s1, action,s2, reward, isterminal, isdemo):
        # Copy frames so later game-side mutation can't corrupt the buffer.
        self.memory.append([np.copy(s1), action, np.copy(s2) , reward, isterminal, isdemo])
    def clear_batch(self):
        self.memory = []
    def make_advantage_data(self):
        """Turn the local buffer into n-step records; returns them newest-first.

        Each record is [s1, a, s2, s2_adv, r, R_adv, isterminal, isdemo] where
        R_adv is the discounted return from that step to the buffer end and
        s2_adv is the final successor frame shared by all records.
        """
        len_memory = len(self.memory)
        ret_batch = []
        R_adv = 0
        _,_,s2_adv,_,_,_ = self.memory[-1]
        for i in range(len_memory-1, -1, -1):
            s1,a,s2,r,isterminal,isdemo = self.memory[i]
            R_adv = r + GAMMA*R_adv
            ret_batch.append(np.array([s1, a,s2,s2_adv,r ,R_adv ,isterminal, isdemo]))
        self.memory = []
        return ret_batch
    def make_batch(self):
        """Sample a prioritized batch and bootstrap 1-step / n-step targets.

        Resamples until the successor-frame stacks have the expected shape
        (guards against malformed records in the replay memory).
        """
        while True:
            tree_idx, batch_row, is_weight = self.replay_memory.sample(N_BATCH, self.calc_beta(self.progress))
            # tree_idx, batch_row, is_weight = self.replay_memory.sample(N_BATCH, 0.1)
            s2_input = [ batch_row[i,2] for i in range(N_BATCH)]
            s2_adv = [ batch_row[i,3] for i in range(N_BATCH)]
            if (np.shape(s2_input) == ((N_BATCH,)+RESOLUTION) and np.shape(s2_adv) == ((N_BATCH,)+RESOLUTION)):
                break
        s1, actions, s2, r_one, r_adv, isdemo = [],[],[],[],[],[]
        predicted_q_adv = self.network.get_qvalue_max_learningaction(self.sess,s2_adv)
        predicted_q = self.network.get_qvalue_max_learningaction(self.sess,s2_input)
        for i in range(N_BATCH):
            s1.append(batch_row[i][0])
            actions.append(batch_row[i][1])
            # No bootstrap term for terminal transitions.
            R_one = batch_row[i][4] + GAMMA * predicted_q[i] if batch_row[i][6] == False else batch_row[i][4]
            R_adv = batch_row[i][5] + GAMMA**N_ADV * predicted_q_adv[i] if batch_row[i][6] == False else batch_row[i][5]
            r_one.append(R_one)
            r_adv.append(R_adv)
            isdemo.append(batch_row[i][7])
        actions = np.array(actions)
        return s1, actions.astype(np.int32), r_one, r_adv, isdemo, is_weight, tree_idx
    def make_batch_uniform(self):
        """Same as make_batch but with uniform (non-prioritized) sampling.

        NOTE(review): the shape guard here expects 5-frame sequences
        (N_BATCH, 5, 120, 120, 3), unlike make_batch — confirm which memory
        layout this variant was written for.
        """
        while True:
            tree_idx, batch_row, is_weight = self.replay_memory.sample_uniform(N_BATCH)
            s2_input = [ batch_row[i,2] for i in range(N_BATCH)]
            s2_adv = [ batch_row[i,3] for i in range(N_BATCH)]
            if (np.shape(s2_input) == (N_BATCH,5, 120,120,3) and np.shape(s2_adv) == (N_BATCH,5, 120,120,3)):
                break
        s1, actions, s2, r_one, r_adv, isdemo = [],[],[],[],[],[]
        predicted_q_adv = self.network.get_qvalue_max_learningaction(self.sess,s2_adv)
        predicted_q = self.network.get_qvalue_max_learningaction(self.sess,s2_input)
        for i in range(N_BATCH):
            s1.append(batch_row[i][0])
            actions.append(batch_row[i][1])
            R_one = batch_row[i][4] + GAMMA * predicted_q[i] if batch_row[i][6] == False else batch_row[i][4]
            R_adv = batch_row[i][5] + GAMMA**N_ADV * predicted_q_adv[i] if batch_row[i][6] == False else batch_row[i][5]
            r_one.append(R_one)
            r_adv.append(R_adv)
            isdemo.append(batch_row[i][7])
        actions = np.array(actions)
        return s1, actions.astype(np.int32), r_one, r_adv, isdemo, is_weight, tree_idx
    def calc_beta(self, progress):
        # Linear anneal of the importance-sampling exponent from BETA_MIN to BETA_MAX.
        # return BETA_MIN
        return (BETA_MAX - BETA_MIN) * progress + BETA_MIN
    def exploring_step(self):
        """Like learning_step but store-only: no batch sampling or updates."""
        if self.step % INTERVAL_PULL_PARAMS == 0:
            self.network.pull_parameter_server(self.sess)
        loss_values = []
        if not self.game.is_episode_finished() and self.game.get_screen_buff() is not None:
            s1_ = self.preprocess(self.game.get_screen_buff())
            self.push_obs(s1_)
            agent_action_idx = self.agent.act_eps_greedy(self.sess, self.obs['s1'], self.progress)
            # engin_action = self.convert_action_agent2engine_simple(agent_action_idx)
            engin_action = self.convert_action_agent2engine(agent_action_idx)
            r,r_detail = self.game.make_action(self.step,engin_action , FRAME_REPEAT)
            isterminal = self.game.is_episode_finished()
            if isterminal:
                s2_ = np.zeros(RESOLUTION)
            else:
                s2_ = self.preprocess(self.game.get_screen_buff())
            self.push_batch( self.obs['s1'], agent_action_idx, s2_, r , isterminal, False)
            if len(self.memory) >= N_ADV or isterminal:
                batch = self.make_advantage_data()
                self.clear_batch()
                for i,b in enumerate(batch):
                    if len(b) == 8:
                        self.replay_memory.store(b)
            self.step += 1
        else:
            self.game.new_episode()
            self.clear_batch()
            self.clear_obs()
        return loss_values
# In[ ]:
class ParameterServer:
    """Global (shared) network plus TensorBoard summary writer and checkpointing.

    Workers pull/push weights from the variables under the "parameter_server"
    scope; gradients computed by the local networks are applied through
    self.optimizer onto self.weights_params.
    """
    def __init__(self, sess, log_dir):
        self.sess = sess
        with tf.variable_scope("parameter_server", reuse=tf.AUTO_REUSE):
            self.state1_ = tf.placeholder(tf.float32, shape=(None,) + RESOLUTION)
            self.q_value, self.conv1, self.conv2, self.q_prob = self._build_model(self.state1_)
        self.weights_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="parameter_server")
        # self.optimizer = tf.train.RMSPropOptimizer(LEARNING_RATE, RMSProbDecaly)
        self.optimizer = tf.train.AdamOptimizer()
        with tf.variable_scope("summary", reuse=tf.AUTO_REUSE):
            self._build_summary(sess,log_dir)
        self.saver = tf.train.Saver(max_to_keep = 20)
        # print("-------GLOBAL-------")
        # for w in self.weights_params:
        #     print(w)
    def _build_model(self,state):
        """Build the conv->fc->Q graph; prints layer shapes for inspection."""
        conv1 = NetworkSetting.conv1(state)
        # maxpool1 = NetworkSetting.maxpool1(conv1)
        conv2 = NetworkSetting.conv2(conv1)
        # maxpool2 = NetworkSetting.maxpool2(conv2)
        reshape = NetworkSetting.reshape(conv2)
        fc1 = NetworkSetting.fc1(reshape)
        q = NetworkSetting.q_value(fc1)
        q_prob = tf.nn.softmax(q)
        print("---------MODEL SHAPE-------------")
        print(state.get_shape())
        print(conv1.get_shape())
        # print(maxpool1.get_shape())
        print(conv2.get_shape())
        # print(maxpool2.get_shape())
        print(reshape.get_shape())
        print(fc1.get_shape())
        print(q.get_shape())
        return q, conv1, conv2, q_prob
    def _build_summary(self,sess, log_dir):
        """Create the score/loss/weight summary ops and the FileWriter."""
        self.reward_ = tf.placeholder(tf.float32,shape=(), name="reward")
        self.frag_ = tf.placeholder(tf.float32, shape=(), name="frag")
        self.death_ = tf.placeholder(tf.float32, shape=(), name="death")
        self.kill_ = tf.placeholder(tf.float32, shape=(), name="kill")
        self.score_step_ = tf.placeholder(tf.float32, shape=(), name="step")
        self.loss_one_ = tf.placeholder(tf.float32, shape=(), name="loss_one")
        self.loss_adv_ = tf.placeholder(tf.float32, shape=(), name="loss_adv")
        self.loss_cls_ = tf.placeholder(tf.float32, shape=(), name="loss_class")
        self.loss_l2_ = tf.placeholder(tf.float32, shape=(), name="loss_l2")
        with tf.variable_scope("Summary_Score"):
            s = [tf.summary.scalar('reward', self.reward_, family="score"), tf.summary.scalar('frag', self.frag_, family="score"), tf.summary.scalar("death", self.death_, family="score"), tf.summary.scalar("kill", self.kill_, family="score"), tf.summary.scalar("step",self.score_step_, family="score")]
            self.summary_reward = tf.summary.merge(s)
        with tf.variable_scope("Summary_Loss"):
            list_summary = [tf.summary.scalar('loss_onestep', self.loss_one_, family="loss"), tf.summary.scalar('loss_advantage', self.loss_adv_, family="loss"), tf.summary.scalar('loss_class', self.loss_cls_, family="loss"), tf.summary.scalar('loss_l2', self.loss_l2_, family='loss')]
            self.summary_loss = tf.summary.merge(list_summary)
        # with tf.variable_scope("Summary_Images"):
        #     conv1_display = tf.reshape(tf.transpose(self.conv1, [0,1,4,2,3]), (-1, self.conv1.get_shape()[1],self.conv1.get_shape()[2]))
        #     conv2_display = tf.reshape(tf.transpose(self.conv2, [0,1,4,2,3]), (-1, self.conv2.get_shape()[1],self.conv2.get_shape()[2]))
        #     conv1_display = tf.expand_dims(conv1_display, -1)
        #     conv2_display = tf.expand_dims(conv2_display, -1)
        #     state_shape = self.state1_.get_shape()
        #     conv1_shape = conv1_display.get_shape()
        #     conv2_shape = conv2_display.get_shape()
        #     s_img = []
        #     s_img.append(tf.summary.image('state',tf.reshape(self.state1_,[-1, state_shape[2], state_shape[3], state_shape[4]]), 1, family="state1"))
        #     s_img.append(tf.summary.image('conv1',tf.reshape(self.conv1,[-1, conv1_shape[1], conv1_shape[2], 1]), family="conv1"))
        #     s_img.append(tf.summary.image('conv2',tf.reshape(self.conv2,[-1, conv2_shape[1], conv2_shape[2], 1]), family="conv2"))
        #     self.summary_image = tf.summary.merge(s_img)
        with tf.variable_scope("Summary_Weights"):
            s = [tf.summary.histogram(values=w, name=w.name, family="weights") for w in self.weights_params]
            self.summary_weights = tf.summary.merge(s)
        self.writer = tf.summary.FileWriter(log_dir)
    def write_graph(self, sess):
        # Dump the computation graph once for TensorBoard.
        self.writer.add_graph(sess.graph)
    def write_score(self,sess, step ,reward, frag, death, kill, score_step):
        """Log one evaluation episode's scores at global step *step*."""
        m = sess.run(self.summary_reward, feed_dict={self.reward_:reward, self.frag_:frag, self.death_:death, self.kill_:kill, self.score_step_:score_step})
        return self.writer.add_summary(m, step)
    def write_loss(self,sess, step, l_o, l_n,l_c, l_l):
        """Log the four loss components (1-step, n-step, margin, L2)."""
        m = sess.run(self.summary_loss, feed_dict={self.loss_one_: l_o, self.loss_adv_:l_n, self.loss_cls_:l_c, self.loss_l2_:l_l})
        return self.writer.add_summary(m, step)
    # def write_img(self,sess, step, state):
    #     m = sess.run(self.summary_image, feed_dict={self.state1_: state})
    #     return self.writer.add_summary(m, step)
    def write_weights(self, sess, step):
        # Log weight histograms for every trainable global variable.
        m = sess.run(self.summary_weights)
        return self.writer.add_summary(m, step)
    def load_model(self, sess, model_path, step):
        # Restore the checkpoint saved with suffix "-<step>".
        self.saver.restore(sess, model_path+'-'+str(step))
    def save_model(self, sess, model_path, step):
        self.saver.save(sess, model_path, global_step = step)
    def load_cnnweights(self, sess, weights_path):
        """Assign four pre-trained .npy arrays onto the first four conv weights."""
        assert len(weights_path) == 4
        cnn_weights = self.weights_params[:4]
        w_demo = [np.load(w_p) for w_p in weights_path]
        plh = [tf.placeholder(tf.float32, shape=w.shape) for w in w_demo]
        assign_op = [w.assign(p) for w, p in zip(cnn_weights, plh)]
        feed_dict = {p:w for w,p in zip(w_demo, plh)}
        sess.run(assign_op, feed_dict)
# In[ ]:
class Agent(object):
    """Action selection on top of the local network.

    NOTE(review): here "eps" is the probability of EXPLOITING (sampling the
    softmax policy), not of exploring — act_eps_greedy takes a uniformly
    random action when rand() > eps, and eps grows with training progress.
    """
    def __init__(self, network,random_seed):
        self.network = network
        # Private RNG so workers with different seeds explore differently.
        self.randomstate = np.random.RandomState(random_seed)
    def calc_eps(self, progress):
        """Piecewise-linear schedule: EPS_MIN until 20%, ramp to EPS_MAX by 80%."""
        if progress < 0.2:
            return EPS_MIN
        elif progress >= 0.2 and progress < 0.8:
            return ((EPS_MAX - EPS_MIN)/ 0.6) * progress + ( EPS_MIN - (EPS_MAX - EPS_MIN)/ 0.6 * 0.2)
        else :
            return EPS_MAX
    def act_eps_greedy(self, sess, s1, progress):
        """Sample the policy with probability eps, otherwise act uniformly at random."""
        assert progress >= 0.0 and progress <=1.0
        eps = self.calc_eps(progress)
        if self.randomstate.rand() <= eps:
            a_idx = self.randomstate.choice(range(N_AGENT_ACTION), p=self.network.get_policy(sess,[s1])[0])
            # a_idx = self.network.get_best_action(sess, [s1])[0]
        else:
            a_idx = self.randomstate.randint(N_AGENT_ACTION)
        return a_idx
    def act_greedy(self, sess, s1):
        """Sample an action from the network's softmax policy (no random fallback)."""
        a_idx = self.randomstate.choice(range(N_AGENT_ACTION), p=self.network.get_policy(sess,[s1])[0])
        # a_idx = self.network.get_best_action(sess, [s1])[0]
        return a_idx
    def get_sum_prob(self,sess, s1):
        """Sample proportionally to clipped Q-values (+0.01 keeps all actions possible)."""
        q_value = self.network.get_qvalue_learning(sess, [s1])[0]
        q_value = np.maximum(q_value,0) + 0.01
        q_prob = (q_value)/sum(q_value)
        a_idx = np.random.choice(N_AGENT_ACTION, p=q_prob)
        return a_idx
# In[ ]:
class NetworkLocal(object):
    """Per-worker DQfD network.

    Holds a learning (online) network, a frozen target network with the same
    architecture, the combined DQfD loss (1-step TD + n-step TD + large-margin
    demonstration loss + L2), and ops to synchronise weights with the shared
    ParameterServer.

    Fixes vs. previous revision:
    - ``get_qvalue_lerning_max`` referenced a nonexistent attribute
      ``q_learing_max`` (the graph defines ``q_learning_max``) -> AttributeError.
    - ``check_weights`` used the global ``SESS`` instead of its ``sess`` arg,
      and passed ``print(weights)`` (i.e. ``None``) as the assert message.
    """

    def __init__(self, name, parameter_server):
        self.name = name
        with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE):
            # Learning (online) network.
            with tf.variable_scope("learning_network"):
                self.state1_ = tf.placeholder(tf.float32, shape=(None,) + RESOLUTION, name="state_1")
                self.q_value, self.conv1, self.conv2, self.reshape, self.fc1 = self._build_model(self.state1_)
            # Target network: same architecture, separate weights.
            with tf.variable_scope("target_network"):
                self.state1_target_ = tf.placeholder(tf.float32, shape=(None,) + RESOLUTION, name="state_1")
                self.q_value_target, _, _, _, _ = self._build_model(self.state1_target_)
            # Loss inputs: taken action, 1-step / n-step TD targets, demo flag,
            # large-margin matrix and importance-sampling weights.
            self.a_ = tf.placeholder(tf.int32, shape=(None,), name="action")
            self.target_one_ = tf.placeholder(tf.float32, shape=(None,), name="target_one_")
            self.target_n_ = tf.placeholder(tf.float32, shape=(None,), name="target_n_")
            self.isdemo_ = tf.placeholder(tf.float32, shape=(None,), name="isdemo_")
            self.mergin_ = tf.placeholder(tf.float32, shape=(None, N_AGENT_ACTION), name="mergin_")
            self.is_weight_ = tf.placeholder(tf.float32, shape=(None,), name="is_weight")
            self._build_graph()
            # Apply locally computed gradients to the globally shared weights.
            self.update_global_weight_params = parameter_server.optimizer.apply_gradients(
                [(g, w) for g, w in zip(self.grads, parameter_server.weights_params)])
            # Full weight-set sync ops (server -> local, local -> server).
            self.pull_global_weight_params = [l_p.assign(g_p) for l_p, g_p in
                                              zip(self.weights_params_learning, parameter_server.weights_params)]
            self.push_local_weight_params = [g_p.assign(l_p) for g_p, l_p in
                                             zip(parameter_server.weights_params, self.weights_params_learning)]

    def _build_model(self, state):
        """Build the conv -> fc -> Q head. Returns (q_value, conv1, conv2, reshape, fc1)."""
        conv1 = NetworkSetting.conv1(state)
        # maxpool1 = NetworkSetting.maxpool1(conv1)
        conv2 = NetworkSetting.conv2(conv1)
        # maxpool2 = NetworkSetting.maxpool2(conv2)
        reshape = NetworkSetting.reshape(conv2)
        fc1 = NetworkSetting.fc1(reshape)
        q_value = NetworkSetting.q_value(fc1)
        return q_value, conv1, conv2, reshape, fc1

    def _build_graph(self):
        """Build the DQfD loss graph and the local gradient/copy ops."""
        self.q_prob = tf.nn.softmax(self.q_value)
        self.q_argmax = tf.argmax(self.q_value, axis=1)
        self.q_learning_max = tf.reduce_max(self.q_value, axis=1)
        self.q_target_max = tf.reduce_max(self.q_value_target, axis=1)
        # (row, action) index pairs to gather Q(s, a) for the taken actions.
        action_idxlist = tf.transpose([tf.range(tf.shape(self.q_value)[0]), self.a_])
        self.weights_params_learning = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                                         scope=self.name + "/learning_network")
        self.weights_params_target = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                                       scope=self.name + "/target_network")
        q_sa = tf.gather_nd(self.q_value, indices=action_idxlist)
        # 1-step and n-step TD errors / IS-weighted squared losses.
        self.tderror_one = LAMBDA_ONE * tf.abs(self.target_one_ - q_sa)
        self.loss_one = (LAMBDA_ONE * tf.square(self.target_one_ - q_sa)) * self.is_weight_
        self.tderror_n = LAMBDA1 * tf.abs(self.target_n_ - q_sa)
        self.loss_n = (LAMBDA1 * tf.square(self.target_n_ - q_sa)) * self.is_weight_
        self.loss_l2 = LAMBDA3 * tf.reduce_sum([tf.nn.l2_loss(w) for w in self.weights_params_learning])
        # Large-margin classification loss: pushes the demo action's Q above all
        # others by at least the margin; active only where isdemo_ == 1.
        self.loss_mergin = LAMBDA2 * ((tf.stop_gradient(tf.reduce_max(self.q_value + self.mergin_, axis=1)) - q_sa)
                                      * self.isdemo_)
        self.tderror_total = self.tderror_one + self.tderror_n + self.loss_mergin
        self.loss_total = tf.reduce_mean(self.loss_one + self.loss_n + self.loss_mergin + self.loss_l2)
        self.grads = tf.gradients(self.loss_total, self.weights_params_learning)
        self.copy_params = [t.assign(l) for l, t in zip(self.weights_params_learning, self.weights_params_target)]

    def _make_feed_dict(self, s1, a, target_one, target_n, isdemo, is_weight):
        """Build the common loss feed dict; the margin matrix is MERGIN_VALUE
        everywhere except 0 at the taken action."""
        mergin_value = np.ones((len(s1), N_AGENT_ACTION)) * MERGIN_VALUE
        mergin_value[range(len(s1)), a] = 0.0
        return {self.state1_: s1, self.a_: a, self.target_one_: target_one,
                self.target_n_: target_n, self.isdemo_: isdemo,
                self.is_weight_: is_weight, self.mergin_: mergin_value}

    def copy_network_learning2target(self, sess):
        """Copy learning-network weights into the target network."""
        return sess.run(self.copy_params)

    def pull_parameter_server(self, sess):
        """Overwrite local learning weights with the parameter server's weights."""
        return sess.run(self.pull_global_weight_params)

    def push_parameter_server(self, sess):
        """Overwrite the parameter server's weights with local learning weights."""
        return sess.run(self.push_local_weight_params)

    def get_weights_learngin(self, sess):
        # (method name misspelling kept — existing callers use it)
        return sess.run(self.weights_params_learning)

    def get_weights_target(self, sess):
        return sess.run(self.weights_params_target)

    def get_loss(self, sess, s1, a, target_one, target_n, isdemo, is_weight):
        """Debug variant: evaluates only the n-step terms and pads the result
        to the (l_one, l_n, l_mergin, l_l2, tderror) 5-tuple callers expect."""
        feed_dict = self._make_feed_dict(s1, a, target_one, target_n, isdemo, is_weight)
        l_one, tderror_total = sess.run([self.loss_n, self.tderror_n], feed_dict)
        return l_one, 0, 0, 0, tderror_total

    def get_losstotal(self, sess, s1, a, target_one, target_n, isdemo, is_weight):
        """Return the scalar total loss for a batch."""
        feed_dict = self._make_feed_dict(s1, a, target_one, target_n, isdemo, is_weight)
        loss_total = sess.run([self.loss_total], feed_dict)
        return loss_total[0]

    def get_grads(self, sess, s1, a, target_one, target_n, isdemo, is_weight):
        """Return the gradients of the total loss w.r.t. the learning weights."""
        feed_dict = self._make_feed_dict(s1, a, target_one, target_n, isdemo, is_weight)
        return sess.run(self.grads, feed_dict)

    def update_parameter_server(self, sess, s1, a, target_one, target_n, isdemo, is_weight):
        """Run one optimisation step on the global weights; return loss parts."""
        assert np.ndim(s1) == 4
        feed_dict = self._make_feed_dict(s1, a, target_one, target_n, isdemo, is_weight)
        _, l_one, l_n, l_mergin, l_l2, tderror_total = sess.run(
            [self.update_global_weight_params, self.loss_one, self.loss_n,
             self.loss_mergin, self.loss_l2, self.tderror_total], feed_dict)
        return l_one, l_n, l_mergin, l_l2, tderror_total

    def check_weights(self, sess):
        """Assert that no learning-network weight tensor contains NaNs."""
        weights = sess.run(self.weights_params_learning)  # bug fix: was global SESS
        assert not np.isnan([np.mean(w) for w in weights]).any(), weights

    def get_qvalue_learning(self, sess, s1):
        assert np.ndim(s1) == 4
        return sess.run(self.q_value, {self.state1_: s1})

    def get_qvalue_lerning_max(self, sess, s1):
        # bug fix: referenced nonexistent attribute `q_learing_max`
        return sess.run(self.q_learning_max, {self.state1_: s1})

    def get_qvalue_target(self, sess, s1):
        assert np.ndim(s1) == 4
        return sess.run(self.q_value_target, {self.state1_target_: s1})

    def get_qvalue_target_max(self, sess, s1):
        assert np.ndim(s1) == 4
        return sess.run(self.q_target_max, {self.state1_target_: s1})

    def get_qvalue_max_learningaction(self, sess, s1):
        """Double-DQN style target: Q_target(s, argmax_a Q_learning(s, a))."""
        assert np.ndim(s1) == 4
        action_idx, q_value = sess.run([self.q_argmax, self.q_value_target],
                                       {self.state1_: s1, self.state1_target_: s1})
        return q_value[range(np.shape(s1)[0]), action_idx]

    def get_policy(self, sess, s1):
        """Softmax distribution over the learning network's Q-values."""
        return sess.run(self.q_prob, {self.state1_: s1})

    def get_best_action(self, sess, s1):
        """Greedy action indices from the learning network."""
        return sess.run(self.q_argmax, {self.state1_: s1})
# In[ ]:
class NetworkSetting:
    """Layer factory for the Q-network (plain functions accessed through the
    class namespace; no ``self``). Uses TF1 ``tf.contrib`` initializers.

    Fix: ``maxpool2`` passed 5-element ksize/strides to ``tf.nn.max_pool2d``,
    which operates on 4-D NHWC tensors — a leftover from an earlier 5-D input
    version (see the commented-out 5-D reshape). Both pooling layers are
    currently unused (commented out in ``NetworkLocal._build_model``).
    """

    def conv1(pre_layer):
        """6x6/stride-3 conv, 16 filters, ReLU."""
        num_outputs = 16
        kernel_size = [6, 6]
        stride = [3, 3]
        padding = 'SAME'
        activation = tf.nn.relu
        weights_init = tf.contrib.layers.xavier_initializer_conv2d()
        bias_init = tf.constant_initializer(0.1)
        return tf.layers.conv2d(pre_layer, kernel_size=kernel_size, filters=num_outputs,
                                strides=stride, padding=padding, activation=activation,
                                kernel_initializer=weights_init, bias_initializer=bias_init)

    def maxpool1(pre_layer):
        """3x3/stride-2 max pooling (NHWC)."""
        return tf.nn.max_pool2d(pre_layer, [1, 3, 3, 1], [1, 2, 2, 1], 'SAME')

    def conv2(pre_layer):
        """3x3/stride-2 conv, 16 filters, ReLU."""
        num_outputs = 16
        kernel_size = [3, 3]
        stride = [2, 2]
        padding = 'SAME'
        activation = tf.nn.relu
        weights_init = tf.contrib.layers.xavier_initializer_conv2d()
        bias_init = tf.constant_initializer(0.1)
        return tf.layers.conv2d(pre_layer, kernel_size=kernel_size, filters=num_outputs,
                                strides=stride, padding=padding, activation=activation,
                                kernel_initializer=weights_init, bias_initializer=bias_init)

    def maxpool2(pre_layer):
        """3x3/stride-2 max pooling (NHWC).

        Bug fix: ksize/strides were 5-element ([1,1,3,3,1]/[1,1,2,2,1]), which
        is invalid for 4-D ``max_pool2d`` input.
        """
        return tf.nn.max_pool2d(pre_layer, [1, 3, 3, 1], [1, 2, 2, 1], 'SAME')

    def reshape(pre_layer):
        """Flatten the (H, W, C) feature map to a vector per batch item."""
        shape = pre_layer.get_shape()
        # return tf.reshape(pre_layer, shape=(-1,shape[1], shape[2]*shape[3]*shape[4]))
        return tf.reshape(pre_layer, shape=(-1, shape[1] * shape[2] * shape[3]))

    def fc1(pre_layer):
        """1024-unit fully connected layer, ReLU."""
        num_outputs = 1024
        activation_fn = tf.nn.relu
        weights_init = tf.contrib.layers.xavier_initializer()
        bias_init = tf.constant_initializer(0.1)
        return tf.contrib.layers.fully_connected(pre_layer, num_outputs=num_outputs,
                                                 activation_fn=activation_fn,
                                                 weights_initializer=weights_init,
                                                 biases_initializer=bias_init)

    def q_value(pre_layer):
        """Linear output head: one Q-value per agent action."""
        num_outputs = N_AGENT_ACTION
        activation_fn = None
        weights_init = tf.contrib.layers.xavier_initializer()
        bias_init = tf.constant_initializer(0.1)
        return tf.contrib.layers.fully_connected(pre_layer, num_outputs=num_outputs,
                                                 activation_fn=activation_fn,
                                                 weights_initializer=weights_init,
                                                 biases_initializer=bias_init)
# In[ ]:
def load_demo_one(replay_memory, demo_path):
    """Load demonstration episodes from HDF5 files into the replay memory.

    Each stored transition is [s1, action, s2, s2_adv, r, R_adv, isterminal,
    isdemo=True] where R_adv is the N_ADV-step discounted return and s2_adv the
    state N_ADV steps ahead. Returns 0 on success.

    Fixes vs. previous revision:
    - stored via the *global* ``replaymemory`` instead of the ``replay_memory``
      parameter;
    - the action-decoding loop reused ``i`` and shadowed the outer transition
      index;
    - removed a stray duplicate ``file.close()`` after the loop.
    """
    total_n_transit = 0
    for demo in demo_path[:]:
        print(demo)
        file = h5py.File(demo, 'r')
        episodes = list(file.keys())[1:]  # skip the first key (non-episode entry)
        # NOTE(review): this game instance is never used below — presumably a
        # leftover; kept in case construction has required side effects. TODO confirm.
        game = GameInstanceBasic(DoomGame(), name="noname", n_bots=1, config_path=CONFIG_FILE_PATH,
                                 reward_param=REWARDS, steps_update_origin=10, timelimit=2)
        total_n_transit = 0  # NOTE(review): reset per file — counts only the last file; confirm intent
        for e in episodes:
            states_row = file[e + "/states"][:]
            action_row = file[e + "/action"][:]
            # action_row = action_row[:,[2,4,6]]
            action_row = action_row[:, [0, 1, 2, 3, 4, 6]]  # keep the 6 used buttons
            n_transit = len(states_row)
            total_n_transit += n_transit
            memory = []
            for i in range(0, n_transit):
                s1_ = states_row[i]
                if i == n_transit - 1:
                    # Episode end: terminal transition with the kill bonus.
                    isterminal = True
                    s2_ = np.zeros(RESOLUTION)
                    r = REWARDS['kill'] + REWARDS['enemysight']
                else:
                    isterminal = False
                    s2_ = states_row[i + 1]
                    r = REWARDS['living'] + REWARDS['enemysight']
                # Encode the multi-binary button vector as one integer
                # (button j contributes bit j).
                action = 0
                for j, e_a in enumerate(action_row[i]):
                    action += e_a * 2 ** j
                memory.append([s1_, action, s2_, r, isterminal, True])
                # Flush every N_ADV steps (or at episode end) with n-step returns.
                if len(memory) == N_ADV or isterminal == True:
                    R_adv = 0
                    len_memory = len(memory)
                    _, _, s2_adv, _, _, _ = memory[-1]
                    for k in range(len_memory - 1, -1, -1):
                        s1, a, s2, r, isterminal, isdemo = memory[k]
                        R_adv = r + GAMMA * R_adv
                        replay_memory.store(np.array([s1, a, s2, s2_adv, r, R_adv, isterminal, isdemo]))
                    memory = []
        file.close()
    # Mark the demonstration transitions as permanent (never overwritten).
    replay_memory.set_permanent_data(total_n_transit)
    print(len(replay_memory), "data are stored")
    return 0
# In[ ]:
def load_positivedata(replay_memory, data_path_list):
    """Load pre-saved transitions from .npy files into the replay memory.

    Every loaded transition is marked permanent (never overwritten).
    Returns 0 on success.
    """
    for data_path in data_path_list:
        print(data_path)
        for transition in np.load(data_path):
            replay_memory.store(transition)
    stored = len(replay_memory)
    replay_memory.set_permanent_data(stored)
    print(stored, "data are stored")
    return 0
# In[ ]:
def write_experiment_info(wall_time, exp_dir, n_agent, log_dir, record_file):
    """Append one CSV line (wall_time, exp_dir, n_agent, log_dir) to record_file."""
    line = f"{wall_time},{exp_dir},{n_agent},{log_dir}\n"
    with open(record_file, mode='a') as f:
        f.write(line)
# In[ ]:
def set_random_seed(seed):
    """Seed Python's random, NumPy and TensorFlow (graph-level) RNGs."""
    random.seed(seed)
    np.random.seed(seed)
    tf.set_random_seed(seed)
# In[ ]:
def convert_tensorboardlog_to_csv(dpath, output_dir):
    """Export every scalar tag from the TensorBoard runs under `dpath` to CSV.

    One CSV per tag (named after the last path component of the tag) is written
    to `output_dir` with columns Value/Step/Wall time, taking the first run's
    event at each aligned step. Returns (values, steps, walltimes) dicts.
    """
    accumulators = [EventAccumulator(os.path.join(dpath, run)).Reload()
                    for run in os.listdir(dpath)]
    tags = accumulators[0].Tags()['scalars']
    for acc in accumulators:
        assert acc.Tags()['scalars'] == tags  # every run must log the same tags
    out = defaultdict(list)
    steps = defaultdict(list)
    walltimes = defaultdict(list)
    for tag in tags:
        key = tag.split("/")[-1]
        for events in zip(*[acc.Scalars(tag) for acc in accumulators]):
            assert len(set(e.step for e in events)) == 1  # runs must be step-aligned
            first = events[0]
            out[key].append(first.value)
            steps[key].append(first.step)
            walltimes[key].append(first.wall_time)
    for key in out:
        frame = pd.DataFrame({'Value': out[key], 'Step': steps[key], 'Wall time': walltimes[key]})
        frame.to_csv(os.path.join(output_dir, key + ".csv"))
    return out, steps, walltimes
# In[ ]:
# NOTE(review): __name__ is compared against custom strings ("learning_imitation",
# "learning_async", "test", ...) rather than "__main__" — presumably set by the
# notebook/launcher to select which section runs; confirm how __name__ is injected.
if __name__=="learning_imitation":
    print(LOGDIR)
    # Replay memory pre-filled with demonstration transitions for imitation.
    replaymemory = ReplayMemory(10000, random_seed=random.randint(0,1000))
    load_demo_one(replaymemory, DEMO_PATH)
    config = tf.ConfigProto(gpu_options = tf.GPUOptions(visible_device_list=USED_GPU))
    config.log_device_placement = False
    config.allow_soft_placement = True
    sess = tf.Session(config=config)
    with tf.device('/gpu:0'):
        # Shared parameter server plus one imitation worker and one test worker.
        parameter_server = ParameterServer(sess,LOGDIR)
        starttime = datetime.now().timestamp()
        end_time = (datetime.now() + timedelta(minutes=20)).timestamp()
        coordinator = tf.train.Coordinator()
        name = "worker_imitation"
        game_instance = GameInstanceBasic(DoomGame(),name=name,n_bots=1,config_path=CONFIG_FILE_PATH, reward_param=REWARDS, steps_update_origin=10,timelimit=2)
        network = NetworkLocal(name, parameter_server)
        agent = Agent(network,random_seed=0)
        # imitation_env = Environment(sess = sess ,name=name, agent=agent, game_instance=game_instance, network=network, start_time=starttime, end_time=end_time, random_seed=0)
        imitation_env = Environment(sess = sess ,name=name, agent=agent, game_instance=game_instance, network=network, n_step=N_STEPS, random_seed=0)
        if SAVE_DATA == True:
            imitation_env.log_server = parameter_server
        # The imitation worker trains from the demonstration replay memory.
        imitation_env.replay_memory = replaymemory
        thread_imitation = threading.Thread(target=imitation_env.run_prelearning, args=(coordinator,))
        # A separate environment periodically evaluates the current policy.
        name = "test"
        game_instance = GameInstanceBasic(DoomGame(),name=name,n_bots=1,config_path=CONFIG_FILE_PATH, reward_param=REWARDS, steps_update_origin=10,timelimit=2)
        network = NetworkLocal(name, parameter_server)
        agent = Agent(network,random_seed=100)
        test_env = Environment(sess = sess ,name=name, agent=agent, game_instance=game_instance, network=network, start_time=starttime, end_time=end_time, random_seed=100)
        # test_env = Environment(sess = sess ,name=name, agent=agent, game_instance=game_instance, network=network, n_step=10000, random_seed=0)
        if SAVE_DATA == True:
            test_env.log_server = parameter_server
        thread_test = threading.Thread(target=test_env.run_test, args=(coordinator,))
        parameter_server.write_graph(sess)
        sess.run(tf.global_variables_initializer())
        print("-----Start IMITATION LEARNING----")
        threads = [thread_imitation,thread_test]
        for t in threads:
            t.start()
        # coordinator.join(threads)
        # Poll until the imitation worker reports completion, then stop all threads.
        while True:
            time.sleep(10)
            if imitation_env.progress >= 1.0:
                coordinator.request_stop()
                break
        if SAVE_DATA == True:
            parameter_server.save_model(sess=sess, step=15, model_path=MODEL_PATH)
        print(LOGDIR)
# In[ ]:
# Asynchronous (A3C-style) reinforcement learning phase: N_WORKERS learning
# threads plus one evaluation thread, warm-started from the imitation model.
if __name__=="learning_async":
    print(LOGDIR)
    set_random_seed(0)
    # Replay memory pre-filled with permanent demonstration transitions (DQfD).
    replaymemory = ReplayMemory(50000,random_seed=random.randint(0,1000))
    load_demo_one(replaymemory, DEMO_PATH)
    # load_positivedata(replaymemory, POSITIVEDATA_PATH)
    config = tf.ConfigProto(gpu_options = tf.GPUOptions(visible_device_list=USED_GPU))
    config.gpu_options.allow_growth = True
    config.log_device_placement = False
    config.allow_soft_placement = True
    sess = tf.Session(config=config)
    with tf.device('/gpu:0'):
        # Shared buffer workers append position rows to (saved to CSV at the end).
        position_data_buff = []
        parameter_server = ParameterServer(sess,LOGDIR)
        starttime = datetime.now().timestamp()
        end_time = (datetime.now() + timedelta(minutes=10)).timestamp()
        coordinator = tf.train.Coordinator()
        environments, threads = [], []
        # One learning worker per thread, all sharing the replay memory.
        for i in range(N_WORKERS):
            name = "worker_%d"%(i+1)
            game_instance=GameInstanceSimpleDeathmatch(DoomGame(),name=name,n_bots=1,config_path=CONFIG_FILE_PATH, reward_param=REWARDS, steps_update_origin=10,timelimit=2)
            # game_instance=GameInstanceSimpleBasic(DoomGame(),name=name,config_path=CONFIG_FILE_PATH)
            network = NetworkLocal(name, parameter_server)
            agent = Agent(network, random_seed=random.randint(0,1000))
            # e = Environment(sess = sess ,name=name, agent=agent, game_instance=game_instance, network=network, start_time=starttime, end_time=end_time, random_seed=i)
            e = Environment(sess = sess ,name=name, agent=agent, game_instance=game_instance, network=network, n_step=N_STEPS, random_seed=random.randint(0,1000), position_data=position_data_buff)
            e.replay_memory = replaymemory
            environments.append(e)
        if SAVE_DATA == True:
            # Only the first worker logs to TensorBoard and records timing stats.
            environments[0].log_server = parameter_server
            environments[0].times_act = []
            environments[0].times_update = []
        # (a previously used dedicated "updating" worker was removed; see VCS history)
        # Separate evaluation environment with its own seed.
        name = "test"
        test_seed = random.randint(0,1000)
        game_instance=GameInstanceSimpleDeathmatch(DoomGame(),name=name,n_bots=1,config_path=CONFIG_FILE_PATH, reward_param=REWARDS, steps_update_origin=10,timelimit=2)
        # game_instance=GameInstanceSimpleBasic(DoomGame(),name=name,config_path=CONFIG_FILE_PATH)
        network = NetworkLocal(name, parameter_server)
        agent = Agent(network, random_seed=test_seed)
        # test_env = Environment(sess = sess ,name=name, agent=agent, game_instance=game_instance, network=network, start_time=starttime, end_time=end_time, random_seed=test_seed)
        test_env = Environment(sess = sess ,name=name, agent=agent, game_instance=game_instance, network=network, n_step=N_STEPS, random_seed=test_seed)
        if SAVE_DATA == True:
            test_env.log_server = parameter_server
            test_env.rewards_detail = []
        thread_test = threading.Thread(target=test_env.run_test, args=(coordinator,))
        for e in environments:
            # threads.append(threading.Thread(target=e.run_exploring, args=(coordinator,)))
            threads.append(threading.Thread(target=e.run_learning, args=(coordinator,)))
        threads.append(thread_test)
        parameter_server.write_graph(sess)
        sess.run(tf.global_variables_initializer())
        # Warm-start from the imitation-learned model.
        parameter_server.load_model(sess=sess, step=15, model_path=IMIT_MODEL_PATH)
        print("-----Start ASYNC LEARNING----")
        for t in threads:
            t.start()
        # coordinator.join(threads)
        # Poll until every worker reports completion, then stop all threads.
        while True:
            time.sleep(10)
            if np.array([e.progress >= 1.0 for e in environments]).all():
                coordinator.request_stop()
                break
        if SAVE_DATA == True:
            parameter_server.save_model(sess=sess, step=15, model_path=MODEL_PATH)
        # Persist position traces, exported scalar logs, and a run-index entry.
        position_data_buff = np.array(position_data_buff)
        pd.DataFrame(data=position_data_buff, columns=['enemy_center_x', 'enemy_center_y', 'player_position_x', 'player_position_y', 'angle']).to_csv(POSITION_DATA_PATH)
        convert_tensorboardlog_to_csv(LOGDIR, output_dir=CSV_DIR)
        write_experiment_info(DATETIME.timestamp(), EXPERIMENT_NAME, N_WORKERS, LOGDIR,"record_dir.csv")
        print(LOGDIR)
        print(sum([e.step for e in environments]))
# In[ ]:
# Interactive evaluation setup: builds a single test environment; a trained
# model must be restored manually (no load_model call is active here).
if __name__=="test":
    print(LOGDIR)
    replaymemory = ReplayMemory(50000)
    # load_demo_one(replaymemory, DEMO_PATH)
    # load_positivedata(replaymemory, POSITIVEDATA_PATH)
    tf.set_random_seed(0)
    # N_STEPS = 25000
    config = tf.ConfigProto(gpu_options = tf.GPUOptions(visible_device_list=USED_GPU))
    config.gpu_options.allow_growth = True
    config.log_device_placement = False
    config.allow_soft_placement = True
    sess = tf.Session(config=config)
    with tf.device('/gpu:0'):
        parameter_server = ParameterServer(sess,LOGDIR)
        starttime = datetime.now().timestamp()
        end_time = (datetime.now() + timedelta(minutes=60)).timestamp()
        coordinator = tf.train.Coordinator()
        name = "test"
        # test_seed = np.random.randint(1000)
        test_seed = 100  # fixed seed for reproducible evaluation
        game_instance=GameInstanceSimpleDeathmatch(DoomGame(),name=name,n_bots=1,config_path=CONFIG_FILE_PATH, reward_param=REWARDS, steps_update_origin=10,timelimit=2)
        # game_instance=GameInstanceSimpleBasic(DoomGame(),name=name,config_path=CONFIG_FILE_PATH)
        network = NetworkLocal(name, parameter_server)
        agent = Agent(network, random_seed=test_seed)
        test_env = Environment(sess = sess ,name=name, agent=agent, game_instance=game_instance, network=network, start_time=starttime, end_time=end_time, random_seed=test_seed)
        # test_env = Environment(sess = sess ,name=name, agent=agent, game_instance=game_instance, network=network, n_step=N_STEPS, random_seed=test_seed)
        test_env.log_server = parameter_server
        # NOTE(review): restore a checkpoint here before evaluating, e.g.:
        # parameter_server.load_model(sess=sess, step=15, model_path=<checkpoint>/model.ckpt)
        sess.run(tf.global_variables_initializer())
# In[ ]:
def plot_priority(replaymemory):
    """Plot the priority of every stored transition in the sum-tree."""
    n_stored = len(replaymemory)
    capacity = replaymemory.tree.capacity
    first_leaf = capacity - 1  # leaves start after the internal tree nodes
    plt.plot(replaymemory.tree.tree[first_leaf:first_leaf + n_stored])
def save_gif10(env):
    """Play 10 test episodes on `env` and save all frames as one GIF.

    Bug fix: the `env` parameter was ignored and the global `test_env` was
    used instead.
    """
    frames = []
    for i in range(10):
        buff = []
        val = env.test_agent(gif_buff=buff)
        print("REWARD:", val[0])
        frames = frames + buff
    frames[0].save('gifs/test.gif', save_all=True, append_images=frames[1:],
                   optimize=False, duration=40*4, loop=0)
def plot_conv(env, s1):
    """Show the first conv layer's channel activations for one state.

    NOTE(review): runs on the *global* `sess`, not one tied to `env` — confirm
    this is intentional.
    """
    activations = sess.run(env.network.conv1, {env.network.state1_: [s1]})[0]
    feature_maps = activations[-1]
    print(feature_maps.shape)
    fig, axes = plt.subplots(4, 8, figsize=(20, 15))
    channels_first = feature_maps.transpose((2, 0, 1))
    for axis, channel in zip(axes.ravel(), channels_first):
        axis.imshow(channel)
def plot_conv_onw(env, s1):
    """Show the first conv layer's channel activations (whole output, no frame
    selection) for one state; uses the global `sess`."""
    activations = sess.run(env.network.conv1, {env.network.state1_: [s1]})[0]
    print(activations.shape)
    fig, axes = plt.subplots(4, 8, figsize=(20, 15))
    channels_first = activations.transpose((2, 0, 1))
    for axis, channel in zip(axes.ravel(), channels_first):
        axis.imshow(channel)
def plot_q_learning(env, s1):
    """Bar-plot learning-network Q-values for each state in the batch.

    Returns the Q-value array.
    """
    q_value = env.network.get_qvalue_learning(sess, s1)
    fig, axes = plt.subplots(10, figsize=(20, 20))
    for axis, row in zip(axes.ravel(), q_value):
        axis.bar(range(len(row)), row)
    return q_value
def plot_q_target(env, s1):
    """Bar-plot target-network Q-values for each state; returns the Q array."""
    q_value = env.network.get_qvalue_target(sess, s1)
    fig, axes = plt.subplots(10, figsize=(20, 20))
    for axis, row in zip(axes.ravel(), q_value):
        axis.bar(range(len(row)), row)
    return q_value
def plot_q_softmax(env, s1):
    """Bar-plot the softmax policy over Q-values for each state; returns it."""
    policy = env.network.get_policy(sess, s1)
    fig, axes = plt.subplots(10, figsize=(20, 20))
    for axis, row in zip(axes.ravel(), policy):
        axis.bar(range(len(row)), row)
    return policy
def plot_diff_qvalue(env, s1):
    """Bar-plot the per-action (target - learning) Q-value gap for each state."""
    q_learn = env.network.get_qvalue_learning(sess, s1)
    q_targ = env.network.get_qvalue_target(sess, s1)
    fig, axes = plt.subplots(10, figsize=(20, 20))
    for axis, diff in zip(axes.ravel(), q_targ - q_learn):
        axis.bar(range(len(diff)), diff)
def plot_s1(s1):
    """Show a 10x5 grid of state frames (one row per state)."""
    fig, axes = plt.subplots(10, 5, figsize=(20, 20))
    for axis_row, state in zip(axes, s1):
        for axis, frame in zip(axis_row, state):
            axis.imshow(frame)
def plot_s1_one(s1):
    """Show each state in `s1` on its own subplot row."""
    fig, axes = plt.subplots(len(s1), figsize=(20, 20))
    for axis, state in zip(axes, s1):
        axis.imshow(state)
def plot_tderror(env, s1, action, s2_one, s2_adv, r_one, r_adv, isdemo, isterminal):
    """Bar-plot the per-sample total TD error for a batch; returns the vector."""
    q_next_adv = env.network.get_qvalue_max_learningaction(sess, s2_adv)
    q_next_one = env.network.get_qvalue_max_learningaction(sess, s2_one)
    not_terminal = np.ones((len(isterminal),)) - isterminal
    target_one = r_one + GAMMA * q_next_one * not_terminal
    target_adv = r_adv + GAMMA ** N_ADV * q_next_adv * not_terminal
    actions = [int(a) for a in action]
    weights = np.ones((len(actions),))
    losses = env.network.get_loss(sess, s1, actions, target_one, target_adv, isdemo, weights)
    plt.bar(range(len(weights)), losses[-1])
    return losses[-1]
def plot_loss_one(env, s1, action, s2_one, s2_adv, r_one, r_adv, isdemo, isterminal):
    """Print and bar-plot the per-sample 1-step loss component for a batch."""
    q_next_adv = env.network.get_qvalue_max_learningaction(sess, s2_adv)
    q_next_one = env.network.get_qvalue_max_learningaction(sess, s2_one)
    not_terminal = np.ones((len(isterminal),)) - isterminal
    target_one = r_one + GAMMA * q_next_one * not_terminal
    target_adv = r_adv + GAMMA ** N_ADV * q_next_adv * not_terminal
    actions = [int(a) for a in action]
    weights = np.ones((len(actions),))
    losses = env.network.get_loss(sess, s1, actions, target_one, target_adv, isdemo, weights)
    print(losses[0])
    print(np.mean(losses[0]))
    plt.bar(range(len(actions)), losses[0])
def plot_loss_adv(env, s1, action, s2_one, s2_adv, r_one, r_adv, isdemo, isterminal):
    """Print and bar-plot the per-sample n-step loss component for a batch."""
    q_next_adv = env.network.get_qvalue_max_learningaction(sess, s2_adv)
    q_next_one = env.network.get_qvalue_max_learningaction(sess, s2_one)
    not_terminal = np.ones((len(isterminal),)) - isterminal
    target_one = r_one + GAMMA * q_next_one * not_terminal
    target_adv = r_adv + GAMMA ** N_ADV * q_next_adv * not_terminal
    actions = [int(a) for a in action]
    weights = np.ones((len(actions),))
    losses = env.network.get_loss(sess, s1, actions, target_one, target_adv, isdemo, weights)
    print(losses[1])
    print(np.mean(losses[1]))
    plt.bar(range(len(actions)), losses[1])
def plot_losstotal(env, s1, action, s2_one, s2_adv, r_one, r_adv, isdemo, isterminal):
    """Print the scalar total loss for a batch of transitions."""
    q_next_adv = env.network.get_qvalue_max_learningaction(sess, s2_adv)
    q_next_one = env.network.get_qvalue_max_learningaction(sess, s2_one)
    not_terminal = np.ones((len(isterminal),)) - isterminal
    target_one = r_one + GAMMA * q_next_one * not_terminal
    target_adv = r_adv + GAMMA ** N_ADV * q_next_adv * not_terminal
    actions = [int(a) for a in action]
    weights = np.ones((len(actions),))
    total = env.network.get_losstotal(sess, s1, actions, target_one, target_adv, isdemo, weights)
    print(total)
def plot_loss_class(env, s1, action, s2_one, s2_adv, r_one, r_adv, isdemo, isterminal):
    """Print and bar-plot the per-sample large-margin (classification) loss."""
    q_next_adv = env.network.get_qvalue_max_learningaction(sess, s2_adv)
    q_next_one = env.network.get_qvalue_max_learningaction(sess, s2_one)
    not_terminal = np.ones((len(isterminal),)) - isterminal
    target_one = r_one + GAMMA * q_next_one * not_terminal
    target_adv = r_adv + GAMMA ** N_ADV * q_next_adv * not_terminal
    actions = [int(a) for a in action]
    weights = np.ones((len(actions),))
    losses = env.network.get_loss(sess, s1, actions, target_one, target_adv, isdemo, weights)
    print(losses[2])
    plt.bar(range(len(actions)), losses[2])
def plot_freq_sample(replaymemory):
    """Sample the prioritized memory 10k times and plot per-slot draw counts.

    Useful for eyeballing the prioritization distribution.
    (Removed an unused `demo_r` accumulator.)
    """
    idx = []
    for _ in range(10000):
        tree_idx, data, _ = replaymemory.sample(1, 0.5)
        idx.append(tree_idx)
    idx = np.array(idx).reshape((-1))
    print(idx)
    count = np.zeros((len(replaymemory),))
    for i in idx:
        # Tree indices are leaf positions; shift by capacity to get data slots.
        count[i - replaymemory.tree.capacity] += 1
    plt.plot(count)
def play_games(env, t=50):
kill,reward,step = [],[],[]
for i in range(t):
r,f, d,k,t,s = env.test_agent()
kill.append(k)
reward.append(r)
step.append(s)
return np.array(kill), np.array(reward), np.array(step)
def plot_filter_conv1(env):
    """Visualize the first conv layer's 6x6 kernels as a 3x16 image grid."""
    weights_conv1 = env.network.get_weights_learngin(sess)[0]
    fig, axes = plt.subplots(3, 16, figsize=(20, 20))
    kernels = weights_conv1.transpose(2, 3, 0, 1).reshape(-1, 6, 6)
    for axis, kernel in zip(axes.ravel(), kernels):
        axis.imshow(kernel)
# In[ ]:
# data_range = range(0,10)
# # data_range = range(6000,6010)
# demo_s,demo_r_one,demo_r_adv, demo_s2_one, demo_s2_adv, demo_a,demo_t,demo_d = [],[],[],[],[],[],[],[]
# for i in data_range:
# demo_s.append(replaymemory.tree.data[i][0])
# demo_s2_one.append(replaymemory.tree.data[i][2])
# demo_s2_adv.append(replaymemory.tree.data[i][3])
# demo_a.append(replaymemory.tree.data[i][1])
# demo_r_one.append(replaymemory.tree.data[i][4])
# demo_r_adv.append(replaymemory.tree.data[i][5])
# demo_t.append(replaymemory.tree.data[i][6])
# demo_d.append(replaymemory.tree.data[i][7])
# In[ ]:
# plot_losstotal(env=imitation_env, s1=demo_s,action=demo_a,s2_one=demo_s2_one, s2_adv=demo_s2_adv, \
# r_one=demo_r_one, r_adv=demo_r_adv ,isterminal=demo_t,isdemo=demo_d)
# for i,d in enumerate(zip(demo_a, demo_r_one)):
# print(i,d[0],d[1])
# plot_tderror(env=environments[0], s1=demo_s,action=demo_a,s2_one=demo_s2_one, s2_adv=demo_s2_adv, \
# r_one=demo_r_one, r_adv=demo_r_adv ,isterminal=demo_t,isdemo=demo_d)
# plot_loss_one(env=imitation_env, s1=demo_s,action=demo_a,s2_one=demo_s2_one, s2_adv=demo_s2_adv, \
# r_one=demo_r_one, r_adv=demo_r_adv ,isterminal=demo_t,isdemo=demo_d)
# hoge = plot_q_learning(env=environments[0], s1=demo_s)
# kills, rewards,steps = play_games(test_env,t=100)
# steps[rewards < 0] = 100
# print(np.mean(kills),"+-",np.var(kills))
# print(np.mean(rewards),"+-",np.var(rewards))
# print(np.mean(steps),"+-",np.var(steps))
# In[ ]:
# tf.set_random_seed(0)
# config = tf.ConfigProto(gpu_options = tf.GPUOptions(visible_device_list=USED_GPU))
# config.gpu_options.allow_growth = True
# config.log_device_placement = False
# config.allow_soft_placement = True
# sess = tf.Session(config=config)
# parameter_server = ParameterServer(sess,LOGDIR)
# starttime = datetime.now().timestamp()
# end_time = (datetime.now() + timedelta(minutes=60)).timestamp()
# coordinator = tf.train.Coordinator()
# environments, threads = [], []
# name = "test"
# game_instance=GameInstanceBasic(DoomGame(),name=name,n_bots=1,config_path=CONFIG_FILE_PATH, reward_param=REWARDS, steps_update_origin=10,timelimit=2)
# # game_instance=GameInstanceSimpleBasic(DoomGame(),name=name,config_path=CONFIG_FILE_PATH)
# network = NetworkLocal(name, parameter_server)
# agent = Agent(network)
# test_env = Environment(sess = sess ,name=name, agent=agent, game_instance=game_instance, network=network, start_time=starttime, end_time=end_time, random_seed=0)
# test_env.log_server = parameter_server
# test_env.rewards_detail = []
# thread_test = threading.Thread(target=test_env.run_test, args=(coordinator,))
# In[ ]:
# NOTE(review): this guard duplicates the earlier `if __name__=="test"` section —
# presumably only one of the two "test" sections is meant to be active; confirm.
if __name__ == "test":
    MODEL_PATH = "./models/model_test/model.ckpt"  # overrides the module-level MODEL_PATH
    TEST_GRADS = []
    config = tf.ConfigProto(gpu_options = tf.GPUOptions(visible_device_list=USED_GPU))
    config.log_device_placement = False
    config.allow_soft_placement = True
    sess = tf.Session(config=config)
    starttime = datetime.now().timestamp()
    end_time = (datetime.now() + timedelta(minutes=15)).timestamp()
    with tf.device('/gpu:0'):
        parameter_server = ParameterServer(sess,LOGDIR)
        parameter_server.load_model(sess=sess, model_path=MODEL_PATH, step=15)
        name = "test"
        game_instance = GameInstanceSimpleDeathmatch(DoomGame(),name=name,n_bots=1,config_path=CONFIG_FILE_PATH, reward_param=REWARDS, steps_update_origin=10,timelimit=2)
        network = NetworkLocal(name, parameter_server)
        agent = Agent(network)
        test_env = Environment(sess = sess ,name=name, agent=agent, game_instance=game_instance, network=network, start_time=starttime, end_time=end_time)
        GIF_BUFF = []
        REWARD_BUFF = []
        # NOTE(review): 5-value unpack here, while play_games unpacks 6 values
        # from test_agent — confirm test_agent's return arity.
        r,f,d,_,imgs = test_env.test_agent(gif_buff=GIF_BUFF,reward_buff=REWARD_BUFF)
        GIF_BUFF[0].save('gifs/test.gif',save_all=True, append_images=GIF_BUFF[1:], optimize=False, duration=40*4, loop=0)
# In[ ]:
# Manual smoke test of the game wrapper: step once, print position deltas,
# show the preprocessed screen, and respawn if dead.
if __name__ == "test_game_instance":
    environments[0].game.new_episode()
    pre_x = 0
    pre_y = 0
    print(environments[0].game.get_pos_x(),"diff:",environments[0].game.get_pos_x()-pre_x, ",", environments[0].game.get_pos_y(),"diff:",environments[0].game.get_pos_y()-pre_y)
    pre_x = environments[0].game.get_pos_x()
    pre_y = environments[0].game.get_pos_y()
    # Press buttons 3 and 5 for FRAME_REPEAT tics and print the raw reward.
    print(environments[0].game.make_action([0,0,0,1,0,1], FRAME_REPEAT))
    # print(environments[0].game.game)
    plt.imshow(environments[0].preprocess( environments[0].game.get_screen_buff()))
    if(environments[0].game.is_player_dead()):
        environments[0].game.respawn_player()
# In[ ]:
# Load every pickled per-match reward log from the test playlog directory,
# sorted by filename, as (filename, log) pairs for the analysis helpers below.
if __name__ == "check_rewards":
    files = os.listdir("./playlogs/playlog_test/")
    files.sort()
    rewards = []
    for f in files[:]:
        with open(os.path.join("./playlogs/playlog_test/", f), 'rb') as file:
            rewards.append((f, pickle.load(file)))
def make_data(rewards_all):
    """Convert per-match reward logs into pandas DataFrames.

    `rewards_all` is a list of (filename, log) pairs where each log entry is a
    (step, reward_dict) tuple. Returns one DataFrame per match with columns
    dist / frag / healthloss / suicide.
    """
    frames = []
    for _, reward_log in rewards_all:
        dists, frags, healthlosses, suicides = [], [], [], []
        for _, r in reward_log:
            dists.append(r['dist'])
            frags.append(r['frag'])
            healthlosses.append(r['healthloss'])
            suicides.append(r['suicide'])
        frames.append(pd.DataFrame({'dist': dists, 'frag': frags,
                                    'healthloss': healthlosses, 'suicide': suicides}))
    return frames
def plot_rewards_all(rewards_all):
    """Plot the per-match total of each reward component across all matches.

    :param rewards_all: iterable of DataFrames as produced by make_data().
    :return: the matplotlib Figure holding the five sub-plots.
    """
    dist_all = [sum(r['dist'].values) for r in rewards_all]
    frag_all = [sum(r['frag'].values) for r in rewards_all]
    healthloss_all = np.array( [sum(r['healthloss'].values) for r in rewards_all])
    # Outlier guard: zero out matches whose accumulated health-loss penalty
    # falls below -100 (presumably corrupted logs -- TODO confirm threshold).
    healthloss_all= np.where(healthloss_all < -100, 0, healthloss_all)
    suicide_all = [sum(r['suicide'].values) for r in rewards_all]
    total = [sum(r) for r in zip(dist_all, frag_all, healthloss_all, suicide_all)]
    f = plt.figure()
    f.subplots_adjust(wspace=0.4, hspace=0.6)
    ax_dist = f.add_subplot(2,3,1)
    ax_frag = f.add_subplot(2,3,2)
    ax_healthloss = f.add_subplot(2,3,3)
    ax_suicide = f.add_subplot(2,3,4)
    ax_total = f.add_subplot(2,3,5)
    ax_dist.set_title("reward_dist")
    # Bug fix: the frag sub-plot title was misspelled "rewad_frag".
    ax_frag.set_title("reward_frag")
    ax_healthloss.set_title("reward_healthloss")
    ax_suicide.set_title("reward_suicide")
    ax_total.set_title("reward_total")
    ax_dist.plot(dist_all)
    ax_frag.plot(frag_all)
    ax_healthloss.plot(healthloss_all)
    ax_suicide.plot(suicide_all)
    ax_total.plot(total)
    return f
def plot_rewards_match(rewards):
    """Plot each reward component over the steps of a single match.

    :param rewards: one DataFrame as produced by make_data().
    :return: the matplotlib Figure holding the five sub-plots.
    """
    reward_dist = rewards['dist'].values
    reward_frag = rewards['frag'].values
    reward_healthloss = rewards['healthloss'].values
    reward_suicide = rewards['suicide'].values
    # Element-wise (numpy) sum of the four components per step.
    reward_total = reward_frag + reward_healthloss + reward_suicide + reward_dist
    f = plt.figure()
    f.subplots_adjust(wspace=0.4, hspace=0.6)
    ax_dist = f.add_subplot(2,3,1)
    ax_frag = f.add_subplot(2,3,2)
    ax_healthloss = f.add_subplot(2,3,3)
    ax_suicide = f.add_subplot(2,3,4)
    ax_total = f.add_subplot(2,3,5)
    ax_dist.set_title("reward_dist")
    # Bug fix: the frag sub-plot title was misspelled "rewad_frag".
    ax_frag.set_title("reward_frag")
    ax_healthloss.set_title("reward_healthloss")
    ax_suicide.set_title("reward_suicide")
    ax_total.set_title("reward_total")
    ax_dist.plot(reward_dist)
    ax_frag.plot(reward_frag)
    ax_healthloss.plot(reward_healthloss)
    ax_suicide.plot(reward_suicide)
    ax_total.plot(reward_total)
    return f
# In[ ]:
def tabulate_events(dpath):
    """Collect scalar summaries from every TensorBoard run directory under
    *dpath*, grouped per tag across runs.

    :param dpath: directory whose sub-directories each hold one run's events.
    :return: (values, steps, wall_times) -- three dicts mapping each scalar
        tag to a list of per-event lists, one entry per run, aligned by step.
    """
    accumulators = []
    for run_name in os.listdir(dpath):
        accumulators.append(EventAccumulator(os.path.join(dpath, run_name)).Reload())
    # All runs must expose exactly the same scalar tags.
    reference_tags = accumulators[0].Tags()['scalars']
    for accumulator in accumulators:
        assert accumulator.Tags()['scalars'] == reference_tags
    values = defaultdict(list)
    step_ids = defaultdict(list)
    wall_clocks = defaultdict(list)
    for tag in reference_tags:
        per_run_events = [accumulator.Scalars(tag) for accumulator in accumulators]
        for grouped in zip(*per_run_events):
            # Events grouped across runs must refer to the same step.
            assert len({event.step for event in grouped}) == 1
            values[tag].append([event.value for event in grouped])
            step_ids[tag].append([event.step for event in grouped])
            wall_clocks[tag].append([event.wall_time for event in grouped])
    return values, step_ids, wall_clocks
| [
"kikuchi.yutaro.s1@dc.tohoku.ac.jp"
] | kikuchi.yutaro.s1@dc.tohoku.ac.jp |
986e4045b106ad579041853e9891735e06800efd | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/rottenOranges_20200810191228.py | 2e8869821ab7c96fc3b8d53eff3ef2e939c3ffb4 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 492 | py | def markrotten(i,j,row,column)
def oranges(grid):
    """Seed the rotting simulation: call markrotten() for every rotten
    orange (cell value 2) found in *grid*.

    :param grid: 2-D list where 0 = empty, 1 = fresh orange, 2 = rotten.
    :return: 0 when the grid is empty, otherwise None.
    """
    # loop through the grid
    # if there is no fresh orange just return 0
    # if there is a two check all its four neighbours
    # recursive call
    # count when a one becomes a two
    if not grid or not grid[0]:
        return 0
    row = len(grid)
    column = len(grid[0])
    for i in range(len(grid)):
        # Bug fix: the original iterated ``range(len(i))``, which raises
        # TypeError because ``i`` is an int; iterate over the row's cells.
        for j in range(len(grid[i])):
            if grid[i][j] == 2:
                markrotten(i, j, row, column, grid)
oranges( [[2,1,1],[0,1,1],[1,0,1]])
| [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
275b1304b2bd25a526a921b4065195cf4c92c6fd | 00d68c84c62f6708560e6f80344bf4f21ff6de1c | /jos_dup_wrd.py | 68b337e3afc3fab4f08b7460918ad5cdd053f0ed | [] | no_license | MimiDumpling/Mentors | 6b5f0dca9e8abdad595163fd3ea87251240a5ded | 231af0b667e647fdbaa775eb36ed1904b193b3fd | refs/heads/master | 2023-02-03T22:00:40.171114 | 2019-08-30T22:01:23 | 2019-08-30T22:01:23 | 102,072,578 | 1 | 0 | null | 2023-01-25T04:44:35 | 2017-09-01T04:03:22 | JavaScript | UTF-8 | Python | false | false | 578 | py | """
- takes a word
- return true if all letters are unique
- false if duplicate letters
"""
def dups(word):
    """Check whether every letter of *word* is unique, ignoring case.

    :param word: string to inspect.
    :return: (True, "HAY") when no letter repeats, else (False, "BOOOO")
        as soon as the first duplicate is seen.
    """
    seen = set()
    for character in word.lower():
        if character in seen:
            return False, "BOOOO"
        seen.add(character)
    return True, "HAY"
result, msg = dups("Ss")
print result, msg
"""
Ransom Note:
- string1 is the ransom you want to write
- string2 is the magazine you have to cut out the letters (or words -- this is harder)
- return True if there's enough magazine letters/words
- else: False
"""
| [
"tienmiminguyen@gmail.com"
] | tienmiminguyen@gmail.com |
300d0550b3d9cd1ded41e503e66244eb8077a652 | 753b2846eeaf1c1642257d268db272d6fb1dfd8b | /pytorch/dqn_agent.py | 1c1d14044a5a68d5cacf205c610e9716c6e0716e | [] | no_license | JonasLeininger/deep-q-network-banana-navigation | bb2e199a25192f1475c366db0f61abcbec04d02d | 86b02edbd46df1a7cc1373e8bc28be04740bf5ac | refs/heads/master | 2020-05-22T22:41:13.290534 | 2019-07-17T04:46:48 | 2019-07-17T04:46:48 | 186,550,350 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,762 | py | import os
import numpy as np
import torch
import torch.nn.functional as F
from pytorch.dqn import DQN
from pytorch.replay_memory import ReplayMemory
class DQNAgent():
    """Deep Q-Network agent: epsilon-greedy policy over a local Q-network,
    trained from sampled replay memory against a separate target network."""
    def __init__(self, state_size: int, action_size: int):
        """Create the networks, optimizer and replay buffer.

        :param state_size: dimensionality of the observation vector.
        :param action_size: number of discrete actions.
        """
        # Replay-buffer and minibatch hyper-parameters.
        self.buffer_size = int(1e5)
        self.batch_size = 64
        # NOTE(review): ``tau`` and ``update_every`` are defined but never
        # used below -- the target network is hard-copied on every replay().
        self.tau = 1e-3
        self.update_every = 4
        self.state_size = state_size
        self.action_size = action_size
        # Discount factor and epsilon-greedy exploration parameters.
        self.gamma = 0.99
        self.epsilon = 1.0
        # NOTE(review): epsilon_min/epsilon_decay are never applied in this
        # class, so epsilon stays at 1.0 unless decayed by the caller.
        self.epsilon_min = 0.01
        self.epsilon_decay = 0.995
        self.learning_rate = 0.0005
        self.checkpoint_path = "checkpoints/pytorch/dqn/cp-{epoch:04d}.pt"
        # Prefer GPU when available.
        self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        # Local (online) Q-network and its optimizer.
        self.qnetwork = DQN(self.state_size, self.action_size)
        self.qnetwork.to(self.device)
        self.optimizer = torch.optim.Adam(self.qnetwork.parameters(), lr=self.learning_rate)
        # Target network, initialised to the online weights.
        # (Attribute name keeps the original 'traget' typo; renaming would
        # break external users of the attribute.)
        self.tragetnetwork = DQN(self.state_size, self.action_size)
        self.tragetnetwork.to(self.device)
        self.tragetnetwork.load_state_dict(self.qnetwork.state_dict())
        self.memory = ReplayMemory(self.action_size, self.buffer_size, self.batch_size)
    def step(self, state, action, reward, next_state, done):
        '''
        Save one experience tuple in replay memory.
        Params
        :param state: observation before the action
        :param action: action taken
        :param reward: reward received
        :param next_state: observation after the action
        :param done: whether the episode terminated
        :return: None
        '''
        self.memory.add(state, action, reward, next_state, done)
    def act(self, state):
        '''
        Epsilon-greedy action choice for the given state.
        Params
        :param state: observation (numpy array) to act on
        :return: action index
        '''
        state = torch.from_numpy(state).float().unsqueeze(0).to(self.device)
        # Explore with probability epsilon ...
        if np.random.rand() <= self.epsilon:
            return np.random.randint(0, self.action_size)
        # ... otherwise exploit the greedy action of the online network.
        act_values = self.qnetwork(state)
        return np.argmax(act_values.cpu().data.numpy())
    def replay(self):
        '''
        Train the online network on one sampled minibatch (DQN update with
        a separate target network), then sync the target network.
        Params
        :return: None
        '''
        states, actions, rewards, next_states, dones = self.memory.sample()
        # Bootstrapped TD target: r + gamma * max_a' Q_target(s', a'),
        # zeroed at episode boundaries via the (1 - dones) mask.
        targetnetwork_outputs = self.tragetnetwork(next_states).max(dim=1)[0].unsqueeze(1)
        targets = rewards + (self.gamma * targetnetwork_outputs)*(1 - dones)
        expected = self.qnetwork(states).gather(1, actions)
        # NOTE(review): ``predicts`` is computed but never used.
        predicts = self.qnetwork(states)
        self.loss = F.mse_loss(expected, targets)
        self.optimizer.zero_grad()
        self.loss.backward()
        self.optimizer.step()
        self.update_target_network()
    def update_target_network(self):
        # Hard copy of the online weights into the target network.
        self.tragetnetwork.load_state_dict(self.qnetwork.state_dict())
    def save_checkpoint(self, epoch: int):
        # Persist model/optimizer state plus the last loss under a path
        # derived from the epoch number.
        torch.save({
            'epoch': epoch,
            'model_state_dict': self.qnetwork.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
            'loss': self.loss
        }, self.checkpoint_path.format(epoch=epoch))
    def load_checkpoint(self, checkpoint: str):
        """Restore the online and target networks (and optimizer) from a
        file written by save_checkpoint(), leaving both in eval mode."""
        checkpoint = torch.load(checkpoint)
        self.qnetwork.load_state_dict(checkpoint['model_state_dict'])
        self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        self.loss = checkpoint['loss']
        self.qnetwork.eval()
        self.tragetnetwork.load_state_dict(checkpoint['model_state_dict'])
        self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        self.loss = checkpoint['loss']
        self.tragetnetwork.eval()
| [
"jonas.leininger@gmail.com"
] | jonas.leininger@gmail.com |
6165fd4fe24de731f158806ca62bf2c032c0ebf4 | eaa0f14677e111d3c2e83cebfdfcfd00b7575e61 | /radionator/radio/views/common.py | e7d7f8c6def12eae39dbebac76404fe0737784a9 | [
"MIT"
] | permissive | pierstoyanov/radionator | ae0f2ecfc18e8045a082c7b906db8fcbf4772f32 | 3c6212179ccd2e135727f2ce95fdd0e517ce1551 | refs/heads/main | 2023-07-16T15:46:58.539667 | 2021-09-07T16:58:36 | 2021-09-07T16:58:36 | 318,448,349 | 1 | 0 | MIT | 2020-12-15T16:07:20 | 2020-12-04T08:13:46 | JavaScript | UTF-8 | Python | false | false | 955 | py | from django.contrib.auth import get_user_model
from django.shortcuts import render
# Create your views here.
from django.views import View
from common.BackgroundMixin import BackgroundMixin
from common.CookieTestResultMixin import test_cookie
from radionator.profiles.models import Profile
RadioUser = get_user_model()
class About(BackgroundMixin, View):
    """Static "about" page view.

    Basic info about the site.  Background and cookie mixins don't work
    with views.View, so their logic is inlined manually in get() below.
    """
    def get(self, request):
        # Plant Django's test cookie so test_cookie() can report whether
        # the client accepts cookies.
        request.session.set_test_cookie()
        context = {
            'cookie_state': test_cookie(request),
        }
        # Authenticated users additionally get their profile background.
        if self.request.user.is_authenticated:
            profile = Profile.objects.get(user=self.request.user)
            context['background'] = profile.background
        return render(request, 'about.html', context)
    def post(self, request):
        # POST renders the same template without cookie/background context.
        return render(request, 'about.html')
| [
"0885095540gH!$!"
] | 0885095540gH!$! |
ac28490464c9de3e033a57f204f8d092c29bd4a1 | 8ddcd10b3f1edc78eeffa55dcf1562248dbee7ea | /encryption.py | 545a423aa38ca51f2fbbf89b4083f589c42f614a | [] | no_license | Perception12/EncryptionDecryption | fe660771e3b1ada0848e84d7fdf48412a76302ad | 415c97a6e9c659ed58957b23a2df8a33fa2efb96 | refs/heads/main | 2023-08-14T06:49:07.643362 | 2021-09-30T04:11:01 | 2021-09-30T04:11:01 | 411,842,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,505 | py | import tkinter as tk
import random
import string
class Application(tk.Frame):
    """Tkinter GUI that encrypts a typed message with a randomly generated
    monoalphabetic substitution key (keyword-mixed alphabet)."""
    def __init__(self, master):
        """Build the widgets and derive the substitution alphabet."""
        super().__init__(master)
        self.master = master
        self.createWidget()
        self.grid()
        # Gets the Alphabets
        self.alphabet = list(string.ascii_letters)
        self.alphabetCopy = list(string.ascii_letters)
        # Draw a 10-letter keyword; retry until all its letters are unique.
        self.keyword = random.choices(self.alphabet, k=10)
        while not self.is_unique(self.keyword):
            self.keyword = random.choices(self.alphabet, k=10)
        self.alphaKey = []
        # Removes the keyword characters from the alphabet and add it to the front of the remainder
        for k in self.keyword:
            if k in self.alphabetCopy:
                self.alphabetCopy.remove(k)
                self.alphaKey.append(k)
        self.alphaKey += self.alphabetCopy
    def is_unique(self, word):
        """Return True when *word* (a sequence of characters) has no repeats."""
        if len(word) > len(set(word)):
            uniqueness = False
        else:
            uniqueness = True
        return uniqueness
    def encrypt(self):
        """Encrypt the text-box contents and display the result.

        Letters are substituted via alphaKey; a few punctuation marks map to
        sentinel characters.  Output format: '<length>$<ciphertext><keyword>'
        so a decryptor can recover the key and message length.
        """
        self.cryptedList = []
        # Gets the message
        self.message = list(self.messageEntry.get('1.0', tk.END).strip())
        # Iterate through the message and encrypt the message by swapping the characters
        for m in self.message:
            if m in self.alphabet:
                i = self.alphabet.index(m)
                self.cryptedList.append(self.alphaKey[i])
            elif m == ' ':
                self.cryptedList.append('#')
            elif m == ',':
                self.cryptedList.append('0')
            elif m == "'":
                self.cryptedList.append('1')
            elif m == '?':
                self.cryptedList.append('2')
            elif m == '.':
                self.cryptedList.append('3')
            # NOTE(review): literal digits in the input pass through here
            # unchanged, colliding with the digit sentinels above -- a
            # decryptor cannot distinguish the two.  Confirm intent.
            elif m not in self.alphabet:
                self.cryptedList.append(m)
        # print(self.alphabet)
        # print(self.alphaKey)
        n = str(len(self.cryptedList))
        # Join the keyword and the length of the crypted message for decryption purpose
        self.fullEncryption = n + '$' + ''.join(self.cryptedList) + ''.join(self.keyword)
        # Inserts the encrypted message to the ouput screen
        self.encryptedMessage.delete('1.0', tk.END)
        self.encryptedMessage.insert(tk.END, self.fullEncryption)
    def createWidget(self):
        """Create and lay out all widgets: input box, Encrypt button,
        encrypted-output box and Exit button."""
        self.screenFrame = tk.LabelFrame(self, text="Enter your message")
        self.messageEntry = tk.Text(self.screenFrame, width=57, height=2)
        self.encryptBtn = tk.Button(self.screenFrame, text="Encrypt", command=self.encrypt, pady=5)
        self.encryptionFrame = tk.LabelFrame(self, text="Encryption")
        self.ELabel = tk.Label(self.encryptionFrame, text="Encrypted Message:")
        self.encryptedMessage = tk.Text(self.encryptionFrame, width=40, height=10)
        self.exit = tk.Button(self, text="Exit", command=self.master.destroy)
        self.screenFrame.grid(row=0, column=0, columnspan=3)
        self.messageEntry.grid(row=0, column=0, columnspan=3)
        self.encryptBtn.grid(row=1, column=2)
        self.encryptionFrame.grid(row=1, column=0, columnspan=3)
        self.ELabel.grid(row=0, column=0)
        self.encryptedMessage.grid(row=0, column=1, columnspan=2)
        tk.Label(self.encryptionFrame).grid(row=1)
        self.exit.grid(row=3, column=2)
if __name__ == "__main__":
root = tk.Tk()
root.title("Message Encryption")
app = Application(root)
app.mainloop()
| [
"kehindeadejumobi82@gmail.com"
] | kehindeadejumobi82@gmail.com |
6c7376c8231168cb83ab28cd66f7376c7363aa20 | 0b842bcb3bf20e1ce628d39bf7e11abd7699baf9 | /oscar/a/sys/platform/manager/blinky/__init__.py | 856452a17cd28db74867fb490c14dee2212dcaf8 | [] | no_license | afeset/miner2-tools | 75cc8cdee06222e0d81e39a34f621399e1ceadee | 81bcc74fe7c0ca036ec483f634d7be0bab19a6d0 | refs/heads/master | 2016-09-05T12:50:58.228698 | 2013-08-27T21:09:56 | 2013-08-27T21:09:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 574 | py | #
# Copyright Qwilt, 2012
#
# The code contained in this file may not be used by any other entities without explicit written permission from Qwilt.
#
# Author: shmulika
#
G_GROUP_NAME_PLATFORM_POWER_BLINKY_ADAPTOR = "power-blinky-adaptor"
G_GROUP_NAME_PLATFORM_FANS_BLINKY_ADAPTOR = "fans-blinky-adaptor"
G_GROUP_NAME_PLATFORM_TEMPERATURE_BLINKY_ADAPTOR = "temperature-blinky-adaptor"
G_GROUP_NAME_PLATFORM_MANAGER_BLINKY_ADAPTOR = "platform-manager-blinky-adaptor"
G_GROUP_NAME_PLATFORM_SOURCE_BLINKY_ADAPTOR = "source-blinky-adaptor"
| [
"afeset@gmail.com"
] | afeset@gmail.com |
91e528925024ee60a3e68517f50cb0ef2c2dc7e3 | c71cde9642375b9365c41055c7b84bbef24cbdac | /ptp_nni_l2mc_basecase.py | d7c8b3026588939ab38a61a10716aed64d081a67 | [] | no_license | lightthgil/autotest | 10e59bc814690b6b298f70eb6822e4ec1d23494e | c2efb3f6d4eca3567a46aca15611a295337a38c0 | refs/heads/master | 2020-08-13T04:55:04.339047 | 2019-10-13T23:50:13 | 2019-10-13T23:50:13 | 214,910,546 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 654 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/3/14 15:03
# @Author : Jiang Bo
# @Site :
# @File : ptp_nni_l2mc_basecase.py
# @Software: PyCharm
from ptp_l2mc_basecase import *
class PtpNniL2McTestBaseCase (PtpL2McTestBaseCase) :
    """
    PTP L2 multicast base test case specialised for NNI interfaces:
    delegates to the base-class helpers with the 'nni' interface mode.
    """
    def change_interface_mode(self):
        # Switch the interface under test into NNI mode.
        return self._change_interface_mode('nni')
    def change_interface_mode_resume(self):
        # Undo/restore the interface-mode change made above.
        return self._change_interface_mode_resume('nni')
if __name__ == "__main__" :
case = PtpNniL2McTestBaseCase('ne1', "PTP_INTERFACE_GE", 'ne2', "PTP_INTERFACE_GE")
case.init_redirection()
case.execute() | [
"light"
] | light |
d84f4b126ec7829d18faeee96555f92262bc30de | 940c1c34c824886ddd6636ad5954c4c6dc708da3 | /InfluencerData/base/accounts/migrations/0064_current_package_user_package_exipry.py | 4151b614a46bd46867a438d4a8e172d5f15489ce | [] | no_license | saurabh69912162/Influencer-data | ba54cf5e1115674382b78334a0869aa7ed673cf9 | 908eab307ddb625d78e88fe6ad9aa5f3fa9af9df | refs/heads/master | 2020-09-17T01:29:14.871696 | 2019-12-04T18:22:07 | 2019-12-04T18:22:07 | 223,946,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | py | # Generated by Django 2.2.4 on 2019-11-24 12:06
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ``package_exipry`` field (presumably an epoch timestamp for
    package expiry -- confirm) to ``current_package_user``."""
    dependencies = [
        ('accounts', '0063_myuser_new_user_notify'),
    ]
    operations = [
        migrations.AddField(
            model_name='current_package_user',
            # NOTE(review): field name keeps the original 'exipry' typo;
            # renaming would require a follow-up migration.
            name='package_exipry',
            field=models.BigIntegerField(blank=True, default=0, null=True),
        ),
    ]
| [
"48159530+saurabh69912162@users.noreply.github.com"
] | 48159530+saurabh69912162@users.noreply.github.com |
4edadd1ac8a0f5adebe4a40176f97daf50fa871d | 47835cc96e9f09611347d9463420ae44cb2cd2e3 | /test.py | 7435f28d76e034292ce0a6f4b3cef87d2eedbe5d | [] | no_license | shanahanjrs/LR35902 | b53fc4c41cfc55117e168622c60099a24f1cb6b0 | 545b1a71628f62f0b4dea044d8506d5f4243925e | refs/heads/master | 2020-11-25T07:12:29.067576 | 2019-12-26T05:03:26 | 2019-12-26T05:03:26 | 228,552,487 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 790 | py | #!/usr/bin/env python3
"""
main entrypoint
"""
from mb import MotherBoard
def main():
    """Smoke-test the emulated motherboard: dump CPU/memory state and
    exercise a few 8-bit and 16-bit load helpers."""
    mb = MotherBoard()
    # Some fake data to render
    j = [3, 3, 3, 3, 3, 1, 1, 3, 1, 1, 1, 1, 3, 1, 1, 3, 1, 3, 1, 1, 3, 3, 3, 1, 1]
    ##mb.gpu.map(0x0, j)
    print('==== INITIALIZED ====')
    print('==== cpu: ====')
    print(f'{mb.cpu}')
    print('==== mem ====')
    print(f'{mb.mem[:500]}')
    # Read the same addresses as bytes (ld8) and as 16-bit words (ld16).
    print(f'{mb.cpu.ld8(0x99)}')
    print(f'{mb.cpu.ld16(0x99)}')
    print(f'{mb.cpu.ld8(0x190)}')
    print(f'{mb.cpu.ld16(0x190)}')
    ##print(f'==== gpu ({mb.gpu.width}x{mb.gpu.height})({mb.gpu.resolution})====')
    ##print(f'{mb.gpu}')
    # Create test NOP sled just to start executing _something_
    nopsled = [0x00, 0x00, 0x00, 0x00, 0x00]
    # NOTE(review): ``j`` and ``nopsled`` are currently unused scaffolding.
if __name__ == '__main__':
main()
| [
"shanahan.jrs@gmail.com"
] | shanahan.jrs@gmail.com |
38a653ec34d2ca137957d5833d3e623c6ffdf1c9 | e101ed2b3e9df32f0e7f9173117d9d457f259fe1 | /Day04/part02.py | 59057e887486ad343461cd16dd6c822c16d6cdb8 | [] | no_license | MiConnell/AOC2020 | f4d5462b79ac738d2ee43524f362244c3178e580 | 3a8350b6b5b7d6bddc62425e2e240f9622a6a54f | refs/heads/master | 2023-02-03T04:35:58.164964 | 2020-12-15T07:42:38 | 2020-12-15T07:42:38 | 317,375,704 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,983 | py | import os
import re
from typing import Dict
from typing import List
from typing import Match
from typing import Union
file = os.path.join(os.path.dirname(__file__), "blob.txt")
REQUIRED = {
"byr",
"iyr",
"eyr",
"hgt",
"hcl",
"ecl",
"pid",
}
"""
byr (Birth Year) - four digits; at least 1920 and at most 2002.
iyr (Issue Year) - four digits; at least 2010 and at most 2020.
eyr (Expiration Year) - four digits; at least 2020 and at most 2030.
hgt (Height) - a number followed by either cm or in:
If cm, the number must be at least 150 and at most 193.
If in, the number must be at least 59 and at most 76.
hcl (Hair Color) - a # followed by exactly six characters 0-9 or a-f.
ecl (Eye Color) - exactly one of: amb blu brn gry grn hzl oth.
pid (Passport ID) - a nine-digit number, including leading zeroes.
cid (Country ID) - ignored, missing or not.
"""
def file_reader(file: str) -> List[str]:
    """Read *file* and return its blank-line-separated records as a list."""
    with open(file, "r") as handle:
        contents = handle.read()
    return contents.split("\n\n")
def value_checker(dct: Dict[str, str]) -> Union[bool, Match[str], None]:
    """Validate the individual passport fields in *dct*.

    Returns a truthy value exactly when every field passes the AoC 2020
    Day 4 rules; the falsy value mirrors the first failing check.
    """
    if not 1920 <= int(dct["byr"]) <= 2002:
        return False
    if not 2010 <= int(dct["iyr"]) <= 2020:
        return False
    if not 2020 <= int(dct["eyr"]) <= 2030:
        return False
    height = re.match(r"^(\d+)(cm|in)$", dct["hgt"])
    if not height:
        return height
    magnitude, unit = int(height[1]), height[2]
    if unit == "cm":
        if not 150 <= magnitude <= 193:
            return False
    elif not 59 <= magnitude <= 76:
        return False
    hair = re.match("^#[a-f0-9]{6}$", dct["hcl"])
    if not hair:
        return hair
    if dct["ecl"] not in ["amb", "blu", "brn", "gry", "grn", "hzl", "oth"]:
        return False
    # Passport id must be exactly nine digits (leading zeroes allowed).
    return re.match("^[0-9]{9}$", dct["pid"])
def solver(s: List[str]) -> int:
    """Count how many raw passport records in *s* are complete and valid."""
    valid_count = 0
    for record in s:
        # Records may span several lines; flatten to one space-separated row.
        flattened = record.replace("\n", " ").strip()
        fields = dict(pair.split(":") for pair in flattened.split(" "))
        # All required keys present AND every field passes validation.
        if fields.keys() >= REQUIRED and value_checker(fields):
            valid_count += 1
    return valid_count
if __name__ == "__main__":
print(solver(file_reader(file)))
| [
"connellmp@gmail.com"
] | connellmp@gmail.com |
b2c1be6d03658e2b794333b2d98e59fda98d2e05 | 7d97daf9b9f46d68bbe29441d8db554918dfcdc4 | /leetcode/StringtoInteger8.py | 5a117868d64f0d0ad26bb4ae61baff99e7332feb | [] | no_license | hundyoung/LeetCode | 9a56c4f078dcb4e875a6178c14665b7784c1a0a2 | 803e164d3a21b593cb89206b3a362c1ab1eb9abf | refs/heads/master | 2020-09-23T02:51:13.657444 | 2020-05-06T12:53:07 | 2020-05-06T12:53:07 | 225,383,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 664 | py | class Solution:
def myAtoi(self, str: str) -> int:
str1 = str.strip()
result = ""
for i in range(len(str1)):
char = str1[i]
if(i==0and (char=="+" or char=="-")):
result= result+char
elif char.isdigit():
result = result + char
else:
break
# print(str1)
try:
result = int(result)
result = min(2**31-1,result)
result = max((-2)**31,result)
return result
except:
return 0
if __name__ == '__main__':
solution = Solution()
print(solution.myAtoi("-5-")) | [
"285080301@qq.com"
] | 285080301@qq.com |
075b73b96d2de8a69d8a57d0af2dcb35b97451a2 | cb0a0d4630b50c56b7285364b42aa2d072a45480 | /bin/current.py | ec5b7e0694d38d0c5c94a984f0c1678747689186 | [
"BSD-3-Clause"
] | permissive | fatlinlin/python | 2b6b60a278d2dcc720fb82838d27e1659cd85bce | abd538ee925e2f5268199aa004ab6bb63b13fd16 | refs/heads/master | 2020-04-14T10:24:46.121789 | 2013-07-16T12:07:48 | 2013-07-16T12:07:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | py | import argparse
import logging
from efront import iohelpers as io
from efront import repo
def add_args(parser):
    """Register this tool's boolean command-line flags on *parser*."""
    flags = [
        ("-l", "--dir_list", "list available dirs"),
        ("-v", "--verbose", "control the output level"),
    ]
    for short_name, long_name, description in flags:
        parser.add_argument(short_name, long_name, help=description, action="store_true")
def run(args):
    """Log the current repository target and, on request, the available dirs."""
    # -v switches the shared log at c:/current to debug verbosity.
    io.setup_log("c:/current", logging.DEBUG if args.verbose else logging.INFO)
    logging.info("currently on {}".format(repo.get_current_target()))
    if args.dir_list:
        repo.list_dirs(logging.info)
def setup():
    """Build the CLI parser and return the parsed argument namespace."""
    # NOTE(review): "environnement" typo in the user-visible description.
    parser = argparse.ArgumentParser(description="Switch environnement")
    add_args(parser)
    args = parser.parse_args()
    return args
if __name__ == "__main__":
run(setup())
| [
"sbergot@efront.com"
] | sbergot@efront.com |
fe8ba77340b3ae2ed12efb74c11081fc1aa16a59 | 0d475571500afea47e36a94fef19f329a46261d3 | /detect_capital.py | 4970eebdc1e3538fec4f70c0acb44dcaa376c2d1 | [] | no_license | ankitupadhyay9/Python_Samples | ea7ecb7c2bd30cd530ec29e9fc631e0ccb7788c8 | f436588aee6876e2fc94b46a38340bb741fb4bb8 | refs/heads/master | 2021-04-28T13:35:50.530059 | 2018-03-16T22:43:08 | 2018-03-16T22:43:08 | 122,109,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,772 | py | '''
520. Detect Capital
Given a word, you need to judge whether the usage of capitals in it is right or not.
We define the usage of capitals in a word to be right when one of the following cases holds:
1.All letters in this word are capitals, like "USA".
2.All letters in this word are not capitals, like "leetcode".
3.Only the first letter in this word is capital if it has more than one letter, like "Google".
Otherwise, we define that this word doesn't use capitals in a right way.
Example 1:
Input: "USA"
Output: True
Example 2:
Input: "FlaG"
Output: False
Note: The input will be a non-empty word consisting of uppercase and lowercase latin letters.
'''
# Based on ASCII value of small and capital letters
# Capital letters have range of 65-90 and small letter have range of 97-122
class Solution(object):
    def detectCapitalUse(self, word):
        """Return True iff *word* uses capital letters correctly.

        Valid usages (LeetCode 520): all capitals ("USA"), no capitals
        ("leetcode"), or only the first letter capitalised ("Google").
        Words of length 0 or 1 are trivially valid.  Assumes latin letters
        only, per the problem statement.

        :type word: str
        :rtype: bool
        """
        # Fix: the original left Python-2 debug ``print`` statements in the
        # all-lowercase branch, which broke Python 3 and polluted stdout.
        if len(word) <= 1:
            return True
        # str methods replace the hand-rolled ASCII ord() range checks.
        return word.isupper() or word.islower() or word.istitle()
"noreply@github.com"
] | noreply@github.com |
e3b89978a46f073a7e64588a83c5a7135639bca4 | cbc817d47a448b048e5f50e90580a3b69560c757 | /thesisometer/wsgi.py | eb4c32a4392248af3d304a6995018ea57d19715e | [] | no_license | magnusmorton/thesisometer | 77411d213f766ea517b323e322c80b80519da053 | c7e3d138daa8f29f541e4e0ad0634d3ef178d99b | refs/heads/master | 2021-01-19T05:34:33.491353 | 2017-03-31T10:31:37 | 2017-03-31T10:31:37 | 58,637,908 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | """
WSGI config for thesisometer project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "thesisometer.settings")
application = get_wsgi_application()
| [
"magnusmorton@icloud.com"
] | magnusmorton@icloud.com |
168daad50747bc830f81b84fccc3f932b1cf4a0a | 3a2c6d04727e5150cd8308117fa3172f8e86a3db | /Fuentes/funciones_AG/naivef/generate.py | 634863794d19b439d75905749846e6c0eb454fa1 | [] | no_license | luisjimenezlinares/AGConsta | 2bc220d11f89aa3e3c86a12c0ee15ddcf0ad88ca | 9733bb27efc9dc29a9aeb215bf878249e336567c | refs/heads/main | 2023-06-16T06:20:28.958399 | 2021-07-07T17:17:58 | 2021-07-07T17:17:58 | 383,871,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | # -*- coding: utf-8 -*-
from deap import creator
import numpy as np
import random
def random_generate(L, S):
    """Build an Individual of *L* uniform samples drawn from the range
    [minimo(S), maximo(S)] spanned by the sample set *S*."""
    lower, upper = minimo(S), maximo(S)
    span = upper - lower
    genes = [random.random() * span + lower for _ in range(L)]
    return creator.Individual(genes)
def sample_generate(S):
    """Wrap one randomly chosen element of *S* as an Individual."""
    chosen = random.choice(S)
    return creator.Individual(chosen)
def minimo(S):
    """Return the smallest value over all rows of the 2-D sequence *S*."""
    return min(min(row) for row in S)
def maximo(S):
    """Return the largest value over all rows of the 2-D sequence *S*."""
    return max(max(row) for row in S)
| [
"luis.jimenez@uclm.es"
] | luis.jimenez@uclm.es |
31038ec0649c0dc0010fe5156fb4c056577c0dac | 006b5eab331c9bf02a6b9297918977bf77c86c84 | /static_pages/urls.py | fe0ad0fdc9e1ae04abde0a8b02da6a9ff4f2db9f | [] | no_license | donnywdavis/Django_Template | 06c3b03a7cc4265229f437dc8d05ab276eac19ff | dea8d41a746aa0854fc3bb41ddf70668f7966805 | refs/heads/master | 2021-01-23T20:12:50.900182 | 2015-09-30T00:33:54 | 2015-09-30T00:33:54 | 38,582,479 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 109 | py | from django.conf.urls import url
# Route the site root to the static pages index view.
# NOTE(review): string view references are a pre-Django-1.10 idiom.
urlpatterns = [
    url(r'^$', 'static_pages.views.index', name='home'),
]
| [
"donnywdavis@icloud.com"
] | donnywdavis@icloud.com |
1370807d55c14807b0c38bd17210d33afd2fd486 | deea011bef7b656eb579a9769d07f14478a32690 | /advanced-python/multi-threading/using-fork.py | 63a14fb4c9c33cde050881b61919018f42a390bd | [] | no_license | BhagyeshDudhediya/PythonPrograms | 38abcfd94e21b7cc8c323760a313cf3930bb07b2 | 185152b7644462d9dc75a1c91aedf2cf13da5015 | refs/heads/master | 2021-04-30T15:09:54.311064 | 2018-10-31T09:50:57 | 2018-10-31T09:50:57 | 121,232,711 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,048 | py | import os
# This will fail on Windows, where os.fork() is not available.
# fork() returns in BOTH processes: 0 in the child's context and the
# child's PID (a positive int) in the parent's context.
# After fork() the child receives a COPY of the parent's address space and
# resumes at the same point in the program, so both processes execute the
# statements below independently -- nothing is shared between them.
var = 100 # global variable
x=os.fork()
# 'hi' is printed twice: once in the child's context and once in the parent's.
print ('hi')
# NOTE(review): os.fork() raises OSError on failure rather than returning
# -1, so this branch is effectively dead code -- confirm before relying on it.
if (x==-1):
    print "Error while forking"
elif (x == 0):
    print "I am a child process"
    print "CHILD: "+str(x),str(os.getpid())
    # Change var in the child; the parent's copy is unaffected because the
    # processes have separate (copy-on-write) address spaces.
    # NOTE(review): ``global`` is a no-op at module level.
    global var
    var = 20
    print "Value of var after changing in child="+str(var)
else:
    print "I am a parent process"
    print "PARENT: "+str(x),str(os.getpid())
    print "Value of var in parent="+str(var)
| [
"dudhediyabv@gmail.com"
] | dudhediyabv@gmail.com |
eac0f7f17e26e4bcda87feea5ee4778d4e04131b | 18701cf61b7e46ef8f352798b07d895c8d1ba160 | /assignment3/inverted_index.py | 0fb3835b71760816b6322e8b818ae6108ac19e01 | [] | no_license | JuanIgnacioGil/datasci_course_materials | 776ae525ed7ebf8aff3e1245a8dc4e4f03ed7df6 | ebdada1aca68054eeed083f2af39942e20364c0a | refs/heads/master | 2021-01-20T19:14:21.401098 | 2014-08-15T15:36:53 | 2014-08-15T15:36:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 789 | py | import MapReduce
import sys
"""
Word Count Example in the Simple Python MapReduce Framework
"""
mr = MapReduce.MapReduce()
# =============================
# Do not modify above this line
def mapper(record):
    """Map phase: emit an intermediate (word, document_id) pair for every
    whitespace-separated word in the document.

    :param record: (document identifier, document contents) pair.
    """
    # key: document identifier
    # value: document contents
    key = record[0]
    value = record[1]
    words = value.split()
    for w in words:
        mr.emit_intermediate(w, key)
def reducer(key, list_of_values):
    """Reduce phase: emit (word, document_ids) with duplicates removed,
    preserving first-occurrence order.

    :param key: the word produced by the mapper.
    :param list_of_values: document ids, possibly with repeats.
    """
    # A companion set gives O(1) membership tests instead of the original
    # O(n) list scan per element (O(n^2) overall) while keeping order.
    seen = set()
    unique_docs = []
    for doc_id in list_of_values:
        if doc_id not in seen:
            seen.add(doc_id)
            unique_docs.append(doc_id)
    mr.emit((key, unique_docs))
# Do not modify below this line
# =============================
if __name__ == '__main__':
inputdata = open(sys.argv[1])
mr.execute(inputdata, mapper, reducer)
| [
"iwan.manjak@gmail.com"
] | iwan.manjak@gmail.com |
9a68a892ee7454b8952addae4614751aba7824f7 | 0789e92ff05448f511352982dbc9fcc8b481e806 | /kikar_hamedina/reporting/management/commands/export_commentator_data_to_csv.py | 728384914535440e35a1486adb779002cbeca29e | [] | no_license | danielhers/kikar-hamedina | 9645dfc554c004092cb44bb5189b63e9940b3801 | a838a2fc675ea7100c620477bae438f215c741f7 | refs/heads/dev | 2020-06-14T14:08:05.069290 | 2017-05-04T17:22:03 | 2017-05-04T17:22:03 | 75,173,287 | 0 | 0 | null | 2016-11-30T09:47:01 | 2016-11-30T09:47:01 | null | UTF-8 | Python | false | false | 6,338 | py | #!encoding utf-8
from csv import DictWriter
from django.utils import timezone
from facebook_feeds.management.commands.kikar_base_commands import KikarBaseCommand
from facebook_feeds.models import Facebook_Feed, Facebook_Status
DELIMITER = '~'
class Command(KikarBaseCommand):
    """Management command exporting per-feed (or total) like/comment
    statistics for 2014-2015 to a '~'-delimited CSV file.

    NOTE(review): this module is Python 2 only -- it relies on ``long()``
    and ``unicode()``, which do not exist on Python 3.
    """
    def add_arguments(self, parser):
        # --year restricts to one year; --feed to one feed id; --total
        # aggregates across all feeds instead of per feed.
        # NOTE(review): the --feed help text is a copy/paste of --year's.
        parser.add_argument('--year',
                            action='store',
                            dest='year',
                            default=None,
                            help="choose year to filter on"
                            )
        parser.add_argument('--feed',
                            action='store',
                            dest='feed',
                            default=None,
                            help="choose year to filter on"
                            )
        parser.add_argument('--total',
                            action='store_true',
                            dest='total',
                            default=False,
                            help="Get statistics for total of activity, not separated by feed"
                            )
    def build_commentator_data(self, statuses, year=None):
        """Aggregate like/comment statistics over *statuses*.

        Returns a dict with two sub-dicts, each keyed by
        likes_<year>/comments_<year> for 2014 and 2015 (or only *year*):
        'unique' holds sets of user ids, 'full' holds raw event counts.
        """
        years = ['2014', '2015'] if not year else [year]
        counter = dict()
        counter['unique'] = {'likes_2014': set(), 'likes_2015': set(), 'comments_2014': set(),
                             'comments_2015': set()}
        counter['full'] = {'likes_2014': long(), 'likes_2015': long(), 'comments_2014': long(),
                           'comments_2015': long()}
        for year in years:
            for status in statuses.filter(published__year=year).order_by('published'):
                if not status.is_comment:
                    # Unique likers/commentators: union the user ids per year.
                    counter['unique']['likes_%s' % year] = counter['unique'][
                        'likes_%s' % year].union(
                        set(status.likes.values_list('user', flat=True)))
                    counter['unique']['comments_%s' % year] = counter['unique'][
                        'comments_%s' % year].union(
                        set(status.comments.values_list('comment_from_id', flat=True)))
                # Raw totals count every like/comment event (also for comments).
                counter['full']['likes_%s' % year] += status.likes.count()
                counter['full']['comments_%s' % year] += status.comments.count()
                print('\t%s' % status.published)
        return counter
    def handle(self, *args, **options):
        """Entry point: collect statistics and write them to a
        timestamped CSV file in the working directory."""
        print('Start.')
        feed = options['feed']
        feeds = Facebook_Feed.objects.filter(id=feed) if feed else Facebook_Feed.objects.all()
        counter = dict()
        if options['total']:
            statuses = Facebook_Status.objects.all()
            counter['total'] = self.build_commentator_data(statuses, year=options['year'])
        else:
            for feed in feeds.order_by('id'):
                print(feed.id)
                statuses = feed.facebook_status_set.filter(is_comment=False)
                counter[feed.id] = self.build_commentator_data(statuses, year=options['year'])
        file_name = 'commentator_data_{}.csv'.format(timezone.now().strftime('%Y_%m_%d_%H_%M_%S'))
        with open(file_name, 'wb') as f:
            field_names = [
                'feed_id',
                'link',
                'mk_id',
                'mk_name',
                'mk_party',
                'likes_2014_unique',
                'likes_2015_unique',
                'likes_2014_full',
                'likes_2015_full',
                'comments_2014_unique',
                'comments_2015_unique',
                'comments_2014_full',
                'comments_2015_full'
            ]
            csv_data = DictWriter(f, fieldnames=field_names, delimiter=DELIMITER)
            headers = {field_name: field_name for field_name in field_names}
            csv_data.writerow(headers)
            if options['total']:
                # Single aggregate row over all feeds.
                row = {'mk_id': 'total',
                       'mk_name': 'total',
                       'mk_party': None,
                       'feed_id': 'total',
                       'link': None,
                       'likes_2014_unique': len(counter['total']['unique']['likes_2014']),
                       'likes_2015_unique': len(counter['total']['unique']['likes_2015']),
                       'likes_2014_full': counter['total']['full']['likes_2014'],
                       'likes_2015_full': counter['total']['full']['likes_2015'],
                       'comments_2014_unique': len(counter['total']['unique']['comments_2014']),
                       'comments_2015_unique': len(counter['total']['unique']['comments_2015']),
                       'comments_2014_full': counter['total']['full']['comments_2014'],
                       'comments_2015_full': counter['total']['full']['comments_2015']
                       }
                csv_data.writerow(row)
            else:
                # One row per feed, with MK metadata where a persona exists.
                for feed in feeds:
                    row = {'mk_id': feed.persona.object_id,
                           'mk_name': unicode(feed.persona.content_object.name).encode(
                               'utf-8') if feed.persona.content_object else feed.username,
                           'mk_party': unicode(feed.persona.content_object.current_party.name).encode(
                               'utf-8') if feed.persona.content_object else None,
                           'feed_id': feed.id,
                           'link': 'http://www.facebook.com/{}'.format(feed.vendor_id),
                           'likes_2014_unique': len(counter[feed.id]['unique']['likes_2014']),
                           'likes_2015_unique': len(counter[feed.id]['unique']['likes_2015']),
                           'likes_2014_full': counter[feed.id]['full']['likes_2014'],
                           'likes_2015_full': counter[feed.id]['full']['likes_2015'],
                           'comments_2014_unique': len(counter[feed.id]['unique']['comments_2014']),
                           'comments_2015_unique': len(counter[feed.id]['unique']['comments_2015']),
                           'comments_2014_full': counter[feed.id]['full']['comments_2014'],
                           'comments_2015_full': counter[feed.id]['full']['comments_2015']
                           }
                    csv_data.writerow(row)
        print('Done.')
| [
"yotammanor@gmail.com"
] | yotammanor@gmail.com |
58084fce6dc4b6242e670298ab622851749142f7 | 3e873bb5532685ac8d4fdb6d6266ab653b6beb44 | /extract_skills_from_docx.py | befd3adcdb6461593cfd7afe02b7c753061a9579 | [] | no_license | nelsonchacko98/resumeParser123123 | 5ed6d32904bf386c414dc8baf999353db19f21d2 | 16f3fd85a6ffe7b25090bae70f801ff4c8f8df44 | refs/heads/main | 2023-05-30T19:24:36.139428 | 2021-06-11T08:08:47 | 2021-06-11T08:08:47 | 375,951,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,510 | py |
import docx2txt
import nltk
nltk.download('stopwords')
# Known-skill lookup table -- in a real application this would be read
# from a CSV file or database.
# extract_skills() lower-cases each candidate token/ngram before testing
# membership, so every entry here must be lower case.  The original
# contained 'English' (capital E), which could therefore never match.
SKILLS_DB = [
    'machine learning',
    'data science',
    'python',
    'word',
    'excel',
    'english',
]
def extract_text_from_docx(docx_path):
    """Extract the raw text of a .docx file.

    Returns the document text with tab characters replaced by single
    spaces, or None when the document yields no text.
    """
    extracted = docx2txt.process(docx_path)
    if not extracted:
        return None
    return extracted.replace('\t', ' ')
def extract_skills(input_text):
    """Return the set of known skills (from SKILLS_DB) found in *input_text*.

    Tokenizes the text, drops stop words and punctuation, then matches
    both single tokens and bigrams/trigrams (e.g. "machine learning")
    against the skill database, case-insensitively.
    """
    stop_words = set(nltk.corpus.stopwords.words('english'))
    word_tokens = nltk.tokenize.word_tokenize(input_text)
    # Keep alphabetic, non-stop-word tokens.  BUG FIX: the original built
    # the stop-word-filtered list and then immediately overwrote it with a
    # punctuation-only filter, so stop words were never actually removed.
    filtered_tokens = [w for w in word_tokens
                       if w.isalpha() and w not in stop_words]
    # Generate bigrams and trigrams to catch multi-word skills.
    bigrams_trigrams = list(map(' '.join, nltk.everygrams(filtered_tokens, 2, 3)))
    found_skills = set()
    # Match single tokens against the skill database.
    for token in filtered_tokens:
        if token.lower() in SKILLS_DB:
            found_skills.add(token)
    # Match each bigram and trigram against the skill database.
    for ngram in bigrams_trigrams:
        if ngram.lower() in SKILLS_DB:
            found_skills.add(ngram)
    return found_skills
if __name__ == '__main__':
    # Demo: extract skills from a sample resume in the working directory.
    text = extract_text_from_docx('sample.docx')
    skills = extract_skills(text)
    print(skills)
"noreply@github.com"
] | noreply@github.com |
49c0d25246ab8592fad47c0d58fb7c76ee6c5fb9 | 4abfb04a77b4308c369e0cde7b2981ad661d8320 | /testing/test_calc.py | 572254a4ec3f98072d718e6a4df097ef79a503e4 | [] | no_license | yoyoxin/mytest | a7c9794174f80d5fbb0d0e6627b84e6df3959818 | 63f165b0a8a156258b337245dca7454de49025a7 | refs/heads/master | 2023-05-03T21:40:26.739109 | 2021-05-20T03:36:17 | 2021-05-20T03:36:17 | 368,783,914 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | py | import unittest
import sys
from pythontest.calc import Calc
class TestCalc(unittest.TestCase):
    """Unit tests for pythontest.calc.Calc."""
    def test_add_1(self):
        # 1 + 4 should give 5.
        self.calc=Calc()
        result = self.calc.add(1,4)
        self.assertEqual(5,result)
unittest.main()
| [
"40452261@qq.com"
] | 40452261@qq.com |
c4a856dae562817734657a78568df2f522ab4f82 | 1ae2f3b5a1b194f819a50b0503384ba22c58cbc0 | /prob039.py | 545a6ed8fe6fa3c63549644d1ffcdd0e115ff141 | [] | no_license | KR4705/project_euler | 1ed7482d15d518f654879ee848b3b25b0d20b120 | bda51b83456a64cfe87ee0135110456321262c5c | refs/heads/master | 2021-01-01T16:29:11.824907 | 2018-02-03T07:53:31 | 2018-02-03T07:53:31 | 97,843,489 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 476 | py | import time
start = time.time()
squares = []
for a in range(0,1000):
squares.append(a*a)
def triads(perimeter):
    """Count right-triangle integer triples (a, b, c) with a <= b and
    a + b + c == perimeter, using the module-level ``squares`` table."""
    return sum(
        1
        for leg_a in range(1, perimeter - 2)
        for leg_b in range(leg_a, perimeter - leg_a - 1)
        if squares[leg_a] + squares[leg_b] == squares[perimeter - leg_a - leg_b]
    )
answer = 0
maximum = 0
# Project Euler 39: find the perimeter <= 1000 that admits the most
# right-triangle solutions.
for perimeter in range(1,1001):
    num = triads(perimeter)
    if num > maximum:
        maximum = num
        answer = perimeter
runtime = time.time() - start
# NOTE(review): Python 2 print statement; this script predates Python 3.
print answer,runtime
"gamerdude9@gmail.com"
] | gamerdude9@gmail.com |
74a34341337584f7b311a8997ad78f4bb49b70cb | e6e63aaabebd62adacf389ea3ef3e10536460421 | /main-menu/setup.py | 0aad1baa59e77afa5c9911af257cea74603cd27f | [
"BSD-3-Clause"
] | permissive | jtpio/extension-examples | 0d45d7df977896f9b8ff904cd1d3d77935b4e5bc | 0ce165c4487e9f605e512cd7445e381fe870001b | refs/heads/master | 2022-11-17T19:59:05.915190 | 2022-10-17T12:49:22 | 2022-10-17T12:49:22 | 248,619,218 | 1 | 0 | BSD-3-Clause | 2020-03-19T22:41:15 | 2020-03-19T22:41:14 | null | UTF-8 | Python | false | false | 2,565 | py | """
jupyterlab_examples_main_menu setup
"""
import json
import sys
from pathlib import Path
import setuptools
HERE = Path(__file__).parent.resolve()
# The name of the project
name = "jupyterlab_examples_main_menu"
lab_path = (HERE / name.replace("-", "_") / "labextension")
# Representative files that should exist after a successful build
ensured_targets = [
str(lab_path / "package.json"),
str(lab_path / "static/style.js")
]
labext_name = "@jupyterlab-examples/main-menu"
data_files_spec = [
("share/jupyter/labextensions/%s" % labext_name, str(lab_path.relative_to(HERE)), "**"),
("share/jupyter/labextensions/%s" % labext_name, str("."), "install.json"),
]
long_description = (HERE / "README.md").read_text()
# Get the package info from package.json
pkg_json = json.loads((HERE / "package.json").read_bytes())
setup_args = dict(
name=name,
version=pkg_json["version"],
url=pkg_json["homepage"],
author=pkg_json["author"]["name"],
author_email=pkg_json["author"]["email"],
description=pkg_json["description"],
license=pkg_json["license"],
long_description=long_description,
long_description_content_type="text/markdown",
packages=setuptools.find_packages(),
install_requires=[],
zip_safe=False,
include_package_data=True,
python_requires=">=3.6",
platforms="Linux, Mac OS X, Windows",
keywords=["Jupyter", "JupyterLab", "JupyterLab3"],
classifiers=[
"License :: OSI Approved :: BSD License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Framework :: Jupyter",
],
)
# Wire the JupyterLab extension build into setuptools when
# jupyter-packaging is available; otherwise warn and only allow the
# metadata-only invocations (--name / --version).
try:
    from jupyter_packaging import (
        wrap_installers,
        npm_builder,
        get_data_files
    )
    # Rebuild the labextension on `pip install -e .` (develop mode).
    post_develop = npm_builder(
        build_cmd="install:extension", source_dir="src", build_dir=lab_path
    )
    setup_args["cmdclass"] = wrap_installers(post_develop=post_develop, ensured_targets=ensured_targets)
    setup_args["data_files"] = get_data_files(data_files_spec)
except ImportError as e:
    import logging
    logging.basicConfig(format="%(levelname)s: %(message)s")
    logging.warning("Build tool `jupyter-packaging` is missing. Install it with pip or conda.")
    if not ("--name" in sys.argv or "--version" in sys.argv):
        raise e
if __name__ == "__main__":
    setuptools.setup(**setup_args)
| [
"noreply@github.com"
] | noreply@github.com |
c797fec39e87cec2724d05c13ea1be0f98111384 | 7f66c66eb82b480e8a23ecbfb8613aae02cb50f7 | /tests/integration/parity/utils.py | 572d4c4af3500566de67c6e37afa8c80f6465074 | [
"MIT"
] | permissive | y19818/web3.py | 03ddedcfdbd4dde2c1a458b31f5e796509b3c7c6 | 32a85a287ab63220d1e0c06d77be74de595ff02f | refs/heads/master | 2021-06-25T00:30:50.312173 | 2019-12-02T08:21:40 | 2019-12-02T08:21:40 | 225,276,093 | 0 | 0 | MIT | 2019-12-02T03:20:47 | 2019-12-02T03:20:47 | null | UTF-8 | Python | false | false | 1,939 | py | import signal
import socket
import subprocess
import time
from vns_utils import (
to_text,
)
import requests
def wait_for_socket(ipc_path, timeout=60):
    """Block until a Unix-domain socket at *ipc_path* accepts connections.

    Polls every 10ms and gives up silently after *timeout* seconds.  This
    only waits for availability; no connection is handed to the caller.
    """
    start = time.time()
    while time.time() < start + timeout:
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        try:
            sock.connect(ipc_path)
        except (FileNotFoundError, socket.error):
            time.sleep(0.01)
        else:
            break
        finally:
            # BUG FIX: the original never closed the probe socket, leaking
            # one file descriptor per attempt (and one on success).
            sock.close()
def wait_for_http(endpoint_uri, timeout=60):
    """Poll *endpoint_uri* until an HTTP server answers or *timeout*
    seconds elapse; returns None either way."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            requests.get(endpoint_uri)
        except requests.ConnectionError:
            time.sleep(0.01)
        else:
            return
def get_process(command_list, terminates=False):
    # Generator-style context: yields the running parity subprocess and
    # guarantees graceful shutdown plus stdout/stderr logging afterwards
    # (intended for use with contextlib.contextmanager / pytest fixtures).
    proc = subprocess.Popen(
        command_list,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        bufsize=1,
    )
    if terminates:
        # Some commands exit on their own; give them up to 30 seconds.
        wait_for_popen(proc, 30)
    try:
        yield proc
    finally:
        kill_proc_gracefully(proc)
        output, errors = proc.communicate()
        print(
            "Parity Process Exited:\n"
            "stdout:{0}\n\n"
            "stderr:{1}\n\n".format(
                to_text(output),
                to_text(errors),
            )
        )
def wait_for_popen(proc, timeout):
    """Wait up to *timeout* seconds for *proc* to exit, polling every 10ms."""
    deadline = time.time() + timeout
    while time.time() < deadline and proc.poll() is None:
        time.sleep(0.01)
def kill_proc_gracefully(proc):
    """Escalating shutdown: SIGINT (13s grace), then terminate() (5s),
    then kill() (2s).  A no-op if *proc* has already exited."""
    if proc.poll() is not None:
        return
    proc.send_signal(signal.SIGINT)
    wait_for_popen(proc, 13)
    if proc.poll() is None:
        proc.terminate()
        wait_for_popen(proc, 5)
    if proc.poll() is None:
        proc.kill()
        wait_for_popen(proc, 2)
"y19818@gmail.com"
] | y19818@gmail.com |
98f50633d7a2f376fa62cba3433f8d1dd51588f3 | ebe5167148cfff43d24b6c66e44634bb55513b72 | /solutions/linkedlist/160.Intersection.of.Two.Linked.Lists.py | 2cf42f3f9cf91b328616216b08a513d48d5ff246 | [] | no_license | ljia2/leetcode.py | c90ac38a25331d61d3ff77fd135b82372da3a09f | 08c6d27498e35f636045fed05a6f94b760ab69ca | refs/heads/master | 2020-03-25T03:37:13.318582 | 2019-07-18T23:14:41 | 2019-07-18T23:14:41 | 143,351,386 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,122 | py | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    """LeetCode 160: find the node where two singly linked lists intersect."""

    def getIntersectionNode(self, headA, headB):
        """Return the first node shared by both lists, or None.

        O(m + n) time, O(1) extra space: measure both lists, skip the
        length difference on the longer one, then advance both runners in
        lockstep until they meet (or both fall off the end).
        """
        if headA is None or headB is None:
            return None
        len_a = self.get_length(headA)
        len_b = self.get_length(headB)
        # Make `longer` the list with at least as many nodes as `shorter`.
        if len_a >= len_b:
            longer, shorter, skip = headA, headB, len_a - len_b
        else:
            longer, shorter, skip = headB, headA, len_b - len_a
        for _ in range(skip):
            longer = longer.next
        while longer is not None and shorter is not None and longer is not shorter:
            longer = longer.next
            shorter = shorter.next
        # Both runners are now either the shared node or both None.
        return longer if longer is shorter else None

    def get_length(self, head):
        """Return the number of nodes reachable from *head*."""
        count = 0
        node = head
        while node is not None:
            count += 1
            node = node.next
        return count
"ljia@conversantmedia.com"
] | ljia@conversantmedia.com |
dc69296f710f59da9be6abb305d86c572b7c5bbc | 38875c008f33d588ec5827f5b1f19c56b1bb56d5 | /docs/python/resources/config.py | d6a774a2f0a5525e2a13cdc183a74de44a5e6aa9 | [] | no_license | bryanwhiting/ds-arxiv | 6c9c0825e827ed8a3897780dfc71ccf6a454d80a | 6be1718b739da1965511a4792afb05c097fccd49 | refs/heads/master | 2020-11-25T04:56:29.508241 | 2020-01-28T23:45:16 | 2020-01-28T23:45:16 | 228,509,886 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,274 | py | from datetime import date
feeds = {
'engineering': {
'openai': 'https://blog.openai.com/rss/',
'fb': 'https://research.fb.com/blog/feed/',
'airbnb-ds': 'https://medium.com/feed/airbnb-engineering/tagged/data-science',
'ggl_dev': 'http://feeds.feedburner.com/GDBcode',
'ggl_ai': 'http://feeds.feedburner.com/blogspot/gJZg',
'instacart': ' https://tech.instacart.com/feed',
'Google Developers': 'http://feeds.feedburner.com/GDBcode',
'Google Open Source': 'http://feeds.feedburner.com/GoogleOpenSourceBlog',
'fb_code': 'https://code.fb.com/feed/',
'uber_ai': 'https://eng.uber.com/tag/uber-ai-labs/feed',
'uber_eng': 'https://eng.uber.com/feed',
'netflix_tech': 'https://medium.com/feed/netflix-techblog',
'pinterest': 'https://medium.com/feed/@Pinterest_Engineering',
'sebrash': 'https://sebastianraschka.com/rss_feed.xml',
'zillow': 'https://www.zillow.com/data-science/feed/',
},
'tutorials': {
'databricks': 'https://databricks.com/feed',
'datacamp': 'https://www.datacamp.com/community/rss.xml',
'ml_mast': 'https://machinelearningmastery.com/blog/feed/',
'twrds': 'https://towardsdatascience.com/feed/',
'devto': 'https://dev.to/feed',
},
'general': {
'gnews': 'https://news.google.com/news/rss/?hl=en&ned=us&gl=US&ned=us&gl=US',
'espn': 'http://www.espn.com/espn/rss/news',
'Science': 'http://feeds.reuters.com/reuters/scienceNews',
'TopNews': 'http://feeds.reuters.com/reuters/topNews',
'World News': 'http://feeds.reuters.com/Reuters/worldNews',
'Sports News': 'http://feeds.reuters.com/reuters/sportsNews',
'BBC': 'http://feeds.bbci.co.uk/news/video_and_audio/news_front_page/rss.xml',
'BBC US': 'http://feeds.bbci.co.uk/news/video_and_audio/news_front_page/rss.xml?edition=us',
'BBC International': 'http://feeds.bbci.co.uk/news/rss.xml?edition=int',
'Snopes': 'https://www.snopes.com/feed/',
},
'tech': {
'mit': 'https://www.technologyreview.com/stories.rss',
'fc': 'https://www.fastcompany.com/technology/rss',
'reuters': 'http://feeds.reuters.com/reuters/technologyNews',
'bbc': 'http://feeds.bbci.co.uk/news/video_and_audio/technology/rss.xml',
'tc': 'https://techcrunch.com/startups/',
'vb': 'https://venturebeat.com/feed/'
},
'startups': {
'avc': 'http://feeds.feedburner.com/avc',
'andrew chen': 'https://andrewchen.co/feed/',
'ycombinator': 'https://blog.ycombinator.com/feed/',
'A Horowitz': 'https://a16z.com/feed/',
'AVC': 'https://avc.com/feed/',
'Sam Altman': 'http://blog.samaltman.com/posts.atom',
},
'religious': {
'lds': 'https://www.mormonnewsroom.org/rss'
},
}
# Map weekday index (datetime.date.weekday(): Monday == 0 ... Sunday == 6)
# to the feed categories pulled that day.
# NOTE(review): the name `map` shadows the builtin; left unchanged because
# other modules may reference it by this name.
map = {
    0: [], # Monday: arxiv is not in `feeds` above, it is handled separately
    1: ['tutorials'],
    2: ['general'],
    3: ['startups', 'tech'],
    4: ['engineering'],
    5: ['general'],
    6: ['religious'], # Sunday
}
# TODO: add arxiv more cleanly: check main.py for implementation
# Keep only today's categories (evaluated once, at import time).
feeds = {c:feeds[c] for c in map[date.today().weekday()]} # weekday(): Monday == 0
| [
"bryan.g.whiting@gmail.com"
] | bryan.g.whiting@gmail.com |
e97f390167367281707f1fb7bdcfdad03ff816b6 | 6420b91261248a9602207528683696510ee2e5da | /ecommerce/ecommerce/wsgi.py | b1b89363183244c151f07c117a776f6687264b94 | [] | no_license | Mounikabudiga/ecommerce | bfe0d5178586720575f6d7dd1c278b118b5655bb | 4688a3d6ccf53d406e2dfa41ef20c1f1872840a2 | refs/heads/master | 2020-09-02T16:52:49.976204 | 2019-11-03T06:59:40 | 2019-11-03T06:59:40 | 219,263,496 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 429 | py | """
WSGI config for ecommerce project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
wsgi:-webserver gateway interface
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ecommerce.settings")
application = get_wsgi_application()
| [
"mounikabudiga98@gmail.com"
] | mounikabudiga98@gmail.com |
6b1515908b2fe16543fdcf82ee9325387b7d572b | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_plagued.py | 33674872c908833ea1ef79864be519cf6ce0d184 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py |
from xai.brain.wordbase.verbs._plague import _PLAGUE
#calss header
class _PLAGUED(_PLAGUE, ):
def __init__(self,):
_PLAGUE.__init__(self)
self.name = "PLAGUED"
self.specie = 'verbs'
self.basic = "plague"
self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
2bd765f9129f0648c344eac691a54cae5729812b | cc0d06e2aad3d30152c4a3f3356befdc58748313 | /13nov_til_19nov/17_novin1900.py | 0e049e544ca995c89370175743c5e1de70beedec | [] | no_license | lasse-steinnes/IN1900 | db0bb4da33fa024d4fe9207337c0f1d956197c50 | c8d97c2903078471f8e419f88cc8488d9b8fc7da | refs/heads/master | 2020-12-14T15:34:36.429764 | 2020-01-18T19:59:46 | 2020-01-18T19:59:46 | 234,789,653 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | #### Forelesning in1900 ####
### Siste forelesning. Foilbasert.
## siste del av ODE-løsere og modellering av smittsomme sykdommer
## System av ODE´s
### Skal lage klassehierarki for ODE løsere.
###
| [
"lasse.steinnes93@gmail.com"
] | lasse.steinnes93@gmail.com |
65ab4f794bf809550a372a51156e69c862bc82ca | 3e2cf7cc9c3d0e3c89f5690d2d32ed157d828940 | /util/fuzz/fuzzloops.py | 2182ab188ccd3478f311199ebddd021cad082d39 | [
"ISC"
] | permissive | osresearch/prjtrellis | 2d989bdc838b9bbdfa24ec2e9e41000b5d9d1ca5 | 6d2d00db04d298ee228ec4cf37223f332d9f767d | refs/heads/master | 2020-06-05T09:07:04.559096 | 2019-06-17T14:29:39 | 2019-06-17T14:29:39 | 192,386,722 | 1 | 0 | ISC | 2019-06-17T17:01:35 | 2019-06-17T17:01:34 | null | UTF-8 | Python | false | false | 1,463 | py | """
General Utilities for Fuzzing
"""
import os
from threading import Thread, RLock
def parallel_foreach(items, func):
"""
Run a function over a list of values, running a number of jobs
in parallel. TRELLIS_JOBS should be set to the number of jobs to run,
defaulting to 4.
"""
if "TRELLIS_JOBS" in os.environ:
jobs = int(os.environ["TRELLIS_JOBS"])
else:
jobs = 4
items_queue = list(items)
items_lock = RLock()
def runner():
while True:
with items_lock:
if len(items_queue) == 0:
return
item = items_queue[0]
items_queue.pop(0)
func(item)
threads = [Thread(target=runner) for i in range(jobs)]
for t in threads:
t.start()
for t in threads:
t.join()
def journal_foreach(items, func):
"""
Run a function over a list of items, keeping a journal of which items have been visited. If the script is
interrupted, it will return where it stopped if the list of items have not changed.
If an exception occurs during an item, that exception will be logged in the journal also.
Items must have an unambiguous string conversion, and should normally be string keys, that can be saved in the
journal.
The journal is called "fuzz.journal" in the current working directory. At present, this implementation is not thread
safe.
"""
# TODO
pass
| [
"davey1576@gmail.com"
] | davey1576@gmail.com |
9ffd5ba7b0f83beb82f278a5432c9738b8cb7a4b | 7ac3fb82698097fbfca84de452534adedead7c80 | /polls_grievance/polls/admin.py | 77489f022a971642f46532c55a40f7eebf5e7dd6 | [] | no_license | shruthi-ms/grievance-redressal-system-B05 | c3a0113c8008a9a1db16cbc409c68e8bdeb603a6 | 1ad4917de6aab1487adfe42d70426e87fd2de489 | refs/heads/master | 2020-03-31T22:21:55.615852 | 2018-11-09T06:53:54 | 2018-11-09T06:53:54 | 152,615,805 | 0 | 3 | null | 2018-11-09T06:35:41 | 2018-10-11T15:38:25 | HTML | UTF-8 | Python | false | false | 159 | py | from django.contrib import admin
from .models import Question,Choice
# Register your models here.
admin.site.register(Question)
admin.site.register(Choice) | [
"noreply@github.com"
] | noreply@github.com |
c0832261e777b57d203ecb1bef7b9c07cbd0adaa | 0db0cd4a1632151d9a643f5e8fd1d96963c0b624 | /transformer/transformer.py | 9180465ece4d6b6eb6175bfa2beab6c468ec7f0a | [
"Apache-2.0"
] | permissive | Nniy/lark | f0a4bace3259090a94d2afa8a8c1581684b35670 | 09a9ee45006ebf07ad8abe6f03384cac7f407cc1 | refs/heads/master | 2020-03-20T07:18:10.276531 | 2019-02-07T09:49:36 | 2019-02-07T09:49:36 | 137,277,649 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,236 | py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import copy
import spacy
from torch.autograd import Variable
from torchtext import data, datasets
class EncoderDecoder(nn.Module):
    """Standard encoder-decoder skeleton: embed the source, encode it,
    embed the target, and decode against the encoder output."""

    def __init__(self, encoder, decoder, src_embed, tgt_embed, generator):
        super(EncoderDecoder, self).__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.src_embed = src_embed
        self.tgt_embed = tgt_embed
        self.generator = generator

    def forward(self, src, tgt, src_mask, tgt_mask):
        """Encode *src*, then decode *tgt* conditioned on the encoding."""
        encoded = self.encoder(self.src_embed(src), src_mask)
        return self.decoder(self.tgt_embed(tgt), encoded, src_mask, tgt_mask)
def clones(module, N):
    """Return an nn.ModuleList of N independent deep copies of *module*."""
    return nn.ModuleList(copy.deepcopy(module) for _ in range(N))
class Encoder(nn.Module):
    """A stack of N identical encoder layers followed by a final LayerNorm."""

    def __init__(self, layer, N):
        super(Encoder, self).__init__()
        self.layers = clones(layer, N)
        self.norm = LayerNorm(layer.size)

    def forward(self, x, mask):
        """Pass *x* (with attention *mask*) through every layer, then normalize."""
        out = x
        for sublayer in self.layers:
            out = sublayer(out, mask)
        return self.norm(out)
class LayerNorm(nn.Module):
    """Layer normalization over the last dimension with learnable
    elementwise gain (a_2) and bias (b_2)."""

    def __init__(self, features, eps=1e-6):
        super(LayerNorm, self).__init__()
        self.a_2 = nn.Parameter(torch.ones(features))
        self.b_2 = nn.Parameter(torch.zeros(features))
        self.eps = eps

    def forward(self, x):
        mu = x.mean(-1, keepdim=True)
        sigma = x.std(-1, keepdim=True)
        normalized = (x - mu) / (sigma + self.eps)
        return self.a_2 * normalized + self.b_2
class SublayerConnection(nn.Module):
    """Residual connection around a sublayer applied to the pre-normalized
    input: x + dropout(sublayer(norm(x)))."""

    def __init__(self, size, dropout):
        super(SublayerConnection, self).__init__()
        self.norm = LayerNorm(size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, sublayer):
        transformed = sublayer(self.norm(x))
        return x + self.dropout(transformed)
class EncoderLayer(nn.Module):
    """One encoder block: self-attention then a position-wise feed-forward
    network, each wrapped in a residual SublayerConnection."""

    def __init__(self, size, self_attn, feed_forward, dropout):
        super(EncoderLayer, self).__init__()
        self.self_attn = self_attn
        self.feed_forward = feed_forward
        self.sublayer = clones(SublayerConnection(size, dropout), 2)
        self.size = size

    def forward(self, x, mask):
        attended = self.sublayer[0](x, lambda t: self.self_attn(t, t, t, mask))
        return self.sublayer[1](attended, self.feed_forward)
class Decoder(nn.Module):
    """A stack of N identical decoder layers followed by a final LayerNorm."""

    def __init__(self, layer, N):
        super(Decoder, self).__init__()
        self.layers = clones(layer, N)
        self.norm = LayerNorm(layer.size)

    def forward(self, x, memory, src_mask, tgt_mask):
        out = x
        for sublayer in self.layers:
            out = sublayer(out, memory, src_mask, tgt_mask)
        return self.norm(out)
class DecoderLayer(nn.Module):
    """One decoder block: masked self-attention, source attention over the
    encoder memory, then a feed-forward network -- all residual."""

    def __init__(self, size, self_attn, src_attn, feed_forward, dropout):
        super(DecoderLayer, self).__init__()
        self.size = size
        self.self_attn = self_attn
        self.src_attn = src_attn
        self.feed_forward = feed_forward
        self.sublayer = clones(SublayerConnection(size, dropout), 3)

    def forward(self, x, memory, src_mask, tgt_mask):
        mem = memory
        out = self.sublayer[0](x, lambda t: self.self_attn(t, t, t, tgt_mask))
        out = self.sublayer[1](out, lambda t: self.src_attn(t, mem, mem, src_mask))
        return self.sublayer[2](out, self.feed_forward)
def subsequent_mask(size):
    """Boolean (1, size, size) mask, True at positions a decoder step may
    attend to (itself and earlier positions only)."""
    upper_triangle = np.triu(np.ones((1, size, size)), k=1).astype('uint8')
    return torch.from_numpy(upper_triangle) == 0
def attention(query, key, value, mask=None, dropout=0.0):
    """Scaled dot-product attention.

    Returns (output, attention_weights).  *dropout* is the drop
    probability applied to the softmax weights; positions where *mask*
    is 0 are excluded by setting their scores to -1e9.
    """
    d_k = query.size(-1)
    scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
    if mask is not None:
        scores = scores.masked_fill(mask == 0, -1e9)
    weights = F.softmax(scores, dim=-1)
    weights = F.dropout(weights, p=dropout)
    return torch.matmul(weights, value), weights
class MultiHeadedAttention(nn.Module):
    """Multi-head attention: h parallel scaled-dot-product heads over
    d_model-wide projections (d_model must be divisible by h)."""

    def __init__(self, h, d_model, dropout=0.1):
        super(MultiHeadedAttention, self).__init__()
        assert d_model % h == 0
        self.d_k = d_model // h
        self.h = h
        self.p = dropout
        # Four projections: query, key, value, and the final output.
        self.linears = clones(nn.Linear(d_model, d_model), 4)
        self.attn = None

    def forward(self, query, key, value, mask=None):
        if mask is not None:
            # The same mask is broadcast across all heads.
            mask = mask.unsqueeze(1)
        batch = query.size(0)
        # Project, then split d_model into h heads of width d_k.
        q, k, v = [
            proj(t).view(batch, -1, self.h, self.d_k).transpose(1, 2)
            for proj, t in zip(self.linears, (query, key, value))
        ]
        heads, self.attn = attention(q, k, v, mask=mask, dropout=self.p)
        # Merge the heads back into one d_model-wide representation.
        merged = heads.transpose(1, 2).contiguous().view(batch, -1, self.h * self.d_k)
        return self.linears[-1](merged)
class PositionwiseFeedForward(nn.Module):
    """Two-layer position-wise MLP: w_2(dropout(relu(w_1(x))))."""

    def __init__(self, d_model, d_ff, dropout=0.1):
        super(PositionwiseFeedForward, self).__init__()
        self.w_1 = nn.Linear(d_model, d_ff)
        self.w_2 = nn.Linear(d_ff, d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        hidden = F.relu(self.w_1(x))
        return self.w_2(self.dropout(hidden))
class Embeddings(nn.Module):
    """Token embedding lookup scaled by sqrt(d_model)."""

    def __init__(self, d_model, vocab):
        super(Embeddings, self).__init__()
        self.lut = nn.Embedding(vocab, d_model)
        self.d_model = d_model

    def forward(self, x):
        scale = math.sqrt(self.d_model)
        return self.lut(x) * scale
class PositionalEncoding(nn.Module):
    """Adds fixed sinusoidal position encodings (precomputed for up to
    max_len positions) to the input, followed by dropout."""

    def __init__(self, d_model, dropout, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        # Build the table once, computing the frequencies in log space.
        table = torch.zeros(max_len, d_model)
        positions = torch.arange(0., max_len).unsqueeze(1)
        freqs = torch.exp(torch.arange(0., d_model, 2) * -(math.log(10000.0) / d_model))
        table[:, 0::2] = torch.sin(positions * freqs)
        table[:, 1::2] = torch.cos(positions * freqs)
        self.register_buffer('pe', table.unsqueeze(0))

    def forward(self, x):
        x = x + Variable(self.pe[:, :x.size(1)], requires_grad=False)
        return self.dropout(x)
class Generator(nn.Module):
    """Final step: linear projection to vocabulary size plus log-softmax."""

    def __init__(self, d_model, vocab):
        super(Generator, self).__init__()
        self.proj = nn.Linear(d_model, vocab)

    def forward(self, x):
        logits = self.proj(x)
        return F.log_softmax(logits, dim=-1)
def make_model(src_vocab, tgt_vocab, N=6, d_model=512, d_ff=2048, h=8, dropout=0.1):
    """Construct a full encoder-decoder transformer from hyperparameters:
    N layers per stack, model width d_model, feed-forward width d_ff,
    h attention heads."""
    dc = copy.deepcopy
    attn = MultiHeadedAttention(h, d_model, dropout)
    ff = PositionwiseFeedForward(d_model, d_ff, dropout)
    position = PositionalEncoding(d_model, dropout)
    model = EncoderDecoder(
        Encoder(EncoderLayer(d_model, dc(attn), dc(ff), dropout), N),
        Decoder(DecoderLayer(d_model, dc(attn), dc(attn), dc(ff), dropout), N),
        nn.Sequential(Embeddings(d_model, src_vocab), dc(position)),
        nn.Sequential(Embeddings(d_model, tgt_vocab), dc(position)),
        Generator(d_model, tgt_vocab))
    # Glorot / fan_avg initialization for every weight matrix.
    for param in model.parameters():
        if param.dim() > 1:
            nn.init.xavier_uniform_(param)
    return model
class NoamOpt:
    """Optimizer wrapper implementing the "Noam" learning-rate schedule:
    linear warmup followed by inverse-square-root decay, scaled by
    factor * model_size ** -0.5."""

    def __init__(self, model_size, factor, warmup, optimizer):
        self.optimizer = optimizer
        self._step = 0
        self.warmup = warmup
        self.factor = factor
        self.model_size = model_size
        self._rate = 0

    def step(self):
        """Advance one step: refresh every param group's lr, then step the
        wrapped optimizer."""
        self._step += 1
        current = self.rate()
        for group in self.optimizer.param_groups:
            group['lr'] = current
        self._rate = current
        self.optimizer.step()

    def rate(self, step=None):
        """Learning rate at *step* (defaults to the current step count)."""
        if step is None:
            step = self._step
        warmup_term = step * self.warmup ** (-1.5)
        return self.factor * self.model_size ** (-0.5) * min(step ** (-0.5), warmup_term)
def get_std_opt(model):
    """Standard optimizer from the paper: Adam(lr=0, betas=(0.9, 0.98),
    eps=1e-9) wrapped in the Noam schedule (factor 2, warmup 4000)."""
    adam = torch.optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9)
    return NoamOpt(model.src_embed[0].d_model, 2, 4000, adam)
class LabelSmoothing(nn.Module):
    """KL-divergence loss against a label-smoothed target distribution.

    The true class gets probability 1 - smoothing; the remainder is spread
    uniformly over the other non-padding classes.  Rows whose target is
    padding_idx are zeroed so they contribute nothing.
    """

    def __init__(self, size, padding_idx, smoothing=0.0):
        super(LabelSmoothing, self).__init__()
        self.criterion = nn.KLDivLoss(reduction='sum')
        self.padding_idx = padding_idx
        self.confidence = 1.0 - smoothing
        self.smoothing = smoothing
        self.size = size
        self.true_dist = None

    def forward(self, x, target):
        assert x.size(1) == self.size
        smoothed = x.data.clone()
        # Uniform mass over the size - 2 non-target, non-padding classes.
        smoothed.fill_(self.smoothing / (self.size - 2))
        smoothed.scatter_(1, target.data.unsqueeze(1), self.confidence)
        smoothed[:, self.padding_idx] = 0
        padded_rows = torch.nonzero(target.data == self.padding_idx)
        if padded_rows.dim() > 0:
            smoothed.index_fill_(0, padded_rows.squeeze(), 0.0)
        self.true_dist = smoothed
        return self.criterion(x, Variable(smoothed, requires_grad=False))
crit = LabelSmoothing(5, 0, 0.2)
def loss(x):
    # Demo helper: smoothed KL loss of the distribution
    # [0, x/d, 1/d, 1/d, 1/d] (d = x + 3) against target class 1 --
    # used to visualize loss as a function of the model's confidence.
    d = x + 3 * 1
    predict = torch.FloatTensor([[0, x / d, 1 / d, 1 / d, 1 / d], ])
    print(predict)
    return crit(Variable(predict.log()), Variable(torch.LongTensor([1]))).item()
def loss_backprop(generator, criterion, out, targets, normalize):
    """Memory-saving loss + backprop: runs the generator/criterion one
    time-step column at a time on detached copies, collects the per-column
    gradients, then backpropagates them through *out* in one call.

    Returns the total loss (divided by *normalize*) over all time steps.
    """
    assert out.size(1) == targets.size(1)
    total = 0.0
    out_grad = []
    for i in range(out.size(1)):
        # Detach the column so each per-step graph is freed immediately.
        out_column = Variable(out[:, i].data, requires_grad=True)
        gen = generator(out_column)
        loss = criterion(gen, targets[:, i]) / normalize
        total += loss.item()
        loss.backward()
        out_grad.append(out_column.grad.data.clone())
    out_grad = torch.stack(out_grad, dim=1)
    out.backward(gradient=out_grad)
    return total
def make_std_mask(src, tgt, pad):
    """Build (src_mask, tgt_mask): hide *pad* tokens in both, and
    additionally hide future positions on the target side."""
    src_mask = (src != pad).unsqueeze(-2)
    tgt_pad_mask = (tgt != pad).unsqueeze(-2)
    future_mask = Variable(subsequent_mask(tgt.size(-1)).type_as(tgt_pad_mask.data))
    return src_mask, tgt_pad_mask & future_mask
def train_epoch(train_iter, model, criterion, opt, transpose=False):
    """Run one training epoch.

    For each batch: teacher-forced forward pass, per-step loss and
    backprop via loss_backprop, then a step of *opt* (a NoamOpt-style
    wrapper).  Logs loss and learning rate every 10 batches.
    """
    model.train()
    for i, batch in enumerate(train_iter):
        src, trg, src_mask, trg_mask = \
            batch.src, batch.trg, batch.src_mask, batch.trg_mask
        out = model.forward(src, trg[:, :-1], src_mask, trg_mask[:, :-1, :-1])
        loss = loss_backprop(model.generator, criterion, out, trg[:, 1:], batch.ntokens)
        # BUG FIX: the original stepped the *global* `model_opt` instead of
        # the `opt` argument, silently ignoring the optimizer passed in.
        opt.step()
        opt.optimizer.zero_grad()
        if i % 10 == 1:
            print(i, loss, opt._rate)
def valid_epoch(valid_iter, model, criterion, transpose=False):
    """Run one validation pass and return the accumulated per-batch loss.

    BUG FIXES vs. the original:
      * ``model.test()`` -> ``model.eval()``: ``nn.Module`` has no ``test``
        method, so the original raised AttributeError on the first call.
      * ``total`` was initialised but never updated and the per-batch loss
        was discarded; it is now accumulated and returned.
    """
    model.eval()
    total = 0
    for batch in valid_iter:
        src, trg, src_mask, trg_mask = \
            batch.src, batch.trg, batch.src_mask, batch.trg_mask
        out = model.forward(src, trg[:, :-1], src_mask, trg_mask[:, :-1, :-1])
        # NOTE(review): loss_backprop still runs backward passes here; the
        # gradients are simply never applied since no optimizer step is taken.
        total += loss_backprop(model.generator, criterion, out, trg[:, 1:],
                               batch.ntokens)
    return total
class Batch:
    """Plain container pairing source/target tensors with their attention
    masks and the number of real (non-padding) target tokens."""
    def __init__(self, src, trg, src_mask, trg_mask, ntokens):
        self.src, self.trg = src, trg
        self.src_mask, self.trg_mask = src_mask, trg_mask
        self.ntokens = ntokens
def data_gen(V, batch, nbatches):
    """Yield ``nbatches`` random copy-task batches: src == tgt, each a
    (batch, 10) tensor of ints drawn from [1, V)."""
    for i in range(nbatches):
        data = torch.from_numpy(np.random.randint(1, V, size=(batch, 10)))
        src = Variable(data, requires_grad=False)
        tgt = Variable(data, requires_grad=False)
        src_mask, tgt_mask = make_std_mask(src, tgt, 0)
        # NOTE(review): tgt[1:] slices the *batch* dimension; tgt[:, 1:] was
        # probably intended for counting non-pad target tokens — confirm.
        yield Batch(src, tgt, src_mask, tgt_mask, (tgt[1:] != 0).data.sum())
# V = 11
# criterion = LabelSmoothing(size=V, padding_idx=0, smoothing=0.0)
# model = make_model(V, V, N=2)
# model_opt = get_std_opt(model)
# for epoch in range(2):
# train_epoch(data_gen(V, 30, 20), model, criterion, model_opt)
# Load the spaCy pipelines used purely for tokenisation.
# NOTE(review): shorthand names 'de'/'en' need spaCy<3 model link aliases —
# newer spaCy requires e.g. 'de_core_news_sm'; confirm the environment.
spacy_de = spacy.load('de')
spacy_en = spacy.load('en')
def tokenize_de(text):
    """Tokenise German text with spaCy, returning the surface strings."""
    return [token.text for token in spacy_de.tokenizer(text)]
def tokenize_en(text):
    """Tokenise English text with spaCy, returning the surface strings."""
    return [token.text for token in spacy_en.tokenizer(text)]
# Special tokens; the target field additionally gets BOS/EOS markers so the
# decoder learns where sentences begin and end.
BOS_WORD = '<s>'
EOS_WORD = '</s>'
BLANK_WORD = "<blank>"
SRC = data.Field(tokenize=tokenize_de, pad_token=BLANK_WORD)
TGT = data.Field(tokenize=tokenize_en, init_token=BOS_WORD,
                 eos_token=EOS_WORD, pad_token=BLANK_WORD)
MAX_LEN = 100
# IWSLT de->en, keeping only pairs where both sides are <= MAX_LEN tokens.
train, val, test = datasets.IWSLT.splits(exts=('.de', '.en'), fields=(SRC, TGT),
                                         filter_pred=lambda x: len(vars(x)['src']) <= MAX_LEN and
                                         len(vars(x)['trg']) <= MAX_LEN)
# NOTE(review): MIN_FREQ = 1 keeps every token, giving a very large vocab;
# the original notebook used a higher cutoff — confirm this is intentional.
MIN_FREQ = 1
SRC.build_vocab(train.src, min_freq=MIN_FREQ)
TGT.build_vocab(train.trg, min_freq=MIN_FREQ)
# Detail. Batching seems to matter quite a bit.
# This is temporary code for dynamic batching based on number of tokens.
# This code should all go away once things get merged in this library.
BATCH_SIZE = 4096
# NOTE(review): a module-level `global` statement is a no-op; the names are
# created by assignment inside batch_size_fn anyway.
global max_src_in_batch, max_tgt_in_batch
def batch_size_fn(new, count, sofar):
    """Dynamic batch sizing: track the longest source/target seen in the
    current batch and report the padded token count if ``new`` is added."""
    global max_src_in_batch, max_tgt_in_batch
    if count == 1:
        # First example of a new batch: reset the running maxima.
        max_src_in_batch, max_tgt_in_batch = 0, 0
    max_src_in_batch = max(max_src_in_batch, len(new.src))
    max_tgt_in_batch = max(max_tgt_in_batch, len(new.trg) + 2)  # +2 for BOS/EOS
    return max(count * max_src_in_batch, count * max_tgt_in_batch)
class MyIterator(data.Iterator):
    """torchtext Iterator that, in training mode, pools ~100 batches worth of
    examples, sorts within the pool by length, re-cuts it into batches, and
    shuffles the batch order — tighter packing with less padding."""
    def create_batches(self):
        if self.train:
            def pool(d, random_shuffler):
                # Take a large chunk (100x batch size), sort it, slice it
                # into token-budgeted batches, then shuffle those batches.
                for p in data.batch(d, self.batch_size * 100):
                    p_batch = data.batch(
                        sorted(p, key=self.sort_key),
                        self.batch_size, self.batch_size_fn)
                    for b in random_shuffler(list(p_batch)):
                        yield b
            self.batches = pool(self.data(), self.random_shuffler)
        else:
            # Validation/test: deterministic order, each batch length-sorted.
            self.batches = []
            for b in data.batch(self.data(), self.batch_size,
                                self.batch_size_fn):
                self.batches.append(sorted(b, key=self.sort_key))
def rebatch(pad_idx, batch):
    """Transpose torchtext's (seq, batch) tensors to our (batch, seq) layout
    and wrap them in a Batch with the standard masks."""
    src = batch.src.transpose(0, 1)
    trg = batch.trg.transpose(0, 1)
    src_mask, trg_mask = make_std_mask(src, trg, pad_idx)
    # NOTE(review): trg[1:] slices the batch dimension; trg[:, 1:] was likely
    # intended when counting non-pad target tokens — confirm.
    ntokens = (trg[1:] != pad_idx).data.sum()
    return Batch(src, trg, src_mask, trg_mask, ntokens)
# Length-sorted, token-budgeted iterators; validation keeps a fixed order.
train_iter = MyIterator(train, batch_size=BATCH_SIZE, device='cuda:0',
                        repeat=False, sort_key=lambda x: (len(x.src), len(x.trg)),
                        batch_size_fn=batch_size_fn, train=True)
valid_iter = MyIterator(val, batch_size=BATCH_SIZE, device='cuda:0',
                        repeat=False, sort_key=lambda x: (len(x.src), len(x.trg)),
                        batch_size_fn=batch_size_fn, train=False)
pad_idx = TGT.vocab.stoi["<blank>"]
# Build the 6-layer transformer, its warmup optimizer wrapper, and the
# label-smoothed criterion; everything lives on the GPU.
model = make_model(len(SRC.vocab), len(TGT.vocab), N=6)
model_opt = get_std_opt(model)
model.cuda()
criterion = LabelSmoothing(size=len(TGT.vocab), padding_idx=pad_idx, smoothing=0.1)
criterion.cuda()
# Main loop: each epoch trains then validates, rebatching torchtext's
# (seq, batch) tensors into our (batch, seq) layout on the fly.
for epoch in range(15):
    train_epoch((rebatch(pad_idx, b) for b in train_iter), model, criterion, model_opt)
    valid_epoch((rebatch(pad_idx, b) for b in valid_iter), model, criterion)
| [
"alexyin1012@gmail.com"
] | alexyin1012@gmail.com |
74e9df0b5d0ebe6672192510a53a197fe7aea039 | abf443927d76b4df7af6e4fcee85bdb4c9458ecd | /sta_etl/plugins/plugin_dummy.py | 1ddf5bcda9181127c894b4882b76dfb99f1f2d7c | [
"MIT"
] | permissive | XeBoris/git-etl | 0374b234f477251473c1673bfc4b6badadf1d49d | 888f26e51a797dd111c9ca457a0c83b4f00296f0 | refs/heads/main | 2023-04-08T06:02:53.708342 | 2021-04-25T13:06:44 | 2021-04-25T13:06:44 | 343,483,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,657 | py | from sta_etl.plugin_handler.etl_collector import Collector
import pandas as pd
import numpy as np
# from geopy.distance import distance as geopy_distance
# import math
@Collector
class Plugin_Dummy():
    """
    This is a template development plugin for proving the processing chain
    right with depending plugins. This plugin is part of Plugin_Dev2(...) and
    can be used together such as:

    :Example:

    sta_cli process --hash 697b5d35 --type "Plugin_Dev2"

    This will trigger a processing chain based on the module dependency of
    Plugin_Dev2 to Plugin_Dev1, create random data and store the outcome in
    the STA database core.

    As a developer you can set
    - self._proc_success (True or False)
    - self._proc_result (None or pd.DataFrame)
    to simulate processing a processing chain along the way.
    """

    def __init__(self):
        """
        The class init function. This function holds only information
        about the plugin itself. In that way we can always load the plugin
        without initiating further variables and member functions.
        """
        self._plugin_config = {
            "plugin_name": "Plugin_Example",
            "plugin_dependencies": ["gps"],
            "plugin_description": """
            This is a simple template plugin
            """,
            "leaf_name": "example"
        }

    def __del__(self):
        """
        At this point, adjust the destructor of your plugin to remove
        unnecessary objects from RAM. In that way we can keep the RAM
        usage low.

        :return: None
        """
        pass

    def init(self):
        """
        The "true" init is used here to set up the plugin. At this point, a
        dictionary (self._data_dict) is created which holds data which are
        required so that this plugin runs through (see set_plugin_data(...)
        for more information). If self._data_dict is not set externally, it
        could also mean that there are no requirements for data sources.
        Have a look at the processing instruction of this plugin to verify
        its function.

        .. note::
            - self._data_dict is always a dictionary which can be empty if
              no data are required by this plugin
            - self._proc_success is always False initially. Set to True if
              processing is successful to notify the PluginLoader about the
              outcome.
            - self._proc_result is initially None and becomes a pandas
              DataFrame or any other data storage object. It is mandatory
              that the PluginLoader understands how to handle the result and
              write it to the underlying storage facility.

        :return: None
        """
        self._data_dict = {}
        self._proc_success = False
        self._proc_result = None

    def get_result(self):
        """
        A return function for this plugin to transfer processed data to the
        PluginLoader. This plugin returns None or pd.DataFrame as result.

        :return: Pandas DataFrame or None
        """
        return self._proc_result

    def get_processing_success(self):
        """
        Reports the processing status back to the PluginLoader. This variable
        is False by default and is set to True on successful processing.

        :return: bool
        """
        return self._proc_success

    def get_plugin_config(self):
        """
        Standard function: Return

        :return: A dictionary with the plugin configuration
        """
        return self._plugin_config

    def print_plugin_config(self):
        """
        Present the initial plugin configuration to users.

        .. todo: This function uses Python print(...) right now. Change to
                 logging soon.

        :return: None
        """
        print("<-----------")
        # BUG FIX: the config key is "plugin_name"; .get('name') always
        # returned None, so the plugin name was never shown.
        print(f"Plugin name {self._plugin_config.get('plugin_name')}")
        print(f"Plugin dependencies: {self._plugin_config.get('plugin_dependencies')}")
        print(f"Plugin produces leaf name (aka data asset): {self._plugin_config.get('leaf_name')}")
        print(f"Plugin description:")
        print(self._plugin_config.get('plugin_description'))
        print("<-----------")

    def set_plugin_data(self, data_dict=None):
        """
        A function to set the necessary data as a dictionary. The dictionary
        self._data_dict is set when running init(...) but has to be supplied
        beforehand when the processor code below requires it.

        BUG FIX: the original default was a *mutable* dict literal ({}),
        which is shared across every call and instance; None is used as the
        sentinel instead and a fresh dict is created per call.

        :param data_dict: dictionary
            A dictionary with data objects which can be understood by the
            processor code below.
        :return: None
        """
        self._data_dict = {} if data_dict is None else data_dict

    def run(self):
        """
        A data processor can sometimes be more complicated, so run(...) is the
        single public entry point for starting the processing instruction.
        Put control mechanisms here to check the plugin processor behaves
        correctly.

        .. note::
            All processing instructions and helper functions are "private" to
            this plugin processor class; stick to the _<name> convention.

        :return: None
        """
        # Run individual steps of the data processing:
        self._processer()

    def _processer(self):
        """
        In this template plugin the processing instruction simulates the
        handling of processing dependencies and produces a fake result.

        :return: None
        """
        # Fetch all important data for calculations (Example):
        # Always use .get(...) on self._data_dict to stay in control of the
        # existence of the data object. (Unused here — template placeholder.)
        gps_data = self._data_dict.get("gps")

        # Implement your code here! We will just show the plugin configuration:
        self.print_plugin_config()

        # Create a fake result: as a developer you use this section to
        # simulate whether a plugin processing chain is successful and what
        # is reported back to the PluginLoader.
        df = pd.DataFrame(np.random.randint(0, 100, size=(100, 4)),
                          columns=["devel1-A", "devel1-B", "devel1-C", "devel1-D"])
        # Fake results:
        self._proc_result = df
        self._proc_success = True
| [
"Boris.Bauermeister@gmail.com"
] | Boris.Bauermeister@gmail.com |
cfe01a4b169c15eb94d8021b61f0dde6ad776745 | bec2c75169d382832ed4d7bef22875134e8f5905 | /calculadora simples.py | 75a0cb4b286a441895308343bcc80ae77f3219eb | [] | no_license | guhavila/Calculadora-simples | 6b4ad8c408355df9c494f8127049d92a7fe73b5b | 652238696841d35796a79acfeef68837eef4d8e1 | refs/heads/main | 2023-08-16T09:39:34.911622 | 2021-10-04T17:59:12 | 2021-10-04T17:59:12 | 413,525,689 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 537 | py | numero1 = 0
def calcula(numero1, operacao, numero2):
    """Return numero1 <operacao> numero2 for "+", "-", "/" and "*", or the
    string "Operação Invalida" for any other operator.

    Division keeps Python semantics: float result, ZeroDivisionError on /0.
    """
    if operacao == "+":
        return numero1 + numero2
    if operacao == "-":
        return numero1 - numero2
    if operacao == "/":
        return numero1 / numero2
    if operacao == "*":
        return numero1 * numero2
    return "Operação Invalida"


def main():
    """Read two integers and an operator, then print "a op b = result"."""
    numero1 = int(input("Digite o numero1: "))
    operacao = input("Digite a operacao: ")
    numero2 = int(input("Digite o numero2:"))
    resultado = calcula(numero1, operacao, numero2)
    print(str(numero1) + " " + str(operacao) + " " + str(numero2) + " = " + str(resultado))


# BUG FIX: the original ran input()/print() at import time (and carried dead
# pre-initialisations of every variable); the logic is now a testable pure
# function, with the I/O behind the standard main guard.
if __name__ == "__main__":
    main()
"gustavo_avila2005@hotmail.com"
] | gustavo_avila2005@hotmail.com |
c96d9fbb1ec55e009c3130b003d3cd72bee5a478 | a9760a1f2384fc1ab221fac2c06ce5b8fc21b518 | /.ipython/ipy_user_conf.py | d814cd8491f14cf9afab3ba439a4d2191815a7cf | [] | no_license | escalant3/dotfiles | 261c1ba5d77e4fe2ab61cfade7ea043a5c8260ea | 5b441884929b39086fa605f28686daad2c201470 | refs/heads/master | 2021-01-01T15:36:38.781979 | 2015-12-28T20:42:07 | 2015-12-28T20:42:07 | 2,710,814 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,511 | py | """ User configuration file for IPython
This is a more flexible and safe way to configure ipython than *rc files
(ipythonrc, ipythonrc-pysh etc.)
This file is always imported on ipython startup. You can import the
ipython extensions you need here (see IPython/Extensions directory).
Feel free to edit this file to customize your ipython experience.
Note that as such this file does nothing, for backwards compatibility.
Consult e.g. file 'ipy_profile_sh.py' for an example of the things
you can do here.
See http://ipython.scipy.org/moin/IpythonExtensionApi for detailed
description on what you could do here.
"""
# Most of your config files and extensions will probably start with this import
import IPython.ipapi
ip = IPython.ipapi.get()
# You probably want to uncomment this if you did %upgrade -nolegacy
# import ipy_defaults
import os
def main():
    """Per-user IPython startup configuration: set a couple of options and
    load the virtualenv helper. Everything else is left as commented-out
    examples for the user to enable."""
    # uncomment if you want to get ipython -p sh behaviour
    # without having to use command line switches
    # import ipy_profile_sh
    # Configure your favourite editor?
    # Good idea e.g. for %edit os.path.isfile
    #import ipy_editors
    # Choose one of these:
    #ipy_editors.scite()
    #ipy_editors.scite('c:/opt/scite/scite.exe')
    #ipy_editors.komodo()
    #ipy_editors.idle()
    # ... or many others, try 'ipy_editors??' after import to see them
    # Or roll your own:
    #ipy_editors.install_editor("c:/opt/jed +$line $file")
    o = ip.options
    # An example on how to set options
    #o.autocall = 1
    o.system_verbose = 0
    #import_all("os sys")
    #execf('~/_ipython/ns.py')
    # -- prompt
    # A different, more compact set of prompts from the default ones, that
    # always show your current location in the filesystem:
    #o.prompt_in1 = r'\C_LightBlue[\C_LightCyan\Y2\C_LightBlue]\C_Normal\n\C_Green|\#>'
    #o.prompt_in2 = r'.\D: '
    #o.prompt_out = r'[\#] '
    # Try one of these color settings if you can't read the text easily
    # autoexec is a list of IPython commands to execute on startup
    o.autoexec.append('%colors LightBG')
    #o.autoexec.append('%colors NoColor')
    #o.autoexec.append('%colors Linux')
    # for sane integer division that converts to float (1/2 == 0.5)
    #o.autoexec.append('from __future__ import division')
    # For %tasks and %kill
    #import jobctrl
    # For autoreloading of modules (%autoreload, %aimport)
    #import ipy_autoreload
    # For winpdb support (%wdb)
    #import ipy_winpdb
    # For bzr completer, requires bzrlib (the python installation of bzr)
    #ip.load('ipy_bzr')
    # Tab completer that is not quite so picky (i.e.
    # "foo".<TAB> and str(2).<TAB> will work). Complete
    # at your own risk!
    #import ipy_greedycompleter
    # If you are on Linux, you may be annoyed by
    # "Display all N possibilities? (y or n)" on tab completion,
    # as well as the paging through "more". Uncomment the following
    # lines to disable that behaviour
    #import readline
    #readline.parse_and_bind('set completion-query-items 1000')
    #readline.parse_and_bind('set page-completions no')
    # Load the per-user virtualenv integration into the IPython namespace.
    execf("~/.ipython/virtualenv.py")
# some config helper functions you can use
def import_all(modules):
    """Star-import every module named in the whitespace-separated string.

    Usage: import_all("os sys")
    """
    for module_name in modules.split():
        ip.ex("from %s import *" % module_name)
def execf(fname):
    """Execute a file (with ~ expanded) in the IPython user namespace."""
    expanded = os.path.expanduser(fname)
    ip.ex('execfile("%s")' % expanded)
# Apply the configuration immediately when IPython imports this file.
main()
| [
"escalant3@gmail.com"
] | escalant3@gmail.com |
9c4bab3b48be93ed860e10acba9161ddcdec7dd5 | 3de97a4bbe114e5ca809ba570de825ceb151d81b | /01_Complete-Python-3-Bootcamp-Practice/06-Modules and Packages/ex02/myprogram.py | 70fe306b0dd8204b3eaaf16e91bb92a372dd62be | [] | no_license | duvvurupriyanka/Python | 699c19326b73a80d8170f51d52cb9156d53c6bb7 | cab669a3b18203449054dfd2b27f89f233c13825 | refs/heads/master | 2020-07-07T03:37:26.470997 | 2019-10-17T21:59:45 | 2019-10-17T21:59:45 | 203,233,399 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | from MyMainPackage import my_main_script
from MyMainPackage.Subpackage import sub_script
# Exercise both package levels: one function from the top-level package
# module and one from the subpackage module.
my_main_script.main_func()
sub_script.sub_func()
| [
"duvvurupriyanka@gmail.com"
] | duvvurupriyanka@gmail.com |
14e9b8763448d24bfb6298dce83da3a2385c6ca9 | a7cf0faff89d15f364e5e245709392bcdfbb57bc | /tuple4.py | 1623d0fd81cbdde1fef9de2f3ff3e09700c551fc | [] | no_license | sanidhya12345/Python_lab | 3e5edd3ecdd42b45016d635d556c4377f2d319a6 | a90e1122de256d99a65abe83ac146e1eff2ea67e | refs/heads/master | 2020-12-28T14:29:42.365982 | 2020-10-01T05:24:49 | 2020-10-01T05:24:49 | 238,370,287 | 2 | 1 | null | 2020-10-03T06:39:36 | 2020-02-05T04:48:53 | Python | UTF-8 | Python | false | false | 35 | py | t=(1,2,3,4,5,6,7,8,9)
# Negative index -1 selects the last element of the tuple (prints 9).
print(t[-1])
| [
"noreply@github.com"
] | noreply@github.com |
c2e730e85cb3784c6adfc50de37cf914ef909f5f | d784870a828760c910899bdfdfe075cf14e765bf | /S3FS/bin/rst2latex.py | e97c9c3605ec3c01b828472e3b703f1456393e0b | [] | no_license | Novandev/EasyS3FS | 61d91bb5046f2edb00ae0f0312e9884b31bdc497 | fa2128d4a80c963c933ec32e872f5b46c5b40528 | refs/heads/master | 2020-03-08T07:37:20.236148 | 2018-04-25T20:48:13 | 2018-04-25T20:48:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 837 | py | #!/Users/donovanadams/Desktop/GitHub/EasyS3FS/S3FS/bin/python3.6
# $Id: rst2latex.py 5905 2009-04-16 12:04:49Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing LaTeX.
"""
# Locale setup is best-effort: fall back silently to the default "C" locale.
# IMPROVED: narrowed the bare `except:` so KeyboardInterrupt/SystemExit
# raised during startup are no longer swallowed.
try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except Exception:
    pass

from docutils.core import publish_cmdline

description = ('Generates LaTeX documents from standalone reStructuredText '
               'sources. '
               'Reads from <source> (default is stdin) and writes to '
               '<destination> (default is stdout). See '
               '<http://docutils.sourceforge.net/docs/user/latex.html> for '
               'the full reference.')

# Hand everything over to the Docutils command-line publisher.
publish_cmdline(writer_name='latex', description=description)
| [
"donovan.adams@students.makeschool.com"
] | donovan.adams@students.makeschool.com |
b94b33eee424c11a30b3cdc6afe55de6c68ac690 | 9a336624952a794a0cc1674e5782286f54ad39d9 | /cubista3.py | 464185f30100000924df075de7af81fc52bf5ebc | [] | no_license | JNBIMI/QVistaMio | 282a3362c5801360d8de615e76954171e927690a | 8a8e6274245b851099613df0e1d06810c2418dd0 | refs/heads/master | 2022-09-28T15:01:40.180735 | 2020-06-03T08:06:58 | 2020-06-03T08:06:58 | 267,923,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,472 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Cubista3.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1233, 777)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setSpacing(0)
self.verticalLayout.setObjectName("verticalLayout")
self.frame_3 = QtWidgets.QFrame(self.centralwidget)
self.frame_3.setMinimumSize(QtCore.QSize(0, 40))
self.frame_3.setMaximumSize(QtCore.QSize(16777215, 40))
self.frame_3.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame_3.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_3.setLineWidth(0)
self.frame_3.setObjectName("frame_3")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.frame_3)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setSpacing(0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.frame_11 = QtWidgets.QFrame(self.frame_3)
self.frame_11.setMinimumSize(QtCore.QSize(250, 0))
self.frame_11.setMaximumSize(QtCore.QSize(250, 16777215))
self.frame_11.setStyleSheet("background-color:#465A63;")
self.frame_11.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame_11.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_11.setLineWidth(0)
self.frame_11.setObjectName("frame_11")
self.horizontalLayout.addWidget(self.frame_11)
self.frame_12 = QtWidgets.QFrame(self.frame_3)
self.frame_12.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame_12.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_12.setLineWidth(0)
self.frame_12.setObjectName("frame_12")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.frame_12)
self.verticalLayout_4.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_4.setSpacing(0)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.frame_15 = QtWidgets.QFrame(self.frame_12)
self.frame_15.setStyleSheet("background-color: #79909B;\n"
"color: #FFFFFF")
self.frame_15.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame_15.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_15.setLineWidth(0)
self.frame_15.setObjectName("frame_15")
self.horizontalLayout_4 = QtWidgets.QHBoxLayout(self.frame_15)
self.horizontalLayout_4.setContentsMargins(10, 1, 1, 1)
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.leTitolProjecte = QtWidgets.QLineEdit(self.frame_15)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(14)
self.leTitolProjecte.setFont(font)
self.leTitolProjecte.setStyleSheet("background-color: #F9F9F9 transparent;\n"
" color: #38474F;\n"
" margin: 0px;\n"
" border: 0px;")
self.leTitolProjecte.setObjectName("leTitolProjecte")
self.horizontalLayout_4.addWidget(self.leTitolProjecte)
self.lblTitolProjecte = QtWidgets.QPushButton(self.frame_15)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lblTitolProjecte.sizePolicy().hasHeightForWidth())
self.lblTitolProjecte.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(14)
self.lblTitolProjecte.setFont(font)
self.lblTitolProjecte.setStyleSheet("background-color: #F9F9F9 transparent;\n"
"color: white;\n"
"margin: 0px;\n"
"border: 0px;\n"
"text-align: left;")
self.lblTitolProjecte.setObjectName("lblTitolProjecte")
self.horizontalLayout_4.addWidget(self.lblTitolProjecte)
self.leCercaPerAdreca = QtWidgets.QLineEdit(self.frame_15)
self.leCercaPerAdreca.setObjectName("leCercaPerAdreca")
self.horizontalLayout_4.addWidget(self.leCercaPerAdreca)
self.leNumCerca = QtWidgets.QLineEdit(self.frame_15)
self.leNumCerca.setObjectName("leNumCerca")
self.horizontalLayout_4.addWidget(self.leNumCerca)
self.bCercaPerAdreca = QtWidgets.QPushButton(self.frame_15)
self.bCercaPerAdreca.setText("")
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("Imatges/cm_cercar2.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.bCercaPerAdreca.setIcon(icon)
self.bCercaPerAdreca.setObjectName("bCercaPerAdreca")
self.horizontalLayout_4.addWidget(self.bCercaPerAdreca)
self.lSpacer = QtWidgets.QLabel(self.frame_15)
self.lSpacer.setObjectName("lSpacer")
self.horizontalLayout_4.addWidget(self.lSpacer)
self.botoMetadades = QtWidgets.QPushButton(self.frame_15)
self.botoMetadades.setText("")
self.botoMetadades.setObjectName("botoMetadades")
self.horizontalLayout_4.addWidget(self.botoMetadades)
self.botoVeureLlegenda = QtWidgets.QPushButton(self.frame_15)
self.botoVeureLlegenda.setMinimumSize(QtCore.QSize(24, 24))
self.botoVeureLlegenda.setStyleSheet("")
self.botoVeureLlegenda.setText("")
self.botoVeureLlegenda.setObjectName("botoVeureLlegenda")
self.horizontalLayout_4.addWidget(self.botoVeureLlegenda)
self.botoMapeta = QtWidgets.QPushButton(self.frame_15)
self.botoMapeta.setText("")
self.botoMapeta.setObjectName("botoMapeta")
self.horizontalLayout_4.addWidget(self.botoMapeta)
self.botoObrirQGis = QtWidgets.QPushButton(self.frame_15)
self.botoObrirQGis.setMinimumSize(QtCore.QSize(24, 24))
self.botoObrirQGis.setStyleSheet("")
self.botoObrirQGis.setText("")
self.botoObrirQGis.setObjectName("botoObrirQGis")
self.horizontalLayout_4.addWidget(self.botoObrirQGis, 0, QtCore.Qt.AlignRight)
self.botoReload = QtWidgets.QPushButton(self.frame_15)
self.botoReload.setText("")
self.botoReload.setObjectName("botoReload")
self.horizontalLayout_4.addWidget(self.botoReload)
self.botoDesarProjecte = QtWidgets.QPushButton(self.frame_15)
self.botoDesarProjecte.setMinimumSize(QtCore.QSize(24, 0))
self.botoDesarProjecte.setStyleSheet("")
self.botoDesarProjecte.setText("")
self.botoDesarProjecte.setObjectName("botoDesarProjecte")
self.horizontalLayout_4.addWidget(self.botoDesarProjecte)
self.botoFavorits = QtWidgets.QPushButton(self.frame_15)
self.botoFavorits.setText("")
self.botoFavorits.setObjectName("botoFavorits")
self.horizontalLayout_4.addWidget(self.botoFavorits)
self.verticalLayout_4.addWidget(self.frame_15)
self.horizontalLayout.addWidget(self.frame_12)
self.frame_13 = QtWidgets.QFrame(self.frame_3)
self.frame_13.setMinimumSize(QtCore.QSize(40, 0))
self.frame_13.setMaximumSize(QtCore.QSize(40, 16777215))
self.frame_13.setStyleSheet("background-color: #465A63;")
self.frame_13.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame_13.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_13.setLineWidth(0)
self.frame_13.setObjectName("frame_13")
self.horizontalLayout.addWidget(self.frame_13)
self.verticalLayout.addWidget(self.frame_3)
self.frame_9 = QtWidgets.QFrame(self.centralwidget)
self.frame_9.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame_9.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_9.setLineWidth(0)
self.frame_9.setObjectName("frame_9")
self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.frame_9)
self.verticalLayout_5.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_5.setSpacing(0)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.frame_16 = QtWidgets.QFrame(self.frame_9)
self.frame_16.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame_16.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_16.setLineWidth(0)
self.frame_16.setObjectName("frame_16")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.frame_16)
self.horizontalLayout_2.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_2.setSpacing(0)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.frameLlegenda = QtWidgets.QFrame(self.frame_16)
self.frameLlegenda.setMinimumSize(QtCore.QSize(250, 0))
self.frameLlegenda.setMaximumSize(QtCore.QSize(250, 16777215))
self.frameLlegenda.setStyleSheet("background-color: #DDDDDD")
self.frameLlegenda.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frameLlegenda.setFrameShadow(QtWidgets.QFrame.Raised)
self.frameLlegenda.setLineWidth(0)
self.frameLlegenda.setObjectName("frameLlegenda")
self.horizontalLayout_2.addWidget(self.frameLlegenda)
self.frameCentral = QtWidgets.QFrame(self.frame_16)
self.frameCentral.setStyleSheet("")
self.frameCentral.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frameCentral.setFrameShadow(QtWidgets.QFrame.Sunken)
self.frameCentral.setLineWidth(0)
self.frameCentral.setObjectName("frameCentral")
self.horizontalLayout_2.addWidget(self.frameCentral)
self.frame_19 = QtWidgets.QFrame(self.frame_16)
self.frame_19.setMinimumSize(QtCore.QSize(40, 0))
self.frame_19.setMaximumSize(QtCore.QSize(40, 16777215))
self.frame_19.setStyleSheet("background-color: #DDDDDD\n"
"")
self.frame_19.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame_19.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_19.setLineWidth(0)
self.frame_19.setObjectName("frame_19")
self.gridLayout = QtWidgets.QGridLayout(self.frame_19)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setSpacing(0)
self.gridLayout.setObjectName("gridLayout")
self.lytBotoneraLateral = QtWidgets.QVBoxLayout()
self.lytBotoneraLateral.setContentsMargins(8, 8, 8, 8)
self.lytBotoneraLateral.setSpacing(10)
self.lytBotoneraLateral.setObjectName("lytBotoneraLateral")
self.gridLayout.addLayout(self.lytBotoneraLateral, 0, 0, 1, 1)
self.horizontalLayout_2.addWidget(self.frame_19)
self.frameCentral.raise_()
self.frameLlegenda.raise_()
self.frame_19.raise_()
self.verticalLayout_5.addWidget(self.frame_16)
self.frame_2 = QtWidgets.QFrame(self.frame_9)
self.frame_2.setMinimumSize(QtCore.QSize(250, 2))
self.frame_2.setMaximumSize(QtCore.QSize(16777215, 2))
self.frame_2.setStyleSheet("background-color: #DDDDDD")
self.frame_2.setFrameShape(QtWidgets.QFrame.NoFrame)
self.frame_2.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_2.setLineWidth(0)
self.frame_2.setObjectName("frame_2")
self.horizontalLayout_6 = QtWidgets.QHBoxLayout(self.frame_2)
self.horizontalLayout_6.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_6.setSpacing(0)
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.verticalLayout_5.addWidget(self.frame_2)
self.verticalLayout.addWidget(self.frame_9)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1233, 34))
self.menubar.setMinimumSize(QtCore.QSize(0, 0))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(56, 71, 79))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 106, 118))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(70, 88, 98))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(28, 35, 39))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(37, 47, 52))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(56, 71, 79))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(56, 71, 79))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(28, 35, 39))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(56, 71, 79))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 106, 118))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(70, 88, 98))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(28, 35, 39))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(37, 47, 52))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(56, 71, 79))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(56, 71, 79))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(28, 35, 39))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(56, 71, 79))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 106, 118))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(70, 88, 98))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(28, 35, 39))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(37, 47, 52))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(56, 71, 79))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(56, 71, 79))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(56, 71, 79))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.menubar.setPalette(palette)
font = QtGui.QFont()
font.setFamily("Segoe UI Light")
font.setPointSize(15)
self.menubar.setFont(font)
self.menubar.setStyleSheet("background-color: rgb(56, 71, 79);\n"
"color: rgb(255, 255, 255);\n"
"")
self.menubar.setNativeMenuBar(False)
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.lblTitolProjecte.setToolTip(_translate("MainWindow", "<html><head/><body><p><span style=\" font-size:10pt; color:#38474f;\">Feu clic per canviar el títol del projecte</span></p></body></html>"))
self.lblTitolProjecte.setText(_translate("MainWindow", "Qualificacions urbanístiques i suspensions"))
self.lSpacer.setText(_translate("MainWindow", "TextLabel"))
self.botoVeureLlegenda.setToolTip(_translate("MainWindow", "<html><head/><body><p>Mostrar/ocultar llegenda</p></body></html>"))
self.botoMapeta.setToolTip(_translate("MainWindow", "<html><head/><body><p>Mostrar/ocultar mapa de situació</p></body></html>"))
self.botoObrirQGis.setToolTip(_translate("MainWindow", "<html><head/><body><p>Obrir projecte en QGIS</p></body></html>"))
self.botoReload.setToolTip(_translate("MainWindow", "<html><head/><body><p>Recarregar projecte</p></body></html>"))
self.botoDesarProjecte.setToolTip(_translate("MainWindow", "<html><head/><body><p>Desar projecte</p></body></html>"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| [
"javier.nieva.benito@gmail.com"
] | javier.nieva.benito@gmail.com |
66480d0c87e44292792f32a89d103e7e937b6bee | 37afcd9d248512303297e595c40716ecb614ab8b | /tfidf.py | 38ab7a0cd16784ed2cc6cf341e1ca3ab14705362 | [] | no_license | guoyin90/2019-sohu-finals | 093cc7bf02f826d072e064f8e271b946b6de71cb | a73104bc68a64bc80b67c94d83957c6f3d522674 | refs/heads/master | 2020-07-01T03:11:47.060416 | 2019-06-26T03:53:16 | 2019-06-26T03:53:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 568 | py | import gensim.downloader as api
from gensim.models import TfidfModel
from gensim.corpora import Dictionary
from joblib import load, dump
from sklearn.feature_extraction.text import TfidfVectorizer
import csv
train_ners = load('data/final_train_cut_v3.joblib')
test_ners = load('data/final_test_cut_v3.joblib')
nerCorpus = []
for ners in train_ners:
nerCorpus.append(' '.join(ners))
for ners in test_ners:
nerCorpus.append(' '.join(ners))
tfIdf = TfidfVectorizer()
tfIdf.fit(nerCorpus)
dump(tfIdf, 'features/final_nerTfIdf_bert_all.joblib',compress = 0) | [
"noreply@github.com"
] | noreply@github.com |
344080e25e9fc5e86575b64666d70cb7dc6f2f77 | 6779ba337409e99ba1b740ffd10972e4f4e13552 | /tensorflow_datasets/image_classification/isic2019_test.py | a656e9fd087ac9152af16da674b37e4416b34d6a | [
"Apache-2.0"
] | permissive | jmr137/datasets | 32fe843555a46a6966cf68bec97e0c51e6fd7f1b | 09276a35d75a453ec37db6e549a813e4575c796d | refs/heads/master | 2022-12-20T16:46:48.179924 | 2020-09-22T18:33:23 | 2020-09-22T18:33:23 | 297,554,966 | 0 | 0 | Apache-2.0 | 2020-09-22T06:22:05 | 2020-09-22T06:22:04 | null | UTF-8 | Python | false | false | 850 | py | """isic2019 dataset."""
import tensorflow_datasets.public_api as tfds
from tensorflow_datasets.image_classification import isic2019
class Isic2019Test(tfds.testing.DatasetBuilderTestCase):
# TODO(isic2019):
DATASET_CLASS = isic2019.Isic2019
SPLITS = {
"train": 3, # Number of fake train example
# Number of fake test example
}
# If you are calling `download/download_and_extract` with a dict, like:
# dl_manager.download({'some_key': 'http://a.org/out.txt', ...})
# then the tests needs to provide the fake output paths relative to the
# fake data directory
DL_EXTRACT_RESULT = {
"images_zip": "ISIC_2019_Training_Input.zip",
"label_csv": "ISIC_2019_Training_GroundTruth.csv",
"meta_csv": 'ISIC_2019_Training_Metadata.csv',
}
if __name__ == "__main__":
tfds.testing.test_main()
| [
"jmr137@duke.edu"
] | jmr137@duke.edu |
1aa1112d089891e91e257655c1e42a991a974838 | e7293c737c841cba9442f96b1573eb223badd731 | /func/common.py | 2c6c24d0539e5114903874beac29f651bb67649e | [] | no_license | KD1792035/XueQG | 50d8637f6acd9c5ce7b352dbae394c5ae92a3008 | 6362a2dbd0d53fdd7b43fa94c54f27e3e2c6d62c | refs/heads/main | 2023-08-27T13:53:11.284511 | 2021-11-01T03:01:23 | 2021-11-01T03:01:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,979 | py | import os, sys, random, time
import json, base64, pickle, requests, re
from requests.cookies import RequestsCookieJar
from configparser import ConfigParser
from func import color
from func.dingding import DingDingHandler
def get_appsyspatch():
application_path = './'
if getattr(sys, 'frozen', False):
application_path = os.path.dirname(sys.executable)
elif __file__:
application_path = os.path.split(os.path.abspath(sys.argv[0]))[0]
return application_path
def load_config(nologo = False):
if nologo == False:
print("=" * 60 + "\n" + load_logo())
else:
pass
xue_cfg = ConfigParser()
sys_patch = get_appsyspatch()
if(not os.path.exists(sys_patch + "/Config")):
os.mkdir(sys_patch + "/Config")
if(not os.path.exists(sys_patch + "/User")):
os.mkdir(sys_patch + "/User")
if(not os.path.exists(sys_patch + "/Config/config.cfg")):
print("=" * 60)
print("@启动失败,缺少配置文件: Config/config.cfg")
os._exit(0)
else:
xue_cfg.read(sys_patch + "/Config/config.cfg", encoding='utf-8')
return xue_cfg
def save_json_data(filename, filedata):
with open(filename,'w', encoding = 'utf-8') as j:
json.dump(filedata, j, indent=4, ensure_ascii=False)
def get_json_data(filename):
template_json_str = '''{}'''
if(os.path.exists(filename) and os.path.getsize(filename) != 0):
with open(filename, 'r', encoding = 'utf-8') as j:
try:
json_data = json.load(j)
except Exception as e:
print(filename, "解析错误:", str(e))
print("请检查", filename, "信息")
exit()
else:
json_data = json.loads(template_json_str)
return json_data
def check_delay(mintime = 2, maxtime = 5):
delay_time = random.randint(mintime, maxtime)
print('等待 ', delay_time, ' 秒')
time.sleep(delay_time)
def log_data(datapatch, logdata):
datapatch = get_appsyspatch() + datapatch
with open(datapatch, "a", encoding = 'utf-8') as f:
for i in logdata:
f.write(str(i) + "\n")
def get_time():
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
def sendDingDing(msg):
xue_cfg = load_config(True)
token = xue_cfg["useWS"]["DDtoken"]
secret = xue_cfg["useWS"]["DDsecret"]
ddhandler = DingDingHandler(token, secret)
ddhandler.ddmsgsend(msg, "msg")
def load_logo():
xue_logo = (" ____ ___ ________ ________ "+ "\n" +
r" \ \/ /__ __ ____ \_____ \ / _____/ " + "\n" +
r" \ /| | \_/ __ \ / / \ \/ \ ___ " + "\n" +
r" / \| | /\ ___// \_/ \ \_\ \ " + "\n" +
r" /___/\ \____/ \___ >_____\ \_/\______ /" + "\n" +
r" \_/ \/ \__> \/ ")
#xue_logo = color.cyan(xue_logo)
return xue_logo
| [
"noreply@github.com"
] | noreply@github.com |
23afbdc21f3c52e6711d6a97008f609df14f55bf | a2ad46d4995b2dbe182e645a15b7d5a7047d3b56 | /2018.12.05.provetta/all-CMS-submissions-2018-12-05/2018-12-05.12:18:30.099314.VR437605.conta_multipli.py | e4d78f797d2d4c80a6e99aaa5ded613795628fb5 | [] | no_license | romeorizzi/temi_prog_public | ccf634a0291dd943b503f8dc57ed03de4d9b1a68 | e9e3e98d4a9a3cddec45d514180b83fd5004fe7b | refs/heads/master | 2020-04-09T04:27:35.687265 | 2019-03-25T14:04:04 | 2019-03-25T14:04:04 | 160,024,031 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 725 | py | """
* user: VR437605
* fname: ANNALISA
* lname: DETTORI
* task: conta_multipli
* score: 100.0
* date: 2018-12-05 12:18:30.099314
"""
#!/usr/bin/env python3
# Template per soluzione conta_multipli
from __future__ import print_function
import sys
if sys.version_info < (3, 0):
input = raw_input # in python2, l'equivalente di input è raw_input
# Devi modificare l'implementazione di questa funzione per fare
# quanto richiesto dal testo dell'esercizio
def conta_multipli(a, b, c):
p=0
for n in range (1,c+1):
if n%a==0 and n%b!=0 :
p+=1
return p
# Lettura input: non devi modificare il codice sotto questa riga
a, b, c = map(int, input().split())
print(conta_multipli(a, b, c))
| [
"romeo.rizzi@univr.it"
] | romeo.rizzi@univr.it |
92b81bdb559bb6564ba8c3e3179d1324f1e77352 | 93748d5565259361cdf880745f5f0242a02a37e0 | /src/server/views/UserView.py | 1f1e004b0b6400c2b5bd42298272b2a9520aa706 | [
"Apache-2.0"
] | permissive | daniel3303/sirs-project | 38a8aca51a376f91564f73be6ef9e40b0fce94b5 | 38a36ecf2373775c3a866f185dacb7597ad1e3cc | refs/heads/master | 2020-04-06T19:42:11.915440 | 2018-12-13T07:13:42 | 2018-12-13T07:13:42 | 157,746,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,468 | py | from django.shortcuts import render
from django.http import HttpResponse
from django.http import JsonResponse
from django.views import View
from django.core import serializers
from django.contrib.auth import authenticate
import json
from server.models import User
class UserView(View):
def get(self, request):
users = []
for user in User.objects.all().order_by('username'):
users.append({
'id' : user.getId(),
'username' : user.getUsername(),
'name' : user.getName()
})
return JsonResponse({ "status" : "success", "users": users})
# Check user credentials
def post(self, request):
bodyUnicode = request.body.decode('utf-8')
jsonRequestData = json.loads(bodyUnicode)
# Check user authentication
username = jsonRequestData["username"]
password = jsonRequestData["password"]
user = authenticate(username=username, password=password)
if(user is None):
return JsonResponse({ "status" : "error", "message": "Autenticação falhou."})
else:
return JsonResponse({
"status" : "success",
"message": "Autenticação efectuada com sucesso.",
"username" : user.getUsername(),
"name": user.getName(),
"userId" : user.getId()
})
| [
"daniel-oliveira-11@hotmail.com"
] | daniel-oliveira-11@hotmail.com |
972a80ddb62fce85cc2a79f484049e457cf38d69 | 29cc07e52f8b31549df8900a9d3da155914c200a | /Client/Config.py | 6c229304e59517945da22d02f6bc9703bf09ea48 | [] | no_license | ingnelson/get-tunnel-py | c0a8a87054b60b81e351b6356212e6ba2a4740ae | 2560a950b944ba9586a42d3fca064ecdc36bddc4 | refs/heads/master | 2021-10-24T03:41:29.102157 | 2019-03-21T19:49:25 | 2019-03-21T19:49:25 | 261,303,786 | 1 | 0 | null | 2020-05-04T21:52:31 | 2020-05-04T21:52:30 | null | UTF-8 | Python | false | false | 476 | py | import base64
class Config:
def __init__(self, target, hostHeader, password, serverHostPort):
self.target = target
if len(hostHeader) == 0:
self.hostHeader = None
else:
self.hostHeader = hostHeader
if len(password) == 0:
self.password = None
else:
self.password = base64.b64encode(password)
self.serverHostPort = serverHostPort
self.userAgent = 'GetTunnelClient' | [
"ailton.novais@corregedoria.df"
] | ailton.novais@corregedoria.df |
122f1143e542f77ec9a512facbdde9cdf4939450 | 15d6a910b23d2cc0f1f7c634b7c0988d7a2dafb3 | /claire-f/2014_10_14CF.py | 70977eb6c70c7e01ff23fb22f7c8d73e00e87233 | [] | no_license | gitthabet/MS-BGD | c523451bb63a1723abccebe4086f0ba073999f41 | a157ba583b8a0f5bf8142e324a485bb7e1ee5f5f | refs/heads/master | 2021-07-08T12:33:17.933768 | 2016-12-30T00:28:24 | 2016-12-30T00:28:24 | 24,846,974 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,606 | py | import unittest
# Given a string and a non-negative int n, return a larger string
# that is n copies of the original string.
def string_times(string, n):
i = 1
newstring = string
while i < n:
newstring = newstring + string
i = i+1
# print newstring
return newstring
# Given an array of ints, return True if one of the first 4 elements
# in the array is a 9. The array length may be less than 4.
def array_front9(nums):
bool = False
end=len(nums)
if end > 4:
end = 4
i=0
for i in range(end):
if nums[i] == 9:
bool = True
return bool
# Given a string, return the count of the number of times
# that a substring length 2 appears in the string and also as
# the last 2 chars of the string, so "hixxxhi" yields 1 (we won't count the end substring).
def last2(string):
pass
# Here's our "unit tests".
class Lesson1Tests(unittest.TestCase):
def testArrayFront9(self):
self.assertEqual(array_front9([1, 2, 9, 3, 4]) , True)
self.assertEqual(array_front9([1, 2, 3, 4, 9]) , False)
self.assertEqual(array_front9([1, 2, 3, 4, 5]) , False)
def testStringTimes(self):
self.assertEqual(string_times('Hel', 2),'HelHel' )
self.assertEqual(string_times('Toto', 1),'Toto' )
self.assertEqual(string_times('P', 4),'PPPP' )
def testLast2(self):
self.assertEqual(last2('hixxhi') , 1)
self.assertEqual(last2('xaxxaxaxx') , 1)
self.assertEqual(last2('axxxaaxx') , 2)
def main():
unittest.main()
if __name__ == '__main__':
main()
| [
"claire.feldman@telecom-paristech.fr"
] | claire.feldman@telecom-paristech.fr |
e82b8f60c7041d1ec12de66c1232989ec0bbaae1 | 896794d2ac6cd846ab81bee896831c2186897c7e | /qa/rpc-tests/merkle_blocks.py | 6b00f7bbf63642fb2e0f3addc36927e15775bf17 | [
"MIT"
] | permissive | pepcash/Pepcash-MN | d15fa60db6a9488f50eea400a5de2d7bae0c193c | 1ee89bc76f7f4f6354b7419a20c8532bbbc816d5 | refs/heads/master | 2020-03-19T14:49:29.694653 | 2018-06-09T09:25:58 | 2018-06-09T09:25:58 | 136,641,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,055 | py | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test merkleblock fetch/validation
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class MerkleBlockTest(BitcoinTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 4)
def setup_network(self):
self.nodes = []
# Nodes 0/1 are "wallet" nodes
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug"]))
# Nodes 2/3 are used for testing
self.nodes.append(start_node(2, self.options.tmpdir, ["-debug"]))
self.nodes.append(start_node(3, self.options.tmpdir, ["-debug", "-txindex"]))
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[0], 2)
connect_nodes(self.nodes[0], 3)
self.is_network_split = False
self.sync_all()
def run_test(self):
print "Mining blocks..."
self.nodes[0].generate(105)
self.sync_all()
chain_height = self.nodes[1].getblockcount()
assert_equal(chain_height, 105)
assert_equal(self.nodes[1].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 0)
node0utxos = self.nodes[0].listunspent(1)
tx1 = self.nodes[0].createrawtransaction([node0utxos.pop()], {self.nodes[1].getnewaddress(): 500})
txid1 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransaction(tx1)["hex"])
tx2 = self.nodes[0].createrawtransaction([node0utxos.pop()], {self.nodes[1].getnewaddress(): 500})
txid2 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransaction(tx2)["hex"])
assert_raises(JSONRPCException, self.nodes[0].gettxoutproof, [txid1])
self.nodes[0].generate(1)
blockhash = self.nodes[0].getblockhash(chain_height + 1)
self.sync_all()
txlist = []
blocktxn = self.nodes[0].getblock(blockhash, True)["tx"]
txlist.append(blocktxn[1])
txlist.append(blocktxn[2])
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1])), [txid1])
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2])), txlist)
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2], blockhash)), txlist)
txin_spent = self.nodes[1].listunspent(1).pop()
tx3 = self.nodes[1].createrawtransaction([txin_spent], {self.nodes[0].getnewaddress(): 500})
self.nodes[0].sendrawtransaction(self.nodes[1].signrawtransaction(tx3)["hex"])
self.nodes[0].generate(1)
self.sync_all()
txid_spent = txin_spent["txid"]
txid_unspent = txid1 if txin_spent["txid"] != txid1 else txid2
# We cant find the block from a fully-spent tx
# Doesn't apply to PepCash Core - we have txindex always on
# assert_raises(JSONRPCException, self.nodes[2].gettxoutproof, [txid_spent])
# ...but we can if we specify the block
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid_spent], blockhash)), [txid_spent])
# ...or if the first tx is not fully-spent
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid_unspent])), [txid_unspent])
try:
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2])), txlist)
except JSONRPCException:
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid2, txid1])), txlist)
# ...or if we have a -txindex
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[3].gettxoutproof([txid_spent])), [txid_spent])
if __name__ == '__main__':
MerkleBlockTest().main()
| [
"marquisogre@gmail.com"
] | marquisogre@gmail.com |
c413695ffeb36ad73b5fc8d1b9c1bb011a87c2c6 | 1e9ca32f350f1434eaea85dad10af45d64d7fa00 | /conf_create_symlinks.py | 394dd0598d5d235de8adab67b4135e31d1705819 | [] | no_license | beneills/configuration | ffbc388e47eaf63cca580c6e458d13461f1d7512 | 0feb42037057fb0df584fb557809e23787146ced | refs/heads/master | 2021-01-10T20:04:27.341608 | 2018-12-14T17:16:31 | 2018-12-14T17:16:31 | 7,356,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 970 | py | #!/usr/bin/env python
import os
import os.path
IGNORE_FILES = ( 'conf_create_symlinks.py', 'conf_add.py')
SOURCE_DIR = '/home/ben/files/conf'
TARGET_DIR = '/home/ben'
entries = [f for f in os.listdir(SOURCE_DIR) if f not in IGNORE_FILES and not f.startswith('.')]
count = 0
for f in entries:
source_path = os.path.join(SOURCE_DIR, f)
target_path = os.path.join(TARGET_DIR, '.' + f)
if os.path.exists(target_path):
type = "file"
if os.path.islink(target_path):
type = "link"
elif os.path.isdir(target_path):
type = "directory"
print "Skipping '{0}': {1} exists.".format(f, type)
else:
try:
print "Linking {1} -> {0}".format(source_path, target_path)
os.symlink(source_path, target_path)
count += 1
except OSError as e:
print "Error creating sylink: {0}".format(e)
print "Created {0} symlinks in {1}".format(count, TARGET_DIR)
| [
"ben@beneills.com"
] | ben@beneills.com |
497b09aec342a86f55cb820435ec603f2aab872a | 07fbdae51275b4bab2074524fc4c1ae58ac53d08 | /List's/Lists Basics/Exercise/Solutions/10. Bread Factory.py | ef0ba277e0e12230313d1b24fb7eeaa1489595d6 | [] | no_license | rimisarK-blue/Python-Fundamental-knowledge | 85c2afa4401f848c9919f672c7fa3d54a43e761f | a182fb1c7c3ce11f9e26ce0afefe5c2069d70e8d | refs/heads/main | 2023-03-09T02:08:34.411768 | 2021-02-15T20:19:52 | 2021-02-15T20:19:52 | 326,009,707 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,155 | py |
events = input().split('|')
energy = 100
coins = 100
good_day = True
for com in events:
command, value = com.split('-')
value = int(value)
if command == 'rest':
if energy == 100:
print("You gained 0 energy.")
print(f"Current energy: 100.")
elif energy + value > 100:
print(f"You gained {value} energy.")
print(f"Current energy: 100.")
else:
energy += value
print(f"You gained {value} energy.")
print(f"Current energy: {energy}.")
elif command == 'order':
if energy >= 30:
energy -= 30
coins += value
print(f"You earned {value} coins.")
else:
energy += 50
print("You had to rest!")
else:
if coins - value > 0:
coins -= value
print(f"You bought {command}.")
else:
good_day = False
print(f"Closed! Cannot afford {command}.")
break
if good_day and coins > 0 and energy > 0:
print("Day completed!")
print(f"Coins: {coins}")
print(f"Energy: {energy}")
| [
"rimisark92@gmail.com"
] | rimisark92@gmail.com |
095aa9c799dfc36066b3dcd4c9527c9ba3197cfc | 9c89fc160f6a8d3c55a58385e8e24bb1d0d5e508 | /helpers/psc.py | ad2ceb050b7736c20fb8d57c11648afd11fd046e | [] | no_license | mpmbq2/StimPy | 72559a84e0a4670ba5c2aaa9cdda870d073d24f8 | ff47e2a619f744d2e233caae74d5a0ed1a1c011d | refs/heads/master | 2020-12-31T07:32:31.964213 | 2018-06-13T16:53:01 | 2018-06-13T16:53:01 | 80,551,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,788 | py | import numpy as np
import pandas as pd
import stf
import stfio
"""Pseudocode describing the PSC analysis process to be used:
1) PSC object created by user, pointing to MetaData object
2) Psc.proceed() method called to open first file in list
3) User prompted to select traces to use for channel 1 cell 1
4) User prompted to set baseline, peak, and fit cursors for channel 1 cell 1
4.1) User enters 'measure' to save locations
5) User prompted to select traces to use for channel 1 cell 2
6) User prompted to set baseline, peak, and fit cursors for channel 1 cell 2
6.1) User enters 'measure' to save locations
7) User prompted to select traces to use for channel 2 cell 1
8) User prompted to set baseline, peak, and fit cursors for channel 2 cell 1
8.1) User enters 'measure' to save locations
9) User prompted to select traces to use for channel 2 cell 2
10) User prompted to set baseline, peak, and fit cursors for channel 2 cell 2
10.1) User enters 'measure' to save locations
11) Baseline is measured at all 4 conditions, stored as 'holding_current'
12) Peaks are measured by subtracting 'holding_current' from stf.get_peak for all traces (in for loop)
13) Fits are obtained using stf.leastsq(0), and 'Tau_0' is saved
But for now, just have user set baseline, peaks, and fit. Measure all selected, average together, and print to Table
It would also be useful to write the I/O function to read in files using a pd.DataFrame. Maybe create a thin wrapper
object with a state variable that is the index of the next file to be read. calling object.next() would advance
the state variable and open the file. Either use neo.AxonIO or stfio depending on which works.
"""
| [
"mpmbq2@gmail.com"
] | mpmbq2@gmail.com |
cd9cfd4cc9d87b805776c185af32ddc03ec9aa44 | 5cdaff662ccbdf0d6a17f9796352c923bde1f590 | /mysql/zsgc.py | d92dafe50f13e67a702f88de33608db90c4d4200 | [] | no_license | renxiaoqi/JingDong | bcf7fc418d6c9adcc23cf39b048d929c9f01995b | 8b9fb0dab6ea26dec55472bb750258a827aba74b | refs/heads/master | 2020-09-29T09:44:25.482051 | 2019-12-10T03:19:51 | 2019-12-10T03:19:51 | 227,012,840 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,545 | py | import pymysql
class Util:
__conn = None
__cursor = None
# 创建连接
@classmethod
def __get_conn(cls):
if cls.__conn is None:
cls.__conn = pymysql.connect(host="127.0.0.1",
user="root",
password="root",
port=3306,
database="books",
charset="utf8")
return cls.__conn
# 获取游标
@classmethod
def __get_cursor(cls):
if cls.__cursor is None:
cls.__cursor = Util.__get_conn().cursor()
return cls.__cursor
# 执行sql语句
@classmethod
def run_sql(cls,sql):
Util.__get_cursor()
try:
if sql.split()[0] == "select":
cls.__cursor.execute(sql)
return cls.__cursor.fetchall()
else:
raw = cls.__cursor.execute(sql)
cls.__conn.commit()
return raw
except:
cls.__conn.rollback()
raise
finally:
Util.__close_cursor()
Util.__close_conn()
# 关闭游标
@classmethod
def __close_cursor(cls):
if cls.__cursor is not None:
cls.__cursor.close()
cls.__cursor = None
# 关闭连接
@classmethod
def __close_conn(cls):
if cls.__conn is not None:
cls.__conn.close()
cls.__conn = None
| [
"renxiaoqi1222@163.com"
] | renxiaoqi1222@163.com |
8c4e25032a017464274c3783f28d6988a1017590 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_135/3673.py | 56a2a64feb4db88351e2187df1ddbb45f569ef30 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 583 | py |
import sys
def t_process():
n1 = int(sys.stdin.readline())
n1 -= 1
n1_matrix = [set(map(int, sys.stdin.readline().split())) for _ in range(4)]
n2 = int(sys.stdin.readline())
n2 -= 1
n2_matrix = [set(map(int, sys.stdin.readline().split())) for _ in range(4)]
sol = list(n1_matrix[n1].intersection(n2_matrix[n2]))
if len(sol) > 1:
return "Bad magician!"
if len(sol) == 0:
return "Volunteer cheated!"
if len(sol) == 1:
return int(sol[0])
def main():
t = int(sys.stdin.readline())
for k in range(1, t + 1):
print("Case #{0}: {1}".format(k, t_process()))
main()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
18f628bb34251a5e0265796a5d0ad8c3fc787498 | ca505019c94aec2a3f02675147689f9025fffc75 | /class_0601_리스트.py | 67df47354a402af6c2a68cc9dcf95b161d287803 | [] | no_license | broship0821/pythonbasic | f7efef7ee1e2b4f2e69dfca6d2cdb1689680b186 | 891c713d6c1cd3c9863b8fbb0f5b3493f077731f | refs/heads/master | 2022-10-21T01:56:20.068706 | 2020-06-15T07:22:31 | 2020-06-15T07:22:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,743 | py | # ###리스트
# 리스트는 데이터의 목록을 다루는 자료형
# []대괄호로 명명한다
# 리스트 안에는 어떠한 자료형도 포함시킬수 있음 C는 같은 자료형만 가능
# 변수가 많아지면 관리해야할 사항이 많아지고 실수할 확률이 높아짐
# 리스트는 연속적으로 되있어서 데이터 가져오기 편함
# 리스트를 가져올때는 인덱스를 사용 0번부터
# ls = [500, 200, 300, 400]
# Sum = 0
# print("ls:", ls)
# print("ls[0]:", ls[0])
# print("ls[1]:", ls[1])
# print("ls[2]:", ls[2])
# print("ls[3]:", ls[3])
# #맨 오른쪽이 -1 맨 왼쪽은 -n임
# ls = [500, 200, 300, 400]
# Sum = 0
# print("ls:", ls)
# print("ls[0]:", ls[-4])
# print("ls[1]:", ls[-3])
# print("ls[2]:", ls[-2])
# print("ls[3]:", ls[-1])
# ls = [0, 0, 0, 0] #박스를 생성해주는 일, 0이 아닌 다른게 들어가도 상관없음
# Sum = 0
# ls[0] = int(input("1번째 숫자 입력:"))
# ls[1] = int(input("2번째 숫자 입력:"))
# ls[2] = int(input("3번째 숫자 입력:"))
# ls[3] = int(input("4번째 숫자 입력:"))
# Sum = ls[0] + ls[1] + ls[2] + ls[3]
# print("ls[0]:", ls[0])
# print("ls[1]:", ls[1])
# print("ls[2]:", ls[2])
# print("ls[3]:", ls[3])
# print("리스트의 합: %d" % Sum)
# ls = [0, 0, 0, 0]
# Sum = 0
# print("len(ls):", len(ls))
# for i in range (len(ls)):
# ls[i] = int(input(str(i+1)+"번째 숫자 입력:"))
# Sum += ls[i]
# for i in range(len(ls)):
# print("ls[%d]:" % i, ls[i])
# print("리스트의 합:", Sum)
# ls = [10, 20, 30, 40]
# print("ls:", ls)
# print()
# print("ls[1:3] => ls[1]~[2]:", ls[1:3])
# print("ls[0:3] => ls[0]~[2]:", ls[0:3])
# print("ls[2:] => ls[2] ~ [끝까지]", ls[2:]) #비워두나 전체길이보다 큰 숫자를 적으면 끝까지
# print("ls[:2] => ls[0] ~ [1]", ls[:2])
#[:]우측은 그 전까지, 좌측은 그 숫자 포함
#### 중요 ####
# ## 리스트[얕은 복사] 데이터 하나를 공유
# ls = [10, 20, 30, 40]
# arr = ls
# print("ls: {}ls, id: {}".format(ls,id(ls)))
# print("arr: {}arr, id: {}".format(arr,id(arr)))
# #arr = ls = [10,20,30,40] 불리는 이름만 다르지 이 둘은 같은 개체임
# ls = [10, 20, 30, 40]
# arr = ls
# arr[2] = 20000
# print("ls: {}ls, id: {}".format(ls,id(ls)))
# print("arr: {}arr, id: {}".format(arr,id(arr)))
# ## ls arr 이 둘은 서로 동기화됨
# ## 리스트[깊은 복사] 똑같은 데이터 2개
# ls = [10, 20, 30, 40]
# arr = ls[:] # arr = [10, 20, 30, 40] 이거랑 똑같은 개념임
# arr[2] = 20000
# print("ls: {}ls, id: {}".format(ls,id(ls)))
# print("arr: {}arr, id: {}".format(arr,id(arr)))
#입고, 재고, 출고
#출고랑 재고는 동기화가 되야되서 얕은 복사
#입고랑 재고는 동기화가 되면 안됨(재고=입고+재고 라고 해서 입고까지 바뀌면 안됨) 깊은 복사
# import copy # copy 라는 묘듈을 가져와라 (묘듈: 함수의 모임)
# ls = [10, 20, 30, 40]
# #arr = ls[:]
# arr = copy.deepcopy(ls)
# arr[2] = "deepcopy"
# print("ls: {}ls, id: {}".format(ls,id(ls)))
# print("arr: {}arr, id: {}".format(arr,id(arr)))
##업데이트 연산
# ls = [10, 20, 30]
# arr = [40, 50, 60]
# print("ls:", ls)
# print("arr:", arr)
# Str = ls + arr
# print("ls + arr => Str", Str)
# string = ls * 3
# print("ls * 3 => string", string)
##숫자 연산
# ls = [10, 20, 30]
# arr = [40, 50, 60]
# for i in range(len(ls)):
# ls[i] = ls[i] + arr[i]
# print(ls)
# for i in range(len(ls)):
# ls[i] = ls[i] * 3
# print(ls)
#선생님 방법
# ls = [10, 20, 30]
# arr = [40, 50, 60]
# Str = [0, 0, 0]
# string = [0, 0, 0]
# for i in range(len(ls)):
# Str[i] = ls[i] + arr[i]
# for i in range(len(ls)):
# string[i] = ls[i] * 3
# print(Str)
# print(string) | [
"gudqo1995@naver.com"
] | gudqo1995@naver.com |
911d35984b9006f922ee1d30cf3c164969880479 | a8e4e84d62caf664e973a016fda5161ef1a775dd | /utils/layers.py | ca5fa3425c7855689ad153e5025f6502c6d1e768 | [] | no_license | HJ-Xu/SPS-LCNN | 3f560d53b0c5f1ecfc33d1c301b951e755dbe47f | 4598c5ba46ebc453fa1cce079cc25b23d9c6898c | refs/heads/master | 2023-07-20T02:46:46.154668 | 2023-07-06T02:24:42 | 2023-07-06T02:24:42 | 287,871,169 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,967 | py | import tensorflow as tf
import numpy as np
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
import tf_util
############################################################################################################
# Convolution layer Methods
def __conv2d_p(name, x, w=None, num_filters=16, kernel_size=(3, 3), padding='SAME', stride=(1, 1),
initializer=tf.contrib.layers.xavier_initializer(), l2_strength=0.0, bias=0.0):
"""
Convolution 2D Wrapper
:param name: (string) The name scope provided by the upper tf.name_scope('name') as scope.
:param x: (tf.tensor) The input to the layer (N, H, W, C).
:param w: (tf.tensor) pretrained weights (if None, it means no pretrained weights)
:param num_filters: (integer) No. of filters (This is the output depth)
:param kernel_size: (integer tuple) The size of the convolving kernel.
:param padding: (string) The amount of padding required.
:param stride: (integer tuple) The stride required.
:param initializer: (tf.contrib initializer) The initialization scheme, He et al. normal or Xavier normal are recommended.
:param l2_strength:(weight decay) (float) L2 regularization parameter.
:param bias: (float) Amount of bias. (if not float, it means pretrained bias)
:return out: The output of the layer. (N, H', W', num_filters)
"""
with tf.variable_scope(name):
stride = [1, stride[0], stride[1], 1]
kernel_shape = [kernel_size[0], kernel_size[1], x.shape[-1], num_filters]
with tf.name_scope('layer_weights'):
if w == None:
w = __variable_with_weight_decay(kernel_shape, initializer, l2_strength)
__variable_summaries(w)
with tf.name_scope('layer_biases'):
if isinstance(bias, float):
bias = tf.get_variable('biases', [num_filters], initializer=tf.constant_initializer(bias))
__variable_summaries(bias)
with tf.name_scope('layer_conv2d'):
conv = tf.nn.conv2d(x, w, stride, padding)
out = tf.nn.bias_add(conv, bias)
return out
def conv2d(name, x, w=None, num_filters=16, kernel_size=(1, 1), padding='SAME', stride=(1, 1),
initializer=tf.contrib.layers.xavier_initializer(), l2_strength=0.0, bias=0.0,
activation=None, batchnorm_enabled=False, max_pool_enabled=False, dropout_keep_prob=-1,
is_training=True):
"""
This block is responsible for a convolution 2D layer followed by optional (non-linearity, dropout, max-pooling).
Note that: "is_training" should be passed by a correct value based on being in either training or testing.
:param name: (string) The name scope provided by the upper tf.name_scope('name') as scope.
:param x: (tf.tensor) The input to the layer (N, H, W, C).
:param num_filters: (integer) No. of filters (This is the output depth)
:param kernel_size: (integer tuple) The size of the convolving kernel.
:param padding: (string) The amount of padding required.
:param stride: (integer tuple) The stride required.
:param initializer: (tf.contrib initializer) The initialization scheme, He et al. normal or Xavier normal are recommended.
:param l2_strength:(weight decay) (float) L2 regularization parameter.
:param bias: (float) Amount of bias.
:param activation: (tf.graph operator) The activation function applied after the convolution operation. If None, linear is applied.
:param batchnorm_enabled: (boolean) for enabling batch normalization.
:param max_pool_enabled: (boolean) for enabling max-pooling 2x2 to decrease width and height by a factor of 2.
:param dropout_keep_prob: (float) for the probability of keeping neurons. If equals -1, it means no dropout
:param is_training: (boolean) to diff. between training and testing (important for batch normalization and dropout)
:return: The output tensor of the layer (N, H', W', C').
"""
with tf.variable_scope(name) as scope:
conv_o_b = __conv2d_p('conv', x=x, w=w, num_filters=num_filters, kernel_size=kernel_size, stride=stride,
padding=padding,
initializer=initializer, l2_strength=l2_strength, bias=bias)
if batchnorm_enabled:
conv_o_bn = tf.layers.batch_normalization(conv_o_b, training=is_training, epsilon=1e-5)
if not activation:
conv_a = conv_o_bn
else:
conv_a = activation(conv_o_bn)
else:
if not activation:
conv_a = conv_o_b
else:
conv_a = activation(conv_o_b)
def dropout_with_keep():
return tf.nn.dropout(conv_a, dropout_keep_prob)
def dropout_no_keep():
return tf.nn.dropout(conv_a, 1.0)
if dropout_keep_prob != -1:
conv_o_dr = tf.cond(is_training, dropout_with_keep, dropout_no_keep)
else:
conv_o_dr = conv_a
conv_o = conv_o_dr
if max_pool_enabled:
conv_o = max_pool_2d(conv_o_dr)
return conv_o
def grouped_conv2d(name, x, is_training, bn_decay, w=None, num_filters=16,initializer=tf.contrib.layers.xavier_initializer(), num_groups=1,activation=None, batchnorm_enabled=False, dropout_keep_prob=-1):
with tf.variable_scope(name) as scope:
sz = x.get_shape()[3].value // num_groups
# conv_side_layers = [
# conv2d(name + "_" + str(i), x[:, :, :, i * sz:i * sz + sz], w, num_filters // num_groups, kernel_size,
# padding,
# stride,
# initializer,
# l2_strength, bias, activation=None,
# batchnorm_enabled=False, max_pool_enabled=False, dropout_keep_prob=dropout_keep_prob,
# is_training=is_training) for i in
# range(num_groups)]
conv_side_layers = []
for i in range(num_groups):
conv_side=tf_util.conv2d(x[:, :, :, i * sz:i * sz + sz], num_filters//num_groups, [1,1], padding='VALID', stride=[1,1],
is_training=is_training ,scope='conv%d'%(i), bn_decay=bn_decay)
conv_side_layers.append(conv_side)
conv_g = tf.concat(conv_side_layers, axis=-1)
if batchnorm_enabled:
conv_o_bn = tf.layers.batch_normalization(conv_g, training=is_training, epsilon=1e-5)
if not activation:
conv_a = conv_o_bn
else:
conv_a = activation(conv_o_bn)
else:
if not activation:
conv_a = conv_g
else:
conv_a = activation(conv_g)
return conv_a
def __depthwise_conv2d_p(name, x, w=None, kernel_size=(3, 3), padding='SAME', stride=(1, 1),
initializer=tf.contrib.layers.xavier_initializer(), l2_strength=0.0, bias=0.0):
with tf.variable_scope(name):
stride = [1, stride[0], stride[1], 1]
kernel_shape = [kernel_size[0], kernel_size[1], x.shape[-1], 1]
with tf.name_scope('layer_weights'):
if w is None:
w = __variable_with_weight_decay(kernel_shape, initializer, l2_strength)
__variable_summaries(w)
with tf.name_scope('layer_biases'):
if isinstance(bias, float):
bias = tf.get_variable('biases', [x.shape[-1]], initializer=tf.constant_initializer(bias))
__variable_summaries(bias)
with tf.name_scope('layer_conv2d'):
conv = tf.nn.depthwise_conv2d(x, w, stride, padding)
out = tf.nn.bias_add(conv, bias)
return out
def depthwise_conv2d(name, x, w=None, kernel_size=(3, 3), padding='SAME', stride=(1, 1),
initializer=tf.contrib.layers.xavier_initializer(), l2_strength=0.0, bias=0.0, activation=None,
batchnorm_enabled=False, is_training=True):
with tf.variable_scope(name) as scope:
conv_o_b = __depthwise_conv2d_p(name='conv', x=x, w=w, kernel_size=kernel_size, padding=padding,
stride=stride, initializer=initializer, l2_strength=l2_strength, bias=bias)
if batchnorm_enabled:
conv_o_bn = tf.layers.batch_normalization(conv_o_b, training=is_training, epsilon=1e-5)
if not activation:
conv_a = conv_o_bn
else:
conv_a = activation(conv_o_bn)
else:
if not activation:
conv_a = conv_o_b
else:
conv_a = activation(conv_o_b)
return conv_a
############################################################################################################
# ShuffleNet unit methods
def shufflenet_unit(name, x, is_training, bn_decay, w=None, num_groups=1, group_conv_bottleneck=True, num_filters=16, stride=(1, 1),
l2_strength=0.0, bias=0.0, batchnorm_enabled=True, fusion='add', shuffled_list=None):
# Paper parameters. If you want to change them feel free to pass them as method parameters.
activation = tf.nn.relu
with tf.variable_scope(name) as scope:
residual = x
bottleneck_filters = (num_filters // 4) if fusion == 'add' else (num_filters - residual.get_shape()[
3].value) // 4
if len(shuffled_list) == 1:
x = tf.concat([x,shuffled_list[-1]], axis=2)
# residual = tf.concat([residual,shuffled_list[-1]], axis=2)
elif len(shuffled_list) == 2:
x = tf.concat([x,shuffled_list[-1],shuffled_list[-2]], axis=2)
# residual = tf.concat([residual,shuffled_list[-1],shuffled_list[-2]], axis=2)
# bottleneck_filters = residual.get_shape()[3].value // 2
# top, bottom = tf.split(x, num_or_size_splits=2, axis=3)
# bottleneck1 = grouped_conv2d('top', x=top, is_training=is_training, bn_decay=bn_decay, w=None, num_filters=bottleneck_filters,
# num_groups=num_groups, activation=activation,
# batchnorm_enabled=batchnorm_enabled)
# shuffled1 = channel_shuffle('channel_shuffle1', bottleneck1, num_groups)
# bottleneck2 = grouped_conv2d('bottom', x=bottom, is_training=is_training, bn_decay=bn_decay, w=None, num_filters=bottleneck_filters,
# num_groups=num_groups,activation=activation,
# batchnorm_enabled=batchnorm_enabled)
# shuffled2 = channel_shuffle('channel_shuffle2', bottleneck1, num_groups)
bottleneck = grouped_conv2d('Gbottleneck', x=x, is_training=is_training, bn_decay=bn_decay, w=None, num_filters=num_filters,
num_groups=num_groups, activation=activation,
batchnorm_enabled=batchnorm_enabled)
# shuffled = channel_shuffle('channel_shuffle', bottleneck, num_groups)
# if group_conv_bottleneck:
# bottleneck = grouped_conv2d('Gbottleneck', x=x, w=None, num_filters=bottleneck_filters, kernel_size=(1, 1),
# padding='VALID',
# num_groups=num_groups, l2_strength=l2_strength, bias=bias,
# activation=activation,
# batchnorm_enabled=batchnorm_enabled, is_training=is_training)
# shuffled = channel_shuffle('channel_shuffle', bottleneck, num_groups)
# else:
# bottleneck = conv2d('bottleneck', x=x, w=None, num_filters=bottleneck_filters, kernel_size=(1, 1),
# padding='VALID', l2_strength=l2_strength, bias=bias, activation=activation,
# batchnorm_enabled=batchnorm_enabled, is_training=is_training)
# shuffled = bottleneck
# depthwise = tf.pad(shuffled, [[0, 0], [1, 1], [1, 1], [0, 0]], "CONSTANT")
# depthwise = depthwise_conv2d('depthwise', x=depthwise, w=None, stride=stride, l2_strength=l2_strength,
# padding='VALID', bias=bias,
# activation=None, batchnorm_enabled=batchnorm_enabled, is_training=is_training)
# if stride == (2, 2):
# residual_pooled = avg_pool_2d(residual, size=(3, 3), stride=stride, padding='SAME')
# else:
# residual_pooled = residual
# group_conv = grouped_conv2d('Gconv1x1', x=shuffled, is_training=is_training, bn_decay=bn_decay, w=None, num_filters=num_filters,
# num_groups=num_groups, activation=activation,
# batchnorm_enabled=batchnorm_enabled)
# shuffled_list.append(group_conv)
# shuffled = channel_shuffle('group_conv_channel_shuffle', group_conv, num_groups)
# residual_pooled=tf_util.conv2d(residual, group_conv.get_shape()[-1].value, [1,1], padding='VALID', stride=[1,1],
# is_training=is_training ,scope='conv', bn_decay=bn_decay)
# return activation(tf.concat([residual, group_conv], axis=-1)),group_conv
return activation(bottleneck)
if fusion == 'concat':
group_conv1x1 = grouped_conv2d('Gconv1x1', x=depthwise, w=None,
num_filters=num_filters - residual.get_shape()[3].value,
kernel_size=(1, 1),
padding='VALID',
num_groups=num_groups, l2_strength=l2_strength, bias=bias,
activation=None,
batchnorm_enabled=batchnorm_enabled, is_training=is_training)
return activation(tf.concat([residual_pooled, group_conv1x1], axis=-1))
elif fusion == 'add':
group_conv1x1 = grouped_conv2d('Gconv1x1', x=depthwise, w=None,
num_filters=num_filters,
kernel_size=(1, 1),
padding='VALID',
num_groups=num_groups, l2_strength=l2_strength, bias=bias,
activation=None,
batchnorm_enabled=batchnorm_enabled, is_training=is_training)
residual_match = residual_pooled
# This is used if the number of filters of the residual block is different from that
# of the group convolution.
if num_filters != residual_pooled.get_shape()[3].value:
residual_match = conv2d('residual_match', x=residual_pooled, w=None, num_filters=num_filters,
kernel_size=(1, 1),
padding='VALID', l2_strength=l2_strength, bias=bias, activation=None,
batchnorm_enabled=batchnorm_enabled, is_training=is_training)
return activation(group_conv1x1 + residual_match)
else:
raise ValueError("Specify whether the fusion is \'concat\' or \'add\'")
def channel_shuffle(name, x, num_groups):
with tf.variable_scope(name) as scope:
n, h, w, c = x.shape.as_list()
x_reshaped = tf.reshape(x, [-1, h, w, num_groups, c // num_groups])
x_transposed = tf.transpose(x_reshaped, [0, 1, 2, 4, 3])
output = tf.reshape(x_transposed, [-1, h, w, c])
return output
############################################################################################################
# Fully Connected layer Methods
def __dense_p(name, x, w=None, output_dim=128, initializer=tf.contrib.layers.xavier_initializer(), l2_strength=0.0,
bias=0.0):
"""
Fully connected layer
:param name: (string) The name scope provided by the upper tf.name_scope('name') as scope.
:param x: (tf.tensor) The input to the layer (N, D).
:param output_dim: (integer) It specifies H, the output second dimension of the fully connected layer [ie:(N, H)]
:param initializer: (tf.contrib initializer) The initialization scheme, He et al. normal or Xavier normal are recommended.
:param l2_strength:(weight decay) (float) L2 regularization parameter.
:param bias: (float) Amount of bias. (if not float, it means pretrained bias)
:return out: The output of the layer. (N, H)
"""
n_in = x.get_shape()[-1].value
with tf.variable_scope(name):
if w == None:
w = __variable_with_weight_decay([n_in, output_dim], initializer, l2_strength)
__variable_summaries(w)
if isinstance(bias, float):
bias = tf.get_variable("layer_biases", [output_dim], tf.float32, tf.constant_initializer(bias))
__variable_summaries(bias)
output = tf.nn.bias_add(tf.matmul(x, w), bias)
return output
def dense(name, x, w=None, output_dim=128, initializer=tf.contrib.layers.xavier_initializer(), l2_strength=0.0,
bias=0.0,
activation=None, batchnorm_enabled=False, dropout_keep_prob=-1,
is_training=True
):
"""
This block is responsible for a fully connected followed by optional (non-linearity, dropout, max-pooling).
Note that: "is_training" should be passed by a correct value based on being in either training or testing.
:param name: (string) The name scope provided by the upper tf.name_scope('name') as scope.
:param x: (tf.tensor) The input to the layer (N, D).
:param output_dim: (integer) It specifies H, the output second dimension of the fully connected layer [ie:(N, H)]
:param initializer: (tf.contrib initializer) The initialization scheme, He et al. normal or Xavier normal are recommended.
:param l2_strength:(weight decay) (float) L2 regularization parameter.
:param bias: (float) Amount of bias.
:param activation: (tf.graph operator) The activation function applied after the convolution operation. If None, linear is applied.
:param batchnorm_enabled: (boolean) for enabling batch normalization.
:param dropout_keep_prob: (float) for the probability of keeping neurons. If equals -1, it means no dropout
:param is_training: (boolean) to diff. between training and testing (important for batch normalization and dropout)
:return out: The output of the layer. (N, H)
"""
with tf.variable_scope(name) as scope:
dense_o_b = __dense_p(name='dense', x=x, w=w, output_dim=output_dim, initializer=initializer,
l2_strength=l2_strength,
bias=bias)
if batchnorm_enabled:
dense_o_bn = tf.layers.batch_normalization(dense_o_b, training=is_training, epsilon=1e-5)
if not activation:
dense_a = dense_o_bn
else:
dense_a = activation(dense_o_bn)
else:
if not activation:
dense_a = dense_o_b
else:
dense_a = activation(dense_o_b)
def dropout_with_keep():
return tf.nn.dropout(dense_a, dropout_keep_prob)
def dropout_no_keep():
return tf.nn.dropout(dense_a, 1.0)
if dropout_keep_prob != -1:
dense_o_dr = tf.cond(is_training, dropout_with_keep, dropout_no_keep)
else:
dense_o_dr = dense_a
dense_o = dense_o_dr
return dense_o
def flatten(x):
"""
Flatten a (N,H,W,C) input into (N,D) output. Used for fully connected layers after conolution layers
:param x: (tf.tensor) representing input
:return: flattened output
"""
all_dims_exc_first = np.prod([v.value for v in x.get_shape()[1:]])
o = tf.reshape(x, [-1, all_dims_exc_first])
return o
############################################################################################################
# Pooling Methods
def max_pool_2d(x, size=(2, 2), stride=(2, 2), name='pooling'):
"""
Max pooling 2D Wrapper
:param x: (tf.tensor) The input to the layer (N,H,W,C).
:param size: (tuple) This specifies the size of the filter as well as the stride.
:param name: (string) Scope name.
:return: The output is the same input but halfed in both width and height (N,H/2,W/2,C).
"""
size_x, size_y = size
stride_x, stride_y = stride
return tf.nn.max_pool(x, ksize=[1, size_x, size_y, 1], strides=[1, stride_x, stride_y, 1], padding='VALID',
name=name)
def avg_pool_2d(x, size=(2, 2), stride=(2, 2), name='avg_pooling', padding='VALID'):
"""
Average pooling 2D Wrapper
:param x: (tf.tensor) The input to the layer (N,H,W,C).
:param size: (tuple) This specifies the size of the filter as well as the stride.
:param name: (string) Scope name.
:return: The output is the same input but halfed in both width and height (N,H/2,W/2,C).
"""
size_x, size_y = size
stride_x, stride_y = stride
return tf.nn.avg_pool(x, ksize=[1, size_x, size_y, 1], strides=[1, stride_x, stride_y, 1], padding=padding,
name=name)
############################################################################################################
# Utilities for layers
def __variable_with_weight_decay(kernel_shape, initializer, wd):
"""
Create a variable with L2 Regularization (Weight Decay)
:param kernel_shape: the size of the convolving weight kernel.
:param initializer: The initialization scheme, He et al. normal or Xavier normal are recommended.
:param wd:(weight decay) L2 regularization parameter.
:return: The weights of the kernel initialized. The L2 loss is added to the loss collection.
"""
w = tf.get_variable('weights', kernel_shape, tf.float32, initializer=initializer)
collection_name = tf.GraphKeys.REGULARIZATION_LOSSES
if wd and (not tf.get_variable_scope().reuse):
weight_decay = tf.multiply(tf.nn.l2_loss(w), wd, name='w_loss')
tf.add_to_collection(collection_name, weight_decay)
return w
# Summaries for variables
def __variable_summaries(var):
"""
Attach a lot of summaries to a Tensor (for TensorBoard visualization).
:param var: variable to be summarized
:return: None
"""
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
| [
"noreply@github.com"
] | noreply@github.com |
a7d31f45a48d70fbdb87cc92dfb0bfe2a7e60989 | 39de8c1d3dc87361d3d0048c02a50d6aacd0769d | /gallery/urls.py | 34165fc9eaa6311bf17f7514ce8034500ec0b1e2 | [] | no_license | yeamin21/ubayy | 9890360fd5aca869e9101e67c6fd3bcfc5244629 | d2c071bcc5df0d74917b95f8566c50178bad5425 | refs/heads/main | 2023-06-15T18:29:46.052379 | 2021-07-05T21:55:52 | 2021-07-05T21:55:52 | 370,625,462 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | from django.http.request import QueryDict
from django.urls import path
from .views import CreateProduct, ProductDetails, ProductList
app_name = 'gallery'
urlpatterns = [
path('', ProductList.as_view(), name='list'),
path('posts/<user>/', ProductList.as_view(), name='listself'),
path('product/<pk>/', ProductDetails.as_view(), name='details'),
path('create/', CreateProduct.as_view(), name='create'),
]
| [
"yeamin21@outlook.com"
] | yeamin21@outlook.com |
a439ef9d883b1ab7920ba877d8585795143600ec | c4d3466458d386dda17ab382ec5ae9dd5f3a7405 | /Graph/bfs/bfs.py | 8bf1415f743ef6c976a13994f3e1e296e2dde6ff | [] | no_license | faramarz-hosseini/Algorithms | 1db8dcc07f4c6baf7c22d525d7f1a46dc2311d16 | c51d7a48ac34f1433074295003609a451a2803e4 | refs/heads/master | 2023-03-14T19:53:25.712288 | 2021-03-20T13:00:39 | 2021-03-20T13:00:39 | 349,725,028 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 870 | py | n = int(input())
m = int(input())
edges = []
q_nums = []
for _ in range(m):
edges.append(list(map(int, input().split())))
q = int(input())
for _ in range(q):
q_nums.append(list(map(int, input().split())))
class Graph:
def __init__(self, nodes_count, edges_count, edges):
self.nodes_count = nodes_count
self.edges_count = edges_count
self.nodes = list(range(nodes_count))
self.edges = {}
for i in range(self.nodes_count):
self.edges[i] = []
for edge in edges:
self.edges[edge[0]].append(edge[1])
self.edges[edge[1]].append(edge[0])
def check_edge(self, first_node, second_node):
return second_node in self.edges[first_node]
graph = Graph(n, m, edges)
for query in q_nums:
print(graph.check_edge(query[0], query[1]))
test = """
3
3
0 1
0 2
1 2
1
0 1
"""
| [
"moein.mirzaei@cafebazaar.ir"
] | moein.mirzaei@cafebazaar.ir |
ea37b3a336b7925ac1e8dccd5a4d05ee874e0c6f | 88f0db4c85649101cc845a0afb71e428cc758c2f | /blog/app/migrations/0002_auto_20200517_1624.py | 897826f240e236c0e5adb8379b80dc1bac7e3e67 | [] | no_license | pmjabcd/django_blog | 1271732bb7458345339af7870466198592ca1578 | e64c6ae179d71beff7d692d4a04ec55f94473917 | refs/heads/master | 2023-07-18T22:48:14.890381 | 2020-05-17T08:59:06 | 2020-05-17T08:59:06 | 264,620,818 | 0 | 0 | null | 2021-09-22T19:13:03 | 2020-05-17T08:57:23 | Python | UTF-8 | Python | false | false | 542 | py | # Generated by Django 3.0.6 on 2020-05-17 07:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='article',
name='category',
field=models.CharField(default='movie', max_length=30),
),
migrations.AlterField(
model_name='article',
name='title',
field=models.CharField(max_length=30),
),
]
| [
"wisely_@naver.com"
] | wisely_@naver.com |
1152f9facac5c0cb34d89abe0989f056a54199fe | 0ab3ab2cda94a700f015ff172ef37abc3402ed75 | /drawfromfile.py | 3150dd3f92c114e2f97a979d71243be2403f76c8 | [] | no_license | mikerr/laserPOV | 719c85493f8a4dc05e92267695e9e0804aac0b64 | 215ee38db2c3a2ff6e92e1c4f5aa18615ec76839 | refs/heads/master | 2016-09-06T07:49:40.767385 | 2015-04-11T20:49:39 | 2015-04-11T20:49:39 | 33,660,512 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 879 | py | #!/usr/bin/python
import subprocess,time
file = open('drawing','r')
x,y = [], []
for line in file:
row = line.split()
x.append(row[0])
y.append(row[1])
SPEED = 0.09
REPS = 10
XOFFSET = 160
YOFFSET = 110
for loop in range (REPS):
for i in range (len(x)):
xpos = int(x[i]) + XOFFSET
ypos = int(y[i]) + YOFFSET
command = "echo 7=" + str(xpos) + ">/dev/servoblaster"
subprocess.call (command, shell=True)
command = "echo 0=" + str(ypos) + ">/dev/servoblaster"
subprocess.call (command, shell=True)
time.sleep(SPEED)
for i in reversed (range (len(x))):
xpos = int(x[i]) + XOFFSET
ypos = int(y[i]) + YOFFSET
command = "echo 7=" + str(xpos) + ">/dev/servoblaster"
subprocess.call (command, shell=True)
command = "echo 0=" + str(ypos) + ">/dev/servoblaster"
subprocess.call (command, shell=True)
time.sleep(SPEED)
| [
"pi@raspberrypi.(none)"
] | pi@raspberrypi.(none) |
462e7fa139213f3626fb0387c7ce113c72553c75 | 520ee398e5b1c24a44c62d639679b230afbba2b5 | /problem2.py | 7d690068dfe209030ebebb962ca9732e5d2c4700 | [] | no_license | HydroZA/Project_Euler | 1603e938552ff137b49f087e9299203fba51abc8 | 75c946d1e47fd583feebcfa0b8e4327793e6783a | refs/heads/master | 2020-12-23T21:50:20.261339 | 2020-01-30T19:19:19 | 2020-01-30T19:19:19 | 237,286,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 976 | py | #Problem 2
#Each new term in the Fibonacci sequence is generated by adding the previous two terms. By starting with 1 and 2, the first 10 terms will be:
#
#1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...
#
#By considering the terms in the Fibonacci sequence whose values do not exceed four million, find the sum of the even-valued terms.
import math
def FibonacciGenerator(highest):
fibonacci = []
#Use Binets formula for nth term of fibonacci
phi = ( math.sqrt(5) + 1 ) / 2
negphi = ( math.sqrt(5) - 1 ) / 2
n = 1
fib = 0
while fib < highest:
x = ( math.pow(phi, n) / math.sqrt(5) )
y = ( math.pow(negphi, n) / math.sqrt(5) )
fib = int(x+y)
if (int(x+y)) % 2 == 0:
fibonacci.append(fib)
n+=1
sum_fibs = 0
for i in fibonacci:
sum_fibs += i
return sum_fibs
highest = int(input("enter highest: "))
print ("The sum of even numbers in the fibonnaci sequence up to " + str(highest) + " = " + str(FibonacciGenerator(highest))) | [
"jmslegge0@gmail.com"
] | jmslegge0@gmail.com |
aaa9b0cd3ab96bdc110c54a91d771b1e93dced88 | 6251c24d4118d9bd743c1660be8cf3f697a0cb1b | /simple_calculator_using_python/main.py | c01e8cf26f64059df1888c47a882f0e4cf6e9943 | [] | no_license | kohinoor12/Simple_calculator_using_python | 67947663bd1170466a5a42daf5baa70b966d9927 | b17fb323e92a169201331e133eb33aa716b60da2 | refs/heads/master | 2022-10-20T14:38:20.943211 | 2020-06-26T09:47:36 | 2020-06-26T09:47:36 | 275,120,208 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,765 | py | import tkinter
import tkinter as tk
from tkinter import messagebox
# setting up the tkinter window
root = tkinter.Tk()
root.geometry("250x400+300+300")
root.resizable(0,0)
root.title("Calculator")
val = ""
A = 0
operator = ""
# function for numerical button clicked
def btn_1_isclicked():
global val
val = val + "1"
data.set(val)
def btn_2_isclicked():
global val
val = val + "2"
data.set(val)
def btn_3_isclicked():
global val
val = val + "3"
data.set(val)
def btn_4_isclicked():
global val
val = val + "4"
data.set(val)
def btn_5_isclicked():
global val
val = val + "5"
data.set(val)
def btn_6_isclicked():
global val
val = val + "6"
data.set(val)
def btn_7_isclicked():
global val
val = val + "7"
data.set(val)
def btn_8_isclicked():
global val
val = val + "8"
data.set(val)
def btn_9_isclicked():
global val
val = val + "9"
data.set(val)
def btn_0_isclicked():
global val
val = val + "0"
data.set(val)
# functions for the operator button click
def btn_plus_clicked():
global A
global operator,val
A = int(val)
operator = "+"
val = val + "+"
data.set(val)
def btn_minus_clicked():
global A
global operator,val
A = int(val)
operator = "-"
val = val + "-"
data.set(val)
def btn_mult_clicked():
global A
global operator,val
A = int(val)
operator = "*"
val = val + "*"
data.set(val)
def btn_div_clicked():
global A
global operator,val
A = int(val)
operator = "/"
val = val + "/"
data.set(val)
def btn_c_pressed():
global A,operator,val
val = ""
A = 0
operator = ""
data.set(val)
# function to find the result
def result():
global A,operator,val
val2 = val
if operator == "+":
x = int((val2.split("+")[1]))
C = A + x
val = str(C)
data.set(val)
if operator == "-":
x = int((val2.split("-")[1]))
C = A - x
val = str(C)
data.set(val)
if operator == "*":
x = int((val2.split("*")[1]))
C = A * x
val = str(C)
data.set(val)
if operator == "/":
x = int((val2.split("/")[1]))
if x == 0:
messagebox.showerror("Error", "Division By 0 Not Supported")
A = ""
val = ""
data.set(val)
else:
C = int(A / x)
data.set(C)
# the label that shows the result
data = tk.StringVar()
lbl = tk.Label(
root,
text = "Label",
anchor = "se",
font = ("Verdana", 20),
textvariable = data,
background = "#ffffff",
fg = "#000000",
)
lbl.pack(expand = True, fill = "both")
# the frames section
btnrow1 = tk.Frame(root)
btnrow1.pack(expand = True, fill = "both")
btnrow2 = tk.Frame(root)
btnrow2.pack(expand = True, fill = "both")
btnrow3 = tk.Frame(root)
btnrow3.pack(expand = True, fill = "both")
btnrow4 = tk.Frame(root)
btnrow4.pack(expand = True, fill = "both")
# The buttons section
btn1 = tk.Button(
btnrow1,
text = "1",
font = ("Verdana", 22),
relief = "groove",
border = 0,
command = btn_1_isclicked,
)
btn1.pack(side = "left", expand = True, fill = "both",)
btn2 = tk.Button(
btnrow1,
text = "2",
font = ("Verdana", 22),
relief = "groove",
border = 0,
command = btn_2_isclicked,
)
btn2.pack(side = "left", expand = True, fill = "both",)
btn3 = tk.Button(
btnrow1,
text = "3",
font = ("Verdana", 22),
relief = "groove",
border = 0,
command = btn_3_isclicked,
)
btn3.pack(side = "left", expand = True, fill = "both",)
btnplus = tk.Button(
btnrow1,
text = "+",
font = ("Verdana", 22),
relief = "groove",
border = 0,
command = btn_plus_clicked,
)
btnplus.pack(side = "left", expand = True, fill = "both",)
# buttons for frame 2
btn4 = tk.Button(
btnrow2,
text = "4",
font = ("Verdana", 22),
relief = "groove",
border = 0,
command = btn_4_isclicked,
)
btn4.pack(side = "left", expand = True, fill = "both",)
btn5 = tk.Button(
btnrow2,
text = "5",
font = ("Verdana", 22),
relief = "groove",
border = 0,
command = btn_5_isclicked,
)
btn5.pack(side = "left", expand = True, fill = "both",)
btn6 = tk.Button(
btnrow2,
text = "6",
font = ("Verdana", 22),
relief = "groove",
border = 0,
command = btn_6_isclicked,
)
btn6.pack(side = "left", expand = True, fill = "both",)
btnminus = tk.Button(
btnrow2,
text = "-",
font = ("Verdana", 22),
relief = "groove",
border = 0,
command = btn_minus_clicked,
)
btnminus.pack(side = "left", expand = True, fill = "both",)
# button for frame 3
btn7 = tk.Button(
btnrow3,
text = "7",
font = ("Verdana", 22),
relief = "groove",
border = 0,
command = btn_7_isclicked,
)
btn7.pack(side = "left", expand = True, fill = "both",)
btn8 = tk.Button(
btnrow3,
text = "8",
font = ("Verdana", 22),
relief = "groove",
border = 0,
command = btn_8_isclicked,
)
btn8.pack(side = "left", expand = True, fill = "both",)
btn9 = tk.Button(
btnrow3,
text = "9",
font = ("Verdana", 22),
relief = "groove",
border = 0,
command = btn_9_isclicked,
)
btn9.pack(side = "left", expand = True, fill = "both",)
btnmult = tk.Button(
btnrow3,
text = "*",
font = ("Verdana", 22),
relief = "groove",
border = 0,
command = btn_mult_clicked,
)
btnmult.pack(side = "left", expand = True, fill = "both",)
# button for frame4
btnc = tk.Button(
btnrow4,
text = "C",
font = ("Verdana", 22),
relief = "groove",
border = 0,
command = btn_c_pressed,
)
btnc.pack(side = "left", expand = True, fill = "both",)
btn0 = tk.Button(
btnrow4,
text = "0",
font = ("Verdana", 22),
relief = "groove",
border = 0,
command = btn_0_isclicked,
)
btn0.pack(side = "left", expand = True, fill = "both",)
btnequal = tk.Button(
btnrow4,
text = "=",
font = ("Verdana", 22),
relief = "groove",
border = 0,
command = result,
)
btnequal.pack(side = "left", expand = True, fill = "both",)
btndiv = tk.Button(
btnrow4,
text = "/",
font = ("Verdana", 22),
relief = "groove",
border = 0,
command = btn_div_clicked,
)
btndiv.pack(side = "left", expand = True, fill = "both",)
root.mainloop() | [
"noreply@github.com"
] | noreply@github.com |
28768b939d37f3492aab2f6c7d61c2166d775a84 | 89aa0190f9e902b9a7990f90d584e9f290f77f66 | /Finished Game/Game5/gameDemo.py | aa7514098accbe0ddf5425cf039a922d04112513 | [] | no_license | ElvedinDzihanovic/BluBlu | d88a12f666313268876345a7c40eb192c3c8d1ae | b4491a6b8ffacaf3ab53f7f2ff7ee8beb43372e5 | refs/heads/master | 2020-04-28T19:40:14.054412 | 2019-04-11T14:29:29 | 2019-04-11T14:29:29 | 175,518,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,043 | py | import sys
#Change the following line
sys.path.append('opencv\build\python\2.7')
import numpy as np
import cv2
import socket
import time
UDP_IP = "127.0.0.1"
UDP_PORT = 5065
print "UDP target IP:", UDP_IP
print "UDP target port:", UDP_PORT
#print "message:", MESSAGE
sock = socket.socket(socket.AF_INET, # Internet
socket.SOCK_DGRAM) # UDP
class App(object):
    """Interactive CamShift colour tracker (OpenCV, Python 2).

    Drag a rectangle in the 'camshift' window to select a target; the
    tracker follows it frame by frame and streams the tracked x position
    over UDP via the module-level socket.  Press 'b' to toggle the
    back-projection view, Esc to quit.
    """

    def __init__(self, video_src):
        # Grab one frame immediately so self.frame exists before the first
        # mouse callback fires.
        self.cam = cv2.VideoCapture(video_src)
        ret, self.frame = self.cam.read()
        cv2.namedWindow('camshift')
        cv2.setMouseCallback('camshift', self.onmouse)

        self.selection = None       # (x0, y0, x1, y1) rectangle from the drag
        self.drag_start = None      # corner where the left button went down
        self.tracking_state = 0     # 1 once a selection is ready to track
        self.show_backproj = False  # toggled with the 'b' key

    def onmouse(self, event, x, y, flags, param):
        # Mouse callback: build self.selection while the left button drags,
        # then arm tracking on release.
        x, y = np.int16([x, y]) # BUG
        if event == cv2.EVENT_LBUTTONDOWN:
            self.drag_start = (x, y)
            self.tracking_state = 0
        if self.drag_start:
            if flags & cv2.EVENT_FLAG_LBUTTON:
                # Normalise the dragged corners and clamp to frame bounds.
                h, w = self.frame.shape[:2]
                xo, yo = self.drag_start
                x0, y0 = np.maximum(0, np.minimum([xo, yo], [x, y]))
                x1, y1 = np.minimum([w, h], np.maximum([xo, yo], [x, y]))
                self.selection = None
                if x1-x0 > 0 and y1-y0 > 0:
                    self.selection = (x0, y0, x1, y1)
            else:
                # Button released: start tracking if a valid box was drawn.
                self.drag_start = None
                if self.selection is not None:
                    self.tracking_state = 1

    def show_hist(self):
        # Render the 16-bin hue histogram of the selected region as coloured
        # bars in their own 'hist' window.
        bin_count = self.hist.shape[0]
        bin_w = 24
        img = np.zeros((256, bin_count*bin_w, 3), np.uint8)
        for i in xrange(bin_count):
            h = int(self.hist[i])
            cv2.rectangle(img, (i*bin_w+2, 255), ((i+1)*bin_w-2, 255-h), (int(180.0*i/bin_count), 255, 255), -1)
        img = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
        cv2.imshow('hist', img)

    def run(self):
        # Main capture/track/display loop.
        while True:
            ret, self.frame = self.cam.read()
            vis = self.frame.copy()
            hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)
            # Mask out low-saturation / low-value pixels that carry no
            # reliable hue information.
            mask = cv2.inRange(hsv, np.array((0., 60., 32.)), np.array((180., 255., 255.)))

            if self.selection:
                # A fresh selection: build its hue histogram, which becomes
                # the tracking model, and highlight the region on screen.
                x0, y0, x1, y1 = self.selection
                self.track_window = (x0, y0, x1-x0, y1-y0)
                hsv_roi = hsv[y0:y1, x0:x1]
                mask_roi = mask[y0:y1, x0:x1]
                hist = cv2.calcHist( [hsv_roi], [0], mask_roi, [16], [0, 180] )
                cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX);
                self.hist = hist.reshape(-1)
                self.show_hist()
                vis_roi = vis[y0:y1, x0:x1]
                cv2.bitwise_not(vis_roi, vis_roi)
                vis[mask == 0] = 0

            if self.tracking_state == 1:
                self.selection = None
                # Back-project the model histogram and run CamShift to find
                # the new location/orientation of the target.
                prob = cv2.calcBackProject([hsv], [0], self.hist, [0, 180], 1)
                prob &= mask
                term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )
                track_box, self.track_window = cv2.CamShift(prob, self.track_window, term_crit)
                # Stream the tracked centre's x coordinate over UDP.
                xPos = track_box[0][0]
                print "position - X:", str(xPos)
                sock.sendto(str(xPos) , (UDP_IP, UDP_PORT))
                #If you want to see Y Position
                yPos = track_box[0][1]
                print "position - Y:", str(yPos)
                if self.show_backproj:
                    vis[:] = prob[...,np.newaxis]
                # cv2.ellipse can fail for degenerate boxes; just log them.
                try: cv2.ellipse(vis, track_box, (0, 0, 255), 2)
                except: print track_box

            cv2.imshow('camshift', vis)

            ch = 0xFF & cv2.waitKey(5)
            if ch == 27:   # Esc quits
                break
            if ch == ord('b'):
                self.show_backproj = not self.show_backproj
        cv2.destroyAllWindows()
if __name__ == '__main__':
    import sys
    # Video source: first CLI argument if given, else default camera 0.
    try: video_src = sys.argv[1]
    except: video_src = 0
    print __doc__
    App(video_src).run()
| [
"elvedin.dzihanovic@edu.fit.ba"
] | elvedin.dzihanovic@edu.fit.ba |
f1fe68306f6c1e9af39926fb8fa2bb9d1638168f | 16df3eb8b83df2bfed7e0516e0d9385b39f47f89 | /walle/model/server.py | 42aa9e5673d408c54553103df20d67a88276d517 | [
"Apache-2.0"
] | permissive | fu648126437/walle-web | 3732545db54e7669659a899b4fca39aca397ea36 | a306f8212a2671411125f61a850b5869d315e283 | refs/heads/master | 2020-04-08T11:22:42.945715 | 2018-11-27T08:15:24 | 2018-11-27T08:15:24 | 159,303,729 | 2 | 0 | null | 2018-11-27T08:46:00 | 2018-11-27T08:45:59 | null | UTF-8 | Python | false | false | 3,816 | py | # -*- coding: utf-8 -*-
"""
walle-web
:copyright: © 2015-2019 walle-web.io
:created time: 2018-11-24 06:15:11
:author: wushuiyong@walle-web.io
"""
from datetime import datetime
from sqlalchemy import String, Integer, DateTime
from walle.model.database import SurrogatePK, db, Model
from walle.service.extensions import permission
from walle.service.rbac.role import *
# server
class ServerModel(SurrogatePK, Model):
__tablename__ = 'servers'
current_time = datetime.now()
# 表的结构:
id = db.Column(Integer, primary_key=True, autoincrement=True)
name = db.Column(String(100))
host = db.Column(String(100))
status = db.Column(Integer)
created_at = db.Column(DateTime, default=current_time)
updated_at = db.Column(DateTime, default=current_time, onupdate=current_time)
def list(self, page=0, size=10, kw=None):
"""
获取分页列表
:param page:
:param size:
:param kw:
:return:
"""
query = self.query.filter(ServerModel.status.notin_([self.status_remove]))
if kw:
query = query.filter(ServerModel.name.like('%' + kw + '%'))
count = query.count()
data = query.order_by(ServerModel.id.desc()) \
.offset(int(size) * int(page)).limit(size) \
.all()
server_list = [p.to_json() for p in data]
return server_list, count
def item(self, id=None):
"""
获取单条记录
:param role_id:
:return:
"""
id = id if id else self.id
data = self.query.filter(ServerModel.status.notin_([self.status_remove])).filter_by(id=id).first()
return data.to_json() if data else []
def add(self, name, host):
# todo permission_ids need to be formated and checked
server = ServerModel(name=name, host=host, status=self.status_available)
db.session.add(server)
db.session.commit()
if server.id:
self.id = server.id
return server.id
def update(self, name, host, id=None):
# todo permission_ids need to be formated and checked
id = id if id else self.id
role = ServerModel.query.filter_by(id=id).first()
if not role:
return False
role.name = name
role.host = host
ret = db.session.commit()
return ret
def remove(self, id=None):
"""
:param role_id:
:return:
"""
id = id if id else self.id
self.query.filter_by(id=id).update({'status': self.status_remove})
ret = db.session.commit()
return ret
@classmethod
def fetch_by_id(cls, ids=None):
"""
用户列表
:param uids: []
:return:
"""
if not ids:
return None
query = ServerModel.query.filter(ServerModel.id.in_(ids))
data = query.order_by(ServerModel.id.desc()).all()
return [p.to_json() for p in data]
def to_json(self):
item = {
'id': self.id,
'name': self.name,
'host': self.host,
'created_at': self.created_at.strftime('%Y-%m-%d %H:%M:%S'),
'updated_at': self.updated_at.strftime('%Y-%m-%d %H:%M:%S'),
}
item.update(self.enable())
return item
def enable(self):
# current_app.logger.info(dir(permission.app))
# current_app.logger.info(permission.enable_uid(3))
return {
'enable_update': permission.enable_role(DEVELOPER),
'enable_delete': permission.enable_role(DEVELOPER),
'enable_create': False,
'enable_online': False,
'enable_audit': permission.enable_role(OWNER),
'enable_block': False,
}
| [
"861459320@qq.com"
] | 861459320@qq.com |
54babb345d5504b38949a02ef542b19c5f8afd30 | 3e8c7cc3c41a1db10f65800e999fa7976c4ee0d3 | /old/server/urls.py | eda76961fcf6f905fdbb81c6aca4c7cc6a50202a | [] | no_license | oryband/gov-review | 070313ab92de211ded2ff7a16ac4305274053a38 | c176d0e104d1303ec923447d883743ad18230d47 | refs/heads/master | 2021-01-19T18:33:17.941539 | 2016-01-18T19:48:22 | 2016-01-18T19:48:22 | 5,661,620 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 505 | py | from django.conf.urls.defaults import patterns, include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()

# Route every URL straight to the gov_inspector app's URLconf.
urlpatterns = patterns('',
    url(r'^', include('gov_inspector.urls')),

    # Uncomment the next line to enable the admin:
    # url(r'^admin/', include(admin.site.urls)),
)

# In development, let the staticfiles app serve static assets itself.
from settings import DEBUG
if DEBUG:
    from django.contrib.staticfiles.urls import staticfiles_urlpatterns
    urlpatterns += staticfiles_urlpatterns()
| [
"oryband@gmail.com"
] | oryband@gmail.com |
7137d606dfc5163eb516e2df441af37841898baf | 4247840118d743435a9f6fe59020fc6c57a89c6a | /3.BAYES/bayes.py | d0e39a2032a691679b388a81f470dbe087cd4e1d | [] | no_license | TotallyFine/ML-In-Action | c65fb90fa7fa6fa47c5eb9cfbb6d97d9eccabff1 | 2a25efb3e9f2137049a17e97740a39b27770e6ec | refs/heads/master | 2020-03-22T20:10:26.041995 | 2018-07-12T03:37:45 | 2018-07-12T03:37:45 | 140,578,261 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,663 | py | # coding:utf-8
# 朴素贝叶斯方法进行分类,不会直接给出类别只会给出概率
# 在数据较少的情况下仍然有效,可以处理多类别问题
# 对输入数据的准备方式比较敏感
# 有大量特征时,绘制特征作用不大,此时使用直方图效果更好
# 如果每个特征需要N个样本,那么10个特征就需要N**10个样本
# 对于包含1000个特征的词汇表将需要N**1000个样本
# 所需要的样本数会随着特征数目增大而迅速增长
# 但是如果特征之间相互独立,那么样本数就可以减少到1000*N
# 当然实际中并不是这个样子的,而朴素贝叶斯就直接当成了独立
# 这也是朴素的原因,朴素贝叶斯的另一个假设是每个特征同等重要
# 这份代码实现的功能是根据一个文档中含有的单词来判断这个文档是否是侮辱性言论
# ci是判断出的类别,w1-n是文档中含有这个单词的概率
# p(ci | w1, w2,...wn) = p(w1,w2,...wn|ci) * p(ci) / p(w1,w2,...wn)
# p(ci) 直接统计文档得出
# 因为朴素贝叶斯直接假设每个特征是相互独立的所以得到下式
# p(w1, w2, ..., wn | ci) = p(w1|ci) * p(w2 | ci) *...*p(wn | ci)
#
# 统计类别为ci的文档中w1-wn出现的次数即可得到p(wi, ci)
# 从而得到p(w1, w2,...,wn | ci) = p(w1|ci) * p(w2 | ci) *...*p(wn | ci) = p(w1,ci)/p(ci) * p(w2|ci)/p(ci) * ...
#
# 具体的操作如下:
# wi ci的取值都是1或0,那么对单词i来说p(wi=0, ci=0) + p(wi=1,ci=0) + p(wi=0,ci=1) + p(wi=1,ci=1)=1
# 那么p(wi=0 | ci=0) = p(wi=0, ci=0)/p(ci=0), 那么p(wi=1|ci=0) = 1 - p(wi=0|ci=0)
# 遍历文档的时候设置计数器,记录ci=0以及ci=1的个数
# 并统计当ci=0时wi=1,以及ci=1时wi=1的个数,最后(ci=0时wi=1次数)/(ci=0的次数)=p(wi=1|ci=0)
import numpy as np
def load_dataset():
    """Return the toy posting corpus and its labels.

    Labels: 1 = the post contains abusive wording, 0 = normal speech.
    """
    posting_list = [
        ['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
        ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
        ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
        ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
        ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
        ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid'],
    ]
    class_vec = [0, 1, 0, 1, 0, 1]
    return posting_list, class_vec
def create_vocab_list(dataset):
    """Build the deduplicated vocabulary of *dataset*.

    :param dataset: list of documents, each a list of word tokens
    :return: list of unique words; ordering is unspecified because a set
        is used internally, so the same input may produce differently
        ordered vocabularies across runs.
    """
    vocab = set()
    for document in dataset:
        vocab.update(document)
    return list(vocab)
def set_of_words2vec(vocab_list, input_set):
    """Encode a document as a set-of-words (presence) vector.

    :param vocab_list: the vocabulary; position i of the result refers to
        vocab_list[i]
    :param input_set: the document's words
    :return: list of 0/1 flags, 1 where the vocabulary word occurs in the
        document.  Unknown words are reported on stdout and ignored.
    """
    # Map each word to its first position once, instead of calling
    # list.index() per document word.
    position = {}
    for i, word in enumerate(vocab_list):
        position.setdefault(word, i)

    flags = [0] * len(vocab_list)
    for word in input_set:
        if word in position:
            flags[position[word]] = 1
        else:
            print('the word: {} is not in my vocabulary!'.format(word))
    return flags
def bag_of_word2vec(vocab_list, input_set):
    """Encode a document as a bag-of-words (occurrence-count) vector.

    Unlike set_of_words2vec, each slot holds how many times the vocabulary
    word appears in the document.  Words missing from the vocabulary are
    silently ignored.
    """
    position = {}
    for i, word in enumerate(vocab_list):
        position.setdefault(word, i)

    counts = [0] * len(vocab_list)
    for word in input_set:
        if word in position:
            counts[position[word]] += 1
    return counts
def trainNB0(train_matrix, train_category):
    """Estimate naive-Bayes parameters from labelled document vectors.

    :param train_matrix: list of document vectors (0/1 per vocabulary word)
    :param train_category: list of labels (1 = abusive, 0 = normal)
    :return: (p0_vec, p1_vec, p_abusive) where pX_vec[i] is
        log((1 + count of word i in class-X docs) / (2 + number of class-X docs))
        and p_abusive is the prior P(class == 1).

    Laplace smoothing (+1 counts, denominator starting at 2) keeps any
    single zero probability from wiping out the whole product; working in
    log space avoids floating-point underflow when the terms are summed.
    The per-document denominator (+1 per doc, not +sum of words) follows
    the errata to the book's original listing.
    """
    assert len(train_matrix) == len(train_category)
    num_docs = len(train_matrix)
    num_words = len(train_matrix[0])

    # Prior probability of the abusive class.
    p_abusive = sum(train_category) / float(num_docs)

    # Smoothed word counts and document counts, keyed by class label.
    counts = {0: np.ones(num_words), 1: np.ones(num_words)}
    denoms = {0: 2.0, 1: 2.0}
    for doc_vec, label in zip(train_matrix, train_category):
        counts[label] += doc_vec
        denoms[label] += 1

    p0_vec = np.log(counts[0] / denoms[0])
    p1_vec = np.log(counts[1] / denoms[1])
    return p0_vec, p1_vec, p_abusive
def classifyNB(vec2classify, p0_vec, p1_vec, p_class1):
    """Classify a document vector with the trained naive-Bayes model.

    :param vec2classify: document vector, same length/order as the vocabulary
    :param p0_vec: per-word log likelihoods for class 0 (from trainNB0)
    :param p1_vec: per-word log likelihoods for class 1 (from trainNB0)
    :param p_class1: prior probability of class 1
    :return: 1 (abusive) when the class-1 log posterior wins, else 0

    Summing vec * log-likelihoods plus the log prior compares the two
    unnormalised log posteriors; the shared evidence term cancels.
    """
    log_post1 = float(np.dot(vec2classify, p1_vec)) + np.log(p_class1)
    log_post0 = float(np.dot(vec2classify, p0_vec)) + np.log(1.0 - p_class1)
    if log_post1 > log_post0:
        return 1
    return 0
def test_NB():
    """Smoke-test the classifier end-to-end on the toy corpus."""
    list_posts, list_classes = load_dataset()
    my_vocab_list = create_vocab_list(list_posts)
    # Vectorise every training post against the shared vocabulary.
    train_mat = []
    for post_in_doc in list_posts:
        train_mat.append(set_of_words2vec(my_vocab_list, post_in_doc))
    p0_v, p1_v, p_ab = trainNB0(train_mat, list_classes)
    # Expected classification: 0 (not abusive).
    test_entry = ['love', 'my', 'dalmation']
    this_doc = np.array(set_of_words2vec(my_vocab_list, test_entry))
    print(test_entry, 'classified as: ', classifyNB(this_doc, p0_v, p1_v, p_ab))
    # Expected classification: 1 (abusive).
    test_entry = ['stupid', 'garbage']
    this_doc = np.array(set_of_words2vec(my_vocab_list, test_entry))
    print(test_entry, 'classified as: ', classifyNB(this_doc, p0_v, p1_v, p_ab))

if __name__ == '__main__':
    test_NB()
| [
"2278650208@qq.com"
] | 2278650208@qq.com |
1cbb9f01ef2201a746f5699df2bc97856ab41c02 | a79ba703611779e23d42c2be04cabd7c43a795fc | /log_utils/jlog.py | c0ab1d70f1e6f86e0479bbd4fa0e3737c8286fdb | [] | no_license | airjason13/pyTestMulticast | 89f19370c7f3716615ef26b192738f247afbf7b5 | a2c1e8c35c57abd03c77c1cd4808b37f7e33bab1 | refs/heads/main | 2023-05-03T22:45:42.247025 | 2021-05-27T10:16:53 | 2021-05-27T10:16:53 | 367,759,110 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 684 | py | import logging
import os
from logging.handlers import RotatingFileHandler
FORMAT = '%(asctime)s %(name)-12s %(levelname)s : %(message)s'
def logging_init(s, log_level):
    """Create a logger named *s* that logs to the console (via basicConfig)
    and to a size-rotating file at log/my_log.log (1 MB x 6 backups).

    :param s: logger name
    :param log_level: root logging level, e.g. logging.DEBUG
    :return: the configured logging.Logger
    """
    logging.basicConfig(level=log_level, format=FORMAT)
    # Bind the logger before attempting file-handler setup so the function
    # still returns a usable console logger when file logging fails (the
    # original raised NameError on `return log` in that case).
    log = logging.getLogger(s)
    try:
        os.makedirs('log', exist_ok=True)
        formatter = logging.Formatter(FORMAT)
        handler = RotatingFileHandler('log/my_log.log', maxBytes=10**6, backupCount=6)
        handler.setFormatter(formatter)
        log.addHandler(handler)
    except OSError as e:
        # File logging is best-effort: report the failure and fall back to
        # console-only logging instead of swallowing arbitrary exceptions.
        print(e)
    return log
"airjason13@gmail.com"
] | airjason13@gmail.com |
dc72b2bde8cad68dee72340a53e427e8b6ea6c23 | 06513636eb9253fc7652f605c3cc05b284bc2a8d | /CH_3_EX_3_3.py | 295756ee25524d64cea48c6efa5ea859a860585a | [] | no_license | jch427/directed-study | 70981aeab148687786c465dde5b42e84837480fe | 069a303ea5303c767e027170f57c83cf3be78114 | refs/heads/master | 2020-09-19T10:44:36.700123 | 2020-03-27T00:24:57 | 2020-03-27T00:24:57 | 224,225,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 845 | py | # how many iteams? 5
# how many messages? 5
# how many prints? 5
# are they all in the same message/print? no
# Exercise 3-3: build five sentences from a list of car makes and print them.
car = ['tesla', 'ford', 'honda', 'GMC', 'doge']
# NOTE(review): the message strings contain typos ("befor", "comparied",
# missing space after "old", "radicaly diferent", "futeristic", "componys").
# They are runtime output, so they are left untouched here.
message = f'the new {car[0].title()} truck is unlike any other befor it.'
message2 = f'comparied to the old{car[1].title()} F150 it is very interesting.'
message3 = f'unlike {car[1].title()}, {car[2].title()}, {car[3].title()}, {car[4].title()} the look is radicaly diferent.'
message4 = f'{car[0].title()} chose to go for a more futeristic look that reminds me of a video game truck.'
message5 = f'all the negative press i have seen for the new {car[0].title()} truck has been about how it looks but i do not see any of the other componys such as {car[1].title()} doing anything about it.'
print(message)
print(message2)
print(message3)
print(message4)
print(message5)
| [
"noreply@github.com"
] | noreply@github.com |
0e7737cccb51a80b11f78c5e07adef62d4d64487 | e61fa7205d3d01787ca0b3d512efa67a94c1105b | /Raspberry/config.py | ea6164f0af539e884b8d16c78501a11231c89ffb | [] | no_license | cyrilvincent/IoT | 4d95c6def9d4c822edee71d547e8b151846f26ee | d73a4d78501975beeeddc9fd90704f5982f66a66 | refs/heads/master | 2021-12-28T08:42:50.088460 | 2021-09-09T10:12:33 | 2021-09-09T10:12:33 | 215,299,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46 | py | mac="00:0E:EA:CF:47:5A"
usb="COM4"
usb2="COM5" | [
"contact@cyrilvincent.com"
] | contact@cyrilvincent.com |
ae5f8fa2f0055b22974ed3c8865abea3b4050980 | 1306fad75658a9f83320243b64ffd570586007a4 | /Files1.py | 1244d47863794ad56737d6237cc6a45f3a1891fb | [] | no_license | sandhya74/Python | 9447bad37bf18188ae2d20ab173591de18f90556 | e4a1bae1ee023dee89cb83a697483e087d6cad3d | refs/heads/master | 2023-07-06T17:42:20.457180 | 2021-08-12T10:21:22 | 2021-08-12T10:21:22 | 345,895,148 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 592 | py | #f=open("demo.txt","a")
#f.write("hi")
#f.close()
#f=open("demo.txt","r")
#print(f.read())
"""
fp = open("demo.txt", "w")
for _ in range(10):
fp.write("Edureka is a platform for developing market based skills \n")
fp.close()
"""
"""
fp=open("demo.txt","a")
for _ in range(5):
fp.write("hello\n")
fp.close()
fp=open("demo.txt","r")
print(fp.read())
"""
#fp=open("mydemo.txt","x")
"""
fp=open("mydemo.txt","w")
fp.write("hi hello")
fp.close()
fp=open("mydemo.txt","r")
print(fp.read())
"""
with open("python.txt","w") as f:
f.write("i love program")
f.write("sandhiya") | [
"sandhiya@5gindia.net"
] | sandhiya@5gindia.net |
a85ca73047ac8b70960d9f1fc9b2a54f698a1a3e | 3f2fb2e887194105919c75166615100d73bf6785 | /test.py | 574f73e29895537114fb4a1e0dd602879efc6ff2 | [
"MIT"
] | permissive | hermesfeet/summarize_text | 7007be904cadaff41621eec3d0d9e8ddae1d4ff4 | ce2d42f065f01a4ff5b16df5f0fa12cd3c9eeb3d | refs/heads/master | 2020-04-29T14:01:14.886436 | 2019-03-18T02:04:24 | 2019-03-18T02:04:24 | 176,184,031 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,185 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from textteaser.textteaser import TextTeaser
# article source: https://blogs.dropbox.com/developers/2015/03/limitations-of-the-get-method-in-http/
title = "Limitations of the GET method in HTTP"
text = "We spend a lot of time thinking about web API design, and we learn a lot from other APIs and discussion with their authors. In the hopes that it helps others, we want to share some thoughts of our own. In this post, we’ll discuss the limitations of the HTTP GET method and what we decided to do about it in our own API. As a rule, HTTP GET requests should not modify server state. This rule is useful because it lets intermediaries infer something about the request just by looking at the HTTP method. For example, a browser doesn’t know exactly what a particular HTML form does, but if the form is submitted via HTTP GET, the browser knows it’s safe to automatically retry the submission if there’s a network error. For forms that use HTTP POST, it may not be safe to retry so the browser asks the user for confirmation first. HTTP-based APIs take advantage of this by using GET for API calls that don’t modify server state. So if an app makes an API call using GET and the network request fails, the app’s HTTP client library might decide to retry the request. The library doesn’t need to understand the specifics of the API call. The Dropbox API tries to use GET for calls that don’t modify server state, but unfortunately this isn’t always possible. GET requests don’t have a request body, so all parameters must appear in the URL or in a header. While the HTTP standard doesn’t define a limit for how long URLs or headers can be, most HTTP clients and servers have a practical limit somewhere between 2 kB and 8 kB. This is rarely a problem, but we ran up against this constraint when creating the /delta API call. Though it doesn’t modify server state, its parameters are sometimes too long to fit in the URL or an HTTP header. The problem is that, in HTTP, the property of modifying server state is coupled with the property of having a request body. 
We could have somehow contorted /delta to mesh better with the HTTP worldview, but there are other things to consider when designing an API, like performance, simplicity, and developer ergonomics. In the end, we decided the benefits of making /delta more HTTP-like weren’t worth the costs and just switched it to HTTP POST. HTTP was developed for a specific hierarchical document storage and retrieval use case, so it’s no surprise that it doesn’t fit every API perfectly. Maybe we shouldn’t let HTTP’s restrictions influence our API design too much. For example, independent of HTTP, we can have each API function define whether it modifies server state. Then, our server can accept GET requests for API functions that don’t modify server state and don’t have large parameters, but still accept POST requests to handle the general case. This way, we’re opportunistically taking advantage of HTTP without tying ourselves to it."
tt = TextTeaser()
sentences = tt.summarize(title, text)
for sentence in sentences:
print(sentence) | [
"hermesfeet@gmail.com"
] | hermesfeet@gmail.com |
d27208389d6ba8b4e8b86ffee2adb7a585b2ae5e | 15745463dda76d2f9419200aef24736521a07c1c | /src/descriptores.py | 981d3d460ffcdbdd4c53f5962d3efb4fbf2926db | [] | no_license | jjdenis/diagrama_caja | 7b27ae0e40bdb8901c4692aa34bc2c6db1af563d | dd31dbf629d91cd2997c4e39af5b8edf705c53af | refs/heads/master | 2020-05-23T05:07:24.401821 | 2017-03-15T07:37:56 | 2017-03-15T07:37:56 | 84,751,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,036 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
class Descriptors(object):
    """Box-plot style descriptive statistics for a non-empty list of numbers.

    Attributes set on construction:
      maximo / ultimo          -- max and last value of the input as given
      media                    -- arithmetic mean
      quartile_1 / mediana / quartile_3 -- simple index-based quartiles
      lower_regular_range / upper_regular_range -- Q1/Q3 -/+ 1.5 * IQR
      first_non_outlier / last_non_outlier      -- whisker end values
      atipicos                 -- outliers beyond the regular range

    Quartiles are taken by plain truncated index (int(n * .25), n // 2,
    int(n * .75)), matching the original plotting code rather than
    interpolating like statistics.quantiles would.
    """

    def __init__(self, valores):
        # maximo/ultimo describe the values as given, before sorting.
        self.maximo = max(valores)
        self.ultimo = valores[-1]

        valores = sorted(valores)
        num_valores = len(valores)
        self.media = sum(valores) / num_valores

        self.quartile_1 = valores[int(num_valores * .25)]
        # Floor division keeps the index an int; the original used plain /,
        # which produces a float and raises TypeError under Python 3.
        self.mediana = valores[num_valores // 2]
        self.quartile_3 = valores[int(num_valores * .75)]

        # Tukey fences: 1.5 * IQR beyond the quartiles.
        intercuartil = self.quartile_3 - self.quartile_1
        self.lower_regular_range = self.quartile_1 - 1.5 * intercuartil
        self.upper_regular_range = self.quartile_3 + 1.5 * intercuartil
        self.first_non_outlier = next(v for v in valores if v > self.lower_regular_range)
        self.last_non_outlier = next(v for v in reversed(valores) if v < self.upper_regular_range)
        self.atipicos = [v for v in valores
                         if v < self.lower_regular_range or v > self.upper_regular_range]
"jjdenis@gmail.com"
] | jjdenis@gmail.com |
68cf3e5e2413d7edeffddb03c066dfb7a3e78310 | 4e187a73d451f8c500955098e8f7d466b90d05de | /Flasky0.1.py | 299910de2cdaa6ebb388c7732ee6b2261932d8dc | [] | no_license | LinZiYU1996/Flask_Login | 21c3592b6116ca49a17bab98eb4171ea4721b551 | 420d540cf18f4627054ecf589872611e6e6ff8b6 | refs/heads/master | 2021-01-02T08:48:38.669567 | 2017-08-02T03:14:37 | 2017-08-02T03:14:37 | 99,066,660 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 572 | py | from flask import Flask,render_template,flash,url_for,redirect
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from flask_wtf import FlaskForm
from flask_login import LoginManager,login_user,UserMixin,logout_user,login_required
from flask_sqlalchemy import SQLAlchemy

# Module-level Flask app with the login machinery wired in.
# No routes or models are defined in this file yet.
app = Flask(__name__)
app.config['SECRET_KEY']='kkk'  # NOTE(review): hard-coded secret; move to env/config
bootstrap = Bootstrap(app)
moment=Moment(app)
login_manger=LoginManager()
login_manger.session_protection='strong'  # re-auth when session fingerprint changes
login_manger.login_view='login'           # endpoint @login_required redirects to
login_manger.init_app(app)

if __name__ == '__main__':
    app.run()
| [
"2669093302@qq.com"
] | 2669093302@qq.com |
757bb5db334a4b6518bf2b293c9f9cc451d67ebf | 5891051796778cfb44a255248ce38789bfef9e70 | /P_base/python_pdf/kp.py | 1849b1e321f912c79b6c02533938157eb9a214ea | [] | no_license | Faithlmy/Python_base | cc546a5d86b123e102a69df1227cde9b6e567493 | 5a43557e6375dc9dbe5f6701d7c10e549873a5ab | refs/heads/master | 2021-01-01T17:07:04.097978 | 2018-03-31T16:44:01 | 2018-03-31T16:44:01 | 98,000,621 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,258 | py | #!/usr/bin/env python3
# encoding: utf-8
import sys
import importlib
importlib.reload(sys)
from pdfminer.pdfparser import PDFParser,PDFDocument
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import PDFPageAggregator
from pdfminer.layout import LTTextBoxHorizontal,LAParams
from pdfminer.pdfinterp import PDFTextExtractionNotAllowed
'''
解析pdf 文本,保存到txt文件中
'''
# Parses the PDF at *path* and prints its text content, layout object by
# layout object (pdfminer's old "PDFDocument + set_parser" API).
path = r'/home/faith/Desktop/phtoword.pdf'  # PDF file to parse

def parse():
    """Parse the PDF at *path* and print the text of every layout object."""
    fp = open(path, 'rb')  # open in binary read mode
    # Build a PDF parser from the file object.
    praser = PDFParser(fp)
    # Create a PDF document.
    doc = PDFDocument()
    # Connect the parser with the document object.
    praser.set_document(doc)
    doc.set_parser(praser)
    # Supply the initial password; an empty one is used when the document
    # is not encrypted.
    doc.initialize()
    # Bail out when the document forbids text extraction.
    if not doc.is_extractable:
        raise PDFTextExtractionNotAllowed
    else:
        # Resource manager for shared resources.
        rsrcmgr = PDFResourceManager()
        # PDF device object with default layout-analysis parameters.
        laparams = LAParams()
        device = PDFPageAggregator(rsrcmgr, laparams=laparams)
        # PDF interpreter object.
        interpreter = PDFPageInterpreter(rsrcmgr, device)
        # Process the pages one at a time.
        for page in doc.get_pages():  # doc.get_pages() yields the pages
            interpreter.process_page(page)
            # Receive the LTPage object for this page.
            layout = device.get_result()
            # print(layout)
            # layout is an LTPage holding the objects parsed from this page
            # (LTTextBox, LTFigure, LTImage, LTTextBoxHorizontal, ...);
            # the text lives in each object's .get_text().
            for x in layout:
                print(x.get_text())
                # if (isinstance(x, LTTextBoxHorizontal)):
                #     with open(r'/root/pdf/t_pdf/turn_pdf2.txt', 'a') as f:
                #         results = x.get_text().encode('utf-8')
                #         print(results)
                #         f.write(results + '\n')
if __name__ == '__main__':
parse() | [
"lmengyy@126.com"
] | lmengyy@126.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.