repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
markhuyong/galaxy | rest/decorators.py | # -*- coding: utf-8 -*-
from functools import wraps
import warnings
from .exceptions import GalaxyDeprecationWarning
def deprecated(use_instead=None):
    """Decorator marking a function as deprecated.

    Emits a ``GalaxyDeprecationWarning`` each time the decorated function
    is called.  Usable bare (``@deprecated``) or with a replacement hint
    (``@deprecated("new_name")``).  Taken from Scrapy.
    """
    def deco(func):
        @wraps(func)
        def wrapped(*args, **kwargs):
            # Build the message at call time so the bare-decorator case
            # (where use_instead is reset to None below) is respected.
            parts = ["Call to deprecated function %s." % func.__name__]
            if use_instead:
                parts.append("Use %s instead." % use_instead)
            warnings.warn(" ".join(parts),
                          category=GalaxyDeprecationWarning,
                          stacklevel=2)
            return func(*args, **kwargs)
        return wrapped
    # Bare usage: ``@deprecated`` passes the target function itself.
    if callable(use_instead):
        deco = deco(use_instead)
        use_instead = None
    return deco
|
markhuyong/galaxy | crawler/misc/mobile_agents.py | <gh_stars>0
# -*- coding: utf-8 -*-
# Pool of mobile (iOS Safari / Chrome) User-Agent strings used to
# randomize the UA header when crawling mobile endpoints.
AGENTS = [
    "Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_2_1 like Mac OS X; da-dk) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5",
    "Mozilla/5.0 (iPhone; U; CPU like Mac OS X; en) AppleWebKit/420 (KHTML, like Gecko) Version/3.0 Mobile/1A543a Safari/419.3",
    "Mozilla/5.0 (iPhone; U; CPU iPhone OS 2_0 like Mac OS X; en-us) AppleWebKit/525.18.1 (KHTML, like Gecko) Version/3.1.1 Mobile/5A347 Safari/525.200",
    "Mozilla/5.0 (iPhone; U; CPU iPhone OS 3_0 like Mac OS X; en-us) AppleWebKit/528.18 (KHTML, like Gecko) Version/4.0 Mobile/7A341 Safari/528.16",
    "Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_0 like Mac OS X; en-us) AppleWebKit/532.9 (KHTML, like Gecko) Version/4.0.5 Mobile/8A293 Safari/531.22.7",
    "Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_2_1 like Mac OS X; da-dk) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5",
    "Mozilla/5.0 (iPhone; U; CPU iPhone OS 5_1_1 like Mac OS X; da-dk) AppleWebKit/534.46.0 (KHTML, like Gecko) CriOS/19.0.1084.60 Mobile/9B206 Safari/7534.48.3",
]
|
markhuyong/galaxy | crawler/qq/qq/middleware.py | # -*- coding: utf-8 -*-
import sys
import random
import redis
import json
import logging
from cookies import initCookie, updateCookie, removeCookie
from scrapy.exceptions import IgnoreRequest
from scrapy.utils.response import response_status_message
from scrapy.downloadermiddlewares.retry import RetryMiddleware
from crawler.qq.qq.utils import BaseHelper
class CookiesMiddleware(RetryMiddleware):
    """Maintain a pool of QQ login cookies stored in Redis.

    Each outgoing request gets a randomly selected cookie (and the g_tk
    token derived from it).  Responses that indicate an expired cookie or
    a blocked account trigger a cookie refresh / removal and a retry.
    """

    def __init__(self, settings, crawler):
        RetryMiddleware.__init__(self, settings)
        # Reuse an injected connection ("RCONN") when given, otherwise
        # connect from the crawler settings.
        self.rconn = settings.get("RCONN", redis.Redis(
            crawler.settings.get('REDIS_HOST', 'localhost'),  # was 'localhsot' (typo)
            crawler.settings.get('REDIS_PORT', 6379),
            crawler.settings.get('REDIS_DB', 0),
            crawler.settings.get('REDIS_PASS', None)))
        initCookie(self.rconn, crawler.spider)

    @classmethod
    def from_crawler(cls, crawler):
        return cls(crawler.settings, crawler)

    def process_request(self, request, spider):
        """Pick a random stored cookie, substitute the g_tk token into
        the URL if needed, and attach cookie + account info to *request*."""
        prefix = BaseHelper.get_cookie_key_prefix(spider)
        redisKeys = self.rconn.keys("{}:*".format(prefix))
        while len(redisKeys) > 0:
            elem = random.choice(redisKeys)
            if prefix in elem:
                cookie = json.loads(self.rconn.get(elem))
                spider.logger.debug("cookies= {}, request.url =={}"
                                    .format(cookie, request.url))
                new_url = self._replace_token(request.url, cookie, spider)
                if not request.url == new_url:
                    # FIXME: hack protected method
                    request._set_url(new_url)
                    spider.logger \
                        .debug("request.replaced new request url ======{}"
                               .format(new_url))
                request.cookies = cookie
                # Remember which account this cookie belongs to.
                request.meta["accountText"] = elem.split("Cookies:")[-1]
                break
            else:
                # Key didn't match the expected prefix; never pick it again.
                redisKeys.remove(elem)

    def process_response(self, request, response, spider):
        """React to signs of cookie expiry / account blocking and retry."""
        if response.status in [300, 301, 302, 303]:
            try:
                redirect_url = response.headers["location"]
                # NOTE: the original tested '"login.qq" in redirect_url'
                # twice (duplicated condition); collapsed to one check.
                if "login.qq" in redirect_url:  # cookie expired
                    spider.logger.warning("One Cookie need to be updating...")
                    updateCookie(request.meta['accountText'], self.rconn,
                                 spider)
                elif "qq.cn/security" in redirect_url:  # account restricted
                    spider.logger.warning("One Account is locked! Remove it!")
                    removeCookie(request.meta["accountText"], self.rconn,
                                 spider)
                elif "qq.cn/pub" in redirect_url:
                    spider.logger.warning(
                        "Redirect to 'http://qq.com'!( Account:%s )" %
                        request.meta["accountText"].split("--")[0])
                reason = response_status_message(response.status)
                return self._retry(request, reason, spider) or response  # retry
            except Exception:
                # Missing location header / meta: drop the request.
                raise IgnoreRequest
        elif response.status in [403, 414]:
            spider.logger.error("%s! Stopping..." % response.status)
            reason = response_status_message(response.status)
            return self._retry(request, reason, spider) or response
        elif u'登录态失效,请重新登录' in response.body or u'请先登录' in response.body:
            # Body-level "please log in" markers: refresh cookie, retry.
            spider.logger.warning("One Cookie need to be updating...")
            updateCookie(request.meta['accountText'], self.rconn,
                         spider)
            reason = response_status_message(response.status)
            return self._retry(request, reason, spider) or response
        else:
            return response

    @classmethod
    def _replace_token(cls, url, cookies, spider):
        """Replace the literal "GTK" placeholder in *url* with the g_tk
        token derived from the first available of p_skey / skey / rv2."""
        if "GTK" in url:
            p_skey = cookies.get("p_skey")
            skey = cookies.get("skey")
            rv2 = cookies.get("rv2")
            spider.logger \
                .debug("p_skey={}, skey={}, rv2={}".format(p_skey, skey, rv2))
            str_key = p_skey or skey or rv2
            url = url.replace("GTK", cls._gen_token(str_key))
        return url

    @staticmethod
    def _gen_token(str_key):
        """Generate the g_tk token required by the fetcher (QQ's classic
        5381 rolling string hash, masked to 31 bits)."""
        hash_code = 5381
        for c in str_key:
            hash_code += (hash_code << 5) + ord(c)
        return str(hash_code & 0x7fffffff)

    # --- Legacy Python 2 helpers (kept for compatibility) ---------------

    @staticmethod
    def LongToInt(value):
        # int + int silently promotes to long on Python 2; convert back.
        if isinstance(value, int):
            return int(value)
        else:
            return int(value & sys.maxint)

    @staticmethod
    def LeftShiftInt(number, step):
        # A left shift may promote to long on Python 2; convert back.
        # (0x200000000 == the old 0x200000000L literal; same value.)
        if isinstance((number << step), long):
            return int((number << step) - 0x200000000)
        else:
            return int(number << step)

    @classmethod
    def getOldGTK(cls, skey):
        """g_tk derived from skey only (old algorithm)."""
        a = 5381
        for i in range(0, len(skey)):
            a = a + cls.LeftShiftInt(a, 5) + ord(skey[i])
            a = cls.LongToInt(a)
        return str(a & 0x7fffffff)

    @classmethod
    def getNewGTK(cls, p_skey, skey, rv2):
        """g_tk derived from the first available of p_skey/skey/rv2."""
        b = p_skey or skey or rv2
        a = 5381
        for i in range(0, len(b)):
            a = a + cls.LeftShiftInt(a, 5) + ord(b[i])
            a = cls.LongToInt(a)
        return str(a & 0x7fffffff)
|
markhuyong/galaxy | crawler/misc/log.py | import logging
# Shared file logger for the crawler.misc helpers: DEBUG and above are
# written to 'hello.log' in the current working directory.
# from logging.config import fileConfig
# fileConfig('logging_config.ini')
logger = logging.getLogger(__name__)
# handler = logging.StreamHandler()
handler = logging.FileHandler('hello.log')
formatter = logging.Formatter(
    '%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
|
markhuyong/galaxy | crawler/jianshu/jianshu/settings.py | # -*- coding: utf-8 -*-
# Scrapy settings for the jianshu crawler.
BOT_NAME = 'jianshu'
SPIDER_MODULES = ['jianshu.spiders']
NEWSPIDER_MODULE = 'jianshu.spiders'

# Downloader middlewares (splash support is currently commented out).
DOWNLOADER_MIDDLEWARES = {
    # Engine side
    # 'crawler.misc.middleware.agent.CustomUserAgentMiddleware': 401,
    #'scrapy_splash.SplashCookiesMiddleware': 723,
    #'scrapy_splash.SplashMiddleware': 725,
    #'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,
    # Downloader side
}
SPIDER_MIDDLEWARES = {
    #'scrapy_splash.SplashDeduplicateArgsMiddleware': 100,
}
# SPLASH_URL = 'http://127.0.0.1:8050/'
#SPLASH_URL = 'http://192.168.127.12:8050/'
#DUPEFILTER_CLASS = 'scrapy_splash.SplashAwareDupeFilter'
#HTTPCACHE_STORAGE = 'scrapy_splash.SplashAwareFSCacheStorage'

# Desktop Chrome user agent used for all requests.
USER_AGENT = "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36"

# Cookie handling and verbose logging (useful when debugging login flows).
COOKIES_ENABLED = True
COOKIES_DEBUG = True
LOG_ENABLED = True
LOG_STDOUT = True
LOG_LEVEL = 'DEBUG'

ROBOTSTXT_OBEY = False
# DOWNLOAD_DELAY = 0.25
DOWNLOAD_TIMEOUT = 360  # generous timeout (seconds)
markhuyong/galaxy | tests/shell/qq/launch.py | from scrapy import cmdline
# Convenience launcher: equivalent to running `scrapy crawl qq` in a shell.
cmdline.execute("scrapy crawl qq".split())
|
markhuyong/galaxy | tests/test_crawler.py | # -*- coding: utf-8 -*-
from scrapy.utils.engine import get_engine_status
from twisted.internet import defer
from twisted.trial import unittest
from rest.core import GalaxyCrawler
from .servers import MockServer
from .spiders import SingleRequestSpider
from .utils import get_settings
class TestCrawler(unittest.TestCase):
    """Spider shouldn't make start requests if list of start_requests
    wasn't passed to 'crawl' method.
    """

    def setUp(self):
        self.site = MockServer()
        self.site.start()
        self.settings = get_settings()
        # get_settings() disables all extensions; re-enable CoreStats
        # (order 0) so the stats asserted below are collected.
        self.settings['EXTENSIONS']['scrapy.contrib.corestats.CoreStats'] = 0
        self.engine_status = []

    def tearDown(self):
        self.site.stop()

    def cb(self, response):
        # Snapshot the engine status while the crawl is in flight.
        self.engine_status.append(get_engine_status(self.crawler.engine))

    def _assert_no_requests(self):
        # No callback fired and no scheduler/downloader activity recorded.
        self.assertEqual(len(self.engine_status), 0, self.engine_status)
        stats = self.crawler.stats.get_stats()
        self.assertNotIn('scheduler/enqueued', stats)
        self.assertNotIn('scheduler/dequeued', stats)
        self.assertNotIn('downloader/request_count', stats)
        self.assertNotIn('downloader/response_count', stats)

    def _assert_engine_worked(self):
        stats = self.crawler.stats.get_stats()
        self.assertIn('start_time', stats)
        self.assertIn('finish_time', stats)
        # assertEqual instead of the deprecated assertEquals alias.
        self.assertEqual(stats['finish_reason'], 'finished')

    @defer.inlineCallbacks
    def test_crawl_start_requests_disabled(self):
        self.crawler = GalaxyCrawler(
            SingleRequestSpider, self.settings, start_requests=False)
        yield self.crawler.crawl(seed=self.site.url(), callback_func=self.cb)
        self._assert_engine_worked()
        self._assert_no_requests()

    @defer.inlineCallbacks
    def test_crawl_start_requests_enabled(self):
        self.crawler = GalaxyCrawler(
            SingleRequestSpider, self.settings, start_requests=True)
        yield self.crawler.crawl(seed=self.site.url(), callback_func=self.cb)
        self._assert_engine_worked()
        self.assertEqual(len(self.engine_status), 1, self.engine_status)
        est = dict(self.engine_status[0])
        self.assertEqual(est['engine.spider.name'], self.crawler.spider.name)
        self.assertEqual(est['len(engine.scraper.slot.active)'], 1)
        stats = self.crawler.stats.get_stats()
        self.assertEqual(stats['scheduler/enqueued'], 1)
        self.assertEqual(stats['scheduler/dequeued'], 1)
        self.assertEqual(stats['downloader/request_count'], 1)
        self.assertEqual(stats['downloader/response_count'], 1)

    @defer.inlineCallbacks
    def test_crawl_start_requests_default(self):
        # Omitting start_requests must behave like start_requests=False.
        self.crawler = GalaxyCrawler(SingleRequestSpider, self.settings)
        yield self.crawler.crawl(seed=self.site.url(), callback_func=self.cb)
        self._assert_engine_worked()
        self._assert_no_requests()
|
markhuyong/galaxy | rest/cmdline.py | # -*- coding: utf-8 -*-
from ConfigParser import SafeConfigParser, NoOptionError, NoSectionError
import argparse
import os
import sys
from scrapy.utils.conf import closest_scrapy_cfg
from scrapy.utils.misc import load_object
from twisted.application import app
from twisted.application.internet import TCPServer
from twisted.application.service import Application
from twisted.internet import reactor
from twisted.web.server import Site
from .log import setup_logging
from .conf import settings
def parse_arguments(argv=None):
    """Parse command-line options for the REST API server.

    :param argv: optional list of argument strings; defaults to
        ``sys.argv[1:]`` when ``None`` (parameter added for testability,
        backward compatible with the old zero-argument call).
    :returns: an ``argparse.Namespace`` with ``port``, ``ip``, ``crawler``,
        ``project``, ``set`` (list of ``(name, value)`` tuples) and
        ``settings`` attributes.
    """
    def valid_setting(string):
        # Accept "NAME=value" pairs for the -s/--set option.
        key, sep, value = string.partition('=')
        if not key or not sep:
            raise argparse.ArgumentTypeError(
                u'expected name=value: {}'.format(repr(string)))
        return key, value

    parser = argparse.ArgumentParser(
        description='HTTP API server for Scrapy project.')
    parser.add_argument('-p', '--port', dest='port',
                        type=int,
                        default=9080,
                        help='port number to listen on')
    parser.add_argument('-i', '--ip', dest='ip',
                        default='localhost',
                        help='IP address the server will listen on')
    parser.add_argument('-c', '--crawler', dest='crawler',
                        default='jianshu',
                        help='crawler the server will start at')
    parser.add_argument('--project', dest='project',
                        default='default',
                        help='project name from scrapy.cfg')
    parser.add_argument('-s', '--set', dest='set',
                        type=valid_setting,
                        action='append',
                        default=[],
                        metavar='name=value',
                        help='set/override setting (may be repeated)')
    parser.add_argument('-S', '--settings', dest='settings',
                        metavar='project.settings',
                        help='custom project settings module path')
    return parser.parse_args(argv)
def get_application(arguments):
    """Build the Twisted Application serving the REST API site on the
    ip/port taken from *arguments*."""
    root_cls = load_object(settings.SERVICE_ROOT)
    application = Application('rest')
    tcp_service = TCPServer(arguments.port, Site(root_cls()),
                            interface=arguments.ip)
    tcp_service.setServiceParent(application)
    return application
def find_scrapy_project(project, crawler="jianshu"):
    """Locate the scrapy.cfg of *crawler* and return its settings module
    path for the given *project* entry.

    Side effect: appends the project directory to ``sys.path`` so the
    settings module becomes importable.

    :raises RuntimeError: when no scrapy.cfg or settings entry is found.
    """
    project_dir = "./crawler/{}/".format(crawler)
    print("project_dir======={}".format(project_dir))
    project_config_path = closest_scrapy_cfg(project_dir)
    if not project_config_path:
        raise RuntimeError('Cannot find scrapy.cfg file')
    project_config = SafeConfigParser()
    project_config.read(project_config_path)
    try:
        project_settings = project_config.get('settings', project)
    except (NoSectionError, NoOptionError) as e:
        # str(e) instead of the deprecated .message attribute.
        raise RuntimeError(str(e))
    if not project_settings:
        raise RuntimeError('Cannot find scrapy project settings')
    project_location = os.path.dirname(project_config_path)
    sys.path.append(project_location)
    return project_settings
def execute():
    """Entry point: parse arguments, apply overrides to the settings,
    resolve the scrapy project, then start the Twisted reactor."""
    sys.path.insert(0, os.getcwd())
    arguments = parse_arguments()
    print("arguments======={}".format(arguments))
    if arguments.settings:
        settings.setmodule(arguments.settings)
    # Apply any -s NAME=value overrides (names are upper-cased).
    for name, value in arguments.set:
        settings.set(name.upper(), value)
    settings.set('PROJECT_SETTINGS',
                 find_scrapy_project(arguments.project, arguments.crawler))
    settings.freeze()
    setup_logging()
    app.startApplication(get_application(arguments), save=False)
    reactor.run()
# Allow running this module directly as a script.
if __name__ == '__main__':
    execute()
|
markhuyong/galaxy | crawler/misc/xicidaili.py | <gh_stars>0
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#-------------------------------------------------------------------------
# 程序:xicidaili.py
# 版本:0.1
# 作者:ly
# 日期:编写日期2011/11/16
# 语言:Python 2.7
# 操作:python referer_forge.py
# 功能:从www.xicidaili.com网站采集代理信息并存入数据库
#-------------------------------------------------------------------------
import requests,re,json
import sys,os,time
# --------------------------------------------------
# Chinese/UTF-8 encoding setup (Python 2 only: reload + setdefaultencoding)
reload(sys)
sys.setdefaultencoding('utf-8')
Type = sys.getfilesystemencoding()
# Database connection settings (left blank; filled in at deploy time)
MYSQL_HOST = ''
MYSQL_DBNAME = ''
MYSQL_USER = ''
MYSQL_PASSWD = ''
MYSQL_PORT= 3306
# INSERT statement for the proxy table; edit here if the schema changes
install_str = '''
INSERT INTO proxy_xici( `proxy_ip`, `proxy_port`, `proxy_country`, `proxy_type`, `addtime`, `Last_test_time`, `proxy_status`, `Remarks` )
VALUES (%s,%s,%s,%s,%s,%s,%s,%s) '''
# Forged request headers; edit here to match the target site
headers = {
    'Host':"www.xicidaili.com",# change to the target site's main domain
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
    "Accept-Encoding": "gzip, deflate",
    "Accept-Language": "en-US,en;q=0.5",
    "Connection": "keep-alive",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36",
    # "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:39.0) Gecko/20100101 Firefox/39.0",
    #"referer" : '192.168.3.11'# arbitrary forged value
}
# Issue the request
def get_request(url,headers):
    '''GET *url* with the forged *headers*; print and return the page text.

    NOTE(review): requests' ``.text`` is already unicode, so the extra
    ``.decode('utf8')`` only works on Python 2 — confirm before porting.
    '''
    html=requests.get(url,headers=headers, timeout=20).text.decode('utf8')
    print html
    return html
# Parse the page source with regexes and append the results to the given
# list; each entry ends up as an (ip, port) tuple.
def re_html_code(html_code,proxy_list_json):
    '''Extract proxy rows from the xicidaili HTML table in *html_code*.

    Appends ``(ip, port)`` tuples to *proxy_list_json* and returns it.
    On any error prints the exception and returns None (best-effort).
    '''
    # Pull the individual fields out of the <td> cells with regexes.
    try:
        re_list_ip = re.findall(r'<td>\d*\.\d*\.\d*\.\d*</td>',html_code)
        re_list_port = re.findall(r'<td>[\d]*</td>',html_code)
        # Lifetime cells contain Chinese units (hours/minutes/days).
        re_list_live_time = re.findall(u'<td>\d*[小时分钟天]+</td>',html_code)
        print re_list_live_time
        #print type(html_code),type(r'<td>\d*[小时分钟天]+</td>')
        re_list_time = re.findall(r'<td>\d*-\d*-\d* \d*:\d*</td>',html_code)
        #print re_list_ip
        l = len(re_list_ip)
        for i in range(l):
            # Strip the surrounding <td></td> markup from each field.
            PROXY_IP = re_list_ip[i].replace('<td>','').replace('</td>',"")
            PROXY_PORT = re_list_port[i].replace('<td>','').replace('</td>',"")
            PROXY_COUNTRY = 'China'
            PROXY_TYPE= 'Elite'
            addtime = re_list_time[i].replace('<td>','').replace('</td>',"")
            Last_test_time = re_list_live_time[i].replace('<td>','').replace('</td>',"")
            #print Last_test_time
            #time.sleep(10)
            proxy_status = '1'
            Remarks = 'ly'
            # Full column order for the proxy_xici table:
            # `id`, `proxy_ip`, `proxy_port`, `proxy_country`, `proxy_type`, `addtime`, `Last_test_time`, `proxy_status`, `Remarks`
            # list_i = [PROXY_IP,PROXY_PORT,PROXY_COUNTRY,PROXY_TYPE,addtime,Last_test_time,proxy_status,Remarks]
            list_i = (PROXY_IP, PROXY_PORT)
            #print list_i
            proxy_list_json.append(list_i)
        print proxy_list_json
        return proxy_list_json
    except Exception,e:
        # Best-effort: report and fall through (returns None).
        print Exception,e
# Sample record shape, for reference:
#{'PROXY_STATUS': 'OK', 'PROXY_CITY': '', 'PROXY_TIME': '548', 'PROXY_STATE': '', 'PROXY_REFS': '', 'PROXY_TYPE': 'Transparent', 'PROXY_COUNTRY': 'China', 'PROXY_LAST_UPDATE': '1 59', 'PROXY_UPTIMELD': '105/16', 'PROXY_UID': '', 'PROXY_PORT': '1F90', 'PROXY_IP': '172.16.31.10'}
if __name__ == '__main__':
    proxy_list_json = []
    # Old xicidaili.com scraping loop, kept for reference:
    # for i in range(1,2):
    #     url = "http://www.xicidaili.com/nn/"+str(i)
    #     print 'begin',url
    #     try:
    #         #html_code = get_request(url,headers)
    #         html_code = get_request(url,headers)
    #         proxy_list = []
    #         now_url = url
    #         proxy_list_json = re_html_code(html_code, proxy_list)
    #     except Exception,e:
    #         print Exception,e
    # Current source: hutoudaili API returns whitespace-separated entries.
    url = "http://3360623093271490.standard.hutoudaili.com/?num=20&area_type=1&ports=8123&anonymity=3&order=1"
    try:
        # html_code = get_request(url,headers)
        html_code = requests.get(url, timeout=20).text.decode('utf8')
        proxy_list = []
        now_url = url
        proxy_list_json = html_code.split()
    except Exception, e:
        print Exception, e
    # Write the harvested proxies out as an importable PROXIES list.
    with open("cnproxy.py", 'w') as f:
        f.write("PROXIES = [\n")
        for model in proxy_list_json:
            print model
            f.write("\t\t\t{\"ip_port\":\"%s\"},\n" % model)
        f.write("]\n")
|
markhuyong/galaxy | crawler/misc/middleware/proxy.py | <gh_stars>0
# -*- coding: utf-8 -*-
from ..proxy import PROXIES, FREE_PROXIES
import requests
import redis
import random
class CustomHttpProxyFromRedisMiddleware(object):
    """Downloader middleware that assigns requests an HTTP proxy taken
    from a Redis set, refilling the pool from a provider when it runs
    low.  In ADSL mode the proxy is fetched from a dial-up service
    instead of Redis.
    """

    def __init__(self, settings, crawler):
        # Reuse an injected connection ("RCONN") when given, otherwise
        # connect from the crawler settings.
        self.rconn = settings.get("RCONN", redis.Redis(
            crawler.settings.get('REDIS_HOST', 'localhost'),  # was 'localhsot' (typo)
            crawler.settings.get('REDIS_PORT', 6379),
            crawler.settings.get('REDIS_DB', 0),
            crawler.settings.get('REDIS_PASS', None)))
        self.redis_key = "http:proxies"
        self.adsl = crawler.settings.get('ADSL', False)

    @classmethod
    def from_crawler(cls, crawler):
        return cls(crawler.settings, crawler)

    def process_request(self, request, spider):
        # TODO implement complex proxy providing algorithm
        if not self.use_proxy(request):
            return
        try:
            if not self.adsl:
                # Refill the pool when it is nearly exhausted.
                if self.rconn.scard(self.redis_key) < 2:
                    self.update_proxy(spider)
                # On a retry, drop the proxy that just failed.
                if request.meta.get('retry_times', 0):
                    ban_proxy = request.meta.get('proxy', '').replace("http://", '')
                    self.rconn.srem(self.redis_key, ban_proxy)
                request.meta['proxy'] = "http://%s" % self.rconn.srandmember(self.redis_key)
            else:
                # ADSL mode: ask the dial-up service for the current address.
                # url = 'http://127.0.0.1:5000'
                url = 'http://172.16.58.3:5000'
                request.meta['proxy'] = "http://%s" % requests.get(url, auth=('admin', '123456')).text
            spider.logger.debug("http proxy is {}".format(request.meta['proxy']))
        except Exception as e:  # best-effort: keep crawling without a proxy
            spider.logger.critical("Exception %s" % e)

    def use_proxy(self, request):
        """Decide whether *request* should go through a proxy.

        ADSL mode always proxies; otherwise direct download for
        depth <= 2, and proxy with probability 0.2.
        """
        if self.adsl:
            return True
        if "depth" in request.meta and int(request.meta['depth']) <= 2:
            return False
        return random.randint(1, 10) <= 2

    def update_proxy(self, spider):
        """Fetch a fresh batch of proxies from the provider and add them
        to the Redis pool (no-op with a critical log when exhausted)."""
        url = "http://3360623093271490.standard.hutoudaili.com/?num=10&area_type=1&ports=8123&anonymity=3&order=1"
        try:
            # NOTE(review): .text is unicode; .decode('utf8') is py2-only.
            html_code = requests.get(url, timeout=20).text.decode('utf8')
            proxies = html_code.split()
            if not proxies:
                spider.logger.critical("http proxies is used up.")
                return
            self.rconn.sadd(self.redis_key, *proxies)
        except Exception as e:
            spider.logger.critical("Exception %s" % e)
class CustomHttpProxyMiddleware(object):
    """Downloader middleware that routes every request through a proxy
    randomly chosen from the static PROXIES list."""

    def process_request(self, request, spider):
        # TODO implement complex proxy providing algorithm
        if self.use_proxy(request):
            p = random.choice(PROXIES)
            try:
                request.meta['proxy'] = "http://%s" % p['ip_port']
            except Exception as e:  # malformed proxy entry: log and continue
                spider.logger.critical("Exception %s" % e)

    def use_proxy(self, request):
        """Always proxy.  (The depth/probability heuristic that used to
        live here was disabled; see CustomHttpProxyFromRedisMiddleware
        for the probabilistic variant.)"""
        return True
|
markhuyong/galaxy | tests/utils.py | # -*- coding: utf-8 -*-
import os
import socket
from scrapy.settings import Settings
from . import TESTS_PATH
LOCALHOST = 'localhost'
def get_testenv():
    """Return a copy of the environment with PYTHONPATH pointing at the
    repository root (the parent of the tests directory)."""
    env = dict(os.environ)
    repo_root = os.path.realpath(os.path.join(TESTS_PATH, '..'))
    env['PYTHONPATH'] = repo_root
    return env
def get_settings():
    """Settings with all extensions disabled."""
    disabled_extensions = (
        'scrapy.contrib.throttle.AutoThrottle',
        'scrapy.contrib.feedexport.FeedExporter',
        'scrapy.contrib.logstats.LogStats',
        'scrapy.contrib.closespider.CloseSpider',
        'scrapy.contrib.corestats.CoreStats',
        'scrapy.contrib.memusage.MemoryUsage',
        'scrapy.contrib.memdebug.MemoryDebugger',
        'scrapy.contrib.spiderstate.SpiderState',
        'scrapy.telnet.TelnetConsole',
    )
    # Mapping an extension to None tells Scrapy to disable it.
    return Settings({'EXTENSIONS': dict.fromkeys(disabled_extensions)})
|
markhuyong/galaxy | tests/qq/test_photo_status.py | '''
Offline tests
'''
import json
from builtins import range
from unittest import TestCase
from mock import MagicMock
from crawler.qq.qq.spiders.photo import QqPhotoSpider
from scrapy.http import TextResponse
from scrapy.http import Request
from crawler.qq.qq.items import QqStatusItem, QqPhotoItem
class TestQqPhotoOffline(TestCase):
    """Offline tests for QqPhotoSpider driven by canned JSON fixtures."""

    def setUp(self):
        self.spider = QqPhotoSpider()
        # Stub out logging and stats so the spider runs standalone.
        self.spider._logger = MagicMock()
        self.spider.stats_dict = {}

    def test_parse_photo(self):
        # Feed a recorded photo-feed JSON document through parse_photo and
        # check that every yielded status item has the expected text.
        # (load + dumps round-trip normalizes the fixture to a JSON string)
        with open('qq_photo.json', 'r') as fh:  # renamed: 'file' shadowed the builtin
            text = json.dumps(json.load(fh))
        request = Request(url='http://www.qq.com')
        response = TextResponse('qq_status.url', body=text, request=request,
                                encoding='utf8')
        for item in self.spider.parse_photo(response):
            if isinstance(item, QqStatusItem):
                self.assertEqual(item['text'], ' ')
            else:
                self.fail("returned item in not a instance of QqPhotoItem.")
|
markhuyong/galaxy | tests/__init__.py | # -*- coding: utf-8 -*-
import os
# Absolute path of the tests package directory.
TESTS_PATH = os.path.realpath(os.path.dirname(__file__))
# Absolute path of the repository root (one level above the tests).
PROJECT_PATH = os.path.realpath(os.path.join(TESTS_PATH, '..'))
|
markhuyong/galaxy | crawler/qq/qq/items.py | <gh_stars>0
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
from scrapy import Item, Field
class QqStatusItem(Item):
    """One QQ status post scraped from the feed."""
    text = Field()         # status text content
    publishTime = Field()  # publish time of the status
    pictures = Field()     # pictures attached to the status
class QqPhotoItem(Item):
    """One QQ album photo entry."""
    publishTime = Field()  # publish/upload time of the photo
    images = Field()       # image payload(s) for this entry
class PictureItem(Item):
    """A single picture with its dimensions."""
    url = Field()     # picture URL
    width = Field()   # picture width
    height = Field()  # picture height
class ImageItem(Item):
    """Text plus its associated pictures."""
    text = Field()      # accompanying text
    pictures = Field()  # related picture data
|
markhuyong/galaxy | tests/test_crawl_manager.py | # -*- coding: utf-8 -*-
import os
from time import sleep
import datetime
import pytest
from mock import patch, MagicMock
from scrapy import Item
from scrapy.exceptions import DontCloseSpider
from scrapy.http import Response
from scrapy.settings import Settings
from scrapy.utils.test import get_crawler
from twisted.internet.defer import Deferred
from twisted.python.failure import Failure
from twisted.trial import unittest
from twisted.web.error import Error
from rest.core import CrawlManager
from rest.conf import settings
from .spiders import MetaSpider
class TestCrawlManager(unittest.TestCase):
    """Shared fixture: a CrawlManager wired to a mocked crawler and a
    MetaSpider, plus sample item/response objects for the subclasses."""

    def setUp(self):
        self.url = 'http://localhost'
        self.kwargs = {'url': self.url, 'dont_filter': True}
        self.crawler = MagicMock()
        self.spider = MetaSpider.from_crawler(self.crawler)
        self.crawler.spider = self.spider
        self.crawl_manager = self._create_crawl_manager()
        self.crawl_manager.crawler = self.crawler
        self.item = Item()
        self.response = Response('http://localhost')
        # A second spider, used to verify per-spider filtering in handlers.
        self.another_spider = MetaSpider.from_crawler(self.crawler)

    def _create_crawl_manager(self):
        # Helper so subclasses can rebuild the manager after tweaking
        # settings (see TestLimitRuntime).
        crawl_manager = CrawlManager(self.spider.name, self.kwargs.copy())
        crawl_manager.crawler = self.crawler
        return crawl_manager
@patch('rest.core.galaxyCrawlerProcess.crawl', return_value=Deferred())
class TestCrawl(TestCrawlManager):
    """Tests for CrawlManager.crawl with the crawler process mocked out."""

    def test_crawl(self, crawler_process_mock):
        result = self.crawl_manager.crawl()
        self.assertIsInstance(result, Deferred)
        self.assertGreater(len(result.callbacks), 0)
        # return_items must be the first callback in the chain.
        self.assertEqual(
            result.callbacks[0][0][0], self.crawl_manager.return_items)

    def test_no_spider(self, crawler_process_mock):
        # spider wasn't found: crawl must translate KeyError into HTTP 404.
        crawler_process_mock.side_effect = KeyError
        exception = self.assertRaises(
            Error, self.crawl_manager.crawl)
        self.assertTrue(crawler_process_mock.called)
        self.assertEqual(exception.status, '404')

    def test_spider_exists(self, crawler_process_mock):
        result = self.crawl_manager.crawl()
        self.assertTrue(crawler_process_mock.called)
        self.assertIs(result, crawler_process_mock.return_value)

    def test_spider_arguments_are_passed(self, crawler_process_mock):
        # Positional and keyword spider args must reach the process call.
        spider_args = ['a', 'b']
        spider_kwargs = {'a': 1, 'b': 2}
        self.crawl_manager.crawl(*spider_args, **spider_kwargs)
        self.assertTrue(crawler_process_mock.called)
        call_args, call_kwargs = crawler_process_mock.call_args
        for arg in spider_args:
            self.assertIn(arg, call_args)
        self.assertDictContainsSubset(spider_kwargs, call_kwargs)
class TestGetProjectSettings(TestCrawlManager):
    def test_get_project_settings(self):
        # get_project_settings must return a scrapy Settings instance.
        result = self.crawl_manager.get_project_settings()
        self.assertIsInstance(result, Settings)
class TestSpiderIdle(TestCrawlManager):
    """Tests for CrawlManager.spider_idle scheduling the realtime request."""

    def setUp(self):
        super(TestSpiderIdle, self).setUp()
        self.crawler.spider = self.spider
        # test callback
        self.spider.parse_something = lambda: None
        self.crawl_manager.callback_name = 'parse_something'
        self.request = self.crawl_manager.request

    def _call_spider_idle(self):
        # spider_idle raises DontCloseSpider to keep the spider alive.
        try:
            self.crawl_manager.spider_idle(self.spider)
        except DontCloseSpider:
            pass

    def test_spider_opened(self):
        self.assertIsNone(self.crawl_manager.request.callback)
        self._call_spider_idle()
        self.crawler.engine.crawl.assert_called_once_with(
            self.crawl_manager.request, self.spider)
        # The request is replaced with one carrying the resolved callback.
        self.assertNotEqual(self.request, self.crawl_manager.request)
        # assertEqual instead of the deprecated assertEquals alias.
        self.assertEqual(
            self.crawl_manager.request.callback, self.spider.parse_something)

    def test_raise_error_if_not_callable(self):
        # A non-callable callback must abort before scheduling anything.
        self.spider.parse_something = None
        self.assertRaises(
            AssertionError, self.crawl_manager.spider_idle, self.spider)
        self.assertFalse(self.crawler.engine.crawl.called)

    def test_modify_realtime_request(self):
        self.assertDictEqual(self.crawl_manager.request.meta, {})
        self.assertEqual(self.crawl_manager.request.method, 'GET')

        def modify_realtime_request(request):
            request = request.replace(method='POST')
            request.meta['foo'] = 'bar'
            return request

        self.spider.modify_realtime_request = modify_realtime_request
        self._call_spider_idle()
        self.crawler.engine.crawl.assert_called_once_with(
            self.crawl_manager.request, self.spider)
        # The spider's hook must be applied to the scheduled request.
        self.assertEqual(self.crawl_manager.request.method, 'POST')
        self.assertEqual(self.crawl_manager.request.meta['foo'], 'bar')

    def test_modify_realtime_request_is_not_callable(self):
        # A non-callable hook is ignored; scheduling still happens.
        self.spider.modify_realtime_request = None
        self._call_spider_idle()
        self.crawler.engine.crawl.assert_called_once_with(
            self.crawl_manager.request, self.spider)
        self.assertNotEqual(self.request, self.crawl_manager.request)
class TestHandleScheduling(TestCrawlManager):
    """handle_scheduling must apply limits only for its own spider."""

    def setUp(self):
        super(TestHandleScheduling, self).setUp()
        self.crawl_manager.limit_requests = MagicMock()
        self.crawl_manager.limit_runtime = MagicMock()

    def test_handle_scheduling(self):
        self.crawl_manager.handle_scheduling(
            self.crawl_manager.request, self.spider)
        self.crawl_manager.limit_requests.assert_called_once_with(self.spider)
        self.crawl_manager.limit_runtime.assert_called_once_with(self.spider)

    def test_handle_scheduling_another_spider(self):
        # Signals from a different spider must be ignored entirely.
        self.crawl_manager.handle_scheduling(
            self.crawl_manager.request, self.another_spider)
        self.assertFalse(self.crawl_manager.limit_requests.called)
        self.assertFalse(self.crawl_manager.limit_runtime.called)
class TestLimitRuntime(TestCrawlManager):
    """limit_runtime must close the spider once the timeout elapses."""

    def setUp(self):
        super(TestLimitRuntime, self).setUp()
        self.crawl_manager.timeout_limit = 1
        # Pretend the crawl started "now" so the 1s limit is crossed below.
        self.crawler.stats.get_value.return_value = datetime.datetime.utcnow()

    def _test_limit_runtime(self):
        self.crawl_manager.limit_runtime(self.spider)
        self.assertFalse(self.crawler.engine.close_spider.called)
        # NOTE: real 1-second sleep to cross the timeout boundary.
        sleep(1)
        self.crawl_manager.limit_runtime(self.spider)
        self.assertTrue(self.crawler.engine.close_spider.called)

    def test_limit_runtime(self):
        self._test_limit_runtime()

    def test_string_number_timeout_value(self):
        # TIMEOUT_LIMIT given as a numeric string must still work.
        _timeout = settings.TIMEOUT_LIMIT
        try:
            settings.TIMEOUT_LIMIT = '1'
            self.crawl_manager = self._create_crawl_manager()
            self._test_limit_runtime()
        finally:
            # Always restore the global setting for other tests.
            settings.TIMEOUT_LIMIT = _timeout

    def test_wrong_timeout_value(self):
        # A non-numeric TIMEOUT_LIMIT must raise ValueError at creation.
        _timeout = settings.TIMEOUT_LIMIT
        try:
            settings.TIMEOUT_LIMIT = 'foo'
            self.assertRaises(
                ValueError, CrawlManager, self.spider.name, self.kwargs.copy())
        finally:
            settings.TIMEOUT_LIMIT = _timeout
class TestHandleSpiderError(TestCrawlManager):
    """handle_spider_error must collect tracebacks only in debug mode and
    only for its own spider."""

    def setUp(self):
        super(TestHandleSpiderError, self).setUp()
        self.exception_message = 'Foo'
        self.exception = Exception(self.exception_message)
        self.failure = Failure(self.exception)

    def test_handle_spider_error_debug_true(self):
        self.assertEqual(len(self.crawl_manager.errors), 0)
        self.crawl_manager.handle_spider_error(self.failure, self.spider)
        self.assertEqual(len(self.crawl_manager.errors), 1)
        # The stored error is a full traceback string.
        self.assertIn('Traceback', self.crawl_manager.errors[0])
        self.assertIn(self.exception.__class__.__name__,
                      self.crawl_manager.errors[0])
        self.assertIn(self.exception_message, self.crawl_manager.errors[0])

    def test_handle_spider_error_debug_false(self):
        # With debug off, errors must not be collected.
        self.crawl_manager.debug = False
        self.assertEqual(len(self.crawl_manager.errors), 0)
        self.crawl_manager.handle_spider_error(self.failure, self.spider)
        self.assertEqual(len(self.crawl_manager.errors), 0)

    def test_handle_spider_error_another_spider(self):
        # Errors from a different spider must be ignored.
        self.assertEqual(len(self.crawl_manager.errors), 0)
        self.crawl_manager.handle_spider_error(
            self.failure, self.another_spider)
        self.assertEqual(len(self.crawl_manager.errors), 0)
class TestLimitRequests(TestCrawlManager):
    """limit_requests must close the spider only after the configured
    maximum number of requests."""

    def test_max_requests_not_set(self):
        # Without max_requests the spider is never closed.
        for _ in range(100):  # range (not py2-only xrange) for py2/py3 compat
            self.crawl_manager.limit_requests(self.spider)
        self.assertFalse(self.crawler.engine.close_spider.called)

    def test_max_requests_set(self):
        self.crawl_manager.max_requests = 10
        for _ in range(self.crawl_manager.max_requests):
            self.crawl_manager.limit_requests(self.spider)
        self.assertFalse(self.crawler.engine.close_spider.called)
        # One request past the limit triggers close_spider.
        self.crawl_manager.limit_requests(self.spider)
        self.assertTrue(self.crawler.engine.close_spider.called)
class TestGetItem(TestCrawlManager):
    """get_item must collect items only from its own spider."""

    def setUp(self):
        super(TestGetItem, self).setUp()
        self.item = Item()

    def test_get_item(self):
        self.assertEqual(len(self.crawl_manager.items), 0)
        self.crawl_manager.get_item(self.item, self.response, self.spider)
        self.assertEqual(len(self.crawl_manager.items), 1)
        self.assertEqual(self.crawl_manager.items[0], self.item)

    def test_get_item_another_spider(self):
        # Items emitted by a different spider must be ignored.
        self.assertEqual(len(self.crawl_manager.items), 0)
        self.crawl_manager.get_item(
            self.item, self.response, self.another_spider)
        self.assertEqual(len(self.crawl_manager.items), 0)
class TestCollectDropped(TestCrawlManager):
    """collect_dropped() records dropped items for the spider under test."""
    def setUp(self):
        super(TestCollectDropped, self).setUp()
        self.exception = Exception('foo')
        # NOTE(review): self.item / self.response are presumably created by
        # TestCrawlManager.setUp() -- confirm against the base class.
        # Exception.message is Python 2 only.
        self.expected_result = {
            'item': self.item,
            'response': self.response,
            'exception': self.exception.message
        }
    def test_collect_dropped(self):
        self.assertEqual(len(self.crawl_manager.items_dropped), 0)
        self.crawl_manager.collect_dropped(
            self.item, self.response, self.exception, self.spider)
        # FIX: the original asserted the length twice; keep a single check.
        self.assertEqual(len(self.crawl_manager.items_dropped), 1)
        self.assertEqual(
            self.crawl_manager.items_dropped[0], self.expected_result)
    def test_collect_dropped_another_spider(self):
        # Drops from a different spider are ignored.
        self.assertEqual(len(self.crawl_manager.items_dropped), 0)
        self.crawl_manager.collect_dropped(
            self.item, self.response, self.exception, self.another_spider)
        self.assertEqual(len(self.crawl_manager.items_dropped), 0)
class TestReturnItems(TestCrawlManager):
    """return_items() must report items, drops, stats and (optionally) errors."""
    def setUp(self):
        super(TestReturnItems, self).setUp()
        self.stats = {
            'log_count/INFO': 6,
            'scheduler/enqueued/memory': 4,
            'scheduler/dequeued/memory': 4,
        }
        self.crawl_manager.crawler = MagicMock()
        self.crawl_manager.crawler.stats.get_stats.return_value = self.stats
        self.expected_result = {
            'items': self.crawl_manager.items,
            'items_dropped': self.crawl_manager.items_dropped,
            'stats': self.stats.copy(),
            'spider_name': self.spider.name,
        }
    def test_return_items(self):
        result = self.crawl_manager.return_items(None)
        self.assertDictContainsSubset(self.expected_result, result)
        # FIX: compare sorted key lists on BOTH sides -- the original compared
        # a sorted list against an unsorted .keys(), which is order-dependent.
        self.assertListEqual(
            sorted(self.stats.keys()), sorted(result['stats'].keys()))
        # debug = True by default
        self.assertIn('errors', result)
        self.assertEqual(result['errors'], self.crawl_manager.errors)
    def test_return_items_without_debug(self):
        self.crawl_manager.debug = False
        result = self.crawl_manager.return_items(None)
        self.assertDictEqual(self.expected_result, result)
        self.assertNotIn('errors', result)
class TestCreateSpiderRequest(TestCrawlManager):
    """Validation performed by create_spider_request()."""
    def _assert_bad_request(self):
        # Creating the request from self.kwargs must fail with HTTP 400.
        error = self.assertRaises(
            Error, self.crawl_manager.create_spider_request, self.kwargs)
        self.assertEqual(error.status, '400')
    def test_valid_arguments(self):
        request = self.crawl_manager.create_spider_request(self.kwargs)
        self.assertTrue(request.dont_filter)
        self.assertEqual(request.url, self.url)
    def test_invalid_arguments(self):
        self.kwargs['url1'] = 'http://localhost/foo'
        self._assert_bad_request()
    def test_invalid_url(self):
        for bad_url in ('//localhost/foo', 'localhost/foo'):
            self.kwargs['url'] = bad_url
            self._assert_bad_request()
class TestStartRequests(unittest.TestCase):
    # Verifies that CrawlManager.crawl() honours the ``start_requests`` flag
    # by either invoking or skipping the spider class' start_requests().
    def setUp(self):
        self.url = 'http://localhost'
        self.kwargs = {'url': self.url}
        # Patch start_requests on the spider CLASS with a mock so calls can
        # be counted; the original method is restored in tearDown.
        self.start_requests_mock = MagicMock()
        self.spidercls = MetaSpider
        self._start_requests = self.spidercls.start_requests
        self.spidercls.start_requests = self.start_requests_mock
        self.crawler = get_crawler(self.spidercls)
        class CustomCrawlManager(CrawlManager):
            # Points SPIDER_MODULES at the test spiders package so the
            # manager can resolve the spider under test.
            def get_project_settings(self):
                crawl_settings = super(
                    CustomCrawlManager, self).get_project_settings()
                crawl_settings.setdict(
                    {'SPIDER_MODULES': 'tests.spiders'}, priority='cmdline')
                return crawl_settings
        self.crawl_manager = CustomCrawlManager(
            self.spidercls.name, self.kwargs.copy())
        self.crawl_manager.crawler = self.crawler
    def tearDown(self):
        # Undo the class-level monkey patch applied in setUp.
        self.spidercls.start_requests = self._start_requests
    @patch('scrapy.crawler.ExecutionEngine')
    def test_start_requests_true(self, _):
        self.crawl_manager.start_requests = True
        self.crawl_manager.crawl()
        self.assertEqual(self.start_requests_mock.call_count, 1)
    @patch('scrapy.crawler.ExecutionEngine')
    def test_start_requests_false(self, _):
        self.crawl_manager.start_requests = False
        self.crawl_manager.crawl()
        self.assertEqual(self.start_requests_mock.call_count, 0)
class TestCreateProperLogFile(TestCrawlManager):
    """_get_log_file_path() should name log files with a fresh timestamp."""
    def test_filename(self):
        self.crawl_manager.log_dir = "some_dir_name"
        path = self.crawl_manager._get_log_file_path()
        name = os.path.basename(path)
        # The basename must parse as an ISO-ish timestamp with a .log suffix.
        parsed = datetime.datetime.strptime(name, '%Y-%m-%dT%H%M%S.%f.log')
        assert parsed
        # And that timestamp must be recent (created just now).
        age = datetime.datetime.now() - parsed
        assert age.seconds < 60
|
markhuyong/galaxy | tests/test_settings/__init__.py | <gh_stars>0
# -*- coding: utf-8 -*-
from mock import patch
from twisted.trial import unittest
from rest.conf import Settings
from . import default_settings
class TestSettings(unittest.TestCase):
    """Behaviour of the rest.conf.Settings container."""
    # Names that must never be exposed as settings (private / lowercase /
    # mixed case).
    _INVALID_NAMES = ('_HIDDEN', 'hidden', 'HiDdEn')
    @patch('rest.conf.default_settings', default_settings)
    def setUp(self):
        self.settings = Settings()
    def test_getattr(self):
        self.assertEqual(self.settings.A, 'A')
        self.assertEqual(self.settings.TEST, [1, 2, 3])
        # invalid (or deliberately hidden) names must not be visible
        for name in self._INVALID_NAMES:
            self.assertRaises(AttributeError, getattr, self.settings, name)
    def test_setmodule(self):
        from . import settings
        self.assertEqual(self.settings.A, 'A')
        self.settings.setmodule(settings)
        self.assertEqual(self.settings.A, 'B')
        self.assertEqual(self.settings.TEST, [1, 2, 3])
    def test_setmodule_string(self):
        self.assertEqual(self.settings.A, 'A')
        self.settings.setmodule('tests.test_settings.settings')
        self.assertEqual(self.settings.A, 'B')
        self.assertEqual(self.settings.TEST, [1, 2, 3])
    def test_set(self):
        self.assertEqual(self.settings.A, 'A')
        self.settings.set('A', 'C')
        self.assertEqual(self.settings.A, 'C')
        self.assertEqual(self.settings.TEST, [1, 2, 3])
        self.settings.set('TEST', [])
        self.assertEqual(self.settings.TEST, [])
        # setting an invalid name is silently ignored
        for name in self._INVALID_NAMES:
            self.settings.set(name, 1)
            self.assertRaises(AttributeError, getattr, self.settings, name)
    def test_freeze(self):
        self.assertEqual(self.settings.A, 'A')
        self.settings.set('A', 'D')
        self.assertEqual(self.settings.A, 'D')
        self.assertFalse(self.settings.frozen)
        self.settings.freeze()
        # A frozen container rejects further mutation.
        self.assertTrue(self.settings.frozen)
        self.assertRaises(TypeError, self.settings.set, 'A', 'E')
|
markhuyong/galaxy | tests/test_resource_crawl.py | # -*- coding: utf-8 -*-
import pytest
from twisted.trial import unittest
from twisted.web.error import Error
import requests
from rest.resources import CrawlResource
from .servers import galaxyTestServer, MockServer
@pytest.fixture()
def server(request):
target_site = MockServer()
target_site.start()
server = galaxyTestServer(site=target_site)
def close():
server.stop()
target_site.stop()
request.addfinalizer(close)
server.target_site = target_site
server.start()
return server
class TestCrawlResource(unittest.TestCase):
    def test_is_leaf(self):
        # The resource must be a twisted "leaf" so it handles its own path.
        self.assertTrue(CrawlResource.isLeaf)
class TestCrawlResourceGetRequiredArgument(unittest.TestCase):
    """get_required_argument() returns values or fails with HTTP 400."""
    def setUp(self):
        self.resource = CrawlResource()
        self.url = 'http://localhost:1234'
        self.data = {'url': self.url}
    def _assert_error_400(self, key):
        # Requesting *key* from self.data must raise a 400 error.
        error = self.assertRaises(
            Error, self.resource.get_required_argument, self.data, key)
        self.assertEqual(error.status, '400')
    def test_get_argument(self):
        self.assertEqual(
            self.resource.get_required_argument(self.data, 'url'), self.url)
    def test_raise_error(self):
        self._assert_error_400('key')
    def test_empty_argument(self):
        self.data['url'] = ''
        self._assert_error_400('url')
def perform_get(url, api_params, spider_data):
    """Issue a GET to the API with spider arguments merged into the params.

    NOTE: *api_params* is updated in place -- callers pass throwaway dicts.
    """
    api_params.update(spider_data)
    return requests.get(url, params=api_params)
def perform_post(url, api_params, spider_data):
    """Issue a POST to the API with spider arguments nested under "request"."""
    post_data = {"request": spider_data}
    post_data.update(api_params)
    return requests.post(url, json=post_data)
class TestCrawlResourceIntegration(object):
    """End-to-end tests of the /crawl.json endpoint over both GET and POST."""
    @pytest.mark.parametrize("method", [
        perform_get, perform_post
    ])
    def test_no_parameters(self, server, method):
        res = method(server.url('crawl.json'), {}, {})
        assert res.status_code == 400
        res_json = res.json()
        expected_result = {u'status': u'error', u'code': 400}
        for key, value in expected_result.items():
            assert res_json.get(key) == value
        if res.request.method == "GET":
            assert 'url' in res_json['message']
        else:
            assert "request" in res_json["message"]
    @pytest.mark.parametrize("method", [
        perform_get, perform_post
    ])
    def test_no_url_no_start_requests(self, server, method):
        res = method(server.url('crawl.json'), {'spider_name': 'test'},
                     {})
        assert res.status_code == 400
        expected_result = {
            u'status': u'error',
            u'code': 400
        }
        res_json = res.json()
        for key, value in expected_result.items():
            assert res_json[key] == value
        if res.request.method == "GET":
            assert 'url' in res_json['message']
        else:
            assert "request" in res_json["message"]
    @pytest.mark.parametrize("method", [
        perform_get, perform_post
    ])
    def test_no_url_but_start_requests_present(self, server, method):
        res = method(server.url("crawl.json"), {
            'spider_name': "test_with_sr",
            "start_requests": True
        }, {})
        assert res.status_code == 200
        result = res.json()
        assert result.get("status") == "ok"
        assert result.get("stats") is not None
        # FIX: the original checked the item count twice; once is enough.
        items = result.get("items", [])
        assert len(items) == 2
        for item in items:
            name = item["name"][0]
            if name == "Page 1":
                assert "page1" in item["referer"]
            elif name == "Page 2":
                assert "page2" in item["referer"]
        spider_errors = result.get("errors", [])
        assert len(spider_errors) == 0
        assert result["stats"].get("downloader/request_count") == 2
    def test_no_request_but_start_requests_present(self, server):
        """Test for POST handler checking if everything works fine
        if there is no 'request' argument, but 'start_requests' are
        present. Not checked above because of the way default test fixtures
        are written.
        """
        post_data = {
            "no_request": {},
            "start_requests": True,
            "spider_name": "test_with_sr"
        }
        # FIX: removed the no-op ``post_data.update(post_data)`` the
        # original carried here.
        res = requests.post(server.url("crawl.json"),
                            json=post_data)
        assert res.status_code == 200
        data = res.json()
        assert len(data["items"]) == 2
        assert data.get("errors") is None
    def test_no_request_in_POST_handler(self, server):
        """Test for POST handler checking if everything works fine
        if there is no 'request' argument at all.
        """
        post_data = {
            "no_request": {},
            "spider_name": "test_with_sr"
        }
        # FIX: removed the no-op ``post_data.update(post_data)`` here too.
        res = requests.post(server.url("crawl.json"),
                            json=post_data)
        assert res.status_code == 400
        data = res.json()
        msg = u"Missing required parameter: 'request'"
        assert data["message"] == msg
        assert data["status"] == "error"
        assert data.get("items") is None
    @pytest.mark.parametrize("method", [
        perform_get, perform_post
    ])
    def test_url_and_start_requests_present(self, server, method):
        spider_data = {
            "url": server.target_site.url("page3.html")
        }
        api_params = {
            "spider_name": "test_with_sr",
            "start_requests": True,
        }
        res = method(server.url("crawl.json"), api_params,
                     spider_data)
        assert res.status_code == 200
        output = res.json()
        assert len(output.get("errors", [])) == 0
        items = output.get("items", [])
        assert len(items) == 3
        for item in items:
            name = item["name"][0]
            if name == "Page 1":
                assert "page1" in item["referer"]
            elif name == "Page 2":
                assert "page2" in item["referer"]
            elif name == "Page 3":
                assert item.get("referer") is None
    @pytest.mark.parametrize("method", [
        perform_get, perform_post
    ])
    def test_no_spider_name(self, server, method):
        res = method(server.url("crawl.json"),
                     {},
                     {"url": server.target_site.url("page1.html")})
        assert res.status_code == 400
        res_json = res.json()
        expected_result = {
            u'status': u'error',
            u'code': 400
        }
        for key, value in expected_result.items():
            assert res_json[key] == value
        assert 'spider_name' in res_json['message']
    def test_invalid_scrapy_request_detected_in_api(self, server):
        res = perform_post(server.url("crawl.json"),
                           {"spider_name": "test"},
                           {'url': server.target_site.url("page1.html"),
                            "not_an_argument": False})
        assert res.status_code == 400
        res_json = res.json()
        expected_result = {
            u'status': u'error',
            u'code': 400
        }
        for k, v in expected_result.items():
            assert res_json[k] == v
        assert "'not_an_argument' is not a valid arg" in res_json['message']
    @pytest.mark.parametrize("method", [
        perform_get, perform_post
    ])
    def test_invalid_scrapy_request_detected_by_scrapy(self, server, method):
        res = method(
            server.url("crawl.json"),
            {"spider_name": "test"},
            {'url': "no_rules"}
        )
        assert res.status_code == 400
        res_json = res.json()
        assert res_json["status"] == "error"
        assert res_json["code"] == 400
        assert "Error while creating Scrapy Request" in res_json['message']
    @pytest.mark.parametrize("method", [
        perform_get, perform_post
    ])
    def test_crawl(self, server, method):
        url = server.url("crawl.json")
        res = method(url,
                     {"spider_name": "test"},
                     {"url": server.target_site.url("page1.html")})
        expected_items = [{
            u'name': ['Page 1'],
        }]
        res_json = res.json()
        assert res_json["status"] == "ok"
        assert res_json["items_dropped"] == []
        assert res_json['items']
        assert len(res_json['items']) == len(expected_items)
        assert res_json["items"] == expected_items
|
markhuyong/galaxy | crawler/jianshu/jianshu/spiders/lectures.py | # -*- coding: utf-8 -*-
import json
import logging
from scrapy import Selector
from scrapy.http.cookies import CookieJar
from scrapy.http.request import Request
from ..items import LecturesItem
from ..utils import CommonSpider
from ..utils import BaseHelper
logger = logging.getLogger(__name__)
class LectureSpider(CommonSpider):
name = "jianshu_lectures"
def __init__(self, *args, **kwargs):
super(CommonSpider, self).__init__(*args, **kwargs)
uid = kwargs.get('uid')
if uid:
self.logger.debug("uid item = {}".format(uid))
self.start_urls = [BaseHelper.get_user_url(uid)]
def parse(self, response):
uid = response.request.url.split('/')[-1]
cookie_jar = response.meta.setdefault('cookiejar', CookieJar())
cookie_jar.extract_cookies(response, response.request)
headers = BaseHelper.get_headers_json()
item = LecturesItem()
res = Selector(response)
titles = res.css('title::text').re(ur'(\w+)') or ['']
item['authorName'] = titles[0]
item['url'] = response.request.url
next_link = BaseHelper.get_lectures_url(uid)
request = Request(next_link, callback=self.parse_lecture_json,
headers=headers)
cookie_jar.add_cookie_header(request) # apply Set-Cookie ourselves
request.meta['item'] = item
yield request
def parse_lecture_json(self, response):
body = json.loads(response.body)
logger.debug("json_body============{}".format(body))
item = response.request.meta['item']
collection = []
for record in body['notebooks']:
lecture = self._get_partial_lecture(record, item['authorName'])
lecture['articleNumber'] = self._get_parse_article_number_request(lecture)
collection += [lecture]
item['lectures'] = collection
collection = []
for record in body['own_collections']:
lecture = self._get_partial_lecture(record, item['authorName'],
isSpecial=True)
lecture['articleNumber'] = self._get_parse_article_number_request(lecture)
collection += [lecture]
item['specials'] = collection
yield item
def parse_article_number(self, response):
lecture = response.request.meta['lecture']
res = Selector(response)
numbers = res.css('div.info::text').re(ur'([0-9]+)') or [0]
lecture['articleNumber'] = numbers[0]
def _get_partial_lecture(self, record, author_name, isSpecial=False):
if isSpecial:
return self._init_partial_lecture(record, author_name,
title_key='title', link_infix='c',
link_suffix='slug')
return self._init_partial_lecture(record, author_name, title_key='name',
link_infix='nb', link_suffix='id')
def _init_partial_lecture(self, record, author_name, title_key, link_infix,
link_suffix):
lecture = dict()
lecture['authorName'] = author_name
lecture['name'] = record[title_key]
lecture['url'] = "{base}/{infix}/{id}".format(base=BaseHelper.BASE,
infix=link_infix,
id=record[link_suffix])
return lecture
def _get_parse_article_number_request(self, record):
import requests
req = requests.Request('GET', record['url'])
r = req.prepare()
s = requests.Session()
response = s.send(r)
numbers = Selector(response).css('div.info::text').re(ur'([0-9]+)') or [0]
return numbers[0]
|
markhuyong/galaxy | rest/protocols.py | <filename>rest/protocols.py
# -*- coding: utf-8 -*-
from twisted.internet.protocol import Protocol
class HTTPReturner(Protocol):
    """Buffers a response body and fires a deferred with it on disconnect."""
    def __init__(self, finished):
        self._data = ""
        self.deferred = finished
    def dataReceived(self, data):
        # Accumulate every chunk until the connection closes.
        self._data = self._data + data
    def connectionLost(self, reason):
        # Hand the complete payload to whoever is waiting on the deferred.
        self.deferred.callback(self._data)
|
markhuyong/galaxy | crawler/weibo/weibo/cookies.py | <filename>crawler/weibo/weibo/cookies.py<gh_stars>0
# -*- coding: utf-8 -*-
import base64
import binascii
import json
import logging
import random
import time
import requests
import rsa
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from yumdama import identify
try:
from PIL import Image
except:
pass
try:
from urllib.parse import quote_plus
except:
from urllib import quote_plus
from crawler.weibo.weibo.utils import BaseHelper
from crawler.misc.proxy import FREE_PROXIES
IDENTIFY = 2 # 验证码输入方式: 1:看截图aa.png,手动输入 2:云打码
logger = logging.getLogger(__name__)
logging.getLogger("selenium").setLevel(logging.WARNING) # 将selenium的日志级别设成WARNING,太烦人
"""
输入你的微博账号和密码,可去淘宝买,一元5个。
建议买几十个,实际生产建议100+,微博反爬得厉害,太频繁了会出现302转移。
"""
weiBo_str = """
15210347246----q123123
15853404528----q123123
13071443293----q123123
13403386148----q123123
13264021637----q123123
17071343719----q123123
13129003893----q123123
17704957527----q123123
15554986846----q123123
18363114994----q123123
17074225175----q123123
15360605109----q123123
13202887896----q123123
15725444017----q123123
13185864496----q123123
18160841549----q123123
17726741039----q123123
13643734360----q123123
18684945047----q123123
13190151040----q123123
13780289747----q123123
17076605834----q123123
15524809425----q123123
14723394480----q123123
15992045800----q123123
13172388028----q123123
17726744941----q123123
13266426443----q123123
17704964091----q123123
18221464745----q123123
"""
# weiBo_str = """
# 13754672984----q123123
# 15253312554----q123123
# 13229912842----q123123
# 17074139958----q123123
# 17184957472----q123123
# 15659030463----q123123
# 15694414587----q123123
# """
def get_su(account):
    """Encode an account name (email / phone) the way Weibo's SSO expects.

    The value is URL-quoted (the Python equivalent of JavaScript's
    ``encodeURIComponent`` is ``quote_plus``) and then base64 encoded.
    """
    quoted = quote_plus(account)
    encoded = base64.b64encode(quoted.encode("utf-8"))
    return encoded.decode("utf-8")
# Pre-login: fetch servertime, nonce, pubkey and rsakv from the SSO endpoint.
def get_server_data(su, session, headers):
    """Call Sina's prelogin endpoint and return its decoded JSONP payload."""
    pre_url = "http://login.sina.com.cn/sso/prelogin.php?entry=weibo&callback=sinaSSOController.preloginCallBack&su="
    pre_url = pre_url + su + "&rsakt=mod&checkpin=1&client=ssologin.js(v1.4.18)&_="
    pre_url = pre_url + str(int(time.time() * 1000))
    pre_data_res = session.get(pre_url, headers=headers)
    # SECURITY NOTE(review): eval() on a server-controlled response is
    # dangerous; parsing the stripped JSONP payload with json.loads would
    # be preferable.
    sever_data = eval(pre_data_res.content.decode("utf-8").replace("sinaSSOController.preloginCallBack", ''))
    return sever_data
def get_password(password, servertime, nonce, pubkey):
    """RSA-encrypt the password the way Weibo's login JavaScript does.

    The plaintext is ``servertime \\t nonce \\n password``; the ciphertext
    is returned hex-encoded.
    """
    modulus = int(pubkey, 16)
    public_key = rsa.PublicKey(modulus, 65537)  # standard exponent 65537
    plaintext = (str(servertime) + '\t' + str(nonce) +
                 '\n' + str(password)).encode("utf-8")
    ciphertext = rsa.encrypt(plaintext, public_key)
    return binascii.b2a_hex(ciphertext)
def get_cha(pcid, session, headers):
    """Download the login captcha for *pcid* and try to display it.

    The image is saved to ``cha.jpg`` in the working directory; if PIL is
    unavailable (or display fails) the user is asked to open it manually.
    """
    cha_url = "http://login.sina.com.cn/cgi/pin.php?r="
    cha_url = cha_url + str(int(random.random() * 100000000)) + "&s=0&p="
    cha_url = cha_url + pcid
    cha_page = session.get(cha_url, headers=headers)
    # FIX: the context manager closes the file; the explicit close() the
    # original carried inside the with-block was redundant.
    with open("cha.jpg", 'wb') as f:
        f.write(cha_page.content)
    try:
        im = Image.open("cha.jpg")
        im.show()
        im.close()
    except:
        print(u"请到当前目录下,找到验证码后输入")
def getCookie_mapi(account, password, spider):
    """Log in via Weibo's mobile SSO endpoint and return the session cookie.

    Returns the cookie dict serialised as a JSON string, or "" on failure.
    """
    session = requests.Session()
    proxies = FREE_PROXIES
    p = random.choice(proxies)
    # NOTE(review): this proxy dict is built but never passed to the request
    # below, so the login always goes out directly.
    proxies = {'http': "http://%s" % p['ip_port']}
    headers = requests.utils.default_headers()
    headers.update(BaseHelper.get_login_headers())
    # Visit the initial page carrying the cookie.
    login_url = "https://passport.weibo.cn/sso/login"
    postdata = {
        'entry': 'mweibo',
        'savestate': '1',
        'username': account,
        'password': password,
        'r': 'http://m.weibo.cn/',
        'ec': '0',
        'pagerefer': "https://passport.weibo.cn/signin/welcome?entry=mweibo&r=http%3A%2F%2Fm.weibo.cn%2F",
        'wentry': '',
        'loginfrom': '',
        'client_id': '',
        'code': '',
        'qq': '',
        'mainpageflag': '1',
        'hff': '',
        'hfp': '',
    }
    # if showpin == 0:
    login_page = session.post(login_url, data=postdata, headers=headers)
    # else:
    #     pcid = sever_data["pcid"]
    #     get_cha(pcid, session, headers)
    #     postdata['door'] = raw_input(u"请输入验证码")
    #     login_page = session.post(login_url, data=postdata, headers=headers)
    jsonStr = login_page.content.decode("GBK")
    info = json.loads(jsonStr)
    # NOTE(review): retcode is compared as the int 20000000 here but as the
    # string "4049" below -- one of the two branches likely never matches.
    if info["retcode"] == 20000000:
        spider.logger.warning("Get Cookie Success!( Account:%s )" % account)
        cookie = session.cookies.get_dict()
        return json.dumps(cookie)
    elif info["retcode"] == "4049":
        # pcid = sever_data["pcid"]
        # get_cha(pcid, session, headers)
        # postdata['door'] = raw_input(u"请输入验证码")
        # login_page = session.post(login_url, data=postdata, headers=headers)
        # jsonStr = login_page.content.decode("GBK")
        return ""
    else:
        spider.logger.warning("Failed!( Reason:%s )" % info["reason"])
        return ""
def getCookie_api(account, password, spider):
    """Log in via the desktop SSO endpoint (RSA-encrypted password flow).

    Handles an optional captcha round-trip (Python 2 ``raw_input``).
    Returns the cookie dict as a JSON string, or "" on failure.
    """
    session = requests.Session()
    proxies = FREE_PROXIES
    p = random.choice(proxies)
    # NOTE(review): this proxy dict is built but never used below.
    proxies = {'http': "http://%s" % p['ip_port']}
    headers = requests.utils.default_headers()
    headers.update(BaseHelper.get_login_headers())
    # Hit the desktop login page first so the session picks up bootstrap
    # cookies.
    index_url = "http://weibo.com/login.php"
    try:
        session.get(index_url, headers=headers, timeout=2)
    except:
        session.get(index_url, headers=headers)
    # try:
    #     input = raw_input
    # except:
    #     pass
    # su is the encoded username.
    su = get_su(account)
    sever_data = get_server_data(su, session, headers)
    servertime = sever_data["servertime"]
    nonce = sever_data['nonce']
    rsakv = sever_data["rsakv"]
    pubkey = sever_data["pubkey"]
    showpin = sever_data["showpin"]
    password_secret = get_password(password, servertime, nonce, pubkey)
    postdata = {
        'entry': 'weibo',
        'gateway': '1',
        'from': '',
        'savestate': '7',
        'useticket': '1',
        'pagerefer': "http://login.sina.com.cn/sso/logout.php?entry=miniblog&r=http%3A%2F%2Fweibo.com%2Flogout.php%3Fbackurl",
        'vsnf': '1',
        'su': su,
        'service': 'miniblog',
        'servertime': servertime,
        'nonce': nonce,
        'pwencode': 'rsa2',
        'rsakv': rsakv,
        'sp': password_secret,
        'sr': '1366*768',
        'encoding': 'UTF-8',
        'prelt': '115',
        'url': 'http://weibo.com/ajaxlogin.php?framelogin=1&callback=parent.sinaSSOController.feedBackUrlCallBack',
        'returntype': 'TEXT'
    }
    login_url = 'http://login.sina.com.cn/sso/login.php?client=ssologin.js(v1.4.18)'
    # showpin != 0 means the server demands a captcha before logging in.
    if showpin == 0:
        login_page = session.post(login_url, data=postdata, headers=headers)
    else:
        pcid = sever_data["pcid"]
        get_cha(pcid, session, headers)
        postdata['door'] = raw_input(u"请输入验证码")
        login_page = session.post(login_url, data=postdata, headers=headers)
    jsonStr = login_page.content.decode("GBK")
    info = json.loads(jsonStr)
    if info["retcode"] == "0":
        spider.logger.warning("Get Cookie Success!( Account:%s )" % account)
        cookie = session.cookies.get_dict()
        return json.dumps(cookie)
    elif info["retcode"] == "4049":
        # retcode 4049: captcha required -- retry once with a solved captcha.
        pcid = sever_data["pcid"]
        get_cha(pcid, session, headers)
        postdata['door'] = raw_input(u"请输入验证码")
        login_page = session.post(login_url, data=postdata, headers=headers)
        jsonStr = login_page.content.decode("GBK")
        # NOTE(review): the retry response is never inspected; "" is
        # returned even if the second attempt succeeded.
        return ""
    else:
        spider.logger.warning("Failed!( Reason:%s )" % info["reason"])
        return ""
def getCookie_old(account, password, spider):
    """Fetch one account's cookie via the legacy SSO login endpoint.

    Returns the cookie dict as a JSON string, or "" on failure.
    """
    loginURL = "https://login.sina.com.cn/sso/login.php?client=ssologin.js(v1.4.15)"
    username = base64.b64encode(account.encode("utf-8")).decode("utf-8")
    postData = {
        "entry": "sso",
        "gateway": "1",
        "from": "null",
        "savestate": "30",
        "useticket": "0",
        "pagerefer": "",
        "vsnf": "1",
        "su": username,
        "service": "sso",
        "sp": password,
        "sr": "1440*900",
        "encoding": "UTF-8",
        "cdult": "3",
        "domain": "sina.com.cn",
        "prelt": "0",
        "returntype": "TEXT",
    }
    session = requests.Session()
    proxies = FREE_PROXIES
    # NOTE(review): the chosen proxy is never used (the request below goes
    # out directly); the choice still consumes global random state.
    p = random.choice(proxies)
    # proxies = {'http': "http://%s" % p['ip_port']}
    headers = requests.utils.default_headers()
    headers.update(BaseHelper.get_headers())
    # headers = BaseHelper.get_login_headers()
    # r = session.post(loginURL, data=postData, proxies=proxies, headers=headers)
    r = session.post(loginURL, data=postData, headers=headers)
    jsonStr = r.content.decode("gbk")
    info = json.loads(jsonStr)
    if info["retcode"] == "0":
        spider.logger.warning("Get Cookie Success!( Account:%s )" % account)
        cookie = session.cookies.get_dict()
        return json.dumps(cookie)
    else:
        spider.logger.warning("Failed!( Reason:%s )" % info["reason"])
        return ""
def getCookie(account, password, spider):
    """Fetch one account's cookie by driving a PhantomJS login session.

    Returns the cookie dict as a JSON string, or "" on failure.
    (Python 2 code: ``except Exception, e`` / ``raw_input``.)
    """
    # PhantomJS needs an old mobile user-agent, otherwise the captcha
    # cannot be passed.
    dcap = dict(DesiredCapabilities.PHANTOMJS)
    dcap["phantomjs.page.settings.userAgent"] = (
        "Mozilla/5.0 (Linux; U; Android 2.3.6; en-us; Nexus S Build/GRK39F) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1"
    )
    # dcap["phantomjs.page.settings.userAgent"] = BaseHelper.random_user_agent()
    browser = webdriver.PhantomJS(desired_capabilities=dcap)
    try:
        # browser.set_window_size(480, 320)
        browser.get("https://weibo.cn/login/")
        time.sleep(3)
        import os
        try:
            os.remove("aa.png")
        except OSError:
            pass
        browser.save_screenshot("aa.png")
        failure = 0
        # Retry the form up to 5 times while the login page title persists.
        while "微博" in browser.title and failure < 5:
            failure += 1
            username = browser.find_element_by_id("loginName")
            username.clear()
            username.send_keys(account)
            psd = browser.find_element_by_id("loginPassword")
            psd.clear()
            psd.send_keys(password)
            try:
                code = browser.find_element_by_id("loginVCode")
                code.clear()
                if IDENTIFY == 1:
                    try:
                        from PIL import Image
                        im = Image.open("aa.png")
                        im.show()
                        im.close()
                    except:
                        print(u"请到当前目录下,找到验证码后输入")
                    code_txt = raw_input("请查看路径下新生成的aa.png,然后输入验证码:")  # captcha typed in manually
                else:
                    from PIL import Image
                    img = browser.find_element_by_xpath(
                        '//form[@method="post"]/div/img[@alt="请打开图片显示"]')
                    x = img.location["x"]
                    y = img.location["y"]
                    im = Image.open("aa.png")
                    im.crop((x, y, x + img.size.get("width"), y + img.size.get("height"))).save("ab.png")  # crop out the captcha
                    code_txt = identify()  # recognised by the captcha-solving service
                code.send_keys(code_txt)
            except Exception, e:
                pass
            commit = browser.find_element_by_id("loginAction")
            commit.click()
            time.sleep(3)
        time.sleep(4)
        # if "我的首页" not in browser.title:
        #     time.sleep(4)
        # if '未激活微博' in browser.page_source:
        #     print '账号未开通微博'
        #     return {}
        cookie = {}
        # if "我的首页" in browser.title or True:
        if browser.title is not None:
            for elem in browser.get_cookies():
                cookie[elem["name"]] = elem["value"]
            spider.logger.warning("Get Cookie Success!( Account:%s )" % account)
            return json.dumps(cookie)
    except Exception, e:
        logger.warning("Failed %s!" % account)
        return ""
    finally:
        try:
            browser.quit()
        except Exception, e:
            pass
def initCookie(rconn, spider):
    """Fetch cookies for every configured account and cache them in Redis.

    Accounts whose cookie is already present in Redis are skipped; at most
    six fresh logins are attempted per call.
    """
    prefix = BaseHelper.get_cookie_key_prefix(spider)
    accounts = []
    for line in weiBo_str.split('\n'):
        if line:
            parts = line.split('----')
            accounts.append((parts[0], parts[1]))
    throttle = 5
    for username, password in accounts:
        # Key layout: '<spider>:Cookies:<account>--<password>'.
        redis_key = "%s:%s--%s" % (prefix, username, password)
        if rconn.get(redis_key) is not None:
            continue  # cookie already cached for this account
        cookie = getCookie(username, password, spider)
        if len(cookie) > 0:
            rconn.set(redis_key, cookie)
        throttle -= 1
        if throttle < 0:
            break
    cookieNum = len(rconn.keys("{}:*".format(prefix)))
    spider.logger.warning("The num of the cookies is %s" % cookieNum)
    if cookieNum == 0:
        spider.logger.error('initCookie: Stopping...')
def updateCookie(accountText, rconn, spider):
    """Refresh the cookie for one account ("account--password") in Redis.

    On failure the stale cookie is removed entirely.
    """
    prefix = BaseHelper.get_cookie_key_prefix(spider)
    account = accountText.split("--")[0]
    password = accountText.split("--")[1]
    # BUG FIX: the original called getCookie(account, password) without the
    # required ``spider`` argument, which raised a TypeError at runtime.
    cookie = getCookie(account, password, spider)
    if len(cookie) > 0:
        spider.logger.warning(
            "The cookie of %s has been updated successfully!" % account)
        rconn.set("%s:%s" % (prefix, accountText), cookie)
    else:
        spider.logger.error(
            "The cookie of %s updated failed! Remove it!" % accountText)
        removeCookie(accountText, rconn, spider)
def removeCookie(accountText, rconn, spider):
    """Drop the cookie stored for ``accountText`` ("account--password")."""
    prefix = BaseHelper.get_cookie_key_prefix(spider)
    rconn.delete("%s:%s" % (prefix, accountText))
    remaining = len(rconn.keys("{}:*".format(prefix)))
    spider.logger.warning("The num of the cookies left is %s" % remaining)
    if remaining == 0:
        spider.logger.error('removeCookie, cookie is used up. Stopping...')
|
markhuyong/galaxy | crawler/qq/qq/utils.py | # -*- coding: utf-8 -*-
import random
import urllib
from scrapy.http.headers import Headers
from crawler.misc.spider import CommonSpider
from crawler.misc import agents
class BaseHelper(object):
    """URL templates and helpers for the mobile Qzone / QQ graph APIs.

    Placeholder tokens (USER_QQ_NUMBER, GTK, ALBUMID, ...) inside the
    templates are substituted by the ``get_*_url`` helpers below.
    """
    PROFILE_URL = "https://mobile.qzone.qq.com/profile?hostuin=USER_QQ_NUMBER"
    SHUOSHU_URL = "https://mobile.qzone.qq.com/list?g_tk=GTK&format=json&list_type=shuoshuo&action=0&res_uin=USER_QQ_NUMBER&count=PAGECOUNT"
    CODE_URL = "https://graph.qq.com/oauth2.0/token?grant_type=authorization_code&client_id=101347930&client_secret=68270da4c08fddb26486283c1fab1b0a&code=CODE&redirect_uri=http%3a%2f%2f29060abb.nat123.net%2fPBMSWEBOOK%2fqqlogin&state=203"
    OPENID_URL = "https://graph.qq.com/oauth2.0/me?access_token=ACCESS_TOKEN"
    ALBUMLIST_URL = "https://graph.qq.com/photo/list_album?access_token=ACCESS_TOKEN&oauth_consumer_key=101347930&openid=OPENID&format=json"
    NICKNAME_URL = "https://graph.qq.com/user/get_user_info?access_token=ACCESS_TOKEN&oauth_consumer_key=101347930&openid=OPENID"
    POTOLIST_URL = "https://graph.qq.com/photo/list_photo?access_token=ACCESS_TOKEN&oauth_consumer_key=101347930&openid=OPENID&format=json&albumid=ALBUMID"
    ALBUM_URL = "https://mobile.qzone.qq.com/list?g_tk=GTK&format=json&list_type=album&action=0&res_uin=USER_QQ_NUMBER"
    PHOTO_URL = "http://h5.qzone.qq.com/webapp/json/mqzone_photo/getPhotoList2?g_tk=GTK&uin=USER_QQ_NUMBER&albumid=ALBUMID&ps=PS&pn=PN"
    # Page size used for shuoshuo listings.
    PAGE_COUNT = '40'
    @classmethod
    def get_headers(cls):
        """Default request headers (disabled entries kept for reference)."""
        return Headers({
            # 'User-Agent': self._get_user_agent(),
            # 'Content-Type': 'application/json',
            # "Connection": "keep-alive",
            'Accept': 'application/json',
            # 'Host': cls.BASE_URL,
        })
    @classmethod
    def get_profile_url(cls, uid):
        """Profile page URL for QQ number *uid*."""
        return cls.PROFILE_URL.replace("USER_QQ_NUMBER", uid)
    @classmethod
    def get_shuoshuo_url(cls, uid, last_attach=None):
        """Shuoshuo (status feed) listing URL, optionally paginated."""
        url = cls.SHUOSHU_URL.replace("USER_QQ_NUMBER", uid) \
            .replace("PAGECOUNT", cls.PAGE_COUNT)
        return url if last_attach is None \
            else url + "&res_attach=" + cls._quote_url(last_attach)
    # NOTE(review): the four methods below all return SHUOSHU_URL although
    # their names (and the otherwise-unused CODE_URL / OPENID_URL /
    # ALBUMLIST_URL / POTOLIST_URL constants above) suggest a copy-paste
    # mistake -- confirm the intended templates before relying on them.
    def get_code_url(self, uid):
        return self.SHUOSHU_URL.replace("USER_QQ_NUMBER", uid)
    def get_openid_url(self, uid):
        return self.SHUOSHU_URL.replace("USER_QQ_NUMBER", uid)
    def get_album_list_url(self, uid):
        return self.SHUOSHU_URL.replace("USER_QQ_NUMBER", uid)
    def get_photo_list_url(self, uid):
        return self.SHUOSHU_URL.replace("USER_QQ_NUMBER", uid)
    @classmethod
    def get_album_url(cls, uid, last_attach=None):
        """Album listing URL, optionally paginated via res_attach."""
        url = cls.ALBUM_URL.replace("USER_QQ_NUMBER", uid)
        return url if last_attach is None \
            else url + "&res_attach=" + cls._quote_url(last_attach)
    @classmethod
    def get_photo_url(cls, uid, album_id, ps, pn, last_attach=None):
        """Photo listing URL for one album (ps = page size, pn = page no.)."""
        url = cls.PHOTO_URL.replace("USER_QQ_NUMBER", uid) \
            .replace("ALBUMID", album_id) \
            .replace("PS", ps) \
            .replace("PN", pn)
        return url if last_attach is None \
            else url + "&res_attach=" + cls._quote_url(last_attach)
    @staticmethod
    def get_cookie_key_prefix(spider):
        """Redis key prefix derived from the spider name, e.g. 'qq:Cookies'."""
        sep = "_"
        assert spider.name.index(sep) > 0
        return "{}:Cookies".format(spider.name.split(sep)[0])
    @staticmethod
    def _quote_url(url):
        # Python 2 only (``unicode`` / ``urllib.quote``).
        return urllib.quote(unicode(str(url), "UTF-8"))
|
JLivingston01/JasonvsData | data_to_string.py | <reponame>JLivingston01/JasonvsData
# -*- coding: utf-8 -*-
"""
Created on Thu May 11 10:07:20 2017
@author: <NAME>
"""
##Transform pandas DFs for other uses
##DF to query string creates string of form 'date=2017-01-01&a=8&b=5&c=6\ndate=2017-01-02&a=3&b=3&c=0' from DF
##df_to_html writes html table from contents of pandas DF
##df_to_list_of_lists creates a list of rows, with each row as a list of elements. This was designed for my gradient descent experiments. I know it's not a string.
class data_to_string:
    """Helpers that serialize a pandas DataFrame into other representations.

    All methods are static: the original versions lacked @staticmethod, so
    calling them on an *instance* would have passed the instance as the
    DataFrame argument and failed.
    """

    @staticmethod
    def df_to_str(X):
        """Serialize *X* as one 'col=value&col=value' query string per row,
        rows separated by newlines, e.g. 'date=2017-01-01&a=8\\ndate=...'.
        """
        cols = list(X.columns.values)
        # str.join builds each row in one pass instead of accumulating
        # separators piece by piece.
        rows = ["&".join("{}={}".format(col, X[col][j]) for col in cols)
                for j in range(len(X[cols[0]]))]
        return "\n".join(rows)

    @staticmethod
    def df_to_html(X):
        """Render *X* as a bare HTML <table>: one header row of column
        names followed by one <tr> per record.
        """
        cols = list(X.columns.values)
        parts = ['<table><tr>']
        parts.extend('<th>' + col + '</th>' for col in cols)
        parts.append('</tr>')
        for i in range(len(X[cols[0]])):
            parts.append('<tr>')
            parts.extend('<td>' + str(X[col][i]) + '</td>' for col in cols)
            parts.append('</tr>')
        parts.append('</table>')
        return "".join(parts)

    @staticmethod
    def df_to_list_of_lists(df):
        """Return *df* as a list of rows, each row a list of cell values
        in column order (useful for gradient-descent experiments)."""
        cols = list(df.columns.values)
        return [[df[col][i] for col in cols]
                for i in range(len(df[cols[0]]))]
##transform data string in form 'category equal element elem_space lin_space' into Pandas DF
##example of query string 'date=2017-01-01&a=8&b=5&c=6\ndate=2017-01-02&a=3&b=3&c=0'
##casting would be manual after application
class strings_to_df:
    """Parse query-string blobs (e.g. 'a=1&b=2\\na=&b=3') into DataFrames.

    Values are kept as strings; casting is left to the caller.  Empty
    values ('a=&b=3') become NaN.
    """

    @staticmethod
    def query_str_to_tbl(X, lin_space, equal, elem_space):
        """Parse *X* into a pandas DataFrame.

        lin_space  -- row separator (e.g. '\\n')
        equal      -- key/value separator (e.g. '=')
        elem_space -- pair separator (e.g. '&')
        """
        import pandas as pd
        import numpy as np
        rows = X.split(lin_space)
        fields = []
        data = []
        for i in range(len(rows)):
            # Substitute an explicit 'nan' token for empty values so column
            # alignment survives the split below.  (An empty value in the
            # *last* pair of a row is still not caught by this.)
            row_n = rows[i].replace(equal + elem_space, equal + "nan" + elem_space)
            # BUG FIX: the original restarted from rows[i] here, which
            # discarded the substitution above; chain the replacements.
            row_n = row_n.replace(equal, ",")
            row_n = row_n.replace(elem_space, ",")
            row1_sep = row_n.split(",")
            for j in range(len(row1_sep)):
                if i == 0:
                    # The first row also supplies the column names
                    # (tokens at even positions).
                    if j % 2 == 0:
                        fields.append(row1_sep[j])
                    else:
                        data.append(row1_sep[j])
                else:
                    if j % 2 != 0:
                        data.append(row1_sep[j])
        data1 = pd.DataFrame()
        for i in range(len(fields)):
            tempfields = []
            for j in range(len(rows)):
                tempfields.append(data[i + j * len(fields)])
            data1[fields[i]] = tempfields
        # np.nan (lowercase) also works on NumPy 2.x, where np.NaN was removed.
        data1 = data1.replace("nan", np.nan)
        return data1
|
JLivingston01/JasonvsData | correlation functions.py | <reponame>JLivingston01/JasonvsData<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 27 15:39:50 2017
@author: <NAME>
"""
##R-Squared of two arrays/lists/pandas DF columns
def rsquaredfn(X, Y):
    """Return the R-squared (squared Pearson correlation) of two
    equal-length sequences X and Y.
    """
    import pandas as pd
    import numpy as np
    R = pd.DataFrame()
    R['X'] = X
    R['Y'] = Y
    MeanX = np.mean(R['X'])
    # BUG FIX: the original computed MeanY from R['X'], which skews the
    # Y deviations and therefore the denominator of the correlation.
    MeanY = np.mean(R['Y'])
    R['A'] = R['X'] - MeanX
    R['B'] = R['Y'] - MeanY
    AXB = np.sum(R['A'] * R['B'])
    A2 = np.sum(R['A'] * R['A'])
    B2 = np.sum(R['B'] * R['B'])
    correl = AXB / np.sqrt(A2 * B2)
    return correl * correl
## After importing an SKlearn predictive model, Returning adjusted R-Squared. Intended for regression problems.
## Can be adapted to return model coefficients/projections along with R-Squared
def adjustedrsquaredfn(model, training_data, target_data):
    """Fit *model* on the data and return the adjusted R-squared of its
    in-sample fit.

    model         -- estimator CLASS (not instance) exposing fit()/predict()
    training_data -- pandas DataFrame of predictors
    target_data   -- sequence of target values
    """
    import pandas as pd
    import numpy as np
    lm = model()
    lm.fit(training_data, target_data)
    data = pd.DataFrame()
    data['Target'] = target_data
    data['Fit'] = lm.predict(training_data)
    mean_target = np.mean(data['Target'])
    mean_fit = np.mean(data['Fit'])
    A = data['Target'] - mean_target
    B = data['Fit'] - mean_fit
    AXB = np.sum(A * B)
    A2 = np.sum(A * A)
    B2 = np.sum(B * B)
    rsquared = (AXB / np.sqrt(A2 * B2)) ** 2
    n = len(data['Target'])
    k = len(list(training_data.columns.values))
    # Standard adjusted R^2: 1 - (1 - R^2)(n - 1)/(n - k - 1).
    # BUG FIX: the original denominator was (n - 1) - k - 1 (off by one).
    return 1 - ((1 - rsquared) * (n - 1) / (n - k - 1))
JLivingston01/JasonvsData | perceptronNN.py | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 21 10:25:21 2017
@author: <NAME>
"""
import pandas as pd
import numpy as np
#Credit to <NAME> for inspiring descent method
def perceptron_fit(W0, lrate, iteration, data, sig_weight):
    """Train a single sigmoid-thresholded perceptron online.

    W0         -- initial weight list, one weight per feature column
    lrate      -- learning rate
    iteration  -- number of full passes (epochs) over the data
    data       -- DataFrame; every column but the last is a feature, the
                  last column is the 0/1 target
    sig_weight -- sigmoid threshold above which the unit fires

    Returns the trained weight list (W0 itself is left unmodified).
    """
    # Copy so the caller's W0 is not mutated (the original aliased it).
    w = list(W0)
    cols = list(data.columns.values)
    n = len(data)
    k = len(cols)
    for _ in range(iteration):
        for b in range(n):
            # Weighted sum of the feature columns for sample b.
            z = 0
            for c in range(k - 1):
                z = z + w[c] * data[cols[c]][b]
            gz = 1 / (1 + np.exp(-z))
            oz = 1 if gz > sig_weight else 0
            # Perceptron update: move weights toward reducing the error.
            error = data[cols[k - 1]][b] - oz
            for d in range(k - 1):
                w[d] = w[d] + error * lrate * data[cols[d]][b]
    return w
def internal_neuron_out(W0, lrate, iteration, data, sig_weight):
    """Train a perceptron on *data* and return its raw sigmoid activations
    (one float per row), without thresholding to 0/1."""
    weights = perceptron_fit(W0=W0, lrate=lrate, iteration=iteration,
                             data=data, sig_weight=sig_weight)
    cols = list(data.columns.values)
    # Linear combination of the feature columns with the learned weights.
    activation = 0
    for idx in range(len(cols) - 1):
        activation = activation + data[cols[idx]] * weights[idx]
    activation = np.array(activation)
    return 1 / (1 + np.exp(-activation))
def neuron_out(W0, lrate, iteration, data, sig_weight):
    """Train a perceptron on *data* and return hard 0/1 outputs, firing
    whenever the sigmoid activation exceeds 0.5."""
    weights = perceptron_fit(W0=W0, lrate=lrate, iteration=iteration,
                             data=data, sig_weight=sig_weight)
    cols = list(data.columns.values)
    # Linear combination of the feature columns with the learned weights.
    activation = 0
    for idx in range(len(cols) - 1):
        activation = activation + data[cols[idx]] * weights[idx]
    sigmoid = 1 / (1 + np.exp(-np.array(activation)))
    return np.where(sigmoid > .5, 1, 0)
# Toy training set: a bias column, two features and a 0/1 target.
data = pd.DataFrame()
data['Bias'] = [1, 1, 1, 1, 1]
data['X1'] = [1, 2, 2, 1, 7]
data['X2'] = [1, 1, 4, 4, 7]
data['Y'] = [1, 0, 1, 0, 1]

wnew = perceptron_fit(W0=[0, 0, 0], lrate=.1, iteration=50, data=data, sig_weight=.5)

# First layer: nine perceptrons that differ only in their firing threshold.
data_level_2 = pd.DataFrame()
data_level_2['bias'] = data['Bias']
for step in range(1, 10):
    data_level_2['n%d' % step] = neuron_out(W0=[0, 0, 0], lrate=.1, iteration=50,
                                            data=data, sig_weight=step / 10.0)
data_level_2['Y'] = data['Y']
print(data_level_2)

# Second layer: three units trained on the first layer's outputs.
data_level_3 = pd.DataFrame()
data_level_3['bias'] = data['Bias']
for label, threshold in (('h1', .525), ('h2', .483), ('h3', .518)):
    data_level_3[label] = neuron_out(W0=[0] * 10, lrate=.1, iteration=50,
                                     data=data_level_2, sig_weight=threshold)
data_level_3['Y'] = data['Y']
print(data_level_3)

# Final output unit over the second layer.
out = neuron_out(W0=[0, 0, 0, 0], lrate=.1, iteration=50, data=data_level_3, sig_weight=.5)
print(out)
JLivingston01/JasonvsData | Linear Regression by OLGD and Closed Form.py | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 24 10:52:46 2017
@author: <NAME>
"""
import pandas as pd
import numpy as np
from sklearn import linear_model
##CLOSED FORM LINEAR REGRESSION
X = pd.DataFrame()
X['int'] = [1,1,1,1,1]
X['A'] = [1,2,3,4,5]
X['B'] = [3,4,4,6,7]
X = X.values
Y = [4,5,7,7,9]
#Y = Y.values
Xt=X.transpose()
XtX=np.dot(Xt,X)
XtY = np.dot(Xt,Y)
XtXinv = np.linalg.inv(XtX)
B = np.dot(XtXinv,XtY)
##SKLEARN LINEAR REGRESSION FOR COMPARISON
X = pd.DataFrame()
X['A'] = [1,2,3,4,5]
X['B'] = [3,4,4,6,7]
explin = linear_model.LinearRegression(fit_intercept = True)
explin.fit(X,Y)
explin.coef_
##ONLINE GRADIENT DESCENT
def predict(row,coefficients):
row = row
yhat = coefficients[0]
for i in range(len(row) - 1):
yhat = yhat+coefficients[i+1]*row[i]
return yhat
def df_to_list_of_lists(df):
df2 = []
for i in range(len(df[list(df.columns.values)[0]])):
templist = []
for col in list(df.columns.values):
x = df[col][i]
templist.append(x)
df2.append(templist)
return df2
def coefficients_sgd(train,l_rate,n_epoch):
if type(train) == pd.core.frame.DataFrame:
train = df_to_list_of_lists(train)
coefs = [0 for i in range(len(train[0]))]
for epoch in range(n_epoch):
for row in train:
yhat = predict(row,coefs)
error = yhat - row[-1]
coefs[0] = coefs[0] - l_rate*error
for i in range(len(row)-1):
coefs[i+1] = coefs[i+1]-l_rate*error*row[i]
return coefs
X = pd.DataFrame()
#X['int'] = [1,1,1,1,1]
X['A'] = [1,2,3,4,5]
X['B'] = [3,4,4,6,7]
X['Y'] = Y
coefficients = coefficients_sgd(train = X,l_rate = .001,n_epoch = 1000)
print("Intercept = %f Beta_A = %f Beta_B = %f"%tuple(coefficients) )
|
JLivingston01/JasonvsData | data manipulation.py | # -*- coding: utf-8 -*-
"""
Created on Wed May 10 12:58:35 2017
@author: <NAME>
"""
import pandas as pd
import numpy as np
# Sample frame: ten dated rows with three random single-digit columns.
check = pd.DataFrame()
check['date'] = ['2017-01-01','2017-01-02','2017-01-03','2017-01-04','2017-01-05','2017-01-06','2017-01-07','2017-01-08','2017-01-09','2017-01-10']
check['a'] = list(np.random.randint(0,10,10))
check['b'] = list(np.random.randint(0,10,10))
check['c'] = list(np.random.randint(0,10,10))
#Hardcode Table to Query String
# Builds 'col=val&col=val' per row, rows joined by newlines; separators are
# only appended between pairs/rows, never trailing.
string1 = []
for j in range(len(check['date'])):
    for i in list(check.columns.values):
        y = str(check[i][j])
        string = i+"="+y
        amp = "&"
        string1.append(string)
        if i != list(check.columns.values)[len(list(check.columns.values))-1]:
            string1.append(amp)
    lin = "\n"
    if j != len(check[list(check.columns.values)[0]])-1:
        string1.append(lin)
str1 = "".join(string1)
#def to_data_str(X):
# string1 = []
# for j in range(len(X[list(X.columns.values)[0]])):
# for i in list(X.columns.values):
# y = str(X[i][j])
# string = i+"="+y
# amp = "&"
# string1.append(string)
# if i != list(X.columns.values)[len(list(X.columns.values))-1]:
# string1.append(amp)
# lin = "\n"
# if j != len(X[list(X.columns.values)[0]])-1:
# string1.append(lin)
# str1 = "".join(string1)
# return(str1)
#Function Table to String
def to_data_str(X):
    """Serialize DataFrame *X* as one 'col=value&col=value' query string
    per row, with rows separated by newlines."""
    cols = list(X.columns.values)
    row_strings = []
    for row_idx in range(len(X[cols[0]])):
        pairs = [col + "=" + str(X[col][row_idx]) for col in cols]
        row_strings.append("&".join(pairs))
    return "\n".join(row_strings)
# Re-serialize the sample frame via the function version.
str1 = to_data_str(check)
#Function Table to HTML
def table_to_html(X):
    """Render DataFrame *X* as a bare HTML <table>: a header row of column
    names followed by one row per record."""
    cols = list(X.columns.values)
    pieces = ['<table><tr>']
    for col in cols:
        pieces.append('<th>' + col + '</th>')
    pieces.append('</tr>')
    for row_idx in range(len(X[cols[0]])):
        pieces.append('<tr>')
        for col in cols:
            pieces.append('<td>' + str(X[col][row_idx]) + '</td>')
        pieces.append('</tr>')
    pieces.append('</table>')
    return "".join(pieces)
html = table_to_html(check)
# A second, purely numeric sample table.
testdata = pd.DataFrame()
testdata['A'] = [1,2,3,4,5,6,7,8,9,0]
testdata['B'] = [0,9,8,7,6,5,4,3,2,1]
testdata['C'] = [5,6,4,7,3,8,2,9,1,0]
html2 = table_to_html(testdata)
#Hardcode HTML Elements to HTML Page
# NOTE(review): manual open/close — a with-statement would be safer.
filename = 'test table to html.html'
html_file = open(filename,"w")
wrapper = ("""<html>
<head>
<title> Test Table to Html </title>
</head>
<body><p>"""
+ html2 +
"""</p></body>
</html>""")
html_file.write(wrapper)
html_file.close()
#Function HTML Elements to HTML page
def html_page(Title, Elements, filename):
    """Write an HTML page titled *Title* to *filename*, with the given HTML
    fragment strings *Elements* joined by newlines as the body."""
    head = """<html>
    <head>
    <title>""" + Title + """</title>
    </head>"""
    tail = """</p></body>
    </html>"""
    page = head + "\n".join(Elements) + tail
    # with-statement guarantees the handle is closed even on error.
    with open(filename, "w") as html_file:
        html_file.write(page)
# Write both sample tables into a single HTML page.
html_page('Test HTML 1',[html,html2],'test html 1.html')
#Hardcode Query String to Table
# First pass: parse only the first row into alternating names and values.
rows = str1.split("\n")
row1 = rows[0]
row1 = row1.replace("=",",")
row1 = row1.replace("&",",")
row1_sep = row1.split(",")
fields = []
data = []
for i in range(len(row1_sep)):
    if i % 2 == 0:
        fields.append(row1_sep[i])
    else:
        data.append(row1_sep[i])
# Second pass: parse every row; the first row also yields column names.
fields = []
data = []
for i in range(len(rows)):
    row_n = rows[i].replace("=",",")
    row_n = row_n.replace("&",",")
    row1_sep = row_n.split(",")
    for j in range(len(row1_sep)):
        if i == 0:
            if j % 2 == 0:
                fields.append(row1_sep[j])
            else:
                data.append(row1_sep[j])
        else:
            if j % 2 != 0:
                data.append(row1_sep[j])
# Reassemble the flat value list column by column into a DataFrame.
data1 = pd.DataFrame()
for i in range(len(fields)):
    tempfields = []
    for j in range(len(rows)):
        tempfields.append(data[i+j*len(fields)])
    data1[fields[i]] = tempfields
#Function Query String to Table
def str_to_tbl(X, lin_space, equal, elem_space):
    """Parse a query-string-per-row blob back into a DataFrame (inverse of
    to_data_str).  Empty values such as 'a=&b=1' become NaN.

    lin_space  -- row separator (e.g. '\\n')
    equal      -- key/value separator (e.g. '=')
    elem_space -- pair separator (e.g. '&')
    """
    rows = X.split(lin_space)
    fields = []
    data = []
    for i in range(len(rows)):
        # Substitute an explicit 'nan' token for empty values so column
        # alignment survives the split below.
        row_n = rows[i].replace(equal + elem_space, equal + "nan" + elem_space)
        # BUG FIX: the original restarted from rows[i] here, discarding
        # the substitution above; chain the replacements instead.
        row_n = row_n.replace(equal, ",")
        row_n = row_n.replace(elem_space, ",")
        row1_sep = row_n.split(",")
        for j in range(len(row1_sep)):
            if i == 0:
                # The first row also supplies the column names.
                if j % 2 == 0:
                    fields.append(row1_sep[j])
                else:
                    data.append(row1_sep[j])
            else:
                if j % 2 != 0:
                    data.append(row1_sep[j])
    data1 = pd.DataFrame()
    for i in range(len(fields)):
        tempfields = []
        for j in range(len(rows)):
            tempfields.append(data[i + j * len(fields)])
        data1[fields[i]] = tempfields
    # np.nan (lowercase) also works on NumPy 2.x, where np.NaN was removed.
    data1 = data1.replace("nan", np.nan)
    return data1
# Round-trip the sample query string back into a DataFrame.
new_table = str_to_tbl(str1,"\n","=","&")
##
class data_manipulation:
    """Class-based collection of the DataFrame serialization helpers above.

    Methods are static: the originals lacked @staticmethod, so calling them
    on an instance would have passed the instance as the DataFrame.
    """

    @staticmethod
    def to_data_str(X):
        """Serialize *X* as one 'col=value&col=value' query string per row,
        rows separated by newlines."""
        cols = list(X.columns.values)
        rows = ["&".join(col + "=" + str(X[col][j]) for col in cols)
                for j in range(len(X[cols[0]]))]
        return "\n".join(rows)

    @staticmethod
    def table_to_html(X):
        """Render *X* as a bare HTML <table> with a header row followed by
        one row per record."""
        cols = list(X.columns.values)
        pieces = ['<table><tr>']
        pieces.extend('<th>' + col + '</th>' for col in cols)
        pieces.append('</tr>')
        for i in range(len(X[cols[0]])):
            pieces.append('<tr>')
            pieces.extend('<td>' + str(X[col][i]) + '</td>' for col in cols)
            pieces.append('</tr>')
        pieces.append('</table>')
        return "".join(pieces)
|
JLivingston01/JasonvsData | Model Samples.py | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 30 10:07:21 2017
@author: <NAME>
"""
##Several SKlearn example fits with functions to quickly return fit, useful for determining the method which best describes the data (AR2)
import pandas as pd
import numpy as np
from treeinterpreter import treeinterpreter as ti
from sklearn import linear_model
# Synthetic demo data: columns A-C are predictors, D is the target.
data = pd.DataFrame(np.random.randint(0,100,size = (100,4)), columns = ['A','B','C','D'])
target = list(data['D'])
train = pd.DataFrame.copy(data.drop('D', axis = 1))
#Random Forest
forest = ti.RandomForestRegressor().fit(train,target)
forestpredict = forest.predict(train)
results = pd.DataFrame()
results['D'] = target
results['forest'] = forestpredict
#Linear Regression
linear = linear_model.LinearRegression().fit(train,target)
linearpredict = linear.predict(train)
results['linear'] = linearpredict
#Ridge Regression
ridge = linear_model.ridge.Ridge().fit(train,target)
ridgepredict = ridge.predict(train)
results['ridge'] = ridgepredict
#Bayesian Ridge Regression
baysianridge = linear_model.BayesianRidge().fit(train,target)
baysianridgepredict = baysianridge.predict(train)
results['baysian ridge'] = baysianridgepredict
#Logistic Regression (a classifier, applied here to a numeric target)
logistic = linear_model.LogisticRegression().fit(train,target)
logisticpredict = logistic.predict(train)
results['logistic'] = logisticpredict
#MAPE Calculations: in-sample absolute percentage error per model
results['forest PE'] = abs(results['forest']-results['D'])/results['D']
results['linear PE'] = abs(results['linear']-results['D'])/results['D']
results['ridge PE'] = abs(results['ridge']-results['D'])/results['D']
results['baysian ridge PE'] = abs(results['baysian ridge']-results['D'])/results['D']
results['logistic PE'] = abs(results['logistic']-results['D'])/results['D']
forestMAPE = np.mean(results['forest PE'])
linearMAPE = np.mean(results['linear PE'])
ridgeMAPE = np.mean(results['ridge PE'])
baysianridgeMAPE = np.mean(results['baysian ridge PE'])
logisticMAPE = np.mean(results['logistic PE'])
def rsquaredfn(X, Y):
    """Return the R-squared (squared Pearson correlation) of two
    equal-length sequences X and Y.
    """
    import pandas as pd
    import numpy as np
    R = pd.DataFrame()
    R['X'] = X
    R['Y'] = Y
    MeanX = np.mean(R['X'])
    # BUG FIX: the original computed MeanY from R['X'], which skews the
    # Y deviations and therefore the denominator of the correlation.
    MeanY = np.mean(R['Y'])
    R['A'] = R['X'] - MeanX
    R['B'] = R['Y'] - MeanY
    AXB = np.sum(R['A'] * R['B'])
    A2 = np.sum(R['A'] * R['A'])
    B2 = np.sum(R['B'] * R['B'])
    correl = AXB / np.sqrt(A2 * B2)
    return correl * correl
# In-sample R-squared per model (target vs. fitted values).
forestR2 = rsquaredfn(results['D'],results['forest'])
linearR2 = rsquaredfn(results['D'],results['linear'])
ridgeR2 = rsquaredfn(results['D'],results['ridge'])
baysianridgeR2 = rsquaredfn(results['D'],results['baysian ridge'])
logisticR2 = rsquaredfn(results['D'],results['logistic'])
def adjustedrsquaredfn(model, training_data, target_data):
    """Fit *model* on the data and return the adjusted R-squared of its
    in-sample fit.

    model         -- estimator CLASS (not instance) exposing fit()/predict()
    training_data -- pandas DataFrame of predictors
    target_data   -- sequence of target values
    """
    import pandas as pd
    import numpy as np
    lm = model()
    lm.fit(training_data, target_data)
    data = pd.DataFrame()
    data['Target'] = target_data
    data['Fit'] = lm.predict(training_data)
    mean_target = np.mean(data['Target'])
    mean_fit = np.mean(data['Fit'])
    A = data['Target'] - mean_target
    B = data['Fit'] - mean_fit
    AXB = np.sum(A * B)
    A2 = np.sum(A * A)
    B2 = np.sum(B * B)
    rsquared = (AXB / np.sqrt(A2 * B2)) ** 2
    n = len(data['Target'])
    k = len(list(training_data.columns.values))
    # Standard adjusted R^2: 1 - (1 - R^2)(n - 1)/(n - k - 1).
    # BUG FIX: the original denominator was (n - 1) - k - 1 (off by one).
    return 1 - ((1 - rsquared) * (n - 1) / (n - k - 1))
# Adjusted R-squared per model class (each is refit inside the function).
forestAR2 = adjustedrsquaredfn(ti.RandomForestRegressor, train, target)
linearAR2 = adjustedrsquaredfn(linear_model.LinearRegression, train, target)
ridgeAR2 = adjustedrsquaredfn(linear_model.Ridge, train, target)
baysianridgeAR2 = adjustedrsquaredfn(linear_model.BayesianRidge, train, target)
logisticAR2 = adjustedrsquaredfn(linear_model.LogisticRegression, train, target)
minchen57/TextClassifyKeras | temp.py | <gh_stars>0
import os
import pandas as pd
from sklearn.model_selection import train_test_split
CUSTOM_SEED = 43        # base random seed for reproducible splits
TEST_SPLIT = 0.2        # fraction held out for the test file
VALIDATION_SPLIT = 0.25 # fraction of the training data held out for validation

relativePath = os.getcwd()
sentencePath = relativePath + "/data/sample5_sentences_08132018.csv"
sentences = pd.read_csv(sentencePath, index_col="Sentence#")
# Keep the first 18 label columns plus the raw sentence text.
sentences = sentences[list(sentences.columns.values)[0:18]+["Sentence"]]
# Hold out the test set first.
train, test = train_test_split(sentences, test_size=TEST_SPLIT,random_state=CUSTOM_SEED + 10)
# BUG FIX: split the *training* partition into train/validation.  The
# original split `sentences` again, so the saved training file overlapped
# the held-out test file (data leakage between the two CSVs).
truetrain, val = train_test_split(train, test_size=VALIDATION_SPLIT,random_state=CUSTOM_SEED )
truetrain.to_csv(relativePath + "/data/sample5_sentences_08132018_18_train.csv")
test.to_csv(relativePath + "/data/sample5_sentences_08132018_18_test.csv")
minchen57/TextClassifyKeras | review_classification_test_dependent.py | <reponame>minchen57/TextClassifyKeras<filename>review_classification_test_dependent.py
#!/usr/bin/env python
import nltk
import pandas as pd
import numpy as np
import os
from gensim.models import Word2Vec
from keras.utils import plot_model
import matplotlib.pyplot as plt
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Embedding
from keras.layers import Dense
from keras.layers import LSTM, Bidirectional
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from keras.models import Sequential
from sklearn.metrics import f1_score, accuracy_score, precision_score, recall_score, precision_recall_fscore_support
# PARAMETERS ================
MAX_SEQUENCE_LENGTH = 100  # sentences longer than this (in tokens) are dropped
CUSTOM_SEED = 43           # base random seed for reproducible splits
TEST_SPLIT = 0.2           # fraction held out for testing
VALIDATION_SPLIT = 0.25    # fraction of training data used for validation
def cal_test_baseline(df):
    """Point-wise accuracy of the all-zeros predictor: the fraction of label
    cells (every column except the trailing sentence column) equal to 0."""
    labels = df[df.columns[0:-1]]
    total_cells = len(labels) * len(labels.columns)
    return 1 - labels.sum().sum() / total_cells
def token2vec(token,w2vmodel):
    """Return the embedding vector for *token* from a gensim Word2Vec model."""
    return w2vmodel.wv[token]
def plot_model_performance(train_loss, train_acc, train_val_loss, train_val_acc, save_figure_path):
    """Plot per-epoch training/validation loss (top panel) and accuracy
    (bottom panel) and save the figure to *save_figure_path*; the figure
    is not shown interactively."""
    train_color = '#72C29B'
    val_color = '#FFA577'
    panels = (
        ('loss', train_loss, train_val_loss, 'upper right'),
        ('accuracy', train_acc, train_val_acc, 'lower right'),
    )
    with plt.xkcd():
        fig, axes = plt.subplots(2, figsize=(10, 8))
        for axis, (metric, train_series, val_series, legend_loc) in zip(axes, panels):
            axis.plot(range(1, len(train_series) + 1), train_series,
                      train_color, linewidth=5, label='training')
            axis.plot(range(1, len(val_series) + 1), val_series,
                      val_color, linewidth=5, label='validation')
            axis.set_xlabel('# epoch')
            axis.set_ylabel(metric)
            axis.tick_params('y')
            axis.legend(loc=legend_loc, shadow=False)
            axis.set_title('Model %s through #epochs' % metric, fontweight='bold')
        plt.tight_layout()
        fig.savefig(save_figure_path)
        plt.close(fig)
def runScript(a, b, epoch, sentences):
    """Train and evaluate one Bi-LSTM multi-label sentence classifier.

    a         -- number of LSTM hidden units
    b         -- width of the dense layer after the LSTM
    epoch     -- number of training epochs
    sentences -- DataFrame whose first 18 columns are 0/1 labels and whose
                 'Sentence' column holds the raw text

    Saves a loss/accuracy plot and a per-label confusion summary CSV under
    a configuration-specific Results/ directory.
    """
    #sentences = sentences[['Overall Print Quality', 'Color Print Quality', 'Monochrome Print Quality',"Sentence"]]
    sentences = sentences[list(sentences.columns.values)[0:18]+["Sentence"]]
    numberOfClasses = len(sentences.columns)-1
    print("classes selected", sentences.columns[0:-1])
    print("number of classes/labels: ", numberOfClasses)
    print("total number of sentences: ", len(sentences))
    w2vmodel = Word2Vec.load("word2vec.model")
    print("vector size used in w2v: ",w2vmodel.vector_size)
    path = "Results/08132018-18-detailed2/"+"LSTM-"+str(a)+"hidden-"+str(b)+"/"
    # split data into train and test
    train, test = train_test_split(sentences, test_size=TEST_SPLIT,random_state=CUSTOM_SEED + 10)
    print(len(test))
    word2int = {}
    counter = -1
    def prepare_inputs(df, word2int, counter, sent_token_list, multilabel):
        # Tokenize each sentence, map tokens to integer ids (growing the
        # vocabulary as new tokens appear), and drop sentences longer than
        # MAX_SEQUENCE_LENGTH, recording their indices in `dropped`.
        dropped = []
        for index, row in df.iterrows():
            tokens = nltk.word_tokenize(row["Sentence"])
            tokenstoIDs = []
            for token in tokens:
                if token not in word2int:
                    counter += 1
                    word2int[token] = counter
                tokenstoIDs.append(word2int[token])
            if len(tokenstoIDs) <= MAX_SEQUENCE_LENGTH:
                sent_token_list.append(tokenstoIDs)
                multilabel.append(list(row[0:numberOfClasses].values))
            else:
                dropped.append(index)
        X = np.array(sent_token_list)
        y = np.array(multilabel)
        # Zero-pad every token sequence to a fixed length.
        X = pad_sequences(X, maxlen=MAX_SEQUENCE_LENGTH)
        return X, y, word2int, counter, dropped
    (X,y,word2int,counter,not_used) = prepare_inputs(train, word2int,counter,[],[])
    (X_test,y_test,word2int,counter,dropped) = prepare_inputs(test, word2int,counter,[],[])
    print('size of volcabulary: ',len(word2int))
    # BUG FIX: the original printed type(dropped[0]) unconditionally, which
    # raises IndexError whenever no test sentence exceeded the length limit.
    if dropped:
        print(type(dropped[0]))
    print(dropped)
    print(test.index)
    # Keep the test frame aligned with X_test/y_test.
    test.drop(dropped, axis=0, inplace=True)
    print(len(test))
    # split training data into train and validation
    X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=VALIDATION_SPLIT, random_state=CUSTOM_SEED)
    n_train_samples = X_train.shape[0]
    n_val_samples = X_val.shape[0]
    n_test_samples = X_test.shape[0]
    print('We have %d TRAINING samples' % n_train_samples)
    print('We have %d VALIDATION samples' % n_val_samples)
    print('We have %d TEST samples' % n_test_samples)
    # + 1 to include the unknown word
    embedding_matrix = np.random.random((len(word2int) + 1, w2vmodel.vector_size))
    for word in word2int:
        embedding_vector = token2vec(word,w2vmodel)
        if embedding_vector is not None:
            # words not found in embeddings_index will remain unchanged and thus will be random.
            embedding_matrix[word2int[word]] = embedding_vector
    print('Embedding matrix shape', embedding_matrix.shape)
    print('X_train shape', X_train.shape)
    # Model: trainable embedding -> Bi-LSTM -> dense -> per-label sigmoids.
    model = Sequential()
    embedding_layer = Embedding(len(word2int) + 1,
                                w2vmodel.vector_size,
                                weights=[embedding_matrix],
                                input_length=MAX_SEQUENCE_LENGTH,
                                trainable=True)
    model.add(embedding_layer)
    model.add(Bidirectional(LSTM(a, return_sequences=False)))
    model.add(Dense(b, activation='relu'))
    model.add(Dense(numberOfClasses, activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer='rmsprop',
                  metrics=['acc'])
    print("model fitting - Bidirectional LSTM")
    model.summary()
    x= model.fit(X_train, y_train,
                 batch_size=256,
                 epochs=epoch,
                 validation_data = (X_val, y_val),
                 shuffle = True,
                 verbose = 1
                 )
    if not os.path.exists(path):
        print('MAKING DIRECTORY to save model file')
        os.makedirs(path)
    plot_model_performance(
        train_loss=x.history.get('loss', []),
        train_acc=x.history.get('acc', []),
        train_val_loss=x.history.get('val_loss', []),
        train_val_acc=x.history.get('val_acc', []),
        save_figure_path = path +'model_performance.png'
    )
    # Visualize model architecture
    #plot_model(model, to_file=path +'model_structure.png', show_shapes=True)
    # Threshold the sigmoid outputs into hard 0/1 predictions.
    preds = model.predict(X_test, verbose=1)
    preds[preds>=0.5] = int(1)
    preds[preds<0.5] = int(0)
    y_test = test[test.columns[0:-1]].values
    print("baseline point-wise acc: ", cal_test_baseline(test))
    print("poitwise accuracy", np.sum(preds == y_test) / (preds.shape[0] * preds.shape[1]))
    print("f1: ", f1_score(y_test, preds, average='weighted'))
    print("accuracy: ", accuracy_score(y_test, preds))
    print("precision: ", precision_score(y_test, preds, average='weighted'))
    print("recall: ", recall_score(y_test, preds, average='weighted'))
    columns = []
    for col in sentences.columns[0:-1]:
        columns.append(col+"_predicted")
    predd = pd.DataFrame(preds, columns=columns, index=test.index)
    df = pd.concat([test,predd], axis=1)
    # The first half of df's columns are ground-truth labels; `jump` is the
    # offset from a truth column to its "<label>_predicted" counterpart.
    jump = int((len(df.columns)+1)/2)
    values=[]
    Yesyes= 0
    Yesno = 0
    Noyes = 0
    Nono = 0
    for j in range(jump-1):
        # Per-label confusion counts (truth vs. prediction).  Renamed from
        # a/b in the original, which shadowed the function parameters.
        yesyes=0
        yesno=0
        noyes=0
        nono=0
        truth=df[df.columns[j]].values
        pred=df[df.columns[j+jump]].values
        for i in range(len(truth)):
            if truth[i]==1:
                if pred[i]==1:
                    yesyes+=1
                else:
                    yesno+=1
            else:
                if pred[i]==1:
                    noyes+=1
                else:
                    nono+=1
        total = yesyes+yesno
        Yesyes += yesyes
        Yesno += yesno
        Noyes += noyes
        Nono += nono
        # Guard against division by zero when a label never occurs or is
        # never predicted.
        if (total != 0) and (yesyes+noyes != 0):
            values.append([df.columns[j],total,yesyes,yesno,noyes,nono, yesyes/total, yesyes/(yesyes+noyes), noyes/(yesyes+noyes), yesno/total])
        else:
            values.append([df.columns[j], total, yesyes, yesno, noyes, nono, "NA", "NA","NA","NA"])
    Total = Yesyes + Yesno
    values.append(["Overall",Total,Yesyes,Yesno,Noyes,Nono, Yesyes/Total, Yesyes/(Yesyes+Noyes), Noyes/(Yesyes+Noyes), Yesno/Total])
    result = pd.DataFrame(values, columns=["subtopic", "TotalGroundTruth","YesYes", "YesNo", "NoYes", "NoNo",
                                           "Recall", "Precision", "FalsePositive%", "FalseNegative%"])
    result.to_csv(path + 'predicted_result_summary.csv')
    print("see results in " + path)
relativePath = os.getcwd()
sentencePath = relativePath + "/data/sample5_sentences_08132018.csv"
sentences = pd.read_csv(sentencePath, index_col="Sentence#")
print(sentences.columns)
# Grid-search the LSTM width and dense-layer size; larger configurations
# get a few extra training epochs.
for a in [512, 1024]:
    for b in [50, 250, 500, 1000]:
        epochs = 25 if a + b > 700 else 20
        runScript(a, b, epochs, sentences)
|
minchen57/TextClassifyKeras | review_classification.py | #!/usr/bin/env python
import nltk
import pandas as pd
import numpy as np
import os
from gensim.models import Word2Vec
from keras.utils import plot_model
import matplotlib.pyplot as plt
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Embedding
from keras.layers import Dense
from keras.layers import LSTM, Bidirectional
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from keras.models import Sequential
from sklearn.metrics import f1_score, accuracy_score, precision_score, recall_score, precision_recall_fscore_support
# PARAMETERS ================
MAX_SEQUENCE_LENGTH = 100  # sentences longer than this (in tokens) are dropped
CUSTOM_SEED = 42           # random seed for the train/test split
TEST_SPLIT = 0.2           # fraction held out for testing
VALIDATION_SPLIT = 0.25    # fraction of training data used for validation
def token2vec(token,w2vmodel):
    """Return the embedding vector for *token* from a gensim Word2Vec model."""
    return w2vmodel.wv[token]
def plot_model_performance(train_loss, train_acc, train_val_loss, train_val_acc, save_figure_path):
    """ Plot model loss and accuracy through epochs. """
    # Fixed colors: green for training curves, orange for validation curves.
    green = '#72C29B'
    orange = '#FFA577'
    with plt.xkcd():
        fig, (ax1, ax2) = plt.subplots(2, figsize=(10, 8))
        # Top panel: loss per epoch.
        ax1.plot(range(1, len(train_loss) + 1), train_loss, green, linewidth=5,
                 label='training')
        ax1.plot(range(1, len(train_val_loss) + 1), train_val_loss, orange,
                 linewidth=5, label='validation')
        ax1.set_xlabel('# epoch')
        ax1.set_ylabel('loss')
        ax1.tick_params('y')
        ax1.legend(loc='upper right', shadow=False)
        ax1.set_title('Model loss through #epochs', fontweight='bold')
        # Bottom panel: accuracy per epoch.
        ax2.plot(range(1, len(train_acc) + 1), train_acc, green, linewidth=5,
                 label='training')
        ax2.plot(range(1, len(train_val_acc) + 1), train_val_acc, orange,
                 linewidth=5, label='validation')
        ax2.set_xlabel('# epoch')
        ax2.set_ylabel('accuracy')
        ax2.tick_params('y')
        ax2.legend(loc='lower right', shadow=False)
        ax2.set_title('Model accuracy through #epochs', fontweight='bold')
        plt.tight_layout()
        # Shows the figure interactively AND saves it to disk.
        plt.show()
        fig.savefig(save_figure_path)
        plt.close(fig)
# ---- Load data: the first 3 label columns plus the sentence text ----
relativePath = os.getcwd()
sentencePath = relativePath + "/data/sample1_sentences_08062018.csv"
sentences = pd.read_csv(sentencePath, index_col = "Sentence#")
print(sentences.columns)
sentences = sentences[list(sentences.columns.values)[0:3]+["Sentence"]]
numberOfClasses = len(sentences.columns)-1
#print(sentences.tail(4))
print("classes selected", sentences.columns[0:-1])
print("number of classes/labels: ", numberOfClasses)
print("total number of sentences: ", len(sentences))
w2vmodel = Word2Vec.load("word2vec.model")
print("vector size used in w2v: ",w2vmodel.vector_size)
path = "Results/08062018-"+ str(numberOfClasses)+"/"
# ---- Tokenize sentences and map tokens to integer ids ----
multilabel = []
sent_token_list = []
word2int = {}
counter = -1
for index, row in sentences.iterrows():
    tokens = nltk.word_tokenize(row["Sentence"])
    tokenstoIDs = []
    for token in tokens:
        if token not in word2int:
            counter += 1
            word2int[token] = counter
        tokenstoIDs.append(word2int[token])
    # Sentences longer than MAX_SEQUENCE_LENGTH tokens are dropped.
    if len(tokenstoIDs) <= MAX_SEQUENCE_LENGTH:
        sent_token_list.append(tokenstoIDs)
        multilabel.append(list(row[0:numberOfClasses].values))
print('size of volcabulary: ',len(word2int))
X = np.array(sent_token_list)
y = np.array(multilabel)
#print(type(X),X.shape, X[0:3])
#print(type(y),y.shape,y[0:3])
#print(len([len(l) for l in sent_token_list if len(l)>MAX_SEQUENCE_LENGTH]))
print("total number of sentences kept: ", len(X))
#padding 0s infront to make it same size
X = pad_sequences(X, maxlen=MAX_SEQUENCE_LENGTH)
X, y = shuffle(X, y)
# split data into train and test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=TEST_SPLIT,random_state=CUSTOM_SEED)
# split training data into train and validation
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=VALIDATION_SPLIT, random_state=1)
n_train_samples = X_train.shape[0]
n_val_samples = X_val.shape[0]
n_test_samples = X_test.shape[0]
print('We have %d TRAINING samples' % n_train_samples)
print('We have %d VALIDATION samples' % n_val_samples)
print('We have %d TEST samples' % n_test_samples)
# + 1 to include the unknown word
embedding_matrix = np.random.random((len(word2int) + 1, w2vmodel.vector_size))
for word in word2int:
    embedding_vector = token2vec(word,w2vmodel)
    if embedding_vector is not None:
        # words not found in embeddings_index will remain unchanged and thus will be random.
        embedding_matrix[word2int[word]] = embedding_vector
print('Embedding matrix shape', embedding_matrix.shape)
print('X_train shape', X_train.shape)
# ---- Model: trainable embedding -> Bi-LSTM -> dense -> sigmoids ----
model = Sequential()
embedding_layer = Embedding(len(word2int) + 1,
                            w2vmodel.vector_size,
                            weights=[embedding_matrix],
                            input_length=MAX_SEQUENCE_LENGTH,
                            trainable=True)
model.add(embedding_layer)
model.add(Bidirectional(LSTM(64, return_sequences=False)))
model.add(Dense(500, activation='tanh'))
model.add(Dense(numberOfClasses, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['acc'])
print("model fitting - Bidirectional LSTM")
model.summary()
#earlystopper = EarlyStopping(monitor='val_acc', patience=5, verbose=1)
#callbacks=[earlystopper]
x= model.fit(X_train, y_train,
             batch_size=256,
             epochs=15,
             validation_data=(X_val, y_val),
             shuffle = True,
             verbose = 1
             )
# if not os.path.exists(path):
#     print('MAKING DIRECTORY to save model file')
#     os.makedirs(path)
#
# plot_model_performance(
#     train_loss=x.history.get('loss', []),
#     train_acc=x.history.get('acc', []),
#     train_val_loss=x.history.get('val_loss', []),
#     train_val_acc=x.history.get('val_acc', []),
#     save_figure_path = path +'model_performance.png'
# )
#
# # Visualize model architecture
# plot_model(model, to_file=path +'model_structure.png', show_shapes=True)
preds = model.predict(X_test)
# BUG FIX: sklearn's classification metrics reject continuous sigmoid
# scores; binarize at 0.5 first (as review_classification_test_dependent.py
# does), otherwise every metric call below raises ValueError.
preds[preds >= 0.5] = 1
preds[preds < 0.5] = 0
print ("f1: ", f1_score(y_test, preds, average='weighted'))
print ("accuracy: ", accuracy_score(y_test, preds))
print ("precision: ", precision_score(y_test, preds, average='weighted'))
print ("recall: ", recall_score(y_test, preds, average='weighted'))
print ("precision_recall_fscore_support: ", precision_recall_fscore_support(y_test, preds, average='weighted'))
print("see results in " + path)
minchen57/TextClassifyKeras | process_results.py | import pandas as pd
import numpy as np
import os
# Summarize multi-label predictions: for each subtopic column, compare ground
# truth against predictions and write per-class confusion counts plus
# recall/precision-style ratios to predicted_result_summary.csv.
relativePath = os.getcwd()
path = relativePath + "/Results/08082018-1/"
df = pd.read_csv(path+"predicted_result.csv", index_col="Sentence#")
# assumes the CSV holds ground-truth columns in the first half and the matching
# prediction columns in the second half, offset by `jump` — TODO confirm layout
jump = int((len(df.columns)+1)/2)
print(jump)
print(df.columns[jump-1])
#print(df.columns[5], " ", df.columns[5+jump])
values=[]
for j in range(jump-1):
    # confusion counts for subtopic j: truth(yes/no) x prediction(yes/no)
    yesyes=0
    yesno=0
    noyes=0
    nono=0
    a=df[df.columns[j]].values        # ground-truth 0/1 column
    b=df[df.columns[j+jump]].values   # predicted 0/1 column
    for i in range(len(a)):
        if a[i]==1:
            if b[i]==1:
                yesyes+=1
            else:
                yesno+=1
        else:
            if b[i]==1:
                noyes+=1
            else:
                nono+=1
    total = yesyes+yesno
    # guard the divisions: emit "NA" when truth or prediction positives are absent
    if (total != 0) and (yesyes+noyes != 0):
        values.append([df.columns[j],total,yesyes,yesno,noyes,nono, yesyes/total, yesyes/(yesyes+noyes), noyes/(yesyes+noyes), yesno/total])
    else:
        values.append([df.columns[j], total, yesyes, yesno, noyes, nono, "NA", "NA","NA","NA"])
result = pd.DataFrame(values, columns=["subtopic", "TotalGroundTruth","YesYes", "YesNo", "NoYes", "NoNo",
                                       "Recall", "Precision", "FalsePositive%", "FalseNegative%"])
result.to_csv(path + 'predicted_result_summary.csv') |
evature/botkit-integrations | main.py | <gh_stars>1-10
'''
Created on May 18, 2016
@author: marina
'''
from enums import MessagingProviders
from amadeus import flights_low_fare_search, amadeus_results_to_facebook
from expedia import get_ean_tags_from_webhook_input, expedia_search_request_to_facebook
def get_structured_message(messaging_provider, text=None, image_url=None):
    """Build the provider-specific structured reply for a text and/or image.

    Returns a list of message dicts for Facebook or LINE, or None when there
    is nothing to send or the provider is unsupported.
    """
    response = None
    # Fix: the original condition `text is not None or image_url is not None
    # and messaging_provider in [...]` binds `and` tighter than `or`, so any
    # text-only call skipped the provider check and returned [] for
    # unsupported providers instead of None.
    has_content = text is not None or image_url is not None
    if has_content and messaging_provider in [MessagingProviders.facebook, MessagingProviders.line]:
        response = []
        if messaging_provider == MessagingProviders.facebook:
            if text is not None:
                response.append(dict(text=text))
            if image_url is not None:
                response.append(dict(attachment=dict(type="image", payload=dict(url=image_url))))
        elif messaging_provider == MessagingProviders.line:
            # LINE content types: 1 = text, 2 = image
            if text is not None:
                response.append(dict(contentType=1, toType=1, text=text))
            if image_url is not None:
                response.append(dict(contentType=2, toType=1,
                                     originalContentUrl=image_url,
                                     previewImageUrl=image_url))
    return response
def amadeus_flight_search_webhook(body):
    """Run an Amadeus low-fare flight search for a webhook request.

    body: {origin, destination, departDateMin, departDateMax, returnDateMin,
    returnDateMax, travelers, attributes, sortBy, sortOrder}.
    Returns the response in the messaging provider's format (Facebook only;
    other providers get None).
    """
    if body.get('messagingProvider') != MessagingProviders.facebook:
        return None
    origin = body.get('origin', {})
    destination = body.get('destination', {})

    def pick_airport(place):
        # prefer the city-wide "all airports" code, else the first listed airport
        code = place.get('allAirportsCode')
        if not code and place.get('airports'):
            code = place.get('airports')[0]
        return code

    origin_airport = pick_airport(origin)
    destination_airport = pick_airport(destination)
    # dates arrive as ISO timestamps; Amadeus wants the date part only
    departure_date = body.get('departDateMin', '').split('T')[0]
    return_date = body.get('returnDateMin', None)
    if return_date:
        return_date = return_date.split('T')[0]
    travelers = body.get('travelers', {})
    adults = None
    if 'Adult' in travelers or 'Elderly' in travelers:
        adults = int(travelers.get('Adult', 0)) + int(travelers.get('Elderly', 0))
    amadeus_results = flights_low_fare_search(
        origin_airport, destination_airport, departure_date,
        return_date=return_date,
        adults=adults,
        children=travelers.get('Child', 0),
        infants=travelers.get('Infant', 0),
        max_price=None, currency=None,
        number_of_results=3,
        non_stop=None,
        arrive_by=None,
        return_by=None,
        include_airlines=None,
        exclude_airlines=None,
        travel_class=None,
    )
    return amadeus_results_to_facebook(amadeus_results, origin, destination)
def expedia_hotel_search_webhook(body):
    """Run an Expedia hotel search for a webhook request.

    body: {location, arriveDate, duration, travelers, attributes, sortBy,
    sortOrder, messagingProvider}; returns the provider-formatted response
    (Facebook only; other providers get None).
    """
    tags = get_ean_tags_from_webhook_input(body)
    if body.get('messagingProvider') != MessagingProviders.facebook:
        return None
    return expedia_search_request_to_facebook(tags)
def flight_boarding_pass_webhook(body):
    """Canned demo reply: send a sample boarding-pass image.

    body: {messagingProvider}; returns the provider-formatted message.
    """
    return get_structured_message(
        body.get('messagingProvider'),
        text="Here is your boarding pass",
        image_url="https://d2hbukybm05hyt.cloudfront.net/images/singapore-bp.jpg")
def flight_itinerary_webhook(body):
    """Canned demo reply: send a sample itinerary image.

    body: {messagingProvider}; returns the provider-formatted message.
    """
    return get_structured_message(
        body.get('messagingProvider'),
        text="Here is your Itinerary",
        image_url="https://d2hbukybm05hyt.cloudfront.net/images/itinerary.jpg")
def reservation_cancel_webhook(body):
    """Canned demo reply: point the user to the cancellation site.

    body: {messagingProvider}; returns the provider-formatted message.
    """
    provider = body.get('messagingProvider')
    return get_structured_message(
        provider,
        text="Please follow this link https://www.checkmytrip.com to cancel your reservation")
def flight_gate_number_webhook(body):
    """Canned demo reply: gate announcement.

    body: {messagingProvider}; returns the provider-formatted message.
    """
    provider = body.get('messagingProvider')
    return get_structured_message(provider, text="Your flight is boarding in 25 minutes at Gate D4")
def flight_boarding_time_webhook(body):
    """Canned demo reply: boarding-time announcement.

    body: {messagingProvider}; returns the provider-formatted message.
    """
    provider = body.get('messagingProvider')
    return get_structured_message(provider, text="Your flight starts boarding in 20 minutes")
def flight_departure_time_webhook(body):
    """Canned demo reply: departure-time announcement.

    body: {messagingProvider}; returns the provider-formatted message.
    """
    provider = body.get('messagingProvider')
    return get_structured_message(provider, text="Your flight departs at 7:35 am")
def flight_arrival_time_webhook(body):
    """Canned demo reply: arrival-time announcement.

    body: {messagingProvider}; returns the provider-formatted message.
    """
    provider = body.get('messagingProvider')
    return get_structured_message(provider, text="Your flight arrives at 9:45 pm")
|
bee-hive/scGeneFit-python | setup.py | <filename>setup.py<gh_stars>10-100
import setuptools
# Long description is the project README; read as UTF-8 explicitly so builds
# don't depend on the platform's default locale encoding.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()
setuptools.setup(
    name='scGeneFit',
    version='1.0.0',
    author="<NAME>",
    author_email="<EMAIL>",
    description="Genetic marker selection with linear programming",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/solevillar/scGeneFit-python",
    packages=setuptools.find_packages(),
    include_package_data=True,
    # Fix: 'sklearn' is a deprecated PyPI alias that the index now rejects;
    # the real distribution name is 'scikit-learn'.
    install_requires=['numpy', 'matplotlib', 'scipy', 'scikit-learn'],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
)
|
bee-hive/scGeneFit-python | scGeneFit/functions.py | <filename>scGeneFit/functions.py
import numpy as np
import matplotlib.pyplot as plt
import scipy
import time
import sklearn
import sklearn.manifold
import scipy.io
from . import data_files
def get_markers(data, labels, num_markers, method='centers', epsilon=1, sampling_rate=1, n_neighbors=3, max_constraints=1000, redundancy=0.01, verbose=True):
    """marker selection algorithm
    data: Nxd numpy array with point coordinates, N: number of points, d: dimension
    labels: list with labels (N labels, one per point)
    num_markers: target number of markers to select. num_markers<d
    method: 'centers', 'pairwise', or 'pairwise_centers'
    epsilon: constraints will be of the form expr>Delta, where Delta is chosen to be epsilon times the norm of the smallest constraint (default 1)
    (This is the most important parameter in this problem, it determines the scale of the constraints,
    the rest the rest of the parameters only determine the size of the LP)
    sampling_rate: (if method=='pairwise' or 'pairwise_centers') selects constraints from a random sample of proportion sampling_rate (default 1)
    n_neighbors: (if method=='pairwise') chooses the constraints from n_neighbors nearest neighbors (default 3)
    max_constraints: maximum number of constraints to consider (default 1000)
    redundancy: (if method=='centers') in this case not all pairwise constraints are considered
    but just between centers of consecutive labels plus a random fraction of constraints given by redundancy
    if redundancy==1 all constraints between pairs of centers are considered

    Returns a list of num_markers gene (column) indices."""
    d = data.shape[1]
    t = time.time()
    # stratified random subsample used to build the constraint set
    samples, samples_labels, idx = __sample(data, labels, sampling_rate)
    if method == 'pairwise_centers':
        constraints, smallest_norm = __select_constraints_centers(
            data, labels, samples, samples_labels)
    elif method == 'pairwise':
        constraints, smallest_norm = __select_constraints_pairwise(
            data, labels, samples, samples_labels, n_neighbors)
    else:
        # default: 'centers' method
        constraints, smallest_norm = __select_constraints_summarized(data, labels, redundancy)
    num_cons = constraints.shape[0]
    # keep at most max_constraints randomly chosen rows to bound LP size
    if num_cons > max_constraints:
        p = np.random.permutation(num_cons)[0:max_constraints]
        constraints = constraints[p, :]
    if verbose:
        print('Solving a linear program with {} variables and {} constraints'.format(
            constraints.shape[1], constraints.shape[0]))
    # Delta = epsilon * (squared norm of the smallest constraint)
    sol = __lp_markers(constraints, num_markers, smallest_norm * epsilon)
    if verbose:
        print('Time elapsed: {} seconds'.format(time.time() - t))
    # select the num_markers genes with the largest LP weights
    x = sol['x'][0:d]
    markers = sorted(range(len(x)), key=lambda i: x[i], reverse=True)[
        : num_markers]
    return markers
def get_markers_hierarchy(data, labels, num_markers, method='centers', sampling_rate=0.1, n_neighbors=3, epsilon=10, max_constraints=1000, redundancy=0.01, verbose=True):
    """marker selection algorithm with hierarchical labels
    data: Nxd numpy array with point coordinates, N: number of points, d: dimension
    labels: list with T lists of labels, where T is the number of layers in the hierarchy (N labels per list, one per point)
    num_markers: target number of markers to select. num_markers<d
    sampling_rate: selects constraints from a random sample of proportion sampling_rate (default 1)
    n_neighbors: chooses the constraints from n_neighbors nearest neighbors (default 3)
    epsilon: Delta is chosen to be epsilon times the norm of the smallest constraint (default 10)
    max_constraints: maximum number of constraints to consider (default 1000)
    method: 'centers', 'pairwise' or 'pairwise_centers' (default 'centers')
    redundancy: (if method=='centers') in this case not all pairwise constraints are considered
    but just between centers of consecutive labels plus a random fraction of constraints given by redundancy
    if redundancy==1 all constraints between pairs of centers are considered

    Returns a list of num_markers gene (column) indices."""
    t = time.time()
    [N, d] = data.shape
    num_levels = len(labels)
    # virtual root: every point starts in the same group
    prev_label = [1 for i in range(N)]
    constraints = None
    smallest_norm = np.inf
    for i in range(num_levels):
        # within each group of the previous level, generate constraints
        # separating the current level's labels
        s = set(prev_label)
        for l in s:
            if l is not None:
                aux_data = [data[x, :]
                            for x in range(len(labels[i])) if prev_label[x] == l]
                aux_labels = [labels[i][x]
                              for x in range(len(labels[i])) if prev_label[x] == l]
                samples, samples_labels, idx = __sample(
                    aux_data, aux_labels, sampling_rate)
                aux_data = np.array(aux_data)
                if method == 'pairwise_centers':
                    con, sm_norm = __select_constraints_centers(
                        aux_data, aux_labels, samples, samples_labels)
                elif method == 'pairwise':
                    con, sm_norm = __select_constraints_pairwise(
                        aux_data, aux_labels, samples, samples_labels, n_neighbors)
                else:
                    con, sm_norm = __select_constraints_summarized(aux_data, aux_labels, redundancy)
                # accumulate constraints across groups and levels
                if constraints is not None:
                    constraints = np.concatenate((constraints, con))
                else:
                    constraints = con
                if sm_norm < smallest_norm:
                    smallest_norm = sm_norm
        prev_label = labels[i]
    constraints = np.array(constraints)
    num_cons = constraints.shape[0]
    # keep at most max_constraints randomly chosen rows to bound LP size
    if num_cons > max_constraints:
        p = np.random.permutation(num_cons)[0:max_constraints]
        constraints = constraints[p, :]
    if verbose:
        print('Solving a linear program with {} variables and {} constraints'.format(constraints.shape[1], constraints.shape[0]))
    sol = __lp_markers(constraints, num_markers, smallest_norm * epsilon)
    if verbose:
        print('Time elapsed: {} seconds'.format(time.time() - t))
    # select the num_markers genes with the largest LP weights
    x = sol['x'][0:d]
    markers = sorted(range(len(x)), key=lambda i: x[i], reverse=True)[
        : num_markers]
    return markers
def __sample(data, labels, sampling_rate):
    """Stratified random subsample.

    For each class, keep ceil(sampling_rate * class_size) randomly chosen
    points. Returns (sampled points, their labels, their indices into data).
    """
    indices = []
    for label in set(labels):
        members = [pos for pos, lab in enumerate(labels) if lab == label]
        keep = int(np.ceil(len(members) * sampling_rate))
        for pick in np.random.permutation(len(members))[0:keep]:
            indices.append(members[pick])
    return [data[i] for i in indices], [labels[i] for i in indices], indices
def __select_constraints_summarized(data, labels, redundancy=0.01):
    """selects constraints of the form c_a-c_(a+1) where c_i's are the empirical centers of different classes

    Returns (constraint matrix, squared norm of the smallest center difference).
    Each returned row is the elementwise -v*v of a center difference v, i.e.
    the linearized squared-distance form consumed by __lp_markers."""
    constraints = []
    centers = {}
    smallest_norm = np.inf
    labels_set = list(set(labels))
    k = len(labels_set)
    # empirical center (mean) of each class
    for idx in labels_set:
        X = [data[x, :] for x in range(len(labels)) if labels[x] == idx]
        centers[idx] = np.array(X).mean(axis=0)
    for i in range(len(labels_set)):
        # always keep the difference between cyclically consecutive centers
        v = centers[labels_set[i]]-centers[labels_set[(i+1) % k]]
        constraints += [v]
        if np.linalg.norm(v) ** 2 < smallest_norm:
            smallest_norm = np.linalg.norm(v) ** 2
        for j in range(len(labels_set)):
            if j != i and j != (i+1) % k:
                # with probability `redundancy`, also keep other center pairs
                if np.random.rand() < redundancy:
                    v = centers[labels_set[j]]-centers[labels_set[(j+1) % k]]
                    constraints += [v]
                    if np.linalg.norm(v) ** 2 < smallest_norm:
                        smallest_norm = np.linalg.norm(v) ** 2
    constraints = np.array(constraints)
    # elementwise negative squares, matching the pairwise methods' convention
    return -constraints * constraints, smallest_norm
def __select_constraints_pairwise(data, labels, samples, samples_labels, n_neighbors):
    """select constraints of the form x-y where x,y have different labels

    For each sampled point, constraints come from its n_neighbors nearest
    neighbors in every OTHER class. Returns (constraint matrix of elementwise
    -v*v rows, squared norm of the smallest difference)."""
    constraints = []
    # nearest neighbors are selected from the entire set
    neighbors = {}
    data_by_label = {}
    smallest_norm = np.inf
    # NOTE(review): relies on sklearn.neighbors being importable as an
    # attribute of `sklearn`; the module imports sklearn.manifold, which may
    # or may not pull it in — confirm an explicit `import sklearn.neighbors`.
    for i in set(labels):
        X = [data[x, :] for x in range(len(labels)) if labels[x] == i]
        data_by_label[i] = X
        neighbors[i] = sklearn.neighbors.NearestNeighbors(
            n_neighbors=n_neighbors).fit(np.array(X))
    # compute nearest neighbor for samples
    for i in neighbors.keys():
        Y = [samples[x]
             for x in range(len(samples_labels)) if samples_labels[x] == i]
        for j in neighbors.keys():
            if i != j:
                # neighbors of class-i samples within class j
                idx = neighbors[j].kneighbors(Y)[1]
                for s in range(len(Y)):
                    for t in idx[s]:
                        v = Y[s] - data_by_label[j][t]
                        constraints += [v]
                        if np.linalg.norm(v) ** 2 < smallest_norm:
                            smallest_norm = np.linalg.norm(v) ** 2
    constraints = np.array(constraints)
    # elementwise negative squares: linearized squared-distance constraints
    return -constraints * constraints, smallest_norm
def __select_constraints_centers(data, labels, samples, samples_labels):
    """select constraints of the form (x-ct')^2 - (x-ct)^2> Delta^2 y where x belongs to cluster with center ct

    Returns (constraint matrix, norm of the smallest constraint row).
    Each row is the elementwise difference of squared coordinate distances
    to the point's own center vs. another class's center."""
    constraints = []
    # nearest neighbors are selected from the entire set
    centers_by_label = {}
    smallest_norm = np.inf
    # empirical center (mean) of each class
    for i in set(labels):
        X = np.array([data[x, :]
                      for x in range(len(labels)) if labels[x] == i])
        centers_by_label[i] = np.sum(X, axis=0) / X.shape[0]
    # compute nearest neighbor for samples
    for p in range(len(samples)):
        # distance to it's own center
        aux0 = (samples[p] - centers_by_label[samples_labels[p]]) * \
            (samples[p] - centers_by_label[samples_labels[p]])
        for i in set(labels):
            if samples_labels[p] != i:
                # distance to other centers
                aux1 = (samples[p] - centers_by_label[i]) * \
                    (samples[p] - centers_by_label[i])
                constraints += [aux0 - aux1]
                if np.linalg.norm(aux0 - aux1) < smallest_norm:
                    smallest_norm = np.linalg.norm(aux0-aux1)
    constraints = np.array(constraints)
    return constraints, smallest_norm
def __lp_markers(constraints, num_markers, epsilon):
    """Solve the LP relaxation of marker selection.

    Variables are [x (d gene weights in [0, 1]), s (m nonnegative slacks)].
    Minimize total slack subject to constraints @ x - s <= -epsilon and
    sum(x) <= num_markers. Returns the scipy.optimize.linprog result.
    """
    m, d = constraints.shape
    # objective: only the slack variables carry cost
    cost = np.concatenate((np.zeros(d), np.ones(m)))
    # gene weights live in [0, 1]; slacks in [0, inf)
    var_bounds = [(0.0, 1.0)] * d + [(0.0, None)] * m
    # one row per constraint (with its own slack), plus the budget row sum(x) <= num_markers
    slack_rows = np.concatenate((constraints, -np.identity(m)), axis=1)
    budget_row = np.concatenate((np.ones((1, d)), np.zeros((1, m))), axis=1)
    A_ub = np.concatenate((slack_rows, budget_row), axis=0)
    b_ub = np.concatenate((-epsilon * np.ones(m), [num_markers]))
    return scipy.optimize.linprog(cost, A_ub, b_ub, None, None, var_bounds)
def circles_example(N=30, d=5):
    """Toy demo: two concentric circles living in the first two of d dims.

    Selects 2 markers, zeroes out every non-selected coordinate and plots the
    data before and after selection. N points per circle, d >= 3.
    """
    num_markers = 2
    X = np.concatenate((np.array([[np.sin(2 * np.pi * i / N), np.cos(2 * np.pi * i / N)] for i in range(N)]),
                        np.random.random((N, d - 2))), axis=1)
    Y = np.concatenate((np.array([[2 * np.sin(2 * np.pi * i / N), 2 * np.cos(2 * np.pi * i / N)] for i in range(N)]),
                        np.random.random((N, d - 2))), axis=1)
    data = np.concatenate((X, Y), axis=0)
    # Fix: one label per point. The original hard-coded zeros(10)/ones(10),
    # which only yields 20 labels for 2*N = 60 points.
    labels = np.concatenate((np.zeros(N), np.ones(N)))
    fig = plt.figure()
    ax = fig.add_subplot(121, projection='3d')
    # Fix: second slice started at N+1 and silently dropped the first point
    # of the outer circle.
    ax.scatter(data[0:N, 0], data[0:N, 1], data[0:N, 2], c='r', marker='o')
    ax.scatter(data[N:2 * N, 0], data[N:2 * N, 1],
               data[N:2 * N, 2], c='g', marker='x')
    plt.show()
    # Fix: get_markers already returns the list of selected indices; the
    # original treated its result as the raw LP solution dict and crashed on
    # sol['x'].
    markers = get_markers(data, labels, num_markers, epsilon=3)
    for i in range(d):
        if i not in markers:
            data[:, i] = np.zeros(2 * N)
    ax2 = fig.add_subplot(122, projection='3d')
    ax2.scatter(data[0:N, 0], data[0:N, 1], data[0:N, 2], c='r', marker='o')
    ax2.scatter(data[N:2 * N, 0], data[N:2 * N, 1],
                data[N:2 * N, 2], c='g', marker='x')
    plt.show()
def plot_marker_selection(data, markers, names, perplexity=40):
    """Side-by-side TSNE scatter plots: full data vs. selected markers only.

    data: Nxd array; markers: list of column indices; names: N per-point
    labels used for coloring and the legend. Returns the matplotlib figure."""
    print('Computing TSNE embedding')
    t = time.time()
    X_original = sklearn.manifold.TSNE(
        n_components=2, perplexity=perplexity).fit_transform(data)
    X_embedded = sklearn.manifold.TSNE(n_components=2, perplexity=perplexity).fit_transform(
        data[:, markers])
    print('Elapsed time: {} seconds'.format(time.time() - t))
    # one color per distinct name, spread over the jet colormap
    cmap = plt.cm.jet
    unique_names = list(set(names))
    num_labels = len(unique_names)
    colors = [cmap(int(i * 256 / num_labels)) for i in range(num_labels)]
    aux = [colors[unique_names.index(name)] for name in names]
    fig = plt.figure()
    ax = fig.add_subplot(121)
    for g in unique_names:
        i = [s for s in range(len(names)) if names[s] == g]
        ax.scatter(X_original[i, 0], X_original[i, 1],
                   c=[aux[i[0]]], s=5, label=names[i[0]])
    ax.set_title('Original data')
    ax2 = fig.add_subplot(122)
    for g in np.unique(names):
        i = [s for s in range(len(names)) if names[s] == g]
        ax2.scatter(X_embedded[i, 0], X_embedded[i, 1],
                    c=[aux[i[0]]], s=5, label=names[i[0]])
    ax2.set_title('{} markers'.format(len(markers)))
    plt.legend(bbox_to_anchor=(1, 1))
    plt.subplots_adjust(right=0.7)
    return fig
def one_vs_all_selection(data, labels, num_bins=20):
    """Baseline marker selection: for each class pick the gene whose histogram
    differs most (additive chi2 distance) between the class and all others.

    data: Nxd array; labels: N labels. Returns one gene index per class
    (an entry stays None if no gene improves on distance 0).
    """
    # Fix: `import sklearn` alone does not guarantee sklearn.metrics is
    # loaded as an attribute; import the submodule explicitly before use.
    import sklearn.metrics
    data_by_label = {}
    unique_labels = list(set(labels))
    number_classes = len(unique_labels)
    [N, d] = data.shape
    for lab in unique_labels:
        X = [data[x, :] for x in range(len(labels)) if labels[x] == lab]
        data_by_label[lab] = X
    markers = [None for i in range(number_classes)]
    # shared histogram edges over [0, data.max()]
    # (np.arange replaces the original scalar * range, numerically identical)
    bins = data.max() / num_bins * np.arange(num_bins + 1)
    for idx in range(number_classes):
        c = unique_labels[idx]
        current_class = np.array(data_by_label[c])
        others = np.concatenate([data_by_label[lab]
                                 for lab in unique_labels if lab != c])
        big_dist = 0
        for gene in range(d):
            # do not reuse a gene already assigned to an earlier class
            if gene not in markers[0:idx]:
                [h1, b1] = np.histogram(current_class[:, gene], bins)
                h1 = np.array(h1).reshape(1, -1) / current_class.shape[0]
                [h2, b2] = np.histogram(others[:, gene], bins)
                h2 = np.array(h2).reshape(1, -1) / others.shape[0]
                # additive_chi2_kernel is non-positive; negate to get a distance
                dist = -sklearn.metrics.pairwise.additive_chi2_kernel(h1, h2)
                if dist > big_dist:
                    markers[idx] = gene
                    big_dist = dist
    return markers
def optimize_epsilon(data_train, labels_train, data_test, labels_test, num_markers, method='centers', fixed_parameters={}, bounds=[(0.2 , 10)], x0=[1], max_fun_evaluations=20, n_experiments=5, clf=None, hierarchy=False, verbose=True):
    """
    Finds the optimal value of epsilon using scipy.optimize.dual_annealing

    Accuracy is averaged over n_experiments random marker selections scored
    with `clf` (NearestCentroid by default). Returns [best_epsilon, best_accuracy].
    (The mutable defaults fixed_parameters/bounds/x0 are read-only here.)
    """
    # Fix: ensure the submodule is loaded; `import sklearn` alone does not
    # provide sklearn.neighbors.
    import sklearn.neighbors
    # Fix: identity comparison — `clf == None` invokes the estimator's __eq__.
    if clf is None:
        clf = sklearn.neighbors.NearestCentroid()
    Instance = __ScGeneInstance(data_train, labels_train, data_test, labels_test, clf, num_markers, method, fixed_parameters, n_experiments, hierarchy)
    print('Optimizing epsilon for', num_markers, 'markers and', method, 'method.')
    res = scipy.optimize.dual_annealing(Instance.error_epsilon, bounds=bounds, x0=x0, maxfun=max_fun_evaluations, no_local_search=True)
    return [res.x, 1-res.fun]
class __ScGeneInstance:
    """Benchmark wrapper so scipy.optimize.dual_annealing can minimize
    classification error as a function of epsilon.

    Holds a train/test split, a classifier and the marker-selection settings;
    error_epsilon(eps) = 1 - mean accuracy over n_experiments selections."""
    def __init__(self, X_train, y_train, X_test, y_test, clf, num_markers, method, fixed_parameters, n_experiments, hierarchy):
        self.X_train=X_train
        self.y_train=y_train
        self.X_test=X_test
        self.y_test=y_test
        self.clf=clf
        self.num_markers=num_markers
        self.method=method
        self.fixed_parameters=fixed_parameters
        self.n_experiments=n_experiments
        self.hierarchy=hierarchy
    def error_epsilon(self, epsilon):
        """Objective for the minimizer: 1 - mean accuracy at this epsilon."""
        return 1-self.accuracy(epsilon)
    def accuracy(self, epsilon):
        """Mean test accuracy of markers selected with `epsilon`."""
        #compute avg over n_experiments random samples for stability
        if self.hierarchy:
            markers=[get_markers_hierarchy(self.X_train, self.y_train, self.num_markers, self.method, epsilon=epsilon, verbose=False, **self.fixed_parameters) for i in range(self.n_experiments)]
        else:
            markers=[get_markers(self.X_train, self.y_train, self.num_markers, self.method, epsilon=epsilon, verbose=False, **self.fixed_parameters) for i in range(self.n_experiments)]
        val=[self.performance( markers[i] ) for i in range(self.n_experiments)]
        return np.mean(val)
    def performance(self, markers):
        """Fit clf on the marker columns and return its test score.

        With hierarchical labels, only the top level (index 0) is scored."""
        if self.hierarchy:
            self.clf.fit(self.X_train[:,markers], self.y_train[0])
            return self.clf.score(self.X_test[:,markers], self.y_test[0])
        else:
            self.clf.fit(self.X_train[:,markers], self.y_train)
            return self.clf.score(self.X_test[:,markers], self.y_test)
def load_example_data(name):
    """Load a bundled example dataset.

    name: 'CITEseq' -> [data (NxD, log-normalized), labels, names]
          'zeisel'  -> [data, 2-level hierarchical labels array, [names0, names1]]
    Prints a hint and returns None for any other name."""
    if name=="CITEseq":
        a = scipy.io.loadmat(data_files.get_data("CITEseq.mat"))
        data= a['G'].T
        N,d=data.shape
        #transformation from integer entries
        data=np.log(data+np.ones(data.shape))
        # L2-normalize each cell (row)
        for i in range(N):
            data[i,:]=data[i,:]/np.linalg.norm(data[i,:])
        #load labels from file
        a = scipy.io.loadmat(data_files.get_data("CITEseq-labels.mat"))
        l_aux = a['labels']
        labels = np.array([i for [i] in l_aux])
        #load names from file
        a = scipy.io.loadmat(data_files.get_data("CITEseq_names.mat"))
        names=[a['citeseq_names'][i][0][0] for i in range(N)]
        return [data, labels, names]
    elif name=="zeisel":
        #load data from file
        a = scipy.io.loadmat(data_files.get_data("zeisel_data.mat"))
        data= a['zeisel_data'].T
        N,d=data.shape
        #load labels (first level of the hierarchy) from file
        a = scipy.io.loadmat(data_files.get_data("zeisel_labels1.mat"))
        l_aux = a['zeisel_labels1']
        l_0=[l_aux[i][0] for i in range(l_aux.shape[0])]
        #load labels (second level of the hierarchy) from file
        a = scipy.io.loadmat(data_files.get_data("zeisel_labels2.mat"))
        l_aux = a['zeisel_labels2']
        l_1=[l_aux[i][0] for i in range(l_aux.shape[0])]
        #construct an array with hierarchy labels
        labels=np.array([l_0, l_1])
        # load names from file
        a = scipy.io.loadmat(data_files.get_data("zeisel_names.mat"))
        names0=[a['zeisel_names'][i][0][0] for i in range(N)]
        names1=[a['zeisel_names'][i][1][0] for i in range(N)]
        return [data, labels, [names0,names1]]
    else:
        print("currently available options are only 'CITEseq' and 'zeisel'")
|
bee-hive/scGeneFit-python | scGeneFit/data_files/__init__.py |
try:
import importlib.resources as importlib_resources
except ImportError:
# In PY<3.7 fall-back to backported `importlib_resources`.
import importlib_resources
def get_data(filename):
    """Return the filesystem path (as str) of a data file bundled with this package."""
    with importlib_resources.path(__name__, filename) as foo:
return str(foo) |
YTCdev/yangubot | src/misc.py | <reponame>YTCdev/yangubot
import discord
from config import Config
class Misc:
    """Small grab-bag of helpers shared by the bot's commands."""

    @staticmethod
    def is_staff(ctx):
        """True when the invoking member holds at least one configured staff role."""
        member_role_ids = {role.id for role in ctx.author.roles}
        return not member_role_ids.isdisjoint(Config.STAFF_IDS)

    @staticmethod
    def is_owner(ctx):
        """True when the invoker is listed as a bot owner."""
        return ctx.author.id in Config.OWNER_IDS

    @staticmethod
    async def send_error(ctx, message, footer=None):
        """Send *message* as a red embed (optional footer) and echo it to the log."""
        embed = discord.Embed(title=message, colour=Config.COLOURS['failed'])
        if footer is not None:
            embed.set_footer(text=footer)
        await ctx.send(embed=embed)
        print(message)
        print('-----')

    @staticmethod
    async def send_msg(ctx, message):
        """Send *message* as a green embed and echo it to the log."""
        success_embed = discord.Embed(title=message,
                                      colour=Config.COLOURS['completed'])
        await ctx.send(embed=success_embed)
        print(message)
        print('-----')
|
YTCdev/yangubot | src/ccm.py | <filename>src/ccm.py<gh_stars>0
from dataclasses import dataclass
from discord.ext import commands
from discord import Embed, Message
import jsonpickle
from config import Config
from misc import Misc
class CustomCommandsManager(commands.Cog):
    """Cog for staff-defined text commands, persisted in commands.json.

    A custom command maps a trigger message to an embed (title + content).
    Staff manage them via the `cc` command group; check_message() answers
    matching triggers in regular chat."""
    def __init__(self, bot, prefix=''):
        # prefix: optional string that must precede a trigger in chat
        self.bot = bot
        self.prefix = prefix
        self.commands = self.load_commands()
    @commands.group()
    @commands.check(Misc.is_staff)
    async def cc(self, ctx):
        """Staff-only command group; lists subcommands when none is given."""
        if ctx.invoked_subcommand is None:
            await ctx.send('Available subcommands: add, remove, list')
    @cc.command()
    async def add(self, ctx, trigger: str, title: str, content: str):
        """Register a new custom command; rejects duplicate triggers."""
        # prevent blocking the command if prefix here is identical to the main one
        if trigger == 'cc':
            return
        # work on a copy so self.commands is only replaced after a successful save
        command_list = self.commands[:]
        if any(x.trigger == trigger for x in command_list):
            await Misc.send_error(
                ctx, 'Trigger {} already exists.'.format(trigger))
            return
        command = CustomCommand(trigger, title, content)
        command_list.append(command)
        if self.save_commands(command_list):
            await Misc.send_msg(
                ctx, 'Command {} added.'.format(trigger))
        else:
            await Misc.send_error(ctx, 'Couldn\'t save command.')
    @cc.command()
    async def remove(self, ctx, trigger: str):
        """Delete the custom command with the given trigger."""
        command_list = self.commands[:]
        if not any(x.trigger == trigger for x in command_list):
            await Misc.send_error(
                ctx, 'Trigger {} not found.'.format(trigger))
            return
        command = [x for x in command_list if x.trigger == trigger][0]
        command_list.remove(command)
        if self.save_commands(command_list):
            await Misc.send_msg(
                ctx, 'Command {} removed.'.format(trigger))
        else:
            await Misc.send_error(ctx, 'Couldn\'t remove command.')
    @cc.command()
    async def list(self, ctx):
        """Show all registered custom commands (trigger -> title)."""
        command_list = self.commands
        if len(command_list) > 0:
            response = 'List of custom commands (trigger -> title):\n```'
            for i, command in enumerate(command_list, 1):
                response += '{}. {} -> {}\n'.format(
                    i, command.trigger, command.title)
            response += '```'
        else:
            response = 'No custom commands yet.'
        await ctx.send(response)
    async def check_message(self, message: Message) -> bool:
        """Answer *message* if it exactly matches a trigger; True when handled."""
        command_list = self.commands
        triggers = [self.prefix + x.trigger for x in command_list]
        if message.content in triggers:
            command = next(
                x for x in command_list if self.prefix + x.trigger == message.content)
            await self.send_response(message.channel, command)
            return True
        return False
    async def send_response(self, channel, command):
        """Send *command*'s embed to *channel*."""
        embed = Embed(
            title=command.title,
            description=command.content,
            colour=Config.COLOURS['completed'])
        await channel.send(embed=embed)
    def load_commands(self):
        """Read commands.json, creating an empty file on first run."""
        try:
            with open('commands.json', 'r') as file:
                return jsonpickle.decode(file.read())
        except IOError:
            print('commands.json not found; creating file')
            self.save_commands([])
            return self.load_commands()
    # returns false if unable to save commands
    def save_commands(self, command_list) -> bool:
        """Persist *command_list* to commands.json and adopt it on success."""
        try:
            with open('commands.json', 'w') as file:
                file.write(jsonpickle.encode(command_list))
            self.commands = command_list
            return True
        except IOError:
            print('couldn\'t save commands')
            return False
@dataclass
class CustomCommand:
    """A staff-defined chat command: `trigger` invokes an embed with `title`/`content`."""
    trigger: str  # message text (minus optional prefix) that invokes the command
    title: str    # embed title
    content: str  # embed description/body
|
YTCdev/yangubot | src/config_ex.py | from discord import Colour
class Config:
    """Static bot configuration: API credentials and Discord server settings.

    This file is the example template (config_ex.py); the bot presumably
    imports the filled-in copy as config.py — verify before deploying."""
    # WooCommerce API settings
    #
    # Get an API key by going to WooCommerce > Settings > Advanced > REST API on
    # your WordPress installation.
    WCM_URL = ''
    WCM_KEY = ''
    WCM_SECRET = ''
    # Discord API settings
    #
    # Get your bot token at https://discord.com/developers/applications
    BOT_TOKEN = ''
    # Discord server settings
    # -----------------------
    # The user IDs of the owners of the bot
    OWNER_IDS = []
    # The role IDs of the staff role(s) on the server
    STAFF_IDS = []
    # The role to give users if they have purchased a product
    # (set to 0 if none)
    PATRON_ID = 0
    # The text channels designated to customer support, general discussion, and
    # a product gallery, respectively (set to 0 if none)
    CHANNELS = {
        'support': 0,
        'lounge': 0,
        'gallery': 0
    }
    # List of countries with faster shipping
    # TODO: WooCommerce integration to calculate shipping estimates
    COUNTRIES = ['CA', 'US', 'AU', 'AT', 'BE', 'BG', 'CY', 'CZ', 'DK', 'EE',
                 'FI', 'FR', 'DE', 'GR', 'HU', 'HR', 'IE', 'IT', 'LV', 'LT',
                 'LU', 'MT', 'NL', 'PL', 'PT', 'RO', 'SK', 'SI', 'ES', 'SE',
                 'GB']
    # Colours to use for each order status
    COLOURS = {
        'completed': Colour(0x4caf50),
        'cancelled': Colour(0xf44336),
        'failed': Colour(0xf44336),
        'pending': Colour(0xffeb3b),
        'processing': Colour(0xffeb3b),
        'on-hold': Colour(0xff9800),
        'other': Colour(0xffeb3b)
    }
|
YTCdev/yangubot | src/bot.py | from time import monotonic
from sys import exit
import discord
from discord.ext import commands
from config import Config
from ccm import CustomCommandsManager
from store import Store
from order import Order
from misc import Misc
# Module-level singletons: the WooCommerce store client and the Discord bot.
ytc = Store(Config.WCM_URL, Config.WCM_KEY, Config.WCM_SECRET)
bot = commands.Bot(command_prefix='!')
# drop the built-in !help command (no replacement is registered here)
bot.remove_command("help")
@bot.event
async def on_ready():
    """Startup hook: register the custom-commands cog, log an invite URL and set presence."""
    bot.add_cog(CustomCommandsManager(bot))
    print('logged in as {}'.format(bot.user))
    # convenience: print an administrator-permission invite link
    print(discord.utils.oauth_url(
        bot.user.id, permissions=discord.Permissions(8)))
    print('-----')
    await bot.change_presence(
        activity=discord.Activity(
            type=discord.ActivityType.watching, name='#support'))
@bot.command()
async def status(ctx, order_id):
    """!status <order_id>: look up a WooCommerce order and post its status embed.

    Also grants the patron role when the order is processing/completed.
    """
    start_time = monotonic()
    print("Check order id {}".format(order_id))
    if not order_id.isdigit():
        await Misc.send_error(
            ctx,
            ':warning: Order ID must only consist of numbers',
            'Example: !status 12345')
        return
    await ctx.trigger_typing()
    response = ytc.get_order(order_id)
    # WooCommerce returns an error payload containing a 'code' key on failure
    if 'code' in response:
        if response['code'] == 'woocommerce_rest_shop_order_invalid_id':
            await Misc.send_error(
                ctx, ':warning: No order exists with the given ID')
        else:
            await Misc.send_error(ctx, ':warning: Unknown error occurred, please ping a staff member for assistance',
                                  response['code'])
        return
    notes = ytc.get_order_notes(order_id)
    order = Order(response, notes)
    embed = order.create_embed()
    await ctx.send(embed=embed)
    if order.status in ['processing', 'completed']:
        await add_patron_role(ctx, order)
    end_time = monotonic()
    print('ok, took {:.2f} s'.format(end_time - start_time))
    print('-----')
async def add_patron_role(ctx, order: Order):
    """Grant the configured patron role to the customer attached to *order*.

    Looks up the guild member matching the Discord tag stored on the order
    and adds Config.PATRON_ID, skipping silently when the customer gave no
    tag, is not in the server, or already has the role.
    """
    # if customer didnt provide his discord ID
    if not order.discord_id:
        return
    user = ctx.guild.get_member_named(order.discord_id)
    if not user:
        print('User not in server')
    elif any(role.id == Config.PATRON_ID for role in user.roles):
        print('User already has role')
    else:
        try:
            role = discord.utils.get(ctx.guild.roles, id=Config.PATRON_ID)
            # Fix: add the role to the member the order belongs to, not to
            # whoever invoked the command — the role check and the log line
            # above both refer to `user`, not ctx.author.
            await user.add_roles(role)
            print('Added role to user {}'.format(order.discord_id))
        except discord.Forbidden:
            print('Missing permissions to add role')
@bot.command()
@commands.check(Misc.is_staff)
async def wcm(ctx, order_id):
    """Staff-only: DM the WooCommerce admin edit link for an order, then delete the trigger message."""
    admin_link = "{}/wp-admin/post.php?post={}&action=edit".format(
        Config.WCM_URL, order_id)
    await ctx.author.send(admin_link)
    await ctx.message.delete()
@bot.command()
@commands.check(Misc.is_owner)
async def stop_bot(ctx):
    """Owner-only: announce shutdown, then terminate the process."""
    await Misc.send_msg(ctx, "Attempting to stop bot...")
    # `exit` is sys.exit (imported at the top); it raises SystemExit to end the process
    exit()
@bot.event
async def on_message(message: discord.Message):
    """Global message hook: enforce gallery rules, then custom and built-in commands."""
    # prevent bot from reacting to its own messages
    if message.author.id == bot.user.id:
        return
    # prevent bot from responding to already-removed messages
    if message.channel.id == Config.CHANNELS['gallery']:
        # in the gallery, only process commands when the message survives moderation
        if await check_gallery_message(message):
            await bot.process_commands(message)
    else:
        # custom commands take priority; fall through to built-ins otherwise
        ccm = bot.get_cog('CustomCommandsManager')
        if not await ccm.check_message(message):
            await bot.process_commands(message)
async def check_gallery_message(message: discord.Message) -> bool:
    """Return True when *message* is allowed to stay in the gallery channel.

    Messages with attachments always pass; a text-only message passes only if
    its author posted an attachment within the last 75 channel messages —
    otherwise it is deleted and a short-lived warning is posted.
    """
    # ignore message if it has attachments
    if len(message.attachments) > 0:
        return True
    # check if user has previously uploaded any pics
    allowed = False
    async for old_message in message.channel.history(limit=75):
        if old_message.author == message.author and len(old_message.attachments) > 0:
            allowed = True
            break
    # if no pics from that user found, delete and notify
    if not allowed:
        await message.delete()
        await message.channel.send(content=get_gallery_warning(message.author),
                                   delete_after=12.5)
    return allowed
def get_gallery_warning(user: discord.Member) -> str:
    """Build the pics-only reminder mentioning *user* and the discussion channels."""
    lounge = Config.CHANNELS['lounge']
    support = Config.CHANNELS['support']
    return (f"{user.mention} - this channel is for **pics only**. Please use <#{lounge}> for "
            f"discussion or ask in <#{support}> if you have any questions.\n\n"
            "If you want to voice your opinion - feel free to do so, just don't "
            "forget to send some pictures first!")
if __name__ == "__main__":
bot.run(Config.BOT_TOKEN)
|
YTCdev/yangubot | src/order.py | import dateutil.parser
from pandas.tseries.offsets import BDay
from discord import Embed
from config import Config
class Order:
    """Wraps a WooCommerce order (raw JSON dict plus its order notes) and
    derives display-ready fields: shipping estimate, tracking flag, and the
    buyer's Discord id."""
    def __init__(self, json, notes):
        self.json = json
        self.notes = notes
        self.id = json['id']
        self.status = json['status']
        # Only the first line item is used — assumes single-product orders.
        self.product = json['line_items'][0]['name']
        self.sku = json['line_items'][0]['sku']
        # Fall back to the billing country when no shipping country is set.
        self.country = json['shipping']['country'] or json['billing']['country']
        self.order_date = self.parse_date(json['date_created'])
        self.modified_date = self.parse_date(json['date_modified'])
        self.shipping_method = json['shipping_lines'][0]['method_title']
        self.arrival_estimate = self.estimate_shipping()
        self.has_tracking = self.is_tracked()
        self.discord_id = self.get_discord_id()
    def parse_date(self, date_str):
        """Parse a WooCommerce date string into a datetime."""
        return dateutil.parser.parse(date_str)
    def estimate_shipping(self):
        """Return the estimated arrival datetime, or None when no estimate applies.

        Estimates exist only for completed tablet-cover orders with a
        recorded ship date. Business days added: Rush = 7; otherwise 10 for
        countries in Config.COUNTRIES, 23 for everywhere else.
        """
        if 'tablet cover' not in self.product.lower():
            return None
        if self.status != 'completed':
            return None
        # Tablet cover shipping
        # 8-10 days for countries in config list
        # 15-23 everywhere else
        # Rush shipping: 4-7 business days
        shipped_on = self.ship_date()
        if not shipped_on:
            return None
        if self.shipping_method == 'Rush':
            return self.calc_bdays(shipped_on, 7)
        else:
            if self.country in Config.COUNTRIES:
                return self.calc_bdays(shipped_on, 10)
            return self.calc_bdays(shipped_on, 23)
    def calc_bdays(self, date, days):
        """Add *days* business days to *date* (pandas BDay offset)."""
        return date + BDay(days)
    def ship_date(self):
        """Date of the first order note mentioning 'order confirmation', or None."""
        for order_note in self.notes:
            if 'order confirmation' in order_note['note'].lower():
                return self.parse_date(order_note['date_created'])
        return None
    def is_tracked(self):
        """True if any order note mentions tracking."""
        for order_note in self.notes:
            if 'track' in order_note['note'].lower():
                return True
        return False
    def get_discord_id(self):
        """Buyer's Discord id from order meta_data (key 'myfield5'), or None.

        NOTE(review): 'myfield5' is presumably written by a checkout-form
        plugin — confirm against the store configuration.
        """
        return next(
            (x['value'] for x in self.json['meta_data'] if x['key'] == 'myfield5'), None
        )
    def create_embed(self):
        """Build a discord Embed summarising this order for the status command."""
        embed = Embed(title='Order Status', timestamp=self.order_date)
        # Colour-code by status; unknown statuses use the generic colour.
        if self.status in Config.COLOURS:
            embed.colour = Config.COLOURS[self.status]
        else:
            embed.colour = Config.COLOURS['other']
        ship_method_print = "**{}**\n{}".format(
            self.status.capitalize(), self.shipping_method.capitalize())
        if 'shipping' not in ship_method_print.lower():
            ship_method_print += " shipping"
        embed.add_field(
            name="#{} • :flag_{}:".format(self.id, self.country.lower()),
            value=ship_method_print,
            inline=True
        )
        temp = ''
        if self.arrival_estimate is not None:
            # strftime has no portable "no zero padding" flag, so strip the
            # leading zeros by hand.
            temp += '*Estimated* arrival: {}\n'.format(
                self.arrival_estimate.strftime(
                    "%d %b %Y").lstrip("0").replace(" 0", " "))
        if self.has_tracking:
            temp += 'Check email for tracking info\n'
        if self.status == 'on-hold':
            temp += 'Please check your email'
        if temp == '':
            temp = 'No updates yet'
        embed.add_field(
            name="Notes",
            value=temp,
            inline=True
        )
        embed.add_field(
            name='Last updated',
            value=self.modified_date.strftime(
                "%d %b %Y, %H:%M:%S").lstrip("0").replace(" 0", " "),
            inline=False
        )
        embed.set_footer(text=self.sku)
        return embed
|
YTCdev/yangubot | src/store.py | <filename>src/store.py
from woocommerce import API
class Store:
    """Thin wrapper around the WooCommerce REST API (v3) for order lookups."""
    def __init__(self, url, key, secret):
        """Create an authenticated WooCommerce client for the shop at *url*."""
        self.wcapi = API(
            url=url,
            consumer_key=key,
            consumer_secret=secret,
            wp_api=True,
            version="wc/v3"
        )
    def get_order(self, order_id):
        """Fetch a single order as a parsed JSON dict.

        *order_id* may be a str or an int; it is interpolated into the
        endpoint path (the old ``'orders/' + order_id`` form required str).
        """
        return self.wcapi.get(f'orders/{order_id}').json()
    def get_order_notes(self, order_id):
        """Fetch the list of notes attached to an order as parsed JSON."""
        return self.wcapi.get(f'orders/{order_id}/notes').json()
|
BeanyZoldyck/MTAG | haganai.py | <filename>haganai.py
# Timing for the slow-print effect, RNG for dialogue/outcomes, math for
# the relationship curves.
import time
import random
import math
from functools import reduce
# Player state: confidence, inventory and cash.
playerEgo = 50
playerBag = ['stick']#random event: you may find money or gifts
playerBread = 35
# Selectable love interests and their starting relationship points (RP).
possibleInterests = ["Lexi", "Ali", "Chuck", "Socrates"]
relations = {"Lexi": 7,
             "Ali": 12,
             "Chuck": -4,
             "Socrates": 10}
# Bio text shown by the 'i' option on the crush-selection screen.
# (Typo fix: "Western Pilosophy" -> "Western Philosophy".)
info={"Lexi": "A mean-pretty girl who is relatively popular for how unspoken they are. Better than everyone, smart, kind, pretty, and spiritual. \nLikes: Soccer, makeup, boys, texting. \nDislikes: Driving, boys, people.",
      "Ali": "A short and witty girl who seems quiet, but has a very bubbly personality when you get to know them. Strange obsession with a different celebrity or fictional character every week. \nLikes: Shows that prominently feature queer-coded characters, cats. \nDislikes: Public Speaking, Animal cruelty",
      "Chuck": "A cantankerous and egotistical individual who makes everything about himself while still finding a way to berate others. Can bench 250. \nLikes: Lifting, Video Games, Bubble Wrap. \nDislikes: Humid weather, cold showers, people (x3)",
      "Socrates": "Greek philosopher from Athens accredited as a founder of Western Philosophy. An enigmatic figure you can only learn about from dialogue. \nLikes: Soliloquies, Debating, Public Speaking, Irony. \nDislikes: Writing, Status-quo, Hemlock"
      }
# Shop catalogue: parallel lists of names, prices and descriptions.
items='''cabbage
rose
magazine
stick
Thrustmaster T80 Ferrari 488 GTB'''.split('\n')
prices=[2.0,
        5.0,
        20.0,
        .5,
        130.0]
descriptions='''A literal head of cabbage.
A rose flower, a woody perennial flowering plant of the genus Rosa said to represent love.
A random popular culture magazine filled with the latest celebrity gossip
a stick from outside. Why is this on the shelf?
8:10 scale replica of the genuine Ferrari 488 GTB wheel, officially licensed by Ferrari and PlayStation 4, and designed to provide total realism.'''.split('\n')
samples={}#link interests to respective sample texts
def Markov(sampleText, initial=''):
    """Generate a line of pseudo-dialogue from *sampleText* using a word bigram chain.

    sampleText: corpus of newline-separated sample sentences; the output
        length is the average word count of those sentences.
    initial: optional starting word; when empty a random corpus word is used.
    Returns the generated string (may stop early at a dead end).
    """
    sample_lines = sampleText.split('\n')
    # Target length = average number of words per sample sentence.
    length = round(sum(len(line.split()) for line in sample_lines) / len(sample_lines))
    sample = sampleText.split()
    if not sample:
        # Empty corpus: nothing to chain on (the old code crashed here).
        return initial
    # Successor table: word -> every word observed directly after it.
    # Duplicates are kept on purpose so random.choice is frequency-weighted.
    ngrams = {}
    for pos in range(len(sample) - 1):
        ngrams.setdefault(sample[pos], []).append(sample[pos + 1])
    currentgram = initial if initial else random.choice(sample)
    result = currentgram
    for _ in range(length):
        successors = ngrams.get(currentgram)
        if not successors:
            # Dead end (e.g. the corpus's final word, or an unknown initial
            # word): stop early instead of raising IndexError/KeyError as
            # the original did via random.choice([]) / ngrams[...].
            break
        nexT = random.choice(successors)
        result += ' ' + nexT
        followers = ngrams.get(nexT)
        if not followers:
            break
        # Quirk kept from the original: the next state is a random successor
        # of the emitted word, not the emitted word itself.
        currentgram = random.choice(followers)
    return result
def show(sample, buffer):
    """Print *sample* word-wrapped to roughly *buffer* columns, framed by dashed rules."""
    print('-'*buffer)
    line=0
    for i in sample.split(' '):
        # Track the width of the current output line (word + trailing space).
        line+=len(i)+1
        if '\n' in i:
            # An embedded newline restarts the column count at this word.
            line=len(i)+1
        if line> buffer:
            # Wrap *before* printing the word that would overflow the budget.
            print()
            print(i, end=' ')
            line=len(i)+1
        else:
            print(i,end=' ')
    print()
    print('-'*buffer)
def printSlow(text, wpm=800):
    """Print *text* one character at a time to mimic typed dialogue.

    wpm controls the per-character delay (12/wpm seconds); a one-second
    pause precedes the trailing newline.
    """
    delay = 12 / wpm
    for character in text:
        print(character, flush=True, end='')
        time.sleep(delay)
    time.sleep(1)
    print()
def displayItems(offSet):
    """Print the shop catalogue, numbering entries from offSet+1 and skipping
    the first offSet items (reads the module-level `items` list)."""
    print('\n')
    for number, name in enumerate(items[offSet:], start=offSet + 1):
        print(number, name)
def displaySingle():
    """Prompt for a 1-based catalogue number and print that item's
    description and price (reads the module-level parallel lists)."""
    userItem=int(input("Enter a number to select an item: "))-1
    print(f'{descriptions[userItem]}\n{items[userItem]} costs ${prices[userItem]}.')
def flex(playerMoney, inventory):
    """Return a one-line summary of the player's balance and item counts."""
    if inventory == []:
        owned = 'nothing'
    else:
        owned = ', '.join('{} (x{})'.format(thing, inventory.count(thing))
                          for thing in set(inventory))
    return f"Your net worth is ${playerMoney} and you have {owned}"
def buyItem(playerMoney, inventory):
    """Interactively buy a catalogue item; returns the player's new balance.

    Appends the bought item to *inventory* in place. Player-sold items
    (catalogue index > 4) are one-offs and are removed after purchase.
    """
    userItem=int(input("Enter a number to select an item to buy: "))-1
    if playerMoney >= prices[userItem]:
        inventory.append(items[userItem])
        print(f"You bought {items[userItem]} for {prices[userItem]}, so your new balance is ${playerMoney-prices[userItem]}")
        playerMoney = playerMoney-prices[userItem]
        if userItem > 4:
            # NOTE(review): pops the *last* catalogue entry, which is only
            # correct when the bought item is itself the last one — confirm.
            items.pop()
            prices.pop()
            descriptions.pop()
        return playerMoney
    else:
        print(f"You dont have enough money to buy {items[userItem]}!\nYou would need ${prices[userItem]-playerMoney} more.")
        return playerMoney
def sell(playerMoney, inventory):
    """Interactively sell an owned item back to the shop; returns the new balance.

    The sold item moves from *inventory* into the module-level catalogue with
    a player-supplied price (capped at $160) and description.
    """
    product=input(f"You have {[', '.join([str(i)+' (x'+str(inventory.count(i))+')' for i in set(inventory)]),'nothing'][inventory==[]]}\nWhat do you want to sell?: ")
    if product in inventory:
        price=float(input("How much do you want to sell it for?\n"))
        if price>160:
            print("That is not a reasonable price")
            return playerMoney
        else:
            inventory.remove(product)
            items.append(product)
            prices.append(price)
            descriptions.append(input("Describe the item: "))
            return playerMoney+price
    else:
        # Easter egg: typing "nothing" (the empty-inventory wording) is acknowledged.
        if product != 'nothing':
            print("You do not have",product)
        else:
            print("That makes sense.")
        return playerMoney
def shop(playerMoney, inventory):
    """Run the shop menu loop until the player quits; returns (money, inventory)."""
    global items
    global prices
    global descriptions
    while True:
        displayItems(0)
        option=input("\nEnter 0 to buy item\nEnter 1 to display an item\nEnter 2 to display your possesions\nEnter 3 to sell\nEnter q to quit\n")
        if option=='1':
            displaySingle()
        elif option == '0':
            playerMoney=buyItem(playerMoney, inventory)
        elif option =='2':
            print(flex(playerMoney, inventory))
        elif option =='3':
            playerMoney=sell(playerMoney, inventory)
        else:
            # Any other input (including 'q') exits the shop.
            break
    return playerMoney, inventory
def firstDate(interest):
    """Play out the first date scene with *interest*.

    Mutates the global player/relationship state; bad outcomes divert to
    backInBlood() and return early, otherwise control falls through to
    goNext().
    """
    global playerEgo
    global playerBread
    global playerBag
    global relations
    playerDate = input(f"Are you sure you want to ask {interest} to go out? (y/n) ")
    if playerDate == "y":
        printSlow("They say yes. You pick them up from their home, and are now driving around town. After some small talk the actual content of the date is brought up.")
        converse(interest)
        playerPlace = input("Do you go to a restaurant? (y/n) ")
        if playerPlace == "y":
            printSlow("You now are at Coney Island.")
            playerRude = input("After a 15 minute wait, the staff brings you the wrong meal. You ordered the cheeseburger and they brought you nothing at all, as you are still waiting.\nThe waitress comes back around and asks how everything is going. Do you let her have it? (y/n) ")
            converse(interest)
            if playerRude == "y":
                converse(interest)
                printSlow(f"{interest} thinks less of you because the waitress was only 12 years old (it's a family business). {interest} then 'remembers' that they left their wallet in their car. Which is at their home because you drove them here and you have to pick up the tab.\n-15 RP\n-40 dollars")
                playerEgo -= 1
                playerBread -= 40
                # Going into debt is covered by the interest, at an RP cost.
                if playerBread < 0:
                    printSlow(f"You didn't have enough money, but luckily {interest} spotted you the rest..")
                    relations[interest] -= -playerBread
                    playerBread=0
                relations[interest] -= 15
                backInBlood(interest)
                return
            else:
                converse(interest)
                printSlow(f"{interest} had a fun time, and found it to be a relaxing evening. You two split the bill.\n+10 RP\n-20 dollars")
                playerEgo += 8
                playerBread -= 20
                relations[interest] += 10
                #put code here to start the school_quest
        else:
            converse(interest)
            print(f"{interest} visibly loses some interest. They bring up an amusement park that is having a sale for $10. This may be the last chance.")#lol
            userAmuse = input("Do you go to the amusement park? (y/n) ")
            if userAmuse == "y":
                converse(interest)
                printSlow(f"{interest} had a fun time and associate you with thrills.\n+15 RP -10 dollars")
                playerEgo += 10
                playerBread -= 10
                if playerBread < 0:
                    printSlow(f"You didn't have enough money, but luckily {interest} spotted you the rest..")
                    relations[interest] -= -playerBread
                    playerBread=0
                relations[interest] += 15
                #put code here to start the school_quest
            else:
                converse(interest)
                printSlow(f"You drive around aimlessly for a while, but {interest} enjoys the time.\n+1 RP")
                relations[interest] += 1
                #put code here to start the school_quest
    else:
        userTime = input("Do You seek time with your interest? (y/n) ")
        if userTime == "y":
            printSlow(f"You find yourself near {interest} a lot.")
            userTalk = input("Do you talk to them? (y/n) ")
            if userTalk == "y":
                converse(interest)
                printSlow("You two have a lot in common.\n+7 RP")
                relations[interest] += 7
                #put code here to start the school_quest
            else:
                printSlow(f"{interest} finds it weird that you just stand near them.\n-5RP")
                converse(interest)
                relations[interest] -= 5
                backInBlood(interest)
                return
        else:
            printSlow(f"You don't talk to {interest} much that week.")
            user_hole = input("Do you regret it? (y/n) ")
            if user_hole == "y":
                printSlow("-10 Ego")
                playerEgo -= 10
                backInBlood(interest)
                return
            else:
                printSlow("Time with you is a privilege anyways.\n+2 Ego")
                playerEgo += 2
    print()
    goNext(interest)
def latterDate(interest):
    """Repeat-date scene (restaurant / park / nowhere); mutates global state.

    NOTE(review): the '<NAME>' entry in the restaurant list looks like an
    anonymisation artifact from the original source — confirm the intended
    restaurant name.
    """
    global playerEgo
    global playerBread
    global playerBag
    global relations
    playerDate = input(f"Where do you want to ask {interest} to go out to? (restaurant/park/nowhere) ")
    if playerDate == "restaurant":
        printSlow(f"You now are at {random.choice('Coney Island,<NAME>,Applebees,Chillis,McDonalds'.split(','))}.")
        playerRude = input("After a 15 minute wait, the staff brings you the wrong meal. You ordered the cheeseburger and they brought you nothing at all, as you are still waiting.\nThe waitress comes back around and asks how everything is going. Do you let her have it? (y/n) ")
        converse(interest)
        if playerRude == "y":
            converse(interest)
            printSlow(f"{interest} thinks less of you because the waitress was only 12 years old (it's a family business). {interest} then 'remembers' that they left their wallet in the car. Which is at their home because you drove them here and you have to pick up the tab.\n-15 RP\n-40 dollars")
            playerEgo -= 1
            playerBread -= 40
            # Going into debt is covered by the interest, at an RP cost.
            if playerBread < 0:
                printSlow(f"You didn't have enough money, but luckily {interest} spotted you the rest..")
                relations[interest] -= -playerBread
                playerBread=0
            relations[interest] -= 15
            backInBlood(interest)
            return
        else:
            converse(interest)
            printSlow(f"{interest} had a fun time, and found it to be a relaxing evening. You two split the bill.\n+10 RP\n-20 dollars")
            playerEgo += 8
            playerBread -= 20
            if playerBread < 0:
                printSlow(f"You didn't have enough money, but luckily {interest} spotted you the rest..")
                relations[interest] -= -playerBread
                playerBread=0
            relations[interest] += 10
    elif playerDate=='park':
        converse(interest)
        printSlow(f"{interest} had a fun time at {random.choice('the state fair,Cedar Point,Kalahari'.split(','))} and associates you with thrills.\n+15 RP -10 dollars")
        playerEgo += 10
        playerBread -= 10
        if playerBread < 0:
            printSlow(f"You didn't have enough money, but luckily {interest} spotted you the rest..")
            relations[interest] -= -playerBread
            playerBread=0
        relations[interest] += 15
    else:
        # NOTE(review): the narration promises +4 RP but no points are
        # actually added here — confirm intended behavior.
        printSlow(f"You and {interest} enjoy some time together +4 RP")
    print()
def backInBlood(interest):#the action of getting the RP back in blood
    """Recovery path after a bad event: win RP back via a gift or a talk.

    Fixes vs. original: the multi-item gift prompt referenced an undefined
    name ``inventory`` (NameError) — it now uses the global playerBag — and
    the talk prompt called printSlow() (which returns None, making the 'y'
    branch unreachable) instead of input().
    """
    global playerEgo
    global playerBread
    global playerBag
    global relations
    printSlow(f"Recent events have not left {interest} seeming interested in you.")
    userBuy = input("Do you solve the problem with material things? (y/n) ")
    if userBuy == 'y':
        printSlow("Entering shop.")
        playerBread, playerBag=shop(playerBread, playerBag)
        if len(playerBag) == 1:
            # Only one item owned: offer it directly.
            userGift = input(f'Do you give {interest} "{playerBag[0]}"? (y/n) ')
            if userGift == 'y':
                userGift = playerBag[0]
        else:
            userGift = input(f"What will you give {interest}? You have {[', '.join([str(i)+' (x'+str(playerBag.count(i))+')' for i in set(playerBag)]),'nothing'][playerBag==[]]}")
        if userGift in playerBag:
            printSlow(f'{interest} loved the gift!\n+10 RP')
            relations[interest] += 10
        else:
            printSlow("You can't do that. Nothing changes")
    else:
        userTalk = input(f'Do you try to talk to {interest}? (y/n) ')
        if userTalk == 'y':
            printSlow('You come to somewhat of an understanding with them.\n+5 RP')
            relations[interest] += 5
        else:
            printSlow('No change occurs in their feelings. It may be time to go next.')
def goNext(interest):
    """Aftermath branch when a date cooled but didn't blow up: push on or give space.

    Fix vs. original: user-facing typo "strangey" -> "strangely".
    """
    global playerEgo
    global playerBread
    global playerBag
    global relations
    printSlow(f"Recent events have not left {interest} hating you.")
    userPush = input("Do you seek further ? (y/n) ")
    if userPush == 'y':
        # High ego carries the conversation; low ego backfires.
        if playerEgo >= 55:
            printSlow(f'You are strangely confident in future conversations. {interest} respects someone who can talk about themselves for 10 minutes straight.')
            relations[interest] += 5
        else:
            converse(interest)
            printSlow(f"{interest} realizes that you are pretty cringe when considered in a vacuum.")
            relations[interest] -= 3
    else:
        userTalk = input(f'You give {interest} some space. Do you give them more space? (y/n) ')
        if userTalk == 'y':
            printSlow(f'It\'s been a while... {interest} starts to forget the fun date you guys had a while back.')
            relations[interest] -= 3
        else:
            converse(interest)
            printSlow(f'{interest} remembers the fun date you guys had a while back. +3 RP')
            relations[interest] += 3
def love(x):
    """Logistic curve mapping relationship points to a confession-success probability."""
    decay = math.exp(-0.055 * x)
    return 1 / (1 + 10 * decay)
def like(x):
    """Map relationship points into (-1, 1): the odds a conversation goes well."""
    scaled = math.atan(x / 14)
    return (2 / math.pi) * scaled
def finale(interest):
    """Confession scene: returns True (reciprocated) with probability love(RP)."""
    printSlow(f"Today is the day. You will finally confess to {interest}")
    converse(interest)
    relationship = relations[interest]
    chance = love(relationship)
    # Bernoulli draw against the logistic success curve.
    if chance>random.random():
        printSlow("They...",100)
        printSlow("feel the same way!")
        converse(interest)
        return True
    else:
        printSlow("They...",100)
        printSlow("don't feel the same way..")
        converse(interest)
        return False
def converse(interest):
    """Print one Markov-generated line of dialogue spoken by *interest*.

    Reconstructed: the original body was syntactically invalid (a statement
    beginning with ``.decode(...)``) and iterated an undefined ``wordList``.
    This version assumes each line of sources.txt is prefixed with
    b'<Name>:' — TODO confirm the actual file format.
    """
    key = interest.encode() + b':'
    with open('sources.txt', 'rb') as f:
        lines = f.readlines()
    # Collect this character's sample sentences, stripped of the name tag.
    samples = [line[len(key):].decode('utf-8').lower().strip()
               for line in lines if line.startswith(key)]
    sampleText = '\n'.join(samples)
    printSlow(f'{interest}: ' + Markov(sampleText))
def appraise(num):
    """Map a relationship-point total to a phrase describing the interest's feelings."""
    statuses = ("doesn't like you",
                'is not that familiar with you',
                'is friends with you',
                'is fond of you')
    # Rank = how many tier boundaries the score has strictly passed
    # (equivalent to the original's sort-and-index trick).
    rank = sum(1 for boundary in (9, 29, 75) if boundary < num)
    return statuses[rank]
def gameOver():
    """Print the final stats summary (reads the module globals) and exit the process."""
    show(f"Stats -\nInterest:{interest}\nRelation Points: {relations[interest]}, Net Worth:{playerBread}, Possessions:{playerBag}", 40)
    exit()
# ---- Crush selection (typo fixes in game text: "aboslute" -> "absolute",
# "is quantifies by" -> "is quantified by", "want to on an outing" ->
# "want to go on an outing", "no change with" -> "no chance with") ----
print(', '.join(possibleInterests))
interest = input("Enter in a name to select your crush, or i to view info on an interest: ")
while interest not in possibleInterests:
    if interest == 'i':
        try:
            tempInterest=input("Type out a name to display info on them: ")
            show(info[tempInterest[0].upper()+tempInterest[1:]], 58)
        except KeyError:
            pass
        finally:
            interest = input("Enter in a name to select your crush, or i to view info on an interest: ")
    elif interest[0].upper()+interest[1:] not in possibleInterests:
        interest = input("Select someone from the list: ")
    else:
        # Accept lowercase input by capitalising the first letter.
        interest = interest[0].upper()+interest[1:]
show("Welcome to Haganai! (or BWTGS, or \"I don't have many friends\") Haganai is a text based dating game where you try to win over non reciprocating lovers, ranging from high schoolers to ancient Greek philosophers. Actually, those are the only two options... Anyways the goal of the game is to get one of them to like you back. You can talk to them, shop, and even give them gifts. However, gifting is not an absolute way to someones heart >:). Fondness is quantified by Relationship Points (rp!). The more rp, the more your inscrutable LOYL likes you!",100)#information dump
print(f'You have a crush on {interest}, and {interest} {appraise(relations[interest])}')
times=0
dates=0
# ---- Main game loop: one action per turn ----
while 1:
    choices=f'''\nEnter s to go shopping
Enter c to talk to {interest}
Enter g to gift {interest}
Enter a to appraise your relationship'''
    print(choices,end='')
    # Confessing unlocks after a few turns.
    if times > 3:
        print(f'\nEnter f to confess to {interest}')
    else:
        print()
    opt = input("").lower()
    if opt == 's':
        playerBread, playerBag = shop(playerBread, playerBag)
        print(f"After shopping, {flex(playerBread, playerBag)}")
    elif opt == 'c':
        print(f"You talk to {interest}.")
        converse(interest)
        # Conversation outcome is a random draw weighted by the current RP.
        positive = like(relations[interest])>random.random()
        rpChange=[-1,1][positive]*random.choice(range(1,5))
        print(f"\nConversation with {interest} went {['rough','smooth'][positive]}. {['','+'][positive]}{rpChange} rp!")
        relations[interest]+=rpChange
        playerEgo+=rpChange
        if positive:
            date=input(f"Do you want to go on an outing with {interest}? (y/n): ")
            if date=='y' and dates==0:
                dates+=1
                firstDate(interest)
            elif date=='y' and dates>0:
                latterDate(interest)
    elif opt == 'g':
        print(f"You have {[', '.join([str(i)+' (x'+str(playerBag.count(i))+')' for i in set(playerBag)]),'nothing'][playerBag==[]]}")
        gift = input(f"What do you want to give {interest}? (\"nothing\" to exit): ")
        if gift != 'nothing':
            if gift in playerBag:
                playerBag.remove(gift)
                rpChange=random.choice(range(10))
                converse(interest)
                print(f"{interest}{[' acts like they',''][rpChange>0]} appreciated the {gift}{['...','!'][rpChange>0]} +{rpChange} rp{['.','!'][rpChange>0]}")
                relations[interest]+=rpChange
                playerEgo-=rpChange
            else:
                print("You do not have",gift)
    elif opt =='a':
        printSlow(f'I think {interest} {appraise(relations[interest])}.')
    elif opt == 'f' and times>3:
        confess=input('Continue? (y/n)')
        if confess=='y':
            pulled=finale(interest)
            if pulled:
                print(f"You and {interest} live happily ever after! What a good ending.")
                gameOver()
            else:
                printSlow(f"You fumbled {interest} bad...",100)
                print('Better luck next time')
                gameOver()
    # Running out of ego ends the game regardless of RP.
    if playerEgo<=0:
        printSlow(f"You lost all of your ego.. You have no chance with {interest} anymore\nGAME OVER",100)
        gameOver()
    times+=1
|
BeanyZoldyck/MTAG | quests.py | <reponame>BeanyZoldyck/MTAG
import time
try:
    # Shared shop module (not always present when run standalone).
    import gift_shop#this file
except ModuleNotFoundError:
    print("(Btw this code will be used where it can access my modular gift shop code)")
# Player state and starting relationship points, mirroring haganai.py.
playerEgo = 100
playerBag = ['stick']
playerBread = 50
possibleInterests = ["Lexi", "Ali", "Chuka", "Socrates"]
relations = {"Lexi": 10,
             "Ali": 15,
             "Chuka": -4,
             "Socrates": 10}
# Debug dump of the state before the demo date runs.
print('Ego:',playerEgo, '$:',playerBread, 'Interest:',relations[possibleInterests[-1]])
def printSlow(text):
    """Print *text* one character at a time (fixed 20 ms per character),
    then pause one second before the trailing newline."""
    for character in text:
        print(character, flush=True, end='')
        time.sleep(.02)
    time.sleep(1)
    print()
def date(interest):
    """Interactive date scene for *interest*; mutates global player/RP state.

    Early prototype of haganai.firstDate: restaurant/amusement-park branch on
    success, diverting to backInBlood() on bad outcomes, falling through to
    goNext() otherwise.
    """
    global playerEgo
    global playerBread
    global playerBag
    global relations
    playerDate = input(f"Do you ask {interest} to go out? (y/n) ")
    if playerDate == "y":
        printSlow("They say yes. You pick them up from their home, and are now driving around town. After some small talk the actual content of the date is brought up.")
        playerPlace = input("Do you go to a restaurant? (y/n) ")
        if playerPlace == "y":
            printSlow("You now are at Coney Island.")
            playerRude = input("After a 15 minute wait, the staff brings you the wrong meal. You ordered the cheeseburger and they brought you nothing at all, as you are still waiting.\nThe waitress comes back around and asks how everything is going. Do you let her have it? (y/n) ")
            if playerRude == "y":
                printSlow(f"{interest} thinks less of you because the waitress was only 12 years old (it's a family business). {interest} then 'remembers' that they left their wallet in the car. Which is at their home because you drove them here and you have to pick up the tab.\n-15 RP\n-40 dollars")
                playerEgo -= 1
                playerBread -= 40
                relations[interest] -= 15
                backInBlood(interest)
                return
            else:
                printSlow(f"{interest} had a fun time, and found it to be a relaxing evening. You two split the bill.\n+10 RP\n-20 dollars")
                playerEgo += 8
                playerBread -= 20
                relations[interest] += 10
                #put code here to start the school_quest
        else:
            print(f"{interest} visibly loses some interest. They bring up an amusement park that is having a sale for $15. This may be the last chance.")#lol
            userAmuse = input("Do you go to the amusement park? (y/n) ")
            if userAmuse == "y":
                printSlow(f"{interest} had a fun time and associate you with thrills.\n+15 RP -10 dollars")
                playerEgo += 10
                playerBread -= 10
                relations[interest] += 15
                #put code here to start the school_quest
            else:
                printSlow(f"You drive around aimlessly for a while, but {interest} enjoys the time.\n+1 RP")
                relations[interest] += 1
                #put code here to start the school_quest
    else:
        userTime = input("Do You seek time with your interest? (y/n) ")
        if userTime == "y":
            printSlow(f"You find yourself near {interest} a lot.")
            userTalk = input("Do you talk to them? (y/n) ")
            if userTalk == "y":
                printSlow("You two have a lot in common.\n+7 RP")
                relations[interest] += 7
                #put code here to start the school_quest
            else:
                printSlow(f"{interest} finds it weird that you just stand near them.\n-5RP")
                relations[interest] -= 5
                backInBlood(interest)
                return
        else:
            printSlow(f"You don't talk to {interest} much that week.")
            user_hole = input("Do you regret it? (y/n) ")
            if user_hole == "y":
                printSlow("-10 Ego")
                playerEgo -= 10
                backInBlood(interest)
                return
            else:
                printSlow("Time with you is a privilege anyways.\n+2 Ego")
                playerEgo += 2
    print()
    goNext(interest)
def backInBlood(interest):#the action of getting the RP back in blood
    """Recovery path after a bad event: win RP back via a gift or a talk.

    Fixes vs. original: the multi-item gift prompt referenced an undefined
    name ``inventory`` (NameError) — it now uses the global playerBag — and
    the talk prompt called printSlow() (returns None, making the 'y' branch
    unreachable) instead of input().
    """
    global playerEgo
    global playerBread
    global playerBag
    global relations
    printSlow(f"Recent events have not left {interest} seeming interested in you.")
    userBuy = input("Do you solve the problem with material things? (y/n) ")
    if userBuy == 'y':
        #playerBread, playerBag = gift_shop(playerBread, playerBag)
        #in the implementation this won't be commmented out
        if len(playerBag) == 1:
            # Only one item owned: offer it directly.
            userGift = input(f'Do you give {interest} "{playerBag[0]}"? (y/n) ')
            if userGift == 'y':
                userGift = playerBag[0]
        else:
            userGift = input(f"What will you give {interest}? You have {[', '.join([str(i)+' (x'+str(playerBag.count(i))+')' for i in set(playerBag)]),'nothing'][playerBag==[]]}")
        if userGift in playerBag:
            printSlow(f'{interest} loved the gift!\n+10 RP')
            relations[interest] += 10
        else:
            printSlow("You can't do that. Nothing changes")
    else:
        userTalk = input(f'Do you try to talk to {interest}? (y/n) ')
        if userTalk == 'y':
            printSlow('You come to somewhat of an understanding with them.\n+5 RP')
            relations[interest] += 5
        else:
            printSlow('No change occurs in their feelings. It may be time to go next.')
def goNext(interest):
    """Aftermath branch when things cooled but didn't blow up: push on or give space.

    Fixes vs. original: user-facing typo "strangey" -> "strangely", and a
    stray "(y/n)" suffix removed from a narration line that reads no input.
    """
    global playerEgo
    global playerBread
    global playerBag
    global relations
    printSlow(f"Recent events have not left {interest} hating you.")
    userPush = input("Do you pursue further ? (y/n) ")
    if userPush == 'y':
        # High ego carries the conversation; low ego backfires.
        if playerEgo >= 110:
            printSlow(f'You are strangely confident in future conversations. {interest} respects someone who can talk about themselves for 10 minutes straight.')
            relations[interest] += 5
        else:
            printSlow(f"{interest} realizes that you are pretty cringe when considered in a vacuum.")
            relations[interest] -= 3
    else:
        userTalk = input(f'You give {interest} some space. Do you give them more space? (y/n) ')
        if userTalk == 'y':
            printSlow(f'It\'s been a while... {interest} starts to forget the fun date you guys had a while back.')
            relations[interest] -= 3
        else:
            printSlow(f'{interest} remembers the fun date you guys had a while back. +3 RP')
            relations[interest] += 3
# Demo driver: run one date scene with Socrates, then dump the end state.
date(possibleInterests[-1])
print('Ego:',playerEgo, '$:',playerBread, 'Interest:',relations[possibleInterests[-1]])
|
BeanyZoldyck/MTAG | markov_wordgram.py | <gh_stars>0
import random
#the main algorithm that will run the game
def Markov(sample, length):
for i in enumerate(sample):
try:
ngrams[i[1]].append(sample[i[0]+1])
except KeyError:
ngrams[i[1]] = []
try:
ngrams[i[1]].append(sample[i[0]+1])
except IndexError:
continue
currentgram = random.choice(sample)
result = currentgram
for i in range(length):
poss = ngrams[currentgram]
nexT = random.choice(poss)
result += ' '+nexT
currentgram = random.choice(ngrams[result.split()[-1]])
return result
if __name__ == '__main__':
    # Strip quotes so a path copied from Explorer ("C:\...") works as-is.
    path=input("Path: ").replace('"','')
    with open(path) as f:
        wordList = f.read().lower().split()
    # NOTE(review): redundant — the with-block already closed the file.
    f.close()
    print(Markov(wordList,random.choice(range(10,50))))
|
BeanyZoldyck/MTAG | instagramScraped/instagram_scraper.py | <filename>instagramScraped/instagram_scraper.py
from time import sleep as s
import keyboard
import clipboard
from guide import file
import pyautogui as pag
# peachsocne: presumably a reference image of the target username loaded via
# the guide.file helper — confirm. textPath: output corpus file.
peachsocne = file('lex')
textPath = 'lex.txt'
def area(t1, t2):
    """Convert corner points t1 (top-left) and t2 (bottom-right) into an
    (x, y, width, height) region tuple for pyautogui."""
    x, y = t1
    right, bottom = t2
    return (x, y, right - x, bottom - y)
def capture():
    """Scrape captions near each username match in the on-screen post,
    append the copied text to textPath, then advance to the next post.

    Screen coordinates are hard-coded for one particular window layout —
    confirm before running on another machine.
    """
    click=True
    # Screenshot the post region of the screen.
    post = pag.screenshot(region=area((1060,151),(1611,888)))
    #if pag.locate(file('at'),post,confidence=.8) or pag.locate(file('hashtag'),post,confidence=.8):
    #    click=False
    if click:
        with open(textPath,'ab') as f:
            # Each username-template match gets triple-clicked to select the
            # text, which is then copied and saved.
            for name in pag.locateAll(peachsocne,post,confidence=.9):
                pag.moveTo(name[0]+1060+90+7,name[1]+151+9)
                for _ in range(3):
                    pag.click()
                    s(.15)
                s(.35)
                keyboard.press_and_release('ctrl+c')
                s(.1)
                data=clipboard.paste()
                f.write(data.encode())
                pag.move(0,-100)
        f.close()
        # Arrow-right advances to the next post; wait for it to load.
        keyboard.press_and_release('right')
        s(2)
        s(.3)
    else:
        keyboard.press_and_release('right')
# Scrape indefinitely; stop with Ctrl+C or by killing the process.
while 1:
    capture()
|
BeanyZoldyck/MTAG | gift_shop.py | #This is a freestlye by <NAME>
def displayItems(offSet):
    """Print the shop catalogue, numbering entries from offSet+1 and skipping
    the first offSet items (reads the module-level `items` list)."""
    print('\n')
    for number, name in enumerate(items[offSet:], start=offSet + 1):
        print(number, name)
def displaySingle():
    """Prompt for a 1-based catalogue number and print that item's
    description and price (reads the module-level parallel lists)."""
    userItem=int(input("Enter a number to select an item: "))-1
    print(f'{descriptions[userItem]}\n{items[userItem]} costs ${prices[userItem]}.')
def flex(playerMoney, inventory):
    """Print a one-line summary of the player's balance and item counts."""
    if inventory == []:
        owned = 'nothing'
    else:
        owned = ', '.join('{} (x{})'.format(thing, inventory.count(thing))
                          for thing in set(inventory))
    print(f"Your net worth is ${playerMoney} and you have {owned}")
def buyItem(playerMoney, inventory):
    """Interactively buy a catalogue item; returns the player's new balance.

    Appends the bought item to *inventory* in place; the balance is
    unchanged when the player cannot afford the item.
    """
    userItem=int(input("Enter a number to select an item to buy: "))-1
    if playerMoney >= prices[userItem]:
        inventory.append(items[userItem])
        print(f"You bought {items[userItem]} for {prices[userItem]}. Your new balance is ${playerMoney-prices[userItem]}")
        return playerMoney-prices[userItem]
    else:
        print(f"You dont have enough money to buy {items[userItem]}!\nYou would need ${prices[userItem]-playerMoney} more.")
        return playerMoney
def sell(playerMoney, inventory):
    """Interactively sell an owned item back into the catalogue; returns the new balance.

    The sold item moves from *inventory* into the module-level catalogue with
    a player-supplied price (capped at $160) and description.
    """
    product=input(f"You have {[', '.join([str(i)+' (x'+str(inventory.count(i))+')' for i in set(inventory)]),'nothing'][inventory==[]]}\nWhat do you want to sell?: ")
    if product in inventory:
        price=float(input("How much do you want to sell it for?\n"))
        if price>160:
            print("That is not a reasonable price")
            return playerMoney
        else:
            inventory.remove(product)
            items.append(product)
            prices.append(price)
            descriptions.append(input("Describe the item: "))
            return playerMoney+price
    else:
        print("You do not have",product)
        return playerMoney
if __name__ == '__main__':
    # Demo shop session: fixed catalogue, starting cash, one owned item.
    playerMoney=40.0
    items='''Head of Cabbage
Acer Chromebook 311 CB311-10H-41M9, Military Standard (MIL-STD 810G) impact-resistant body; AMD A-Series Dual-Core A4-9120C, 11.6" HD, 4GB DDR4, 64GB eMMC, 802.11ac WiFi 5, Bluetooth 4.2, Chrome OS
MSI GF65 Thin i7 GTX 1660Ti 8GB/512GB Gaming Laptop
Nintendo Switch™ Fortnite Wildcat Bundle
Bose Noise Cancelling Wireless Bluetooth Headphones 700, Black'''.split('\n')
    prices=[2.0,
            179.0,
            1_091.65,
            497.0,
            379.0]
    descriptions='''A literal head of cabbage.
The Acer Chromebook 311 is the ideal laptop for all ages from the very young upwards. With its safety certification, state-of-the-art low-energy consuming AMD processor, military standard specs and a long battery life, it can stand up to the daily rigors and intense usage of students inside or outside the classroom.
Play your favorite games in style and with ease with the MSI GF65 Thin i7 GTX 1660Ti 8GB/512GB Gaming Laptop. With dedicated thermal solutions for both the CPU and GPU with up to 6 heat pipes, they work harmoniously by minimizing the heat and maximizing the airflow.
This bundle includes a uniquely designed Nintendo Switch system with special art on the system and Nintendo Switch dock, a yellow Joy-Con (L) and blue Joy-Con (R), the Fortnite game pre-installed, 2,000 V-Bucks, and a download code for The Wildcat Bundle.
The unrivaled microphone system in the noise cancelling Bose Headphones 700 adapts to noisy and windy environments so your voice always sounds clear. The design of Bose Headphones 700 has a stainless steel headband and a comfortable fit that’s perfect for all-day listening'''.split('\n')
    inventory=["earbuds"]
    # Menu loop: buy / inspect / show possessions / sell / anything else quits.
    while True:
        displayItems(0)
        option=input("\nEnter 0 to buy item\nEnter 1 to display an item\nEnter 2 to display your possesions\nEnter 3 to sell\nEnter q to quit\n")
        if option=='1':
            displaySingle()
        elif option == '0':
            playerMoney=buyItem(playerMoney, inventory)
        elif option =='2':
            flex(playerMoney, inventory)
        elif option =='3':
            playerMoney=sell(playerMoney, inventory)
        else:
            break
else:
    # Imported as a module: nothing to run at import time.
    pass#giftShop variable definition
|
BeanyZoldyck/MTAG | discordScraped/discord_scraper.py | <filename>discordScraped/discord_scraper.py
import mouse as m
from time import sleep as s
import keyboard
import clipboard
# textPath: raw clipboard dump per page; beanPath: accumulated cleaned corpus.
textPath = 'data.txt'
beanPath = 'BeanZ.txt'
def capture():
    """
    scrape BeanZ's messages then go next

    Click-drags to select the visible chat transcript, copies it, clicks the
    next-page button, and overwrites textPath with the copied text. All
    coordinates are hard-coded for one window layout — confirm before reuse.
    """
    topPageLeftCorner = (882,179)#(1329,266)
    dragAt = (1150, 719)
    nextButton = (1209, 648)
    # Press at the top corner and drag down to select the page's text.
    m.move(topPageLeftCorner[0], topPageLeftCorner[1])
    s(1)
    m.press()
    s(.5)
    m.move(dragAt[0],dragAt[1],duration=1.2)
    s(2.5)
    # Release over the next-page button so the follow-up click advances.
    m.move(nextButton[0],nextButton[1],duration=.5)
    m.release()
    s(.5)
    keyboard.press_and_release('ctrl+c')
    s(.01)
    data=clipboard.paste()
    m.click()
    s(3)
    # 'wb' overwrites: each page replaces the previous raw dump.
    with open(textPath,'wb') as f:
        f.write(data.encode())
    f.close()
def clean():
    """Filter textPath down to lines not written by the target user and
    append them to beanPath.

    Drops author-name lines and GIF/Image markers, and skips a window of
    lines following twitter/parsec links (embedded link previews).
    """
    user = b'WINGU INTJ Sigma Lifter yb stan'
    tempList =[]
    index = 0
    endIndex=0
    # NOTE(review): never used — left as-is.
    wrongIndices=[]
    with open(textPath,'rb') as f:
        textList = f.readlines()
        for line in textList:
            if user in line or b'GIF' in line or b'Image' in line:
                continue
            elif b'//twitter.com/' in line:
                # Skip this line plus the next 10 (tweet preview block).
                endIndex = index+10
            elif b'parsec.gg/' in line:
                endIndex = index+4
                pass
            # NOTE(review): strict '>' also drops the very first line
            # (index 0 is never > endIndex 0) — confirm this is intended.
            if index > endIndex:
                tempList.append(line)
            index+=1
    f.close()
    # Normalise CRLF endings to a single trailing newline per line.
    tempList=list(map(lambda x: x.replace(b'\r\n',b'')+b'\n',tempList))
    with open(beanPath,'ab') as q:
        q.writelines(tempList)
    q.close()
# Scrape 26 pages of chat, then filter the accumulated dump once.
for i in range(26):
    capture()
clean()
|
yeladlouni/AnswerSelection | tests/experiment/qa/model/test_pooling_helper.py | import unittest
import numpy as np
import numpy.testing as npt
import tensorflow as tf
from experiment.qa.model.helper.pooling_helper import non_zero_tokens, attention_softmax, soft_alignment, \
attentive_pooling_weights, weighted_pooling
class TestPoolingHelper(unittest.TestCase):
    """Unit tests for the pooling helper ops: non-zero token masks, masked
    softmax, soft alignment, and the full attentive-pooling pipeline."""
    def setUp(self):
        # one interactive session per test so ops can be evaluated directly
        self.sess = tf.InteractiveSession()
    def tearDown(self):
        self.sess.close()
    def test_non_zero_tokens(self):
        """A 1.0/0.0 mask is produced for non-padding (non-zero) token ids."""
        tokens = tf.constant([
            [24., 22., 11234., 0., 0.],
            [31., 0., 0., 0., 0.]
        ])
        result = self.sess.run(non_zero_tokens(tokens))
        reference_value = np.array([
            [1., 1., 1., 0., 0.],
            [1., 0., 0., 0., 0.]
        ])
        npt.assert_array_equal(result, reference_value)
    def test_attention_softmax(self):
        """Softmax is computed over the unpadded positions only; padded
        positions receive exactly zero weight."""
        vector_in = tf.constant([
            [1., 2., 1., 2.0],
            [.3, .2, .9, .3]
        ])
        padding = tf.constant([
            [1., 1., 1., 0.],
            [1., 1., 0., 0.]
        ])
        result = self.sess.run(attention_softmax(vector_in, padding))
        reference_value = np.array([
            [0.21194156, 0.57611692, 0.21194156, 0.],
            [0.52497919, 0.47502081, 0., 0.]
        ])
        npt.assert_array_almost_equal(result, reference_value)
    def test_soft_alignment(self):
        """Tests the soft alignment function and its capability to handle minibatches with zero-padding"""
        # bilinear alignment parameter (all-ones makes the math easy to check)
        U_AP = tf.constant(
            [
                [1., 1.],
                [1., 1.]
            ]
        )
        # minibatch of 2 identical items, 4 positions, 2 features each
        raw_question_rep = tf.constant(
            [[
                [.2, .7],
                [.4, .8],
                [.1, .9],
                [.7, .8]
            ]] * 2
        )
        raw_answer_rep = tf.constant(
            [[
                [.3, .9],
                [.5, .9],
                [.7, .6],
                [.9, .7]
            ]] * 2
        )
        # only the first 2 question / 3 answer positions are real tokens
        tokens_question_non_zero = tf.constant(
            [
                [1., 1., 0., 0.]
            ] * 2
        )
        tokens_answer_non_zero = tf.constant(
            [
                [1., 1., 1., 0.]
            ] * 2
        )
        result = self.sess.run(soft_alignment(
            U_AP, raw_question_rep, raw_answer_rep, tokens_question_non_zero, tokens_answer_non_zero
        ))
        # hand-computed expectation:
        # QU = [[0.9, 0.9], [1.2, 1.2]]
        # QU(A^T) = [[1.08, 1.26, 1.17], [1.44, 1.68, 1.56]]
        # tanh(...) = [[0.7931991, 0.85106411, 0.82427217], [0.89369773, 0.93286155, 0.91542046]]
        # Due to padding, the resulting tensor will have a different shape. We verify that the relevant part of the
        # result has the correct values, and the rest holds values less than -1
        reference_value = np.array(
            [[
                [0.7931991, 0.85106411, 0.82427217],
                [0.89369773, 0.93286155, 0.91542046]
            ]] * 2
        )
        npt.assert_array_almost_equal(result[:, 0:2, 0:3], reference_value)
        npt.assert_array_less(result, np.array(
            [[
                [1.01, 1.01, 1.01, -1.],
                [1.01, 1.01, 1.01, -1.],
                [-1., -1., -1., -1.],
                [-1., -1., -1., -1.]
            ]] * 2
        ))
    def test_attentive_pooling(self):
        """Test the full functionality with the same values as before"""
        U_AP = tf.constant(
            [
                [1., 1.],
                [1., 1.]
            ]
        )
        raw_question_rep = tf.constant(
            [[
                [.2, .7],
                [.4, .8],
                [.1, .9],
                [.7, .8]
            ]] * 2
        )
        raw_answer_rep = tf.constant(
            [[
                [.3, .9],
                [.5, .9],
                [.7, .6],
                [.9, .7]
            ]] * 2
        )
        # here the inputs are raw token ids (zero = padding), not masks
        tokens_question = tf.constant(
            [
                [123, 6, 0., 0.]
            ] * 2
        )
        tokens_answer = tf.constant(
            [
                [33, 1, 12, 0.]
            ] * 2
        )
        ap_weights_q, ap_weights_a = attentive_pooling_weights(
            U_AP, raw_question_rep, raw_answer_rep, tokens_question, tokens_answer
        )
        result_repr_q = self.sess.run(weighted_pooling(raw_question_rep, ap_weights_q, tokens_question))
        result_repr_a = self.sess.run(weighted_pooling(raw_answer_rep, ap_weights_a, tokens_answer))
        # hand-computed expectation:
        # tanh(...) = [[0.7931991, 0.85106411, 0.82427217], [0.89369773, 0.93286155, 0.91542046]]
        # max over rows = [[0.85106411, 0.93286155]]
        # max over colums = [[0.89369773, 0.93286155, 0.91542046]]
        # attention question = [ 0.47956203, 0.52043797]
        # attention answer = [ 0.32659447, 0.33963892, 0.33376661]
        # question-rep = [0.304088, 0.752044]
        # answer-rep = [0.501434, 0.79987]
        reference_value_repr_q = np.array(
            [
                [0.304088, 0.752044]
            ] * 2
        )
        reference_value_repr_a = np.array(
            [
                [0.501434, 0.79987]
            ] * 2
        )
        npt.assert_array_almost_equal(result_repr_q, reference_value_repr_q)
        npt.assert_array_almost_equal(result_repr_a, reference_value_repr_a)
|
yeladlouni/AnswerSelection | experiment/qa/model/helper/lw.py | <reponame>yeladlouni/AnswerSelection
import tensorflow as tf
from tensorflow.python.ops import rnn_cell
from experiment import ComponentBase
from experiment.qa.model import weight_variable
from experiment.qa.model.helper.pooling_helper import non_zero_tokens, attention_softmax
class LW(ComponentBase):
    """Learned positional weighting (LW).

    A bidirectional LSTM followed by a dense layer scores every token
    position of an input representation; the scores can be softmax-normalised
    over the non-padding positions to serve as pooling weights.
    """
    def __init__(self, config, config_global, logger):
        super(LW, self).__init__(config, config_global, logger)
        # item_types whose LSTM variables already exist; later calls with the
        # same type re-enter the variable scope with reuse=True
        self.__lstm_history = set()
    @property
    def lw_cell_size(self):
        # size of one direction of the weighting BiLSTM (default: 50)
        return self.config.get('lw_cell_size', 50)
    def positional_weighting(self, raw_representation, indices, item_type, apply_softmax=True):
        """Compute one weight per token position of *raw_representation*.

        :param raw_representation: per-token representation of the item
            (presumably (batch, time, features) -- TODO confirm)
        :param indices: token-id matrix of the item; zeros mark padding
        :param item_type: 'question' selects the Q parameters; anything else
            (e.g. 'answer' or 'shared') selects the A parameters
        :param apply_softmax: if True, normalise the scores over the
            non-padding positions; otherwise return the raw scores
        :return: weighting tensor of shape (batch, time)
        """
        re_use = item_type in self.__lstm_history
        self.__lstm_history.add(item_type)
        if item_type == 'question':
            lstm_cell_fw = self.lstm_cell_weighting_Q_fw
            lstm_cell_bw = self.lstm_cell_weighting_Q_bw
            dense_weight = self.dense_weighting_Q
        else:
            # NOTE(review): 'shared' also takes this branch, i.e. shared mode
            # uses the A parameters for questions as well
            lstm_cell_fw = self.lstm_cell_weighting_A_fw
            lstm_cell_bw = self.lstm_cell_weighting_A_bw
            dense_weight = self.dense_weighting_A
        tensor_non_zero_token = non_zero_tokens(tf.to_float(indices))
        # actual (unpadded) sequence length per batch item
        sequence_length = tf.to_int64(tf.reduce_sum(tensor_non_zero_token, 1))
        with tf.variable_scope('lstm_{}'.format(item_type), reuse=re_use):
            lstm_outputs, _last = tf.nn.bidirectional_dynamic_rnn(
                lstm_cell_fw,
                lstm_cell_bw,
                raw_representation,
                dtype=tf.float32,
                sequence_length=sequence_length
            )
            # concatenate fw/bw outputs on the feature axis (TF<1.0 arg order)
            lstm_output = tf.concat(2, lstm_outputs)
            # apply dense over each individual lstm output
            flat_lstm_output = tf.reshape(lstm_output, [-1, self.lw_cell_size + self.lw_cell_size])
            dense_mul_flat = tf.matmul(flat_lstm_output, dense_weight)
            # back to (batch, time): one scalar score per token position
            h1_layer = tf.reshape(dense_mul_flat, [-1, tf.shape(raw_representation)[1]])
        if apply_softmax:
            return attention_softmax(h1_layer, tensor_non_zero_token)
        else:
            return h1_layer
    def initialize_weights(self):
        """Create the dense and LSTM parameters of both the Q and A networks."""
        cell_size = self.lw_cell_size
        # the dense layer maps the 2*cell_size BiLSTM output to one score
        self.dense_weighting_Q = weight_variable('dense_weighting_Q', [cell_size + cell_size, 1])
        self.dense_weighting_A = weight_variable('dense_weighting_A', [cell_size + cell_size, 1])
        with tf.variable_scope('lstm_cell_weighting_Q_fw'):
            self.lstm_cell_weighting_Q_fw = rnn_cell.BasicLSTMCell(cell_size, state_is_tuple=True)
        with tf.variable_scope('lstm_cell_weighting_Q_bw'):
            self.lstm_cell_weighting_Q_bw = rnn_cell.BasicLSTMCell(cell_size, state_is_tuple=True)
        with tf.variable_scope('lstm_cell_weighting_A_fw'):
            self.lstm_cell_weighting_A_fw = rnn_cell.BasicLSTMCell(cell_size, state_is_tuple=True)
        with tf.variable_scope('lstm_cell_weighting_A_bw'):
            self.lstm_cell_weighting_A_bw = rnn_cell.BasicLSTMCell(cell_size, state_is_tuple=True)
|
yeladlouni/AnswerSelection | experiment/qa/model/cnn_lstm.py | import tensorflow as tf
from experiment.qa.model.cnn import CNNModel
from experiment.qa.model.helper.pooling_helper import maxpool
from experiment.qa.model.lstm import BiLSTMModel
class LSTMCNNModel(CNNModel, BiLSTMModel):
    """QA model that stacks a bidirectional LSTM on top of a CNN encoder.

    Question and answer embeddings are convolved first; the tanh-squashed
    feature maps are then fed through a shared BiLSTM, and the resulting
    sequences are max-pooled into fixed-size representations.
    """
    def __init__(self, config, config_global, logger):
        super(LSTMCNNModel, self).__init__(config, config_global, logger)
    def build(self, data, sess):
        self.build_input(data, sess)
        self.initialize_weights()
        # stage 1: convolutional feature maps
        cnn_question = self.cnn_representation_raw(self.embeddings_question, self.question_length)
        cnn_good = self.cnn_representation_raw(self.embeddings_answer_good, self.answer_length)
        cnn_bad = self.cnn_representation_raw(self.embeddings_answer_bad, self.answer_length)
        # stage 2: BiLSTM over the squashed CNN outputs; LSTM weights are
        # created on the first (question) pass and re-used for the answers
        lstm_question = self.bilstm_representation_raw(
            tf.nn.tanh(cnn_question),
            self.input_question,
            False
        )
        lstm_good = self.bilstm_representation_raw(
            tf.nn.tanh(cnn_good),
            self.input_answer_good,
            True
        )
        lstm_bad = self.bilstm_representation_raw(
            tf.nn.tanh(cnn_bad),
            self.input_answer_bad,
            True
        )
        # stage 3: collapse the time axis and wire up the outputs
        pooled_question = maxpool(lstm_question)
        pooled_good = maxpool(lstm_good)
        pooled_bad = maxpool(lstm_bad)
        self.create_outputs(
            pooled_question,
            pooled_good,
            pooled_question,
            pooled_bad
        )
    def initialize_weights(self):
        """Initialise both the CNN filters and the BiLSTM parameters of the
        representation layer."""
        CNNModel.initialize_weights(self)
        BiLSTMModel.initialize_weights(self)
component = LSTMCNNModel
|
yeladlouni/AnswerSelection | experiment/qa/evaluation/evaluation.py | <reponame>yeladlouni/AnswerSelection<gh_stars>1-10
from __future__ import division
import math
import numpy as np
import experiment
class QAEvaluation(experiment.Evaluation):
    """Evaluation component: scores every answer pool of the test split(s)
    and reports accuracy (P@1), MRR and MAP."""
    def __init__(self, config, config_global, logger):
        super(QAEvaluation, self).__init__(config, config_global, logger)
        # how many candidate answers are scored per sess.run call
        self.batchsize_test = self.config.get('batchsize_test', 50)
    def start(self, model, data, sess):
        """Evaluate *model* on the test split(s) (optionally also the
        validation split) and log rank-based metrics per split."""
        evaluation_data = data.archive.test
        if self.config.get('include_valid', False):
            evaluation_data = [data.archive.valid] + evaluation_data
        for split in evaluation_data:
            self.logger.info("Evaluating {}".format(split.split_name))
            ranks = []
            average_precisions = []
            length_question = self.config_global['question_length']
            length_answer = self.config_global['answer_length']
            for pool in split.qa:
                # the question vector is repeated once per candidate answer
                test_questions = np.array(
                    [data.get_item_vector(pool.question, length_question)] * len(pool.pooled_answers)
                )
                test_answers = np.array(
                    [data.get_item_vector(a, length_answer) for a in pool.pooled_answers]
                )
                scores = []
                # score the candidates in batches of batchsize_test
                for test_batch in range(int(math.ceil(len(test_answers) / float(self.batchsize_test)))):
                    test_batch_indices = self.batchsize_test * test_batch, self.batchsize_test * (test_batch + 1)
                    test_batch_scores, = sess.run([model.predict], feed_dict={
                        model.input_question: test_questions[test_batch_indices[0]:test_batch_indices[1]],
                        model.input_answer_good: test_answers[test_batch_indices[0]:test_batch_indices[1]],
                        model.dropout_keep_prob: 1.0,
                    })
                    scores += test_batch_scores.tolist()
                # rank the candidates by descending score
                sorted_answers = sorted(zip(scores, pool.pooled_answers), key=lambda x: -x[0])
                rank = 0  # 1-based rank of the first correct answer (0 = none found)
                precisions = []
                for i, (score, answer) in enumerate(sorted_answers, start=1):
                    if answer in pool.ground_truth:
                        if rank == 0:
                            rank = i
                        # precision at this recall point (correct-so-far / i)
                        precisions.append((len(precisions) + 1) / float(i))
                ranks.append(rank)
                average_precision = np.mean(precisions)
                average_precisions.append(average_precision)
                self.logger.debug('Rank: {}, AP: {}'.format(rank, average_precision))
            correct_answers = len([a for a in ranks if a == 1])
            accuracy = correct_answers / float(len(ranks))
            # NOTE(review): a pool without any correct answer leaves rank=0 and
            # would break the MRR division -- presumably the readers guarantee
            # at least one ground-truth answer per pool; confirm.
            mrr = np.mean([1 / float(r) for r in ranks])
            map = np.mean(average_precisions)  # NOTE(review): shadows builtin 'map'
            self.logger.info('Correct answers: {}/{}'.format(correct_answers, len(split.qa)))
            self.logger.info('Accuracy: {}'.format(accuracy))
            self.logger.info('MRR: {}'.format(mrr))
            self.logger.info('MAP: {}'.format(map))
component = QAEvaluation
|
yeladlouni/AnswerSelection | experiment/qa/model/lw_cnn.py | from experiment.qa.model.cnn import CNNModel
from experiment.qa.model.helper.lw import LW
from experiment.qa.model.helper.pooling_helper import weighted_pooling
class LWCNNModel(CNNModel, LW):
    """CNN representation model whose pooling step uses learned positional
    weights (LW) instead of plain max pooling.

    A BiLSTM-based weighting network scores every token position of the raw
    CNN feature maps; the final representations are the weighted sums of
    those feature maps.
    """
    def __init__(self, config, config_global, logger):
        super(LWCNNModel, self).__init__(config, config_global, logger)
        # if set, one weighting network is shared by questions and answers
        self.shared_lw = self.config.get('shared_lw', False)
    def build(self, data, sess):
        self.build_input(data, sess)
        self.initialize_weights()
        # raw convolutional feature maps for the question and both answers
        cnn_question = self.cnn_representation_raw(self.embeddings_question, self.question_length)
        cnn_good = self.cnn_representation_raw(self.embeddings_answer_good, self.answer_length)
        cnn_bad = self.cnn_representation_raw(self.embeddings_answer_bad, self.answer_length)
        # choose the weighting network: separate per item kind, or shared
        question_kind = 'shared' if self.shared_lw else 'question'
        answer_kind = 'shared' if self.shared_lw else 'answer'
        self.question_pooling_weight = self.positional_weighting(
            cnn_question, self.input_question, item_type=question_kind
        )
        self.answer_good_pooling_weight = self.positional_weighting(
            cnn_good, self.input_answer_good, item_type=answer_kind
        )
        self.answer_bad_pooling_weight = self.positional_weighting(
            cnn_bad, self.input_answer_bad, item_type=answer_kind
        )
        # collapse the time axis with the learned weights
        pooled_question = weighted_pooling(
            cnn_question, self.question_pooling_weight, self.input_question
        )
        pooled_good = weighted_pooling(
            cnn_good, self.answer_good_pooling_weight, self.input_answer_good
        )
        pooled_bad = weighted_pooling(
            cnn_bad, self.answer_bad_pooling_weight, self.input_answer_bad
        )
        self.create_outputs(
            pooled_question,
            pooled_good,
            pooled_question,
            pooled_bad
        )
    def initialize_weights(self):
        """Initialise both the CNN filters and the LW weighting parameters."""
        CNNModel.initialize_weights(self)
        LW.initialize_weights(self)
component = LWCNNModel
|
yeladlouni/AnswerSelection | experiment/qa/data/semeval/semeval.py | import os
from collections import OrderedDict
import xml.etree.ElementTree as ET
from experiment.qa.data import QAData
from experiment.qa.data.reader import ArchiveReader
from experiment.qa.data.models import Token, Sentence, TextItem, QAPool, Data, Archive
def _get_text_item(text, id):
    """Wrap whitespace-tokenised *text* into a single-sentence TextItem
    tagged with *id* in its metadata.

    The sentence text is rebuilt by joining the tokens, which normalises any
    run of whitespace in the input to single spaces.
    """
    tokens = [Token(word) for word in text.split()]
    normalised = ' '.join(tok.text for tok in tokens)
    sentence = Sentence(normalised, tokens)
    item = TextItem(sentence.text, [sentence])
    item.metadata['id'] = id
    return item
class SemevalCQAReader(ArchiveReader):
    """Reader for the SemEval community-QA XML archive ({split}.xml files)."""
    def read_split(self, name):
        """Parse '{name}.xml' into a Data object.

        Pairs with QArel 'R' (relevant) or 'D' (direct) count as ground
        truth; only questions with at least one such pair become QA pools.
        NOTE(review): the candidate items are built from the *QAquestion*
        text (question-question ranking); 'QAanswer' is read but unused --
        confirm this is intentional for the task variant being used.
        """
        semeval_path = os.path.join(self.archive_path, '{}.xml'.format(name))
        root = ET.parse(semeval_path).getroot()
        datapoints = []
        split_answers = []
        for q in root.findall('Question'):
            qid = q.get('QID')
            question = q.findall('Qtext')[0].text
            question_item = _get_text_item(question, 'question-{}-{}'.format(name, qid))
            ground_truth = []
            candidate_answers = []
            for p in q.findall('QApair'):
                qaid = p.get('QAID')
                qaquestion = p.findall('QAquestion')[0].text
                qaanswer = p.findall('QAanswer')[0].text
                qarel = p.get('QArel')
                answer_item = _get_text_item(qaquestion, 'answer-{}-{}'.format(name, qaid))
                if qarel == 'R' or qarel == 'D':
                    ground_truth.append(answer_item)
                candidate_answers.append(answer_item)
            split_answers += candidate_answers
            if len(ground_truth) > 0:
                datapoints.append(QAPool(question_item, candidate_answers, ground_truth))
        return Data(name, datapoints, split_answers)
    def read(self):
        """Read all three splits and combine them into an Archive."""
        train = self.read_split("train")
        valid = self.read_split("dev")
        test = self.read_split("test")
        questions = [qa.question for qa in (train.qa + valid.qa + test.qa)]
        answers = train.answers + valid.answers + test.answers
        return Archive(train, valid, [test], questions, answers)
class SemevalCQAData(QAData):
    """QAData entry point for the SemEval cQA corpus."""
    def _get_reader(self):
        # archive path is taken from the 'semeval' key of the data config
        return SemevalCQAReader(self.config['semeval'], self.lowercased, self.logger)
component = SemevalCQAData
|
yeladlouni/AnswerSelection | experiment/qa/model/ap_lstm.py | from experiment.qa.model import weight_variable
from experiment.qa.model.helper.pooling_helper import attentive_pooling_weights, weighted_pooling
from experiment.qa.model.lstm import BiLSTMModel
class AttentivePoolingLSTMModel(BiLSTMModel):
    """BiLSTM representation model with attentive pooling: question and
    answer jointly determine each other's pooling weights through the
    learned bilinear parameter U_AP."""
    def build(self, data, sess):
        self.build_input(data, sess)
        self.initialize_weights()
        # BiLSTM weights are created on the question pass (re_use=False)
        # and shared by both answer passes
        raw_representation_question = self.bilstm_representation_raw(
            self.embeddings_question,
            self.input_question,
            re_use_lstm=False
        )
        raw_representation_answer_good = self.bilstm_representation_raw(
            self.embeddings_answer_good,
            self.input_answer_good,
            re_use_lstm=True
        )
        raw_representation_answer_bad = self.bilstm_representation_raw(
            self.embeddings_answer_bad,
            self.input_answer_bad,
            re_use_lstm=True
        )
        # pooling weights are computed jointly per (question, answer) pair,
        # so the question weights differ for the good and the bad answer
        self.question_good_pooling_weight, self.answer_good_pooling_weight = attentive_pooling_weights(
            self.U_AP,
            raw_representation_question,
            raw_representation_answer_good,
            self.input_question,
            self.input_answer_good
        )
        self.question_bad_pooling_weight, self.answer_bad_pooling_weight = attentive_pooling_weights(
            self.U_AP,
            raw_representation_question,
            raw_representation_answer_bad,
            self.input_question,
            self.input_answer_bad
        )
        # alias kept as an attribute, presumably for external inspection --
        # TODO confirm
        self.question_pooling_weight = self.question_good_pooling_weight
        pooled_representation_question_good = weighted_pooling(
            raw_representation_question, self.question_good_pooling_weight, self.input_question
        )
        pooled_representation_answer_good = weighted_pooling(
            raw_representation_answer_good, self.answer_good_pooling_weight, self.input_answer_good
        )
        pooled_representation_question_bad = weighted_pooling(
            raw_representation_question, self.question_bad_pooling_weight, self.input_question
        )
        pooled_representation_answer_bad = weighted_pooling(
            raw_representation_answer_bad, self.answer_bad_pooling_weight, self.input_answer_bad
        )
        self.create_outputs(
            pooled_representation_question_good,
            pooled_representation_answer_good,
            pooled_representation_question_bad,
            pooled_representation_answer_bad
        )
    def initialize_weights(self):
        """Global initialization of weights for the representation layer
        """
        super(AttentivePoolingLSTMModel, self).initialize_weights()
        # bilinear attention parameter; the BiLSTM output dimensionality is
        # 2 * cell_size (forward + backward)
        self.U_AP = weight_variable('U_AP', [self.lstm_cell_size * 2, self.lstm_cell_size * 2])
component = AttentivePoolingLSTMModel
|
yeladlouni/AnswerSelection | experiment/qa/model/lstm_lstm.py | <filename>experiment/qa/model/lstm_lstm.py<gh_stars>1-10
from experiment.qa.model.helper.pooling_helper import maxpool
from experiment.qa.model.lstm import BiLSTMModel
class LSTMLSTMModel(BiLSTMModel):
    """Representation model with two stacked BiLSTM layers ('lstm_1' and
    'lstm_2'); the second layer's output sequence is max-pooled into the
    final question/answer representations."""
    def __init__(self, config, config_global, logger):
        super(LSTMLSTMModel, self).__init__(config, config_global, logger)
    def build(self, data, sess):
        self.build_input(data, sess)
        self.initialize_weights()
        # layer 1: weights are created on the question pass (re_use=False)
        # and shared by the answer passes
        lstm_representation_question_1 = self.bilstm_representation_raw(
            self.embeddings_question,
            self.input_question,
            re_use_lstm=False,
            name='lstm_1'
        )
        lstm_representation_answer_good_1 = self.bilstm_representation_raw(
            self.embeddings_answer_good,
            self.input_answer_good,
            re_use_lstm=True,
            name='lstm_1'
        )
        lstm_representation_answer_bad_1 = self.bilstm_representation_raw(
            self.embeddings_answer_bad,
            self.input_answer_bad,
            re_use_lstm=True,
            name='lstm_1'
        )
        # layer 2: a second BiLSTM under the separate scope 'lstm_2', again
        # created once and re-used for both answers
        lstm_representation_question_2 = self.bilstm_representation_raw(
            lstm_representation_question_1,
            self.input_question,
            re_use_lstm=False,
            name='lstm_2'
        )
        lstm_representation_answer_good_2 = self.bilstm_representation_raw(
            lstm_representation_answer_good_1,
            self.input_answer_good,
            re_use_lstm=True,
            name='lstm_2'
        )
        lstm_representation_answer_bad_2 = self.bilstm_representation_raw(
            lstm_representation_answer_bad_1,
            self.input_answer_bad,
            re_use_lstm=True,
            name='lstm_2'
        )
        # collapse the time axis of the second layer's outputs
        pooled_representation_question = maxpool(lstm_representation_question_2)
        pooled_representation_answer_good = maxpool(lstm_representation_answer_good_2)
        pooled_representation_answer_bad = maxpool(lstm_representation_answer_bad_2)
        self.create_outputs(
            pooled_representation_question,
            pooled_representation_answer_good,
            pooled_representation_question,
            pooled_representation_answer_bad
        )
component = LSTMLSTMModel
|
yeladlouni/AnswerSelection | experiment/qa/data/wikiqa/wikiqa.py | import os
from collections import OrderedDict
from experiment.qa.data import QAData
from experiment.qa.data.reader import TSVArchiveReader
from experiment.qa.data.models import Token, Sentence, TextItem, QAPool, Data, Archive
def _get_text_item(text, id):
    """Wrap whitespace-tokenised *text* into a single-sentence TextItem
    tagged with *id* in its metadata.

    NOTE(review): duplicated in experiment.qa.data.semeval.semeval -- a
    shared helper module would avoid the drift risk.
    """
    question_tokens = [Token(t) for t in text.split()]
    question_sentence = Sentence(' '.join([t.text for t in question_tokens]), question_tokens)
    ti = TextItem(question_sentence.text, [question_sentence])
    ti.metadata['id'] = id
    return ti
class WikiQAReader(TSVArchiveReader):
    """Reader for the WikiQA corpus (WikiQA-{split}.txt TSV files)."""
    def read_split(self, name):
        """Read one split ('train', 'dev' or 'test') into a Data object.

        Each TSV row is (question, answer, label).  Rows are grouped by
        their question text in order of first appearance, and only questions
        with at least one correct answer (label '1') become QA pools.
        """
        wikiqa_path = os.path.join(self.archive_path, 'WikiQA-{}.txt'.format(name))
        data = self.read_tsv(wikiqa_path)
        questions_answers = OrderedDict()
        for i, line in enumerate(data):
            question_line = line[0]
            answer_line = line[1]
            label_line = line[2]
            if question_line not in questions_answers:
                questions_answers[question_line] = []
            questions_answers[question_line].append((answer_line, label_line))
        datapoints = []
        split_answers = []
        for i, (question, answers) in enumerate(questions_answers.items()):
            question_item = _get_text_item(question, 'question-{}-{}'.format(name, i))
            ground_truth = []
            candidate_answers = []
            for j, (answer, label) in enumerate(answers):
                # NOTE(review): the answer id encodes only j (the position
                # within this question's answer list), so ids collide across
                # questions of the same split -- confirm whether
                # metadata['id'] must be unique before changing the format.
                answer_item = _get_text_item(answer, 'answer-{}-{}'.format(name, j))
                if label == '1':
                    ground_truth.append(answer_item)
                candidate_answers.append(answer_item)
            split_answers += candidate_answers
            if len(ground_truth) > 0:
                datapoints.append(QAPool(question_item, candidate_answers, ground_truth))
        return Data(name, datapoints, split_answers)
    def read(self):
        """Read all three splits and combine them into an Archive."""
        train = self.read_split("train")
        valid = self.read_split("dev")
        test = self.read_split("test")
        questions = [qa.question for qa in (train.qa + valid.qa + test.qa)]
        answers = train.answers + valid.answers + test.answers
        return Archive(train, valid, [test], questions, answers)
class WikiQAData(QAData):
    """QAData entry point for the WikiQA corpus."""
    def _get_reader(self):
        # archive path is taken from the 'wikiqa' key of the data config
        return WikiQAReader(self.config['wikiqa'], self.lowercased, self.logger)
component = WikiQAData
|
yeladlouni/AnswerSelection | experiment/qa/train/training.py | <filename>experiment/qa/train/training.py
import math
import numpy as np
from experiment.qa.train import QABatchedTraining
class InsuranceQATrainingSimple(QABatchedTraining):
    """This is a simple training method that runs over the training data in a linear fashion, just like in keras.

    Training triples (question, good answer, bad answer) are generated once
    during the first epoch and re-shuffled at the start of every epoch.
    """
    def __init__(self, config, config_global, logger):
        super(InsuranceQATrainingSimple, self).__init__(config, config_global, logger)
        self._train_questions, self._train_answers_good, self._train_answers_bad = [], [], []
        self.batch_i = 0  # index of the next batch within the current epoch
        self.epoch_random_indices = []  # shuffled example indices for the epoch
    def prepare_next_epoch(self, model, data, sess, epoch):
        """Prepares the next epoch, especially the batches"""
        super(InsuranceQATrainingSimple, self).prepare_next_epoch(model, data, sess, epoch)
        self.batch_i = 0
        # training examples are prepared in the first epoch
        if len(self._train_questions) == 0:
            self.logger.debug('Preparing training examples')
            self._train_questions, self._train_answers_good, self._train_answers_bad = data.get_items(
                data.archive.train.qa,
                self.negative_answers
            )
        # shuffle the indices of each batch
        self.epoch_random_indices = np.random.permutation(len(self._train_questions))
    def get_n_batches(self):
        """Return the number of batches per epoch.

        Forces true division and casts to int so the count is correct and of
        integer type on both Python 2 and 3 (int/int floors before ceiling
        on Python 2, and math.ceil returns a float there).  This matches the
        batch-count computation used in training_dynamic.py.
        """
        return int(math.ceil(len(self._train_questions) / float(self.batchsize)))
    def get_next_batch(self, model, data, sess):
        """Return the training data for the next batch
        :return: questions, good answers, bad answers
        :rtype: list, list, list
        """
        # slice this batch's shuffled indices and gather the triples
        indices = self.epoch_random_indices[self.batch_i * self.batchsize: (self.batch_i + 1) * self.batchsize]
        batch_questions = [self._train_questions[i] for i in indices]
        batch_answers_good = [self._train_answers_good[i] for i in indices]
        batch_answers_bad = [self._train_answers_bad[i] for i in indices]
        self.batch_i += 1
        return batch_questions, batch_answers_good, batch_answers_bad
component = InsuranceQATrainingSimple
|
yeladlouni/AnswerSelection | scripts/run_hyperparameter_optimization.py | import importlib
import itertools
import json
import logging
import sys
import click
import numpy as np
import tensorflow as tf
from experiment.util import replace_dict_values
from experiment.config import load_config
# Allows the gpu to be used in parallel: allocate GPU memory on demand
# instead of grabbing the whole device up front.
sess_config = tf.ConfigProto()
sess_config.gpu_options.allow_growth = True
@click.command()
@click.argument('config_file')
def run(config_file):
    """This program allows to perform hyperparameter optimization with grid search and cross validation.

    Reads the experiment configuration from *config_file*, builds the data
    component once, then evaluates every hyperparameter combination of the
    grid with n-fold cross validation and logs the best configuration.
    """
    config = load_config(config_file)
    # to reproduce the results
    np.random.seed(1)
    tf.set_random_seed(1)
    # We are now fetching all relevant modules. It is strictly required that these module contain a variable named
    # 'component' that points to a class which inherits from experiment.Data, experiment.Experiment, experiment.Trainer
    # or experiment.Evaluator
    data_module = config['data-module']
    model_module = config['model-module']
    training_module = config['training-module']
    # The modules are now dynamically loaded
    DataClass = importlib.import_module(data_module).component
    ModelClass = importlib.import_module(model_module).component
    TrainingClass = importlib.import_module(training_module).component
    # setup a logger
    logger = logging.getLogger('experiment')
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    handler_stdout = logging.StreamHandler(sys.stdout)
    handler_stdout.setLevel(config['logger']['level'])
    handler_stdout.setFormatter(formatter)
    logger.addHandler(handler_stdout)
    # optionally mirror the log to a file
    if 'path' in config['logger']:
        handler_file = logging.FileHandler(config['logger']['path'])
        handler_file.setLevel(config['logger']['level'])
        handler_file.setFormatter(formatter)
        logger.addHandler(handler_file)
    logger.setLevel(config['logger']['level'])
    # We then wire together all the modules
    config_global = config['global']
    config_optimization = config['optimization']
    logger.info('Setting up the data')
    data_complete = DataClass(config['data'], config_global, logger)
    data_complete.setup()
    # Cross fold optimization
    # We first need to create all the configuration choices for grid search
    optimization_parameters = config_optimization['parameters']
    grid = list(itertools.product(*config_optimization['parameters'].values()))
    logger.info('We have {} different hyperparameter combinations'.format(len(grid)))
    # We now go over all choices and perform cross-fold validation
    avg_scores = []
    for configuration_values_overwrite in grid:
        # pair every parameter name with its value for this grid point and
        # overwrite those entries in a copy of the configuration
        parameter_choices = list(zip(optimization_parameters.keys(), configuration_values_overwrite))
        config_run = replace_dict_values(config, parameter_choices)
        logger.info('-' * 40)
        logger.info('Checking configuration {}'.format(json.dumps(parameter_choices)))
        # Run each fold
        n_folds = config_optimization['folds']
        scores = []
        for fold_i in range(n_folds):
            logger.info('Starting fold {}/{}'.format(fold_i + 1, n_folds))
            with tf.Session(config=sess_config) as sess:
                training = TrainingClass(config_run['training'], config_global, logger)
                model = ModelClass(config_run['model'], config_global, logger)
                model.build(data_complete, sess)
                data_fold = data_complete.get_fold_data(fold_i, n_folds)
                best_epoch, best_score = training.start(model, data_fold, sess)
                scores.append(best_score)
                logger.info('Fold {}/{} performance: {}'.format(fold_i + 1, n_folds, best_score))
                logger.info('-' * 20)
                training.remove_checkpoints()
            # fresh graph per fold so weights do not leak between runs
            tf.reset_default_graph()
        logger.info('Training ended')
        logger.info('All scores: {}'.format(json.dumps(scores)))
        avg_score = np.mean(scores)
        logger.info('Avg score for current configuration: {}'.format(avg_score))
        avg_scores.append(avg_score)
    # NOTE(review): argmax assumes the score is a maximised metric -- confirm
    best_configuration = grid[np.argmax(avg_scores)]
    logger.info('Grid search completed')
    logger.info('Best configuration: {} (score={})'.format(best_configuration, max(avg_scores)))
    logger.info('-')
    logger.info('All configurations: {}'.format(json.dumps(grid)))
    logger.info('All scores: {}'.format(avg_scores))
    logger.info('DONE')
if __name__ == '__main__':
    run()
|
yeladlouni/AnswerSelection | experiment/qa/model/cnn.py | <filename>experiment/qa/model/cnn.py<gh_stars>1-10
import tensorflow as tf
from experiment.qa.model import QAModel, bias_variable, weight_variable
from experiment.qa.model.helper.pooling_helper import maxpool_tanh
class CNNModel(QAModel):
    """QA model that encodes questions and answers with one convolution
    layer, squashed with tanh and max-pooled into fixed-size vectors."""
    def __init__(self, config, config_global, logger):
        super(CNNModel, self).__init__(config, config_global, logger)
        self.n_filters = self.config['filters']  # number of convolution filters
        self.window_size = self.config['filter_size']  # token window per filter
    def build(self, data, sess):
        self.build_input(data, sess)
        # we initialize the weights of the representation layers globally so that they can be applied to both, questions
        # and (good/bad)answers. This is an important part, otherwise results would be much worse.
        self.initialize_weights()
        representation_question = maxpool_tanh(
            self.cnn_representation_raw(
                self.embeddings_question,
                self.question_length
            ),
            self.input_question
        )
        representation_answer_good = maxpool_tanh(
            self.cnn_representation_raw(
                self.embeddings_answer_good,
                self.answer_length
            ),
            self.input_answer_good
        )
        representation_answer_bad = maxpool_tanh(
            self.cnn_representation_raw(
                self.embeddings_answer_bad,
                self.answer_length
            ),
            self.input_answer_bad
        )
        # the question representation is shared by the good and bad pair
        self.create_outputs(
            representation_question,
            representation_answer_good,
            representation_question,
            representation_answer_bad
        )
    def initialize_weights(self):
        """Global initialization of weights for the representation layer
        """
        # filter shape: (window over tokens, embedding size, 1 in-channel,
        # n_filters out-channels)
        self.W_conv1 = weight_variable('W_conv', [self.window_size, self.embedding_size, 1, self.n_filters])
        self.b_conv1 = bias_variable('b_conv', [self.n_filters])
    def cnn_representation_raw(self, item, sequence_length):
        """Creates a representation graph which retrieves a text item (represented by its word embeddings) and returns
        a vector-representation

        :param item: the text item. Can be question or (good/bad) answer
        :param sequence_length: maximum length of the text item
        :return: representation tensor
        """
        # we need to add another dimension, because cnn works on 3d data only
        cnn_input = tf.expand_dims(item, -1)
        convoluted = tf.nn.bias_add(
            tf.nn.conv2d(
                cnn_input,
                self.W_conv1,
                # the stride along the embedding axis equals the embedding
                # size, so each filter emits one value per token position
                # (presumably the intent behind SAME padding here -- confirm)
                strides=[1, 1, self.embedding_size, 1],
                padding='SAME'
            ),
            self.b_conv1
        )
        # back to (batch, time, n_filters)
        return tf.reshape(convoluted, [-1, sequence_length, self.n_filters])
component = CNNModel
|
yeladlouni/AnswerSelection | experiment/qa/train/training_dynamic.py | <filename>experiment/qa/train/training_dynamic.py
# coding=utf-8
import math
import numpy as np
from experiment.qa.train import QABatchedTraining
class InsuranceQATrainingDynamic(QABatchedTraining):
    """Training method replicating recent work on QA (esp. insuranceqa).

    For each question we randomly sample a list of N (e.g. 50) negative answers
    and choose the negative answer the current model scores most similar to the
    question. That hardest negative is then used to compute the training loss
    and update gradients.
    """

    def __init__(self, config, config_global, logger):
        super(InsuranceQATrainingDynamic, self).__init__(config, config_global, logger)
        self.batch_i = 0  # index of the next batch within the current epoch
        # Per-question tuples prepared once and cached; negatives are sampled
        # freshly at run-time for every batch.
        self.examples_incomplete = []

    def prepare_next_epoch(self, model, data, sess, epoch):
        """Reset the batch pointer, build the cached example list, and shuffle it."""
        super(InsuranceQATrainingDynamic, self).prepare_next_epoch(model, data, sess, epoch)
        self.batch_i = 0

        # Questions and good answers are vectorized once up-front. Bad answers
        # will be pooled at run-time (see get_next_batch).
        if len(self.examples_incomplete) == 0:
            self.logger.debug('Preparing training examples (incomplete)')
            self.examples_incomplete = []
            for pool in data.archive.train.qa:
                question_vec = data.get_item_vector(pool.question, self.length_question)
                ground_truth_vecs = [data.get_item_vector(ga, self.length_answer) for ga in pool.ground_truth]
                negative_answers_pool = data.archive.train.answers
                self.examples_incomplete.append(
                    (question_vec, ground_truth_vecs, negative_answers_pool, pool.ground_truth)
                )

        # Shuffle the example order for this epoch.
        self.epoch_random_indices = np.random.permutation(len(self.examples_incomplete))

    def get_n_batches(self):
        """Number of batches in one epoch (the last batch may be smaller)."""
        return int(math.ceil(len(self.examples_incomplete) / float(self.batchsize)))

    def get_next_batch(self, model, data, sess):
        """We calculate the training examples as follows: for each of the questions and associated good answers, we
        are fetching 50 random negative answers. For each of those answers, we calculate the similarity to the
        question using the model prediction. We use the negative answer with the highest similarity to construct
        triples for training.

        :return: questions, good answers, bad answers
        :rtype: list, list, list
        """
        batch_questions = []
        batch_answers_good = []
        batch_answers_bad = []

        indices = self.epoch_random_indices[self.batch_i * self.batchsize: (self.batch_i + 1) * self.batchsize]
        incomplete_data_epoch = [self.examples_incomplete[i] for i in indices]

        # We create all the data that is required to run the predictions for this batch. We bundle all predictions
        # because of computational efficiency.
        data_info = []  # (question, ground_truth, answers_bad, prediction_start_index)
        prediction_questions = []
        prediction_answers_bad = []
        prediction_results = []
        for question, ground_truth_vecs, pool, ground_truth in incomplete_data_epoch:
            begin_index = len(prediction_questions)
            # np.random.random_integers was deprecated and later removed from
            # NumPy; randint with an exclusive upper bound draws the same
            # inclusive 0..len(pool)-1 range.
            negative_answer_indices = np.random.randint(0, len(pool), self.negative_answers)
            answers_bad = [pool[i] for i in negative_answer_indices if pool[i] not in ground_truth]
            if len(answers_bad) > 0:
                answers_bad_vecs = [data.get_item_vector(a, self.length_answer) for a in answers_bad]
                prediction_questions += [question] * len(answers_bad)
                prediction_answers_bad += answers_bad_vecs
                data_info.append((question, ground_truth_vecs, answers_bad_vecs, begin_index))

        if self.negative_answers > 1:
            # We now run all the predictions in batched mode, which is significantly faster than running a
            # prediction for each individual question.
            for predict_batch in range(int(math.ceil(len(prediction_questions) / float(self.batchsize_valid)))):
                batch_start_idx = predict_batch * self.batchsize_valid
                predict_batch_questions = prediction_questions[batch_start_idx: batch_start_idx + self.batchsize_valid]
                predict_batch_answers = prediction_answers_bad[batch_start_idx: batch_start_idx + self.batchsize_valid]
                predictions, = sess.run([model.predict], feed_dict={
                    model.input_question: predict_batch_questions,
                    model.input_answer_good: predict_batch_answers,
                    model.dropout_keep_prob: 1.0
                })
                prediction_results += list(predictions)
        else:
            # Only one negative per question: no ranking needed, any constant works.
            prediction_results = [1.0] * len(data_info)

        # Now we process all the predictions and generate the batch data.
        for question, ground_truth_vecs, answers_bad, prediction_start_index in data_info:
            predictions_question = prediction_results[prediction_start_index:prediction_start_index + len(answers_bad)]
            most_similar_answer_bad_vector = answers_bad[np.argmax(predictions_question)]
            batch_questions.append(question)
            batch_answers_bad.append(most_similar_answer_bad_vector)
            # We choose only one of the good answers to not over-train on the questions which have more good
            # answers than others.
            batch_answers_good.append(ground_truth_vecs[self.state.recorded_epochs % len(ground_truth_vecs)])

        self.batch_i += 1
        return batch_questions, batch_answers_good, batch_answers_bad


component = InsuranceQATrainingDynamic
|
yeladlouni/AnswerSelection | experiment/qa/model/lw_lstm.py | <filename>experiment/qa/model/lw_lstm.py
from experiment.qa.model.helper.lw import LW
from experiment.qa.model.helper.pooling_helper import weighted_pooling
from experiment.qa.model.lstm import BiLSTMModel
class LWLSTMModel(BiLSTMModel, LW):
    """BiLSTM answer-selection model whose hidden states are pooled with learned
    positional (LW) weights instead of plain max/average pooling."""

    def __init__(self, config, config_global, logger):
        super(LWLSTMModel, self).__init__(config, config_global, logger)
        # When True, question and answers share one positional-weighting network.
        self.shared_lw = self.config.get('shared_lw', False)

    def build(self, data, sess):
        """Assemble the TensorFlow graph: inputs, BiLSTM states, LW pooling, outputs."""
        self.build_input(data, sess)
        self.initialize_weights()

        # Raw BiLSTM hidden states. The first call creates the LSTM variables;
        # the later calls (reuse flag True) share them.
        states_question = self.bilstm_representation_raw(
            self.embeddings_question, self.input_question, False
        )
        states_good = self.bilstm_representation_raw(
            self.embeddings_answer_good, self.input_answer_good, True
        )
        states_bad = self.bilstm_representation_raw(
            self.embeddings_answer_bad, self.input_answer_bad, True
        )

        # Positional pooling weights, optionally shared between item types.
        question_type = 'shared' if self.shared_lw else 'question'
        answer_type = 'shared' if self.shared_lw else 'answer'
        self.question_pooling_weight = self.positional_weighting(
            states_question, self.input_question, item_type=question_type
        )
        self.answer_good_pooling_weight = self.positional_weighting(
            states_good, self.input_answer_good, item_type=answer_type
        )
        self.answer_bad_pooling_weight = self.positional_weighting(
            states_bad, self.input_answer_bad, item_type=answer_type
        )

        # Collapse the per-position states into fixed-size representations.
        pooled_question = weighted_pooling(
            states_question, self.question_pooling_weight, self.input_question
        )
        pooled_good = weighted_pooling(
            states_good, self.answer_good_pooling_weight, self.input_answer_good
        )
        pooled_bad = weighted_pooling(
            states_bad, self.answer_bad_pooling_weight, self.input_answer_bad
        )

        # The question representation is paired once with the good answer and
        # once with the bad answer.
        self.create_outputs(pooled_question, pooled_good, pooled_question, pooled_bad)

    def initialize_weights(self):
        """Initialize both the BiLSTM weights and the positional-weighting (LW) weights."""
        BiLSTMModel.initialize_weights(self)
        LW.initialize_weights(self)


component = LWLSTMModel
|
ambeimers/cs257 | webapp/convert.py | <gh_stars>1-10
'''
<NAME> and <NAME>
Converts the dataset from Kaggle into a smaller one to be converted into a postgresql DB
'''
import csv
import sys
csv_folder = 'archive/'
def main():
    """Convert the Kaggle source CSVs into artists/songs/songs_artists CSVs."""
    import ast  # local import: used to safely parse the artist-list column

    try:
        artists_file = open(csv_folder + "data_by_artist.csv", newline='')
        songs_file = open(csv_folder + "data.csv", newline='')
    except OSError:  # narrowed from a bare except, which also swallowed SystemExit etc.
        print("Invalid Filepath", file=sys.stderr)
        sys.exit()

    artist_id = {}
    # Row lists that will be dumped into CSVs with generate_csv().
    artists_data = []
    songs_data = []
    songs_artists_data = []

    artists_reader = csv.reader(artists_file)
    songs_reader = csv.reader(songs_file)

    # Parse artists; each gets a sequential surrogate id.
    next(artists_reader)  # skip the header row
    for row in artists_reader:
        record_id = len(artists_data) + 1  # renamed from `id` (shadowed the builtin)
        artist_name = row[0]
        acousticness = row[1]
        danceability = row[2]
        duration = row[3]
        energy = row[4]
        loudness = row[7]
        speechiness = row[8]
        tempo = row[9]
        valence = row[10]
        popularity = row[11]
        artist_id[artist_name] = record_id
        artists_data.append([record_id, artist_name, acousticness, danceability, duration, energy, loudness, speechiness, tempo, valence, popularity])

    # Parse songs and build the songs<->artists join table simultaneously.
    next(songs_reader)  # skip the header row
    for row in songs_reader:
        record_id = len(songs_data) + 1
        spotify_id = row[6]
        song_name = row[12]
        acousticness = row[0]
        danceability = row[2]
        duration = row[3]
        energy = row[4]
        loudness = row[10]
        speechiness = row[15]
        tempo = row[16]
        valence = row[17]
        popularity = row[13]
        year = row[18]
        songs_data.append([record_id, spotify_id, song_name, acousticness, danceability, duration, energy, loudness, speechiness, tempo, valence, popularity, year])
        # The artists column holds a Python list literal, e.g. "['A', 'B']".
        # ast.literal_eval parses it without eval()'s arbitrary-code-execution risk.
        artists = ast.literal_eval(row[1])
        for artist in artists:
            if artist in artist_id:
                songs_artists_data.append([record_id, artist_id[artist]])

    # The input files were previously never closed.
    artists_file.close()
    songs_file.close()

    generate_csv(artists_data, "artists")
    generate_csv(songs_data, "songs")
    generate_csv(songs_artists_data, "songs_artists")
def generate_csv(data, file_name):
    '''Write each entry of `data` as one row of "<file_name>.csv".'''
    target = file_name + ".csv"
    with open(target, 'w', newline='') as out:
        csv.writer(out).writerows(data)
# Run the conversion only when executed as a script (not when imported).
if __name__ == '__main__':
    main()
|
ambeimers/cs257 | database/olympics.py | '''
Author: <NAME>, 28 Jan, 2021
This program does one of five things based on command-line input:
Prints a version statement
Prints a usage statement for "python3 olympics.py -h" (or --help). You may use argparse or not for command-line parsing and usage statement printing.
Lists the names of all the athletes from a specified NOC.
Lists all the NOCs and the number of gold medals they have won, in decreasing order of the number of gold medals.
Lists top 5 athletes and how many medals they have won in a given sport.
'''
import psycopg2
import argparse
def get_parsed_arguments():
    """Build the command-line parser and return the parsed argument namespace."""
    parser = argparse.ArgumentParser(description='Queries olympics data by given parameters.')
    # The three query flags all take zero or more words and default to ''.
    for flag, short in (('--athletes', '-a'), ('--medals', '-m'), ('--sport', '-s')):
        parser.add_argument(flag, short, default='', nargs='*')
    parser.add_argument('--version', '-v', action='version',
                        version='%(prog)s 1.0, <NAME>, CS 257, January 28, 2021')
    return parser.parse_args()
def query_athletes(search_string, cursor):
    """Print the distinct athlete names recorded for the given NOC code."""
    query = '''SELECT DISTINCT athlete_name FROM medals WHERE noc = %s'''
    try:
        cursor.execute(query, (search_string,))
    except Exception as error:
        print(error)
        exit()
    print('===== Athletes from NOC {0} ====='.format(search_string))
    for record in cursor:
        print(record[0])
    print()
def query_medals(cursor):
    """Print every NOC and its gold-medal count, from most to fewest golds."""
    search_string = 'Gold'
    query = 'SELECT COUNT(medal), noc FROM medals WHERE medal = %s GROUP BY noc ORDER BY COUNT(medal) DESC'
    try:
        cursor.execute(query, (search_string,))
    except Exception as e:
        print(e)
        exit()
    # Removed a leftover debug line that echoed cursor.query to stdout, and a
    # no-op .format(search_string) on a header with no placeholder.
    print('===== Medals by NOC (DESC) =====')
    for row in cursor:
        print(row[0], row[1])
    print()
def query_sport(search_string, cursor):
    """Print the five athletes with the most medals in the given sport."""
    query = 'SELECT athlete_name, COUNT(medal) FROM medals WHERE sport = %s GROUP BY athlete_name ORDER BY COUNT(medal) DESC LIMIT 5'
    try:
        cursor.execute(query, (search_string,))
    except Exception as e:
        print(e)
        exit()
    # Removed a no-op .format(search_string) call on a header without a placeholder.
    print('===== Top 5 Athletes by Sport and Number of Medals Won =====')
    for row in cursor:
        print(row[0], row[1])
    print()
def main():
    """Dispatch the command-line query flags to the matching database query."""
    # Connect to the database using credentials kept in the local config module.
    from config import password
    from config import database
    from config import user
    try:
        connection = psycopg2.connect(database=database, user=user, password=password)
    except Exception as e:
        print(e)
        exit()
    # Create cursor
    cursor = connection.cursor()
    # Get arguments from command line
    arguments = get_parsed_arguments()
    # Query the database based on which flags were supplied.
    if arguments.athletes:
        # nargs='*' splits multi-word input into a list; rejoin into one string.
        noc = ' '.join(arguments.athletes)
        query_athletes(noc, cursor)
    if arguments.medals is not None:
        # NOTE(review): --medals defaults to '' (not None), so this condition is
        # always true and the medals report runs on every invocation — confirm
        # whether the default should be None instead.
        query_medals(cursor)
    if arguments.sport:
        sport = ' '.join(arguments.sport)
        query_sport(sport, cursor)
    # Close database connection
    connection.close()
# Run the queries only when executed as a script (not when imported).
if __name__ == '__main__':
    main()
|
ambeimers/cs257 | books/books.py | """<NAME> and <NAME>, CS 257 : books.py
January 22, 2021
This program sorts a file by parameters from the command-line."""
import argparse
import csv
def get_parsed_arguments():
    """Build the command-line parser for the book search and parse sys.argv."""
    parser = argparse.ArgumentParser(description='Sorts books by provided parameters.')
    parser.add_argument('file')
    # Text filters: zero or more words, empty string when omitted.
    for name, short in (('--author', '-a'), ('--title', '-t')):
        parser.add_argument(name, short, default='', nargs='*')
    # Year bounds: lists so ' '.join-style handling stays uniform downstream.
    for name, short, fallback in (('--start_year', '-sy', ['0']), ('--end_year', '-ey', ['2021'])):
        parser.add_argument(name, short, default=fallback, nargs='*')
    parser.add_argument('--version', '-v', action='version',
                        version='%(prog)s 1.0, <NAME> and <NAME>, CS 257, January 22, 2021')
    return parser.parse_args()
def filter_by_author(filter_from_list, author):
    """Return books whose author field contains `author` (case-insensitive).

    Input order is preserved and duplicate rows are dropped. A membership set
    of row tuples replaces the old per-append list.count() scan, which made
    the function O(n^2).
    """
    author_index = 2
    needle = author.lower()
    matches = []
    seen = set()
    for book in filter_from_list:
        if needle in book[author_index].lower():
            key = tuple(book)
            if key not in seen:
                seen.add(key)
                matches.append(book)
    return matches
def filter_by_title(filter_from_list, title):
    """Return books whose title field contains `title` (case-insensitive).

    Input order is preserved and duplicate rows are dropped. A membership set
    of row tuples replaces the old per-append list.count() scan, which made
    the function O(n^2).
    """
    title_index = 0
    needle = title.lower()
    matches = []
    seen = set()
    for book in filter_from_list:
        if needle in book[title_index].lower():
            key = tuple(book)
            if key not in seen:
                seen.add(key)
                matches.append(book)
    return matches
def filter_by_year(filter_from_list, start_year, end_year):
    """Return books whose year falls within [start_year, end_year], de-duplicated.

    Years are compared numerically; the old code compared the raw strings
    lexicographically, which misorders years of different lengths (e.g.
    '999' > '1000'). Empty bound lists (flag given with no value) fall back to
    the defaults. Stray debug prints and commented-out code were removed.
    """
    year_index = 1
    if not start_year:
        start_year = ['0']
    if not end_year:
        end_year = ['2021']
    low = int(start_year[0])
    high = int(end_year[0])
    matches = []
    seen = set()
    for book in filter_from_list:
        if low <= int(book[year_index]) <= high:
            key = tuple(book)
            if key not in seen:
                seen.add(key)
                matches.append(book)
    return matches
def print_list(list_to_print):
    """Print one 'title, year, author' line per book entry."""
    for title, year, author in list_to_print:
        print(title + ", " + year + ", " + author)
def main():
    """Load the book list and print it filtered by the command-line arguments."""
    arguments = get_parsed_arguments()
    # nargs='*' yields word lists; rejoin them into single search strings.
    author = ' '.join(arguments.author)
    title = ' '.join(arguments.title)

    book_list = []
    # Use the file named on the command line; the old code declared a positional
    # `file` argument but then hard-coded 'books.csv' here.
    with open(arguments.file, newline='') as csvfile:
        for row in csv.reader(csvfile):
            book_list.append([row[0], row[1], row[2]])

    filtered = book_list.copy()
    if author != '':
        filtered = filter_by_author(filtered, author)
    if title != '':
        filtered = filter_by_title(filtered, title)
    # The defaults are the LISTS ['0'] and ['2021']; the old code compared them
    # to the strings '0'/'2021', so the condition was always true. A stray
    # debug print of the year bounds was also removed.
    if arguments.start_year != ['0'] or arguments.end_year != ['2021']:
        filtered = filter_by_year(filtered, arguments.start_year, arguments.end_year)
    print_list(filtered)
# Run the search only when executed as a script (not when imported).
if __name__ == '__main__':
    main()
|
ambeimers/cs257 | database/config.py | # config.py
# Database connection settings imported by olympics.py.
user = 'beimersa'       # postgres role to connect as
password = ' '          # placeholder — presumably unused for local auth; verify before deploying
database = 'olympics'   # target database name
|
ambeimers/cs257 | webapp/api.py | '''
<NAME> and <NAME>
'''
import sys
import flask
import json
import config
import psycopg2
#sensitive infromation about the database to access
from config import password
from config import database
from config import user
# Blueprint grouping the REST API routes defined below.
api = flask.Blueprint('api', __name__)
@api.route('/year/<year>')
def get_year(year):
    '''Return the aggregate audio features stored for one year as a JSON object.'''
    parameter = (str(year),)
    query = '''
    SELECT year, acousticness, danceability, duration, energy, loudness, speechiness, tempo, valence, popularity
    FROM years
    WHERE
        year = %s
    '''
    connection = get_connection(database, user, password)
    row = get_query(query, parameter, connection)[0]
    # Keys mirror the SELECT column order, so zip pairs them up directly.
    fields = ('year', 'acousticness', 'danceability', 'duration', 'energy',
              'loudness', 'speechiness', 'tempo', 'valence', 'popularity')
    payload = dict(zip(fields, row))
    payload['popularity'] = int(payload['popularity'])
    return json.dumps(payload)
@api.route('/artist/<artist_id>')
def get_artist(artist_id):
    '''Return the aggregate audio features for one artist as a JSON object.'''
    parameter = (str(artist_id),)
    query = '''
    SELECT artist_name, acousticness, danceability, duration, energy, loudness, speechiness, tempo, valence, popularity
    FROM artists
    WHERE
        id = %s
    '''
    connection = get_connection(database, user, password)
    row = get_query(query, parameter, connection)[0]
    # Keys mirror the SELECT column order, so zip pairs them up directly.
    fields = ('artist_name', 'acousticness', 'danceability', 'duration', 'energy',
              'loudness', 'speechiness', 'tempo', 'valence', 'popularity')
    payload = dict(zip(fields, row))
    payload['popularity'] = int(payload['popularity'])
    return json.dumps(payload)
@api.route('/song/<song_id>')
def get_song(song_id):
    '''Return the audio features and metadata for one song as a JSON object.'''
    parameter = (str(song_id),)
    query = '''
    SELECT song_name, acousticness, danceability, duration, energy, loudness, speechiness, tempo, valence, popularity, year, spotify_id
    FROM songs
    WHERE
        id = %s
    '''
    connection = get_connection(database, user, password)
    row = get_query(query, parameter, connection)[0]
    # Keys mirror the SELECT column order, so zip pairs them up directly.
    fields = ('song_name', 'acousticness', 'danceability', 'duration', 'energy',
              'loudness', 'speechiness', 'tempo', 'valence', 'popularity',
              'year', 'spotify_id')
    payload = dict(zip(fields, row))
    payload['popularity'] = int(payload['popularity'])
    payload['year'] = int(payload['year'])
    return json.dumps(payload)
@api.route('/song/artist/<artist_id>/<attribute_name>')
def get_song_artist_attribute(artist_id, attribute_name):
    '''Return the song by this artist with the most (default) or least of one attribute.'''
    # Whitelist the column name: it is spliced into the SQL below by string
    # concatenation, so anything outside this fixed set must be rejected to
    # prevent SQL injection.
    potential_attributes = ['acousticness', 'danceability', 'duration', 'energy', 'loudness', 'speechiness', 'tempo', 'valence', 'popularity']
    if attribute_name not in potential_attributes:
        flask.abort(400)
    # 'most' (DESC) unless the caller explicitly asks for the least.
    order = 'ASC' if flask.request.args.get('sort_by') == 'least' else 'DESC'
    parameter = (str(artist_id),)
    query = '''
    SELECT songs.id, songs.spotify_id, songs.song_name
    FROM songs, artists, songs_artists
    WHERE
        artists.id = %s
        AND artists.id = songs_artists.artist_id
        AND songs.id = songs_artists.song_id
    ORDER BY songs.''' + attribute_name + ''' ''' + order + '''
    LIMIT 1
    '''
    connection = get_connection(database, user, password)
    top_song = get_query(query, parameter, connection)[0]
    return json.dumps({
        'song_id': top_song[0],
        'spotify_id': top_song[1],
        'song_name': top_song[2],
    })
@api.route('/song/year/<year>/<attribute_name>')
def get_song_year_attribute(year, attribute_name):
    '''Return the song from `year` with the most (default) or least of one attribute.'''
    # Whitelist the column name: it is spliced into the SQL below by string
    # concatenation, so anything outside this fixed set must be rejected to
    # prevent SQL injection.
    potential_attributes = ['acousticness', 'danceability', 'duration', 'energy', 'loudness', 'speechiness', 'tempo', 'valence', 'popularity']
    if attribute_name not in potential_attributes:
        flask.abort(400)
    # 'most' (DESC) unless the caller explicitly asks for the least.
    order = 'ASC' if flask.request.args.get('sort_by') == 'least' else 'DESC'
    parameter = (str(year),)
    query = '''
    SELECT songs.id, songs.spotify_id, songs.song_name
    FROM songs
    WHERE
        songs.year = %s
    ORDER BY songs.''' + attribute_name + ''' ''' + order + '''
    LIMIT 1
    '''
    connection = get_connection(database, user, password)
    top_song = get_query(query, parameter, connection)[0]
    return json.dumps({
        'song_id': top_song[0],
        'spotify_id': top_song[1],
        'song_name': top_song[2],
    })
@api.route('/most/songs/year/<year>')
def get_most_songs_year_attributes(year):
    '''Get the songs with the most of an attribute for a given year.

    Returns a JSON object keyed by attribute name; each value holds the
    spotify_id, song_name and attribute value of the top song.
    '''
    # One copy of the year per single-row sub-select below.
    parameter = (str(year),) * 9
    # Nine single-row sub-selects (one per attribute) merged with UNION; each
    # labels its row with the attribute name so rows can be keyed afterwards.
    query = '''
    (SELECT 'acousticness' as attribute, songs.spotify_id, songs.song_name, songs.acousticness as value FROM songs WHERE songs.year = %s ORDER BY acousticness DESC LIMIT 1) UNION
    (SELECT 'danceability' as attribute, songs.spotify_id, songs.song_name, songs.danceability as value FROM songs WHERE songs.year = %s ORDER BY danceability DESC LIMIT 1) UNION
    (SELECT 'duration' as attribute, songs.spotify_id, songs.song_name, songs.duration as value FROM songs WHERE songs.year = %s ORDER BY duration DESC LIMIT 1) UNION
    (SELECT 'energy' as attribute, songs.spotify_id, songs.song_name, songs.energy as value FROM songs WHERE songs.year = %s ORDER BY energy DESC LIMIT 1) UNION
    (SELECT 'loudness' as attribute, songs.spotify_id, songs.song_name, songs.loudness as value FROM songs WHERE songs.year = %s ORDER BY loudness DESC LIMIT 1) UNION
    (SELECT 'speechiness' as attribute, songs.spotify_id, songs.song_name, songs.speechiness as value FROM songs WHERE songs.year = %s ORDER BY speechiness DESC LIMIT 1) UNION
    (SELECT 'tempo' as attribute, songs.spotify_id, songs.song_name, songs.tempo as value FROM songs WHERE songs.year = %s ORDER BY tempo DESC LIMIT 1) UNION
    (SELECT 'valence' as attribute, songs.spotify_id, songs.song_name, songs.valence as value FROM songs WHERE songs.year = %s ORDER BY valence DESC LIMIT 1) UNION
    (SELECT 'popularity' as attribute, songs.spotify_id, songs.song_name, songs.popularity as value FROM songs WHERE songs.year = %s ORDER BY popularity DESC LIMIT 1)
    '''
    connection = get_connection(database, user, password)
    year_data = get_query(query, parameter, connection)
    # Re-key the rows by the attribute label emitted in the first column.
    year_dict = {}
    for i in range(len(year_data)):
        year_dict[year_data[i][0]] = {"spotify_id": year_data[i][1], "song_name": year_data[i][2], "value": year_data[i][3]}
    return json.dumps(year_dict)
@api.route('/least/songs/year/<year>')
def get_least_songs_year_attributes(year):
    '''Get the songs with the least of an attribute for a given year.

    Returns a JSON object keyed by attribute name; each value holds the
    spotify_id, song_name and attribute value of the bottom song.
    '''
    # One copy of the year per single-row sub-select below.
    parameter = (str(year),) * 9
    # Same shape as the /most endpoint, ordered ASC instead of DESC.
    query = '''
    (SELECT 'acousticness' as attribute, songs.spotify_id, songs.song_name, songs.acousticness as value FROM songs WHERE songs.year = %s ORDER BY acousticness ASC LIMIT 1) UNION
    (SELECT 'danceability' as attribute, songs.spotify_id, songs.song_name, songs.danceability as value FROM songs WHERE songs.year = %s ORDER BY danceability ASC LIMIT 1) UNION
    (SELECT 'duration' as attribute, songs.spotify_id, songs.song_name, songs.duration as value FROM songs WHERE songs.year = %s ORDER BY duration ASC LIMIT 1) UNION
    (SELECT 'energy' as attribute, songs.spotify_id, songs.song_name, songs.energy as value FROM songs WHERE songs.year = %s ORDER BY energy ASC LIMIT 1) UNION
    (SELECT 'loudness' as attribute, songs.spotify_id, songs.song_name, songs.loudness as value FROM songs WHERE songs.year = %s ORDER BY loudness ASC LIMIT 1) UNION
    (SELECT 'speechiness' as attribute, songs.spotify_id, songs.song_name, songs.speechiness as value FROM songs WHERE songs.year = %s ORDER BY speechiness ASC LIMIT 1) UNION
    (SELECT 'tempo' as attribute, songs.spotify_id, songs.song_name, songs.tempo as value FROM songs WHERE songs.year = %s ORDER BY tempo ASC LIMIT 1) UNION
    (SELECT 'valence' as attribute, songs.spotify_id, songs.song_name, songs.valence as value FROM songs WHERE songs.year = %s ORDER BY valence ASC LIMIT 1) UNION
    (SELECT 'popularity' as attribute, songs.spotify_id, songs.song_name, songs.popularity as value FROM songs WHERE songs.year = %s ORDER BY popularity ASC LIMIT 1)
    '''
    connection = get_connection(database, user, password)
    year_data = get_query(query, parameter, connection)
    # Re-key the rows by the attribute label emitted in the first column.
    year_dict = {}
    for i in range(len(year_data)):
        year_dict[year_data[i][0]] = {"spotify_id": year_data[i][1], "song_name": year_data[i][2], "value": year_data[i][3]}
    return json.dumps(year_dict)
@api.route('/most/songs/artist/<artist_id>')
def get_most_songs_artist_attributes(artist_id):
    '''Get the songs with the most of an attribute for a given artist.

    Returns a JSON object keyed by attribute name; each value holds the
    spotify_id, song_name and attribute value of the artist's top song.
    '''
    # One copy of the artist id per single-row sub-select below.
    parameter = (str(artist_id),) * 9
    # Nine single-row sub-selects joined through the songs_artists link table,
    # merged with UNION; each row is labelled with its attribute name.
    query = '''
    (SELECT 'acousticness' as attribute, songs.spotify_id, songs.song_name, songs.acousticness as value FROM songs, songs_artists WHERE songs.id = songs_artists.song_id AND songs_artists.artist_id = %s ORDER BY acousticness DESC LIMIT 1) UNION
    (SELECT 'danceability' as attribute, songs.spotify_id, songs.song_name, songs.danceability as value FROM songs, songs_artists WHERE songs.id = songs_artists.song_id AND songs_artists.artist_id = %s ORDER BY danceability DESC LIMIT 1) UNION
    (SELECT 'duration' as attribute, songs.spotify_id, songs.song_name, songs.duration as value FROM songs, songs_artists WHERE songs.id = songs_artists.song_id AND songs_artists.artist_id = %s ORDER BY duration DESC LIMIT 1) UNION
    (SELECT 'energy' as attribute, songs.spotify_id, songs.song_name, songs.energy as value FROM songs, songs_artists WHERE songs.id = songs_artists.song_id AND songs_artists.artist_id = %s ORDER BY energy DESC LIMIT 1) UNION
    (SELECT 'loudness' as attribute, songs.spotify_id, songs.song_name, songs.loudness as value FROM songs, songs_artists WHERE songs.id = songs_artists.song_id AND songs_artists.artist_id = %s ORDER BY loudness DESC LIMIT 1) UNION
    (SELECT 'speechiness' as attribute, songs.spotify_id, songs.song_name, songs.speechiness as value FROM songs, songs_artists WHERE songs.id = songs_artists.song_id AND songs_artists.artist_id = %s ORDER BY speechiness DESC LIMIT 1) UNION
    (SELECT 'tempo' as attribute, songs.spotify_id, songs.song_name, songs.tempo as value FROM songs, songs_artists WHERE songs.id = songs_artists.song_id AND songs_artists.artist_id = %s ORDER BY tempo DESC LIMIT 1) UNION
    (SELECT 'valence' as attribute, songs.spotify_id, songs.song_name, songs.valence as value FROM songs, songs_artists WHERE songs.id = songs_artists.song_id AND songs_artists.artist_id = %s ORDER BY valence DESC LIMIT 1) UNION
    (SELECT 'popularity' as attribute, songs.spotify_id, songs.song_name, songs.popularity as value FROM songs, songs_artists WHERE songs.id = songs_artists.song_id AND songs_artists.artist_id = %s ORDER BY popularity DESC LIMIT 1)
    '''
    connection = get_connection(database, user, password)
    year_data = get_query(query, parameter, connection)
    # Re-key the rows by the attribute label emitted in the first column.
    year_dict = {}
    for i in range(len(year_data)):
        year_dict[year_data[i][0]] = {"spotify_id": year_data[i][1], "song_name": year_data[i][2], "value": year_data[i][3]}
    return json.dumps(year_dict)
@api.route('/least/songs/artist/<artist_id>')
def get_least_songs_artist_attributes(artist_id):
    '''Get the songs with the least of an attribute for a given artist.

    Returns a JSON object keyed by attribute name; each value holds the
    spotify_id, song_name and attribute value of the artist's bottom song.
    '''
    # One copy of the artist id per single-row sub-select below.
    parameter = (str(artist_id),) * 9
    # Same shape as the /most endpoint, ordered ASC instead of DESC.
    query = '''
    (SELECT 'acousticness' as attribute, songs.spotify_id, songs.song_name, songs.acousticness as value FROM songs, songs_artists WHERE songs.id = songs_artists.song_id AND songs_artists.artist_id = %s ORDER BY acousticness ASC LIMIT 1) UNION
    (SELECT 'danceability' as attribute, songs.spotify_id, songs.song_name, songs.danceability as value FROM songs, songs_artists WHERE songs.id = songs_artists.song_id AND songs_artists.artist_id = %s ORDER BY danceability ASC LIMIT 1) UNION
    (SELECT 'duration' as attribute, songs.spotify_id, songs.song_name, songs.duration as value FROM songs, songs_artists WHERE songs.id = songs_artists.song_id AND songs_artists.artist_id = %s ORDER BY duration ASC LIMIT 1) UNION
    (SELECT 'energy' as attribute, songs.spotify_id, songs.song_name, songs.energy as value FROM songs, songs_artists WHERE songs.id = songs_artists.song_id AND songs_artists.artist_id = %s ORDER BY energy ASC LIMIT 1) UNION
    (SELECT 'loudness' as attribute, songs.spotify_id, songs.song_name, songs.loudness as value FROM songs, songs_artists WHERE songs.id = songs_artists.song_id AND songs_artists.artist_id = %s ORDER BY loudness ASC LIMIT 1) UNION
    (SELECT 'speechiness' as attribute, songs.spotify_id, songs.song_name, songs.speechiness as value FROM songs, songs_artists WHERE songs.id = songs_artists.song_id AND songs_artists.artist_id = %s ORDER BY speechiness ASC LIMIT 1) UNION
    (SELECT 'tempo' as attribute, songs.spotify_id, songs.song_name, songs.tempo as value FROM songs, songs_artists WHERE songs.id = songs_artists.song_id AND songs_artists.artist_id = %s ORDER BY tempo ASC LIMIT 1) UNION
    (SELECT 'valence' as attribute, songs.spotify_id, songs.song_name, songs.valence as value FROM songs, songs_artists WHERE songs.id = songs_artists.song_id AND songs_artists.artist_id = %s ORDER BY valence ASC LIMIT 1) UNION
    (SELECT 'popularity' as attribute, songs.spotify_id, songs.song_name, songs.popularity as value FROM songs, songs_artists WHERE songs.id = songs_artists.song_id AND songs_artists.artist_id = %s ORDER BY popularity ASC LIMIT 1)
    '''
    connection = get_connection(database, user, password)
    year_data = get_query(query, parameter, connection)
    # Re-key the rows by the attribute label emitted in the first column.
    year_dict = {}
    for i in range(len(year_data)):
        year_dict[year_data[i][0]] = {"spotify_id": year_data[i][1], "song_name": year_data[i][2], "value": year_data[i][3]}
    return json.dumps(year_dict)
@api.route('/artist/<artist1_id>/<artist2_id>/')
def artist_suggestion(artist1_id, artist2_id):
    '''Recommend 3 artists whose attributes are closest to the average of artist1 and artist2.'''
    # First pair feeds the inner averaging sub-select, second pair excludes the
    # two chosen artists from the ranked results.
    parameter = (str(artist1_id), str(artist2_id), str(artist1_id), str(artist2_id),)
    # NOTE(review): this list is unused in this function — presumably leftover
    # from a copy of the attribute endpoints; confirm before removing.
    potential_attributes = ['acousticness', 'danceability', 'duration', 'energy', 'loudness', 'speechiness', 'tempo', 'valence', 'popularity']
    # Rank all other artists by squared Euclidean distance to the averaged
    # attribute vector. Some values are divided by a number which is the maximum
    # range for that attribute (such as tempo) since they aren't normalized.
    query = '''SELECT artists.id, artists.artist_name
    FROM (SELECT AVG(chosen.acousticness) as acousticness, AVG(chosen.danceability) as danceability, AVG(chosen.energy) as energy,
    AVG(chosen.loudness) as loudness, AVG(chosen.speechiness) as speechiness, AVG(chosen.tempo) as tempo,
    AVG(chosen.valence) as valence, AVG(chosen.popularity) as popularity
    FROM (SELECT * FROM artists WHERE id = %s or id = %s) as chosen) as average, artists
    WHERE artists.id != %s AND artists.id != %s
    ORDER BY ((artists.danceability - average.danceability)^2 + (artists.acousticness - average.acousticness)^2
    + (artists.energy - average.energy)^2 + ((artists.loudness / 60) - (average.loudness / 60))^2
    + (artists.speechiness - average.speechiness)^2 + ((artists.tempo / 220) - (average.tempo / 220))^2
    + (artists.valence - average.valence)^2 + ((artists.popularity / 100) - (average.popularity / 100))^2)
    ASC LIMIT 3;
    '''
    connection = get_connection(database, user, password)
    suggestion_data = get_query(query, parameter, connection)
    # Package the three closest artists as a JSON list.
    suggestions = []
    for i in range(len(suggestion_data)):
        artist_dict = {}
        artist_dict["id"] = suggestion_data[i][0]
        artist_dict["artist_name"] = suggestion_data[i][1]
        suggestions.append(artist_dict)
    return json.dumps(suggestions)
@api.route('/search/artist/<search_string>')
def search_artist(search_string):
    '''Return up to five artists whose names best match the search string.'''
    parameter = (str(search_string),)
    # levenshtein_less_equal ranks rows by edit distance to the search string.
    query = '''
    SELECT id, artist_name FROM artists
    ORDER BY levenshtein_less_equal(artists.artist_name, %s, 1, 1, 1, 25) LIMIT 5
    '''
    connection = get_connection(database, user, password)
    search_data = get_query(query, parameter, connection)
    # Iterate over the actual rows: LIMIT 5 can return fewer than five matches,
    # and the old fixed range(5) loop raised IndexError in that case.
    artists = [{"id": row[0], "artist_name": row[1]} for row in search_data]
    return json.dumps(artists)
@api.route('/search/song/<search_string>')
def search_song(search_string):
    '''Return up to five songs whose names best match the search string.'''
    parameter = (str(search_string),)
    # levenshtein ranks rows by edit distance to the search string.
    query = '''
    SELECT id, song_name, year FROM songs
    ORDER BY levenshtein(songs.song_name, %s, 1, 1, 1) LIMIT 5
    '''
    connection = get_connection(database, user, password)
    search_data = get_query(query, parameter, connection)
    # Iterate over the actual rows: LIMIT 5 can return fewer than five matches,
    # and the old fixed range(5) loop raised IndexError in that case.
    songs = [
        {"id": row[0], "song_name": row[1], "year": int(row[2])}
        for row in search_data
    ]
    return json.dumps(songs)
@api.route('/help')
def get_help():
    '''Serve the rendered API help page.'''
    return flask.render_template('help.html')
def get_connection(database, user, password):
    '''Open and return a psycopg2 connection; exit the process on failure.'''
    try:
        return psycopg2.connect(database=database, user=user, password=password)
    except Exception as error:
        # A missing database is treated as fatal for the whole service.
        print(error)
        exit()
def get_query(query, parameter, connection):
    '''Execute `query` (optionally parameterized) on `connection` and return the rows as a list.'''
    cursor = connection.cursor()
    try:
        if parameter == ():
            # An empty parameter tuple means a literal, parameterless query.
            cursor.execute(query)
        else:
            cursor.execute(query, parameter)
    except Exception as error:
        print(error)
        exit()
    return [row for row in cursor]
|
ambeimers/cs257 | database/archive/convert.py | '''
Authors: <NAME> and <NAME>
Date: January 26th, 2021
This program converts 2 .csv files into 3 smaller .csv files.
'''
import csv
def remove_commas(input_string):
    """Return `input_string` with every comma removed.

    The original index-slicing loop skipped the character that shifted into a
    removed comma's position, so one of each pair of consecutive commas
    survived. str.replace removes all occurrences in a single pass.
    """
    return input_string.replace(',', '')
def convert_age_to_int(age):
    """Convert an age field to int, mapping the 'NA' placeholder to SQL 'NULL'."""
    return 'NULL' if age == 'NA' else int(age)
def convert_weight_to_int(weight):
    """Convert a weight field to a floored int, mapping 'NA' to SQL 'NULL'."""
    if weight == 'NA':
        return 'NULL'
    # Floor-divide by 1 first, matching the original truncation behavior.
    return int(float(weight) // 1)
def convert_height_to_int(height):
    """Convert a height field to int, mapping the 'NA' placeholder to SQL 'NULL'."""
    return 'NULL' if height == 'NA' else int(height)
def remove_double_quotes_from_name(name):
    '''Return *name* with each double quote replaced by '~'.

    The original rebuilt a local string but fell off the end without a
    return statement, so callers always received None.
    '''
    return name.replace('"', '~')
def read_and_write_files():
    '''Convert the two input CSVs into athletes.csv, medals.csv and regions.csv.

    athletes.csv gets one row per athlete_events.csv data row; medals.csv
    only the rows where a medal was won; regions.csv mirrors noc_regions.csv
    with a generated row id.  Commas inside fields are stripped so the
    comma-separated output stays parseable.  The output handles are now
    managed by `with` blocks — the original never closed them.
    '''
    with open('output_csv_files/athletes.csv', 'w') as athletes, \
         open('output_csv_files/medals.csv', 'w') as medals, \
         open('output_csv_files/regions.csv', 'w') as regions:
        with open('input_csv_files/athlete_events.csv') as f:
            csv_reader = csv.reader(f)
            row_count = 0
            for row in csv_reader:
                if row_count == 0:
                    # Skip the header row.
                    row_count += 1
                    continue
                athlete_name = remove_commas(row[1])
                # NOTE(review): remove_double_quotes_from_name has no return
                # statement, so this assignment yields None — confirm the
                # helper is fixed before trusting the name columns.
                athlete_name = remove_double_quotes_from_name(athlete_name)
                sex = row[2]
                age = convert_age_to_int(row[3])
                height = convert_height_to_int(row[4])
                weight = convert_weight_to_int(row[5])
                team = remove_commas(row[6])
                noc = row[7]
                game = remove_commas(row[8])
                year = row[9]
                season = row[10]
                city = remove_commas(row[11])
                sport = row[12]
                event = remove_commas(row[13])
                medal = row[14]
                print(f"{row_count},{athlete_name},{sex},{age},{height},{weight},{team}", file=athletes)
                if medal != 'NA':
                    print(f"{row_count},{athlete_name},{noc},{medal},{event},{sport},{game},{year},{season},{city}", file=medals)
                row_count += 1
        with open('input_csv_files/noc_regions.csv') as f:
            csv_reader = csv.reader(f)
            for row_number, row in enumerate(csv_reader, start=1):
                noc = row[0]
                region = remove_commas(row[1])
                notes = row[2]
                print(f"{row_number},{noc},{region},{notes}", file=regions)
def main():
    '''Entry point: run the full CSV conversion.'''
    read_and_write_files()
if __name__ == '__main__':
    main()
|
199-cmd/Jump-Space-Game-In-Tkinter | setting.py | <reponame>199-cmd/Jump-Space-Game-In-Tkinter<gh_stars>0
# Window dimensions in pixels.
width = 800
height = 700
# Target frame rate for the game clock.
fps = 60
font_n = 'arial'
# Sprite sheet image file (loaded from the img/ directory by the game).
sheetload = "spritesheet_jumper.png"
# presumably a mob/enemy spawn interval in milliseconds — TODO confirm; not
# referenced in the visible game code.
mob_fq = 5000
# Player physics constants: acceleration, friction coefficient, gravity
# and jump impulse (pixels/frame units).
player_acc = 0.5
player_friction = -0.12
player_gra = 0.8
player_jump = 21
# Initial platforms as (x, y) top-left positions.
platform_list = [(0,height-50),(width/2-50,height*3/4),
(235,height-350),(350,200),(175,100)]
# Common RGB color tuples.
white = (255,255,255)
black = (0,0,0)
red = (255,0,0)
green = (0,255,0)
blue = (0,0,255)
sky = (0,142,205)
|
199-cmd/Jump-Space-Game-In-Tkinter | new1.py | import pygame as pg
import random
from os import path
from setting import *
from sprites import *
class Game:
    '''Main game object: owns the pygame window, sprite groups and the
    start / play / game-over loop of the jumping game.'''
    def __init__(self):
        '''Initialise pygame, the window, the clock and load assets.'''
        self.running = True
        pg.init()
        pg.mixer.init()
        self.screen = pg.display.set_mode((width,height))
        pg.display.set_caption("Nitya")
        self.clock = pg.time.Clock()
        self.font_name = pg.font.match_font(font_n)
        self.load_data()
    def load_data(self):
        '''Load the sprite sheet from the img/ directory next to this file.'''
        self.dir = path.dirname(__file__)
        img_dir = path.join(self.dir, 'img')
        self.spritesheet = Spritesheet(path.join(img_dir,sheetload))
    def new(self):
        '''Start a fresh round: reset the score, rebuild all sprites, run.'''
        self.score = 0
        self.sprites = pg.sprite.Group()
        self.platforms = pg.sprite.Group()
        self.player = players(self)
        self.sprites.add(self.player)
        for plat in platform_list:
            p = platforms(self,*plat)
            self.sprites.add(p)
            self.platforms.add(p)
        self.run()
    def run(self):
        '''Fixed-rate loop: events, update, draw until the round ends.'''
        self.playing = True
        while self.playing:
            self.clock.tick(fps)
            self.events()
            self.update()
            self.draw()
    def update(self):
        '''Advance one frame: landings, camera scroll, scoring, respawns.'''
        self.sprites.update()
        # Land on a platform only while falling (positive y velocity).
        if self.player.vel.y >0:
            hits = pg.sprite.spritecollide(self.player , self.platforms, False)
            if hits:
                if self.player.pos.y < hits[0].rect.bottom:
                    self.player.pos.y = hits[0].rect.top
                    self.player.vel.y = 0
        # If the player reaches the top quarter of the screen, scroll the
        # world down and score 10 per platform pushed off the bottom.
        if self.player.rect.top <= height/4:
            self.player.pos.y += max(abs(self.player.vel.y),2)
            for plat in self.platforms:
                plat.rect.y += max(abs(self.player.vel.y),2)
                if plat.rect.top >= height:
                    plat.kill()
                    self.score += 10
        # Falling below the window ends the round.
        if self.player.rect.bottom > height:
            self.playing = False
        # Keep six platforms alive by spawning new ones above the screen.
        # NOTE(review): randrange appears to come in via `from sprites
        # import *` (sprites star-imports random) — confirm, since this
        # module only does `import random` directly.
        while len(self.platforms) < 6:
            wi = randrange(50, 100)
            p = platforms(self,randrange(0,width - wi)
,randrange(-75, -30))
            self.platforms.add(p)
            self.sprites.add(p)
    def events(self):
        '''Handle quit and jump (space bar) input for the current frame.'''
        for event in pg.event.get():
            if event.type == pg.QUIT:
                if self.playing:
                    self.playing = False
                self.running = False
            if event.type == pg.KEYDOWN:
                if event.key == pg.K_SPACE:
                    self.player.jump()
    def draw(self):
        '''Repaint the frame: background, sprites and the score readout.'''
        self.screen.fill(sky)
        self.sprites.draw(self.screen)
        self.draw_text(str(self.score),22,white,width/2,15)
        pg.display.flip()
    def show_start_screen(self):
        '''Show the title screen and wait for a key press.'''
        if not self.running:
            return
        self.screen.fill(sky)
        self.draw_text("Jump into the Space",48,white,width/2,height/4)
        self.draw_text("arrows to move, space to jump",22,white,width /2,height /2)
        self.draw_text("press any key to play",22,white,width/2,height * 3/4)
        pg.display.flip()
        self.waiting()
    def show_go_screen(self):
        '''Show the game-over screen with the final score, wait for a key.'''
        self.screen.fill(sky)
        self.draw_text("GAME FINISHED",48,white,width/2,height/4)
        self.draw_text("Score:"+str(self.score),22,white,width /2,height /2)
        self.draw_text("Press any key to play",22,white,width/2,height * 3/4)
        pg.display.flip()
        self.waiting()
    def waiting(self):
        '''Block until any key is released or the window is closed.'''
        wait = True
        while wait:
            self.clock.tick(fps)
            for event in pg.event.get():
                if event.type == pg.QUIT:
                    wait = False
                    self.running = False
                if event.type == pg.KEYUP:
                    wait =False
    def draw_text(self,text,size,color, x ,y ):
        '''Render *text* centered horizontally at (x, y) in *color*.'''
        font = pg.font.Font(self.font_name,size)
        text_surface = font.render(text ,True ,color)
        text_rect = text_surface.get_rect()
        text_rect.midtop = (x,y)
        self.screen.blit(text_surface,text_rect)
# Module-level driver: show the start screen once, then alternate between
# playing a round and showing the game-over screen until the window closes.
g = Game()
g.show_start_screen()
while g.running:
    g.new()
    g.show_go_screen()
pg.quit()
|
199-cmd/Jump-Space-Game-In-Tkinter | sprites.py | from setting import *
import pygame as pg
from random import *
vec = pg.math.Vector2
class Spritesheet:
    '''Loads a sprite-sheet image and cuts individual sprites out of it.'''
    def __init__(self, filename):
        # convert() matches the display's pixel format for faster blits.
        self.spsheet = pg.image.load(filename).convert()
    def get_img(self,x,y,width,height):
        '''Return the (x, y, width, height) region of the sheet at half size.'''
        image = pg.Surface((width,height))
        image.blit(self.spsheet,(0,0),(x,y,width,height))
        image = pg.transform.scale(image,(width//2,height//2))
        return image
class players(pg.sprite.Sprite):
    '''The controllable player sprite: movement physics plus walk/stand
    animation frames cut from the shared sprite sheet.'''
    def __init__(self,game):
        '''Set up animation state, the starting image and physics vectors.'''
        pg.sprite.Sprite.__init__(self)
        self.game = game
        self.walking = False
        self.jumping = False
        # Current animation frame index and time of the last frame switch.
        self.currentf = 0
        self.lastup = 0
        self.load_image()
        self.image = self.standing_fr[0]
        self.rect = self.image.get_rect()
        self.rect.center = (40,height-100)
        # Position, velocity and acceleration as 2D vectors.
        self.pos = vec(40 , height-100)
        self.vel = vec(0,0)
        self.acc = vec(0,0)
    def load_image(self):
        '''Cut standing, walking and jumping frames from the sprite sheet.
        Black is the sheet background, so it becomes the transparency key.'''
        self.standing_fr = [self.game.spritesheet.get_img(581,1265,121,191),
self.game.spritesheet.get_img(584,0,121,201)]
        for fr in self.standing_fr:
            fr.set_colorkey(black)
        self.walk_r = [self.game.spritesheet.get_img(584,203,121,201)
,self.game.spritesheet.get_img(678,651,121,207)]
        for fr in self.walk_r:
            fr.set_colorkey(black)
        # Left-facing frames are mirrored copies of the right-facing ones.
        self.walk_l = []
        for frame in self.walk_r:
            frame.set_colorkey(black)
            self.walk_l.append(pg.transform.flip(frame,True,False))
        self.jump_fr = self.game.spritesheet.get_img(416,1660,150,181)
        self.jump_fr.set_colorkey(black)
    def jump(self):
        # Only jump while standing on a platform: nudge the rect sideways,
        # test for a platform collision, then restore the position.
        self.rect.x += 2
        hits = pg.sprite.spritecollide(self,self.game.platforms, False)
        self.rect.x -= 2
        if hits:
            self.vel.y = -player_jump
    def update(self):
        '''Per-frame physics: input, friction, integration, screen wrap.'''
        self.animate()
        self.acc = vec(0,player_gra)
        keys = pg.key.get_pressed()
        if keys[pg.K_LEFT]:
            self.acc.x = -player_acc
        if keys[pg.K_RIGHT]:
            self.acc.x = player_acc
        # Apply horizontal friction proportional to velocity.
        self.acc.x += self.vel.x * player_friction
        # Equations of motion (semi-implicit integration).
        self.vel += self.acc
        if abs(self.vel.x)< 0.2:
            self.vel.x = 0
        self.pos += self.vel +0.5*self.acc
        # Wrap around the screen edges horizontally.
        if self.pos.x> width:
            self.pos.x = 0
        if self.pos.x <0:
            self.pos.x = width
        self.rect.midbottom = self.pos
    def animate(self):
        '''Advance the walk/stand animation every 350 ms of movement state.'''
        now = pg.time.get_ticks()
        if self.vel.x != 0:
            self.walking = True
        else:
            self.walking = False
        if self.walking:
            if now - self.lastup > 350:
                self.lastup = now
                self.currentf = (self.currentf +1)%len(self.walk_l)
                if self.vel.x > 0:
                    self.image = self.walk_r[self.currentf]
                else:
                    self.image = self.walk_l[self.currentf]
        if not self.jumping and not self.walking:
            if now - self.lastup >350:
                self.lastup = now
                self.currentf = (self.currentf +1)%len(self.standing_fr)
                self.image = self.standing_fr[self.currentf]
class platforms(pg.sprite.Sprite):
    '''A static platform sprite placed at (x, y), with a randomly chosen
    look from the two platform images on the sprite sheet.'''
    def __init__(self,game,x,y):
        pg.sprite.Sprite.__init__(self)
        self.game = game
        images = [self.game.spritesheet.get_img(0,672,380,94),
self.game.spritesheet.get_img(208,1879,201,100)]
        self.image = choice(images)
        self.image.set_colorkey(black)
        self.rect = self.image.get_rect()
        self.rect.x = x
        self.rect.y = y
|
brianhowes/pynet_test | file_ex1.py | <reponame>brianhowes/pynet_test
#!/usr/bin/env python
# Demonstrate write ("w") then append ("a") modes on the same file.
# Both opens now use `with` so the handles are always closed, and the
# original "/n" — a typo for the newline escape — is fixed to "\n".
with open("new_file.txt", "w") as f:
    f.write('I love to ride!!!')
with open("new_file.txt", "a") as f:
    f.write("Always!!!\n")
|
zeineb/testzz | bittensor/utils/__init__.py | import binascii
import struct
import hashlib
import math
import bittensor
import rich
import time
def hex_bytes_to_u8_list( hex_bytes: bytes ):
    """Decode a hex-character string/bytes into its list of u8 values."""
    chunks = []
    for start in range(0, len(hex_bytes), 2):
        chunks.append(int(hex_bytes[start:start + 2], 16))
    return chunks
def u8_list_to_hex( values: list ):
    """Fold a little-endian list of u8 values back into a single integer.

    Element i contributes value << (8 * i) — equivalent to the original
    shift-and-add over the reversed list.
    """
    return sum(value << (8 * index) for index, value in enumerate(values))
def create_seal_hash( block_hash:bytes, nonce:int ) -> bytes:
    """SHA-256 seal over the little-endian nonce hex plus the block hash
    string with its '0x' prefix stripped."""
    nonce_hex = binascii.hexlify(nonce.to_bytes(8, 'little'))
    hash_hex = block_hash.encode('utf-8')[2:]
    # bytes.fromhex decodes the ASCII hex pairs exactly as the original's
    # bytearray(hex_bytes_to_u8_list(...)) did.
    return hashlib.sha256(bytes.fromhex((nonce_hex + hash_hex).decode())).digest()
def seal_meets_difficulty( seal:bytes, difficulty:int ):
    """Return True when seal_number * difficulty fits under the 256-bit limit.

    The original computed the limit as int(math.pow(2, 256) - 1): in float
    arithmetic 2**256 - 1 rounds back to 2**256, so the bound was off by
    one.  Integer arithmetic gives the intended 2**256 - 1 exactly.
    """
    seal_number = int.from_bytes(seal, "big")
    limit = (1 << 256) - 1
    return seal_number * difficulty <= limit
def solve_for_difficulty( block_hash, difficulty ):
    """Brute-force a nonce whose seal meets *difficulty* for *block_hash*.

    Returns (nonce, seal) — the seal may NOT actually meet the difficulty:
    NOTE(review): the `if nonce > 1: break` guard caps the search at nonces
    0..2, so this only succeeds for very easy difficulties — presumably a
    test/demo helper; confirm before relying on it.
    """
    meets = False
    nonce = -1
    while not meets:
        nonce += 1
        seal = create_seal_hash( block_hash, nonce )
        meets = seal_meets_difficulty( seal, difficulty )
        if nonce > 1:
            break
    return nonce, seal
def solve_for_difficulty_fast( subtensor ):
    """Search nonces until a seal beats the chain difficulty for the current
    block, refreshing the block hash/difficulty every 100k iterations.

    Returns (nonce, block_number, block_hash, difficulty, seal) on success;
    runs indefinitely (updating a rich console status line) until one is
    found.
    """
    block_number = subtensor.get_current_block()
    difficulty = subtensor.difficulty
    block_hash = subtensor.substrate.get_block_hash( block_number )
    # The substrate node can briefly return None for a fresh block; retry.
    while block_hash == None:
        block_hash = subtensor.substrate.get_block_hash( block_number )
    block_bytes = block_hash.encode('utf-8')[2:]
    meets = False
    nonce = -1
    # NOTE(review): int(math.pow(2,256) - 1) is 2**256 due to float
    # rounding, not 2**256 - 1 — confirm against the chain's definition.
    limit = int(math.pow(2,256) - 1)
    best = math.inf
    update_interval = 100000
    start_time = time.time()
    console = bittensor.__console__
    with console.status("Solving") as status:
        while not meets:
            nonce += 1
            # Create seal: sha256 over nonce hex (little-endian) + hash hex.
            nonce_bytes = binascii.hexlify(nonce.to_bytes(8, 'little'))
            pre_seal = nonce_bytes + block_bytes
            seal = hashlib.sha256( bytearray(hex_bytes_to_u8_list(pre_seal)) ).digest()
            seal_number = int.from_bytes(seal, "big")
            product = seal_number * difficulty
            # Track the closest-to-valid seal seen so far for the status UI.
            if product - limit < best:
                best = product - limit
                best_seal = seal
            if product < limit:
                return nonce, block_number, block_hash, difficulty, seal
            # Periodically re-read difficulty and block, and report progress.
            if nonce % update_interval == 0:
                itrs_per_sec = update_interval / (time.time() - start_time)
                start_time = time.time()
                difficulty = subtensor.difficulty
                block_number = subtensor.get_current_block()
                block_hash = subtensor.substrate.get_block_hash( block_number)
                while block_hash == None:
                    block_hash = subtensor.substrate.get_block_hash( block_number)
                block_bytes = block_hash.encode('utf-8')[2:]
                status.update("Solving\n Nonce: [bold white]{}[/bold white]\n Iters: [bold white]{}/s[/bold white]\n Difficulty: [bold white]{}[/bold white]\n Block: [bold white]{}[/bold white]\n Best: [bold white]{}[/bold white]".format( nonce, int(itrs_per_sec), difficulty, block_hash.encode('utf-8'), binascii.hexlify(best_seal) ))
def create_pow( subtensor ):
    """Solve registration proof-of-work against *subtensor* and package the
    result as the dict expected by the registration extrinsic, with the
    seal hex-encoded under 'work'."""
    nonce, block_number, block_hash, difficulty, seal = solve_for_difficulty_fast( subtensor )
    return {
        'nonce': nonce,
        'difficulty': difficulty,
        'block_number': block_number,
        'block_hash': block_hash,
        'work': binascii.hexlify(seal)
    }
|
zeineb/testzz | tests/unit_tests/bittensor_tests/test_metagraph.py | <reponame>zeineb/testzz
import bittensor
import torch
import unittest
class TestMetagraph(unittest.TestCase):
    '''Integration tests for bittensor.metagraph against the live
    'akatsuki' network — these require network access to pass.'''
    def setUp (self):
        # A fresh metagraph per test, connected to the test network.
        self.metagraph = bittensor.metagraph(network = 'akatsuki')
        assert True
    def test_print_empty(self):
        # An unsynced metagraph must still have a printable repr.
        print (self.metagraph)
    def test_forward(self):
        # Calling the metagraph records a weight row for each uid, both
        # before and after a sync.
        row = torch.ones( (self.metagraph.n), dtype = torch.float32 )
        for i in range( self.metagraph.n ):
            self.metagraph(i, row)
        self.metagraph.sync()
        row = torch.ones( (self.metagraph.n), dtype = torch.float32 )
        for i in range( self.metagraph.n ):
            self.metagraph(i, row)
    def test_load_sync_save(self):
        # sync/save/load round-trips must not raise.
        self.metagraph.sync()
        self.metagraph.save()
        self.metagraph.load()
        self.metagraph.save()
    def test_factory(self):
        # load/sync/save must be chainable (fluent interface).
        graph = self.metagraph.load().sync().save()
    def test_state_dict(self):
        # state_dict must expose every persisted field.
        self.metagraph.load()
        state = self.metagraph.state_dict()
        assert 'uids' in state
        assert 'stake' in state
        assert 'last_update' in state
        assert 'block' in state
        assert 'tau' in state
        assert 'weights' in state
        assert 'endpoints' in state
|
zeineb/testzz | tests/unit_tests/bittensor_tests/test_wallet.py | <filename>tests/unit_tests/bittensor_tests/test_wallet.py
import bittensor
from unittest.mock import MagicMock
import os
import shutil
from pytest import fixture
import subprocess
from loguru import logger
import time
import sys
from sys import platform
from bittensor.utils.balance import Balance
subtensor = bittensor.subtensor(network = 'akatsuki')
def init_wallet():
    '''Create a fresh test wallet under /tmp/pytest, wiping any previous one.'''
    if os.path.exists('/tmp/pytest'):
        shutil.rmtree('/tmp/pytest')
    the_wallet = bittensor.wallet (
        path = '/tmp/pytest',
        name = 'pytest',
        hotkey = 'pytest',
    )
    return the_wallet
def check_keys_exists(the_wallet = None):
    '''Assert that all three key files exist on disk and that the cached
    key objects re-load lazily from the key-file properties.'''
    # --- test file and key exists
    assert os.path.isfile(the_wallet.coldkey_file.path)
    assert os.path.isfile(the_wallet.hotkey_file.path)
    assert os.path.isfile(the_wallet.coldkeypub_file.path)
    assert the_wallet._hotkey != None
    assert the_wallet._coldkey != None
    # --- test _load_key(): clear the cached keys, then touching the
    # properties must re-load them from disk.
    the_wallet._hotkey = None
    the_wallet._coldkey = None
    the_wallet._coldkeypub = None
    the_wallet.hotkey
    the_wallet.coldkey
    the_wallet.coldkeypub
    assert the_wallet._hotkey != None
    assert the_wallet._coldkey != None
    assert the_wallet._coldkeypub != None
def test_create_wallet():
    '''wallet.create() must write all three key files without passwords.'''
    the_wallet = init_wallet().create(coldkey_use_password = False, hotkey_use_password = False)
    check_keys_exists(the_wallet)
def test_create_keys():
    '''Both create_new_* and the shorter new_* aliases must produce keys.'''
    the_wallet = init_wallet()
    the_wallet.create_new_coldkey( use_password=False, overwrite = True )
    the_wallet.create_new_hotkey( use_password=False, overwrite = True )
    check_keys_exists(the_wallet)
    the_wallet = init_wallet()
    the_wallet.new_coldkey( use_password=False, overwrite = True )
    the_wallet.new_hotkey( use_password=False, overwrite = True )
    check_keys_exists(the_wallet)
def test_wallet_uri():
    '''Keys derived from a well-known dev URI (//Alice-style) must persist.'''
    the_wallet = init_wallet()
    the_wallet.create_coldkey_from_uri( uri = "/Alice", use_password=False, overwrite = True )
    the_wallet.create_hotkey_from_uri( uri = "/Alice", use_password=False, overwrite = True )
    check_keys_exists(the_wallet)
def test_wallet_mnemonic_create():
    '''Key regeneration must accept a mnemonic as either a string or a
    word list, via both regenerate_* and the shorter regen_* aliases.'''
    the_wallet = init_wallet()
    the_wallet.regenerate_coldkey( mnemonic = "solve arrive guilt syrup dust sea used phone flock vital narrow endorse", use_password=False, overwrite = True )
    the_wallet.regenerate_coldkey( mnemonic = "solve arrive guilt syrup dust sea used phone flock vital narrow endorse".split(), use_password=False, overwrite = True )
    the_wallet.regenerate_hotkey( mnemonic = "solve arrive guilt syrup dust sea used phone flock vital narrow endorse", use_password=False, overwrite = True )
    the_wallet.regenerate_hotkey( mnemonic = "solve arrive guilt syrup dust sea used phone flock vital narrow endorse".split(), use_password=False, overwrite = True )
    check_keys_exists(the_wallet)
    the_wallet = init_wallet()
    the_wallet.regen_coldkey( mnemonic = "solve arrive guilt syrup dust sea used phone flock vital narrow endorse", use_password=False, overwrite = True )
    the_wallet.regen_coldkey( mnemonic = "solve arrive guilt syrup dust sea used phone flock vital narrow endorse".split(), use_password=False, overwrite = True )
    the_wallet.regen_hotkey( mnemonic = "solve arrive guilt syrup dust sea used phone flock vital narrow endorse", use_password=False, overwrite = True )
    the_wallet.regen_hotkey( mnemonic = "solve arrive guilt syrup dust sea used phone flock vital narrow endorse".split(), use_password=False, overwrite = True )
    check_keys_exists(the_wallet)
def test_wallet_add_stake():
    '''add_stake with the chain call mocked, registered and unregistered.'''
    subtensor = bittensor.subtensor(network = 'akatsuki')
    the_wallet = init_wallet().create(coldkey_use_password = False, hotkey_use_password = False)
    subtensor.add_stake = MagicMock(return_value = True)
    the_wallet.is_registered = MagicMock(return_value = True)
    the_wallet.add_stake(subtensor = subtensor)
    # when not registered
    the_wallet.is_registered = MagicMock(return_value = False)
    the_wallet.add_stake(subtensor = subtensor)
def test_wallet_remove_stake():
    '''remove_stake with the chain call mocked, registered and unregistered.'''
    subtensor = bittensor.subtensor(network = 'akatsuki')
    the_wallet = init_wallet().create(coldkey_use_password = False, hotkey_use_password = False)
    subtensor.unstake = MagicMock(return_value = True)
    the_wallet.is_registered = MagicMock(return_value = True)
    the_wallet.remove_stake(subtensor = subtensor)
    #when not registered
    the_wallet.is_registered = MagicMock(return_value = False)
    the_wallet.remove_stake(subtensor = subtensor)
def test_wallet_transfer():
    '''Exercise wallet.transfer when registered, under-funded, and
    unregistered.

    The original's final branch called remove_stake — a copy-paste slip
    from test_wallet_remove_stake; the unregistered path should exercise
    transfer like the rest of this test.
    '''
    subtensor = bittensor.subtensor(network = 'akatsuki')
    the_wallet = init_wallet().create(coldkey_use_password = False, hotkey_use_password = False)
    subtensor.transfer = MagicMock(return_value = True)
    # when registered
    the_wallet.is_registered = MagicMock(return_value = True)
    the_wallet.get_balance = MagicMock(return_value = Balance(20))
    the_wallet.transfer(amount = 10, subtensor = subtensor, dest = "")
    # when not enough tao
    the_wallet.get_balance = MagicMock(return_value = Balance(5))
    the_wallet.transfer(amount = 10, subtensor = subtensor, dest = "")
    # when not registered
    the_wallet.is_registered = MagicMock(return_value = False)
    the_wallet.transfer(amount = 10, subtensor = subtensor, dest = "")
zeineb/testzz | bittensor/_receptor/receptor_impl.py | <reponame>zeineb/testzz
""" Encapsulates a grpc connection to an axon endpoint as a standard auto-grad torch.nn.Module.
"""
# The MIT License (MIT)
# Copyright © 2021 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import sys
import time as clock
from types import SimpleNamespace
from typing import Tuple
import torch
import uuid
import time
import torch.nn as nn
import grpc
from loguru import logger
from grpc import _common
import bittensor
import bittensor.utils.stats as stat_utils
logger = logger.opt(colors=True)
# dummy tensor that triggers autograd in a RemoteExpert
DUMMY = torch.empty(0, requires_grad=True)
# Helper function for filling nill (zero) responses on failures.
def nill_response_for(inputs):
    """Build a zeroed stand-in response matching *inputs*' batch and
    sequence dimensions (empty input yields an empty tensor)."""
    if torch.numel(inputs) != 0:
        return torch.zeros(
            (inputs.size(0), inputs.size(1), bittensor.__network_dim__),
            dtype=torch.float32,
        )
    return torch.tensor([])
class Receptor(nn.Module):
""" Encapsulates a grpc connection to an axon endpoint as a standard auto-grad torch.nn.Module.
"""
def __init__(
self,
wallet: 'bittensor.wallet',
endpoint: 'bittensor.Endpoint',
channel: 'grpc._Channel',
stub: 'bittensor.grpc.BittensorStub',
):
r""" Initializes a receptor grpc connection.
Args:
wallet (:obj:`bittensor.Wallet`, `required`):
bittensor wallet with hotkey and coldkeypub.
endpoint (:obj:`bittensor.Endpoint`, `required`):
neuron endpoint descriptor proto.
channel (:obj:`grpc._Channel`, `required`):
grpc TCP channel.
endpoint (:obj:`bittensor.grpc.BittensorStub`, `required`):
bittensor protocol stub created from channel.
"""
super().__init__()
self.wallet = wallet # Keypair information
self.endpoint = endpoint # Endpoint information.
self.channel = channel
self.stub = stub
self.backoff = 0 # Number o queries to backoff.
self.next_backoff = 1 # Next backoff level.
self.receptor_uid = str(uuid.uuid1())
self.state_dict = _common.CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY
self.stats = SimpleNamespace(
forward_qps = stat_utils.timed_rolling_avg(0.0, 0.01),
backward_qps = stat_utils.timed_rolling_avg(0.0, 0.01),
forward_elapsed_time = stat_utils.timed_rolling_avg(0.0, 0.01),
forward_bytes_out = stat_utils.timed_rolling_avg(0.0, 0.01),
forward_bytes_in = stat_utils.timed_rolling_avg(0.0, 0.01),
backward_bytes_out = stat_utils.timed_rolling_avg(0.0, 0.01),
backward_bytes_in = stat_utils.timed_rolling_avg(0.0, 0.01),
codes = {
bittensor.proto.ReturnCode.NoReturn: 0,
bittensor.proto.ReturnCode.Success: 0,
bittensor.proto.ReturnCode.Timeout: 0,
bittensor.proto.ReturnCode.Backoff: 0,
bittensor.proto.ReturnCode.Unavailable: 0,
bittensor.proto.ReturnCode.NotImplemented: 0,
bittensor.proto.ReturnCode.EmptyRequest: 0,
bittensor.proto.ReturnCode.EmptyResponse: 0,
bittensor.proto.ReturnCode.InvalidResponse: 0,
bittensor.proto.ReturnCode.InvalidRequest: 0,
bittensor.proto.ReturnCode.RequestShapeException: 0,
bittensor.proto.ReturnCode.ResponseShapeException: 0,
bittensor.proto.ReturnCode.RequestSerializationException: 0,
bittensor.proto.ReturnCode.ResponseSerializationException: 0,
bittensor.proto.ReturnCode.RequestDeserializationException: 0,
bittensor.proto.ReturnCode.ResponseDeserializationException: 0,
bittensor.proto.ReturnCode.NotServingNucleus: 0,
bittensor.proto.ReturnCode.NucleusTimeout: 0,
bittensor.proto.ReturnCode.NucleusFull: 0,
bittensor.proto.ReturnCode.RequestIncompatibleVersion: 0,
bittensor.proto.ReturnCode.ResponseIncompatibleVersion: 0,
bittensor.proto.ReturnCode.SenderUnknown: 0,
bittensor.proto.ReturnCode.UnknownException: 0,
}
)
def __str__(self):
return "Receptor({})".format(self.endpoint)
def __repr__(self):
return self.__str__()
def __del__(self):
try:
result = self.channel._channel.check_connectivity_state(True)
if self.state_dict[result] != self.state_dict[result].SHUTDOWN:
self.channel.close()
except:
pass
def __exit__(self):
self.__del__()
def forward (
self,
inputs: torch.Tensor,
modality: bittensor.proto.Modality,
timeout: int,
) -> Tuple[torch.Tensor, int]:
r""" Torch.nn.Module forward call: Triggers the grpc call to the remote endpoint.
Call returns the output tensor and a bittensor.proto.ReturnCode.
Args:
inputs (:obj:`List[torch.Tensor]` of shape :obj:`(shape)`, `required`):
Single torch tensor to be sent to the remote endpoint.
modality (:obj:`bittensor.proto.Modality` of shape :obj:`(1)`, `required`):
Bittensor forward modality type. Enum in [TEXT, IMAGE, TENSOR]
Returns:
output (:obj:`Tuple[torch.FloatTensor, torch.LongTensor]`, `required`):
Result tuple from the forward call.
code (:obj:`bittensor.proto.ReturnCode`, `required`):
Return code associated with forward call.
time (:obj:`float`, `required`):
Time of call.
"""
outputs, code, time, _ = self._call_forward(
inputs = inputs,
modality = modality,
timeout = timeout
)
try:
self.stats.codes[code] += 1
except Exception:
pass
return outputs, code, time
def backward(
self,
inputs_x: torch.Tensor,
grads_dy: torch.Tensor,
modality: bittensor.proto.Modality,
timeout: int
) -> Tuple[ torch.Tensor, int ]:
r""" Backward call: Triggers the grpc Backward call to the associated endpoint.
Args:
inputs_x (:obj:`List[torch.Tensor]` of shape :obj:`(shape)`, `required`):
inputs from previous forward call.
grads_dy (:obj:`List[torch.Tensor]` of shape :obj:`(shape)`, `required`):
gradient outputs.
modality (:obj:`bittensor.proto.Modality` of shape :obj:`(1)`, `required`):
Bittensor forward modality type. Enum in [TEXT, IMAGE, TENSOR]
timeout (int):
request timeout.
Returns:
output (:obj:`Tuple[torch.FloatTensor, torch.LongTensor]`, `required`):
Result tuple from the forward call.
code (:obj:`bittensor.proto.ReturnCode`, `required`):
Return code associated with backward call.
time (:obj:`float`, `required`):
Time of call.
"""
outputs, code, time, _ = self._call_backward(
inputs_x = inputs_x,
grads_dy = grads_dy,
modality = modality,
timeout = timeout
)
try:
self.stats.codes[code] += 1
except Exception:
pass
return outputs, code, time
def _call_forward(
self,
inputs: torch.Tensor,
modality: bittensor.proto.Modality,
timeout: int
) -> Tuple[torch.Tensor, int, float, str]:
r""" Internal autograd-friendly Forward RPC call to a remote endpoint (calls the Forward method on an Axon terminal.)
Args:
inputs (:obj:`List[torch.Tensor]` of shape :obj:`(shape)`, `required`):
Torch tensor to be sent to this endpoint.
modality (:obj:`bittensor.proto.Modality` of shape :obj:`(1)`, `required`):
Bittensor forward modality of type Enum: [TEXT, IMAGE, TENSOR]
timeout (:type:`int`, `required`):
request timeout.
Returns:
output (:obj:`Tuple[torch.FloatTensor`, torch.LongTensor]`, `optional`):
Result from forward call. May be None in the case of failure.
code (:obj:`bittensor.proto.ReturnCode`, `required`):
Return code associated with forward call.
time (:type:`float`, `required`):
Length of call in seconds.
message (:type:`str`, `required`):
message associated with forward call, potentially error, or 'success'.
"""
start_time = clock.time()
zeros = nill_response_for(inputs)
try:
# ---- Check inputs size ----
if torch.numel(inputs) == 0:
code = bittensor.proto.ReturnCode.EmptyRequest
message = 'empty request'
call_time = clock.time() - start_time
bittensor.logging.rpc_log( axon=False, forward=True, is_response=False, code=code, call_time=call_time, pubkey=self.endpoint.hotkey, uid = self.endpoint.uid, inputs=list(inputs.shape), outputs=None, message=message )
return zeros, code, call_time, message
elif self.endpoint.uid == -1:
code = bittensor.proto.ReturnCode.EmptyRequest
message = 'bad endpoint'
call_time = clock.time() - start_time
bittensor.logging.rpc_log( axon=False, forward=True, is_response=False, code=code, call_time=call_time, pubkey=self.endpoint.hotkey, inputs=list(inputs.shape), outputs=None, message=message )
return zeros, code, call_time, message
# ---- Inputs Serialization ----
try:
serializer = bittensor.serializer( bittensor.proto.Serializer.MSGPACK )
serialized_inputs = serializer.serialize(inputs, modality = modality, from_type = bittensor.proto.TensorType.TORCH)
except Exception as e:
code = bittensor.proto.ReturnCode.RequestSerializationException
message = 'Input serialization exception with error:{}'.format(str(e))
call_time = clock.time() - start_time
bittensor.logging.rpc_log( axon=False, forward=True, is_response=False, code=code, call_time=call_time, pubkey=self.endpoint.hotkey, uid = self.endpoint.uid, inputs=list(inputs.shape), outputs=None, message=message )
return zeros, code, call_time, message
# ---- Build request ----
request = bittensor.proto.TensorMessage (
version = bittensor.__version_as_int__,
hotkey = self.wallet.hotkey.ss58_address,
tensors = [serialized_inputs],
requires_grad = True,
)
# ---- Make RPC call ----
try:
self.stats.forward_qps.update(1)
self.stats.forward_bytes_out.update(sys.getsizeof(request))
call_time = clock.time() - start_time
bittensor.logging.rpc_log( axon=False, forward=True, is_response=False, code=bittensor.proto.ReturnCode.Success, call_time=call_time, pubkey=self.endpoint.hotkey, uid = self.endpoint.uid, inputs=list(serialized_inputs.shape), outputs=None, message=None )
#forwarding grpc request to the server
response = self.stub.Forward(request = request,
timeout = timeout,
metadata = (
('rpc-auth-header','Bittensor'),
('bittensor-signature',self.sign()),
('bittensor-version',str(bittensor.__version_as_int__)),
('request_type', str(bittensor.proto.RequestType.FORWARD)),
))
self.stats.forward_bytes_in.update(sys.getsizeof(response))
self.stats.forward_elapsed_time.update((clock.time()-start_time))
# Get message
try:
response_message = response.message
except Exception:
response_message = ''
# ---- Catch non-code ----
bittensor_code = response.return_code
if bittensor_code == bittensor.proto.ReturnCode.NoReturn:
code = bittensor.proto.ReturnCode.NoReturn
message = 'no return code.'
call_time = clock.time() - start_time
bittensor.logging.rpc_log( axon=False, forward=True, is_response=True, code=code, call_time=call_time, pubkey=self.endpoint.hotkey, uid = self.endpoint.uid, inputs=list(inputs.shape), outputs=None, message=response_message )
return zeros, code, call_time, message
# ---- Catch bittensor errors ----
if bittensor_code == bittensor.proto.ReturnCode.UnknownException:
call_time = clock.time() - start_time
bittensor.logging.rpc_log( axon=False, forward=True, is_response=True, code=bittensor_code, call_time=call_time, pubkey=self.endpoint.hotkey, uid = self.endpoint.uid, inputs=list(inputs.shape), outputs=None, message=response_message )
return zeros, bittensor_code, clock.time() - start_time, response.message
elif bittensor_code != bittensor.proto.ReturnCode.Success:
call_time = clock.time() - start_time
bittensor.logging.rpc_log( axon=False, forward=True, is_response=True, code=bittensor_code, call_time=call_time, pubkey=self.endpoint.hotkey, uid = self.endpoint.uid, inputs=list(inputs.shape), outputs=None, message=response_message)
return zeros, bittensor_code, call_time, response.message
# ---- Catch GRPC Errors ----
except grpc.RpcError as rpc_error_call:
grpc_code = rpc_error_call.code()
if grpc_code == grpc.StatusCode.DEADLINE_EXCEEDED:
code = bittensor.proto.ReturnCode.Timeout
message = 'grpc.StatusCode.DEADLINE_EXCEEDED'+': '+ rpc_error_call.details()
call_time = clock.time() - start_time
bittensor.logging.rpc_log(axon=False, forward=True, is_response=True, code=code, call_time=call_time, pubkey=self.endpoint.hotkey, uid = self.endpoint.uid, inputs=list(inputs.shape), outputs=None, message=message)
return zeros, code, call_time, message
elif grpc_code == grpc.StatusCode.UNAVAILABLE:
code = bittensor.proto.ReturnCode.Unavailable
message = 'grpc.StatusCode.UNAVAILABLE'+': '+ rpc_error_call.details()
call_time = clock.time() - start_time
bittensor.logging.rpc_log(axon=False, forward=True, is_response=True, code=code, call_time=call_time, pubkey=self.endpoint.hotkey, uid = self.endpoint.uid, inputs=list(inputs.shape), outputs=None, message=message)
return zeros, code, call_time, message
elif grpc_code == grpc.StatusCode.UNAUTHENTICATED:
code = bittensor.proto.ReturnCode.Unauthenticated
message = 'grpc.StatusCode.UNAUTHENTICATED'+': '+ rpc_error_call.details()
call_time = clock.time() - start_time
bittensor.logging.rpc_log(axon=False, forward=True, is_response=True, code=code, call_time=call_time, pubkey=self.endpoint.hotkey, uid = self.endpoint.uid, inputs=list(inputs.shape), outputs=None, message=message)
return zeros, code, call_time, message
else:
code = bittensor.proto.ReturnCode.UnknownException
message = 'GRPC error code: {}, details: {}'.format( grpc_code, str(rpc_error_call.details()) )
call_time = clock.time() - start_time
bittensor.logging.rpc_log(axon=False, forward=True, is_response=True, code=code, call_time=call_time, pubkey=self.endpoint.hotkey, uid = self.endpoint.uid, inputs=list(inputs.shape), outputs=None, message=message)
return zeros, code, call_time, message
# ---- Catch Unknown Errors ----
except Exception as e:
code = bittensor.proto.ReturnCode.UnknownException
message = str(e)
call_time = clock.time() - start_time
bittensor.logging.rpc_log(axon=False, forward=True, is_response=True, code=code, call_time=call_time, pubkey=self.endpoint.hotkey, uid = self.endpoint.uid, inputs=list(inputs.shape), outputs=None, message=message)
return zeros, code, call_time, message
# ---- Check tensor response length ----
if len(response.tensors) == 0:
code = bittensor.proto.ReturnCode.EmptyResponse
message = 'no tensors in response'
call_time = clock.time() - start_time
bittensor.logging.rpc_log(axon=False, forward=True, is_response=True, code=code, call_time=call_time, pubkey=self.endpoint.hotkey, uid = self.endpoint.uid, inputs=list(inputs.shape), outputs=None, message=message)
return zeros, code, call_time, message
# ---- Deserialize response ----
try:
outputs = response.tensors[0]
deserializer = bittensor.serializer( outputs.serializer )
outputs = deserializer.deserialize( outputs, to_type = bittensor.proto.TensorType.TORCH )
except Exception as e:
code = bittensor.proto.ReturnCode.ResponseDeserializationException
message = 'deserialziation exception with error:{}'.format(str(e))
call_time = clock.time() - start_time
bittensor.logging.rpc_log(axon=False, forward=True, is_response=True, code=code, call_time=call_time, pubkey=self.endpoint.hotkey, uid = self.endpoint.uid, inputs=list(inputs.shape), outputs=None, message=message)
return zeros, code, call_time, message
# ---- Check response shape ----
if (
outputs.size(0) != inputs.size(0) or
outputs.size(1) != inputs.size(1) or
outputs.size(2) != bittensor.__network_dim__
):
code = bittensor.proto.ReturnCode.ResponseShapeException
message = "output.shape:{} does not match inputs:{}".format(outputs.shape, inputs.shape)
call_time = clock.time() - start_time
bittensor.logging.rpc_log(axon=False, forward=True, is_response=True, code=code, call_time=call_time, pubkey=self.endpoint.hotkey, uid = self.endpoint.uid, inputs=list(inputs.shape), outputs=list(outputs.shape), message=message)
return zeros, code, call_time, message
# ---- Safe catch NaNs and replace with 0.0 ----
outputs = torch.where(torch.isnan(outputs), torch.zeros_like(outputs), outputs)
# ---- Catch all ----
except Exception as e:
code = bittensor.proto.ReturnCode.UnknownException
message = 'exception in forward call: {}'.format(e)
call_time = clock.time() - start_time
bittensor.logging.rpc_log(axon=False, forward=True, is_response=True, code=code, call_time=call_time, pubkey=self.endpoint.hotkey, uid = self.endpoint.uid, inputs=list(inputs.shape), outputs=None, message=message)
return zeros, code, call_time, message
# ---- Return ----
code = response.return_code
message = response_message
call_time = clock.time() - start_time
bittensor.logging.rpc_log(axon=False, forward=True, is_response=True, code=code, call_time=call_time, pubkey=self.endpoint.hotkey, uid = self.endpoint.uid, inputs=list(inputs.shape), outputs=list(outputs.shape), message=response_message)
return outputs, code, call_time, message
    def _call_backward(
        self,
        inputs_x: torch.Tensor,
        grads_dy: torch.FloatTensor,
        modality: bittensor.proto.Modality,
        timeout: int
    ) -> Tuple[torch.Tensor, int, float, str]:
        """ Checks and makes an RPC Backward call to a remote neuron (calls the Backward
            method on an Axon terminal of the endpoint), returning gradients w.r.t. the inputs.

            Args:
                inputs_x (:obj:`torch.Tensor` of shape :obj:`(shape)`, `required`):
                    Torch tensor originally sent to the endpoint on the forward call.
                grads_dy (:obj:`torch.FloatTensor` of shape :obj:`(shape)`, `required`):
                    Gradients of this function's outputs computed during the loss.backward() call.
                modality (:obj:`bittensor.proto.Modality`, `required`):
                    Modality with which inputs_x is serialized (TEXT, IMAGE or TENSOR).
                timeout (int):
                    request timeout.
            Returns:
                outputs (:obj:`torch.Tensor`, `required`):
                    Gradients of the inputs with respect to the inputs and grads of the outputs,
                    or a zero tensor of the same shape as inputs_x on failure.
                code (:obj:`bittensor.proto.ReturnCode`, `required`):
                    Return code associated with backward call.
                time (:type:`float`, `required`):
                    Length of call in seconds.
                message (:type:`str`, `required`):
                    Message associated with backward call, potentially error, or 'success'.
        """
        start_time = clock.time()
        # ---- Zeros response in the case of failure ----
        # All failure paths below return this zero tensor so callers always get a
        # gradient-shaped tensor back.
        zeros = nill_response_for( inputs_x )
        # ---- Check inputs size ----
        if torch.numel( inputs_x ) == 0:
            code = bittensor.proto.ReturnCode.EmptyRequest
            message = 'empty request'
            call_time = clock.time() - start_time
            bittensor.logging.rpc_log(axon=False, forward=False, is_response=False, code=code, call_time=call_time, pubkey=self.endpoint.hotkey, uid = self.endpoint.uid, inputs=list(grads_dy.shape), outputs=None, message=message)
            return zeros, code, call_time, message
        # NOTE(review): a bad endpoint is reported with the EmptyRequest code, and this
        # rpc_log omits the uid kwarg unlike its siblings — confirm both are intentional.
        if self.endpoint.uid == -1:
            code = bittensor.proto.ReturnCode.EmptyRequest
            message = 'bad endpoint'
            call_time = clock.time() - start_time
            bittensor.logging.rpc_log(axon=False, forward=False, is_response=False, code=code, call_time=call_time, pubkey=self.endpoint.hotkey, inputs=list(grads_dy.shape), outputs=None, message=message )
            return zeros, code, call_time, message
        # ---- Check grads size ----
        if torch.numel( grads_dy ) == 0:
            code = bittensor.proto.ReturnCode.EmptyRequest
            message = 'empty request'
            call_time = clock.time() - start_time
            bittensor.logging.rpc_log(axon=False, forward=False, is_response=False, code=code, call_time=call_time, pubkey=self.endpoint.hotkey, uid = self.endpoint.uid, inputs=list(grads_dy.shape), outputs=None, message=message)
            return zeros, code, call_time, message
        # ---- Serialization ----
        # inputs keep the caller-supplied modality; gradients are always TENSOR modality.
        try:
            serializer = bittensor.serializer( bittensor.proto.Serializer.MSGPACK )
            serialized_inputs = serializer.serialize (inputs_x, modality = modality, from_type = bittensor.proto.TensorType.TORCH )
            serialized_grads = serializer.serialize (grads_dy, modality = bittensor.proto.Modality.TENSOR, from_type = bittensor.proto.TensorType.TORCH )
        except Exception as e:
            code = bittensor.proto.ReturnCode.RequestSerializationException
            message = 'serializer exception with error:{}'.format(e)
            call_time = clock.time() - start_time
            bittensor.logging.rpc_log(axon=False, forward=False, is_response=False, code=code, call_time=call_time, pubkey=self.endpoint.hotkey, uid = self.endpoint.uid, inputs=list(grads_dy.shape), outputs=None, message=message)
            return zeros, code, call_time, message
        # ---- Make RPC call ----
        try:
            request = bittensor.proto.TensorMessage(
                version = bittensor.__version_as_int__,
                hotkey = self.wallet.hotkey.ss58_address,
                tensors = [serialized_inputs, serialized_grads],
                requires_grad = True,
            )
            call_time = clock.time() - start_time
            bittensor.logging.rpc_log(axon=False, forward=False, is_response=False, code=bittensor.proto.ReturnCode.Success, call_time=call_time, pubkey=self.endpoint.hotkey, uid = self.endpoint.uid, inputs=list(grads_dy.shape), outputs=None, message=None)
            response = self.stub.Backward(request = request,
                                          timeout = timeout,
                                          metadata = (
                                              ('rpc-auth-header','Bittensor'),
                                              ('bittensor-signature',self.sign()),
                                              ('bittensor-version',str(bittensor.__version_as_int__)),
                                              ('request_type', str(bittensor.proto.RequestType.BACKWARD)),
                                              ))
            # Get message
            try:
                response_message = response.message
            except Exception:
                response_message = ''
        # ---- Catch GRPC Errors ----
        # Map the common grpc status codes onto bittensor return codes; everything
        # else collapses into UnknownException.
        except grpc.RpcError as e:
            if e.code() == grpc.StatusCode.DEADLINE_EXCEEDED:
                code = bittensor.proto.ReturnCode.Timeout
                message = 'grpc.StatusCode.DEADLINE_EXCEEDED'+': '+ e.details()
                call_time = clock.time() - start_time
                bittensor.logging.rpc_log(axon=False, forward=False, is_response=True, code=code, call_time=call_time, pubkey=self.endpoint.hotkey, uid = self.endpoint.uid, inputs=list(grads_dy.shape), outputs=None, message=message)
                return zeros, code, call_time, message
            elif e.code() == grpc.StatusCode.UNAVAILABLE:
                code = bittensor.proto.ReturnCode.Unavailable
                message = 'grpc.StatusCode.UNAVAILABLE'+': '+ e.details()
                call_time = clock.time() - start_time
                bittensor.logging.rpc_log(axon=False, forward=False, is_response=True, code=code, call_time=call_time, pubkey=self.endpoint.hotkey, uid = self.endpoint.uid, inputs=list(grads_dy.shape), outputs=None, message=message)
                return zeros, code, call_time, message
            elif e.code() == grpc.StatusCode.UNAUTHENTICATED:
                code = bittensor.proto.ReturnCode.Unauthenticated
                message = 'grpc.StatusCode.UNAUTHENTICATED'+': '+ e.details()
                call_time = clock.time() - start_time
                bittensor.logging.rpc_log(axon=False, forward=False, is_response=True, code=code, call_time=call_time, pubkey=self.endpoint.hotkey, uid = self.endpoint.uid, inputs=list(grads_dy.shape), outputs=None, message=message)
                return zeros, code, call_time, message
            else:
                code = bittensor.proto.ReturnCode.UnknownException
                message = 'grpc error code:{}, details: {}'.format(str(e.code()), str(e.details()))
                call_time = clock.time() - start_time
                bittensor.logging.rpc_log(axon=False, forward=False, is_response=True, code=code, call_time=call_time, pubkey=self.endpoint.hotkey, uid = self.endpoint.uid, inputs=list(grads_dy.shape), outputs=None, message=message)
                return zeros, code, call_time, message
        # ---- Catch Unknown RPC Errors ----
        except Exception as e:
            code = bittensor.proto.ReturnCode.UnknownException
            message = str(e)
            call_time = clock.time() - start_time
            bittensor.logging.rpc_log(axon=False, forward=False, is_response=True, code=code, call_time=call_time, pubkey=self.endpoint.hotkey, uid = self.endpoint.uid, inputs=list(grads_dy.shape), outputs=None, message=message)
            return zeros, code, call_time, message
        # ---- Catch Code Errors ----
        try:
            bittensor_code = response.return_code
        except:
            bittensor_code = bittensor.proto.ReturnCode.NoReturn
        if bittensor_code == bittensor.proto.ReturnCode.NoReturn:
            code = bittensor.proto.ReturnCode.NoReturn
            message = 'no response code.'
            call_time = clock.time() - start_time
            bittensor.logging.rpc_log(axon=False, forward=False, is_response=True, code=code, call_time=call_time, pubkey=self.endpoint.hotkey, uid = self.endpoint.uid, inputs=list(grads_dy.shape), outputs=None, message=message)
            return zeros, code, call_time, message
        # ---- Catch negative codes ----
        if bittensor_code != bittensor.proto.ReturnCode.Success:
            code = bittensor_code
            message = response_message
            call_time = clock.time() - start_time
            bittensor.logging.rpc_log(axon=False, forward=False, is_response=True, code=code, call_time=call_time, pubkey=self.endpoint.hotkey, uid = self.endpoint.uid, inputs=list(grads_dy.shape), outputs=None, message=response_message)
            return zeros, code, call_time, message
        # ---- Check for empty response ----
        if len(response.tensors) == 0:
            code = bittensor.proto.ReturnCode.EmptyResponse
            message = 'empty response tensor.'
            call_time = clock.time() - start_time
            bittensor.logging.rpc_log(axon=False, forward=False, is_response=True, code=code, call_time=call_time, pubkey=self.endpoint.hotkey, uid = self.endpoint.uid, inputs=list(grads_dy.shape), outputs=None, message=message)
            return zeros, code, call_time, message
        # ---- Post-process request ----
        # Deserialize the first returned tensor using the serializer the remote chose.
        try:
            outputs = response.tensors[0]
            deserializer = bittensor.serializer( outputs.serializer )
            outputs = deserializer.deserialize( outputs, to_type = bittensor.proto.TensorType.TORCH )
        except Exception as e:
            code = bittensor.proto.ReturnCode.ResponseDeserializationException
            message = 'deserialization exception with error:{}'.format(e)
            call_time = clock.time() - start_time
            bittensor.logging.rpc_log(axon=False, forward=False, is_response=True, code=code, call_time=call_time, pubkey=self.endpoint.hotkey, uid = self.endpoint.uid, inputs=list(grads_dy.shape), outputs=None, message=message)
            return zeros, code, call_time, message
        try:
            # ---- Check response shape is same as inputs ----
            # Input gradients must match the input tensor shape exactly.
            if outputs.size() != inputs_x.size():
                code = bittensor.proto.ReturnCode.ResponseShapeException
                message = 'output shape does not match inputs shape'
                call_time = clock.time() - start_time
                bittensor.logging.rpc_log(axon=False, forward=False, is_response=True, code=code, call_time=call_time, pubkey=self.endpoint.hotkey, uid = self.endpoint.uid, inputs=list(grads_dy.shape), outputs=list(outputs.shape), message=message)
                return zeros, code, call_time, message
        except Exception as e:
            code = bittensor.proto.ReturnCode.UnknownException
            message = 'Size Error: {}'.format(e)
            call_time = clock.time() - start_time
            bittensor.logging.rpc_log(axon=False, forward=False, is_response=True, code=code, call_time=call_time, pubkey=self.endpoint.hotkey, inputs=list(grads_dy.shape), outputs=None, message=message )
            return zeros, code, call_time, message
        # ---- Safe catch NaNs and replace with 0.0 ----
        outputs = torch.where(torch.isnan(outputs), torch.zeros_like(outputs), outputs)
        # ---- Return ----
        code = bittensor.proto.ReturnCode.Success
        message = 'success'
        call_time = clock.time() - start_time
        bittensor.logging.rpc_log(axon=False, forward=False, is_response=True, code=code, call_time=call_time, pubkey=self.endpoint.hotkey, uid = self.endpoint.uid, inputs=list(grads_dy.shape), outputs=list(outputs.shape), message=response_message)
        return outputs, code, clock.time() - start_time, message
def sign(self):
r""" Uses the wallet pubkey to sign a message containing the pubkey and the time
"""
nounce = self.nounce()
message = str(nounce) + str(self.wallet.hotkey.ss58_address) + str(self.receptor_uid)
spliter = 'bitxx'
signature = spliter.join([ str(nounce), str(self.wallet.hotkey.ss58_address), self.wallet.hotkey.sign(message), str(self.receptor_uid) ])
return signature
def nounce(self):
r"""creates a string representation of the time
"""
nounce = int(time.time() * 1000)
return nounce
def state(self):
try:
return self.state_dict[self.channel._channel.check_connectivity_state(True)]
except ValueError:
return "Channel closed" |
zeineb/testzz | bittensor/_neuron/text/template_validator/nucleus_impl.py | <reponame>zeineb/testzz<filename>bittensor/_neuron/text/template_validator/nucleus_impl.py
import bittensor
import torch
from torch.nn import TransformerEncoder, TransformerEncoderLayer
import torch.nn.functional as F
class Validator( torch.nn.Module ):
    """ Validator nucleus: queries a top-k subset of active peers through the dendrite,
    mixes their responses by softmax-normalized learned peer weights, and decodes the
    mixture through a transformer encoder into vocabulary logits for a causal-LM loss.
    """

    def __init__(self, config, metagraph, dendrite, device):
        """
        Args:
            config: bittensor config; reads config.nucleus.* and config.neuron.device.
            metagraph: callable returning the current metagraph.
            dendrite: dendrite used for remote forward_text queries.
            device: torch device for the output mixture and peer weights.
        """
        super(Validator, self).__init__()
        self.layers = TransformerEncoderLayer( bittensor.__network_dim__, config.nucleus.nhead, config.nucleus.nhid, config.nucleus.dropout )
        self.encoder = TransformerEncoder( self.layers, config.nucleus.nlayers )
        self.decoder = torch.nn.Linear( bittensor.__network_dim__, bittensor.__vocab_size__ , bias=False)
        self.loss_fct = torch.nn.CrossEntropyLoss()
        # One learnable weight per peer in the metagraph.
        self.peer_weights = torch.nn.Parameter(torch.ones( [ metagraph().n.item() ] , requires_grad=True, device = device))
        # Small constant keeping the query noise std strictly positive.
        self.noise_offset = 0.0000001
        self.metagraph = metagraph
        self.dendrite = dendrite
        self.config = config
        self.device = device

    def forward ( self, inputs ):
        """ Queries the network, encodes and decodes, and computes the shifted
        next-token cross-entropy loss. Returns (loss, decoded_targets). """
        # Apply model.
        query_hidden = self.query( inputs )
        encoded_hidden = self.encoder( query_hidden )
        decoded_targets = self.decoder ( encoded_hidden )
        # Compute loss: predict token t+1 from position t.
        shift_logits = decoded_targets[..., :-1, :].contiguous()
        shift_labels = inputs[..., 1:].contiguous()
        self.loss = self.loss_fct( shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1) )
        return self.loss, decoded_targets

    def scores ( self ):
        """Computes salience scores for each peer in the network w.r.t the loss.
        We use a simplified fishers information score. score_i = hessian_ii * peer_weight_i^2
        """
        peer_weights_d1 = torch.autograd.grad(self.loss, self.peer_weights, create_graph=True, retain_graph=True, allow_unused=True)[0]
        # BUG FIX: identity check with `is None` instead of `== None` (grad is None
        # when the loss does not depend on the chain weights).
        if peer_weights_d1 is None: return torch.ones_like( self.peer_weights ) * (1 / self.metagraph().n.item()) # None if no grad w.r.t the chain weights.
        peer_weights_d2 = torch.autograd.grad(peer_weights_d1.sum(), self.peer_weights, retain_graph=True, allow_unused=True )[0]
        validator_scores = peer_weights_d2 * (self.peer_weights**2)/2
        return validator_scores

    def query ( self, inputs ):
        """ Selects the noisy top-k active peers, queries them with inputs, and returns
        the softmax-weighted mixture of successful responses. Peers whose call fails are
        punished by decrementing their weight (floored at -1). """
        # ---- Get active peers and their weights ----
        active_uids = torch.where(self.metagraph().active > 0)[0]
        active_peer_weights = self.peer_weights[active_uids]
        # ---- Topk Weights ---- (TODO: check if the gaussians are enough disrupt the chain weights)
        real_topk = min( self.config.nucleus.topk, self.metagraph().n.item(), len(active_uids))
        # NOTE(review): noise is placed on config.neuron.device while the mixture uses
        # self.device — confirm these are always the same device.
        noise = torch.normal( 0, torch.std(active_peer_weights).item()+self.noise_offset, size=( active_peer_weights.size())).to( self.config.neuron.device )
        topk_weights, topk_idx = torch.topk(active_peer_weights + noise , real_topk, dim=0)
        topk_uids = active_uids[topk_idx]
        # ---- Query network ----
        responses, return_ops, query_times = self.dendrite.forward_text (
            endpoints = self.metagraph().endpoints[ topk_uids ],
            inputs = inputs
        )
        # ---- Join based on weights ----
        joining_uids = torch.where(return_ops== bittensor.proto.ReturnCode.Success)[0]
        joining_weights = F.softmax( topk_weights[(return_ops == bittensor.proto.ReturnCode.Success)], dim = 0 )
        output = torch.zeros( (inputs.shape[0], inputs.shape[1], bittensor.__network_dim__)).to( self.device )
        for index, joining_weight in enumerate( joining_weights ):
            output += responses[joining_uids[index]].to( self.device ) * joining_weight
        # ---- Punish peers with non-successful return ops ----
        with torch.no_grad():
            self.peer_weights[topk_uids[(return_ops != bittensor.proto.ReturnCode.Success)]] -= self.config.nucleus.punishment
            self.peer_weights[ self.peer_weights < -1 ] = -1 # lower bound for chain weights
        return output
zeineb/testzz | bittensor/_config/config_impl.py | <gh_stars>0
"""
Implementation of the config class, which manages the config of different bittensor modules.
"""
# The MIT License (MIT)
# Copyright © 2021 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import yaml
from munch import Munch
import bittensor
class Config ( Munch ):
    """
    Implementation of the config class, which manages the config of different bittensor modules.
    """
    def __init__(self, loaded_config = None ):
        super().__init__()
        if loaded_config:
            raise NotImplementedError('Function load_from_relative_path is not fully implemented.')

    def __repr__(self) -> str:
        return self.__str__()

    def __str__(self) -> str:
        # Render the whole config as a YAML document.
        return "\n" + yaml.dump(self.toDict())

    def to_string(self, items) -> str:
        """ Get string from items
        """
        return "\n" + yaml.dump(items.toDict())

    def update_with_kwargs( self, kwargs ):
        """ Add config to self
        """
        for key,val in kwargs.items():
            self[key] = val

    def to_defaults(self):
        """ Copies this config's known sections back onto bittensor.defaults. """
        try:
            if 'axon' in self.keys():
                bittensor.defaults.axon.port = self.axon.port
                bittensor.defaults.axon.ip = self.axon.ip
                bittensor.defaults.axon.max_workers = self.axon.max_workers
                bittensor.defaults.axon.maximum_concurrent_rpcs = self.axon.maximum_concurrent_rpcs

            if 'dataset' in self.keys():
                bittensor.defaults.dataset.batch_size = self.dataset.batch_size
                bittensor.defaults.dataset.block_size = self.dataset.block_size
                bittensor.defaults.dataset.max_corpus_size = self.dataset.max_corpus_size
                bittensor.defaults.dataset.num_workers = self.dataset.num_workers
                bittensor.defaults.dataset.dataset_name = self.dataset.dataset_name
                bittensor.defaults.dataset.data_dir = self.dataset.data_dir
                bittensor.defaults.dataset.save_dataset = self.dataset.save_dataset

            if 'dendrite' in self.keys():
                # BUG FIX: this branch previously re-applied the *dataset* settings
                # (copy-paste error) and never touched the dendrite defaults. Copy the
                # dendrite section generically since its exact fields vary.
                for key, val in self.dendrite.items():
                    setattr(bittensor.defaults.dendrite, key, val)

            if 'logging' in self.keys():
                bittensor.defaults.logging.debug = self.logging.debug
                bittensor.defaults.logging.trace = self.logging.trace
                bittensor.defaults.logging.record_log = self.logging.record_log
                bittensor.defaults.logging.logging_dir = self.logging.logging_dir

            if 'subtensor' in self.keys():
                bittensor.defaults.subtensor.network = self.subtensor.network
                bittensor.defaults.subtensor.chain_endpoint = self.subtensor.chain_endpoint

            if 'threadpool' in self.keys():
                bittensor.defaults.threadpool.max_workers = self.threadpool.max_workers
                bittensor.defaults.threadpool.maxsize = self.threadpool.maxsize

            if 'wallet' in self.keys():
                bittensor.defaults.wallet.name = self.wallet.name
                bittensor.defaults.wallet.hotkey = self.wallet.hotkey
                bittensor.defaults.wallet.path = self.wallet.path

            if 'wandb' in self.keys():
                bittensor.defaults.wandb.name = self.wandb.name
                bittensor.defaults.wandb.project = self.wandb.project
                bittensor.defaults.wandb.tags = self.wandb.tags
                bittensor.defaults.wandb.run_group = self.wandb.run_group
                bittensor.defaults.wandb.directory = self.wandb.directory
                bittensor.defaults.wandb.offline = self.wandb.offline
        except Exception as e:
            print('Error when loading config into defaults {}'.format(e))
zeineb/testzz | tests/unit_tests/bittensor_tests/test_dendrite.py |
from bittensor._endpoint import endpoint
import torch
import pytest
import time
import bittensor
# Module-level fixtures shared by every test: a throwaway wallet (keys are
# regenerated on every run), a dendrite built on it, and a self-addressed
# endpoint. Note: creating the keys writes to /tmp/pytest.
wallet = bittensor.wallet(
    path = '/tmp/pytest',
    name = 'pytest',
    hotkey = 'pytest',
)
wallet.create_new_coldkey(use_password=False, overwrite = True)
wallet.create_new_hotkey(use_password=False, overwrite = True)
dendrite = bittensor.dendrite( wallet = wallet )
# Endpoint pointing at a port nothing listens on, so calls return Unavailable.
neuron_obj = bittensor.endpoint(
    version = bittensor.__version_as_int__,
    uid = 0,
    ip = '0.0.0.0',
    ip_type = 4,
    port = 12345,
    hotkey = dendrite.wallet.hotkey.ss58_address,
    coldkey = dendrite.wallet.coldkey.ss58_address,
    modality = 0
)
# --- forward_text response-shape tests: each asserts the stacked responses are
# [n_endpoints, batch, sequence, __network_dim__] for various input encodings. ---

def test_dendrite_forward_text_endpoints_tensor():
    # A single endpoint given in tensor form.
    endpoints = neuron_obj.to_tensor()
    x = torch.tensor( [[ 1,2,3 ], [ 1,2,3 ]] )
    resp1,  _, _ = dendrite.forward_text( endpoints, x )
    assert list(torch.stack(resp1, dim=0).shape) == [1, 2, 3, bittensor.__network_dim__]
    assert dendrite.stats.total_requests == 1
    assert dendrite.to_wandb()['dendrite_total_requests'] == 1

def test_dendrite_forward_text_multiple_endpoints_tensor():
    # Two endpoints stacked into one tensor.
    endpoints_1 = neuron_obj.to_tensor()
    endpoints_2 = neuron_obj.to_tensor()
    endpoints = torch.stack( [endpoints_1, endpoints_2], dim=0)
    x = torch.tensor( [[ 1,2,3 ], [ 1,2,3 ]] )
    resp1,  _, _ = dendrite.forward_text( endpoints, x )
    assert list(torch.stack(resp1, dim=0).shape) == [2, 2, 3, bittensor.__network_dim__]

def test_dendrite_forward_text_multiple_endpoints_tensor_list():
    # Mixed list of a stacked endpoint tensor and a single endpoint tensor.
    endpoints_1 = neuron_obj.to_tensor()
    endpoints_2 = neuron_obj.to_tensor()
    endpoints_3 = neuron_obj.to_tensor()
    endpoints = [torch.stack( [endpoints_1, endpoints_2], dim=0), endpoints_3]
    x = torch.tensor( [[ 1,2,3 ], [ 1,2,3 ]] )
    resp1,  _, _ = dendrite.forward_text( endpoints, x )
    assert list(torch.stack(resp1, dim=0).shape) == [3, 2, 3, bittensor.__network_dim__]

def test_dendrite_forward_text_singular():
    # 2D input broadcast over one or more endpoint objects; a list input whose
    # length differs from the endpoint list must raise.
    x = torch.tensor( [[ 1,2,3 ], [ 1,2,3 ]] )
    resp1,  _, _ = dendrite.forward_text( [neuron_obj], x )
    assert list(torch.stack(resp1, dim=0).shape) == [1, 2, 3, bittensor.__network_dim__]
    resp2,  _, _ = dendrite.forward_text( [neuron_obj], [x] )
    assert list(torch.stack(resp2, dim=0).shape) == [1, 2, 3, bittensor.__network_dim__]
    resp3,  _, _ = dendrite.forward_text( [neuron_obj, neuron_obj], x )
    assert list(torch.stack(resp3, dim=0).shape) == [2, 2, 3, bittensor.__network_dim__]
    with pytest.raises(ValueError):
        dendrite.forward_text( [neuron_obj, neuron_obj], [x] )

def test_dendrite_forward_text_singular_no_batch_size():
    # 1D input is promoted to batch size 1.
    x = torch.tensor( [ 1,2,3 ] )
    resp1,  _, _ = dendrite.forward_text( [neuron_obj], x )
    assert list(torch.stack(resp1, dim=0).shape) == [1, 1, 3, bittensor.__network_dim__]
    resp2,  _, _ = dendrite.forward_text( [neuron_obj], [x] )
    assert list(torch.stack(resp2, dim=0).shape) == [1, 1, 3, bittensor.__network_dim__]
    resp3,  _, _ = dendrite.forward_text( [neuron_obj, neuron_obj], x )
    assert list(torch.stack(resp3, dim=0).shape) == [2, 1, 3, bittensor.__network_dim__]
    with pytest.raises(ValueError):
        dendrite.forward_text( [neuron_obj, neuron_obj], [x] )

def test_dendrite_forward_text_tensor_list_singular():
    # A list of 1D tensors requires a matching number of endpoints.
    x = [ torch.tensor( [ 1,2,3 ] ) for _ in range(2) ]
    with pytest.raises(ValueError):
        resp1,  _, _ = dendrite.forward_text( [neuron_obj], x )
    resp1,  _, _ = dendrite.forward_text( [neuron_obj, neuron_obj], x )
    assert list(torch.stack(resp1, dim=0).shape) == [2, 1, 3, bittensor.__network_dim__]

def test_dendrite_forward_text_tensor_list():
    # A list of 2D tensors requires a matching number of endpoints.
    x = [ torch.tensor( [[ 1,2,3 ], [ 1,2,3 ]] ) for _ in range(2) ]
    with pytest.raises(ValueError):
        resp1,  _, _ = dendrite.forward_text( [neuron_obj], x )
    resp1,  _, _ = dendrite.forward_text( [neuron_obj, neuron_obj], x )
    assert list(torch.stack(resp1, dim=0).shape) == [2, 2, 3, bittensor.__network_dim__]

def test_dendrite_forward_text_singular_string():
    # A raw string is tokenized; "the cat" tokenizes to sequence length 2 here.
    x = "the cat"
    resp1,  _, _ = dendrite.forward_text( [neuron_obj], x )
    assert list(torch.stack(resp1, dim=0).shape) == [1, 1, 2, bittensor.__network_dim__]
    resp2,  _, _ = dendrite.forward_text( [neuron_obj], [x] )
    assert list(torch.stack(resp2, dim=0).shape) == [1, 1, 2, bittensor.__network_dim__]
    resp3,  _, _ = dendrite.forward_text( [neuron_obj, neuron_obj], x )
    assert list(torch.stack(resp3, dim=0).shape) == [2, 1, 2, bittensor.__network_dim__]
    resp4,  _, _ = dendrite.forward_text( [neuron_obj, neuron_obj], [x] )
    assert list(torch.stack(resp4, dim=0).shape) == [2, 1, 2, bittensor.__network_dim__]

def test_dendrite_forward_text_list_string():
    # Multiple strings are padded to the longest tokenization (length 9 here).
    x = ["the cat", 'the dog', 'the very long sentence that needs to be padded']
    resp1,  _, _ = dendrite.forward_text( [neuron_obj], x )
    assert list(torch.stack(resp1, dim=0).shape) == [1, 3, 9, bittensor.__network_dim__]
    resp2,  _, _ = dendrite.forward_text( [neuron_obj, neuron_obj], x )
    assert list(torch.stack(resp2, dim=0).shape) == [2, 3, 9, bittensor.__network_dim__]
# --- invalid-argument tests: each triplet checks that forward_tensor /
# forward_image / forward_text reject a malformed call with ValueError.
# BUG FIX: every `..._text_...` test below previously called forward_image
# (copy-paste error), so forward_text's validation was never exercised.

def test_dendrite_forward_tensor_shape_error():
    # Wrong last dimension (must be bittensor.__network_dim__).
    x = torch.rand(3, 3, 3, dtype=torch.float32)
    with pytest.raises(ValueError):
        dendrite.forward_tensor( [neuron_obj], [x])

def test_dendrite_forward_image_shape_error():
    # Images must be 5D (batch, sequence, channel, row, col).
    x = torch.rand(3, 3, 3, dtype=torch.float32)
    with pytest.raises(ValueError):
        dendrite.forward_image( [neuron_obj], [x])

def test_dendrite_forward_text_shape_error():
    # Text must be at most 2D (batch, sequence).
    x = torch.zeros((3, 3, 3), dtype=torch.int64)
    with pytest.raises(ValueError):
        dendrite.forward_text( [neuron_obj], [x])

def test_dendrite_forward_tensor_type_error():
    # Tensors must be float32, not int32.
    x = torch.zeros(3, 3, bittensor.__network_dim__, dtype=torch.int32)
    with pytest.raises(ValueError):
        dendrite.forward_tensor( [neuron_obj], x)

def test_dendrite_forward_image_type_error():
    # Images must be float typed, not int64.
    x = torch.tensor([ [ [ [ [ 1 ] ] ] ] ], dtype=torch.int64)
    with pytest.raises(ValueError):
        dendrite.forward_image( [neuron_obj], x)

def test_dendrite_forward_text_type_error():
    # Text must be integer token ids, not float32.
    x = torch.tensor([[1,2,3,4],[5,6,7,8]], dtype=torch.float32)
    with pytest.raises(ValueError):
        dendrite.forward_text( [neuron_obj], x)

def test_dendrite_forward_tensor_endpoint_type_error():
    # Endpoints must be endpoint objects/tensors, not arbitrary dicts.
    x = torch.rand(3, 3, bittensor.__network_dim__, dtype=torch.float32)
    with pytest.raises(ValueError):
        dendrite.forward_tensor( [dict()], [x])

def test_dendrite_forward_image_endpoint_type_error():
    x = torch.tensor([ [ [ [ [ 1 ] ] ] ] ], dtype=torch.float32)
    with pytest.raises(ValueError):
        dendrite.forward_image( [dict()], [x])

def test_dendrite_forward_text_endpoint_type_error():
    x = torch.tensor([[1,2,3,4],[5,6,7,8]], dtype=torch.long)
    with pytest.raises(ValueError):
        dendrite.forward_text( [dict()], [x])

def test_dendrite_forward_tensor_endpoint_len_error():
    # Empty endpoint list is rejected.
    x = torch.rand(3, 3, bittensor.__network_dim__, dtype=torch.float32)
    with pytest.raises(ValueError):
        dendrite.forward_tensor( [], [x])

def test_dendrite_forward_image_endpoint_len_error():
    x = torch.tensor([ [ [ [ [ 1 ] ] ] ] ], dtype=torch.float32)
    with pytest.raises(ValueError):
        dendrite.forward_image( [], [x])

def test_dendrite_forward_text_endpoint_len_error():
    x = torch.tensor([[1,2,3,4],[5,6,7,8]], dtype=torch.long)
    with pytest.raises(ValueError):
        dendrite.forward_text( [], [x])

def test_dendrite_forward_tensor_input_len_error():
    # Empty input list is rejected.
    x = torch.rand(3, 3, bittensor.__network_dim__, dtype=torch.float32)
    with pytest.raises(ValueError):
        dendrite.forward_tensor( [neuron_obj], [])

def test_dendrite_forward_image_input_len_error():
    x = torch.tensor([ [ [ [ [ 1 ] ] ] ] ], dtype=torch.float32)
    with pytest.raises(ValueError):
        dendrite.forward_image( [neuron_obj], [])

def test_dendrite_forward_text_input_len_error():
    x = torch.tensor([[1,2,3,4],[5,6,7,8]], dtype=torch.long)
    with pytest.raises(ValueError):
        dendrite.forward_text( [neuron_obj], [])

def test_dendrite_forward_tensor_mismatch_len_error():
    # len(inputs) must equal len(endpoints).
    x = torch.rand(3, 3, bittensor.__network_dim__, dtype=torch.float32)
    with pytest.raises(ValueError):
        dendrite.forward_tensor( [neuron_obj], [x,x])

def test_dendrite_forward_image_mismatch_len_error():
    x = torch.tensor([ [ [ [ [ 1 ] ] ] ] ], dtype=torch.float32)
    with pytest.raises(ValueError):
        dendrite.forward_image( [neuron_obj], [x,x])

def test_dendrite_forward_text_mismatch_len_error():
    x = torch.tensor([[1,2,3,4],[5,6,7,8]], dtype=torch.long)
    with pytest.raises(ValueError):
        dendrite.forward_text( [neuron_obj], [x,x])
# --- calls against the dead endpoint (nothing listens on port 12345): each
# asserts the Unavailable return code and a correctly-shaped zero response. ---

def test_dendrite_forward_text_non_list():
    # A bare endpoint (not wrapped in a list) is accepted.
    x = torch.tensor([[1,2,3,4],[5,6,7,8]], dtype=torch.long)
    out, ops, times = dendrite.forward_text( neuron_obj, x)
    assert ops[0].item() == bittensor.proto.ReturnCode.Unavailable
    assert list(out[0].shape) == [2, 4, bittensor.__network_dim__]

def test_dendrite_forward_image_non_list():
    x = torch.tensor([ [ [ [ [ 1 ] ] ] ] ], dtype=torch.float32)
    out, ops, times = dendrite.forward_image( neuron_obj, x)
    assert ops[0].item() == bittensor.proto.ReturnCode.Unavailable
    assert list(out[0].shape) == [1, bittensor.__network_dim__]

def test_dendrite_forward_tensor_non_list():
    x = torch.rand(3, 3, bittensor.__network_dim__, dtype=torch.float32)
    out, ops, times = dendrite.forward_tensor( neuron_obj, x)
    assert ops[0].item() == bittensor.proto.ReturnCode.Unavailable
    assert list(out[0].shape) == [3, bittensor.__network_dim__]

def test_dendrite_forward_text():
    x = torch.tensor([[1,2,3,4],[5,6,7,8]], dtype=torch.long)
    out, ops, times = dendrite.forward_text( [neuron_obj], [x])
    assert ops[0].item() == bittensor.proto.ReturnCode.Unavailable
    assert list(out[0].shape) == [2, 4, bittensor.__network_dim__]

def test_dendrite_forward_image():
    x = torch.tensor([ [ [ [ [ 1 ] ] ] ] ], dtype=torch.float32)
    out, ops, times  = dendrite.forward_image( [neuron_obj], [x])
    assert ops[0].item() == bittensor.proto.ReturnCode.Unavailable
    assert list(out[0].shape) == [1, 1, bittensor.__network_dim__]

def test_dendrite_forward_tensor():
    x = torch.rand(3, 3, bittensor.__network_dim__, dtype=torch.float32)
    out, ops, times = dendrite.forward_tensor( [neuron_obj], [x])
    assert ops[0].item() == bittensor.proto.ReturnCode.Unavailable
    assert list(out[0].shape) == [3, 3, bittensor.__network_dim__]

def test_dendrite_backoff():
    # A freshly-built dendrite/endpoint pair behaves the same as the shared one.
    _dendrite = bittensor.dendrite( wallet = wallet )
    _endpoint_obj = bittensor.endpoint(
        version = bittensor.__version_as_int__,
        uid = 0,
        ip = '0.0.0.0',
        ip_type = 4,
        port = 12345,
        hotkey = _dendrite.wallet.hotkey.ss58_address,
        coldkey = _dendrite.wallet.coldkey.ss58_address,
        modality = 0
    )
    print (_endpoint_obj)
    # Normal call.
    x = torch.rand(3, 3, bittensor.__network_dim__, dtype=torch.float32)
    out, ops, times = _dendrite.forward_tensor( [_endpoint_obj], [x])
    assert ops[0].item() == bittensor.proto.ReturnCode.Unavailable
    assert list(out[0].shape) == [3, 3, bittensor.__network_dim__]
if __name__ == "__main__":
    # Manual entry point: the commented calls below are an ad-hoc checklist for
    # running individual tests directly (outside pytest); only the last is active.
    # test_dendrite_forward_tensor_shape_error ()
    # test_dendrite_forward_image_shape_error ()
    # test_dendrite_forward_text_shape_error ()
    # test_dendrite_forward_text ()
    # test_dendrite_forward_image ()
    # test_dendrite_forward_tensor ()
    # test_dendrite_backoff ()
    # test_dendrite_forward_text_singular_no_batch_size()
    # test_dendrite_forward_text_singular()
    # test_dendrite_forward_text_singular_string()
    # test_dendrite_forward_text_list_string()
    # test_dendrite_forward_text_tensor_list_singular()
    # test_dendrite_forward_text_tensor_list()
    # test_dendrite_forward_text_endpoints_tensor()
    # test_dendrite_forward_text_multiple_endpoints_tensor()
    # test_dendrite_forward_text_multiple_endpoints_tensor_list()
    test_dendrite_forward_text_endpoints_tensor()
zeineb/testzz | bittensor/_dataset/dataset_impl.py | """ Implementation for the dataset and GenesisTextDataset class, which handles dataloading from ipfs
"""
# The MIT License (MIT)
# Copyright © 2021 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import os
import random
from re import I
from torch.utils.data.dataloader import DataLoader
from torch.utils.data import Subset
import torch
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
import requests
from loguru import logger
import bittensor
logger = logger.opt(colors=True)
class Dataset():
    """ Implementation for the dataset class, which handles dataloading from ipfs
    """
    def __init__(self):
        # Used to retrieve directory contents
        self.file_get = 'http://ipfs2.opentensor.ai/api/v0/object/get'
        self.pin_get = 'http://ipfs2.opentensor.ai/api/v0/pin/ls'
        # Used when current corpus has been exhausted
        self.refresh_corpus = False

    @staticmethod
    def requests_retry_session(
            retries=10,
            backoff_factor=0.5,
            status_forcelist=(104, 500, 502, 504),
            session=None,
        ):
        """ Creates a retriable session for request calls. This enables
        automatic retries and back-off retries should any request calls fail.

        Args:
            retries (int, optional): Maximum number of retries. Defaults to 10.
            backoff_factor (float, optional): Factor by which to back off if a retry fails. Defaults to 0.5.
            status_forcelist (tuple, optional): A set of integer HTTP status codes that we should force a retry on. Defaults to (104, 500, 502, 504).
            session ([type], optional): Session for which to set up the retries. Defaults to None.
        Returns:
            requests.Session(): A Requests Session object set up for retries and backoff.
        """
        session = session or requests.Session()
        retry = Retry(
            total=retries,
            read=retries,
            connect=retries,
            backoff_factor=backoff_factor,
            status_forcelist=status_forcelist,
        )
        adapter = HTTPAdapter(max_retries=retry)
        session.mount('http://', adapter)
        session.mount('https://', adapter)
        return session

    def retrieve_directory(self, address: str, params = None, action: str = 'post'):
        r"""Connects to the IPFS gateway and retrieves a directory listing.

        Args:
            address (str): Gateway URL to query.
            params (dict, optional): Query parameters merged into the session; may be None.
            action (str, optional): HTTP verb to use, 'get' or 'post'. Defaults to 'post'.
        Returns:
            requests.Response: The raw gateway response.
        Raises:
            ValueError: If `action` is neither 'get' nor 'post'.
        """
        session = requests.Session()
        # BUG FIX: dict.update(None) raises TypeError, so only merge params when given
        # (the declared default is None).
        if params:
            session.params.update(params)
        if action == 'get':
            response = Dataset.requests_retry_session(session=session).get(address)
        elif action == 'post':
            response = Dataset.requests_retry_session(session=session).post(address)
        else:
            # BUG FIX: previously fell through and raised UnboundLocalError on `response`.
            raise ValueError("action must be 'get' or 'post', got {!r}".format(action))
        return response

    def __len__(self):
        """ Returns length of the dataset that the dataset is processing
        """

    def __getitem__(self, idx):
        """ Returns the next batch from the dataset.
        """
class GenesisTextDataset( Dataset ):
    """ One kind of dataset that caters for the data from ipfs
    """
    def __init__(
        self,
        block_size,
        batch_size,
        max_corpus_size,
        num_workers,
        dataset_name,
        data_dir,
        save_dataset
    ):
        """
        Args:
            block_size (int): Number of tokens per returned sample (block).
            batch_size (int): Batch size used by the dataloader.
            max_corpus_size (int): Soft upper bound (in bytes) on the amount of
                raw data gathered per corpus refresh.
            num_workers (int): Number of dataloader worker processes.
            dataset_name (str): Name of the dataset (informational).
            data_dir (str): Local directory used to cache downloaded files.
            save_dataset (bool): Whether to cache downloaded files to disk.
        """
        super().__init__()
        self.block_size = block_size
        self.batch_size = batch_size
        self.max_corpus_size = max_corpus_size
        self.num_workers = num_workers
        self.tokenizer = bittensor.tokenizer( version = bittensor.__version__ )
        self.dataset_name = dataset_name
        self.data_dir = data_dir
        self.save_dataset = save_dataset
        # Directories at or below this size (bytes) are assumed to point
        # directly at a data file rather than at further sub-directories.
        self.datafile_size_bound = 262158
        self.__infinite_dataset_iterator = None
        # Retrieve a random slice of the genesis dataset
        self.data = []
        self.data_remained = []
        # Used to refresh corpus if we've exhausted the whole dataset
        self.refresh_corpus = True
        if not os.path.isdir(os.path.expanduser(data_dir)):
            os.makedirs(os.path.expanduser(data_dir))

    def get_random_directories(self):
        r""" Getting directories from a random dataset_hash
        Where a directory could be leading to a data file or a directory file
        Returns:
            directories: list[ Map{Name: str, Hash: str, Size: int} ] or None:
                Directory links of the first dataset hash that yields any, or
                None if none could be loaded.
        """
        # --- Getting dataset hashes from pin/ls.
        dataset_hashes = []
        response = self.retrieve_directory(self.pin_get, (('type', 'recursive'),), action = 'post')
        if response.status_code != 200:
            # Fall back to a hard-coded list of known dataset hashes when the
            # gateway cannot enumerate the pinned datasets.
            dataset_hashes = [
                'QmPbAqDsMpufa2eNsE8X9TRh43JsAPxbj7tz3PmprouH7U',
                '<KEY>',
                'QmSJJtZa37kX7ABBJyani9i3cFTq86zebTLQqioRCvgDei',
                '<KEY>1AQePSeKvbaFejizj5HP3',
                '<KEY>3A6atbFeKUZ63DnTeW',
                '<KEY>',
                'QmVbNzncoJK8WwyAoWxLndk4999iyyYZbCKpEvUxrFXp1N',
                'QmWiHsJ6z2LbZnEcidgz2vPq9ZsgrKUQ4QdB83pFcFvug3',
                'QmXa1SDyVK6f876JYHwoQZcpXGMi8aPYKWvHzKTDXuqU5z',
                'QmYg67pZwPsX3qH31tEc1qexrPc88zUkZG4AqsNDZo5FEX',
                'QmZawcgwiT9S5Vk5WX41RRaBPb73KByQej9JmRCNgNVxjz',
                'QmeSNvZVtHeMmJSuJQAUyTTW9LZbQkAqLDgVVXhzqJHrvY',
                'Qmefa9xMdu7HZyr3U1zH8MaCayPngPJ9iZnnddXfXMrA2N',
                'Qmf3BjH7SzK8WHGWBngt4WK6jGCpUtgPEBCw2pFZvYimto'
            ]
        else:
            # Only the keys are needed (avoid shadowing the builtin ``hash``).
            dataset_hashes = list(response.json()['Keys'].keys())
        # --- Getting directories from a random dataset hash.
        # --- directories: List[ Map{Name: str, Hash: str, Size: int} ]
        i = 0
        directories = []
        dataset_hashes_order = list(range(len(dataset_hashes)))
        random.shuffle(dataset_hashes_order)
        while len(directories) == 0 and i < len(dataset_hashes):
            dataset_hash = dataset_hashes[dataset_hashes_order[i]]
            i += 1
            response = self.retrieve_directory(self.file_get, (('arg', dataset_hash),))
            if response.status_code != 200:
                logger.warning("Failed to retrieve directory, ignoring directory:".ljust(20) + "<blue>{}</blue>".format(dataset_hash))
            else:
                # --- Get the directory links if there is valid response, else check on another dataset_hash
                directory = response.json()
                if directory and 'Links' in directory.keys():
                    directories += directory['Links']
                    logger.success("Loaded dataset hash:".ljust(20) + "<blue>{}</blue>".format(dataset_hash))
        if len(directories) == 0:
            directories = None
        return directories

    def extract_datafile_dir(self, directory):
        r"""
        With recursion, from the given directory, get a directory that leads to a datafile.
        Args:
            directory: Map{ Name: str, Hash: str, Size: int }:
                The original directory to look up a datafile for.
        Returns:
            directory: Map{ Name: str, Hash: str, Size: int } or None:
                A random directory that leads to a datafile, or None on failure.
        """
        # --- If the size of directory is small, it leads to a data file, return the data file.
        if directory['Size'] <= self.datafile_size_bound:
            return directory
        # --- Else, the directory leads to more directories, return a random data file within the directories.
        else:
            response = self.retrieve_directory(self.file_get, (('arg', directory['Hash']),))
            # --- Return none if the request failed.
            if response.status_code != 200:
                logger.warning("Failed to retrieve directory, ignoring directory:".ljust(20) + "<blue>{}</blue>".format(directory))
                return None
            # --- Pick a random sub_directory, run recursion until we have found a data file
            else:
                sub_directories = response.json()
                if sub_directories and 'Links' in sub_directories.keys() and len(sub_directories['Links']) >= 1:
                    random_sub_directory = random.choice(sub_directories['Links'])
                    # --- Fill the name of the random_sub_directory if it is empty.
                    if random_sub_directory['Name'] == '':
                        random_sub_directory['Name'] = directory['Name']
                    return self.extract_datafile_dir(random_sub_directory)
                else:
                    # Bug fix: previously formatted an undefined name
                    # ``dir_hash`` which raised NameError on this path.
                    logger.warning("Directory seems empty, ignoring directory:".ljust(20) + "<blue>{}</blue>". format(directory))
                    return None

    def get_text(self, file):
        r"""
        Load the text data from disk if it is already in the the data_dir,
        else download it from IPFS and save it
        Args:
            file: Map{ Name: str, Hash: str, Size: int }
                The directory to get text file from.
        Returns:
            text: str or None:
                The text data, or None if it could neither be loaded nor
                downloaded.
        """
        text = None
        file_name = file['Name']
        file_hash = file['Hash']
        full_path = os.path.expanduser(os.path.join(self.data_dir, file_name))
        # --- Load text from path
        if os.path.exists(full_path):
            try:
                with open(full_path, mode='r') as f:
                    text = f.read()
                logger.success("Loaded:".ljust(20) + "<blue>{}</blue>".format(file_name))
            except Exception:
                logger.warning("Load failed:".ljust(20) + "<blue>{}</blue>".format(file_name))
        # --- If couldnt load from path, download text.
        if text is None:
            response = self.retrieve_directory(self.file_get, (('arg', file_hash),))
            if response.status_code != 200:
                logger.warning("Failed to retrieve file, ignoring file:".ljust(20) + "<blue>{}</blue>".format(file_name))
            else:
                text = response.text
                logger.success("Downloaded:".ljust(20) + "<blue>{}</blue>".format(file_name))
                # --- Save text if the save_dataset flag is on.
                if self.save_dataset:
                    try:
                        with open(full_path, mode = 'w+') as f:
                            f.write(text)
                            logger.success("Saved:".ljust(20) + "<blue>{}</blue>".format(file_name))
                    except Exception:
                        logger.warning("Save failed:".ljust(20) + "<blue>{}</blue>".format(file_name))
        return text

    def construct_text_corpus(self, min_data_len = 0):
        """ Main function for generating the text data.
        1. Get directories from a random dataset_hash (dataset_hash is the result from calling pin/ls).
        2. Pick a random directory and get the directory that would lead to a datafile.
        3. Get text from the directory.
        4. Repeat 2,3 until we have reached the max_corpus_size
        Args:
            min_data_len (int): Minimum number of whitespace-separated tokens
                the corpus must contain before collection stops.
        Returns:
            data_corpus: list[str] or None:
                Whitespace-split tokens of the collected text, or None if
                nothing could be retrieved.
        """
        try:
            logger.success("Retrieving a dataset files from the IPFS gateway...")
            # --- Get directories from a random dataset_hash
            directories = self.get_random_directories()
            # Bug fix: check for None *before* taking len(directories).
            if not directories:
                logger.error("It appears the directory is empty... Restart your miner to try again.")
                return None
            data_corpus = []
            # --- Generate a random order of the directories
            directory_order = list(range(len(directories)))
            random.shuffle(directory_order)
            # --- Pick random directories and get their text contents.
            total_dataset_size = 0
            total_dataset_len = 0
            i = 0
            # --- Dont stop until the corpus size and the minimum data_length was reached.
            while (total_dataset_size <= self.max_corpus_size) or (total_dataset_len < min_data_len):
                if i >= len(directory_order):
                    # Bug fix: previously the index could run past the end of
                    # directory_order and raise IndexError.
                    logger.warning("Exhausted all directories before reaching the requested corpus size.")
                    break
                # --- Get a directory that leads to a datafile.
                random_datafile_dir = self.extract_datafile_dir(directories[directory_order[i]])
                i += 1
                if random_datafile_dir is None:
                    # Bug fix: previously a failed lookup fell through to
                    # get_text(None); skip it instead.
                    continue
                # --- Get text from the datafile directory
                try:
                    text = self.get_text(random_datafile_dir)
                except Exception:
                    text = None
                if text is not None:
                    text_list = text.split()
                    data_corpus.extend(text_list)
                    total_dataset_size += int(random_datafile_dir['Size'])
                    total_dataset_len += len(text_list)
            return data_corpus
        except Exception as e:
            logger.error("Ran into exception when trying to retrieve dataset from IPFS: {}".format(e))
            return None

    def dataloader(self, epoch_length = 100):
        """ Creates a torch dataloader out of a subclass of this class.
        Args:
            epoch_length (int, optional): The epoch length of the miner. If this length is not set or if it is larger than the dataset,
            then a dataloader for the entire dataset is returned. Otherwise, a dataloader for a subset of the dataset of epoch_length
            is returned. Defaults to 100.
        Returns:
            torch.utils.data.dataloader.DataLoader: Pytorch dataloader.
        """
        data_size = epoch_length * self.batch_size * self.block_size
        # Make sure the data remaining is at least as big as data_size.
        if len(self.data_remained) < data_size:
            new_corpus = self.construct_text_corpus(min_data_len = data_size)
            # Bug fix: construct_text_corpus may return None on failure, which
            # previously crashed here with ``+= None``.
            if new_corpus:
                self.data_remained += new_corpus
        self.data = self.data_remained[:data_size]
        del self.data_remained[:data_size]
        # The DataLoader calls self.__getitem__ until self.data is used up and
        # groups the results by batch size.
        return DataLoader(self,
                    shuffle=True,
                    batch_size=self.batch_size,
                    num_workers=self.num_workers,
                    drop_last=True)

    def __next__(self):
        """Returns the next element from the dataset.
        """
        if self.__infinite_dataset_iterator is None:
            self.__infinite_dataset_iterator = iter([batch for batch in self.dataloader(1000)])  # should set it to 1000
        try:
            return next(self.__infinite_dataset_iterator)
        except StopIteration:
            # Refresh the iterator once the current corpus is exhausted.
            self.__infinite_dataset_iterator = iter([batch for batch in self.dataloader(1000)])
            return next(self.__infinite_dataset_iterator)

    def __len__(self):
        """Returns number of samples (blocks) of dataset
        Returns:
            length: int
        """
        if (self.data is None) or (self.block_size is None) or (self.block_size == 0):
            return 0
        return round( len(self.data) / self.block_size )

    def __getitem__(self, idx):
        """ Returns a block of sentences from text dataset.
        Args:
            idx: index of data input
        Returns:
            torch.tensor(dix): tokenized ids of the block, truncated to
                block_size tokens.
        """
        start_idx = (idx * self.block_size) % len(self.data)
        end_idx = start_idx + self.block_size
        tokenized_text = torch.tensor(self.tokenizer(" ".join(self.data[start_idx:end_idx]), padding=True, truncation=True)['input_ids'], dtype=torch.long)
        return tokenized_text[:self.block_size]
|
erosson/PyPoE | PyPoE/index_dat.py | # Generate a list of all .dat file names, based on PyPoE specifications.
# Run me from the PyPoE package directory! `scripts/dist` copies me there.
from poe.file.specification.data.stable import specification
import json
print(json.dumps(list(specification.keys()), indent=4))
|
erosson/PyPoE | PyPoE/poe/file/specification/generation/virtual_fields.py | <gh_stars>1-10
from PyPoE.poe.file.specification.fields import VirtualField
# Virtual (computed) fields layered on top of the raw .dat columns, keyed by
# .dat file name. A VirtualField with ``zip=True`` pairs the listed per-row
# sequences element-wise (e.g. a list of stat keys with its matching list of
# stat values); without ``zip`` it simply groups the listed fields together.
virtual_fields = {
    'BaseItemTypes.dat': [
        VirtualField(
            name='NormalPurchase',
            fields=('NormalPurchase_BaseItemTypesKeys', 'NormalPurchase_Costs'),
            zip=True,
        ),
        VirtualField(
            name='MagicPurchase',
            fields=('MagicPurchase_BaseItemTypesKeys', 'MagicPurchase_Costs'),
            zip=True,
        ),
        VirtualField(
            name='RarePurchase',
            fields=('RarePurchase_BaseItemTypesKeys', 'RarePurchase_Costs'),
            zip=True,
        ),
        VirtualField(
            name='UniquePurchase',
            fields=('UniquePurchase_BaseItemTypesKeys', 'UniquePurchase_Costs'),
            zip=True,
        ),
    ],
    'CraftingBenchOptions.dat': [
        VirtualField(
            name='Cost',
            fields=('Cost_BaseItemTypesKeys', 'Cost_Values'),
            zip=True,
        ),
    ],
    'DelveUpgrades.dat': [
        VirtualField(
            name='Stats',
            fields=('StatsKeys', 'StatValues'),
            zip=True,
        ),
    ],
    'GrantedEffectsPerLevel.dat': [
        # Collects the numbered Stat*Value / Stat*Float columns into lists.
        VirtualField(
            name='StatValues',
            fields=(
                'Stat1Value', 'Stat2Value', 'Stat3Value', 'Stat4Value',
                'Stat5Value', 'Stat6Value', 'Stat7Value', 'Stat8Value',
                'Stat9Value',
            ),
        ),
        VirtualField(
            name='StatFloats',
            fields=(
                'Stat1Float', 'Stat2Float', 'Stat3Float', 'Stat4Float',
                'Stat5Float', 'Stat6Float', 'Stat7Float', 'Stat8Float'
            ),
        ),
        VirtualField(
            name='Stats',
            fields=('StatsKeys', 'StatValues'),
            zip=True,
        ),
        VirtualField(
            name='Costs',
            fields=('CostTypesKeys', 'CostAmounts'),
            zip=True,
        ),
    ],
    'MapPurchaseCosts.dat': [
        VirtualField(
            name='NormalPurchase',
            fields=('NormalPurchase_BaseItemTypesKeys', 'NormalPurchase_Costs'),
            zip=True,
        ),
        VirtualField(
            name='MagicPurchase',
            fields=('MagicPurchase_BaseItemTypesKeys', 'MagicPurchase_Costs'),
            zip=True,
        ),
        VirtualField(
            name='RarePurchase',
            fields=('RarePurchase_BaseItemTypesKeys', 'RarePurchase_Costs'),
            zip=True,
        ),
        VirtualField(
            name='UniquePurchase',
            fields=('UniquePurchase_BaseItemTypesKeys', 'UniquePurchase_Costs'),
            zip=True,
        ),
    ],
    'Mods.dat': [
        VirtualField(
            name='SpawnWeight',
            fields=('SpawnWeight_TagsKeys', 'SpawnWeight_Values'),
            zip=True,
        ),
        # Each StatN groups a stat key with its min/max roll range.
        VirtualField(
            name='Stat1',
            fields=('StatsKey1', 'Stat1Min', 'Stat1Max'),
        ),
        VirtualField(
            name='Stat2',
            fields=('StatsKey2', 'Stat2Min', 'Stat2Max'),
        ),
        VirtualField(
            name='Stat3',
            fields=('StatsKey3', 'Stat3Min', 'Stat3Max'),
        ),
        VirtualField(
            name='Stat4',
            fields=('StatsKey4', 'Stat4Min', 'Stat4Max'),
        ),
        VirtualField(
            name='Stat5',
            fields=('StatsKey5', 'Stat5Min', 'Stat5Max'),
        ),
        VirtualField(
            name='Stat6',
            fields=('StatsKey6', 'Stat6Min', 'Stat6Max'),
        ),
        VirtualField(
            name='StatsKeys',
            fields=('StatsKey1', 'StatsKey2', 'StatsKey3', 'StatsKey4',
                    'StatsKey5', 'StatsKey6'),
        ),
        VirtualField(
            name='Stats',
            fields=('Stat1', 'Stat2', 'Stat3', 'Stat4', 'Stat5', 'Stat6'),
        ),
        VirtualField(
            name='GenerationWeight',
            fields=('GenerationWeight_TagsKeys', 'GenerationWeight_Values'),
            zip=True,
        ),
    ],
    'MonsterMapBossDifficulty.dat': [
        VirtualField(
            name='Stat1',
            fields=('StatsKey1', 'Stat1Value'),
        ),
        VirtualField(
            name='Stat2',
            fields=('StatsKey2', 'Stat2Value'),
        ),
        VirtualField(
            name='Stat3',
            fields=('StatsKey3', 'Stat3Value'),
        ),
        VirtualField(
            name='Stat4',
            fields=('StatsKey4', 'Stat4Value'),
        ),
        VirtualField(
            name='Stat5',
            fields=('StatsKey5', 'Stat5Value'),
        ),
        VirtualField(
            name='Stats',
            fields=('Stat1', 'Stat2', 'Stat3', 'Stat4', 'Stat5'),
        ),
    ],
    'MonsterMapDifficulty.dat': [
        VirtualField(
            name='Stat1',
            fields=('StatsKey1', 'Stat1Value'),
        ),
        VirtualField(
            name='Stat2',
            fields=('StatsKey2', 'Stat2Value'),
        ),
        VirtualField(
            name='Stat3',
            fields=('StatsKey3', 'Stat3Value'),
        ),
        VirtualField(
            name='Stat4',
            fields=('StatsKey4', 'Stat4Value'),
        ),
        VirtualField(
            name='Stats',
            fields=('Stat1', 'Stat2', 'Stat3', 'Stat4'),
        ),
    ],
    'PassiveSkills.dat': [
        VirtualField(
            name='StatValues',
            fields=('Stat1Value', 'Stat2Value', 'Stat3Value', 'Stat4Value', 'Stat5Value'),
        ),
        VirtualField(
            name='Stats',
            fields=('StatsKeys', 'StatValues'),
            zip=True,
        ),
    ],
}
|
erosson/PyPoE | PyPoE/poe/file/specification/generation/custom_attributes.py |
class CustomizedField:
    """Per-field customization applied on top of a generated specification.

    Attributes:
        enum: Name of the constants enum (see ``PyPoE.poe.constants``) whose
            values this field holds, or ``None`` when the field is not
            enum-backed.
    """

    def __init__(self, enum: str = None):
        self.enum = enum
# Manual overrides keyed by .dat file name, then by field name. These augment
# the auto-generated specification with information that cannot be derived
# from the dat schema itself (currently: which constants enum backs a field).
custom_attributes = {
    'BaseItemTypes.dat': {
        'ModDomainsKey': CustomizedField(
            enum='MOD_DOMAIN',
        ),
    },
    'BestiaryRecipeComponent.dat': {
        'RarityKey': CustomizedField(
            enum='RARITY',
        ),
    },
    'BetrayalUpgrades.dat': {
        'BetrayalUpgradeSlotsKey': CustomizedField(
            enum='BETRAYAL_UPGRADE_SLOTS',
        ),
    },
    'DelveUpgrades.dat': {
        'DelveUpgradeTypeKey': CustomizedField(
            enum='DELVE_UPGRADE_TYPE',
        ),
    },
    'GrantedEffectsPerLevel.dat': {
        'StatInterpolationTypesKeys': CustomizedField(
            enum='STAT_INTERPOLATION_TYPES',
        ),
    },
    'HarvestObjects.dat': {
        'ObjectType': CustomizedField(
            enum='HARVEST_OBJECT_TYPES',
        ),
    },
    'MapFragmentMods.dat': {
        'MapFragmentFamilies': CustomizedField(
            enum='MAP_FRAGMENT_FAMILIES',
        ),
    },
    'Mods.dat': {
        'Domain': CustomizedField(
            enum='MOD_DOMAIN',
        ),
        'GenerationType': CustomizedField(
            enum='MOD_GENERATION_TYPE',
        ),
    },
    'Scarabs.dat': {
        'ScarabType': CustomizedField(
            enum='SCARAB_TYPES',
        ),
    },
    'ShopPaymentPackage.dat': {
        'ShopPackagePlatformKeys': CustomizedField(
            enum='SHOP_PACKAGE_PLATFORM',
        ),
    },
    'SupporterPackSets.dat': {
        'ShopPackagePlatformKey': CustomizedField(
            enum='SHOP_PACKAGE_PLATFORM',
        ),
    },
    'Words.dat': {
        'WordlistsKey': CustomizedField(
            enum='WORDLISTS',
        ),
    },
}
|
erosson/PyPoE | PyPoE/poe/file/specification/generation/template.py | <filename>PyPoE/poe/file/specification/generation/template.py
# template for data/stable.py
"""
Description
===============================================================================
Contains the specification for the stable version of the game.
This file is generated automatically based on
https://github.com/poe-tool-dev/dat-schema. Do not modify it manually.
Please see the following for more details:
:py:mod:`PyPoE.poe.file.specification.fields`
Information about the Field classes
:py:mod:`PyPoE.poe.file.specification`
Specification loader
:py:mod:`PyPoE.poe.file.specification.generation`
Automatic generation
Agreement
===============================================================================
See PyPoE/LICENSE
"""
# =============================================================================
# Imports
# =============================================================================
# 3rd-party
from PyPoE.poe.file.specification.fields import *
# self
# =============================================================================
# Globals
# =============================================================================
__all__ = ['specification', ]
# Minimal placeholder specification: the generator textually replaces the
# "# <specification>" marker below with the full set of generated File()
# entries, so that marker line must be left exactly as-is.
specification = Specification({
    'SkillTotems.dat': File(
    ),
    # <specification>
})
|
erosson/PyPoE | PyPoE/poe/file/dat.py | """
Overview
===============================================================================
+----------+------------------------------------------------------------------+
| Path | PyPoE/poe/file/dat.py |
+----------+------------------------------------------------------------------+
| Version | 1.0.0a0 |
+----------+------------------------------------------------------------------+
| Revision | $Id: 67a27d9930ef4f276ec274fad623bd4c21fc711e $ |
+----------+------------------------------------------------------------------+
| Author | Omega_K2 |
+----------+------------------------------------------------------------------+
Description
===============================================================================
Support for .dat file format.
.dat files can be found in Data/ and are read by :class:`DatFile`.
Unfortunately, there is no magic keyword for identifying the GGG .dat format,
so advise caution when trying to read dat files.
The GGG .dat format uses a fixed-width table and a variable-length data section.
In the fixed-width table, the number of rows is defined, however, the data
format stored has to be reverse-engineered and is currently not stored in the
file itself.
The data is a continuous amount of binary data; reading values form there is
generally done by pointers (int) or list pointers (size, int) from the
table-data.
A list of default specification is included with PyPoE; to set the correct
version :func:`set_default_spec` may be used.
Agreement
===============================================================================
See PyPoE/LICENSE
.. todo::
* DatValue.get_value might hit the python recursion limit, but it is not a
problem for any of the actual dat files.
* Update RR with the new indexing
Documentation
===============================================================================
Public API
-------------------------------------------------------------------------------
.. autoclass:: DatFile
.. autoclass:: RelationalReader
.. autofunction:: set_default_spec
Internal API
-------------------------------------------------------------------------------
.. autoclass:: DatReader
.. autoclass:: DatValue
"""
# =============================================================================
# Imports
# =============================================================================
# Python
import struct
import warnings
from enum import IntEnum
from io import BytesIO
from collections import OrderedDict, defaultdict
from collections.abc import Iterable
# 3rd-party
# self
from PyPoE.shared.decorators import deprecated, doc
from PyPoE.shared.mixins import ReprMixin
from PyPoE.poe import constants
from PyPoE.poe.file.shared import AbstractFileReadOnly
from PyPoE.poe.file.shared.cache import AbstractFileCache
from PyPoE.poe.file.specification import load
from PyPoE.poe.file.specification.errors import SpecificationError, \
SpecificationWarning
# =============================================================================
# Globals
# =============================================================================
# Specification used when a DatReader is created without an explicit one;
# populated via set_default_spec().
_default_spec = None

__all__ = [
    'DAT_FILE_MAGIC_NUMBER',
    'DatFile', 'RelationalReader',
    'set_default_spec',
]

# Byte sequence separating the fixed-width table section from the
# variable-length data section of a .dat file.
DAT_FILE_MAGIC_NUMBER = b'\xBB\xbb\xBB\xbb\xBB\xbb\xBB\xbb'
# =============================================================================
# Classes
# =============================================================================
class DatValue:
    """
    Representation of a value found in a dat file.
    DatValue instances are created by reading or writing a DatValue and should
    not be directly be created. The purpose of DatValues is to keep information
    regarding the placement of the value in the respective DatFile intact.
    Support for built-ins:
    DatValue do support comparison, however is it performed on the dereferenced
    value it holds, not the equality of the dat value itself.
    This means generally DatValues can be compared to anything, the actual
    comparison is however performed depending on the data type.
    Example 1: dat_value < 0
    * works if the dat_value holds an integer
    * raises TypeError if it holds a list
    Example 2: dat_value1 < dat_value2
    * works if both dat values have the same or comparable types
    * raises TypeError if one holds a list, and the other an integer
    Dev notes:
    Must keep the init
    """
    # Very important to cut down the cost of class creation
    # In some dat files we may be creating millions of instances, simply using
    # slots can make a significant difference (~35% speedup)
    __slots__ = [
        'value', 'size', 'offset', 'parent', 'specification', 'children',
        'child',
    ]

    def __init__(self, value=None, offset=None, size=None, parent=None,
                 specification=None):
        # value: raw or parsed value; offset/size: placement in the dat file.
        # parent is set for values living in the data section; children/child
        # are filled in externally for list and pointer types respectively.
        self.value = value
        self.size = size
        self.offset = offset
        self.parent = parent
        self.specification = specification
        self.children = None
        self.child = None

    def __repr__(self):
        # TODO: iterative vs recursive?
        # Pointers and lists render through their dereferenced contents.
        if self.is_pointer:
            return repr(self.child)
        elif self.is_list:
            return repr([repr(dv) for dv in self.children])
        else:
            return 'DatValue(' + repr(self.value) +')'

    # Rich comparisons operate on the *dereferenced* value (see class
    # docstring), so a DatValue can be compared against plain python objects.
    def __lt__(self, other):
        if not isinstance(other, DatValue):
            return self.get_value() < other
        return self.get_value() < other.get_value()

    def __le__(self, other):
        if not isinstance(other, DatValue):
            return self.get_value() <= other
        return self.get_value() <= other.get_value()

    def __eq__(self, other):
        if not isinstance(other, DatValue):
            return self.get_value() == other
        return self.get_value() == other.get_value()

    def __ne__(self, other):
        if not isinstance(other, DatValue):
            return self.get_value() != other
        return self.get_value() != other.get_value()

    def __gt__(self, other):
        if not isinstance(other, DatValue):
            return self.get_value() > other
        return self.get_value() > other.get_value()

    def __ge__(self, other):
        if not isinstance(other, DatValue):
            return self.get_value() >= other
        return self.get_value() >= other.get_value()

    # Properties
    def _get_data_size(self):
        """
        Retrieves size of the data held by the current instance in the data
        section.
        Returns
        -------
        int
            size of data
        Raises
        ------
        TypeError
            If performed on DatValue instances without data
        """
        if self.is_list:
            if self.children:
                # All list entries share one element size; multiply by count.
                size = self.children[0].size * self.value[0]
            else:
                size = 0
        elif self.is_pointer:
            size = self.child.size
        else:
            raise TypeError('Only supported on DatValue instances with data (lists, pointers)')
        return size

    def _get_data_start_offset(self):
        """
        Retrieves the start offset of the data held by the current instance in
        the data section.
        Returns
        -------
        int
            start offset of data
        Raises
        ------
        TypeError
            If performed on DatValue instances without data
        """
        # For lists, value is (count, offset); for pointers it is the offset.
        if self.is_list:
            return self.value[1]
        elif self.is_pointer:
            return self.value
        else:
            raise TypeError('Only supported on DatValue instances with data (lists, pointers)')

    def _get_data_end_offset(self):
        """
        Retrieves the end offset of the data held by the current instance in the
        data section.
        Returns
        -------
        int
            end offset of data
        Raises
        ------
        TypeError
            If performed on DatValue instances without data
        """
        return self._get_data_start_offset() + self._get_data_size()

    def _is_data(self):
        """
        Whether this DatValue instance is data or not.
        Returns
        -------
        bool
        """
        return self.parent is not None

    def _has_data(self):
        """
        Whether this DatValue instance has data or not; this applies to types
        that hold a pointer.
        Returns
        -------
        bool
        """
        return self.is_list or self.is_pointer

    def _is_list(self):
        """
        Whether this DatValue instance is a list.
        Returns
        -------
        bool
        """
        return self.children is not None

    def _is_pointer(self):
        """
        Whether this DatValue instance is a pointer.
        Returns
        -------
        bool
        """
        return self.child is not None

    def _is_parsed(self):
        """
        Whether this DatValue instance is parsed (i.e. non bytes).
        Returns
        -------
        bool
        """
        return not isinstance(self.value, bytes)

    # Read-only property views over the private predicates above.
    data_size = property(fget=_get_data_size)
    data_start_offset = property(fget=_get_data_start_offset)
    data_end_offset = property(fget=_get_data_end_offset)
    is_data = property(fget=_is_data)
    has_data = property(fget=_has_data)
    is_list = property(fget=_is_list)
    is_pointer = property(fget=_is_pointer)
    is_parsed = property(fget=_is_parsed)

    # Public
    def get_value(self):
        """
        Returns the value that is held by the DatValue instance. This is done
        recursively, i.e. pointers will be dereferenced accordingly.
        This means if you want the actual value of the DatValue, you should
        probably access the value attribute instead.
        If this DatValue instance is a list, this means a python list of items
        will be returned.
        If this DatValue instance is a pointer, this means whatever value the
        child of this instance holds will be returned.
        Otherwise the value of the DatValue instance itself will be returned.
        Note, that values may be nested i.e. if a list contains a list, a
        nested list will be returned accordingly.
        Returns
        -------
        object
            the dereferenced value
        """
        if self.is_pointer:
            return self.child.get_value()
        elif self.is_list:
            return [dv.get_value() for dv in self.children]
        else:
            return self.value
class DatRecord(list):
    """
    A single row of a dat file. Subclasses list, so cell values are accessible
    both by integer column index and by (real or virtual) column name.

    Attributes
    ----------
    parent : DatReader
        The parent DatReader instance this DatRecord instance belongs to
    rowid : int
        The rowid of this DatRecord instance
    """
    __slots__ = ['parent', 'rowid']

    def __init__(self, parent, rowid):
        """
        Parameters
        ----------
        parent : DatReader
            The parent DatReader instance this DatRecord instance belongs to
        rowid : int
            The rowid of this DatRecord instance
        """
        list.__init__(self)
        self.parent = parent
        self.rowid = rowid

    def __getitem__(self, item):
        """
        Return the cell stored under ``item``.

        Parameters
        ----------
        item : str or int
            Real column name, virtual column name, or plain list index.

        Raises
        ------
        KeyError
            If a string key matches neither a real nor a virtual column.
        """
        if isinstance(item, str):
            if item in self.parent.table_columns:
                value = list.__getitem__(self, self.parent.table_columns[item]['index'])
                # DatValue wrappers are dereferenced transparently for named
                # access; raw integer indexing returns them unchanged.
                if isinstance(value, DatValue):
                    value = value.get_value()
                return value
            elif item in self.parent.specification['virtual_fields']:
                field = self.parent.specification['virtual_fields'][item]
                value = [self[fn] for fn in field['fields']]
                if field['zip']:
                    value = zip(*value)
                return value
            else:
                raise KeyError(item)
        return list.__getitem__(self, item)

    def __repr__(self):
        stuff = ["{%s: %s}" % (k, self[i]) for i, k in enumerate(self.parent.table_columns)]
        return '[%s]' % ', '.join(stuff)

    def __hash__(self):
        # Rows are uniquely identified by their source file and row id.
        return hash((self.parent.file_name, self.rowid))

    def iter(self):
        """
        Iterates over the DatRecord and returns key, value and index
        Yields
        ------
        str
            key
        object
            the value
        int
            index
        """
        for index, key in enumerate(self.parent.table_columns):
            yield key, self[key], index

    def keys(self):
        """
        Returns
        -------
        odict_keys
            The real column names of this record, in column order.
        """
        return self.parent.table_columns.keys()
class DatReader(ReprMixin):
"""
Attributes
----------
_table_offset : int
Starting offset of table data in bytes
_cast_table : dict[str, list[str, int]]
Mapping of cast type to the corresponding struct
type and the size
of the cast in bytes
auto_build_index : bool
Whether the index is automatically build after reading
x64 : bool
Whether this the reader is running in 64 bit mode
file_name : str
File name
file_length : int
File length in bytes
table_data : list[DatRecord[object]]
List of rows containing DatRecord entries.
table_length : int
Length of table in bytes
table_record_length : int
Length of each record in bytes
table_rows : int
Number of rows in table
data_offset : int
Data section offset
columns : OrderedDict
Shortened list of columns excluding intermediate columns
columns_zip : OrderedDict
Shortened list of columns excluding zipped columns
columns_all : OrderedDict
Complete list of columns, including all intermediate and virtual columns
columns_data : OrderedDict
List of all columns directly derived from the data
columns_unique: OrderedDict
List of all unique columns (which are also considered indexable)
table_columns : OrderedDict
Used for mapping columns to indexes
"""
_table_offset = 4
_cast_table = {
'bool': ['?', 1],
'byte': ['b', 1],
'ubyte': ['B', 1],
'short': ['h', 2],
'ushort': ['H', 2],
'int': ['i', 4],
'uint': ['I', 4],
'long': ['q', 8],
'ulong': ['Q', 8],
'float': ['f', 4],
'double': ['d', 8],
}
class CastTypes(IntEnum):
VALUE = 1
STRING = 2
POINTER_LIST = 3
POINTER = 4
POINTER_SELF = 5
def __init__(self, file_name, *args, use_dat_value=True, specification=None,
auto_build_index=False, x64=False):
"""
Parameters
----------
file_name : str
Name of the dat file
use_dat_value : bool
Whether to use :class:`DatValue` instances or values
specification : Specification
Specification to use
auto_build_index : bool
Whether to automatically build the index for unique columns after
reading.
x64 : bool
Whether the reader should run in 64 bit mode for dat64 files.
Raises
------
errors.SpecificationError
if the dat file is not in the specification
"""
self.auto_build_index = auto_build_index
self.x64 = x64
self.index = {}
self.data_parsed = []
self.data_offset = 0
self.file_length = 0
self._file_raw = b''
self.table_data = []
self.table_length = 0
self.table_record_length = 0
self.table_rows = 0
self.file_name = file_name
# Fix for the look up
if x64:
_file_name = file_name.replace('.dat64', '.dat')
else:
_file_name = file_name
self.use_dat_value = use_dat_value
# Process specification
if specification is None:
if _file_name in _default_spec:
specification = _default_spec[_file_name]
else:
raise SpecificationError(
SpecificationError.ERRORS.RUNTIME_MISSING_SPECIFICATION,
'No specification for "%s"' % file_name
)
else:
specification = specification[_file_name]
self.specification = specification
# Prepare the casts
self.table_columns = OrderedDict()
self.cast_size = 0
self.cast_spec = []
self.cast_row = []
for i, key in enumerate(specification.columns_data):
k = specification.fields[key]
self.table_columns[key] = {'index': i, 'section': k}
casts = []
remainder = k.type
while remainder:
remainder, cast_type = self._get_cast_type(remainder)
casts.append(cast_type)
self.cast_size += casts[0][1]
self.cast_spec.append((k, casts))
self.cast_row.append(casts[0][2])
self.cast_row = '<' + ''.join(self.cast_row)
for var in ('columns', 'columns_all', 'columns_zip', 'columns_data',
'columns_unique'):
setattr(self, var, getattr(specification, var))
def __iter__(self):
    """Iterate over the parsed rows (delegates to table_data)."""
    return self.table_data.__iter__()
def __getitem__(self, item):
    """Row access by index or slice, delegated to the parsed row list."""
    rows = self.table_data
    return rows[item]
def build_index(self, column=None):
    """
    Builds or rebuilds the index for the specified column(s).

    Indexed columns are exposed through ``self.index``:
    ``self.index[column_name][indexed_value]`` yields a single row for
    columns marked unique in the specification and a list of rows
    otherwise.

    Parameters
    ----------
    column : str or Iterable or None
        the column (or iterable of columns) to index; when None, every
        column listed as unique in the specification is indexed
    """
    # Normalise the argument into a set of column names.
    if column is None:
        targets = set(self.columns_unique)
    elif isinstance(column, str):
        targets = {column}
    elif isinstance(column, Iterable):
        targets = set(column)
    else:
        targets = set()
    # Classify each target and create its (default)dict container.
    unique_cols = set()
    scalar_cols = set()
    list_cols = set()
    for name in targets:
        if name in self.columns_unique:
            self.index[name] = {}
            unique_cols.add(name)
        elif self.specification.fields[name].type.startswith('ref|list'):
            self.index[name] = defaultdict(list)
            list_cols.add(name)
        else:
            self.index[name] = defaultdict(list)
            scalar_cols.add(name)
    # Single pass over the table fills every requested index.
    for row in self:
        for name in unique_cols:
            self.index[name][row[name]] = row
        for name in scalar_cols:
            self.index[name][row[name]].append(row)
        for name in list_cols:
            for entry in row[name]:
                self.index[name][entry].append(row)
def row_iter(self):
    """
    Returns
    -------
    iter
        Iterator over the parsed rows
    """
    return self.table_data.__iter__()
def column_iter(self):
    """
    Iterate over the table column by column.

    Yields
    ------
    list
        all cell values of one column, in row order
    """
    for col_index, _name in enumerate(self.table_columns):
        yield [record[col_index] for record in self]
def _get_cast_type(self, caststr):
    """Resolve the outermost layer of a specification type string.

    Returns a tuple ``(remainder, (kind, size, struct_format))`` where
    ``remainder`` is the still-unresolved inner type (empty when fully
    resolved), ``kind`` is a CastTypes constant, ``size`` the byte width
    inside the row and ``struct_format`` the struct module code(s).
    """
    remainder = ''
    size = None
    fmt = None
    if caststr in self._cast_table:
        # Plain value type; the table maps name -> (format, size).
        entry = self._cast_table[caststr]
        kind = self.CastTypes.VALUE
        fmt = entry[0]
        size = entry[1]
    elif caststr == 'string':
        # Strings carry no fixed-size payload in the row itself.
        kind = self.CastTypes.STRING
    elif caststr.startswith('ref|list|'):
        # Lists store (count, offset); twice the pointer width.
        kind = self.CastTypes.POINTER_LIST
        if self.x64:
            size, fmt = 16, 'QQ'
        else:
            size, fmt = 8, 'II'
        remainder = caststr[9:]
    elif caststr.startswith('ref|'):
        if self.x64:
            size, fmt = 8, 'Q'
        else:
            size, fmt = 4, 'I'
        if caststr.startswith('ref|generic'):
            kind = self.CastTypes.POINTER_SELF
        else:
            kind = self.CastTypes.POINTER
        remainder = caststr[4:]
    return remainder, (kind, size, fmt)
def _cast_from_spec(self, specification, casts, parent=None, offset=None, data=None, queue_data=None):
    """Decode one cell value, recursing into anything it points at.

    ``casts`` is the pre-resolved cast chain built in __init__; ``data``
    holds already-unpacked struct values for the outermost cast, otherwise
    the bytes at ``offset`` are unpacked on demand.  Returns a DatValue
    wrapper or the plain value depending on ``self.use_dat_value``.
    """
    if casts[0][0] in (self.CastTypes.VALUE, self.CastTypes.POINTER_SELF):
        ivalue = data[0] if data else struct.unpack('<' + casts[0][2], self._file_raw[offset:offset+casts[0][1]])[0]
        # These magic patterns act as null markers (0xFEFEFEFE /
        # 0xFEFEFEFEFEFEFEFE as signed and unsigned, plus 0xFFFFFFFF).
        if ivalue in (-0x1010102, 0xFEFEFEFE, -0x101010101010102, 0xFEFEFEFEFEFEFEFE, 0xFFFFFFFF):
            ivalue = None
        if self.use_dat_value:
            value = DatValue(ivalue, offset, casts[0][1], parent, specification)
        else:
            value = ivalue
    elif casts[0][0] == self.CastTypes.STRING:
        # Strings are UTF-16, terminated by a four-byte null sequence.
        # Beginning of the sequence, +1 to adjust for it
        offset_new = self._file_raw.find(b'\x00\x00\x00\x00', offset)
        # Account for 0 size strings
        if offset == offset_new:
            string = ''
        else:
            # It's possible that a string ends in \x00 and the next starts
            # with \x00
            # UTF-16 must be at least a multiple of 2
            while (offset_new-offset) % 2:
                offset_new = self._file_raw.find(b'\x00\x00\x00\x00', offset_new+1)
            string = self._file_raw[offset:offset_new].decode('utf-16')
        # Store the offset including the null terminator
        if self.use_dat_value:
            value = DatValue(string, offset, offset_new-offset+4, parent, specification)
        else:
            value = string
    elif casts[0][0] in (self.CastTypes.POINTER_LIST, self.CastTypes.POINTER):
        data = data if data else struct.unpack('<' + casts[0][2], self._file_raw[offset:offset+casts[0][1]])
        # The last unpacked value is the offset into the data section;
        # for lists the first value is the element count.
        data_offset = data[-1] + self.data_offset
        # Instance..
        if self.use_dat_value:
            # 4 == CastTypes.POINTER (see the constants above): a plain
            # pointer wraps the single offset, a list wraps the pair.
            value = DatValue(data[0] if casts[0][0] == 4 else data, offset, casts[0][1], parent, specification)
            if casts[0][0] == self.CastTypes.POINTER_LIST:
                value.children = []
                for i in range(0, data[0]):
                    '''if offset < self._data_offset_current:
                        print(self._data_offset_current, offset)
                        raise SpecificationError("Overlapping offset for cast %s:%s" % (parent.is_list, casts[0]))'''
                    value.children.append(self._cast_from_spec(specification, casts[1:], value, data_offset+i*casts[1:][0][1]))
            elif casts[0][0] == self.CastTypes.POINTER:
                value.child = self._cast_from_spec(specification, casts[1:], value, data_offset)
            self.data_parsed.append(value)
        else:
            if casts[0][0] == self.CastTypes.POINTER_LIST:
                value = []
                for i in range(0, data[0]):
                    value.append(self._cast_from_spec(specification, casts[1:], value, data_offset+i*casts[1:][0][1]))
            elif casts[0][0] == self.CastTypes.POINTER:
                value = self._cast_from_spec(specification, casts[1:], None, data_offset)
    # TODO:
    # if parent:
    # self._data_offset_current = offset
    # self.data_parsed.append(value)
    return value
def _process_row(self, rowid):
    """Parse one fixed-size table row into a DatRecord.

    The whole row is unpacked in a single struct call using the format
    precomputed in __init__, then each cell is handed to _cast_from_spec
    for decoding.
    """
    # 4-byte row count header precedes the table section.
    offset = 4 + rowid * self.table_record_length
    row_data = DatRecord(self, rowid)
    data_raw = self._file_raw[offset:offset+self.cast_size]
    # We don't have any data, return early
    if len(data_raw) == 0:
        return row_data
    # Unpacking the entire row in one go will help breaking down the
    # function calls significantly
    row_unpacked = struct.unpack(self.cast_row, data_raw)
    i = 0
    for spec, casts in self.cast_spec:
        # NOTE(review): the literal 3 appears to be CastTypes.POINTER_LIST,
        # whose format ('II'/'QQ') unpacks to two values (count + offset) --
        # confirm against the enum above and prefer the named constant.
        if casts[0][0] == 3:
            cell_data = row_unpacked[i:i+2]
            i += 1
        else:
            cell_data = (row_unpacked[i], )
        row_data.append(self._cast_from_spec(spec, casts, data=cell_data, offset=offset))
        offset += casts[0][1]
        i += 1
    return row_data
def read(self, raw):
    """Parse raw dat file contents into ``self.table_data``.

    Parameters
    ----------
    raw : bytes or BytesIO
        the raw file contents

    Returns
    -------
    list
        the parsed rows (also stored on ``self.table_data``)

    Raises
    ------
    TypeError
        if raw is neither bytes nor a BytesIO instance
    ValueError
        if the data magic number is missing, or the header row count is
        inconsistent with the table section length
    SpecificationError
        if the specification row size exceeds the real row size
    """
    # TODO consider memory issues for saving raw contents
    if isinstance(raw, bytes):
        self._file_raw = raw
    elif isinstance(raw, BytesIO):
        self._file_raw = raw.read()
    else:
        # BUG FIX: previously interpolated the builtin ``type`` instead of
        # the offending object's actual type.
        raise TypeError('Raw must be bytes or BytesIO instance, got %s' %
                        type(raw))
    self.file_length = len(self._file_raw)
    # The variable-size data section starts at the magic number;
    # everything before it is the fixed-size row table (plus header).
    self.data_offset = self._file_raw.find(DAT_FILE_MAGIC_NUMBER)
    if self.data_offset == -1:
        raise ValueError(
            'Did not find data magic number in "%(file)s"' % {
                'file': self.file_name,
            }
        )
    self.table_rows = struct.unpack('<I', self._file_raw[0:4])[0]
    # NOTE(review): _table_offset is defined on the class outside this
    # chunk (presumably 4, the size of the row-count header) -- confirm.
    self.table_length = self.data_offset - self._table_offset
    if self.table_rows > 0:
        self.table_record_length = self.table_length//self.table_rows
    elif self.table_rows == 0 and self.table_length == 0:
        self.table_record_length = 0
    else:
        # Header claims zero rows yet the table section is non-empty (or
        # negative) -- the file is corrupt or mis-detected.
        raise ValueError(
            '"%(file)s": header declares %(rows)s rows for a table '
            'section of %(length)s bytes' % {
                'file': self.file_name,
                'rows': self.table_rows,
                'length': self.table_length,
            }
        )
    if self.specification is None:
        self.cast_size = self.table_record_length
    # If we haven't specified the last few columns of the table, ignore them.
    # This makes spec errors harder to detect than a simple inequality check
    # - but allows us to output *something* when a poe patch adds a column
    # (very common). You could get a similar effect by padding the end of
    # the spec with "unknownXX" fields.
    #
    # We still fail here if the spec is too *large*; deleted fields must be
    # specced manually. It seems we fail later in some cases too, when
    # columns change in incompatible ways.
    #
    # TODO: CLI flag for this behavior!
    if self.cast_size > self.table_record_length:
        raise SpecificationError(
            SpecificationError.ERRORS.RUNTIME_ROWSIZE_MISMATCH,
            '"%(name)s": Specification row size %(spec_size)s vs real size %(real_size)s' % {
                'name': self.file_name,
                'spec_size': self.cast_size,
                'real_size': self.table_record_length
            }
        )
    # if self.cast_size != self.table_record_length:
    #     print(self.file_name, {'spec_size': self.cast_size, 'real_size': self.table_record_length}, flush=True)
    self.table_data = []
    # Prepare data section
    self.data_parsed = list()
    for i in range(0, self.table_rows):
        self.table_data.append(self._process_row(i))
    if self.auto_build_index:
        self.build_index()
    return self.table_data
def print_data(self):
    """Debugging helper: dump every row and its cell values to stdout."""
    for record in self.table_data:
        print('Row: %s' % record.rowid)
        for column in record.keys():
            print('|- %s: %s' % (column, record[column]))
@deprecated
def export_to_html(self, export_table=True, export_data=False):
    """Render the parsed table as an HTML string (deprecated debug helper).

    Parameters
    ----------
    export_table : bool
        emit the main table, one column per specification field
    export_data : bool
        emit a second, currently empty, table for the data section

    Returns
    -------
    str
        concatenated HTML markup
    """
    outstr = []
    if export_table:
        outstr.append('<table>')
        outstr.append('<thead>')
        outstr.append('<tr>')
        outstr.append('<th>')
        outstr.append('ROW')
        outstr.append('</th>')
        for key in self.specification['fields']:
            outstr.append('<th>')
            # Fall back to the raw field name when no display name is set.
            disp = self.specification['fields'][key]['display']
            if not disp:
                disp = key
            outstr.append(disp)
            outstr.append('</th>')
        outstr.append('</tr>')
        outstr.append('</thead>')
        outstr.append('<tbody>')
        for row in self.table_data:
            outstr.append('<tr>')
            outstr.append('<th>')
            outstr.append(str(row.rowid))
            outstr.append('</th>')
            for dv in row:
                outstr.append('<td>')
                if self.use_dat_value:
                    outstr.append(str(dv.get_value()))
                elif isinstance(dv, DatRecord):
                    # Resolved relation: show the target row id only.
                    outstr.append(str(dv.rowid))
                else:
                    outstr.append(str(dv))
                outstr.append('</td>')
            outstr.append('</tr>')
        outstr.append('</tbody>')
        outstr.append('</table>')
    if export_data:
        outstr.append('<table>')
        outstr.append('<thead>')
        outstr.append('<tr>')
        outstr.append('</tr>')
        outstr.append('</thead>')
        outstr.append('<tbody>')
        outstr.append('</tbody>')
        outstr.append('</table>')
    return ''.join(outstr)
class DatFile(AbstractFileReadOnly):
    """
    Representation of a .dat file.

    Attributes
    ----------
    reader : DatReader
        reference to the DatReader instance once :meth:`read` has been
        called; None before that
    """
    def __init__(self, file_name):
        """
        Parameters
        ----------
        file_name : str
            Name of the .dat file
        """
        self.reader = None
        self._file_name = file_name
    def __repr__(self):
        return 'DatFile<%s>(file_name="%s")' % (hex(id(self)), self._file_name)
    def _read(self, buffer, *args, **kwargs):
        # Delegate the actual parsing to a fresh DatReader; extra keyword
        # arguments are forwarded as reader options.
        reader = DatReader(self._file_name, **kwargs)
        reader.read(buffer.read())
        self.reader = reader
        return reader
@doc(doc=AbstractFileCache, prepend="""
Read dat files in a relational matter and cache them for further use.
The relational reader will process **all** relations upon accessing a dat
file; this means any field marked as relation or enum in the specification
will be processed and the pointer will be replaced with the actual value.
For example, if a row "OtherKey" points to another file "OtherDatFil.dat",
the contents of "OtherKey" will no longer be a reference like 0, but instead
the actual row from the file "OtherDatFile.dat".
As a result you have equivalence of:
* rr["DatFile.dat"]["OtherKey"]["OtherDatFileValue"]
* rr["OtherDatFile.dat"][0]["OtherDatFileValue"]
Enums are processed in a similar fashion, except they'll be replaced with
the according enum instance from :py:mod:`PyPoE.poe.constants` for the
specific value.
""")
class RelationalReader(AbstractFileCache):
    # The cache creates and stores DatFile instances.
    FILE_TYPE = DatFile
    @doc(doc=AbstractFileCache.__init__, append="""
    Parameters
    ----------
    raise_error_on_missing_relation : bool
        Raises error instead of issuing an warning when a relation is broken
    language : str
        language subdirectory in data directory
    """)
    def __init__(self, raise_error_on_missing_relation=False,
                 language=None, *args, **kwargs):
        self.raise_error_on_missing_relation = raise_error_on_missing_relation
        # English data lives directly under Data/; other languages sit in
        # a nested subdirectory.
        if language == 'English' or language is None:
            self._language = ''
        else:
            self._language = language + '/'
        super().__init__(*args, **kwargs)
    def __getitem__(self, item):
        """
        Shortcut that also appends Data/ if missing
        The following calls are equivalent:
        * self['DF.dat'] <==> read_file('Data/DF.dat').reader
        * self['Data/DF.dat'] <==> read_file('Data/DF.dat').reader
        """
        if not item.startswith('Data/'):
            item = 'Data/' + self._language + item
        return self.get_file(item).reader
    def _set_value(self, obj, other, key, offset):
        # Resolve one scalar foreign-key value against the target reader
        # ``other``: ``key`` selects an indexed column, otherwise ``obj``
        # is treated as a row number shifted by ``offset``.
        if obj is None:
            # Null pointer in the dat file -- nothing to resolve.
            obj = None
        elif key:
            try:
                obj = other.index[key][obj]
            except KeyError:
                msg = 'Did not find proper value for foreign key "%s" with ' \
                      'value "%s"' % (key, obj)
                if self.raise_error_on_missing_relation:
                    raise SpecificationError(
                        SpecificationError.ERRORS.RUNTIME_MISSING_FOREIGN_KEY,
                        msg
                    )
                else:
                    # Broken relations degrade to None unless configured
                    # to be fatal.
                    warnings.warn(msg, SpecificationWarning)
                    obj = None
        else:
            # offset is default 0
            try:
                obj = other[obj-offset]
            except IndexError:
                msg = 'Did not find proper value at index %s in %s' % (
                    obj-offset, other.file_name)
                if self.raise_error_on_missing_relation:
                    raise SpecificationError(
                        SpecificationError.ERRORS.RUNTIME_MISSING_FOREIGN_KEY,
                        msg
                    )
                else:
                    warnings.warn(msg, SpecificationWarning)
                    obj = None
        return obj
    def _dv_set_value(self, value, other, key, offset):
        # DatValue variant: recurse through pointers/lists and mutate the
        # wrapped .value in place.
        if value.is_pointer:
            self._dv_set_value(value.child, other, key, offset)
        elif value.is_list:
            [self._dv_set_value(dv, other, key, offset) for dv in value.children]
        else:
            value.value = self._set_value(value.value, other, key, offset)
        return value
    def _simple_set_value(self, value, other, key, offset):
        # Plain-value variant: lists resolve element-wise.
        if isinstance(value, list):
            return [self._set_value(item, other, key, offset) for item in value]
        else:
            return self._set_value(value, other, key, offset)
    def _get_file_instance_args(self, file_name, *args, **kwargs):
        opts = super()._get_file_instance_args(file_name)
        # DatFile wants the bare file name, without the Data/<language>/
        # prefix used by the cache.
        opts['file_name'] = file_name.replace('Data/' + self._language, '')
        return opts
    def get_file(self, file_name):
        """
        Attempts to return a dat file from the cache and if it isn't available,
        reads it in.
        During the process any relations (i.e. fields that have a "key" to
        other .dat files specified) will be read. This will result in the
        appropriate fields being replaced by the related row.
        Note that a related row may be "None" if no key was specified in the
        read dat file.
        Parameters
        ----------
        file_name : str
            The name of the .dat to read. Extension is required.
        Returns
        -------
        DatFile
            Returns the given DatFile instance
        """
        if file_name in self.files:
            return self.files[file_name]
        df = self._create_instance(file_name)
        self.files[file_name] = df
        # Pick the resolver matching the reader's value representation.
        vf = self._dv_set_value if df.reader.use_dat_value else self._simple_set_value
        for key, spec_row in df.reader.specification.fields.items():
            if spec_row.key:
                if df.reader.x64:
                    spec_row_key = spec_row.key.replace('.dat', '.dat64')
                else:
                    spec_row_key = spec_row.key
                # Recursively loads (and relation-resolves) the target file.
                df_other_reader = self[spec_row_key]
                key_id = spec_row.key_id
                key_offset = spec_row.key_offset
                # Don't need to rebuild the index if it was specified as generic
                # read option already.
                if not self.read_options.get('auto_build_index') \
                        and not key_offset and key_id:
                    df_other_reader.build_index(key_id)
                index = df.reader.table_columns[key]['index']
                for i, row in enumerate(df.reader.table_data):
                    try:
                        df.reader.table_data[i][index] = vf(
                            row[index],
                            df_other_reader,
                            key_id,
                            key_offset,
                        )
                    except SpecificationError as e:
                        # Re-raise with file/column context for debugging.
                        raise SpecificationError(
                            e.code,
                            '%(fn)s:%(rn)s->%(on)s:%(msg)s' % {
                                'fn': file_name,
                                'rn': key,
                                'on': spec_row.key,
                                'msg': e.msg,
                            },
                        )
            elif spec_row.enum:
                # Replace raw integers with enum members from constants.
                const_enum = getattr(constants, spec_row.enum)
                index = df.reader.table_columns[key]['index']
                for i, row in enumerate(df.reader.table_data):
                    df.reader.table_data[i][index] = vf(
                        value=row[index],
                        other=const_enum,
                        key=None,
                        offset=0
                    )
        return df
# =============================================================================
# Functions
# =============================================================================
def set_default_spec(version=constants.VERSION.DEFAULT, reload=False):
    """
    Sets the default specification to use for the dat reader.

    Mutates the module-level ``_default_spec`` that DatReader.__init__
    falls back to when no explicit specification is passed.

    See :py:mod:`PyPoE.poe.file.specification.__init__` for more info

    Parameters
    ----------
    version : constants.VERSION
        Version of the game to load the default specification for.
    reload : bool
        Whether to reload the version.
    """
    global _default_spec
    _default_spec = load(version=version, reload=reload)
# =============================================================================
# Init
# =============================================================================
# Populate the module-level default specification at import time.
set_default_spec()
|
sanorris/homebridge-Irrigation | python-scripts/stationOn.py | from gpiozero import LED
from time import sleep
# NOTE(review): drives GPIO pin 14 high for two seconds and then exits --
# presumably pulsing an irrigation-station relay (repo context); gpiozero
# releases the pin when the interpreter shuts down. Confirm wiring.
led1 = LED(14)
led1.on()
sleep(2)
|
ocean2333/crawer | spider-3.py | # coding=utf-8
# work with vpn !!!
import requests
from bs4 import BeautifulSoup
import os
import time
import zipfile
# HTTP request headers
Hostreferer = {
    'User-Agent': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)',
    'Referer': 'https://www.wnacg.com/albums'
}
same_url = 'https://www.wnacg.com/albums'
main_url = 'https://www.wnacg.com'
all_url = 'https://www.wnacg.com/albums.html'
# download root
path = 'D:/lyt/comic'
if __name__ == "__main__":
    if not os.path.exists('D:/lyt/comic'):
        os.makedirs('D:/lyt/comic')
    print("this crawler work with vpn!!!\n")
    content = input('输入要搜索的内容:')
    start = int(input('从第几页开始'))
    path = 'D:/lyt/comic' + '/' + content
    if not os.path.exists(path):
        os.makedirs(path)
    os.chdir(path)
    # Find the number of result pages for the search term.
    ul = same_url + '-index-page-' + '1' + '-sname-' + content + '.html'
    max_page = 0  # FIX: previously left unbound (NameError) when the first request failed
    try:
        start_html = requests.get(ul, headers=Hostreferer)
        soup = BeautifulSoup(start_html.text, "html.parser")
        page = soup.find('div', class_='f_left paginator').find_all('a')
        if len(page) == 0:
            max_page = 1
            print('共1頁')
        else:
            max_page = int(page[-2].get_text())
            print('共', max_page, '页')
    except:
        print("error in step0, maybe internet error")
    for n in range(start, max_page + 1):  # page loop
        print('第', n, '页')
        try:
            ul = same_url + '-index-page-' + str(n) + '-sname-' + content + '.html'
            start_html = requests.get(ul, headers=Hostreferer)
            soup = BeautifulSoup(start_html.text, "html.parser")
        except:
            print('error in step1')
            break
        # All album links on this result page.
        all_a = soup.find('div', class_='gallary_wrap').find_all('a')
        for a in all_a:  # album loop
            start_time = time.time()
            os.chdir(path)
            title = a.get_text()  # album title
            if (title != ''):
                print("准备扒取:" + title)
                # Create the album directory (Windows rejects '?' in names).
                try:
                    if (os.path.exists(title.strip().replace('?', ''))):
                        print('目录已存在')
                        # NOTE(review): 'break' abandons the rest of this
                        # page's albums; if the intent is to skip only the
                        # already-downloaded album, this should be 'continue'.
                        break
                    else:
                        os.makedirs(title.strip().replace('?', ''))
                        os.chdir(title.strip().replace('?', ''))
                except:
                    print('error in step2,making path progress')
                    break
                # Locate the album's download page.
                download_href = ''
                try:
                    href = main_url + a['href']
                    html = requests.get(href, headers=Hostreferer)
                    mess = BeautifulSoup(html.text, "html.parser")
                    download_href = mess.find_all('a', class_='btn')[2]['href']
                    # print(download_href)
                except:
                    print('error in step3,geting href error')
                    break
                # Download the zip archive, extract it in place, delete it.
                try:
                    if (download_href != ''):
                        down_url = main_url + download_href
                        html = requests.get(down_url, headers=Hostreferer)
                        mess = BeautifulSoup(html.text, "html.parser")
                        zip_url = mess.find('a', class_='down_btn')['href']
                        print(zip_url)
                        # FIX: renamed from 'zip' (shadowed the builtin).
                        archive_resp = requests.get(zip_url, headers=Hostreferer)
                        if html.status_code == 200:
                            # FIX: the original saved every archive as the
                            # literal name 'title.zip'; use the album title,
                            # sanitized the same way as the directory name.
                            file_name = title.strip().replace('?', '') + '.zip'
                            with open(file_name, 'wb') as f:
                                f.write(archive_resp.content)
                            with zipfile.ZipFile(file_name) as extracting:
                                extracting.extractall()
                            os.remove(file_name)
                        else:
                            print("status code: ", html.status_code)
                except:
                    print('error in step5')
                    break
                print(title, '完成,', '目前第', n, '頁')
                end_time = time.time()
                try:
                    print('%d' % ((end_time - start_time) / 60), 'min', '%d' % ((end_time - start_time) % 60), 'second')
                except:
                    print('error in step6')
                    break
        print('第', n, '页完成')
ocean2333/crawer | spider-2.py | <filename>spider-2.py
#coding=utf-8
import requests
from bs4 import BeautifulSoup
import os
import random
import time
import multiprocessing as mp
import sys
# HTTP request headers
Hostreferer = {
    'User-Agent':'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)',
    'Referer':'https://www.wnacg.com/albums'
}
same_url = 'https://www.wnacg.com/albums'
main_url = 'https://www.wnacg.com'
all_url = 'https://www.wnacg.com/albums.html'
# download root (created at import time further below)
path = 'D:/lyt/comic1'
# Pool of desktop and mobile User-Agent strings; get_user_agent() below
# picks one at random per image request to vary the crawler's fingerprint.
user_agent = [
    "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
    "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:38.0) Gecko/20100101 Firefox/38.0",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; .NET CLR 2.0.50727; .NET CLR 3.0.30729; .NET CLR 3.5.30729; InfoPath.3; rv:11.0) like Gecko",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)",
    "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
    "Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
    "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11",
    "Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; TencentTraveler 4.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; The World)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Avant Browser)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
    "Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
    "Mozilla/5.0 (iPod; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
    "Mozilla/5.0 (iPad; U; CPU OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
    "Mozilla/5.0 (Linux; U; Android 2.3.7; en-us; Nexus One Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
    "MQQBrowser/26 Mozilla/5.0 (Linux; U; Android 2.3.7; zh-cn; MB200 Build/GRJ22; CyanogenMod-7) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
    "Opera/9.80 (Android 2.3.4; Linux; Opera Mobi/build-1107180945; U; en-GB) Presto/2.8.149 Version/11.10",
    "Mozilla/5.0 (Linux; U; Android 3.0; en-us; Xoom Build/HRI39) AppleWebKit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13",
    "Mozilla/5.0 (BlackBerry; U; BlackBerry 9800; en) AppleWebKit/534.1+ (KHTML, like Gecko) Version/6.0.0.337 Mobile Safari/534.1+",
    "Mozilla/5.0 (hp-tablet; Linux; hpwOS/3.0.0; U; en-US) AppleWebKit/534.6 (KHTML, like Gecko) wOSBrowser/233.70 Safari/534.6 TouchPad/1.0",
    "Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 NokiaN97-1/20.0.019; Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/525 (KHTML, like Gecko) BrowserNG/7.1.18124",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0; HTC; Titan)",
    "UCWEB7.0.2.37/28/999",
    "NOKIA5700/ UCWEB7.0.2.37/28/999",
    "Openwave/ UCWEB7.0.2.37/28/999",
    "Mozilla/4.0 (compatible; MSIE 6.0; ) Opera/UCWEB7.0.2.37/28/999"]
def get_user_agent():
    """Return a random User-Agent string from the pool above."""
    return random.choice(user_agent)
def download(task, name_set):
    """Worker process body: download the photos whose indices sit in ``task``.

    Parameters
    ----------
    task : list of int
        indices into ``name_set`` still to fetch; consumed in place
    name_set : list of str
        relative photo-page hrefs collected by the main loop

    Side effects: writes ``pic_<n>.jpg`` files into the current working
    directory and logs progress to stdout.
    """
    while task:
        num = task[0]
        pic_name = name_set[num]
        print(pic_name)
        del(task[0])
        pic_url = main_url + pic_name
        pic_html = requests.get(pic_url, headers=Hostreferer)
        pic_soup = BeautifulSoup(pic_html.text, "html.parser")
        # BUG FIX: the Referer header previously carried the literal string
        # 'pic_url' instead of the actual page URL.
        Picreferer = {'User-Agent': get_user_agent(), 'Referer': pic_url}
        pic = 'https:' + pic_soup.find('img', class_='photo')['src']
        html = requests.get(pic, headers=Picreferer)
        # (removed: the original re-parsed the binary image response with
        # BeautifulSoup into an unused variable.)
        print(pic)
        if html.status_code != 404:
            file_name = ('pic_' + str(num) + '.jpg')
            with open(file_name, 'wb') as f:
                f.write(html.content)
        else:
            print("error in pic ", num)
# Ensure the download root exists; runs at import time, before the
# interactive prompts in the __main__ block below.
if not os.path.exists('D:/lyt/comic1'):
    os.makedirs('D:/lyt/comic1')
if __name__ == "__main__":
#找寻最大页数
content = input('输入要搜索的内容的编码(不知道是什么编码(原来打字也可以))')
start = int(input('从第几页开始'))
ul = same_url+'-index-page-'+'1'+'-sname-'+content+'.html'
start_html = requests.get(ul, headers = Hostreferer)
soup = BeautifulSoup(start_html.text,"html.parser")
page = soup.find('div',class_='f_left paginator').find_all('a')
if len(page) == 0:
max_page = 1
print('共1頁')
else:
max_page = int(page[-2].get_text())
print('共',max_page,'页')
for n in range(start,max_page+1):#页数循环
#基础处理
print('第',n,'页')
try:
ul = same_url+'-index-page-'+str(n)+'-sname-'+content+'.html'
start_html = requests.get(ul, headers = Hostreferer)
soup = BeautifulSoup(start_html.text,"html.parser")
except:
print('error in step1')
#获得一页内所有图集的名字
all_a = soup.find('div',class_='gallary_wrap').find_all('a')#也可能是直接去掉target。。。
#逐个图集进行处理
for a in all_a:#图集循环
flag = 0
title = a.get_text() #提取标题
start_time = time.time()
if(title != ''):
print("准备扒取:"+title)
#创建目录
#win不能创建带?的目录
try:
if(os.path.exists(path+title.strip().replace('?',''))):
print('目录已存在')
flag=1
else:
os.makedirs(path+title.strip().replace('?',''))
flag=0
os.chdir(path + title.strip().replace('?',''))
except:
print('error in step2')
#获得页面数量
try:
if(flag == 1):
print('已经保存完毕,跳过')
continue
href = main_url + a['href']
html = requests.get(href,headers = Hostreferer)
mess = BeautifulSoup(html.text,"html.parser")
pic_num = mess.find('div',class_='asTBcell uwthumb').find('img')
page_set = mess.find('div',class_='f_left paginator').find_all('a')
if len(page_set) == 0:
page_max = 1
else :
page_max = int(page_set[-2].get_text())
except:
print('error in step3')
#获得所有图片的地址
try:
#图集页面循环
name_set = []
for page_num in range(1,page_max+1):
#page_url = 'https://www.wnacg.com/photos-index-page-'+str(page_num)+'-aid-'++'.html'
page_url = main_url+page_set[-2]['href'][0:19]+str(page_num)+page_set[-1]['href'][-15:]
print(page_url)
page_html = requests.get(page_url,headers = Hostreferer)
page_soup = BeautifulSoup(page_html.text,"html.parser")
all_name = page_soup.find('ul',class_='cc').find_all('a')
#all_span = page_soup.find_all('span',class_='name')
for photo_href in all_name:
name = photo_href['href']
name_set.append(name)
print('目录获取完成')
except:
print('error in step4')
task_set1=[]
task_set2=[]
task_num = len(name_set)
for _ in range(task_num-1):
if _ % 2 == 0:
task_set1.append(_)
elif _ % 2 == 1:
task_set2.append(_)
print(task_set1,task_set2)
p1 = mp.Process(target=download,args=(task_set1,name_set))
p2 = mp.Process(target=download,args=(task_set2,name_set))
p1.start()
p2.start()
p1.join()
p2.join()
print('完成,','目前第',n,'頁')
end_time = time.time()
try:
print('%d'%((end_time - start_time)/60),'min','%d'%((end_time - start_time)%60),'second')
print('each photo','%d'%((end_time - start_time)/len(name_set)),'s')
except:
print('error in step6')
print('第',n,'页完成')
|
ocean2333/crawer | spider_bzhan.py | # coding=utf-8
import os
import random
import sys
import time
from multiprocessing import Pool
import requests
from bs4 import BeautifulSoup
# Workflow:
# 1. read a cv (article) number
# 1.5. fetch the set's title and create a directory for it
# 2. collect all image addresses in <figure class="img-box"> on the page
# 3. truncate everything after .jpg
# 4. download and save
# v2.0: batch download support
#
head = 'https:'
http_head = 'https://www.bilibili.com'
http_site = http_head
http_cv = 'https://www.bilibili.com/read/cv'
http_ = 'https://h.bilibili.com/'
# download root
path = '/home/lyt/b/'
Hostreferer = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36'
}
num = 1
cvnum = []
# Collect cv numbers until an empty line is entered.
while True:
    cv = input("输入cv号,输入空行结束")
    if cv == '':
        break
    cvnum.append(cv)
count = 0
# NOTE(review): mode selection is hard-coded to 1 (article mode); the
# prompt is commented out, so the mod == 2 branch below is unreachable.
#mod = input('选择模式:专栏=1,相簿=2')
mod = 1
for cv_num in cvnum:
    count = count + 1
    if int(mod) == 1:
        http_cv_new = http_cv + cv_num
        print(http_cv_new)
        start_html = requests.get(http_cv_new, headers=Hostreferer)
        start_soup = BeautifulSoup(start_html.text, "html.parser")
        # print(picset_title)
        '''
        try:
            picset_title = start_soup.find('h1', class_='title').text
            if(os.path.exists(path+picset_title.strip().replace('?', ''))):
                print('目录已存在')
                # exit()
            else:
                os.makedirs(path+picset_title.strip().replace('?', ''))
                flag = 0
                os.chdir(path + picset_title.strip().replace('?', ''))
        except:
            print('目录创建失败')
        '''
        # All article-mode images land in the shared _paper directory.
        os.chdir(path + '_paper')
        # print(start_soup)
        all_pic_figure = start_soup.find_all('img', attrs={'data-src': True})
        for fig in all_pic_figure:
            # print(fig)
            pic_site = fig['data-src']
            print(pic_site)
            #length = len(pic_site['data-src'])
            #pic_site = pic_site[:(length-(int(fig['width']) + int(fig['heeight']) + 8))]
            pic_html = requests.get((head + pic_site), headers=Hostreferer)
            try:
                if pic_html.status_code != 404:
                    # Sequential numbering continues across articles.
                    file_name = (str(num)+'.jpg')
                    f = open(file_name, 'wb')
                    f.write(pic_html.content)
                    f.close()
                    num = num+1
                else:
                    # NOTE(review): a single 404 aborts the whole article;
                    # 'continue' may be the intended behaviour.
                    break
            except:
                print(pic_html.status_code)
    elif int(mod) == 2:
        http_cv_new = http_ + cv_num
        print(http_cv_new)
        start_html = requests.get(http_cv_new, headers=Hostreferer)
        start_html.encoding = 'utf-8'
        start_soup = BeautifulSoup(start_html.text, "html.parser")
        picset_title = cv_num
        os.chdir(path + '_total_paper')
        '''
        try:
            if(os.path.exists(path+picset_title.strip().replace('?', ''))):
                print('目录已存在')
                # exit()
            else:
                os.makedirs(path + picset_title.strip().replace('?', ''))
                flag = 0
                os.chdir(path + picset_title.strip().replace('?', ''))
        except:
            print('目录创建失败')
        '''
        all_pic_figure = start_soup.find_all('div', class_='ssr-content')
        print(start_soup)
        print(all_pic_figure)
        for fig in all_pic_figure:
            # print(fig)
            pic_site = fig['data-photo-imager-src']
            print(pic_site)
            #length = len(pic_site['data-src'])
            #pic_site = pic_site[:(length-(int(fig['width']) + int(fig['heeight']) + 8))]
            pic_html = requests.get((head + pic_site), headers=Hostreferer)
            try:
                if pic_html.status_code != 404:
                    file_name = (str(num)+'.jpg')
                    f = open(file_name, 'wb')
                    f.write(pic_html.content)
                    f.close()
                    num = num+1
                else:
                    break
            except:
                print(pic_html.status_code)
    print("第", count, "个下载完了")
def format_exception_numpy(etype, value, tb, limit=None):
    """
    Format the exception with a traceback.

    Parameters
    ----------
    etype : str
        exception type
    value : int
        exception value
    tb : traceback
        traceback object
    limit : int or None
        maximum number of stack frames to show (currently unused)

    Returns
    -------
    tuple
        the ``(etype, value, tb)`` triple, unchanged

    See Also
    --------
    numpy : a numerical package

    Notes
    -----
    Example of autodoc using numpydoc, the Numpy documentation format,
    via the numpydoc extension [1]_; see [2]_ for the full column-header
    specification.

    References
    ----------
    .. [1] `numpydoc <https://github.com/numpy/numpy/tree/master/doc/sphinxext>`_, \
       Numpy Documentation.
    .. [2] `Sphinx <http://sphinx-doc.org/domains.html#domains>`_, Sphinx Domains \
       Documentation.

    Examples
    --------
    >>> data = format_exception_numpy('dumb', 0, IOError)
    """
    result = (etype, value, tb)
    return result
|
Niirok/Pyckstarter | src/pkgexemple/welcome.py | <filename>src/pkgexemple/welcome.py
def int_randomizer():
    """Return the constant 42.

    Despite the name there is no randomness involved; the companion unit
    test in /test/test_welcome.py pins this exact value.

    Returns
    -------
    int
        always 42
    """
    return 42
def say_hello():
    """Print a fixed welcome message to the terminal.

    Note: Python 2 print statement; this module is not Python 3 compatible.

    Returns
    -------
    None
        only the side effect of printing
        "Welcome to this complicated tutorial, World"
    """
    print "Welcome to this complicated tutorial, World"
def say_something(arg1):
"""This function print a message to the user, which can be choose
Parameters
----------
arg1 : type optional
The first parameter. It will automatiquelly be converted to string.
Returns
-------
str
\"Hello World, I must tell you something: \", arg1
"""
strReturned = "Hello World, I must tell you something: " + str(arg1) +"."
print strReturned
return strReturned
def _private_function():
    """Excluded from generated docs by the leading underscore convention.

    Returns
    -------
    None
        only prints
        "This is a secret function, dont use it too much =)"
    """
    print "This is a secret function, dont use it too much =)"
def func(arg1, arg2):
    """Docstring-format placeholder; ignores both arguments.

    Parameters
    ----------
    arg1 : int
        Description of arg1
    arg2 : str
        Description of arg2

    Returns
    -------
    bool
        always True
    """
    return True
# Import-time demo calls exercising the functions above.
say_hello()
say_something(4)
say_something("I just call to say : \"I love you\" ")
|
Niirok/Pyckstarter | test/test_welcome.py | import pytest
from src.pkgexemple.welcome import *
def test_int_randomizer_return42():
    # int_randomizer is deterministic by contract: pin the documented value.
    assert int_randomizer() == 42
def test_say_something_string():
    # say_something returns the full printed message: fixed template + argument.
    toTest = say_something("Test string")
    assert toTest =="Hello World, I must tell you something: Test string."
|
pietow/demo_project | clean_data.py | # To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
import pandas as pd
import re
# Raw scrape of BMW dealers; the Dlodealer column holds one unstructured
# name/address string per dealer, cleaned up below.
data = pd.read_csv("bmw.csv")
df_dealer = data.Dlodealer
# Bare expression left over from the notebook cell (displays the column).
df_dealer
def get_index_street(list_, str_, i):
    """Return ``(i, str_)`` when *str_* occurs as a token in *list_*, else None.

    Parameters
    ----------
    list_ : list of str
        Tokens of one dealer record.
    str_ : str
        Exact token to look for (e.g. 'Straße').
    i : int
        Row index, passed through unchanged for later reference.
    """
    # Bug fix: the original tested membership against the module-level
    # loop variable `new` instead of the `list_` parameter, so the result
    # depended on global state rather than the argument.
    if str_ in list_:
        index = list_.index(str_)
        # list_[index] == str_ by construction of index().
        return (i, list_[index])
def get_index_street_first(list_, str_, i):
    """Return ``(i, tokens_before_first_street_word)`` or None.

    Scans *list_* for the first word containing *str_* that is not one of
    a fixed set of known false positives (place/brand names that merely
    contain the substring) and returns every token before that word.
    """
    false_positives = ('Autohaus', 'Oberhausen', 'Langenfeld',
                       'Holsterhauser', 'Recklinghausen', 'Eichenhofer')
    for pos, word in enumerate(list_):
        if str_ in word and word not in false_positives:
            return (i, list_[:pos])
    return None
# Split every dealer record into name / postcode / town components.
name_list = []
plz_list = []
ort_list = []
# German postcodes (PLZ) are exactly five digits.
r = re.compile("\d{5}")
for i, value in df_dealer.iteritems():
    new = value.split(' ')
    # Tokens before the first newline token form the dealer name.
    slice_ = new.index('\n')
    name_list.append(' '.join(new[:slice_]))
    # The first token matching the 5-digit pattern is taken as the postcode.
    plz_list.append(list(filter(r.match, new))[0])
    # NOTE(review): assumes the town is always the 6th token from the end —
    # confirm against the raw bmw.csv layout.
    ort_list.append(new[-6])
dict_dealer = {'Autohaus': name_list, 'PLZ': plz_list}
#list(filter(None, i_list))
df_dealer = pd.DataFrame(dict_dealer)
df_dealer.to_csv('clean_bmw.csv', index=False)
# %%
# PLZ.csv: semicolon-separated postcode table (id;Ort;Longitude;Latitude)
# with no header row of its own.
df_plz = pd.read_csv("PLZ.csv", sep=';',names=['id', 'Ort', 'Longitude', 'Latitude'], header=None)
def convert(place):
    """Repair one kind of umlaut mojibake in a place name.

    Mirrors the original cascade exactly: the first umlaut/eszett already
    present in *place* selects which garbled character gets replaced by it;
    at most one substitution rule is applied, and a string matching no rule
    is returned unchanged.
    """
    rules = (('ü', '¼'), ('ö', '¶'), ('ß', 'Ÿ'), ('ä', '¤'))
    for good, bad in rules:
        if good in place:
            repaired = [good if ch == bad else ch for ch in place]
            return ''.join(repaired)
    return place
def remove_A(place):
    """Drop the first stray mojibake character 'Ã' from *place*.

    Returns *place* unchanged when it contains no 'Ã'.

    Bug fix: the original ``else`` branch did ``''.join(conv_pla)`` with
    ``conv_pla`` never assigned, raising NameError for every clean input.
    """
    if 'Ã' in place:
        chars = list(place)
        # Only the first occurrence is removed, as in the original.
        chars.pop(chars.index('Ã'))
        return ''.join(chars)
    return place
# Vectorised mojibake repair applied to the whole 'Ort' column at once
# (same substitutions as the helper functions above, done with pandas).
new = df_plz.Ort.str.replace('¼', 'ü')
new = new.str.replace('¶', 'ö')
new = new.str.replace('Ÿ', 'ß')
new = new.str.replace('¤', 'ä')
new = new.str.replace('Ã', '')
df_plz['Ort'] = new
# %%
|
pietow/demo_project | webscraper.py | import requests
from bs4 import BeautifulSoup
URL = 'https://www.bmw.de/de/fastlane/bmw-partner.html#/dlo/DE/de/BMW_BMWM?type=location&term=Nordrhein-Westfalen,%20Deutschland'
page = requests.get(URL)
soup = BeautifulSoup(page.content, 'html.parser')
results = soup.body
cookies = dict(cc_digital_sessionCookie='461099141488')
print(results.prettify())
#url = "https://google-search3.p.rapidapi.com/api/v1/search"
#
#querystring = {"get_total":"false","country":"DE","language":"lang_de","max_results":"100",
# "uule":"w%2BCAIQICIbSG91c3RvbixUZXhhcyxVbml0ZWQgU3RhdGVz","hl":"de","q":"BMW Autohändler"}
#
#headers = {
# 'x-rapidapi-host': "google-search3.p.rapidapi.com",
# 'x-rapidapi-key': "<KEY>"
# }
#
#response = requests.request("GET", url, headers=headers, params=querystring)
#
#print(response.text) |
ying-yee/qunarHotelSpider | config.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# MongoDB connection settings used by the spider.
MONGO_URL = 'localhost'
MONGO_DB = 'hotel'
# International city slugs exactly as they appear in qunar.com hotel URLs.
inter_city_list = [
    'bangkok',
    'seoul',
    'singapore_city',
    'tokyo',
    'pattaya',
    'chiang_mai',
    'koh_phuket_tha',
    'kuala_lumpur',
    'surat_thani_th',
    'jeju',
    'osaka',
    'sabah',
    'maldives',
    'kyoto_kyo',
    'bali',
]
# Mainland-China city slugs.
main_land_city_list = [
    'beijing_city',
    'chengdu',
    'hangzhou',
    'shanghai_city',
    'sanya',
    'nanjing',
    'haerbin',
    'lijiang',
    'qingdao',
    'guangzhou',
    'shenzhen',
    'wuhan',
    'chongqing_city',
    'suzhou_jiangsu',
    'changsha',
]
# Small subset used for smoke-testing the spider.
test_city_list = [
    'chengdu',
    'haerbin',
    'guangzhou',
    'chongqing_city',
    'chiang_mai',
]
|
ying-yee/qunarHotelSpider | main.py | <reponame>ying-yee/qunarHotelSpider<filename>main.py<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*
import codecs
import json
import pprint
import re
import collections
from operator import itemgetter
import argparse
from bs4 import BeautifulSoup
import requests
import pandas
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import pymongo
from config import inter_city_list, main_land_city_list, test_city_list, MONGO_URL, MONGO_DB
class qunarSpider():
    """Scrape qunar.com hotel data for one city into MongoDB, then export.

    The constructor opens a Selenium/Chrome session and a MongoDB
    connection, immediately scrapes the city's basic hotel list, and the
    ``parse_*`` methods then enrich each hotel (star level, quotes, Q&A,
    scores, review counts). ``parse_to_xls`` merges all collections into
    one Excel sheet.
    """
    def __init__(self, city_name):
        # city_name must be a qunar URL slug, e.g. 'chengdu' (see config.py).
        self.city_name = city_name
        self.chromeOptions = webdriver.ChromeOptions()
        # 2 == "block images": speeds up Selenium page loads considerably.
        self.prefs = {"profile.managed_default_content_settings.images": 2}
        self.chromeOptions.add_experimental_option("prefs", self.prefs)
        self.browser = webdriver.Chrome(chrome_options=self.chromeOptions)
        self.browser.set_window_size(1400, 900)
        self.wait = WebDriverWait(self.browser, 10)
        self.client = pymongo.MongoClient(MONGO_URL)
        self.db = self.client[MONGO_DB]
        # Scrape the listing page right away so get_hotel_ids has data.
        self.parse_basic_info()
        self.hotels = self.get_hotel_ids()
    def save_to_mongo(self, collection, data):
        # Insert only when this hotel-id is not yet in the collection
        # (idempotent across re-runs).
        if not self.db[collection].find_one({'hotel-id': data['hotel-id']}):
            self.db[collection].insert_one(data)
            print(data)
    def parse_basic_info(self):
        """Scrape the city listing page and store name/url/price per hotel."""
        try:
            url = 'http://hotel.qunar.com/city/{}/'.format(self.city_name)
            self.browser.get(url)
            soup = BeautifulSoup(self.browser.page_source, 'lxml')
            _hotels = soup.find_all('div', attrs={'class': 'b_result_bd'})
            for hotel in _hotels:
                _hotel = {}
                # Skip hotels that have no reviews at all.
                if hotel.find_all('a', attrs={'class': 'comment-none'}):
                    continue
                if hotel.find_all('span', attrs={'class': 'no-comment'}):
                    continue
                item = hotel.find_all('span', attrs={'class': 'hotel_item'})[0]
                _hotel['url'] = item.a.get('href').split('?')[0]
                _hotel['name'] = item.a.get('title')
                # hotel-id is '<city_slug>/<url path segment>', e.g. 'chengdu/dt-1234'.
                _hotel['hotel-id'] = self.city_name + '/' + _hotel['url'].split('/')[-2]
                try:
                    # Number of "sleep testers" recommending the hotel.
                    _hotel['sleeper_cnt'] = int(hotel.find_all('span', attrs={"class": "num", })[0].get_text())
                except IndexError:
                    _hotel['sleeper_cnt'] = 0
                item_price = hotel.find_all('p', attrs={'class': 'item_price'})
                ref_price = hotel.find_all('p', attrs={'class': 'ref_price'})
                if item_price:
                    _hotel['lowest_price'] = item_price[0].b.get_text()
                elif ref_price:
                    # Title text looks like '...:<price>元...'; extract the number.
                    _hotel['lowest_price'] = ref_price[0].a.get('title').split(':')[-1].split('元')[0]
                else:
                    _hotel['lowest_price'] = 0
                self.save_to_mongo('hotels', _hotel)
        except TimeoutException as e:
            # Retry the whole page on timeout.
            # NOTE(review): unbounded recursion — a permanently failing page
            # would recurse until RecursionError.
            return self.parse_basic_info()
    def get_hotel_ids(self):
        # <city_name>/dt-<id> => dt-<id>
        return self.db.hotels.distinct('hotel-id')
    def get_dangci(self, hotel_id):
        """Fetch the hotel's star level ('dangci') from its detail page."""
        url = r'http://hotel.qunar.com/city/{city_name}/{hotel_id}'.format(
            city_name=self.city_name,
            hotel_id=hotel_id.split('/')[-1],
        )
        r = requests.get(url).text
        # The level is embedded in the page as: var dangci="<n>"
        dangci = re.compile(r'var dangci="\d+"').findall(r)[0].split('\"')[1]
        return dangci
    def get_hotel_quotes(self, hotel_id):
        """Fetch how many travel guides quote this hotel (JSON API)."""
        # NOTE(review): hotel_id.strip('-')[-1] yields the *last character*
        # of the id (strip only trims edge '-' chars); split('/')[-1] or
        # split('-')[-1] was probably intended — confirm against a working URL.
        url = r'http://travel.qunar.com/travelbook/api/getQuoteByHotelSeq?hotelSeq={city_name}_{hotel_id}'.format(
            city_name=self.city_name,
            hotel_id=hotel_id.strip('-')[-1],
        )
        r = requests.get(url)
        quotes = json.loads(r.text)
        return quotes
    def get_hotel_fqas(self, hotel_id):
        """Fetch the first page of Q&A entries for the hotel (JSON API)."""
        # NOTE(review): same strip('-')[-1] pattern as get_hotel_quotes — verify.
        url = r'http://review.qunar.com/api/h/faq/{city_name}_{hotel_id}/list?start=0&step=15'.format(
            city_name=self.city_name,
            hotel_id=hotel_id.strip('-')[-1],
        )
        r = requests.get(url)
        fqas = json.loads(r.text)
        return fqas
    def get_hotel_scores(self, hotel_id):
        """Fetch review scores; fall back to zeroed stats when absent."""
        # NOTE(review): same strip('-')[-1] pattern as get_hotel_quotes — verify.
        url = r'http://review.qunar.com/api/h/{city_name}_{hotel_id}/v2/detail'.format(
            city_name=self.city_name,
            hotel_id=hotel_id.strip('-')[-1],
        )
        r = requests.get(url)
        try:
            scores = json.loads(r.text)['data']
        except KeyError:
            scores = {}
            scores['hotelScore'] = 0
            scores['countStat'] = {}
            scores['countStat']['guruCnt'] = 0
        return scores
    def get_comments(self, hotel_id):
        """Scrape (total, positive, neutral, negative) review counts via Selenium."""
        try:
            url = r'http://hotel.qunar.com/city/{city_name}/{hotel_id}/?tag=chengdu#fromDate={from_date}&toDate={to_date}'.format(
                city_name=self.city_name,
                hotel_id=hotel_id.split('/')[-1],
                from_date='2017-05-06',
                to_date='2017-05-07',
            )
            self.browser.get(url)
            commentCnts = self.wait.until(EC.presence_of_element_located(
                (By.CSS_SELECTOR,
                 '#jd_comments > div > div.b_ugcheader > div.b_ugcfilter > div:nth-child(2) > form > dl.rank')
            ))
            # Filter bar text alternates label/count tokens: counts sit at
            # fixed word positions 2/4/6, wrapped in parentheses.
            positive_cnt = int(commentCnts.text.split()[2].strip('(').strip(')'))
            neutral_cnt = int(commentCnts.text.split()[4].strip('(').strip(')'))
            negative_cnt = int(commentCnts.text.split()[6].strip('(').strip(')'))
            cmmcnt = positive_cnt + neutral_cnt + negative_cnt
        except TimeoutException:
            # NOTE(review): unbounded retry recursion, same as parse_basic_info.
            return self.get_comments(hotel_id)
        # finally:
        #     self.browser.quit()
        return cmmcnt, positive_cnt, neutral_cnt, negative_cnt
    def parse_dangci(self):
        """Store the star level for every hotel not yet in the dangci collection."""
        for hotel_id in self.hotels:
            if hotel_id not in self.db.dangci.distinct('hotel-id'):
                _dangci = {}
                # NOTE(review): hotel_id already carries the '<city>/' prefix
                # (see parse_basic_info), so this prepends it twice — confirm
                # against documents actually stored in the collection.
                _dangci['hotel-id'] = self.city_name + '/' + hotel_id
                _dangci['dangci'] = self.get_dangci(hotel_id)
                self.save_to_mongo('dangci', _dangci)
    def parse_quotes(self):
        """Store the travel-guide quote count for every hotel."""
        for hotel_id in self.hotels:
            if hotel_id not in self.db.quotes.distinct('hotel-id'):
                _quote = {}
                _quote['hotel-id'] = self.city_name + '/' + hotel_id
                try:
                    _quote['多少家旅行攻略提到'] = self.get_hotel_quotes(hotel_id)['data']['quoteCount']
                except TypeError:
                    _quote['多少家旅行攻略提到'] = 0
                self.save_to_mongo('quotes', _quote)
    def parse_fqas(self):
        """Store the Q&A count for every hotel."""
        for hotel_id in self.hotels:
            if hotel_id not in self.db.fqas.distinct('hotel-id'):
                _fqa = {}
                _fqa['hotel-id'] = self.city_name + '/' + hotel_id
                _fqa['问答数目'] = self.get_hotel_fqas(hotel_id)['count']
                self.save_to_mongo('fqas', _fqa)
    def parse_scores(self):
        """Store per-category review scores for every hotel."""
        for hotel_id in self.hotels:
            if hotel_id not in self.db.scores.distinct('hotel-id'):
                _scores = {}
                _scores['hotel-id'] = self.city_name + '/' + hotel_id
                qunar_scores = self.get_hotel_scores(hotel_id)
                try:
                    _scores['整体评分'] = float(qunar_scores['hotelScore'])
                    _scores['专家点评数目'] = int(qunar_scores['countStat']['guruCnt'])
                except KeyError:
                    _scores['整体评分'] = 0
                    _scores['专家点评数目'] = 0
                for score in qunar_scores['itemList']:
                    _scores[score['name']] = float(score['score'])
                print('scores:', _scores)
                # NOTE(review): this explicit insert makes the save_to_mongo
                # call below a no-op (its find_one will already match) —
                # one of the two is redundant.
                if not self.db.scores.find_one({'hotel-id': _scores['hotel-id']}):
                    self.db.scores.insert_one(_scores)
                self.save_to_mongo('scores', _scores)
    def parse_comments(self):
        """Store review counts (total/positive/neutral/negative) for every hotel."""
        for hotel_id in self.hotels:
            if hotel_id not in self.db.comment_cnts.distinct('hotel-id'):
                _cmmCnt = {}
                _cmmCnt['hotel-id'] = hotel_id
                _comments = self.get_comments(hotel_id)
                # NOTE(review): get_comments is called twice per hotel here;
                # the second call repeats the whole Selenium fetch.
                if not self.get_comments(hotel_id):
                    # continue
                    pass
                else:
                    _cmmCnt['评价总数'] = _comments[0]
                    _cmmCnt['好评数目'] = _comments[1]
                    _cmmCnt['中评数目'] = _comments[2]
                    _cmmCnt['差评数目'] = _comments[3]
                self.save_to_mongo('comment_cnts', _cmmCnt)
    def parse_to_xls(self, ):
        """Export the merged per-hotel records to hotels/<city>_hotels.xls."""
        writer = pandas.ExcelWriter('hotels/' + self.city_name + '_hotels.xls')
        uL = self.reduce_collections()
        oL = []
        for item in uL:
            oD = collections.OrderedDict()
            # (key in merged record, column name in the sheet) pairs;
            # order here fixes the column order of the Excel output.
            attr_list = [
                ('序号', '序号',),
                ('酒店名', '酒店名'),
                ('酒店链接', '酒店链接'),
                ('星级', '星级'),
                ('最低房价', '最低房价'),
                ('问答数目', '问答数目'),
                ('多少家旅行攻略提到', '多少家旅行攻略提到'),
                ('评价总数', '评价总数'),
                ('好评数目', '好评数目'),
                ('中评数目', '中评数目'),
                ('差评数目', '差评数目'),
                ('设备设施', '设备设施'),
                ('环境卫生', '环境卫生'),
                ('服务质量', '服务质量'),
                ('地理位置', '地理位置'),
                ('餐饮服务', '餐饮服务'),
                ('性价比评分', '性价比'),
                ('整体评分', '整体评分'),
                ('专家点评数目', '专家点评数目'),
                ('多少位试睡员推荐', '多少位试睡员推荐'),
            ]
            print(item)
            for attr in attr_list:
                if attr[0] in item:
                    oD[attr[1]] = item[attr[0]]
                else:
                    # NOTE(review): uses the whole tuple as the column key,
                    # inconsistent with oD[attr[1]] above — likely a typo.
                    oD[attr] = ''
            oL.append(oD)
        df = pandas.DataFrame(oL, index=range(1, len(oL) + 1))
        df.to_excel(writer)
        writer.save()
    def reduce_collections(self):
        """Merge all six Mongo collections into one record per hotel."""
        COLLECTIONS = ['comment_cnts', 'dangci', 'fqas', 'hotels', 'quotes', 'scores']
        def extract_collection_data(collection):
            # Materialise the whole collection as a list of documents.
            cursor = self.db[collection].find({})
            result_list = []
            for data in cursor:
                result_list.append(data)
            return result_list
        data_list = []
        for c in COLLECTIONS:
            data_list.append(extract_collection_data(c))
        merged_list = []
        # NOTE(review): assumes all six collections have identical length and
        # aligned ordering; later dicts overwrite keys of earlier ones.
        for i in range(len(data_list[0])):
            K = {**data_list[0][i], **data_list[1][i], **data_list[2][i], **data_list[3][i], **data_list[4][i],
                 **data_list[5][i]}
            merged_list.append(K)
        # pprint.pprint(merged_list)
        print(len(merged_list))
        result_list = []
        for info in merged_list:
            hotel_info = collections.OrderedDict()
            hotel_info['序号'] = info['hotel-id']
            hotel_info['酒店名'] = info['name']
            hotel_info['酒店链接'] = info['url']
            hotel_info['星级'] = info['dangci']
            hotel_info['最低房价'] = info['lowest_price']
            hotel_info['问答数目'] = info['问答数目']
            hotel_info['多少家旅行攻略提到'] = info['多少家旅行攻略提到']
            hotel_info['多少位试睡员推荐'] = info['sleeper_cnt']
            hotel_info['评价总数'] = info['评价总数']
            hotel_info['好评数目'] = info['好评数目']
            hotel_info['中评数目'] = info['中评数目']
            hotel_info['差评数目'] = info['差评数目']
            # Per-category scores are optional; default to '' when missing.
            key_tuple_list = [
                ('设备设施', '设备设施'),
                ('环境卫生', '环境卫生'),
                ('服务质量', '服务质量'),
                ('地理位置', '地理位置'),
                ('餐饮服务', '餐饮服务'),
                ('性价比评分', '性价比'),
                ('整体评分', '整体评分'),
                ('专家点评数目', '专家点评数目'),
            ]
            for t in key_tuple_list:
                if t[1] in info:
                    hotel_info[t[0]] = info[t[1]]
                else:
                    hotel_info[t[0]] = ''
            result_list.append(hotel_info)
        return result_list
if __name__ == '__main__':
    def crawl(city_name):
        # Full pipeline for one city: scrape, enrich, merge, export, cleanup.
        s = qunarSpider(city_name)
        s.parse_dangci()
        s.parse_quotes()
        s.parse_fqas()
        s.parse_scores()
        s.parse_comments()
        s.reduce_collections()
        s.parse_to_xls()
        s.browser.quit()
    # Single positional argument: the qunar city slug to crawl.
    parser = argparse.ArgumentParser()
    parser.add_argument("city_name", help=u"你要爬的酒店名称")
    args = parser.parse_args()
    crawl(args.city_name)
|
neeyongliang/gists | tool-code-counter.py | <gh_stars>0
#! /usr/bin/python3
# -*- coding: utf-8 -*-
import os
import subprocess
# Directory holding the modified source packages (adjust as needed).
SOURCE_CODE_DIR = '/home/someone/SourceCode'
# Directory holding the pristine upstream source packages (adjust as needed).
UPSTREAM_CODE_DIR = '/home/someone/Downloads/open-source'
class CodeCount:
    """Code-counting helper script.

    Description:
        This script counts source code for Debian-style packages. Before
        using it, install the ``cloc`` and ``dpkg-dev`` packages:
        ``cloc`` counts lines of code, and ``dpkg-source -x`` from
        ``dpkg-dev`` unpacks the source packages.
        Adjust SOURCE_CODE_DIR / UPSTREAM_CODE_DIR before running; if the
        run hangs, update the parameters in ``__init__``.

    Note:
        Runs can still fail: by our naming convention we assume the
        directory an upstream tarball unpacks into has the same name as
        the maintained source directory, or is a prefix of it, i.e.:
            gnome-help-1.4 --> upstream
            gnome-help --> source, will FAILED
            gnome-help-1.2 --> source, will FAILED
            gnome-help-1.4 --> source, will SUCCESS
            gnome-help-1.2+1debian1 --> source, will SUCCESS
        Debian package names are too complex to match with a simple
        -[0-9](.*) pattern:
            python3.6-dev
            libgtk-3-dev
            libvte-2.91-common

    Author:
        yongliang

    Version:
        0.2
    """
    def __init__(self):
        """Initialize the blacklist and shell-command templates."""
        # Blacklist of packages that must not be unpacked/counted.
        # debian-webrt-1.4.9 is so large that cloc simply hangs on it.
        self.black_dsc_list = ['debian-webrt_1.4.9.dsc']
        # Adjust as needed.
        self.diff_cmd = "diff -Nur -x \".git\" -x \".pc\" "
        self.cloc_cmd = " cloc --autoconf --by-file --exclude-dir .pc "
    @staticmethod
    def get_folders_or_files(object_path, is_get_directory, file_suffix):
        """Return the sub-directories or files found under a path.

        :param object_path: directory to scan
        :param is_get_directory: True to collect directories, False for files
        :param file_suffix: required file suffix (ignored for directories)
        :return: list of matching entry names, or None on bad input
        """
        if object_path is None or len(object_path) == 0:
            print("Path get failed, cannot get directories or files.")
            return None
        all_files = os.listdir(object_path)
        if is_get_directory is True:
            object_list = []
            for path in all_files:
                # 'code-count' is this tool's own working directory.
                if path == 'code-count':
                    continue
                if os.path.isdir(object_path + '/' + path) is False:
                    continue
                object_list.append(path)
        else:
            object_list = []
            for path in all_files:
                if os.path.isdir(object_path + '/' + path):
                    # print("not file!")
                    continue
                if len(path) < len(file_suffix):
                    # print("file name too short!")
                    continue
                if path[-len(file_suffix):] != file_suffix:
                    # print("not correct suffix!")
                    continue
                object_list.append(path)
        return object_list
    def get_code_dirs(self, dir_path):
        """Return every directory under *dir_path*.

        :param dir_path: directory to scan
        :return: all sub-directories of the given directory
        """
        dirs = self.get_folders_or_files(dir_path, True, "")
        return dirs
    def test_get_code_dirs(self, dir_path):
        """Smoke test for get_code_dirs: print what it finds.

        :param dir_path: directory whose sub-directories are listed
        """
        dirs = self.get_code_dirs(dir_path)
        if dirs is None or len(dirs) == 0:
            print("directory is None")
            return
        print("test_get_code_dirs:")
        for file in dirs:
            print('dir list %s' % file)
    def get_dsc_files(self, dsc_files_path):
        """Return the .dsc files found under *dsc_files_path*.

        :param dsc_files_path: directory containing the .dsc files
        :return: list of .dsc file names
        """
        dsc_files = self.get_folders_or_files(dsc_files_path, False, ".dsc")
        return dsc_files
    def test_get_dsc_files(self, dsc_files_path):
        """Smoke test for get_dsc_files: print what it finds.

        :param dsc_files_path: directory containing the .dsc files
        """
        dsc_files = self.get_dsc_files(dsc_files_path)
        if dsc_files is None or len(dsc_files) == 0:
            print("dsc files is None")
            return
        print("find following dsc files:")
        for dsc in dsc_files:
            print(dsc_files_path + '/' + dsc)
    @staticmethod
    def subprocess_run_command(cmd, success_message, output_file):
        """Run a shell command via subprocess.

        :param cmd: shell command to run
        :param success_message: message printed after a successful run
        :param output_file: file to capture stdout into, or None to inherit
        """
        try:
            if output_file is None:
                subprocess.run(cmd, shell=True)
            else:
                p = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE,stderr=subprocess.PIPE)
                # print(p.stdout)
                with open(output_file, 'wb') as f:
                    f.write(p.stdout)
        except subprocess.TimeoutExpired as e:
            print(e)
        except subprocess.SubprocessError as e:
            print(e)
        except Exception as e:
            print(e)
        else:
            print(success_message)
    def unpack_source_packages(self, dsc_directory):
        """Unpack every non-blacklisted source package in a directory.

        :param dsc_directory: directory containing the .dsc files to unpack
        :return:
        """
        if dsc_directory is None or len(dsc_directory) == 0:
            print("Cannot find directory to unpack!")
            return
        dsc_files = self.get_dsc_files(dsc_directory)
        if dsc_files is None or len(dsc_files) == 0:
            print("No package need to unpack!")
            return
        for dsc in dsc_files:
            if dsc in self.black_dsc_list:
                continue
            cmd = 'cd ' + dsc_directory + ';dpkg-source -x ' + dsc
            print(cmd)
            self.subprocess_run_command(cmd, "package unpack success!", None)
    def generic_cloc_log(self, directory, unpack_dirs):
        """Generate one cloc report (<dir>.count) per unpacked directory.

        :param directory: parent directory holding the unpacked trees
        :param unpack_dirs: list of unpacked directory names
        """
        if unpack_dirs is None:
            return
        for path in unpack_dirs:
            cmd = 'cd ' + directory + ';' + self.cloc_cmd + path
            # print(cmd)
            self.subprocess_run_command(cmd, "generate cloc log OK!", path + '.count')
    def generic_diff_log(self, directory_source_dirs, directory_upstream_dirs):
        """Generate one diff report (<dir>.diff) per upstream/source pair.

        :param directory_source_dirs: maintained (modified) source directories
        :param directory_upstream_dirs: pristine upstream directories
        """
        if directory_source_dirs is None or len(directory_source_dirs) == 0:
            print("source dirs not None failed!")
            return
        if directory_upstream_dirs is None or len(directory_upstream_dirs) == 0:
            print("object dirs not None failed!")
            return
        for i in directory_upstream_dirs:
            tmp_len = len(i)
            # print("%s length: %d" %(i, tmp_len))
            for j in directory_source_dirs:
                if len(j) < len(i):
                    continue
                # Pair by prefix: upstream dir name must prefix the source dir
                # name (see the class docstring for the naming convention).
                if j[0:tmp_len] == i:
                    cmd = self.diff_cmd + UPSTREAM_CODE_DIR + "/" + i + " " + SOURCE_CODE_DIR + "/" + j
                    print(cmd)
                    self.subprocess_run_command(cmd, "Diff " + i + " log generate OK!", i + '.diff')
                # else:
                #     print("jump %s.diff generate, maybe directory name error in source directory" % i)
    @staticmethod
    def analyze_log_file(log_path, log_type, message):
        """Summarise every log of the given type found under *log_path*.

        :param log_path: directory containing the log files
        :param log_type: log suffix, '.diff' or '.count'
        :param message: text appended when a file is skipped
        """
        all_files = os.listdir(log_path)
        if all_files is None or len(all_files) == 0:
            return
        for f in all_files:
            if f[-len(log_type):] != log_type:
                print(f + message)
                continue
            print("log name is %s" % f)
            if log_type == ".diff":
                # diffstat's final line is the insertions/deletions summary.
                cmd = "cd " + log_path + "; diffstat " + f + " | tail -n 1"
            else:
                # cloc's last two lines hold the totals row.
                cmd = "cd " + log_path + "; cat " + f + " | tail -n 2"
            p = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
            if log_type == ".diff":
                print(str(p.stdout).split('\\n')[0].replace('b\' ', ''))
            else:
                # Totals row: "SUM: <blank> <comment> <code>".
                log_out_list = str(p.stdout).split('\\n')[0].split()[1:]
                log_out_list = list(map(int, log_out_list))
                log_out = log_out_list[0] + log_out_list[1] + log_out_list[2]
                print("blank:%d comment:%d code:%d" % (log_out_list[0], log_out_list[1], log_out_list[2]))
                print("cloc code number: %d" % log_out)
    def analyze_diff_log(self, diff_log_path):
        """Print diffstat summaries for every .diff log.

        :param diff_log_path: directory containing the .diff logs
        :return:
        """
        self.analyze_log_file(diff_log_path, ".diff", " is not diff file, skip!")
    def analyze_cloc_log(self, cloc_log_path):
        """Print totals for every .count (cloc) log.

        :param cloc_log_path: directory containing the .count logs
        :return:
        """
        self.analyze_log_file(cloc_log_path, ".count", " is not count file, skip!")
if __name__ == "__main__":
    cc = CodeCount()
    # test get dsc files
    # ================================================
    print("test get %s dsc files" % SOURCE_CODE_DIR)
    cc.test_get_dsc_files(SOURCE_CODE_DIR)
    print("test get %s dsc files" % UPSTREAM_CODE_DIR)
    cc.test_get_dsc_files(UPSTREAM_CODE_DIR)
    # The steps below can be toggled as needed: if a run aborts midway and
    # you don't want to start over, comment out the steps already finished.
    # unpack fix code
    print("==================================")
    cc.unpack_source_packages(SOURCE_CODE_DIR)
    cc.unpack_source_packages(UPSTREAM_CODE_DIR)
    # generate diff log
    print("==================================")
    source_dirs = cc.get_code_dirs(SOURCE_CODE_DIR)
    # cc.test_get_code_dirs(SOURCE_CODE_DIR)
    upstream_dirs = cc.get_code_dirs(UPSTREAM_CODE_DIR)
    # cc.test_get_code_dirs(UPSTREAM_CODE_DIR)
    cc.generic_diff_log(source_dirs, upstream_dirs)
    # generate cloc log
    cc.generic_cloc_log(SOURCE_CODE_DIR, source_dirs)
    # analyze diff log
    print("==================================")
    cc.analyze_diff_log(".")
    # analyze cloc log
    print("==================================")
    cc.analyze_cloc_log(".")
|
hurutoriya/model-analysis | examples/chicago_taxi/process_tfma.py | <gh_stars>1-10
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs a batch job for performing Tensorflow Model Analysis."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import tempfile
import apache_beam as beam
import tensorflow as tf
import tensorflow_model_analysis as tfma
from tensorflow_model_analysis.eval_saved_model.post_export_metrics import post_export_metrics
from tensorflow_model_analysis.slicer import slicer
from tensorflow_transform.coders import example_proto_coder
from tensorflow_transform.tf_metadata import dataset_schema
from trainer import taxi
def main():
  """Run a batch TFMA evaluation over CSV or BigQuery taxi data."""
  tf.logging.set_verbosity(tf.logging.INFO)
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--eval_model_dir',
      help='Input path to the model which will be evaluated.')
  parser.add_argument(
      '--eval_result_dir',
      help='Output directory in which the model analysis result is written.')
  parser.add_argument(
      '--big_query_table',
      help='BigQuery path to input examples which will be evaluated.')
  parser.add_argument(
      '--input_csv',
      help='CSV file containing raw data which will be evaluated.')
  parser.add_argument(
      '--max_eval_rows',
      help='Maximum number of rows to evaluate on.',
      default=None,
      type=int)
  # Unknown args are forwarded to the Beam pipeline (runner options etc.).
  known_args, pipeline_args = parser.parse_known_args()
  if known_args.eval_result_dir:
    eval_result_dir = known_args.eval_result_dir
  else:
    eval_result_dir = tempfile.mkdtemp()
  # Evaluate overall and additionally sliced by trip_start_hour.
  slice_spec = [
      slicer.SingleSliceSpec(),
      slicer.SingleSliceSpec(columns=['trip_start_hour'])
  ]
  with beam.Pipeline(argv=pipeline_args) as pipeline:
    if known_args.input_csv:
      csv_coder = taxi.make_csv_coder()
      raw_data = (
          pipeline
          | 'ReadFromText' >> beam.io.ReadFromText(
              known_args.input_csv, skip_header_lines=1)
          | 'ParseCSV' >> beam.Map(csv_coder.decode))
    elif known_args.big_query_table:
      query = taxi.make_sql(
          known_args.big_query_table, known_args.max_eval_rows, for_eval=True)
      raw_data = (
          pipeline
          | 'ReadBigQuery' >> beam.io.Read(
              beam.io.BigQuerySource(query=query, use_standard_sql=True)))
    else:
      raise ValueError('one of --input_csv or --big_query_table should be '
                       'provided.')
    # Examples must be in clean tf-example format.
    raw_feature_spec = taxi.get_raw_feature_spec()
    raw_schema = dataset_schema.from_feature_spec(raw_feature_spec)
    coder = example_proto_coder.ExampleProtoCoder(raw_schema)
    _ = (
        raw_data
        | 'CleanData' >> beam.Map(taxi.clean_raw_data_dict)
        | 'ToSerializedTFExample' >> beam.Map(coder.encode)
        | 'EvaluateAndWriteResults' >> tfma.EvaluateAndWriteResults(
            eval_saved_model_path=known_args.eval_model_dir,
            slice_spec=slice_spec,
            add_metrics_callbacks=[
                post_export_metrics.calibration_plot_and_prediction_histogram(),
                post_export_metrics.auc_plots()
            ],
            output_path=eval_result_dir))
if __name__ == '__main__':
  main()
|
hurutoriya/model-analysis | tensorflow_model_analysis/extractors/predict_extractor.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Public API for performing evaluations using the EvalSavedModel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import apache_beam as beam
from tensorflow_model_analysis import constants
from tensorflow_model_analysis import types
from tensorflow_model_analysis.eval_saved_model import dofn
from tensorflow_transform.beam import shared
from tensorflow_model_analysis.types_compat import Any, Callable, Dict, List, Optional, Tuple
MetricVariablesType = List[Any] # pylint: disable=invalid-name
# For use in Beam type annotations, because Beam's support for Python types
# in Beam type annotations is not complete.
_BeamSliceKeyType = beam.typehints.Tuple[ # pylint: disable=invalid-name
    beam.typehints.Tuple[bytes, beam.typehints.Union[bytes, int, float]], Ellipsis]
# Namespace under which this module's Beam metrics (counters/distributions)
# are reported.
_METRICS_NAMESPACE = 'tensorflow_model_analysis'
@beam.typehints.with_input_types(beam.typehints.List[types.ExampleAndExtracts])
@beam.typehints.with_output_types(beam.typehints.Any)
class _TFMAPredictionDoFn(dofn.EvalSavedModelDoFn):
  """A DoFn that loads the model and predicts.

  Consumes a batch of ExampleAndExtracts and emits, for each element, a
  shallow copy whose extracts carry the computed
  FeaturesPredictionsLabels under FEATURES_PREDICTIONS_LABELS_KEY.
  """
  def __init__(self, eval_saved_model_path,
               add_metrics_callbacks,
               shared_handle):
    super(_TFMAPredictionDoFn, self).__init__(
        eval_saved_model_path, add_metrics_callbacks, shared_handle)
    # Beam metrics: observed batch sizes and total element count.
    self._predict_batch_size = beam.metrics.Metrics.distribution(
        _METRICS_NAMESPACE, 'predict_batch_size')
    self._num_instances = beam.metrics.Metrics.counter(_METRICS_NAMESPACE,
                                                       'num_instances')
  def process(self, element
             ):
    result = []
    batch_size = len(element)
    self._predict_batch_size.update(batch_size)
    self._num_instances.inc(batch_size)
    serialized_examples = [x.example for x in element]
    # Compute FeaturesPredictionsLabels for each serialized_example
    for example_and_extracts, fpl in zip(
        element, self._eval_saved_model.predict_list(serialized_examples)):
      # Make a a shallow copy, so we don't mutate the original.
      element_copy = (
          example_and_extracts.create_copy_with_shallow_copy_of_extracts())
      element_copy.extracts[constants.FEATURES_PREDICTIONS_LABELS_KEY] = fpl
      result.append(element_copy)
    return result
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.typehints.List[types.ExampleAndExtracts])
@beam.typehints.with_output_types(beam.typehints.Any)
def TFMAPredict( # pylint: disable=invalid-name
    examples,
    eval_saved_model_path,
    desired_batch_size = None):
  """A PTransform that adds predictions to ExamplesAndExtracts.

  Batches the incoming examples (at exactly desired_batch_size when given,
  otherwise letting Beam choose) and runs _TFMAPredictionDoFn over each
  batch using the EvalSavedModel at eval_saved_model_path.
  """
  batch_args = {}
  if desired_batch_size:
    # Pin both bounds so every batch has exactly the requested size.
    batch_args = dict(
        min_batch_size=desired_batch_size, max_batch_size=desired_batch_size)
  return (examples
          | 'Batch' >> beam.BatchElements(**batch_args)
          | beam.ParDo(
              _TFMAPredictionDoFn(
                  eval_saved_model_path=eval_saved_model_path,
                  add_metrics_callbacks=None,
                  shared_handle=shared.Shared())))
|
lbartnik/rl | 4.9/main.py | <gh_stars>0
#!/usr/bin/python3
import pickle
import seaborn
class ValueIteration:
    """Value iteration for the gambler's problem.

    State s is the gambler's capital (0..win_state); V[s] approximates the
    probability of eventually reaching win_state from s; P[s] is the stake
    chosen by the current greedy policy. ph is the probability the coin
    comes up heads (a win).
    """
    def __init__(self, ph = .4, win_state = 100):
        # state is the amount of money held by the player
        self.win_state = win_state
        # value of each state is the probability of winning with that much money
        self.V = [0 for _ in range(win_state + 1)] # also account for state=0
        # once player reaches 100, the win is definite
        #self.V[-1] = 1
        # policy is the best action to take
        self.P = [0 for _ in range(win_state+1)]
        self.ph = ph
    def possible_actions(self, s):
        """Return the one-step lookahead value of every stake 0..min(s, win_state-s)."""
        values = []
        # action is the value of the bet; it can go from 1 to s
        actions = list(range(0, min(s, self.win_state-s)+1))
        for a in actions:
            new_state_value = 0
            # lose
            next_state = max(0, s-a)
            reward = 0
            new_state_value += (1-self.ph) * (reward + self.V[next_state])
            # win
            # NOTE(review): hard-codes 100 rather than self.win_state — only
            # correct for the default win_state=100; confirm before reusing
            # with a different goal.
            next_state = min(100, s+a)
            reward = 1 if s+a >= self.win_state else 0
            new_state_value += self.ph * (reward + self.V[next_state])
            values.append(new_state_value)
        return values
    def best_action(self, s):
        """Pick one action among the (near-)optimal ones with a fixed tie-break."""
        best_actions, best_value = self.best_actions(s)
        #return random.choice(best_actions), best_value
        # if only one choice
        if len(best_actions) == 1:
            return best_actions[0], best_value
        # if two choices, don't pick action zero
        if len(best_actions) == 2:
            if best_actions[0] == 0:
                return best_actions[1], best_value
            else:
                return best_actions[0], best_value
        # if multiple actions, don't pick zero nor the last one
        return best_actions[1], best_value
    def best_actions(self, s, eps=1e-9):
        """Return (all actions within eps of the best value, best value)."""
        values = self.possible_actions(s)
        best_value = max(values)
        return list(map(lambda x: x[0], filter(lambda x: abs(x[1]-best_value) < eps, enumerate(values)))), best_value
    def equal_policies(self):
        """Return ([states], [actions]) pairs for every co-optimal action."""
        ans = []
        for s in range(1, self.win_state):
            best_actions, _ = self.best_actions(s)
            for a in best_actions:
                ans.append([s, a])
        return map(list, zip(*ans))
    # s is the state: amount of money held
    def updateV(self, s):
        """Back up state s; return |change| in V[s] and record the greedy action."""
        best_action, best_value = self.best_action(s)
        delta = abs(self.V[s]-best_value)
        self.V[s] = best_value
        self.P[s] = best_action
        return delta
    def updateS(self):
        """One full in-place sweep over all non-terminal states; return max delta."""
        delta = 0
        states = list(range(1, self.win_state))
        #random.shuffle(states)
        for s in states:
            delta = max(delta, self.updateV(s))
        return delta
    def loop(self, stop_delta=.000001, stop_iterations=100000):
        """Sweep until the max update falls below stop_delta; return delta history."""
        ans = []
        for i in range(stop_iterations):
            delta = self.updateS()
            ans.append(delta)
            #print(f"iteration {i}, delta {delta}")
            if delta < stop_delta:
                break
        return ans
    def save(self, path):
        """Pickle (V, P, ph) to *path*."""
        with open(path, "wb") as output:
            pickle.dump((self.V, self.P, self.ph), output)
    def load(self, path):
        """Restore (V, P, ph) previously written by save()."""
        with open(path, "rb") as input:
            self.V, self.P, self.ph = pickle.load(input)
    def print(self):
        """Print V and the greedy policy for every non-terminal state."""
        for s in range(1, self.win_state):
            print("state {}: {:.5f}, {}".format(s, self.V[s], self.P[s]))
if __name__ == '__main__':
    # Run exactly 32 sweeps (stop_delta=0 disables convergence early-exit).
    vi = ValueIteration()
    d = vi.loop(stop_delta=0, stop_iterations=32)
|
pwinslow/change-org-app | runner.py | <reponame>pwinslow/change-org-app
"""
This file sets up multiple jobs to run on a PBS cluster. Each job collects data on a list of petitions.
"""
# Miscellaneous imports
from sys import exit
from csv import reader
from os import getcwd, listdir, remove, system
from os.path import join, isfile, isdir
class Runner(object):
    """Submit one PBS job per petition-URL data file, each with its own API key.

    Python 2 script: pairs every data/xml_data/*.dat file with an API key
    from API_key_list and qsub-submits a get_data.py job for the pair.
    """
    def __init__(self):
        self.key_path = join(getcwd(), "API_key_list")
        self.data_path = join(getcwd(), "data/xml_data")
        self.key_list = self.get_keys(self.key_path)
        self.file_list = self.get_files(self.data_path)
        # Job name suffix taken from '<prefix>-<name>.dat'.
        self.run_names = [name.split('-')[1].split('.')[0] for name in self.file_list]
    @staticmethod
    def get_keys(key_path):
        # If API key file exists, read it and extract list of keys
        # (CSV column 3, skipping the 'api_key' header value).
        if isfile(key_path):
            key_list = []
            with open(key_path, "rb") as f:
                key_csv = reader(f)
                for row in key_csv:
                    key = row[3].strip()
                    if key != "api_key":
                        key_list.append(key)
            return key_list
        else:
            print "API key file is not in {}.".format(key_path)
            exit(2)
    @staticmethod
    def get_files(data_path):
        # If data folder exists, list files in it and extract file names for all .dat files
        # (returns None implicitly when the folder is missing).
        if isdir(data_path):
            file_list = []
            files = listdir(data_path)
            for file_name in files:
                if file_name.endswith("dat"):
                    file_list.append(file_name)
            return file_list
        else:
            print "Data files are not in {}.".format(data_path)
    def run(self):
        # For each data file, write script to collect petition data using GetData methods running on cluster
        for run_name, file_name, api_key in zip(self.run_names, self.file_list, self.key_list):
            # Check if run script exists, if so then rm it
            script_path = join(getcwd(), "script.sh")
            if isfile(script_path):
                remove(script_path)
            # Create submission script
            run_cmd = ("python {0}/get_data.py --url_list_path={1}"
                       " --api_key={2}\n").format(getcwd(),
                                                  join(self.data_path, file_name),
                                                  api_key)
            with open(script_path, "w") as f:
                f.write("#/bin/sh\n")
                f.write("#PBS -N Scan-{}\n".format(run_name))
                f.write(run_cmd)
            # Run submission script
            system("chmod +x script.sh")
            system("qsub -e berr.log -o bout.log script.sh")
if __name__ == "__main__":
    # Entry point: submit one cluster job per data file.
    runner = Runner()
    runner.run()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.