| max_stars_repo_path (string, lengths 3–269) | max_stars_repo_name (string, lengths 4–119) | max_stars_count (int64, 0–191k) | id (string, lengths 1–7) | content (string, lengths 6–1.05M) | score (float64, 0.23–5.13) | int_score (int64, 0–5) |
|---|---|---|---|---|---|---|
stograde/specs/file_options.py
|
StoDevX/stograde
| 7
|
12774551
|
<gh_stars>1-10
from dataclasses import dataclass
@dataclass
class FileOptions:
"""Represents options available for files in specs"""
compile_optional: bool = False
hide_contents: bool = False
optional: bool = False
timeout: int = 4
truncate_contents: int = 10000
truncate_output: int = 10000
web_file: bool = False
def update(self, options: dict):
self.compile_optional = options.get('optional_compile', self.compile_optional)
self.hide_contents = options.get('hide_contents', self.hide_contents)
self.optional = options.get('optional', self.optional)
self.timeout = options.get('timeout', self.timeout)
self.truncate_contents = options.get('truncate_contents', self.truncate_contents)
self.truncate_output = options.get('truncate_output', self.truncate_output)
self.web_file = options.get('web', self.web_file)
return self
| 2.484375
| 2
|
tflitehub/manual_test.py
|
rsuderman/iree-samples
| 12
|
12774552
|
import absl.flags
import absl.testing
import test_util
absl.flags.DEFINE_string('model', None, 'model path to execute')
class ManualTest(test_util.TFLiteModelTest):
def __init__(self, *args, **kwargs):
super(ManualTest, self).__init__(absl.flags.FLAGS.model, *args, **kwargs)
def compare_results(self, iree_results, tflite_results, details):
super(ManualTest, self).compare_results(iree_results, tflite_results, details)
def test_compile_tflite(self):
if self.model_path is not None:
self.compile_and_execute()
if __name__ == '__main__':
absl.testing.absltest.main()
| 2.296875
| 2
|
app1/app/api/v1/hello/hello.py
|
hzjsea/fastapi-admin
| 0
|
12774553
|
<gh_stars>0
#!/usr/bin/env python3
# encoding: utf-8
"""
@author: hzjsea
@file: hello.py
@time: 2021/11/19 10:34 AM
"""
from fastapi import APIRouter
from app1.app.utils.xhr import response_ok, response_error
router = APIRouter()
@router.get("/hello", summary="hello get请求格式")
async def hello():
data = {
"hahaha": "hello word!"
}
return response_ok(data)
@router.post("/hello", summary="hello post请求格式")
async def hello():
data = "hello word!"
return response_ok(data)
@router.put("/hello", summary="hello put请求格式")
async def hello():
data = "hello word!"
return response_ok(data)
@router.delete("/hello", summary="hello delete请求格式")
async def hello():
data = "hello word!"
return response_ok(data)
| 2.578125
| 3
|
Profile/migrations/0002_rename_name_profile.py
|
higornobrega/Cadastrar-formulario-sem-refrash
| 0
|
12774554
|
<gh_stars>0
# Generated by Django 3.2.4 on 2021-07-16 19:53
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Profile', '0001_initial'),
]
operations = [
migrations.RenameModel(
old_name='Name',
new_name='Profile',
),
]
| 1.6875
| 2
|
tests/google_analytics/test_google_analytics.py
|
T-FitAndFat/toucan-connectors
| 0
|
12774555
|
import json
import pytest
from toucan_connectors.google_analytics.google_analytics_connector import (
GoogleAnalyticsConnector, GoogleAnalyticsDataSource)
def test_google_analytics(mocker):
gac = GoogleAnalyticsConnector(
type="GoogleAnalytics",
name="Test",
credentials={
"type": "test",
"project_id": "test",
"private_key_id": "test",
"private_key": "test",
"client_email": "test",
"client_id": "test",
"auth_uri": "test",
"token_uri": "test",
"auth_provider_x509_cert_url": "test",
"client_x509_cert_url": "test"
}
)
gads = GoogleAnalyticsDataSource(
name="Test", domain="test",
report_request={
"viewId": "0123456789",
"dateRanges": [
{"startDate": "2018-06-01", "endDate": "2018-07-01"}
]
})
fixture = json.load(open('tests/google_analytics/fixtures/reports.json'))
module = 'toucan_connectors.google_analytics.google_analytics_connector'
mocker.patch(f'{module}.ServiceAccountCredentials.from_json_keyfile_dict')
mocker.patch(f'{module}.build')
mocker.patch(f'{module}.get_query_results').return_value = fixture['reports'][0]
df = gac.get_df(gads)
assert df.shape == (3, 11)
@pytest.mark.skip(reason="This uses a live instance")
def test_live_instance():
gac = GoogleAnalyticsConnector(
type="GoogleAnalytics",
name="Test",
credentials={
"type": "",
"project_id": "",
"private_key_id": "",
"private_key": "",
"client_email": "",
"client_id": "",
"auth_uri": "",
"token_uri": "",
"auth_provider_x509_cert_url": "",
"client_x509_cert_url": ""
}
)
gads = GoogleAnalyticsDataSource(
name="Test", domain="test",
report_request={
"viewId": "119151898",
"pageSize": 100,
"orderBys": [
{
"fieldName": "ga:date",
"orderType": "VALUE",
"sortOrder": "%(sortOrder)s"
}
],
"dimensions": [
{"name": "ga:hostname"},
{"name": "ga:date"},
{"name": "ga:dimension1"},
{"name": "ga:deviceCategory"},
{"name": "ga:eventLabel"}
],
"dateRanges": [
{"startDate": "2018-06-01", "endDate": "2018-07-01"}
],
"metrics": [
{"expression": "ga:sessions"},
{"expression": "ga:sessionDuration"}
]
},
parameters={'sortOrder': 'DESCENDING'}
)
df = gac.get_df(gads)
assert df.shape == (230, 11)
| 2.09375
| 2
|
python_ne/core/ga/matplotlib_logger.py
|
MatheusZickuhr/python-neat
| 1
|
12774556
|
<gh_stars>1-10
from python_ne.utils.observer import Observer
import matplotlib.pyplot as plt
import numpy as np
import os
class MatplotlibLogger(Observer):
def __init__(self):
self.logged_data = []
def notify(self, *args, **kwargs):
data = {
'generation': kwargs['current_generation'],
'best_element_fitness': kwargs['best_element_fitness'],
'time_to_run_generation': kwargs['generation_time'],
'population_fitness_std': kwargs['population_fitness_std']
}
self.logged_data.append(data)
def save_fitness_chart(self, file_path='', fitness_label='fitness', generation_label='generation',
chart_title='generation - fitness'):
x = [data['generation'] for data in self.logged_data]
y = [data['best_element_fitness'] for data in self.logged_data]
fig, ax = plt.subplots()
ax.plot(x, y)
ax.set(xlabel=generation_label, ylabel=fitness_label,
title=chart_title)
ax.grid()
fig.savefig(file_path)
def save_time_chart(self, file_path, time_label='time (s)', generation_label='generation',
chart_title='generation - time'):
x = [data['generation'] for data in self.logged_data]
y = [data['time_to_run_generation'] for data in self.logged_data]
fig, ax = plt.subplots()
ax.plot(x, y)
ax.set(xlabel=generation_label, ylabel=time_label,
title=chart_title)
ax.grid()
fig.savefig(file_path)
def save_std_chart(self, file_path, std_label='std', generation_label='generation', chart_title='generation - std'):
x = [data['generation'] for data in self.logged_data]
y = [data['population_fitness_std'] for data in self.logged_data]
fig, ax = plt.subplots()
ax.plot(x, y)
ax.set(xlabel=generation_label, ylabel=std_label,
title=chart_title)
ax.grid()
fig.savefig(file_path)
| 2.625
| 3
|
hardware-check.py
|
thysia-zosa/hardware-check
| 0
|
12774557
|
#! /usr/bin/env python3
#
# Authors: <NAME>, <NAME>, <NAME>
# (c) 2021
from pathlib import Path
from datetime import datetime
import os.path
import requests
import yaml
import json
from pyspectator.processor import Cpu
from crontab import CronTab
import sys
# constants
CONFIG_FILE = 'config.yaml'
MAX_CPU_TEMP = 'maxCpuTemp'
CHECK_INTERVAL = 'checkInterval'
TELEGRAM_CHAT = 'telegramChatID'
TELEGRAM_API = 'telegramApiUrl'
TELEGRAM_TOKEN = 'telegramToken'
# initialize main variables
maxCpuTemp = None
checkInterval = None
telegramChatID = None
telegramToken = None
time = str(datetime.now())
log = {}
warnings = []
warningMessage = ''
codePath = str(Path(__file__).parent.absolute()) + '/'
if os.path.isfile(codePath + CONFIG_FILE):
# read config file
try:
with open(codePath + CONFIG_FILE, 'r') as yamlFile:
config = yaml.load(yamlFile, Loader=yaml.CLoader)
if MAX_CPU_TEMP in config:
maxCpuTemp = config[MAX_CPU_TEMP]
if CHECK_INTERVAL in config:
checkInterval = config[CHECK_INTERVAL]
if TELEGRAM_CHAT in config:
telegramChatID = config[TELEGRAM_CHAT]
if TELEGRAM_TOKEN in config:
telegramToken = config[TELEGRAM_TOKEN]
except BaseException as err:
print('Error:', err)
else:
sys.exit('config file missing')
# In case something went wrong, assign default values
if not isinstance(maxCpuTemp, float):
    maxCpuTemp = 80.0
if not isinstance(checkInterval, int):
    checkInterval = 10
# If any Telegram setting is missing, abort: the program is not runnable
if not isinstance(telegramChatID, str) or not isinstance(telegramToken, str):
sys.exit('telegram config missing')
# update cronjob, if the user has changed interval time
myCron = CronTab(user=True)
intTime = '*/' + str(checkInterval)
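# A cron minute field of the form '*/N' runs the job every N minutes; rewrite it if the configured interval changed.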
for job in myCron:
if job.comment == 'hardwareCheck' and str(job.minute) != intTime:
job.minute.every(checkInterval)
myCron.write()
# read cpu-temperature
cpu = Cpu(monitoring_latency=1)
temperature = cpu.temperature
log['cpu-temp'] = temperature
# check if cpu-temperature exceeds max
if temperature > maxCpuTemp:
warnings.append('Temperature is too high: ' + \
str(temperature) + ' (max: ' + str(maxCpuTemp) + ')')
# save data to logfile
try:
with open(codePath + 'log.json', 'r+') as logFile:
data = json.load(logFile)
data.update({time: log})
logFile.seek(0)
json.dump(data, logFile, indent=2, ensure_ascii=False)
except BaseException as err:
print('Error:', err)
# write telegram message
if len(warnings) > 0:
    warnings.insert(0, 'Your computer has encountered a problem:')
warningMessage = '\n'.join(warnings)
send_text = 'https://api.telegram.org/' + telegramToken + \
'/sendMessage?chat_id=' + telegramChatID + \
'&parse_mode=Markdown&text=' + warningMessage
try:
response = requests.get(send_text)
except requests.exceptions.RequestException as err:
print('Error:', err)
| 2.25
| 2
|
src/slippinj/cli/scripts/basic_script.py
|
scm-spain/slippin-jimmy
| 7
|
12774558
|
class BasicScript(object):
def __init__(self, parser):
"""
Initialize the class
:param parser: ArgumentParser
"""
super(BasicScript, self).__init__()
self._parser = parser
def get_arguments(self):
"""
        Get the arguments to configure the current script; should be implemented in child classes
:return: list
"""
        raise NotImplementedError('Implement get_arguments method')
    def run(self, args, injector):
        raise NotImplementedError('Implement run method')
def configure(self):
"""
Configure the component before running it
:rtype: Class instance
"""
self.__set_arguments()
return self
def __set_arguments(self):
parser = self._parser.add_parser(self.__class__.__name__.lower(), help=self.__class__.__doc__,
conflict_handler='resolve')
arguments = self.get_arguments()
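        # Each argument dict supplies its 'short' and 'long' flag names; the remaining keys are forwarded to add_argument as keyword options.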
for argument in arguments:
short = argument['short']
long = argument['long']
del argument['short']
del argument['long']
parser.add_argument(short, long, **argument)
def get_wf_configuration(self, args, injector):
object_configuration = injector.get('object_configuration')
if 1 >= len(object_configuration):
            configuration_file = (args.configuration_file
                                  if 'configuration_file' in args and args.configuration_file is not None
                                  else injector.get('interactive_configuration_file').get(args.wf_dir))
configuration = injector.get('wf_configuration').get_workflow_configuration(configuration_file)
configuration['config_paths'] = configuration_file
for key in configuration:
object_configuration[key] = configuration[key]
return object_configuration
| 3.0625
| 3
|
lib/kube_objs/volumes.py
|
olx-global/rubiks
| 60
|
12774559
|
<filename>lib/kube_objs/volumes.py
# (c) Copyright 2017-2018 OLX
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import OrderedDict
from kube_obj import KubeSubObj, KubeObj
from kube_types import *
import_relative('selectors', 'BaseSelector')
import_relative('pod', 'Memory')
class AWSVolID(String):
validation_text = 'Expected amazon volume id'
def do_check(self, value, path):
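        # Accept either a bare volume id ('vol-<hex>') or the full 'aws://<zone>/vol-<hex>' form; only the last path segment is checked.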
        if not String.do_check(self, value, path):
return False
if value.startswith('aws://'):
value = value.split('/')[-1]
if not value.startswith('vol-'):
return False
if len(value) == 4:
return False
if len(value[4:].rstrip('0123456789abcdef')) != 0:
return False
return True
class AWSElasticBlockStore(KubeSubObj):
_defaults = {
'volumeID': None,
'fsType': 'ext4',
}
_types = {
'volumeID': AWSVolID,
'fsType': Enum('ext4'),
}
def render(self):
return self.renderer(order=('volumeID', 'fsType'))
class PersistentVolumeRef(KubeSubObj):
_defaults = {
'apiVersion': None,
'kind': 'PersistentVolumeClaim',
'name': '',
'ns': '',
}
_types = {
'apiVersion': Nullable(String),
'name': Nullable(Identifier),
'kind': Nullable(CaseIdentifier),
'ns': Nullable(Identifier),
}
_parse = {
'ns': ('namespace',),
}
_exclude = {
'.resourceVersion': True,
'.uid': True,
}
def render(self):
ret = self.renderer(order=('apiVersion', 'name', 'kind', 'ns'))
if 'ns' in ret:
ret['namespace'] = ret['ns']
del ret['ns']
return ret
class PersistentVolume(KubeObj):
apiVersion = 'v1'
kind = 'PersistentVolume'
kubectltype = 'persistentvolume'
_uses_namespace = False
_output_order = 35
_defaults = {
'accessModes': ['ReadWriteOnce'],
'capacity': None,
'awsElasticBlockStore': None,
'persistentVolumeReclaimPolicy': None,
'claimRef': None,
}
_types = {
'accessModes': List(Enum('ReadWriteOnce', 'ReadOnlyMany', 'ReadWriteMany')),
'capacity': Memory,
'awsElasticBlockStore': Nullable(AWSElasticBlockStore),
'persistentVolumeReclaimPolicy': Nullable(Enum('Retain', 'Recycle', 'Delete')),
'claimRef': Nullable(PersistentVolumeRef),
}
_parse_default_base = ('spec',)
_parse = {
'capacity': ('spec', 'capacity', 'storage'),
}
_exclude = {
'.status': True,
}
def do_validate(self):
if len(filter(lambda x: self._data[x] is not None, ('awsElasticBlockStore',))) != 1:
raise KubeObjValidationError(self, "awsElasticBlockStore must be specified")
return True
def render(self):
ret = self.renderer(order=('accessModes', 'capacity'))
del ret['name']
ret['capacity'] = {'storage': ret['capacity']}
return {'metadata': {'name': self._data['name']}, 'spec': ret}
class PersistentVolumeClaim(KubeObj):
apiVersion = 'v1'
kind = 'PersistentVolumeClaim'
kubectltype = 'persistentvolumeclaim'
_output_order = 40
_defaults = {
'accessModes': ['ReadWriteOnce'],
'request': None,
'selector': None,
'volumeName': None,
}
_types = {
'accessModes': List(Enum('ReadWriteOnce', 'ReadOnlyMany', 'ReadWriteMany')),
'request': Memory,
'selector': Nullable(BaseSelector),
'volumeName': Nullable(Identifier),
}
_parse_default_base = ('spec',)
_parse = {
'request': ('spec', 'resources', 'requests', 'storage'),
}
_exclude = {
'.status': True,
}
def xf_volumeName(self, v):
if isinstance(v, PersistentVolume):
return v.name
return v
def render(self):
ret = self.renderer(return_none=True)
spec = OrderedDict()
if 'accessModes' in ret:
spec['accessModes'] = ret['accessModes']
if 'request' in ret:
spec['resources'] = OrderedDict(requests=OrderedDict(storage=ret['request']))
if 'selector' in ret:
spec['selector'] = ret['selector']
if 'volumeName' in ret:
spec['volumeName'] = ret['volumeName']
return {'metadata': {'name': ret['name']}, 'spec': spec}
| 2.03125
| 2
|
services/exchangeservice.py
|
kimi0230/gogopowerkimibot
| 4
|
12774560
|
from bs4 import BeautifulSoup
# fix: InsecureRequestWarning: Unverified HTTPS request is being made to host
import requests.packages.urllib3
# For testing table output
# import prettytable as pt
url = "https://rate.bot.com.tw/xrt?Lang=zh-TW"
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.101 Safari/537.36'
}
defaultCurrency = ["美金 (USD)", "日圓 (JPY)", "英鎊 (GBP)", "人民幣 (CNY)", "歐元 (EUR)"]
allCurrency = [
"美金 (USD)",
"港幣 (HKD)",
"英鎊 (GBP)",
"澳幣 (AUD)",
"加拿大幣 (CAD)",
"新加坡幣 (SGD)",
"瑞士法郎 (CHF)",
"日圓 (JPY)",
"南非幣 (ZAR)",
"瑞典幣 (SEK)",
"紐元 (NZD)",
"泰幣 (THB)",
"菲國比索 (PHP)",
"印尼幣 (IDR)",
"歐元 (EUR)",
"韓元 (KRW)",
"越南盾 (VND)",
"馬來幣 (MYR)",
"人民幣 (CNY)",
]
def getBoTExchange(msg=""):
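    # Scrape the Bank of Taiwan exchange-rate table and return {currency: [cash buy, cash sell, spot buy, spot sell]}, optionally filtered to the requested currency.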
try:
if msg != "":
            msg = msg.upper()
            found = False
            for a in allCurrency:
if msg in a:
msg = a
found = True
break
if not found:
return None
# fix: InsecureRequestWarning: Unverified HTTPS request is being made to host
requests.packages.urllib3.disable_warnings()
        res = requests.get(url, headers=headers, verify=False)
res.encoding = 'UTF-8'
soup = BeautifulSoup(res.text, "lxml")
time = soup.find(
"span", class_="time").text.strip()
table = [s for s in soup.select("table.table tbody tr")]
queryResult = {}
for t in table:
currency = t.select("td div.visible-phone")[0].text.strip()
cashRateBuy = t.select("td")[1].text.strip()
cashRateSell = t.select("td")[2].text.strip()
spotRateBuy = t.select("td")[3].text.strip()
spotRateSell = t.select("td")[4].text.strip()
queryResult[currency] = [cashRateBuy,
cashRateSell, spotRateBuy, spotRateSell]
result = {}
if msg == "":
            # Only fetch the default currencies
result = {d: queryResult[d] for d in defaultCurrency}
else:
result = {msg: queryResult[msg]}
if len(result) > 0:
return result
return None
except:
return None
def toMsg(source=None):
try:
        if source is not None:
resMsg = "|幣別\t\t|即期買\t|即期賣\t|\n"
for r in source:
resMsg += "|%s | %s | %s |\n" % (
r, source[r][2], source[r][3])
resMsg += "https://rate.bot.com.tw/xrt?Lang=zh-TW"
return resMsg
else:
return None
except:
return None
if __name__ == "__main__":
print(toMsg(getBoTExchange()))
| 2.8125
| 3
|
applications/CoSimulationApplication/mpi_extension/MPIExtension.py
|
clazaro/Kratos
| 778
|
12774561
|
import KratosMultiphysics.mpi # importing the MPI-Core, since the MPIExtension directly links to it
from KratosCoSimulationMPIExtension import *
| 1.039063
| 1
|
cadise-blender/core/__init__.py
|
xh5a5n6k6/cadise-blender
| 1
|
12774562
|
<gh_stars>1-10
from . import (
exporter,
renderer
)
import bpy
class CadiseAddonPreferences(bpy.types.AddonPreferences):
# Reference to T.C. Chang's work, this must match add-on's name
# https://github.com/TzuChieh/Photon-v2/blob/develop/BlenderAddon/PhotonBlend/bmodule/__init__.py
bl_idname = __package__.split('.')[0]
install_path: bpy.props.StringProperty (
name = "Installation Path",
description = "Installation path to Cadise renderer (binary)",
subtype = "DIR_PATH",
default = ""
)
def draw(self, context):
layout = self.layout
layout.prop(self, "install_path")
def include_submodule(moduleManager):
moduleManager.add_class(CadiseAddonPreferences)
exporter.include_submodule(moduleManager)
renderer.include_submodule(moduleManager)
| 1.640625
| 2
|
summary/__init__.py
|
svven/summary
| 12
|
12774563
|
<filename>summary/__init__.py
"""
Summary class improves extraction.Extracted by providing an
incremental load mechanism, and especially image validation.
But the main difference is that it performs the requests.
Extraction is performed gradually by parsing the HTML <head>
tag first, applying specific head extraction techniques, and
goes on to the <body> only if Summary data is not complete.
"""
import logging
import config, request, extraction, filters
from urlparse import urlparse
from url import canonicalize_url
from urlnorm import norm
from contextlib import closing
# try:
# import lxml
# parser = 'lxml'
# except:
# parser = None
# from bs4 import BeautifulSoup, Comment
site = lambda url: urlparse(url).netloc
decode = lambda mystr, encoding: \
isinstance(mystr, str) and mystr.decode(encoding, 'ignore') or mystr
class URLError(Exception):
pass
class HTMLParseError(Exception):
pass
class Summary(object):
"Provides incremental load mechanism and validation."
def __init__(self, source_url=None):
"""
Unlike Extracted ctor, this one just sets the source_url.
Extracted data is loaded later gradually by calling extract.
"""
self._html = ""
self.titles = []
self.descriptions = []
self.images = []
self.urls = []
self.source_url = source_url
self.clean_url = self.source_url
# Non-plural properties
@property
def title(self):
"Return the best title, if any."
if self.titles:
return self.titles[0]
else:
return None
@property
def description(self):
"Return the best description, if any."
if self.descriptions:
return self.descriptions[0]
else:
return None
@property
def image(self):
"Return the best image, if any."
if self.images:
return self.images[0]
else:
return None
@property
def url(self):
"Return the best canonical url, or the cleaned source url."
if self.urls:
return self.urls[0]
else:
return self.clean_url
def _is_clear(self):
return not (self.titles or self.descriptions or self.images or self.urls)
def _is_complete(self):
return self.titles and self.descriptions and self.images and self.urls and True
def _clear(self):
self.titles = []
self.descriptions = []
self.images = []
self.urls = []
def _load(self, titles=[], descriptions=[], images=[], urls=[], **kwargs):
"""
Loads extracted data into Summary.
Performs validation and filtering on-the-fly, and sets the
non-plural fields to the best specific item so far.
If GET_ALL_DATA is False, it gets only the first valid item.
"""
enough = lambda items: items # len(items) >= MAX_ITEMS
if config.GET_ALL_DATA or not enough(self.titles):
titles = filter(None, map(self._clean_text, titles))
self.titles.extend(titles)
if config.GET_ALL_DATA or not enough(self.descriptions):
descriptions = filter(None, map(self._clean_text, descriptions))
self.descriptions.extend(descriptions)
        ## Never mind the urls: they can be bad and are not worth it
# if config.GET_ALL_DATA or not enough(self.urls):
# # urls = [self._clean_url(u) for u in urls]
# urls = filter(None, map(self._clean_url, urls))
# self.urls.extend(urls)
if config.GET_ALL_DATA:
# images = [i for i in [self._filter_image(i) for i in images] if i]
images = filter(None, map(self._filter_image, images))
self.images.extend(images)
elif not enough(self.images):
for i in images:
image = self._filter_image(i)
if image:
self.images.append(image)
if enough(self.images):
break
# Picking the best item by sorting
# self.titles = sorted(self.titles, key=len)
# self.descriptions = sorted(self.descriptions, key=len, reverse=True)
# self.images = sorted(self.images, key=lambda i: sum(i.size), reverse=True)
def _clean_text(self, text):
"""
Checks for bad text like "{{ metatags.title }}" and such
"""
if text.startswith('{{') and text.endswith('}}'):
return None
return text
def _clean_url(self, url):
"""
Canonicalizes the url, as it is done in Scrapy.
And keeps only USEFUL_QUERY_KEYS. It also strips the
trailing slash to help identifying dupes.
"""
# TODO: Turn this into regex
if not url.startswith('http') or url.endswith('}}') or 'nojs_router' in url:
return None
if site(norm(url).lower()) in config.NONCANONIC_SITES:
clean_url = canonicalize_url(url, keep_params=True)
else:
clean_url = canonicalize_url(url)
return clean_url
def _filter_image(self, url):
"The param is the image URL, which is returned if it passes all the filters."
return reduce(lambda f, g: f and g(f),
[
filters.AdblockURLFilter()(url),
filters.NoImageFilter(),
filters.SizeImageFilter(),
filters.MonoImageFilter(),
filters.FormatImageFilter(),
])
def _get_tag(self, response, tag_name="html", encoding="utf-8"):
"""
Iterates response content and returns the tag if found.
If not found, the response content is fully consumed so
self._html equals response.content, and it returns None.
"""
def find_tag(tag_name):
tag_start = tag_end = None
found = lambda: \
tag_start is not None and tag_end is not None
html = self._html.lower()
start = html.find("<%s" % tag_name)
if start >= 0:
tag_start = start
else:
return None # no tag
end = html.find("</%s>" % tag_name)
if end > tag_start:
tag_end = end+len(tag_name)+3
elif consumed:
tag_end = -1 # till the end
if found():
return self._html[tag_start:tag_end]
return None
consumed = getattr(response, 'consumed', False)
if not consumed:
stream = getattr(response, 'stream', None)
if stream is None:
stream = response.iter_content(config.CHUNK_SIZE) # , decode_unicode=True
response.stream = stream
while True:
try:
chunk = next(stream)
self._html += chunk
tag = find_tag(tag_name)
if tag:
return tag
if len(self._html) > config.HTML_MAX_BYTESIZE:
raise HTMLParseError('Maximum response size reached.')
except StopIteration:
response.consumed = True
tag = find_tag(tag_name)
return decode(tag, encoding) # decode here
def _extract(self, html, url, techniques):
extractor = extraction.SvvenExtractor(techniques=techniques)
extracted = extractor.extract(html, source_url=url)
self._load(**extracted)
def extract(self, check_url=None, http_equiv_refresh=True):
"""
Downloads HTML <head> tag first, extracts data from it using
specific head techniques, loads it and checks if is complete.
Otherwise downloads the HTML <body> tag as well and loads data
extracted by using appropriate semantic techniques.
Eagerly calls check_url(url) if any, before parsing the HTML.
Provided function should raise an exception to break extraction.
E.g.: URL has been summarized before; URL points to off limits
websites like foursquare.com, facebook.com, bitly.com and so on.
"""
# assert self._is_clear()
logger = logging.getLogger(__name__)
logger.info("Extract: %s", self.clean_url)
with closing(request.get(self.clean_url, stream=True)) as response:
response.raise_for_status()
mime = response.headers.get('content-type')
if mime and not ('html' in mime.lower()):
raise HTMLParseError('Invalid Content-Type: %s' % mime)
self.clean_url = self._clean_url(response.url)
if self.clean_url is None:
raise URLError('Bad url: %s' % response.url)
if check_url is not None:
check_url(url=self.clean_url)
encoding = config.ENCODING or response.encoding
self._html = ""
if config.PHANTOMJS_BIN and \
site(self.clean_url) in config.PHANTOMJS_SITES:
self._html = request.phantomjs_get(self.clean_url)
response.consumed = True
head = self._get_tag(response, tag_name="head", encoding=encoding)
if http_equiv_refresh:
# Check meta http-equiv refresh tag
html = head or decode(self._html, encoding)
self._extract(html, self.clean_url, [
"summary.techniques.HTTPEquivRefreshTags",
])
new_url = self.urls and self.urls[0]
if new_url and new_url != self.clean_url:
logger.warning("Refresh: %s", new_url)
self._clear()
self.clean_url = new_url
return self.extract(check_url=check_url, http_equiv_refresh=False)
if head:
logger.debug("Got head: %s", len(head))
self._extract(head, self.clean_url, [
"extraction.techniques.FacebookOpengraphTags",
"extraction.techniques.TwitterSummaryCardTags",
"extraction.techniques.HeadTags"
])
else:
logger.debug("No head: %s", self.clean_url)
if config.GET_ALL_DATA or not self._is_complete():
body = self._get_tag(response, tag_name="body", encoding=encoding)
if body:
logger.debug("Got body: %s", len(body))
self._extract(body, self.clean_url, [
"extraction.techniques.HTML5SemanticTags",
"extraction.techniques.SemanticTags"
])
else:
logger.debug("No body: %s", self.clean_url)
if not head and not body:
raise HTMLParseError('No head nor body tags found.')
del self._html # no longer needed
# that's it
| 3.140625
| 3
|
Fluid/io/fluid-cloudnative/module/data_load_spec.py
|
Rui-Tang/fluid-client-python
| 1
|
12774564
|
# coding: utf-8
"""
fluid
client for fluid # noqa: E501
OpenAPI spec version: v0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from Fluid.io.fluid-cloudnative.module.target_dataset import TargetDataset # noqa: F401,E501
from Fluid.io.fluid-cloudnative.module.target_path import TargetPath # noqa: F401,E501
class DataLoadSpec(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'dataset': 'TargetDataset',
'load_metadata': 'bool',
'target': 'list[TargetPath]'
}
attribute_map = {
'dataset': 'dataset',
'load_metadata': 'loadMetadata',
'target': 'target'
}
def __init__(self, dataset=None, load_metadata=None, target=None): # noqa: E501
"""DataLoadSpec - a model defined in Swagger""" # noqa: E501
self._dataset = None
self._load_metadata = None
self._target = None
self.discriminator = None
if dataset is not None:
self.dataset = dataset
if load_metadata is not None:
self.load_metadata = load_metadata
if target is not None:
self.target = target
@property
def dataset(self):
"""Gets the dataset of this DataLoadSpec. # noqa: E501
Dataset defines the target dataset of the DataLoad # noqa: E501
:return: The dataset of this DataLoadSpec. # noqa: E501
:rtype: TargetDataset
"""
return self._dataset
@dataset.setter
def dataset(self, dataset):
"""Sets the dataset of this DataLoadSpec.
Dataset defines the target dataset of the DataLoad # noqa: E501
:param dataset: The dataset of this DataLoadSpec. # noqa: E501
:type: TargetDataset
"""
self._dataset = dataset
@property
def load_metadata(self):
"""Gets the load_metadata of this DataLoadSpec. # noqa: E501
LoadMetadata specifies if the dataload job should load metadata # noqa: E501
:return: The load_metadata of this DataLoadSpec. # noqa: E501
:rtype: bool
"""
return self._load_metadata
@load_metadata.setter
def load_metadata(self, load_metadata):
"""Sets the load_metadata of this DataLoadSpec.
LoadMetadata specifies if the dataload job should load metadata # noqa: E501
:param load_metadata: The load_metadata of this DataLoadSpec. # noqa: E501
:type: bool
"""
self._load_metadata = load_metadata
@property
def target(self):
"""Gets the target of this DataLoadSpec. # noqa: E501
        Target defines target paths that need to be loaded # noqa: E501
:return: The target of this DataLoadSpec. # noqa: E501
:rtype: list[TargetPath]
"""
return self._target
@target.setter
def target(self, target):
"""Sets the target of this DataLoadSpec.
        Target defines target paths that need to be loaded # noqa: E501
:param target: The target of this DataLoadSpec. # noqa: E501
:type: list[TargetPath]
"""
self._target = target
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(DataLoadSpec, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DataLoadSpec):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 1.984375
| 2
|
run.py
|
mrprogrammer2938/Translate
| 0
|
12774565
|
<reponame>mrprogrammer2938/Translate
#!/usr/bin/python3
# This program was written by Mr.nope
# Version 1.3.0
from os import system as command
import sys
try:
from deep_translator import GoogleTranslator
except ImportError:
os.system("pip3 install deep_translator")
from platform import uname
try:
from colorama import Fore,init
init()
except ImportError:
os.system("pip3 install colorama")
End = '\033[0m'
opt = "\nEnter Word: "
system = uname()[0]
banner = Fore.BLUE + """
d888888P dP dP
88 88 88
88 88d888b. .d8888b. 88d888b. .d8888b. 88 .d8888b. d8888P .d8888b.
88 88' `88 88' `88 88' `88 Y8ooooo. 88 88' `88 88 88ooood8
88 88 88. .88 88 88 88 88 88. .88 88 88. ...
dP dP `88888P8 dP dP `88888P' dP `88888P8 dP `88888P'
""" + End
def cls():
if system == 'Linux':
command("clear")
elif system == 'Windows':
command("cls")
else:
print("\nPlease, Run This Programm on Linux, Windows and MacOS!\n")
sys.exit()
def main():
command("printf '\033]2;Translate\a'")
cls()
print(banner)
print("\nUsage: " + Fore.GREEN + "Ctrl + D " + End + "To Exit...!\n")
word = input(opt)
if word == '':
try1()
else:
run(word)
def try1():
try_to_menu = input("\nDo you want to try again? [y/n] ")
if try_to_menu == 'y':
main()
elif try_to_menu == 'n':
ext()
else:
try1()
def ext():
cls()
print("\nExiting...")
sys.exit()
def run(var):
print("\nExample(fa/en/es)\n")
    src_lang = input("\nEnter Source Language: ")
    target_lang = input("\nEnter target language: ")
    run_translate = GoogleTranslator(source=src_lang, target=target_lang).translate(var)
print("\n-----------------------------\n")
print(run_translate)
try1()
if __name__ == '__main__':
try:
try:
main()
except KeyboardInterrupt:
print("\nCtrl + C")
            try1()
except EOFError:
print("\nCtrl + D")
print("\nExiting...")
sys.exit()
| 2.546875
| 3
|
examples/using_steem_offline.py
|
creativechain/crea-python-lib
| 0
|
12774566
|
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
from datetime import datetime, timedelta
import time
import io
import logging
from crea.blockchain import Blockchain
from crea.block import Block
from crea.account import Account
from crea.amount import Amount
from crea.witness import Witness
from creabase import operations
from crea.transactionbuilder import TransactionBuilder
from creagraphenebase.account import PasswordKey, PrivateKey, PublicKey
from crea.crea import Crea
from crea.utils import parse_time, formatTimedelta
from creaapi.exceptions import NumRetriesReached
from crea.nodelist import NodeList
from creabase.transactions import getBlockParams
log = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
# example wif
wif = "<KEY>"
if __name__ == "__main__":
stm_online = Crea()
ref_block_num, ref_block_prefix = getBlockParams(stm_online)
print("ref_block_num %d - ref_block_prefix %d" % (ref_block_num, ref_block_prefix))
stm = Crea(offline=True)
op = operations.Transfer({'from': 'creabot',
'to': 'holger80',
'amount': "0.001 CBD",
'memo': ""})
tb = TransactionBuilder(crea_instance=stm)
tb.appendOps([op])
tb.appendWif(wif)
tb.constructTx(ref_block_num=ref_block_num, ref_block_prefix=ref_block_prefix)
tx = tb.sign(reconstruct_tx=False)
print(tx.json())
| 1.890625
| 2
|
python/pyclaw/evolve/__init__.py
|
geoflows/geoclaw-4.x
| 2
|
12774567
|
<filename>python/pyclaw/evolve/__init__.py
#!/usr/bin/env python
# encoding: utf-8
"""
__init__.py
Created by <NAME> on 2008-08-21.
Copyright (c) 2008 University of Washington. All rights reserved.
"""
# This __init__ script only imports common utilities, most of the import
# should be done depending on the solver needed
__all__ = ['ClawSolver1D','rp']
from clawpack import ClawSolver1D
from rp import *
| 1.46875
| 1
|
process.py
|
nrc/rustc-timing-scripts
| 2
|
12774568
|
<reponame>nrc/rustc-timing-scripts<gh_stars>1-10
import json
import os
import re
import sys
VERBOSE = False
LL_RATIO = False
re_commit = re.compile("commit (.*)")
re_date = re.compile("Date: (.*)")
re_rustc = re.compile("rustc: .*/([\w\-_\.]*)")
re_time_and_mem = re.compile("( *)time: ([0-9\.]*); rss: ([0-9]*)MB\s*(.*)")
re_time = re.compile("( *)time: ([0-9\.]*)\s*(.*)")
re_incremental_reuse = re.compile(" *incremental: re-using (\d+) out of (\d+) modules")
re_incremental_dirty = re.compile(" *module .* is dirty because .* changed or was removed")
re_loc = re.compile("Lines of code: ([0-9]*)")
re_pre_nc = re.compile("Pre\-expansion node count: ([0-9]*)")
re_post_nc = re.compile("Post\-expansion node count: ([0-9]*)")
def process(label, arg, n):
in_files = []
for i in range(0, n):
in_name = os.path.join('raw', '%s--%s--%s.log'%(label, arg, i))
in_files.append(open(in_name))
out_name = os.path.join('processed', '%s--%s.json'%(label, arg))
if VERBOSE:
print "input:", in_files
print "output:", out_name
with open(out_name, 'w') as out_file:
process_files(in_files, out_file)
for f in in_files:
f.close()
def process_files(in_files, out_file):
data = {}
data['header'] = mk_header(in_files[0])
times = map(lambda f: mk_times(f), in_files)
data['times'] = map(post_process_times, merge_times(times))
json.dump(data, out_file, indent=4)
def mk_header(in_file):
commit_line = in_file.readline()
# skip merge and author lines
author_line = in_file.readline()
if author_line.startswith('Merge'):
in_file.readline()
date_line = in_file.readline()
header = {}
header['commit'] = re_commit.match(commit_line).group(1)
header['date'] = re_date.match(date_line).group(1)
return header
def mk_times(in_file):
all_times = []
# The last mentioned crate being compiled.
last_file = None
cur_times = None
loc = None
pre_nc = None
post_nc = None
for line in in_file:
time_and_mem_match = re_time_and_mem.match(line)
if time_and_mem_match:
assert(last_file)
if not cur_times:
cur_times = {}
cur_times['crate'] = last_file
cur_times['times'] = []
cur_times['rss'] = []
indent = time_and_mem_match.group(1)
# TODO do something with 'sub-times'
if not indent:
time = time_and_mem_match.group(2)
mem = time_and_mem_match.group(3)
label = time_and_mem_match.group(4)
cur_times['times'].append((label, float(time)))
cur_times['rss'].append((label, int(mem)))
else:
time_match = re_time.match(line)
if time_match:
assert(last_file)
if not cur_times:
cur_times = {}
cur_times['crate'] = last_file
cur_times['times'] = []
cur_times['rss'] = []
indent = time_match.group(1)
# TODO do something with 'sub-times'
if not indent:
time = time_match.group(2)
label = time_match.group(3)
cur_times['times'].append((label, float(time)))
cur_times['rss'].append((label, int(0)))
else:
incremental_reuse_match = re_incremental_reuse.match(line)
incremental_dirty_match = re_incremental_dirty.match(line)
if incremental_reuse_match or incremental_dirty_match:
# FIXME -- might be useful to plot the reuse data somewhere
pass
else:
loc_match = re_loc.match(line)
pre_nc_match = re_pre_nc.match(line)
post_nc_match = re_post_nc.match(line)
if loc_match:
loc = loc_match.group(1)
elif pre_nc_match:
pre_nc = pre_nc_match.group(1)
elif post_nc_match:
post_nc = post_nc_match.group(1)
elif cur_times:
if loc:
cur_times['loc'] = int(loc)
cur_times['pre_nc'] = int(pre_nc)
cur_times['post_nc'] = int(post_nc)
all_times.append(cur_times)
cur_times = None
last_file = None
loc = None
pre_nc = None
post_nc = None
rustc_match = re_rustc.match(line)
if rustc_match:
last_file = rustc_match.group(1)
return all_times
# Takes a list of per-run times and returns a single merged list with averaged values.
def merge_times(times):
for t in times:
t.sort(key=lambda t: t['crate'])
if len(t) != len(times[0]):
print "Inconsistent data: len(t)=%s len(times[0])=%s" % (
len(t), len(times[0]))
return
crates = []
for ci in range(len(times[0])):
c = times[0][ci]
cur = {}
cur['crate'] = c['crate']
if 'loc' in c:
cur['loc'] = c['loc']
cur['pre_nc'] = c['pre_nc']
cur['post_nc'] = c['post_nc']
else:
cur['loc'] = 0
cur['pre_nc'] = 0
cur['post_nc'] = 0
cur['times'] = []
for i in range(len(c['times'])):
cur['times'].append((c['times'][i][0], average(times, lambda t: t[ci]['times'][i][1])))
cur['rss'] = []
for i in range(len(c['rss'])):
cur['rss'].append((c['rss'][i][0], average(times, lambda t: t[ci]['rss'][i][1])))
crates.append(cur)
return crates
def average(times, f):
if len(times) <= 4:
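        # With four or fewer samples there is nothing to trim, so return the plain mean.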
total = sum(map(f, times))
return total/len(times)
# Exclude the highest and lowest values.
times = map(f, times)
times.sort()
return sum(times[1:-1])/(len(times)-2)
def post_process_times(times):
total = 0
llvm = 0
for (l, t) in times['times']:
total += t
if LL_RATIO and l in ['translation', 'LLVM passes', 'linking']:
llvm += t
new_times = {}
for (l, t) in times['times']:
time = {
'time': t,
'percent': (t/total)*100
}
if LL_RATIO:
time['ratio_llvm'] = (t/llvm)
new_times[l] = time
new_mem = {}
for (l, m) in times['rss']:
new_mem[l] = m
times['times'] = new_times
times['rss'] = new_mem
times['total'] = total
return times
if len(sys.argv) <= 3:
print "Requires label, filename of log, and number of logs as arguments"
exit(1)
process(sys.argv[1], sys.argv[2], int(sys.argv[3]))
| 2.1875
| 2
|
setup.py
|
pjdelport/django-timezone-utils
| 1
|
12774569
|
<filename>setup.py
# coding: utf-8
from setuptools import setup
import os
version = __import__('timezone_utils').VERSION
setup(
name='django-timezone-utils',
version=version,
description='Time Zone Utilities for Django Models',
long_description=open(
os.path.join(
os.path.dirname(__file__),
"README.rst"
)
).read(),
author="<NAME>",
author_email="<EMAIL>",
license="MIT",
packages=['timezone_utils'],
install_requires=[
'pytz',
'django>=1.8'
],
zip_safe=False,
platforms='any',
include_package_data=True,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Database',
'Topic :: Software Development :: Libraries',
],
url='http://github.com/michaeljohnbarr/django-timezone-utils/',
)
| 1.3125
| 1
|
hbaselines/base_policies/actor_critic.py
|
jesbu1/h-baselines
| 0
|
12774570
|
"""Script containing the abstract policy class."""
import numpy as np
import tensorflow as tf
from hbaselines.utils.tf_util import get_trainable_vars
from hbaselines.utils.tf_util import get_target_updates
class ActorCriticPolicy(object):
"""Base Actor Critic Policy.
Attributes
----------
sess : tf.compat.v1.Session
the current TensorFlow session
ob_space : gym.spaces.*
the observation space of the environment
ac_space : gym.spaces.*
the action space of the environment
co_space : gym.spaces.*
the context space of the environment
buffer_size : int
the max number of transitions to store
batch_size : int
SGD batch size
actor_lr : float
actor learning rate
critic_lr : float
critic learning rate
verbose : int
the verbosity level: 0 none, 1 training information, 2 tensorflow debug
tau : float
target update rate
gamma : float
discount factor
layer_norm : bool
enable layer normalisation
layers : list of int or None
the size of the Neural network for the policy
act_fun : tf.nn.*
the activation function to use in the neural network
use_huber : bool
specifies whether to use the huber distance function as the loss for
the critic. If set to False, the mean-squared error metric is used
instead
"""
def __init__(self,
sess,
ob_space,
ac_space,
co_space,
buffer_size,
batch_size,
actor_lr,
critic_lr,
verbose,
tau,
gamma,
layer_norm,
layers,
act_fun,
use_huber):
"""Instantiate the base policy object.
Parameters
----------
sess : tf.compat.v1.Session
the current TensorFlow session
ob_space : gym.spaces.*
the observation space of the environment
ac_space : gym.spaces.*
the action space of the environment
co_space : gym.spaces.*
the context space of the environment
buffer_size : int
the max number of transitions to store
batch_size : int
SGD batch size
actor_lr : float
actor learning rate
critic_lr : float
critic learning rate
verbose : int
the verbosity level: 0 none, 1 training information, 2 tensorflow
debug
tau : float
target update rate
gamma : float
discount factor
layer_norm : bool
enable layer normalisation
layers : list of int or None
the size of the Neural network for the policy
act_fun : tf.nn.*
the activation function to use in the neural network
use_huber : bool
specifies whether to use the huber distance function as the loss
for the critic. If set to False, the mean-squared error metric is
used instead
"""
self.sess = sess
self.ob_space = ob_space
self.ac_space = ac_space
self.co_space = co_space
self.buffer_size = buffer_size
self.batch_size = batch_size
self.actor_lr = actor_lr
self.critic_lr = critic_lr
self.verbose = verbose
self.layers = layers
self.tau = tau
self.gamma = gamma
self.layer_norm = layer_norm
self.act_fun = act_fun
self.use_huber = use_huber
print(locals())
def initialize(self):
"""Initialize the policy.
This is used at the beginning of training by the algorithm, after the
model parameters have been initialized.
"""
raise NotImplementedError
def update(self, update_actor=True, **kwargs):
"""Perform a gradient update step.
Parameters
----------
update_actor : bool
specifies whether to update the actor policy. The critic policy is
still updated if this value is set to False.
Returns
-------
float
critic loss
float
actor loss
"""
raise NotImplementedError
def get_action(self, obs, context, apply_noise, random_actions, env_num=0):
"""Call the actor methods to compute policy actions.
Parameters
----------
obs : array_like
the observation
context : array_like or None
the contextual term. Set to None if no context is provided by the
environment.
apply_noise : bool
whether to add Gaussian noise to the output of the actor. Defaults
to False
random_actions : bool
if set to True, actions are sampled randomly from the action space
instead of being computed by the policy. This is used for
exploration purposes.
env_num : int
the environment number. Used to handle situations when multiple
parallel environments are being used.
Returns
-------
array_like
computed action by the policy
"""
raise NotImplementedError
def store_transition(self, obs0, context0, action, reward, obs1, context1,
done, is_final_step, env_num=0, evaluate=False):
"""Store a transition in the replay buffer.
Parameters
----------
obs0 : array_like
the last observation
context0 : array_like or None
the last contextual term. Set to None if no context is provided by
the environment.
action : array_like
the action
reward : float
the reward
obs1 : array_like
the current observation
context1 : array_like or None
the current contextual term. Set to None if no context is provided
by the environment.
done : float
is the episode done
is_final_step : bool
whether the time horizon was met in the step corresponding to the
current sample. This is used by the TD3 algorithm to augment the
done mask.
env_num : int
the environment number. Used to handle situations when multiple
parallel environments are being used.
evaluate : bool
whether the sample is being provided by the evaluation environment.
If so, the data is not stored in the replay buffer.
"""
raise NotImplementedError
def get_td_map(self):
"""Return dict map for the summary (to be run in the algorithm)."""
raise NotImplementedError
@staticmethod
def _get_obs(obs, context, axis=0):
"""Return the processed observation.
If the contextual term is not None, this will look as follows:
-----------------
processed_obs = | obs | context |
-----------------
Otherwise, this method simply returns the observation.
Parameters
----------
obs : array_like
the original observation
context : array_like or None
the contextual term. Set to None if no context is provided by the
environment.
axis : int
the axis to concatenate the observations and contextual terms by
Returns
-------
array_like
the processed observation
"""
if context is not None and context[0] is not None:
context = context.flatten() if axis == 0 else context
obs = np.concatenate((obs, context), axis=axis)
return obs
@staticmethod
def _get_ob_dim(ob_space, co_space):
"""Return the processed observation dimension.
If the context space is not None, it is included in the computation of
this term.
Parameters
----------
ob_space : gym.spaces.*
the observation space of the environment
co_space : gym.spaces.*
the context space of the environment
Returns
-------
tuple
the true observation dimension
"""
ob_dim = ob_space.shape
if co_space is not None:
ob_dim = tuple(map(sum, zip(ob_dim, co_space.shape)))
return ob_dim
@staticmethod
def _setup_target_updates(model_scope, target_scope, scope, tau, verbose):
"""Create the soft and initial target updates.
The initial model parameters are assumed to be stored under the scope
name "model", while the target policy parameters are assumed to be
under the scope name "target".
If an additional outer scope was provided when creating the policies,
they can be passed under the `scope` parameter.
Parameters
----------
model_scope : str
the scope of the model parameters
target_scope : str
the scope of the target parameters
scope : str or None
the outer scope, set to None if not available
tau : float
target update rate
verbose : int
the verbosity level: 0 none, 1 training information, 2 tensorflow
debug
Returns
-------
tf.Operation
initial target updates, to match the target with the model
tf.Operation
soft target update operations
"""
if scope is not None:
model_scope = scope + '/' + model_scope
target_scope = scope + '/' + target_scope
return get_target_updates(
get_trainable_vars(model_scope),
get_trainable_vars(target_scope),
tau, verbose)
@staticmethod
def _remove_fingerprint(val, ob_dim, fingerprint_dim, additional_dim):
"""Remove the fingerprint from the input.
This is a hacky procedure to remove the fingerprint elements from the
computation. The fingerprint elements are the last few elements of the
observation dimension, before any additional concatenated observations
(e.g. contexts or actions).
Parameters
----------
val : tf.Variable
the original input
ob_dim : int
number of environmental observation elements
fingerprint_dim : int
number of fingerprint elements
additional_dim : int
number of additional elements that were added to the input variable
Returns
-------
tf.Variable
the input with the fingerprints zeroed out
"""
return val * tf.constant([1.0] * (ob_dim - fingerprint_dim) +
[0.0] * fingerprint_dim +
[1.0] * additional_dim)
| 2.609375
| 3
|
demo4.py
|
SysOptLab/Problems
| 0
|
12774571
|
import numpy as np
import opt_prob
import scipy.optimize
# -- problem setup
name = '2.4 GOLDPR'
problem = opt_prob.Cons(name)
def cns(x):
g = -1.0*np.array(problem.cns(x))
return g.tolist()
# -- start optimization
x0 = ((np.array(problem.lb) + np.array(problem.ub)) / 2.0).tolist()
bounds = []
for lb_i, ub_i in zip(problem.lb, problem.ub):
bounds.append((lb_i, ub_i))
ineq_cons = {'type':'ineq', 'fun': cns}
method = 'SLSQP'
options = {'disp': True}
res = scipy.optimize.minimize(problem.obj, x0, method=method, bounds=bounds,
constraints=ineq_cons, options=options)
print(res)
| 2.796875
| 3
|
create_number_text.py
|
sp1007/ViTacotron2
| 0
|
12774572
|
from tacotron2.text.vi_number_and_units import normalize_vi
from random import randrange
print("Start generate vietnamese number strings")
with open("training_data/vietnamese_number.txt", 'w', encoding='utf-8') as f:
for i in range(20000):
n = randrange(1000000000, 2000000000)
f.write(normalize_vi(str(n)) + '\n')
print("done!")
| 2.796875
| 3
|
hello_world.py
|
albertonietos/git-project
| 0
|
12774573
|
print("Hello world!")
print("Hello darkness my old friend")
print("I've come to talk with you again")
| 2.328125
| 2
|
ozpcenter/api/subscription/views.py
|
emosher/ozp-backend
| 1
|
12774574
|
<filename>ozpcenter/api/subscription/views.py
"""
Subscription Views
"""
import logging
from django.shortcuts import get_object_or_404
from rest_framework import filters
from rest_framework import status
from rest_framework import viewsets
from rest_framework.response import Response
from ozpcenter import errors
from ozpcenter import permissions
import ozpcenter.api.subscription.model_access as model_access
import ozpcenter.api.subscription.serializers as serializers
logger = logging.getLogger('ozp-center.' + str(__name__))
class SubscriptionViewSet(viewsets.ModelViewSet):
"""
ModelViewSet for getting all Subscription entries for all users
URIs
======
GET /api/subscription/
Summary:
Get a list of all system-wide Subscription entries
Response:
200 - Successful operation - [SubscriptionSerializer]
POST /api/subscription/
Summary:
Add a Subscription
Request:
data: SubscriptionSerializer Schema
Response:
200 - Successful operation - SubscriptionSerializer
DELETE /api/subscription/{pk}
Summary:
Delete a Subscription Entry by ID
"""
serializer_class = serializers.SubscriptionSerializer
permission_classes = (permissions.IsUser,)
def get_queryset(self):
queryset = model_access.get_all_subscriptions()
# listing_id = self.request.query_params.get('listing', None)
# if listing_id is not None:
# queryset = queryset.filter(subscription_type='listing', entity_id=listing_id)
# Maybe filter by entity_type
return queryset
def create(self, request):
serializer = serializers.SubscriptionSerializer(data=request.data,
context={'request': request}, partial=True)
if not serializer.is_valid():
logger.error('{0!s}'.format(serializer.errors))
raise errors.ValidationException('{0!s}'.format(serializer.errors))
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
# def update(self, request, pk=None):
# """
# Update is used only change the expiration date of the message
# """
# instance = self.get_queryset().get(pk=pk)
# serializer = serializers.SubscriptionSerializer(instance,
# data=request.data, context={'request': request}, partial=True)
#
# if not serializer.is_valid():
# logger.error('{0!s}'.format(serializer.errors))
# return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# serializer.save()
#
# return Response(serializer.data, status=status.HTTP_200_OK)
def destroy(self, request, pk=None):
current_request_profile = model_access.get_self(request.user.username)
if not current_request_profile.is_steward():
raise errors.PermissionDenied('Only Stewards can delete subscriptions')
queryset = self.get_queryset()
subscription_instance = get_object_or_404(queryset, pk=pk)
subscription_instance.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class UserSubscriptionViewSet(viewsets.ModelViewSet):
"""
ModelViewSet for getting all UserSubscription entries for all users
URIs
======
GET /api/self/subscription/
Summary:
Get a list of all user Subscription entries
Response:
200 - Successful operation - [SubscriptionSerializer]
DELETE /api/self/subscription/{pk}
Summary:
Delete a user Subscription Entry by ID
"""
permission_classes = (permissions.IsUser,)
serializer_class = serializers.SubscriptionSerializer
filter_backends = (filters.OrderingFilter,)
def get_queryset(self):
"""
Get current user's subscriptions
"""
return model_access.get_self_subscriptions(self.request.user.username)
def destroy(self, request, pk=None):
"""
Dismiss subscription
"""
queryset = self.get_queryset()
subscription = get_object_or_404(queryset, pk=pk)
model_access.delete_self_subscription(subscription, self.request.user.username)
return Response(status=status.HTTP_204_NO_CONTENT)
| 2.109375
| 2
|
src/ling_409/tests/test_join.py
|
seanbethard/corpuswork
| 0
|
12774575
|
<reponame>seanbethard/corpuswork
import nltk
grammar = nltk.data.load('file:agree_join.fcfg',cache=False)
parser = nltk.parse.FeatureChartParser(grammar)
agreement_test_sentences = ['Mary sang and danced',
'Mary and John danced',
'Mary and John',
'Mary and John saw Kim',
'Mary saw John and Kim',
'<NAME> and John danced',
'Mary baked a soft and tasty cake',
'Mary baked a tasty quiche and a soft cake']
for sent in agreement_test_sentences:
print sent + '\n'
trees = parser.nbest_parse(sent.split())
if len(trees) == 0:
print '--> ungrammatical\n'
else:
for tree in trees:
print tree
print '\n'
| 2.84375
| 3
|
sanitise/sanitise.py
|
wapdat/ppi-sanistise
| 4
|
12774576
|
import re
import nltk
debug = False
list = ''
def tokenise(doc):
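    # Run NLTK NER over the document and replace each named entity with its entity type (e.g. PERSON, GPE).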
tokenized_doc = nltk.word_tokenize(doc)
tagged_sentences = nltk.pos_tag(tokenized_doc)
ne_chunked_sents = nltk.ne_chunk(tagged_sentences)
named_entities = []
for tagged_tree in ne_chunked_sents:
if hasattr(tagged_tree, 'label'):
entity_name = ' '.join(c[0] for c in tagged_tree.leaves()) #
entity_type = tagged_tree.label() # get NE category
named_entities.append((entity_name, entity_type))
doc = doc.replace(entity_name, entity_type)
if (debug) : print(named_entities)
if (debug) : print('%-20s "%s"' % ('NER', doc))
return doc
def regexReplace( str, token, desc, regex):
global list
list = list + ', ' + desc
    r = re.compile(regex, re.IGNORECASE)
    cleanStr = r.sub(token, str)
if (debug) :
if (str != cleanStr):
print('%-20s "%s"' % (desc, cleanStr))
return cleanStr
def replacePPI(str):
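    # Mask each class of personally identifiable information with a fixed placeholder token.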
str = regexReplace(str, '<EMAIL>', 'email address', '[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+')
str = regexReplace(str, 'UKPOSTCODE', 'uk postcode', '(gir ?0aa|GIR ?0AA|[a-pr-uwyzA-PR-UWYZ]([0-9]{1,2}|([a-hk-yA-HK-Y][0-9]([0-9abehmnprv-yABEHMNPRV-Y])?)|[0-9][a-hjkps-uwA-HJKPS-UW]) ?[0-9][abd-hjlnp-uw-zABD-HJLNP-UW-Z]{2})')
# Amex numbers look like US phone numbers
str = regexReplace(str, 'CARDNUM', 'Amex', '3[47][0-9]{13}')
str = regexReplace(str, 'CARDNUM', 'BCGlobal', '(6541|6556)[0-9]{12}')
str = regexReplace(str, 'CARDNUM', 'Carte Blanche Card', '389[0-9]{11}')
str = regexReplace(str, 'CARDNUM', 'Diners Club Card', '3(?:0[0-5]|[68][0-9])[0-9]{11}')
str = regexReplace(str, 'CARDNUM', 'Discover Card', '65[4-9][0-9]{13}|64[4-9][0-9]{13}|6011[0-9]{12}|(622(?:12[6-9]|1[3-9][0-9]|[2-8][0-9][0-9]|9[01][0-9]|92[0-5])[0-9]{10})')
str = regexReplace(str, 'CARDNUM', 'Insta Payment Card', '63[7-9][0-9]{13}')
str = regexReplace(str, 'CARDNUM', 'JCB Card', '(?:2131|1800|35\d{3})\d{11}$')
str = regexReplace(str, 'CARDNUM', 'KoreanLocalCard', '9[0-9]{15}')
str = regexReplace(str, 'CARDNUM', 'Laser Card', '(6304|6706|6709|6771)[0-9]{12,15}')
str = regexReplace(str, 'CARDNUM', 'Maestro Card', '(5018|5020|5038|6304|6759|6761|6763)[0-9]{8,15}')
str = regexReplace(str, 'CARDNUM', 'Mastercard', '5[1-5][0-9]{14}')
str = regexReplace(str, 'CARDNUM', 'Solo Card', '(6334|6767)[0-9]{12}|(6334|6767)[0-9]{14}|(6334|6767)[0-9]{15}')
str = regexReplace(str, 'CARDNUM', 'Switch Card', '(4903|4905|4911|4936|6333|6759)[0-9]{12}|(4903|4905|4911|4936|6333|6759)[0-9]{14}|(4903|4905|4911|4936|6333|6759)[0-9]{15}|564182[0-9]{10}|564182[0-9]{12}|564182[0-9]{13}|633110[0-9]{10}|633110[0-9]{12}|633110[0-9]{13}')
str = regexReplace(str, 'CARDNUM', 'Union Pay Card', '(62[0-9]{14,17})')
str = regexReplace(str, 'CARDNUM', 'Visa Card', '4[0-9]{12}(?:[0-9]{3})?')
str = regexReplace(str, 'CARDNUM', 'Visa Master Card', '(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14})')
str = regexReplace(str, 'ZIPCODEUS' , 'zip code', '[0-9]{5}(-[0-9]{4})?')
str = regexReplace(str, 'POSTCODECA', 'Canada postcode', '[abceghj-nprstvxyABCEGHJ-NPRSTVXY]{1}[0-9]{1}[abceghj-nprstv-zABCEGHJ-NPRSTV-Z]{1}[ ]?[0-9]{1}[abceghj-nprstv-zABCEGHJ-NPRSTV-Z]{1}[0-9]{1}')
### after all the more specific matches
# Problem with chomping leading and training space
str = regexReplace(str, ' USPHONE ', 'US phone', '(1?\W*([2-9][0-8][0-9])\W*([2-9][0-9]{2})\W*([0-9]{4})(\se?x?t?(\d*))?)')
str = regexReplace(str, 'USPHONE', 'US phone', '(\s|^)(?:(?:\+?1\s*(?:[.-]\s*)?)?(?:\(\s*([2-9]1[02-9]|[2-9][02-8]1|[2-9][02-8][02-9])\s*\)|([2-9]1[02-9]|[2-9][02-8]1|[2-9][02-8][02-9]))\s*(?:[.-]\s*)?)?([2-9]1[02-9]|[2-9][02-9]1|[2-9][02-9]{2})\s*(?:[.-]\s*)?([0-9]{4})(?:\s*(?:#|x\.?|ext\.?|extension)\s*(\d+))?(\s|$)')
str = regexReplace(str, 'SSN', 'ssn', '(?!219-09-9999|078-05-1120)(?!666|000|9\d{2})\d{3}-(?!00)\d{2}-(?!0{4})\d{4}')
str = regexReplace(str, 'UKPHONE', 'uk phone', '(?:(?:\(?(?:0(?:0|11)\)?[\s-]?\(?|\+)44\)?[\s-]?(?:\(?0\)?[\s-]?)?)|(?:\(?0))(?:(?:\d{5}\)?[\s-]?\d{4,5})|(?:\d{4}\)?[\s-]?(?:\d{5}|\d{3}[\s-]?\d{3}))|(?:\d{3}\)?[\s-]?\d{3}[\s-]?\d{3,4})|(?:\d{2}\)?[\s-]?\d{4}[\s-]?\d{4}))(?:[\s-]?(?:x|ext\.?|\#)\d{3,4})?')
    str = regexReplace(str, 'ACCOUNTNO', 'account number', '\d{5,12}')
return str
def getSubstituteText(key, type):
return ""
| 2.75
| 3
|
01-intro/bow-simple-pytorch.py
|
bastings/nn4nlp2017-code-pytorch
| 82
|
12774577
|
<filename>01-intro/bow-simple-pytorch.py
# coding: utf-8
"""
BOW (simple version)
Based on <NAME>'s DyNet code examples:
https://github.com/neubig/nn4nlp2017-code
http://phontron.com/class/nn4nlp2017/
"""
from collections import defaultdict
import time
import random
import torch
from torch.autograd import Variable
import torch.nn as nn
torch.manual_seed(1)
# Functions to read in the corpus
w2i = defaultdict(lambda: len(w2i))
t2i = defaultdict(lambda: len(t2i))
UNK = w2i["<unk>"]
def read_dataset(filename):
with open(filename, "r") as f:
for line in f:
tag, words = line.lower().strip().split(" ||| ")
yield ([w2i[x] for x in words.split(" ")], t2i[tag])
# Read in the data
train = list(read_dataset("../data/classes/train.txt"))
w2i = defaultdict(lambda: UNK, w2i)
dev = list(read_dataset("../data/classes/test.txt"))
nwords = len(w2i)
ntags = len(t2i)
# The parameters for our BoW-model
dtype = torch.FloatTensor # enable CUDA here if you like
w = Variable(torch.randn(nwords, ntags).type(dtype), requires_grad=True)
b = Variable(torch.randn(ntags).type(dtype), requires_grad=True)
# A function to calculate scores for one sentence
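# The score vector for a sentence is the sum of the selected rows of w plus the bias b,
# i.e. a linear bag-of-words classifier over word indices.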
def calc_scores(words):
lookup_tensor = Variable(torch.LongTensor(words))
embed = w[lookup_tensor]
score = torch.sum(embed, 0) + b
return score.view((1, -1))
for ITER in range(100):
# train
random.shuffle(train)
train_loss = 0.0
start = time.time()
for words, tag in train:
# forward pass
scores = calc_scores(words)
target = Variable(torch.LongTensor([tag]))
loss = nn.CrossEntropyLoss()
output = loss(scores, target)
train_loss += output.data[0]
# backward pass (compute gradients)
output.backward()
# update weights with SGD
lr = 0.01
w.data -= lr * w.grad.data
b.data -= lr * b.grad.data
# clear gradients for next step
w.grad.data.zero_()
b.grad.data.zero_()
print("iter %r: train loss/sent=%.4f, time=%.2fs" %
(ITER, train_loss/len(train), time.time()-start))
# evaluate
correct = 0.0
for words, tag in dev:
scores = calc_scores(words)
predict = scores.data.numpy().argmax(axis=1)
if predict == tag:
correct += 1
print("iter %r: test acc=%.4f" %
(ITER, correct/len(dev)))
| 3.140625
| 3
|
docs/usage-reference/parser/parser_omegaconf.py
|
Mbompr/fromconfig
| 19
|
12774578
|
"""OmegaConfParser example."""
import fromconfig
import random
def random_hex() -> str:
return hex(hash(random.random()))
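# The parser resolves "${...}" interpolations: "${host}" and "${port}" reference other keys,
# "now:" is used as a default resolver and "random_hex:" is the custom resolver registered
# through the "resolvers" key below.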
if __name__ == "__main__":
config = {
"host": "localhost",
"port": "8008",
"url": "${host}:${port}",
"path": "models/${now:}/${random_hex:}", # Use default resolver now + custom resolver
"resolvers": {"random_hex": random_hex}, # Register custom resolver
}
parser = fromconfig.parser.OmegaConfParser()
parsed = parser(config)
print(parsed)
assert parsed["url"] == "localhost:8008"
| 3.125
| 3
|
exercises/chapter01/test_01_07.py
|
deep-diver/fastai-course
| 0
|
12774579
|
def test():
# Here we can either check objects created in the solution code, or the
# string value of the solution, available as __solution__. A helper for
# printing formatted messages is available as __msg__. See the testTemplate
# in the meta.json for details.
# If an assertion fails, the message will be displayed
assert "L(range(12))" in __solution__, "range를 이용해 초기 L을 생성하였나요?"
assert "t *= 2" in __solution__, "*= 연산자를 사용하였나요?"
assert "t[0, 12]" in __solution__, "튜플 방식으로 찾아서 반환하였나요?"
assert "t[mask]" in __solution__, "마스킹 방식으로 찾아서 반환하였나요?"
__msg__.good("잘 하셨습니다!")
| 2.265625
| 2
|
ship_class.py
|
BTCallahan/super-ds9
| 0
|
12774580
|
<reponame>BTCallahan/super-ds9<filename>ship_class.py
from dataclasses import dataclass
from functools import lru_cache
from posixpath import split
from random import choice
import re
from string import digits
from typing import Dict, Final, Optional, Tuple, List
from frozendict import frozendict
from energy_weapon import ALL_ENERGY_WEAPONS, EnergyWeapon
from global_functions import get_first_group_in_pattern
from nation import ALL_NATIONS, Nation
from torpedo import ALL_TORPEDO_TYPES, Torpedo
VALID_SHIP_TYPES:Final = {
"ESCORT",
"ATTACK_FIGHTER",
"CRUISER",
"WARSHIP",
"RESUPPLY",
"PLATFORM",
"STATION",
"BIRD_OF_PREY",
"WARBIRD"
}
@lru_cache
def get_system_names(
*,
has_torpedo_launchers:bool=False,
has_cloaking_device:bool=False,
has_transporters:bool=True,
has_warp:bool=True,
has_shields:bool,
has_polerized_hull:bool,
has_impulse:bool=True,
beam_weapon_name:str="",
cannon_weapon_name:str=""
):
names = [
"Warp Core:",
"Sensors:",
"Scanners:"
]
keys = [
"sys_warp_core",
"sys_sensors",
"sys_scanners"
]
"""
if has_shields:
names.append("Shields:")
keys.append("shield")
if has_polerized_hull:
names.append("Polarization:")
keys.append("polarization")
"""
if has_shields:
names.append("Shield Gen.:")
keys.append("sys_shield")
if has_polerized_hull:
names.append("P. Hull:")
keys.append("sys_polarize")
if has_warp:
names.append("Warp Drive:")
keys.append("sys_warp_drive")
if has_impulse:
names.append("I. Engines:")
keys.append("sys_impulse")
if beam_weapon_name:
names.append(f"{beam_weapon_name}:")
keys.append("sys_beam_array")
if cannon_weapon_name:
names.append(f"{cannon_weapon_name}:")
keys.append("sys_cannon_weapon")
if has_cloaking_device:
names.append("Cloak Dev.:")
keys.append("sys_cloak")
if has_torpedo_launchers:
names.append("Torp. Launchers:")
keys.append("sys_torpedos")
if has_transporters:
names.append("Transporters:")
keys.append("sys_transporter")
return tuple(names), tuple(keys)
@dataclass(frozen=True)
class ShipClass:
ship_type:str
name:str
symbol:str
max_hull:int
max_crew:int
max_energy:int
power_generated_per_turn:int
damage_control:float
energy_weapon:EnergyWeapon
scanner_range:int
nation:Nation
system_names:Tuple[str]
system_keys:Tuple[str]
detection_strength:float
targeting:float
size:float
torp_dict:frozendict[Torpedo, int]
transporters:int=0
max_shields:int=0
evasion:float=0.0
max_beam_energy:int=0
max_beam_targets:int=1
max_cannon_energy:int=0
max_armor:int=0
polarized_hull:int=0
max_warp:int=0
torp_tubes:int=0
warp_breach_damage:int=0
cloak_strength:float=0.0
cloak_cooldown:int=2
"""
def __init__(self, *,
ship_type:str,
name:str,
symbol:str,
max_shields:int,
max_armor:int=0,
max_hull:int,
max_torps:int=0,
max_crew:int,
max_energy:int,
damage_control:float,
torp_types:Optional[List[str]]=None,
torp_tubes:int=0,
max_beam_energy:int,
warp_breach_damage:int=2,
energy_weapon_code:str,
nation_code:str,
system_names:Tuple[str],
system_keys:Tuple[str]
):
self.ship_type = ship_type
self.symbol = symbol
self.name = name
self.max_shields = max_shields
self.max_armor = max_armor
self.max_hull = max_hull
self.max_crew = max_crew
self.max_energy = max_energy
self.damage_control = damage_control
self.nation_code = nation_code
self.energy_weapon_code = energy_weapon_code
if (torp_types is None or len(torp_types) == 0) != (torp_tubes < 1) != (max_torps < 1):
raise IndexError(
f'''The length of the torp_types list is {len(torp_types)}, but the value of torp_tubes is
{torp_tubes}, and the value of maxTorps is {max_torps}. All of these should be less then one, OR greater then or equal
to one.'''
)
if torp_types:
torp_types.sort(key=lambda t: ALL_TORPEDO_TYPES[t].damage, reverse=True)
self.torp_types:Tuple[str] = tuple(["NONE"] if not torp_types else torp_types)
self.max_torpedos = max_torps
self.torp_tubes = torp_tubes
self.max_beam_energy = max_beam_energy
self.warp_breach_damage = warp_breach_damage
"""
@classmethod
def create_ship_class(
cla,
*,
ship_type:str,
name:str,
symbol:str,
max_shields:int=0,
polarized_hull:int=0,
max_armor:int=0,
max_hull:int,
max_crew:int=0,
transporters:int=0,
max_energy:int,
max_warp:int,
power_generated_per_turn:int,
damage_control:float,
scanner_range:int,
torp_dict:Optional[Dict[Torpedo,int]]=None,
torp_tubes:int=0,
max_beam_energy:int=0,
max_beam_targets:int=1,
max_cannon_energy:int=0,
warp_breach_damage:int=2,
energy_weapon:EnergyWeapon,
nation:Nation,
cloak_strength:float=0.0,
detection_strength:float,
size:float,
targeting:float,
evasion:float=0.0,
cloak_cooldown:int=2
):
try:
max_torpedos = sum([t for t in torp_dict.values()])
except AttributeError:
max_torpedos = 0
short_beam_name_cap = energy_weapon.short_beam_name_cap if max_beam_energy else ""
short_can_name_cap = energy_weapon.short_cannon_name_cap if max_cannon_energy else ""
system_names, system_keys = get_system_names(
has_torpedo_launchers=max_torpedos > 0 and torp_tubes > 0,
has_cloaking_device=cloak_strength > 0.0,
has_transporters=max_crew > 0,
beam_weapon_name=f"{short_beam_name_cap}s",
cannon_weapon_name=f"{short_can_name_cap}",
has_impulse=evasion > 0.0,
has_warp=max_warp > 0,
has_shields=max_shields > 0,
has_polerized_hull=polarized_hull > 0
)
fd = frozendict(torp_dict)
return cla(
ship_type=ship_type,
name=name,
symbol=symbol,
max_shields=max_shields,
max_armor=max_armor,
max_hull=max_hull,
max_crew=max_crew,
transporters=transporters,
scanner_range=scanner_range,
max_energy=max_energy,
polarized_hull=polarized_hull,
power_generated_per_turn=power_generated_per_turn,
damage_control=damage_control,
torp_dict=fd,
torp_tubes=torp_tubes,
max_warp=max_warp,
max_beam_energy=max_beam_energy,
max_beam_targets=max_beam_targets,
max_cannon_energy=max_cannon_energy,
warp_breach_damage=warp_breach_damage,
energy_weapon=energy_weapon,
nation=nation,
system_names=system_names,
system_keys=system_keys,
cloak_strength=cloak_strength,
cloak_cooldown=cloak_cooldown,
detection_strength=detection_strength,
size=size,
targeting=targeting,
evasion=evasion
)
@lru_cache
def get_torp_dict(self):
return {
            k: v for k, v in self.torp_dict.items()
}
def create_name(self):
has_proper_name = self.has_proper_name
return choice(self.nation.ship_names) if has_proper_name else "".join([choice(digits) for a in range(8)])
@property
@lru_cache
def has_proper_name(self):
"""Does this ship/station have a propper name, or just a sequence of numbers?
Returns:
bool: True if the ship's nation has names AND it has crew, False otherwise
"""
return self.max_crew > 0 and self.nation.ship_names
@property
@lru_cache
def ship_type_has_shields(self):
return self.max_shields > 0
@property
@lru_cache
def max_torpedos(self):
return sum(v for v in self.torp_dict.values())
@property
@lru_cache
def ship_type_can_fire_torps(self):
return self.max_torpedos > 0
@property
@lru_cache
def get_most_powerful_torpedo_type(self):
if not self.ship_type_can_fire_torps:
return ALL_TORPEDO_TYPES["NONE"]
t = [k for k in self.torp_dict.keys()]
t.sort(key=lambda a: a.damage, reverse=True)
return t[0]
@property
@lru_cache
def allowed_torpedos_set(self):
return frozenset(self.torp_dict.keys()) if self.torp_dict else frozenset([ALL_TORPEDO_TYPES["NONE"]])
@property
@lru_cache
def allowed_torpedos_tuple(self):
return tuple(self.torp_dict.keys()) if self.torp_dict else tuple([ALL_TORPEDO_TYPES["NONE"]])
@property
@lru_cache
def ship_type_can_fire_beam_arrays(self):
return self.max_beam_energy > 0
@property
@lru_cache
def ship_type_can_fire_cannons(self):
return self.max_cannon_energy > 0
@property
@lru_cache
def is_automated(self):
return self.max_crew <= 0
@property
@lru_cache
def ship_type_can_cloak(self):
return self.cloak_strength > 0.0
@property
@lru_cache
def is_mobile(self):
return self.evasion > 0.0
@property
@lru_cache
def can_be_docked_with(self):
return self.max_crew <= 0 and self.evasion == 0.0
@property
@lru_cache
def get_stragic_values(self):
"""Determins the stragic value of the ship class for scoring purpousess.
Returns:
Tuple[int,float]: A tuple ontaining values for the max hull, max shields, max energy, max crew members, max weapon energy, and torpedo effectiveness
"""
        torpedo_value = (self.max_torpedos * self.torp_tubes *
            self.get_most_powerful_torpedo_type.damage
        ) if self.ship_type_can_fire_torps else 0
try:
cloaking = self.cloak_strength / self.cloak_cooldown
except ZeroDivisionError:
cloaking = 0.0
evasion = self.size / (1.0 + self.evasion)
return (
self.max_hull * (1 + self.damage_control) * 4, self.max_shields, self.polarized_hull,
self.max_energy * 0.25, self.power_generated_per_turn, self.max_warp,
self.max_crew, self.max_beam_energy, self.max_cannon_energy, torpedo_value,
self.detection_strength, cloaking,
evasion, self.targeting, self.scanner_range
)
@property
@lru_cache
def get_added_stragic_values(self):
hull, shields, polarized_hull, energy, power_generated_per_turn, max_warp, crew, weapon_energy, cannon_energy, torpedos, detection_strength, cloaking, evasion, targeting, scanner_range = self.get_stragic_values
return hull + shields + energy + crew + weapon_energy + cannon_energy + torpedos + detection_strength
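# create_ship_classes() parses "library/ships.txt": each SHIPCLASS: ... END_SHIPCLASS block is
# matched with shipdata_pattern, the per-field regexes below pull out individual values, and the
# result is collected into a frozendict keyed by ship class code.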
def create_ship_classes():
shipdata_pattern = re.compile(r"SHIPCLASS:([A-Z\_]+)\n([^#]+)END_SHIPCLASS")
symbol_pattern = re.compile(r"SYM:([a-zA-Z])\n")
type_pattern = re.compile(r"TYPE:([A-Z_]+)\n")
name_pattern = re.compile(r"NAME:([\w\-\ \'\(\)]+)\n")
shields_pattern = re.compile(r"SHIELDS:([\d]+)\n")
polarized_hull_pattern = re.compile(r"POLARIZED_HULL:([\d]+)\n")
hull_pattern = re.compile(r"HULL:([\d]+)\n")
scanner_pattern = re.compile(r"SCANNER_RANGE:([\d]+)\n")
energy_pattern = re.compile(r"ENERGY:([\d]+)\n")
power_generation_pattern = re.compile(r"POWER:([\d]+)\n")
energy_weapon_pattern = re.compile(r"ENERGY_WEAPON:([A-Z_]+)\n")
crew_pattern = re.compile(r"CREW:([\d]+)\n")
transporters_pattern = re.compile(r"TRANSPORTERS:([\d]+)\n")
torpedos_pattern = re.compile(r"TORPEDOS:([\w,]+)\n")
cloak_strength_pattern = re.compile(r"CLOAK_STRENGTH:([\d.]+)\n")
cloak_cooldown_pattern = re.compile(r"CLOAK_COOLDOWN:([\d]+)\n")
size_pattern = re.compile(r"SIZE:([\d.]+)\n")
targeting_pattern = re.compile(r"TARGETING:([\d.]+)\n")
evasion_pattern = re.compile(r"EVASION:([\d.]+)\n")
detection_strength_pattern = re.compile(r"DETECTION_STRENGTH:([\d.]+)\n")
max_warp_pattern = re.compile(r"MAX_WARP:([\d]+)\n")
damage_control_pattern = re.compile(r"DAMAGE_CONTROL:([\d.]+)\n")
torpedos_tubes_pattern = re.compile(r"TORPEDO_TUBES:([\d]+)\n")
max_beam_energy_pattern = re.compile(r"MAX_BEAM_ENERGY:([\d]+)\n")
max_beam_targets_pattern = re.compile(r"MAX_BEAM_TARGETS:([\d])\n")
max_cannon_energy_pattern = re.compile(r"MAX_CANNON_ENERGY:([\d]+)\n")
warp_core_breach_damage_pattern = re.compile(r"WARP_CORE_BREACH_DAMAGE:([\d]+)\n")
nation_types_pattern = re.compile(r"NATION:([A-Z\_]+)\n")
with open("library/ships.txt") as shipclass_text:
contents = shipclass_text.read()
shipclasses = shipdata_pattern.finditer(contents)
shipclass_dict:Dict[str,ShipClass] = {}
for shipclass in shipclasses:
shipclass_code = shipclass.group(1)
shipclass_txt = shipclass.group(2)
type_ = get_first_group_in_pattern(
shipclass_txt, type_pattern,
error_message=f"The entry {shipclass_code} file 'library/ships.txt' did not contain an entry for 'TYPE:'"
)
assert type_ in VALID_SHIP_TYPES
symbol = get_first_group_in_pattern(
shipclass_txt, symbol_pattern,
error_message=f"The entry {shipclass_code} file 'library/ships.txt' did not contain an entry for 'SYM:'"
)
nation_ = get_first_group_in_pattern(
shipclass_txt, nation_types_pattern,
error_message=f"The entry {shipclass_code} file 'library/ships.txt' did not contain an entry for 'NATION:'"
)
if nation_ not in ALL_NATIONS:
raise KeyError(
f"The nation code {nation_} was not found in the dictionary of nations. Valid code are: {ALL_NATIONS.keys()}")
else:
nation = ALL_NATIONS[nation_]
name = get_first_group_in_pattern(
shipclass_txt, name_pattern,
error_message=f"The entry {shipclass_code} file 'library/ships.txt' did not contain an entry for 'NAME:'"
)
shields = get_first_group_in_pattern(
shipclass_txt, shields_pattern, return_aux_if_no_match=True,
aux_valute_to_return_if_no_match=0, type_to_convert_to=int
)
polarized_hull = get_first_group_in_pattern(
shipclass_txt, polarized_hull_pattern, return_aux_if_no_match=True,
aux_valute_to_return_if_no_match=0, type_to_convert_to=int
)
hull = get_first_group_in_pattern(
shipclass_txt, hull_pattern, type_to_convert_to=int,
error_message=f"The entry {shipclass_code} file 'library/ships.txt' did not contain an entry for 'HULL:'"
)
crew = get_first_group_in_pattern(
shipclass_txt, crew_pattern, return_aux_if_no_match=True,
aux_valute_to_return_if_no_match=0, type_to_convert_to=int
)
scanner_range = get_first_group_in_pattern(
shipclass_txt, scanner_pattern, type_to_convert_to=int,
error_message=f"The entry {shipclass_code} file 'library/ships.txt' did not contain an entry for 'SCANNER_RANGE:'"
)
transporters = get_first_group_in_pattern(
shipclass_txt, transporters_pattern, return_aux_if_no_match=True,
aux_valute_to_return_if_no_match=0, type_to_convert_to=int
)
energy = get_first_group_in_pattern(
shipclass_txt, energy_pattern, type_to_convert_to=int,
error_message=f"The entry {shipclass_code} file 'library/ships.txt' did not contain an entry for 'ENERGY:'"
)
torpedos = get_first_group_in_pattern(
shipclass_txt, torpedos_pattern, return_aux_if_no_match=True
)
if torpedos:
tt = torpedos.split(",")
t_types = tt[::2]
t_numbers = tt[1::2]
torp_dict_ = {
ALL_TORPEDO_TYPES[k] :int(v) for k,v in zip(t_types, t_numbers)
}
else:
torp_dict_ = {}
torp_dict = frozendict(torp_dict_)
torpedo_tubes = get_first_group_in_pattern(
shipclass_txt, torpedos_tubes_pattern, return_aux_if_no_match=True, aux_valute_to_return_if_no_match=0,
type_to_convert_to=int
)
if (len(torp_dict) == 0) and (torpedo_tubes > 0):
raise ValueError(
f"In the ship class {shipclass_code} there are {len(torp_dict)} items in the torpedo dictionary, but the ship class has {torpedo_tubes} torpedo tubes."
)
power_generation = get_first_group_in_pattern(
shipclass_txt, power_generation_pattern, type_to_convert_to=int,
error_message=f"The entry {shipclass_code} file 'library/ships.txt' did not contain an entry for 'POWER:'"
)
energy_weapon_ = get_first_group_in_pattern(
shipclass_txt, energy_weapon_pattern,
error_message=f"The entry {shipclass_code} file 'library/ships.txt' did not contain an entry for 'ENERGY_WEAPON:'"
)
if energy_weapon_ not in ALL_ENERGY_WEAPONS:
raise KeyError(
f"The energy weapon code {energy_weapon_} was not found in the dictionary of energy weapons. Valid code are: {ALL_ENERGY_WEAPONS.keys()}")
else:
energy_weapon = ALL_ENERGY_WEAPONS[energy_weapon_]
cloak_strength = get_first_group_in_pattern(
shipclass_txt, cloak_strength_pattern, return_aux_if_no_match=True, aux_valute_to_return_if_no_match=0.0,
type_to_convert_to=float
)
cloak_cooldown = get_first_group_in_pattern(
shipclass_txt, cloak_cooldown_pattern, return_aux_if_no_match=True, aux_valute_to_return_if_no_match=2,
type_to_convert_to=int
)
detection_strength = get_first_group_in_pattern(
shipclass_txt, detection_strength_pattern, type_to_convert_to=float,
error_message=f"The entry {shipclass_code} file 'library/ships.txt' did not contain an entry for 'TYPE:'"
)
damage_control = get_first_group_in_pattern(
shipclass_txt, damage_control_pattern,
type_to_convert_to=float,
error_message=f"The entry {shipclass_code} file 'library/ships.txt' did not contain an entry for 'DAMAGE_CONTROL:'"
)
size = get_first_group_in_pattern(
shipclass_txt, size_pattern, type_to_convert_to=float,
error_message=f"The entry {shipclass_code} file 'library/ships.txt' did not contain an entry for 'SIZE:'"
)
evasion = get_first_group_in_pattern(
shipclass_txt, evasion_pattern, type_to_convert_to=float,
return_aux_if_no_match=True, aux_valute_to_return_if_no_match=0.0
)
max_warp = get_first_group_in_pattern(
shipclass_txt, max_warp_pattern, type_to_convert_to=int, return_aux_if_no_match=True,
aux_valute_to_return_if_no_match=0
)
targeting = get_first_group_in_pattern(
shipclass_txt, targeting_pattern, type_to_convert_to=float,
error_message=f"The entry {shipclass_code} file 'library/ships.txt' did not contain an entry for 'TARGETING:'"
)
max_beam_energy = get_first_group_in_pattern(
shipclass_txt, max_beam_energy_pattern, type_to_convert_to=int,
return_aux_if_no_match=True, aux_valute_to_return_if_no_match=0
)
max_beam_targets = get_first_group_in_pattern(
shipclass_txt, max_beam_targets_pattern, type_to_convert_to=int,
return_aux_if_no_match=True, aux_valute_to_return_if_no_match=1
)
max_cannon_energy = get_first_group_in_pattern(
shipclass_txt, max_cannon_energy_pattern, type_to_convert_to=int,
return_aux_if_no_match=True, aux_valute_to_return_if_no_match=0
)
warp_core_breach_damage = get_first_group_in_pattern(
shipclass_txt, warp_core_breach_damage_pattern, type_to_convert_to=int,
error_message=f"The entry {shipclass_code} file 'library/ships.txt' did not contain an entry for 'WARP_CORE_BREACH_DAMAGE:'"
)
shipclass_dict[shipclass_code] = ShipClass.create_ship_class(
ship_type=type_,
symbol=symbol,
name=name,
max_shields=shields,
polarized_hull=polarized_hull,
max_hull=hull,
scanner_range=scanner_range,
torp_dict=torp_dict,
torp_tubes=torpedo_tubes,
damage_control=damage_control,
max_beam_energy=max_beam_energy,
max_beam_targets=max_beam_targets,
max_cannon_energy=max_cannon_energy,
max_energy=energy,
power_generated_per_turn=power_generation,
max_crew=crew,
transporters=transporters,
cloak_strength=cloak_strength,
cloak_cooldown=cloak_cooldown,
detection_strength=detection_strength,
size=size,
targeting=targeting,
evasion=evasion,
max_warp=max_warp,
warp_breach_damage=warp_core_breach_damage,
nation=nation,
energy_weapon=energy_weapon
)
return frozendict(shipclass_dict)
ALL_SHIP_CLASSES:Final = create_ship_classes()
| 2.296875
| 2
|
lightning.py
|
entn-at/Online-Speech-Recognition
| 0
|
12774581
|
<reponame>entn-at/Online-Speech-Recognition<filename>lightning.py<gh_stars>0
import os, sys
import random
import jiwer
import torch
import numpy as np
import torch.optim as optim
from absl import app
from apex import amp
from tqdm import trange, tqdm
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from warprnnt_pytorch import RNNTLoss
from rnnt.args import FLAGS
from rnnt.dataset import seq_collate, MergedDataset, Librispeech, CommonVoice, TEDLIUM, YoutubeCaption
from rnnt.models import Transducer
from rnnt.tokenizer import HuggingFaceTokenizer, CharTokenizer
from rnnt.transforms import build_transform
from rnnt.tokenizer import NUL, BOS, PAD
import pytorch_lightning as pl
from optimizer import SM3, AdamW, Novograd
if torch.cuda.is_available():
device = torch.device('cuda:0')
else:
device = torch.device('cpu')
FLAGS(sys.argv)
class ParallelTraining(pl.LightningModule):
def __init__(self):
super(ParallelTraining, self).__init__()
_, _, input_size = build_transform(
feature_type=FLAGS.feature, feature_size=FLAGS.feature_size,
n_fft=FLAGS.n_fft, win_length=FLAGS.win_length,
hop_length=FLAGS.hop_length, delta=FLAGS.delta, cmvn=FLAGS.cmvn,
downsample=FLAGS.downsample,
T_mask=FLAGS.T_mask, T_num_mask=FLAGS.T_num_mask,
F_mask=FLAGS.F_mask, F_num_mask=FLAGS.F_num_mask
)
self.log_path = None
self.loss_fn = RNNTLoss(blank=NUL)
if FLAGS.tokenizer == 'char':
self.tokenizer = CharTokenizer(cache_dir=self.logdir)
else:
self.tokenizer = HuggingFaceTokenizer(
cache_dir='BPE-2048', vocab_size=FLAGS.bpe_size)
self.vocab_size = self.tokenizer.vocab_size
print(FLAGS.enc_type)
self.model = Transducer(
vocab_embed_size=FLAGS.vocab_embed_size,
vocab_size=self.vocab_size,
input_size=input_size,
enc_hidden_size=FLAGS.enc_hidden_size,
enc_layers=FLAGS.enc_layers,
enc_dropout=FLAGS.enc_dropout,
enc_proj_size=FLAGS.enc_proj_size,
dec_hidden_size=FLAGS.dec_hidden_size,
dec_layers=FLAGS.dec_layers,
dec_dropout=FLAGS.dec_dropout,
dec_proj_size=FLAGS.dec_proj_size,
joint_size=FLAGS.joint_size,
module_type=FLAGS.enc_type,
output_loss=False,
)
self.latest_alignment = None
self.steps = 0
self.epoch = 0
self.best_wer = 1000
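    # Linear learning-rate warmup: for the first FLAGS.warmup_step optimizer steps,
    # scale the learning rate from near zero up to FLAGS.lr.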
def warmup_optimizer_step(self, steps):
if steps < FLAGS.warmup_step:
lr_scale = min(1., float(steps + 1) / FLAGS.warmup_step*1.0)
for pg in self.optimizer.param_groups:
pg['lr'] = lr_scale * FLAGS.lr
def forward(self, batch):
xs, ys, xlen, ylen = batch
# xs, ys, xlen = xs.cuda(), ys, xlen.cuda()
alignment = self.model(xs, ys, xlen, ylen)
return alignment
def training_step(self, batch, batch_nb):
xs, ys, xlen, ylen = batch
# xs, ys, xlen = xs.cuda(), ys, xlen.cuda()
if xs.shape[1] != xlen.max():
xs = xs[:, :xlen.max()]
ys = ys[:, :ylen.max()]
alignment = self.model(xs, ys, xlen, ylen)
xlen = self.model.scale_length(alignment, xlen)
loss = self.loss_fn(alignment, ys.int(), xlen, ylen)
if batch_nb % 100 == 0:
lr_val = 0
for param_group in self.optimizer.param_groups:
lr_val = param_group['lr']
self.logger.experiment.add_scalar('lr', lr_val, self.steps)
self.steps += 1
if self.steps < FLAGS.warmup_step:
self.warmup_optimizer_step(self.steps)
return {'loss': loss, 'log': {
'loss': loss.item()
}}
def validation_step(self, batch, batch_nb):
xs, ys, xlen, ylen = batch
y, nll = self.model.greedy_decode(xs, xlen)
hypothesis = self.tokenizer.decode_plus(y)
ground_truth = self.tokenizer.decode_plus(ys.cpu().numpy())
measures = jiwer.compute_measures(ground_truth, hypothesis)
return {'val_loss': nll.mean().item(), 'wer': measures['wer'], 'ground_truth': ground_truth[0], 'hypothesis': hypothesis[0]}
def validation_end(self, outputs):
# OPTIONAL
self.logger.experiment.add_text('test', 'This is test', 0)
avg_wer = np.mean([x['wer'] for x in outputs])
ppl = np.mean([x['val_loss'] for x in outputs])
self.logger.experiment.add_scalar('val/WER', avg_wer, self.steps)
self.logger.experiment.add_scalar('val/perplexity', ppl, self.steps)
hypothesis, ground_truth = '', ''
for idx in range(min(5, len(outputs))):
hypothesis += outputs[idx]['hypothesis']+'\n\n'
ground_truth += outputs[idx]['ground_truth'] + '\n\n'
self.logger.experiment.add_text('generated', hypothesis, self.steps)
self.logger.experiment.add_text('grouth_truth', ground_truth, self.steps)
if self.latest_alignment != None:
alignment = self.latest_alignment
idx = random.randint(0, alignment.size(0) - 1)
alignment = torch.softmax(alignment[idx], dim=-1)
alignment[:, :, 0] = 0 # ignore blank token
alignment = alignment.mean(dim=-1)
self.logger.experiment.add_image(
"alignment",
plot_alignment_to_numpy(alignment.data.numpy().T),
self.steps, dataformats='HWC')
self.logger.experiment.flush()
if self.best_wer > avg_wer and self.epoch > 0:
print('best checkpoint found!')
# checkpoint = {
# 'model': self.model.state_dict(),
# 'optimizer': self.optimizer.state_dict(),
# 'epoch': self.epoch
# }
# if FLAGS.apex:
# checkpoint['amp'] = amp.state_dict()
# torch.save(checkpoint, os.path.join(self.log_path, str(self.epoch)+'amp_checkpoint.pt'))
self.trainer.save_checkpoint(os.path.join(self.log_path, str(self.epoch)+'amp_checkpoint.pt'))
self.best_wer = avg_wer
self.plateau_scheduler.step(avg_wer)
self.epoch += 1
return {'val/WER': torch.tensor(avg_wer),
'wer': torch.tensor(avg_wer),
'val/perplexity': torch.tensor(ppl) }
def validation_epoch_end(self, outputs):
avg_wer = np.mean([x['wer'] for x in outputs])
ppl = np.mean([x['val_loss'] for x in outputs])
hypothesis, ground_truth = '', ''
for idx in range(5):
hypothesis += outputs[idx]['hypothesis']+'\n\n'
ground_truth += outputs[idx]['ground_truth'] + '\n\n'
        self.logger.experiment.add_text('generated', hypothesis, self.steps)
        self.logger.experiment.add_text('grouth_truth', ground_truth, self.steps)
if self.latest_alignment != None:
alignment = self.latest_alignment
idx = random.randint(0, alignment.size(0) - 1)
alignment = torch.softmax(alignment[idx], dim=-1)
alignment[:, :, 0] = 0 # ignore blank token
alignment = alignment.mean(dim=-1)
            self.logger.experiment.add_image(
"alignment",
plot_alignment_to_numpy(alignment.data.numpy().T),
self.steps, dataformats='HWC')
self.logger.experiment.add_scalar('val/WER', avg_wer, self.steps)
self.logger.experiment.add_scalar('val/perplexity', ppl, self.steps)
self.logger.experiment.flush()
self.plateau_scheduler.step(avg_wer)
self.epoch += 1
return {'val/WER': torch.tensor(avg_wer),
'val/perplexity': torch.tensor(ppl) }
def configure_optimizers(self):
if FLAGS.optim == 'adam':
self.optimizer = AdamW(
self.model.parameters(), lr=FLAGS.lr, weight_decay=1e-5)
elif FLAGS.optim == 'sm3':
self.optimizer = SM3(
self.model.parameters(), lr=FLAGS.lr, momentum=0.0)
else:
self.optimizer = Novograd(
self.model.parameters(), lr=FLAGS.lr, weight_decay=1e-3)
scheduler = []
if FLAGS.sched:
self.plateau_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
self.optimizer, patience=FLAGS.sched_patience,
factor=FLAGS.sched_factor, min_lr=FLAGS.sched_min_lr,
verbose=1)
scheduler= [self.plateau_scheduler]
self.warmup_optimizer_step(0)
return [self.optimizer]
@pl.data_loader
def train_dataloader(self):
transform_train, _, _ = build_transform(
feature_type=FLAGS.feature, feature_size=FLAGS.feature_size,
n_fft=FLAGS.n_fft, win_length=FLAGS.win_length,
hop_length=FLAGS.hop_length, delta=FLAGS.delta, cmvn=FLAGS.cmvn,
downsample=FLAGS.downsample,
T_mask=FLAGS.T_mask, T_num_mask=FLAGS.T_num_mask,
F_mask=FLAGS.F_mask, F_num_mask=FLAGS.F_num_mask
)
dataloader = DataLoader(
dataset=MergedDataset([
Librispeech(
root=FLAGS.LibriSpeech_train_500,
tokenizer=self.tokenizer,
transform=transform_train,
audio_max_length=FLAGS.audio_max_length),
Librispeech(
root=FLAGS.LibriSpeech_train_360,
tokenizer=self.tokenizer,
transform=transform_train,
audio_max_length=FLAGS.audio_max_length),
# Librispeech(
# root=FLAGS.LibriSpeech_train_100,
# tokenizer=self.tokenizer,
# transform=transform_train,
# audio_max_length=FLAGS.audio_max_length),
TEDLIUM(
root=FLAGS.TEDLIUM_train,
tokenizer=self.tokenizer,
transform=transform_train,
audio_max_length=FLAGS.audio_max_length),
CommonVoice(
root=FLAGS.CommonVoice, labels='train.tsv',
tokenizer=self.tokenizer,
transform=transform_train,
audio_max_length=FLAGS.audio_max_length,
audio_min_length=1),
YoutubeCaption(
root='../speech_data/youtube-speech-text/', labels='bloomberg2_meta.csv',
tokenizer=self.tokenizer,
transform=transform_train,
audio_max_length=FLAGS.audio_max_length,
audio_min_length=1),
YoutubeCaption(
root='../speech_data/youtube-speech-text/', labels='life_meta.csv',
tokenizer=self.tokenizer,
transform=transform_train,
audio_max_length=FLAGS.audio_max_length,
audio_min_length=1),
YoutubeCaption(
root='../speech_data/youtube-speech-text/', labels='news_meta.csv',
tokenizer=self.tokenizer,
transform=transform_train,
audio_max_length=FLAGS.audio_max_length,
audio_min_length=1),
YoutubeCaption(
root='../speech_data/youtube-speech-text/', labels='english2_meta.csv',
tokenizer=self.tokenizer,
transform=transform_train,
audio_max_length=FLAGS.audio_max_length,
audio_min_length=1),
]),
batch_size=FLAGS.sub_batch_size, shuffle=True,
num_workers=FLAGS.num_workers, collate_fn=seq_collate,
drop_last=True)
return dataloader
@pl.data_loader
def val_dataloader(self):
_, transform_test, _ = build_transform(
feature_type=FLAGS.feature, feature_size=FLAGS.feature_size,
n_fft=FLAGS.n_fft, win_length=FLAGS.win_length,
hop_length=FLAGS.hop_length, delta=FLAGS.delta, cmvn=FLAGS.cmvn,
downsample=FLAGS.downsample,
T_mask=FLAGS.T_mask, T_num_mask=FLAGS.T_num_mask,
F_mask=FLAGS.F_mask, F_num_mask=FLAGS.F_num_mask
)
val_dataloader = DataLoader(
dataset=MergedDataset([
Librispeech(
root=FLAGS.LibriSpeech_test,
tokenizer=self.tokenizer,
transform=transform_test,
reverse_sorted_by_length=True)]),
batch_size=FLAGS.eval_batch_size, shuffle=False,
num_workers=FLAGS.num_workers, collate_fn=seq_collate)
return val_dataloader
if __name__ == "__main__":
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
import pickle
model = ParallelTraining()
# with open('test.pt', 'wb') as f:
# pickle.dump(model, f)
gpus = [0,1, 2, 3]
params = {
'gpus': gpus,
'distributed_backend': 'ddp',
'gradient_clip_val': 10,
'val_check_interval': 0.25,
'accumulate_grad_batches': FLAGS.batch_size // (FLAGS.sub_batch_size*len(gpus))
}
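    # Effective batch size: FLAGS.sub_batch_size * len(gpus) * accumulate_grad_batches,
    # i.e. roughly FLAGS.batch_size.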
if FLAGS.apex:
print('use apex')
params['amp_level'] = FLAGS.opt_level
params['precision'] = 16
params['min_loss_scale'] = 1.0
from datetime import datetime
cur_time = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
log_name = '{}-{}'.format('rnnt-m', FLAGS.tokenizer)
log_path = 'logs/{}'.format(log_name)
os.makedirs(log_path, exist_ok=True)
model.log_path = log_path
logger = pl.loggers.tensorboard.TensorBoardLogger('logs', name='rnnt-m')
params['logger'] = logger
checkpoint_callback = ModelCheckpoint(
filepath=log_path,
save_top_k=True,
verbose=True,
monitor='val/perplexity',
mode='min',
prefix=''
)
params['checkpoint_callback'] = checkpoint_callback
print(params)
# params['resume_from_checkpoint'] = '/home/theblackcat/rnn_transducer/logs/rnnt-bpe/8amp_checkpoint.pt'
trainer = Trainer(**params)
model.trainer = trainer
trainer.fit(model)
| 1.84375
| 2
|
exberry_adapter/setup.py
|
gaborh-da/da-marketplace
| 0
|
12774582
|
<reponame>gaborh-da/da-marketplace<filename>exberry_adapter/setup.py<gh_stars>0
from setuptools import setup
setup(name='marketplace-exchange-adapter',
version='0.1.18',
description='Daml Marketplace Exchange Adapter',
author='<NAME>',
url='daml.com',
license='Apache2',
install_requires=['dazl>=7,<8', 'aiohttp'],
packages=['bot'],
include_package_data=True)
| 1.335938
| 1
|
jwql/instrument_monitors/nirspec_monitors/data_trending/utils/process_data.py
|
falkben/jwql
| 1
|
12774583
|
"""This module holds functions for miri data trending
All functions in this module are tailored for the miri datatrending application.
Detailed descriptions are given for every function individually.
-------
- <NAME>
Use
---
Dependencies
------------
MIRI_trend_requestsDRAFT1900201.docx
References
----------
Notes
-----
"""
import jwql.instrument_monitors.nirspec_monitors.data_trending.utils.mnemonics as mn
import jwql.instrument_monitors.nirspec_monitors.data_trending.utils.condition as cond
import statistics
import sqlite3
import warnings
import numpy as np
from collections import defaultdict
def extract_data(condition, mnemonic):
    '''Function extracts data from a given mnemonic at a given condition
    Parameters
    ----------
    condition : object
        condition object that holds one or more subconditions
mnemonic : AstropyTable
holds single table with mnemonic data
Return
------
temp : list or None
holds data that applies to given condition
'''
temp = []
#look for all values that fit to the given conditions
for element in mnemonic:
if condition.state(float(element['time'])):
temp.append(float(element['value']))
    #return temp if one or more values fit the condition
#return None if no applicable data was found
if len(temp) > 0:
return temp
else:
return None
def lamp_distinction(caa_flag, lamp_sel, lamp_curr, lamp_volt):
"""Distincts over all calibration lamps and returns representative current means
each
Parameters
----------
"""
#initilize empty dict
lamp_values = defaultdict(list)
for index, flag in enumerate(caa_flag):
if flag['value'] == 'ON':
#initialize lamp value to default
current_lamp = "default"
#find current lamp value
for lamp in lamp_sel:
if lamp['time'] <= flag['time']:
current_lamp = lamp['value']
#go to next Value if dummy lamps are activated
if (current_lamp == 'NO_LAMP') or (current_lamp == 'DUMMY'):
continue
#define on_time of current lamp
try:
start_time = flag['time']
i = 1
if caa_flag[index+i]['value'] == 'OFF':
end_time = caa_flag[index+1]['time']
else:
i += 1
except IndexError:
break
#append and evaluate current and voltage values
temp_curr = []
temp_volt = []
#append current values to list
for curr in lamp_curr:
if curr['time'] >= start_time:
if curr['time'] < end_time:
temp_curr.append(float(curr['value']))
else:
break
#append voltage values to list
for volt in lamp_volt:
if volt['time'] >= start_time :
if volt['time'] < end_time:
temp_volt.append(float(volt['value']))
else:
break
lamp_data = []
#append current values
lamp_data.append(start_time)
lamp_data.append(end_time)
lamp_data.append(len(temp_curr))
lamp_data.append(statistics.mean(temp_curr))
lamp_data.append(statistics.stdev(temp_curr))
#append voltage values
lamp_data.append(len(temp_volt))
lamp_data.append(statistics.mean(temp_volt))
lamp_data.append(statistics.stdev(temp_volt))
lamp_values[current_lamp].append(( lamp_data ))
return lamp_values
def extract_filterpos(move_stat, wheel_pos, wheel_val):
    '''Extracts wheel position values recorded at the time of each successful move
    Parameters
    ----------
    move_stat : AstropyTable
        holds move status values of one specific wheel mnemonic
    wheel_pos : AstropyTable
        holds mechanical position labels of one specific wheel mnemonic
    wheel_val : AstropyTable
        holds measured position values of one specific wheel mnemonic
    Return
    ------
    pos_values : dict
        holds position values and times with corresponding position label as key
    '''
    #initialize empty dict for assigned position values
pos_values = defaultdict(list)
for index, stat in enumerate(move_stat):
#raise warning if position is UNKNOWN
if stat['value'] == "SUCCESS":
#initialize lamp value to default
current_pos = "default"
pos_val = 0
pos_time = 0
#Evaluate current position
for pos in wheel_pos:
if pos['time'] <= stat['time']:
current_pos = pos['value']
if pos['time'] > stat['time']:
break
#Evaluate corresponding value
for val in wheel_val:
if val['time'] <= stat['time']:
pos_val = val['value']
pos_time = val['time']
if val['time'] > stat['time']:
break
print (current_pos, pos_val, pos_time)
if current_pos != 'default':
pos_values[current_pos].append((pos_time, pos_val))
else:
continue
return pos_values
def once_a_day_routine(mnemonic_data):
'''Routine for processing a 15min data file once a day
Parameters
----------
mnemonic_data : dict
        dict holds time and value in an astropy table with corresponding identifier as key
Return
------
return_data : dict
Holds extracted data with applied conditions
'''
#abbreviate attribute
m = mnemonic_data
return_data = dict()
###########################################################################
con_set_1 = [ \
cond.unequal(m.mnemonic('INRSD_EXP_STAT'),'STARTED')]
#setup condition
condition_1 = cond.condition(con_set_1)
for identifier in mn.mnemonic_cond_1:
data = extract_data(condition_1, m.mnemonic(identifier))
if data != None:
return_data.update( {identifier:data} )
else:
print("no data for {}".format(identifier))
del condition_1
###########################################################################
con_set_2 = [ \
cond.equal(m.mnemonic('INRSH_LAMP_SEL'), 'NO_LAMP')]
#setup condition
condition_2 = cond.condition(con_set_2)
for identifier in mn.mnemonic_cond_2:
data = extract_data(condition_2, m.mnemonic(identifier))
if data != None:
return_data.update( {identifier:data} )
else:
print("no data for {}".format(identifier))
del condition_2
###########################################################################
con_set_3 = [ \
cond.unequal(m.mnemonic('INRSM_MOVE_STAT'), 'STARTED')]
#setup condition
condition_3 = cond.condition(con_set_3)
for identifier in mn.mnemonic_cond_3:
data = extract_data(condition_3, m.mnemonic(identifier))
if data != None:
return_data.update( {identifier:data} )
else:
print("no data for {}".format(identifier))
del condition_3
return return_data
def whole_day_routine(mnemonic_data):
'''Proposed routine for processing a 15min data file once a day
Parameters
----------
mnemonic_data : dict
        dict holds time and value in an astropy table with corresponding identifier as key
Return
------
data_cond_1 : dict
holds extracted data with condition 1 applied
data_cond_1 : dict
holds extracted data with condition 2 applied
'''
#abbreviate attribute
m = mnemonic_data
return_data = dict()
###########################################################################
con_set_ft_10 = [
cond.equal(m.mnemonic('ICTM_RT_FILTER'), 10, stringval = False)]
#setup condition
condition_ft_10 = cond.condition(con_set_ft_10)
for identifier in mn.mnemonic_ft10:
data = extract_data(condition_ft_10, m.mnemonic(identifier))
if data != None:
return_data.update( {identifier:data} )
else:
print("no data for {}".format(identifier))
del condition_ft_10
##########################################################################
con_set_caa = [ \
cond.equal(m.mnemonic('INRSH_CAA_PWRF_ST'), 'ON')]
#setup condition
condition_caa = cond.condition(con_set_caa)
for identifier in mn.mnemonic_caa:
data = extract_data(condition_caa, m.mnemonic(identifier))
if data != None:
return_data.update( {identifier:data} )
else:
print("no data for {}".format(identifier))
del condition_caa
###########################################################################
data_lamps = lamp_distinction( m.mnemonic('INRSI_CAA_ON_FLAG'),
m.mnemonic('INRSH_LAMP_SEL'),
m.mnemonic('INRSI_C_CAA_CURRENT'),
m.mnemonic('INRSI_C_CAA_VOLTAGE') )
return return_data, data_lamps
def wheelpos_routine(mnemonic_data):
    '''Proposed routine for position sensors each day
Parameters
----------
mnemonic_data : dict
dict holds time and value in a astropy table with correspining identifier as key
Return
------
FW : dict
holds FW ratio values and times with corresponding positionlabel as key
GW14 : dict
holds GW14 ratio values and times with corresponding positionlabel as key
GW23 : dict
holds GW23 ratio values and times with corresponding positionlabel as key
CCC : dict
holds CCC ratio values and times with corresponding positionlabel as key
'''
#abbreviate attribute
m = mnemonic_data
FW = extract_filterpos( m.mnemonic('INRSI_FWA_MOVE_ST'),
m.mnemonic('INRSI_FWA_MECH_POS'),
m.mnemonic('INRSI_C_FWA_POSITION'))
GWX = extract_filterpos(m.mnemonic('INRSI_GWA_MOVE_ST'),
m.mnemonic('INRSI_GWA_MECH_POS'),
m.mnemonic('INRSI_C_GWA_X_POSITION'))
GWY = extract_filterpos(m.mnemonic('INRSI_GWA_MOVE_ST'),
m.mnemonic('INRSI_GWA_MECH_POS'),
m.mnemonic('INRSI_C_GWA_Y_POSITION'))
return FW, GWX, GWY
if __name__ =='__main__':
pass
| 2.46875
| 2
|
src/database/create_db.py
|
johnnychiuchiu/Music-Recommender
| 0
|
12774584
|
<reponame>johnnychiuchiu/Music-Recommender<gh_stars>0
from schema import db
# from schema.db_models import *
if __name__=='__main__':
# create a new table scheme as defined in the schema script
db.create_all()
| 1.429688
| 1
|
adobe_analytics/exceptions.py
|
nickolasgryga/adobe_analytics
| 1
|
12774585
|
class ApiError(Exception):
"""
Exception raised when user does not have appropriate credentials
Used for 301 & 401 HTTP Status codes
"""
def __init__(self, response):
if 'error_description' in response:
self.message = response['error_description']
else:
self.message = response['error']
| 2.984375
| 3
|
tools/ete/ete_gene_cnv.py
|
anilthanki/tgac-galaxytools
| 8
|
12774586
|
from __future__ import print_function
import argparse
import collections
from ete3 import PhyloTree
def printTSV(myDict, colList=None):
""" Pretty print a list of dictionaries (myDict) as a dynamically sized table.
If column names (colList) aren't specified, they will show in random order.
Author: <NAME> - Use it as you want but don't blame me.
"""
if not colList:
colList = list(myDict[0].keys() if myDict else [])
myList = [colList]
for item in myDict:
myList.append([str(item[col] if item[col] is not None else '') for col in colList])
for item in myList:
print(*item, sep="\t")
def main():
parser = argparse.ArgumentParser(description='Gene Copy Number Finder')
parser.add_argument('--genetree', required=True, help='GeneTree in nhx format')
parser.add_argument('--speciesorder', required=True, help='Comma-separated species list')
args = parser.parse_args()
species_list = args.speciesorder.split(",")
species_list = [_.strip() for _ in species_list]
table = []
with open(args.genetree, "r") as f:
# reads multiple gene tree line by line gene tree
for line in f:
# Remove empty NHX features that can be produced by TreeBest but break ete3
line = line.replace('[&&NHX]', '')
# reads single gene tree
genetree = PhyloTree(line)
leaves = genetree.get_leaf_names()
leaves_parts = [_.split("_") for _ in leaves]
for i, leaf_parts in enumerate(leaves_parts):
if len(leaf_parts) != 2:
raise Exception("Leaf node '%s' is not in gene_species format" % leaves[i])
leaves_species = [_[1] for _ in leaves_parts]
species_counter = collections.Counter(leaves_species)
# Assign to ref_species the first element of species_list which
# appears in a leaf node
for ref_species in species_list:
if ref_species in species_counter:
break
else:
raise Exception("None of the specified species was found in the GeneTree '%s'" % line)
# Find the gene of the (first) leaf node for the ref_species
for leaf_parts in leaves_parts:
if leaf_parts[1] == ref_species:
species_counter['gene'] = leaf_parts[0]
break
table.append(species_counter)
colList = ["gene"] + species_list
printTSV(table, colList)
if __name__ == "__main__":
main()
| 3.296875
| 3
|
gtp_monitor.py
|
decastromonteiro/GTPv1-Monitoring
| 0
|
12774587
|
<reponame>decastromonteiro/GTPv1-Monitoring<gh_stars>0
from scapy.all import *
from scapy.contrib import gtp
import random
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-l","--loop", help="Loop the script for x times", type=int)
parser.add_argument("-G", "--GGSN", help="Choose which GGSN to monitor", type=str)
args = parser.parse_args()
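# The probe sends a GTPv1-C Create PDP Context Request with synthetic IMSI/MSISDN values,
# checks the Cause IE of the response, and on success tears the PDP context down again.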
def main(loop=1, GGSN='GPHPT02'):
for x in xrange(loop):
ggsn = {'10.221.48.252': 'GPHPT01',
'10.221.58.214': 'GPHPT02'}
GGSN_dict = { "GPHPT01": '10.221.48.252',
"GPHPT02": '10.221.58.214'}
IP_GGSN = GGSN_dict.get(GGSN)
fake_TEICI = 0xFFFFF
IMSI_Rand_Part = str(random.randint(0000000000,9999999999)).zfill(10)
IMSI = '7240{}{}'.format(random.randint(2,4), IMSI_Rand_Part)
create_gtp_packet = IP(dst=IP_GGSN) / UDP(dport=2123) / gtp.GTPHeader() / gtp.GTPCreatePDPContextRequest()
create_gtp_packet.IE_list = [
gtp.IE_IMSI(imsi=IMSI),
gtp.IE_Routing(MCC='724',MNC='02',LAC=53221, RAC=20),
gtp.IE_SelectionMode(SelectionMode="MS"),
gtp.IE_TEIDI(TEIDI=0),
gtp.IE_TEICP(TEICI=fake_TEICI),
gtp.IE_NSAPI(NSAPI=5),
gtp.IE_ChargingCharacteristics(normal_charging=1),
gtp.IE_EndUserAddress(PDPTypeNumber=0x8d),
gtp.IE_AccessPointName(length=13, APN='timbrasil.br'),
gtp.IE_GSNAddress(address='172.16.17.32'),
gtp.IE_GSNAddress(address='192.168.127.12'),
gtp.IE_MSInternationalNumber(length=8, digits='55{}{}'.format(random.randint(11,97), random.randint(900000000,999999999))),
gtp.IE_QoS(length=15, allocation_retention_prioiry=2),
gtp.IE_CommonFlags(length=1, dual_addr_bearer_fl=1),
gtp.IE_RATType(RAT_Type=1),
gtp.IE_UserLocationInformation(length=8, SAC=0x3ead, LAC=0xcfe5, MCC='724', MNC='02'),
gtp.IE_EvolvedAllocationRetentionPriority(length=1, PL=0x06)
]
a = sr1(create_gtp_packet,timeout=5,verbose=False)
create_gtp_response = a[1]
result = validate_response(create_gtp_response)
for IE in create_gtp_response.IE_list:
if IE.ietype == 17:
response_TEIDI = IE.TEICI
if result == "Success":
delete_gtp_packet = IP(dst=IP_GGSN) / UDP(dport=2123) / gtp.GTPHeader(teid=response_TEIDI) / gtp.GTPDeletePDPContextRequest()
delete_gtp_packet.IE_list = [gtp.IE_Teardown(),
gtp.IE_NSAPI(NSAPI=5)
]
b = sr1(delete_gtp_packet, timeout=5,verbose=False)
            print('GGSN {} is OK'.format(ggsn.get(IP_GGSN)))
else:
print("Create PDP Context Request Failed - {} is Faulty.").format(ggsn.get(IP_GGSN))
def validate_response(packet):
for IE in packet.IE_list:
if IE.ietype == 1:
cause = IE.CauseValue
# Convert CauseValue from Decimal to Bit --> Check the first two Bits --> If first two Bits == '10' it means Success
# 3GPP TS29.060 Rel10
if "{0:b}".format(int(cause))[0:2] == "10":
return "Success"
else:
return "Failure"
if __name__ == "__main__":
if args.loop and args.GGSN:
main(args.loop, args.GGSN)
elif args.loop:
main(loop=args.loop)
elif args.GGSN:
main(GGSN=args.GGSN)
else:
main()
| 2.53125
| 3
|
day04/d4.py
|
basoares/advent-of-code-2018
| 0
|
12774588
|
'''
Advent of Code - 2018
--- Day 4: Repose Record ---
Released under the MIT License <http://opensource.org/licenses/mit-license.php>
'''
import os
from collections import defaultdict
from datetime import datetime
import re
def sleep_pattern(events):
sleep = defaultdict(lambda: [0 for i in range(60)])
for event in sorted(events):
time, action = event[1:].split("] ")
date = datetime.strptime(time, '%Y-%m-%d %H:%M')
if "Guard" in action:
guard = int(re.findall("[\d]+", action)[0])
elif "asleep" in action:
start = date.minute
elif "wakes" in action:
end = date.minute
for m in range(start, end):
sleep[guard][m] += 1
return sleep
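# Part 1: the guard with the most total minutes asleep, times that guard's sleepiest minute.
# Part 2: the (guard, minute) pair with the highest single-minute sleep count.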
def part1(sleep):
guard_most_sleep = max(sleep.keys(), key = (lambda g: sum(sleep[g])))
minute_most_asleep = sleep[guard_most_sleep].index(max(sleep[guard_most_sleep]))
return guard_most_sleep * minute_most_asleep
def part2(sleep):
guard_most_sleep_minute = max(sleep.keys(), key=lambda g: max(sleep[g]))
minute_most_asleep = sleep[guard_most_sleep_minute].index(max(sleep[guard_most_sleep_minute]))
return guard_most_sleep_minute * minute_most_asleep
if __name__ == '__main__':
with open('../input/d04.txt', mode='r') as f:
_input = f.readlines()
sleep = sleep_pattern(_input)
print('Part One: {}'.format(part1(sleep)))
print('Part Two: {}'.format(part2(sleep)))
| 2.90625
| 3
|
trpgcreator/ui/dialogs/function.py
|
jacobcheatley/trpg-creator
| 0
|
12774589
|
<reponame>jacobcheatley/trpg-creator
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_files/dialogs/function.ui'
#
# Created by: PyQt5 UI code generator 5.7
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_FunctionDialog(object):
def setupUi(self, FunctionDialog):
FunctionDialog.setObjectName("FunctionDialog")
FunctionDialog.resize(400, 300)
self.verticalLayout = QtWidgets.QVBoxLayout(FunctionDialog)
self.verticalLayout.setObjectName("verticalLayout")
self.plainTextEditFunc = QtWidgets.QPlainTextEdit(FunctionDialog)
self.plainTextEditFunc.setAutoFillBackground(False)
self.plainTextEditFunc.setStyleSheet("* {\n"
" font-family: monospace;\n"
"}")
self.plainTextEditFunc.setLineWidth(1)
self.plainTextEditFunc.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustIgnored)
self.plainTextEditFunc.setTabStopWidth(36)
self.plainTextEditFunc.setObjectName("plainTextEditFunc")
self.verticalLayout.addWidget(self.plainTextEditFunc)
self.buttonBox = QtWidgets.QDialogButtonBox(FunctionDialog)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.verticalLayout.addWidget(self.buttonBox)
self.retranslateUi(FunctionDialog)
QtCore.QMetaObject.connectSlotsByName(FunctionDialog)
def retranslateUi(self, FunctionDialog):
_translate = QtCore.QCoreApplication.translate
FunctionDialog.setWindowTitle(_translate("FunctionDialog", "Edit Function"))
| 1.71875
| 2
|
src/probnum/diffeq/odefiltsmooth/initialize.py
|
jzenn/probnum
| 1
|
12774590
|
"""Initialisation procedures."""
# pylint: disable=import-outside-toplevel
import numpy as np
import scipy.integrate as sci
import probnum.filtsmooth as pnfs
import probnum.statespace as pnss
from probnum import randvars
# In the initialisation-via-RK function below, this value is added to the marginal stds of the initial derivatives that are known.
# If we put in zero, there are linalg errors (because a zero-cov RV is conditioned on a dirac likelihood).
# This value is chosen such that its square-root is a really small damping factor).
SMALL_VALUE = 1e-28
def initialize_odefilter_with_rk(
f, y0, t0, prior, initrv, df=None, h0=1e-2, method="DOP853"
):
r"""Initialize an ODE filter by fitting the prior process to a few steps of an approximate ODE solution computed with Scipy's RK.
It goes as follows:
1. The ODE integration problem is set up on the interval ``[t0, t0 + (2*order+1)*h0]``
    and solved with a call to ``scipy.integrate.solve_ivp``. The solver uses adaptive steps with ``atol=rtol=1e-12``,
but is forced to pass through the
events ``(t0, t0+h0, t0 + 2*h0, ..., t0 + (2*order+1)*h0)``.
    The result is a vector of time points and states with at least ``(2*order+1)`` entries.
    The adaptive solver may select many more steps, but because of the prescribed events it cannot take fewer.
2. A prescribed prior is fitted to the first ``(2*order+1)`` (t, y) pairs of the solution. ``order`` is the order of the prior.
3. The value of the resulting posterior at time ``t=t0`` is an estimate of the state and all its derivatives.
The resulting marginal standard deviations estimate the error. This random variable is returned.
Parameters
----------
f
ODE vector field.
y0
Initial value.
t0
Initial time point.
prior
Prior distribution used for the ODE solver. For instance an integrated Brownian motion prior (``IBM``).
initrv
Initial random variable.
df
Jacobian of the ODE vector field. Optional. If specified, more components of the result will be exact.
h0
Maximum step-size to use for computing the approximate ODE solution. The smaller, the more accurate, but also, the smaller, the less stable.
The best value here depends on the ODE problem, and probably the chosen method. Optional. Default is ``1e-2``.
method
Which solver to use. This is communicated as a string that is compatible with ``scipy.integrate.solve_ivp(..., method=method)``.
Optional. Default is `DOP853`.
Returns
-------
Normal
Estimated (improved) initial random variable. Compatible with the specified prior.
Examples
--------
>>> from dataclasses import astuple
>>> from probnum.randvars import Normal
>>> from probnum.statespace import IBM
>>> from probnum.problems.zoo.diffeq import vanderpol
Compute the initial values of the van-der-Pol problem as follows
>>> f, t0, tmax, y0, df, *_ = astuple(vanderpol())
>>> print(y0)
[2. 0.]
>>> prior = IBM(ordint=3, spatialdim=2)
>>> initrv = Normal(mean=np.zeros(prior.dimension), cov=np.eye(prior.dimension))
>>> improved_initrv = initialize_odefilter_with_rk(f, y0, t0, prior=prior, initrv=initrv, df=df)
>>> print(prior.proj2coord(0) @ improved_initrv.mean)
[2. 0.]
>>> print(np.round(improved_initrv.mean, 1))
[ 2. 0. -2. 58.2 0. -2. 60. -1745.7]
>>> print(np.round(np.log10(improved_initrv.std), 1))
[-13.8 -11.3 -9. -1.5 -13.8 -11.3 -9. -1.5]
"""
y0 = np.asarray(y0)
ode_dim = y0.shape[0] if y0.ndim > 0 else 1
order = prior.ordint
proj_to_y = prior.proj2coord(0)
zeros_shift = np.zeros(ode_dim)
zeros_cov = np.zeros((ode_dim, ode_dim))
measmod = pnss.DiscreteLTIGaussian(
proj_to_y,
zeros_shift,
zeros_cov,
proc_noise_cov_cholesky=zeros_cov,
forward_implementation="sqrt",
backward_implementation="sqrt",
)
# order + 1 would suffice in theory, 2*order + 1 is for good measure
# (the "+1" is a safety factor for order=1)
num_steps = 2 * order + 1
t_eval = np.arange(t0, t0 + (num_steps + 1) * h0, h0)
sol = sci.solve_ivp(
f,
(t0, t0 + (num_steps + 1) * h0),
y0=y0,
atol=1e-12,
rtol=1e-12,
t_eval=t_eval,
method=method,
)
ts = sol.t[:num_steps]
ys = sol.y[:, :num_steps].T
initmean = initrv.mean.copy()
initmean[0 :: (order + 1)] = y0
initmean[1 :: (order + 1)] = f(t0, y0)
initcov_diag = np.diag(initrv.cov).copy()
initcov_diag[0 :: (order + 1)] = SMALL_VALUE
initcov_diag[1 :: (order + 1)] = SMALL_VALUE
if df is not None:
if order > 1:
initmean[2 :: (order + 1)] = df(t0, y0) @ f(t0, y0)
initcov_diag[2 :: (order + 1)] = SMALL_VALUE
initcov = np.diag(initcov_diag)
initcov_cholesky = np.diag(np.sqrt(initcov_diag))
initrv = randvars.Normal(initmean, initcov, cov_cholesky=initcov_cholesky)
kalman = pnfs.Kalman(prior, measmod, initrv)
out = kalman.filtsmooth(ys, ts)
estimated_initrv = out.state_rvs[0]
return estimated_initrv
def initialize_odefilter_with_taylormode(f, y0, t0, prior, initrv):
"""Initialize an ODE filter with Taylor-mode automatic differentiation.
This requires JAX. For an explanation of what happens ``under the hood``, see [1]_.
References
----------
.. [1] <NAME>. and <NAME>., Stable implementation of probabilistic ODE solvers,
*arXiv:2012.10106*, 2020.
The implementation is inspired by the implementation in
https://github.com/jacobjinkelly/easy-neural-ode/blob/master/latent_ode.py
Parameters
----------
f
ODE vector field.
y0
Initial value.
t0
Initial time point.
prior
Prior distribution used for the ODE solver. For instance an integrated Brownian motion prior (``IBM``).
initrv
Initial random variable.
Returns
-------
Normal
Estimated initial random variable. Compatible with the specified prior.
Examples
--------
>>> import sys, pytest
>>> if sys.platform.startswith('win'):
... pytest.skip('this doctest does not work on Windows')
>>> from dataclasses import astuple
>>> from probnum.randvars import Normal
>>> from probnum.problems.zoo.diffeq import threebody_jax, vanderpol_jax
>>> from probnum.statespace import IBM
Compute the initial values of the restricted three-body problem as follows
>>> f, t0, tmax, y0, df, *_ = astuple(threebody_jax())
>>> print(y0)
[ 0.994 0. 0. -2.00158511]
>>> prior = IBM(ordint=3, spatialdim=4)
>>> initrv = Normal(mean=np.zeros(prior.dimension), cov=np.eye(prior.dimension))
>>> improved_initrv = initialize_odefilter_with_taylormode(f, y0, t0, prior, initrv)
>>> print(prior.proj2coord(0) @ improved_initrv.mean)
[ 0.994 0. 0. -2.00158511]
>>> print(improved_initrv.mean)
[ 9.94000000e-01 0.00000000e+00 -3.15543023e+02 0.00000000e+00
0.00000000e+00 -2.00158511e+00 0.00000000e+00 9.99720945e+04
0.00000000e+00 -3.15543023e+02 0.00000000e+00 6.39028111e+07
-2.00158511e+00 0.00000000e+00 9.99720945e+04 0.00000000e+00]
Compute the initial values of the van-der-Pol oscillator as follows
>>> f, t0, tmax, y0, df, *_ = astuple(vanderpol_jax())
>>> print(y0)
[2. 0.]
>>> prior = IBM(ordint=3, spatialdim=2)
>>> initrv = Normal(mean=np.zeros(prior.dimension), cov=np.eye(prior.dimension))
>>> improved_initrv = initialize_odefilter_with_taylormode(f, y0, t0, prior, initrv)
>>> print(prior.proj2coord(0) @ improved_initrv.mean)
[2. 0.]
>>> print(improved_initrv.mean)
[ 2. 0. -2. 60. 0. -2. 60. -1798.]
>>> print(improved_initrv.std)
[0. 0. 0. 0. 0. 0. 0. 0.]
"""
try:
import jax.numpy as jnp
from jax.config import config
from jax.experimental.jet import jet
config.update("jax_enable_x64", True)
except ImportError as err:
raise ImportError(
"Cannot perform Taylor-mode initialisation without optional "
"dependencies jax and jaxlib. Try installing them via `pip install jax jaxlib`."
) from err
order = prior.ordint
def total_derivative(z_t):
"""Total derivative."""
z, t = jnp.reshape(z_t[:-1], z_shape), z_t[-1]
dz = jnp.ravel(f(t, z))
dt = jnp.array([1.0])
dz_t = jnp.concatenate((dz, dt))
return dz_t
z_shape = y0.shape
z_t = jnp.concatenate((jnp.ravel(y0), jnp.array([t0])))
derivs = []
derivs.extend(y0)
if order == 0:
all_derivs = pnss.Integrator._convert_derivwise_to_coordwise(
np.asarray(jnp.array(derivs)), ordint=0, spatialdim=len(y0)
)
return randvars.Normal(
np.asarray(all_derivs),
cov=np.asarray(jnp.diag(jnp.zeros(len(derivs)))),
cov_cholesky=np.asarray(jnp.diag(jnp.zeros(len(derivs)))),
)
(dy0, [*yns]) = jet(total_derivative, (z_t,), ((jnp.ones_like(z_t),),))
derivs.extend(dy0[:-1])
if order == 1:
all_derivs = pnss.Integrator._convert_derivwise_to_coordwise(
np.asarray(jnp.array(derivs)), ordint=1, spatialdim=len(y0)
)
return randvars.Normal(
np.asarray(all_derivs),
cov=np.asarray(jnp.diag(jnp.zeros(len(derivs)))),
cov_cholesky=np.asarray(jnp.diag(jnp.zeros(len(derivs)))),
)
for _ in range(1, order):
(dy0, [*yns]) = jet(total_derivative, (z_t,), ((dy0, *yns),))
derivs.extend(yns[-2][:-1])
all_derivs = pnss.Integrator._convert_derivwise_to_coordwise(
jnp.array(derivs), ordint=order, spatialdim=len(y0)
)
return randvars.Normal(
np.asarray(all_derivs),
cov=np.asarray(jnp.diag(jnp.zeros(len(derivs)))),
cov_cholesky=np.asarray(jnp.diag(jnp.zeros(len(derivs)))),
)
| 3.171875
| 3
|
cogs/utils/april_fool.py
|
HypixelBot/bot
| 10
|
12774591
|
<gh_stars>1-10
from random import random
hieroglyphics = {'a': 'ᔑ','b': 'ʖ','c': 'ᓵ','d': '↸','e': 'ᒷ','f': '⎓','g': '⊣','h': '⍑','i': '╎','j': '⋮','k': 'ꖌ','l': 'ꖎ','m': 'ᒲ','n': 'リ','o': '𝙹','p': '!¡','q': 'ᑑ','r': '∷','s': 'ᓭ','t': 'ℸ̣','u': '⚍','v': '⍊', 'w': '∴', 'x': '̇/', 'y': '||', 'z': '⨅'}
def magik(text):
if random() > 0.7: return ''.join(list(map(lambda x: hieroglyphics[x.lower()] if x.lower() in hieroglyphics else x, text)))
return text
def prank(emb):
emb['embed']['title'] = magik(emb['embed']['title'])
emb['footer']['text'] = magik(emb['footer']['text'])
for p in emb['pages']:
for f in emb['pages'][p]:
if f and 'name' in f: f['name'] = magik(f['name'])
return emb
| 2.9375
| 3
|
model/__init__.py
|
KentWangYQ/mongo2es
| 5
|
12774592
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
from pymongo import MongoClient
from config import settings
client = MongoClient(settings.MONGO.get('uri'))
models = client.get_database()
from .base import BaseModel
# Brand
brand = models.brands
brand.__class__ = BaseModel
# car_product
car_product = models.carproducts
car_product.__class__ = BaseModel
# merchant
merchant = models.merchants
merchant.__class__ = BaseModel
# car_offer
car_offer = models.caroffers
car_offer.__class__ = BaseModel
# used_car
used_car = models.usedcars
used_car.__class__ = BaseModel
# used_car_offer
used_car_offer = models.usedcaroffers
used_car_offer.__class__ = BaseModel
# car_change_plan
car_change_plan = models.carchangeplans
car_change_plan.__class__ = BaseModel
# order
order = models.orders
order.__class__ = BaseModel
# wx_order
wx_order = models.wxorders
wx_order.__class__ = BaseModel
# pos_order
pos_order = models.posorders
pos_order.__class__ = BaseModel
# user
user = models.users
user.__class__ = BaseModel
# token
token = models.tokens
token.__class__ = BaseModel
# act_entity
act_entity = models.actentities
act_entity.__class__ = BaseModel
# link_action
link_action = models.linkactions
link_action.__class__ = BaseModel
# act_share_detail
act_share_detail = models.actsharedetails
act_share_detail.__class__ = BaseModel
# car_product_map
car_product_map = models.carproductmaps
car_product_map.__class__ = BaseModel
# car_offer_external
car_offer_external = models.carofferexternals
car_offer_external.__class__ = BaseModel
# car_offer_history
car_offer_history = models.carofferhistories
car_offer_history.__class__ = BaseModel
# impression_track
impression_track = models.impressiontracks
impression_track.__class__ = BaseModel
# intents
intent = models.intents
intent.__class__ = BaseModel
# car_product_estimation
car_product_estimation = models.carproductestimations
car_product_estimation.__class__ = BaseModel
| 1.914063
| 2
|
pan15_compliant/temp/quick_read.py
|
ivan-bilan/author-profiling-pan-2016
| 6
|
12774593
|
<reponame>ivan-bilan/author-profiling-pan-2016
import glob
import ntpath
import re
import codecs
from nltk.tokenize import word_tokenize, sent_tokenize
from unidecode import unidecode
import pickle
import cPickle
from time import sleep
from xml.dom import minidom
from nltk.stem.porter import *
import treetaggerwrapper
from bs4 import BeautifulSoup
from itertools import tee
import HTMLParser
# pip install http://pypi.python.org/packages/source/h/htmllaundry/htmllaundry-2.0.tar.gz
from htmllaundry import strip_markup
from pylab import *
def get_a_genre_mod_dev():
main_dictionary = dict()
number_of_files = 0
tmp_list = list()
author_id = 33
count_same_text_file = 0
filename = "C:/test_small/mini_dutch/12103872.xml"
try:
xmldoc = minidom.parse(filename)
itemlist = xmldoc.getElementsByTagName('document')
if len(itemlist) > 0:
documents_per_author = 0
bool_tester = 0
for s in itemlist:
# print s.childNodes
number_of_files += 1
documents_per_author += 1
try:
# get CDATA
for node in s.childNodes:
# print node.nodeType
if node.nodeType == 4 or node.nodeType == 3:
# print "Getting the CDATA element of each author document"
text_inner = node.data.strip()
print text_inner
try:
inner_soup = BeautifulSoup(text_inner, "lxml")
# print inner_soup.get_text()
# print
except Exception as e:
print filename
print e
if (len(inner_soup.get_text()) > 0):
if author_id in main_dictionary:
pass
else:
main_dictionary[author_id] = dict()
if 'documents' in main_dictionary[author_id]:
current_document_sample = inner_soup.get_text()
if current_document_sample in main_dictionary[author_id]['documents']:
count_same_text_file+=1
'''
print
print author_id
print main_dictionary[author_id]['documents']
print current_document_sample
sleep(100)
'''
pass
else:
main_dictionary[author_id]['documents'].append(current_document_sample)
else:
main_dictionary[author_id]['documents'] = [inner_soup.get_text()]
bool_tester = 1
else:
print "Error! The text sample is empty or couldn't read CDATA. Skipping to next one."
except Exception as e:
print "Error! Failed to read a file."
print filename
print e
pass
tmp_list.append(documents_per_author)
except Exception as e:
print
print "Error! Couldn't read current text sample. Skipping to the next one."
print "Error message: ", e
print "Error occured in file: ", filename
print
pass
try:
average_blogs_per_author = float(sum(tmp_list))/len(tmp_list) if len(tmp_list) > 0 else float('nan')
except:
average_blogs_per_author = 'nan'
# print len(current_truth), number_of_files, average_blogs_per_author
# print len(main_dictionary)
# for key, value in main_dictionary.iteritems():
# print key, value
# print "Found duplicates: ", count_same_text_file
print main_dictionary
print len(main_dictionary[33]['documents'])
get_a_genre_mod_dev()
| 2.4375
| 2
|
bin/__init__.py
|
eurica/splunk_app_pagerduty
| 2
|
12774594
|
<reponame>eurica/splunk_app_pagerduty
#!/usr/bin/env python
"""Splunk App for Pagerduty."""
__author__ = '<NAME> <<EMAIL>>'
__copyright__ = 'Copyright 2014 OnBeep, Inc.'
__license__ = 'Apache License, Version 2.0'
from .pagerduty import (PagerDutyException, PagerDuty, extract_events, # NOQA
trigger_pagerduty, get_pagerduty_api_key)
| 1.132813
| 1
|
arp_spoof_detector.py
|
goodoldie/Stalker
| 0
|
12774595
|
<reponame>goodoldie/Stalker
import scapy.all as scapy
import os
import sys
import netifaces
def become_root():
euid = os.geteuid()
if euid != 0:
print("Script not started as root. Running sudo..")
args = ['sudo', sys.executable] + sys.argv + [os.environ]
os.execlpe('sudo', *args)
    print('Running. Your euid is', euid)
print("---------------------------------------------------------")
def get_mac(ip):
arp_request = scapy.ARP(pdst=ip)
broadcast_frame = scapy.Ether(dst="ff:ff:ff:ff:ff:ff")
arp_ether_broadcast = broadcast_frame / arp_request
answered_list = scapy.srp(arp_ether_broadcast, timeout=1, verbose=False)[0] # we need only answered list
return answered_list[0][1].hwsrc
def sniff(interface):
scapy.sniff(iface=interface, store=False, prn=processed_sniffed_packet)
def processed_sniffed_packet(packet):
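    # op == 2 marks an ARP reply ("is-at"); cross-check the MAC claimed in the
    # reply against the MAC learned by actively ARP-querying the sender's IP.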
if packet.haslayer(scapy.ARP) and packet[scapy.ARP].op == 2:
try:
real_mac = get_mac(packet[scapy.ARP].psrc)
response_mac = packet[scapy.ARP].hwsrc
if real_mac != response_mac:
print("[+] !!!!!!!You are under attack!!!!!")
except IndexError:
# unable to find the real mac
pass
def run_spoof_detector():
become_root()
interfaces = netifaces.interfaces()
print("Availabe Interfaces :")
print(interfaces)
interface = input("Enter the interface ")
if interface not in interfaces:
print("Pleas Enter a valid Interface!!")
else:
print("ARP Spoof Detector ON!!!")
sniff(interface)
| 2.46875
| 2
|
list_ec2_publicly_accessible/check-ec2-publicly-exposed.py
|
trackit/audit-tools
| 0
|
12774596
|
import boto3
import botocore
import csv
import pprint
import argparse
import ConfigParser
import os
import re
def get_ec2_name(tags):
for tag in tags:
if tag['Key'] == 'Name':
return tag['Value']
return 'NA'
def get_ec2_sg(sgs):
res = []
for sg in sgs:
res.append(sg['GroupId'])
return res
def check_if_exposed(sg):
for r in sg['IpRanges']:
if r['CidrIp'] == '0.0.0.0/0':
return True
return False
def get_port_exposed(client, region, sgs):
res = []
response = client.describe_security_groups(GroupIds=sgs)
for sg in response['SecurityGroups']:
for permission in sg['IpPermissions']:
if check_if_exposed(permission):
if 'FromPort' in permission and 'ToPort' in permission:
protocol = permission['IpProtocol']
if protocol == "-1":
protocol = "all"
port = "{0}/{1}".format(protocol, str(permission["FromPort"]))
if permission['FromPort'] != permission['ToPort']:
port = "{0}/{1}-{2}".format(protocol, str(permission['FromPort']), str(permission['ToPort']))
res.append(port)
return get_ports_formatted(res)
def get_tags_formatted(tags):
if tags == "":
return ""
res = ""
for elem in tags:
res += "{}={}, ".format(elem["Key"], elem["Value"])
return res[:-2]
def get_ports_formatted(ports):
res = ""
for elem in ports:
res += "{}, ".format(elem)
if len(res) == 0:
return res
return res[:-2]
def get_sg_list_formatted(sg):
res = ""
for elem in sg:
res += "{}, ".format(elem)
if len(res) == 0:
return res
return res[:-2]
def get_ec2_ips(session, regions, account):
res = []
for region in regions:
client = session.client('ec2', region_name=region)
reservations = client.describe_instances()
for reservation in reservations['Reservations']:
for instance in reservation['Instances']:
ip_list = []
add_to_list = False
for interface in instance['NetworkInterfaces']:
for address in interface['PrivateIpAddresses']:
if 'Association' in address:
ip_list.append(address['Association']['PublicIp'])
add_to_list = True
if add_to_list:
sg_list = get_ec2_sg(instance['SecurityGroups'])
res += [
{
'account': account,
'service': 'ec2',
'region': region,
'name': get_ec2_name(instance['Tags']) if 'Tags' in instance else "",
'tags': get_tags_formatted(instance['Tags']) if 'Tags' in instance else "",
'ip_addresses': ip_list[0],
'sg': get_sg_list_formatted(sg_list),
'port_exposed': get_port_exposed(client, region, sg_list)
}
]
return res
def get_regions(session):
client = session.client('ec2')
regions = client.describe_regions()
return [
region['RegionName']
for region in regions['Regions']
]
def generate_csv(data, args, header_name):
filename = "report.csv"
if args['o']:
filename = args['o']
with open(filename, 'wb') as file:
writer = csv.DictWriter(file, header_name)
writer.writeheader()
for row in data:
writer.writerow(row)
def init():
config_path = os.environ.get('HOME') + "/.aws/credentials"
parser = ConfigParser.ConfigParser()
parser.read(config_path)
if parser.sections():
return parser.sections()
return []
def main():
data = []
parser = argparse.ArgumentParser(description="Analyse reserved instances")
parser.add_argument("--profile", nargs="+", help="Specify AWS profile(s) (stored in ~/.aws/credentials) for the program to use")
parser.add_argument("-o", nargs="?", help="Specify output csv file")
parser.add_argument("--profiles-all", nargs="?", help="Run it on all profile")
aws_access_key = os.environ.get('AWS_ACCESS_KEY_ID')
aws_secret_key = os.environ.get('AWS_SECRET_ACCESS_KEY')
aws_region = os.environ.get('AWS_DEFAULT_REGION')
args = vars(parser.parse_args())
if 'profiles-all' in args:
keys = init()
elif 'profile' in args and args['profile']:
keys = args['profile']
else:
keys = init()
for key in keys:
print 'Processing %s...' % key
try:
if aws_access_key and aws_secret_key and aws_region:
session = boto3.Session(aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key, region_name=aws_region)
else:
session = boto3.Session(profile_name=key)
regions = get_regions(session)
data += get_ec2_ips(session, regions, key)
except botocore.exceptions.ClientError, error:
print error
pprint.pprint(data)
generate_csv(data, args, ['account', 'service', 'name', 'region', 'ip_addresses', 'sg', 'port_exposed', 'tags'])
if __name__ == '__main__':
main()
| 2.609375
| 3
|
scripts.py
|
guindosaros/tutotravis
| 0
|
12774597
|
import os
import platform
import subprocess
# # Create the virtual environment (venv) and activate it
version = subprocess.run(["python", "--version"], capture_output=True, text=True)
if "python 3" in version.stdout.lower():
python = "python"
else:
python = "python3"
createvenv = os.system(f"{python} -m venv venv")
if platform.system() == 'Windows':
activatevenv = os.system("source venv/Scripts/activate && pip install django")
else:
activatevenv = os.system("source venv/bin/activate && pip install django")
nomprojet = input('Entrez le nom de votre projet django : ')
activatevenv = os.system(f"source venv/bin/activate && django-admin startproject {nomprojet}")
creationrequirement = os.system(f"source venv/bin/activate && cd {nomprojet} && pip freeze >> requirements.txt")
| 2.390625
| 2
|
RIT33PYT/Codigo_1/Guide/13_Funciones_genericas_1.py
|
Sbastdia/Ejercicios-probables-examen
| 0
|
12774598
|
"""
Ejercicio: hacer un juego "Guess The number"
PARTE 1: Pedir al usuario que introduzca un número entre 0 y 100
PARTE 2: Adivinar el número por parte del usuario
Usar una función para capitalizar el código común
"""
MIN = 0
MAX = 99
def solicitar_introducir_numero(invite):
    # Complete the prompt:
    invite += " entre " + str(MIN) + " y " + str(MAX) + " incluídos: "
    while True:
        # We enter an infinite loop
        # Ask the user to enter a number
        datoIntroducido = input(invite)
        try:
            datoIntroducido = int(datoIntroducido)
        except:
            pass
        else:
            # Do the comparison
            if MIN <= datoIntroducido <= MAX:
                # We got what we wanted, exit the loop
                break
    return datoIntroducido
# PART 1
numero = solicitar_introducir_numero("Introduzca el número a adivinar")
# PART 2
while True:
    # We enter an infinite loop
    # that allows playing several rounds
    intento = solicitar_introducir_numero("Adivine el número")
    # Check whether the guess is correct
if intento < numero:
print("Demasiado pequeño")
elif intento > numero:
print("Demasiado grande")
else:
print("Victoria!")
break
| 3.953125
| 4
|
IOProgram/pickling.py
|
zhaoyu69/python3-learning
| 1
|
12774599
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Serialization
# import pickle
# d = dict(name='Bob', age=20, score=88)
# print(pickle.dumps(d))
# f = open('dump.txt', 'wb')
# pickle.dump(d, f)
# f.close()
# f = open('dump.txt', 'rb')
# d = pickle.load(f)
# f.close()
# print(d)
# JSON
# import json
# d = dict(name='Bob', age=20, score=88)
# print(json.dumps(d)) # <class 'str'>
#
# json_str = '{"age": 20, "score": 88, "name": "Bob"}'
# print(json.loads(json_str)) # <class 'dict'>
# Advanced JSON
import json
# class Student(object):
# def __init__(self, name, age, score):
# self.name = name
# self.age = age
# self.score = score
#
# s = Student('Bob', 20, 88)
# print(json.dumps(s)) # TypeError
# class -> {}
# def student2dict(std):
# return {
# 'name': std.name,
# 'age': std.age,
# 'score': std.score
# }
#
# print(json.dumps(s, default=student2dict))
# A class's __dict__ is a dict that stores its instance attributes
# print(json.dumps(s, default=lambda obj: obj.__dict__))
# loads() also needs a conversion function
# def dict2student(d):
# return Student(d['name'], d['age'], d['score'])
#
# json_str = '{"age": 20, "score": 88, "name": "Bob"}'
# print(json.loads(json_str, object_hook=dict2student))  # the deserialized Student instance
# test
import json
obj = dict(name='小明', age=20)
s = json.dumps(obj, ensure_ascii=False)
print(s)
| 3.84375
| 4
|
py/py_0134_prime_pair_connection.py
|
lcsm29/project-euler
| 0
|
12774600
|
# Solution of;
# Project Euler Problem 134: Prime pair connection
# https://projecteuler.net/problem=134
#
# Consider the consecutive primes p1 = 19 and p2 = 23. It can be verified that
# 1219 is the smallest number such that the last digits are formed by p1
# whilst also being divisible by p2. In fact, with the exception of p1 = 3 and
# p2 = 5, for every pair of consecutive primes, p2 > p1, there exist values of
# n for which the last digits are formed by p1 and n is divisible by p2. Let S
# be the smallest of these values of n. Find ∑ S for every pair of consecutive
# primes with 5 ≤ p1 ≤ 1000000.
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
pass
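# Hedged sketch (not the original author's solution, which is elided behind
# `dummy` and `timed.caller`): for one pair of consecutive primes (p1, p2),
# the smallest n whose last digits are p1 and which is divisible by p2 follows
# from a modular inverse. Requires Python 3.8+ for the three-argument pow().
def smallest_connection(p1, p2):
    m = 10 ** len(str(p1))          # smallest power of ten exceeding p1
    # Solve p1 + k*m ≡ 0 (mod p2)  =>  k ≡ -p1 * m^(-1) (mod p2)
    k = (-p1 * pow(m, -1, p2)) % p2
    return p1 + k * m               # e.g. smallest_connection(19, 23) == 1219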
if __name__ == '__main__':
n = 1000
i = 10000
prob_id = 134
timed.caller(dummy, n, i, prob_id)
| 3.171875
| 3
|
projects/gmc705/week1/demo_simple_pendulum_multiple_controller_options.py
|
echoix/pyro
| 0
|
12774601
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 16 12:05:08 2018
@author: Alexandre
"""
###############################################################################
import numpy as np
###############################################################################
from pyro.dynamic import pendulum
from pyro.control import nonlinear
from pyro.control import robotcontrollers
from pyro.planning import plan
from pyro.analysis import simulation
###############################################################################
sys = pendulum.SinglePendulum()
###############################################################################
# Planning
traj = plan.load_trajectory('rrt.npy')
q_goal = np.array([-3.14])
###############################################################################
# P
kp = 5
kd = 0
ki = 0
p_ctl = robotcontrollers.JointPID( 1 , kp , ki, kd)
p_ctl.rbar = q_goal
# PD
kp = 5
kd = 2
ki = 0
pd_ctl = robotcontrollers.JointPID( 1 , kp , ki, kd)
pd_ctl.rbar = q_goal
# PID
kp = 5
kd = 2
ki = 1
pid_ctl = robotcontrollers.JointPID( 1 , kp , ki, kd)
pid_ctl.rbar = q_goal
# Computed Torque
ctc_ctl = nonlinear.ComputedTorqueController( sys )
ctc_ctl.rbar = q_goal
ctc_ctl.w0 = 2.0
ctc_ctl.zeta = 0.8
# Sliding Mode
sld_ctl = nonlinear.SlidingModeController( sys )
sld_ctl.lam = 1
sld_ctl.gain = 5
sld_ctl.rbar = q_goal
# OpenLoop with traj
traj_ctl = plan.OpenLoopController( traj )
# Computed Torque with traj
traj_ctc_ctl = nonlinear.ComputedTorqueController( sys , traj )
traj_ctc_ctl.rbar = q_goal
traj_ctc_ctl.w0 = 2.0
traj_ctc_ctl.zeta = 0.8
# Sliding Mode with traj
traj_sld_ctl = nonlinear.SlidingModeController( sys , traj )
traj_sld_ctl.lam = 1
traj_sld_ctl.gain = 5
traj_sld_ctl.rbar = q_goal
###############################################################################
# Controller selection
#ctl = p_ctl
#ctl = pd_ctl
#ctl = pid_ctl
#ctl = ctc_ctl
#ctl = sld_ctl
#ctl = traj_ctl
#ctl = traj_ctc_ctl
ctl = traj_sld_ctl
###############################################################################
# New cl-dynamic
cl_sys = ctl + sys
# Simultation
q0 = 0
tf = 10
cl_sys.sim = simulation.CLosedLoopSimulation( cl_sys , tf , tf * 1000 + 1 , 'euler' )
cl_sys.sim.x0 = np.array([q0,0])
cl_sys.sim.compute()
cl_sys.sim.plot('xu')
cl_sys.animate_simulation()
cl_sys.sim.phase_plane_trajectory(0,1)
| 2.625
| 3
|
ofb/datasource.py
|
openfoodbroker/pynutrition
| 3
|
12774602
|
"""Abstract datasource for recipes"""
from os import path
import json
class Datasource:
"""foo"""
def __init__(self, data):
self.name = data["name"]
self.text = data["text"]
self.url = data["url"]
self.local = data["local"]
def validate(self):
"""Prove that data source is valid"""
if path.exists(self.local) is False:
errmsg_part1 = "missing local data source file %s;"
errmsg_part2 = "please download it from %s"
err = errmsg_part1 + errmsg_part2
raise Exception(err % (self.local, self.url))
return True
def load_local_json(self):
"""Load json structure from file"""
json_object = json.load(open(self.local))
return json_object
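# Illustrative usage (hypothetical field values, not part of the original
# module): describe a data source backed by a local JSON file and load it.
if __name__ == "__main__":
    source = Datasource({
        "name": "example-nutrients",
        "text": "Example nutrient database",
        "url": "https://example.org/nutrients.json",
        "local": "nutrients.json",  # hypothetical local file
    })
    try:
        source.validate()
        print(source.load_local_json())
    except Exception as err:
        print(err)  # points at the download URL if the local file is missing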
| 3.328125
| 3
|
main.py
|
justinhchae/gis_project
| 0
|
12774603
|
import geoplot as gplt
import geopandas as gpd
import matplotlib.pyplot as plt
import pandas as pd
from src import constants
# data from cook county open data and us census
township_boundaries = gpd.read_file(constants.TOWNSHIP_POLYGONS_GEOJSON)
zip_code_data = gpd.read_file(constants.TOWNSHIP_POINTS_GEOJSON)
# call geoplot plot on polygons as ax
ax = gplt.polyplot(township_boundaries)
# plot point data with same axis, ax
gplt.pointplot(zip_code_data,s=1, ax=ax)
# set title
plt.title("Example Plot: IL State-wide Zip Codes and Cook Count Township Boundaries")
# display plot
plt.show()
| 3.1875
| 3
|
util/utility.py
|
Isaac-Li-cn/certify_robustness
| 0
|
12774604
|
<reponame>Isaac-Li-cn/certify_robustness
import torch
import torch.nn as nn
import numpy as np
def reduced_m_bm(m1, m2):
'''
>>> merge a new constant transformation with a batch transformation
>>> m1: tensor of shape [dim1, dim2]
>>> m2: tensor of shape [batch_size, dim2] or [batch_size, dim2, dim3]
'''
assert len(m1.shape) == 2, 'The dim of m1 should be 2.'
dim2 = len(m2.shape)
if dim2 == 2:
mbm = m1.unsqueeze(0) * m2.unsqueeze(1)
elif dim2 == 3:
mbm = torch.matmul(m1, m2)
else:
raise ValueError('The dim of m2 should be either 2 or 3.')
return mbm
def reduced_bm_m(m1, m2):
'''
>>> merge a batch transformation with a new constant transformation
>>> m1: tensor of shape [batch_size, dim2] or [batch_size, dim1, dim2]
>>> m2: tensor of shape [dim2, dim3]
'''
assert len(m2.shape) == 2, 'The dim of m2 should be 2.'
dim1 = len(m1.shape)
if dim1 == 2:
bmm = m1.unsqueeze(2) * m2.unsqueeze(0)
elif dim1 == 3:
bmm = torch.matmul(m1, m2)
else:
raise ValueError('The dim of m1 should be either 2 or 3.')
return bmm
def reduced_bm_bm(m1, m2):
'''
    >>> merge a batch transformation with a new batch of transformations
>>> m1: tensor of shape [batch_size, dim2] or [batch_size, dim1, dim2]
>>> m2: tensor of shape [batch_size, dim2] or [batch_size, dim2, dim3]
'''
dim1 = len(m1.shape)
dim2 = len(m2.shape)
if (dim1, dim2) == (2, 2):
bmbm = m1 * m2
elif (dim1, dim2) == (2, 3):
bmbm = m1.unsqueeze(2) * m2
elif (dim1, dim2) == (3, 2):
bmbm = m1 * m2.unsqueeze(1)
elif (dim1, dim2) == (3, 3):
bmbm = torch.matmul(m1, m2)
else:
raise ValueError('The dim of m1 and m2 should be either 2 or 3.')
return bmbm
def reduced_bv_bm(m1, m2):
'''
>>> merge a batch of values with a batch of transformation
>>> m1: tensor of shape [batch_size, dim1]
>>> m2: tensor of shape [batch_size, dim1] or [batch_size, dim1, dim2]
'''
assert len(m1.shape) == 2, 'The dim of m1 should be 2.'
dim2 = len(m2.shape)
if dim2 == 2:
bvbm = m1 * m2
elif dim2 == 3:
        bvbm = torch.matmul(m1.unsqueeze(1), m2).squeeze(1)  # [batch, 1, dim1] @ [batch, dim1, dim2] -> [batch, dim2]
else:
raise ValueError('The dim of m2 should be either 2 or 3.')
return bvbm
def reduced_bm_bv(m1, m2):
'''
>>> merge a batch of transformation with a batch of values
>>> m1: tensor of shape [batch_size, dim2] or [batch_size, dim1, dim2]
>>> m2: tensor of shape [batch_size, dim2]
'''
assert len(m2.shape) == 2, 'The dim of m2 should be 2.'
dim1 = len(m1.shape)
if dim1 == 2:
bmbv = m1 * m2
elif dim1 == 3:
bmbv = torch.matmul(m1, m2.unsqueeze(2)).squeeze(2)
else:
raise ValueError('The dim of m1 should be either 2 or 3.')
return bmbv
def quad_bound_calc(W_list, m1_list, m2_list, ori_perturb_norm = None, ori_perturb_eps = None):
'''
>>> W_list, m1_list, m2_list: The transition matrix, lower bound input and upper bound input.
>>> ori_perturb_norm: float, the norm of initial perturbation
>>> ori_perturb_eps: tensor of shape [batch_size, in_dim]
'''
up_bound = 0.
low_bound = 0.
    if ori_perturb_norm is not None:
primal_norm = ori_perturb_norm
dual_norm = 1. / (1. - 1. / primal_norm)
# print(W_list[0].size())
# print(ori_perturb_eps.shape)
up_bound = torch.norm(W_list[0] * ori_perturb_eps.unsqueeze(1), dim = 2, p = dual_norm) # of shape [batch_size, out_dim]
low_bound = - up_bound
for W, m1, m2 in zip(W_list, m1_list, m2_list):
W_neg = torch.clamp(W, max = 0.)
W_pos = torch.clamp(W, min = 0.)
up_bound = up_bound + reduced_bm_bv(W_pos, m2) + reduced_bm_bv(W_neg, m1)
low_bound = low_bound + reduced_bm_bv(W_pos, m1) + reduced_bm_bv(W_neg, m2)
return low_bound, up_bound
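# Illustrative shape check (an assumed usage example, not part of the original
# module): a constant [3, 2] transform merged with a batch of 5 transforms.
if __name__ == '__main__':
    m1 = torch.randn(3, 2)
    m2_diag = torch.randn(5, 2)               # batch of diagonal transforms
    print(reduced_m_bm(m1, m2_diag).shape)    # torch.Size([5, 3, 2])
    m2_full = torch.randn(5, 2, 4)            # batch of full transforms
    print(reduced_m_bm(m1, m2_full).shape)    # torch.Size([5, 3, 4])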
| 2.9375
| 3
|
src/mlflow_turing_scoring_server/scoring_server/wsgi.py
|
jose-turintech/mlflow-turing-scoring-server
| 0
|
12774605
|
import os
from mlflow_turing_scoring_server import scoring_server
from mlflow.pyfunc import load_model
app = scoring_server.init(load_model(os.environ[scoring_server._SERVER_MODEL_PATH]))
| 1.492188
| 1
|
data_processing/eccv2020-sharp-workshop/setup.py
|
jchibane/if-net_texture
| 42
|
12774606
|
import setuptools
with open("readme.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="sharpcvi2",
version="1.0.0",
author="CVI2: Computer Vision, Imaging and Machine Intelligence Research Group",
author_email="<EMAIL>",
description="Routines for the SHARP Challenge, ECCV 2020",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://cvi2.uni.lu/sharp2020/",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
| 1.367188
| 1
|
ImageTracker/ImageTrackerTest.py
|
Engin-Boot/transfer-images-s1b13
| 0
|
12774607
|
<reponame>Engin-Boot/transfer-images-s1b13<gh_stars>0
import unittest
import ImageTracker
class TestImageTracker(unittest.TestCase):
def test_when_client_calls_with_valid_csv_then_print_CSV_data(self):
testsamplefilename = 'temp-samples-test.csv'
with open(testsamplefilename, 'w') as samplefile:
samplefile.write(
"""ImageFileName,Status
0.img,Diagnosis Pending
1.img,Diagnosis Complete
2.img,Diagnosis Pending""")
x=ImageTracker.showCSVData(testsamplefilename)
self.assertEqual(x,"CSV data printed successfully")
def test_when_client_updates_Diagnosis_status_Pending_then_DiagnosisPending_is_set(self):
testsamplefilename = 'temp-samples-test.csv'
with open(testsamplefilename, 'w') as samplefile:
samplefile.write(
"""ImageFileName,Status
0.img,Diagnosis Pending
1.img,Diagnosis Pending
2.img,Diagnosis Pending""")
imageName="0.img"
updated_status="Pending" # Pending for Diagnosis Pending
self.assertTrue(ImageTracker.update_ImageStatus(testsamplefilename,imageName,updated_status)=="Status updated successfully")
def test_when_client_updates_Diagnosis_status_Pending_then_DiagnosisCompleted_is_set(self):
testsamplefilename = 'temp-samples-test.csv'
with open(testsamplefilename, 'w') as samplefile:
samplefile.write(
"""ImageFileName,Status
0.img,Diagnosis Pending
1.img,Diagnosis Pending
2.img,Diagnosis Pending""")
imageName="1.img"
updated_status="Completed" # Complete for Diagnosis Complete
self.assertTrue(ImageTracker.update_ImageStatus(testsamplefilename,imageName,updated_status)=="Status updated successfully")
def test_when_new_file_is_updated_then_it_gets_added_to_csv_with_Pending_Status(self):
testsamplefilename = 'temp-samples-test.csv'
with open(testsamplefilename, 'w') as samplefile:
samplefile.write(
"""ImageFileName,Status
0.img,Diagnosis Pending
1.img,Diagnosis Complete
2.img,Diagnosis Pending""")
new_file="3.img"
ImageTracker.addNewFileNameToCsv(testsamplefilename,new_file)
print("File Added successfully")
x=ImageTracker.showCSVData(testsamplefilename)
self.assertEqual(x,"CSV data printed successfully")
if __name__ == '__main__':
unittest.main()
| 2.625
| 3
|
data/data_stage3.py
|
wswdx/C2F-FWN
| 39
|
12774608
|
<filename>data/data_stage3.py
import os.path
import torchvision.transforms as transforms
import torch
from PIL import Image
import numpy as np
from data.base_dataset import BaseDataset, get_img_params, get_transform, get_transform_fixed, get_video_params, concat_frame
from data.image_folder import make_grouped_dataset, check_path_valid
from data.keypoint2img import read_keypoints
# dataset for the Composition GAN of stage 3
class ComposerDataset(BaseDataset):
def initialize(self, opt):
self.opt = opt
self.root = opt.dataroot
self.dir_tparsing = os.path.join(opt.dataroot, opt.phase + '_parsing/target')
self.dir_timg = os.path.join(opt.dataroot, opt.phase + '_img/target')
self.dir_spose = os.path.join(opt.dataroot, opt.phase + '_pose/source')
self.dir_sparsing = os.path.join(opt.dataroot, opt.phase + '_parsing/source')
self.dir_sfg = os.path.join(opt.dataroot, opt.phase + '_fg/source')
self.dir_simg = os.path.join(opt.dataroot, opt.phase + '_img/source')
self.dir_bg = os.path.join(opt.dataroot, opt.phase + '_bg')
self.tparsing_paths = sorted(make_grouped_dataset(self.dir_tparsing))
self.timg_paths = sorted(make_grouped_dataset(self.dir_timg))
self.spose_paths = sorted(make_grouped_dataset(self.dir_spose))
self.sparsing_paths = sorted(make_grouped_dataset(self.dir_sparsing))
self.sfg_paths = sorted(make_grouped_dataset(self.dir_sfg))
self.simg_paths = sorted(make_grouped_dataset(self.dir_simg))
self.init_frame_idx_composer(self.simg_paths)
def __getitem__(self, index):
TParsing, TFG, SPose, SParsing, SFG, SFG_full, BG, BG_flag, SI, seq_idx = self.update_frame_idx_composer(self.simg_paths, index)
simg_paths = self.simg_paths[seq_idx]
n_frames_total, start_idx, t_step = get_video_params(self.opt, self.n_frames_total, len(simg_paths), self.frame_idx)
simg = Image.open(simg_paths[start_idx]).convert('RGB')
size = simg.size
        BigSizeFlag = size[0] / size[1] > 1
if BigSizeFlag:
params = get_img_params(self.opt, (1920,1080))
else:
params = get_img_params(self.opt, size)
tparsing_path = self.tparsing_paths[seq_idx][0]
timg_path = self.timg_paths[seq_idx][0]
video_name = timg_path[timg_path.rfind('video'):timg_path.rfind('/timg')]
bg_path = self.dir_bg + '/' + video_name + '.jpg'
BG_i, BG_flag = self.get_bg_image(bg_path, size, params, BigSizeFlag)
TParsing, TFG = self.get_TImage(tparsing_path, timg_path, size, params, BigSizeFlag)
TParsing, TFG = self.crop(TParsing), self.crop(TFG)
frame_range = list(range(n_frames_total)) if (self.opt.isTrain or self.TPose is None) else [self.opt.n_frames_G-1]
for i in frame_range:
simg_path = simg_paths[start_idx + i * t_step]
sfg_path = self.sfg_paths[seq_idx][start_idx + i * t_step]
spose_path = self.spose_paths[seq_idx][start_idx + i * t_step]
sparsing_path = self.sparsing_paths[seq_idx][start_idx + i * t_step]
SPose_i, SParsing_i, SFG_i, SFG_full_i, SI_i = self.get_SImage(spose_path, sparsing_path, sfg_path, simg_path, size, params, BigSizeFlag)
SParsing_i = self.crop(SParsing_i)
SFG_i = self.crop(SFG_i)
SPose_i, SFG_full_i, SI_i = self.crop(SPose_i), self.crop(SFG_full_i), self.crop(SI_i)
SPose = concat_frame(SPose, SPose_i, n_frames_total)
SParsing = concat_frame(SParsing, SParsing_i, n_frames_total)
SFG = concat_frame(SFG, SFG_i, n_frames_total)
SFG_full = concat_frame(SFG_full, SFG_full_i, n_frames_total)
SI = concat_frame(SI, SI_i, n_frames_total)
BG = concat_frame(BG, BG_i, n_frames_total)
if not self.opt.isTrain:
self.TParsing, self.TFG, self.SPose, self.SParsing, self.SFG, self.SFG_full, self.BG, self.BG_flag, self.SI = TParsing, TFG, SPose, SParsing, SFG, SFG_full, BG, BG_flag, SI
self.frame_idx += 1
change_seq = False if self.opt.isTrain else self.change_seq
return_list = {'TParsing': TParsing, 'TFG': TFG, 'SPose': SPose, 'SParsing': SParsing, 'SFG': SFG, 'SFG_full': SFG_full, 'BG': BG, 'BG_flag': BG_flag, 'SI': SI, 'A_path': simg_path, 'change_seq': change_seq}
return return_list
def get_bg_image(self, bg_path, size, params, BigSizeFlag):
if os.path.exists(bg_path):
BG = Image.open(bg_path).convert('RGB')
if BigSizeFlag:
transform_scale = get_transform(self.opt, params, normalize=True, method=Image.BICUBIC, color_aug=self.opt.color_aug)
else:
transform_scale = get_transform_fixed(self.opt, params, normalize=True, method=Image.BICUBIC, color_aug=self.opt.color_aug)
BG_scaled = transform_scale(BG)
BG_scaled = self.crop(BG_scaled)
BG_flag = True
else:
BG_scaled = -torch.ones(3, 256, 192)
BG_flag = False
return BG_scaled, BG_flag
def get_SImage(self, spose_path, sparsing_path, sfg_path, simg_path, size, params, BigSizeFlag):
SI = Image.open(simg_path).convert('RGB')
if SI.size != (1920,1080) and BigSizeFlag:
SI = SI.resize((1920,1080), Image.BICUBIC)
elif not BigSizeFlag:
SI = SI.resize((192,256), Image.BICUBIC)
SFG_np = np.array(SI)
SFG_full_np = np.array(SI)
random_drop_prob = self.opt.random_drop_prob if self.opt.isTrain else 0
SPose_array, _ = read_keypoints(spose_path, size, random_drop_prob, self.opt.remove_face_labels, self.opt.basic_point_only)
SPose = Image.fromarray(SPose_array)
if SPose.size != (1920,1080) and BigSizeFlag:
SPose = SPose.resize((1920,1080), Image.NEAREST)
elif not BigSizeFlag:
SPose = SPose.resize((192,256), Image.NEAREST)
SPose_np = np.array(SPose)
SParsing = Image.open(sparsing_path)
SParsing_size = SParsing.size
if SParsing_size != (1920,1080) and SParsing_size != (192,256) and BigSizeFlag:
SParsing = SParsing.resize((1920,1080), Image.NEAREST)
elif not BigSizeFlag and SParsing_size != (192,256):
SParsing = SParsing.resize((192,256), Image.NEAREST)
SParsing_np = np.array(SParsing)
if SParsing_size == (192,256):
SParsing_new_np = SParsing_np
else:
SParsing_new_np = np.zeros_like(SParsing_np)
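            # Merge the fine-grained human-parsing labels into the reduced label
            # set used by stage 3 (presumably LIP-style labels for clothes, hair,
            # face, arms, legs and shoes); the exact semantics are an assumption.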
SParsing_new_np[(SParsing_np == 3) | (SParsing_np == 5) | (SParsing_np == 6) | (SParsing_np == 7) | (SParsing_np == 11)] = 1
SParsing_new_np[(SParsing_np == 8) | (SParsing_np == 9) | (SParsing_np == 12)] = 2
SParsing_new_np[(SParsing_np == 1) | (SParsing_np == 2)] = 3
SParsing_new_np[(SParsing_np == 4) | (SParsing_np == 13)] = 4
SParsing_new_np[(SParsing_np == 14)] = 5
SParsing_new_np[(SParsing_np == 15)] = 6
SParsing_new_np[(SParsing_np == 16)] = 7
SParsing_new_np[(SParsing_np == 17)] = 8
SParsing_new_np[(SParsing_np == 10)] = 9
SParsing_new_np[(SParsing_np == 18)] = 10
SParsing_new_np[(SParsing_np == 19)] = 11
if BigSizeFlag:
transform_scale = get_transform(self.opt, params, normalize=True, method=Image.NEAREST, color_aug=False)
else:
transform_scale = get_transform_fixed(self.opt, params, normalize=True, method=Image.NEAREST, color_aug=False)
SPose_scaled = transform_scale(Image.fromarray(SPose_np))
SParsing_new = Image.fromarray(SParsing_new_np)
if SParsing_size != (192,256) and BigSizeFlag:
transform_scale = get_transform(self.opt, params, normalize=False, method=Image.NEAREST, color_aug=False)
else:
transform_scale = get_transform_fixed(self.opt, params, normalize=False, method=Image.NEAREST, color_aug=False)
SParsing_scaled = transform_scale(SParsing_new)*255.0
if BigSizeFlag:
transform_scale = get_transform(self.opt, params, normalize=True, method=Image.BICUBIC, color_aug=self.opt.color_aug)
else:
transform_scale = get_transform_fixed(self.opt, params, normalize=True, method=Image.BICUBIC, color_aug=self.opt.color_aug)
SI_scaled = transform_scale(SI)
SFG_full_np[(SParsing_new_np == 0)] = 0
SFG_full_scaled = transform_scale(Image.fromarray(SFG_full_np))
if SI.size != (192,256) and BigSizeFlag:
transform_scale = get_transform(self.opt, params, normalize=True, method=Image.BICUBIC, color_aug=self.opt.color_aug)
else:
transform_scale = get_transform_fixed(self.opt, params, normalize=True, method=Image.BICUBIC, color_aug=self.opt.color_aug)
if SI.size != (192,256):
SFG_np[(SParsing_new_np != 1) & (SParsing_new_np != 2) & (SParsing_new_np != 3)] = 0
SFG_scaled = transform_scale(Image.fromarray(SFG_np))
return SPose_scaled, SParsing_scaled, SFG_scaled, SFG_full_scaled, SI_scaled
def get_TImage(self, tparsing_path, timg_path, size, params, BigSizeFlag):
random_drop_prob = self.opt.random_drop_prob if self.opt.isTrain else 0
TI = Image.open(timg_path).convert('RGB')
if TI.size != (1920,1080) and BigSizeFlag:
TI = TI.resize((1920,1080), Image.BICUBIC)
elif not BigSizeFlag:
TI = TI.resize((192,256), Image.BICUBIC)
TFG_np = np.array(TI)
TParsing = Image.open(tparsing_path)
TParsing_size = TParsing.size
if TParsing_size != (1920,1080) and TParsing_size != (192,256) and BigSizeFlag:
TParsing = TParsing.resize((1920,1080), Image.NEAREST)
elif not BigSizeFlag and TParsing_size != (192,256):
TParsing = TParsing.resize((192,256), Image.NEAREST)
TParsing_np = np.array(TParsing)
TParsing_new_np = np.zeros_like(TParsing_np)
TParsing_new_np[(TParsing_np == 1) | (TParsing_np == 2)] = 1
TParsing_new_np[(TParsing_np == 4) | (TParsing_np == 13)] = 2
TParsing_new_np[(TParsing_np == 14)] = 3
TParsing_new_np[(TParsing_np == 15)] = 4
TParsing_new_np[(TParsing_np == 16)] = 5
TParsing_new_np[(TParsing_np == 17)] = 6
TParsing_new_np[(TParsing_np == 10)] = 7
TParsing_new_np[(TParsing_np == 18)] = 8
TParsing_new_np[(TParsing_np == 19)] = 9
TParsing_new = Image.fromarray(TParsing_new_np)
if TParsing_size != (192,256) and BigSizeFlag:
transform_scale = get_transform(self.opt, params, normalize=False, method=Image.NEAREST, color_aug=False)
else:
transform_scale = get_transform_fixed(self.opt, params, normalize=False, method=Image.NEAREST, color_aug=False)
TParsing_scaled = transform_scale(TParsing_new)*255.0
if BigSizeFlag:
transform_scale = get_transform(self.opt, params, normalize=True, method=Image.BICUBIC, color_aug=self.opt.color_aug)
else:
transform_scale = get_transform_fixed(self.opt, params, normalize=True, method=Image.BICUBIC, color_aug=self.opt.color_aug)
TFG_np[:,:,0][(TParsing_new_np == 0)] = 0
TFG_np[:,:,1][(TParsing_new_np == 0)] = 0
TFG_np[:,:,2][(TParsing_new_np == 0)] = 0
TFG_scaled = transform_scale(Image.fromarray(TFG_np))
return TParsing_scaled, TFG_scaled
def crop(self, Ai):
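        # Crop a horizontally centered band whose half-width is roughly a quarter
        # of the image width, rounded down to a multiple of 32 (presumably so the
        # cropped width stays divisible by the network's stride).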
w = Ai.size()[2]
base = 32
x_cen = w // 2
bs = int(w * 0.25) // base * base
return Ai[:,:,(x_cen-bs):(x_cen+bs)]
def __len__(self):
return sum(self.frames_count)
def name(self):
return 'ComposerDataset'
| 2.21875
| 2
|
ABC/176/a.py
|
fumiyanll23/AtCoder
| 0
|
12774609
|
<reponame>fumiyanll23/AtCoder<gh_stars>0
N, X, T = map(int, input().split())
time = N // X
if(N%X == 0):
print(time * T)
else:
print((time+1) * T)
| 2.765625
| 3
|
PyTorch Exercises/My_CNN_Sample.py
|
FredAmouzgar/46_Simple_python_Exercise
| 0
|
12774610
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import torch
from torch import nn
import torch.nn.functional as F
class ConvNet(nn.Module):
def __init__(self, input_shape=(1,3,28,28)):
super(ConvNet, self).__init__()
self.cnn = nn.Sequential(nn.Conv2d(in_channels=3, out_channels=128, kernel_size=3, padding=1),
nn.ReLU(),
nn.MaxPool2d(2),
nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=1),
nn.ReLU(),
nn.MaxPool2d(2))
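        # Infer the flattened size of the CNN output by running a dummy tensor of
        # `input_shape` through self.cnn instead of hand-computing it.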
cs = self._calculate_cnn_output_shape(input_shape=input_shape)
cnn_flattened_size = cs[1] * cs[2] * cs[3]
self.dense = nn.Sequential(nn.Linear(cnn_flattened_size, 128),
nn.ReLU(),
nn.Linear(128, 10))
def forward(self, x):
x = self.cnn(x)
x = x.view(x.shape[0], -1)
return F.log_softmax(self.dense(x), dim=-1)
def _calculate_cnn_output_shape(self, input_shape=(10, 3, 28, 28)):
data = torch.rand(input_shape)
return list(self.cnn(data).size())
if __name__ == "__main__":
net = ConvNet()
imgs = torch.rand(10, 3, 28, 28)
with torch.no_grad():
print(net(imgs))
| 3.046875
| 3
|
viz_one_image.py
|
AdidasSuperstar/detr
| 0
|
12774611
|
<reponame>AdidasSuperstar/detr
import torch
import pandas as pd
import cv2
import numpy as np
import albumentations as A
from torch.utils.data import Dataset, DataLoader
# Note: names such as df_folds, marking, get_train_transforms, get_valid_transforms,
# collate_fn, DETRModel, SetCriterion, matcher, weight_dict, losses, null_class_coef,
# num_classes, num_queries, BATCH_SIZE, LR, EPOCHS, train_fn and eval_fn are assumed
# to be defined elsewhere in this repository; they are not imported here.
###CREATE DATASET
DIR_TRAIN = "C:\\Users\\Eva.Locusteanu\\PycharmProjects\\detr\\models\\train" #3423 images
class WheatDataset(Dataset):
def __init__(self, image_ids, dataframe, transforms=None):
self.image_ids = image_ids
self.df = dataframe
self.transforms = transforms
def __len__(self) -> int:
return self.image_ids.shape[0]
def __getitem__(self, index):
image_id = self.image_ids[index]
records = self.df[self.df['image_id'] == image_id]
image = cv2.imread(f'{DIR_TRAIN}/{image_id}.jpg', cv2.IMREAD_COLOR)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)
image /= 255.0
# DETR takes in data in coco format
boxes = records[['x', 'y', 'w', 'h']].values
# Area of bb
area = boxes[:, 2] * boxes[:, 3]
area = torch.as_tensor(area, dtype=torch.float32)
# AS pointed out by PRVI It works better if the main class is labelled as zero
labels = np.zeros(len(boxes), dtype=np.int32)
if self.transforms:
sample = {
'image': image,
'bboxes': boxes,
'labels': labels
}
sample = self.transforms(**sample)
image = sample['image']
boxes = sample['bboxes']
labels = sample['labels']
# Normalizing BBOXES
_, h, w = image.shape
boxes = A.augmentations.bbox_utils.normalize_bboxes(sample['bboxes'], rows=h, cols=w)
target = {}
target['boxes'] = torch.as_tensor(boxes, dtype=torch.float32)
target['labels'] = torch.as_tensor(labels, dtype=torch.long)
target['image_id'] = torch.tensor([index])
target['area'] = area
return image, target, image_id
def run(fold):
df_train = df_folds[df_folds['fold'] != fold]
df_valid = df_folds[df_folds['fold'] == fold]
train_dataset = WheatDataset(
image_ids=df_train.index.values,
dataframe=marking,
transforms=get_train_transforms()
)
valid_dataset = WheatDataset(
image_ids=df_valid.index.values,
dataframe=marking,
transforms=get_valid_transforms()
)
train_data_loader = DataLoader(
train_dataset,
batch_size=BATCH_SIZE,
shuffle=False,
num_workers=4,
collate_fn=collate_fn
)
valid_data_loader = DataLoader(
valid_dataset,
batch_size=BATCH_SIZE,
shuffle=False,
num_workers=4,
collate_fn=collate_fn
)
device = torch.device('cuda:0')
model = DETRModel(num_classes=num_classes, num_queries=num_queries)
model = model.to(device)
criterion = SetCriterion(num_classes - 1, matcher, weight_dict, eos_coef=null_class_coef, losses=losses)
criterion = criterion.to(device)
optimizer = torch.optim.AdamW(model.parameters(), lr=LR)
best_loss = 10 ** 5
for epoch in range(EPOCHS):
train_loss = train_fn(train_data_loader, model, criterion, optimizer, device, scheduler=None, epoch=epoch)
valid_loss = eval_fn(valid_data_loader, model, criterion, device)
print('|EPOCH {}| TRAIN_LOSS {}| VALID_LOSS {}|'.format(epoch + 1, train_loss.avg, valid_loss.avg))
if valid_loss.avg < best_loss:
best_loss = valid_loss.avg
print('Best model found for Fold {} in Epoch {}........Saving Model'.format(fold, epoch + 1))
torch.save(model.state_dict(), f'detr_best_{fold}.pth')
if __name__ == '__main__':
model = run(fold=0)
model.cuda()
| 2.78125
| 3
|
storytracker/toolbox.py
|
pastpages/storytracker
| 24
|
12774612
|
<filename>storytracker/toolbox.py
#!/usr/bin/env python
from __future__ import print_function
import six
import re
import math
import operator
from functools import reduce
try:
import cStringIO as io
except:
import io
class UnicodeMixin(object):
"""
Mixin class to handle defining the proper __str__/__unicode__
methods in Python 2 or 3.
"""
# Python 3
if six.PY3:
def __str__(self):
return self.__unicode__()
# Python 2
else:
def __str__(self):
return self.__unicode__().encode('utf8')
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.__str__())
def indent(rows, hasHeader=False, headerChar='-', delim=' | ', justify='left',
separateRows=False, prefix='', postfix='', wrapfunc=lambda x: x):
"""Indents a table by column.
- rows: A sequence of sequences of items, one sequence per row.
- hasHeader: True if the first row consists of the columns' names.
- headerChar: Character to be used for the row separator line
(if hasHeader==True or separateRows==True).
- delim: The column delimiter.
- justify: Determines how are data justified in their column.
Valid values are 'left','right' and 'center'.
- separateRows: True if rows are to be separated by a line
of 'headerChar's.
- prefix: A string prepended to each printed row.
- postfix: A string appended to each printed row.
- wrapfunc: A function f(text) for wrapping text; each element in
the table is first wrapped by this function."""
# closure for breaking logical rows to physical, using wrapfunc
def rowWrapper(row):
newRows = [wrapfunc(item).split('\n') for item in row]
return [
[substr or '' for substr in item]
for item in list(map(lambda *a: a, *newRows))
]
# break each logical row into one or more physical ones
logicalRows = [rowWrapper(row) for row in rows]
# columns of physical rows
columns = list(map(lambda *a: a, *reduce(operator.add, logicalRows)))
# get the maximum of each column by the string length of its items
maxWidths = [
max([len(str(item)) for item in column]) for column in columns
]
rowSeparator = headerChar * (len(prefix) + len(postfix) + sum(maxWidths) +
len(delim) * (len(maxWidths) - 1))
# select the appropriate justify method
justify = {
'center': str.center,
'right': str.rjust,
'left': str.ljust
}[justify.lower()]
output = io.StringIO()
if separateRows:
        print(rowSeparator, file=output)
for physicalRows in logicalRows:
for row in physicalRows:
print(
prefix +
delim.join([
justify(str(item), width)
for (item, width) in zip(row, maxWidths)
])
+ postfix, file=output
)
if separateRows or hasHeader:
print(rowSeparator, file=output)
hasHeader = False
return output.getvalue()
def wrap_onspace(text, width):
"""
A word-wrap function that preserves existing line breaks
and most spaces in the text. Expects that existing line
breaks are posix newlines (\n).
By <NAME>
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/148061
"""
return reduce(
lambda line, word, width=width: '%s%s%s' %
(
line,
' \n'
[
(
len(line[line.rfind('\n') + 1:]) +
len(word.split('\n', 1)[0]) >= width
)
],
word
),
text.split(' ')
)
def wrap_onspace_strict(text, width):
"""
Similar to wrap_onspace, but enforces the width constraint:
words longer than width are split.
"""
wordRegex = re.compile(r'\S{' + str(width) + r',}')
return wrap_onspace(
wordRegex.sub(lambda m: wrap_always(m.group(), width), text),
width
)
def wrap_always(text, width):
"""
A simple word-wrap function that wraps text on exactly width characters.
It doesn't split the text in words.
"""
return '\n'.join([
text[width * i:width * (i + 1)]
for i in range(
int(math.ceil(1.0 * len(text) / width))
)
])
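# Illustrative usage (not part of the original module): render a small table
# with a header row using the default left justification.
if __name__ == '__main__':
    demo_rows = [['label', 'count'],
                 ['alpha', '3'],
                 ['beta', '12']]
    print(indent(demo_rows, hasHeader=True))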
| 3.140625
| 3
|
angel-ps/python/pyangel/ml/client/angel_client_factory.py
|
weien8899/angel
| 1
|
12774613
|
<reponame>weien8899/angel
#
# Tencent is pleased to support the open source community by making Angel available.
#
# Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions and
# limitations under the License.
from pyangel.context import Configuration
class AngelClientFactory(object):
"""
Angel client factory, it support two types client now: LOCAL and YARN
"""
@staticmethod
def get(map, conf):
"""
get an instance of AngelClient
        :param map: a Java HashMap containing the entry set used to build a Hadoop Configuration instance
        :param conf: the pyangel Configuration providing access to the JVM gateway (``_jvm``) and the Java configuration (``_jconf``)
:return: AngelClient(AngelLocalClient or AngelYarnClient)
"""
return conf._jvm.com.tencent.angel.client.AngelClientFactory.get(map, conf._jconf)
| 1.585938
| 2
|
src/tn_scraper.py
|
erik1066/covid-web-scraper
| 3
|
12774614
|
<filename>src/tn_scraper.py
import requests, openpyxl, io, csv, datetime, os, pathlib
import county_report, state_report
STATE_ABBR = 'TN'
STATE = 'Tennessee'
URL = 'https://www.tn.gov/content/dam/tn/health/documents/cedep/novel-coronavirus/datasets/Public-Dataset-County-New.XLSX'
def scraper():
# make an HTTP web request to get the MI XLSX file
response = requests.get(URL)
if response.status_code == requests.codes.ok:
        # Success - print to the console that the HTTP request succeeded
        print(' ', STATE_ABBR, ': Download succeeded')
temppath = 'temp'
if not os.path.exists(temppath):
os.makedirs(temppath)
tempfilename = datetime.datetime.now().strftime("%Y-%m-%d_%H%M%S") + '_temp_' + STATE_ABBR + '.xlsx'
tempfilepath = pathlib.Path.cwd().joinpath('temp', tempfilename)
with open(tempfilepath, "wb") as file:
file.write(response.content)
wb = openpyxl.load_workbook(filename=tempfilepath)
sheet = wb.worksheets[0]
max_rows = sheet.max_row
counties = []
countyDictionary = {}
i = max_rows
while i > 2:
rowCount = str(i)
county_name = sheet['B' + rowCount].value
county = findCounty(county_name, countyDictionary)
if county == None:
confirmed = int(sheet['E' + rowCount].value)
deaths = int(sheet['P' + rowCount].value)
county = county_report.CountyReport(STATE, county_name, confirmed, deaths, -1, -1, datetime.datetime.now())
counties.append(county) # append the countyReport to our list of counties
countyDictionary[county_name] = county
i = i - 1
# since the above algorithm outputs the counties in reverse-ABC order, let's reverse that so they're in ABC order...
counties = list(reversed(counties))
# print the number of counties we processed
print(' ', STATE_ABBR, ':', len(counties), ' counties processed OK')
# build the state-level report object that will include all of the counties
stateReport = state_report.StateReport(STATE, STATE_ABBR, counties, datetime.datetime.now())
# return the state-level report
return stateReport
else:
# Fail
print(' ', STATE_ABBR, ': ERROR : Web download failed - HTTP status code ', response.status_code)
def findCounty(county_name, countyDictionary):
if county_name in countyDictionary:
return countyDictionary[county_name]
else:
return None
| 3.234375
| 3
|
lz_assoc_viewer/minimum_solution.py
|
atomai/lz-viewer
| 1
|
12774615
|
import gzip
import Data_reader
##REQUIRES filename is a tabix-indexed file and filetype identifies its format
##MODIFIES nothing
##EFFECTS finds the position of the minimum pvalue
def find_min_pvals(filename, filetype, num_minimums, region_buffer):
#create a file reader from the file
file_reader = Data_reader.Data_reader.factory(filename, filetype)
#skip the header
file_reader.skip_header()
#create the minimums dictionary
minimums = create_baseline_minimums(num_minimums)
#find the highest of the minimums
highest_min, highest_min_index = find_highest_min(minimums, num_minimums)
#loops through the lines in the file
line = file_reader.get_line()
    while True:  # termination is handled by the break below
if line == '' or line.split()[0] == '#Genomic':
break
#if the pvalue is not available
if file_reader.get_pval() == 'NA':
line = file_reader.get_line()
continue
#if the pvalue is equal to the highest minimum, we do not add it to dictionary
elif float(file_reader.get_pval()) >= highest_min:
line = file_reader.get_line()
continue
#lastly, we must check other attributes of this pval if we want to add it to the dictionary
else:
#determine if this pvalue shares a region with another minimum
shares_region, shared_index = index_of_shared_region(minimums, num_minimums, long(file_reader.get_pos()), region_buffer)
#if it does share a region:
if shares_region:
#determine which is smaller, and place the smaller minimum in the list
if float(file_reader.get_pval()) < minimums['value'][shared_index]:
minimums = replace_minimum(minimums, long(file_reader.get_pos()), float(file_reader.get_pval()), int(file_reader.get_chrom()), shared_index)
highest_min, highest_min_index = find_highest_min(minimums, num_minimums)
else:
line = file_reader.get_line()
continue
#if it does not share a region, place replace the previous highest minimum with the new minimum
else:
minimums = replace_minimum(minimums, long(file_reader.get_pos()), float(file_reader.get_pval()), int(file_reader.get_chrom()), highest_min_index)
highest_min, highest_min_index = find_highest_min(minimums, num_minimums)
line = file_reader.get_line()
minimums = sort_minimums(minimums, num_minimums)
return minimums
##REQUIRES minimums has at least two minimums
##MODIFIES minimums
##EFFECTS sorts the dictionary of minimums by increasing p-value (most significant first)
def sort_minimums(minimums, num_minimums):
new_minimums = create_baseline_minimums(num_minimums)
index = 0
for min in minimums['value']:
best = find_min_of_mins(minimums)
new_minimums['position'][index] = minimums['position'][best]
new_minimums['value'][index] = minimums['value'][best]
new_minimums['chromosome'][index] = minimums['chromosome'][best]
minimums['value'][best] = 1
index += 1
return new_minimums
##REQUIRES minimums has at least 1 minimum
##MODIFIES minimums
##EFFECTS returns an updated dictionary of minimums
def replace_minimum(minimums, position, pvalue, chromosome, index):
minimums['position'][index] = position
minimums['value'][index] = pvalue
minimums['chromosome'][index] = chromosome
return minimums
##REQUIRES minimums has at least 1 minimum
##MODIFIES nothing
##EFFECTS returns a bool and a index, denoting that the current position is within a certain buffer region of another minimum
def index_of_shared_region(minimums, num_minimums, position, region_buffer):
for x in range(0, num_minimums):
position_diff = abs( position - minimums['position'][x] )
if position_diff < region_buffer:
return True, x
return False, -1
##REQUIRES minimums has at least one 'minimum' in it
##MODIFIES nothing
##EFFECTS returns the highest minimum and the index it is stored at
def find_highest_min(minimums, num_minimums):
current_max = 0
for x in range(0, num_minimums):
if minimums['value'][x] > current_max:
current_max = minimums['value'][x]
current_position = x
return current_max, current_position
##REQUIRES num_minimums is > 0
##MODIFIES nothing
##EFFECTS creates a minimums dictionary, including position, value and chromosome
def create_baseline_minimums(num_minimums):
minimums = {'position' : [], 'value' : [], 'chromosome' : [] }
for x in range( 0 , num_minimums ):
minimums['position'].append(-1000000)
minimums['value'].append(1)
minimums['chromosome'].append(0)
return minimums
##REQUIRES minimums is a dictionary of minimums
##MODIFIES nothing
##EFFECTS finds the index of the minimum of the minimums
def find_min_of_mins(minimums):
current_min = 1
counter = 0
	for value in minimums['value']:
		if current_min > value:
			current_min = value
			current_position = counter
		counter += 1
return current_position
##REQUIRES: minimums is a dictionary of minimums
##MODIFIES nothing
##EFFECTS creates a top hits list
def create_hits(minimums):
hits = []
##create the hits list for flask
for x in range(0, 10):
chr = minimums['chromosome'][x]
chr = str(chr)
pos = minimums['position'][x]
pos = str(pos)
hits.append([chr + ":" + pos, chr + ":" + pos])
return hits
##REQUIRES filename is a tabix file readable by Data_reader
##MODIFIES nothing
##EFFECTS returns a default region string (chrom:start-end) built around the first record in the file
def get_basic_region(filename, filetype):
#create a file reader from the file
file_reader = Data_reader.Data_reader.factory(filename, filetype)
#skip the header
file_reader.skip_header()
#get a line
line = file_reader.get_line()
chrom = file_reader.get_chrom()
position = file_reader.get_pos()
return str(chrom) + ":" + str(position) + "-" + str(int(position) + 200000)
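# --- Usage sketch (not part of the original module) ---
# A minimal example of how the helpers above might be wired together, assuming
# a results file that Data_reader understands; the file path and filetype
# below are hypothetical placeholders rather than values from the project.
if __name__ == '__main__':
	example_file = 'results.gz'   # hypothetical input file
	example_type = 'EPACTS'       # hypothetical filetype label for Data_reader
	top_hits = find_min_pvals(example_file, example_type, num_minimums=10, region_buffer=500000)
	print(create_hits(top_hits))
	print(get_basic_region(example_file, example_type))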
| 3.234375
| 3
|
src/amaping.py
|
Devdevdavid/Amaping
| 1
|
12774616
|
<filename>src/amaping.py
#!/usr/bin/python
# Author : <NAME>
# Desc : See APP_DESC :)
# File : Amaping.py
# Date : July 4th, 2021
# Version : 1.0.0
import time
import signal, os
import traceback # For debugging unhandled exceptions
import argparse # To parse command line arguments
from geopy.geocoders import Nominatim # Get GeoCode from text address
import pandas # Read CSV file
import pickle              # Used to load/save context and speed up development
from Painter import Painter
from MapGenerator import MapGenerator
import Logger
import Framacarte # To generate umap files
from AmapMember import AmapMember # Define a member
# Constants
APP_NAME = "Amaping"
APP_DESC = "Build a map of AMAP members locations"
# Globale Variables
geoLocator = None
# Tell if a value is considered set or not
def _isset(value):
if (pandas.isna(value)):
return False
elif (value == ""):
return False
else:
return True
class Amaping:
# =============
# CONSTANTS
# =============
DEFAULT_CSV_FILENAME = './ressources/amap_data.csv'
DEFAULT_ODS_FILENAME = './ressources/Cagette_Adh_Brama-2021-09.ods'
DEFAULT_CSV_SEPARATOR = ';'
DEFAULT_OUTPUT_MAP_NAME = './output/map.png'
DEFAULT_MAP_ZOOM_LEVEL = 16
DEFAULT_MAP_SIZE = "4080x4080"
AMAP_ADDRESS = "Salle Brama, Avenue Sainte-Marie"
AMAP_CITY = "Talence"
AMAP_POSTAL_CODE = "33400"
# =============
# Variables
# =============
config = None # Store the configuration
amapMemberArray = [] # Store data of all members
# =============
# Members
# =============
def handler_sigint(self, signum, frame):
self.isAppQuitting = True
raise RuntimeError("Stopped by user")
def __init__(self):
# Init
self.isAppQuitting = False
# Check arguments
parser = argparse.ArgumentParser(description=APP_DESC)
parser.add_argument('-v', '--verbose', help='enable verbose logs', default=False, action='store_true')
parser.add_argument('-u', '--umap', default=False, dest="umap", help='enable umap file generation', action='store_true')
parser.add_argument('-p', '--png', default=False, dest="png", help='enable PNG file generation', action='store_true')
parser.add_argument('-c', '--csv', default=self.DEFAULT_CSV_FILENAME, dest="csvFilename", help='specify CSV data file', type=str)
parser.add_argument('-d', '--ods', default=self.DEFAULT_ODS_FILENAME, dest="odsFilename", help='specify ODS data file', type=str)
        parser.add_argument('-s', '--separator', default=self.DEFAULT_CSV_SEPARATOR, dest="csvSeparator", help='specify CSV column separator', type=str)
parser.add_argument('-o', '--output', default=self.DEFAULT_OUTPUT_MAP_NAME, dest="mapFilename", help='specify a map filename', type=str)
parser.add_argument('-m', '--mapSize', default=self.DEFAULT_MAP_SIZE, dest="mapSize", help='specify a size in pixel for map generation (Ex: 1920x1080)', type=str)
parser.add_argument('-z', '--zoomLevel', default=self.DEFAULT_MAP_ZOOM_LEVEL, dest="zoomLevel", help='specify a zoom level for map generation', type=int)
# Use vars() to get python dict from Namespace object
self.args = vars(parser.parse_args())
# Handle args
if self.args["verbose"]:
Logger.setLevelDebug()
if not self.args["png"] and not self.args["umap"]:
raise RuntimeError("At least one type of file generation is needed, use -u or -p !")
# See https://wiki.openstreetmap.org/wiki/Zoom_levels
# 20 might not be available everywhere
if (self.args["zoomLevel"] < 0) or (self.args["zoomLevel"] > 20):
raise RuntimeError("Zoom level must be in range [0; 20]")
    def save_context(self):
        with open('./output/amapMemberArray.obj', 'wb') as f:
            pickle.dump(self.amapMemberArray, f)
        Logger.debug("Saving context to file")
def load_context(self):
try:
f = open('./output/amapMemberArray.obj', 'rb')
except Exception as e:
Logger.info("There is no context to load")
return -1
self.amapMemberArray = pickle.load(f)
return 0
def open_ods_sheet(self, odsFile, sheetName):
Logger.info("ODS - Reading sheet " + sheetName)
return pandas.read_excel(odsFile, engine='odf', sheet_name=sheetName)
def find_member_by(self, name1, name2):
# Find a match in our member list
for member in self.amapMemberArray:
displayName = member.get_display_name().lower()
# Check name 1
if (not name1.lower() in displayName):
continue
# Check name 2
if (name2 != ""):
if (not name2.lower() in displayName):
continue
# Return found member
return member
# Member not found
msg = "Couldn't find a match in known members for {0}".format(name1)
if (name2 != ""):
msg += "/" + name2
Logger.warning(msg)
return None
def find_member_from_row(self, row, index):
nom1 = nom2 = ""
if (_isset(row['nom'])):
nom1 = row['nom'].replace(" ", "")
else:
Logger.debug("Couldn't find a match for row {0}".format(index))
return None
if (_isset(row['nom conjoint'])):
nom2 = row['nom conjoint'].replace(" ", "")
# Find a match in our member list
matchMember = self.find_member_by(nom1, nom2)
return matchMember
def run(self):
# Load CSV file
data = pandas.read_csv(self.args["csvFilename"], sep=self.args["csvSeparator"], header=0)
# Get AMAP address
salleBrama = AmapMember()
salleBrama.add_people("Salle", "Brama")
salleBrama.set_address(self.AMAP_ADDRESS)
salleBrama.set_city(self.AMAP_CITY)
salleBrama.set_postal_code(self.AMAP_POSTAL_CODE)
if (salleBrama.req_map_position(geoLocator) == None):
raise RuntimeError("Unable to find AMAP address: \"{0}\"".format(salleBrama.get_display_address()))
# Clear output array
self.amapMemberArray = []
self.csvDataRowCount = len(data.index)
Logger.debug("Found {0} rows in CSV file \"{1}\"".format(self.csvDataRowCount, self.args["csvFilename"]))
if self.load_context() == -1:
# Open a report file to log what needs to be modified in DB
reportFile = open("./output/report.txt", "w")
# For each line in the CSV...
for index, rowdata in data.iterrows():
member = AmapMember()
# Manage ID
if (_isset(rowdata['id'])):
member.set_id(rowdata['id'])
                # Manage names (first name is optional)
if (_isset(rowdata['Nom'])):
if (_isset(rowdata['Prénom'])):
prenom = rowdata['Prénom']
else:
prenom = "Prénom"
member.add_people(rowdata['Nom'], prenom)
if (_isset(rowdata['Nom partenaire'])):
if (_isset(rowdata['Prénom partenaire'])):
prenom = rowdata['Prénom partenaire']
else:
prenom = "Prénom"
member.add_people(rowdata['Nom partenaire'], prenom)
# Manage address
if (_isset(rowdata['Adresse 1']) and _isset(rowdata['Adresse 2'])):
member.set_address(rowdata['Adresse 1'])
# Display warning
Logger.warning("2 addresses detected for member {0}, choosing {1}".format(
member.get_display_name(),
member.get_address()))
elif (_isset(rowdata['Adresse 1'])):
member.set_address(rowdata['Adresse 1'])
elif (_isset(rowdata['Adresse 2'])):
member.set_address(rowdata['Adresse 2'])
else:
Logger.warning("No address detected for member {0}".format(member.get_display_name()))
reportFile.write("Pas d'adresse pour {0}\n".format(member.get_display_name()))
continue
if (_isset(rowdata['Ville'])):
member.set_city(rowdata['Ville'])
if (_isset(rowdata['Code postal'])):
member.set_postal_code(rowdata['Code postal'])
# Get Geocode, ignore if it failed
if (member.req_map_position(geoLocator) == None):
reportFile.write("Le membre {0} a une adresse non reconnue : \"{1}\"\n".format(
member.get_display_name(),
member.get_display_address()
))
continue
# Filter out member with far locations
isCloseToHome = member.is_close_to(salleBrama.get_map_position())
member.set_close_to_home(isCloseToHome)
if (not isCloseToHome):
Logger.warning("Member {0} is too far away from {1}".format(
member.get_display_name(),
salleBrama.get_display_name())
)
reportFile.write("Le membre {0} est trop éloigné de {1} pour être affiché sur la map PNG\n".format(
member.get_display_name(),
salleBrama.get_display_name()
))
if (_isset(rowdata['Téléphone'])):
member.set_phone(rowdata['Téléphone'])
if (_isset(rowdata['Email'])):
member.set_email(rowdata['Email'])
# Add member to output array
self.amapMemberArray.append(member)
# Check remove members
self.removeMemberCount = self.csvDataRowCount - len(self.amapMemberArray)
if (self.removeMemberCount > 0):
Logger.warning("{0} members will not be on the map because of above warnings/errors !".format(self.removeMemberCount))
reportFile.write("{0} membre(s) nécessite(nt) de l'attention\n".format(self.removeMemberCount))
# Close the report file, we don't need it anymore
reportFile.close()
            # Save context to speed up later execution
self.save_context()
else:
Logger.info("Using cached context file")
# ========================
# ODS FILE
# ========================
if (self.args["odsFilename"] != ""):
Logger.info("ODS - Reading file " + self.args["odsFilename"])
# Analyse 1st sheet
odsContent = self.open_ods_sheet(self.args["odsFilename"], "COORDONNEES")
            # Iterate over each line of the file
for index, row in odsContent.iterrows():
matchMember = self.find_member_from_row(row, index)
if (matchMember == None):
continue
# Add info
if (_isset(row['Rôles'])):
matchMember.set_role(row['Rôles'])
else:
matchMember.set_role("Adhérent")
if (_isset(row['Framacarte'])):
isOnMap = row['Framacarte'].upper() == "OUI"
matchMember.set_on_map(isOnMap)
# Analyse 1st sheet
odsContent = self.open_ods_sheet(self.args["odsFilename"], "ENGAGEMENTS")
            # Iterate over each line of the file
for index, row in odsContent.iterrows():
matchMember = self.find_member_from_row(row, index)
if (matchMember == None):
continue
# Add info
if (row['Légumes'] in ("hebdo", "pair", "impair")):
matchMember.set_type_panier(row['Légumes'])
# ========================
# COLORS AND SHAPES
# ========================
# Define color and shape for each members
markerShapes = ["star", "triangle", "sun", "circle", "rectangle", "cross"]
for member in self.amapMemberArray:
color = "gray"
shape = "cross"
if (member.get_type_panier() != ""):
if (member.get_type_panier() == "hebdo"):
color = "green"
elif (member.get_type_panier() == "pair"):
color = "orange"
elif (member.get_type_panier() == "impair"):
color = "blue"
member.set_marker(color, shape)
        # Prepend Salle Brama to the member list so it is drawn like any other member
salleBrama.set_marker("red", "home")
self.amapMemberArray.insert(0, salleBrama)
# ========================
# GEOJSON
# ========================
if self.args["umap"]:
Logger.info("Generating UMap file...")
amapBramaCollection = {}
for member in self.amapMemberArray:
# Set description
description = member.get_display_address()
if (member.is_on_map() == False):
Logger.info("Member {0} don't want to appear on the map".format(member.get_display_name()))
continue
# Add info if we got one
if (member.get_type_panier() != ""):
description += "\nLégumes : " + member.get_type_panier().capitalize()
if (member.get_phone() != ""):
description += "\nTel. : " + member.get_phone()
if (member.get_email() != ""):
description += "\nEmail : " + member.get_email()
if (member.get_role() != "" and member.get_role() != "Adhérent"):
description += "\nRôle : " + member.get_role()
if (member.get_type_panier() != ""):
collectionName = member.get_type_panier()
else:
collectionName = "Autre"
# Create collection if needed
if (not collectionName in amapBramaCollection):
amapBramaCollection[collectionName] = Framacarte.Collection(collectionName.capitalize())
curCollection = amapBramaCollection[collectionName]
# Add the marker
curCollection.add_marker(
member.get_display_name(),
member.get_map_position(),
member.get_color(),
member.get_shape(),
description
)
umapObj = Framacarte.UMap("BRAMA")
for curCollection in amapBramaCollection:
umapObj.add_collection(amapBramaCollection[curCollection])
umapObj.write_file()
# ========================
# PNG
# ========================
if self.args["png"]:
Logger.info("Generating PNG file...")
            # Generate map
mapSize = tuple(map(int, self.args["mapSize"].split('x')))
mapGen = MapGenerator(
center=salleBrama.get_map_position(),
zoomLevel=self.args["zoomLevel"],
mapSize=mapSize
)
mapGen.render()
            # Reopen the map with the painter and add a sidebar
painter = Painter(mapGen=mapGen)
sideBarWidth = int(mapSize[0] / 3)
painter.add_side_bar(sideBarWidth)
# Add title
painter.add_legend_title("{0} membres de l'AMAP Pétal :".format(len(self.amapMemberArray)))
# Add markers
Logger.info("Adding markers...")
for member in self.amapMemberArray:
# Ignore far members
if (not member.is_close_to_home()):
continue
if (member.is_on_map() == False):
continue
painter.add_marker(
member.get_display_name(),
member.get_map_position(),
member.get_color(),
member.get_shape()
)
Logger.info("Openning output file...")
painter.save(self.args["mapFilename"])
painter.show()
painter.close()
# ========================
# DONE
# ========================
Logger.info("Work done !")
# ========================
# ENTRY POINT
# ========================
if __name__ == '__main__':
# Logging
Logger.init(APP_NAME)
# Init global variables
geoLocator = Nominatim(user_agent="http")
try:
# Init app
app = Amaping()
# Configure signal handler
        signal.signal(signal.SIGINT, app.handler_sigint)
app.run()
except Exception as e:
Logger.error("Exit with errors: " + str(e));
Logger.debug(traceback.format_exc())
| 2.359375
| 2
|
nbp/tests/tools.py
|
machism0/non-bonded-periodic
| 0
|
12774617
|
<reponame>machism0/non-bonded-periodic<gh_stars>0
import nbp
import numpy as np
def make_system(characteristic_length=10,
sigma=None, epsilon_lj=None, particle_charges=None, positions=None, particle_count=None,
lj=True, ewald=True, use_neighbours=False):
if particle_count is None:
if particle_charges is not None:
particle_count = np.asarray(particle_charges).shape[0]
elif positions is not None:
particle_count = np.asarray(positions).shape[0]
else:
particle_count = 50
    if sigma is None:
        sigma = np.ones((particle_count, 1))
    if epsilon_lj is None:
        epsilon_lj = np.ones((particle_count, 1))
if particle_charges is None:
particle_charges = np.random.rand(particle_count, 1)
if positions is None:
positions = characteristic_length * np.random.rand(particle_count, 3)
system = nbp.System(characteristic_length, sigma, epsilon_lj, particle_charges, positions,
lj=lj, ewald=ewald, use_neighbours=use_neighbours)
return system
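# --- Usage sketch (not part of the original module) ---
# A minimal example of building a small random test system, assuming
# nbp.System accepts the keyword arguments forwarded above.
if __name__ == '__main__':
    system = make_system(characteristic_length=5, particle_count=10, use_neighbours=False)
    print(type(system))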
| 2.484375
| 2
|
reflex/reflex_runner.py
|
ankur-gos/RE-Flex
| 3
|
12774618
|
"""
Classes for running lm inference
"""
import os
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from reflex.models.reflex import Reflex
from dataclasses import dataclass
from reflex.utils import load_file, to_list
from reflex.structs import Sample
from reflex.models.pmi_filter import WordEmbeddingsPMIFilter
from reflex.squad_utils import convert_examples_to_features, read_input_examples, RawResult, get_predictions
from reflex.metrics import calculate_relation_metrics
from tqdm import tqdm
class ReflexRunner:
def __init__(self,
model_dir,
model_name,
device,
relations_filepath,
data_directory,
batch_size,
must_choose_answer,
l,
we_model,
spacy_model,
k,
expand,
hyperparams=None):
self.context_filter = WordEmbeddingsPMIFilter(we_model, spacy_model, l)
self.model = Reflex(model_dir, model_name, device, k, self.context_filter.nlp)
self.relations_filepath = relations_filepath # path to relations file
self.data_directory = data_directory # data directory path
self.batch_size = batch_size
self.must_choose_answer = must_choose_answer # For datasets where there is always an answer, setting this to true will ensure that QA models that can return "answer doesn't exist" will always return a span in the context
self.hyperparams = hyperparams
self.expand = expand
self.override_expand = False
self.e_list = []
self.override_l = False
def update_l(self, l):
self.context_filter = WordEmbeddingsPMIFilter(self.context_filter.word_emb, self.context_filter.nlp, l)
def predict(self):
# Load relations file
relations = load_file(self.relations_filepath)
# Iterate through relations file and predict for each relation
aggregate_em = aggregate_f1 = 0
per_relation_metrics = {}
for relation in relations:
# Check for per relation tuned hyperparams
if self.hyperparams is not None:
l, expand, _ = self.hyperparams[relation['relation']]
if not self.override_l:
self.update_l(l)
if not self.override_expand:
self.expand = expand
data_file = os.path.join(self.data_directory, relation['relation']) + '.jsonl'
data = load_file(data_file)
# Adding to set filters any accidental duplicates
samples_set = set()
for d in data:
samples_set.add(Sample(d['subject'], d['context'], d['object'], None, relation['template']))
samples = list(samples_set)
init_len = len(samples)
final_len = 1
if self.must_choose_answer:
print('Must choose answer is True. Skipping filtering step')
else:
print('Starting filtering')
samples = self.context_filter.filter(samples)
final_len = len(samples)
print(f'Filtering finished. Filtered {init_len - final_len}.')
all_results = []
if final_len != 0:
print(f'Loaded relation {relation["relation"]}. There are {len(samples)} test samples')
print('Batching samples')
batches, samples = self.model.batch(samples, self.batch_size)
print('Starting inference')
for batch in tqdm(batches):
results = self.model.predict(batch, self.expand)
all_results.extend(results)
else:
print('All samples were filtered. Skipping inference.')
            # Now we need to re-add all the filtered samples
filtered_samples = [s for s in samples_set if s not in samples]
samples = list(samples)
samples.extend(filtered_samples)
# Predict empty string for every sample
filtered_predictions = [''] * len(filtered_samples)
all_results.extend(filtered_predictions)
relation_em, relation_f1, per_relation_metrics, _, relation_e_list = calculate_relation_metrics(samples, all_results, per_relation_metrics, relation, single_error_list=None, reflex_e_list=True)
self.e_list.extend(relation_e_list)
aggregate_em += relation_em
aggregate_f1 += relation_f1
aggregate_em /= len(relations)
aggregate_f1 /= len(relations)
return aggregate_em, aggregate_f1, per_relation_metrics
| 2.390625
| 2
|
emmet/css_abbreviation/tokenizer/tokens.py
|
jingyuexing/py-emmet
| 29
|
12774619
|
class Token:
__slots__ = ('start', 'end')
def __init__(self, start: int=None, end: int=None):
self.start = start
self.end = end
@property
def type(self):
"Type of current token"
return self.__class__.__name__
def to_json(self):
return dict([(k, self.__getattribute__(k)) for k in dir(self) if not k.startswith('__') and k != 'to_json'])
class Chars:
Hash = '#'
Dollar = '$'
Dash = '-'
Dot = '.'
Colon = ':'
Comma = ','
Excl = '!'
At = '@'
Percent = '%'
Underscore = '_'
RoundBracketOpen = '('
RoundBracketClose = ')'
CurlyBracketOpen = '{'
CurlyBracketClose = '}'
Sibling = '+'
SingleQuote = "'"
DoubleQuote = '"'
Transparent = 't'
class OperatorType:
Sibling = '+'
Important = '!'
ArgumentDelimiter = ','
ValueDelimiter = '-'
PropertyDelimiter = ':'
class Operator(Token):
__slots__ = ('operator',)
def __init__(self, operator: OperatorType, *args):
super(Operator, self).__init__(*args)
self.operator = operator
class Bracket(Token):
__slots__ = ('open',)
def __init__(self, is_open: bool, *args):
super(Bracket, self).__init__(*args)
self.open = is_open
class Literal(Token):
__slots__ = ('value',)
def __init__(self, value: str, *args):
super(Literal, self).__init__(*args)
self.value = value
class NumberValue(Token):
__slots__ = ('value', 'raw_value', 'unit')
def __init__(self, value: int, raw_value: str, unit='', *args):
super(NumberValue, self).__init__(*args)
self.value = value
self.raw_value = raw_value
self.unit = unit
class ColorValue(Token):
__slots__ = ('r', 'g', 'b', 'a', 'raw')
def __init__(self, r=0, g=0, b=0, a=None, raw='', *args):
super(ColorValue, self).__init__(*args)
self.r = r
self.g = g
self.b = b
self.a = a if a is not None else 1
self.raw = raw
class StringValue(Token):
__slots__ = ('value', 'quote')
def __init__(self, value: str, quote='', *args):
super(StringValue, self).__init__(*args)
self.value = value
self.quote = quote
class Field(Token):
__slots__ = ('name', 'index')
def __init__(self, name: str, index: int=None, *args):
super(Field, self).__init__(*args)
self.index = index
self.name = name
class WhiteSpace(Token): pass
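# --- Usage sketch (not part of the original module) ---
# A minimal example of constructing a token and serializing it with the
# classes above; the start/end offsets are arbitrary.
if __name__ == '__main__':
    token = NumberValue(10, '10', 'px', 0, 2)
    print(token.type)      # -> 'NumberValue'
    print(token.to_json())  # start/end offsets plus the token's own fields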
| 2.578125
| 3
|
backend/app/models/client_model.py
|
juniorosorio47/client-order
| 0
|
12774620
|
<filename>backend/app/models/client_model.py
from django.contrib.auth import get_user_model
from django.db import models
User = get_user_model()
# Client model
class Client(models.Model):
name = models.CharField(max_length=120)
    def __str__(self):
return self.name
| 2.140625
| 2
|
apps/Tests/algs/TestAlg.py
|
erinzm/NEXT-chemistry
| 155
|
12774621
|
<filename>apps/Tests/algs/TestAlg.py
from apps.Tests.tests.test_api import set_and_get_alg, get_alg, get_exp
class MyAlg:
def initExp(self, butler, dummy):
get_exp(butler)
set_and_get_alg(butler)
return "return_init_exp"
def getQuery(self, butler):
get_alg(butler)
return "return_get_query"
def processAnswer(self, butler):
get_alg(butler)
return "return_process_answer"
def getModel(self, butler):
get_alg(butler)
return "return_process_answer"
| 2.0625
| 2
|
pinterest.py
|
raphaottoni/pinterest-crawler
| 9
|
12774622
|
<reponame>raphaottoni/pinterest-crawler
import urllib2
import time
import random
import socket
import MySQLdb
from config import *
import os,re
class Pinterest:
  #------ initialization -------#
def __init__(self, verbose=0):
self.db = MySQLdb.connect(host,user,password,database)
self.cursor = self.db.cursor()
    self.cursor.connection.autocommit(True)
def findError(self,html):
erro = re.search("HTML-Error-Code: ([0-9]*)\n",html)
if (erro):
#print "achei o erro-"+erro.group(1)
return erro.group(1)
else:
return 0
def analyzeAnswer(self,html):
code = self.findError(html)
if (code != 0):
if code == "404":
return 1
else:
print "dormindo - code " + code
time.sleep(random.randint(0,2))
return 2
else:
return 0
def fetch(self,url):
done=0
while(not done):
html = os.popen("phantomjs ./pinterest.js "+url).read()
answerCode = self.analyzeAnswer(html)
if (answerCode == 0):
done=1;
elif (answerCode == 1):
return 1
return html
def fetchPins(self,url,qtd):
done=0
while(not done):
html = os.popen("phantomjs ./pinterestPin.js "+url+ " " +qtd).read()
answerCode = self.analyzeAnswer(html)
if (answerCode == 0):
done=1;
elif (answerCode == 1):
return 1
return html
def fetchSimple(self,url):
done=0
while(not done):
print "coletando"
html = os.popen("phantomjs ./pinterestSimple.js "+url).read()
answerCode = self.analyzeAnswer(html)
if (answerCode == 0):
done=1;
elif (answerCode == 1):
return 1
return html
def snowBall(self):
self.cursor.execute("select pinterestID from fatos where statusIDs is null or statusIDs = 0 limit 1")
self.db.commit()
return self.cursor.fetchone()[0]
def getIDtoCrawl(self):
self.cursor.execute("select pinterestID from usersToCollect where statusColeta is null or statusColeta = 0 order by rand() limit 1")
self.db.commit()
return self.cursor.fetchone()[0]
def insereID(self,pinterestID):
try:
self.cursor.execute("insert into fatos (pinterestID) values ('"+pinterestID+"')")
self.db.commit()
except MySQLdb.IntegrityError, e:
log = open("/var/tmp/log","a+")
log.write("Erro de integridade do banco: '"+ str(e)+ "' \n")
log.close()
def statusColetaIDs(self,pinterestID,valor):
try:
self.cursor.execute("update usersToCollect set statusIDs ='"+valor+"' where pinterestID = '"+pinterestID+"'")
self.db.commit()
print "update usersToCollect set statusIDs ='"+valor+"' where pinterestID = '"+pinterestID+"'"
except Exception, e:
log = open("/var/tmp/log","a+")
log.write("Erro de integridade do banco: '"+ str(e)+ "' \n")
log.close()
def statusColeta(self,pinterestID,valor, dominio):
try:
self.cursor.execute("update usersToCollect set statusColeta ='"+valor+"' , crawler = '"+dominio+"' where pinterestID = '"+pinterestID+"'")
self.db.commit()
print "update usersToCollect set statusColeta ='"+valor+"' , crawler = '"+dominio+"' where pinterestID = '"+pinterestID+"'"
except Exception, e:
log = open("/var/tmp/log","a+")
log.write("Erro de integridade do banco: '"+ str(e)+ "' \n")
log.close()
def nPinsUser(self,pinterestID,valor):
try:
self.cursor.execute("insert into deltaPinUsers (pinterestID,nPins) values ('"+pinterestID+"' , '"+valor+"')")
self.db.commit()
#print ("insert into deltaPinUsers (pinterestID,nPins) values ('"+pinterestID+"' , '"+valor+"')")
except Exception, e:
print str(e)
log = open("/var/tmp/log","a+")
log.write("Erro de integridade do banco: '"+ str(e)+ "' \n")
log.close()
| 2.828125
| 3
|
ciphers/zazcipher.py
|
zdhoward/practice
| 0
|
12774623
|
<reponame>zdhoward/practice
literals = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ ,.?/:;{[]}-=_+~!@#$%^&*()"
#obfuscated
literals = "tJ;EM mKrFzQ_SOT?]B[U@$yqec~fhd{=is&alxPIbnuRkC%Z(jDw#G:/)L,*.V!pov+HNYA^g-}WX"
key = 7
def shuffle(plaintext):
shuffled = ""
# shuffle plaintext
for i in range(int(len(plaintext) / 3)):
block = plaintext[i*3] + plaintext[i*3 + 1] + plaintext[i*3 + 2]
old0 = block[0]
old1 = block[1]
old2 = block[2]
block = old2 + old0 + old1
shuffled += block
shuffled += plaintext[len(plaintext) - (len(plaintext) % 3):len(plaintext)]
return shuffled
def unshuffle(ciphertext):
unshuffled = ""
# unshuffle plaintext
for i in range(int(len(ciphertext) / 3)):
block = ciphertext[i*3] + ciphertext[i*3 + 1] + ciphertext[i*3 + 2]
old0 = block[0]
old1 = block[1]
old2 = block[2]
block = old1 + old2 + old0
unshuffled += block
unshuffled += ciphertext[len(ciphertext) - (len(ciphertext) % 3):len(ciphertext)]
return unshuffled
def shift(plaintext):
shifted = ""
# Cipher shift
tmp = []
for i in range(len(plaintext)):
pos = literals.find(plaintext[i])
if pos >= 0:
            # wrap around the end of the alphabet with a modulo
            res = literals[(pos + key) % len(literals)]
else:
res = plaintext[i]
tmp.append(res)
# reconstruct ciphertext
for i in range(len(tmp)):
shifted += tmp[i]
return shifted
def unshift(ciphertext):
unshifted = ""
tmp = []
for i in range(len(ciphertext)):
pos = literals.find(ciphertext[i])
if pos >= 0:
            # wrap around the start of the alphabet with a modulo
            res = literals[(pos - key) % len(literals)]
else:
res = ciphertext[i]
tmp.append(res)
#reconstruct ciphertext
for i in range(len(tmp)):
unshifted += tmp[i]
return unshifted
def encrypt(msg):
msg = shuffle(msg)
msg = shift(msg)
return msg
def decrypt(msg):
#msg = unshuffle(msg)
msg = unshift(msg)
msg = unshuffle(msg)
return msg
def test():
test = "This is my plaintext"
test = "\nThis is a long paragraph with lots of exciting things\nI could go on and on about all of this stuff.\nLove, Zach!"
test = "abcdefghijklmnopqrstuvwxyz-ABCDEFGHIJKLMNOPQRSTUVWXYZ_!@#$%^&*()"
print ("Testing: " + test)
print ("Shuffle: " + shuffle(test))
print ("Shift: " + shift(shuffle(test)))
print ("Unshift: " + unshift(shift(shuffle(test))))
print ("Unshuffle: " + unshuffle(unshift(shift(shuffle(test)))))
print ("")
print ("Encrypt: " + encrypt(test))
print ("Decrypt: " + decrypt(encrypt(test)))
if __name__ == "__main__":
test()
| 3.078125
| 3
|
testsqlite.py
|
UncleEngineer/UncleVocab
| 0
|
12774624
|
# testsqlite.py
import sqlite3
conn = sqlite3.connect('vocab.db')
c = conn.cursor()
c.execute("""CREATE TABLE IF NOT EXISTS vocab (
ID INTEGER PRIMARY KEY AUTOINCREMENT,
vocab text,
meaning text,
score int)""")
def insert_vocab(vocab,meaning):
ID = None
score = 0
with conn:
c.execute("""INSERT INTO vocab VALUES (?,?,?,?)""",
(ID,vocab,meaning,score))
conn.commit()
print('Data was inserted')
def view_vocab():
with conn:
c.execute("SELECT * FROM vocab")
allvocab = c.fetchall()
print(allvocab)
return allvocab
view_vocab()
#insert_vocab('Cat','แมว')
| 3.59375
| 4
|
09_binarytree/binary_tree_xrh.py
|
Xinrihui/Data-Structure-and-Algrithms
| 1
|
12774625
|
from collections import deque
class TreeNode(object):
def __init__(self,item):
self.val=item
self.left=None
self.right=None
# self.height=None
# Codec
class DFS_Serialize:
"""
    DFS-based serialization and deserialization of a binary tree
    (recursive implementation)
ref:
https://blog.csdn.net/Shenpibaipao/article/details/108378093
https://zhuanlan.zhihu.com/p/164408048
"""
def __serialize_preorder(self, p):
"""
        Serialize the tree into a preorder sequence
:param p:
:return:
"""
if p != None:
c = p.val
self.preorder_list.append(c)
self.__serialize_preorder(p.left)
self.__serialize_preorder(p.right)
else:
self.preorder_list.append('#') # 使用 '#' 表示空节点
def serialize(self, root):
"""
        Serialize the tree into a preorder sequence
:type preorder: str
"""
if root is None:
return ''
self.preorder_list = []
self.__serialize_preorder(root)
preorder_list = [str(ele) for ele in self.preorder_list]
res = ','.join(preorder_list)
return res
def __preorder_deSerialize(self, preorder):
"""
        Recursive helper
        :param preorder:
:return:
"""
c = preorder.popleft()
if c != '#':
p = TreeNode(c)
p.left = self.__preorder_deSerialize(preorder)
p.right = self.__preorder_deSerialize(preorder)
return p
def deserialize(self, str1):
"""
        Deserialize a binary tree from its preorder sequence (recursive)
:param preorder:
:return:
"""
if len(str1) == 0:
return None
preorder = str1.split(',')
preorder = [ele.strip() for ele in preorder]
preorder = deque(preorder)
root = self.__preorder_deSerialize(preorder)
return root
# Codec
class DFS_Serialize_Stack:
"""
    DFS-based serialization and deserialization of a binary tree
    (non-recursive, stack-based implementation)
"""
def serialize(self, root):
"""
        Serialize the binary tree into a preorder sequence
"""
if root is None:
return ''
preorder_list = []
p = root
stack = []
stack.append(p)
while len(stack) > 0:
current = stack.pop()
if current is not None: # 当前节点不是 空节点
preorder_list.append(current.val)
if current.right is not None:
stack.append(current.right)
else:
stack.append(None) # 空节点 入栈
if current.left is not None:
stack.append(current.left)
else:
stack.append(None)
else: # 当前节点 是空节点
preorder_list.append('#') # 使用 '#' 表示空节点
preorder_list = [str(ele) for ele in preorder_list] # leetcode 的树 的节点是 int
preorder_str = ','.join(preorder_list)
return preorder_str
def deserialize(self, preorder_str):
"""
        Deserialize a binary tree from its preorder sequence (non-recursive)
ref: https://blog.csdn.net/cyuyanenen/article/details/51589945
:param preorder:
:return:
"""
if len(preorder_str) == 0:
return None
preorder = preorder_str.split(',')
preorder = [ele.strip() for ele in preorder]
i = 0
root = TreeNode(preorder[i])
stack = []
stack.append(root)
i += 1
flag = 1
"""
        flag = 1 means the left child of the current node should be created next,
        flag = 2 means the right child should be created next,
        flag = 3 means both children of the current node are done; pop from the stack until the popped node is no longer the right child of the node on top of the stack.
"""
while i < len(preorder):
if flag == 1:
if preorder[i] == '#':
flag = 2
else:
child_left = TreeNode(preorder[i])
current = stack[-1]
current.left = child_left
stack.append(child_left)
flag = 1
elif flag == 2:
if preorder[i] == '#':
flag = 3
else:
child_right = TreeNode(preorder[i])
current = stack[-1]
current.right = child_right
stack.append(child_right)
flag = 1
elif flag == 3:
top_ele = stack.pop()
while len(stack) > 0 and stack[-1].right == top_ele:
top_ele = stack.pop()
i -= 1
flag = 2
i += 1
return root
# Codec
class BFS_Serialize:
"""
    BFS-based serialization and deserialization of a binary tree
"""
def serialize(self, root):
"""
        Serialize the binary tree into a level-order sequence
"""
if root is None:
return ''
h_list = []
p = root
queue = deque()
queue.append(p)
while len(queue) > 0:
current = queue.popleft()
if current is not None: # 当前节点不是 空节点
h_list.append(current.val)
if current.left is not None:
queue.append(current.left)
else:
queue.append(None)
if current.right is not None:
queue.append(current.right)
else:
queue.append(None) # 空节点 入栈
else: # 当前节点 是空节点
h_list.append('#') # h_list 使用 '#' 表示空节点
h_list = [str(ele) for ele in h_list] # leetcode 的树 的节点是 int
h_str = ','.join(h_list) # ',' 作为 分隔符
return h_str
def deserialize(self, h_str):
"""
        Deserialize a binary tree from its level-order sequence (non-recursive)
:param preorder:
:return:
"""
if len(h_str) == 0:
return None
h_list = h_str.split(',')
h_list = [ele.strip() for ele in h_list]
i = 0
root = TreeNode(h_list[i])
i += 1
queue = deque()
queue.append(root)
while i < len(h_list) and len(queue) > 0:
current = queue.popleft()
if h_list[i] != '#':
left_child = TreeNode(h_list[i])
current.left = left_child
queue.append(left_child)
i += 1
if h_list[i] != '#':
right_child = TreeNode(h_list[i])
current.right = right_child
queue.append(right_child)
i += 1
return root
class Solution1(object):
"""
    Binary tree represented with linked (node-based) storage
"""
def buildTree(self, preorder,inorder):
"""
        Build the tree from its preorder and inorder traversal results
:type preorder: ['a','b','c','e','d']
:type inorder: ['c','b','e','a','d']
:rtype: TreeNode
"""
self.preorder = deque(preorder)
self.inorder = deque(inorder)
return self._buildTree(0, len(inorder))
def _buildTree(self, start, end):
if start<end:
root_val=self.preorder.popleft()
print("root: ",root_val )
root=TreeNode(root_val)
index=self.inorder.index(root_val,start,end) # 在数组的位置范围: [start,end) 中寻找 root_val
root.left=self._buildTree(start,index)
root.right=self._buildTree(index+1,end)
return root
def pre_order(self,root):
if root is not None:
print(root.val)
self.pre_order(root.left)
self.pre_order(root.right)
return
def in_order_depreatured(self,root):
"""
        Non-recursive inorder traversal of the tree
:param root:
:return:
"""
stack=[root]
p=root
res=[]
while len(stack)!=0 :
while (p!=None) and (p.left!=None) and (p.val not in res): #访问过的节点不要再入栈
p = p.left
stack.append(p)
p=stack.pop()
res.append(p.val)
if p.right!=None:
p=p.right
stack.append(p)
return res
def in_order(self, root):
"""
        Non-recursive inorder traversal of the tree
:param root:
:return:
"""
stack = []
p = root
res = []
while p!=None or len(stack)!=0:
if p!=None: # p 不为空就入栈
stack.append(p)
p=p.left #指向左节点
else: # 如果p 为空就弹出
p=stack.pop() # 访问中间节点
res.append(p.val)
p=p.right # 指针指向右子树
return res
def _depth_recursion(self,root):
if root is None:
return 0
left_depth= self._depth_recursion(root.left)
right_depth=self._depth_recursion(root.right)
return max(left_depth,right_depth)+1
def _depth(self, root):
"""
        Modified level-order traversal that processes the tree level by level and returns its depth
:type root: TreeNode
:rtype: int
"""
Queue = deque()
Queue.append(root)
depth = 0
while (len(Queue) != 0):
depth += 1
n = len(Queue)
for i in range(n): # Stratified according to depth
target = Queue.popleft()
print(target.val)
print('depth: ', depth)
if target.left != None:
Queue.append(target.left)
if target.right != None:
Queue.append(target.right)
return depth
class Solution2(object):
"""
    Binary tree represented with array-based sequential storage
"""
def pre_order(self, tree_array):
"""
        Preorder traversal: root -> left -> right
:param tree_array:
:return:
"""
stack=[]
i=1
node=[tree_array[i],i]
stack.append(node)
result=[]
while ( len(stack)!=0 ):
current=stack.pop()
# print(current)
result.append(current[0])
i=current[1]
if 2*i+1<len(tree_array) and tree_array[2*i+1]!=None: # tree_array 越界 访问检查 : 2*i+1<len(tree_array)
node=[tree_array[2*i+1],2*i+1]
stack.append(node)
if 2*i<len(tree_array) and tree_array[2*i]!=None:
node = [tree_array[2 * i ], 2 * i]
stack.append(node)
return result
def post_order(self, tree_array):
"""
        Preorder traversal: root -> left -> right
        Reversed preorder visits: root -> right -> left
        Reversing that output again gives: left -> right -> root (postorder)
https://www.cnblogs.com/bjwu/p/9284534.html
:param tree_array:
:return:
"""
stack=[]
i=1
node=[tree_array[i],i]
stack.append(node)
result=[]
while ( len(stack)!=0 ):
current=stack.pop()
# print(current)
result.append(current[0])
i=current[1]
if 2*i<len(tree_array) and tree_array[2*i]!=None:
node = [tree_array[2 * i ], 2 * i]
stack.append(node)
if 2*i+1<len(tree_array) and tree_array[2*i+1]!=None: # tree_array 越界 访问检查 : 2*i+1<len(tree_array)
node=[tree_array[2*i+1],2*i+1]
stack.append(node)
return result[::-1] # 逆序输出即为 后序遍历
def in_order_deprecated(self, tree_array):
stack=[]
i=1
result=[]
while ( i < len(tree_array) and tree_array[i] != None) or (len(stack) != 0): # ( i < len(tree_array) and tree_array[i] != None) 等价于 p != None
while (i < len(tree_array) and tree_array[i] != None):
node = [tree_array[i], i]
stack.append(node)
i = 2 * i # 左子树全部进栈
if (len(stack) != 0) :
current = stack.pop() #
# print(current)
result.append(current[0])
i = current[1]
i= 2*i+1 #尝试去访问右子树
return result
def in_order(self, tree_array):
"""
        Easier-to-follow version
:param tree_array:
:return:
"""
stack=[]
i=1
result=[]
while ( i < len(tree_array) and tree_array[i] != None) or (len(stack) != 0):
if (i < len(tree_array) and tree_array[i] != None):
node = [tree_array[i], i]
stack.append(node)
i = 2 * i # 左子树全部进栈
else:
current = stack.pop() #
# print(current)
result.append(current[0])
i = current[1]
i= 2*i+1 #尝试去访问右子树
return result
def hierarchy_order(self, tree_array):
"""
        Level-order (breadth-first) traversal of the tree
:param tree_array:
:return:
"""
fifo=deque()
i=1
node=[tree_array[i],i]
fifo.appendleft(node)
result=[]
while ( len(fifo)!=0 ):
current=fifo.pop()
# print(current)
result.append(current[0])
i=current[1]
if 2*i<len(tree_array) and tree_array[2*i]!=None: # 左边
node = [tree_array[2 * i ], 2 * i]
fifo.appendleft(node)
if 2*i+1<len(tree_array) and tree_array[2*i+1]!=None: # 右边
node=[tree_array[2*i+1],2*i+1]
fifo.appendleft(node)
return result
class Test:
def test_DFS(self):
sol = DFS_Serialize_Stack()
preorder = '1,2,#,#,3,4,#,5,#,#,#'
tree = sol.deserialize(preorder)
print(sol.serialize(tree))
assert sol.serialize(tree) == preorder
preorder = ''
tree = sol.deserialize(preorder)
assert sol.serialize(tree) == preorder
preorder = '9,3,4,#,5,#,#,1,#,#,#'
tree = sol.deserialize(preorder)
print(sol.serialize(tree))
assert sol.serialize(tree) == preorder
def test_BFS(self):
sol = BFS_Serialize()
preorder = '8,6,10,5,7,9,11,#,#,#,#,#,#,#,#'
tree = sol.deserialize(preorder)
print(sol.serialize(tree))
assert sol.serialize(tree) == preorder
preorder = ''
tree = sol.deserialize(preorder)
assert sol.serialize(tree) == preorder
preorder = '8,6,10,#,#,9,11,#,#,#,#'
tree = sol.deserialize(preorder)
print(sol.serialize(tree))
assert sol.serialize(tree) == preorder
def test_solution1(self):
# solution1
preorder=['a','b','c','e','d']
inorder= ['c','b','e','a','d']
preorder=['A','B','D','F','G','C','E','H']
inorder=['F','D','G','B','A','E','H','C']
postorder= ['F','G','D','B','H','E','C','A']
solution=Solution1()
root=solution.buildTree(preorder,inorder)
solution.pre_order(root)
print(solution.in_order(root))
print(solution._depth(root))
print(solution._depth_recursion(root))
def test_solution2(self):
tree_array=[None,'A','B','C','D',None,'E',None,'F','G',None,None,None,'H']
solution2 = Solution2()
print('preorder: ',solution2.pre_order(tree_array))
print('inorder: ',solution2.in_order(tree_array))
print('postorder: ', solution2.post_order(tree_array))
print('hierarchy_order: ', solution2.hierarchy_order(tree_array))
if __name__ == "__main__":
t = Test()
t.test_BFS()
| 3.328125
| 3
|
apis_v1/documentation_source/organization_suggestion_tasks_doc.py
|
ranadeepmitra21/WeVoteServer_Ranadeep
| 44
|
12774626
|
# apis_v1/documentation_source/organization_suggestion_tasks_doc.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
def organization_suggestion_tasks_doc_template_values(url_root):
"""
Show documentation about organizationSuggestionTask
"""
required_query_parameter_list = [
{
'name': 'voter_device_id',
'value': 'string', # boolean, integer, long, string
'description': 'An 88 character unique identifier linked to a voter record on the server',
},
{
'name': 'api_key',
'value': 'string (from post, cookie, or get (in that order))', # boolean, integer, long, string
'description': 'The unique key provided to any organization using the WeVoteServer APIs',
},
{
'name': 'kind_of_suggestion_task',
'value': 'string', # boolean, integer, long, string
'description': 'Default is UPDATE_SUGGESTIONS_FROM_TWITTER_IDS_I_FOLLOW. '
'Other options include UPDATE_SUGGESTIONS_FROM_WHAT_FRIENDS_FOLLOW, '
'UPDATE_SUGGESTIONS_FROM_WHAT_FRIENDS_FOLLOW_ON_TWITTER, '
'UPDATE_SUGGESTIONS_FROM_WHAT_FRIEND_FOLLOWS, '
'UPDATE_SUGGESTIONS_FROM_WHAT_FRIEND_FOLLOWS_ON_TWITTER or UPDATE_SUGGESTIONS_ALL',
},
]
optional_query_parameter_list = [
{
'name': 'kind_of_follow_task',
'value': 'string', # boolean, integer, long, string
'description': 'Default is FOLLOW_SUGGESTIONS_FROM_TWITTER_IDS_I_FOLLOW. '
'Other options include FOLLOW_SUGGESTIONS_FROM_FRIENDS, '
'or FOLLOW_SUGGESTIONS_FROM_FRIENDS_ON_TWITTER, ',
},
]
potential_status_codes_list = [
{
'code': 'VALID_VOTER_DEVICE_ID_MISSING',
'description': 'Cannot proceed. A valid voter_device_id parameter was not included.',
},
{
'code': 'VALID_VOTER_ID_MISSING',
'description': 'Cannot proceed. A valid voter_id was not found.',
},
]
try_now_link_variables_dict = {
# 'organization_we_vote_id': 'wv85org1',
}
api_response = '{\n' \
' "status": string,\n' \
' "success": boolean,\n' \
' "voter_device_id": string (88 characters long),\n' \
'}'
template_values = {
'api_name': 'organizationSuggestionTasks',
'api_slug': 'organizationSuggestionTasks',
'api_introduction':
"This will provide list of suggested endorsers to follow. "
"These suggestions are generated from twitter ids i follow, or organization of my friends follow",
'try_now_link': 'apis_v1:organizationSuggestionTasksView',
'try_now_link_variables_dict': try_now_link_variables_dict,
'url_root': url_root,
'get_or_post': 'GET',
'required_query_parameter_list': required_query_parameter_list,
'optional_query_parameter_list': optional_query_parameter_list,
'api_response': api_response,
'api_response_notes':
"",
'potential_status_codes_list': potential_status_codes_list,
}
return template_values
| 1.664063
| 2
|
tests/integration/conftest.py
|
tonybaloney/django-xss-fuzzer
| 32
|
12774627
|
import pytest
@pytest.fixture(scope='session')
def session_capabilities(session_capabilities):
session_capabilities['loggingPrefs'] = {'browser': 'ALL'}
session_capabilities['goog:loggingPrefs'] = {'browser': 'ALL'}
return session_capabilities
@pytest.fixture
def chrome_options(chrome_options):
chrome_options.headless = True
return chrome_options
| 1.828125
| 2
|
ws-python/ex043.py
|
DerickSilva/Python
| 0
|
12774628
|
peso = float(input('Qual é seu peso? '))
altura = float(input('Qual é sua altura? '))
imc = peso / (altura * altura)
if imc < 18.5:
print(f'IMC {imc:.1f} Abaixo do Peso')
elif imc < 25:
print(f'IMC {imc:.1f} Peso Ideal')
elif imc < 30:
print(f'IMC {imc:.1f} Sobrepeso')
elif imc < 40:
print(f'IMC {imc:.1f} Obesidade')
else:
print(f'IMC {imc:.1f} Obesidade Morbida')
| 3.71875
| 4
|
examples/demo_a2c.py
|
antoine-moulin/rlberry
| 0
|
12774629
|
from rlberry.agents import A2CAgent
from rlberry.envs.classic_control import MountainCar
from rlberry.envs.benchmarks.ball_exploration import PBall2D
from rlberry.seeding import seeding
render = True
seeding.set_global_seed(1223)
for env, n_episodes, horizon in zip([PBall2D(), MountainCar()],
[400, 40000], [256, 512]):
print("Running A2C on %s" % env.name)
agent = A2CAgent(env, n_episodes=n_episodes, horizon=horizon,
gamma=0.99, learning_rate=0.001, k_epochs=4)
agent.fit()
if render:
env.enable_rendering()
state = env.reset()
for tt in range(200):
action = agent.policy(state)
next_state, reward, done, _ = env.step(action)
state = next_state
env.render()
| 2.28125
| 2
|
autogoal/datasets/movie_reviews.py
|
gmijenes/autogoal
| 0
|
12774630
|
<filename>autogoal/datasets/movie_reviews.py
import random
from autogoal.datasets import download, datapath
def load(max_examples=None):
try:
download("movie_reviews")
    except Exception:
print(
"Error loading data. This may be caused due to bad connection. Please delete badly downloaded data and retry"
)
raise
sentences = []
classes = []
path = datapath("movie_reviews")
ids = list(path.rglob("*.txt"))
random.shuffle(ids)
for fd in ids:
if "neg/" in str(fd):
cls = "neg"
else:
cls = "pos"
with fd.open() as fp:
sentences.append(fp.read())
classes.append(cls)
if max_examples and len(classes) >= max_examples:
break
return sentences, classes
def make_fn(test_size=0.25, examples=None):
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
X, y = load(examples)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)
def fitness_fn(pipeline):
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_test)
return accuracy_score(y_test, y_pred)
return fitness_fn
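# --- Usage sketch (not part of the original module) ---
# A minimal example of scoring a scikit-learn text pipeline with the fitness
# function above, assuming the movie_reviews dataset can be downloaded; the
# pipeline below is an arbitrary stand-in, not something from the original code.
if __name__ == "__main__":
    from sklearn.feature_extraction.text import CountVectorizer
    from sklearn.linear_model import LogisticRegression
    from sklearn.pipeline import make_pipeline
    fitness_fn = make_fn(test_size=0.25, examples=200)
    pipeline = make_pipeline(CountVectorizer(), LogisticRegression(max_iter=1000))
    print(fitness_fn(pipeline))  # accuracy on the held-out split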
| 3.109375
| 3
|
h2o-py/tests/testdir_apis/Data_Manipulation/pyunit_h2oH2OFrame_countmatches.py
|
vishalbelsare/h2o-3
| 6,098
|
12774631
|
<reponame>vishalbelsare/h2o-3<gh_stars>1000+
from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
from tests import pyunit_utils
import h2o
from h2o.utils.typechecks import assert_is_type
from h2o.frame import H2OFrame
def h2o_H2OFrame_countmatches():
"""
Python API test: h2o.frame.H2OFrame.countmatches(pattern)
Copied from pyunit_countmatches.py
"""
python_lists = [["what","is"], ["going", "on"], ["When", "are"], ["MeetingMeetingon", "gone"]]
h2oframe = h2o.H2OFrame(python_obj=python_lists)
matches = h2oframe.countmatches(['Wh', 'ing', 'on'])
assert_is_type(matches, H2OFrame)
assert matches.shape == h2oframe.shape, "h2o.H2OFrame.countmatches() command is not working."
assert matches.any_na_rm(), "h2o.H2OFrame.countmatches() command is not working."
nomatches = h2oframe.countmatches(['rain','pluck'])
assert not(nomatches.any_na_rm()), "h2o.H2OFrame.countmatches() command is not working."
pyunit_utils.standalone_test(h2o_H2OFrame_countmatches)
| 2.40625
| 2
|
mechlib/amech_io/parser/mech.py
|
keceli/mechdriver
| 0
|
12774632
|
""" Parses the `mechanism.dat` input file for MechDriver that contains
all of the reactions of mechanism. The format of this file corresponds
to some user-specified format.
"""
import mechanalyzer.parser.pes
def pes_dictionary(mech_str, mech_type, spc_dct, printlog=True):
""" Calls mechanalyzer to do the dictionary
"""
return mechanalyzer.parser.pes.pes_dictionary(
mech_str, mech_type, spc_dct, printlog=printlog)
| 3.09375
| 3
|
ml_code/GunshotDetection/eval_nolabel.py
|
ankitshah009/Daisy_Shooter_Localization
| 18
|
12774633
|
<reponame>ankitshah009/Daisy_Shooter_Localization
# coding=utf-8
# Given the downloaded annotation file from videoAnno (rename the file first): the test file contains filename, score and label; compute AP, precision and recall.
# annotation will be transform first based on over-half overlapping of each filename(start and end time provided).
from ChunWai import *
import os,sys
def usage():
print """
-testfile
    -imgfile     # if set, will save the plot to an image file
"""
sys.exit()
def getOverlap(s,e,sc,ec):
if((sc>e) or (s>ec)):
return None
elif((s<=ec) and (s>=sc)):
if(e>=ec):
return (s,ec)
else:
return (s,e)
elif(s<sc):
if(e>=ec):
return (sc,ec)
else:
return (sc,e)
else:
error("wtf2")
def getTime(timestr):
hour,minutes,sec = timestr.strip().split(":")
return float(hour)*60.0*60.0+float(minutes)*60.0+float(sec)
if __name__ == "__main__":
testfile,imgfile = resolveParam(['-testfile','-imgfile'])
if(cp([testfile])):
usage()
scores = {}
count = {}
for line in open(testfile,"r").readlines():
videoname,score,label = line.strip().split()
label = int(label)
stuff = os.path.splitext(videoname)[0].split("_")
filename,start,end = "_".join(stuff[:-2]),stuff[-2],stuff[-1]
if(not scores.has_key(filename)):
scores[filename] = []
scores[filename].append({"score":float(score),"mylabel":label,"start":start,"end":end})
import matplotlib
matplotlib.use('agg')
import numpy as np
import matplotlib.pyplot as plt
for filename in scores:
scores[filename].sort(key=operator.itemgetter("start"),reverse=False)
# assume a timestamp is 1 second
timestamps = [0.05,0.1,0.3,0.5,0.7,0.9,1.0] # xticks
timestamps = [one*len(scores[filename]) for one in timestamps]
plt.xticks(timestamps,[sec2time(one) for one in timestamps],rotation=50)
plt.ylim(ymin=-0.05,ymax=1)
plt.xlabel("Video Time")
plt.ylabel("Prediction Score")
plot_handles = []
thisScores = np.array([one['score'] for one in scores[filename]])
thisTimesteps = np.array(range(len(scores[filename])))
a1, = plt.plot(thisTimesteps,thisScores,'b-',label="prediction")
plot_handles.extend([a1])
plt.legend(handles=plot_handles,loc='upper right')
if(imgfile != ''):
plt.savefig(imgfile,bbox_inches="tight")
else:
plt.show()
| 2.75
| 3
|
September 2020/03-Multidimensional-Lists/Exercises/05-Snake-Moves.py
|
eclipse-ib/Software-University-Professional-Advanced-Module
| 0
|
12774634
|
from collections import deque
rows, cols = [int(_) for _ in input().split()]
string = input()
matrix = [[0 for j in range(cols)] for i in range(rows)]
# matrix = []
#
# for i in range(rows):
# matrix.append([])
# for j in range(cols):
# matrix[i].append(0)
key_word = deque(string)
count = 0
for i in range(rows):
for j in range(cols):
if not key_word:
key_word = deque(string)
matrix[i][j] = key_word.popleft()
if count % 2 != 0:
matrix[i] = matrix[i][::-1]
count += 1
print('\n'.join([''.join(map(str, m)) for m in matrix]))
| 3.34375
| 3
|
setup.py
|
bao-eng/oscc-check
| 4
|
12774635
|
<gh_stars>1-10
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
long_description = open('README.md', 'r').read()
class PyTest(TestCommand):
user_options = [("pytest-args=", "a", "Arguments to pass to pytest")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = ""
def run_tests(self):
import shlex
# import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(shlex.split(self.pytest_args))
sys.exit(errno)
setup(name='oscc-check',
version='0.0.1',
url='https://github.com/PolySync/oscc-check',
author='<NAME>',
author_email='<EMAIL>',
maintainer='PolySync Technologies',
maintainer_email='<EMAIL>',
      description='Check that your vehicle and the installed OSCC are in a good state.',
long_description=long_description,
download_url='https://github.com/PolySync/oscc-check',
packages=["oscccan"],
license='MIT',
install_requires=[
'colorama',
'docopt',
'python-can',
],
scripts=['oscc-check.py'],
tests_require=['pytest', 'hypothesis'],
test_suite="tests",
cmdclass={"test": PyTest},
classifiers=[
'Environment :: Console',
'License :: MIT License',
'Natural Language :: English',
'Operating System :: Linux',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
],
)
| 1.796875
| 2
|
src/utils/setup_django.py
|
mrts/foodbank-campaign
| 1
|
12774636
|
import os
import sys
import django
PROJDIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, PROJDIR)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "foodbank.settings")
django.setup()
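# --- Usage sketch (not part of the original module) ---
# A minimal self-check, assuming Django and the foodbank settings module are
# importable: settings.configured is True once django.setup() above has run.
if __name__ == "__main__":
    from django.conf import settings
    print(settings.configured)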
| 1.546875
| 2
|
plenum/test/run_continuously.py
|
andkononykhin/plenum
| 148
|
12774637
|
import traceback
import pytest
from plenum.test.testing_utils import setupTestLogging
setupTestLogging()
def run(test, stopOnFail=True, maxTimes=None):
count = 0
passes = 0
fails = 0
while maxTimes is None or count < maxTimes:
exitcode = pytest.main(test)
count += 1
if exitcode:
fails += 1
print("Test failed!")
traceback.print_exc()
if stopOnFail:
break
else:
passes += 1
print("Test passed.")
print("current stats: successes: {} fails: {}".format(passes, fails))
run("monitoring/test_instance_change_with_Delta.py",
stopOnFail=False, maxTimes=100)
| 2.53125
| 3
|
bsmetadata/experiments/sample.py
|
chkla/metadata
| 13
|
12774638
|
from dataclasses import dataclass
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import PreTrainedTokenizerBase
from bsmetadata.input_pipeline import DataConfig
@dataclass
class DataCollatorForCLM:
tokenizer: PreTrainedTokenizerBase
pad_to_multiple_of: int = 16
def __call__(self, batch):
batch = self.tokenizer(
[x["text"] for x in batch],
truncation=True,
padding="max_length",
max_length=512, # TODO: make this configurable
return_tensors="pt",
pad_to_multiple_of=self.pad_to_multiple_of,
)
labels = batch["input_ids"].clone()
        # force an error if there is no pad_token
# if self.tokenizer.pad_token_id is not None:
labels[labels == self.tokenizer.pad_token_id] = -100
batch["labels"] = labels
return batch
def get_dataloaders(tokenizer, cfg: DataConfig):
datasets = load_dataset("wikitext", "wikitext-2-raw-v1")
data_collator = DataCollatorForCLM(tokenizer)
train_dataloader = DataLoader(
datasets["train"],
shuffle=True,
collate_fn=data_collator,
batch_size=cfg.per_device_train_batch_size,
num_workers=1,
)
eval_dataloader = DataLoader(
datasets["validation"],
collate_fn=data_collator,
batch_size=cfg.per_device_eval_batch_size,
num_workers=1,
)
return train_dataloader, {"val": eval_dataloader}
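# --- Usage sketch (not part of the original module) ---
# A minimal example of wiring the dataloaders to a GPT-2 tokenizer. The
# DataConfig field names used below mirror the attributes accessed above and
# are assumptions about its constructor, as is the availability of the
# wikitext download.
if __name__ == "__main__":
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    tokenizer.pad_token = tokenizer.eos_token  # GPT-2 ships without a pad token
    cfg = DataConfig(per_device_train_batch_size=8, per_device_eval_batch_size=8)
    train_dataloader, eval_dataloaders = get_dataloaders(tokenizer, cfg)
    batch = next(iter(train_dataloader))
    print(batch["input_ids"].shape, batch["labels"].shape)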
| 2.46875
| 2
|
admin/migrations/0007_auto_20180304_2342.py
|
rodlukas/UP-admin
| 4
|
12774639
|
<gh_stars>1-10
# Generated by Django 2.0.2 on 2018-03-04 22:42
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("admin", "0006_auto_20180304_2334")]
operations = [
migrations.RenameField(
model_name="attendance", old_name="attendancestate", new_name="attendance_state"
)
]
| 1.679688
| 2
|
methods/smoking-behavior.py
|
wdempsey/sense2stop-lvm
| 1
|
12774640
|
<gh_stars>1-10
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %% [markdown]
# # Summary
#
# * ADD LATER
# * ADD LATER
# * ADD LATER
# %% [markdown]
# # Estimation
# %%
import pymc3 as pm
import arviz as az
import pandas as pd
import numpy as np
from datetime import datetime
import os
exec(open('../env_vars.py').read())
dir_data = os.environ['dir_data']
dir_picklejar = os.environ['dir_picklejar']
# %% [markdown]
# Only self-report data will be used to estimate time between events for now.
# %%
data_selfreport = pd.read_csv(os.path.join(os.path.realpath(dir_data), 'work_with_datapoints.csv'))
use_this_data = data_selfreport
# %% [markdown]
# Let's define the distribution of censored data.
# %%
def exponential_log_complementary_cdf(x, lam):
''' log complementary CDF of exponential distribution '''
return -lam*x
# %% [markdown]
# Let's pull out variables that will be used in all models.
# %%
censored = use_this_data['censored'].values.astype(bool)
time_to_next_event = use_this_data['time_to_next_event'].values.astype(float)
is_post_quit = use_this_data['is_post_quit'].values.astype(float)
# %% [markdown]
# Let's pull out features we have constructed.
# %%
# Features applicable to pre- and post-quit periods
day_within_period = use_this_data['day_within_period'].values.astype(float)
hours_since_previous_sr_within_day = use_this_data['hours_since_previous_sr_within_day'].values.astype(float)
hours_since_previous_sr_within_period = use_this_data['hours_since_previous_sr_within_period'].values.astype(float)
is_first_sr_within_day = use_this_data['is_first_sr_within_day'].values.astype(float)
is_first_sr_within_period = use_this_data['is_first_sr_within_period'].values.astype(float)
order_within_day = use_this_data['order_within_day'].values.astype(float)
order_within_period = use_this_data['order_within_period'].values.astype(float)
hours_since_start_of_study = use_this_data['hours_since_start_of_study'].values.astype(float)
hours_since_start_of_period = use_this_data['hours_since_start_of_period'].values.astype(float)
hour_of_day = use_this_data['hour_of_day'].values.astype(float)
sleep = use_this_data['sleep'].values.astype(float) # 1=if between 1am to 6am, 0=outside of this time
# Features applicable only to the post-quit period
is_within24hours_quit = use_this_data['is_within24hours_quit'].values.astype(float)
is_within48hours_quit = use_this_data['is_within48hours_quit'].values.astype(float)
is_within72hours_quit = use_this_data['is_within72hours_quit'].values.astype(float)
# %% [markdown]
# ## Model 1
# %%
with pm.Model() as model:
# -------------------------------------------------------------------------
# Priors
# -------------------------------------------------------------------------
beta_prequit = pm.Normal('beta_prequit', mu=0, sd=10)
beta_postquit = pm.Normal('beta_postquit', mu=0, sd=10)
beta_prequit_day = pm.Normal('beta_prequit_day', mu=0, sd=10)
beta_postquit_day = pm.Normal('beta_postquit_day', mu=0, sd=10)
# -------------------------------------------------------------------------
# Likelihood
# -------------------------------------------------------------------------
loglamb_observed = (
beta_prequit*(1-is_post_quit[~censored]) + beta_prequit_day*day_within_period[~censored]*(1-is_post_quit[~censored])
+ beta_postquit*is_post_quit[~censored] + beta_postquit_day*day_within_period[~censored]*is_post_quit[~censored]
)
lamb_observed = np.exp(loglamb_observed)
Y_hat_observed = pm.Exponential('Y_hat_observed', lam = lamb_observed, observed=time_to_next_event[~censored])
loglamb_censored = (
beta_prequit*(1-is_post_quit[censored]) + beta_prequit_day*day_within_period[censored]*(1-is_post_quit[censored])
+ beta_postquit*is_post_quit[censored] + beta_postquit_day*day_within_period[censored]*is_post_quit[censored]
)
lamb_censored = np.exp(loglamb_censored)
Y_hat_censored = pm.Potential('Y_hat_censored', exponential_log_complementary_cdf(x = time_to_next_event[censored], lam = lamb_censored))
# Sample from posterior distribution
with model:
posterior_samples = pm.sample(draws=1000, tune=1000, cores=1, init='adapt_diag', target_accept=0.90, max_treedepth=50)
# %%
# Calculate 95% credible interval
model_summary_logscale = az.summary(posterior_samples, credible_interval=.95)
model_summary_logscale = model_summary_logscale[['mean','hpd_2.5%','hpd_97.5%']]
model_summary_logscale
# %%
summary_expscale = {'mean': [np.mean(np.exp(posterior_samples['beta_prequit_day'])), np.mean(np.exp(posterior_samples['beta_postquit_day']))],
                     'LB': [np.quantile(np.exp(posterior_samples['beta_prequit_day']), q=.025), np.quantile(np.exp(posterior_samples['beta_postquit_day']), q=.025)],
'UB': [np.quantile(np.exp(posterior_samples['beta_prequit_day']), q=.975), np.quantile(np.exp(posterior_samples['beta_postquit_day']), q=.975)]}
summary_expscale = pd.DataFrame(summary_expscale)
summary_expscale.index = ['exp_beta_prequit_day','exp_beta_postquit_day']
summary_expscale
# %%
pm.traceplot(posterior_samples)
# %%
# Remove variable from workspace
del model, posterior_samples, model_summary_logscale
# %% [markdown]
# ## Model 2
# %%
feature1 = hours_since_previous_sr_within_period
# %%
with pm.Model() as model:
# -------------------------------------------------------------------------
# Priors
# -------------------------------------------------------------------------
beta_prequit = pm.Normal('beta_prequit', mu=0, sd=10)
beta_postquit = pm.Normal('beta_postquit', mu=0, sd=10)
beta_prequit_feature1 = pm.Normal('beta_prequit_feature1', mu=0, sd=10)
beta_postquit_feature1 = pm.Normal('beta_postquit_feature1', mu=0, sd=10)
# -------------------------------------------------------------------------
# Likelihood
# -------------------------------------------------------------------------
loglamb_observed = (
beta_prequit*(1-is_post_quit[~censored]) + beta_postquit*is_post_quit[~censored]
)
loglamb_observed_features = (
beta_prequit_feature1*feature1[~censored]*(1-is_post_quit[~censored]) +
beta_postquit_feature1*feature1[~censored]*is_post_quit[~censored]
)
lamb_observed = np.exp(loglamb_observed + loglamb_observed_features)
Y_hat_observed = pm.Exponential('Y_hat_observed', lam = lamb_observed, observed=time_to_next_event[~censored])
loglamb_censored = (
beta_prequit*(1-is_post_quit[censored]) + beta_postquit*is_post_quit[censored]
)
loglamb_censored_features = (
beta_prequit_feature1*feature1[censored]*(1-is_post_quit[censored]) +
beta_postquit_feature1*feature1[censored]*is_post_quit[censored]
)
lamb_censored = np.exp(loglamb_censored + loglamb_censored_features)
Y_hat_censored = pm.Potential('Y_hat_censored', exponential_log_complementary_cdf(x = time_to_next_event[censored], lam = lamb_censored))
#%%
# Sample from posterior distribution
with model:
posterior_samples = pm.sample(draws=1000, tune=1000, cores=1, init='adapt_diag', target_accept=0.90, max_treedepth=50)
# %%
model_summary_logscale = az.summary(posterior_samples, credible_interval=.95)
model_summary_logscale = model_summary_logscale[['mean','hpd_2.5%','hpd_97.5%']]
model_summary_logscale
# %%
posterior_samples_expscale_prequit_feature1 = np.exp(posterior_samples['beta_prequit_feature1'])
posterior_samples_expscale_postquit_feature1 = np.exp(posterior_samples['beta_postquit_feature1'])
model_summary_expscale = {'mean': [np.mean(posterior_samples_expscale_prequit_feature1), np.mean(posterior_samples_expscale_postquit_feature1)],
                          'LB': [np.quantile(posterior_samples_expscale_prequit_feature1, q=.025), np.quantile(posterior_samples_expscale_postquit_feature1, q=.025)],
'UB': [np.quantile(posterior_samples_expscale_prequit_feature1, q=.975), np.quantile(posterior_samples_expscale_postquit_feature1, q=.975)]}
model_summary_expscale = pd.DataFrame(model_summary_expscale)
model_summary_expscale.index = ['exp_beta_prequit_feature1', 'exp_beta_postquit_feature1']
model_summary_expscale
# %%
diff_prepost_feature1 = posterior_samples['beta_postquit_feature1'] - posterior_samples['beta_prequit_feature1']
exp_diff_prepost_feature1 = np.exp(diff_prepost_feature1)
diff_summary_expscale = {'mean': [np.mean(exp_diff_prepost_feature1)],
                         'LB': [np.quantile(exp_diff_prepost_feature1, q=.025)],
'UB': [np.quantile(exp_diff_prepost_feature1, q=.975)]}
diff_summary_expscale = pd.DataFrame(diff_summary_expscale)
diff_summary_expscale.index = ['exp_diff_prepost_feature1']
diff_summary_expscale
# %%
pm.traceplot(posterior_samples)
# %% [markdown]
# ## Model 3
# %%
feature1 = is_within48hours_quit
feature2 = hours_since_previous_sr_within_period
# %%
with pm.Model() as model:
# -------------------------------------------------------------------------
# Priors
# -------------------------------------------------------------------------
beta_prequit = pm.Normal('beta_prequit', mu=0, sd=10)
beta_postquit = pm.Normal('beta_postquit', mu=0, sd=10)
beta_postquit_feature1 = pm.Normal('beta_postquit_feature1', mu=0, sd=10)
beta_prequit_feature2 = pm.Normal('beta_prequit_feature2', mu=0, sd=10)
beta_postquit_feature2 = pm.Normal('beta_postquit_feature2', mu=0, sd=10)
beta_postquit_feature_product = pm.Normal('beta_postquit_feature_product', mu=0, sd=10)
# -------------------------------------------------------------------------
# Likelihood
# -------------------------------------------------------------------------
loglamb_observed = (
beta_prequit*(1-is_post_quit[~censored]) + beta_postquit*is_post_quit[~censored]
)
loglamb_observed_features1 = (
beta_postquit_feature1*feature1[~censored]*is_post_quit[~censored] +
beta_prequit_feature2*feature2[~censored]*(1-is_post_quit[~censored]) +
beta_postquit_feature2*feature2[~censored]*is_post_quit[~censored] +
beta_postquit_feature_product*feature1[~censored]*feature2[~censored]*is_post_quit[~censored]
)
lamb_observed = np.exp(loglamb_observed + loglamb_observed_features1)
Y_hat_observed = pm.Exponential('Y_hat_observed', lam = lamb_observed, observed=time_to_next_event[~censored])
loglamb_censored = (
beta_prequit*(1-is_post_quit[censored]) + beta_postquit*is_post_quit[censored]
)
loglamb_censored_features1 = (
beta_postquit_feature1*feature1[censored]*is_post_quit[censored] +
beta_prequit_feature2*feature2[censored]*(1-is_post_quit[censored]) +
beta_postquit_feature2*feature2[censored]*is_post_quit[censored] +
beta_postquit_feature_product*feature1[censored]*feature2[censored]*is_post_quit[censored]
)
lamb_censored = np.exp(loglamb_censored + loglamb_censored_features1)
Y_hat_censored = pm.Potential('Y_hat_censored', exponential_log_complementary_cdf(x = time_to_next_event[censored], lam = lamb_censored))
with model:
posterior_samples = pm.sample(draws=1000, tune=1000, cores=1, init='adapt_diag', target_accept=0.90, max_treedepth=50)
# %%
model_summary_logscale = az.summary(posterior_samples, credible_interval=.95)
model_summary_logscale = model_summary_logscale[['mean','hpd_2.5%','hpd_97.5%']]
model_summary_logscale
# %%
# Slope of hours since previous self-report within period:
# Difference between within first 48 hours in post-quit period vs. after first 48 hours in post-quit period
diff_feature_postquitwithin48_postquitafter48 = posterior_samples['beta_postquit_feature_product']
exp_diff_feature_postquitwithin48_postquitafter48 = np.exp(diff_feature_postquitwithin48_postquitafter48)
# Difference between within first 48 hours in post-quit period vs. pre-quit
diff_feature_postquitwithin48_prequit = posterior_samples['beta_postquit_feature2'] + posterior_samples['beta_postquit_feature_product'] - posterior_samples['beta_prequit_feature2']
exp_diff_feature_postquitwithin48_prequit = np.exp(diff_feature_postquitwithin48_prequit)
# Difference between after 48 hours in post-quit period vs. pre-quit
diff_feature_postquitafter48_prequit = posterior_samples['beta_postquit_feature2'] - posterior_samples['beta_prequit_feature2']
exp_diff_feature_postquitafter48_prequit = np.exp(diff_feature_postquitafter48_prequit)
diff_summary_expscale = {'mean': [np.mean(exp_diff_feature_postquitwithin48_postquitafter48), np.mean(exp_diff_feature_postquitwithin48_prequit), np.mean(exp_diff_feature_postquitafter48_prequit)],
                         'LB': [np.quantile(exp_diff_feature_postquitwithin48_postquitafter48, q=.025), np.quantile(exp_diff_feature_postquitwithin48_prequit, q=.025), np.quantile(exp_diff_feature_postquitafter48_prequit, q=.025)],
'UB': [np.quantile(exp_diff_feature_postquitwithin48_postquitafter48, q=.975), np.quantile(exp_diff_feature_postquitwithin48_prequit, q=.975), np.quantile(exp_diff_feature_postquitafter48_prequit, q=.975)]}
diff_summary_expscale = pd.DataFrame(diff_summary_expscale)
diff_summary_expscale.index = ['exp_diff_feature_postquitwithin48_postquitafter48','exp_diff_feature_postquitwithin48_prequit','exp_diff_feature_postquitafter48_prequit']
diff_summary_expscale
# %%
pm.traceplot(posterior_samples)
# %%
# Remove variable from workspace
del model, posterior_samples, model_summary_logscale
# %% [markdown]
# ## Model 4
# %%
feature1 = order_within_day
# %%
with pm.Model() as model:
# -------------------------------------------------------------------------
# Priors
# -------------------------------------------------------------------------
beta_prequit = pm.Normal('beta_prequit', mu=0, sd=10)
beta_postquit = pm.Normal('beta_postquit', mu=0, sd=10)
beta_prequit_feature1 = pm.Normal('beta_prequit_feature1', mu=0, sd=10)
beta_postquit_feature1 = pm.Normal('beta_postquit_feature1', mu=0, sd=10)
# -------------------------------------------------------------------------
# Likelihood
# -------------------------------------------------------------------------
loglamb_observed = (
beta_prequit*(1-is_post_quit[~censored]) + beta_postquit*is_post_quit[~censored]
)
loglamb_observed_features = (
beta_prequit_feature1*feature1[~censored]*(1-is_post_quit[~censored]) +
beta_postquit_feature1*feature1[~censored]*is_post_quit[~censored]
)
lamb_observed = np.exp(loglamb_observed + loglamb_observed_features)
Y_hat_observed = pm.Exponential('Y_hat_observed', lam = lamb_observed, observed=time_to_next_event[~censored])
loglamb_censored = (
beta_prequit*(1-is_post_quit[censored]) + beta_postquit*is_post_quit[censored]
)
loglamb_censored_features = (
beta_prequit_feature1*feature1[censored]*(1-is_post_quit[censored]) +
beta_postquit_feature1*feature1[censored]*is_post_quit[censored]
)
lamb_censored = np.exp(loglamb_censored + loglamb_censored_features)
Y_hat_censored = pm.Potential('Y_hat_censored', exponential_log_complementary_cdf(x = time_to_next_event[censored], lam = lamb_censored))
#%%
# Sample from posterior distribution
with model:
posterior_samples = pm.sample(draws=1000, tune=1000, cores=1, init='adapt_diag', target_accept=0.90, max_treedepth=50)
# %%
model_summary_logscale = az.summary(posterior_samples, credible_interval=.95)
model_summary_logscale = model_summary_logscale[['mean','hpd_2.5%','hpd_97.5%']]
model_summary_logscale
# %%
posterior_samples_expscale_prequit_feature1 = np.exp(posterior_samples['beta_prequit_feature1'])
posterior_samples_expscale_postquit_feature1 = np.exp(posterior_samples['beta_postquit_feature1'])
model_summary_expscale = {'mean': [np.mean(posterior_samples_expscale_prequit_feature1), np.mean(posterior_samples_expscale_postquit_feature1)],
                          'LB': [np.quantile(posterior_samples_expscale_prequit_feature1, q=.025), np.quantile(posterior_samples_expscale_postquit_feature1, q=.025)],
'UB': [np.quantile(posterior_samples_expscale_prequit_feature1, q=.975), np.quantile(posterior_samples_expscale_postquit_feature1, q=.975)]}
model_summary_expscale = pd.DataFrame(model_summary_expscale)
model_summary_expscale.index = ['exp_beta_prequit_feature1', 'exp_beta_postquit_feature1']
model_summary_expscale
# %%
# Difference between pre-quit and post-quit periods:
# time to first self-report
diff_prepost_feature1 = posterior_samples['beta_postquit_feature1'] - posterior_samples['beta_prequit_feature1']
exp_diff_prepost_feature1 = np.exp(diff_prepost_feature1)
diff_summary_expscale = {'mean': [np.mean(exp_diff_prepost_feature1)],
                         'LB': [np.quantile(exp_diff_prepost_feature1, q=.025)],
'UB': [np.quantile(exp_diff_prepost_feature1, q=.975)]}
diff_summary_expscale = pd.DataFrame(diff_summary_expscale)
diff_summary_expscale.index = ['exp_diff_prepost_feature1']
diff_summary_expscale
# %%
pm.traceplot(posterior_samples)
# %% [markdown]
# ## Model 5
# %%
feature1 = is_within48hours_quit
feature2 = order_within_day
# %%
with pm.Model() as model:
# -------------------------------------------------------------------------
# Priors
# -------------------------------------------------------------------------
beta_prequit = pm.Normal('beta_prequit', mu=0, sd=10)
beta_postquit = pm.Normal('beta_postquit', mu=0, sd=10)
beta_postquit_feature1 = pm.Normal('beta_postquit_feature1', mu=0, sd=10)
beta_prequit_feature2 = pm.Normal('beta_prequit_feature2', mu=0, sd=10)
beta_postquit_feature2 = pm.Normal('beta_postquit_feature2', mu=0, sd=10)
beta_postquit_feature_product = pm.Normal('beta_postquit_feature_product', mu=0, sd=10)
# -------------------------------------------------------------------------
# Likelihood
# -------------------------------------------------------------------------
loglamb_observed = (
beta_prequit*(1-is_post_quit[~censored]) + beta_postquit*is_post_quit[~censored]
)
loglamb_observed_features1 = (
beta_postquit_feature1*feature1[~censored]*is_post_quit[~censored] +
beta_prequit_feature2*feature2[~censored]*(1-is_post_quit[~censored]) +
beta_postquit_feature2*feature2[~censored]*is_post_quit[~censored] +
beta_postquit_feature_product*feature1[~censored]*feature2[~censored]*is_post_quit[~censored]
)
lamb_observed = np.exp(loglamb_observed + loglamb_observed_features1)
Y_hat_observed = pm.Exponential('Y_hat_observed', lam = lamb_observed, observed=time_to_next_event[~censored])
loglamb_censored = (
beta_prequit*(1-is_post_quit[censored]) + beta_postquit*is_post_quit[censored]
)
loglamb_censored_features1 = (
beta_postquit_feature1*feature1[censored]*is_post_quit[censored] +
beta_prequit_feature2*feature2[censored]*(1-is_post_quit[censored]) +
beta_postquit_feature2*feature2[censored]*is_post_quit[censored] +
beta_postquit_feature_product*feature1[censored]*feature2[censored]*is_post_quit[censored]
)
lamb_censored = np.exp(loglamb_censored + loglamb_censored_features1)
Y_hat_censored = pm.Potential('Y_hat_censored', exponential_log_complementary_cdf(x = time_to_next_event[censored], lam = lamb_censored))
with model:
posterior_samples = pm.sample(draws=1000, tune=1000, cores=1, init='adapt_diag', target_accept=0.90, max_treedepth=50)
# %%
model_summary_logscale = az.summary(posterior_samples, credible_interval=.95)
model_summary_logscale = model_summary_logscale[['mean','hpd_2.5%','hpd_97.5%']]
model_summary_logscale
# %%
posterior_samples_expscale_postquit_feature1 = np.exp(posterior_samples['beta_postquit_feature1'])
posterior_samples_expscale_prequit_feature2 = np.exp(posterior_samples['beta_prequit_feature2'])
posterior_samples_expscale_postquit_feature2 = np.exp(posterior_samples['beta_postquit_feature2'])
posterior_samples_expscale_postquit_feature_product = np.exp(posterior_samples['beta_postquit_feature_product'])
model_summary_expscale = {'mean': [np.mean(posterior_samples_expscale_postquit_feature1),
np.mean(posterior_samples_expscale_prequit_feature2),
np.mean(posterior_samples_expscale_postquit_feature2),
np.mean(posterior_samples_expscale_postquit_feature_product)],
                          'LB': [np.quantile(posterior_samples_expscale_postquit_feature1, q=.025),
                                 np.quantile(posterior_samples_expscale_prequit_feature2, q=.025),
                                 np.quantile(posterior_samples_expscale_postquit_feature2, q=.025),
                                 np.quantile(posterior_samples_expscale_postquit_feature_product, q=.025)],
'UB': [np.quantile(posterior_samples_expscale_postquit_feature1, q=.975),
np.quantile(posterior_samples_expscale_prequit_feature2, q=.975),
np.quantile(posterior_samples_expscale_postquit_feature2, q=.975),
np.quantile(posterior_samples_expscale_postquit_feature_product, q=.975)]}
model_summary_expscale = pd.DataFrame(model_summary_expscale)
model_summary_expscale.index = ['exp_beta_postquit_feature1','exp_beta_prequit_feature2', 'exp_beta_postquit_feature2','exp_beta_postquit_feature_product']
model_summary_expscale
# %%
# Time to first self-report within period:
# Difference between within first 48 hours in post-quit period vs. after first 48 hours in post-quit period
diff_feature_postquitwithin48_postquitafter48 = posterior_samples['beta_postquit_feature_product']
exp_diff_feature_postquitwithin48_postquitafter48 = np.exp(diff_feature_postquitwithin48_postquitafter48)
# Difference between within first 48 hours in post-quit period vs. pre-quit
diff_feature_postquitwithin48_prequit = posterior_samples['beta_postquit_feature2'] + posterior_samples['beta_postquit_feature_product'] - posterior_samples['beta_prequit_feature2']
exp_diff_feature_postquitwithin48_prequit = np.exp(diff_feature_postquitwithin48_prequit)
# Difference between after 48 hours in post-quit period vs. pre-quit
diff_feature_postquitafter48_prequit = posterior_samples['beta_postquit_feature2'] - posterior_samples['beta_prequit_feature2']
exp_diff_feature_postquitafter48_prequit = np.exp(diff_feature_postquitafter48_prequit)
diff_summary_expscale = {'mean': [np.mean(exp_diff_feature_postquitwithin48_postquitafter48), np.mean(exp_diff_feature_postquitwithin48_prequit), np.mean(exp_diff_feature_postquitafter48_prequit)],
                         'LB': [np.quantile(exp_diff_feature_postquitwithin48_postquitafter48, q=.025), np.quantile(exp_diff_feature_postquitwithin48_prequit, q=.025), np.quantile(exp_diff_feature_postquitafter48_prequit, q=.025)],
'UB': [np.quantile(exp_diff_feature_postquitwithin48_postquitafter48, q=.975), np.quantile(exp_diff_feature_postquitwithin48_prequit, q=.975), np.quantile(exp_diff_feature_postquitafter48_prequit, q=.975)]}
diff_summary_expscale = pd.DataFrame(diff_summary_expscale)
diff_summary_expscale.index = ['exp_diff_feature_postquitwithin48_postquitafter48','exp_diff_feature_postquitwithin48_prequit','exp_diff_feature_postquitafter48_prequit']
diff_summary_expscale
# %%
# Remove variable from workspace
del model, posterior_samples, model_summary_logscale
# %%
| 2.28125
| 2
|
data_construction/neg_mask_regen/detect_relation_ne_document_summary.py
|
launchnlp/cliff_summ
| 14
|
12774641
|
import argparse
from tqdm import tqdm
from spacy.tokens import DocBin
import spacy_stanza
nlp = spacy_stanza.load_pipeline("en", use_gpu=True)
def swap_one(document, summary):
    """Run the stanza pipeline over a document/summary pair, truncating the
    document if spaCy hits a RecursionError on a very long input."""
    try:
source_doc = nlp(document)
except RecursionError:
source_doc = nlp(document[:2000])
summary_doc = nlp(summary)
return source_doc, summary_doc
def main():
parser = argparse.ArgumentParser()
parser.add_argument('document')
parser.add_argument('summary')
parser.add_argument('out_prefix')
args = parser.parse_args()
with open(args.document) as f:
documents = [line.strip() for line in f]
with open(args.summary) as f:
summaries = [line.strip() for line in f]
source_doc_bin = DocBin(['LEMMA', 'POS', 'DEP', 'ENT_IOB', 'ENT_TYPE', 'IS_STOP', 'HEAD'])
summary_doc_bin = DocBin(['LEMMA', 'POS', 'DEP', 'ENT_IOB', 'ENT_TYPE', 'IS_STOP', 'HEAD'])
for document, summary in tqdm(zip(documents, summaries)):
source_doc, summary_doc = swap_one(document, summary)
source_doc_bin.add(source_doc)
summary_doc_bin.add(summary_doc)
with open(args.out_prefix + '.source', 'wb') as f:
f.write(source_doc_bin.to_bytes())
with open(args.out_prefix + '.target', 'wb') as f:
f.write(summary_doc_bin.to_bytes())
if __name__ == '__main__':
main()
| 2.609375
| 3
|
glue/qt/widgets/histogram_widget.py
|
aak65/glue
| 0
|
12774642
|
from __future__ import absolute_import, division, print_function
from functools import partial
from ...external.qt import QtGui
from ...external.qt.QtCore import Qt
from ...core import message as msg
from ...clients.histogram_client import HistogramClient
from ..widget_properties import (connect_int_spin, ButtonProperty,
FloatLineProperty,
ValueProperty)
from ..glue_toolbar import GlueToolbar
from ..mouse_mode import HRangeMode
from .data_viewer import DataViewer
from .mpl_widget import MplWidget, defer_draw
from ..qtutil import pretty_number, load_ui
__all__ = ['HistogramWidget']
WARN_SLOW = 10000000
def _hash(x):
return str(id(x))
class HistogramWidget(DataViewer):
LABEL = "Histogram"
_property_set = DataViewer._property_set + \
'component xlog ylog normed cumulative autoscale xmin xmax nbins'.split(
)
xmin = FloatLineProperty('ui.xmin', 'Minimum value')
xmax = FloatLineProperty('ui.xmax', 'Maximum value')
normed = ButtonProperty('ui.normalized_box', 'Normalized?')
autoscale = ButtonProperty('ui.autoscale_box',
'Autoscale view to histogram?')
cumulative = ButtonProperty('ui.cumulative_box', 'Cumulative?')
nbins = ValueProperty('ui.binSpinBox', 'Number of bins')
xlog = ButtonProperty('ui.xlog_box', 'Log-scale the x axis?')
ylog = ButtonProperty('ui.ylog_box', 'Log-scale the y axis?')
def __init__(self, session, parent=None):
super(HistogramWidget, self).__init__(session, parent)
self.central_widget = MplWidget()
self.setCentralWidget(self.central_widget)
self.option_widget = QtGui.QWidget()
self.ui = load_ui('histogramwidget', self.option_widget)
self._tweak_geometry()
self.client = HistogramClient(self._data,
self.central_widget.canvas.fig,
artist_container=self._container)
self._init_limits()
self.make_toolbar()
self._connect()
# maps _hash(componentID) -> componentID
self._component_hashes = {}
@staticmethod
def _get_default_tools():
return []
def _init_limits(self):
validator = QtGui.QDoubleValidator(None)
validator.setDecimals(7)
self.ui.xmin.setValidator(validator)
self.ui.xmax.setValidator(validator)
lo, hi = self.client.xlimits
self.ui.xmin.setText(str(lo))
self.ui.xmax.setText(str(hi))
def _tweak_geometry(self):
self.central_widget.resize(600, 400)
self.resize(self.central_widget.size())
def _connect(self):
ui = self.ui
cl = self.client
ui.attributeCombo.currentIndexChanged.connect(
self._set_attribute_from_combo)
ui.attributeCombo.currentIndexChanged.connect(
self._update_minmax_labels)
connect_int_spin(cl, 'nbins', ui.binSpinBox)
ui.normalized_box.toggled.connect(partial(setattr, cl, 'normed'))
ui.autoscale_box.toggled.connect(partial(setattr, cl, 'autoscale'))
ui.cumulative_box.toggled.connect(partial(setattr, cl, 'cumulative'))
ui.xlog_box.toggled.connect(partial(setattr, cl, 'xlog'))
ui.ylog_box.toggled.connect(partial(setattr, cl, 'ylog'))
ui.xmin.editingFinished.connect(self._set_limits)
ui.xmax.editingFinished.connect(self._set_limits)
@defer_draw
def _set_limits(self):
lo = float(self.ui.xmin.text())
hi = float(self.ui.xmax.text())
self.client.xlimits = lo, hi
def _update_minmax_labels(self):
lo, hi = pretty_number(self.client.xlimits)
self.ui.xmin.setText(lo)
self.ui.xmax.setText(hi)
def make_toolbar(self):
result = GlueToolbar(self.central_widget.canvas, self,
name='Histogram')
for mode in self._mouse_modes():
result.add_mode(mode)
self.addToolBar(result)
return result
def _mouse_modes(self):
axes = self.client.axes
def apply_mode(mode):
return self.apply_roi(mode.roi())
rect = HRangeMode(axes, roi_callback=apply_mode)
return [rect]
@defer_draw
def _update_attributes(self):
"""Repopulate the combo box that selects the quantity to plot"""
combo = self.ui.attributeCombo
component = self.component
new = self.client.component or component
combo.blockSignals(True)
combo.clear()
# implementation note:
# PySide doesn't robustly store python objects with setData
# use _hash(x) instead
model = QtGui.QStandardItemModel()
data_ids = set(_hash(d) for d in self._data)
self._component_hashes = dict((_hash(c), c) for d in self._data
for c in d.components)
found = False
for d in self._data:
if d not in self._container:
continue
item = QtGui.QStandardItem(d.label)
item.setData(_hash(d), role=Qt.UserRole)
assert item.data(Qt.UserRole) == _hash(d)
item.setFlags(item.flags() & ~Qt.ItemIsEnabled)
model.appendRow(item)
for c in d.visible_components:
if not d.get_component(c).numeric:
continue
if c is new:
found = True
item = QtGui.QStandardItem(c.label)
item.setData(_hash(c), role=Qt.UserRole)
model.appendRow(item)
combo.setModel(model)
# separators below data items
for i in range(combo.count()):
if combo.itemData(i) in data_ids:
combo.insertSeparator(i + 1)
combo.blockSignals(False)
if found:
self.component = new
else:
combo.setCurrentIndex(2) # skip first data + separator
self._set_attribute_from_combo()
@property
def component(self):
combo = self.ui.attributeCombo
index = combo.currentIndex()
return self._component_hashes.get(combo.itemData(index), None)
@component.setter
def component(self, component):
combo = self.ui.attributeCombo
if combo.count() == 0: # cold start problem, when restoring
self._update_attributes()
# combo.findData doesn't seem to work robustly
for i in range(combo.count()):
data = combo.itemData(i)
if data == _hash(component):
combo.setCurrentIndex(i)
return
raise IndexError("Component not present: %s" % component)
@defer_draw
def _set_attribute_from_combo(self, *args):
self.client.set_component(self.component)
self.update_window_title()
@defer_draw
def add_data(self, data):
""" Add data item to combo box.
If first addition, also update attributes """
if self.data_present(data):
return True
if data.size > WARN_SLOW and not self._confirm_large_data(data):
return False
self.client.add_layer(data)
self._update_attributes()
self._update_minmax_labels()
return True
def add_subset(self, subset):
pass
def _remove_data(self, data):
""" Remove data item from the combo box """
pass
def data_present(self, data):
return data in self._container
def register_to_hub(self, hub):
super(HistogramWidget, self).register_to_hub(hub)
self.client.register_to_hub(hub)
hub.subscribe(self,
msg.DataCollectionDeleteMessage,
handler=lambda x: self._remove_data(x.data))
hub.subscribe(self,
msg.DataUpdateMessage,
handler=lambda *args: self._update_labels())
hub.subscribe(self,
msg.ComponentsChangedMessage,
handler=lambda x: self._update_attributes())
def unregister(self, hub):
super(HistogramWidget, self).unregister(hub)
self.client.unregister(hub)
hub.unsubscribe_all(self)
@property
def window_title(self):
c = self.client.component
if c is not None:
label = str(c.label)
else:
label = 'Histogram'
return label
def _update_labels(self):
self.update_window_title()
self._update_attributes()
def __str__(self):
return "Histogram Widget"
def options_widget(self):
return self.option_widget
| 1.914063
| 2
|
worlds_worst_operator/action_sets/text_adventure_actions.py
|
nigelmathes/worlds-worst-operator
| 0
|
12774643
|
<reponame>nigelmathes/worlds-worst-operator<filename>worlds_worst_operator/action_sets/text_adventure_actions.py
import json
from dataclasses import asdict
from typing import Dict, Tuple, List
import boto3
try:
from player_data import Player
from arns import TEXT_ADVENTURE_ARN
from action_sets.common_actions import create_update_fields
except ImportError:
from ..player_data import Player
from ..arns import TEXT_ADVENTURE_ARN
from .common_actions import create_update_fields
lambda_client = boto3.client("lambda", region_name="us-east-1")
dynamodb = boto3.resource("dynamodb", region_name="us-east-1")
ActionResponse = Tuple[Player, Player, Dict, Dict, List]
def play_game(player: Player, table: dynamodb.Table) -> ActionResponse:
"""
Plays game defined in player.context using commands stored in player.history.
:param player: The original player, before actions were taken
:param table: DynamoDB table object (unused)
    :return: Original and updated Player dataclasses, two dicts of fields to update, and a message list
"""
# If player wants to quit the game, don't pass the quit action to the text
# adventure game, just end here and send the player home
quit_with_typos = ["quit", "qit", "qut", "quitt", "quuit", "quiit"]
if any(word in player.history[-1] for word in quit_with_typos):
updated_player = Player(**asdict(player))
updated_player.context = "home"
updated_player.history = []
updated_player.target = "None"
message = [f"You turn off {player.target}, returning home."]
player_updates = create_update_fields(player, updated_player)
        return player, updated_player, player_updates, player_updates, message
# If not quitting, play the text adventure game
data = {"body": {"actions": player.history, "game": player.target}}
payload = json.dumps(data)
# Invoke the text adventure lambda
response = lambda_client.invoke(
FunctionName=TEXT_ADVENTURE_ARN,
InvocationType="RequestResponse",
Payload=payload,
)
# response of the form:
# {
# "statusCode": 200,
# "body": text_adventure_result,
# "headers": {"Access-Control-Allow-Origin": "*"},
# }
response_payload = json.loads(response.get("Payload").read())
message = [json.loads(response_payload["body"])]
player_updates = {"history": player.history}
return player, player, player_updates, player_updates, message
TEXT_ADVENTURE_ACTIONS_MAP = {
"default": play_game,
}
| 2.65625
| 3
|
common/datetime_helper.py
|
wuxh123/my_bottle
| 3
|
12774644
|
<gh_stars>1-10
#!/usr/bin/env python
# coding=utf-8
import time
import datetime
def to_date(dt):
"""将时间格式化为日期字符串"""
if isinstance(dt, datetime.datetime):
return dt.strftime('%Y-%m-%d')
elif isinstance(dt, datetime.date):
return dt.strftime('%Y-%m-%d')
else:
raise Exception("日期类型错误")
def to_datetime(dt):
"""将时间格式化为日期时间字符串"""
if isinstance(dt, datetime.datetime):
return dt.strftime('%Y-%m-%d %H:%M:%S')
elif isinstance(dt, datetime.date):
return dt.strftime('%Y-%m-%d')
else:
raise Exception("日期类型错误")
def to_number(format=''):
"""当前时间转换为年月日时分秒毫秒共10位数的字符串"""
if format:
return datetime.datetime.now().strftime(format)
else:
return datetime.datetime.now().strftime('%Y%m%d%H%M%S')
def to_timestamp10():
"""获取当前时间长度为10位长度的时间戳"""
return int(time.time())
def to_timestamp13():
"""获取当前时间长度为13位长度的时间戳"""
return int(time.time() * 1000)
def timedelta(sign, dt, value):
"""
对指定时间进行加减运算,几秒、几分、几小时、几日、几周、几月、几年
sign: y = 年, m = 月, w = 周, d = 日, h = 时, n = 分钟, s = 秒
dt: 日期,只能是datetime或datetime.date类型
value: 加减的数值
return: 返回运算后的datetime类型值
"""
if not isinstance(dt, datetime.datetime) and not isinstance(dt, datetime.date):
raise Exception("日期类型错误")
if sign == 'y':
year = dt.year + value
if isinstance(dt, datetime.date):
return datetime.datetime(year, dt.month, dt.day)
elif isinstance(dt, datetime.datetime):
return datetime.datetime(year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.microsecond)
else:
return None
elif sign == 'm':
year = dt.year
month = dt.month + value
        ### If the month falls outside 1-12 after the arithmetic, adjust the year accordingly ###
        # A month total of 0 means December of the previous year
        if month == 0:
            year = year - 1
            month = 12
        else:
            # Normalize year and month; months run 1-12, so totals of 12, 24, ... must not map to 0
            year = year + (month - 1) // 12
            month = (month - 1) % 12 + 1
if isinstance(dt, datetime.date):
return datetime.datetime(year, month, dt.day)
elif isinstance(dt, datetime.datetime):
return datetime.datetime(year, month, dt.day, dt.hour, dt.minute, dt.second, dt.microsecond)
else:
return None
elif sign == 'w':
delta = datetime.timedelta(weeks=value)
elif sign == 'd':
delta = datetime.timedelta(days=value)
elif sign == 'h':
delta = datetime.timedelta(hours=value)
elif sign == 'n':
delta = datetime.timedelta(minutes=value)
elif sign == 's':
delta = datetime.timedelta(seconds=value)
else:
return None
return dt + delta
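
# Minimal usage sketch (added for illustration; the dates below are arbitrary examples):
if __name__ == '__main__':
    print(to_date(datetime.date(2020, 2, 28)))                      # 2020-02-28
    print(to_datetime(datetime.datetime(2020, 2, 28, 13, 45, 0)))   # 2020-02-28 13:45:00
    print(timedelta('d', datetime.datetime(2020, 1, 31), 1))        # 2020-02-01 00:00:00
    print(timedelta('m', datetime.datetime(2020, 11, 15), 1))       # 2020-12-15 00:00:00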
| 3.265625
| 3
|
rec_to_nwb/test/processing/test_fileSorter.py
|
jihyunbak/rec_to_nwb
| 8
|
12774645
|
from unittest import TestCase
from rec_to_nwb.processing.tools.file_sorter import FileSorter
class TestFilenameSorter(TestCase):
def setUp(self):
self.strings_to_sort = ['name01', 'name11', 'name10', 'name02', 'name21']
self.correct_order_of_strings = ['name01', 'name02', 'name10', 'name11', 'name21']
self.filenames_to_sort = ['20190718_beans_01_s1.nt0' + str(i) + '.mda' for i in range(1, 10)]
self.filenames_to_sort.extend(['20190718_beans_01_s1.nt' + str(i) + '.mda' for i in range(10, 64)])
self.file_sorter = FileSorter()
self.sorted_strings = self.file_sorter.sort_filenames(self.strings_to_sort)
self.sorted_filenames = self.file_sorter.sort_filenames(self.filenames_to_sort)
def test_string_sorting(self):
self.assertEqual(self.sorted_strings, self.correct_order_of_strings)
def test_filename_sorting(self):
self.assertEqual(self.sorted_filenames[1], '20190718_beans_01_s1.nt02.mda')
self.assertEqual(self.sorted_filenames[9], '20190718_beans_01_s1.nt10.mda')
self.assertEqual(self.sorted_filenames[18], '20190718_beans_01_s1.nt19.mda')
self.assertEqual(self.sorted_filenames[19], '20190718_beans_01_s1.nt20.mda')
| 2.765625
| 3
|
t01primeshcflcm_primekth.py
|
CherryNoddles/y1math
| 1
|
12774646
|
# What is the kth prime number?
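
# A minimal sketch of one way to answer this (illustrative only; the original file just poses
# the question, so the function below and its name are assumptions, not the course's solution):
def kth_prime(k):
    """Return the k-th prime number (1-indexed) using simple trial division."""
    count = 0
    candidate = 1
    while count < k:
        candidate += 1
        if all(candidate % d != 0 for d in range(2, int(candidate ** 0.5) + 1)):
            count += 1
    return candidate

if __name__ == '__main__':
    print(kth_prime(1))   # 2
    print(kth_prime(10))  # 29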
| 1.039063
| 1
|
Puzzles/Easy/EquivalentResistanceCircuitBuilding.py
|
Naheuldark/Codingame
| 0
|
12774647
|
import sys
import math
# Auto-generated code below aims at helping you parse
# the standard input according to the problem statement.
n = int(input())
resdict = {}
for i in range(n):
inputs = input().split()
name = inputs[0]
r = int(inputs[1])
resdict[name] = r
circuit = input().split(' ')
print(circuit, file=sys.stderr, flush=True)
print(resdict, file=sys.stderr, flush=True)
# Write an answer using print
# To debug: print("Debug messages...", file=sys.stderr, flush=True)
def process(circuit):
    # Evaluate the circuit with a stack: tokens between '(' and ')' are in series
    # (resistances add), tokens between '[' and ']' are in parallel (reciprocals add,
    # then the sum is inverted).
    s = []
    i = 0
while i < len(circuit):
op = circuit[i]
if op == ')':
res = 0.0
op2 = s.pop()
while op2 != '(':
if op2 in resdict:
res += float(resdict[op2])
else:
res += float(op2)
op2 = s.pop()
s.append(res)
elif op == ']':
res = 0.0
op2 = s.pop()
while op2 != '[':
if op2 in resdict:
res += 1.0 / float(resdict[op2])
else:
res += 1.0 / float(op2)
op2 = s.pop()
res = 1.0 / res
s.append(res)
else:
s.append(op)
print(s, file=sys.stderr, flush=True)
i += 1
return str(round(s[0], 1))
print(process(circuit))
| 3.4375
| 3
|
sqlalchemy/sqlalchemy-0.3.6+codebay/examples/collections/large_collection.py
|
nakedible/vpnease-l2tp
| 5
|
12774648
|
"""illlustrates techniques for dealing with very large collections"""
from sqlalchemy import *
meta = BoundMetaData('sqlite://', echo=True)
org_table = Table('organizations', meta,
Column('org_id', Integer, primary_key=True),
Column('org_name', String(50), nullable=False, key='name'),
mysql_engine='InnoDB')
member_table = Table('members', meta,
Column('member_id', Integer, primary_key=True),
Column('member_name', String(50), nullable=False, key='name'),
Column('org_id', Integer, ForeignKey('organizations.org_id')),
mysql_engine='InnoDB')
meta.create_all()
class Organization(object):
def __init__(self, name):
self.name = name
def find_members(self, criterion):
"""locate a subset of the members associated with this Organization"""
return object_session(self).query(Member).select(and_(member_table.c.name.like(criterion), org_table.c.org_id==self.org_id), from_obj=[org_table.join(member_table)])
class Member(object):
def __init__(self, name):
self.name = name
# note that we can also place "ON DELETE CASCADE" on the tables themselves,
# instead of using this extension
class DeleteMemberExt(MapperExtension):
"""will delete child Member objects in one pass when Organizations are deleted"""
def before_delete(self, mapper, connection, instance):
connection.execute(member_table.delete(member_table.c.org_id==instance.org_id))
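
# (Illustrative sketch, not part of the original example.) The "ON DELETE CASCADE" alternative
# mentioned above would be declared on the child table's foreign key rather than in a
# MapperExtension, roughly:
#
#     Column('org_id', Integer, ForeignKey('organizations.org_id', ondelete='CASCADE'))
#
# Whether ForeignKey accepts the ondelete keyword in this SQLAlchemy release is an assumption
# here, and the backend must actually enforce it (e.g. InnoDB tables, or SQLite with foreign
# key support enabled).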
mapper(Organization, org_table, extension=DeleteMemberExt(), properties = {
# set up the relationship with "lazy=None" so no loading occurs (even lazily),
# "cascade='all, delete-orphan'" to declare Member objects as local to their parent Organization,
# "passive_deletes=True" so that the "delete, delete-orphan" cascades do not load in the child objects
# upon deletion
'members' : relation(Member, lazy=None, passive_deletes=True, cascade="all, delete-orphan")
})
mapper(Member, member_table)
sess = create_session()
# create org with some members
org = Organization('org one')
org.members.append(Member('member one'))
org.members.append(Member('member two'))
org.members.append(Member('member three'))
sess.save(org)
print "-------------------------\nflush one - save org + 3 members"
sess.flush()
sess.clear()
# reload. load the org and some child members
print "-------------------------\nload subset of members"
org = sess.query(Organization).get(org.org_id)
members = org.find_members('%member t%')
print members
sess.clear()
# reload. create some more members and flush, without loading any of the original members
org = sess.query(Organization).get(org.org_id)
org.members.append(Member('member four'))
org.members.append(Member('member five'))
org.members.append(Member('member six'))
print "-------------------------\nflush two - save 3 more members"
sess.flush()
sess.clear()
org = sess.query(Organization).get(org.org_id)
# now delete. note that this will explicitly delete members four, five and six because they are in the session,
# but will not issue individual deletes for members one, two and three, nor will it load them.
sess.delete(org)
print "-------------------------\nflush three - delete org, delete members in one statement"
sess.flush()
| 2.9375
| 3
|
scripts/artifacts/imoHD_Chat.py
|
isoft123/iLEAPP
| 1
|
12774649
|
import sqlite3
import io
import json
import os
import shutil
import sys
import plistlib
import biplist
import nska_deserialize as nd
import scripts.artifacts.artGlobals
from packaging import version
from scripts.artifact_report import ArtifactHtmlReport
from scripts.ilapfuncs import logfunc, logdevinfo, timeline, kmlgen, tsv, is_platform_windows, open_sqlite_db_readonly
def get_imoHD_Chat(files_found, report_folder, seeker):
for file_found in files_found:
file_found = str(file_found)
if file_found.endswith('.sqlite'):
break
db = open_sqlite_db_readonly(file_found)
cursor = db.cursor()
cursor.execute('''
select
case ZIMOCHATMSG.ZTS
when 0 then ''
else datetime(ZTS/1000000000,'unixepoch')
end as "Timestamp",
ZIMOCONTACT.ZDISPLAY as "Sender Display Name",
ZIMOCHATMSG.ZALIAS as "Sender Alias",
ZIMOCONTACT.ZDIGIT_PHONE,
ZIMOCHATMSG.ZTEXT as "Message",
case ZIMOCHATMSG.ZISSENT
when 0 then 'Received'
when 1 then 'Sent'
end as "Message Status",
ZIMOCHATMSG.ZIMDATA
from ZIMOCHATMSG
left join ZIMOCONTACT ON ZIMOCONTACT.ZBUID = ZIMOCHATMSG.ZA_UID
''')
all_rows = cursor.fetchall()
usageentries = len(all_rows)
data_list = []
if usageentries > 0:
for row in all_rows:
plist = ''
timestamp = row[0]
senderName = row[1]
senderAlias = row[2]
senderPhone = row[3]
message = row[4]
messageStatus = row[5]
itemAction = ''
attachmentURL = ''
thumb = ''
plist_file_object = io.BytesIO(row[6])
if row[6] is None:
pass
else:
if row[6].find(b'NSKeyedArchiver') == -1:
if sys.version_info >= (3, 9):
plist = plistlib.load(plist_file_object)
else:
plist = biplist.readPlist(plist_file_object)
else:
try:
plist = nd.deserialize_plist(plist_file_object)
except (nd.DeserializeError, nd.biplist.NotBinaryPlistException, nd.biplist.InvalidPlistException,
nd.plistlib.InvalidFileException, nd.ccl_bplist.BplistError, ValueError, TypeError, OSError, OverflowError) as ex:
logfunc(f'Failed to read plist for {row[0]}, error was:' + str(ex))
itemAction = plist['type']
#Check for Attachments
if plist.get('objects') is not None:
attachmentName = plist['objects'][0]['object_id']
attachmentURL = "https://cdn.imoim.us/s/object/" + attachmentName + "/"
for match in files_found:
if attachmentName in match:
shutil.copy2(match, report_folder)
data_file_name = os.path.basename(match)
thumb = f'<img src="{report_folder}/{data_file_name}"></img>'
else:
attachmentURL = ''
data_list.append((timestamp, senderName, senderAlias, senderPhone, message, messageStatus, itemAction, attachmentURL, thumb))
description = 'IMO HD Chat - Messages'
report = ArtifactHtmlReport('IMO HD Chat - Messages')
report.start_artifact_report(report_folder, 'IMO HD Chat - Messages')
report.add_script()
data_headers = (
'Timestamp', 'Sender Name', 'Sender Alias', 'Sender Phone', 'Message', 'Message Status', 'Item Action',
        'Attachment URL', 'Attachment')
report.write_artifact_data_table(data_headers, data_list, file_found, html_no_escape=['Attachment'])
report.end_artifact_report()
tsvname = f'IMO HD Chat - Messages'
tsv(report_folder, data_headers, data_list, tsvname)
tlactivity = f'IMO HD Chat - Messages'
timeline(report_folder, tlactivity, data_list, data_headers)
else:
        logfunc('No IMO HD Chat - Messages data available')
cursor.execute('''
select
ZPH_NAME,
ZALIAS,
ZPHONE,
"https://cdn.imoim.us/s/object/" || ZICON_ID || "/" as "Profile Pic",
ZBUID
from ZIMOCONTACT
''')
all_rows = cursor.fetchall()
usageentries = len(all_rows)
if usageentries > 0:
description = 'IMO HD Chat - Contacts'
report = ArtifactHtmlReport('IMO HD Chat - Contacts')
report.start_artifact_report(report_folder, 'IMO HD Chat - Contacts')
report.add_script()
        data_headers = ('Contact Name','Contact Alias','Contact Phone','Profile Pic URL','User ID')
data_list = []
for row in all_rows:
data_list.append((row[0],row[1],row[2],row[3],row[4]))
report.write_artifact_data_table(data_headers, data_list, file_found)
report.end_artifact_report()
tsvname = f'IMO HD Chat - Contacts'
tsv(report_folder, data_headers, data_list, tsvname)
tlactivity = f'IMO HD Chat - Contacts'
timeline(report_folder, tlactivity, data_list, data_headers)
else:
        logfunc('No IMO HD Chat - Contacts data available')
db.close()
| 2
| 2
|
amurlevel_model/model/model.py
|
RaevskyDN/aij2020-amur-noflood-public
| 7
|
12774650
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
import tensorflow as tf
import tensorflow.keras.layers as L
import tensorflow.keras.backend as K
from .metrics import rmse,mae,mae_inference
from ..config_features import NUMERICAL_FEATURES,CATEGORICAL_FEATURES,CAT_MAP
from ..config import DAYS_FORECAST,ALL_STATIONS
def lstm_layer(hidden_dim, dropout):
return L.Bidirectional(
L.LSTM(hidden_dim,
dropout=dropout,
return_sequences=True,
kernel_initializer='orthogonal'))
class Conv1BN():
    '''
    Architecture: Conv1D -> BatchNorm -> Conv1D -> BatchNorm -> Dropout
    :param filters: int, number of filters
    :param input_shape: tuple, input shape
    :param kernel_size: int, kernel size of the 1D convolution
    :param dilation_rate: int, dilation rate of the 1D convolution
    '''
def __init__(self,filters,input_shape,kernel_size,dilation_rate):
self.conv1 = L.Conv1D(filters=filters,input_shape=input_shape,padding='same',
kernel_size=kernel_size,dilation_rate=dilation_rate)
self.drop1 = L.Dropout(0.1)
self.bn1 = L.BatchNormalization()
self.conv2 = L.Conv1D(filters=filters,padding='same',
kernel_size=kernel_size,dilation_rate=dilation_rate)
self.bn2 = L.BatchNormalization()
def __call__(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.drop1(x)
return x
def build_model(dropout=0.35, hidden_dim=256, embed_dim=1,
numerical=len(NUMERICAL_FEATURES),
categorical=len(CATEGORICAL_FEATURES)):
    '''
    Final model for predicting the water level
    :param dropout: float, dropout rate used in the model
    :param hidden_dim: int, size of the LSTM hidden layer
    :param embed_dim: int, embedding size for the categorical features
    :param numerical: int, number of numerical features
    :param categorical: int, number of categorical features
    :return: compiled model
    '''
K.clear_session()
inputs = L.Input(shape=(DAYS_FORECAST, numerical + categorical))
num_inputs = inputs[:, :, :numerical]
if categorical > 0:
embed_inputs = inputs[:, :, numerical:numerical + categorical]
embed = L.Embedding(input_dim=len(CAT_MAP), output_dim=embed_dim)(embed_inputs)
conv_embed_inputs = tf.reshape(embed, shape=(-1, embed.shape[1], embed.shape[2] * embed.shape[3]))
in1 = L.Dense(1000, activation='relu')(num_inputs)
in1 = L.BatchNormalization()(in1)
dropped1 = L.Dropout(dropout)(in1)
if categorical > 0:
reshaped = tf.concat([conv_embed_inputs, dropped1], axis=2)
else:
reshaped = dropped1
#print(f'reshaped shape {reshaped.shape}')
hidden = lstm_layer(hidden_dim, dropout)(reshaped)
hidden = lstm_layer(hidden_dim, dropout)(hidden)
#print(hidden.shape)
hidden = L.BatchNormalization()(hidden)
out1 = L.Dense(800, activation='relu')(hidden)
out1 = L.BatchNormalization()(out1)
dropped = L.Dropout(dropout)(out1)
out1 = L.Dense(800, activation='relu')(dropped)
out1 = L.BatchNormalization()(out1)
dropped2 = L.Dropout(dropout)(num_inputs)
convbn = L.Dropout(dropout)(num_inputs)
convbn = Conv1BN(filters=1000, kernel_size=3, dilation_rate=1, input_shape=convbn.shape)(convbn)
# out1 = tf.concat([out1, dropped2], axis=2)
out1 = tf.concat([out1, dropped2, convbn], axis=2)
out = L.Dense(len(ALL_STATIONS), activation='linear')(out1)
model = tf.keras.Model(inputs=inputs, outputs=out)
adam = tf.optimizers.Adam(learning_rate=0.00008)
model.compile(optimizer=adam, loss=rmse,metrics=[mae_inference, mae])
return model
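
# Minimal usage sketch (illustrative only; relies on the module's own DAYS_FORECAST and
# feature configuration being importable):
#
#     model = build_model(dropout=0.35, hidden_dim=256, embed_dim=1)
#     model.summary()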
| 2.515625
| 3
|