| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
gitools/modules/restore_backup.py
|
AliRezaBeigy/Gitools
| 6
|
12784851
|
from ..module import Module
class RestoreBackupModule(Module):
def process(self):
Module.restoreBackup()
@staticmethod
def getFlag():
return "rb"
@staticmethod
def getName():
return "Restore Backup"
@staticmethod
def getDescription():
return ""
def isVisible(self):
return Module.hasBackup()
| 2.421875
| 2
|
Lesson16/api.py
|
IslamRaslambekov/HomeWork
| 0
|
12784852
|
import requests
from pprint import pprint
def find_vacancies(parameters):
URL = 'https://www.cbr-xml-daily.ru/daily_json.js'
response = requests.get(URL).json()
usd_rate = response['Valute']['USD']['Value']
euro_rate = response['Valute']['EUR']['Value']
URL_HH = 'https://api.hh.ru/vacancies'
min_salary = list()
max_salary = list()
response = requests.get(URL_HH, params=parameters).json()
pages = response['pages']
vacancies_count = response['found']
    for page in range(pages):  # valid page indices are 0 .. pages - 1
params = {'text': parameters.get('text'),
'only_with_salary': parameters.get('only_with_salary'),
'per_page': parameters.get('per_page'),
'page': page}
response = requests.get(URL_HH, params=params).json()
for item in response['items']:
salfrom = item['salary']['from']
salto = item['salary']['to']
salcurr = item['salary']['currency']
if salcurr == 'RUR':
if salfrom is not None:
min_salary.append(salfrom)
if salto is not None:
max_salary.append(salto)
elif salcurr == 'USD':
if salfrom is not None:
min_salary.append(int(salfrom * usd_rate))
if salto is not None:
max_salary.append(int(salto * usd_rate))
elif salcurr == 'EUR':
if salfrom is not None:
min_salary.append(int(salfrom * euro_rate))
if salto is not None:
max_salary.append(int(salto * euro_rate))
data = {
'average_salary': f'{sum(min_salary) // len(min_salary)} - {sum(max_salary) // len(max_salary)}',
'vacancies_count': vacancies_count
}
return data
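if __name__ == "__main__":
    # Illustrative call only: the parameter values below are assumptions, and the request
    # needs live network access to api.hh.ru and cbr-xml-daily.ru.
    params = {"text": "python", "only_with_salary": True, "per_page": 50}
    pprint(find_vacancies(params))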
| 3.09375
| 3
|
homeassistant/components/flux_led/discovery.py
|
gptubpkCsHKzjC8fKcRXUdK6SbECPM49P5Xu46U/core
| 1
|
12784853
|
<filename>homeassistant/components/flux_led/discovery.py
"""The Flux LED/MagicLight integration discovery."""
from __future__ import annotations
import asyncio
import logging
from flux_led.aioscanner import AIOBulbScanner
from flux_led.const import ATTR_ID, ATTR_IPADDR, ATTR_MODEL, ATTR_MODEL_DESCRIPTION
from flux_led.scanner import FluxLEDDiscovery
from homeassistant import config_entries
from homeassistant.components import network
from homeassistant.core import HomeAssistant, callback
from .const import DISCOVER_SCAN_TIMEOUT, DOMAIN
_LOGGER = logging.getLogger(__name__)
@callback
def async_name_from_discovery(device: FluxLEDDiscovery) -> str:
"""Convert a flux_led discovery to a human readable name."""
mac_address = device[ATTR_ID]
if mac_address is None:
return device[ATTR_IPADDR]
short_mac = mac_address[-6:]
if device[ATTR_MODEL_DESCRIPTION]:
return f"{device[ATTR_MODEL_DESCRIPTION]} {short_mac}"
return f"{device[ATTR_MODEL]} {short_mac}"
async def async_discover_devices(
hass: HomeAssistant, timeout: int, address: str | None = None
) -> list[FluxLEDDiscovery]:
"""Discover flux led devices."""
if address:
targets = [address]
else:
targets = [
str(address)
for address in await network.async_get_ipv4_broadcast_addresses(hass)
]
scanner = AIOBulbScanner()
for idx, discovered in enumerate(
await asyncio.gather(
*[
scanner.async_scan(timeout=timeout, address=address)
for address in targets
],
return_exceptions=True,
)
):
if isinstance(discovered, Exception):
_LOGGER.debug("Scanning %s failed with error: %s", targets[idx], discovered)
continue
if not address:
return scanner.getBulbInfo()
return [
device for device in scanner.getBulbInfo() if device[ATTR_IPADDR] == address
]
async def async_discover_device(
hass: HomeAssistant, host: str
) -> FluxLEDDiscovery | None:
"""Direct discovery at a single ip instead of broadcast."""
# If we are missing the unique_id we should be able to fetch it
# from the device by doing a directed discovery at the host only
for device in await async_discover_devices(hass, DISCOVER_SCAN_TIMEOUT, host):
if device[ATTR_IPADDR] == host:
return device
return None
@callback
def async_trigger_discovery(
hass: HomeAssistant,
discovered_devices: list[FluxLEDDiscovery],
) -> None:
"""Trigger config flows for discovered devices."""
for device in discovered_devices:
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_DISCOVERY},
data={**device},
)
)
| 2.21875
| 2
|
executor/meta/env_var.py
|
asyre/bachelor_degree
| 0
|
12784854
|
import os
from typing import Optional
def is_env_var(value: str) -> bool:
return value.startswith("$")
def extract_env_var(value: str) -> Optional[str]:
    return os.environ.get(value[1:])
def must_extract_env_var(value: str) -> str:
env = extract_env_var(value)
if env is None:
raise MissingEnvVar
return env
def must_extract_env_var_if_present(value: str) -> str:
if is_env_var(value):
return must_extract_env_var(value)
return value
class MissingEnvVar(Exception):
pass
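if __name__ == "__main__":
    # Minimal usage sketch (assumes a HOME variable exists in the environment):
    # values starting with "$" are expanded, plain strings pass through unchanged.
    print(must_extract_env_var_if_present("$HOME"))
    print(must_extract_env_var_if_present("literal-value"))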
| 3.078125
| 3
|
train.py
|
yutong-xie/Depth-Estimation-based-on-CNN
| 0
|
12784855
|
import os
import time  # needed for the epoch timing calls below
import numpy as np
import torch
import torch.nn as nn
import torchvision
import PIL
from torchvision import transforms
from sklearn.metrics import average_precision_score
from PIL import Image, ImageDraw
import matplotlib.pyplot as plt
from nyu_dataloader import LoadData, NYUDataset
from model import CoarseNetwork, FineNetwork
from metrics import Metrics_Calculate
from loss import Train_Loss
# IPython magics from the original notebook; they are not valid Python in a plain .py script:
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
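# NOTE: `net`, `device` and the data loaders (train_loader, val_loader, test_loader) are
# assumed to be defined in earlier notebook cells. A minimal sketch of that setup might
# look like the following (the CoarseNetwork/FineNetwork constructor signatures are
# assumptions, not taken from model.py):
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# net = {'coarse': CoarseNetwork().to(device), 'fine': FineNetwork().to(device)}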
param =0.5 # 0 1
# # learning_rate = {'coarse': [0.001,0.001,0.001,0.001,0.001,0.1,0.1], 'fine':[0.01,0.1,0.01]}
learning_rate = 0.001
num_epochs = {'coarse':30, 'fine':30}
criterion = Train_Loss(param)
optimizer = {'coarse': torch.optim.SGD(net['coarse'].parameters(), lr = 0.001, momentum=0.9, weight_decay= 5e-4),
'fine':torch.optim.SGD(net['fine'].parameters(), lr = 0.001, momentum=0.9, weight_decay= 5e-4)}
# optimizer = {'coarse': torch.optim.Adam(net['coarse'].parameters(), lr = 0.001),
# 'fine':torch.optim.Adam(net['fine'].parameters(), lr = 0.001)}
optimizer = {}
optimizer['coarse'] = torch.optim.SGD([
{'params': net['coarse'].coarse1.parameters(), 'lr': 0.001}, {'params': net['coarse'].coarse2.parameters(), 'lr': 0.001},
{'params': net['coarse'].coarse3.parameters(), 'lr': 0.001}, {'params': net['coarse'].coarse4.parameters(), 'lr': 0.001},
{'params': net['coarse'].coarse5.parameters(), 'lr': 0.001}, {'params': net['coarse'].coarse6.parameters(), 'lr': 0.1},
{'params': net['coarse'].coarse7.parameters(), 'lr': 0.1}
], lr=0.001, momentum=0.9, weight_decay= 5e-4)
optimizer['fine'] = torch.optim.SGD([
{'params': net['fine'].fine1.parameters(), 'lr': 0.001},
{'params': net['fine'].fine2.parameters(), 'lr': 0.01},
{'params': net['fine'].fine3.parameters(), 'lr': 0.001}
], lr=0.001, momentum=0.9, weight_decay= 5e-4)
# optimizer = {}
# optimizer['coarse'] = torch.optim.Adam([
# {'params': net['coarse'].coarse1.parameters(), 'lr': 0.001}, {'params': net['coarse'].coarse2.parameters(), 'lr': 0.001},
# {'params': net['coarse'].coarse3.parameters(), 'lr': 0.001}, {'params': net['coarse'].coarse4.parameters(), 'lr': 0.001},
# {'params': net['coarse'].coarse5.parameters(), 'lr': 0.001}, {'params': net['coarse'].coarse6.parameters(), 'lr': 0.1},
# {'params': net['coarse'].coarse7.parameters(), 'lr': 0.1}
# ], lr=0.001)
# optimizer['fine'] = torch.optim.Adam([
# {'params': net['fine'].fine1.parameters(), 'lr': 0.001},
# {'params': net['fine'].fine2.parameters(), 'lr': 0.01},
# {'params': net['fine'].fine3.parameters(), 'lr': 0.001}
# ], lr=0.001)
def train(mode, net, optimizer):
best_test_loss = float("inf")
val_loss_list = []
train_loss_list = []
# Loop over the dataset for multiple epochs
for epoch in range(num_epochs[mode]):
net[mode].train()
running_loss = 0.0
start_time = time.time()
print('\nStarting epoch %d / %d' % (epoch + 1, num_epochs[mode]))
# For each mini-batch...
for inputs, labels in train_loader:
inputs, labels = inputs.to(device), labels.to(device)
optimizer[mode].zero_grad()
if mode == 'coarse':
outputs = net['coarse'](inputs)
elif mode == 'fine':
with torch.no_grad():
net['coarse'].eval()
coarse_outputs = net['coarse'](inputs)
outputs = net['fine'](inputs, coarse_outputs.detach())
loss = criterion(outputs, labels)
loss.backward()
optimizer[mode].step()
running_loss += loss.item()
running_loss /= len(train_loader)
train_loss_list.append(running_loss)
# save model every 5 epochs
# if epoch %5 == 4 and load_network_path is not None:
# torch.save(net[mode].state_dict(), load_network_path[mode])
# evaluate the network on the validation dataset
with torch.no_grad():
val_loss = 0.0
net[mode].eval()
for inputs, labels in val_loader:
inputs, labels = inputs.to(device), labels.to(device)
if mode == 'coarse':
outputs = net['coarse'](inputs)
elif mode == 'fine':
net['coarse'].eval()
coarse_outputs = net['coarse'](inputs)
outputs = net['fine'](inputs, coarse_outputs)
loss = criterion(outputs,labels)
val_loss += loss.item()
val_loss /= len(val_loader)
val_loss_list.append(val_loss)
# Metrics: t1, t2, t3, abs_error, squared_error, rmse_linear, rmse_log
t1, t2, t3, abs_error, squared_error, rmse_linear, rmse_log = Metrics_Calculate(outputs, labels)
print("epoch:", epoch + 1, ", training loss:", running_loss, "validation loss:", val_loss)
if epoch % 10 == 9:
print("\n------------Validation--------------")
print("Threshold < 1.25:", t1)
print("Threshold < 1.25^2:", t2)
print("Threshold < 1.25^3:", t3)
print("abs_relative_difference:", abs_error.item())
print("squared_relative_difference:", squared_error.item())
print("RMSE (linear):", rmse_linear.item())
print("RMSE (log):", rmse_log.item())
print("RMSE (log, scale inv.):", val_loss)
print("---------------------------------------")
# training_time = time.time() - start_time
# print("Training time: %d min %d s"% (training_time//60, training_time % 60))
return net, train_loss_list, val_loss_list
# Train the coarse network
net, train_losses, val_losses= train('coarse', net, optimizer)
plot_loss(train_losses, val_losses)
# Train the fine network
net, train_losses, val_losses= train('fine', net, optimizer)
plot_loss(train_losses, val_losses)
# Test the model and output samples
test_loss = 0.0
for inputs, labels in test_loader:
inputs, labels = inputs.to(device), labels.to(device)
net["coarse"].eval()
net["fine"].eval()
with torch.no_grad():
coarse_outputs = net['coarse'](inputs)
fine_outputs = net['fine'](inputs, coarse_outputs)
loss = criterion(fine_outputs, labels)
test_loss += loss.item()
test_loss /= len(test_loader)
print("Test loss: ", test_loss)
t1, t2, t3, abs_error, squared_error, rmse_linear, rmse_log = Metrics_Calculate(fine_outputs, labels)
print("\n------------Validation--------------")
print("Threshold < 1.25:", t1)
print("Threshold < 1.25^2:", t2)
print("Threshold < 1.25^3:", t3)
print("abs_relative_difference:", abs_error.item())
print("squared_relative_difference:", squared_error.item())
print("RMSE (linear):", rmse_linear.item())
print("RMSE (log):", rmse_log.item())
print("RMSE (log, scale inv.):", test_loss)
print("---------------------------------------")
| 2.09375
| 2
|
job/SLURM/Opuntia.py
|
martintb/typyQ
| 0
|
12784856
|
<reponame>martintb/typyQ<filename>job/SLURM/Opuntia.py
from SLURM import SLURMJob
class OpuntiaJob(SLURMJob):
#no specialization needed!
pass
| 1.257813
| 1
|
ddpcr/app/urls.py
|
clinical-genomics-uppsala/where_my_assys_at
| 0
|
12784857
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.Index.as_view(), name='index'),
path('assays/', views.AssayList.as_view(), name='assays'),
path('assays/create/', views.AssayCreate.as_view(), name='assay-create'),
path('assays/upload/', views.AssaysCreate.as_view(), name="assays-create"),
path('assays/update/<int:pk>/', views.AssayUpdate.as_view(), name='assay-update'),
path("lots/", views.LotList.as_view(), name="lots"),
path("lots/update/<int:pk>", views.LotUpdate.as_view(), name="lot-update"),
path("lots/order/", views.LotOrderList.as_view(), name="lot-order-list"),
path("lots/order/<int:pk>", views.LotOrder.as_view(), name="lot-order"),
path("lots/scan/", views.LotScanList.as_view(), name="lot-scan-list"),
path("lots/scan/<int:pk>", views.LotScan.as_view(), name="lot-scan"),
path("lots/validate/", views.LotValidateList.as_view(), name="lot-validate-list"),
path("lots/validate/<int:pk>", views.LotValidate.as_view(), name="lot-validate"),
path("lots/activate/", views.LotActivateList.as_view(), name="lot-activate-list"),
path("lots/activate/<int:pk>", views.LotActivate.as_view(), name="lot-activate"),
path("lots/lowvol/", views.LotLowVolumeList.as_view(), name="lot-low-volume-list"),
path("lots/lowvol/<int:pk>", views.LotLowVolume.as_view(), name="lot-low-volume"),
path("lots/inactivate/", views.LotInactivateList.as_view(), name="lot-inactivate-list"),
path("lots/inactivate/<int:pk>", views.LotInactivate.as_view(), name="lot-inactivate"),
path('patients/', views.PatientList.as_view(), name='patients'),
path('patients/create/', views.PatientCreate.as_view(), name='patient-create'),
path('patients/upload/', views.PatientsCreate.as_view(), name="patients-create"),
path('patients/update/<int:pk>/', views.PatientUpdate.as_view(), name='patient-update'),
]
| 1.898438
| 2
|
wards/forms.py
|
Naomi-sigu/awwwards
| 1
|
12784858
|
<filename>wards/forms.py<gh_stars>1-10
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from .models import Profile, Projects
class UpdateUser(forms.ModelForm):
email = forms.EmailField()
class Meta:
model = User
fields = ['username','email']
class UpdateProfile(forms.ModelForm):
class Meta:
model = Profile
fields = ['profile_pic','bio']
class ProjectsForm(forms.ModelForm):
class Meta:
model = Projects
fields = ['title', 'image', 'project_description', 'user', 'site', 'country']
| 2.015625
| 2
|
dm_irods/list.py
|
sara-nl/surfsara-dmf-irods-client
| 2
|
12784859
|
<reponame>sara-nl/surfsara-dmf-irods-client
import sys
import atexit
import json
import time
from argparse import ArgumentParser
from .socket_server.client import Client
from .socket_server.server import ReturnCode
from .server import ensure_daemon_is_running
from .server import DmIRodsServer
from .cprint import terminal_erase
from .cprint import terminal_home
from .cprint import print_request_error
from .table import Table
from .table import get_term_size
WATCH_DELAY = 2
def dm_ilist(argv=sys.argv[1:]):
parser = ArgumentParser(description='List files in archive.')
    help_format = ('Configure columns to be displayed. ' +
                   'Examples:\n' +
'dmf,time,status,mod,file,local_file (default)\n' +
'dmf,time,status,mod,file:20,local_file:20')
help_watch = 'display the list and refresh screen automatically'
parser.add_argument('--format',
type=str,
default='dmf,time,status,mod,file,local_file',
help=help_format)
parser.add_argument('--limit',
type=int,
help='limit number of items to be listed')
parser.add_argument('--watch', '-w',
action='store_true',
help=help_watch)
parser.add_argument('--active', '-a',
action='store_true',
help='only active objects')
args = parser.parse_args(argv)
ensure_daemon_is_running()
client = Client(DmIRodsServer.get_socket_file())
if args.watch:
terminal_erase()
if args.limit is None:
(columns, lines) = get_term_size()
args.limit = lines - 3
atexit.register(terminal_erase)
while True:
table = Table(format=args.format)
for code, result in client.request_all({"list": True,
"all": True,
"filter": {"active":
args.active},
"limit": args.limit}):
if code != ReturnCode.OK:
print_request_error(code, result)
sys.exit(8)
table.print_row(json.loads(result))
if args.watch:
            time.sleep(WATCH_DELAY)
terminal_home()
else:
break
if __name__ == "__main__":
dm_ilist()
| 2.234375
| 2
|
dianping/dianping/spiders/dpBaseSpider.py
|
MircroCode/dpSpider
| 0
|
12784860
|
#!/usr/local/bin/python
#-*-coding:utf8-*-
__author__ = 'youjiezeng'
import random
from scrapy.spider import BaseSpider
from scrapy.spider import Spider
from scrapy.contrib.spiders import CrawlSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request
from scrapy.http import Response
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
import sys
sys.path.append('../items')
reload(sys)
from scrapy.contrib.downloadermiddleware.useragent import UserAgentMiddleware
try:
from dianping.items import DianpingItem
except Exception,e:
print e
import re
from scrapy.exceptions import CloseSpider
from scrapy.selector import Selector
'''
this is a way to run spider in cmdline
scrapy runspider my_spider.py
Points to improve:
1. Add a delay between crawler requests
2. Enable cookies to avoid getting banned
3. Rotate the user-agent randomly to avoid getting banned
4. Move the encoding work into the pipeline
'''
'''
It’s a derived class of BaseSpider which has three fields and one method (all are required):
name: name of your spider, which is used to launch the spider (will mention later). You can name it whatever you want. According to my taste, I will call it "nettuts".
allowed_domains: a list of domains the crawler is allowed to crawl. Any domain not in this list is not available for crawling.
start_urls: a list of URLs, which will be the roots of later crawls.
parse(self, response): main method which is invoked by BaseSpider and contains main logic of our crawler.'''
class DpSpider(BaseSpider):
from scrapy.contrib.spiders import Rule
from scrapy.contrib.linkextractors.lxmlhtml import LxmlParserLinkExtractor
name = 'dianping'
allowed_domains = ['dianping.com']
start_urls = ['http://www.dianping.com/search/category/2/20/g120r1481']
# rules = [Rule(LxmlParserLinkExtractor(['shop/\d+']), 'parse')]
# rules = [Rule(SgmlLinkExtractor(allow=('/shop'),restrict_xpaths=('//@href')), callback='parse',follow=True)]
def parse(self, response):
shop = DianpingItem()
status_code = response.status
        if status_code == 403:  # close the spider when it has been banned
raise CloseSpider('======== SPIDER WAS FORBIDDEN =========')
# htmlData = HtmlXPathSelector(response)
# shop_names = htmlData.select('//h1[@class="shop-name"]/text()').extract()
# street_addresses = htmlData.select('//span[@itemprop="street-address"]/text()').extract()
# shop_tels = htmlData.select('//span[@class="info-name" and text()="'+u"电话:"+'"]/../text()[2]').extract()
# open_times = htmlData.select('//p[@class="info info-indent"]/span[text()="'+u"营业时间:"+'"]/../span[2]/text()').extract()
# shop_tags = htmlData.select('//span[@class=item]/a[@rel="tag"]/text()').extract()
# scripts = htmlData.select('//script/text()').extract()
# urls = htmlData.select('//attribute::href').extract()
htmlData = Selector(response)
shop_names = htmlData.xpath('//h1[@class="shop-name"]/text()').extract()
street_addresses = htmlData.xpath('//span[@itemprop="street-address"]/text()').extract()
shop_tels = htmlData.xpath('//span[@class="info-name" and text()="'+u"电话:"+'"]/../text()[2]').extract()
open_times = htmlData.xpath('//p[@class="info info-indent"]/span[text()="'+u"营业时间:"+'"]/../span[2]/text()').extract()
        shop_tags = htmlData.xpath('//span[@class="item"]/a[@rel="tag"]/text()').extract()
scripts = htmlData.xpath('//script/text()').extract()
urls = htmlData.xpath('//attribute::href').extract()
        # the scraped data contains many newlines and quotation marks; strip them out here
shop['shop_name'] = str(shop_names[0].encode('utf8')).replace("\n",' ').strip("\"").strip() if len(shop_names)>0 else ''
shop['street_address'] = str(street_addresses[0].encode('utf8')).replace("\n",' ').strip("\"").strip() if len(street_addresses)>0 else ''
shop['shop_tel'] = str(shop_tels[0].encode('utf8')).replace("\n",' ').strip("\"").strip() if len(shop_tels)>0 else ''
shop['open_time'] = str(open_times[0].encode('utf8')).replace("\n",' ').strip("\"").strip() if len(open_times)>0 else ''
shop['shop_tag'] = str(shop_tags[0].encode('utf8')).replace("\n",' ').strip("\"").strip() if len(shop_tags)>0 else ''
pat = re.compile('lng:[0-9.]+,lat:[0-9.]+')
latAndLngList = [pat.findall(src)[0] for src in scripts if pat.findall(src)]
latAndLng = latAndLngList[0] if len(latAndLngList)>0 else ''
latIdx = latAndLng.find('lat:')
lat = latAndLng[latIdx+4:]
lng = latAndLng[4:latIdx-1]
shop['shop_lat'] = lat
shop['shop_lng'] = lng
if lat != '' and lng != '':
yield shop
currentUrl = response.url
domainUrl = 'http://www.dianping.com'
# urls = htmlData.select('//@href').extract()
urlPattern = re.compile('shop/[0-9]+')
for url in urls:
if not urlPattern.findall(url):
continue
if str(url.encode('utf8')).startswith('http'):
pass
elif str(url.encode('utf8')).startswith('/'):
url = domainUrl+url
else:
url = currentUrl + url
print '------------url:',url
yield Request(url,callback=self.parse)
| 2.671875
| 3
|
google/colab/_import_magics.py
|
figufema/TesteClone
| 1,521
|
12784861
|
<filename>google/colab/_import_magics.py
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Allow magics to be declared without forcing an import.
This module allows us to declare a magic will be available while delaying the
import of the associated package. The primary purpose is to avoid importing too
many packages at startup, as it complicates package installation for users.
Note that importing the original module will *replace* these registrations, as
magics are still being registered in their original modules.
In addition, the IPython getdoc() function allows us to lazily request help on
a magic -- again, requesting help on a specific magic will import the module
where that magic resides.
For general Python objects or functions, this might be dangerous -- however,
magics are special, in that they're not represented by a Python object, so
there's no danger that overwriting the name -> function mapping will cause
trouble later on. The only user-visible aspect is that the source reference in
the help will update from this module to the actual importing module after the
first use.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
# IPython requires get_ipython to be available as a local variable wherever you
# want to register a magic, in an attempt to prevent you from registering
# magics before the IPython magic machinery is loaded. So we need to directly
# import the symbol, instead of just the module.
from IPython import get_ipython
from IPython.core import magic
def _load_extension(module):
get_ipython().extension_manager.load_extension(module)
def _get_extension_magic(name, module, magic_type, magic_module_loader):
magic_module_loader(module)
m = get_ipython().magics_manager.magics[magic_type][name]
if m.__module__ == __name__:
raise ValueError('No %s magic named "%s" found in "%s"' %
(magic_type, name, module))
return m
def _declare_line_magic(name, module, magic_module_loader):
"""Declare a line magic called name in module."""
# If the module or extension has already been imported, don't overwrite the
# existing definition.
if module in sys.modules or module in get_ipython().extension_manager.loaded:
return
def impl(line, **kwargs):
return _get_extension_magic(name, module, 'line',
magic_module_loader)(line, **kwargs)
# pylint: disable=g-long-lambda
impl.getdoc = lambda: _get_extension_magic(name, module, 'line',
magic_module_loader).__doc__
magic.register_line_magic(name)(impl)
def _declare_cell_magic(name, module, magic_module_loader):
"""Declare a cell magic called name in module."""
# If the module or extension has already been imported, don't overwrite the
# existing definition.
if module in sys.modules or module in get_ipython().extension_manager.loaded:
return
def impl(line, cell, **kwargs):
return _get_extension_magic(name, module, 'cell',
magic_module_loader)(line, cell, **kwargs)
# pylint: disable=g-long-lambda
impl.getdoc = lambda: _get_extension_magic(name, module, 'cell',
magic_module_loader).__doc__
magic.register_cell_magic(name)(impl)
def _declare_colabx_magics():
if get_ipython():
_declare_cell_magic('bigquery', 'google.cloud.bigquery', _load_extension)
| 2.109375
| 2
|
fits2png_example/primitives/simple_fits_reader.py
|
Keck-DataReductionPipelines/keckdrpframework
| 0
|
12784862
|
"""
Example to read a FITS file.
Created on Jul 9, 2019
Be aware that hdus.close () needs to be called to limit the number of open files at a given time.
@author: skwok
"""
import astropy.io.fits as pf
from astropy.utils.exceptions import AstropyWarning
import warnings
import numpy as np
from keckdrpframework.models.arguments import Arguments
from keckdrpframework.primitives.base_primitive import BasePrimitive
def open_nowarning(filename):
with warnings.catch_warnings():
warnings.simplefilter("ignore", AstropyWarning)
return pf.open(filename, memmap=False)
class SimpleFitsReader_LRIS(BasePrimitive):
def __init__(self, action, context):
"""
Initializes the super class.
"""
BasePrimitive.__init__(self, action, context)
def _perform(self):
"""
Expects action.args.name as fits file name
Returns HDUs or (later) data model
"""
name = self.action.args.name
self.logger.debug(f"Reading {name}")
out_args = Arguments()
out_args.name = name
out_args.img = self.readData(name)
return out_args
def readData(self, name, cutout=True):
"""
Reads FITS file, mostly from KECK instruments.
If there are multiple HDUs, the image is assembled according to
        the keywords DETSEC and DATASEC.
        Otherwise hdus[0].data is returned.
        If cutout is True, then only the non-zero portion is returned.
"""
with open_nowarning(name) as hdus:
if len(hdus) == 1:
return hdus[0].data
else:
imgBuf = hdus[1].data
for hdu in hdus[2:]:
imgBuf = np.concatenate((imgBuf, hdu.data), 1)
return imgBuf
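if __name__ == "__main__":
    # Illustrative only: "example.fits" is a placeholder path, not a file shipped with the framework.
    with open_nowarning("example.fits") as hdus:
        print(len(hdus), hdus[0].header.get("OBJECT"))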
| 3.015625
| 3
|
ax1500-poc.py
|
marcnewlin/ax1500-crypto-client
| 14
|
12784863
|
<reponame>marcnewlin/ax1500-crypto-client<gh_stars>10-100
#!/usr/bin/env python3
import base64
import binascii
import hashlib
import json
import logging
import re
import requests
from Crypto.Cipher import AES, PKCS1_v1_5
from Crypto.PublicKey import RSA
from pprint import pprint
ROUTER_HOST = "192.168.0.1"
ROUTER_PASSWORD = "password"
class Client(object):
def __init__(self, password, sig_priv=None, enc_priv=None):
# get a requests session
self.session = requests.Session()
# setup crypto
self.password = password
self.init_aes()
self.init_rsa()
# build the username/password hash
h = hashlib.md5()
h.update(b"admin%s" % self.password.encode())
# build the signature string
self.sig_base = b"k=%s&i=%s&h=%s&s=" % (self.aes_key, self.aes_iv, h.hexdigest().encode())
# login
self.stok = ""
self.login()
def decrypt_aes(self, ciphertext):
cipher = AES.new(self.aes_key, AES.MODE_CBC, iv=self.aes_iv)
plaintext = cipher.decrypt(ciphertext)
return plaintext
def encrypt_aes(self, plaintext):
cipher = AES.new(self.aes_key, AES.MODE_CBC, iv=self.aes_iv)
ciphertext = cipher.encrypt(plaintext)
return ciphertext
def get_signature(self, datalen):
# plaintext signature string
ss = b"%s%d" % (self.sig_base, (self.seq+datalen))
# encrypt using the 512-bit public key
sig = b""
for x in range(0, len(ss), 53):
chunk = ss[x:x+53]
            sig += self.sig_cipher.encrypt(chunk)
sig = binascii.hexlify(sig)
return sig
def send_encrypted_command(self, cmd, url):
# encrypt the command (AES) and then base64-encode
pc = chr(16 - (len(cmd) % 16))
while len(cmd) % 16 != 0:
cmd += pc.encode()
cmd = self.encrypt_aes(cmd)
cmd = base64.b64encode(cmd)
# get the signature for the current sequence number
sig = self.get_signature(len(cmd))
# build the POST data
post_data = { "sign": sig, "data": cmd }
# send the request
res = self.session.post("http://%s/cgi-bin/luci/;stok=%s%s" % (ROUTER_HOST, self.stok, url), data=post_data)
# parse and decrypt the response
data = json.loads(res.content)
data_raw = base64.b64decode(data["data"])
data = self.decrypt_aes(data_raw)
if data[-1] < 16:
data = data[:-data[-1]]
data = json.loads(data)
return data
def login(self):
# build the login command and encrypt with AES
login_cmd = b"password=%s&operation=login" % binascii.hexlify(self.enc_cipher.encrypt(self.password.encode()))
# send the command
data = self.send_encrypted_command(login_cmd, "/login?form=login")
# process the response
if data["success"] != True:
raise Exception("Login failure!")
self.stok = data["data"]["stok"]
logging.info("Logged in successfully!")
def init_rsa(self, enc_priv=None, sig_priv=None):
# request the signature public key and sequence number
url = "http://%s/cgi-bin/luci/;stok=/login?form=auth" % ROUTER_HOST
res = self.session.post(url, data={"operation":"read"})
data = json.loads(res.content)
self.sig_pub = int.from_bytes(binascii.unhexlify(data["data"]["key"][0]), "big")
self.seq = data["data"]["seq"]
# request the data public key
url = "http://%s/cgi-bin/luci/;stok=/login?form=keys" % ROUTER_HOST
res = self.session.post(url, data={"operation":"read"})
data = json.loads(res.content)
self.enc_pub = int.from_bytes(binascii.unhexlify(data["data"]["password"][0]), "big")
# setup the data cipher
self.enc_key = RSA.construct((self.enc_pub, 65537))
if enc_priv is not None:
self.enc_priv = enc_priv
self.enc_key = RSA.construct((self.enc_pub, 65537, self.enc_priv))
self.enc_cipher = PKCS1_v1_5.new(self.enc_key)
# setup the signature cipher
self.sig_key = RSA.construct((self.sig_pub, 65537))
if sig_priv is not None:
self.sig_key = RSA.construct((self.sig_pub, 65537, sig_priv))
self.sig_cipher = PKCS1_v1_5.new(self.sig_key)
def init_aes(self):
# request tpEncrypt.js and parse out the default AES key/IV
url = "http://%s/webpages/js/libs/tpEncrypt.js" % ROUTER_HOST
res = self.session.get(url)
self.aes_key = re.search(r"t=\"(\d{16})\"", res.content.decode()).group(1).encode()
self.aes_iv = re.search(r"e=\"(\d{16})\"", res.content.decode()).group(1).encode()
# setup the cipher
self.aes_cipher = AES.new(self.aes_key, AES.MODE_CBC, iv=self.aes_iv)
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG, format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt="%Y-%m-%d %H:%M:%S")
# instantiate a Client instance and login
client = Client(ROUTER_PASSWORD)
# read out the aggregate status data
ep = "/admin/status?form=all"
ret = client.send_encrypted_command(b"operation=read", ep)
pprint(ret)
| 2.546875
| 3
|
Exercises.py
|
KA-Advocates/KATranslationCheck
| 1
|
12784864
|
<filename>Exercises.py
#!/usr/bin/env python3
"""
Utilities regarding KA exercises
"""
import re
import os
from collections import defaultdict
def findFilenameToExercisesMapping(lang="de"):
"""
Find a map from filepath to a set of exercises.
The links are extracted mainly from the tcomment section.
"""
linkRegex = re.compile(r"http://translate\.khanacademy\.org/translate/content/items\?exercises=([^\"#]+)")
fileToExercises = defaultdict(set)
for (curdir, _, files) in os.walk(lang):
for f in files:
#Ignore non-PO files
if not f.endswith(".po") and not f.endswith(".pot"): continue
#Add to list of files to process
path = os.path.join(curdir, f)
with open(path) as infile:
for line in infile:
for hit in linkRegex.findall(line):
fileToExercises[path].add(hit)
return fileToExercises
if __name__ == "__main__":
fileToExercises = findFilenameToExercisesMapping()
print(fileToExercises)
| 2.875
| 3
|
meldnafen/retroarch.py
|
cecton/meldnafen
| 0
|
12784865
|
<reponame>cecton/meldnafen<gh_stars>0
import re
from tempfile import NamedTemporaryFile
def prepare(command, controls, settings):
with NamedTemporaryFile('wt', prefix='retroarch-', delete=False) as fh:
for player, player_controls in sorted(controls.items()):
write_retroarch_controls(fh, player, player_controls)
fh.write("\n")
fh.write("input_autodetect_enable = false\n")
fh.write("video_force_aspect = true\n")
fh.write("video_scale_integer = true\n")
fh.write("video_smooth = {}\n".format(
"true" if settings['smooth'] else "false"))
return command + ['--appendconfig', fh.name]
def write_retroarch_controls(fileobj, player, controls):
for k, v in controls.items():
if player == "1" and re.search(r"^(enable_hotkey|menu_toggle)", k):
fileobj.write("input_{} = {}\n".format(k, v))
else:
fileobj.write("input_player{}_{} = {}\n".format(player, k, v))
| 2.359375
| 2
|
scripts/experiment/__unfinished_gan_like_model/pytorch_stuff/models/timg_denoise.py
|
shantanu-gupta/spad-timg-denoise
| 0
|
12784866
|
""" timg_denoise.py
"""
import numpy as np
import torch
import torch.nn as nn
class Timg_DenoiseNet_LinT_1Layer(nn.Module):
def __init__(self):
super(Timg_DenoiseNet_LinT_1Layer, self).__init__()
self.C = 64
self.K = 13
self.centre = 3/255.0
self.scale = 2.0
self.conv1 = nn.Conv2d(1, self.C, self.K, padding=self.K//2)
self.norm1 = nn.BatchNorm2d(self.C)
self.relu1 = nn.ReLU()
self.comb = nn.Conv2d(self.C, 1, 1)
# just need time to be above the minimum
self.fix_range_t = nn.Threshold(1/255.0, 1/255.0)
# nn.init.dirac_(self.conv1.weight)
def forward(self, t):
t = self.scale * (t - self.centre)
t = self.conv1(t)
t = self.relu1(t)
t = self.comb(t)
t = self.fix_range_t(t)
return t
class Timg_DenoiseNet_LinT(nn.Module):
def __init__(self, Tmin=1e-3, Tmax=1e3):
super(Timg_DenoiseNet_LinT, self).__init__()
self.C = 64
self.Tmin = Tmin
self.Tmax = Tmax
self.Tmid = 1
self.Tscale = self.Tmid - self.Tmin
self.conv1 = nn.Conv2d(1, self.C, 5, padding=2)
self.relu1 = nn.ReLU()
self.conv2 = nn.Conv2d(self.C, self.C, 5, padding=2)
self.bn2 = nn.BatchNorm2d(self.C)
self.relu2 = nn.ReLU()
self.conv3 = nn.Conv2d(self.C, self.C, 5, padding=2)
self.bn3 = nn.BatchNorm2d(self.C)
self.relu3 = nn.ReLU()
self.conv4 = nn.Conv2d(self.C, self.C, 5, padding=2)
self.bn4 = nn.BatchNorm2d(self.C)
self.relu4 = nn.ReLU()
self.conv5 = nn.Conv2d(self.C, self.C, 5, padding=2)
self.bn5 = nn.BatchNorm2d(self.C)
self.relu5 = nn.ReLU()
self.comb = nn.Conv2d(self.C, 1, 1)
self.fix_range1 = nn.Hardtanh(min_val=self.Tmin, max_val=self.Tmax)
self.fix_range2 = nn.Hardtanh(min_val=0, max_val=1)
def forward(self, t):
t = (1.0/self.Tscale) * (t - self.Tmid)
t = self.conv1(t)
t = self.relu1(t)
t = self.conv2(t)
t = self.bn2(t)
t = self.relu2(t)
t = self.conv3(t)
t = self.bn3(t)
t = self.relu3(t)
t = self.conv4(t)
t = self.bn4(t)
t = self.relu4(t)
t = self.conv5(t)
t = self.bn5(t)
t = self.relu5(t)
t = self.comb(t)
t = self.Tmid + (self.Tscale * t)
t = self.fix_range1(t)
y = torch.pow(t, -1)
y = self.fix_range2(y)
return y
class Timg_DenoiseNet(nn.Module):
def __init__(self, Tmin=1e-3, Tmax=1e3):
super(Timg_DenoiseNet, self).__init__()
self.C = 64
self.Tmin = np.log(Tmin)
self.Tmax = np.log(Tmax)
# self.conv1 = nn.Conv2d(1, self.C, 3, padding=1)
self.conv1 = nn.Conv2d(1, self.C, 5, padding=2)
# self.conv1 = nn.Conv2d(1, self.C, 7, padding=3)
# self.conv1 = nn.Conv2d(1, self.C, 9, padding=4)
# self.conv1 = nn.Conv2d(1, self.C, 11, padding=5)
# self.conv1 = nn.Conv2d(1, self.C, 13, padding=6)
# self.conv1 = nn.Conv2d(1, self.C, 15, padding=7)
# self.conv1 = nn.Conv2d(1, self.C, 17, padding=8)
# self.conv1 = nn.Conv2d(1, self.C, 19, padding=9)
# self.conv1 = nn.Conv2d(1, self.C, 21, padding=10)
self.relu1 = nn.ReLU()
# self.conv2 = nn.Conv2d(self.C, self.C, 3, padding=1)
self.conv2 = nn.Conv2d(self.C, self.C, 5, padding=2)
self.bn2 = nn.BatchNorm2d(self.C)
self.relu2 = nn.ReLU()
# self.conv3 = nn.Conv2d(self.C, self.C, 3, padding=1)
self.conv3 = nn.Conv2d(self.C, self.C, 5, padding=2)
self.bn3 = nn.BatchNorm2d(self.C)
self.relu3 = nn.ReLU()
# self.conv4 = nn.Conv2d(self.C, self.C, 3, padding=1)
self.conv4 = nn.Conv2d(self.C, self.C, 5, padding=2)
self.bn4 = nn.BatchNorm2d(self.C)
self.relu4 = nn.ReLU()
# self.conv5 = nn.Conv2d(self.C, self.C, 3, padding=1)
self.conv5 = nn.Conv2d(self.C, self.C, 5, padding=2)
self.bn5 = nn.BatchNorm2d(self.C)
self.relu5 = nn.ReLU()
self.comb = nn.Conv2d(self.C, 1, 1)
self.fix_range1 = nn.Hardtanh(min_val=self.Tmin, max_val=self.Tmax)
self.fix_range2 = nn.Hardtanh(min_val=0, max_val=1)
def forward(self, t):
logt = torch.log(t)
logt = self.conv1(logt)
logt = self.relu1(logt)
logt = self.conv2(logt)
logt = self.bn2(logt)
logt = self.relu2(logt)
logt = self.conv3(logt)
logt = self.bn3(logt)
logt = self.relu3(logt)
logt = self.conv4(logt)
logt = self.bn4(logt)
logt = self.relu4(logt)
logt = self.conv5(logt)
logt = self.bn5(logt)
logt = self.relu5(logt)
logt = self.comb(logt)
logt = self.fix_range1(logt)
t = torch.exp(logt)
y = torch.pow(t, -1)
y = self.fix_range2(y)
return y
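if __name__ == "__main__":
    # Quick smoke test (illustrative only): push a random 1-channel batch through the
    # single-layer variant and confirm the spatial dimensions are preserved.
    model = Timg_DenoiseNet_LinT_1Layer()
    t = torch.rand(2, 1, 64, 64) + 1.0 / 255.0
    print(model(t).shape)  # expected: torch.Size([2, 1, 64, 64])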
| 2.296875
| 2
|
netconf/nc_rpc/base/get.py
|
Balaji-P/voltha_docker_compose-rsk_tech_CKAD
| 0
|
12784867
|
#!/usr/bin/env python
#
# Copyright 2016 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from lxml import etree
import structlog
from netconf.nc_rpc.rpc import Rpc
import netconf.nc_common.error as ncerror
from netconf.constants import Constants as C
from netconf.utils import filter_tag_match
from twisted.internet.defer import inlineCallbacks, returnValue
import dicttoxml
from simplejson import dumps, load
log = structlog.get_logger()
class Get(Rpc):
def __init__(self, request, grpc_client, session):
super(Get, self).__init__(request, grpc_client, session)
self._validate_parameters()
@inlineCallbacks
def execute(self):
if self.rpc_response.is_error:
returnValue(self.rpc_response)
log.info('get-request', session=self.session.session_id,
request=self.request)
rpc = self.get_voltha_rpc(self.request)
if not rpc:
log.info('unsupported-request', request=self.request)
self.rpc_response.is_error = True
self.rpc_response.node = ncerror.BadMsg(self.request)
return
# Invoke voltha via the grpc client
res_dict = yield self.grpc_client.invoke_voltha_api(rpc)
# convert dict to xml
xml = dicttoxml.dicttoxml(res_dict, attr_type=True)
log.info('voltha-info', res=res_dict, xml=xml)
root_elem = self.get_root_element(xml)
# Build the yang response
self.rpc_response.node = self.rpc_response.build_yang_response(
root_elem, self.request)
self.rpc_response.is_error = False
returnValue(self.rpc_response)
def _validate_parameters(self):
log.info('validate-parameters', session=self.session.session_id)
# Validate the GET command
if self.request:
try:
if self.request['command'] != 'get':
self.rpc_response.is_error = True
self.rpc_response.node = ncerror.BadMsg('No GET in get '
'request')
                if 'filter' in self.request:
                    if 'class' not in self.request:
self.rpc_response.is_error = True
self.rpc_response.node = ncerror.BadMsg(
'Missing filter sub-element')
except Exception as e:
self.rpc_response.is_error = True
self.rpc_response.node = ncerror.BadMsg(self.request)
return
def get_voltha_rpc(self, request):
        if 'class' in request:
rpcs = self.rpc_request_mapping.get(request['class'])
if rpcs is None:
return None
for rpc in rpcs:
                if 'subclass' in request:
# search first for subclass
if rpc['subclass'] and request['subclass'] == rpc[
'subclass']:
return rpc['rpc']
            # If we are here, then no subclass exists. Just return the rpc
            # associated with the None subclass.
for rpc in rpcs:
if rpc['subclass'] is None:
return rpc['rpc']
return None
# Supported Get Methods
rpc_request_mapping = {
'Voltha': [
{'subclass': None,
'rpc': 'VolthaGlobalService-GetVoltha'
}],
'VolthaInstance': [
{'subclass': None,
'rpc': 'VolthaLocalService-GetVolthaInstance'
},
{'subclass': 'health',
'rpc': 'VolthaLocalService-GetHealth'
},
{'subclass': 'adapters',
'rpc': 'VolthaLocalService-ListAdapters'
},
{'subclass': 'logical_devices',
'rpc': 'VolthaLocalService-ListLogicalDevices'
},
{'subclass': 'devices',
'rpc': 'VolthaLocalService-ListDevices'
},
{'subclass': 'device_types',
'rpc': 'VolthaLocalService-ListDeviceTypes'
},
{'subclass': 'device_groups',
'rpc': 'VolthaLocalService-ListDeviceGroups'
},
],
'VolthaInstances': [
{'subclass': None,
'rpc': 'VolthaGlobalService-ListVolthaInstances'
}],
}
| 1.960938
| 2
|
astar.py
|
AP-Atul/SnakeAI
| 2
|
12784868
|
from queue import PriorityQueue
from components import *
class AStar:
"""
A star algorithm implementation
f(n) = g(n) + h(n)
"""
def __init__(self):
self.paths = [
KEY_RIGHT,
KEY_LEFT,
KEY_UP,
KEY_DOWN
]
self.invalid = {
KEY_UP: KEY_DOWN,
KEY_DOWN: KEY_UP,
KEY_LEFT: KEY_RIGHT,
KEY_RIGHT: KEY_LEFT
}
self.moves = 0
def collides(self, headPosition, snake):
""" Check for body collision on the next step """
return any([body.position == headPosition for body in snake.body[: -1]])
def getDistances(self, goal, current, snake):
""" Finding distance for each path """
distances = PriorityQueue()
self.moves += 1
for path in self.paths:
x = None
y = None
goal_x = goal.x
goal_y = goal.y
if path is KEY_UP:
x = current.x
y = current.y - 1
elif path is KEY_DOWN:
x = current.x
y = current.y + 1
elif path is KEY_RIGHT:
x = current.x + 1
y = current.y
elif path is KEY_LEFT:
x = current.x - 1
y = current.y
if self.collides((x, y), snake):
continue
gn = self.moves
hn = abs(x - goal_x) + abs(y - goal_y)
fn = gn + hn
# add to queue
distances.put((fn, path))
return distances
def getKey(self, food, snake):
""" Returns the next step """
        if snake.head.x == food.x and snake.head.y == food.y:
self.moves = 0
return snake.direction
distances = self.getDistances(food, snake.head, snake)
if distances.qsize() == 0:
return snake.direction
return distances.get()[1]
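if __name__ == "__main__":
    # Standalone illustration of the cost function used above, f(n) = g(n) + h(n), with a
    # Manhattan-distance heuristic; the coordinates are made up and do not depend on the
    # Snake/Food components.
    q = PriorityQueue()
    goal = (5, 5)
    for move, (x, y) in {"RIGHT": (4, 5), "UP": (5, 3)}.items():
        g = 1                                    # steps taken so far
        h = abs(x - goal[0]) + abs(y - goal[1])  # Manhattan distance to the goal
        q.put((g + h, move))
    print(q.get())  # -> (2, 'RIGHT'): the move with the lowest f(n)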
| 3.953125
| 4
|
examples/kmeans.py
|
7enTropy7/ravml
| 7
|
12784869
|
<reponame>7enTropy7/ravml
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from ravml.cluster import KMeans
if __name__ == '__main__':
k = KMeans()
iris = load_iris()
X = iris.data[:1000]
y = iris.target[:1000]
X_train, X_test, y_train, y_test = train_test_split(X, y, shuffle=True, test_size=0.3)
k.fit(X_train, 3, iter=30)
k.plot()
| 2.484375
| 2
|
docs/new-pandas-doc/generated/pandas-DataFrame-plot-bar-3.py
|
maartenbreddels/datapythonista.github.io
| 0
|
12784870
|
<reponame>maartenbreddels/datapythonista.github.io<filename>docs/new-pandas-doc/generated/pandas-DataFrame-plot-bar-3.py<gh_stars>0
axes = df.plot.bar(rot=0, subplots=True)
axes[1].legend(loc=2) # doctest: +SKIP
| 2.109375
| 2
|
anuvaad-etl/anuvaad-extractor/content-handler/src/db/connection_manager.py
|
srihari-nagaraj/anuvaad
| 0
|
12784871
|
from config import MONGO_CONNECTION_URL,MONGO_DB_SCHEMA
from config import REDIS_SERVER_HOST,REDIS_SERVER_PORT
from app import server
from pymongo import MongoClient
from anuvaad_auditor.loghandler import log_info, log_exception
from utilities import AppContext
from flask import g
import redis
client = MongoClient(MONGO_CONNECTION_URL)
def get_db():
# log_info("Establishing connection with mongo", AppContext.getContext())
return client[MONGO_DB_SCHEMA]
def get_redis():
if 'redisdb' not in g:
# log_info("Establishing connection with redis store", AppContext.getContext())
g.redisdb = redis.Redis(host=REDIS_SERVER_HOST, port=REDIS_SERVER_PORT, db=4)
return g.redisdb
# def get_db():
# with server.app_context():
# if 'mongodb' not in g:
# log_info("Establishing connection with mongo", AppContext.getContext())
# client = MongoClient(MONGO_CONNECTION_URL)
# g.mongodb = client[MONGO_DB_SCHEMA]
# return g.mongodb
| 2.265625
| 2
|
forum/models.py
|
hann1010/simpleforum_test
| 0
|
12784872
|
<filename>forum/models.py
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from ckeditor.fields import RichTextField
class Forum_post(models.Model):
title = models.CharField(max_length=100, blank=False)
post_type = models.CharField(max_length=100, blank=True)
origin_post_id= models.IntegerField(default=0)
content = RichTextField()
date_posted = models.DateTimeField(default=timezone.now)
    date_last_save = models.DateTimeField(auto_now=True)
author = models.ForeignKey(User, on_delete=models.CASCADE)
def __str__(self):
return self.title + " / " + str(self.author)
| 2.40625
| 2
|
data_loader/data_loaders.py
|
lizhogn/cellpose
| 1
|
12784873
|
<reponame>lizhogn/cellpose
from torchvision import datasets, transforms
from base import BaseDataLoader
from data_loader import cell_datasets
class CellDataLoader(BaseDataLoader):
"""
Cell data loading demo using BaseDataLoader
"""
def __init__(self, data_dir, batch_size, shuffle=True, validation_split=0.0, num_workers=1, training=True):
self.data_dir = data_dir
self.dataset = cell_datasets.CellDataset(data_dir=self.data_dir, train=training)
super().__init__(self.dataset, batch_size, shuffle, validation_split, num_workers)
| 2.828125
| 3
|
corpora/tng_scripts/tng_scripts.py
|
galtay/word-to-vec
| 1
|
12784874
|
<gh_stars>1-10
from collections import Counter
import json
import string
import numpy as np
class TngScripts:
def __init__(self, file_path):
with open(file_path, 'r') as fp:
self._scripts = json.load(fp)
def iter_sentences(self):
for episode in self._scripts:
for sent in episode['sentences']:
yield sent
def iter_target_context(self, back_window=2, front_window=2, shuffle=True):
episode_indices = np.arange(len(self._scripts))
if shuffle:
np.random.shuffle(episode_indices)
for episode_index in episode_indices:
sentences = self._scripts[episode_index]['sentences']
sentence_indices = np.arange(len(sentences))
if shuffle:
np.random.shuffle(sentence_indices)
for sentence_index in sentence_indices:
sentence = sentences[sentence_index]
for target_indx, target_word in enumerate(sentence):
min_context_indx = max(0, target_indx - back_window)
max_context_indx = min(len(sentence)-1, target_indx + front_window)
context_indices = [
indx for indx in range(min_context_indx, max_context_indx + 1)
if indx != target_indx]
context_words = [sentence[indx] for indx in context_indices]
yield (target_word, context_words)
def iter_target_context_batch(self, back_window=2, front_window=2, batch_size=10, shuffle=True):
tc_iter = self.iter_target_context(
back_window=back_window,
front_window=front_window,
shuffle=shuffle)
keep_going = True
while keep_going:
batch = []
for ii in range(batch_size):
try:
batch.append(next(tc_iter))
                except StopIteration:
                    keep_going = False
                    break
yield batch
if __name__ == '__main__':
file_path = 'tng_scripts.json'
corpus = TngScripts(file_path)
sentences = list(corpus.iter_sentences())
# write json corpus
corpus_file = 'corpus.json'
with open(corpus_file, 'w') as fp:
json.dump(sentences, fp)
# write vocabulary
word_counter = Counter([word for sent in sentences for word in sent])
vocab_file = 'vocab.json'
with open(vocab_file, 'w') as fp:
json.dump(word_counter.most_common(), fp)
| 2.515625
| 3
|
project/project_files_Sylwia/student_utils.py
|
SylwiaNowakowska/Patient_Selection_for_Diabetes_Drug_Testing
| 0
|
12784875
|
<filename>project/project_files_Sylwia/student_utils.py
import pandas as pd
import numpy as np
import os
import tensorflow as tf
from sklearn.model_selection import GroupShuffleSplit
import functools
####### STUDENTS FILL THIS OUT ######
#Question 3
def reduce_dimension_ndc(df, ndc_df):
'''
df: pandas dataframe, input dataset
ndc_df: pandas dataframe, drug code dataset used for mapping in generic names
return:
reduce_dim_df: pandas dataframe, output dataframe with joined generic drug name
'''
reduce_dim_df = df.merge(ndc_df[['NDC_Code','Non-proprietary Name']], how='left', left_on='ndc_code', right_on='NDC_Code')
reduce_dim_df.drop(columns=['NDC_Code'], inplace=True)
reduce_dim_df.rename(columns={'Non-proprietary Name':'generic_drug_name'}, inplace=True)
return reduce_dim_df
#Question 4
def select_first_encounter(df):
'''
df: pandas dataframe, dataframe with all encounters
return:
- first_encounter_df: pandas dataframe, dataframe with only the first encounter for a given patient
'''
first_encounter_df = df.sort_values(['encounter_id'], ascending=True).groupby('patient_nbr').head(1)
return first_encounter_df
#Question 6
def patient_dataset_splitter(df, patient_key='patient_nbr'):
'''
df: pandas dataframe, input dataset that will be split
patient_key: string, column that is the patient id
return:
- train: pandas dataframe,
- validation: pandas dataframe,
- test: pandas dataframe,
'''
train_inds, validation_test_inds = next(GroupShuffleSplit(test_size=0.4,
n_splits=1,
random_state = 16).split(df, groups=df[patient_key]))
train = df.loc[train_inds,:]
validation_test = df.loc[validation_test_inds,:]
validation_test.reset_index(inplace=True)
validation_inds, test_inds = next(GroupShuffleSplit(test_size=0.5,
n_splits=1,
random_state = 16).split(validation_test, groups=validation_test[patient_key]))
validation = validation_test.loc[validation_inds,:]
test = validation_test.loc[test_inds,:]
return train, validation, test
#Question 7
def create_tf_categorical_feature_cols(categorical_col_list,
vocab_dir='./diabetes_vocab/'):
'''
categorical_col_list: list, categorical field list that will be transformed with TF feature column
vocab_dir: string, the path where the vocabulary text files are located
return:
output_tf_list: list of TF feature columns
'''
output_tf_list = []
for c in categorical_col_list:
vocab_file_path = os.path.join(vocab_dir, c + "_vocab.txt")
'''
Which TF function allows you to read from a text file and create a categorical feature
You can use a pattern like this below...
tf_categorical_feature_column = tf.feature_column.......
'''
tf_categorical_feature_column = tf.feature_column.categorical_column_with_vocabulary_file(key=c,
vocabulary_file=vocab_file_path,
num_oov_buckets=0)
if c == 'primary_diagnosis_code':
categorical_column = tf.feature_column.embedding_column(tf_categorical_feature_column, dimension=10)
else:
categorical_column = tf.feature_column.indicator_column(tf_categorical_feature_column)
output_tf_list.append(categorical_column)
return output_tf_list
#Question 8
def normalize_numeric_with_zscore(col, mean, std):
'''
This function can be used in conjunction with the tf feature column for normalization
'''
return (col - mean)/std
def create_tf_numeric_feature(col, MEAN, STD, default_value=0):
'''
col: string, input numerical column name
MEAN: the mean for the column in the training data
STD: the standard deviation for the column in the training data
default_value: the value that will be used for imputing the field
return:
tf_numeric_feature: tf feature column representation of the input field
'''
normalizer = functools.partial(normalize_numeric_with_zscore, mean=MEAN, std=STD)
tf_numeric_feature = tf.feature_column.numeric_column(key=col, default_value=default_value, dtype=tf.float64,
normalizer_fn=normalizer)
return tf_numeric_feature
#Question 9
def get_mean_std_from_preds(diabetes_yhat):
'''
diabetes_yhat: TF Probability prediction object
'''
m = diabetes_yhat.mean()
s = diabetes_yhat.stddev()
return m, s
# Question 10
def get_student_binary_prediction(df, col):
'''
df: pandas dataframe prediction output dataframe
col: str, probability mean prediction field
return:
student_binary_prediction: pandas dataframe converting input to flattened numpy array and binary labels
'''
    threshold = 5.0
    df['binary_pred'] = df[col] >= threshold
student_binary_prediction = df['binary_pred'].replace(True, 1).replace(False, 0).to_numpy()
return student_binary_prediction
| 2.984375
| 3
|
main.py
|
devashishupadhyay/ml-covid-19
| 1
|
12784876
|
<reponame>devashishupadhyay/ml-covid-19
from core.covid import pred
model = pred('Model/model.sav','covid19-features-df/covid_19_strain.pkl')
print(model.predict())
| 1.804688
| 2
|
examples/composite_keys/testdata.py
|
NeolithEra/Flask-AppBuilder
| 3,862
|
12784877
|
import logging
from app import db
from app.models import Inventory, Datacenter, Rack, Item
import random
import string
from datetime import datetime
log = logging.getLogger(__name__)
DC_RACK_MAX = 20
ITEM_MAX = 1000
cities = ["Lisbon", "Porto", "Madrid", "Barcelona", "Frankfurt", "London"]
models = ["Server MX", "Server MY", "Server DL380", "Server x440", "Server x460"]
datacenters = list()
def get_random_name(names_list, size=1):
return names_list[random.randrange(0, len(names_list))]
def serial_generator(size=6, chars=string.ascii_uppercase + string.digits):
return "".join(random.choice(chars) for _ in range(size))
for city in cities:
datacenter = Datacenter()
datacenter.name = "DC %s" % city
datacenter.address = city
datacenters.append(datacenter)
db.session.add(datacenter)
log.info(datacenter)
try:
db.session.commit()
for num in range(1, DC_RACK_MAX):
rack = Rack()
rack.num = num
rack.datacenter = datacenter
db.session.add(rack)
except Exception as e:
log.error("Creating Datacenter: %s", e)
db.session.rollback()
for i in range(1, ITEM_MAX):
item = Item()
item.serial_number = serial_generator()
item.model = get_random_name(models)
db.session.add(item)
log.info(item)
try:
db.session.commit()
except Exception as e:
log.error("Creating Item: %s", e)
db.session.rollback()
| 2.65625
| 3
|
host.py
|
JinlongWukong/DevLab-ansible
| 0
|
12784878
|
from ansible_task_executor import AnsibleTaskExecutor
import os
class HOST(object):
def __init__(self, ip, user, password, subnet=None, role=None):
self.ip = ip
self.user = user
self.password = password
self.role = role
self.cpu = None
self.memory = None
self.disk = None
self.os_type = None
self.subnet = subnet
self.ansible_inventory = "{} ansible_ssh_user={} ansible_ssh_pass={} role={}".format(ip, user, password, role)
self.executor = AnsibleTaskExecutor()
self.proxy = os.getenv('https_proxy')
def install(self):
if self.role == "compute":
result_code, callback = self.executor.execute('install-host.yml', self.ansible_inventory,
extra_vars={
"proxy_env": {'https_proxy': self.proxy},
"subnet": self.subnet
})
elif self.role == "container":
result_code, callback = self.executor.execute('install-container-host.yml', self.ansible_inventory,
extra_vars={
'https_proxy': self.proxy,
"subnet": self.subnet
})
else:
raise Exception("host role not supported")
if result_code:
raise Exception(callback.get_all_result())
for event in callback.host_ok:
if event['task'] == "Print total memory size" and event['host'] == self.ip:
self.memory = event['result']['msg']
elif event['task'] == "Print total cpu count" and event['host'] == self.ip:
self.cpu = event['result']['msg']
elif event['task'] == "Print os type" and event['host'] == self.ip:
self.os_type = event['result']['msg']
elif event['task'] == "Print virt vol disk usage" and event['host'] == self.ip:
self.disk = int(event['result']['msg'])
else:
pass
return self.cpu, self.memory, self.disk, self.os_type
def static_routes(self, routes):
result_code, callback = self.executor.execute('route.yml', self.ansible_inventory,
extra_vars={"routes": routes})
if result_code:
raise Exception(callback.get_failed_result())
def get_info(self):
"""
Get host cpu/mem/disk usage
:return:
cpu, mem, disk, engine_status(0,1)
"""
result_code, callback = self.executor.execute('check-host.yml', self.ansible_inventory,
extra_vars={"role": self.role})
if result_code:
raise Exception(callback.get_failed_result())
for event in callback.host_ok:
if event['task'] == "Print total memory avail" and event['host'] == self.ip:
memory_avail = event['result']['msg']
elif event['task'] == "Print cpu load usage" and event['host'] == self.ip:
cpu_load = event['result']['msg']
elif event['task'] == "Print virt vol disk usage" and event['host'] == self.ip:
disk_usage = event['result']['msg']
elif event['task'] == "Check engine liveness" and event['host'] == self.ip:
engine_status = event['result']['rc']
else:
pass
return memory_avail, cpu_load, disk_usage, engine_status
def port_dnat(self, rules):
"""
Set iptables rules
:return:
none
"""
result_code, callback = self.executor.execute('iptables.yml', self.ansible_inventory,
extra_vars={"rules": rules})
if result_code:
raise Exception(callback.get_failed_result())
class MultiHOST(HOST):
def __init__(self, hosts):
self.ansible_inventory = ""
for h in hosts:
if len(h) != 4:
continue
self.ansible_inventory += "{} ansible_ssh_user={} ansible_ssh_pass={} role={}".format(
h[0], h[1], h[2], h[3]) + "\n"
self.executor = AnsibleTaskExecutor()
| 2.296875
| 2
|
jams/tcherkez.py
|
MuellerSeb/jams_python
| 9
|
12784879
|
#!/usr/bin/env python
from __future__ import division, absolute_import, print_function
import numpy as np
import jams.const as const
def tcherkez(Rstar, Phi=0.3, T=0.056,
a2=1.0012, a3=1.0058, a4=1.0161,
t1=0.9924, t2=1.0008, g=20e-3,
RG=False, Rchl=False, Rcyt=False, fullmodel=True):
"""
        Calculates the Tcherkez model of 13C-discrimination in the Calvin cycle.
Definition
----------
def tcherkez(Rstar, Phi=0.3, T=0.056, a2=1.0012, a3=1.0058, a4=1.0161,
                     t1=0.9924, t2=1.0008, g=20e-3,
RG=False, Rchl=False, Rcyt=False, fullmodel=True):
Input
-----
Rstar Isotope ratio of assimilated carbon, e.g. of Farquhar et al. (1982) model
Optional Input
--------------
        Phi        Vo/Vc: ratio of oxygenation to carboxylation by Rubisco (default: 0.3)
T Relative flux of starch synthesis [mol(C6 of starch)/mol(CO2 assimilated)] (default: 0.056)
a2 Inverse fractionation associated with aldolase
for the C-2 position of FBP (Fructose-1,6-bisphosphate) (default: 1.0012)
a3 Same for C-3 of FBP (default: 1.0058)
a4 Same for C-4 of FBP (default: 1.0161)
        t1         Inverse fractionation associated with transketolase
for C-1 in E4P (erythrose-4-phosphate) and R5P (ribose-5-phosphate) (default: 0.9924)
t2 Same for C-2 of X5P (xylulose-5-phosphate) (default: 1.0008)
g Isotope discrimination of photorespiratory decarboxylation of Gly (Glycine) (default: 20e-3)
RG If True, output isotope ratio of G3P (3-phosphoglyceraldehyde
or glyceraldehyde-3-phosphate) (default: False)
Rchl If True, output isotope ratio of chloroplastic hexoses and transitory starch (default: False)
Rcyt If True, output isotope ratio of cytoplasmic hexoses (default: False)
fullmodel If True, output RG, Rchl and Rcyt (default: True)
Output
------
RG, Rchl, Rcyt if fullmodel=True
Restrictions
------------
        If at least one of RG, Rchl or Rcyt is given, then fullmodel=False.
References
----------
<NAME>, <NAME>, <NAME> & <NAME>, Theoretical considerations about carbon isotope
distribution in glucose of C3 plants, Functional Plant Biology 31, 857-877, 2004
<NAME>, <NAME>, <NAME>, <NAME> & <NAME>, Experimental evidence for diel variations
of the carbon isotope composition in leaf, stem and phloem sap organic matter in Ricinus communis,
Plant, Cell and Environment 31, 941-953, 2008
Examples
--------
>>> a = -4.4e-3
>>> b = -27e-3
>>> ca = 353e-6
>>> ci = 0.7*ca
>>> Delta = a+(b-a)*ci/ca
>>> delta_a1 = -8e-3
>>> Ra1 = (delta_a1+1.)*const.R13VPDB
>>> Rstar1 = (1.-Delta)*Ra1
>>> from autostring import astr
>>> print(astr((np.array(tcherkez(Rstar1, Phi=0.3, T=0.056))/const.R13VPDB-1.)*1000.,3,pp=True))
['12.764' '17.125' '12.978']
>>> delta_a2 = -7.8e-3
>>> Ra2 = (delta_a2+1.)*const.R13VPDB
>>> Rstar2 = (1.-Delta)*Ra2
>>> R1 = (np.array(tcherkez([Rstar1, Rstar2], Rcyt=True))/const.R13VPDB-1.)*1000.
>>> print(astr(R1,3,pp=True))
[['12.978' '13.182']]
>>> R1, R2 = tcherkez([Rstar1, Rstar2], Rchl=True, Rcyt=True)
>>> print(astr((R1/const.R13VPDB-1.)*1000.,3,pp=True))
['17.125' '17.330']
>>> print(astr((R2/const.R13VPDB-1.)*1000.,3,pp=True))
['12.978' '13.182']
License
-------
This file is part of the JAMS Python package, distributed under the MIT
License. The JAMS Python package originates from the former UFZ Python library,
Department of Computational Hydrosystems, Helmholtz Centre for Environmental
Research - UFZ, Leipzig, Germany.
Copyright (c) 2012-2013 <NAME> - mc (at) macu (dot) de
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
History
-------
Written, MC, Jan 2012
Modified, MC, Feb 2013 - ported to Python 3
"""
#
if (RG | Rchl | Rcyt):
fullmodel = False
if fullmodel:
RG = True
Rchl = True
Rcyt = True
#
a2tilde = (1.+0.5*Phi-T) / ((2.*a2+1.)/3.+Phi*(2.*a2-0.5)/3.+T*(a2-2.))
a3tilde = (1.+0.5*Phi-T) / ((2.*a3+1.)/3.+Phi*(2.*a3-0.5)/3.+T*(a3-2.))
t1tilde = (1.+3.*T)/(t1+3.*T)*t1
t2tilde = (1.+3.*T)/(t2+3.*T)*t2
eps = a3*a3tilde
epsdash = (t1tilde+1.5*Phi)*a3*a3tilde/(3.*(1.+0.5*Phi-(1.+t2tilde)*a2*a2tilde/3.))
iRG = np.array(Rstar) / (1.+Phi*(0.5-(1.+g)/(2.+g)*(eps+2.*a2*a2tilde*epsdash)/3.)+T*(a4-1.))
iRchl = 1./6.*(epsdash*(1.+(a2*a2tilde*t2tilde)/t2)+eps*(2.+t1tilde/t1)+a4) * iRG
iRcyt = 1./6.*(2.*eps+3.*(a2+1.)/(a2+2.)*epsdash*a2tilde+3.*a3tilde/(2.+a3)*(a3+2.*a4/(1.+a4))) * iRG
out = []
if RG:
out += [iRG]
if Rchl:
out += [iRchl]
if Rcyt:
out += [iRcyt]
return out
if __name__ == '__main__':
import doctest
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
| 1.898438
| 2
|
architectures/providers/kubernetes/controlplane.py
|
marinmuso/architectures
| 15
|
12784880
|
# Do not modify this file directly. It is auto-generated with Python.
from architectures.providers import _Kubernetes
class _Controlplane(_Kubernetes):
_service_type = "controlplane"
_icon_dir = "icons/kubernetes/controlplane"
class Sched(_Controlplane):
_icon = "sched.png"
_default_label = "Sched"
class Kubelet(_Controlplane):
_icon = "kubelet.png"
_default_label = "Kubelet"
class CM(_Controlplane):
_icon = "c-m.png"
_default_label = "C M"
class CCM(_Controlplane):
_icon = "c-c-m.png"
_default_label = "C C M"
class Api(_Controlplane):
_icon = "api.png"
_default_label = "Api"
class KProxy(_Controlplane):
_icon = "k-proxy.png"
_default_label = "K Proxy"
| 1.570313
| 2
|
glsl/glsl_translate.py
|
avr-aics-riken/SURFACE
| 10
|
12784881
|
<reponame>avr-aics-riken/SURFACE
#!/usr/bin/env python
#
# TODO:
# o Support array (swizzle) handling in a much cleaner manner.
# o and more...
#
import os
import os.path
import re
import subprocess
import sys
import tempfile
from string import Template
from sexps import *
gSlotToN = {
'x': 0, 'y': 1, 'z': 2, 'w': 3
}
gNToSlot = ['x', 'y', 'z', 'w']
gIndentLevel = 0
gVarCount = 0
# Symbol table (as a stack) for temporary variables
# depth 1 = global scope
# depth 2 = function scope
gSymbolTable = []
# @todo { Create context to save these variables. }
gUniformInputs = {}
gUniformCount = 0
gVaryingInputs = {}
gVaryingInputCount = 0
gVaryingOutputs = []
gStructDefs = {}
def DefineStruct(struct):
gStructDefs[struct['name']] = struct
def FindStruct(name):
if isinstance(name, list) and name[0] == 'array':
# maybe array of struct
# ['array', 'Sphere__0x7fdac1c11eb0', '3']
p = name[1].split('__')
if len(p) > 1:
sname = p[0] + '__'
else:
sname = p[0]
if sname in gStructDefs:
return gStructDefs[sname]
raise
elif name in gStructDefs:
return gStructDefs[name]
return False
def GetGLTypeSize(basety, n):
# 4 = sizeof(float)
# 8 = sizeof(double)
if basety == "vec":
return 4 * n
if basety == "dvec":
return 8 * n
elif basety == "ivec":
return 4 * n
elif basety == "bvec":
return 1 * n # byte
elif basety == "mat":
return 4 * n * n
# Unknown type
assert 0
def AddSymbol(varname, ty, n, quals):
assert len(gSymbolTable) > 0
# Add symbol to last stack of symbol table
gSymbolTable[-1][varname] = {'type': (ty, n),
'quals': quals, 'name': varname}
def GetSymbol(varname):
assert len(gSymbolTable) > 0
# search internal scope first
n = len(gSymbolTable) - 1
while n >= 0:
if varname in gSymbolTable[n]:
return gSymbolTable[n][varname]
n = n - 1
return False
def GetScopeLevel():
return len(gSymbolTable)
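# Illustrative note (added): the helpers above treat gSymbolTable as a scope stack.
# Assuming the global scope has already been pushed (as ir_to_c() does below):
#   gSymbolTable.append(dict())       # enter a function scope
#   AddSymbol("p", "vec", 3, [])      # GetSymbol("p")['type'] == ('vec', 3)
#   gSymbolTable.pop()                # leave it; GetSymbol("p") is False again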
def IsArray(s):
return isinstance(s, (list, tuple))
def IsSamplerTy(tyname):
samplers = [
"sampler2D", "sampler3D"
]
if tyname in samplers:
return True
return False
def ParseTy(tyname):
"""
    Parse type into (elementType, num). Samplers get special treatment.
    Examples: vec4 -> ('vec', 4)
float -> ('float', 1)
sampler2D -> ('sampler2D', 1)
"""
if IsSamplerTy(tyname):
return (tyname, 1)
# array type is TODO
if IsArray(tyname):
return (tyname, 1)
p = re.compile("([a-zA-z]+)([0-9]*)")
groups = p.match(tyname).groups()
if len(groups) == 1:
return (groups[0], 1)
else:
if groups[1] == '':
return (groups[0], 1)
else:
return (groups[0], int(groups[1]))
def IsBuiltinTextureFunction(fname):
builtins = [
"texture2D", "texture3D"
]
if fname in builtins:
return True
return False
def IsBuiltinTraceFunction(fname):
builtins = [
"trace", # LSGL ext
"raydepth", # LSGL ext
"rayattrib", # LSGL ext
"rayoption", # LSGL ext
"isectinfo", # LSGL ext
"camerainfo", # LSGL ext
"numIntersects", # LSGL ext
"queryIntersect", # LSGL ext
]
if fname in builtins:
return True
return False
def IsBuiltinRandomFunction(fname):
builtins = [
"random" # LSGL ext
]
if fname in builtins:
return True
return False
def IsBuiltinFunction(fname):
builtins = [
"radians",
"degrees",
"sin",
"cos",
"tan",
"asin",
"acos",
"atan",
"atan2",
"pow",
"exp",
"log",
"exp2",
"log2",
"sqrt",
"inversesqrt",
"abs",
"sign",
"floor",
"ceil",
"fract",
"mod",
"min",
"max",
"clamp",
"mix",
"step",
"smoothstep",
"length",
"distance",
"dot",
"cross",
"normalize",
"faceforward",
"reflect",
"refract",
"matrixCompMult",
"lessThan",
"lessThanEqual",
"greaterThan",
"greaterThanEqual",
"equal",
"notEqual",
"any",
"all",
"not",
"texture2D",
"texture3D",
"trace", # LSGL ext
"raydepth", # LSGL ext
"rayattrib", # LSGL ext
"rayoption", # LSGL ext
"isectinfo", # LSGL ext
"camerainfo", # LSGL ext
"random", # LSGL ext
"numIntersects", # LSGL ext
"queryIntersect", # LSGL ext
]
if fname in builtins:
return True
return False
def GetBuiltinType(varname):
builtins = {
"gl_FragCoord": ("vec", 4),
"gl_FrontFacing": ("bool", 1),
"gl_FragColor": ("vec", 4),
"gl_PointCoord": ("vec", 2),
"gl_MaxVertexAttribs": ("int", 1),
"gl_MaxVertexUniformVectors": ("int", 1),
"gl_MaxVaryingVectors": ("int", 1),
"gl_MaxVertexTextureImageUnits": ("int", 1),
"gl_MaxCombinedTextureImageUnits": ("int", 1),
"gl_MaxTextureImageUnits": ("int", 1),
"gl_MaxFragmentUniformVectors": ("int", 1),
"gl_DepthRangeParameters": ("todo", 1),
"gl_DepthRange": ("todo", -1),
"gl_MaxDrawBuffers": ("int", 1),
"gl_FragData": ("todo", -1),
}
if varname in builtins:
return builtins[varname]
return False
def IsBuiltinVariable(varname):
builtins = [
"gl_FragCoord",
"gl_FrontFacing",
"gl_FragColor",
"gl_PointCoord",
"gl_MaxVertexAttribs",
"gl_MaxVertexUniformVectors",
"gl_MaxVaryingVectors",
"gl_MaxVertexTextureImageUnits",
"gl_MaxCombinedTextureImageUnits",
"gl_MaxTextureImageUnits",
"gl_MaxFragmentUniformVectors",
"gl_DepthRangeParameters",
"gl_DepthRange",
"gl_MaxDrawBuffers",
"gl_FragData",
]
if varname in builtins:
return True
return False
def AddUniformInput(varname, ty, n, quals):
# Assign unique index(from 0)
global gUniformCount
gUniformInputs[varname] = {
'type': (ty, n), 'quals': quals, 'name': varname, 'index': gUniformCount}
gUniformCount += 1
def IsUniform(varname):
global gUniformInputs
if varname in gUniformInputs:
return True
return False
def GetUniform(varname):
global gUniformInputs
return gUniformInputs[varname]
def IsVaryingInput(varname):
global gVaryingInputs
if varname in gVaryingInputs:
return True
return False
def GetVaryingInput(varname):
global gVaryingInputs
return gVaryingInputs[varname]
def IsTexture(varname):
if IsUniform(varname):
uniform = GetUniform(varname)
if IsSamplerTy(uniform['type'][0]):
return True
return False
def IsTemporary(varname):
if IsTexture(varname) or IsVaryingInput(varname) or IsUniform(varname):
return False
# Might be temporary variable
return True
def AddVaryingInput(varname, ty, n, quals):
# Assign unique index(from 0)
global gVaryingInputCount
gVaryingInputs[varname] = {
'type': (ty, n), 'quals': quals, 'name': varname, 'index': gVaryingInputCount}
gVaryingInputCount += 1
def GetTypeCastString(varname):
if IsVaryingInput(varname):
varying = GetVaryingInput(varname)
if IsVectorType(varying['type'][0]):
return "(%s%d *)" % (varying['type'][0], varying['type'][1])
elif IsMatrixType(varying['type'][0]):
return "(%s%d *)" % (varying['type'][0], varying['type'][1])
else:
return "(%s *)" % (varying['type'][0])
elif IsUniform(varname):
uniform = GetUniform(varname)
if IsTexture(varname):
# No typecast required
return ""
else:
if IsVectorType(uniform['type'][0]):
return "(%s%d *)" % (uniform['type'][0], uniform['type'][1])
elif IsMatrixType(uniform['type'][0]):
return "(%s%d *)" % (uniform['type'][0], uniform['type'][1])
else:
return "(%s *)" % (uniform['type'][0])
# No typecast required
return ""
def GetTypeOfSymbol(varname):
if IsVaryingInput(varname):
varying = GetVaryingInput(varname)
return varying['type']
elif IsUniform(varname):
uniform = GetUniform(varname)
return uniform['type']
elif IsBuiltinVariable(varname):
return GetBuiltinType(varname)
elif IsTemporary(varname):
temp = GetSymbol(varname)
# print varname
assert temp is not False
return temp['type']
assert 0 # Unknown symbol
def IncrementIndent():
global gIndentLevel
gIndentLevel += 1
def DecrementIndent():
global gIndentLevel
gIndentLevel -= 1
if gIndentLevel < 0:
gIndentLevel = 0
def Indent():
global gIndentLevel
s = ""
for i in range(gIndentLevel):
s += " "
return s
def NewTempVar():
global gVarCount
s = "tmpvar_%d" % gVarCount
gVarCount += 1
return s
def IsVectorType(s):
tys = ['vec', 'ivec', 'bvec', 'dvec']
if s in tys:
return True
return False
def IsMatrixType(s):
tys = ['mat']
if s in tys:
return True
return False
def baseType(ty):
if ty == "vec":
return "float"
elif ty == "ivec":
return "int"
elif ty == "bvec":
return "bool"
elif ty == "mat":
return "float"
else:
return ty
def parseValue(lst):
(ty, n) = ParseTy(lst[0])
values = lst[1]
return (ty, n, values)
def renameVariable(varname):
# if varname == "gl_FragColor":
# return "(fragment->fragColor)"
# Rewrite builtin variables
if varname == "gl_FragColor":
return "(__fragment->fragColor)"
elif varname == "gl_FragCoord":
return "(__fragment->fragCoord)"
elif varname == "Normal":
return "(__fragment->normal)"
elif varname == "Eye":
return "(@TODO:Eye)"
# Rewrite uniform variables
if IsUniform(varname):
uniform = GetUniform(varname)
if IsTexture(varname):
return "(__state->textures[%d])" % uniform['index']
else:
return "(__state->uniforms[%d])" % uniform['index']
# Rewrite varying variables
if IsVaryingInput(varname):
varying = GetVaryingInput(varname)
return "(__state->varyings[%d])" % varying['index']
# Might be local variable. No rewrite required.
return varname
class VarDecl:
def __init__(self, varname, ty, n):
self.varname = varname
self.ty = ty
self.n = n
def __str__(self):
return self.varname
def getDeclareString(self):
s = ""
if isinstance(self.ty, list) and self.ty[0] == 'array':
# array def
# ['array', 'Sphere__0x7fe31ac11eb0', '3']
p = self.ty[1].split('__')
if len(p) > 1:
sname = p[0] + '__'
else:
sname = p[0]
s += "%s %s[%s];\n" % (sname, self.varname, self.ty[2])
elif IsVectorType(self.ty):
s += "%s%d %s;\n" % (self.ty, self.n, self.varname)
elif IsMatrixType(self.ty):
s += "%s%d %s;\n" % (self.ty, self.n, self.varname)
else:
s += "%s %s;\n" % (self.ty, self.varname)
return s
def getIntermExprString(self):
return ""
def getExprString(self, slot, i):
return self.varname + ("[%d]" % gSlotToN[slot])
class VarRef:
def __init__(self, varname):
self.varname = renameVariable(varname)
self.orgname = varname
def __str__(self):
return self.varname
def getDeclareString(self):
return ""
def getIntermExprString(self):
return ""
def getCExpr(self):
prefix = ""
isInput = False
if IsUniform(self.orgname) or IsVaryingInput(self.orgname):
# If this variable is uniform/varying variable, add a prefix.
prefix = ".data"
isInput = True
(ty, n) = GetTypeOfSymbol(self.orgname)
tycast = GetTypeCastString(self.orgname)
if isInput:
return "/*var_ref*/" + "(*" + tycast + "(" + self.varname + prefix + "))"
else:
postfix = "&"
return "/*var_ref*/" + "(*" + tycast + postfix + "(" + self.varname + prefix + "))"
def getExprString(self, slot, i):
prefix = ""
isInput = False
if IsUniform(self.orgname) or IsVaryingInput(self.orgname):
# If this variable is uniform variable, add a prefix.
prefix = ".data"
isInput = True
else:
# If this variable is temporary variable, add some prefix depending
# on its type.
var = GetSymbol(self.orgname)
if var is not False:
if IsVectorType(var['type'][0]):
prefix = ".v"
(ty, n) = GetTypeOfSymbol(self.orgname)
tycast = GetTypeCastString(self.orgname)
if isInput:
# print (ty, n)
if IsVectorType(ty):
return "/*var_ref(vec)*/" + "(*" + tycast + "(" + self.varname + prefix + ")).v[%d]" % gSlotToN[slot]
else:
return "/*var_ref*/" + "(*" + tycast + "(" + self.varname + prefix + "))"
else:
postfix = "&"
if IsVectorType(ty):
return "/*var_ref*/" + tycast + self.varname + prefix + ("[%d]" % gSlotToN[slot])
else:
return "/*var_ref*/" + "(*" + tycast + postfix + "(" + self.varname + prefix + "))"
class RecordRef:
def __init__(self, varname, membername):
if (len(varname) == 2 and varname[0] == 'var_ref'):
# should be ['var_ref', 'str']
self.var = VarRef(varname[1])
self.recordname = varname[1]
self.membername = membername
self.is_array = False
elif (len(varname) == 3 and varname[0] == 'array_ref'):
# ['array_ref', ['var_ref', 'sphere'], ['constant', 'int', ['0']]]
self.var = ArrayRef(varname[1], varname[2])
self.recordname = varname[1][1]
self.membername = membername
self.is_array = True
else:
raise
def __str__(self):
return str(self.var) + "." + self.membername
def getDeclareString(self):
return ""
def getIntermExprString(self):
return ""
def getCExpr(self):
# Look up struct definition
sym = GetSymbol(self.recordname)
assert sym is not False
(sty, sn) = sym['type']
struct = FindStruct(sty)
assert struct is not False
# Look up type of member variable
member = struct['members'][self.membername]
assert member is not False
(ty, n) = ParseTy(member['ty'])
return "/*record_ref*/" + "(" + str(self.var) + "." + self.membername + ")"
def getExprString(self, slot, i):
# Look up struct definition
sym = GetSymbol(self.recordname)
assert sym is not False
(sty, sn) = sym['type']
# print((sty, sn))
struct = FindStruct(sty)
assert struct is not False
# Look up type of member variable
member = struct['members'][self.membername]
assert member is not False
(ty, n) = ParseTy(member['ty'])
# return "/*record_ref*/%s" % struct['name']
prefix = ""
if IsVectorType(ty):
prefix = ".v"
if IsVectorType(ty):
return "/*record_ref*/" + "(" + str(self.var) + "." + self.membername + ")" + prefix + ("[%d]" % gSlotToN[slot])
else:
return "/*record_ref*/" + "(" + str(self.var) + "." + self.membername + ")"
class ArrayRef:
def __init__(self, varname, indexname):
assert len(varname) == 2 # should be ['var_ref', 'str']
assert varname[0] == 'var_ref'
assert len(indexname) == 3 # should be ['constant', 'int', [N]]
self.var = VarRef(varname[1])
self.recordname = varname[1]
self.indexname = indexname[2][0]
def __str__(self):
return self.recordname + "[" + self.indexname + "]"
def getDeclareString(self):
return ""
def getIntermExprString(self):
return ""
def getCExpr(self):
# Look up struct definition
sym = GetSymbol(self.recordname)
assert sym is not False
#(sty, sn) = sym['type']
#structinfo = FindStruct(sty)
#assert structinfo is not False
(ty, n) = sym['type']
prefix = ""
if IsVectorType(ty):
prefix = ".v"
elif IsMatrixType(ty):
prefix = ".v"
return "/*array_ref*/" + self.recordname + "%s[%s]" % (prefix, self.indexname)
def getExprString(self, slot, i):
if IsUniform(self.recordname):
# Assume mat3, mat4 or elase
prefix = ".data"
(ty, n) = GetTypeOfSymbol(self.recordname)
tycast = GetTypeCastString(self.recordname)
name = renameVariable(self.recordname)
if IsMatrixType(ty):
return "/*array_ref*/" + "(*" + tycast + "(" + name + prefix + (")).v[%s][%d]" % (self.indexname, gSlotToN[slot]))
else:
print((ty, n))
raise
# return "/*array_ref*/" + "(" + self.recordname + "[" +
# self.indexname + "]" + ")"
else:
# Look up struct definition
sym = GetSymbol(self.recordname)
# print sym, self.recordname
assert sym is not False
(ty, n) = sym['type']
prefix = ""
if IsVectorType(ty):
prefix = ".v"
elif IsMatrixType(ty):
prefix = ".v"
if IsVectorType(ty):
return "/*array_ref*/" + "(" + self.recordname + "[" + self.indexname + "]" + prefix + ("[%d]" % gSlotToN[slot]) + ")"
if IsMatrixType(ty):
return "/*array_ref*/" + "(" + self.recordname + prefix + ("[%s][%d]" % (self.indexname, gSlotToN[slot])) + ")"
else:
return "/*array_ref*/" + "(" + self.recordname + "[" + self.indexname + "]" + ")"
class Constant:
def __init__(self, ty, n, values):
self.ty = ty
self.n = n
i = 0
self.values = []
        # Handle scientific notation split across tokens, e.g. '10', 'e', '-10'
while i < len(values):
if i + 2 < len(values) and (values[i + 1] == 'e' or values[i + 1] == 'E'):
val = values[i] + values[i + 1] + values[i + 2]
self.values.append(val)
i = i + 3
else:
self.values.append(values[i])
i = i + 1
def __str__(self):
return "(%s, %d, %s)" % (self.ty, self.n, self.values)
def getDeclareString(self):
return ""
def getIntermExprString(self):
return ""
def getCExpr(self):
s = ""
if (self.n == 1):
s += "(" + str(self.values[0]) + ")"
else:
if IsVectorType(self.ty):
# for const value, no swizzle required.
# n = gSlotToN[slot]
# s = str(self.values[i])
s += "__make_vec%d(" % self.n
s += ','.join(map(str, self.values))
s += ")"
elif IsMatrixType(self.ty):
# for const value, no swizzle required.
# n = gSlotToN[slot]
# s = str(self.values[i])
s += "__make_mat%d(" % self.n
s += ','.join(map(str, self.values))
s += ")"
else:
raise
return s
def getExprString(self, slot, i):
s = ""
if (self.n == 1):
s = "(" + str(self.values[0]) + ")"
else:
# for const value, no swizzle required.
# n = gSlotToN[slot]
s = "(" + str(self.values[i]) + ")"
return s
class Swizzle:
def __init__(self, slots, values):
self.slots = slots
self.values = values
def __str__(self):
# return "TODO(swizzle: %s, %s)" % (self.slots, str(self.values))
return ""
def getDeclareString(self):
s = ""
s += self.values.getDeclareString()
return s
def getIntermExprString(self):
s = ""
s += self.values.getIntermExprString()
# for i in range(self.n):
# slot = gNToSlot[i]
# s += Indent()
# s += self.getExprString(slot, i)
# s += " = "
# s += self.lhs.getExprString(slot, i)
# s += " " + self.op + " "
# s += self.rhs.getExprString(slot, i)
# s += ";\n"
return s
def getCExpr(self):
s = ""
s += "__swizzle("
for (n, i) in enumerate(self.slots):
s += "%d" % gSlotToN[i]
if n != len(self.slots):
s += ", "
s += self.values.getCExpr()
s += ")"
return s
def getExprString(self, slot, i):
s = ""
nn = len(self.slots)
if (nn == 1):
s = "/*swiz*/"
s += self.values.getExprString(self.slots, 0)
#s += "[%d]" % gSlotToN[self.slots]
assert isinstance(s, str)
else:
#n = gSlotToN[slot]
ss = self.slots[i]
s = self.values.getExprString(ss, i)
assert isinstance(s, str)
return s
class Break:
def __init__(self):
pass
def __str__(self):
return "break"
def getDeclareString(self):
return ""
def getIntermExprString(self):
return ""
def getCExpr(self):
return "break"
def getExprString(self, slot, i):
return "break"
class Assign:
def __init__(self, slots, lhs, rhs):
self.slots = slots
self.lhs = lhs
self.rhs = rhs
def __str__(self):
# return "TODO(swizzle: %s, %s)" % (self.slots, str(self.values))
return ""
def getDeclareString(self):
return ""
def getIntermExprString(self):
return ""
#s = ""
#s += self.values.getIntermExprString()
# for (n, slot) in enumerate(self.slots):
# #slot = gNToSlot[i]
# s += Indent() + self.values.getExprString(slot, n) + ";\n"
# return s
def getCExpr(self):
# todo
return ""
def getExprString(self, slot, i):
s = ""
nn = len(self.slots)
if (nn == 1):
s = str(self.values)
s += "[%d]" % i
assert isinstance(s, str)
else:
#n = gSlotToN[slot]
ss = self.slots[i]
s = self.values.getExprString(ss, i)
assert isinstance(s, str)
return s
class BinaryExpression:
def __init__(self, ty, n, op, lhs, rhs):
self.ty = ty
self.n = n
self.op = op
self.lhs = lhs
self.rhs = rhs
self.dst = NewTempVar()
def __str__(self):
return "TODO(expr)"
def getDeclareString(self):
s = ""
s += self.lhs.getDeclareString()
s += Indent() + self.rhs.getDeclareString()
baseTy = baseType(self.ty)
if self.n == 1:
s += "%s %s;\n" % (baseTy, self.dst)
else:
s += "%s %s[%d];\n" % (baseTy, self.dst, self.n)
return s
def getIntermExprString(self):
s = ""
s += self.lhs.getIntermExprString()
s += self.rhs.getIntermExprString()
if self.ty == 'vec' and self.n >= 4: # mat and vec?
ops = {
'/': '__div',
'+': '__add',
'-': '__sub',
'*': '__mul',
'>': '__gt',
'>=': '__ge',
'<': '__lt',
'<=': '__le',
'==': '__eq',
'&&': '__and',
'||': '__or',
'!': '__not',
'any_nequal': '__any_neq',
'all_equal': '__all_eq',
}
func = ops[self.op]
assert func is not False
s = ""
s += "__assign(" + self.dst + ", "
s += func + "("
s += self.lhs.getCExpr()
s += ", "
s += self.rhs.getCExpr()
s += "));\n"
else:
for i in range(self.n):
slot = gNToSlot[i]
s += Indent()
s += self.getExprString(slot, i)
s += " = "
s += self.lhs.getExprString(slot, i)
s += " " + self.op + " "
s += self.rhs.getExprString(slot, i)
s += ";\n"
return s
def getCExpr(self):
ops = {
'/': '__div',
'+': '__add',
'-': '__sub',
'*': '__mul',
'>': '__gt',
'>=': '__ge',
'<': '__lt',
'<=': '__le',
'==': '__eq',
'&&': '__and',
'||': '__or',
'!': '__not',
'any_nequal': '__any_neq',
'all_equal': '__all_eq',
}
func = ops[self.op]
assert func is not False
s = ""
s += func + "("
s += self.lhs.getCExpr()
s += ", "
s += self.rhs.getCExpr()
s += ")"
return s
def getExprString(self, slot, i):
s = ""
if self.n == 1:
s = self.dst
else:
s = "%s[%d]" % (self.dst, gSlotToN[slot])
assert isinstance(s, str)
return s
def dst(self):
return self.dst
class UnaryExpression:
def __init__(self, ty, n, op, src):
self.ty = ty
self.n = n
self.op = op
self.src = src
self.dst = NewTempVar()
def __str__(self):
return "TODO(uexpr)"
def getDeclareString(self):
s = ""
s += self.src.getDeclareString()
baseTy = baseType(self.ty)
if self.n == 1:
s += "%s %s;\n" % (baseTy, self.dst)
else:
s += "%s %s[%d];\n" % (baseTy, self.dst, self.n)
return s
def getIntermExprString(self):
# print "UnaryInterm\n"
ops = {
'neg': '-(',
'rcp': '__rcp(',
'i2f': '(float)(',
'f2i': '(int)(',
'b2f': '(float)(',
'!': '!(',
}
opExpr = ops[self.op]
assert opExpr is not False
s = ""
s += self.src.getIntermExprString()
for i in range(self.n):
slot = gNToSlot[i]
s += Indent()
s += self.getExprString(slot, i)
s += " = "
s += ("(%s(" % opExpr) + self.src.getExprString(slot, i) + ")))"
s += ";\n"
return s
def getCExpr(self):
ops = {
'neg': '__neg',
'rcp': '__rcp',
'rsq': '__rsq',
'i2f': '__i2f',
'f2i': '__f2i',
'b2f': '__b2f',
'!': '__not',
}
func = ops[self.op]
assert func is not False
s = ""
s += func + "("
s += self.src.getCExpr()
s += ")"
return s
def getExprString(self, slot, i):
s = ""
if self.n == 1:
s = "%s" % (self.dst)
else:
s = "%s[%d]" % (self.dst, gSlotToN[slot])
assert isinstance(s, str)
return s
def dst(self):
return self.dst
def constructExpr(expr):
name = expr[0]
if name == "var_ref":
return VarRef(expr[1])
elif name == "record_ref":
return RecordRef(expr[1], expr[2])
elif name == "constant":
(ty, n, values) = parseValue(expr[1:])
return Constant(ty, n, values)
elif name == "assign":
slots = expr[1]
lhs = constructExpr(expr[2])
rhs = constructExpr(expr[3])
return Assign(slots, lhs, rhs)
elif name == "swiz":
slots = expr[1]
e = constructExpr(expr[2])
return Swizzle(slots, e)
elif name == "expression":
(ty, n) = ParseTy(expr[1])
op = expr[2]
lhs = constructExpr(expr[3])
if len(expr) > 4:
rhs = constructExpr(expr[4])
return BinaryExpression(ty, n, op, lhs, rhs)
else:
# Unary expression
return UnaryExpression(ty, n, op, lhs)
elif name == "declare":
quals = expr[1]
ty = expr[2]
offt = 3
(ty, n) = ParseTy(ty)
varname = expr[offt]
return VarDecl(varname, ty, n)
elif name == "call":
# print expr
assert 0
elif expr == "break": # not a name
return Break()
elif name == "array_ref":
return ArrayRef(expr[1], expr[2])
else:
print("expr:", expr)
print("name:", name)
raise
return None
def EvalExpr(expr):
ss = ""
for e in expr:
if isinstance(e, list):
name = e[0]
else:
name = e
if name in expTables:
method = expTables[name]
ss += method(e)
return ss
def eAssign(exp):
slots = exp[1]
ss = ""
if len(slots) == 0:
# maybe assignment of matrix or struct type.
lhs = constructExpr(exp[2])
rhs = constructExpr(exp[3])
if isinstance(lhs, VarRef):
(ty, n) = GetTypeOfSymbol(lhs.orgname)
if IsMatrixType(ty):
for j in range(n):
for i in range(n):
if isinstance(rhs, Constant):
idx = j * n + i
ss += Indent() + \
"%s.v[%d][%d] = %s;\n" % (
lhs, j, i, rhs.values[idx])
else:
ss += Indent() + \
"%s.v[%d][%d] = %s.v[%d][%d];\n" % (
lhs, j, i, rhs, j, i)
else:
sym = GetSymbol(lhs.orgname)
                if sym is not False:
(sty, sn) = sym['type']
                    if FindStruct(sty) is not False:
# struct type
ss += Indent() + "%s = %s;\n" % (lhs, rhs)
else:
print("Invalid definition:" + sym)
raise
else:
print("Unknown or unsupported type:" + ty)
raise
else:
print("Unknown assign op")
raise
else:
# Don't emit code for redundant assignment
#
# e.g. assign to `assignment_tmp` in global scope.
# (assign (xyz) (var_ref assignment_tmp) (var_ref normalize_retval) )
if GetScopeLevel() == 1:
if len(exp[2]) == 2 and exp[2][0] == 'var_ref' and exp[2][1] == 'assignment_tmp':
return "// killed redundant assign to 'assignment_tmp'\n"
# @fixme. Supports first elem only at this time.
slot = slots[0]
lhs = constructExpr(exp[2])
rhs = constructExpr(exp[3])
# declare temp value if exist
ss += lhs.getDeclareString() + "\n"
ss += rhs.getDeclareString() + "\n"
# emit intermediate expr
ss += lhs.getIntermExprString()
ss += rhs.getIntermExprString()
# body
for (i, s) in enumerate(slot):
# print "lhs:" + lhs
# print "rhs:" + str(rhs)
ss += Indent() + lhs.getExprString(s, i) + " = " + \
rhs.getExprString(s, i) + ";\n"
# print ss
return ss
def eExpression(exp):
assert 0 # todo
print(exp)
def eReturn(exp):
if len(exp) < 2: # no argument for 'return'
ss = Indent() + "return;\n"
else:
retExp = constructExpr(exp[1])
ss = Indent() + "return " + retExp.getCExpr() + ";\n"
return ss
def eDiscard(exp):
ss = Indent() + "__glsl_discard(__fragment); return;\n"
return ss
def eSwizzle(expr):
slots = expr[1]
args = expr[2]
    # print expr
    return ""
def eCall(expr):
name = expr[1]
if len(expr) < 4:
# might be void type
dst = False
args = expr[2]
else:
dst = expr[2]
args = expr[3]
# print "dst:", dst
# print "args:", args
# print expr
if dst is not False:
# dst should be var_ref
assert dst[0] == 'var_ref'
dstExp = constructExpr(dst)
isBuiltin = False
isFuncPtrBuiltin = False
if IsBuiltinFunction(name):
isBuiltin = True
prefix = "__glsl_"
if IsBuiltinTraceFunction(name) or IsBuiltinRandomFunction(name) or IsBuiltinTextureFunction(name):
isFuncPtrBuiltin = True
else:
prefix = ""
s = ""
# dst = func(state, a, b, c, ...)
s += Indent() + "// { 'Call' : '" + str(args) + "' }\n"
if dst is not False:
s += Indent() + str(dstExp) + " = " + prefix + name + "("
else:
s += Indent() + prefix + name + "("
if isFuncPtrBuiltin:
s += "__fragment"
elif isBuiltin:
pass
else:
s += "__fragment, __state"
for (count, arg) in enumerate(args):
# print "arg:", arg
exp = constructExpr(arg)
# print "exp:", exp
if isBuiltin:
if isFuncPtrBuiltin:
s += ", "
else:
s += ", "
tycast = ""
isInput = False
isVarRef = False
if isinstance(exp, VarRef):
(ty, n) = GetTypeOfSymbol(exp.orgname)
isVarRef = True
tycast = GetTypeCastString(exp.orgname)
if (IsUniform(exp.orgname) or IsVaryingInput(exp.orgname)) and not IsTexture(exp.orgname):
isInput = True
if isInput:
if IsVectorType(ty):
s += "/*input:vec*/(*(" + tycast + "(" + str(exp) + ".data)))"
else:
s += "/*input:scalar*/" + \
"(*(" + tycast + "(" + str(exp) + ".data)))"
pass
elif isVarRef:
if IsBuiltinVariable(exp.orgname):
(ty, n) = GetBuiltinType(exp.orgname)
if IsVectorType(ty):
tycast = "(%s%d *)" % (ty, n)
else:
tycast = "(%s *)" % (ty)
s += "/*var_ref*/" + "(*(" + tycast + str(exp) + "))"
else:
s += "/*var_ref*/" + str(exp)
else:
s += exp.getCExpr()
if (isBuiltin and (not isFuncPtrBuiltin)) and ((len(args) - 1) != count):
s += ", "
s += ");\n"
return s
# assert 0
def eIf(expr):
# print "expr:", expr
# print "cond:", expr[1]
# print "then:", expr[2]
condExpr = constructExpr(expr[1])
# statement = [expr]
thenStmt = expr[2]
if len(expr[3]) > 0: # ![]
# print "else:", expr[3]
elseStmt = expr[3]
else:
elseStmt = None
# print "cond:", condExpr
# print "then:", thenStmt
# print "else:", elseStmt
ss = ""
ss = Indent() + "if ("
ss += condExpr.getCExpr()
ss += ") {\n"
# then expr
IncrementIndent()
for e in expr[2]:
if isinstance(e, list):
name = e[0]
else:
name = e
if name in expTables:
method = expTables[name]
ss += method(e)
DecrementIndent()
ss += "\n" + Indent() + "}"
    if elseStmt is not None:
ss += " else {\n"
IncrementIndent()
for e in expr[3]:
if isinstance(e, list):
name = e[0]
else:
name = e
if name in expTables:
method = expTables[name]
ss += method(e)
DecrementIndent()
ss += "\n" + Indent() + "}\n"
else:
ss += "\n"
return ss
def eLoop(expr):
# print "expr:", expr
declStmt = expr[1]
initStmt = expr[2]
condStmt = expr[3]
tickStmt = expr[4]
stmt = expr[5]
# print "stmt:", stmt
if len(declStmt) == 0 and len(initStmt) == 0 and len(condStmt) == 0 and len(tickStmt) == 0:
# while loop
ss = ""
ss += Indent() + "while (1) {\n"
IncrementIndent()
ss += EvalExpr(stmt)
DecrementIndent()
ss += Indent() + "}\n"
else:
# for(i = init; i < cond; i += tick)
ss = ""
assert len(declStmt) == 1
assert len(initStmt) == 1
assert len(condStmt) == 1
assert len(tickStmt) == 1
declExpr = declStmt[0]
assert declExpr[0] == 'declare'
initExpr = initStmt[0]
condExpr = condStmt[0]
tickExpr = tickStmt[0]
decl = parseDeclare(declExpr)
ss += Indent() + "{\n"
ss += Indent() + "// decl = " + str(declExpr) + "\n"
ss += EvalExpr(declStmt)
ss += Indent() + "// init = " + str(initExpr) + "\n"
initE = constructExpr(initExpr)
ss += Indent() + decl['name'] + " = " + initE.getCExpr() + ";\n"
ss += Indent() + "// cond = " + str(condExpr) + "\n"
condE = constructExpr(condExpr)
tickE = constructExpr(tickExpr)
ss += Indent() + "for (; " + decl['name'] + " < " + condE.getCExpr(
) + "; " + decl['name'] + " += " + tickE.getCExpr() + ") {\n"
#ss += Indent() + decl['name'] + " += " + tickE.getCExpr() + ";\n"
IncrementIndent()
#ss += Indent() + "if (" + decl['name'] + " >= " + condE.getCExpr() + ") {\n"
#ss += Indent() + " break;\n"
#ss += Indent() + "}\n"
for e in stmt:
# print "e:", e
if isinstance(e, list):
name = e[0]
else:
name = e
# Filter out tick expression
if tickStmt:
if name == "assign":
# ['assign', ['x'], ['var_ref', 'i'], ['expression', 'int', '+', ['var_ref', 'i'], ['constant', 'int', ['1']]]]
if e[2][1] == decl['name']:
# Skip this expression
continue
if name in expTables:
method = expTables[name]
ss += method(e)
#ss += Indent() + "// tick = " + str(tickExpr) + "\n"
#tickE = constructExpr(tickExpr)
#ss += Indent() + decl['name'] + " += " + tickE.getCExpr() + ";\n"
DecrementIndent()
ss += Indent() + "}\n"
ss += "}\n"
return ss
def eBreak(expr):
# print "break"
return Indent() + "break;\n"
def eDeclare(exp):
quals = exp[1]
ty = exp[2]
# if IsArray(ty):
# print "array", ty
offt = 3
(ty, n) = ParseTy(ty)
varname = exp[offt]
# print exp
# print "[eDeclare] ty:", (ty, n), ", var:", varname
isBuiltin = False
isInOut = False
if ('in' in quals) or ('out' in quals) or ('uniform' in quals):
isInOut = True
if not IsBuiltinVariable(varname):
if 'in' in quals:
# ((ty, n), qual, name)
AddVaryingInput(varname, ty, n, quals)
elif 'uniform' in quals:
# ((ty, n), qual, name)
AddUniformInput(varname, ty, n, quals)
else:
isBuiltin = True
s = ""
# user defined global var or temporary variable needs var declaration.
if (not isInOut) and (not isBuiltin):
# skip redundant AST in global scope, generated from MESA's GLSL
# compiler.
if varname == "assignment_tmp" and (GetScopeLevel() == 1):
s = ""
else:
# Add to symbol table
AddSymbol(varname, ty, n, quals)
decl = VarDecl(varname, ty, n)
s = Indent() + decl.getDeclareString()
return s
def parseDeclare(exp):
d = {}
quals = exp[1]
ty = exp[2]
# if IsArray(ty):
# print "array", ty
offt = 3
(ty, n) = ParseTy(ty)
varname = exp[offt]
# print exp
# print "ty:", (ty, n), ", var:", varname
d['type'] = (ty, n)
d['quals'] = quals
d['name'] = varname
return d
def emitCArgs(args):
# args = ['parameters', ...]
if len(args) == 1:
return ""
s = ""
for (n, arg) in enumerate(args[1:]): # skip 'parameters' tag
decl = parseDeclare(arg)
prefix = ""
if 'out' in decl['quals']:
prefix = "&" # C++ reference
if IsVectorType(decl['type'][0]):
s += decl['type'][0] + str(decl['type'][1]) + prefix
elif IsMatrixType(decl['type'][0]):
s += decl['type'][0] + str(decl['type'][1]) + prefix
else:
s += decl['type'][0] + prefix
s += " "
s += decl['name']
if n != (len(args[1:]) - 1):
s += ", "
return s
def eFunction(exp):
name = exp[1]
params = exp[2]
# consider struct type
p = params[1].split('__')
if len(p) > 1:
signature = p[0] + '__'
else:
signature = p[0]
args = params[2]
statements = params[3]
# push symbol table stack
gSymbolTable.append(dict())
if IsBuiltinFunction(name):
# No need to emit builtin function declaration.
return ""
isMainFunction = False
s = ""
if name == "main":
# entry point function
s += "void shader(Fragment* __fragment, FragmentState* __state)"
isMainFunction = True
else:
# static function.
argStr = emitCArgs(args)
s += "// args = " + str(args)
s += "\n"
s += "static "
s += signature # @fixme. Support compound and vector type.
s += " "
s += name
s += " ("
# __fragment and __state arg are required to access
# builtin/uniform/varying variable from user-defined function
s += "Fragment* __fragment, FragmentState* __state"
if len(argStr) > 0:
s += ", "
s += argStr
s += ")"
# add function parameter to symbol table
if len(args) > 1: # function has arguments
for arg in args[1:]: # skip 'parameter' tag
quals = arg[1]
ty = arg[2]
# if IsArray(ty):
# print "array", ty
offt = 3
(ty, n) = ParseTy(ty)
varname = arg[offt]
AddSymbol(varname, ty, n, quals)
if len(statements) < 1:
# Seems it has no function body.
s += " {};\n\n"
return s
s += "\n{\n"
IncrementIndent()
# Call constructor in main function
if isMainFunction:
s += Indent() + "__shader_internal_constructor(__state);\n"
for stmt in statements:
expName = stmt[0]
if expName in expTables:
method = expTables[expName]
assert method
s += method(stmt)
DecrementIndent()
s += "}\n\n"
# pop symbol table
gSymbolTable.pop()
return s
expTables = {
'declare': eDeclare,
'function': eFunction,
'assign': eAssign,
'expression': eExpression,
'swiz': eSwizzle,
'call': eCall,
'if': eIf,
'loop': eLoop,
'break': eBreak,
'return': eReturn,
'discard': eDiscard,
}
def emitEntryPoint():
"""
Emit C entry point function definition.
"""
s = "extern \"C\" {\n"
s += "void shader(Fragment* __fragment, FragmentState* __state);\n"
s += "int shader_info(FragmentConfig* config);\n"
s += "}\n"
return s
def emitInitializer():
"""
Emit initializer function.
"""
qualDic = {
'in': "GLSL_QUALIFIER_IN",
'out': "GLSL_QUALIFIER_OUT",
'uniform': "GLSL_QUALIFIER_UNIFORM",
}
s = ""
s += "int shader_info(FragmentConfig* config)\n"
s += "{\n"
IncrementIndent()
# Uniforms
for uniform in list(gUniformInputs.values()):
qualStr = "GLSL_QUALIFIER_UNIFORM"
ty = uniform['type']
idx = uniform['index']
s += Indent() + "// " + str(uniform) + "\n"
s += Indent() + \
"config->uniformInfos[" + \
str(idx) + "].type.name = \"" + ty[0] + "\";\n"
s += Indent() + \
"config->uniformInfos[" + str(idx) + \
"].type.n = " + str(ty[1]) + ";\n"
s += Indent() + \
"config->uniformInfos[" + str(idx) + "].qualifier = " + \
qualStr + ";\n" # redundant?
s += Indent() + \
"config->uniformInfos[" + str(idx) + "].name = \"" + \
uniform['name'] + "\";\n"
s += "\n"
s += Indent() + "config->numUniforms = %d;\n" % (len(gUniformInputs))
s += "\n"
# Varyings
for varying in list(gVaryingInputs.values()):
qualStr = ""
if len(varying['quals']) == 0:
qualStr += "GLSL_QUALIFIER_NONE"
else:
for (n, qual) in enumerate(varying['quals']):
qualStr += qualDic[qual]
if (n != (len(varying['quals']) - 1)):
qualStr += " | "
ty = varying['type']
idx = varying['index']
s += Indent() + "// " + str(varying) + "\n"
s += Indent() + \
"config->varyingInfos[" + \
str(idx) + "].type.name = \"" + ty[0] + "\";\n"
s += Indent() + \
"config->varyingInfos[" + str(idx) + \
"].type.n = " + str(ty[1]) + ";\n"
s += Indent() + \
"config->varyingInfos[" + \
str(idx) + "].qualifier = " + qualStr + ";\n"
s += Indent() + \
"config->varyingInfos[" + str(idx) + "].name = \"" + \
varying['name'] + "\";\n"
s += "\n"
s += Indent() + "config->numVaryings = %d;\n" % (len(gVaryingInputs))
s += "\n"
s += Indent() + "return 0; // OK\n"
DecrementIndent()
s += "}\n"
return s
def ir_to_c(input_sexp_string, opts):
"""
Converts S-expression style IR to C/C++ code.
"""
ir_exp = parse_sexp(input_sexp_string)
if ('-v', '') in opts:
print("IR:" + str(ir_exp))
s = ""
# header
s = "#include \"glsl_runtime.h\"\n"
# entry point
s += emitEntryPoint()
#
# pass1.1: emit struct definition
#
# add global scope
s += "// --> struct definition\n"
# print ir_exp
for e in ir_exp:
if isinstance(e, list):
name = e[0]
else:
name = e
if name == 'structure':
struct = {}
# ['structure, ['name'], ['instance'], ['N'], [fields]
# fields = [['ty'], ['name']]
structname = e[1][0] + "__"
struct['name'] = structname
members = e[4]
memberDefs = {}
s += "// struct_def = " + str(e) + "\n"
s += "typedef struct {\n"
for member in members:
ty = member[0][0]
name = member[1][0]
memberDefs[name] = {'ty': ty, 'name': name}
s += " " + ty + " " + name + ";\n"
s += "} %s;\n" % structname
struct['members'] = memberDefs
# Add to definition
DefineStruct(struct)
s += "// <-- struct definition\n"
#
# pass1.2: emit global variable definition
#
# add global scope
gSymbolTable.append(dict())
s += "// --> global variables\n"
for e in ir_exp:
if isinstance(e, list):
name = e[0]
else:
name = e
if name == 'declare':
s += eDeclare(e)
s += "// <-- global variables\n"
#
# pass2: emit global variable initializer
#
IncrementIndent()
s += "static void __shader_internal_constructor(FragmentState* __state) {\n"
for e in ir_exp:
if isinstance(e, list):
name = e[0]
else:
name = e
if name == 'assign':
s += eAssign(e)
DecrementIndent()
s += "}\n"
#
# pass3: body
#
for e in ir_exp:
if isinstance(e, list):
name = e[0]
else:
name = e
        # skip decl and assign in global scope
if name == 'declare' or name == 'assign':
continue
# might be member field of struct
if isinstance(name, list):
continue
if name in expTables:
method = expTables[name]
s += method(e)
# initializer
s += emitInitializer()
# for safety
gSymbolTable.pop()
return s
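# Hedged CLI sketch (added; not part of the original tool): assumes an IR dump in the
# S-expression form produced by MESA's GLSL compiler is passed as the first argument.
# The empty opts list simply leaves the verbose ('-v', '') printing above disabled.
if __name__ == "__main__":
    with open(sys.argv[1], "r") as f:
        print(ir_to_c(f.read(), opts=[]))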
| 2.203125
| 2
|
lopi/descriptors/regex_validation.py
|
QristaLabs/LOPi
| 0
|
12784882
|
import re
import typing as t
from .typed import StringTyped
class RegexDescriptor(StringTyped):
def __init__(self, *args, pattern: t.Union[str, re.Pattern], **kwargs) -> None:
super().__init__(*args, **kwargs)
if isinstance(pattern, str):
pattern = re.compile(pattern)
self.pattern = pattern
def __set__(self, instance: object, value: str) -> None:
if not self.pattern.match(value):
raise ValueError("String must match the regex pattern.")
super().__set__(instance, value)
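# Hedged usage sketch (added; kept as comments because StringTyped's constructor and
# storage behaviour live in .typed and are not shown here):
#     class Account:
#         email = RegexDescriptor(pattern=r"[^@\s]+@[^@\s]+\.[^@\s]+")
#     acc = Account()
#     acc.email = "user@example.com"   # matches the pattern, accepted
#     acc.email = "not-an-email"       # __set__ raises ValueError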
| 3.03125
| 3
|
Core/DivideExamples.py
|
rothadamg/UPSITE
| 1
|
12784883
|
<filename>Core/DivideExamples.py
"""
Pseudorandomly distributed subsets
"""
__version__ = "$Revision: 1.4 $"
import Split
import sys
def getDocumentId(idString):
return idString.rsplit(".",2)[0]
def getIdFromLine(line):
assert(line.find("#") != -1)
return line.split("#")[-1].strip()
def getDocumentIds(filename):
documentIds = []
inputFile = open(filename, "rt")
try:
for line in inputFile:
if len(line) == 0 or line[0] == "#":
continue
docId = getDocumentId(getIdFromLine(line))
if not docId in documentIds:
documentIds.append(docId)
finally:
inputFile.close()
return documentIds
def getDocumentFolds(documentIds, folds):
sample = Split.getFolds(len(documentIds),folds)
division = {}
for i in range(len(documentIds)):
division[documentIds[i]] = sample[i]
return division
def divideExamples(filename, outputFilenames):
print >> sys.stderr, "Reading document ids"
documentIds = getDocumentIds(filename)
print >> sys.stderr, "Dividing documents into folds"
division = getDocumentFolds(documentIds, len(outputFilenames))
print >> sys.stderr, "Dividing examples"
outputFiles = []
for name in outputFilenames:
outputFiles.append(open(name, "wt"))
inputFile = open(filename, "rt")
try:
for line in inputFile:
if len(line) == 0 or line[0] == "#":
continue
docId = getDocumentId(getIdFromLine(line))
outputFiles[division[docId]].write(line)
finally:
inputFile.close()
for outputFile in outputFiles:
outputFile.close()
if __name__=="__main__":
from optparse import OptionParser
defaultAnalysisFilename = "/usr/share/biotext/ComplexPPI/BioInferForComplexPPIVisible.xml"
    optparser = OptionParser(usage="%prog [options]\nDivide examples pseudorandomly into document-level folds.")
optparser.add_option("-i", "--input", default=defaultAnalysisFilename, dest="input", help="Corpus in analysis format", metavar="FILE")
optparser.add_option("-o", "--output", default="", dest="output", help="Output directory")
optparser.add_option("-f", "--folds", type="int", default=10, dest="folds", help="X-fold cross validation")
(options, args) = optparser.parse_args()
outputFilenames = []
for i in range(options.folds):
outputFilenames.append(options.output + options.input + ".fold" + str(i))
divideExamples(options.input, outputFilenames)
| 2.671875
| 3
|
setup.py
|
mcx/deep_ope
| 64
|
12784884
|
<gh_stars>10-100
# Copyright 2020 Google LLC.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import find_packages
from setuptools import setup
setup(
name='deep_ope',
description=(
'Deep OPE: Benchmarks for Off-policy Evaluation and Offline Policy Selection'
),
author='Google LLC',
author_email='<EMAIL>',
url='https://github.com/google-research/deep_ope',
license='Apache 2.0',
packages=find_packages(),
package_data={},
install_requires=[
'absl-py==0.10.0',
'dm-acme==0.1.8',
'dm-env==1.2',
'dm-reverb==0.1.0',
'dm-sonnet',
'dm-tree==0.1.5',
'gym==0.17.2',
'tensorflow==2.3.0',
'd4rl @ git+git://github.com/rail-berkeley/d4rl@c39eefd68d2f3277ca68e996a45ce1dd24e65625',
'jax==0.2.11',
'jaxlib==0.1.64',
])
| 0.976563
| 1
|
test/test_invertible_1x1_conv.py
|
AI-Huang/glow-pytorch
| 0
|
12784885
|
<filename>test/test_invertible_1x1_conv.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Date : Aug-04-21 21:43
# @Author : <NAME> (<EMAIL>)
import torch
import torch.nn.functional as F
from models.glow.invertible_1x1_conv import Invertible_1x1_Conv
def invertible_1x1_conv_test():
"""
Test cases:
conv1x1 = Invertible_1x1_Conv(
in_channels=in_channels, out_channels=out_channels, LU_decomposed=False)
"""
in_channels = 3
    # out_channels = 16  # in_channels and out_channels must be equal
out_channels = 3
conv1x1 = Invertible_1x1_Conv(
in_channels=in_channels, out_channels=out_channels, LU_decomposed=False)
x = torch.randn([64, in_channels, 32, 32])
print(x.shape)
print(f"x.sum(): {x.sum()}")
logdet_init = 0
    # Forward (normal direction)
z, logdet = conv1x1(x, logdet_init)
print(z.shape)
print(f"z.sum(): {z.sum()}")
    # Forward with reverse=True (inverse direction)
x_hat, logdet_final = conv1x1(z, logdet, reverse=True)
print(x_hat.shape)
print(f"x_hat.sum(): {x_hat.sum()}")
print(f"F.l1_loss(x, x_hat): {F.l1_loss(x, x_hat)}")
assert F.l1_loss(x, x_hat) < 1
def main():
invertible_1x1_conv_test()
if __name__ == "__main__":
main()
| 2.84375
| 3
|
moderngl_window/capture/__init__.py
|
sheepman4267/moderngl-window
| 12
|
12784886
|
<reponame>sheepman4267/moderngl-window<gh_stars>10-100
from .base import BaseVideoCapture # noqa
from .ffmpeg import FFmpegCapture # noqa
| 0.882813
| 1
|
code/119.Pascal's-Triangle-II.py
|
Aden-Q/leetcode
| 1
|
12784887
|
<gh_stars>1-10
from typing import List
class Solution:
def getRow(self, rowIndex: int) -> List[int]:
if rowIndex == 0:
return [1]
cur = [1, 1]
next_row = cur
for i in range(1, rowIndex):
next_row = [1]
for j in range(i):
next_row.append(cur[j] + cur[j+1])
next_row.append(1)
cur = next_row
return next_row
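# Quick illustrative check (added, not part of the original solution):
if __name__ == "__main__":
    assert Solution().getRow(0) == [1]
    assert Solution().getRow(3) == [1, 3, 3, 1]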
| 3.046875
| 3
|
mesh_transformer/layers.py
|
VE-FORBRYDERNE/mesh-transformer-jax
| 2
|
12784888
|
<filename>mesh_transformer/layers.py
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
from mesh_transformer.util import f_psum, g_psum, maybe_shard, head_print
from jax.experimental import PartitionSpec as P
from jax.experimental.maps import thread_resources
class ReplicatedLayerNorm(hk.Module):
def __init__(self, offset=True):
super().__init__()
self.offset = offset
def __call__(self, inputs: jnp.ndarray) -> jnp.ndarray:
mean = jnp.mean(inputs, axis=-1, keepdims=True)
variance = jnp.var(inputs, axis=-1, keepdims=True)
param_shape = inputs.shape[-1:]
scale = hk.get_parameter("scale", param_shape, inputs.dtype, init=jnp.ones)
scale = jax.lax.all_gather(scale, "shard")[0]
offset = hk.get_parameter("offset", param_shape, inputs.dtype, init=jnp.zeros)
offset = jax.lax.all_gather(offset, "shard")[0]
scale = jnp.broadcast_to(scale, inputs.shape)
offset = jnp.broadcast_to(offset, inputs.shape)
mean = jnp.broadcast_to(mean, inputs.shape)
inv = scale * jax.lax.rsqrt(variance + 1e-5)
if self.offset:
return inv * (inputs - mean) + offset
else:
return inv * (inputs - mean)
class RMSNorm(hk.Module):
def __init__(self, offset, elementwise):
super().__init__()
self.offset = offset
self.elementwise = elementwise
def __call__(self, x):
param_shape = (x.shape[-1],) if self.elementwise else ()
normed = x / (jnp.linalg.norm(x, axis=-1, keepdims=True) + 1e-5)
scale = hk.get_parameter('scale', param_shape, init=hk.initializers.Constant(x.shape[-1] ** 0.5))
scale = jax.lax.pmean(scale, "shard")
normed = normed * scale
if self.offset:
offset = hk.get_parameter('offset', param_shape, init=jnp.zeros)
offset = jax.lax.pmean(offset, "shard")
normed = normed + offset
return normed
def getnorm(type):
if type == "layernorm":
return ReplicatedLayerNorm()
if type == "layernorm-desync":
return hk.LayerNorm(-1, True, True)
elif type == "layernorm-nobias":
return ReplicatedLayerNorm(offset=False)
elif type == "rmsnorm":
return RMSNorm(False, True)
elif type == "scalenorm":
return RMSNorm(False, False)
elif type == "rmsnorm-bias":
return RMSNorm(True, True)
elif type == "scalenorm-bias":
return RMSNorm(True, False)
else:
raise Exception("Not implemented")
class RelativePositionEmbs(hk.Module):
@staticmethod
def _relative_position_bucket(relative_position,
num_buckets=32,
max_distance=128):
ret = 0
n = -relative_position
n = np.maximum(n, 0)
# now n is in the range [0, inf)
max_exact = num_buckets // 2
is_small = (n < max_exact)
val_if_large = max_exact + (
np.log(n.astype(np.float32) / max_exact + np.finfo(np.float32).eps) /
np.log(max_distance / max_exact) *
(num_buckets - max_exact)).astype(np.int32)
val_if_large = np.minimum(val_if_large, num_buckets - 1)
ret += np.where(is_small, n, val_if_large)
return ret
def __call__(self, qlen, klen, heads, num_buckets):
"""Produce relative position embedding attention biases.
Returns:
output: `(heads, q_len, k_len)` attention bias
"""
context_position = np.arange(qlen, dtype=jnp.int32)[:, None]
memory_position = np.arange(klen, dtype=jnp.int32)[None, :]
relative_position = memory_position - context_position # shape (qlen, klen)
rp_bucket = self._relative_position_bucket(relative_position)
relative_attention_bias = hk.get_parameter('rel_embedding', [heads, num_buckets],
init=hk.initializers.TruncatedNormal(stddev=0.02))
# Instead of using a slow gather, we create a leading-dimension one-hot
# array from rp_bucket and use it to perform the gather-equivalent via a
# contraction, i.e.:
# (num_head, num_buckets) x (num_buckets one-hot, qlen, klen).
# This is equivalent to relative_attention_bias[:, rp_bucket]
bcast_iota = jax.lax.broadcasted_iota(jnp.int32, (num_buckets, 1, 1), 0)
rp_bucket_one_hot = jnp.array(rp_bucket[jnp.newaxis, Ellipsis] == bcast_iota).astype(
relative_attention_bias.dtype)
        # --> shape (num_heads, qlen, klen)
values = jax.lax.dot_general(
relative_attention_bias,
rp_bucket_one_hot,
(
((1,), (0,)), # rhs, lhs contracting dims
((), ()))) # no batched dims
return values
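# Shape sketch (added, illustrative): when called inside an hk.transform-ed function,
# RelativePositionEmbs()(qlen, klen, heads, num_buckets) yields a (heads, qlen, klen)
# bias, matching the (head, query, key) layout of the attention logits built below.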
class TransposingLinear(hk.Module):
def __init__(self, input_size, output_size, with_bias=True, w_init=None, b_init=None, name=None):
if name is None:
name = "linear"
super().__init__(name=name)
self.input_size = input_size
self.output_size = output_size
self.with_bias = with_bias
self.w_init = w_init
self.b_init = b_init or jnp.zeros
def __call__(self, inputs: jnp.ndarray, *, precision=None, transpose_weights=False) -> jnp.ndarray:
if not inputs.shape:
raise ValueError("Input must not be scalar.")
input_size = self.input_size
output_size = self.output_size
dtype = inputs.dtype
w_init = self.w_init
if w_init is None:
stddev = 1. / np.sqrt(self.input_size)
w_init = hk.initializers.TruncatedNormal(stddev=stddev)
w = hk.get_parameter("w", [input_size, output_size], dtype, init=w_init)
if transpose_weights:
w = w.T
out = jnp.dot(inputs, w, precision=precision)
if self.with_bias:
b = hk.get_parameter("b", [self.output_size], dtype, init=self.b_init)
b = jnp.broadcast_to(b, out.shape)
out = out + b
return out
def fixed_pos_embedding(x, seq_dim=0):
dim = x.shape[-1]
inv_freq = 1. / (10000 ** (np.arange(0, dim, 2) / dim))
sinusoid_inp = np.einsum('i , j -> i j', np.arange(x.shape[seq_dim]), inv_freq)
return np.sin(sinusoid_inp), np.cos(sinusoid_inp)
def rotate_every_two(x):
x1 = x[:, :, ::2]
x2 = x[:, :, 1::2]
x = jnp.stack((-x2, x1), axis=-1)
return x.reshape(*x.shape[:-2], -1)
def apply_rotary_pos_emb(x, sincos):
sin, cos = map(lambda t: t.repeat(2, axis=-1)[-x.shape[0]:, None, :], sincos)
return (x * cos) + (rotate_every_two(x) * sin)
def rotate_every_two_v2(x):
x1 = x[:, :, :, ::2]
x2 = x[:, :, :, 1::2]
x = jnp.stack((-x2, x1), axis=-1)
return x.reshape(*x.shape[:-2], -1)
def apply_rotary_pos_emb_v2(x, sincos):
sin, cos = map(lambda t: t.repeat(2, axis=-1)[-x.shape[-3]:, None, :], sincos)
return (x * cos) + (rotate_every_two_v2(x) * sin)
class EmbeddingShard(hk.Module):
def __init__(self, config, name=None):
super().__init__(name=name)
in_dim = config["n_vocab"] + config.get("n_vocab_padding", 0)
out_dim = config["d_model"]
shards = config["cores_per_replica"]
self.compat = config.get("compat", "j")
assert in_dim % shards == 0
self.in_dim = in_dim
self.out_dim = out_dim
self.in_dim_per_shard = in_dim // shards
self.out_dim_per_shard = out_dim // shards
if config["pe"] == "fixed":
embed_init = hk.initializers.TruncatedNormal(stddev=0.02)
self.positional_embeddings = hk.get_parameter('pos_embs', [config["seq"], self.out_dim_per_shard], init=embed_init)
else:
self.positional_embeddings = None
self.proj = TransposingLinear(self.in_dim_per_shard, self.out_dim, w_init=hk.initializers.TruncatedNormal(stddev=1 / np.sqrt(in_dim)), with_bias=self.compat != "neo")
def __call__(self, x, dtype=jnp.bfloat16, pe_length=0, soft_embeddings=None):
shard_start_index = jax.lax.axis_index('shard') * self.in_dim_per_shard
input_onehot = jax.nn.one_hot(x - shard_start_index, self.in_dim_per_shard)
proj_out = self.proj(input_onehot)
mask = jnp.broadcast_to((x < self.in_dim)[:, jnp.newaxis], proj_out.shape)
proj_out = jnp.where(mask, proj_out, 0)
if soft_embeddings is not None:
assert soft_embeddings.ndim == 2
assert soft_embeddings.shape[1] == self.out_dim
soft_shard_start_index = self.in_dim + jax.lax.axis_index('shard') * soft_embeddings.shape[0]
input_soft_onehot = jax.nn.one_hot(x - soft_shard_start_index, soft_embeddings.shape[0])
proj_out += jnp.dot(input_soft_onehot, soft_embeddings)
proj_out = g_psum(proj_out)
if self.positional_embeddings is not None:
pe_length = jnp.int32(pe_length)
shard_roll_index = jnp.int32(jax.lax.axis_index('shard') * self.out_dim_per_shard)
pos_embed = jnp.pad(self.positional_embeddings, ((0, 0), (0, self.out_dim - self.out_dim_per_shard)))
pos_embed = jnp.roll(pos_embed, shard_roll_index, axis=1)
pos_embed = jnp.roll(pos_embed, -pe_length, axis=0)[-proj_out.shape[0]:]
proj_out += pos_embed
proj_out = g_psum(proj_out)
return proj_out
class EmbeddingShardV2(hk.Module):
def __init__(self, config, name=None):
super().__init__(name=name)
in_dim = config["n_vocab"]
out_dim = config["d_model"]
shards = config["cores_per_replica"]
assert in_dim % shards == 0
self.in_dim = in_dim
self.out_dim = out_dim
self.proj = hk.Linear(self.out_dim, w_init=hk.initializers.TruncatedNormal(stddev=1 / np.sqrt(in_dim)))
def __call__(self, x, dtype=jnp.bfloat16):
input_onehot = jax.nn.one_hot(x, self.in_dim)
input_onehot = maybe_shard(input_onehot, P("dp", None, "mp"))
proj_out = self.proj(input_onehot)
return proj_out
# We actually combine the FF and dense in one layer (i.e. compute in parallel) to minimize all reduces
class TransformerLayerShard(hk.Module):
def __init__(self, config, name=None, init_scale=1., attention_type="global"):
super().__init__(name=name)
heads = config["n_heads"]
dim = config["d_model"]
shards = config["cores_per_replica"]
norm = getnorm(config["norm"])
self.is_rotary = config["pe"] == "rotary"
self.attention_type = attention_type
self.local_attention_window = config.get("local_attention_window", 256)
self.compat = config.get("compat", "j")
assert dim % heads == 0
assert heads % shards == 0
assert attention_type in ("global", "local")
self.dim = dim
self.dim_per_head = dim // heads
self.heads_per_shard = heads // shards
self.dim_per_shard = dim // shards
self.pe_rotary_dims = config.get("pe_rotary_dims", self.dim_per_head)
self.norm = norm
if self.compat == "neo":
self.norm_2 = getnorm(config["norm"])
self.q = hk.Linear(self.dim_per_shard, with_bias=False)
self.v = hk.Linear(self.dim_per_shard, with_bias=False)
self.k = hk.Linear(self.dim_per_shard, with_bias=False)
self.o = hk.Linear(self.dim, with_bias=self.compat == "neo",
w_init=hk.initializers.TruncatedNormal(stddev=init_scale / np.sqrt(self.dim)))
self.dense_proj = hk.Linear(self.dim_per_shard * 4)
self.dense_proj_o = hk.Linear(self.dim,
w_init=hk.initializers.TruncatedNormal(stddev=init_scale / np.sqrt(self.dim)))
def self_attn(self, q, v, k, attn_bias):
if self.is_rotary:
k_rot = k[:, :, :self.pe_rotary_dims]
k_pass = k[:, :, self.pe_rotary_dims:]
q_rot = q[:, :, :self.pe_rotary_dims]
q_pass = q[:, :, self.pe_rotary_dims:]
sincos = fixed_pos_embedding(k_rot)
q_rot = apply_rotary_pos_emb(q_rot, sincos)
k_rot = apply_rotary_pos_emb(k_rot, sincos)
k = jnp.concatenate([k_rot, k_pass], axis=-1)
q = jnp.concatenate([q_rot, q_pass], axis=-1)
attention_logits = jnp.einsum("thd,Thd->htT", q, k)
if self.compat != "neo":
sqrt_key_size = np.sqrt(self.dim_per_head).astype(k.dtype)
attention_logits = attention_logits / sqrt_key_size
attention_logits += attn_bias
attention_weights = jax.nn.softmax(attention_logits)
attention_vec = jnp.einsum("htT,Thd->thd", attention_weights, v).reshape((-1, self.dim_per_shard))
return self.o(attention_vec)
def ff(self, x):
dense_proj = self.dense_proj(x)
dense_proj = jax.nn.gelu(dense_proj)
return self.dense_proj_o(dense_proj)
def qvk_proj(self, x):
q = self.q(x).reshape(x.shape[:-1] + (self.heads_per_shard, self.dim_per_head))
v = self.v(x).reshape(x.shape[:-1] + (self.heads_per_shard, self.dim_per_head))
k = self.k(x).reshape(x.shape[:-1] + (self.heads_per_shard, self.dim_per_head))
return q, v, k
def neo_ff(self, x):
x = self.norm_2(x)
dense_out = self.ff(x)
return g_psum(dense_out)
def __call__(self, x, attn_bias):
x = f_psum(x)
x = self.norm(x)
q, v, k = self.qvk_proj(x)
seq_len = x.shape[0]
causal_mask = np.tril(np.ones((seq_len, seq_len)))
if self.attention_type == "local":
causal_mask -= np.tril(causal_mask, -self.local_attention_window)
bias = -1e10 * (1. - causal_mask)
bias += attn_bias
attn_out = self.self_attn(q, v, k, bias)
if self.compat == "neo":
out = attn_out
else:
dense_out = self.ff(x)
out = attn_out + dense_out
return g_psum(out)
# iterate the decoding process by a single token
def decode_once(self, decode_state, x, attn_bias):
x = f_psum(x)
x = self.norm(x)
assert x.shape[0] == 1
q, v, k = self.qvk_proj(x)
# add new kv to end
v = jnp.concatenate((decode_state["v"], v), axis=0)[1:]
k = jnp.concatenate((decode_state["k"], k), axis=0)[1:]
tokens_decoded = decode_state["tokens_decoded"] + 1
length = v.shape[0]
if self.attention_type == "local":
masked_tokens = length - jnp.minimum(tokens_decoded, self.local_attention_window)
else:
masked_tokens = length - tokens_decoded
attention_mask = jnp.arange(0, length) < masked_tokens
bias = (-1e10 * attention_mask)
bias += attn_bias
attn_out = self.self_attn(q, v, k, bias)
if self.compat == "neo":
out = attn_out
else:
dense_out = self.ff(x)
out = attn_out + dense_out
return g_psum(out), {
"tokens_decoded": tokens_decoded,
"k": k,
"v": v
}
# take in right aligned context tokens and generate an initial state
def get_init_decode_state(self, x, given_length, attn_bias):
x = f_psum(x)
x = self.norm(x)
q, v, k = self.qvk_proj(x)
full_length = x.shape[0]
masked_tokens = full_length - given_length
seq_len = x.shape[0]
causal_mask = np.tril(np.ones((seq_len, seq_len)))
if self.attention_type == "local":
causal_mask -= np.tril(causal_mask, -self.local_attention_window)
bias = -1e10 * (1. - causal_mask) # regular AR masking
bias -= 1e10 * (jnp.arange(0, full_length) < masked_tokens) # mask out zero tokens before context starts
bias += attn_bias # finally add attn bias for rpe
attn_out = self.self_attn(q, v, k, bias)
if self.compat == "neo":
out = attn_out
else:
dense_out = self.ff(x)
out = attn_out + dense_out
return g_psum(out), {"k": k, "v": v, "tokens_decoded": given_length.astype(jnp.uint32)}
# This new class combines the input and output projection into one matmul for better efficiency
class TransformerLayerShardV2(hk.Module):
def __init__(self, config, name=None, init_scale=1.):
super().__init__(name=name)
self.dim = config["d_model"]
self.n_head = config["n_heads"]
self.d_head = config["d_head"]
self.d_rotary = config["pe_rotary_dims"]
self.mp_num = thread_resources.env.shape['mp']
self.norm = hk.LayerNorm(-1, True, True)
self.input_proj = hk.Linear(self.d_head * self.n_head * 3 + self.dim * 8)
self.output_proj = hk.Linear(self.dim,
w_init=hk.initializers.TruncatedNormal(stddev=init_scale / jnp.sqrt(self.dim)))
def self_attn(self, q, v, k, attn_bias):
k_rot = k[:, :, :, :self.d_rotary]
k_pass = k[:, :, :, self.d_rotary:]
q_rot = q[:, :, :, :self.d_rotary]
q_pass = q[:, :, :, self.d_rotary:]
sincos = fixed_pos_embedding(k_rot, seq_dim=1)
q_rot = apply_rotary_pos_emb_v2(q_rot, sincos)
k_rot = apply_rotary_pos_emb_v2(k_rot, sincos)
q_rot = maybe_shard(q_rot, P("dp", None, "mp", None))
k_rot = maybe_shard(k_rot, P("dp", None, "mp", None))
k = jnp.concatenate([k_rot, k_pass], axis=-1)
q = jnp.concatenate([q_rot, q_pass], axis=-1)
k = maybe_shard(k, P("dp", None, "mp", None))
q = maybe_shard(q, P("dp", None, "mp", None))
attention_logits = jnp.einsum("bthd,bThd->bhtT", q, k)
attention_logits = maybe_shard(attention_logits, P("dp", "mp", None, None))
sqrt_key_size = np.sqrt(self.d_head).astype(k.dtype)
attention_logits = attention_logits / sqrt_key_size
attention_logits += attn_bias
attention_logits = maybe_shard(attention_logits, P("dp", "mp", None, None))
attention_weights = jax.nn.softmax(attention_logits)
attention_weights = maybe_shard(attention_weights, P("dp", "mp", None, None))
attention_vec = jnp.einsum("bhtT,bThd->bthd", attention_weights, v)
attention_vec = maybe_shard(attention_vec, P("dp", None, "mp", None))
sharded_attn_vec = attention_vec.reshape(attention_vec.shape[:2] + (self.mp_num, self.n_head//self.mp_num, -1))
sharded_attn_vec = maybe_shard(sharded_attn_vec, P("dp", None, "mp", None, None))
attention_vec = attention_vec.reshape(sharded_attn_vec.shape[:2] + (self.mp_num, -1))
return maybe_shard(attention_vec, P("dp", None, "mp", None))
# input: [batch, seq, dim]
# output: [batch, seq, n_head, d_head]
def head_split(self, x):
reshaped = x.reshape(x.shape[:-1] + (self.n_head//self.mp_num, self.d_head))
reshaped = reshaped.reshape(x.shape[:-2] + (-1, ) + x.shape[-1:])
# return reshaped
return maybe_shard(reshaped, P("dp", None, "mp", None))
def input(self, x):
# [batch, seq, dim]
projected = self.input_proj(x)
# [batch, seq, mp, dim//mp]
projected = maybe_shard(projected, P("dp", None, "mp"))
mp_split = jnp.reshape(projected, projected.shape[:-1] + (self.mp_num, -1))
mp_split = maybe_shard(mp_split, P("dp", None, "mp", None))
local_dim = self.d_head * self.n_head // self.mp_num
q, v, k, ff = jnp.split(mp_split, [local_dim, local_dim * 2, local_dim * 3], axis=-1)
q = self.head_split(q)
v = self.head_split(v)
k = self.head_split(k)
return q, v, k, ff
def output(self, *x):
out = jnp.concatenate(x, axis=-1)
out = maybe_shard(out, P("dp", None, "mp", None))
out = out.reshape(x[0].shape[:-2] + (-1,))
out_shard = maybe_shard(out, P("dp", None, "mp"))
return self.output_proj(out_shard)
def __call__(self, x, attn_bias):
x = self.norm(x)
q, v, k, ff = self.input(x)
# head_print("x.shape", x.shape)
# head_print("attn_bias.shape", attn_bias.shape)
seq_len = x.shape[1]
causal_mask = np.tril(np.ones((seq_len, seq_len)))[None, :, :]
bias = -1e10 * (1. - causal_mask)
# head_print("bias.shape", bias.shape)
bias += attn_bias
attn_out = self.self_attn(q, v, k, bias)
ff_out = self.glu(ff)
return self.output(attn_out, ff_out)
# [batch, seq, mp, dim*2//mp]
def glu(self, x):
out, gate = jnp.split(x, 2, axis=-1)
return out * jax.nn.gelu(gate)
# iterate the decoding process by a single token
def decode_once(self, decode_state, x, attn_bias):
x = self.norm(x)
assert x.shape[0] == 1
q, v, k, ff = self.input(x)
# add new kv to end
v = jnp.concatenate((decode_state["v"], v), axis=1)[1:]
k = jnp.concatenate((decode_state["k"], k), axis=1)[1:]
tokens_decoded = decode_state["tokens_decoded"] + 1
length = v.shape[1]
masked_tokens = length - tokens_decoded
attention_mask = jnp.arange(0, length) < masked_tokens
bias = (-1e10 * attention_mask)
bias += attn_bias
attn_out = self.self_attn(q, v, k, bias)
ff_out = self.glu(ff)
return self.output(attn_out, ff_out), {
"tokens_decoded": tokens_decoded,
"k": k,
"v": v
}
# take in right aligned context tokens and generate an initial state
def get_init_decode_state(self, x, given_length, attn_bias):
x = self.norm(x)
q, v, k, ff = self.input(x)
full_length = x.shape[1]
masked_tokens = full_length - given_length
causal_mask = np.tril(np.ones((full_length, full_length)))
bias = -1e10 * (1. - causal_mask) # regular AR masking
bias -= 1e10 * (jnp.arange(0, full_length) < masked_tokens) # mask out zero tokens before context starts
bias += attn_bias # finally add attn bias for rpe
attn_out = self.self_attn(q, v, k, bias)
ff_out = self.glu(ff)
return self.output(attn_out, ff_out), {
"tokens_decoded": given_length.astype(jnp.uint32),
"k": k,
"v": v,
}
class ProjectionShard(hk.Module):
def __init__(self, config, name=None, embedding_shard=None):
super().__init__(name=name)
self.out_dim_unpadded = config["n_vocab"]
out_dim = self.out_dim_unpadded + config.get("n_vocab_padding", 0)
shards = config["cores_per_replica"]
norm = getnorm(config["norm"])
self.compat = config.get("compat", "j")
assert out_dim % shards == 0
self.dim = out_dim
self.dim_per_shard = out_dim // shards
self.norm = norm
if self.compat == "neo":
self.proj = embedding_shard.proj
else:
self.proj = TransposingLinear(config["d_model"], self.dim_per_shard)
def __call__(self, x):
x = self.norm(x)
proj = self.proj(x, transpose_weights=self.compat == "neo")
all_proj = jax.lax.all_gather(proj, 'shard')
return hk.Flatten()(jnp.transpose(all_proj, (1, 0, 2)))[:, :self.out_dim_unpadded]
def loss(self, x, targets, z_loss=1):
x = f_psum(x)
x = self.norm(x)
logits = self.proj(x, transpose_weights=self.compat == "neo")
shard_start_index = jax.lax.axis_index('shard') * self.dim_per_shard
vocab_mask = targets < self.out_dim_unpadded
logit_mask = jnp.arange(self.dim_per_shard) + shard_start_index < self.out_dim_unpadded
global_max = jax.lax.pmax(jax.lax.stop_gradient(logits.max(-1, keepdims=True, initial=-jnp.inf, where=logit_mask)), "shard")
logits -= jax.lax.stop_gradient(global_max)
gt_onehot = jax.nn.one_hot(targets - shard_start_index, self.dim_per_shard)
predicted_logits = jnp.sum(jnp.multiply(gt_onehot, logits), axis=-1)
predicted_logits = g_psum(predicted_logits)
exp_logits = jnp.exp(logits)
sum_exp_logits = exp_logits.sum(axis=-1)
sum_exp_logits = g_psum(sum_exp_logits)
loss = jnp.log(sum_exp_logits) - predicted_logits
loss += (1e-4 * jnp.square(jnp.log(sum_exp_logits)) * z_loss).sum() / vocab_mask.sum()
correct = (0.0 == predicted_logits)
return vocab_mask * loss, vocab_mask * correct
class Projection(hk.Module):
def __init__(self, config, name=None):
super().__init__(name=name)
out_dim = config["n_vocab"]
self.dim = out_dim
self.norm = hk.LayerNorm(-1, True, True)
self.proj = hk.Linear(self.dim)
def __call__(self, x):
x = self.norm(x)
return self.proj(x)
def loss(self, x, targets, z_loss=1):
x = self.norm(x)
logits = self.proj(x)
logits -= logits.max(-1, keepdims=True)
gt_onehot = jax.nn.one_hot(targets, self.dim)
predicted_logits = jnp.sum(jnp.multiply(gt_onehot, logits), axis=-1)
exp_logits = jnp.exp(logits)
sum_exp_logits = exp_logits.sum(axis=-1)
loss = jnp.log(sum_exp_logits) - predicted_logits
loss += (1e-4 * jnp.square(jnp.log(sum_exp_logits)) * z_loss).mean()
correct = (0.0 == predicted_logits)
return loss, correct
| 1.84375
| 2
|
app/tasksmodule/__init__.py
|
sanketsaurav/personfinder
| 0
|
12784889
|
<filename>app/tasksmodule/__init__.py
# TODO(nworden): rename this module to just "tasks" once tasks.py is emptied and
# deleted.
| 1.4375
| 1
|
fhir/resources/DSTU2/substance.py
|
cstoltze/fhir.resources
| 144
|
12784890
|
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/DSTU2/substance.html
Release: DSTU2
Version: 1.0.2
Revision: 7202
"""
from typing import List as ListType
from pydantic import Field
from . import domainresource, fhirtypes
from .backboneelement import BackboneElement
class Substance(domainresource.DomainResource):
"""A homogeneous material with a definite composition
A homogeneous material with a definite composition.
"""
resource_type = Field("Substance", const=True)
identifier: ListType[fhirtypes.IdentifierType] = Field(
None,
alias="identifier",
title="List of Unique identifier (represented as 'dict' in JSON)",
description="Unique identifier for the substance",
element_property=True,
)
category: ListType[fhirtypes.CodeableConceptType] = Field(
None,
alias="category",
title="List of Type `CodeableConcept` (represented as `dict` in JSON).",
description="What class/type of substance this is",
element_property=True,
)
code: fhirtypes.CodeableConceptType = Field(
None,
alias="code",
title="Type `CodeableConcept` (represented as `dict` in JSON).",
description="What substance this is",
element_property=True,
)
description: fhirtypes.String = Field(
None,
alias="description",
title="Type `String` (represented as `dict` in JSON)",
description="Textual description of the substance, comments",
element_property=True,
)
instance: ListType[fhirtypes.SubstanceInstanceType] = Field(
None,
alias="instance",
title="List of Type `SubstanceInstance` (represented as `dict` in JSON).",
description="If this describes a specific package/container of the substance",
element_property=True,
)
ingredient: ListType[fhirtypes.SubstanceIngredientType] = Field(
None,
alias="ingredient",
title="List of Type `SubstanceIngredient` (represented as `dict` in JSON).",
description="Composition information about the substance",
element_property=True,
)
class SubstanceInstance(BackboneElement):
"""If this describes a specific package/container of the substance
If this describes a specific package/container of the substance.
"""
resource_type = Field("SubstanceInstance", const=True)
identifier: fhirtypes.IdentifierType = Field(
None,
alias="identifier",
title="Identifier of the package/container",
description=(
"Identifier associated with the package/container"
" (usually a label affixed directly)"
),
element_property=True,
)
expiry: fhirtypes.DateTime = Field(
None,
alias="expiry",
title="When no longer valid to use",
description=(
"When the substance is no longer valid to use. "
"For some substances, a single arbitrary date is used for expiry."
),
element_property=True,
)
quantity: fhirtypes.QuantityType = Field(
None,
alias="quantity",
title=(
"Type `Quantity` referencing `SimpleQuantity` (represented as `dict` in "
"JSON)."
),
description="Amount of substance in the package",
element_property=True,
)
class SubstanceIngredient(BackboneElement):
"""Composition information about the substance
A substance can be composed of other substances.
"""
resource_type = Field("SubstanceIngredient", const=True)
quantity: fhirtypes.RatioType = Field(
None,
alias="quantity",
title="Type `Ratio` (represented as `dict` in JSON).",
description="Optional amount (concentration)",
element_property=True,
)
substance: fhirtypes.ReferenceType = Field(
None,
alias="substance",
title=(
"`Reference` items referencing `Substance` (represented as `dict` in"
" JSON)"
),
description="A component of the substance",
enum_reference_types=["Substance"],
element_property=True,
)
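# --- Hedged usage sketch (editor's addition) ---
# Builds a Substance with a single ingredient; the concrete text, display and
# quantity values are illustrative assumptions, not values mandated by the
# DSTU2 profile. Complex fields accept plain dicts, which pydantic validates.
def _example_substance() -> Substance:
    return Substance(
        code={"text": "Sodium chloride 0.9%"},
        description="Normal saline for infusion",
        ingredient=[
            {
                "substance": {"display": "Sodium chloride"},
                "quantity": {
                    "numerator": {"value": 9, "unit": "g"},
                    "denominator": {"value": 1, "unit": "L"},
                },
            }
        ],
    )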
| 2.09375
| 2
|
hrs/utils/rest.py
|
coyotevz/hrs
| 0
|
12784891
|
<reponame>coyotevz/hrs<filename>hrs/utils/rest.py
# -*- coding: utf-8 -*-
"""
utils.rest
~~~~~~~~~~~~
Provides tools for building REST interfaces.
Primary function: build_result().
Limitations:
- Only works for simple queries against one model.
Depends on:
- SQLAlchemy
- Flask
- Marshmallow
"""
import copy
from math import ceil
from collections import namedtuple
from sqlalchemy import and_
from sqlalchemy.orm import (
ColumnProperty, SynonymProperty, RelationshipProperty, object_mapper
)
from sqlalchemy.orm.util import class_mapper
from flask import request, current_app
from marshmallow.utils import is_collection
class Pagination(object):
def __init__(self, iterable, page, per_page):
iterable = list(iterable)
self.total = len(iterable)
offset = (page-1) * per_page
limit = min(offset+per_page, self.total)
self.items = iterable[offset:limit]
self.page = page
self.per_page = per_page
@property
def pages(self):
if self.per_page == 0:
pages = 0
else:
pages = int(ceil(self.total / float(self.per_page)))
return pages
OPERATORS = {
# General comparators
'eq': lambda f, a: f == a,
'neq': lambda f, a: f != a,
'gt': lambda f, a: f > a,
'gte': lambda f, a: f >= a,
'lt': lambda f, a: f < a,
'lte': lambda f, a: f <= a,
# String operators
'contains': lambda f, a: f.contains(a),
'icontains': lambda f, a: f.ilike('%'+a+'%'),
'endswith': lambda f, a: f.endswith(a),
'startswith': lambda f, a: f.startswith(a),
# List operators
'in': lambda f, a: f.in_(a),
'nin': lambda f, a: ~f.in_(a),
}
SORT_ORDER = {
# Sort order
'asc': lambda f: f.asc,
'desc': lambda f: f.desc,
}
_default_filter = 'eq'
#: Represents an "order by" in a SQL query expression
OrderBy = namedtuple('OrderBy', 'field direction')
#: Represents a filter to apply to a SQL query
Filter = namedtuple('Filter', 'field operator argument')
def parse_filters(filters):
retval = []
for f in filters:
field, op, arg = (f.split(':') + [None, None])[:3]
if op is None:
            # Malformed filter; ignore it
continue
if arg is None:
if op in OPERATORS:
                # Argument missing; ignore it
continue
# Default operator 'eq'
arg = op
op = _default_filter
retval.append(Filter(field, op, arg))
return retval
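# Hedged examples (editor's addition) of the filter syntax accepted above; the
# field names are arbitrary illustrations.
def _parse_filters_examples():
    assert parse_filters(["age:gte:18"]) == [Filter("age", "gte", "18")]
    assert parse_filters(["name:alice"]) == [Filter("name", "eq", "alice")]  # default operator
    assert parse_filters(["age:gte"]) == []   # operator without argument is ignored
    assert parse_filters(["broken"]) == []    # no ':' at all is ignored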
def create_operation(model, fieldname, operator, argument, relation=None):
"""
Translate an operation described as string to a valid SQLAlchemy query
parameter using a field or relation of the model.
"""
opfunc = OPERATORS.get(operator)
field = getattr(model, relation or fieldname, None)
if opfunc and field:
return opfunc(field, argument)
def create_filters(filters, model):
"Returns a list of operations on `model`specified in the `filters` list."
retfilters = []
for f in filters:
fname = f.field
relation = None
if '.' in fname:
relation, fname = fname.split('.')
arg = create_operation(model, fname, f.operator, f.argument, relation)
retfilters.append(arg)
return list(filter(lambda x: x is not None, retfilters))
def apply_query_filters(query, model=None):
filters = request.args.getlist('filter')
if filters:
filters = parse_filters(filters)
if model is None:
            # Retrieve model from query; FIXME: only the first model is retrieved
model = query.column_descriptions[0]['type']
filters = create_filters(filters, model)
query = query.filter(and_(*filters))
return query
def select_and_omit(schema):
"Fills schema.only based on 'select' and 'omit' query string parameters"
select = set(','.join(request.args.getlist('select')).split(','))
omit = set(','.join(request.args.getlist('omit')).split(','))
if select or omit:
schema = copy.copy(schema)
        if any(select):
schema.only = select.intersection(schema.fields.keys())
        if any(omit):
only_set = set(schema.only or schema.fields.keys())
schema.only = only_set.difference(omit)
if schema.only:
# Always return id field
schema.only.add('id')
return schema
def paginate(query):
max_per_page = current_app.config.get('MAX_ITEMS_PER_PAGE', 100)
try:
page = int(request.args.get('page', 1))
per_page = min(int(request.args.get('per_page', 25)),
max_per_page)
except ValueError:
from flask import abort
        abort(400, 'Invalid parameter type')
if hasattr(query, 'paginate') and callable(query.paginate):
return query.paginate(page, per_page)
else:
return Pagination(query, page, per_page)
def build_result(query, schema, model=None):
schema = select_and_omit(schema)
if is_collection(query):
query = apply_query_filters(query, model)
result = paginate(query)
return {
'num_results': result.total,
'page': result.page,
'num_pages': result.pages,
'objects': schema.dump(result.items, many=True).data,
}
else:
return schema.dump(query, many=False).data
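# --- Hedged usage sketch (editor's addition) ---
# Wiring build_result() into a Flask view. `model` is assumed to be a
# Flask-SQLAlchemy model class (exposing `.query`) and `schema` a Marshmallow
# schema instance; both are supplied by the caller and are not part of this
# module. The returned dict would typically be passed to flask.jsonify().
def example_list_endpoint(model, schema):
    query = model.query  # honours ?filter=, ?page=, ?per_page=, ?select= and ?omit=
    return build_result(query, schema, model=model)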
| 2.390625
| 2
|
src/bitcaster/middleware/i18n.py
|
bitcaster-io/bitcaster
| 4
|
12784892
|
from django.utils import translation
from bitcaster.config import settings
from bitcaster.utils.language import get_attr
class UserLanguageMiddleware:
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
if request.user and request.user.is_authenticated:
translation.activate(request.user.language)
response = self.get_response(request)
        # FIXME: here user can be an Application due to TriggerKeyAuthentication
if get_attr(request, 'user.is_authenticated') and request.user.is_authenticated:
response.set_cookie(settings.LANGUAGE_COOKIE_NAME, request.user.language)
return response
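# Hedged configuration sketch (editor's addition): the middleware takes effect once
# listed in settings.MIDDLEWARE, after Django's AuthenticationMiddleware so that
# request.user is already populated. The dotted path and surrounding entries are
# assumptions based on this file's location, not verified project settings.
#
# MIDDLEWARE = [
#     "django.contrib.sessions.middleware.SessionMiddleware",
#     "django.contrib.auth.middleware.AuthenticationMiddleware",
#     "bitcaster.middleware.i18n.UserLanguageMiddleware",
# ]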
| 2.046875
| 2
|
Experiment Processing/experiment2/count_picked_songs_per_playlist.py
|
Austaon/GroupRecommendationThesis
| 0
|
12784893
|
<reponame>Austaon/GroupRecommendationThesis<filename>Experiment Processing/experiment2/count_picked_songs_per_playlist.py<gh_stars>0
import statistics
import matplotlib.pyplot as plt
from database.session import Session
def parse_int(playlist_string):
return int(''.join(filter(str.isdigit, playlist_string)))
def count_picked_songs_per_playlist():
"""
Finds the number of tracks that were selected by a person that were recommended to a playlist.
    Additionally, normalizes these counts by group size, since the chance of any one person's tracks being
    selected goes down when the group has more members.
The result is printed and plotted.
:return:
"""
number_of_used_tracks = {
"Probability Weighted Sum": [],
"Fairness": [],
"Least Misery": []
}
normalized_to_group_members = {
"Probability Weighted Sum": [],
"Fairness": [],
"Least Misery": []
}
for user, session in Session.get_users_with_surveys():
user_tracks = [track["id"] for track in user.tracks]
for playlist, playlist_string in user.get_playlists_from_survey():
playlist_index = parse_int(playlist_string)
recommended_playlist = session.recommendations[playlist_index - 1]["tracks"]
playlist_rule = playlist["rule_name"]
user_tracks_vector = [
1 if recommended_playlist[parse_int(song) - 1]["id"] in user_tracks else 0
for song, rating in playlist["like_rating_specific"].items()
]
number_of_used_tracks[playlist_rule].append(sum(user_tracks_vector))
normalized_to_group_members[playlist_rule].append(sum(user_tracks_vector) / session.get_number_of_users())
result = "Result:\n"
for playlist in number_of_used_tracks:
playlist_data = number_of_used_tracks[playlist]
result += f"{playlist}: {statistics.mean(playlist_data):.2f} (stdev: {statistics.stdev(playlist_data):.2f}), "
print(result[:-2])
result = "Normalised result:\n"
for playlist in normalized_to_group_members:
playlist_data = normalized_to_group_members[playlist]
result += f"{playlist}: {statistics.mean(playlist_data):.2f} (stdev: {statistics.stdev(playlist_data):.2f}), "
print(result[:-2])
labels = ["PWS", "F", "LM"]
boxplot_data = [
number_of_used_tracks["Probability Weighted Sum"],
number_of_used_tracks["Fairness"],
number_of_used_tracks["Least Misery"],
]
fig, ax = plt.subplots()
ax.boxplot(boxplot_data, labels=labels)
ax.set_xticklabels(labels)
fig.tight_layout()
plt.show()
boxplot_data = [
normalized_to_group_members["Probability Weighted Sum"],
normalized_to_group_members["Fairness"],
normalized_to_group_members["Least Misery"],
]
fig, ax = plt.subplots()
ax.boxplot(boxplot_data, labels=labels)
ax.set_xticklabels(labels)
fig.tight_layout()
plt.show()
| 3.359375
| 3
|
cmr_localization/scripts/generate_db_name.py
|
MarvinStuede/cmr_localization
| 0
|
12784894
|
#!/usr/bin/env python
import time
import sys
millis = int(round(time.time() * 1000))
sys.stdout.write("~/.ros/rtabmap_test_" + str(millis)+ '.db')
| 1.664063
| 2
|
strops/schemes/admin.py
|
ckoerber/strops
| 1
|
12784895
|
"""Admin pages for schemes models.
By default, generates list view admins for all models.
"""
from django.contrib.admin import StackedInline, register
from espressodb.base.admin import register_admins
from espressodb.base.admin import ListViewAdmin as LVA
from strops.schemes.models import (
ExpansionScheme,
ExpansionParameter,
ExpansionOrder,
OperatorRelation,
)
class ExpansionParameterInline(StackedInline):
model = ExpansionParameter
extra = 1
@register(ExpansionScheme)
class ExpansionSchemeAdmin(LVA):
inlines = (ExpansionParameterInline,)
class ExpansionOrderInline(StackedInline):
model = ExpansionOrder
extra = 1
register_admins(
"strops.schemes",
exclude_models=["ExpansionScheme", "OperatorRelation", "ExpansionOrder"],
)
@register(OperatorRelation)
class OperatorRelationAdmin(LVA):
inlines = (ExpansionOrderInline,)
autocomplete_fields = ("source", "target")
| 1.804688
| 2
|
setup.py
|
Sage-Bionetworks/synapsemonitor
| 4
|
12784896
|
"""Setup"""
import os
from setuptools import setup, find_packages
# figure out the version
# about = {}
# here = os.path.abspath(os.path.dirname(__file__))
# with open(os.path.join(here, "synapsemonitor", "__version__.py")) as f:
# exec(f.read(), about)
with open("README.md", "r") as fh:
long_description = fh.read()
setup(name='synapsemonitor',
# version=about["__version__"],
version="0.0.2",
description='Synapse monitoring',
url='https://github.com/Sage-Bionetworks/synapseMonitor',
author='<NAME>, <NAME>',
author_email='<EMAIL>',
long_description=long_description,
long_description_content_type="text/markdown",
license='Apache',
packages=find_packages(),
zip_safe=False,
python_requires='>=3.6, <3.9',
entry_points={'console_scripts': ['synapsemonitor = synapsemonitor.__main__:main']},
install_requires=['synapseclient', 'pandas'])
| 1.5625
| 2
|
pipeline_prepare_db.py
|
TobiasKoopmann/airankings
| 0
|
12784897
|
import urllib.request
import shutil
import gzip
import json
import re
import os
from collections import defaultdict
from scholarmetrics import hindex
from tqdm import tqdm
from app.dblp_parser import parse_dblp, parse_dblp_person, get_dblp_country
from app.myfunctions import get_dblp_url
URL = 'http://dblp.org/xml/'
basedir = os.path.abspath(os.path.dirname(__file__))
DATA_PATH = basedir + '/data/'
STATIC_PATH = basedir + '/app/static/'
def download_dblp() -> None:
"""
Downloads the DBLP dataset and saves it into the data_path, which is usually ./data.
:return:
"""
source_gz = URL + 'dblp.xml.gz'
source_dtd = URL + 'dblp.dtd'
target_gz = DATA_PATH + 'dblp.xml.gz'
target_dtd = DATA_PATH + 'dblp.dtd'
print(' Downloading file ' + source_gz)
with urllib.request.urlopen(source_gz) as response, open(target_gz, 'wb') as fh:
shutil.copyfileobj(response, fh)
print(' Downloading file ' + source_dtd)
with urllib.request.urlopen(source_dtd) as response, open(target_dtd, 'wb') as fh:
shutil.copyfileobj(response, fh)
print(' Download finish!')
print()
def unzip_dblp() -> None:
"""
Unzips the downloaded DBLP dataset.
:return:
"""
source = DATA_PATH + 'dblp.xml.gz'
target = DATA_PATH + 'dblp.xml'
with gzip.open(source, 'rb') as f_in:
with open(target, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
print()
def extract_publications():
"""
Parses the DBLP XML file to json, which can be used by this pipeline.
:return:
"""
source = DATA_PATH + 'dblp.xml'
target = DATA_PATH + 'dblp.json'
parse_dblp(source, target)
print()
def extract_ai_publications() -> list:
"""
    Uses the venue file (`./app/static/ai_venues.json`) to extract all publications from the listed venues.
:return:
"""
source = DATA_PATH + 'dblp.json'
source_venues = STATIC_PATH + 'ai_venues.json'
target_pubs = DATA_PATH + 'ai_dblp.json'
authors = set()
with open(source_venues, "r", encoding="utf-8") as f:
tmp = json.load(f)
# Create a dict for all instances
venues = dict(pair for d in tmp.values() for pair in d.items())
venues_set = set()
for k, v in venues.items():
venues_set.add(k)
venues_set.update(v)
def get_disambiguated_venue(venue_name: str):
if venue_name in venues:
return venue_name
else:
for k, v in venues.items():
if venue_name in v:
return k
print(' Parsing ' + source)
with open(target_pubs, "w", encoding="utf-8") as out_f:
with open(source, "r", encoding="utf-8") as in_f:
for line in tqdm(in_f):
line = json.loads(line)
                if line['booktitle']:
                    curr_venue = line['booktitle'][0]
                elif line['journal']:
                    curr_venue = line['journal'][0]
                else:
                    # no venue information for this record, skip it
                    continue
                curr_venue = re.sub(r" \([0-9]+\)$", "", curr_venue)
if curr_venue in venues_set:
line['venue'] = get_disambiguated_venue(curr_venue)
json.dump(line, out_f)
out_f.write("\n")
authors.update(line['author'])
print(' Parse finish! File ai_dblp.json created!')
print()
return list(authors)
def download_semantic_scholar_if_needed(semantic_scholar_path: str, default_count: int = 184, download: bool = False):
"""
Well, as the name says.
:param semantic_scholar_path:
:param default_count:
:param download:
:return:
"""
sem_url = "https://s3-us-west-2.amazonaws.com/ai2-s2-research-public/open-corpus/2020-04-10/"
if not os.path.exists(semantic_scholar_path):
os.mkdir(semantic_scholar_path)
download = True
if download:
print(" Downloading semantic scholar first. ")
with urllib.request.urlopen(sem_url + "manifest.txt") as response, open(semantic_scholar_path + "manifest.txt", 'wb') as fh:
shutil.copyfileobj(response, fh)
with open(semantic_scholar_path + "/manifest.txt", "r") as f:
for line in tqdm(f, total=default_count):
line = line.strip()
with urllib.request.urlopen(sem_url + line) as response, open(
semantic_scholar_path + line, 'wb') as fh:
shutil.copyfileobj(response, fh)
if "s2-corpus-" in line:
with gzip.open(semantic_scholar_path + line, 'rb') as f_in:
with open(semantic_scholar_path + line[:-3], 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(semantic_scholar_path + line)
def match_semantic_scholar(download: bool = False):
"""
    First downloads the Semantic Scholar corpus if needed, then tries to match all publications and extract their citation counts.
:param download:
:return:
"""
source = DATA_PATH + 'ai_dblp.json'
target = DATA_PATH + 'ai_dataset.json'
source_persons = DATA_PATH + 'persons.json'
semantic_scholar_path = DATA_PATH + "semantic_scholar/"
download_semantic_scholar_if_needed(semantic_scholar_path, download=download)
def de_list(x, parse_int: bool = False):
if isinstance(x, list):
if parse_int:
return int(x[0])
return x[0]
if parse_int:
return int(x)
return x
    def get_doi(line) -> str:
        """
        Get doi for a given line of the data, useful for semantic_scholar matching
        """
        if "ee" in line:
            # "ee" may be a single URL or a list of URLs; iterate over the URLs,
            # not over the characters of the first one
            ee = line["ee"] if isinstance(line["ee"], list) else [line["ee"]]
            for x in ee:
                if "doi" in x:
                    return x.replace("https://doi.org/", "")
with open(source_persons, encoding="utf-8") as file:
persons = [json.loads(line) for line in file]
    # map each author alias to the canonical (first) name
authors = dict()
for person in persons:
if isinstance(person["author"], list):
for auth in person['author']:
authors[auth] = person['author'][0]
with open(source, "r", encoding="utf-8") as f:
pubs = f.readlines()
pubs = [json.loads(x) for x in pubs]
    for pub in pubs:
        # replace author aliases with their canonical name; building a new list
        # avoids mutating the list while iterating over it
        pub['author'] = [authors.get(name, name) for name in pub['author']]
removed_indices = set()
titles = defaultdict(list)
    for i, x in enumerate(pubs):
        titles[x['title'][0].strip(".").lower()].append(i)
files = [file_path for file_path in os.listdir(semantic_scholar_path) if "s2-corpus-" in file_path]
counter = 1
with open(target, 'w', encoding="utf-8") as out_f:
for file_path in files:
print("Reading file ... (", str(counter), "/", str(len(files)), ")")
with open(semantic_scholar_path + file_path, 'r', encoding="utf-8") as in_f:
for line in in_f:
line = json.loads(line)
curr_title = de_list(line['title']).strip().lower()
if curr_title in titles:
index = None
for i in titles[curr_title]:
pub = pubs[i]
doi = get_doi(pub)
if doi and "doi" in line and line["doi"]:
if doi == line["doi"]:
index = i
break
elif "year" in line and de_list(pub["year"], True) == de_list(line["year"], True):
if line["venue"] == "ArXiv":
if pub["journal"] and de_list(pub["journal"]) == "CoRR":
index = i
break
elif pub["journal"] and de_list(pub["journal"]) == "CoRR":
continue
else:
index = i
break
                        if index is not None and index not in removed_indices:
                            if 'inCitations' not in pub:
                                pub['inCitations'] = len(line['inCitations'])
json.dump(pub, out_f)
out_f.write("\n")
removed_indices.add(index)
counter += 1
for i, pub in enumerate(pubs):
if i not in removed_indices:
json.dump(pub, out_f)
out_f.write("\n")
print(' Parse finish! File ai_dataset.json created!')
print()
def extract_persons(author_list: list) -> None:
"""
Extracting all author information from DBLP, as affiliations etc.
:param author_list:
:return:
"""
source = DATA_PATH + 'dblp.xml'
target = DATA_PATH + 'persons'
print(' Parsing ' + source)
parse_dblp_person(source, target, author_list)
print(' Parse finish! File persons.json created!')
print()
def parse_countries() -> None:
"""
    Parses country information from the DBLP into the file 'author_countries.json'.
:return: The file 'author_countries.json'
"""
source_country = STATIC_PATH + 'countries_domain.txt'
source_person = DATA_PATH + 'persons.json'
target = DATA_PATH + 'author_countries.json'
print(' Parsing ' + source_person)
countries = get_dblp_country(source_person, source_country)
with open(target, "w", encoding="utf-8") as f:
for line in countries:
json.dump(line, f)
f.write("\n")
print(' Parse finish! File author_countries.json created!')
print()
def pipeline_prepare_db() -> None:
"""
'*** Starting pipeline process to prepare PyCSRankings Database ***'
Careful, it will download the semantic scholar, which is up to 240 GB large.
:return: The files 'ai_dataset.json', 'persons.json' and 'author_countries.json' in the 'data' folder.
"""
print('**** Starting pipeline process to prepare PyCSRankings Database ****')
print()
print('Process 01 - Download DBLP data')
download_dblp()
print('Process 02 - Unzipping DBLP data')
unzip_dblp()
print('Process 03 - Create dblp.json')
extract_publications()
print('Process 04 - Create ai_article.json')
author_list = extract_ai_publications()
print('Process 05 - Create persons.json')
extract_persons(author_list)
print('Process 06 - Create author_countries.json')
parse_countries()
print('Process 07 - Match with Semantic Scholar')
# Be warned. This will download the semantic scholar dataset, which is rather large.
match_semantic_scholar()
print('*** Pipeline process to prepare PyCSRankings Database Finished! ***')
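# --- Hedged usage sketch (editor's addition) ---
# Runs only the DBLP-side steps and skips the very large Semantic Scholar
# download/matching. Whether such a partial run is sufficient depends on the
# use case; the function names are the ones defined above.
def pipeline_prepare_db_without_semantic_scholar() -> None:
    download_dblp()
    unzip_dblp()
    extract_publications()
    author_list = extract_ai_publications()
    extract_persons(author_list)
    parse_countries()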
if __name__ == '__main__':
pipeline_prepare_db()
| 2.765625
| 3
|
codes/data/LQGT_dataset_3d.py
|
LCM1999/VolumeRescaling
| 4
|
12784898
|
import random
import numpy as np
import cv2
import torch
import torch.utils.data as data
import logging
from . import util
class LQGTDataset3D(data.Dataset):
'''
Read LQ (Low Quality, here is LR) and GT vti file pairs.
If only GT image is provided, generate LQ vti on-the-fly.
The pair is ensured by 'sorted' function, so please check the name convention.
'''
logger = logging.getLogger('base')
def __init__(self, opt):
super(LQGTDataset3D, self).__init__()
self.opt = opt
        self.paths_GT = None
if opt['set_type'] == 'vtk':
self.paths_GT = util.get_vtk_paths(opt['dataroot_GT'])
# self.paths_LQ = util.get_vtk_paths(opt['dataroot_LQ'])
elif opt['set_type'] == 'tecplot':
self.paths_GT = util.get_tecplot_paths(opt['dataroot_GT'])
# self.paths_LQ = util.get_tecplot_paths(opt['dataroot_LQ'])
else:
ex = Exception("Type '%s' is not supported" % opt['type'])
raise ex
assert self.paths_GT, 'Error: GT path is empty.'
# if self.paths_LQ and self.paths_GT:
# assert len(self.paths_LQ) == len(
# self.paths_GT
# ), 'GT and LQ datasets have different number of images - {}, {}.'.format(
# len(self.paths_LQ), len(self.paths_GT))
self.random_scale_list = [1]
def __getitem__(self, index):
# cv2.setNumThreads(0)
scale = self.opt['scale']
GT_size = self.opt['GT_size']
attr_id = self.opt.get('attr_id', 0)
# get GT image
GT_path = self.paths_GT[index]
vti_GT_generator = util.getTensorGenerator(GT_path, self.opt['data_type'])
vti_GT, component_GT = vti_GT_generator.get_array_by_id(attr_id)
print('origin GT shape: {}'.format(vti_GT.shape))
if self.opt['phase'] != 'train':
vti_GT = util.modcrop_3d(vti_GT, scale)
# if self.paths_LQ:
# LQ_path = self.paths_LQ[index]
# vti_LQ_generator = util.getTensorGenerator(LQ_path)
# vti_LQ_generator.set_type(self.opt['type'])
# vti_LQ, component_LQ = vti_LQ_generator.get_array_by_id(attr_id)
# else:
# if self.opt['phase'] == 'train':
# # random_scale = random.choice(self.random_scale_list)
# # Z_s, Y_s, X_s = vti_GT.shape
#
# # def _mod(n, random_scale, scale, thres):
# # rlt = int(n * random_scale)
# # rlt = (rlt // scale) * scale
# # return thres if rlt < thres else rlt
#
# # Z_s = _mod(Z_s, random_scale, scale, GT_size)
# # Y_s = _mod(Y_s, random_scale, scale, GT_size)
# # X_s = _mod(X_s, random_scale, scale, GT_size)
# vti_GT = util.resize_3d(arr=np.copy(vti_GT), newsize=GT_size)
#
# # using matlab imresize3
# vti_LQ = util.imresize3_np(vti_GT, 1 / scale, True)
# if vti_LQ.ndim != 3:
# ex = Exception("Error: dims not right")
# raise ex
if self.opt['phase'] == 'train':
Z, Y, X = vti_GT.shape
if Z < GT_size or Y < GT_size or X < GT_size:
vti_GT = util.resize_3d(np.copy(vti_GT), newsize=GT_size)
elif Z > GT_size or Y > GT_size or X > GT_size:
vti_GT = util.modcrop_3d(vti_GT, scale)
# using matlab imresize3
# vti_LQ = util.imresize3_np(vti_GT, 1 / scale, True)
# if vti_LQ.ndim != 2:
# ex = Exception("Error: dims not right")
# raise ex
# Z, Y, X = vti_LQ.shape
# LQ_size = GT_size // scale
#
# # randomly crop
# rnd_Z = random.randint(0, max(0, Z - LQ_size))
# rnd_Y = random.randint(0, max(0, Y - LQ_size))
# rnd_X = random.randint(0, max(0, X - LQ_size))
# vti_LQ = vti_LQ[rnd_Z: rnd_Z + LQ_size, rnd_Y: rnd_Y + LQ_size, rnd_X: rnd_X + LQ_size]
# rnd_Z_GT, rnd_Y_GT, rnd_X_GT = int(rnd_Z * scale), int(rnd_Y * scale), int(rnd_X * scale)
# vti_GT = vti_GT[rnd_Z_GT: rnd_Z_GT + GT_size, rnd_Y_GT: rnd_Y_GT + GT_size, rnd_X_GT: rnd_X_GT + GT_size]
        # add a channel dimension; the volume stays in ZYX order
vti_GT = torch.from_numpy(np.ascontiguousarray(vti_GT)).float().unsqueeze(0)
print("vti_GT size: {}".format(vti_GT.size()))
# vti_LQ = torch.from_numpy(np.ascontiguousarray(vti_LQ)).float().unsqueeze(0)
# if LQ_path is None:
# LQ_path = GT_path
return {'GT': vti_GT, 'GT_path': GT_path}
def __len__(self):
return len(self.paths_GT)
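# --- Hedged usage sketch (editor's addition) ---
# Wraps the dataset in a torch DataLoader. The option values below are
# illustrative assumptions (paths, sizes, data_type), not settings taken from
# the original project configuration.
def _example_loader():
    opt = {
        'set_type': 'vtk',
        'dataroot_GT': '/path/to/GT',
        'data_type': 'float32',
        'phase': 'train',
        'scale': 4,
        'GT_size': 96,
    }
    dataset = LQGTDataset3D(opt)
    return data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=2)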
| 2.15625
| 2
|
rl/wrench_rl.py
|
yifan-you-37/ScaffoldLearning
| 5
|
12784899
|
import multiprocessing
import threading
import numpy as np
import os
import shutil
import matplotlib.pyplot as plt
import sys
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.multiprocessing as mp
from shared_adam import SharedAdam
import math, os
import cv2
import torchvision.transforms as transforms
import imageio
os.environ["OMP_NUM_THREADS"] = "1"
device=torch.device("cuda")
np.set_printoptions(precision=4,suppress=True)
simulation_dir = '../simulation'
sys.path.insert(0, simulation_dir)
from Wrench_Manipulation_Env import RobotEnv
ExName = "Wrench_Manipulation"
sys.path.insert(0,'../external/bullet3.git/build_cmake/examples/pybullet')
import pybullet
def v_wrap(np_array,dtype=np.float32):
if np_array.dtype != dtype:
np_array = np_array.astype(dtype)
return torch.from_numpy(np_array).to(device)
def push_and_pull(opt, lnet, gnet, done, s_, bs, ba, br, bdone, gamma):
if done:
v_s_ = 0.
else:
v_s_ = lnet.forward(v_wrap(s_[None,:]))[-1].data.cpu().numpy()[0,0]
buffer_v_target = []
for r, termination in zip(br[::-1], bdone[::-1]):
if termination:
v_s_ = 0
v_s_ = r + gamma * v_s_
buffer_v_target.append(v_s_)
buffer_v_target.reverse()
loss = lnet.loss_func(
v_wrap(np.vstack(bs)),
v_wrap(np.vstack(ba)),
v_wrap(np.array(buffer_v_target)[:, None]))
opt.zero_grad()
loss.backward()
    nn.utils.clip_grad_norm_(lnet.parameters(), 1.0)
for lp, gp in zip(lnet.parameters(), gnet.parameters()):
gp._grad = lp.grad
opt.step()
# pull global parameters
lnet.load_state_dict(gnet.state_dict())
MAX_EP = 15000
UPDATE_GLOBAL_ITER = 10
GAMMA = 0.9
ENTROPY_BETA = 0.001
def set_init(layers):
for layer in layers:
nn.init.normal_(layer.weight, mean=0., std=0.01)
nn.init.constant_(layer.bias, 0.)
class ACNet(nn.Module):
def __init__(self):
super(ACNet, self).__init__()
self.distribution = torch.distributions.Normal
self.block1 = nn.Sequential(
nn.Conv2d(in_channels=3,out_channels=32,kernel_size=(3,3),stride=(2,2),padding=(1,1),bias=True),
nn.ReLU(),
nn.BatchNorm2d(32),
)
# 60, 80
self.block2 = nn.Sequential(
nn.Conv2d(in_channels=32,out_channels=32,kernel_size=(3,3),stride=(2,2),padding=(1,1),bias=True),
nn.ReLU(),
nn.BatchNorm2d(32),
)
# 30, 40
self.block3 = nn.Sequential(
nn.Conv2d(in_channels=32,out_channels=64,kernel_size=(3,3),stride=(2,2),padding=(1,1),bias=True),
nn.ReLU(),
nn.BatchNorm2d(64),
)
# 15, 20
self.block4 = nn.Sequential(
nn.Conv2d(in_channels=64,out_channels=64,kernel_size=(3,3),stride=(2,2),padding=(1,1),bias=True),
nn.ReLU(),
nn.BatchNorm2d(64),
)
# 8, 10
self.block5 = nn.Sequential(
nn.Conv2d(in_channels=64,out_channels=128,kernel_size=(3,3),stride=(2,2),padding=(1,1),bias=True),
nn.ReLU(),
nn.BatchNorm2d(128),
)
# 4, 5
self.block6 = nn.Sequential(
nn.Conv2d(in_channels=128,out_channels=128,kernel_size=(3,3),stride=(2,2),padding=(1,1),bias=True),
nn.ReLU(),
nn.BatchNorm2d(128),
)
# 2, 3
self.fc_a = nn.Sequential(
nn.Linear(2 * 3 * 128, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 24),
nn.ReLU()
)
self.fc_s = nn.Sequential(
nn.Linear(2 * 3 * 128, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 24),
nn.ReLU()
)
self.fc_v = nn.Sequential(
nn.Linear(2 * 3 * 128, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 24),
nn.ReLU()
)
self.mu_layer = nn.Linear(24,6)
self.sigma_layer = nn.Linear(24,6)
self.v_layer = nn.Linear(24,1)
set_init([self.mu_layer, self.sigma_layer, self.v_layer])
def forward(self, im):
im = im.view(-1, 120, 160, 3)
im = im.permute(0,3,1,2)
im = self.block1(im)
im = self.block2(im)
im = self.block3(im)
im = self.block4(im)
im = self.block5(im)
im = self.block6(im)
im = im.reshape(-1, 2 * 3 * 128)
x_a = self.fc_a(im)
mu = self.mu_layer(x_a)
mu = F.tanh(mu)
x_s = self.fc_s(im)
sigma = self.sigma_layer(x_s)
sigma = F.softplus(sigma) * 0.06 + 0.005
x_v= self.fc_v(im)
values = self.v_layer(x_v)
return mu, sigma, values
def choose_action(self, s):
self.training = False
mu, sigma, _ = self.forward(s)
m = self.distribution(mu.view(-1,).data, sigma.view(-1,).data)
return m.sample().cpu().numpy(), mu.cpu().detach().numpy(), sigma.cpu().detach().numpy()
def loss_func(self, s, a, v_t):
self.train()
mu, sigma, values = self.forward(s)
td = v_t - values
c_loss = td.pow(2)
m = self.distribution(mu, sigma)
log_prob = m.log_prob(a)
entropy = 0.5 + 0.5 * math.log(2 * math.pi) + torch.log(m.scale)
exp_v = log_prob * td.detach() + ENTROPY_BETA * entropy
a_loss = -exp_v
total_loss = (a_loss + c_loss).mean()
return total_loss
class Worker(mp.Process):
def __init__(self, gnet, opt, global_ep, global_ep_r, res_queue, wid, SAVE_TOP_DIR):
super(Worker, self).__init__()
print("wid %d" % wid)
self.wid = wid
self.step = 0
self.g_ep, self.g_ep_r, self.res_queue = global_ep, global_ep_r, res_queue
self.gnet, self.opt = gnet, opt
self.random_seed = 42 + self.wid + int(np.log(self.wid * 100 + 1))
print("random_seed",self.random_seed,"self.wid",self.wid)
np.random.seed(self.random_seed)
self.lnet = ACNet().to(device)
self.init_step = 0
self.SAVE_TOP_DIR = SAVE_TOP_DIR
def run(self):
mean=np.array([0.485, 0.456, 0.406])
std=np.array([0.229, 0.224, 0.225])
mean = np.reshape(mean,(1,1,3))
std = np.reshape(std,(1,1,3))
self.start_pos = [-0.1,-0.4,0.5]
self.dt = 1./30.0
if self.wid == 0:
self.p_id = pybullet.connect(pybullet.GUI)
else:
self.p_id = pybullet.connect(pybullet.DIRECT)
action_dir = os.path.join(self.SAVE_TOP_DIR,"action.npy")
fixture_action = np.zeros((3,))
self.env = RobotEnv(worker_id=self.wid,p_id=pybullet,dt=self.dt,maxSteps=20,fixture_offset=fixture_action)
total_step = 1 + self.init_step
suc_check = 0
reward_check = 0
episode_check = 0
sigma_check1 = 0
sigma_check2 = 0
total_episode = 0
buffer_s, buffer_a, buffer_r, buffer_done = [], [], [], []
while total_step < MAX_EP:
observation = self.env.reset()
observation = observation/255.0
observation = (observation - mean)/std
observation = np.reshape(observation,(-1,))
while True:
action, mu_r, sigma_r = self.lnet.choose_action(v_wrap(observation[None,:]))
action[:3] = action[:3].clip(-0.03,0.03)
action[3:] = action[3:].clip(-0.05,0.05)
#
# if action[2] > 0.005:
#w action[2] = 0.005
observation_next, reward, done, suc = self.env.step(action)
observation_next = observation_next/255.0
observation_next = (observation_next - mean)/std
recordGif = False
if recordGif and total_step > 10:
imageio.mimsave('pokingSthSlightly.gif',self.env.obs_list)
return
observation_next = np.reshape(observation_next,(-1,))
buffer_s.append(observation)
buffer_r.append(reward)
buffer_a.append(action)
buffer_done.append(done)
if total_step % (UPDATE_GLOBAL_ITER + self.wid) == 0 or done:
push_and_pull(self.opt, self.lnet, self.gnet, done, observation_next, buffer_s, buffer_a, buffer_r, buffer_done, GAMMA)
buffer_s, buffer_a, buffer_r, buffer_done = [], [], [], []
if done:
suc_check += suc
episode_check += 1
total_episode += 1
observation = observation_next
total_step += 1
reward_check += reward
if total_step % 100 == 0:
current_performance = float(suc_check)/episode_check
avg_sigma1 = sigma_check1 / 100.0
avg_sigma2 = sigma_check2 / 100.0
if self.wid == 0:
print(self.SAVE_TOP_DIR,"total step %d, avg suc %f, avg reward %f" % (total_step, suc_check / 100.0, reward_check / 100.0))
save_path = os.path.join(self.SAVE_TOP_DIR,str(total_step)+'model.pth.tar')
if self.wid == 0 and int(total_step) % 1000 == 0:
print("saving to",save_path)
torch.save(self.gnet.state_dict(), save_path)
suc_check = 0
episode_check = 0
sigma_check1 = 0.0
sigma_check2 = 0.0
if done:
break
reward_dir = os.path.join(self.SAVE_TOP_DIR,"reward.txt")
np.savetxt(reward_dir,np.array([reward_check/100.0]),fmt='%f')
print("finising the learning!")
torch.cuda.empty_cache()
print("empyting the cache!")
sys.exit()
os._exit(1)
if __name__ == "__main__":
ExName = 'optimal'#sys.argv[1]
#print(ExName)
SAVE_TOP_DIR = os.path.join('./wrench/',ExName)
if not os.path.exists(SAVE_TOP_DIR):
os.makedirs(SAVE_TOP_DIR)
mp.set_start_method('spawn')
gnet = ACNet() # global network
## loading
Load_model_id = '2000'
Load_path = os.path.join(SAVE_TOP_DIR,Load_model_id + 'model.pth.tar')
#checkpoint = torch.load(Load_path)
#gnet.load_state_dict(checkpoint)
gnet.to(device)
gnet.share_memory()
opt = SharedAdam(gnet.parameters(),lr=0.0001)
global_ep, global_ep_r, res_queue = mp.Value('i',0), mp.Value('d',0.), mp.Queue()
workers = [Worker(gnet, opt, global_ep, global_ep_r, res_queue, i, SAVE_TOP_DIR) for i in range(1)]
[w.start() for w in workers]
res = []
for worker in workers:
worker.init_step = 0
[w.join() for w in workers]
| 2.15625
| 2
|
loginsystem.py
|
TaylorJonesTRT/Loginsystem
| 0
|
12784900
|
import json
import sys
import hashlib
user_details = {}
with open('user_credentials.json', 'r') as f:
    user_details = json.load(f)
while True:
print("Welcome to the new and sophisticated login system!")
choice = input("What would you like to do? \n [1]LOGIN \n [2]REGISTER \n")
while choice == "1":
username_login = input("Pleaser enter your username: ")
if username_login in user_details.keys():
password_login = input(
"Please enter your password: ").encode('utf-8')
password_login = <PASSWORD>.update(password_login)
if username_login in user_details and user_details[username_login] == password_login:
print("You are now loggged in! Welcome back!")
print("Now exiting program")
sys.exit()
else:
print("Sorry, that was the wrong password. Try again.")
else:
print("Sorry, that username does not exist. Please try again")
while choice == "2":
print("Please choose a username, it can be your email address or a plain username")
username_registration = input("")
print("Please enter a password")
password_registration = input("").encode('utf-8')
user_details[username_registration] = hasher.update(
password_registration)
with open('user_credentials.json', 'w') as fp:
json.dump(user_details, fp, indent=4)
print("Thank you for registering! You may now sign in!")
break
decision = input(
"What would you like to do now? Continue or Exit?").lower()
if decision == "continue":
continue
else:
sys.exit()
| 3.625
| 4
|
991_cnn_godata_mobilenetv2_transfer_learning_with_data_augmentation_classifier.py
|
lao-tseu-is-alive/tensorflow2-tutorial
| 0
|
12784901
|
<reponame>lao-tseu-is-alive/tensorflow2-tutorial
import pathlib
import matplotlib.pyplot as plt
import numpy as np
import os
# next line is to limit tensorflow verbose output
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
# inspired by https://www.tensorflow.org/tutorials/images/transfer_learning
from my_tf_lib import images_classification as ic
CONST_MODEL_PATH = 'trained_models/tf2_model_cnn_godata_mobilenetv2_transfer_learning_with_data_augmentation_classifier'
CONST_CLASS_NAMES = ['correspondance', 'facturation', 'photo', 'plan', 'plan_projet', 'plan_situation']
CONST_IMAGE_SIZE = (160, 160)
CONST_BATCH_SIZE = 32
base_path = '/home/cgil/PycharmProjects/tensorflow2-tutorial/godata_resized'
data_dir = pathlib.Path(base_path)
image_count = len(list(data_dir.glob('*/*.jpeg')))
print('Total number of images : {}'.format(image_count))
ic.show_n_images_category_from_path(CONST_CLASS_NAMES, base_path)
print('# creating the tf.data.Dataset from disk')
train_dataset = tf.keras.preprocessing.image_dataset_from_directory(
data_dir,
validation_split=0.2,
subset="training",
seed=123,
image_size=CONST_IMAGE_SIZE,
batch_size=CONST_BATCH_SIZE)
validation_dataset = tf.keras.preprocessing.image_dataset_from_directory(
data_dir,
validation_split=0.2,
subset="validation",
seed=123,
image_size=CONST_IMAGE_SIZE,
batch_size=CONST_BATCH_SIZE)
print("""
# You can find the class names in the class_names attribute on these datasets.
# These correspond to the directory names in alphabetical order.
""")
class_names = train_dataset.class_names
print(class_names)
CONST_CLASS_NAMES.sort()
print(CONST_CLASS_NAMES)
ic.show_n_images_from_dataset(train_dataset)
print('# Configure the dataset for performance')
AUTOTUNE = tf.data.AUTOTUNE
train_dataset = train_dataset.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
validation_dataset = validation_dataset.cache().prefetch(buffer_size=AUTOTUNE)
print("""
# When you don't have a large image dataset, it's a good practice to artificially introduce sample diversity
# by applying random, yet realistic, transformations to the training images, such as rotation and horizontal flipping.
# This helps expose the model to different aspects of the training data and reduce overfitting.
# You can learn more about data augmentation in this tutorial:
# https://www.tensorflow.org/tutorials/images/data_augmentation
""")
data_augmentation = tf.keras.Sequential([
tf.keras.layers.experimental.preprocessing.RandomFlip('horizontal'),
tf.keras.layers.experimental.preprocessing.RandomRotation(0.2),
])
preprocess_input = tf.keras.applications.mobilenet_v2.preprocess_input
# Note: Alternatively, you could rescale pixel values from [0,255] to [-1, 1] using a Rescaling layer.
# normalization_layer = tf.keras.layers.experimental.preprocessing.Rescaling(1. / 127.5, offset=-1)
num_classes = len(class_names)
img_height, img_width = CONST_IMAGE_SIZE
# Create the base model from the pre-trained model MobileNet V2
IMG_SHAPE = CONST_IMAGE_SIZE + (3,)
base_model = tf.keras.applications.MobileNetV2(input_shape=IMG_SHAPE,
include_top=False,
weights='imagenet')
image_batch, label_batch = next(iter(train_dataset))
feature_batch = base_model(image_batch)
print('# feature_batch.shape : {}'.format(feature_batch.shape))
base_model.trainable = False
# Let's take a look at the base model architecture
print('# base_model.summary :')
base_model.summary()
global_average_layer = tf.keras.layers.GlobalAveragePooling2D()
feature_batch_average = global_average_layer(feature_batch)
print('# feature_batch_average.shape : {}'.format(feature_batch_average.shape))
# raw logits (no activation): the loss below uses from_logits=True
prediction_layer = tf.keras.layers.Dense(num_classes)
prediction_batch = prediction_layer(feature_batch_average)
print('# prediction_batch.shape : {}'.format(prediction_batch.shape))
print('# IMG_SHAPE is {} , should be (160, 160, 3)'.format(IMG_SHAPE))
inputs = tf.keras.Input(shape=(160, 160, 3))
x = data_augmentation(inputs)
x = preprocess_input(x)
x = base_model(x, training=False)
x = global_average_layer(x)
x = tf.keras.layers.Dropout(0.2)(x)
outputs = prediction_layer(x)
model = tf.keras.Model(inputs, outputs)
# model = Sequential([
# layers.experimental.preprocessing.Rescaling(1. / 255, input_shape=(img_height, img_width, 3)),
# layers.Conv2D(16, 3, padding='same', activation='relu'),
# layers.MaxPooling2D(),
# layers.Conv2D(32, 3, padding='same', activation='relu'),
# layers.MaxPooling2D(),
# layers.Conv2D(64, 3, padding='same', activation='relu'),
# layers.MaxPooling2D(),
# layers.Flatten(),
# layers.Dense(128, activation='relu'),
# layers.Dense(num_classes)
# ])
base_learning_rate = 0.0001
model.compile(optimizer=tf.keras.optimizers.Adam(lr=base_learning_rate),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
print('# model.summary : {}'.format(model.summary()))
print('# len(model.trainable_variables : {}'.format(len(model.trainable_variables)))
initial_epochs = 10
loss0, accuracy0 = model.evaluate(validation_dataset)
print("# initial loss: {:.2f}".format(loss0))
print("# initial accuracy: {:.2f}".format(accuracy0))
history = model.fit(
train_dataset,
validation_data=validation_dataset,
epochs=initial_epochs
)
# let's see how we are doing for the first round
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.ylabel('Accuracy')
plt.ylim([min(plt.ylim()), 1])
plt.title('Training and Validation Accuracy')
plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.ylabel('Cross Entropy')
plt.ylim([0, 1.0])
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
plt.show()
print(" FINE TUNING :")
base_model.trainable = True
# Let's take a look to see how many layers are in the base model
print("Number of layers in the base model: ", len(base_model.layers))
# Fine-tune from this layer onwards
fine_tune_at = 100
# Freeze all the layers before the `fine_tune_at` layer
for layer in base_model.layers[:fine_tune_at]:
layer.trainable = False
model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=tf.keras.optimizers.RMSprop(lr=base_learning_rate / 10),
metrics=['accuracy'])
print('# model.summary : {}'.format(model.summary()))
print('# len(model.trainable_variables : {}'.format(len(model.trainable_variables)))
fine_tune_epochs = 10
total_epochs = initial_epochs + fine_tune_epochs
history_fine = model.fit(train_dataset,
epochs=total_epochs,
initial_epoch=history.epoch[-1],
validation_data=validation_dataset)
acc += history_fine.history['accuracy']
val_acc += history_fine.history['val_accuracy']
loss += history_fine.history['loss']
val_loss += history_fine.history['val_loss']
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.ylim([0.8, 1])
plt.plot([initial_epochs-1,initial_epochs-1],
plt.ylim(), label='Start Fine Tuning')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.ylim([0, 1.0])
plt.plot([initial_epochs-1,initial_epochs-1],
plt.ylim(), label='Start Fine Tuning')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
plt.show()
print('# SAVING THE MODEL FOR LATER USE')
print("### Now will save model to path : {}".format(CONST_MODEL_PATH))
tf.saved_model.save(model, CONST_MODEL_PATH)
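# --- Illustrative addition (not part of the original script) ---
# A minimal sketch of reloading the exported SavedModel for inference. It assumes the
# default serving signature created for Keras models; the all-zeros batch below is only
# a placeholder for real preprocessed 160x160 RGB images.
reloaded = tf.saved_model.load(CONST_MODEL_PATH)
infer = reloaded.signatures['serving_default']
dummy_batch = tf.zeros([1, 160, 160, 3])
outputs = infer(dummy_batch)
logits = list(outputs.values())[0]
print('# reloaded model output shape : {}'.format(logits.shape))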
| 2.9375
| 3
|
mowgli_etl/loader/_kg_edge_loader.py
|
tetherless-world/mowgli
| 4
|
12784902
|
<reponame>tetherless-world/mowgli<gh_stars>1-10
from abc import abstractmethod
from mowgli_etl._loader import _Loader
from mowgli_etl.model.kg_edge import KgEdge
class _KgEdgeLoader(_Loader):
@abstractmethod
def load_kg_edge(self, edge: KgEdge):
raise NotImplementedError
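# Illustrative sketch (not part of the original module): a minimal concrete loader
# that just collects edges in memory. Any additional abstract methods required by
# _Loader would also need to be implemented in a real subclass.
class _InMemoryKgEdgeLoader(_KgEdgeLoader):
    def __init__(self):
        self.__edges = []
    def load_kg_edge(self, edge: KgEdge):
        # A real loader would persist the edge to its backing store here.
        self.__edges.append(edge)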
| 2.03125
| 2
|
jaxopt/_src/base.py
|
mblondel/jaxopt
| 0
|
12784903
|
<reponame>mblondel/jaxopt
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base definitions useful across the project."""
from typing import Any
from typing import NamedTuple
import jax
import jax.numpy as jnp
class OptimizeResults(NamedTuple):
error: float
nit: int
x: Any
class OptStep(NamedTuple):
params: Any
state: Any
@jax.tree_util.register_pytree_node_class
class LinearOperator(object):
def __init__(self, A):
self.A = jnp.array(A)
def shape(self):
return self.A.shape
def matvec(self, x):
"""Computes dot(A, x)."""
return jnp.dot(self.A, x)
def matvec_element(self, x, idx):
"""Computes dot(A, x)[idx]."""
return jnp.dot(self.A[idx], x)
def rmatvec(self, x):
"""Computes dot(A.T, x)."""
return jnp.dot(self.A.T, x)
def rmatvec_element(self, x, idx):
"""Computes dot(A.T, x)[idx]."""
return jnp.dot(self.A[:, idx], x)
def update_matvec(self, Ax, delta, idx):
"""Updates dot(A, x) when x[idx] += delta."""
if len(Ax.shape) == 1:
return Ax + delta * self.A[:, idx]
elif len(Ax.shape) == 2:
return Ax + jnp.outer(self.A[:, idx], delta)
else:
raise ValueError("Ax should be a vector or a matrix.")
def update_rmatvec(self, ATx, delta, idx):
"""Updates dot(A.T, x) when x[idx] += delta."""
if len(ATx.shape) == 1:
return ATx + delta * self.A[idx]
elif len(ATx.shape) == 2:
raise NotImplementedError
else:
raise ValueError("Ax should be a vector or a matrix.")
def column_l2_norms(self, squared=False):
ret = jnp.sum(self.A ** 2, axis=0)
if not squared:
ret = jnp.sqrt(ret)
return ret
def tree_flatten(self):
return (self.A,), None
@classmethod
def tree_unflatten(cls, aux_data, children):
del aux_data
return cls(*children)
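# Illustrative usage sketch (not part of the original module): exercising the
# LinearOperator helpers on a small dense matrix.
if __name__ == "__main__":
    A = jnp.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
    x = jnp.array([1.0, -1.0])
    op = LinearOperator(A)
    Ax = op.matvec(x)                          # dot(A, x), shape (3,)
    Ax_updated = op.update_matvec(Ax, 0.5, 1)  # dot(A, x) after x[1] += 0.5
    print(Ax, Ax_updated, op.column_l2_norms())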
| 2.421875
| 2
|
tests/oldtests/dm2_download_megatest.py
|
ctb/pygr
| 2
|
12784904
|
# test via
# python protest.py dm2_download_megatest.py
from pygr import nlmsa_utils
import pygr.Data
import os, tempfile, time
def rm_recursive(top):
'recursively remove top and everything in it!'
for root, dirs, files in os.walk(top, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
os.rmdir(top)
#'http://biodb.bioinformatics.ucla.edu/PYGRDATA/dm2_multiz9way.txt.gz',
class NLMSADownload_Test(object):
'''try to save and build via download catalog auto-constructed from biodb site'''
def __init__(self,url='http://biodb.bioinformatics.ucla.edu/PYGRDATA/',
testDir = tempfile.gettempdir()):
self.url = url
import random
self.testDir = os.path.join(testDir,'test%d' % random.randint(1,99999))
self.pygrdatapath = ','.join([self.testDir,
'http://biodb2.bioinformatics.ucla.edu:5000'])
def setup(self):
'create pygr.Data entries for all NLMSAs on biodb/PYGRDATA site'
os.mkdir(self.testDir)
pygr.Data.update(self.pygrdatapath) # set our desired path
from pygr.apps.catalog_downloads import save_NLMSA_downloaders
save_NLMSA_downloaders(self.url)
## def setup(self):
## 'create pygr.Data entries for building the target NLMSA'
## os.mkdir(self.testDir)
## pygrData = get_pygr_data_path(self.pygrdatapath)
## source = pygrData.SourceURL(self.url)
## source.__doc__ = 'textdump of NLMSA to test'
## pygrData.Bio.MSA.UCSC.dm2_multiz9way.txt = source
## msaref = nlmsa_utils.NLMSABuilder(source)
## msaref.__doc__ = 'NLMSA builder to test'
## pygrData.Bio.MSA.UCSC.dm2_multiz9way = msaref
## pygrData.save()
def download_test(self):
'test building the NLMSA, and a simple query'
os.environ['PYGRDATADOWNLOAD'] = self.testDir
os.environ['PYGRDATABUILDDIR'] = self.testDir
t = time.time()
pygr.Data.Bio.MSA.UCSC.dm2_multiz9way() # build it!
t1 = time.time() - t # 1st build time
pygr.Data.clear_cache() # reload rsrc db
t = time.time()
msa = pygr.Data.Bio.MSA.UCSC.dm2_multiz9way() # already built
t2 = time.time() - t # 2nd request time
assert t2 < t1/3., 'second request took too long!'
chr4 = msa.seqDict['dm2.chr4']
result = msa[chr4[:10000]]
assert len(result) == 9
def teardown(self):
'clean up our temporary directory'
rm_recursive(self.testDir)
| 2.515625
| 3
|
StockAnalysisSystem/core/interface_util.py
|
lifg2000/StockAnalysisSystem
| 0
|
12784905
|
from .AnalyzerEntry import StrategyEntry, AnalysisResult
from StockAnalysisSystem.core.Utiltity.task_future import *
from StockAnalysisSystem.core.DataHubEntry import DataHubEntry
# For core.interface, without ui.
class SasAnalysisTask(TaskFuture):
def __init__(self, strategy_entry: StrategyEntry, data_hub: DataHubEntry,
securities: str or [str], analyzer_list: [str], time_serial: tuple, enable_from_cache: bool):
super(SasAnalysisTask, self).__init__('SasAnalysisTask')
self.__data_hub = data_hub
self.__strategy = strategy_entry
self.__securities = securities
self.__analyzer_list = analyzer_list
self.__time_serial = time_serial
self.__enable_from_cache = enable_from_cache
def run(self):
stock_list = self.select()
result_list = self.analysis(stock_list)
self.update_result(result_list)
def identity(self) -> str:
return 'SasAnalysisTask'
# -----------------------------------------------------------------------------
def select(self) -> [str]:
if self.__securities is None:
data_utility = self.__data_hub.get_data_utility()
stock_list = data_utility.get_stock_identities()
elif isinstance(self.__securities, str):
stock_list = [self.__securities]
elif isinstance(self.__securities, (list, tuple, set)):
stock_list = list(self.__securities)
else:
stock_list = []
return stock_list
def analysis(self, securities_list: [str]) -> [AnalysisResult]:
total_result = self.__strategy.analysis_advance(
securities_list, self.__analyzer_list, self.__time_serial,
self.get_progress_rage(), enable_from_cache=self.__enable_from_cache)
return total_result
| 2.234375
| 2
|
string_algorithms/utils.py
|
mhozza/string_algorithms
| 6
|
12784906
|
from math import floor, log2
from operator import itemgetter
def argmin(*args):
if len(args) == 1:
iterable = args[0]
else:
iterable = args
return min((j, i) for i, j in enumerate(iterable))[1]
def greatest_pow2(n):
return 2 ** floor(log2(n))
def inverse_index(a):
return {v: k for k, v in enumerate(a)}
def inverse_index_array(a):
ia = [None] * len(a)
for i, v in enumerate(a):
ia[v] = i
return ia
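# Illustrative examples (not part of the original module), expressed as simple asserts.
if __name__ == "__main__":
    assert argmin(3, 1, 2) == 1
    assert argmin([5, 4, 9]) == 1
    assert greatest_pow2(20) == 16
    assert inverse_index(['a', 'b', 'c']) == {'a': 0, 'b': 1, 'c': 2}
    assert inverse_index_array([2, 0, 1]) == [1, 2, 0]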
| 3.234375
| 3
|
test1.py
|
jeffhawk/pythontraining
| 0
|
12784907
|
<filename>test1.py
import sys
if sys.version_info[0] >= 3:
import PySimpleGUI as sg
else:
import PySimpleGUI27 as sg
def MachineLearningGUI():
sg.SetOptions(text_justification='right')
flags = [[sg.Checkbox('Normalize', size=(12, 1), default=True), sg.Checkbox('Verbose', size=(20, 1))],
[sg.Checkbox('Cluster', size=(12, 1)), sg.Checkbox('Flush Output', size=(20, 1), default=True)],
[sg.Checkbox('Write Results', size=(12, 1)), sg.Checkbox('Keep Intermediate Data', size=(20, 1))],
[sg.Checkbox('Normalize', size=(12, 1), default=True), sg.Checkbox('Verbose', size=(20, 1))],
[sg.Checkbox('Cluster', size=(12, 1)), sg.Checkbox('Flush Output', size=(20, 1), default=True)],
[sg.Checkbox('Write Results', size=(12, 1)), sg.Checkbox('Keep Intermediate Data', size=(20, 1))],]
loss_functions = [[sg.Radio('Cross-Entropy', 'loss', size=(12, 1)), sg.Radio('Logistic', 'loss', default=True, size=(12, 1))],
[sg.Radio('Hinge', 'loss', size=(12, 1)), sg.Radio('Huber', 'loss', size=(12, 1))],
[sg.Radio('Kullerback', 'loss', size=(12, 1)), sg.Radio('MAE(L1)', 'loss', size=(12, 1))],
[sg.Radio('MSE(L2)', 'loss', size=(12, 1)), sg.Radio('MB(L0)', 'loss', size=(12, 1))],]
command_line_parms = [[sg.Text('Passes', size=(8, 1)), sg.Spin(values=[i for i in range(1, 1000)], initial_value=20, size=(6, 1)),
sg.Text('Steps', size=(8, 1), pad=((7,3))), sg.Spin(values=[i for i in range(1, 1000)], initial_value=20, size=(6, 1))],
[sg.Text('ooa', size=(8, 1)), sg.In(default_text='6', size=(8, 1)), sg.Text('nn', size=(8, 1)),
sg.In(default_text='10', size=(10, 1))],
[sg.Text('q', size=(8, 1)), sg.In(default_text='ff', size=(8, 1)), sg.Text('ngram', size=(8, 1)),
sg.In(default_text='5', size=(10, 1))],
[sg.Text('l', size=(8, 1)), sg.In(default_text='0.4', size=(8, 1)), sg.Text('Layers', size=(8, 1)),
sg.Drop(values=('BatchNorm', 'other'), auto_size_text=True)],]
    layout = [[sg.Frame('Command Line Parameters', command_line_parms, title_color='green', font='Any 12')],
[sg.Frame('Flags', flags, font='Any 12', title_color='blue')],
[sg.Frame('Loss Functions', loss_functions, font='Any 12', title_color='red')],
[sg.Submit(), sg.Cancel()]]
window = sg.Window('Machine Learning Front End', font=("Helvetica", 12)).Layout(layout)
button, values = window.Read()
sg.SetOptions(text_justification='left')
print(button, values)
def CustomMeter():
# layout the form
layout = [[sg.Text('A custom progress meter')],
[sg.ProgressBar(1000, orientation='h', size=(20,20), key='progress')],
[sg.Cancel()]]
    # create the form
window = sg.Window('Custom Progress Meter').Layout(layout)
progress_bar = window.FindElement('progress')
# loop that would normally do something useful
for i in range(1000):
# check to see if the cancel button was clicked and exit loop if clicked
event, values = window.Read(timeout=0, timeout_key='timeout')
if event == 'Cancel' or event == None:
break
# update bar with loop value +1 so that bar eventually reaches the maximum
progress_bar.UpdateBar(i+1)
# done with loop... need to destroy the window as it's still open
window.CloseNonBlocking()
if __name__ == '__main__':
CustomMeter()
MachineLearningGUI()
| 2.5
| 2
|
setup.py
|
XertroV/nvblib
| 3
|
12784908
|
<gh_stars>1-10
from distutils.core import setup
setup(
name='nvblib',
version='0.0.1',
packages=['nvblib', 'nvblib.instructions'],
url='https://github.com/XertroV/nvblib',
license='MIT',
author='XertroV',
author_email='<EMAIL>',
description='core library for nvb objects'
)
| 1.179688
| 1
|
venv/lib/python2.7/site-packages/lettuce/plugins/scenario_names.py
|
GrupoMazoGuay/final
| 0
|
12784909
|
# -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from lettuce import core
from lettuce.terrain import after
from lettuce.terrain import before
from lettuce.plugins.reporter import Reporter
class NameReporter(Reporter):
def print_scenario_running(self, scenario):
self.wrt('%s ... ' % scenario.name)
def print_scenario_ran(self, scenario):
if scenario.passed:
self.wrt("OK")
elif scenario.failed:
reason = self.scenarios_and_its_fails[scenario]
if isinstance(reason.exception, AssertionError):
self.wrt("FAILED")
else:
self.wrt("ERROR")
self.wrt("\n")
reporter = NameReporter()
before.each_scenario(reporter.print_scenario_running)
after.each_scenario(reporter.print_scenario_ran)
after.each_step(reporter.store_failed_step)
after.all(reporter.print_end)
def print_no_features_found(where):
where = core.fs.relpath(where)
if not where.startswith(os.sep):
where = '.%s%s' % (os.sep, where)
reporter.wrt('Oops!\n')
reporter.wrt('could not find features at %s\n' % where)
| 2.15625
| 2
|
dcp/003/solution.py
|
dantin/poj
| 0
|
12784910
|
<reponame>dantin/poj
# -*- coding: utf-8 -*-
class Node():
def __init__(self, val, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution():
def deserialize(self, data):
def decode(vals):
val = next(vals)
if val == '#':
return None
node = Node(val)
node.left = decode(vals)
node.right = decode(vals)
return node
vals = iter(data.split())
return decode(vals)
def serialize(self, root):
vals = []
def encode(node):
if node:
vals.append(str(node.val))
encode(node.left)
encode(node.right)
else:
vals.append('#')
encode(root)
return ' '.join(vals)
if __name__ == '__main__':
node = Node('root', Node('left', Node('left.left')), Node('right'))
solver = Solution()
assert solver.deserialize(solver.serialize(node)).left.left.val == 'left.left'
| 3.15625
| 3
|
automation/read_config_file.py
|
ominocutherium/gamejam-skeleton-project
| 0
|
12784911
|
#!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2021 ominocutherium
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# Part of ominocutherium's godot gamejam skeleton project.
import os
class ConfigFileInfo:
docs_default_files = []
all_build_objects = []
additional_packages_to_build = []
default_build_info_object = None
__current_build_info_object = None
itch_user = ""
itch_game_name = ""
gut_test_dirs = []
git_primary_branch_name = ""
def __handle_docs_defaults_line(self,line:str):
self.docs_default_files.append(line.split()[1])
def __handle_export_exclude_line(self,line:str):
if self.__current_build_info_object != None:
self.__current_build_info_object.remove_globs.append(line[:-1].split(None,1)[1])
def __handle_export_include_line(self,line:str):
if self.__current_build_info_object != None:
self.__current_build_info_object.add_globs.append(line[:-1].split(None,1)[1])
def __handle_build_info_line(self,line:str):
data = line[:-1].split()
if data[3] == "assets":
self.__current_build_info_object = AssetPackBuildInfo()
if len(data) > 4:
self.__current_build_info_object.pack_name = data[4]
if len(data) > 5 and data[5] == "dlc":
self.__current_build_info_object.add_to_all_platform_packs = False
else:
self.__current_build_info_object = PlatformBuildInfo()
self.__current_build_info_object.platform_template_name = line[:-1].split(None,3)[3]
self.all_build_objects.append(self.__current_build_info_object)
self.__current_build_info_object.itch_channel_name = data[1]
self.__current_build_info_object.build_dir = data[2]
def __handle_itch_config_line(self,line:str):
data = line[:-1].split()
if len(data) > 2:
self.itch_user = data[1]
self.itch_game_name = data[2]
def __handle_test_dir_line(self,line:str):
self.gut_test_dirs.append(line[:-1].split()[1])
def __handle_git_primary_branch_name(self,line:str):
self.git_primary_branch_name = line[:-1].split()[1]
handlers_for_keys = {
"docs_defaults":__handle_docs_defaults_line,
"export_include":__handle_export_include_line,
"export_exclude":__handle_export_exclude_line,
"build_info":__handle_build_info_line,
"itch_config":__handle_itch_config_line,
"git_primary_branchname":__handle_git_primary_branch_name,
"test_dir":__handle_test_dir_line,
}
def __init__(self):
self.docs_default_files = []
self.additional_packages_to_build = []
self.__current_build_info_object = DefaultBuildInfo()
self.default_build_info_object = self.__current_build_info_object
self.all_build_objects = [self.__current_build_info_object]
self.gut_test_dirs = []
def read_config(self):
if os.path.exists(os.path.join('automation','config.txt')):
with open(os.path.join('automation','config.txt')) as config_file:
for line in config_file:
line_without_newline = line[:-1]
split_line = line_without_newline.split()
if len(split_line) > 1 and split_line[0] in self.handlers_for_keys:
self.handlers_for_keys[split_line[0]](self,line)
class BuildInfo():
# have all of the state but none of the behavior of build_game.BuildInfo
# in build_game, BuildInfo copies from this BuildInfo on initialization
build_dir = ""
build_type = ""
platform_template_name = "" # only for game exports, not asset packs
itch_channel_name = ""
files_included : list = []
add_globs : list = []
remove_globs : list = []
def __init__(self):
self.add_globs = []
self.remove_globs = []
self.files_included = []
class PlatformBuildInfo(BuildInfo):
build_type = "platform"
class DefaultBuildInfo(BuildInfo):
build_type = "default"
class AssetPackBuildInfo(BuildInfo):
# have all of the state but none of the behavior of build_asset_packs.AssetPackBuildInfo
# in build_asset_packs, AssetPackBuildInfo copies from this BuildInfo on initialization
build_type = "asset_pack"
pack_name = ""
add_to_all_platform_packs : bool = True
def read_config():
config_info = ConfigFileInfo()
config_info.read_config()
return config_info
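# Illustrative usage sketch (not part of the original module). The config.txt lines
# below are hypothetical examples of the keys handled above, not a file shipped with
# the project:
#   docs_defaults README.md
#   itch_config some_user some_game
#   build_info linux builds/linux Linux/X11
#   build_info art builds/art assets artpack dlc
#   export_exclude *.psd
#   test_dir test/unit
#   git_primary_branchname main
if __name__ == "__main__":
    config_info = read_config()
    print(len(config_info.all_build_objects), config_info.itch_user, config_info.git_primary_branch_name)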
| 1.742188
| 2
|
common/config_parser/config_dto.py
|
Softeq/PyCats
| 7
|
12784912
|
<filename>common/config_parser/config_dto.py
from dataclasses import dataclass
@dataclass
class APIValidationDTO:
check_status_code: bool
check_headers: bool
check_body: bool
check_is_field_missing: bool
@dataclass
class WebDriverSettingsDTO:
webdriver_folder: str
default_wait_time: int
implicit_wait_time: int
selenium_server_executable: str
chrome_driver_name: str
firefox_driver_name: str
browser: str
driver_path: str
stop_server: bool
chrome_options: list
@dataclass
class MobileDriverSettingsDTO:
appium_server_path: str
node_executable_path: str
default_wait_time: int
implicit_wait_time: int
platform: str
ios_udid: str
ipa_path: str
android_udid: str
android_package: str
android_activity: str
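# Illustrative instantiation (not part of the original module); the field values below
# are arbitrary examples, not defaults used by the framework.
if __name__ == "__main__":
    api_validation = APIValidationDTO(check_status_code=True, check_headers=True,
                                      check_body=True, check_is_field_missing=False)
    print(api_validation)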
| 1.796875
| 2
|
apps/about/models.py
|
glenjasper/cobija-web
| 0
|
12784913
|
<reponame>glenjasper/cobija-web
from django.db import models
from auditlog.registry import auditlog
class About(models.Model):
title = models.CharField(max_length = 100, verbose_name = 'Title', help_text = "It's not used, it's only referential.")
description = models.TextField(verbose_name = 'Description', help_text = 'Description')
status = models.BooleanField(default = True, verbose_name = 'Status')
created = models.DateTimeField(auto_now_add = True, blank = True, null = True, verbose_name = 'Creation date')
updated = models.DateTimeField(auto_now = True, blank = True, null = True, verbose_name = 'Modification date')
class Meta:
db_table = "cobija_about"
verbose_name = "About"
verbose_name_plural = "About"
def __str__(self):
return self.title
auditlog.register(About)
| 2.125
| 2
|
mep/people/tests/test_people_commands.py
|
making-books-ren-today/test_eval_3_shxco
| 3
|
12784914
|
<reponame>making-books-ren-today/test_eval_3_shxco
import datetime
from io import StringIO
from django.test import TestCase
from mep.accounts.models import Event
from mep.people.management.commands import export_members
from mep.people.models import Person
class TestExportMembers(TestCase):
fixtures = ['sample_people']
def setUp(self):
self.cmd = export_members.Command()
self.cmd.stdout = StringIO()
def test_get_queryset(self):
# queryset should only include library members
member = Person.objects.get(pk=189) # francisque gay, member
author = Person.objects.get(pk=7152) # aeschylus, non-member
qs = self.cmd.get_queryset()
assert member in qs
assert author not in qs
def test_get_object_data(self):
# fetch some example people from fixture & call get_object_data
gay = Person.objects.get(name='<NAME>')
hemingway = Person.objects.get(name='<NAME>')
gay_data = self.cmd.get_object_data(gay)
hemingway_data = self.cmd.get_object_data(hemingway)
# check some basic data
assert gay_data['name'] == '<NAME>'
assert gay_data['gender'] == 'Male'
assert gay_data['birth_year'] == 1885
assert hemingway_data['sort_name'] == 'Hemingway, Ernest'
assert hemingway_data['death_year'] == 1961
assert 'title' not in hemingway_data # empty fields not present
# fixture has no events, so no years are set
assert hemingway_data['membership_years'] == []
# check nationalities
assert 'France' in gay_data['nationalities']
assert 'United States' in hemingway_data['nationalities']
# check viaf & wikipedia urls
assert hemingway_data['wikipedia_url'] == \
'https://en.wikipedia.org/wiki/Ernest_Hemingway'
assert gay_data['viaf_url'] == 'http://viaf.org/viaf/9857613'
# check addresses & coordinates
assert '3 Rue Garancière, Paris' in gay_data['addresses']
assert '48.85101, 2.33590' in gay_data['coordinates']
assert '75006' in gay_data['postal_codes']
assert 6 in gay_data['arrondissements']
assert gay_data['updated'] == gay.updated_at.isoformat()
assert hemingway_data['updated'] == hemingway.updated_at.isoformat()
# add events to check membership years
account = gay.account_set.first()
Event.objects.create(
account=account, start_date=datetime.date(1920, 5, 1),
end_date=datetime.date(1921, 2, 1))
Event.objects.create(
account=account, start_date=datetime.date(1935, 5, 1))
gay_data = self.cmd.get_object_data(gay)
assert gay_data['membership_years'] == [1920, 1921, 1935]
| 2.328125
| 2
|
server/swagger_server/test/test_notification_controller.py
|
kakwa/certascale
| 0
|
12784915
|
<reponame>kakwa/certascale<gh_stars>0
# coding: utf-8
from __future__ import absolute_import
from flask import json
from six import BytesIO
from swagger_server.models.default_error import DefaultError # noqa: E501
from swagger_server.models.default_message import DefaultMessage # noqa: E501
from swagger_server.models.notification import Notification # noqa: E501
from swagger_server.models.notification_list import NotificationList # noqa: E501
from swagger_server.models.notification_update import NotificationUpdate # noqa: E501
from swagger_server.test import BaseTestCase
class TestNotificationController(BaseTestCase):
"""NotificationController integration test stubs"""
def test_notification_acknowledge(self):
"""Test case for notification_acknowledge
"""
body = NotificationUpdate()
response = self.client.open(
'/api/v1.0/notification/{notificationId}'.format(notificationId='notificationId_example'),
method='PUT',
data=json.dumps(body),
content_type='application/json')
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
def test_notification_get(self):
"""Test case for notification_get
"""
response = self.client.open(
'/api/v1.0/notification/{notificationId}'.format(notificationId='notificationId_example'),
method='GET',
content_type='application/json')
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
def test_notification_list(self):
"""Test case for notification_list
"""
query_string = [('next_id', 56)]
response = self.client.open(
'/api/v1.0/notification',
method='GET',
content_type='application/json',
query_string=query_string)
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
if __name__ == '__main__':
import unittest
unittest.main()
| 1.859375
| 2
|
codeforces/1111A/solution.py
|
yhmin84/codeforces
| 0
|
12784916
|
# https://codeforces.com/problemset/problem/1111/A
vowels = set(["a", "e", "i", "o", "u"])
def can_partial_transform(c1, c2):
if (c1 in vowels and c2 in vowels) or (c1 not in vowels and c2 not in vowels):
return True
return False
def can_transform(hero1, hero2):
hero1_len = len(hero1)
if hero1_len != len(hero2):
return False
for i in range(hero1_len):
if not can_partial_transform(hero1[i], hero2[i]):
return False
return True
if __name__ == "__main__":
hero1 = input()
hero2 = input()
result = can_transform(hero1, hero2)
if result:
print('Yes')
else:
print('No')
| 4.03125
| 4
|
test4.py
|
Ticlext-Altihaf/Absen-Kamera
| 0
|
12784917
|
<filename>test4.py
from deepface import DeepFace
DeepFace.stream()
| 1.039063
| 1
|
development/modules/auxiliary_career_decision_data.py
|
tobiasraabe/respy_for_ma
| 0
|
12784918
|
<filename>development/modules/auxiliary_career_decision_data.py
"""This module contains the auxiliary function for the data transformation."""
import numpy as np
import pandas as pd
from respy.python.shared.shared_constants import TEST_RESOURCES_DIR
def prepare_dataset():
"""Convert the raw dataset to a DataFrame that can be used with Respy."""
df = load_dataset()
df = minor_refactoring(df)
df = truncate_military_history(df)
df = add_state_variables(df)
write_out(df)
def load_dataset():
"""This function just prepares the original submission in a data frame."""
columns = ["Identifier", "Age", "Schooling", "Choice", "Wage"]
    dtype = {
        "Identifier": int,
        "Age": int,
        "Schooling": int,
"Choice": "category",
}
df = pd.DataFrame(
np.genfromtxt(str(TEST_RESOURCES_DIR / "KW_97.raw")), columns=columns
).astype(dtype)
df.set_index(["Identifier", "Age"], inplace=True, drop=False)
# I drop the information on the NLSY identifier and set the identifier to the count.
for count, idx in enumerate(df.index.levels[0]):
df.loc[(slice(idx, idx), slice(None)), "Identifier"] = count
df.set_index(["Identifier", "Age"], inplace=True, drop=False)
# This mapping is based on direct communication with <NAME>.
df["Choice"].cat.categories = ["Schooling", "Home", "White", "Blue", "Military"]
return df
def minor_refactoring(df):
"""This function performs some basic refactoring directly from existing
variables."""
df["Period"] = df["Age"] - 16
df["Choice"].cat.categories = [3, 4, 1, 2, -99]
df.rename(columns={"Schooling": "Years_Schooling"}, inplace=True)
return df
def truncate_military_history(df):
    """This function truncates an individual's history once assigned to the military."""
def _delete_military_service(agent):
"""This function deletes all observations going forward if an individual enrolls
in the military."""
for index, row in agent.iterrows():
identifier, period = index
if row["Choice"] == -99:
return agent.loc[(slice(None, None), slice(None, period - 1)), :]
return agent
# pandas 23 does not allow for two index levels with the same name.
# this was introduced on purpose, but currently breaks many sensible
# groupby operations. This will probably be fixed in future versions
# but here we introduce a workaround:
# (the workaround should also work in all earlier and later versions.)
df["id"] = df["Identifier"]
df = df.groupby("id").apply(_delete_military_service)
df.set_index(["Identifier", "Age"], inplace=True, drop=False)
df.drop("id", axis=1, inplace=True)
return df
def add_state_variables(df):
"""This function adds all additional state variables."""
def _add_state_variables(agent):
"""This function iterates through an agent record and constructs the state
        variables for each point in time.
"""
exp_a, exp_b = 0, 0
# We simply assume that individuals who do not have the expected number of years
# of education did spend the last year at home.
if agent.loc[(slice(None), slice(16, 16)), "Years_Schooling"].to_numpy() < 10:
lagged_choice = 4
else:
lagged_choice = 3
for index, row in agent.iterrows():
identifier, period = index
agent["Lagged_Choice"].loc[:, period] = lagged_choice
agent["Experience_A"].loc[:, period] = exp_a
agent["Experience_B"].loc[:, period] = exp_b
# Update labor market experience
if row["Choice"] == 1:
exp_a += 1
elif row["Choice"] == 2:
exp_b += 1
else:
pass
# Update lagged activity:
# (0) Home, (1) Education, (2) Occupation A, and (3) Occupation B.
lagged_choice = row["Choice"]
return agent
df["Lagged_Choice"] = np.nan
df["Experience_A"] = np.nan
df["Experience_B"] = np.nan
# pandas 23 does not allow for two index levels with the same name.
# this was introduced on purpose, but currently breaks many sensible
# groupby operations. This will probably be fixed in future versions
# but here we introduce a workaround:
# (the workaround should also work in all earlier and later versions.)
df["id"] = df["Identifier"]
df = df.groupby("id").apply(_add_state_variables)
df.drop("id", axis=1, inplace=True)
return df
def write_out(df):
    """This function writes out the relevant information to a simple text file."""
labels = [
"Identifier",
"Period",
"Choice",
"Wage",
"Experience_A",
"Experience_B",
"Years_Schooling",
"Lagged_Choice",
]
    formats = {label: int for label in labels}
    formats["Wage"] = float
df = df[labels].astype(formats)
with open("career_data.respy.dat", "w") as file_:
df.to_string(file_, index=False, header=True, na_rep=".")
df.set_index(["Identifier", "Period"], drop=False, inplace=True)
df.to_pickle("career_data.respy.pkl")
| 3.09375
| 3
|
corefacility/authorizations/google/entity/authorization_token/authorization_token.py
|
serik1987/corefacility
| 0
|
12784919
|
<reponame>serik1987/corefacility
from core.entity.external_authorization_token import ExternalAuthorizationToken
from core.entity.entity_fields import ReadOnlyField, ManagedEntityField
from .authorization_token_set import AuthorizationTokenSet
from .model_provider import ModelProvider
from .mock_provider import MockProvider
from .refresh_token_manager import RefreshTokenManager
class AuthorizationToken(ExternalAuthorizationToken):
"""
Defines the Google external authorization token and the ways
how the token can be received, stored or retrieved
Let's mention all public fields and how the ExternalAuthorizationToken works:
    * code - when you have the authorization code from the user you need to fill in this field
    by putting the code into this field, like here: token = AuthorizationToken(code=your_authorization_code)
    When you call token.create() the entity will make a first request to Google to exchange the authorization
    code for the authorization token. Next, it makes a second request to Google to receive the user's account name
    using the authorization token. Third, the entity queries the Account entity to find the user with that
    particular account name. Finally, the entity saves all the data received from Google and the Account
    entity to the database. When the entity is loaded from the local database the value of this field is None,
    which means that you no longer need the authorization code. The field is required when you create() an entity
    but is not required when you update() the entity.
    * access_token - a read-only field. Before the instance is created the field value is None, while
    after creation the field holds the actual access token.
    * expires_in - an informative field that allows you to check whether the access token has expired.
    * refresh_token - the token required to refresh the access token when it has expired. To refresh the
    access_token just use the following: token.refresh_token.refresh(). This will request a new access token
    from Google and save it to the database.
    * authentication - the authentication attached to the token. In order to look up the user this token belongs
    to, use: token.authentication.user.
"""
_entity_set_class = AuthorizationTokenSet
_entity_provider_list = [MockProvider(), ModelProvider()]
_intermediate_field_description = {
"access_token": ReadOnlyField(description="Access token"),
"expires_in": ReadOnlyField(description="Access token expiration time"),
"refresh_token": ManagedEntityField(RefreshTokenManager, description="Refresh token")
}
| 2.921875
| 3
|
fabric_bolt/projects/migrations/0003_auto_20150911_1911.py
|
jooni22/fabric-bolt
| 219
|
12784920
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('hosts', '0002_sshconfig'),
('projects', '0002_auto_20140912_1509'),
]
operations = [
migrations.AddField(
model_name='configuration',
name='value_ssh_key',
field=models.ForeignKey(verbose_name=b'Value', blank=True, to='hosts.SSHConfig', null=True),
),
migrations.AlterField(
model_name='configuration',
name='data_type',
field=models.CharField(default=b'string', max_length=10, null=True, blank=True, choices=[(b'boolean', b'Boolean'), (b'number', b'Number'), (b'string', b'String'), (b'ssk_key', b'SSH Key')]),
),
]
| 1.71875
| 2
|
utils/download_data.py
|
ialifinaritra/Music_Generation
| 0
|
12784921
|
import urllib.request
DIR_PATH = '../data/'
def download_music(output_path):
midiFile_l = ['cs1-2all.mid', 'cs5-1pre.mid', 'cs4-1pre.mid', 'cs3-5bou.mid', 'cs1-4sar.mid', 'cs2-5men.mid',
'cs2-3cou.mid', 'cs1-6gig.mid', 'cs6-4sar.mid', 'cs4-5bou.mid', 'cs4-3cou.mid', 'cs5-3cou.mid',
'cs6-5gav.mid', 'cs6-6gig.mid', 'cs6-2all.mid', 'cs2-1pre.mid', 'cs3-1pre.mid', 'cs3-6gig.mid',
'cs2-6gig.mid', 'cs2-4sar.mid', 'cs3-4sar.mid', 'cs1-5men.mid', 'cs1-3cou.mid', 'cs6-1pre.mid',
'cs2-2all.mid', 'cs3-2all.mid', 'cs1-1pre.mid', 'cs5-2all.mid', 'cs4-2all.mid', 'cs5-5gav.mid',
'cs4-6gig.mid', 'cs5-6gig.mid', 'cs5-4sar.mid', 'cs4-4sar.mid', 'cs6-3cou.mid', 'cs3-3cou.mid']
for midiFile in midiFile_l:
urllib.request.urlretrieve("http://www.jsbach.net/midi/" + midiFile, output_path + midiFile)
if __name__ == "__main__":
download_music(DIR_PATH)
| 2.859375
| 3
|
src/stock_data_provider/cn_a/__init__.py
|
stonewell/learn-curve
| 0
|
12784922
|
#load stock data from vip data for cn_a
from .vip_dataset import load_stock_data
| 1
| 1
|
ppgan/metric/metric_util.py
|
ZMpursue/PaddleGAN
| 4
|
12784923
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
def reorder_image(img, input_order='HWC'):
"""Reorder images to 'HWC' order.
If the input_order is (h, w), return (h, w, 1);
If the input_order is (c, h, w), return (h, w, c);
If the input_order is (h, w, c), return as it is.
Args:
img (ndarray): Input image.
input_order (str): Whether the input order is 'HWC' or 'CHW'.
If the input image shape is (h, w), input_order will not have
effects. Default: 'HWC'.
Returns:
ndarray: reordered image.
"""
if input_order not in ['HWC', 'CHW']:
raise ValueError(
f'Wrong input_order {input_order}. Supported input_orders are '
"'HWC' and 'CHW'")
if len(img.shape) == 2:
img = img[..., None]
return img
if input_order == 'CHW':
img = img.transpose(1, 2, 0)
return img
def bgr2ycbcr(img, y_only=False):
"""Convert a BGR image to YCbCr image.
The bgr version of rgb2ycbcr.
It implements the ITU-R BT.601 conversion for standard-definition
television. See more details in
https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.
It differs from a similar function in cv2.cvtColor: `BGR <-> YCrCb`.
In OpenCV, it implements a JPEG conversion. See more details in
https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion.
Args:
img (ndarray): The input image. It accepts:
1. np.uint8 type with range [0, 255];
2. np.float32 type with range [0, 1].
y_only (bool): Whether to only return Y channel. Default: False.
Returns:
ndarray: The converted YCbCr image. The output image has the same type
and range as input image.
"""
img_type = img.dtype
#img = _convert_input_type_range(img)
if y_only:
out_img = np.dot(img, [24.966, 128.553, 65.481]) + 16.0
else:
out_img = np.matmul(
img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786],
[65.481, -37.797, 112.0]]) + [16, 128, 128]
#out_img = _convert_output_type_range(out_img, img_type)
return out_img
def to_y_channel(img):
"""Change to Y channel of YCbCr.
Args:
img (ndarray): Images with range [0, 255].
Returns:
(ndarray): Images with range [0, 255] (float type) without round.
"""
img = img.astype(np.float32) / 255.
if img.ndim == 3 and img.shape[2] == 3:
img = bgr2ycbcr(img, y_only=True)
img = img[..., None]
return img * 255.
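# Illustrative usage sketch (not part of the original module): converting a random
# CHW uint8 image to HWC order and extracting its Y channel.
if __name__ == '__main__':
    chw_img = np.random.randint(0, 256, size=(3, 4, 5), dtype=np.uint8)
    hwc_img = reorder_image(chw_img, input_order='CHW')  # -> shape (4, 5, 3)
    y = to_y_channel(hwc_img)                            # -> shape (4, 5, 1), float in [0, 255]
    print(hwc_img.shape, y.shape)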
| 2.5
| 2
|
run_gps_master.py
|
MarieNyst/GPS
| 4
|
12784924
|
import time
import random
import os
from GPS import gps
from GPS import redisHelper
from GPS import helper
from GPS import args
from GPS import postProcess
# Parse the command line arguments, then, if provided, parse the arguments in
# the scenario file. Then add default values for parameters without definitions.
# Finally, validate all argument definitions, check that needed files and
# directories exist, and then check to make sure that all required arguments
# received definitions.
argument_parser = args.ArgumentParser()
arguments, skipped_lines = argument_parser.parse_arguments()
# Everything GPS does should be done from within the experiment directory
# (which defaults to the current working directory)
with helper.cd(arguments['experiment_dir']):
# Connect to the redis database
R = redisHelper.connect(host=arguments['redis_host'],
port=arguments['redis_port'],
dbid=arguments['redis_dbid'])
# Clear all old state from the current database
redisHelper.deleteDB(R)
# Create a random ID for this GPS run
gpsID = helper.generateID()
# Make the output directory if it does not already exist
helper.mkdir(arguments['output_dir'])
# Now create a directory inside of that one for the files from
# this particular GPS run. If this directory already exists, rename it to
# something else
output_dir = '{}/gps-run-{}'.format(arguments['output_dir'], gpsID)
arguments['output_dir'] = output_dir
moved = False
if helper.isDir(output_dir):
random_id = helper.generateID()
os.system('mv {output_dir} {output_dir}-{random_id}'
''.format(output_dir=output_dir, random_id=random_id))
moved = True
helper.mkdir(output_dir)
# Get a logger
logger = gps.getLogger('{}/gps.log'.format(output_dir), arguments['verbose'])
# Announce the start of the run
logger.info('Starting new GPS run with GPS ID {}'.format(gpsID))
# And record a warning, if needed.
if moved:
logger.warning('Moved old GPS log files to directory {}-{}'
''.format(output_dir, random_id))
# Issue a warning about skipped lines in the scenario file
if len(skipped_lines) > 0:
for line in skipped_lines:
logger.warning("GPS skipped the following unrecognized line '{}' "
"in the scenario file".format(line))
# Update the random seed, if needed
if arguments['seed'] <= 0:
arguments['seed'] = random.randrange(0,999999)
# Create a new scenario file in the log location with all of GPS's parameters
# instantiated to their final values. The workers will use this file to set up,
# and it is useful to have for later for debugging purposes as well.
scenario_file = os.path.abspath(os.path.expanduser(os.path.expandvars('{}/scenario.txt'.format(output_dir))))
argument_parser.create_scenario_file(scenario_file, arguments)
R.set('scenarioFile:' + str(gpsID), scenario_file)
R.set('readyCount:' + str(gpsID), 0)
# Signal to the workers that the master is ready.
R.set('gpsID', gpsID)
try:
#Wait until all of the slaves are ready
ready = False
logger.info('Waiting until all workers are ready...')
oldReadyCount = -1
while(not ready):
time.sleep(1)
readyCount = redisHelper.getReadyCount(gpsID,R)
if(readyCount != oldReadyCount):
logger.info("There are {} out of a minimum of {} workers ready..."
"".format(readyCount, arguments['minimum_workers']))
oldReadyCount = readyCount
ready = readyCount >= arguments['minimum_workers']
readyCount = redisHelper.getReadyCount(gpsID,R)
logger.info("There are {} out of a minimum of {} workers ready..."
"".format(readyCount, arguments['minimum_workers']))
logger.info("GPS Master process is starting.")
pbest, decisionSeq, incumbentTrace, cpuTime, wallTime = gps.gps(arguments, gpsID)
end_master_time = time.time()
R.set('incumbent:' + str(gpsID),pbest)
finally:
R.set('cancel:' + str(gpsID),'True')
if arguments['post_process_incumbent']:
logger.info('Beginning GPS post-processing of configuration runs to select as the incumbent the '
'configuration that has the best performance on the largest number of instances. This '
'should only take a few seconds and helps protect against mistakes made by GPS due to '
'parameter interactions.')
# Create a new post-processing selector
selector = postProcess.Selector(
min_instances=arguments['post_process_min_runs'],
alpha=arguments['post_process_alpha'],
n_permutations=arguments['post_process_n_permutations'],
multiple_test_correction=arguments['post_process_multiple_test_correction'],
logger=logger)
# Add the data from the current scenario
logger.info(arguments['output_dir'])
selector.add_scenarios(arguments['output_dir'])
# And select the best configuration
incumbent, num_runs, estimated_runtime = selector.extract_best()
logger.info("The final incumbent after post-processing all of the configuration runs was evaluated "
" on {0} unique instances and has an estimated running time of {1:.2f} seconds."
"".format(num_runs, estimated_runtime))
logger.info("Final Incumbent: {}".format(incumbent))
if gps.getParamString(pbest) != incumbent:
incumbent_logger = gps.getLogger(arguments['output_dir'] + '/traj.csv', verbose=1, console=False,
format_='%(message)s', logger_name='incumbent_logger_post_process')
incumbent_logger.info('{cpu_time},{train_perf},{wall_time},{inc_id},{ac_time},{config}'
''.format(cpu_time=cpuTime,
train_perf=estimated_runtime,
wall_time=wallTime + time.time() - end_master_time,
inc_id=-1,
ac_time=-1,
config=incumbent.replace(' -',',').replace(' ','=')[1:]))
| 2.734375
| 3
|
main.py
|
Deepkk-9/phdc-discord-bot
| 0
|
12784925
|
<reponame>Deepkk-9/phdc-discord-bot
#Imports
import discord
import os
import requests
import json
import random
import urllib
from replit import db
from keep_alive import keep_alive
#Variables
client = discord.Client()
bad_words = [
"Fuck", "fuck", "Fuck You", "Shit", "Piss off", "Fuck off", "Dick head",
"Asshole", "Son of a bitch", "Bastard", "Bitch", "Wanker"
]
sad_words = [
"error", "build failed", "not working", "bug", "failed", "err", "buggy"
]
warning = [
">>> ⚠ Please avoid using Swear Words it is against our server policy!",
">>> ⚠ Use of Swear Words are against our server policy!",
">>> ⚠ Bullying someone using Swear Words are against our server policy!"
]
solution = [
">>> 🤔 I think you should find something on stackoverflow !\n💡 Tip: Sharing your project link is also helpful"
]
projects = [
">>> **Live Projects** \n1. Discord Bot\n2. PDC Application\n3. PDC Website\n4. Hacktober Practice\n5. Hacktober Website\n6. API"
]
core_team_1 = [
">>> **Core Team** \n1. Lead: Random Name\n2. Co Lead: Random Name\n3. Web Lead: Random Name"
]
help_data = [
">>> **Help Commands** \n\nThese are the available commands:\n\n1. `!pdc help` - Dailogue of all commands\n2. `!pdc info` - Gives info of bot\n3. `!pdc about` - Returns server information\n4. `!pdc discord` - Provides invitation link for the discord server\n5. `!pdc github` - Provides link to the github organisation\n6. `!pdc core team` - Returns current Core Member\n7. `!pdc projects` - Returns active projects\n8. `!pdc quote`s - Returns random quote\n9. `!pdc events` - Returns upcoming events\n10. `!pdc new-event` - Add new event\n11. `!pdc delete-event` - Delete an event\n12. `!pdc list-events` - List all events\n13. `!pdc event-syntax` - List all syntax for events command\n14. `!pdc meme` - Returns random meme\n15. `!pdc joke` - Returns random joke\n\n _Our bot is Open Source_"
]
event_syntax = [
    "`!pdc new-event | <event-title> | <event-date> | <event-time>`\n`!pdc delete event <index_value>`"
]
#Setting up function for Quotes
def get_quote():
response = requests.get(
"https://zenquotes.io/api/random") #API uses Random Quotes
json_data = json.loads(response.text)
quote = json_data[0]['q'] + " - " + json_data[0]['a']
return (quote)
#Setting up function for adding an event
def new_event(event_title, event_date, event_time):
new_event = event_title, event_date, event_time
if "events" in db.keys():
events = db["events"]
events.append(new_event)
db["events"] = events
else:
db["events"] = [(new_event)]
def remove_event(index):
events = db["events"]
if len(events) > index:
del events[index]
db["events"] = events
#Function to return random meme images URL
def random_meme():
url = "https://some-random-api.ml/meme"
response = urllib.request.urlopen(url)
data = json.loads(response.read())
path = data["image"]
return path
#Function to return random jokes
def random_joke():
url = "https://some-random-api.ml/joke"
response = urllib.request.urlopen(url)
data = json.loads(response.read())
joke = data["joke"]
return joke
#Creating Login message
@client.event
async def on_ready():
print('Bot is now live as {0.user}'.format(client) +
(' at PHP-DC Discord Server'))
@client.event
async def on_message(message):
#Variables Ease
msg = message.content
#Condition for self texting
if message.author == client.user:
return
#Condition help
if msg.startswith('!pdc help'):
await message.channel.send(''.join(help_data))
#Condition info
if msg.startswith('!pdc info'):
await message.channel.send('>>> PDC Bot v1.0.0')
#Condition about
if msg.startswith('!pdc about'):
await message.channel.send(
'>>> **About** \nPDC is an university based community group for students interested in computer technology. \nStudents from any undergraduate or graduate programs with an interest in growing as a developer can join. \nWe aim in growing knowledge in a peer-to-peer learning environment and build solutions for local businesses and community.'
)
#Condition discord
if msg.startswith('!pdc discord'):
await message.channel.send('https://discord.gg/Gbanp7fYCZ')
#Condition github
if msg.startswith('!pdc github'):
await message.channel.send('https://github.com/PH-DC')
#Condition core team
if msg.startswith('!pdc core team'):
await message.channel.send(''.join(core_team_1))
#Condition projects
if msg.startswith('!pdc projects'):
await message.channel.send(''.join(projects))
#Condition requesting Quotes
if msg.startswith('!pdc quote'):
quote = get_quote()
await message.channel.send('>>> ' + '_' + quote + '_')
#Condition for using bad words
if any(word in msg for word in bad_words):
await message.channel.send(random.choice(warning))
#Condition for using sad words
if any(word in msg for word in sad_words):
await message.channel.send(''.join(solution))
#Condition for adding an event
if msg.startswith("!pdc new-event"):
msg_array = msg.split("|")
event_title = msg_array[1]
event_date = msg_array[2]
event_time = msg_array[3]
new_event(event_title, event_date, event_time)
await message.channel.send(">>> New event added!")
#Condition to view all the events currently in the database
if msg.startswith("!pdc list-events"):
events = db["events"].value
for event_title, event_date, event_time in events:
await message.channel.send(" {} | {} | {} ".format(event_title, event_date, event_time))
#Condition for deleting events
if msg.startswith("!pdc delete event"):
index = int(msg.split("!pdc delete event",1)[1])
remove_event(index)
await message.channel.send(">>> Event Deleted")
#Condition to view all event related syntax
if msg.startswith("!pdc event-syntax"):
await message.channel.send('>>> '.join(event_syntax))
#Condition to return random meme
if msg.startswith('!pdc meme'):
meme = random_meme()
await message.channel.send(meme)
#Condition to return random jokes
if msg.startswith('!pdc joke'):
joke = random_joke()
await message.channel.send(">>> " + joke)
#Keep Alive
keep_alive()
client.run(os.getenv('botTOKEN'))
| 2.734375
| 3
|
tests/test_helper.py
|
JimBoonie/hydra
| 28
|
12784926
|
<filename>tests/test_helper.py
"""
Helper methods for unit testing.
"""
REPEAT = 100
| 1.15625
| 1
|
server/demon/matching.py
|
jphacks/TK_1814
| 1
|
12784927
|
import sys
sys.path.append('/home/kenta/pinky')
from itertools import groupby
import time
from database import session
from model import Motion, Promise
def run_loop():
while True:
filepath = '/home/kenta/pinky/demon/test.log'
log_file = open(filepath,'a')
matching()
try:
pass
# log_file.write(time.ctime()+"\n")
finally:
log_file.close()
time.sleep(5)
def matching():
all_motion = session.query(Motion).all()
user_motion = {}
delete_motion_list = []
all_motion.sort(key=lambda tmp_motion: tmp_motion.user_id)
for user_id, motions in groupby(all_motion, key=lambda tmp_motion: tmp_motion.user_id):
tmp_motion_list = []
for motion in motions:
tmp_motion_list.append(motion)
user_motion[user_id] = tmp_motion_list
user_id_list = []
print(user_motion)
for user_id in user_motion:
if len(user_motion[user_id]) >= 2:
delete_motion_list += user_motion[user_id]
user_id_list.append(user_id)
print(user_id_list)
print('delete_motion_list: ', delete_motion_list)
matching_results = []
for i in range(len(user_id_list) - 1):
firstA = user_motion[user_id_list[i]][0].created_at
lastA = user_motion[user_id_list[i]][1].created_at
for j in range(i + 1, len(user_id_list)):
firstB = user_motion[user_id_list[j]][0].created_at
lastB = user_motion[user_id_list[j]][1].created_at
if abs(firstA - firstB).total_seconds() <= 5 and abs(lastA - lastB).total_seconds() <= 5:
                # matching result
if user_motion[user_id_list[i]][0].promise_id is None:
matching_results.append({'promise_id': user_motion[user_id_list[j]][0].promise_id, 'slave_user_id': user_id_list[i]})
else:
matching_results.append({'promise_id': user_motion[user_id_list[i]][0].promise_id, 'slave_user_id': user_id_list[j]})
print(user_id_list[i], user_id_list[j])
print(matching_results)
updates = []
for result in matching_results:
promise = session.query(Promise).filter(Promise.id == result['promise_id']).one_or_none()
promise.slave_user_id = result['slave_user_id']
updates.append(promise)
session.bulk_save_objects(updates)
for motion in delete_motion_list:
print('*****************')
print(motion.created_at, motion.user_id)
print('*****************')
session.delete(motion)
session.commit()
session.close()
if __name__ == '__main__':
run_loop()
| 2.34375
| 2
|
tests/snippets/type_hints.py
|
janczer/RustPython
| 0
|
12784928
|
# See also: https://github.com/RustPython/RustPython/issues/587
def curry(foo: int) -> float:
return foo * 3.1415926 * 2
assert curry(2) > 10
| 2.28125
| 2
|
lib/gstreamer/util.py
|
yefengxx/vaapi-fits
| 0
|
12784929
|
<reponame>yefengxx/vaapi-fits
###
### Copyright (C) 2021 Intel Corporation
###
### SPDX-License-Identifier: BSD-3-Clause
###
from ...lib.common import memoize, try_call
@memoize
def have_gst():
return try_call("which gst-launch-1.0") and try_call("which gst-inspect-1.0")
@memoize
def have_gst_element(element):
result = try_call("gst-inspect-1.0 {}".format(element))
return result, element
| 1.976563
| 2
|
mudi/dispatcher.py
|
daturkel/mudi
| 0
|
12784930
|
<filename>mudi/dispatcher.py
import logging
from pathlib import Path
import watchgod
from .site import Site
from .utils import rel_name
from .watcher import MudiWatcher
class MudiDispatcher:
def __init__(self, site: Site):
self.site = site
def _dispatch(self, change_type: watchgod.Change, path: Path):
if self.site.template_dir in path.parents:
self._dispatch_template(change_type, path)
elif self.site.sass_in in path.parents:
self._dispatch_sass(change_type, path)
elif self.site.content_dir in path.parents:
if path.suffix in [".html", ".md"]:
self._dispatch_page(change_type, path)
else:
self._dispatch_file(change_type, path)
def _dispatch_template(self, change_type: watchgod.Change, path: Path):
logging.info("reinitializing jinja")
self.site._get_jinja_env()
self.site.render_all_pages()
def _dispatch_sass(self, change_type: watchgod.Change, path: Path):
self.site.compile_sass()
def _dispatch_page(self, change_type: watchgod.Change, path: Path):
if change_type.name == "added":
self.site.add_page_from_file(path)
self.site.render_all_pages()
elif change_type.name == "modified":
self.site.remove_page_from_file(path)
self.site.add_page_from_file(path)
self.site.render_all_pages()
elif change_type.name == "deleted":
self.site.remove_page_from_file(path)
self.site.render_all_pages()
def _dispatch_file(self, change_type: watchgod.Change, path: Path):
path = rel_name(path, self.site.content_dir)
if change_type.name in ["added", "modified"]:
self.site.copy_file(path)
else:
self.site.delete_file(path)
def watch(self):
for changes in watchgod.watch(
".", watcher_cls=MudiWatcher, watcher_kwargs={"site": self.site}
):
for change_type, path in changes:
logging.info(f"{path} {change_type.name}")
path = Path(path)
self._dispatch(change_type, path)
| 2.109375
| 2
|
ports/core/port.py
|
yzgyyang/portcran
| 1
|
12784931
|
"""Classes describing a FreeBSD Port and the various structures."""
from abc import ABCMeta, abstractmethod
from io import StringIO
from itertools import groupby
from math import ceil, floor
from pathlib import Path
from typing import (Any, Callable, Dict, Generic, IO, Iterable, Iterator, List, Optional, Set, Tuple, TypeVar, Union,
cast)
from .dependency import Dependency
from .make import MakeDict, make, make_vars
from .platform import Platform
from .uses import Uses
from ..utilities import Orderable
__all__ = ["Port", "PortError", "PortStub"]
T = TypeVar("T", covariant=True) # pylint: disable=C0103
def peek(file: IO[Any], length: int) -> str:
pos = file.tell()
value = file.read(length)
file.seek(pos)
return value
class PortValue(Orderable, Generic[T], metaclass=ABCMeta): # pylint: disable=E1136
def __init__(self, section: int, order: int = 1) -> None:
super().__init__()
self.order = order
self.section = section
@abstractmethod
def __get__(self, instance: "Port", owner: type) -> T:
raise NotImplementedError()
@property
def _key(self) -> Tuple[int, int]:
return self.section, self.order
@abstractmethod
def generate(self, value: Union[str, List[str], "PortObject"]) -> Iterable[Tuple[str, Iterable[str]]]:
raise NotImplementedError()
@abstractmethod
def load(self, obj: "Port", variables: MakeDict) -> None:
raise NotImplementedError()
class PortVar(PortValue[Optional[str]]): # pylint: disable=E1136
def __init__(self, section: int, order: int, name: str) -> None:
super().__init__(section, order)
self.name = name
def __delete__(self, instance: "Port") -> None:
instance.del_value(self)
def __get__(self, instance: "Port", owner: type) -> Optional[str]:
value = instance.uses.get_variable(self.name)
if value is None:
if instance.has_value(self):
return cast(str, instance.get_value(self))
return None
else:
assert len(value) == 1 and isinstance(value[0], str)
return value[0]
def __set__(self, obj: "Port", value: str) -> None:
obj.set_value(self, value)
def generate(self, value: Union[str, List[str], "PortObject"]) -> Iterable[Tuple[str, Iterable[str]]]:
assert isinstance(value, str)
return (self.name, (value,)),
def load(self, obj: "Port", variables: MakeDict) -> None:
if self.name in variables:
value = variables.pop_value(self.name, combine=True)
assert value is not None
self.__set__(obj, value)
class PortVarList(PortValue[List[str]]): # pylint: disable=E1136
def __init__(self, section: int, order: int, name: str) -> None:
super().__init__(section, order)
self._setter: Callable[[Port, List[str]], List[str]] = lambda x, y: y
self.name = name
def __get__(self, instance: "Port", owner: type) -> List[str]:
value = instance.uses.get_variable(self.name)
if value is None:
if not instance.has_value(self):
self.__set__(instance, [])
value = cast(List[str], instance.get_value(self))
assert isinstance(value, list)
return value
def __set__(self, obj: "Port", value: List[str]) -> None:
obj.set_value(self, self._setter(obj, value))
def generate(self, value: Union[str, List[str], "PortObject"]) -> Iterable[Tuple[str, Iterable[str]]]:
assert isinstance(value, list)
return (self.name, value),
def load(self, obj: "Port", variables: MakeDict) -> None:
if self.name in variables:
self.__set__(obj, variables.pop(self.name))
def setter(self, setter: Callable[["Port", List[str]], List[str]]) -> "PortVarList":
self._setter = setter
return self
class PortObject(object, metaclass=ABCMeta): # pylint: disable=E1136
@abstractmethod
def generate(self) -> Iterable[Tuple[str, Iterable[str]]]:
raise NotImplementedError()
@abstractmethod
def load(self, variables: MakeDict) -> None:
raise NotImplementedError()
T2 = TypeVar("T2", bound=PortObject)
class PortObj(PortValue[T2]): # pylint: disable=E1136
def __init__(self, section: int, factory: Callable[[], T2]) -> None:
super().__init__(section)
self.factory = factory
def __get__(self, instance: "Port", owner: type) -> T2:
if not instance.has_value(self):
instance.set_value(self, self.factory())
return cast(T2, instance.get_value(self))
def generate(self, value: Union[str, List[str], PortObject]) -> Iterable[Tuple[str, Iterable[str]]]:
# pylint: disable=no-self-use
return cast(T2, value).generate()
def load(self, obj: "Port", variables: MakeDict) -> None:
self.__get__(obj, Port).load(variables)
class PortLicense(PortObject, Iterable[str]):
def __init__(self) -> None:
super().__init__()
self._licenses: Set[str] = set()
self.combination: Optional[str] = None
self.file: Optional[str] = None
def __iter__(self) -> Iterator[str]:
return iter(self._licenses)
def add(self, license_type: str) -> "PortLicense":
self._licenses.add(license_type)
return self
def generate(self) -> Iterable[Tuple[str, Iterable[str]]]:
yield ("LICENSE", sorted(self._licenses))
if self.combination is not None:
yield ("LICENSE_COMB", (self.combination,))
if self.file is not None:
yield ("LICENSE_FILE", (self.file,))
def load(self, variables: MakeDict) -> None:
if "LICENSE" in variables:
for license_type in variables.pop("LICENSE"):
self.add(license_type)
self.combination = variables.pop_value("LICENSE_COMB", default=None)
self.file = variables.pop_value("LICENSE_FILE", default=None)
class PortDepends(PortObject):
# pylint: disable=too-few-public-methods
class Collection(object):
def __init__(self, name: str) -> None:
self.name = name
self._depends: List[Dependency] = []
def __iter__(self) -> Iterator[Dependency]:
return iter(self._depends)
def add(self, dependency: Dependency) -> None:
if dependency not in self._depends:
self._depends.append(dependency)
else:
raise KeyError("%s: dependency '%s' already registered" % (self.name, dependency))
def __init__(self) -> None:
super().__init__()
self._depends: List[PortDepends.Collection] = []
self.build = self._make_depends("BUILD_DEPENDS")
self.lib = self._make_depends("LIB_DEPENDS")
self.run = self._make_depends("RUN_DEPENDS")
self.test = self._make_depends("TEST_DEPENDS")
    def _make_depends(self, name: str) -> "PortDepends.Collection":
depends = PortDepends.Collection(name)
self._depends.append(depends)
return depends
def generate(self) -> Iterable[Tuple[str, Iterable[str]]]:
return ((i.name, (str(d) + "\n" for d in sorted(i))) for i in self._depends if any(i))
def load(self, variables: MakeDict) -> None:
for depends in self._depends:
for depend in variables.pop(depends.name, default=[]):
depends.add(Dependency.create(depend))
class PortBroken(PortObject):
class Category(object):
        def __init__(self, arch: Optional[str] = None, opsys: Optional[str] = None, osrel: Optional[str] = None) -> None:
self.arch = arch
self.opsys = opsys
self.osrel = osrel
def __eq__(self, other: object) -> bool:
if isinstance(other, PortBroken.Category):
return self.arch == other.arch and self.opsys == other.opsys and self.osrel == other.osrel
return False
def __hash__(self) -> int:
return hash(str(self))
def __str__(self) -> str:
subcat: List[str] = []
if self.opsys is not None:
subcat.append(self.opsys)
if self.osrel is not None:
subcat.append(self.osrel)
if self.arch is not None:
subcat.append(self.arch)
elif self.arch is not None:
subcat.append(self.arch)
if subcat:
return "BROKEN_" + "_".join(subcat)
else:
return "BROKEN"
@staticmethod
def create(makevar: str) -> "PortBroken.Category":
subcat = makevar.split("_")[1:]
arch = None
opsys = None
osrel = None
if len(subcat) > 1:
opsys = subcat[0]
osrel = subcat[1]
if len(subcat) == 3:
arch = subcat[2]
elif len(subcat) == 1:
if subcat[0] == "FreeBSD":
opsys = subcat[0]
else:
arch = subcat[0]
return PortBroken.Category(arch, opsys, osrel)
def __init__(self) -> None:
super().__init__()
self.reasons: Dict[PortBroken.Category, str] = {}
def generate(self) -> Iterable[Tuple[str, Iterable[str]]]:
broken: Dict[str, str] = {}
for category, reason in self.reasons.items():
broken[str(category)] = reason
for category_name in sorted(broken.keys()):
yield (category_name, (broken[category_name],))
def load(self, variables: MakeDict) -> None:
for variable in variables.variables:
if variable.startswith("BROKEN"):
self.reasons[PortBroken.Category.create(variable)] = " ".join(variables.pop(variable))
class PortUses(PortObject):
def __init__(self) -> None:
super().__init__()
self._uses: Dict[type, Uses] = {}
def __contains__(self, item: Union[type, str]) -> bool:
if isinstance(item, str):
item = Uses.get(item)
return item in self._uses
def __getitem__(self, item: Union[type, str]) -> Uses:
if isinstance(item, str):
item = Uses.get(item)
if item not in self._uses:
self._uses[item] = item()
return self._uses[item]
def get_variable(self, name: str) -> Optional[List[str]]:
values = [v for v in (u.get_variable(name) for u in list(self._uses.values())) if v is not None]
if len(values) > 1:
raise PortError("PortUses: multiple uses define value for variable '%s'" % name)
return values[0] if values else None
def generate(self) -> Iterable[Tuple[str, Iterable[str]]]:
yield ("USES", (str(u) for u in sorted(self._uses.values())))
for uses in sorted(self._uses.values()):
yield from uses.generate()
def load(self, variables: MakeDict) -> None:
for use in variables.pop("USES", default=[]):
uses_var = use.split(":")
assert 1 <= len(uses_var) <= 2
name = uses_var[0]
args = uses_var[1].split(",") if len(uses_var) == 2 else []
uses = self[name]
for arg in args:
uses.add(arg)
uses.load(variables)
class PortError(Exception):
pass
class PortStub(object):
def __init__(self, category: str, name: str, portdir: Optional[Path] = None) -> None:
self.category = category
self.name = name
self._portdir = portdir
def __repr__(self) -> str:
return "<Port: %s>" % self.origin
@property
def portdir(self) -> Path:
if self._portdir is None:
from ports.core.ports import Ports
return Ports.dir / self.category / self.name
return self._portdir
@property
def origin(self) -> str:
return "%s/%s" % (self.category, self.name)
class Port(PortStub):
portname = PortVar(1, 1, "PORTNAME")
portversion = PortVar(1, 2, "PORTVERSION")
distversion = PortVar(1, 4, "DISTVERSION")
portrevision = PortVar(1, 6, "PORTREVISION")
categories = PortVarList(1, 8, "CATEGORIES")
pkgnameprefix = PortVar(1, 12, "PKGNAMEPREFIX")
distname = PortVar(1, 14, "DISTNAME")
maintainer = PortVar(2, 1, "MAINTAINER")
comment = PortVar(2, 2, "COMMENT")
license = PortObj(3, PortLicense)
depends = PortObj(4, PortDepends)
broken = PortObj(5, PortBroken)
uses = PortObj(6, PortUses)
no_arch = PortVar(7, 1, "NO_ARCH")
def __init__(self, category: str, name: str, portdir: Optional[Path]) -> None:
self._values: Dict[PortValue, Union[str, List[str], PortObject]] = {}
self.categories = [category]
super().__init__(category, name, portdir)
self.changelog: Dict[str, List[str]] = {}
self.maintainer = Platform.address
self.portname = name
self.description: Optional[str] = None
self.website: Optional[str] = None
@property # type: ignore
def category(self) -> str: # type: ignore
return self.categories[0]
@category.setter
def category(self, value: str) -> None: # type: ignore
categories = self.categories
if value in categories:
categories.remove(value)
self.categories = [value] + categories
@categories.setter
def categories(self, categories: List[str]) -> List[str]:
if not categories:
raise PortError("Port: invalid categories, must start with: %s" % self.category)
return categories
@property
def descr(self) -> Path:
return self.portdir / "pkg-descr"
@property
def pkgname(self) -> str:
return "%s%s" % (self.pkgnameprefix or "", self.portname)
@property
def version(self) -> str:
if self.distversion is not None:
return self.distversion
assert self.portversion is not None
return self.portversion
@staticmethod
def _gen_footer(makefile: StringIO) -> None:
makefile.write("\n.include <bsd.port.mk>\n")
def _gen_header(self, makefile: StringIO) -> None:
port_makefile = self.portdir / "Makefile"
metadata: List[str] = []
if port_makefile.exists():
            with port_makefile.open("r") as makefile_file:  # universal newlines are the default in Python 3; "rU" was removed in 3.11
for line in iter(makefile_file.readline, ""):
if line.startswith("# Created by") or line.startswith("# $FreeBSD"):
metadata.append(line)
if peek(makefile_file, 1) != "#":
break
else:
metadata.append("# $FreeBSD$\n")
makefile.writelines(metadata)
def _gen_sections(self, makefile: StringIO) -> None:
for _, items in groupby(sorted(list(self._values.items()), key=lambda k: k[0]), lambda k: k[0].section):
values = [j for i in items for j in i[0].generate(i[1])]
if not values:
continue
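            # Align every value in this section on a common tab stop: at least two
            # tabs, and enough to clear the longest "NAME=" prefix.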
            tabs = max(2, int(ceil((max(len(n[0]) for n in values) + 1.0) / Platform.tab_width)))
makefile.write("\n")
for name, value in values:
needed_tabs = tabs - int(floor((len(name) + 1.0) / Platform.tab_width))
makefile.write("%s=%s" % (name, "\t" * needed_tabs))
width = tabs * Platform.tab_width
first_line = True
for i in value:
next_line = i[-1] == "\n"
i = i.rstrip("\n")
if not first_line:
if width == -1 or width + len(i) + 1 > Platform.page_width:
makefile.write(" \\\n%s" % ("\t" * tabs))
width = tabs * Platform.tab_width
else:
makefile.write(" ")
width += 1
first_line = False
makefile.write(i)
if next_line:
width = -1
else:
width += len(i)
makefile.write("\n")
def _gen_distinfo(self) -> None:
make(self.portdir, 'makesum')
def _gen_descr(self) -> None:
if self.description is None:
if self.descr.exists():
self.descr.unlink()
else:
with self.descr.open("w") as descr:
width = 0
for word in self.description.split():
next_line = word[-1] == "\n"
word = word.rstrip("\n")
if width == -1 or width + len(word) + 1 > 79:
descr.write("\n")
width = 0
elif width:
descr.write(" ")
width += 1
descr.write(word)
if next_line:
width = -1
else:
width += len(word)
descr.write("\n")
if self.website is not None:
descr.write("\nWWW: %s\n" % self.website)
def _gen_plist(self) -> None:
raise NotImplementedError("Generic Port does not know how to create pkg-plist")
def generate(self) -> None:
makefile = StringIO()
self._gen_header(makefile)
self._gen_sections(makefile)
self._gen_footer(makefile)
with open(self.portdir / "Makefile", "w") as portmakefile:
portmakefile.write(makefile.getvalue())
self._gen_distinfo()
self._gen_descr()
self._gen_plist()
def load(self) -> None:
variables = make_vars(self.portdir)
bases = [type(self)]
i = 0
while i < len(bases):
bases.extend(j for j in bases[i].__bases__ if j not in bases)
for var in list(vars(bases[i]).values()):
if isinstance(var, PortValue):
var.load(self, variables)
i += 1
if not variables.all_popped:
# TODO: remove once all R-cran ports have been verified
print("Unloaded variables for %s:" % self.name, variables)
assert variables.all_popped
if self.descr.exists():
with self.descr.open() as descr:
lines = descr.readlines()
if lines[-1].startswith("WWW"):
self.website = lines[-1].split()[1]
lines.pop()
if lines[-1] == "\n":
lines.pop()
self.description = " ".join(l.strip() for l in lines)
def del_value(self, port_value: PortValue) -> None:
if port_value in self._values:
del self._values[port_value]
def get_value(self, port_value: PortValue) -> Union[str, List[str], PortObject]:
return self._values[port_value]
def has_value(self, port_value: PortValue) -> bool:
return port_value in self._values
def set_value(self, port_value: PortValue, value: Union[str, List[str], PortObject]) -> None:
self._values[port_value] = value
| 2.453125
| 2
|
jumpscale/tools/wireguard/__init__.py
|
zaibon/js-sdk
| 13
|
12784932
|
<reponame>zaibon/js-sdk
import binascii
from nacl import public
from nacl.encoding import Base64Encoder
from nacl.signing import VerifyKey
def generate_zos_keys(node_public_key):
"""Generate a new set of wireguard key pair and encrypt
the private side using the public key of a 0-OS node.
Args:
node_public_key (str): hex encoded public key of 0-OS node.
This is the format you find in the explorer
Returns:
tuple: tuple containing 3 fields (private key, private key encrypted, public key)
"""
wg_private = public.PrivateKey.generate()
wg_public = wg_private.public_key
wg_private_base64 = wg_private.encode(Base64Encoder)
wg_public_base64 = wg_public.encode(Base64Encoder)
node_public_bin = binascii.unhexlify(node_public_key)
node_public = VerifyKey(node_public_bin)
box = public.SealedBox(node_public.to_curve25519_public_key())
wg_private_encrypted = box.encrypt(wg_private_base64)
wg_private_encrypted_hex = binascii.hexlify(wg_private_encrypted)
return (wg_private_base64.decode(), wg_private_encrypted_hex.decode(), wg_public_base64.decode())
def generate_key_pair():
wg_private = public.PrivateKey.generate()
wg_public = wg_private.public_key
wg_private_base64 = wg_private.encode(Base64Encoder)
wg_public_base64 = wg_public.encode(Base64Encoder)
return wg_private_base64, wg_public_base64
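

# Usage sketch (illustrative; the node key below is a made-up placeholder, not a real 0-OS key):
#
#   wg_private, wg_private_encrypted, wg_public = generate_zos_keys("f0e1d2c3...")
#   # only the 0-OS node can decrypt `wg_private_encrypted`, using its own private key
#
#   private_b64, public_b64 = generate_key_pair()  # plain key pair, nothing encrypted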
| 2.21875
| 2
|
6 kyu/Madhav array.py
|
mwk0408/codewars_solutions
| 6
|
12784933
|
def is_madhav_array(arr):
    if len(arr) <= 1:
        return False
    low = 1
    high = 3
    increment = 3
    compare = arr[0]
    while high <= len(arr):
        if sum(arr[low:high]) != compare:
            return False
        low, high = high, high + increment
        increment += 1
    return (high - increment + 1) == len(arr)
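

# Quick check (illustrative): a Madhav array satisfies
# arr[0] == arr[1] + arr[2] == arr[3] + arr[4] + arr[5] == ...
assert is_madhav_array([6, 2, 4, 2, 2, 2, 1, 5, 0, 0])
assert not is_madhav_array([6, 2, 4, 2, 2, 2, 1])  # length is not a triangular number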
| 3.609375
| 4
|
stacks/constructs/api.py
|
juliuskrahn-com/backend
| 0
|
12784934
|
<reponame>juliuskrahn-com/backend
import aws_cdk.core
import aws_cdk.aws_apigateway as apigw
import aws_cdk.aws_lambda as lambda_
import aws_cdk.aws_dynamodb as dynamodb
import aws_cdk.aws_secretsmanager as sm
import aws_cdk.aws_logs as logs
from .. import Environment
from stacks.stack_utils import to_camel_case
class Api(aws_cdk.core.Construct):
def __init__(
self, scope: aws_cdk.core.Construct,
construct_id: str,
environment: object = Environment.PRODUCTION):
super().__init__(scope, construct_id)
# Integration dependencies
# have to be created before creating the RestApi
# because the RestApi 'default integration' also has to be created at this time
#
# (applied to integrations auto., accessed by integration construct via scope)
self.table_article_name = construct_id + "Article"
self.table_comment_name = construct_id + "Comment"
self.lambda_layers = [
lambda_.LayerVersion(
self,
"MiddlewareLayer",
code=lambda_.Code.from_asset("build/middleware_layer"),
compatible_runtimes=[lambda_.Runtime.PYTHON_3_8]
),
lambda_.LayerVersion(
self,
"VendorLayer",
code=lambda_.Code.from_asset("build/vendor_layer"),
compatible_runtimes=[lambda_.Runtime.PYTHON_3_8]
)
]
self.secret_admin_key = sm.Secret.from_secret_arn( # read access granted in integration construct
self,
"blog-backend-admin-key",
"arn:aws:secretsmanager:us-east-1:473883619336:secret:blog-backend-admin-key-bctwKR"
)
# (applied to integrations individually)
table_article = dynamodb.Table(
self,
"ArticleTable",
table_name=self.table_article_name,
partition_key=dynamodb.Attribute(name="urlTitle", type=dynamodb.AttributeType.STRING),
billing_mode=dynamodb.BillingMode.PAY_PER_REQUEST,
removal_policy=aws_cdk.core.RemovalPolicy.RETAIN if environment is Environment.PRODUCTION
else aws_cdk.core.RemovalPolicy.DESTROY,
point_in_time_recovery=environment is Environment.PRODUCTION
)
table_article.add_global_secondary_index(
index_name="tagIndex",
partition_key=dynamodb.Attribute(name="tag", type=dynamodb.AttributeType.STRING),
sort_key=dynamodb.Attribute(name="published", type=dynamodb.AttributeType.STRING),
projection_type=dynamodb.ProjectionType.INCLUDE,
non_key_attributes=["urlTitle", "title", "description"]
)
table_comment = dynamodb.Table(
self,
"CommentTable",
table_name=self.table_comment_name,
partition_key=dynamodb.Attribute(name="articleUrlTitle", type=dynamodb.AttributeType.STRING),
sort_key=dynamodb.Attribute(name="id", type=dynamodb.AttributeType.STRING),
billing_mode=dynamodb.BillingMode.PAY_PER_REQUEST,
removal_policy=aws_cdk.core.RemovalPolicy.RETAIN if environment is Environment.PRODUCTION
else aws_cdk.core.RemovalPolicy.DESTROY,
)
# RestApi
self.instance = apigw.RestApi(
self,
construct_id + "I", # I -> Instance
default_cors_preflight_options=apigw.CorsOptions(
allow_origins=apigw.Cors.ALL_ORIGINS,
allow_methods=apigw.Cors.ALL_METHODS,
),
default_integration=APIIntegration(self, "default"),
deploy_options=apigw.StageOptions(
throttling_rate_limit=256,
throttling_burst_limit=64, # concurrent
),
endpoint_configuration=apigw.EndpointConfiguration(types=[apigw.EndpointType.REGIONAL]),
)
# Endpoints
self.instance.root.add_proxy()
# /article
resource_article_collection = self.instance.root.add_resource(path_part="article")
integration_article_get_collection = APIIntegration(self, "article_get_collection")
table_article.grant_read_data(integration_article_get_collection.lambda_function)
resource_article_collection.add_method("GET", integration=integration_article_get_collection)
integration_article_create = APIIntegration(self, "article_create")
table_article.grant_read_write_data(integration_article_create.lambda_function)
resource_article_collection.add_method("POST", integration=integration_article_create)
# /article/{}
resource_article = resource_article_collection.add_resource(path_part="{articleUrlTitle}")
integration_article_get = APIIntegration(self, "article_get")
table_article.grant_read_data(integration_article_get.lambda_function)
resource_article.add_method("GET", integration=integration_article_get)
integration_article_update = APIIntegration(self, "article_update")
table_article.grant_read_write_data(integration_article_update.lambda_function)
resource_article.add_method("PATCH", integration=integration_article_update)
integration_article_delete = APIIntegration(self, "article_delete")
table_article.grant_read_write_data(integration_article_delete.lambda_function)
resource_article.add_method("DELETE", integration=integration_article_delete)
# /article/{}/comments
resource_article_comment_collection = resource_article.add_resource(path_part="comments")
integration_comment_get_collection = APIIntegration(self, "comment_get_collection")
table_comment.grant_read_data(integration_comment_get_collection.lambda_function)
resource_article_comment_collection.add_method("GET", integration=integration_comment_get_collection)
integration_comment_create = APIIntegration(self, "comment_create")
table_comment.grant_read_write_data(integration_comment_create.lambda_function)
resource_article_comment_collection.add_method("POST", integration=integration_comment_create)
# /article/{}/comments/{}
resource_article_comment = resource_article_comment_collection.add_resource(path_part="{commentId}")
integration_comment_delete = APIIntegration(self, "comment_delete")
table_comment.grant_read_write_data(integration_comment_delete.lambda_function)
resource_article_comment.add_method("DELETE", integration=integration_comment_delete)
# /article/{}/comments/{}/resps
resource_article_comment_resp_collection = resource_article_comment.add_resource(path_part="resps")
integration_resp_create = APIIntegration(self, "resp_create")
table_comment.grant_read_write_data(integration_resp_create.lambda_function)
resource_article_comment_resp_collection.add_method("POST", integration=integration_resp_create)
# /article/{}/comments/{}/resps/{}
resource_article_comment_resp = resource_article_comment_resp_collection.add_resource(path_part="{respId}")
integration_resp_delete = APIIntegration(self, "resp_delete")
table_comment.grant_read_write_data(integration_resp_delete.lambda_function)
resource_article_comment_resp.add_method("DELETE", integration=integration_resp_delete)
# /tag
resource_tag_collection = self.instance.root.add_resource(path_part="tag")
integration_tag_get_collection = APIIntegration(self, "tag_get_collection")
table_article.grant_read_data(integration_tag_get_collection.lambda_function)
resource_tag_collection.add_method("GET", integration=integration_tag_get_collection)
# /tag/{}
resource_tag = resource_tag_collection.add_resource(path_part="{tagName}")
integration_tag_get_article_collection = APIIntegration(self, "tag_get_article_collection")
table_article.grant_read_data(integration_tag_get_article_collection.lambda_function)
        resource_tag.add_method("GET", integration=integration_tag_get_article_collection)
# /admin-login
resource_admin_login = self.instance.root.add_resource(path_part="admin-login")
integration_admin_login = APIIntegration(self, "admin_login")
resource_admin_login.add_method("POST", integration=integration_admin_login)
class APIIntegration(apigw.LambdaIntegration):
def __init__(self, scope: Api, name: str):
self.lambda_function = lambda_.Function(
scope,
f"{to_camel_case(name)}Fn",
runtime=lambda_.Runtime.PYTHON_3_8,
handler=f"lambda_function.handler",
code=lambda_.Code.from_asset(f"backend/lambda_functions/{name}"),
environment={
"ArticleTableName": scope.table_article_name,
"CommentTableName": scope.table_comment_name
},
memory_size=256,
log_retention=logs.RetentionDays.FIVE_DAYS,
layers=scope.lambda_layers
)
super().__init__(
handler=self.lambda_function
)
scope.secret_admin_key.grant_read(self.lambda_function)
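

# Usage sketch (hypothetical wiring, not part of this module; assumes CDK v1 `core`):
#
#   app = aws_cdk.core.App()
#   stack = aws_cdk.core.Stack(app, "BlogBackendStack")
#   Api(stack, "BlogApi", environment=Environment.PRODUCTION)
#   app.synth()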
| 1.851563
| 2
|
webapp/main/migrations/0009_auto_20140321_1349.py
|
joepetrini/bike-counter
| 5
|
12784935
|
<filename>webapp/main/migrations/0009_auto_20140321_1349.py
# encoding: utf8
from django.db import models, migrations
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
dependencies = [
('main', '0008_survey_value'),
]
operations = [
migrations.CreateModel(
name='SurveyValue',
fields=[
(u'id', models.AutoField(verbose_name=u'ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name=u'created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name=u'modified', editable=False)),
('survey', models.ForeignKey(to='main.Survey', to_field=u'id')),
('metric', models.ForeignKey(to='main.Metric', to_field=u'id')),
('value', models.ForeignKey(to='main.Value', to_field=u'id')),
],
options={
u'unique_together': set([('survey', 'metric')]),
u'db_table': 'survey_value',
},
bases=(models.Model,),
),
migrations.AddField(
model_name='location',
name='type',
field=models.CharField(default='intersection', max_length=20, choices=[('intersection', 'Intersection'), ('trail', 'Trail'), ('bridge', 'Bridge')]),
preserve_default=True,
),
]
| 1.710938
| 2
|
tests/test_extractor.py
|
Edinburgh-Genome-Foundry/easy_dna
| 6
|
12784936
|
import os
import pytest
import numpy as np
import easy_dna as dna
def test_extract_from_input(tmpdir):
parts = []
for i in range(10):
part_id = "part_%s" % ("ABCDEFGHAB"[i]) # id is nonunique on purpose
alias = "part_%d" % i # alias is unique
part_length = np.random.randint(1000, 1500)
sequence = dna.random_dna_sequence(part_length)
record = dna.sequence_to_biopython_record(sequence, id=part_id)
record.name = part_id
dna.annotate_record(record, label=part_id, alias=alias)
parts.append(record)
constructs = []
for position_of_last_part in [8, 10]:
        # 8: parts A-H; 10: parts A-H and A, B again
construct_record = sum(parts[1:position_of_last_part], parts[0])
construct_record.id = "construct_%02d" % (position_of_last_part)
construct_record.name = construct_record.id
constructs.append(construct_record)
target_dir = os.path.join(str(tmpdir), "test_dir")
records_dict = dna.extract_from_input(
construct_list=constructs, output_path=target_dir
)
assert records_dict["processed_report"]["shared_with"].count() == 16
with pytest.raises(TypeError):
dna.extract_from_input(output_path=target_dir)
| 2.1875
| 2
|
backend/apps/workOrder/migrations/0006_auto_20211130_0131.py
|
jorgejimenez98/auditorio-django-react
| 0
|
12784937
|
# Generated by Django 3.2.7 on 2021-11-30 00:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('workOrder', '0005_remove_workorder_forg'),
]
operations = [
migrations.AddField(
model_name='workorder',
name='FORG',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='workorder',
name='auditType',
field=models.CharField(default='', max_length=256),
),
]
| 1.53125
| 2
|
benchmarks/query_benchmarks/query_delete_related/benchmark.py
|
deepakdinesh1123/actions
| 0
|
12784938
|
from ...utils import bench_setup
from .models import Artist, Song
class QueryDeleteRel:
def setup(self):
bench_setup(migrate=True)
self.a1 = Artist.objects.create(name="abc")
self.a2 = Artist.objects.create(name="abc")
self.a3 = Artist.objects.create(name="abc")
self.a4 = Artist.objects.create(name="abc")
self.a5 = Artist.objects.create(name="abc")
self.a6 = Artist.objects.create(name="abc")
self.a7 = Artist.objects.create(name="abc")
self.a8 = Artist.objects.create(name="abc")
self.a9 = Artist.objects.create(name="abc")
self.a10 = Artist.objects.create(name="abc")
for i in range(10):
Song.objects.create(artist=self.a1, name=f"song{i}")
Song.objects.create(artist=self.a2, name=f"song{i}")
Song.objects.create(artist=self.a3, name=f"song{i}")
Song.objects.create(artist=self.a4, name=f"song{i}")
Song.objects.create(artist=self.a5, name=f"song{i}")
Song.objects.create(artist=self.a6, name=f"song{i}")
Song.objects.create(artist=self.a7, name=f"song{i}")
Song.objects.create(artist=self.a8, name=f"song{i}")
Song.objects.create(artist=self.a9, name=f"song{i}")
Song.objects.create(artist=self.a10, name=f"song{i}")
def time_query_del_rel(self):
Artist.objects.all().delete()
| 2.515625
| 3
|
src/posts/migrations/0003_auto_20170327_1306.py
|
ChrisMunene/Blog
| 0
|
12784939
|
<filename>src/posts/migrations/0003_auto_20170327_1306.py
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-03-27 10:06
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('posts', '0002_auto_20170326_1643'),
]
operations = [
migrations.AlterField(
model_name='post',
name='publish',
field=models.DateField(default=datetime.datetime(2017, 3, 27, 10, 6, 44, 221000, tzinfo=utc)),
),
]
| 1.539063
| 2
|
__init__.py
|
daqbroker/daqbrokerServer
| 0
|
12784940
|
from daqbrokerServer.daqbroker import Server
| 1.054688
| 1
|
examples/text_scroll.py
|
paddywwoof/python-sense-hat
| 104
|
12784941
|
<reponame>paddywwoof/python-sense-hat<gh_stars>100-1000
#!/usr/bin/python
from sense_hat import SenseHat
sense = SenseHat()
sense.set_rotation(180)
red = (255, 0, 0)
sense.show_message("One small step for Pi!", text_colour=red)
| 1.992188
| 2
|
python/athena/__init__.py
|
sj1104/Het
| 2
|
12784942
|
<gh_stars>1-10
from __future__ import absolute_import
from .gpu_links import *
from .gpu_ops import *
from .stream import *
| 1.078125
| 1
|
blog/admin.py
|
abhishekBhartiProjects/learnLanguageDjango
| 0
|
12784943
|
from django.contrib import admin
# Register your models here.
from blog.models import Post
admin.site.register(Post)
| 1.289063
| 1
|
pyresourcepool/pyresourcepool.py
|
bensonrodney/pyresourcepool
| 0
|
12784944
|
#!/usr/bin/env python3
""" Basic python object resource pool.
"""
import copy
import time
import traceback
from threading import RLock, Thread
from contextlib import contextmanager
# Callback attribute name when adding a return callback to an object
CALLBACK_ATTRIBUTE = 'resource_pool_return_callback'
class AllResourcesRemoved(Exception):
""" Raised when all recources in the pool have been removed.
"""
class ObjectAlreadyInPool(Exception):
""" Raised when adding an object that is already in the pool.
"""
class ObjectNotInPool(Exception):
""" Raise when operations are performed for an object that is
not part of the resource pool.
"""
class ResourcePool(object):
def __init__(self, objects, return_callback=None):
"""
Instantiate with a list of objects you want in the resource pool.
'return_callback' is a function or method that can be used to
perform some action on an object before it is returned to the
pool but without making the process that returned the object
needing to wait for that function to be run.
        This is useful for performing a time consuming "factory reset"
(or similar) on an object before it is returned to the pool but
without holding up the process that used the resource.
The callback function, if specified should just take the object as an
argument and success is measured by no exceptions being raised. If
an exception is raised by the callback then the object will be removed
from the pool rather than being returned as an available resource.
"""
# used to track the original pool of resources, not used yet
self._objects = objects
self._removed = {}
for o in self._objects:
self._removed[id(o)] = False
# create another list with the same object references:
# copy.copy() only copies the references so the two lists are
# separate lists that point to the same objects
self._available = copy.copy(objects)
self._lock = RLock()
self._return_callback = return_callback
def all_removed(self):
return all(self._removed[id(o)] for o in self._objects)
def _get_active(self):
""" returns the list of objects that haven't been removed """
return [o for o in self._objects if not self._removed[id(o)]]
active = property(_get_active)
def _get_active_size(self):
return len(self.active)
active_size = property(_get_active_size)
def add(self, obj):
"""
Adds new objects to the pool, 'obj' can be a single object or a list of
objects and new objects are added to the end of the available resources.
"""
if type(obj) is not list:
obj = [obj]
with self._lock:
for o in obj:
if o in self._objects:
raise ObjectAlreadyInPool("Object is already in the pool.")
self._objects.append(o)
self._available.append(o)
self._removed[id(o)] = False
def remove(self, obj):
"""
Removes an object from the pool so that it can't be handed out as an
available resource again. If the object passed in is not in the pool
an ObjectNotInPool exception is raised.
"""
with self._lock:
if obj not in self._objects:
raise ObjectNotInPool("Object is not in the list of pool objects.")
# mark the resource as deleted
self._removed[id(obj)] = True
# if it is currently in the available set, remove it
self._available = [o for o in self._available if o is not obj]
if self.all_removed():
raise AllResourcesRemoved(
"All resources have been removed. "
"Further use of the resource pool is void.")
def get_resource_unmanaged(self, block=True):
"""
Gets a resource from the pool but in an "unmanaged" fashion. It is
up to you to return the resource to the pool by calling
return_resource().
Return value is an object from the pool but see the note below.
NOTE:
You should consider using get_resource() instead in a 'with' statement
as this will handle returning the resource automatically. eg:
            with pool.get_resource() as r:
do_stuff(r)
The resource will be automatically returned upon exiting the 'with'
block.
"""
# if the pool is empty, wait for an object to be returned to the
# pool
obj = None
while True:
with self._lock:
if self.all_removed():
raise AllResourcesRemoved(
"All resources have been removed. Further use of "
"the resource pool is void unless new resources are"
"added.")
if self._available:
obj = self._available.pop(0)
if obj or (not block):
break
time.sleep(0.1)
return obj
def return_resource(self, obj, force=False):
""" Returns a resource to the pool but if:
- obj has a property named 'resource_pool_return_callback' and it is
not None
OR
- self._return_callback is not None
then start a thread that calls that callback before returning the resource
to the pool. This allows the calling process to not have to wait for that
pre-return-to-pool operation (eg. factory reset of a device that is being
tested).
NOTE: the callback added as a property to the object gets precedence
over the one specified for the pool.
NOTE: the callback property is stripped from the obj during the return
process.
"""
if (not obj) or (obj not in self._objects):
raise ObjectNotInPool("Object {} not a member of the pool".format(str(obj)))
if not force:
callback = None
if hasattr(obj, CALLBACK_ATTRIBUTE) and \
getattr(obj, CALLBACK_ATTRIBUTE) is not None:
callback = getattr(obj, CALLBACK_ATTRIBUTE)
# strip the callback attribute from the object
delattr(obj, CALLBACK_ATTRIBUTE)
elif self._return_callback:
callback = self._return_callback
if callback:
thread = Thread(target=self._run_return_callback, args=(obj, callback))
thread.setName("return_obj_{}".format(id(obj)))
thread.start()
return
with self._lock:
if not self._removed[id(obj)]:
self._available.append(obj)
def _run_return_callback(self, obj, callback):
""" This should only really be called by self.return_resource() and is intended
        to be run in a thread to perform some pre-return-to-pool process without
the process that used the resource having to wait for that operation to occur.
If running the callback raises an exception the resource will be removed from
the pool.
"""
try:
callback(obj)
self.return_resource(obj, force=True)
except Exception:
traceback.print_exc()
self.remove(obj)
@contextmanager
def get_resource(self, block=True):
"""
Intended to be used in a 'with' statement or a contextlib.ExitStack.
Returns an object from the pool and waits if necessary. If 'block' is
False, then None is returned if the pool has been depleted.
        Example usage:
            with pool.get_resource() as r:
do_stuff(r)
# at this point, outside the with block, the resource has
# been returned to the pool.
"""
obj = None
try:
obj = self.get_resource_unmanaged(block=block)
yield obj
finally:
if obj:
self.return_resource(obj)
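

# Usage sketch (assumes hypothetical `Device` objects and a `reset_device` callback):
#
#   pool = ResourcePool([Device("a"), Device("b")], return_callback=reset_device)
#   with pool.get_resource() as device:
#       device.do_work()
#   # leaving the block returns the device; if a callback is set, it runs in a
#   # background thread before the device becomes available again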
| 3.625
| 4
|
src/scripts/rPi/main.py
|
MarkintoshZ/Self-Driving-Pi
| 2
|
12784945
|
<filename>src/scripts/rPi/main.py
import numpy as np
from PIL import Image
import os
from os import system as sys
from keras.models import load_model
import keras
from replay import ExperienceReplay
import random
import time
import cv2
import drive
# Get a reference to webcam #0 (the default one)
video_capture = cv2.VideoCapture(0)
steering_angle = 0
steering_angles = [-0.45, 0.45]
encoder = load_model('../models/80-2 Encoder')
exp_replay = ExperienceReplay()
new_model = False
batch_size = 64
skip_learn = 1
if new_model:
epsilon = 0.5
epsilon_decrease = 0.1
model = keras.Sequential()
model.add(keras.layers.Dense(2, activation='selu', input_dim=2))
model.add(keras.layers.Dense(8, activation='selu'))
model.add(keras.layers.Dense(2))
model.compile('adam', loss='mse', metrics=['mse'])
else:
epsilon = 0
epsilon_decrease = 0
print("Loading model")
model = keras.models.load_model('../Models/new.h5')
model.compile('adam', loss='mse', metrics=['mse'])
def get_new_state():
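    """Grab a webcam frame, crop and downscale it, and return the encoder's latent
    vector together with the initial reward/game_over/start values."""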
# Grab a single frame of video
ret, frame = video_capture.read()
# print(frame.shape)
# while size(frame[0][0]) == 0:
# print('No camera data check connection')
frame = frame[200:, 160: 480]
image = cv2.resize(frame, (10, 8), Image.ANTIALIAS)
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
data = np.asarray(image).ravel()
data = data / 255
data = encoder.predict(data.reshape(1, -1))
input_t = data.reshape((-1))
reward = 0
game_over = False
start = True
return input_t, reward, game_over, start
if __name__ == '__main__':
e = 0
# connected to unity
try:
while True:
e += 1
print("running")
start = False
game_over = False
loss = 0.
counter = 0
# get initial input
while not start:
input_t, reward, game_over, start = get_new_state()
time.sleep(0.05)
continue
print('Starting New Session')
while not game_over:
counter += 1
input_tm1 = input_t
# get next action
if np.random.rand() <= epsilon:
action = random.randint(0, 1)
else:
q_value = model.predict(np.expand_dims(input_tm1, 0))
                    if np.isnan(q_value[0][0]):
print("nan in model output")
break
action = np.argmax(q_value[0])
print(action)
# apply action, get rewards and new state
print(action)
# send_msg(str(steering_angles[action]))
if action:
drive.right()
else:
drive.left()
input_t, reward, game_over, start = get_new_state()
# store experience
exp_replay.remember([input_tm1, action, reward, input_t], game_over)
# if counter % skip_learn == 0:
# print('training')
# inputs, targets = exp_replay.get_batch(model, batch_size)
# loss = np.array(model.train_on_batch(inputs, targets)).sum()
print('training')
for _ in range(25):
inputs, targets = exp_replay.get_batch(model, batch_size)
loss = np.array(model.train_on_batch(inputs, targets)).sum()
print("Epoch {:03d} | Loss {:.4f} | Epsilon {:.3f}".format(e, loss, epsilon))
epsilon -= epsilon_decrease
except KeyboardInterrupt:
name = input("\nenter file name")
if name != 'no':
model.save(name)
| 2.65625
| 3
|
tests/ibllib/test_atlas.py
|
SebastianBruijns/ibllib
| 0
|
12784946
|
<reponame>SebastianBruijns/ibllib
import unittest
import numpy as np
from ibllib.atlas import BrainCoordinates, sph2cart, cart2sph, Trajectory, Insertion
class TestInsertion(unittest.TestCase):
def test_init(self):
d = {
'label': 'probe00',
'x': 544.0,
'y': 1285.0,
'z': 0.0,
'phi': 0.0,
'theta': 5.0,
'depth': 4501.0,
'beta': 0.0}
ins = Insertion.from_dict(d)
# eval the entry point, should be super close
dxyz = ins.trajectory.eval_x(d['x'] / 1e6) - np.array((d['x'], d['y'], d['z'])) / 1e6
self.assertTrue(np.all(np.isclose(dxyz, 0)))
# test methods tip/entry/xyz
dd = np.sum(np.sqrt(np.diff(ins.xyz, axis=0) ** 2)) - d['depth'] / 1e6
self.assertLess(abs(dd), 0.01)
class TestTrajectory(unittest.TestCase):
def test_eval_trajectory(self):
line = Trajectory.fit(np.array([[0.3, 0.3, 0.4], [0, 0, 1]]))
# test integer
self.assertTrue(np.all(np.isclose(line.eval_y(0), np.array([0, 0, 1]))))
# test float
self.assertTrue(np.all(np.isclose(line.eval_y(0.0), np.array([0, 0, 1]))))
# test list
self.assertTrue(np.all(np.isclose(line.eval_y([0.0, 0.0]), np.array([0, 0, 1]))))
# test array
arr = np.array([0.0, 0.0])[..., np.newaxis]
self.assertTrue(np.all(np.isclose(line.eval_y(arr), np.array([0, 0, 1]))))
# test void direction
vertical = Trajectory.fit(np.array([[0, 0, 0], [0, 0, 1]]))
self.assertTrue(np.all(np.isnan(vertical.eval_x(5))))
def test_trajectory(self):
np.random.seed(42)
xyz = np.zeros([120, 3])
xyz[:, 0] = np.linspace(1, 9, 120)
xyz[:, 1] = np.linspace(2, 4, 120)
xyz[:, 2] = np.linspace(-2, 3, 120)
xyz += np.random.normal(size=xyz.shape) * 0.4
traj = Trajectory.fit(xyz)
# import matplotlib.pyplot as plt
# import mpl_toolkits.mplot3d as m3d
# ax = m3d.Axes3D(plt.figure())
# ax.scatter3D(*xyz.T)
# ax.plot3D(*insertion.eval_x(np.array([0, 10])).T)
# ax.plot3D(*insertion.eval_y(xyz[:, 1]).T, 'r')
d = xyz[:, 0] - traj.eval_y(xyz[:, 1])[:, 0]
self.assertTrue(np.abs(np.mean(d)) < 0.001)
d = xyz[:, 0] - traj.eval_y(xyz[:, 1])[:, 0]
self.assertTrue(np.abs(np.mean(d)) < 0.001)
d = xyz[:, 1] - traj.eval_z(xyz[:, 2])[:, 1]
self.assertTrue(np.abs(np.mean(d)) < 0.001)
def test_exit_volume(self):
bc = BrainCoordinates((11, 13, 15), xyz0=(-5, -6, -7))
# test arbitrary line
line = Trajectory.fit(np.array([[0.1, 0.1, 0], [0, 0, 1]]))
epoints = Trajectory.exit_points(line, bc)
self.assertTrue(np.all(np.isclose(epoints, np.array([[0.8, 0.8, -7.], [-0.6, -0.6, 7.]]))))
# test apline
hline = Trajectory.fit(np.array([[0, 0, 0], [0, 1, 0]]))
epoints = Trajectory.exit_points(hline, bc)
self.assertTrue(np.all(np.isclose(epoints, np.array([[0, -6, 0], [0, 6, 0]]))))
# test mlline
hline = Trajectory.fit(np.array([[0, 0, 0], [1, 0, 0]]))
epoints = Trajectory.exit_points(hline, bc)
self.assertTrue(np.all(np.isclose(epoints, np.array([[-5, 0, 0], [5, 0, 0]]))))
# test vertical line
vline = Trajectory.fit(np.array([[0, 0, 0], [0, 0, 1]]))
epoints = Trajectory.exit_points(vline, bc)
self.assertTrue(np.all(np.isclose(epoints, np.array([[0, 0, -7.], [0, 0, 7.]]))))
class TestsCoordinatesSimples(unittest.TestCase):
def test_brain_coordinates(self):
vshape = (6, 7, 8)
bc = BrainCoordinates(vshape)
self.assertTrue(bc.i2x(0) == 0)
self.assertTrue(bc.i2x(6) == 6)
self.assertTrue(bc.nx == 6)
self.assertTrue(bc.ny == 7)
self.assertTrue(bc.nz == 8)
# test array functions
in_out = [([6, 7, 8], np.array([6, 7, 8])),
(np.array([6, 7, 8]), np.array([6, 7, 8])),
(np.array([[6, 7, 8], [6, 7, 8]]), np.array([[6, 7, 8], [6, 7, 8]])),
]
for io in in_out:
self.assertTrue(np.all(bc.xyz2i(io[0]) == io[1]))
self.assertTrue(np.all(bc.i2xyz(io[1]) == io[0]))
def test_reverse_directions(self):
bc = BrainCoordinates(nxyz=(6, 7, 8), xyz0=[50, 60, 70], dxyz=[-10, -10, -10])
self.assertTrue(bc.i2x(0) == 50 and bc.i2x(bc.nx - 1) == 0)
self.assertTrue(bc.i2y(0) == 60 and bc.i2y(bc.ny - 1) == 0)
self.assertTrue(np.all(bc.i2z(np.array([0, 1])) == np.array([70, 60])))
bc = BrainCoordinates(nxyz=(6, 7, 8), xyz0=[50, 60, 70], dxyz=-10)
self.assertTrue(bc.dx == bc.dy == bc.dz == -10)
def test_sph2cart_and_back(self):
dv = np.array([0, -1, 1, 0, 0, 0, 0, 0, 0]) # z
ml = np.array([0, 0, 0, 0, -1, 1, 0, 0, 0]) # x
ap = np.array([0, 0, 0, 0, 0, 0, 0, -1, 1]) # y
phi = np.array([0., 0., 0., 0., 180., 0., 0., -90., 90.])
theta = np.array([0., 180., 0., 0., 90., 90., 0., 90., 90.])
r = np.array([0., 1, 1, 0., 1, 1, 0., 1, 1])
r_, t_, p_ = cart2sph(ml, ap, dv)
assert np.all(np.isclose(r, r_))
assert np.all(np.isclose(phi, p_))
assert np.all(np.isclose(theta, t_))
x_, y_, z_ = sph2cart(r, theta, phi)
assert np.all(np.isclose(ml, x_))
assert np.all(np.isclose(ap, y_))
assert np.all(np.isclose(dv, z_))
if __name__ == "__main__":
unittest.main(exit=False)
| 2.65625
| 3
|
train.py
|
pandeydeep9/Attentive-Neural-Process
| 0
|
12784947
|
<gh_stars>0
from tqdm import tqdm
from network import LatentModel
from tensorboardX import SummaryWriter
import torchvision
import torch as t
from torch.utils.data import DataLoader
from preprocess import collate_fn
import os
def adjust_learning_rate(optimizer, step_num, warmup_step=4000):
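    """Noam-style schedule: linear warmup for `warmup_step` steps, then decay
    proportional to step_num ** -0.5 (scaled by a base rate of 0.001)."""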
lr = 0.001 * warmup_step**0.5 * min(step_num * warmup_step**-1.5, step_num**-0.5)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def main():
train_dataset = torchvision.datasets.MNIST('./data', train=True, download=True,)
epochs = 50
model = LatentModel(128).cuda()
model.train()
optim = t.optim.Adam(model.parameters(), lr=1e-4)
writer = SummaryWriter()
global_step = 0
for epoch in range(epochs):
dloader = DataLoader(train_dataset, batch_size=8, collate_fn=collate_fn, shuffle=True, num_workers=4)
pbar = tqdm(dloader)
for i, data in enumerate(pbar):
global_step += 1
adjust_learning_rate(optim, global_step)
context_x, context_y, target_x, target_y = data
context_x = context_x.cuda()
context_y = context_y.cuda()
target_x = target_x.cuda()
target_y = target_y.cuda()
# pass through the latent model
y_pred, kl, loss = model(context_x, context_y, target_x, target_y)
# Training step
optim.zero_grad()
loss.backward(retain_graph=False)
optim.step()
model.zero_grad()
# Logging
writer.add_scalars('training_loss',{
'loss':loss,
'kl':kl.mean(),
}, global_step)
# save model by each epoch
t.save({'model':model.state_dict(),
'optimizer':optim.state_dict()},'May27checkpoint_%d.pth.tar' % (epoch+1))
if __name__ == '__main__':
main()
| 2.171875
| 2
|
examples/jsonrpc_server/jsonrpc_server.py
|
podhmo/toybox
| 3
|
12784948
|
# -*- coding:utf-8 -*-
import logging
from toybox.simpleapi import run
from pyramid_rpc.jsonrpc import jsonrpc_method
# please: pip install pyramid_rpc
# see also: http://docs.pylonsproject.org/projects/pyramid_rpc/en/latest/jsonrpc.html
"""
python ./jsonrpc_server.py
$ echo '{"id": "1", "params": {"name": "foo"}, "method": "say_hello", "jsonrpc": "2.0"}' | http POST :8080/api
{
"id": "1",
"jsonrpc": "2.0",
"result": "hello, foo"
}
"""
@jsonrpc_method(endpoint='api')
def say_hello(request, name):
return 'hello, {}'.format(name)
def includeme(config):
config.include('pyramid_rpc.jsonrpc')
config.add_jsonrpc_endpoint('api', '/api')
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
run.include(includeme)
run(port=8080)
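

# Client sketch (assumes the server above is running locally; requires the `requests` package):
#
#   import requests
#   payload = {"jsonrpc": "2.0", "id": "1", "method": "say_hello", "params": {"name": "foo"}}
#   print(requests.post("http://localhost:8080/api", json=payload).json()["result"])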
| 2.5
| 2
|
fake_count/templatetags/fake_count_tmpl.py
|
iterweb/django_fake_counter
| 1
|
12784949
|
<filename>fake_count/templatetags/fake_count_tmpl.py
from django.template import Library
from random import randint
from datetime import datetime
from fake_count.app_settings import DAY, NIGHT
register = Library()
night = 235922861933
morning = 100129385257
@register.inclusion_tag('_count.html')
def fake_counter():
    time = randint(4, 9)
    # Recompute on every call so the counter follows the current time of day,
    # instead of being frozen at module import time.
    current_time = str(datetime.now().time()).replace(':', '').replace('.', '')
    if morning < int(current_time):
        random_num = randint(1, int(DAY / 100 * 5))  # randint needs integer bounds
res = DAY - random_num
return {
'counter': str(res),
'time': time,
}
elif night > int(current_time):
        random_num = randint(1, int(NIGHT / 100 * 5))  # randint needs integer bounds
res = NIGHT - random_num
return {
'counter': str(res),
'time': time,
}
| 2.5
| 2
|
Examen/pandas_example - copia.py
|
jansforte/Inteligencia-Artificial
| 0
|
12784950
|
<filename>Examen/pandas_example - copia.py
import pandas as pd
Datos = pd.DataFrame({"hora": ["0.29 [0.15-0.48]", "6.586 [0.15-0.48]", "9800 [10-200]", "3 [10-200]", "6.586 [0.15-0.48]"]})
Datos["hora"] = Datos["hora"].str.extract(r"(.*\d\s)")
print(Datos)
| 3.578125
| 4
|