hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0f1744199f477069c02875d1067c890dfb7a1805 | 340 | py | Python | src/main/python/grammer/Function.py | photowey/python-study | 218456a0d661709a49fb060659664102b9287de8 | [
"Apache-2.0"
] | null | null | null | src/main/python/grammer/Function.py | photowey/python-study | 218456a0d661709a49fb060659664102b9287de8 | [
"Apache-2.0"
] | null | null | null | src/main/python/grammer/Function.py | photowey/python-study | 218456a0d661709a49fb060659664102b9287de8 | [
"Apache-2.0"
] | null | null | null | # -*- coding:utf-8 -*-
# ---------------------------------------------
# @file Function.py
# @description Function
# @author WcJun
# @date 2020/06/20
# ---------------------------------------------
# Sum of the integers from n through m.
def add(n, m):
    """Return the sum of the integers n, n+1, ..., m (0 when n > m)."""
    return sum(range(n, m + 1))
# Compute and print the sum of 1..100.
# Fixed: the result used to be bound to the name 'add', shadowing the
# add() function and making it uncallable afterwards.
total = add(1, 100)
print(total)
| 14.782609 | 47 | 0.364706 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 231 | 0.641667 |
0f18e0ca0c5499bebb322bcb92fe32db9bf17ba3 | 6,709 | py | Python | Missions_to_Mars/scrape_mars.py | VallieTracy/web-scraping-challenge | 7bb248b97d5ca126fbcd558ff1e89bd680bd18a9 | [
"ADSL"
] | null | null | null | Missions_to_Mars/scrape_mars.py | VallieTracy/web-scraping-challenge | 7bb248b97d5ca126fbcd558ff1e89bd680bd18a9 | [
"ADSL"
] | null | null | null | Missions_to_Mars/scrape_mars.py | VallieTracy/web-scraping-challenge | 7bb248b97d5ca126fbcd558ff1e89bd680bd18a9 | [
"ADSL"
] | null | null | null | # Dependencies
from bs4 import BeautifulSoup as bs
import requests
from splinter import Browser
import time
import pandas as pd
import requests as req
# Define browser path
def init_browser():
    """Create and return a Splinter Chrome browser driven by the local
    chromedriver binary (visible window, not headless)."""
    driver_path = r"C:/bin/chromedriver"
    return Browser('chrome', executable_path=driver_path, headless=False)
# Define 'scrape' function
def scrape():
    """Scrape Mars data from several websites and return it in a dict.

    Gathers: the latest NASA Mars news headline/teaser, the JPL featured
    image URL, the most recent Mars weather tweet, a Mars facts HTML
    table, and the titles + full-size image URLs of the four hemispheres.

    Bug fix: the nested ``get_image_url`` previously visited the
    loop-global ``link`` instead of its ``page_url`` parameter (it only
    worked by accident because the caller happened to pass that same
    variable); it now uses its parameter.
    """
    browser = init_browser()

    # MARS NEWS
    # Visit the mars Nasa news site
    url = 'https://mars.nasa.gov/news/'
    browser.visit(url)
    time.sleep(1)

    # Scrape page into soup
    browser_html = browser.html
    news_soup = bs(browser_html, "html.parser")

    # Get most recent headline
    slide_element = news_soup.select_one("ul.item_list li.slide")
    news_title = slide_element.find("div", class_="content_title").find("a").text

    # Get first snippet of article text
    news_p = slide_element.find("div", class_="article_teaser_body").text

    # JPL FEATURED SPACE IMAGE
    # URL to visit through chromedriver
    url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
    browser.visit(url)
    time.sleep(1)

    browser.click_link_by_partial_text('FULL IMAGE')
    image_html = browser.html
    image_soup = bs(image_html, "html.parser")

    # The relative image path is embedded in the element's style attribute,
    # quoted with single quotes — split it out.
    featured_img_rel = image_soup.select_one(".carousel_item").get("style")
    featured_img_rel = featured_img_rel.split("\'")[1]
    featured_img_url = f'https://www.jpl.nasa.gov{featured_img_rel}'

    # MARS WEATHER
    # Request info from Mars' twitter page
    twitter_response = req.get('https://twitter.com/marswxreport?lang=en')

    # Return beautiful soup object
    twitter_soup = bs(twitter_response.text, "html.parser")

    # Parent container for the top weather tweet
    tweet_containers = twitter_soup.find_all("div", class_='js-tweet-text-container')

    # Pull out the text we want and save into variable called 'mars_weather'
    mars_weather = tweet_containers[0].text

    # MARS FACTS
    # Visit the Mars Facts webpage
    mars_facts_url = 'https://space-facts.com/mars/'

    # Use Pandas to read the tables from the webpage
    tables = pd.read_html(mars_facts_url)

    # Specify which table we want
    table_one_df = tables[0]

    # Rename the columns
    table_one_df.columns = ["Mars Planet Profile", "Values"]

    # Reset the index
    table_one_df.set_index("Mars Planet Profile", inplace=True)

    # Put table into html string + remove '\n's
    html_table = table_one_df.to_html()
    html_table = html_table.replace('\n', '')

    # MARS HEMISPHERES
    # Mars hemispheres url
    hemi_url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
    browser.visit(hemi_url)
    time.sleep(1)

    # Convert the browser html to a soup object
    hemi_html = browser.html
    hemi_soup = bs(hemi_html, "html.parser")

    def get_first_url(soup_div):
        """Return [title, partial_image_url] for one hemisphere result."""
        # Title is found within the first url
        title = soup_div.find("h3").text
        # Image parent
        image_parent = soup_div.find("div", class_="description")
        # 'Tail' portion of url which directs us to the full-size image page
        image_link_partial = image_parent.find("a")["href"]
        return [title, image_link_partial]

    def get_image_url(page_url, browser):
        """Visit *page_url* and return the full-size image url found there."""
        # BUG FIX: visit the page passed in, not the loop-global 'link'.
        browser.visit(page_url)
        time.sleep(1)
        # Convert browser to html
        image_html = browser.html
        # Convert to a soup object
        image_soup = bs(image_html, "html.parser")
        # Parent element of the full-size image
        full_img_parent = image_soup.select_one("div.wide-image-wrapper div.downloads")
        # Find the full-size image url within the parent element
        img_url = full_img_parent.find("a")["href"]
        return img_url

    # Run a loop using the above functions in order to get titles + full-size image urls
    # List of html that holds all 4 hemisphere info
    results = hemi_soup.select("div.result-list div.item")

    # Define parent url (now actually used below when building each link)
    parent_url = 'https://astrogeology.usgs.gov'

    # Titles, partial urls, full links, and the final list of dictionaries
    titles = []
    img_partials = []
    links = []
    hemisphere_image_urls = []

    # Loop over the four hemisphere result blocks
    for result in results:
        # Calling 'get_first_url' function to find the titles and partial urls
        [title, img_partial] = get_first_url(result)

        # Appending titles & img_partials lists
        titles.append(title)
        img_partials.append(img_partial)

        # Define hemisphere image link using parent link + newly found img_partial
        link = parent_url + img_partial
        img_url = get_image_url(link, browser)
        links.append(link)

        # Create dictionary to hold titles and image urls
        hemi_dict = {"title": title, "img_url": img_url}

        # Append hemisphere_image_urls list with this 'hemi_dict'
        hemisphere_image_urls.append(hemi_dict)

    # Flattened title/url variables for the local-host webpage template
    title_one = hemisphere_image_urls[0]['title']
    title_two = hemisphere_image_urls[1]['title']
    title_three = hemisphere_image_urls[2]['title']
    title_four = hemisphere_image_urls[3]['title']

    image_one = hemisphere_image_urls[0]['img_url']
    image_two = hemisphere_image_urls[1]['img_url']
    image_three = hemisphere_image_urls[2]['img_url']
    image_four = hemisphere_image_urls[3]['img_url']

    # Store all data from scrape function in a dictionary
    mars_dictionary = {
        "Top_News": news_title,
        "Teaser_P": news_p,
        "Featured_Image": featured_img_url,
        "Mars_Weather": mars_weather,
        "Mars_Info_Table": html_table,
        "First_Hemi_Title": title,
        "First_Hemi_Img": img_url,
        "Title_One": title_one,
        "Title_Two": title_two,
        "Title_Three": title_three,
        "Title_Four": title_four,
        "Image_One": image_one,
        "Image_Two": image_two,
        "Image_Three": image_three,
        "Image_Four": image_four}

    return mars_dictionary
| 32.567961 | 100 | 0.693099 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,347 | 0.498882 |
0f1a15f7826e26b74a287b61edb934c290001cbf | 1,144 | py | Python | pydmtx/__init__.py | pydmtx/pydmtx | 946c3920593dd865b756825667105655394b9cae | [
"MIT"
] | 4 | 2021-05-03T15:26:32.000Z | 2022-02-23T22:26:01.000Z | pydmtx/__init__.py | pydmtx/pydmtx | 946c3920593dd865b756825667105655394b9cae | [
"MIT"
] | null | null | null | pydmtx/__init__.py | pydmtx/pydmtx | 946c3920593dd865b756825667105655394b9cae | [
"MIT"
] | 2 | 2021-11-11T20:30:51.000Z | 2021-11-18T15:19:07.000Z | from pydmtx.symbol import Symbol
from pydmtx.encode import encode as encode_encode
from pydmtx.reedsolomon import encode as reedsolomon_encode
from pydmtx.bitstream import bitstream
from pydmtx.plugins.registry import plugin_manager
from pydmtx.plugins import ExportPlugin
DEFAULT_QUIET_ZONE = 2
DEFAULT_SYMBOL_SIZE = "square"
class RawSymbol(list):
    """A raw Data Matrix symbol (a list of rows) with pluggable export."""

    def __init__(self, data):
        super().__init__(data)

    def format(self, formatter, **options):
        """Render this symbol with *formatter*: either an ExportPlugin
        subclass or a format-type name registered with the plugin
        manager."""
        plugin = formatter
        if isinstance(plugin, str):
            plugin = plugin_manager.find_export_plugin_by_format_type(plugin)
        assert issubclass(plugin, ExportPlugin)
        return plugin(self).format(**options)
def encode(data, quiet_zone=DEFAULT_QUIET_ZONE, version=DEFAULT_SYMBOL_SIZE):
    """Encode *data* (a str, UTF-8 encoded internally) into a Data Matrix
    symbol of the given *version*, wrapped in a RawSymbol."""
    payload = list(bytes(data, encoding="UTF-8"))
    version, data_codewords = encode_encode(payload, version)
    # Append Reed-Solomon error-correction codewords to the data codewords.
    codewords = bytes(reedsolomon_encode(bytes(data_codewords), version))
    symbol = Symbol(version, quiet_zone=quiet_zone)
    symbol.draw_data(bitstream(codewords))
    return RawSymbol(symbol.raw())
| 28.6 | 83 | 0.756993 | 353 | 0.308566 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 0.013112 |
0f1a8a8fdf277ed1542030735b29d945045c82ec | 40 | py | Python | tools/RNN/rnn_quantizer/tensorflow/tf_nndct/utils/__init__.py | hito0512/Vitis-AI | 996459fb96cb077ed2f7e789d515893b1cccbc95 | [
"Apache-2.0"
] | 1 | 2021-04-01T06:38:48.000Z | 2021-04-01T06:38:48.000Z | tools/RNN/rnn_quantizer/tensorflow/tf_nndct/utils/__init__.py | hito0512/Vitis-AI | 996459fb96cb077ed2f7e789d515893b1cccbc95 | [
"Apache-2.0"
] | null | null | null | tools/RNN/rnn_quantizer/tensorflow/tf_nndct/utils/__init__.py | hito0512/Vitis-AI | 996459fb96cb077ed2f7e789d515893b1cccbc95 | [
"Apache-2.0"
] | null | null | null | from nndct_shared.utils import registry
| 20 | 39 | 0.875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0f1b976e46c7f2ba93a9ecff9c247ef02d3200d7 | 774 | py | Python | benchmark/python/tpch_base.py | alefranz/spark | f5f15379b2c080339a36423b0262f29d978fd362 | [
"MIT"
] | 1 | 2019-06-14T09:25:43.000Z | 2019-06-14T09:25:43.000Z | benchmark/python/tpch_base.py | tianyaba/spark | f1c5b86d84bc91cb2d6f5aaf3d2a401de6fd4098 | [
"MIT"
] | null | null | null | benchmark/python/tpch_base.py | tianyaba/spark | f1c5b86d84bc91cb2d6f5aaf3d2a401de6fd4098 | [
"MIT"
] | null | null | null | # Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the MIT license.
# See the LICENSE file in the project root for more information.
import pyspark
from pyspark.sql import SparkSession
class TpchBase:
    """Loads the eight TPC-H tables from parquet files found under *dir*,
    exposing each as an attribute of the same name."""

    _TABLE_NAMES = (
        "customer", "lineitem", "nation", "region",
        "orders", "part", "partsupp", "supplier",
    )

    def __init__(self, spark, dir):
        for table in self._TABLE_NAMES:
            setattr(self, table, spark.read.parquet(dir + table))
| 43 | 70 | 0.683463 | 519 | 0.670543 | 0 | 0 | 0 | 0 | 0 | 0 | 267 | 0.344961 |
0f1ca8b43d824f2698d264b0e2c96a3fd9919b23 | 6,385 | py | Python | get_data.py | savage13/botw_chart | 1c5f895e1327b74768f1429cc5a2b1ce07377a53 | [
"MIT"
] | null | null | null | get_data.py | savage13/botw_chart | 1c5f895e1327b74768f1429cc5a2b1ce07377a53 | [
"MIT"
] | null | null | null | get_data.py | savage13/botw_chart | 1c5f895e1327b74768f1429cc5a2b1ce07377a53 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys
import json
import time
import subprocess
# speedrun.com category ids mapped to the local cache files:
# 'output_file' holds the raw downloaded runs, 'output_file2' the
# filtered subset written by the -f step in __main__.
cats = {
    "any": { "id": "vdoq4xvk", "output_file": "all.json", "output_file2": "all2.json", },
    "100": { "id": "xk9jv4gd", "output_file": "100.json", "output_file2": "1002.json", },
    'amq': { "id": "n2yj3r82", "output_file": "amq.json", "output_file2": "amq2.json", },
    "as": { "id": "wkpqmw8d", "output_file": "as.json", "output_file2": "as2.json", },
    "ad": { "id": "9d8jgv7k", "output_file": "ad.json", "output_file2": "ad2.json", }
}
def get_next_link(v):
    """Return the 'next' pagination URI from a speedrun.com API response
    *v*, or None when the response has no further pages.

    (Removed a dead ``URL = None`` local that was never used.)
    """
    for link in v['pagination']['links']:
        if link['rel'] == 'next':
            return link['uri']
    return None
def get_player_ids_from_runs(out):
    """Return the sorted, de-duplicated player ids found in *out*, a list
    of run records from the speedrun.com API (guest players without an
    'id' field are skipped).

    Bug fix: the original ``list(set(sorted(ids)))`` threw the ordering
    away again when building the set, so the result was not actually
    sorted; sort *after* de-duplicating instead.
    """
    ids = []
    for run in out:
        ids.extend(p['id'] for p in run['players'] if 'id' in p)
    return sorted(set(ids))
def get_url(URL):
    """Fetch *URL* with the external curl binary and decode the response
    body as JSON."""
    result = subprocess.run(['curl', URL], capture_output=True)
    return json.loads(result.stdout)
def get_runs_at_offset(category, offset):
    """Fetch one page of runs for *category* starting at *offset*,
    ordered by submission date ascending."""
    URL = (
        'https://www.speedrun.com/api/v1/runs?category='
        f'{category}&orderby=submitted&direction=asc&offset={offset}'
    )
    return get_url(URL)
def get_all_runs(category, offset=0):
    """Download every run for *category* starting at *offset*, following
    the API's pagination 'next' links until exhausted.

    Sleeps 1s between pages to be polite to the API and prints progress.
    (Removed an unused ``n = 0`` local.)
    """
    out = []
    URL = "https://www.speedrun.com/api/v1/runs?category={}&orderby=submitted&direction=asc&offset={}".format(category, offset)
    while URL is not None:
        v = get_url(URL)
        out.extend(v['data'])
        URL = get_next_link(v)
        print("{:5} {}".format(len(out), URL))
        time.sleep(1)
    return out
def load_runs(filename):
    """Refresh a saved runs file.

    Loads the runs cached in *filename*, re-downloads the last (possibly
    partial) chunk of 20 runs plus anything newer, and returns the merged
    list.

    Raises:
        ValueError: if the first run of the re-downloaded chunk does not
            match the cached data (stale/invalid cache).

    (Removed an unreachable ``return runs`` that followed an if/else in
    which both branches already exit.)
    """
    chunk = 20
    runs = json.load(open(filename, 'r'))
    category = runs[0]['category']
    n = len(runs)
    n20 = (n // chunk) * chunk  # offset of the last cached chunk
    # NOTE(review): if n is an exact multiple of 20, runs[n20] below
    # raises IndexError — confirm the cache always ends mid-chunk.
    tmp = get_runs_at_offset(category, n20)
    if tmp['data'][0]['id'] == runs[n20]['id']: # Check if first id in downloaded data matches the same in the existing data
        print(" Last ID matches, just update runs", len(runs))
        runs = runs[:n20]        # Truncate runs at beginning of chunk
        runs.extend(tmp['data']) # Add "new" runs
        URL = get_next_link(tmp)
        if URL is not None:
            print(" ... Last chunk has more runs, get 'em")
            r = get_all_runs(category, offset=n20 + chunk)
            runs.extend(r)
        return runs
    else:
        raise ValueError("IDs do not match")
def get_all_players(ids):
    """Fetch the speedrun.com user record for every id in *ids*.

    Returns a dict mapping each id to its decoded JSON API response.
    Prints each id as it goes and sleeps 1s between requests."""
    players = {}
    for player_id in ids:
        url = "https://www.speedrun.com/api/v1/users/{}".format(player_id)
        proc = subprocess.run(['curl', '--silent', url],
                              stdout=subprocess.PIPE)
        players[player_id] = json.loads(proc.stdout.decode())
        print("ID {}".format(player_id))
        time.sleep(1)
    return players
def update_players_list(runs, players_file='players.json'):
    """Download any players referenced by *runs* that are missing from
    *players_file*, and write the merged mapping back to disk (only when
    something new was fetched)."""
    known = json.load(open(players_file, 'r'))
    missing = [pid for pid in get_player_ids_from_runs(runs)
               if pid not in known]
    if len(missing) > 0:
        known.update(get_all_players(missing))
        with open(players_file, 'w') as fp:
            json.dump(known, fp)
if __name__ == '__main__':
    # Command-line driver: download, update, and/or filter speedrun.com
    # data for one category (or all of them).
    players_file = "players.json"
    players_file2 = "players2.json"
    if len(sys.argv) < 2:
        # Fixed: usage previously advertised a nonexistent '-r' flag and
        # the wrong script name 'next.py'; the parsed flags are -a/-u/-p/-f.
        print("Usage: get_data.py -a -p -u -f category")
        print(" -a Get all runs (starts over)")
        print(" -f Filter runs and players")
        print(" -u Update runs")
        print(" -p Update players")
        print(" Ex: get_data.py -u -p -f all # Update all categories ")
        print(" get_data.py -a -p -f all # Get all categories ")
        sys.exit(0)

    # The category is always the last argument.
    cat = sys.argv[-1]
    if cat not in cats and cat != "all":
        print("Please specify a category")
        for key in cats.keys():
            print(" ", key)
        print(" all")
        sys.exit(-1)

    if cat == "all":
        categories = list(cats.keys())
    else:
        categories = [cat]

    # Flags may appear in any argument position.
    get_all = any(arg == '-a' for arg in sys.argv[1:])
    update_players = any(arg == '-p' for arg in sys.argv[1:])
    update_runs = any(arg == '-u' for arg in sys.argv[1:])
    filter_data = any(arg == '-f' for arg in sys.argv[1:])

    runs = []
    if get_all:
        # Full (re)download of every run, per category.
        for cat in categories:
            c = cats[cat]
            runs = get_all_runs(c['id'])
            out = c['output_file']
            with open(out, 'w') as fp:
                json.dump(runs, fp)
            if update_players:
                update_players_list(runs)

    if update_runs:
        # Incremental update of the cached runs, per category.
        for cat in categories:
            print("Updating category:", cat)
            c = cats[cat]
            out = c['output_file']
            runs = load_runs(out)
            with open(out, 'w') as fp:
                json.dump(runs, fp)
            if update_players:
                print(" Updating players ...")
                update_players_list(runs)

    if filter_data:
        # Strip the cached runs down to only the fields the site uses.
        for cat in categories:
            c = cats[cat]
            out = c['output_file']
            out2 = c['output_file2']
            runs = json.load(open(out, 'r'))
            rout = []
            # Remove extra fields we are not using
            for r in runs:
                if len(r['players']) == 0:
                    continue
                if 'id' not in r['players'][0]:
                    continue
                r2 = {}
                for k in ['id', 'weblink', 'submitted']:
                    r2[k] = r[k]
                r2['status'] = {'status': r['status']['status']}
                r2['times'] = {'primary_t': r['times']['primary_t']}
                r2['players'] = [{'id': r['players'][0]['id']}]
                r2['values'] = r['values']
                rout.append(r2)
            json.dump(rout, open(out2, 'w'))

        # Players file: keep only the display-relevant fields.
        players = json.load(open(players_file, 'r'))
        pout = {}
        for k, v in players.items():
            pout[k] = {
                'data': {
                    'weblink': v['data']['weblink'],
                    'names': v['data']['names'],
                    'name-style': v['data']['name-style'],
                }
            }
        json.dump(pout, open(players_file2, 'w'))
| 33.429319 | 128 | 0.519969 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,770 | 0.277212 |
0f1d77811b98689b2cae0d686170946ae5f678f0 | 621 | py | Python | server/schemas/kind_to_strain.py | Georgi2704/pricelist-fastapi-boilerplate | 24b88e1f5c28b7eaff50745cd4464caac6de01e6 | [
"Apache-2.0"
] | null | null | null | server/schemas/kind_to_strain.py | Georgi2704/pricelist-fastapi-boilerplate | 24b88e1f5c28b7eaff50745cd4464caac6de01e6 | [
"Apache-2.0"
] | 2 | 2021-11-11T15:19:30.000Z | 2022-02-07T22:52:07.000Z | server/schemas/kind_to_strain.py | Georgi2704/pricelist-fastapi | 24b88e1f5c28b7eaff50745cd4464caac6de01e6 | [
"Apache-2.0"
] | null | null | null | from datetime import datetime
from typing import Optional
from uuid import UUID
from server.schemas.base import BoilerplateBaseModel
class KindToStrainBase(BoilerplateBaseModel):
kind_id: UUID
strain_id: UUID
# Properties to receive via API on creation
class KindToStrainCreate(KindToStrainBase):
pass
# Properties to receive via API on update
class KindToStrainUpdate(KindToStrainBase):
pass
class KindToStrainInDBBase(KindToStrainBase):
id: UUID
class Config:
orm_mode = True
# Additional properties to return via API
class KindToStrainSchema(KindToStrainInDBBase):
pass
| 18.818182 | 52 | 0.780998 | 344 | 0.553945 | 0 | 0 | 0 | 0 | 0 | 0 | 125 | 0.201288 |
0f1ecb2bfe2239fd22330150e77bde33035a5f02 | 2,215 | py | Python | ras_realsense/realsense_camera/test/files/scripts/check_camera_service_power_set_off_and_on_with_no_subscriber.py | RAS-2018-grp-4/ras_miscellaneous- | 12b769086148b724c17b3ba9f15216bdfd5025d5 | [
"MIT"
] | 3 | 2020-02-17T12:56:22.000Z | 2020-09-30T11:17:03.000Z | ras_realsense/realsense_camera/test/files/scripts/check_camera_service_power_set_off_and_on_with_no_subscriber.py | RAS-2018-grp-4/ras_miscellaneous- | 12b769086148b724c17b3ba9f15216bdfd5025d5 | [
"MIT"
] | 2 | 2020-02-17T15:17:43.000Z | 2021-05-11T21:01:26.000Z | ras_realsense/realsense_camera/test/files/scripts/check_camera_service_power_set_off_and_on_with_no_subscriber.py | RAS-2018-grp-4/ras_miscellaneous- | 12b769086148b724c17b3ba9f15216bdfd5025d5 | [
"MIT"
] | 8 | 2020-02-17T12:55:59.000Z | 2021-07-22T12:24:15.000Z | #!/usr/bin/env python
"""
@file check_camera_service_power_set_off_and_on_with_no_subscriber.py
"""
import os
import sys
import unittest
import time
import subprocess
import commands
import rospy
import rostest
from rs_general.rs_general import get_camera_params_and_values, \
is_log_contains_keyword, LOGFILE
PKG = "realsense_camera"
NAME = "check_camera_service_power_set_off_and_on_with_no_subscriber"
class CheckCameraServicePowerSetOffAndOnWithNoSubscriber(unittest.TestCase):
"""
@class CheckCameraServicePowerSetOffAndOnWithNoSubscriber: check reponse for power operation like
set on/off when camera has no subscriber
"""
def setUp(self):
'''
@fn setUp: make sure camera is powered on
@param self
'''
rospy.init_node(NAME, anonymous=True, log_level=rospy.DEBUG)
time.sleep(10)
output = os.popen("rosservice call /camera/driver/is_powered")
if output.read().find('True') == -1:
self.assertTrue(False, 'Camera is not powered on.')
def test_set_camera_off_and_on_with_no_subscribers(self):
"""
@fn test_set_camera_off_and_on_with_no_subscribers
check camera power can be set off and on when no subscribers exist
@param self
@return
"""
os.system("rosservice call /camera/driver/set_power false")
time.sleep(2)
output = os.popen("rosservice call /camera/driver/is_powered")
self.assertNotEqual(-1, output.read().find('False'))
os.system("rosservice call /camera/driver/set_power true")
time.sleep(2)
output = os.popen("rosservice call /camera/driver/is_powered")
self.assertNotEqual(-1, output.read().find('True'))
self.assertTrue(is_log_contains_keyword(LOGFILE, stop_camera_info))
self.assertTrue(is_log_contains_keyword(LOGFILE, start_camera_info))
if __name__ == '__main__':
param_dict = get_camera_params_and_values(sys.argv)
start_camera_info = param_dict['start_camera_info'].replace('*', ' ')
stop_camera_info = param_dict['stop_camera_info'].replace('*', ' ')
rostest.rosrun(PKG, NAME, CheckCameraServicePowerSetOffAndOnWithNoSubscriber, sys.argv)
| 37.542373 | 101 | 0.71377 | 1,479 | 0.66772 | 0 | 0 | 0 | 0 | 0 | 0 | 936 | 0.422573 |
0f1ecb304df6b9ed35e437846e4e6724371a2aac | 1,141 | py | Python | gardenpi/utils.py | argodev/gardenpi | 452d23a37b1778d5ecf3df654460b1e939e44cd9 | [
"MIT"
] | null | null | null | gardenpi/utils.py | argodev/gardenpi | 452d23a37b1778d5ecf3df654460b1e939e44cd9 | [
"MIT"
] | null | null | null | gardenpi/utils.py | argodev/gardenpi | 452d23a37b1778d5ecf3df654460b1e939e44cd9 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# -*- coding:utf-8 -*-
import logging
import configparser
def load_config(config_file='settings.ini'):
"""
Loads configuration file from disk
"""
logging.info("Loading Configuration Information")
config = configparser.ConfigParser()
config.read(config_file)
return config
def dump_config(config):
"""
Utility function to dump the configuration information
"""
for section in config.sections():
logging.info("** %s **" % section)
for key in config[section]:
logging.info(" %s - %s" % (key, config[section][key]))
def ctof(c):
"""
Utility function to convert Celcius to Faranheit
"""
f = ((c*9.0)/5.0) + 32
return f
def scale_to_percent(val, min, max):
"""
Utility function to scale a given value to a percentage within a range
"""
current = val
# first, ensure that current is within our defined min/max
if val < min:
current = min
elif current > max:
current = max
# now, we scale it to b/t 0 and 1
scaled = (current-min)/(max - min)
return scaled * 100 | 21.942308 | 74 | 0.611744 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 474 | 0.415425 |
0f1f297616083eef96595fea2dbc2238ebb6b129 | 27,206 | py | Python | coherence/upnp/core/device.py | palfrey/Cohen3 | d5779b4cbcf736e12d0ccfd162238ac5c376bb0b | [
"MIT"
] | 60 | 2018-09-14T18:57:38.000Z | 2022-02-19T18:16:24.000Z | coherence/upnp/core/device.py | palfrey/Cohen3 | d5779b4cbcf736e12d0ccfd162238ac5c376bb0b | [
"MIT"
] | 37 | 2018-09-04T08:51:11.000Z | 2022-02-21T01:36:21.000Z | coherence/upnp/core/device.py | palfrey/Cohen3 | d5779b4cbcf736e12d0ccfd162238ac5c376bb0b | [
"MIT"
] | 16 | 2019-02-19T18:34:58.000Z | 2022-02-05T15:36:33.000Z | # Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright (C) 2006 Fluendo, S.A. (www.fluendo.com).
# Copyright 2006, Frank Scholz <coherence@beebits.net>
# Copyright 2018, Pol Canelles <canellestudi@gmail.com>
'''
Devices
=======
This module contains two classes describing UPnP devices.
:class:`Device`
---------------
The base class for all devices.
:class:`RootDevice`
-------------------
A device representing a root device.
'''
import time
from lxml import etree
from eventdispatcher import EventDispatcher, Property, ListProperty
from twisted.internet import defer
from coherence import log
from coherence.upnp.core import utils
from coherence.upnp.core.service import Service
from . import xml_constants
ns = xml_constants.UPNP_DEVICE_NS
class Device(EventDispatcher, log.LogAble):
'''
Represents a UPnP's device, but this is not a root device, it's the base
class used for any device. See :class:`RootDevice` if you want a root
device.
.. versionchanged:: 0.9.0
* Migrated from louie/dispatcher to EventDispatcher
* The emitted events changed:
- Coherence.UPnP.Device.detection_completed =>
device_detection_completed
- Coherence.UPnP.Device.remove_client =>
device_remove_client
* New events: device_service_notified, device_got_client
* Changes some class variables to benefit from the EventDispatcher's
properties:
- :attr:`client`
- :attr:`devices`
- :attr:`services`
- :attr:`client`
- :attr:`detection_completed`
'''
logCategory = 'device'
client = Property(None)
'''
Defined by :class:`~coherence.upnp.devices.controlpoint.ControlPoint`.
It should be one of:
- Initialized instance of a class
:class:`~coherence.upnp.devices.media_server_client.MediaServerClient`
- Initialized instance of a class
:class:`~coherence.upnp.devices.media_renderer_client.MediaRendererClient`
- Initialized instance of a class
:class:`~coherence.upnp.devices.internet_gateway_device_client.InternetGatewayDeviceClient`
Whenever a client is set an event will be sent notifying it by
:meth:`on_client`.
''' # noqa
icons = ListProperty([])
'''A list of the device icons.'''
devices = ListProperty([])
'''A list of the device devices.'''
services = ListProperty([])
'''A list of the device services.'''
detection_completed = Property(False)
'''
To know whenever the device detection has completed. Defaults to `False`
and it will be set automatically to `True` by the class method
:meth:`receiver`.
'''
    def __init__(self, parent=None, udn=None):
        """Initialize the device.

        Args:
            parent: the parent device (for embedded devices), or None
                for a root device.
            udn: the device's unique device name, if already known.
        """
        log.LogAble.__init__(self)
        EventDispatcher.__init__(self)
        # Events other components can subscribe to on this device.
        self.register_event(
            'device_detection_completed',
            'device_remove_client',
            'device_service_notified',
            'device_got_client',
        )
        self.parent = parent
        self.udn = udn
        # self.uid = self.usn[:-len(self.st)-2]
        # Placeholder values; filled in later by parse_device() from the
        # device description XML.
        self.friendly_name = ''
        self.device_type = ''
        self.upnp_version = 'n/a'
        self.friendly_device_type = '[unknown]'
        self.device_type_version = 0
def __repr__(self):
return (
f'embedded device {self.friendly_name} '
+ f'{self.device_type}, parent {self.parent}'
)
# def __del__(self):
# # print('Device removal completed')
# pass
def as_dict(self):
d = {
'device_type': self.get_device_type(),
'friendly_name': self.get_friendly_name(),
'udn': self.get_id(),
'services': [x.as_dict() for x in self.services],
}
icons = []
for icon in self.icons:
icons.append(
{
'mimetype': icon['mimetype'],
'url': icon['url'],
'height': icon['height'],
'width': icon['width'],
'depth': icon['depth'],
}
)
d['icons'] = icons
return d
    def remove(self, *args):
        """Tear this device down: recursively remove all embedded devices
        and all services, then detach the client (emitting
        `device_remove_client` if one was attached).  Always returns True.
        """
        self.info(f'removal of {self.friendly_name} {self.udn}')
        # Pop-and-remove so the lists are emptied as we go.
        while len(self.devices) > 0:
            device = self.devices.pop()
            self.debug(f'try to remove {device}')
            device.remove()
        while len(self.services) > 0:
            service = self.services.pop()
            self.debug(f'try to remove {service}')
            service.remove()
        if self.client is not None:
            # Tell listeners the client for this UDN is gone.
            self.dispatch_event('device_remove_client', self.udn, self.client)
            self.client = None
        # del self
        return True
    def receiver(self, *args, **kwargs):
        """Service-detection completion handler.

        Called whenever a service finishes (or has already finished)
        detection.  Marks the whole device as detected and fires
        `device_detection_completed` only once *every* service has
        completed detection and the UDN is known.
        """
        if self.detection_completed:
            # Already announced — nothing more to do.
            return
        for s in self.services:
            if not s.detection_completed:
                # At least one service still pending; wait for its call.
                return
            self.dispatch_event('device_service_notified', service=s)
        if self.udn is None:
            return
        self.detection_completed = True
        if self.parent is not None:
            self.info(
                f'embedded device {self.friendly_name} '
                + f'{self.device_type} initialized, parent {self.parent}'
            )
        # Embedded devices announce their parent as the completed device.
        if self.parent is not None:
            self.dispatch_event('device_detection_completed', self.parent)
        else:
            self.dispatch_event('device_detection_completed', self)
    def service_detection_failed(self, device):
        """Tear the whole device down when one of its services failed
        detection."""
        self.remove()
    def get_id(self):
        """Return the device UDN (unique device name)."""
        return self.udn

    def get_uuid(self):
        """Return the UDN with the leading 'uuid:' prefix stripped."""
        return self.udn[5:]

    def get_embedded_devices(self):
        """Return the list of embedded child devices."""
        return self.devices
def get_embedded_device_by_type(self, type):
r = []
for device in self.devices:
if type == device.friendly_device_type:
r.append(device)
return r
def get_services(self):
return self.services
def get_service_by_type(self, service_type):
if not isinstance(service_type, (tuple, list)):
service_type = [service_type]
for service in self.services:
_, _, _, service_class, version = service.service_type.split(':')
if service_class in service_type:
return service
    def add_service(self, service):
        '''
        Add a service to the device. Also we check if service already notified,
        and trigger the callback if needed. We also connect the device to
        service in case the service still not completed his detection in order
        that the device knows when the service has completed his detection.

        Args:
            service (object): A service which should be an initialized instance
                of :class:`~coherence.upnp.core.service.Service`
        '''
        self.debug(f'add_service {service}')
        if service.detection_completed:
            # Service already done — run the completion handler now.
            self.receiver(service)
        # Be told when this service's detection completes or fails.
        service.bind(
            service_detection_completed=self.receiver,
            service_detection_failed=self.service_detection_failed,
        )
        self.services.append(service)
# fixme: This fails as Service.get_usn() is not implemented.
    def remove_service_with_usn(self, service_usn):
        """Remove (and tear down) the first service whose USN matches
        *service_usn*, unbinding its detection callbacks first.

        NOTE(review): relies on ``Service.get_usn()``, which the fixme
        above says is not implemented — confirm before relying on this.
        """
        for service in self.services:
            if service.get_usn() == service_usn:
                service.unbind(
                    service_detection_completed=self.receiver,
                    service_detection_failed=self.service_detection_failed,
                )
                self.services.remove(service)
                service.remove()
                break
    def add_device(self, device):
        """Register *device* as an embedded child device."""
        self.debug(f'Device add_device {device}')
        self.devices.append(device)
    def get_friendly_name(self):
        """Return the device's human-readable name."""
        return self.friendly_name

    def get_device_type(self):
        """Return the full UPnP device type URN."""
        return self.device_type

    def get_friendly_device_type(self):
        """Return the short device-type name (without namespace/version)."""
        return self.friendly_device_type

    def get_markup_name(self):
        """Return (and lazily cache) a 'type:version name' display string."""
        try:
            return self._markup_name
        except AttributeError:
            # First call: build and cache the string (EAFP-style caching).
            self._markup_name = (
                f'{self.friendly_device_type}:{self.device_type_version} '
                + f'{self.friendly_name}'
            )
            return self._markup_name

    def get_device_type_version(self):
        """Return the version component of the device type."""
        return self.device_type_version
    def set_client(self, client):
        """Attach a control-point client to this device.

        Assigning :attr:`client` triggers :meth:`on_client` via the
        EventDispatcher property."""
        self.client = client

    def get_client(self):
        """Return the currently attached client (or None)."""
        return self.client

    def on_client(self, *args):
        '''
        Automatically triggered whenever a client is set or changed. Emit
        an event notifying that the client has changed.

        .. versionadded:: 0.9.0
        '''
        self.dispatch_event('device_got_client', self, client=self.client)
    def renew_service_subscriptions(self):
        '''Iterate over the device's services (and embedded devices) and
        renew every event subscription that expires within 30 seconds.'''
        self.info(f'renew service subscriptions for {self.friendly_name}')
        now = time.time()
        for service in self.services:
            self.info(
                f'check service {service.id} {service.get_sid()} '
                + f'{service.get_timeout()} {now}'
            )
            if service.get_sid() is not None:
                if service.get_timeout() < now:
                    # Subscription already expired — should not happen if
                    # the renewal loop timing is right; log it.
                    self.debug(
                        f'wow, we lost an event subscription for '
                        + f'{self.friendly_name} {service.get_id()}, '
                        + f'maybe we need to rethink the loop time and '
                        + f'timeout calculation?'
                    )
                if service.get_timeout() < now + 30:
                    # Renew anything expiring within the next 30 seconds.
                    service.renew_subscription()

        # Recurse into embedded devices.
        for device in self.devices:
            device.renew_service_subscriptions()
def unsubscribe_service_subscriptions(self):
'''Iterate over device's services and unsubscribe subscriptions '''
sl = []
for service in self.get_services():
if service.get_sid() is not None:
sl.append(service.unsubscribe())
dl = defer.DeferredList(sl)
return dl
    def parse_device(self, d):
        """Populate this device's attributes from a ``<device>`` element
        *d* of a UPnP device description document.

        Parses identity fields, optional vendor metadata, DLNA flags,
        icons, services and embedded sub-devices, then calls
        ``self.receiver()`` to signal that parsing finished.
        """
        self.info(f'parse_device {d}')
        # Mandatory identity fields.
        self.device_type = d.findtext(f'./{{{ns}}}deviceType')
        # deviceType ends in ":<friendly type>:<version>".
        (
            self.friendly_device_type, self.device_type_version,
        ) = self.device_type.split(':')[-2:]
        self.friendly_name = d.findtext(f'./{{{ns}}}friendlyName')
        self.udn = d.findtext(f'./{{{ns}}}UDN')
        self.info(f'found udn {self.udn} {self.friendly_name}')
        # Optional vendor metadata: each lookup is best-effort, a failure
        # simply leaves the attribute unset.
        try:
            self.manufacturer = d.findtext(f'./{{{ns}}}manufacturer')
        except Exception:
            pass
        try:
            self.manufacturer_url = d.findtext(f'./{{{ns}}}manufacturerURL')
        except Exception:
            pass
        try:
            self.model_name = d.findtext(f'./{{{ns}}}modelName')
        except Exception:
            pass
        try:
            self.model_description = d.findtext(f'./{{{ns}}}modelDescription')
        except Exception:
            pass
        try:
            self.model_number = d.findtext(f'./{{{ns}}}modelNumber')
        except Exception:
            pass
        try:
            self.model_url = d.findtext(f'./{{{ns}}}modelURL')
        except Exception:
            pass
        try:
            self.serial_number = d.findtext(f'./{{{ns}}}serialNumber')
        except Exception:
            pass
        try:
            self.upc = d.findtext(f'./{{{ns}}}UPC')
        except Exception:
            pass
        try:
            self.presentation_url = d.findtext(f'./{{{ns}}}presentationURL')
        except Exception:
            pass
        # DLNA device class entries; the list attribute is created lazily
        # on first append (AttributeError path).
        try:
            for dlna_doc in d.findall(
                './{urn:schemas-dlna-org:device-1-0}X_DLNADOC'
            ):
                try:
                    self.dlna_dc.append(dlna_doc.text)
                except AttributeError:
                    self.dlna_dc = []
                    self.dlna_dc.append(dlna_doc.text)
        except Exception:
            pass
        # DLNA capabilities come as one comma-separated list per element;
        # the list attribute is likewise created lazily.
        try:
            for dlna_cap in d.findall(
                './{urn:schemas-dlna-org:device-1-0}X_DLNACAP'
            ):
                for cap in dlna_cap.text.split(','):
                    try:
                        self.dlna_cap.append(cap)
                    except AttributeError:
                        self.dlna_cap = []
                        self.dlna_cap.append(cap)
        except Exception:
            pass
        # Device icons: collect image metadata and resolve the icon URL
        # against the device location.
        icon_list = d.find(f'./{{{ns}}}iconList')
        if icon_list is not None:
            from urllib.parse import urlparse
            # NOTE(review): url_base is computed here but never used below.
            url_base = '%s://%s' % urlparse(self.get_location())[:2]
            for icon in icon_list.findall(f'./{{{ns}}}icon'):
                try:
                    i = {}
                    i['mimetype'] = icon.find(f'./{{{ns}}}mimetype').text
                    i['width'] = icon.find(f'./{{{ns}}}width').text
                    i['height'] = icon.find(f'./{{{ns}}}height').text
                    i['depth'] = icon.find(f'./{{{ns}}}depth').text
                    i['realurl'] = icon.find(f'./{{{ns}}}url').text
                    i['url'] = self.make_fullyqualified(i['realurl']).decode(
                        'utf-8'
                    )
                    self.icons.append(i)
                    self.debug(f'adding icon {i} for {self.friendly_name}')
                except Exception as e:
                    import traceback
                    self.debug(traceback.format_exc())
                    self.warning(
                        f'device {self.friendly_name} seems to have an invalid'
                        + f' icon description, ignoring that icon [error: {e}]'
                    )
        # Services: a service without description, eventing or control URI
        # is skipped.
        serviceList = d.find(f'./{{{ns}}}serviceList')
        if serviceList is not None:
            for service in serviceList.findall(f'./{{{ns}}}service'):
                serviceType = service.findtext(f'{{{ns}}}serviceType')
                serviceId = service.findtext(f'{{{ns}}}serviceId')
                controlUrl = service.findtext(f'{{{ns}}}controlURL')
                eventSubUrl = service.findtext(f'{{{ns}}}eventSubURL')
                presentationUrl = service.findtext(f'{{{ns}}}presentationURL')
                scpdUrl = service.findtext(f'{{{ns}}}SCPDURL')
                # check if values are somehow reasonable
                if len(scpdUrl) == 0:
                    self.warning('service has no uri for its description')
                    continue
                if len(eventSubUrl) == 0:
                    self.warning('service has no uri for eventing')
                    continue
                if len(controlUrl) == 0:
                    self.warning('service has no uri for controling')
                    continue
                try:
                    self.add_service(
                        Service(
                            serviceType,
                            serviceId,
                            self.get_location(),
                            controlUrl,
                            eventSubUrl,
                            presentationUrl,
                            scpdUrl,
                            self,
                        )
                    )
                except Exception as e:
                    self.error(
                        f'Error on adding service: {service} [ERROR: {e}]'
                    )
        # now look for all sub devices
        # NOTE(review): the loop below rebinds *d*, shadowing the method
        # parameter; harmless because *d* is not used afterwards.
        embedded_devices = d.find(f'./{{{ns}}}deviceList')
        if embedded_devices is not None:
            for d in embedded_devices.findall(f'./{{{ns}}}device'):
                embedded_device = Device(self)
                self.add_device(embedded_device)
                embedded_device.parse_device(d)
        self.receiver()
    def get_location(self):
        # Delegates to the owning parent / root device.
        return self.parent.get_location()
    def get_usn(self):
        return self.parent.get_usn()
    def get_upnp_version(self):
        return self.parent.get_upnp_version()
    def get_urlbase(self):
        return self.parent.get_urlbase()
    def get_presentation_url(self):
        """Return the fully qualified presentation URL, or '' if unset."""
        try:
            return self.make_fullyqualified(self.presentation_url)
        except Exception:
            return ''
    def get_parent_id(self):
        """Return the parent's id, or '' if there is no usable parent."""
        try:
            return self.parent.get_id()
        except Exception:
            return ''
    def make_fullyqualified(self, url):
        # The actual URL resolution is implemented on the root device.
        return self.parent.make_fullyqualified(url)
    def as_tuples(self):
        """Return the device's metadata as a list of ``(label, value)``
        tuples for display.

        Values are plain strings or ``(raw, resolved)`` pairs; icon
        entries additionally carry a dict of image properties.  Every
        lookup is wrapped so a missing attribute never aborts the listing.
        """
        r = []
        def append(name, attribute):
            # Resolve *attribute* — an attribute name, a callable, or a
            # 2-tuple of either — and append (name, value) to *r*.
            # Values of None / 'None' are skipped.
            try:
                if isinstance(attribute, tuple):
                    if callable(attribute[0]):
                        v1 = attribute[0]()
                    else:
                        v1 = getattr(self, attribute[0])
                    if v1 in [None, 'None']:
                        return
                    if callable(attribute[1]):
                        v2 = attribute[1]()
                    else:
                        v2 = getattr(self, attribute[1])
                    if v2 in [None, 'None']:
                        return
                    r.append((name, (v1, v2)))
                    return
                elif callable(attribute):
                    v = attribute()
                else:
                    v = getattr(self, attribute)
                if v not in [None, 'None']:
                    r.append((name, v))
            except Exception as e:
                self.error(f'Device.as_tuples: {e}')
                import traceback
                self.debug(traceback.format_exc())
        # Each entry below is best-effort; a missing attribute is ignored.
        try:
            r.append(('Location', (self.get_location(), self.get_location())))
        except Exception:
            pass
        try:
            append('URL base', self.get_urlbase)
        except Exception:
            pass
        try:
            r.append(('UDN', self.get_id()))
        except Exception:
            pass
        try:
            r.append(('Type', self.device_type))
        except Exception:
            pass
        try:
            r.append(('UPnP Version', self.upnp_version))
        except Exception:
            pass
        try:
            r.append(('DLNA Device Class', ','.join(self.dlna_dc)))
        except Exception:
            pass
        try:
            r.append(('DLNA Device Capability', ','.join(self.dlna_cap)))
        except Exception:
            pass
        try:
            r.append(('Friendly Name', self.friendly_name))
        except Exception:
            pass
        try:
            append('Manufacturer', 'manufacturer')
        except Exception:
            pass
        try:
            append(
                'Manufacturer URL', ('manufacturer_url', 'manufacturer_url')
            )
        except Exception:
            pass
        try:
            append('Model Description', 'model_description')
        except Exception:
            pass
        try:
            append('Model Name', 'model_name')
        except Exception:
            pass
        try:
            append('Model Number', 'model_number')
        except Exception:
            pass
        try:
            append('Model URL', ('model_url', 'model_url'))
        except Exception:
            pass
        try:
            append('Serial Number', 'serial_number')
        except Exception:
            pass
        try:
            append('UPC', 'upc')
        except Exception:
            pass
        try:
            append(
                'Presentation URL',
                (
                    'presentation_url',
                    lambda: self.make_fullyqualified(
                        getattr(self, 'presentation_url')
                    ),
                ),
            )
        except Exception:
            pass
        # Icons carry (raw url, resolved url, properties dict).
        for icon in self.icons:
            r.append(
                (
                    'Icon',
                    (
                        icon['realurl'],
                        self.make_fullyqualified(icon['realurl']),
                        {
                            'Mimetype': icon['mimetype'],
                            'Width': icon['width'],
                            'Height': icon['height'],
                            'Depth': icon['depth'],
                        },
                    ),
                )
            )
        return r
class RootDevice(Device):
    '''
    Description for a root device.

    .. versionchanged:: 0.9.0
        * Migrated from louie/dispatcher to EventDispatcher
        * The emitted events changed:
            - Coherence.UPnP.RootDevice.detection_completed =>
              root_device_detection_completed
            - Coherence.UPnP.RootDevice.removed => root_device_removed
    '''
    root_detection_completed = Property(False)
    '''
    To know whenever the root device detection has completed. Defaults to
    `False` and it will be set automatically to `True` by the class method
    :meth:`device_detect`.
    '''

    def __init__(self, infos):
        # *infos* is the discovery notification dict for this root device.
        self.usn = infos['USN']
        self.udn = infos.get('UDN', '')
        self.server = infos['SERVER']
        self.st = infos['ST']
        self.location = infos['LOCATION']
        self.manifestation = infos['MANIFESTATION']
        self.host = infos['HOST']
        Device.__init__(self, None)
        self.register_event(
            'root_device_detection_completed', 'root_device_removed'
        )
        # We need to handle root device completion; detection events could
        # come from our self or our children.
        self.bind(detection_completed=self.device_detect)
        self.parse_description()
        self.debug(f'RootDevice initialized: {self.location}')

    def __repr__(self):
        return (
            f'rootdevice {self.friendly_name} {self.udn} {self.st} '
            f'{self.host}, manifestation {self.manifestation}'
        )

    def remove(self, *args):
        """Remove this device and announce it via `root_device_removed`."""
        result = Device.remove(self, *args)
        self.dispatch_event('root_device_removed', self, usn=self.get_usn())
        return result

    def get_usn(self):
        return self.usn

    def get_st(self):
        return self.st

    def get_location(self):
        """Return the device location as bytes (or None when unset)."""
        return (
            self.location
            if isinstance(self.location, bytes)
            else self.location.encode('ascii')
            if self.location
            else None
        )

    def get_upnp_version(self):
        return self.upnp_version

    def get_urlbase(self):
        """Return the URLBase as bytes (or None when unset)."""
        return (
            self.urlbase
            if isinstance(self.urlbase, bytes)
            else self.urlbase.encode('ascii')
            if self.urlbase
            else None
        )

    def get_host(self):
        return self.host

    def is_local(self):
        return self.manifestation == 'local'

    def is_remote(self):
        return self.manifestation != 'local'

    def device_detect(self, *args, **kwargs):
        '''
        This method is automatically triggered whenever the property of the
        base class :attr:`Device.detection_completed` is set to `True`. Here we
        perform some more operations, before the :class:`RootDevice` emits
        an event notifying that the root device detection has completed.
        '''
        self.debug(f'device_detect {kwargs}')
        self.debug(f'root_detection_completed {self.root_detection_completed}')
        if self.root_detection_completed:
            # Already announced once; nothing to do.
            return
        self.debug(f'detection_completed {self.detection_completed}')
        if not self.detection_completed:
            # Our self is not complete yet.
            return
        # Now check child devices.
        self.debug(f'self.devices {self.devices}')
        for d in self.devices:
            self.debug(f'check device {d.detection_completed} {d}')
            if not d.detection_completed:
                return
        # All done — announce root completion exactly once.
        self.root_detection_completed = True
        self.info(
            f'rootdevice {self.friendly_name} {self.st} {self.host} '
            + f'initialized, manifestation {self.manifestation}'
        )
        self.dispatch_event('root_device_detection_completed', self)

    def add_device(self, device):
        """Register an embedded (child) device."""
        self.debug(f'RootDevice add_device {device}')
        self.devices.append(device)

    def get_devices(self):
        self.debug(f'RootDevice get_devices: {self.devices}')
        return self.devices

    def parse_description(self):
        """Fetch the device description XML from ``self.location`` and
        populate this device (and its children) from it."""

        def gotPage(x):
            # Success callback: x is a (data, headers) pair.
            self.debug(f'got device description from {self.location}')
            self.debug(f'data is {x}')
            data, headers = x
            xml_data = None
            try:
                xml_data = etree.fromstring(data)
            except Exception:
                self.warning(
                    f'Invalid device description received from {self.location}'
                )
                import traceback
                self.debug(traceback.format_exc())
            if xml_data is not None:
                tree = xml_data
                major = tree.findtext(f'./{{{ns}}}specVersion/{{{ns}}}major')
                minor = tree.findtext(f'./{{{ns}}}specVersion/{{{ns}}}minor')
                try:
                    self.upnp_version = '.'.join((major, minor))
                except Exception:
                    # specVersion missing or malformed.
                    self.upnp_version = 'n/a'
                try:
                    self.urlbase = tree.findtext(f'./{{{ns}}}URLBase')
                except Exception:
                    import traceback
                    self.debug(traceback.format_exc())
                d = tree.find(f'./{{{ns}}}device')
                if d is not None:
                    self.parse_device(d)  # root device
                self.debug(f'device parsed successfully {self.location}')

        def gotError(failure, url):
            self.warning(f'error getting device description from {url}')
            self.info(failure)

        try:
            utils.getPage(self.location).addCallbacks(
                gotPage, gotError, None, None, [self.location], None
            )
        except Exception as e:
            self.error(f'Error on parsing device description: {e}')

    def make_fullyqualified(self, url):
        '''Resolve *url* against this device's URL base (or, failing that,
        its location).

        Be aware that this function returns a byte string.
        '''
        self.info(f'make_fullyqualified: {url} [{type(url)}]')
        if isinstance(url, str):
            url = url.encode('ascii')
        if url.startswith(b'http://'):
            # Already absolute.
            return url
        from urllib.parse import urljoin
        base = self.get_urlbase()
        if isinstance(base, str):
            base = base.encode('ascii')
        if base is not None:
            # BUGFIX: the original tested ``base[-1] != b'/'``; indexing a
            # bytes object yields an int, so the comparison was always true
            # and a slash was appended even when one was already present
            # (and an empty base raised IndexError).
            if not base.endswith(b'/'):
                base += b'/'
            return urljoin(base, url)
        loc = self.get_location()
        if isinstance(loc, str):
            loc = loc.encode('ascii')
        return urljoin(loc, url)
| 33.057108 | 101 | 0.532309 | 26,407 | 0.970631 | 0 | 0 | 0 | 0 | 0 | 0 | 8,311 | 0.305484 |
0f1f722d095e171acc248a140ec608fb9b0bb1d0 | 1,063 | py | Python | imdb_dataloader.py | garyCC227/cs9444 | 078d88ea0cb15f511d74c59effd12e361f1aed4e | [
"MIT"
] | null | null | null | imdb_dataloader.py | garyCC227/cs9444 | 078d88ea0cb15f511d74c59effd12e361f1aed4e | [
"MIT"
] | null | null | null | imdb_dataloader.py | garyCC227/cs9444 | 078d88ea0cb15f511d74c59effd12e361f1aed4e | [
"MIT"
] | null | null | null | """
DO NOT MODIFY
Dataloder for parts 2 and 3
We will also call this file when loading test data
"""
import os
import glob
import io
from torchtext import data
class IMDB(data.Dataset):
    """torchtext Dataset over the aclImdb corpus layout.

    Each example is the first line of a review file, labelled 'pos' or
    'neg' according to the sub-directory it was read from.
    """
    name = 'imdb'
    dirname = 'aclImdb'
    def __init__(self, path, text_field, label_field, **kwargs):
        # One field for the review text, one for the sentiment label.
        fields = [('text', text_field), ('label', label_field)]
        examples = []
        for label in ['pos', 'neg']:
            for fname in glob.iglob(os.path.join(path, label, '*.txt')):
                with io.open(fname, 'r', encoding="utf-8") as f:
                    text = f.readline()
                examples.append(data.Example.fromlist([text, label], fields))
        super(IMDB, self).__init__(examples, fields, **kwargs)
    @classmethod
    def splits(cls, text_field, label_field, root='data',
               train=None, test=None, validation=None, **kwargs):
        # Delegate to torchtext's split loader, which combines `root`,
        # `dirname` and the split names to locate each subset.
        return super(IMDB, cls).splits(
            root=root, text_field=text_field, label_field=label_field,
            train=train, validation=validation, test=test, **kwargs)
| 30.371429 | 77 | 0.613358 | 899 | 0.84572 | 0 | 0 | 316 | 0.297272 | 0 | 0 | 161 | 0.151458 |
0f1fdc9583dd36bad5225aaff9eca13e2d53bb4b | 825 | py | Python | nfl/migrations/0007_player_position.py | rwflick/djangoXNFLDemo | 825072b25b9b33eba9687d7ec358d59c7706a16f | [
"MIT"
] | 1 | 2020-09-14T16:43:33.000Z | 2020-09-14T16:43:33.000Z | nfl/migrations/0007_player_position.py | rwflick/djangoXNFLDemo | 825072b25b9b33eba9687d7ec358d59c7706a16f | [
"MIT"
] | null | null | null | nfl/migrations/0007_player_position.py | rwflick/djangoXNFLDemo | 825072b25b9b33eba9687d7ec358d59c7706a16f | [
"MIT"
] | null | null | null | # Generated by Django 3.0.5 on 2020-09-13 19:49
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the nullable `position` choice field to the Player model.
    dependencies = [
        ('nfl', '0006_player'),
    ]
    operations = [
        migrations.AddField(
            model_name='player',
            name='position',
            field=models.CharField(choices=[('QB', 'Quarterback'), ('RB', 'Running Back'), ('FB', 'Fullback'), ('WR', 'Wide Receiver'), ('TE', 'Tight End'), ('C', 'Center'), ('OT', 'Offensive Tackle'), ('OG', 'Offensive Guard'), ('DE', 'Defensive End'), ('DT', 'Defensive Tackle'), ('LB', 'Line Backer'), ('DB', 'Defensive Back'), ('CB', 'Cornerback'), ('S', 'Safety'), ('K', 'Kicker'), ('P', 'Punter'), ('LS', 'Long Snapper'), ('KR', 'Kick Returner'), ('PR', 'Punt Returner')], max_length=25, null=True),
        ),
    ]
| 43.421053 | 505 | 0.56 | 732 | 0.887273 | 0 | 0 | 0 | 0 | 0 | 0 | 403 | 0.488485 |
0f21ea6cca6377a0bd8bcf855a84161050071410 | 5,954 | py | Python | src/commercetools/services/shopping_lists.py | jeroenubbink/commercetools-python-sdk | ee27768d6fdde3e12618059891d1d4f75dd61390 | [
"MIT"
] | null | null | null | src/commercetools/services/shopping_lists.py | jeroenubbink/commercetools-python-sdk | ee27768d6fdde3e12618059891d1d4f75dd61390 | [
"MIT"
] | null | null | null | src/commercetools/services/shopping_lists.py | jeroenubbink/commercetools-python-sdk | ee27768d6fdde3e12618059891d1d4f75dd61390 | [
"MIT"
] | null | null | null | # DO NOT EDIT! This file is automatically generated
import typing
from commercetools._schemas._shopping_list import (
ShoppingListDraftSchema,
ShoppingListPagedQueryResponseSchema,
ShoppingListSchema,
ShoppingListUpdateSchema,
)
from commercetools.helpers import RemoveEmptyValuesMixin
from commercetools.types._shopping_list import (
ShoppingList,
ShoppingListDraft,
ShoppingListPagedQueryResponse,
ShoppingListUpdate,
ShoppingListUpdateAction,
)
from commercetools.typing import OptionalListStr
from . import abstract, traits
class _ShoppingListQuerySchema(
    traits.ExpandableSchema,
    traits.SortableSchema,
    traits.PagingSchema,
    traits.QuerySchema,
):
    # Request-parameter schema for shopping-list queries: expand, sort,
    # paging and where/predicate support.
    pass
class _ShoppingListUpdateSchema(traits.ExpandableSchema, traits.VersionedSchema):
    # Request-parameter schema for update calls: expand + version.
    pass
class _ShoppingListDeleteSchema(
    traits.VersionedSchema, traits.ExpandableSchema, traits.DataErasureSchema
):
    # Request-parameter schema for delete calls: version, expand and
    # GDPR data-erasure flag.
    pass
class ShoppingListService(abstract.AbstractService):
"""shopping-lists e.
g. for wishlist support
"""
def get_by_id(self, id: str, *, expand: OptionalListStr = None) -> ShoppingList:
"""Gets a shopping list by ID."""
params = self._serialize_params({"expand": expand}, traits.ExpandableSchema)
return self._client._get(
endpoint=f"shopping-lists/{id}",
params=params,
schema_cls=ShoppingListSchema,
)
def get_by_key(self, key: str, *, expand: OptionalListStr = None) -> ShoppingList:
"""Gets a shopping list by Key."""
params = self._serialize_params({"expand": expand}, traits.ExpandableSchema)
return self._client._get(
endpoint=f"shopping-lists/key={key}",
params=params,
schema_cls=ShoppingListSchema,
)
def query(
self,
*,
expand: OptionalListStr = None,
sort: OptionalListStr = None,
limit: int = None,
offset: int = None,
with_total: bool = None,
where: OptionalListStr = None,
predicate_var: typing.Dict[str, str] = None,
) -> ShoppingListPagedQueryResponse:
"""shopping-lists e.g. for wishlist support
"""
params = self._serialize_params(
{
"expand": expand,
"sort": sort,
"limit": limit,
"offset": offset,
"withTotal": with_total,
"where": where,
"predicate_var": predicate_var,
},
_ShoppingListQuerySchema,
)
return self._client._get(
endpoint="shopping-lists",
params=params,
schema_cls=ShoppingListPagedQueryResponseSchema,
)
def create(
self, draft: ShoppingListDraft, *, expand: OptionalListStr = None
) -> ShoppingList:
"""shopping-lists e.g. for wishlist support
"""
params = self._serialize_params({"expand": expand}, traits.ExpandableSchema)
return self._client._post(
endpoint="shopping-lists",
params=params,
data_object=draft,
request_schema_cls=ShoppingListDraftSchema,
response_schema_cls=ShoppingListSchema,
)
def update_by_id(
self,
id: str,
version: int,
actions: typing.List[ShoppingListUpdateAction],
*,
expand: OptionalListStr = None,
force_update: bool = False,
) -> ShoppingList:
params = self._serialize_params({"expand": expand}, _ShoppingListUpdateSchema)
update_action = ShoppingListUpdate(version=version, actions=actions)
return self._client._post(
endpoint=f"shopping-lists/{id}",
params=params,
data_object=update_action,
request_schema_cls=ShoppingListUpdateSchema,
response_schema_cls=ShoppingListSchema,
force_update=force_update,
)
def update_by_key(
self,
key: str,
version: int,
actions: typing.List[ShoppingListUpdateAction],
*,
expand: OptionalListStr = None,
force_update: bool = False,
) -> ShoppingList:
"""Update a shopping list found by its Key."""
params = self._serialize_params({"expand": expand}, _ShoppingListUpdateSchema)
update_action = ShoppingListUpdate(version=version, actions=actions)
return self._client._post(
endpoint=f"shopping-lists/key={key}",
params=params,
data_object=update_action,
request_schema_cls=ShoppingListUpdateSchema,
response_schema_cls=ShoppingListSchema,
force_update=force_update,
)
def delete_by_id(
self,
id: str,
version: int,
*,
expand: OptionalListStr = None,
data_erasure: bool = None,
force_delete: bool = False,
) -> ShoppingList:
params = self._serialize_params(
{"version": version, "expand": expand, "dataErasure": data_erasure},
_ShoppingListDeleteSchema,
)
return self._client._delete(
endpoint=f"shopping-lists/{id}",
params=params,
response_schema_cls=ShoppingListSchema,
force_delete=force_delete,
)
def delete_by_key(
self,
key: str,
version: int,
*,
expand: OptionalListStr = None,
data_erasure: bool = None,
force_delete: bool = False,
) -> ShoppingList:
params = self._serialize_params(
{"version": version, "expand": expand, "dataErasure": data_erasure},
_ShoppingListDeleteSchema,
)
return self._client._delete(
endpoint=f"shopping-lists/key={key}",
params=params,
response_schema_cls=ShoppingListSchema,
force_delete=force_delete,
)
| 31.172775 | 86 | 0.615721 | 5,376 | 0.902922 | 0 | 0 | 0 | 0 | 0 | 0 | 672 | 0.112865 |
0f23e92f67f8a8aa91e1f9b314ed49fa3fdd3f82 | 3,591 | py | Python | ui/tools.py | liyao001/BioQueue | 2b2c9f023b988fd926a037eb4755f639632b2991 | [
"Apache-2.0"
] | 33 | 2017-03-12T16:26:45.000Z | 2021-04-30T05:37:35.000Z | ui/tools.py | liyao001/BioQueue | 2b2c9f023b988fd926a037eb4755f639632b2991 | [
"Apache-2.0"
] | 6 | 2017-04-21T08:44:47.000Z | 2018-11-11T16:20:22.000Z | ui/tools.py | liyao001/BioQueue | 2b2c9f023b988fd926a037eb4755f639632b2991 | [
"Apache-2.0"
] | 13 | 2017-03-12T16:26:56.000Z | 2020-04-20T05:35:00.000Z | from django.http import JsonResponse, StreamingHttpResponse
from worker.bases import get_config, rand_sig, get_user_folder_size
from django.core.paginator import EmptyPage, PageNotAnInteger
import os
def build_json_protocol(protocol):
    """Serialize a protocol dict to a JSON string.

    :param protocol: dict describing the protocol (must be JSON-serializable).
    :return: str, the JSON representation.
    """
    # The obsolete StreamingHttpResponse attachment snippet that used to sit
    # here as a no-op bare string expression has been removed.
    import json
    return json.dumps(protocol)
def build_json_reference(ref):
    """Wrap every reference name in braces and return them comma-joined
    as a JSON response, e.g. ['a', 'b'] -> "{a},{b}"."""
    wrapped = ['{' + value + '}' for value in ref]
    return JsonResponse(','.join(wrapped), safe=False)
def check_user_existence(username):
    """Return the primary key of the user named *username*, or 0 if absent."""
    from django.contrib.auth.models import User
    try:
        return User.objects.get(username=username).id
    except User.DoesNotExist:
        # Narrowed from a bare `except Exception`: real database errors
        # should propagate instead of being mistaken for a missing user.
        return 0
def check_disk_quota_lock(user):
    """Return 1 when *user* may still write data, 0 when the quota is full.

    A missing/empty `disk_quota` setting means quotas are disabled.
    """
    quota = get_config('env', 'disk_quota')
    if not quota:
        return 1
    return 1 if get_user_folder_size(user) < int(quota) else 0
def delete_file(file_path):
    """Remove *file_path*, reporting the outcome as a JSON response.

    Uses EAFP (`os.remove` + FileNotFoundError) instead of the original
    racy exists()/remove() pair, and drops the redundant function-level
    `import os` — the module already imports os at the top.
    """
    try:
        os.remove(file_path)
    except FileNotFoundError:
        return error('File can not be found.')
    except Exception as e:
        return error(e)
    return success('Deleted')
def get_disk_quota_info(user):
    """Return (quota, used, percent-used) for *user*'s folder.

    Falls back to (0, 0, 0) when the quota is unset or unreadable.
    """
    try:
        total = int(get_config('env', 'disk_quota'))
        used = get_user_folder_size(user)
        percent = int(round(used / total * 100))
    except Exception as e:
        print(e)
        total = used = percent = 0
    return total, used, percent
def get_maintenance_protocols():
    """
    Get maintenance protocols
    :return: list, module names
    """
    folder = os.path.join(
        os.path.split(os.path.realpath(__file__))[0], 'maintenance_protocols'
    )
    return [
        entry.replace('.py', '')
        for entry in os.listdir(folder)
        if entry.endswith('.py')
        and not entry.startswith('_')
        and not entry.startswith('maintenance')
    ]
def error(message, jump_url='.', msg_title="error", status=0, wait_second=3):
    """Build the standard BioQueue error JSON response."""
    payload = {
        'msg_title': msg_title,
        'info': str(message),
        'url': jump_url,
        'status': status,
        'wait_second': wait_second,
    }
    return JsonResponse(payload)
def handle_uploaded_file(f):
    """Store an uploaded batch-job file under a random name and return its path.

    :param f: uploaded-file object providing `chunks()` (Django UploadedFile).
    :return: str, path of the stored file.
    """
    # Dropped the redundant function-level `import os`; the module imports
    # os at the top.
    file_name = os.path.join(get_config('env', 'batch_job'), rand_sig() + '.txt')
    with open(file_name, 'wb+') as destination:
        for chunk in f.chunks():
            destination.write(chunk)
    return file_name
def os_to_int():
    """Encode the host platform as BioQueue's integer code.

    Linux -> 1, macOS (Darwin) -> 3, anything else (e.g. Windows) -> 2.
    """
    import platform
    system_name = platform.system()
    if system_name == 'Linux':
        return 1
    if system_name == 'Darwin':
        return 3
    return 2
def page_info(page_model, page):
try:
items = page_model.page(page)
except PageNotAnInteger:
items = page_model.page(1)
except EmptyPage:
items = page_model.page(page_model.num_pages)
return items
def success(message, jump_url='.', msg_title="success", status=1, wait_second=1):
    """Build the standard BioQueue success JSON response."""
    payload = {
        'msg_title': msg_title,
        'info': message,
        'url': jump_url,
        'status': status,
        'wait_second': wait_second,
    }
    return JsonResponse(payload)
| 27.623077 | 112 | 0.656085 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 596 | 0.16597 |
0f255e895daccb92929c3b545a2a06d8d36e7091 | 18,246 | py | Python | data/data-pipeline/data_pipeline/etl/sources/census_decennial/etl.py | usds/justice40-tool | 87e08f5fe1d36b50b176a102c050343cfca199ba | [
"CC0-1.0"
] | 59 | 2021-05-10T21:43:36.000Z | 2022-03-30T17:57:17.000Z | data/data-pipeline/data_pipeline/etl/sources/census_decennial/etl.py | usds/justice40-tool | 87e08f5fe1d36b50b176a102c050343cfca199ba | [
"CC0-1.0"
] | 1,259 | 2021-05-10T18:21:26.000Z | 2022-03-31T21:35:49.000Z | data/data-pipeline/data_pipeline/etl/sources/census_decennial/etl.py | usds/justice40-tool | 87e08f5fe1d36b50b176a102c050343cfca199ba | [
"CC0-1.0"
] | 24 | 2021-05-15T00:58:39.000Z | 2022-03-24T23:18:17.000Z | import json
import requests
import numpy as np
import pandas as pd
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.utils import get_module_logger
from data_pipeline.score import field_names
pd.options.mode.chained_assignment = "raise"
logger = get_module_logger(__name__)
class CensusDecennialETL(ExtractTransformLoad):
def __init__(self):
self.DECENNIAL_YEAR = 2010
self.OUTPUT_PATH = (
self.DATA_PATH
/ "dataset"
/ f"census_decennial_{self.DECENNIAL_YEAR}"
)
# Income Fields
# AS, GU, and MP all share the same variable names, but VI is different
# https://api.census.gov/data/2010/dec/as.html
# https://api.census.gov/data/2010/dec/gu/variables.html
# https://api.census.gov/data/2010/dec/mp/variables.html
# https://api.census.gov/data/2010/dec/vi/variables.html
# Total population field is the same in all island areas
self.TOTAL_POP_FIELD = self.TOTAL_POP_VI_FIELD = "P001001"
self.TOTAL_POP_FIELD_NAME = "Total population in 2009"
self.MEDIAN_INCOME_FIELD = "PBG049001"
self.MEDIAN_INCOME_VI_FIELD = "PBG047001"
self.MEDIAN_INCOME_FIELD_NAME = "Median household income in 2009 ($)"
self.AREA_MEDIAN_INCOME_FIELD_NAME = (
"Median household income as a percent of "
"territory median income in 2009"
)
self.TERRITORY_MEDIAN_INCOME_FIELD = "Territory Median Income"
self.TOTAL_HOUSEHOLD_RATIO_INCOME_TO_POVERTY_LEVEL_FIELD = "PBG083001"
self.TOTAL_HOUSEHOLD_RATIO_INCOME_TO_POVERTY_LEVEL_VI_FIELD = (
"PBG077001"
)
self.TOTAL_HOUSEHOLD_RATIO_INCOME_TO_POVERTY_LEVEL_FIELD_NAME = (
"TOTAL; RATIO OF INCOME TO POVERTY LEVEL IN 2009"
)
self.HOUSEHOLD_OVER_200_PERC_POVERTY_LEVEL_FIELD = "PBG083010"
self.HOUSEHOLD_OVER_200_PERC_POVERTY_LEVEL_VI_FIELD = "PBG077010"
self.HOUSEHOLD_OVER_200_PERC_POVERTY_LEVEL_FIELD_NAME = (
"Total!!2.00 and over; RATIO OF INCOME TO POVERTY LEVEL IN 2009"
)
self.PERCENTAGE_HOUSEHOLDS_BELOW_200_PERC_POVERTY_LEVEL_FIELD_NAME = (
"Percentage households below 200% of federal poverty line in 2009"
)
# We will combine three fields to get households < 100% FPL.
self.HOUSEHOLD_UNDER_100_PERC_POVERTY_LEVEL_FIELD_PART_ONE = (
"PBG083002" # Total!!Under .50
)
self.HOUSEHOLD_UNDER_100_PERC_POVERTY_LEVEL_FIELD_PART_TWO = (
"PBG083003" # Total!!.50 to .74
)
self.HOUSEHOLD_UNDER_100_PERC_POVERTY_LEVEL_FIELD_PART_THREE = (
"PBG083004" # Total!!.75 to .99
)
# Same fields, for Virgin Islands.
self.HOUSEHOLD_UNDER_100_PERC_POVERTY_LEVEL_FIELD_VI_PART_ONE = (
"PBG077002" # Total!!Under .50
)
self.HOUSEHOLD_UNDER_100_PERC_POVERTY_LEVEL_FIELD_VI_PART_TWO = (
"PBG077003" # Total!!.50 to .74
)
self.HOUSEHOLD_UNDER_100_PERC_POVERTY_LEVEL_FIELD_VI_PART_THREE = (
"PBG077004" # Total!!.75 to .99
)
self.HOUSEHOLD_OVER_200_PERC_POVERTY_LEVEL_FIELD = "PBG083010"
self.HOUSEHOLD_OVER_200_PERC_POVERTY_LEVEL_VI_FIELD = "PBG077010"
self.HOUSEHOLD_OVER_200_PERC_POVERTY_LEVEL_FIELD_NAME = (
"Total!!2.00 and over; RATIO OF INCOME TO POVERTY LEVEL IN 2009"
)
self.PERCENTAGE_HOUSEHOLDS_BELOW_100_PERC_POVERTY_LEVEL_FIELD_NAME = (
"Percentage households below 100% of federal poverty line in 2009"
)
# High School Education Fields
self.TOTAL_POPULATION_FIELD = "PBG026001"
self.TOTAL_POPULATION_VI_FIELD = "PCT032001"
self.TOTAL_POPULATION_FIELD_NAME = "Total; SEX BY EDUCATIONAL ATTAINMENT FOR THE POPULATION 25 YEARS AND OVER"
self.MALE_HIGH_SCHOOL_ED_FIELD = "PBG026005"
self.MALE_HIGH_SCHOOL_ED_VI_FIELD = "PCT032011"
self.MALE_HIGH_SCHOOL_ED_FIELD_NAME = (
"Total!!Male!!High school graduate, GED, or alternative; "
"SEX BY EDUCATIONAL ATTAINMENT FOR THE POPULATION 25 YEARS AND OVER"
)
self.FEMALE_HIGH_SCHOOL_ED_FIELD = "PBG026012"
self.FEMALE_HIGH_SCHOOL_ED_VI_FIELD = "PCT032028"
self.FEMALE_HIGH_SCHOOL_ED_FIELD_NAME = (
"Total!!Female!!High school graduate, GED, or alternative; "
"SEX BY EDUCATIONAL ATTAINMENT FOR THE POPULATION 25 YEARS AND OVER"
)
self.PERCENTAGE_HIGH_SCHOOL_ED_FIELD_NAME = "Percent individuals age 25 or over with less than high school degree in 2009"
# Employment fields
self.EMPLOYMENT_MALE_IN_LABOR_FORCE_FIELD = (
"PBG038003" # Total!!Male!!In labor force
)
self.EMPLOYMENT_MALE_UNEMPLOYED_FIELD = (
"PBG038007" # Total!!Male!!In labor force!!Civilian!!Unemployed
)
self.EMPLOYMENT_FEMALE_IN_LABOR_FORCE_FIELD = (
"PBG038010" # Total!!Female!!In labor force
)
self.EMPLOYMENT_FEMALE_UNEMPLOYED_FIELD = (
"PBG038014" # Total!!Female!!In labor force!!Civilian!!Unemployed
)
# Same fields, Virgin Islands.
self.EMPLOYMENT_MALE_IN_LABOR_FORCE_VI_FIELD = (
"PBG036003" # Total!!Male!!In labor force
)
self.EMPLOYMENT_MALE_UNEMPLOYED_VI_FIELD = (
"PBG036007" # Total!!Male!!In labor force!!Civilian!!Unemployed
)
self.EMPLOYMENT_FEMALE_IN_LABOR_FORCE_VI_FIELD = (
"PBG036010" # Total!!Female!!In labor force
)
self.EMPLOYMENT_FEMALE_UNEMPLOYED_VI_FIELD = (
"PBG036014" # Total!!Female!!In labor force!!Civilian!!Unemployed
)
self.UNEMPLOYMENT_FIELD_NAME = (
field_names.CENSUS_DECENNIAL_UNEMPLOYMENT_FIELD_2009
)
var_list = [
self.MEDIAN_INCOME_FIELD,
self.TOTAL_HOUSEHOLD_RATIO_INCOME_TO_POVERTY_LEVEL_FIELD,
self.HOUSEHOLD_OVER_200_PERC_POVERTY_LEVEL_FIELD,
self.TOTAL_POPULATION_FIELD,
self.MALE_HIGH_SCHOOL_ED_FIELD,
self.FEMALE_HIGH_SCHOOL_ED_FIELD,
self.HOUSEHOLD_UNDER_100_PERC_POVERTY_LEVEL_FIELD_PART_ONE,
self.HOUSEHOLD_UNDER_100_PERC_POVERTY_LEVEL_FIELD_PART_TWO,
self.HOUSEHOLD_UNDER_100_PERC_POVERTY_LEVEL_FIELD_PART_THREE,
self.EMPLOYMENT_MALE_IN_LABOR_FORCE_FIELD,
self.EMPLOYMENT_MALE_UNEMPLOYED_FIELD,
self.EMPLOYMENT_FEMALE_IN_LABOR_FORCE_FIELD,
self.EMPLOYMENT_FEMALE_UNEMPLOYED_FIELD,
self.TOTAL_POP_FIELD,
]
var_list = ",".join(var_list)
var_list_vi = [
self.MEDIAN_INCOME_VI_FIELD,
self.TOTAL_HOUSEHOLD_RATIO_INCOME_TO_POVERTY_LEVEL_VI_FIELD,
self.HOUSEHOLD_OVER_200_PERC_POVERTY_LEVEL_VI_FIELD,
self.TOTAL_POPULATION_VI_FIELD,
self.MALE_HIGH_SCHOOL_ED_VI_FIELD,
self.FEMALE_HIGH_SCHOOL_ED_VI_FIELD,
self.HOUSEHOLD_UNDER_100_PERC_POVERTY_LEVEL_FIELD_VI_PART_ONE,
self.HOUSEHOLD_UNDER_100_PERC_POVERTY_LEVEL_FIELD_VI_PART_TWO,
self.HOUSEHOLD_UNDER_100_PERC_POVERTY_LEVEL_FIELD_VI_PART_THREE,
self.EMPLOYMENT_MALE_IN_LABOR_FORCE_VI_FIELD,
self.EMPLOYMENT_MALE_UNEMPLOYED_VI_FIELD,
self.EMPLOYMENT_FEMALE_IN_LABOR_FORCE_VI_FIELD,
self.EMPLOYMENT_FEMALE_UNEMPLOYED_VI_FIELD,
self.TOTAL_POP_VI_FIELD,
]
var_list_vi = ",".join(var_list_vi)
self.FIELD_NAME_XWALK = {
self.MEDIAN_INCOME_FIELD: self.MEDIAN_INCOME_FIELD_NAME,
self.MEDIAN_INCOME_VI_FIELD: self.MEDIAN_INCOME_FIELD_NAME,
self.TOTAL_HOUSEHOLD_RATIO_INCOME_TO_POVERTY_LEVEL_FIELD: self.TOTAL_HOUSEHOLD_RATIO_INCOME_TO_POVERTY_LEVEL_FIELD_NAME,
self.TOTAL_HOUSEHOLD_RATIO_INCOME_TO_POVERTY_LEVEL_VI_FIELD: self.TOTAL_HOUSEHOLD_RATIO_INCOME_TO_POVERTY_LEVEL_FIELD_NAME,
self.HOUSEHOLD_OVER_200_PERC_POVERTY_LEVEL_FIELD: self.HOUSEHOLD_OVER_200_PERC_POVERTY_LEVEL_FIELD_NAME,
self.HOUSEHOLD_OVER_200_PERC_POVERTY_LEVEL_VI_FIELD: self.HOUSEHOLD_OVER_200_PERC_POVERTY_LEVEL_FIELD_NAME,
self.TOTAL_POPULATION_FIELD: self.TOTAL_POPULATION_FIELD_NAME,
self.TOTAL_POPULATION_VI_FIELD: self.TOTAL_POPULATION_FIELD_NAME,
self.MALE_HIGH_SCHOOL_ED_FIELD: self.MALE_HIGH_SCHOOL_ED_FIELD_NAME,
self.MALE_HIGH_SCHOOL_ED_VI_FIELD: self.MALE_HIGH_SCHOOL_ED_FIELD_NAME,
self.FEMALE_HIGH_SCHOOL_ED_FIELD: self.FEMALE_HIGH_SCHOOL_ED_FIELD_NAME,
self.FEMALE_HIGH_SCHOOL_ED_VI_FIELD: self.FEMALE_HIGH_SCHOOL_ED_FIELD_NAME,
self.HOUSEHOLD_UNDER_100_PERC_POVERTY_LEVEL_FIELD_PART_ONE: self.HOUSEHOLD_UNDER_100_PERC_POVERTY_LEVEL_FIELD_PART_ONE,
self.HOUSEHOLD_UNDER_100_PERC_POVERTY_LEVEL_FIELD_VI_PART_ONE: self.HOUSEHOLD_UNDER_100_PERC_POVERTY_LEVEL_FIELD_PART_ONE,
self.HOUSEHOLD_UNDER_100_PERC_POVERTY_LEVEL_FIELD_PART_TWO: self.HOUSEHOLD_UNDER_100_PERC_POVERTY_LEVEL_FIELD_PART_TWO,
self.HOUSEHOLD_UNDER_100_PERC_POVERTY_LEVEL_FIELD_VI_PART_TWO: self.HOUSEHOLD_UNDER_100_PERC_POVERTY_LEVEL_FIELD_PART_TWO,
self.HOUSEHOLD_UNDER_100_PERC_POVERTY_LEVEL_FIELD_PART_THREE: self.HOUSEHOLD_UNDER_100_PERC_POVERTY_LEVEL_FIELD_PART_THREE,
self.HOUSEHOLD_UNDER_100_PERC_POVERTY_LEVEL_FIELD_VI_PART_THREE: self.HOUSEHOLD_UNDER_100_PERC_POVERTY_LEVEL_FIELD_PART_THREE,
self.EMPLOYMENT_MALE_IN_LABOR_FORCE_VI_FIELD: self.EMPLOYMENT_MALE_IN_LABOR_FORCE_FIELD,
self.EMPLOYMENT_MALE_UNEMPLOYED_VI_FIELD: self.EMPLOYMENT_MALE_UNEMPLOYED_FIELD,
self.EMPLOYMENT_FEMALE_IN_LABOR_FORCE_VI_FIELD: self.EMPLOYMENT_FEMALE_IN_LABOR_FORCE_FIELD,
self.EMPLOYMENT_FEMALE_UNEMPLOYED_VI_FIELD: self.EMPLOYMENT_FEMALE_UNEMPLOYED_FIELD,
self.EMPLOYMENT_MALE_IN_LABOR_FORCE_FIELD: self.EMPLOYMENT_MALE_IN_LABOR_FORCE_FIELD,
self.EMPLOYMENT_MALE_UNEMPLOYED_FIELD: self.EMPLOYMENT_MALE_UNEMPLOYED_FIELD,
self.EMPLOYMENT_FEMALE_IN_LABOR_FORCE_FIELD: self.EMPLOYMENT_FEMALE_IN_LABOR_FORCE_FIELD,
self.EMPLOYMENT_FEMALE_UNEMPLOYED_FIELD: self.EMPLOYMENT_FEMALE_UNEMPLOYED_FIELD,
}
# To do: Ask Census Slack Group about whether you need to hardcode the county fips
# https://uscensusbureau.slack.com/archives/C6DGLC05B/p1635218909012600
self.ISLAND_TERRITORIES = [
{
"state_abbreviation": "as",
"fips": "60",
"county_fips": ["010", "020", "030", "040", "050"],
"var_list": var_list,
# Note: we hardcode the median income for each territory in this dict,
# because that data is hard to programmatically access.
self.TERRITORY_MEDIAN_INCOME_FIELD: 23892,
},
{
"state_abbreviation": "gu",
"fips": "66",
"county_fips": ["010"],
"var_list": var_list,
self.TERRITORY_MEDIAN_INCOME_FIELD: 48274,
},
{
"state_abbreviation": "mp",
"fips": "69",
"county_fips": ["085", "100", "110", "120"],
"var_list": var_list,
self.TERRITORY_MEDIAN_INCOME_FIELD: 19958,
},
{
"state_abbreviation": "vi",
"fips": "78",
"county_fips": ["010", "020", "030"],
"var_list": var_list_vi,
self.TERRITORY_MEDIAN_INCOME_FIELD: 37254,
},
]
self.API_URL = (
"https://api.census.gov/data/{}/dec/{}?get=NAME,{}"
+ "&for=tract:*&in=state:{}%20county:{}"
)
self.df: pd.DataFrame
self.df_vi: pd.DataFrame
self.df_all: pd.DataFrame
def extract(self) -> None:
dfs = []
dfs_vi = []
for island in self.ISLAND_TERRITORIES:
logger.info(
f"Downloading data for state/territory {island['state_abbreviation']}"
)
for county in island["county_fips"]:
download = requests.get(
self.API_URL.format(
self.DECENNIAL_YEAR,
island["state_abbreviation"],
island["var_list"],
island["fips"],
county,
)
)
df = json.loads(download.content)
# First row is the header
df = pd.DataFrame(df[1:], columns=df[0])
for col in island["var_list"].split(","):
# Converting appropriate variables to numeric.
# Also replacing 0s with NaNs
df[col] = pd.to_numeric(df[col])
# TO-DO: CHECK THIS. I think it makes sense to replace 0 with NaN
# because for our variables of interest (e.g. Median Household Income,
# it doesn't make sense for that to be 0.)
# Likely, it's actually missing but can't find a cite for that in the docs
df[col] = df[col].replace(0, np.nan)
if island["state_abbreviation"] == "vi":
dfs_vi.append(df)
else:
dfs.append(df)
self.df = pd.concat(dfs)
self.df_vi = pd.concat(dfs_vi)
    def transform(self) -> None:
        """Rename raw Census fields and derive the analysis columns.

        Renames both frames via FIELD_NAME_XWALK, concatenates them into
        ``self.df_all``, then derives poverty, education, unemployment and
        area-median-income percentages, builds the tract GEOID, and logs a
        missing-value report per column.
        """
        logger.info("Starting Census Decennial Transform")
        # Rename All Fields
        self.df.rename(columns=self.FIELD_NAME_XWALK, inplace=True)
        self.df_vi.rename(columns=self.FIELD_NAME_XWALK, inplace=True)
        # Combine the dfs after renaming
        self.df_all = pd.concat([self.df, self.df_vi])
        # Rename total population:
        self.df_all[self.TOTAL_POP_FIELD_NAME] = self.df_all[
            self.TOTAL_POP_FIELD
        ]
        # Percentage of households below 200% which is
        # [PBG083001 (total) - PBG083010 (num households over 200%)] / PBG083001 (total)
        self.df_all[
            self.PERCENTAGE_HOUSEHOLDS_BELOW_200_PERC_POVERTY_LEVEL_FIELD_NAME
        ] = (
            self.df_all[
                self.TOTAL_HOUSEHOLD_RATIO_INCOME_TO_POVERTY_LEVEL_FIELD_NAME
            ]
            - self.df_all[self.HOUSEHOLD_OVER_200_PERC_POVERTY_LEVEL_FIELD_NAME]
        ) / self.df_all[
            self.TOTAL_HOUSEHOLD_RATIO_INCOME_TO_POVERTY_LEVEL_FIELD_NAME
        ]
        # Percentage of households below 100% FPL
        # which we get by adding `Total!!Under .50`, `Total!!.50 to .74`, ` Total!!.75 to .99`,
        # and then dividing by PBG083001 (total)
        self.df_all[
            self.PERCENTAGE_HOUSEHOLDS_BELOW_100_PERC_POVERTY_LEVEL_FIELD_NAME
        ] = (
            self.df_all[
                self.HOUSEHOLD_UNDER_100_PERC_POVERTY_LEVEL_FIELD_PART_ONE
            ]
            + self.df_all[
                self.HOUSEHOLD_UNDER_100_PERC_POVERTY_LEVEL_FIELD_PART_TWO
            ]
            + self.df_all[
                self.HOUSEHOLD_UNDER_100_PERC_POVERTY_LEVEL_FIELD_PART_THREE
            ]
        ) / self.df_all[
            self.TOTAL_HOUSEHOLD_RATIO_INCOME_TO_POVERTY_LEVEL_FIELD_NAME
        ]
        # Percentage High School Achievement is
        # Percentage = (Male + Female) / (Total)
        self.df_all[self.PERCENTAGE_HIGH_SCHOOL_ED_FIELD_NAME] = (
            self.df_all[self.MALE_HIGH_SCHOOL_ED_FIELD_NAME]
            + self.df_all[self.FEMALE_HIGH_SCHOOL_ED_FIELD_NAME]
        ) / self.df_all[self.TOTAL_POPULATION_FIELD_NAME]
        # Calculate employment: unemployed / in-labor-force, summed over sexes.
        self.df_all[self.UNEMPLOYMENT_FIELD_NAME] = (
            self.df_all[self.EMPLOYMENT_MALE_UNEMPLOYED_FIELD]
            + self.df_all[self.EMPLOYMENT_FEMALE_UNEMPLOYED_FIELD]
        ) / (
            self.df_all[self.EMPLOYMENT_MALE_IN_LABOR_FORCE_FIELD]
            + self.df_all[self.EMPLOYMENT_FEMALE_IN_LABOR_FORCE_FIELD]
        )
        # Calculate area median income: tract median income relative to the
        # (hard-coded) territory median income, joined on the state FIPS code.
        median_income_df = pd.DataFrame(self.ISLAND_TERRITORIES)
        median_income_df = median_income_df[
            ["fips", self.TERRITORY_MEDIAN_INCOME_FIELD]
        ]
        self.df_all = self.df_all.merge(
            right=median_income_df, left_on="state", right_on="fips", how="left"
        )
        self.df_all[self.AREA_MEDIAN_INCOME_FIELD_NAME] = (
            self.df_all[self.MEDIAN_INCOME_FIELD_NAME]
            / self.df_all[self.TERRITORY_MEDIAN_INCOME_FIELD]
        )
        # Creating Geo ID (Census Block Group) Field Name by concatenating the
        # state + county + tract code strings returned by the API.
        self.df_all[self.GEOID_TRACT_FIELD_NAME] = (
            self.df_all["state"] + self.df_all["county"] + self.df_all["tract"]
        )
        # Reporting Missing Values for every column, for data-quality review.
        for col in self.df_all.columns:
            missing_value_count = self.df_all[col].isnull().sum()
            logger.info(
                f"There are {missing_value_count} missing values in the field {col} out of a total of {self.df_all.shape[0]} rows"
            )
def load(self) -> None:
logger.info("Saving Census Decennial Data")
# mkdir census
self.OUTPUT_PATH.mkdir(parents=True, exist_ok=True)
columns_to_include = [
self.GEOID_TRACT_FIELD_NAME,
self.TOTAL_POP_FIELD_NAME,
self.MEDIAN_INCOME_FIELD_NAME,
self.TERRITORY_MEDIAN_INCOME_FIELD,
self.AREA_MEDIAN_INCOME_FIELD_NAME,
self.PERCENTAGE_HOUSEHOLDS_BELOW_100_PERC_POVERTY_LEVEL_FIELD_NAME,
self.PERCENTAGE_HOUSEHOLDS_BELOW_200_PERC_POVERTY_LEVEL_FIELD_NAME,
self.PERCENTAGE_HIGH_SCHOOL_ED_FIELD_NAME,
self.UNEMPLOYMENT_FIELD_NAME,
]
self.df_all[columns_to_include].to_csv(
path_or_buf=self.OUTPUT_PATH / "usa.csv", index=False
)
def validate(self) -> None:
logger.info("Validating Census Decennial Data")
pass
| 44.179177 | 138 | 0.652527 | 17,940 | 0.983229 | 0 | 0 | 0 | 0 | 0 | 0 | 4,258 | 0.233366 |
0f2612b9ae0c0dbd7fdceda1e64ee15cf69d3dbf | 1,046 | py | Python | xml_tree.py | rcflorestal/scientificComputerPython | 741c605ae987209a7f7d0751879a91940b1a5140 | [
"MIT"
] | null | null | null | xml_tree.py | rcflorestal/scientificComputerPython | 741c605ae987209a7f7d0751879a91940b1a5140 | [
"MIT"
] | null | null | null | xml_tree.py | rcflorestal/scientificComputerPython | 741c605ae987209a7f7d0751879a91940b1a5140 | [
"MIT"
] | null | null | null | import xml.etree.ElementTree as ET
# data = '''
# <person>
# <name>Chuck</name>
# <phone type="intl">
# +1 734 303 4456
# </phone>
# <email hide="yes"/>
# </person>
# '''
# Minimal ElementTree demo: parse an inline XML string, then a file.
data = '''
<person> <!-- Start tag -->
    <name>Chuck</name>
    <phone type="intl"> <!-- type="intl" is an attribute -->
        +1 734 303 4456 <!-- Text content -->
    </phone>
    <!-- self closing with an attribute -->
    <!-- hide="yes" is an attribute -->
    <email hide="yes"/>
</person> <!-- end tag -->
'''
# Parse the string into an Element and pull out text / attribute values.
tree = ET.fromstring(data)
print('Name: ', tree.find('name').text)
print('Attr: ', tree.find('email').get('hide'), end='\n\n')
## get xml data from a file
# Assumes xml_basics.xml exists next to this script.
xml_file = 'xml_basics.xml'
tree_from_file = ET.parse(xml_file)
root = tree_from_file.getroot()
print(root.tag)
print(root.attrib, end='\n\n')
# Iterating an Element yields its direct children.
for child in root:
    print(child.tag, child.attrib, end='\n')
print('Name: ', root[0].text) # print('Name: ', root.find('name').text)
0f272c8d3fadc457e5a1a40afe6d595d75f8d4e9 | 2,000 | py | Python | bin/gftools-find-features.py | hyvyys/gftools | 85ef924f9307f290be08b15115805cc5e3287d33 | [
"Apache-2.0"
] | 1 | 2019-01-29T20:47:44.000Z | 2019-01-29T20:47:44.000Z | bin/gftools-find-features.py | hyvyys/gftools | 85ef924f9307f290be08b15115805cc5e3287d33 | [
"Apache-2.0"
] | 1 | 2021-06-25T15:32:21.000Z | 2021-06-25T15:32:21.000Z | bin/gftools-find-features.py | hyvyys/gftools | 85ef924f9307f290be08b15115805cc5e3287d33 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tool to print GPOS and GSUB features supported by font file(s).
"""
from __future__ import print_function
import contextlib
import os
import sys
from fontTools.ttLib import TTFont
from gftools.util import google_fonts as fonts
from absl import app
def ListFeatures(font):
    """List the GPOS/GSUB features present in a font.

    Args:
      font: a TTFont (or any mapping with the same table layout).

    Returns:
      List of 3-tuples (table, feature tag, lookup description), e.g.
      ('GPOS', 'kern', 'lookups: [0, 1]'), in GPOS-then-GSUB order.
    """
    features = []
    for table_tag in ("GPOS", "GSUB"):
        if table_tag not in font.keys():
            continue
        for record in font[table_tag].table.FeatureList.FeatureRecord:
            lookups = ", ".join(map(str, record.Feature.LookupListIndex))
            features.append(
                (table_tag, record.FeatureTag, "lookups: [{}]".format(lookups)))
    return features
def main(path):
    """Print the GPOS/GSUB features of the font(s) found at *path*.

    Args:
      path: either a single .ttf file or a directory containing .ttf files.
    """
    # Local import fix: `glob` was referenced below but never imported at
    # module level, so the directory branch raised NameError.
    from glob import glob
    if path.endswith(".ttf"):
        font_files = [path]
    elif os.path.isdir(path):
        font_files = glob(path + "/*.ttf")
    else:
        # Previously `font_files` was left undefined here, crashing with
        # NameError; a non-font, non-directory path is now "nothing to do".
        font_files = []
    for font_file in font_files:
        features = []
        with TTFont(font_file) as font:
            features += ListFeatures(font)
        for (table, tag, lookup_name) in features:
            print('{:32s} {:4s} {:8s} {:15s}'.format(
                os.path.basename(font_file), table, str(tag), lookup_name))
if __name__ == '__main__':
    # Entry point: expect exactly one argument (a .ttf file or a directory).
    if len(sys.argv) == 2:
        main(sys.argv[1])
    else:
        print("Please include either a path to a ttf or a path to a dir "
              "containing ttfs")
| 27.777778 | 79 | 0.6775 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,031 | 0.5155 |
0f27eca1b82dcb372dfbca87d3b80c02b19e0650 | 2,739 | py | Python | sockeye/postprocess.py | hec44/DCGCN | a6e9c610d847295829a67337536769d678419ec2 | [
"MIT"
] | 75 | 2019-03-12T04:29:40.000Z | 2022-03-19T14:04:04.000Z | sockeye/postprocess.py | hec44/DCGCN | a6e9c610d847295829a67337536769d678419ec2 | [
"MIT"
] | 2 | 2019-08-23T03:51:17.000Z | 2019-10-10T14:16:37.000Z | sockeye/postprocess.py | hec44/DCGCN | a6e9c610d847295829a67337536769d678419ec2 | [
"MIT"
] | 8 | 2019-08-22T07:35:30.000Z | 2022-03-01T03:53:11.000Z | import sys
# Command-line arguments: the per-sentence anonymization map, the raw
# (anonymized) model output, and the path for the de-anonymized result.
map_file = sys.argv[1]
raw_test_file = sys.argv[2]
output_file = sys.argv[3]
# Anonymized date tokens that must be normalized (via replace_date) before
# they can be looked up in the per-sentence entity mapping.
date_set = ('year_0_number', 'year_1_number', 'year_2_number', 'year_3_number', 'month_0_number', 'month_0_name', 'month_1_name', 'day_0_number', 'day_1_number')
def replace_date(tok):
    """Normalize an anonymized date token to its short form.

    E.g. 'year_0_number' -> 'year_0' and 'month_0_name' -> 'month_0'.
    Tokens with no date variant are returned unchanged.
    """
    short_forms = {
        'year_0_number': 'year_0',
        'year_1_number': 'year_1',
        'year_2_number': 'year_2',
        'year_3_number': 'year_3',
        'month_0_number': 'month_0',
        'month_0_name': 'month_0',
        'month_1_name': 'month_1',
        'day_0_number': 'day_0',
        'day_1_number': 'day_1',
    }
    return short_forms.get(tok, tok)
# Parse the anonymization map file. Each line appears to be a JSON-ish
# object ({"anon": "real", ...}) parsed by hand with quote stripping —
# TODO confirm against the producer of map_file.
mapping_list = list()
with open(map_file) as f:
    map_list = f.readlines()
    print(len(map_list))
    for line in map_list:
        line = line.strip()
        if line != '{}':
            # Drop the surrounding braces.
            line = line[1:-1]
            entity_dict = dict()
            if ',' in line:
                # Multiple entries: split on the closing quote + comma.
                entity_list = line.split('",')
                for entity in entity_list:
                    entity = entity.split(':')
                    # Strip the surrounding quotes from the anonymized key.
                    anon = entity[0].strip()[1:-1]
                    # The last entry keeps its trailing quote after the
                    # split above, so the slicing differs per case.
                    if entity[1].strip()[-1] == '"':
                        deanon = entity[1].strip()[1:-1].lower()
                    else:
                        deanon = entity[1].strip()[1:].lower()
                    entity_dict[anon] = deanon
            else:
                # Single-entry object.
                entity = line.split(':')
                anon = entity[0].strip()[1:-1]
                deanon = entity[1].strip()[1:-1].lower()
                entity_dict[anon] = deanon
            # print(entity_dict)
            mapping_list.append(entity_dict)
        else:
            # Empty map '{}': keep an empty placeholder so indices line up
            # with the sentences in raw_test_file.
            mapping_list.append([])
print(len(mapping_list))
# De-anonymize each output sentence using the mapping with the same index.
with open(raw_test_file) as f:
    output_list = f.readlines()
    all_sent_list = list()
    for index, line in enumerate(output_list):
        entities = mapping_list[index]
        if not len(entities):
            # No entities for this sentence: pass the line through unchanged.
            all_sent_list.append(line)
            continue
        sent_list = line.strip().split(' ')
        # print(entities)
        new_sent = ''
        for tok in sent_list:
            if tok in date_set:
                # Normalize date tokens to the short form used as map keys.
                tok = replace_date(tok)
                print(tok)
            if tok in entities.keys():
                # Substitute the original (de-anonymized) surface form.
                deanon = entities[tok]
                new_sent += deanon + ' '
            else:
                new_sent += tok + ' '
        new_sent += '\n'
        # print(new_sent)
        all_sent_list.append(new_sent)
# Write the de-anonymized sentences (newlines already included above).
with open(output_file, 'w') as out:
    for sent in all_sent_list:
        out.write(sent)
# print(all_sent_list)
| 29.138298 | 161 | 0.513326 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 451 | 0.164659 |
0f28bd836ea19980a01d0e09e41aaa7563cec959 | 3,134 | py | Python | samples/100_nodes.py | Kuree/pyns | c627d11bbc79b86f868371282165b41652040e57 | [
"MIT"
] | null | null | null | samples/100_nodes.py | Kuree/pyns | c627d11bbc79b86f868371282165b41652040e57 | [
"MIT"
] | null | null | null | samples/100_nodes.py | Kuree/pyns | c627d11bbc79b86f868371282165b41652040e57 | [
"MIT"
] | null | null | null | from pyns.protocols import create_basestation, create_node, ProtocolType
from pyns.engine import Simulator, SimArg, TraceFormatter, TransmissionMedium
from pyns.phy import PHYLayer
import logging
import numpy
import sys
import random
import os
import json
class ConstantSimulator(Simulator):
    """Simulator that drives a basestation plus N nodes at a constant
    Poisson packet load. The base-class ``start`` presumably invokes ``_run``
    once per packet rate — TODO confirm against pyns.engine.Simulator.
    """

    def __init__(self, total_time, use_seed, num_nodes, protocol_type, log_prefix):
        # total_time: simulation horizon handed to the Simulator base class.
        # use_seed: if True, seed numpy/random deterministically in _run.
        # log_prefix: prefix of the per-rate logger names ("rate-<pr>").
        super().__init__(total_time)
        self.use_seed = use_seed
        self.num_nodes = num_nodes
        self.protocol_type = protocol_type
        self.log_prefix = log_prefix

    def _run(self, env, pr):
        """Simulation generator: builds the network, then emits traffic.

        env is the simulation environment (yields env.timeout), pr is the
        per-node packet rate for this run.
        """
        if self.use_seed:
            # Deterministic run: fixed seeds 0..num_nodes and seeded RNGs.
            seeds = [i for i in range(self.num_nodes + 1)]
            numpy.random.seed(0)
            random.seed(0)
        else:
            seeds = [random.randint(0, self.num_nodes * 1000) for i in range(self.num_nodes + 1)]
        special_args = {"seed": seeds[0]}
        name = self.log_prefix + str(pr)
        with open("100.json") as f:
            config = json.load(f)
        layer = PHYLayer(120, 10000, 1) # 10 KHz bandwidth. won't be used in the simulation
        t = TransmissionMedium(env, name, layer=layer)
        t.add_logger(name)
        bs = create_basestation(self.protocol_type, 0, env, config, special_args)
        t.add_device(bs)
        nodes = []
        for i in range(self.num_nodes):
            # NOTE(review): node i reuses seeds[i]; seeds[0] is also given to
            # the basestation, so node 0 shares its seed — confirm intended.
            special_arg = {"total": self.num_nodes, "scheduled_time": i, "seed": seeds[i]}
            n = create_node(self.protocol_type, i, env, config, special_arg)
            nodes.append(n)
            t.add_device(n)
        # Aggregate load across all nodes for the Poisson arrival process.
        rate = pr * len(nodes)
        dummy_payload = "Test"
        ADJUST_FACTOR = 4 # this is for DQN adjustment
        load = rate if self.protocol_type != 3 else rate / config["N"] * ADJUST_FACTOR
        while True:
            # Each step: a Poisson number of distinct nodes transmit a
            # quarter-MTU payload, then sleep a uniform(0, 2) interval.
            num_of_trans = numpy.random.poisson(load)
            nodes_to_trans = random.sample(nodes, num_of_trans)
            for n in nodes_to_trans:
                n.send(dummy_payload, int(n.MTU / ADJUST_FACTOR))
            sleep_time = numpy.random.uniform(0, 2)
            yield env.timeout(sleep_time)
def main():
    """Parse CLI options, configure per-rate loggers, and run the sweep."""
    parser = SimArg("Simulation with various rates and 100 nodes.", remove_num=True)
    args = parser.parse_args()
    total_time = args.sim_time
    use_seed = args.use_seed
    num_nodes = 100
    protocol_type = args.type
    log_prefix = "rate-"
    if args.test:
        # Single sanity-check rate with deterministic seeding.
        rates = [1 / num_nodes]
        use_seed = True
    else:
        # Sweep of 20 per-node packet rates.
        rates = [0.05 / num_nodes * i for i in range(1, 21)]
    simulator = ConstantSimulator(
        total_time, use_seed, num_nodes, protocol_type, log_prefix)
    # One logger (with one handler) per rate: stdout in test/stdout mode,
    # otherwise a per-rate file under 100_log/.
    for rate in rates:
        logger_name = log_prefix + str(rate)
        rate_logger = logging.getLogger(logger_name)
        if args.stdout or args.test:
            handler = logging.StreamHandler(sys.stdout)
        else:
            log_path = os.path.join("100_log", str(protocol_type) + "-" + logger_name)
            handler = logging.FileHandler(log_path)
        handler.setFormatter(TraceFormatter())
        handler.setLevel(logging.INFO)
        rate_logger.addHandler(handler)
    simulator.start(rates)
# Script entry point.
if __name__ == "__main__":
    main()
| 32.309278 | 97 | 0.62508 | 1,850 | 0.5903 | 1,540 | 0.491385 | 0 | 0 | 0 | 0 | 249 | 0.079451 |
0f297395918547fd04d388da57afb6142bad95dd | 414 | py | Python | 06_Banner/python/test_banner.py | MartinThoma/basic-computer-games | bcd59488ff57bf7e52e152c6fc5fa964c76d0694 | [
"Unlicense"
] | 1 | 2022-03-24T17:56:31.000Z | 2022-03-24T17:56:31.000Z | 06_Banner/python/test_banner.py | MartinThoma/basic-computer-games | bcd59488ff57bf7e52e152c6fc5fa964c76d0694 | [
"Unlicense"
] | null | null | null | 06_Banner/python/test_banner.py | MartinThoma/basic-computer-games | bcd59488ff57bf7e52e152c6fc5fa964c76d0694 | [
"Unlicense"
] | null | null | null | import io
from banner import print_banner
def test_print_banner(monkeypatch) -> None:
    """Drive print_banner() with canned stdin answers for every prompt."""
    # Answers, in prompt order: horizontal, vertical, centered, character,
    # statement (capital letters only), set-page.
    answers = ["1", "1", "1", "*", "O", "2"]
    monkeypatch.setattr("sys.stdin", io.StringIO("\n".join(answers)))
    print_banner()
| 20.7 | 84 | 0.584541 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 123 | 0.297101 |
0f2b5ba04147b3afbb2b735357905e7eac75bc23 | 3,296 | py | Python | yt_handle.py | luceatnobis/yt_handle | f67ddf0f6d312b0af0eda18834a4fe06e8a3002d | [
"Apache-2.0"
] | null | null | null | yt_handle.py | luceatnobis/yt_handle | f67ddf0f6d312b0af0eda18834a4fe06e8a3002d | [
"Apache-2.0"
] | null | null | null | yt_handle.py | luceatnobis/yt_handle | f67ddf0f6d312b0af0eda18834a4fe06e8a3002d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
from __future__ import print_function
import os
import sys
import shutil
import httplib2
import oauth2client
# Older google-api-python-client releases exposed the package under the
# legacy name `apiclient`; fall back to the modern `googleapiclient`.
try:
    import apiclient as googleapiclient
except ImportError:
    import googleapiclient
from oauth2client.file import Storage, Credentials
from oauth2client.client import flow_from_clientsecrets
# File names stored inside each identity folder.
CS = "client_secrets.json"
CREDS = "credentials.json"
# Root directory holding one sub-folder per saved identity.
YOUTUBE_DATA_ROOT = '~/.youtube'
# OAuth scope requested for the YouTube Data API.
YOUTUBE_READ_WRITE_SSL_SCOPE = (
    "https://www.googleapis.com/auth/youtube.force-ssl")
def return_handle(id_name):
    """Return an authorized YouTube Data API (v3) client for *id_name*.

    If the identity folder under YOUTUBE_DATA_ROOT does not exist yet, the
    user is prompted to create it interactively (exiting on refusal).
    """
    data_root = os.path.expanduser(YOUTUBE_DATA_ROOT)
    folder = os.path.join(data_root, id_name)
    if not os.path.exists(folder):
        answer = input("Identity %s is not known; create it? [Y|n] " % id_name)
        # An empty answer defaults to "yes"; anything not starting with
        # y/Y aborts.
        if answer and not answer.lower().startswith('y'):
            sys.exit()
        create_identity(id_name)
    stored = _retrieve_files(folder)
    credentials = Credentials().new_from_json(stored['credentials'])
    authorized_http = credentials.authorize(http=httplib2.Http())
    return googleapiclient.discovery.build("youtube", "v3", http=authorized_http)
def create_identity(id_name, cs_location=None):
    """Create (or load) the OAuth credentials for identity *id_name*.

    cs_location is the path to a client_secrets file (or a directory
    containing one); when None, the user is prompted for it. Returns the
    credentials on success, None if the identity folder already exists, and
    exits the process if the user supplies no verification code.
    """
    if cs_location is None:
        n = input("Please specify the location of the client_secrets file: ")
        cs_location = os.path.abspath(os.path.expanduser(n))
    # Allow pointing at a directory that contains the secrets file.
    if os.path.isdir(cs_location):
        cs_location = os.path.join(cs_location, CS)
    identity_root = os.path.expanduser(YOUTUBE_DATA_ROOT)
    identity_folder = os.path.join(identity_root, id_name)
    if os.path.exists(identity_folder):
        # Identity already present; nothing to create.
        return
    id_cs_location = os.path.join(identity_root, id_name, CS)
    id_cred_location = os.path.join(identity_root, id_name, CREDS)
    storage = Storage(id_cred_location)
    credentials = storage.get()
    if credentials and not credentials.invalid:
        return credentials # credentials exist
    # Run the out-of-band OAuth flow: show the URL, ask for the code.
    flow = flow_from_clientsecrets(
        cs_location, scope=YOUTUBE_READ_WRITE_SSL_SCOPE)
    flow.redirect_uri = oauth2client.client.OOB_CALLBACK_URN
    authorize_url = flow.step1_get_authorize_url()
    code = _console_auth(authorize_url)
    if code:
        credential = flow.step2_exchange(code, http=None)
        # Only create the folder once authorization succeeded, then persist
        # both the credential and a copy of the client-secrets file.
        os.makedirs(identity_folder)
        storage.put(credential)
        credential.set_store(storage)
        shutil.copyfile(cs_location, id_cs_location)
        return credential
    else:
        print("Invalid input, exiting", file=sys.stderr)
        sys.exit()
def _console_auth(authorize_url):
    """Show authorization URL and return the code the user wrote."""
    message = "Check this link in your browser: {0}".format(authorize_url)
    sys.stderr.write(message + "\n")
    # Python 2/3 compatibility: prefer raw_input (Py2) so the answer is not
    # eval'd; on Python 3 fall back to the builtin input.
    try:
        input = raw_input # For Python2 compatability
    except NameError:
        # For Python3 on Windows compatability
        try:
            from builtins import input as input
        except ImportError:
            # builtins unavailable: keep the default input().
            pass
    return input("Enter verification code: ")
def _retrieve_files(folder):
    """Read the stored client-secrets and credentials files of an identity.

    Returns a dict with keys 'secrets' and 'credentials' holding the raw
    file contents of *folder*/client_secrets.json and credentials.json.
    """
    secrets_path = os.path.join(folder, CS)
    credentials_path = os.path.join(folder, CREDS)
    with open(secrets_path) as sec_fh, open(credentials_path) as cred_fh:
        secrets_text = sec_fh.read()
        credentials_text = cred_fh.read()
    return {"secrets": secrets_text, "credentials": credentials_text}
| 29.693694 | 77 | 0.698726 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 497 | 0.150789 |
0f2b82918b30990906c8cf384315c56b06dde564 | 80 | py | Python | release/stubs.min/Tekla/Structures/ModelInternal_parts/AreWeUnitTesting.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | release/stubs.min/Tekla/Structures/ModelInternal_parts/AreWeUnitTesting.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | release/stubs.min/Tekla/Structures/ModelInternal_parts/AreWeUnitTesting.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | class AreWeUnitTesting(object):
# no doc
Value = False
__all__ = []
| 16 | 31 | 0.6125 | 79 | 0.9875 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 0.1 |
0f2bfcc1f4edd32f202d82cac639a588dbfdc47f | 1,001 | py | Python | 1108.defanging-an-ip-address.py | windard/leeeeee | 0107a5f95746592ca4fe78d2b5875cf65b1910e7 | [
"MIT"
] | null | null | null | 1108.defanging-an-ip-address.py | windard/leeeeee | 0107a5f95746592ca4fe78d2b5875cf65b1910e7 | [
"MIT"
] | null | null | null | 1108.defanging-an-ip-address.py | windard/leeeeee | 0107a5f95746592ca4fe78d2b5875cf65b1910e7 | [
"MIT"
] | null | null | null | # coding=utf-8
#
# @lc app=leetcode id=1108 lang=python
#
# [1108] Defanging an IP Address
#
# https://leetcode.com/problems/defanging-an-ip-address/description/
#
# algorithms
# Easy (85.21%)
# Likes: 66
# Dislikes: 256
# Total Accepted: 36.7K
# Total Submissions: 43.1K
# Testcase Example: '"1.1.1.1"'
#
# Given a valid (IPv4) IP address, return a defanged version of that IP
# address.
#
# A defanged IP address replaces every period "." with "[.]".
#
#
# Example 1:
# Input: address = "1.1.1.1"
# Output: "1[.]1[.]1[.]1"
# Example 2:
# Input: address = "255.100.50.0"
# Output: "255[.]100[.]50[.]0"
#
#
# Constraints:
#
#
# The given address is a valid IPv4 address.
#
#
class Solution(object):
    def defangIPaddr(self, address):
        """
        :type address: str
        :rtype: str

        Return the address with every '.' replaced by '[.]'.
        """
        # A single replace does the per-character rebuild of the original.
        return address.replace(".", "[.]")
0f2caf81081a11f464cc6de29247f66e01a2f10e | 2,324 | py | Python | Plumet/scoring.py | mehmeterenballi/Plumet | 2f81cd8cb7f50432ff0b4d46c43aa8ebc5e91a2c | [
"MIT"
] | null | null | null | Plumet/scoring.py | mehmeterenballi/Plumet | 2f81cd8cb7f50432ff0b4d46c43aa8ebc5e91a2c | [
"MIT"
] | null | null | null | Plumet/scoring.py | mehmeterenballi/Plumet | 2f81cd8cb7f50432ff0b4d46c43aa8ebc5e91a2c | [
"MIT"
] | null | null | null | import pygame as pg
def score_blitting(win, score):
    """Blit the digit images of *score* (0..99999) onto *win*.

    win is a pygame surface; digits are rendered from 0.png..9.png around
    the horizontal center of a 288-wide screen. Scores >= 100000 only print
    "Game Completed".
    NOTE(review): the digit images are reloaded from disk on every call —
    consider loading them once at module level.
    """
    # screen_height is currently unused; kept for reference.
    screen_width, screen_height = 288, 512
    score_image = [pg.image.load('%d.png' % decimal) for decimal in range(0, 10)]
    if 10 > score >= 0:
        # Single digit, centered.
        win.blit(score_image[score], (screen_width / 2, 0))
    elif 100 > score >= 10:
        units_digit = score % 10
        tens_digit = int((score - units_digit) / 10)
        win.blit(score_image[tens_digit], (screen_width / 2 - 10, 0))
        win.blit(score_image[units_digit], (screen_width / 2 + 10, 0))
    elif 1000 > score >= 100:
        units_digit = score % 10
        tens_digit = int(((score - units_digit) % 100) / 10)
        hundreds_digit = int((score - tens_digit * 10 - units_digit) / 100)
        win.blit(score_image[hundreds_digit], (screen_width / 2 - 36, 0))
        win.blit(score_image[tens_digit], (screen_width / 2 - 12, 0))
        win.blit(score_image[units_digit], (screen_width / 2 + 12, 0))
    elif 10000 > score >= 1000:
        units_digit = score % 10
        tens_digit = int(((score % 100) - units_digit) / 10)
        hundreds_digit = int(((score - (score % 100)) % 1000) / 100)
        thousands_digit = int((score - (score % 1000)) / 1000)
        win.blit(score_image[thousands_digit], (screen_width / 2 - 48, 0))
        win.blit(score_image[hundreds_digit], (screen_width / 2 - 24, 0))
        win.blit(score_image[tens_digit], (screen_width / 2, 0))
        win.blit(score_image[units_digit], (screen_width / 2 + 24, 0))
    elif 100000 > score >= 10000:
        units_digit = score % 10
        tens_digit = int(((score % 100) - units_digit) / 10)
        hundreds_digit = int(((score - tens_digit * 10 - units_digit) % 1000) / 100)
        thousands_digit = int(((score - hundreds_digit * 100 - tens_digit * 10 - units_digit) % 10000) / 1000)
        ten_thousands_digit = int((score - (score % 10000)) / 10000)
        # NOTE(review): the blit order/offsets here differ from the other
        # branches (units drawn before thousands) — confirm layout intended.
        win.blit(score_image[hundreds_digit], (screen_width / 2 - 12, 0))
        win.blit(score_image[tens_digit], (screen_width / 2 + 12, 0))
        win.blit(score_image[units_digit], (screen_width / 2 + 36, 0))
        win.blit(score_image[thousands_digit], (screen_width / 2 - 36, 0))
        win.blit(score_image[ten_thousands_digit], (screen_width / 2 - 60, 0))
    else:
        print("Game Completed")
| 52.818182 | 111 | 0.603701 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 24 | 0.010327 |
0f2cb2735a3c9b7ea7019b251686b0875798e3d0 | 64 | py | Python | flattenator/__init__.py | lsst-sqre/flattenator | 297c92a1b075653ca37c200c0ddc8d46d1b64975 | [
"MIT"
] | null | null | null | flattenator/__init__.py | lsst-sqre/flattenator | 297c92a1b075653ca37c200c0ddc8d46d1b64975 | [
"MIT"
] | null | null | null | flattenator/__init__.py | lsst-sqre/flattenator | 297c92a1b075653ca37c200c0ddc8d46d1b64975 | [
"MIT"
] | null | null | null | from .flattenator import Flattenator
# Public API of the package: re-export only the Flattenator class.
__all__ = ["Flattenator"]
| 16 | 36 | 0.78125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 0.203125 |
0f2faddde2571ad100ca7bd92dfedb14e2aab42d | 375 | py | Python | examples/bbox.py | mzaglia/stac.py | 39314add494b5ab1bf11c44dcb69eba4614144db | [
"MIT"
] | null | null | null | examples/bbox.py | mzaglia/stac.py | 39314add494b5ab1bf11c44dcb69eba4614144db | [
"MIT"
] | null | null | null | examples/bbox.py | mzaglia/stac.py | 39314add494b5ab1bf11c44dcb69eba4614144db | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
#%%
# Notebook-style STAC demo ("#%%" marks cells); queries the Brazil Data Cube
# STAC service over the network.
import stac
#%%
# Connect to the STAC endpoint (second argument enables a client option —
# presumably validation/verbose; confirm against stac.STAC's signature).
s = stac.STAC('http://brazildatacube.dpi.inpe.br/bdc-stac/0.8.1/', True)
#%%
# Bare expression: displays the catalog in a notebook cell.
s.catalog
#%%
collection = s.collection('C4_64_16D_MED')
collection
#%%
# Query items constrained by bounding box and time interval.
items = collection.get_items(filter={'bbox':'-56.86523437500001,-15.919073517982413,-53.17382812500001,-13.902075852500483', 'time':'2016-09-13/2019-12-31'})
items
| 19.736842 | 157 | 0.698667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 231 | 0.616 |
0f2fe1eebe8b38c8882944a23ef192e5649f7714 | 1,482 | py | Python | cliboa/common/environment.py | chiru1221/cliboa | 0aad84f237b7c0d8a5ae0cbd27b9d70f97acbee1 | [
"MIT"
] | 27 | 2019-11-11T11:09:47.000Z | 2022-03-01T14:27:59.000Z | cliboa/common/environment.py | chiru1221/cliboa | 0aad84f237b7c0d8a5ae0cbd27b9d70f97acbee1 | [
"MIT"
] | 228 | 2019-11-11T11:04:26.000Z | 2022-03-29T02:16:05.000Z | cliboa/common/environment.py | chiru1221/cliboa | 0aad84f237b7c0d8a5ae0cbd27b9d70f97acbee1 | [
"MIT"
] | 11 | 2019-11-12T03:15:52.000Z | 2022-01-11T05:46:02.000Z | #
# Copyright 2019 BrainPad Inc. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
import os
# Directory name in which a scenario file is placed
SCENARIO_DIR_NAME = "scenario"
# Scenario file name, excluding the extension
SCENARIO_FILE_NAME = "scenario"
# cliboa project directory path (the current working directory)
BASE_DIR = os.getcwd()
# Project directory path. Customization is available
PROJECT_DIR = os.path.join(BASE_DIR, "project")
# Common directory path. Customization is available
COMMON_DIR = os.path.join(BASE_DIR, "common")
# Common scenario directory path. Customization is available
COMMON_SCENARIO_DIR = os.path.join(COMMON_DIR, "scenario")
# The below paths are appended to Python's sys.path
SYSTEM_APPEND_PATHS = [COMMON_SCENARIO_DIR]
# Common custom classes to make available
COMMON_CUSTOM_CLASSES = ["sample_step.SampleStep", "sample_step.SampleStepSub"]
# Project-specific custom classes to make available
PROJECT_CUSTOM_CLASSES = []
| 35.285714 | 79 | 0.789474 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,148 | 0.774629 |
0f2feac9a2affe9de90859ee84f6a37f704d944b | 58 | py | Python | tests/constants.py | hairygeek/yt_lib | 3de4ba980cd26312e9211930d4424267803fc6df | [
"MIT"
] | 4 | 2019-08-12T12:40:48.000Z | 2020-04-20T20:00:03.000Z | tests/constants.py | hairygeek/yt_lib | 3de4ba980cd26312e9211930d4424267803fc6df | [
"MIT"
] | null | null | null | tests/constants.py | hairygeek/yt_lib | 3de4ba980cd26312e9211930d4424267803fc6df | [
"MIT"
] | null | null | null | CJ_PATH = r''
# Paths/identifiers to fill in locally before running the tests:
# cookies file path, a channel id, and a video id.
COOKIES_PATH = r''
CHAN_ID = ''
VID_ID = ''
| 11.6 | 18 | 0.586207 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.172414 |
0f30ef54d1462b0e500b903576cbf4ddc4cd102a | 5,614 | py | Python | self_driving_ai/training.py | kforti/self-driving-ai | 50b3793c16d65ab0d9e0b3a87bd98a0e08587239 | [
"MIT"
] | null | null | null | self_driving_ai/training.py | kforti/self-driving-ai | 50b3793c16d65ab0d9e0b3a87bd98a0e08587239 | [
"MIT"
] | null | null | null | self_driving_ai/training.py | kforti/self-driving-ai | 50b3793c16d65ab0d9e0b3a87bd98a0e08587239 | [
"MIT"
] | null | null | null | import copy
import os
import time
from collections import OrderedDict
from sklearn.model_selection import train_test_split
from torchvision import models
import torch
from torch.utils.tensorboard import SummaryWriter
import pandas as pd
from skimage.io import imread
from self_driving_ai.utils import *
"""
Credit: https://www.kaggle.com/gxkok21/resnet50-with-pytorch
"""
class DrivingDataset(torch.utils.data.Dataset):
    """
    Custom dataset that loads images, applies an optional transform, and
    (when a labels CSV is given) attaches their labels.

    Two modes, distinguished by whether labels_csv_file was supplied:
    - labeled: image paths and labels come from column 0/1 of the CSV;
    - unlabeled: every .jpg in img_dir is used, with no label.
    The mode is detected at access time via AttributeError on
    ``self.labels_df`` (EAFP).
    """
    def __init__(self, img_dir, labels_csv_file=None, transform=None):
        # img_dir: directory of .jpg images (used directly only in
        # unlabeled mode; in labeled mode paths come from the CSV).
        self.img_dir = img_dir
        if labels_csv_file:
            self.labels_df = pd.read_csv(labels_csv_file)
        else:
            self.images = [os.path.join(img_dir, f) for f in os.listdir(img_dir) if f.endswith(".jpg")]
        self.transform = transform
    def __getitem__(self, idx):
        # Labeled mode stores the image path in CSV column 0.
        try:
            img_path = self.labels_df.iloc[idx, 0]
        except AttributeError:
            img_path = self.images[idx]
        # print("img_path:", img_path)
        img = imread(img_path)
        if self.transform:
            img = self.transform(img)
        sample = {
            "image": img,
        }
        # In labeled mode also attach the label (CSV column 1) and the row
        # index as an id; unlabeled samples carry only the image.
        try:
            sample["label"] = self.labels_df.iloc[idx, 1]#torch.tensor((self.labels_df.iloc[idx, 1], self.labels_df.iloc[idx, 2]))
            sample["id"] = idx#self.labels_df.loc[idx, "id"]
        except AttributeError:
            #sample["id"] = os.path.basename(self.images[idx]).replace(".tif", "")
            pass
        return sample
    def __len__(self):
        # Length is the CSV row count (labeled) or the image count.
        try:
            return self.labels_df.shape[0]
        except AttributeError:
            return len(self.images)
if __name__ == '__main__':
    # ---- Training configuration -------------------------------------------
    EPOCHS = 20
    # (`True if x else False` collapsed to the boolean expression itself.)
    USE_GPU = torch.cuda.is_available()
    device = torch.device("cuda:0" if USE_GPU else "cpu")
    writer = SummaryWriter("runs/self_driving_ai")
    IMG_DIR = "../Data/Training_Images"
    LABELS_PATH = "../Data/Training_Data"
    # ---- Data ---------------------------------------------------------------
    labels_df = pd.read_csv(LABELS_PATH, header=None)
    train_indices, test_indices = train_test_split(labels_df.index - 1, test_size=0.20)
    train_dataset = DrivingDataset(IMG_DIR, LABELS_PATH, transform_pipe)
    # ---- Model: pretrained ResNet-50 with the first layers frozen -----------
    model = models.resnet50(pretrained=True)
    freeze_layers = 6
    for i, child in enumerate(model.children()):
        if i <= freeze_layers:
            for param in child.parameters():
                param.requires_grad = False
    # Replace the classifier head with a single regression output.
    model.fc = torch.nn.Sequential(
        torch.nn.Linear(
            in_features=2048,
            out_features=1
        ),
    )
    model.to(device)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=32,
        sampler=torch.utils.data.SubsetRandomSampler(
            train_indices
        ))
    test_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=32,
        sampler=torch.utils.data.SubsetRandomSampler(
            test_indices
        ))
    optimizer = torch.optim.Adam(model.parameters())
    criterion = torch.nn.MSELoss()
    best_model_wts = copy.deepcopy(model.state_dict())
    # BUG FIX: "best" means *lowest* MSE, so start at +inf and compare with <.
    # The original started at 0.0 and used >, which checkpointed the *worst*
    # test epochs instead of the best ones.
    best_epoch_mse_loss = float("inf")
    phases = OrderedDict([("train", train_loader), ("test", test_loader)])
    # NOTE(review): model.train()/model.eval() are never toggled, so
    # dropout/batch-norm run in training mode during evaluation — confirm
    # whether that is intended.
    start = time.time()
    for i in range(EPOCHS):
        epoch = i + 1
        for phase, loader in phases.items():
            # BUG FIX: reset the running statistics per *phase*. The original
            # reset them once per epoch, so the "test" numbers silently
            # included all of the training-phase sums. (Also removed the
            # unused `correct_sum` accumulator.)
            samples = 0
            mse_loss_sum = 0
            for j, batch in enumerate(loader):
                X = batch["image"]
                labels = batch["label"]
                if USE_GPU:
                    X = X.cuda()
                    labels = labels.cuda()
                optimizer.zero_grad()
                # Gradients are tracked (and applied) only in the train phase.
                with torch.set_grad_enabled(phase == 'train'):
                    y = model(X)
                    loss = criterion(
                        y,
                        labels.view(-1, 1).float()
                    )
                    if phase == "train":
                        loss.backward()
                        optimizer.step()
                # `loss` is the batch *mean*; scale by the batch size so the
                # running sum averages correctly over samples.
                mse_loss_sum += loss.item() * X.shape[0]
                samples += X.shape[0]
                # Print batch statistics every 50 batches
                if j % 50 == 49 and phase == "train":
                    print("{}:{} - MSE_loss: {}".format(
                        i + 1,
                        j + 1,
                        float(mse_loss_sum) / float(samples)
                    ))
            # Print epoch statistics
            epoch_mse_loss = float(mse_loss_sum) / float(samples)
            print("epoch: {} - {} MSE_loss:{:.4f}".format(i + 1, phase, epoch_mse_loss))
            # Checkpoint whenever the held-out (test) loss improves.
            if phase == "test" and epoch_mse_loss < best_epoch_mse_loss:
                # BUG FIX: log the test-phase epoch loss itself; the original
                # divided a mixed train+test sum by len(train_indices).
                writer.add_scalar('training MSE loss', epoch_mse_loss, epoch)
                best_epoch_mse_loss = epoch_mse_loss
                best_model_wts = copy.deepcopy(model.state_dict())
                torch.save(best_model_wts, "resnet50-optimal.pth")
    writer.close()
    end = time.time()
    train_time = end - start
    print("Total Training Time: {} seconds".format(train_time))
    print("Training Time Per Epoch: {} seconds".format(train_time / EPOCHS))
| 32.264368 | 150 | 0.573388 | 1,416 | 0.252227 | 0 | 0 | 0 | 0 | 0 | 0 | 1,063 | 0.189348 |
0f332ca51a17140c793e8775330bba139c2fa976 | 9,602 | py | Python | py/lvmutil/test/test_census.py | sdss/lvmutil | 1938f6e1d7f4074a90a55570a316886850c5c6af | [
"BSD-3-Clause"
] | null | null | null | py/lvmutil/test/test_census.py | sdss/lvmutil | 1938f6e1d7f4074a90a55570a316886850c5c6af | [
"BSD-3-Clause"
] | null | null | null | py/lvmutil/test/test_census.py | sdss/lvmutil | 1938f6e1d7f4074a90a55570a316886850c5c6af | [
"BSD-3-Clause"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""Test lvmutil.census.
"""
from __future__ import (absolute_import, division,
print_function, unicode_literals)
# The line above will help with 2to3 support.
import unittest
has_mock = True
try:
from unittest.mock import call, patch, Mock
except ImportError:
has_mock = False
has_commonpath = True
try:
from os.path import commonpath
except ImportError:
has_commonpath = False
class TestCensus(unittest.TestCase):
    """Test lvmutil.census.
    """

    @classmethod
    def setUpClass(cls):
        # Test fixtures live in the 't' directory next to this file.
        from os.path import dirname, join
        cls.data_dir = join(dirname(__file__), 't')

    @classmethod
    def tearDownClass(cls):
        pass

    def test_ScannedFile(self):
        """Test simple object storing file data.
        """
        from ..census import ScannedFile
        f = ScannedFile('foo.txt', 12345, 1973)
        self.assertEqual(f.filename, 'foo.txt')
        self.assertEqual(f.size, 12345)
        self.assertEqual(f.year, 1973)
        # Link-related attributes default to "not a link".
        self.assertFalse(f.islink)
        self.assertFalse(f.isexternal)
        self.assertIsNone(f.linkname)
        self.assertIsNone(f.linksize)
        self.assertIsNone(f.linkyear)

    def test_get_options(self):
        """Test command-line argument parsing.
        """
        from ..census import get_options
        options = get_options([])
        self.assertFalse(options.verbose)
        options = get_options(['--verbose'])
        self.assertTrue(options.verbose)
        options = get_options(['-c', 'foo.yaml'])
        self.assertEqual(options.config, 'foo.yaml')

    @unittest.skipUnless(has_mock,
                         "Skipping test that requires unittest.mock.")
    def test_walk_error(self):
        """Test error-handling function for os.walk().
        """
        from ..census import walk_error
        # OSError without a destination filename.
        with patch('lvmutil.log.get_logger') as mock_get_logger:
            mock = Mock()
            mock_get_logger.return_value = mock
            try:
                raise OSError(2, 'File not found', 'foo.txt')
            except OSError as e:
                walk_error(e)
            calls = [call.error("[Errno 2] File not found: 'foo.txt'")]
            self.assertListEqual(mock.mock_calls, calls)
        # OSError carrying a destination filename (e.g. failed rename).
        with patch('lvmutil.log.get_logger') as mock_get_logger:
            mock = Mock()
            mock_get_logger.return_value = mock
            try:
                raise OSError(2, 'File not found', 'foo.txt', None, 'bar.txt')
            except OSError as e:
                walk_error(e)
            calls = [call.error("[Errno 2] File not found: 'foo.txt' -> " +
                                "'bar.txt'")]
            self.assertListEqual(mock.mock_calls, calls)

    def test_year(self):
        """Test conversion of mtime to year.
        """
        # Note: the unused ``from time import gmtime`` import was removed.
        from ..census import year
        mtime = 1475692367.0
        self.assertEqual(year(mtime), 2017)
        self.assertEqual(year(mtime, fy=False), 2016)

    @unittest.skipUnless(has_mock,
                         "Skipping test that requires unittest.mock.")
    def test_scan_file(self):
        """Test analysis of a single file.
        """
        from os import stat_result
        from os.path import join
        from ..census import scan_file
        # The whole ``os`` module is mocked so stat/lstat/islink results can
        # be controlled without touching the real filesystem.
        mock_os = Mock()
        fd = join(self.data_dir, 'test.module')
        intlink = join(self.data_dir, 'test.module.link')
        extlink = '/foo/bar/t/test.module'
        s = stat_result((33188, 83865343, 16777220,
                         1, 501, 20, 973,
                         1491428112, 1446143268,
                         1462630505))
        #
        # Simulate a simple file.
        #
        calls = [call.debug("os.stat('{0}')".format(fd)),
                 call.warning("{0} does not have correct group id!".format(fd))]
        mock_log = Mock()
        with patch('lvmutil.log.get_logger') as mock_get_logger:
            with patch.dict('sys.modules', {'os': mock_os,
                                            'os.path': mock_os.path}):
                mock_get_logger.return_value = mock_log
                mock_os.environ = dict()
                mock_os.stat.return_value = s
                mock_os.path.islink.return_value = False
                mock_os.path.join.return_value = fd
                f = scan_file(self.data_dir, 'test.module', 12345)
        self.assertListEqual(mock_log.mock_calls, calls)
        self.assertEqual(f.filename, fd)
        self.assertEqual(f.size, 973)
        self.assertEqual(f.year, 2016)
        #
        # Simulate an internal link.
        #
        calls = [call.debug("os.stat('{0}')".format(fd)),
                 call.warning("{0} does not have correct group id!".format(fd)),
                 call.debug("os.lstat('{0}')".format(fd)),
                 call.warning("{0} does not have correct group id!".format(fd)),
                 call.debug("Found internal link {0} -> {0}.link.".format(fd))]
        mock_log = Mock()
        with patch('lvmutil.log.get_logger') as mock_get_logger:
            with patch.dict('sys.modules', {'os': mock_os,
                                            'os.path': mock_os.path}):
                mock_get_logger.return_value = mock_log
                mock_os.environ = dict()
                mock_os.stat.return_value = s
                mock_os.lstat.return_value = s
                mock_os.path.commonpath.return_value = self.data_dir
                mock_os.path.islink.return_value = True
                mock_os.path.join.return_value = fd
                mock_os.path.realpath.return_value = intlink
                f = scan_file(self.data_dir, 'test.module', 12345)
        self.assertListEqual(mock_log.mock_calls, calls)
        self.assertEqual(f.filename, fd)
        self.assertEqual(f.size, 973)
        self.assertTrue(f.islink)
        self.assertFalse(f.isexternal)
        self.assertEqual(f.linkname, intlink)
        #
        # Simulate an external link.
        #
        calls = [call.debug("os.stat('{0}')".format(fd)),
                 call.warning("{0} does not have correct group id!".format(fd)),
                 call.debug("os.lstat('{0}')".format(fd)),
                 call.warning("{0} does not have correct group id!".format(fd)),
                 call.debug("Found external link {0} -> {1}.".format(fd, extlink))]
        mock_log = Mock()
        with patch('lvmutil.log.get_logger') as mock_get_logger:
            with patch.dict('sys.modules', {'os': mock_os,
                                            'os.path': mock_os.path}):
                mock_get_logger.return_value = mock_log
                mock_os.environ = dict()
                mock_os.stat.return_value = s
                mock_os.lstat.return_value = s
                # A common path of '/' marks the link target as external.
                mock_os.path.commonpath.return_value = '/'
                mock_os.path.islink.return_value = True
                mock_os.path.join.return_value = fd
                mock_os.path.realpath.return_value = extlink
                f = scan_file(self.data_dir, 'test.module', 12345)
        self.assertListEqual(mock_log.mock_calls, calls)
        self.assertEqual(f.filename, fd)
        self.assertEqual(f.size, 973)
        self.assertTrue(f.islink)
        self.assertTrue(f.isexternal)
        self.assertEqual(f.linkname, extlink)

    @unittest.skipUnless(has_commonpath,
                         "Skipping test that requires os.path.commonpath().")
    def test_in_path(self):
        """Test directory hierarchy checker.
        """
        from ..census import in_path
        self.assertTrue(in_path('/foo/bar/baz', '/foo/bar/baz/a/b/c/foo.txt'))
        self.assertTrue(in_path('/foo/bar/baz', '/foo/bar/baz/a'))
        # Sibling directories sharing a prefix must not count as contained.
        self.assertFalse(in_path('/foo/bar/baz', '/foo/bar/baz-x2'))
        self.assertFalse(in_path('/foo/bar/baz', '/foo/baz/bar'))

    def test_output_csv(self):
        """Test CSV writer.
        """
        from os import remove
        from os.path import join
        from collections import OrderedDict
        from ..census import output_csv
        csvfile = join(self.data_dir, 'test_output_csv.csv')
        d = OrderedDict()
        d['/foo/bar'] = {2000: {'number': 2, 'size': 20},
                         2001: {'number': 2, 'size': 20},
                         2002: {'number': 2, 'size': 20}}
        d['/foo/bar/baz'] = {2000: {'number': 1, 'size': 10},
                             2001: {'number': 1, 'size': 10},
                             2002: {'number': 1, 'size': 10}}
        dd = OrderedDict()
        dd['/a/b/c'] = {2001: {'number': 2, 'size': 50},
                        2002: {'number': 4, 'size': 100},
                        2003: {'number': 2, 'size': 50}}
        dd['/a/b/c/d'] = {2002: {'number': 2, 'size': 50}}
        output_data = output_csv([d, dd], csvfile)
        # Expected CSV: counts/sizes are cumulative across fiscal years.
        datatext = """Directory,FY2000 Number,FY2001 Number,FY2002 Number,FY2003 Number,FY2000 Size,FY2001 Size,FY2002 Size,FY2003 Size
/foo/bar,2,4,6,6,20,40,60,60
/foo/bar/baz,1,2,3,3,10,20,30,30
/a/b/c,0,2,6,8,0,50,150,200
/a/b/c/d,0,0,2,2,0,0,50,50"""
        data = [row.split(',') for row in datatext.split('\n')]
        self.assertEqual(len(output_data), len(data))
        for k in range(len(data)):
            self.assertListEqual(output_data[k], data[k])
        remove(csvfile)
def test_suite():
    """Allow testing of only this module with the command::

        python setup.py test -m <modulename>
    """
    loader = unittest.defaultTestLoader
    return loader.loadTestsFromName(__name__)
| 40.686441 | 135 | 0.562175 | 8,890 | 0.925849 | 0 | 0 | 6,150 | 0.640492 | 0 | 0 | 2,272 | 0.236617 |
0f33e5b8c215544f5bd67330d63bc69f98ee16b3 | 1,182 | py | Python | catatom2osm/csvtools.py | OSM-es/CatAtom2Osm | 89394161b07c5dd7cd2843bce8f911b93796b33a | [
"BSD-2-Clause"
] | 8 | 2018-01-30T18:26:29.000Z | 2022-02-03T19:16:54.000Z | catatom2osm/csvtools.py | OSM-es/CatAtom2Osm | 89394161b07c5dd7cd2843bce8f911b93796b33a | [
"BSD-2-Clause"
] | 99 | 2018-01-22T08:37:53.000Z | 2022-03-28T13:50:06.000Z | catatom2osm/csvtools.py | OSM-es/CatAtom2Osm | 89394161b07c5dd7cd2843bce8f911b93796b33a | [
"BSD-2-Clause"
] | 9 | 2018-01-21T15:51:41.000Z | 2022-01-04T18:35:19.000Z | # -*- coding: utf-8 -*-
"""
CSV related help functions
"""
from __future__ import unicode_literals
oldstr = str
from builtins import str
import csv
import io
import six
from catatom2osm.config import eol, encoding, delimiter
def dict2csv(csv_path, a_dict, sort=None):
    """Write a dictionary to a csv file, optionally sorted by key (sort=0)
    or by value (sort=1).
    """
    entries = list(a_dict.items())
    if sort in (0, 1):
        entries.sort(key=lambda entry: entry[sort])
    with io.open(csv_path, 'w', encoding=encoding) as csv_file:
        for key, value in entries:
            csv_file.write('{0}{1}{2}\n'.format(key, delimiter, value))
def csv2dict(csv_path, a_dict, encoding=encoding):
    """Read a dictionary from a csv file (first column: key, second: value)."""
    with open(csv_path) as csv_file:
        reader = csv.reader(csv_file, delimiter=oldstr(delimiter))
        for record in reader:
            if len(record) < 2:
                raise IOError(_("Failed to load CSV file '%s'") % csv_file.name)
            if six.PY2:
                # Python 2 reads bytes; decode both columns explicitly.
                a_dict[record[0].decode(encoding)] = record[1].decode(encoding)
            else:
                a_dict[record[0]] = record[1]
    return a_dict
| 30.307692 | 80 | 0.609137 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 249 | 0.21066 |
0f3599212f84331de9e9f8ea42c3d1bb4ebb50e9 | 1,881 | py | Python | mpesaviz/apps/transactions/models.py | savioabuga/mpesaviz | 2567ab5646f8f684c32b0644f5b06d3cc58c62bc | [
"BSD-3-Clause"
] | 2 | 2015-07-07T09:30:27.000Z | 2017-01-31T20:26:45.000Z | mpesaviz/apps/transactions/models.py | savioabuga/mpesaviz | 2567ab5646f8f684c32b0644f5b06d3cc58c62bc | [
"BSD-3-Clause"
] | null | null | null | mpesaviz/apps/transactions/models.py | savioabuga/mpesaviz | 2567ab5646f8f684c32b0644f5b06d3cc58c62bc | [
"BSD-3-Clause"
] | null | null | null | from django.db import models
from model_utils import Choices
from model_utils.models import TimeStampedModel
from phonenumber_field.modelfields import PhoneNumberField
from django_pandas.io import read_frame
class Transaction(TimeStampedModel):
    """A single mobile-money transaction parsed from an uploaded statement."""

    TYPES = Choices(('sent', 'Sent Transactions'), ('received', 'Received Transactions'), ('paybill', 'Pay bill Transactions'),
                    ('buy_good', 'Buy Good Transactions'), ('airtime', 'Airtime'), ('deposits', 'Deposits'), ('withdrawals', 'Withdrawals'),)

    code = models.CharField(max_length=30)
    date = models.DateTimeField()
    type = models.CharField(choices=TYPES, max_length=20, default=TYPES.sent)
    amount = models.DecimalField(max_digits=10, decimal_places=4)
    recipient = models.CharField(max_length=30, blank=True)
    phonenumber = PhoneNumberField()
    sent_by = models.CharField(max_length=30, blank=True)
    account_number = models.CharField(max_length=30)
    airtime_for = models.CharField(max_length=30)

    def monthly_transactions(self):
        """Return total amounts grouped by (year, month, type) as a DataFrame."""
        dataframe = read_frame(Transaction.objects.all())
        # Derive human-readable month/year columns from the datetime column.
        dataframe['month'] = [date.strftime('%B') for date in dataframe['date']]
        dataframe['year'] = [date.strftime('%Y') for date in dataframe['date']]
        groups = dataframe.groupby(['year', 'month', 'type'])['amount'].sum().reset_index(name='amount')
        return groups

    def top_recipients(self):
        """Return sent-transaction totals per recipient, largest first."""
        dataframe = read_frame(Transaction.objects.all())
        recipient_dataframe = dataframe[dataframe.type == 'Sent Transactions']
        # DataFrame.sort() was deprecated in pandas 0.17 and removed in 0.20;
        # sort_values() is the supported equivalent.
        return recipient_dataframe.groupby(['recipient', 'type'])['amount'].sum().reset_index(name='amount').sort_values('amount', ascending=False)
class UploadFile(TimeStampedModel):
    """An uploaded transactions file; ``type`` records which category of
    Transaction.TYPES the file contains (defaults to sent transactions).
    """
    type = models.CharField(choices=Transaction.TYPES, max_length=20, default=Transaction.TYPES.sent)
    file = models.FileField(upload_to='uploads/')
| 47.025 | 141 | 0.714514 | 1,665 | 0.885167 | 0 | 0 | 0 | 0 | 0 | 0 | 325 | 0.17278 |
0f38c2d9ebe98c43fd9dab61490261ca69679e03 | 4,268 | py | Python | ros_bt_py/test/rostest/test_topic_publish_leaf.py | fzi-forschungszentrum-informatik/ros_bt_py | ed65e2b2f0a03411101f455c0ab38401ba50bada | [
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | 4 | 2022-03-11T14:30:43.000Z | 2022-03-31T07:21:35.000Z | ros_bt_py/test/rostest/test_topic_publish_leaf.py | fzi-forschungszentrum-informatik/ros_bt_py | ed65e2b2f0a03411101f455c0ab38401ba50bada | [
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | ros_bt_py/test/rostest/test_topic_publish_leaf.py | fzi-forschungszentrum-informatik/ros_bt_py | ed65e2b2f0a03411101f455c0ab38401ba50bada | [
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -------- BEGIN LICENSE BLOCK --------
# Copyright 2022 FZI Forschungszentrum Informatik
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of the {copyright_holder} nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# -------- END LICENSE BLOCK --------
from threading import Lock
import unittest
import rospy
from std_msgs.msg import Int32
from ros_bt_py_msgs.msg import Node as NodeMsg
from ros_bt_py.node_config import NodeConfig
from ros_bt_py.nodes.topic import TopicPublisher
PKG = 'ros_bt_py'
class TestTopicPublisherLeaf(unittest.TestCase):
    """This expects a test_topics_node.py instance running alongside

    That node will "reflect" anything we publish to /numbers_in - it's a
    separate node to avoid threading shenanigans in here.
    """
    def setUp(self):
        # The leaf under test publishes to /numbers_in; the companion node
        # echoes each message back on /numbers_out (captured by self.cb).
        self.publisher_leaf = TopicPublisher(options={
            'topic_name': '/numbers_in',
            'topic_type': Int32
        })
        self.publisher_leaf.setup()
        self._lock = Lock()
        self.msg = None
        self.subscriber = rospy.Subscriber('/numbers_out', Int32, self.cb)
        # Block until the companion node signals readiness on /ready.
        rospy.wait_for_message('/ready', Int32)

    def tearDown(self):
        self.publisher_leaf.shutdown()

    def cb(self, msg):
        # Subscriber callback runs on a rospy thread; guard shared state.
        with self._lock:
            self.msg = msg

    def testSendsNumber(self):
        self.assertIsNone(self.msg)
        self.publisher_leaf.inputs['message'] = Int32(data=1)
        self.publisher_leaf.tick()
        # This should basically never fail - anything that can go wrong should
        # go wrong in the setup() method
        self.assertEqual(self.publisher_leaf.state, NodeMsg.SUCCEEDED)
        # NOTE(review): fixed 0.1 s grace for the round-trip echo -- could be
        # flaky on a heavily loaded machine.
        rospy.sleep(0.1)
        self.assertEqual(self.msg.data, 1)

        self.publisher_leaf.inputs['message'] = Int32(data=42)
        self.publisher_leaf.tick()
        # This should basically never fail - anything that can go wrong should
        # go wrong in the setup() method
        self.assertEqual(self.publisher_leaf.state, NodeMsg.SUCCEEDED)
        rospy.sleep(0.1)
        self.assertEqual(self.msg.data, 42)

        # Untick/reset must leave the leaf usable for another publish cycle.
        self.assertEqual(self.publisher_leaf.untick(), NodeMsg.IDLE)
        self.publisher_leaf.reset()

        self.publisher_leaf.inputs['message'] = Int32(data=23)
        self.publisher_leaf.tick()
        # This should basically never fail - anything that can go wrong should
        # go wrong in the setup() method
        self.assertEqual(self.publisher_leaf.state, NodeMsg.SUCCEEDED)
        rospy.sleep(0.1)
        self.assertEqual(self.msg.data, 23)
if __name__ == '__main__':
    rospy.init_node('test_topic_publish_leaf')
    import rostest
    import sys
    import os
    # Use a test-specific coverage file so concurrent rostests don't
    # overwrite each other's coverage data.
    os.environ['COVERAGE_FILE'] = '%s.%s.coverage' % (PKG, 'test_topic_publish_leaf')
    rostest.rosrun(PKG, 'test_topic_publish_leaf', TestTopicPublisherLeaf,
                   sysargs=sys.argv + ['--cov'])
| 38.45045 | 85 | 0.702905 | 2,020 | 0.47329 | 0 | 0 | 0 | 0 | 0 | 0 | 2,361 | 0.553187 |
0f39a392c46b7861f4edc27f2f6de39934c00cb4 | 1,236 | py | Python | workbench/awt/migrations/0012_auto_20201012_1433.py | yoshson/workbench | 701558cac3357cd82e4dc99f0fefed12ee81ddc5 | [
"MIT"
] | 15 | 2020-09-02T22:17:34.000Z | 2022-02-01T20:09:10.000Z | workbench/awt/migrations/0012_auto_20201012_1433.py | yoshson/workbench | 701558cac3357cd82e4dc99f0fefed12ee81ddc5 | [
"MIT"
] | 18 | 2020-01-08T15:28:26.000Z | 2022-02-28T02:46:41.000Z | workbench/awt/migrations/0012_auto_20201012_1433.py | yoshson/workbench | 701558cac3357cd82e4dc99f0fefed12ee81ddc5 | [
"MIT"
] | 8 | 2020-09-29T08:00:24.000Z | 2022-01-16T11:58:19.000Z | # Generated by Django 3.1.2 on 2020-10-12 12:33
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds Absence.is_working_time, widens the reason choices, and backfills
    # historic rows with raw SQL.

    dependencies = [
        ("awt", "0011_absence_ends_on"),
    ]

    operations = [
        migrations.AddField(
            model_name="absence",
            name="is_working_time",
            field=models.BooleanField(default=True, verbose_name="is working time"),
        ),
        migrations.AlterField(
            model_name="absence",
            name="reason",
            field=models.CharField(
                choices=[
                    ("vacation", "vacation"),
                    ("sickness", "sickness"),
                    ("paid", "paid leave (e.g. civilian service, maternity etc.)"),
                    ("other", "other reasons (no working time)"),
                    ("correction", "Working time correction"),
                ],
                max_length=10,
                verbose_name="reason",
            ),
        ),
        # Data backfill: pre-2020 "other" absences become "paid"; remaining
        # "other" absences no longer count as working time. The empty reverse
        # SQL ("") makes the rollback a no-op for this data change.
        migrations.RunSQL(
            """
            UPDATE awt_absence
            SET reason='paid'
            WHERE starts_on<'2020-01-01' AND reason='other';
            UPDATE awt_absence
            SET is_working_time=FALSE
            WHERE reason='other';
            """,
            "",
        ),
    ]
| 26.869565 | 84 | 0.514563 | 1,143 | 0.924757 | 0 | 0 | 0 | 0 | 0 | 0 | 492 | 0.398058 |
0f3b23a2fcba20e9abd0c4b58ad9673908703395 | 7,665 | py | Python | tfsnippet/examples/auto_encoders/vae.py | 897615138/tfsnippet-jill | 2fc898a4def866c8d3c685168df1fa22083bb143 | [
"MIT"
] | null | null | null | tfsnippet/examples/auto_encoders/vae.py | 897615138/tfsnippet-jill | 2fc898a4def866c8d3c685168df1fa22083bb143 | [
"MIT"
] | null | null | null | tfsnippet/examples/auto_encoders/vae.py | 897615138/tfsnippet-jill | 2fc898a4def866c8d3c685168df1fa22083bb143 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import functools
import click
import tensorflow as tf
from tensorflow.contrib.framework import arg_scope, add_arg_scope
from tfsnippet.bayes import BayesianNet
from tfsnippet.distributions import Normal, Bernoulli
from tfsnippet.examples.datasets import load_mnist, bernoulli_flow
from tfsnippet.examples.nn import (l2_regularizer,
regularization_loss,
dense)
from tfsnippet.examples.utils import (MLConfig,
MLResults,
save_images_collection,
config_options,
pass_global_config,
bernoulli_as_pixel,
print_with_title)
from tfsnippet.scaffold import TrainLoop
from tfsnippet.trainer import AnnealingDynamicValue, Trainer, Evaluator
from tfsnippet.utils import global_reuse, flatten, unflatten, create_session
class ExpConfig(MLConfig):
    """Hyper-parameters for the VAE experiment (exposed as CLI options via
    the ``config_options(ExpConfig)`` decorator on ``main``).
    """
    # model parameters
    z_dim = 40   # latent code dimensionality
    x_dim = 784  # flattened input dimensionality (28x28 MNIST)
    # training parameters
    write_summary = False       # write TensorBoard summaries if True
    max_epoch = 3000
    max_step = None             # no step limit; training is epoch-bounded
    batch_size = 128
    l2_reg = 0.0001             # L2 kernel-regularizer weight
    initial_lr = 0.001
    lr_anneal_factor = 0.5      # learning rate is multiplied by this on anneal
    lr_anneal_epoch_freq = 300  # anneal every N epochs
    lr_anneal_step_freq = None
    # evaluation parameters
    test_n_z = 500              # z samples per input for importance-sampled NLL
    test_batch_size = 128
@global_reuse
@add_arg_scope
@pass_global_config
def q_net(config, x, observed=None, n_z=None, is_training=True):
    """Build the variational posterior q(z|x) as a BayesianNet."""
    net = BayesianNet(observed=observed)
    # Two regularized dense layers extract features from the float-cast input.
    with arg_scope([dense],
                   activation_fn=tf.nn.leaky_relu,
                   kernel_regularizer=l2_regularizer(config.l2_reg)):
        hidden = tf.to_float(x)
        for _ in range(2):
            hidden = dense(hidden, 500)
    # Diagonal-Gaussian posterior over the latent code: z ~ q(z|x).
    # (The mean/logstd layers sit outside the arg_scope on purpose: no
    # activation or regularizer is applied to them.)
    posterior = Normal(mean=dense(hidden, config.z_dim, name='z_mean'),
                       logstd=dense(hidden, config.z_dim, name='z_logstd'))
    net.add('z', posterior, n_samples=n_z, group_ndims=1)
    return net
@global_reuse
@add_arg_scope
@pass_global_config
def p_net(config, observed=None, n_z=None, is_training=True):
    """Build the generative model p(x, z) as a BayesianNet."""
    net = BayesianNet(observed=observed)
    # Standard-normal prior on the latent code: z ~ p(z).
    prior = Normal(mean=tf.zeros([1, config.z_dim]),
                   logstd=tf.zeros([1, config.z_dim]))
    z = net.add('z', prior, group_ndims=1, n_samples=n_z)
    # Decode the (flattened) latent samples with two regularized dense layers.
    with arg_scope([dense],
                   activation_fn=tf.nn.leaky_relu,
                   kernel_regularizer=l2_regularizer(config.l2_reg)):
        hidden, s1, s2 = flatten(z, 2)
        for _ in range(2):
            hidden = dense(hidden, 500)
    # Bernoulli likelihood over pixels: x ~ p(x|z). The logits layer is
    # outside the arg_scope, so it gets no activation or regularizer.
    x_logits = unflatten(dense(hidden, config.x_dim, name='x_logits'), s1, s2)
    net.add('x', Bernoulli(logits=x_logits), group_ndims=1)
    return net
@click.command()
@click.option('--result-dir', help='The result directory.', metavar='PATH',
              required=False, type=str)
@config_options(ExpConfig)
@pass_global_config
def main(config, result_dir):
    """Train a VAE on binarized MNIST and record metrics/plots under
    ``result_dir``.
    """
    # print the config
    print_with_title('Configurations', config.format_config(), after='\n')

    # open the result object and prepare for result directories
    results = MLResults(result_dir)
    results.make_dirs('plotting', exist_ok=True)
    results.make_dirs('train_summary', exist_ok=True)

    # input placeholders
    input_x = tf.placeholder(
        dtype=tf.int32, shape=(None, config.x_dim), name='input_x')
    is_training = tf.placeholder(
        dtype=tf.bool, shape=(), name='is_training')
    learning_rate = tf.placeholder(shape=(), dtype=tf.float32)
    learning_rate_var = AnnealingDynamicValue(config.initial_lr,
                                              config.lr_anneal_factor)

    # build the model
    with arg_scope([q_net, p_net], is_training=is_training):
        # derive the loss and lower-bound for training (SGVB estimator)
        train_q_net = q_net(input_x)
        train_chain = train_q_net.chain(
            p_net, latent_names=['z'], latent_axis=0, observed={'x': input_x})
        vae_loss = tf.reduce_mean(train_chain.vi.training.sgvb())
        loss = vae_loss + regularization_loss()

        # derive the nll and logits output for testing; test_n_z importance
        # samples are drawn per input for the NLL estimate
        test_q_net = q_net(input_x, n_z=config.test_n_z)
        test_chain = test_q_net.chain(
            p_net, latent_names=['z'], latent_axis=0, observed={'x': input_x})
        test_nll = -tf.reduce_mean(test_chain.vi.evaluation.is_loglikelihood())
        test_lb = tf.reduce_mean(test_chain.vi.lower_bound.elbo())

    # derive the optimizer (UPDATE_OPS are run before applying gradients)
    optimizer = tf.train.AdamOptimizer(learning_rate)
    params = tf.trainable_variables()
    grads = optimizer.compute_gradients(loss, var_list=params)
    with tf.control_dependencies(
            tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        train_op = optimizer.apply_gradients(grads)

    # derive the plotting function: a 10x10 grid of prior samples
    with tf.name_scope('plot_x'):
        plot_p_net = p_net(n_z=100, is_training=is_training)
        x_plots = tf.reshape(bernoulli_as_pixel(plot_p_net['x']), (-1, 28, 28))

    def plot_samples(loop):
        # `session` is bound by the `with create_session()` block below;
        # this closure only runs inside trainer.run(), after it exists.
        with loop.timeit('plot_time'):
            images = session.run(x_plots, feed_dict={is_training: False})
            save_images_collection(
                images=images,
                filename='plotting/{}.png'.format(loop.epoch),
                grid_size=(10, 10),
                results=results
            )

    # prepare for training and testing data
    (x_train, y_train), (x_test, y_test) = load_mnist()
    train_flow = bernoulli_flow(
        x_train, config.batch_size, shuffle=True, skip_incomplete=True)
    test_flow = bernoulli_flow(
        x_test, config.test_batch_size, sample_now=True)

    with create_session().as_default() as session, \
            train_flow.threaded(5) as train_flow:
        # train the network
        with TrainLoop(params,
                       var_groups=['q_net', 'p_net'],
                       max_epoch=config.max_epoch,
                       max_step=config.max_step,
                       summary_dir=(results.system_path('train_summary')
                                    if config.write_summary else None),
                       summary_graph=tf.get_default_graph(),
                       early_stopping=False) as loop:
            trainer = Trainer(
                loop, train_op, [input_x], train_flow,
                feed_dict={learning_rate: learning_rate_var, is_training: True},
                metrics={'loss': loss}
            )
            trainer.anneal_after(
                learning_rate_var,
                epochs=config.lr_anneal_epoch_freq,
                steps=config.lr_anneal_step_freq
            )
            evaluator = Evaluator(
                loop,
                metrics={'test_nll': test_nll, 'test_lb': test_lb},
                inputs=[input_x],
                data_flow=test_flow,
                feed_dict={is_training: False},
                time_metric_name='test_time'
            )
            # keep the results object up to date with the latest evaluation
            evaluator.after_run.add_hook(
                lambda: results.update_metrics(evaluator.last_metrics_dict))
            trainer.evaluate_after_epochs(evaluator, freq=10)
            trainer.evaluate_after_epochs(
                functools.partial(plot_samples, loop), freq=10)
            trainer.log_after_epochs(freq=1)
            trainer.run()

    # print the final metrics and close the results object
    print_with_title('Results', results.format_metrics(), before='\n')
    results.close()


if __name__ == '__main__':
    main()
| 36.5 | 80 | 0.616438 | 401 | 0.052316 | 0 | 0 | 6,181 | 0.806393 | 0 | 0 | 854 | 0.111416 |
0f3b396237e70d3630a363876379a8df6d433857 | 7,582 | py | Python | HW2/Q3/Q3.py | markblitz/RU_573_HW | b6b9799b1af2f1d5e65362e7fb841cd30bbc951f | [
"MIT"
] | null | null | null | HW2/Q3/Q3.py | markblitz/RU_573_HW | b6b9799b1af2f1d5e65362e7fb841cd30bbc951f | [
"MIT"
] | null | null | null | HW2/Q3/Q3.py | markblitz/RU_573_HW | b6b9799b1af2f1d5e65362e7fb841cd30bbc951f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import time
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
import copy
# In[22]:
# helps from: https://www.geeksforgeeks.org/merge-sort/
def RecursiveMergeSort(input_array, is_first=True):
    """Merge-sort ``input_array`` recursively while counting comparisons.

    Args:
        input_array: list of comparable items. The top-level call sorts a
            copy, so the caller's list is left untouched; recursive calls
            (``is_first=False``) sort their slice in place.
        is_first: True for the outermost call, False inside the recursion.

    Returns:
        Tuple ``(compare_time, time_run)``: the number of element
        comparisons performed and the wall-clock seconds spent, including
        all recursive calls.
    """
    time_start = time.time()
    compare_time = 0
    # Work on a copy at the top level so the caller's data survives intact
    # (the benchmark script reuses each input array for several runs).
    sort_array = copy.deepcopy(input_array) if is_first else input_array
    if len(sort_array) > 1:  # length 0/1 is already sorted: 0 comparisons
        mid = len(sort_array) // 2
        left_array = sort_array[:mid]
        right_array = sort_array[mid:]
        # Sort both halves and accumulate their comparison counts. The
        # recursive calls always return a tuple, so the original's
        # ``if result != None`` guards were dead code and were removed.
        compare_time += RecursiveMergeSort(left_array, is_first=False)[0]
        compare_time += RecursiveMergeSort(right_array, is_first=False)[0]
        # Merge the two sorted halves back into sort_array.
        i = j = k = 0
        while i < len(left_array) and j < len(right_array):
            compare_time += 1
            if left_array[i] < right_array[j]:
                sort_array[k] = left_array[i]
                i += 1
            else:
                sort_array[k] = right_array[j]
                j += 1
            k += 1
        # Copy any leftovers (at most one of these loops does work).
        while i < len(left_array):
            sort_array[k] = left_array[i]
            i += 1
            k += 1
        while j < len(right_array):
            sort_array[k] = right_array[j]
            j += 1
            k += 1
    return compare_time, time.time() - time_start
# iterative merge sort
# helps: https://www.geeksforgeeks.org/iterative-merge-sort/
def IterativeMergeSort(input_array):
    """Bottom-up (iterative) merge sort that counts element comparisons.

    A copy of ``input_array`` is sorted, so the caller's list is untouched.

    Returns:
        Tuple ``(compare_time, time_run)``: number of element comparisons
        and wall-clock seconds spent.

    Bug fixes relative to the original:
      * The outer loop ran ``while current_size < n - 1``, which skipped the
        final merge pass for some non-power-of-two sizes and did nothing at
        all for n == 2. Runs must be merged whenever ``current_size < n``.
      * ``mid`` was not clamped to the last index, producing out-of-range
        slices (IndexError) for sizes such as n == 10.
    """
    time_start = time.time()
    compare_time = 0
    current_size = 1
    sort_array = copy.deepcopy(input_array)
    n = len(sort_array)
    while current_size < n:
        left = 0
        while left < n - 1:
            # Clamp both run boundaries to the end of the array.
            mid = min(left + current_size - 1, n - 1)
            right = min(2 * current_size + left - 1, n - 1)
            compare_time += merge(sort_array, left, mid, right)
            left += 2 * current_size
        current_size *= 2
    return compare_time, time.time() - time_start


def merge(input_array, left, mid, right):
    """Merge the sorted runs input_array[left..mid] and
    input_array[mid+1..right] in place.

    Returns the number of element comparisons performed.
    """
    compare_time = 0
    n1 = mid - left + 1
    n2 = right - mid
    L = input_array[left:mid + 1]
    R = input_array[mid + 1:right + 1]
    i = j = 0
    k = left
    while i < n1 and j < n2:
        compare_time += 1
        if L[i] > R[j]:
            input_array[k] = R[j]
            j += 1
        else:
            input_array[k] = L[i]
            i += 1
        k += 1
    # Copy whichever run has leftovers.
    while i < n1:
        input_array[k] = L[i]
        i += 1
        k += 1
    while j < n2:
        input_array[k] = R[j]
        j += 1
        k += 1
    return compare_time
# In[23]:
# Load the benchmark inputs (paths relative to the working directory).
# Per the plot labels below, "data0.<size>" files hold sorted data and
# "data1.<size>" files hold random data -- TODO confirm against the
# generator script.
input_1024_0 = np.loadtxt('./data/data0.1024', int)
input_2048_0 = np.loadtxt('./data/data0.2048', int)
input_4096_0 = np.loadtxt('./data/data0.4096', int)
input_8192_0 = np.loadtxt('./data/data0.8192', int)
input_16384_0 = np.loadtxt('./data/data0.16384', int)
input_32768_0 = np.loadtxt('./data/data0.32768', int)
input_1024_1 = np.loadtxt('./data/data1.1024', int)
input_2048_1 = np.loadtxt('./data/data1.2048', int)
input_4096_1 = np.loadtxt('./data/data1.4096', int)
input_8192_1 = np.loadtxt('./data/data1.8192', int)
input_16384_1 = np.loadtxt('./data/data1.16384', int)
input_32768_1 = np.loadtxt('./data/data1.32768', int)
# Interleaved so even indices are "data0" inputs and odd indices the
# same-size "data1" inputs -- the analysis cells below rely on this order.
input_data = [input_1024_0, input_1024_1, input_2048_0, input_2048_1, input_4096_0, input_4096_1, input_8192_0, input_8192_1, input_16384_0, input_16384_1, input_32768_0, input_32768_1]
# In[24]:
# Time the recursive merge sort on every benchmark input.
result = [RecursiveMergeSort(data) for data in input_data]
print(result)
# In[8]:
# Split the (comparisons, runtime) pairs: even positions come from the
# "data0" inputs, odd positions from the "data1" inputs.
recursive_merge_compare_0 = [pair[0] for pair in result[0::2]]
recursive_merge_runtime_0 = [pair[1] for pair in result[0::2]]
recursive_merge_compare_1 = [pair[0] for pair in result[1::2]]
recursive_merge_runtime_1 = [pair[1] for pair in result[1::2]]
print(recursive_merge_compare_1)
# In[9]:
# Time the iterative merge sort on every benchmark input.
result = [IterativeMergeSort(data) for data in input_data]
print(result)
# In[10]:
# Same even/odd split as for the recursive results above.
iterative_merge_compare_0 = [pair[0] for pair in result[0::2]]
iterative_merge_runtime_0 = [pair[1] for pair in result[0::2]]
iterative_merge_compare_1 = [pair[0] for pair in result[1::2]]
iterative_merge_runtime_1 = [pair[1] for pair in result[1::2]]
print(iterative_merge_compare_1)
# In[11]:
# Persist every series. The pre-existing 'recursice' typo in the file names
# is kept deliberately so downstream consumers keep finding the same files.
series_by_name = {
    'recursice compare 0': recursive_merge_compare_0,
    'recursice compare 1': recursive_merge_compare_1,
    'recursice runtime 0': recursive_merge_runtime_0,
    'recursice runtime 1': recursive_merge_runtime_1,
    'iterative compare 0': iterative_merge_compare_0,
    'iterative compare 1': iterative_merge_compare_1,
    'iterative runtime 0': iterative_merge_runtime_0,
    'iterative runtime 1': iterative_merge_runtime_1,
}
for name, series in series_by_name.items():
    np.savetxt('./result/{}.txt'.format(name), series, fmt='%f')
# In[12]:
input_size = [1024, 2048, 4096, 8192, 16384, 32768]
# Comparison counts vs. input size for both algorithms on both data kinds.
plt.figure()
for series, label in [
        (recursive_merge_compare_0, 'recursive compare times with sorted data'),
        (recursive_merge_compare_1, 'recursive compare times with random data'),
        (iterative_merge_compare_0, 'iterative compare times with sorted data'),
        (iterative_merge_compare_1, 'iterative compare times with random data')]:
    plt.plot(input_size, series, label=label)
plt.legend(loc='upper left')
plt.title('Compare times as function of input size')
plt.xlabel('input size')
plt.ylabel('compare times')
plt.savefig('./result/compare times vs input.jpg')
plt.show()
# In[13]:
# Wall-clock runtime vs. input size, same layout as the figure above.
plt.figure()
for series, label in [
        (recursive_merge_runtime_0, 'recursive runtime with sorted data'),
        (recursive_merge_runtime_1, 'recursive runtime with random data'),
        (iterative_merge_runtime_0, 'iterative runtime with sorted data'),
        (iterative_merge_runtime_1, 'iterative runtime with random data')]:
    plt.plot(input_size, series, label=label)
plt.legend(loc='upper left')
plt.title('Runtime as function of input size')
plt.xlabel('input size')
plt.ylabel('runtime(s)')
plt.savefig('./result/runtime vs input.jpg')
plt.show()
# In[ ]:
| 30.087302 | 198 | 0.643366 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,805 | 0.238064 |
0f3b73e98465b8b1b4c9ad2d85ba8b9865ebbc38 | 653 | py | Python | pybwap/__init__.py | NQysit/pybwap | f95af52ed756f0a18eb32cd2b3917f7f66848a9d | [
"MIT"
] | null | null | null | pybwap/__init__.py | NQysit/pybwap | f95af52ed756f0a18eb32cd2b3917f7f66848a9d | [
"MIT"
] | null | null | null | pybwap/__init__.py | NQysit/pybwap | f95af52ed756f0a18eb32cd2b3917f7f66848a9d | [
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
from flask import Flask, render_template, send_from_directory

# Flask application object; settings come from DevelopmentConfig in config.py.
app = Flask(__name__)
app.config.from_object('config.DevelopmentConfig')

# Blueprints are imported only after `app` exists -- presumably to avoid
# circular imports with the blueprint modules; confirm before reordering.
from .main import main_blueprint
app.register_blueprint(main_blueprint)
from .ch_0x00 import ch_0x00_blueprint
app.register_blueprint(ch_0x00_blueprint)
from .xss_0x01 import xss_0x01_blueprint
app.register_blueprint(xss_0x01_blueprint)
@app.route('/static/<folder>/<name>')
def serve(folder, name):
'''
Only for developing purpose
'''
dirname = os.path.dirname(__file__)
return send_from_directory(os.path.join(dirname, 'static', folder), filename=name)
| 23.321429 | 86 | 0.767228 | 0 | 0 | 0 | 0 | 237 | 0.36294 | 0 | 0 | 125 | 0.191424 |
0f3c60ee8b70b4ccd75b5a518e5ea43030d24c12 | 2,757 | py | Python | Main.py | zy-zhou/MLCS | 478c95efb63d6c285a22c469fa7773e8f801052a | [
"Apache-2.0"
] | null | null | null | Main.py | zy-zhou/MLCS | 478c95efb63d6c285a22c469fa7773e8f801052a | [
"Apache-2.0"
] | null | null | null | Main.py | zy-zhou/MLCS | 478c95efb63d6c285a22c469fa7773e8f801052a | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 8 19:47:52 2019
@author: Zhou
"""
import torch
from Utils import load
from Data import load_data
from Modules import BasicDecoder, RNNEncoder
from Models import Model, MetaTranslator
from Train import MetaTrainer
import warnings
warnings.filterwarnings("ignore")
device = 'cuda' if torch.cuda.is_available() else 'cpu'
meta_epoches = 15
meta_batch_size = 20
adapt_lr = 0.4
n = 5
if __name__ == '__main__':
from Utils import batch_bleu, batch_meteor, batch_rouge
fields, train_gen = load_data('train', 'meta', batch_size=meta_batch_size, k=1, epsilon=0.7,
device=device, meta_weights=True)
_, val_gen = load_data('valid', 'meta', batch_size=meta_batch_size, k=10, epsilon=0.7,
device=device, meta_weights=True)
e = RNNEncoder(fields[0], bidirectional=True)
d = BasicDecoder(fields[1], memory_dim=e.units * 2, glob_attn='mul')
model = Model(e, d)
model = model.to(device)
checkpoint = torch.load('checkpoints/nmt.pt')
model.load_state_dict(checkpoint['model'])
trainer = MetaTrainer(model, epoches=meta_epoches, temperature=0,
metrics=['bleu'], smooth=0, patience=4, save_per_epoch=False,
beam_width=5, length_penalty=1, val_metric='bleu',
adapt_lr=adapt_lr, first_order=True,
save_path='checkpoints/nmt_meta.pt')
reports = trainer(train_gen, val_gen)
####################################################################################
_, test_gen = load_data('test', 'meta', batch_size=meta_batch_size, k=10, epsilon=0.7, meta_weights=True)
trainer = MetaTrainer(model, adapt_lr=adapt_lr / (n - 1) if n > 2 else 2/3 * adapt_lr,
load_path='checkpoints/nmt_meta.pt')
evaluator = MetaTranslator(trainer.model, metrics=[], adapt_steps=n, unk_replace=False)
predicts, reports = evaluator(test_gen, save_path='predicts/nmt_meta.txt')
####################################################################################
hyp = [s.split() for s in predicts]
ref = load('data/preprocessed/test.nl.json', is_json=True)
bleu_4 = batch_bleu(hyp, ref, smooth_method=0)
print('BLEU-4: {:.2f}'.format(bleu_4 * 100))
bleu_s = batch_bleu(hyp, ref, smooth_method=3)
print('Smoothed BLEU-4: {:.2f}'.format(bleu_s * 100))
hyp = predicts
ref = [' '.join(s) for s in ref]
rouge = batch_rouge(hyp, ref)
print('ROUGE-L: {:.2f}'.format(rouge['rouge-l']['f'] * 100))
meteor = batch_meteor(hyp, ref)
print('METEOR: {:.2f}'.format(meteor * 100))
| 42.415385 | 110 | 0.590497 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 561 | 0.203482 |
0f3c685bf3c706b184f1be524f6612e1e97c2875 | 1,704 | py | Python | designate/backend/impl_infoblox/record_factory.py | infobloxopen/designate | 531a28b8453cfe5641284a16e0342db8d709ab36 | [
"Apache-2.0"
] | null | null | null | designate/backend/impl_infoblox/record_factory.py | infobloxopen/designate | 531a28b8453cfe5641284a16e0342db8d709ab36 | [
"Apache-2.0"
] | null | null | null | designate/backend/impl_infoblox/record_factory.py | infobloxopen/designate | 531a28b8453cfe5641284a16e0342db8d709ab36 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 Infoblox
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from designate.i18n import _LI
from designate.backend.impl_infoblox.records import a
from designate.backend.impl_infoblox.records import aaaa
from designate.backend.impl_infoblox.records import cname
from designate.backend.impl_infoblox.records import ptr
from designate.backend.impl_infoblox.records import soa
from designate.backend.impl_infoblox.records import ns
LOG = logging.getLogger(__name__)
class RecordFactory(object):
@staticmethod
def get_record(recordset, infoblox, tenant_name):
if recordset.type == "A":
return a.ARecord(infoblox, tenant_name)
if recordset.type == "CNAME":
return cname.CNameRecord(infoblox, tenant_name)
if recordset.type == "NS":
return ns.NSRecord(infoblox, tenant_name)
if recordset.type == "SOA":
return soa.SOARecord(infoblox, tenant_name)
if recordset.type == "PTR":
return ptr.PTRRecord(infoblox, tenant_name)
if recordset.type == "AAAA":
return aaaa.AAAARecord(infoblox, tenant_name)
LOG.error(_LI("Unknown type %s"), recordset.type)
| 39.627907 | 75 | 0.7277 | 710 | 0.416667 | 0 | 0 | 677 | 0.3973 | 0 | 0 | 607 | 0.356221 |
0f3cbdf274389ec4a32274a5efe6d186c978a7c6 | 4,403 | py | Python | infer.py | AnnLIU15/SegCovid | e8a1ccadfbe56ddc7f1adf33225f77836436fa85 | [
"MIT"
] | null | null | null | infer.py | AnnLIU15/SegCovid | e8a1ccadfbe56ddc7f1adf33225f77836436fa85 | [
"MIT"
] | null | null | null | infer.py | AnnLIU15/SegCovid | e8a1ccadfbe56ddc7f1adf33225f77836436fa85 | [
"MIT"
] | null | null | null | import os
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
from datasets.inferDataSet import infer_DataSet
from models.model import U2NET
from segConfig import getConfig
def infer(model, test_loader, device, n_classes, save_seg):
model.eval()
with torch.no_grad():
for idx, (imgs, imgs_name) in tqdm(enumerate(test_loader), desc='infer', total=len(test_loader)):
imgs = imgs.to(device)
d0, d1, d2, d3, d4, d5, d6 = model(imgs)
d0, d1, d2, d3, d4, d5, d6 = nn.Softmax(dim=1)(d0),\
nn.Softmax(dim=1)(d1), nn.Softmax(dim=1)(d2),\
nn.Softmax(dim=1)(d3), nn.Softmax(dim=1)(d4),\
nn.Softmax(dim=1)(d5), nn.Softmax(dim=1)(d6)
# d0, d1, d2, d3, d4, d5, d6 = d0[:, 1:n_classes, :, :]*1.01,\
# d1[:, 1:n_classes, :, :]*1.01, d2[:, 1:n_classes, :, :]*1.01,\
# d3[:, 1:n_classes, :, :]*1.01, d4[:, 1:n_classes, :, :]*1.01,\
# d5[:, 1:n_classes, :, :]*1.01, d6[:, 1:n_classes, :, :]*1.01
d0_tmp = F.one_hot(d0.clone().argmax(
dim=1), n_classes).permute(0, 3, 1, 2)
d1_tmp = F.one_hot(d1.clone().argmax(
dim=1), n_classes).permute(0, 3, 1, 2)
d2_tmp = F.one_hot(d2.clone().argmax(
dim=1), n_classes).permute(0, 3, 1, 2)
d3_tmp = F.one_hot(d3.clone().argmax(
dim=1), n_classes).permute(0, 3, 1, 2)
d4_tmp = F.one_hot(d4.clone().argmax(
dim=1), n_classes).permute(0, 3, 1, 2)
d5_tmp = F.one_hot(d5.clone().argmax(
dim=1), n_classes).permute(0, 3, 1, 2)
d6_tmp = F.one_hot(d6.clone().argmax(
dim=1), n_classes).permute(0, 3, 1, 2)
d = torch.Tensor([3.5, 2.5, 1, 1, 1, 1, 1])
add_lesion = -4.1
tmp = d0_tmp*d[0]+d1_tmp*d[1]+d2_tmp*d[2]+d3_tmp*d[3]\
+ d4_tmp*d[4]+d5_tmp*d[5]+d6_tmp*d[6]
tmp[:, 1:n_classes, :, :] = tmp[:, 1:n_classes, :, :]+add_lesion
out_mask = tmp.argmax(dim=1).squeeze()
np.save(save_seg+'/'+imgs_name[0],
out_mask.clone().detach().cpu().numpy().astype(np.uint8).squeeze())
torch.cuda.empty_cache()
def main(args):
device, num_classes, pth, infer_data_dirs = \
args.device, args.num_classes, args.pth, args.infer_data_dirs
if device == 'cuda':
torch.cuda.set_device(0)
if not torch.cuda.is_available():
print('Cuda is not available, use CPU to train.')
device = 'cpu'
device = torch.device(device)
print('===>device:', device)
torch.cuda.manual_seed_all(0)
# Load data
print('===>Setup Model')
model = U2NET(in_channels=1, out_channels=num_classes).to(device)
print('===>Loaded Weight')
checkpoint = torch.load(pth)
model.load_state_dict(checkpoint['model_weights'])
SegDataSet = infer_DataSet
print('===>check infer_data_dirs')
if isinstance(infer_data_dirs, str):
infer_data_dirs = [infer_data_dirs]
total_infer_begin = time.time()
for idx, infer_data_dir in enumerate(infer_data_dirs):
imgs_dir = infer_data_dir+'/imgs/'
masks_save_dir = infer_data_dir+'/masks/'
if not os.path.exists(masks_save_dir):
os.makedirs(masks_save_dir)
print('===>Loading dataset')
test_data_loader = DataLoader(
dataset=SegDataSet(imgs_dir), batch_size=1,
num_workers=8, shuffle=False, drop_last=False)
print('='*30)
print('===>Infering %d' % (idx+1))
print('===>Start infer '+imgs_dir)
print('===>Save to '+masks_save_dir)
infer_start_time = time.time()
infer(model=model, test_loader=test_data_loader, device=device,
n_classes=num_classes, save_seg=masks_save_dir)
infer_end_time = time.time()
print('Infer cost %.2fs' % (infer_end_time-infer_start_time))
del test_data_loader
total_infer_end = time.time()
print('Total Infer cost %.2fs' % (total_infer_end-total_infer_begin))
if __name__ == '__main__':
'''
推断,没有做性能评估,只需加载imgs,代码几乎与segTest一致,如有不懂
请看segTest注释
'''
args = getConfig('infer')
main(args)
| 38.286957 | 105 | 0.578015 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 713 | 0.159615 |
0f3d7d67b5dc23a19bea382775a9d2a046dac5d0 | 11,458 | py | Python | spgateway/helpers.py | cjltsod/django-spgateway | ba7a85394473254942417889f0a9f9fad0d43e3f | [
"MIT"
] | 2 | 2018-03-13T06:50:25.000Z | 2021-12-21T11:33:11.000Z | spgateway/helpers.py | cjltsod/django-spgateway | ba7a85394473254942417889f0a9f9fad0d43e3f | [
"MIT"
] | 4 | 2018-03-13T08:38:45.000Z | 2018-08-11T19:54:45.000Z | spgateway/helpers.py | cjltsod/django-spgateway | ba7a85394473254942417889f0a9f9fad0d43e3f | [
"MIT"
] | 1 | 2020-01-07T23:21:52.000Z | 2020-01-07T23:21:52.000Z | import logging
class Warnings(object):
def __init__(self, logger=None):
self.warnings = list()
self.logger = logger or logging
def warning(self, message):
self.warnings.append(message)
self.logger.warning(message)
def __bool__(self):
if self.warnings:
return True
else:
return False
def decrypt_TradeInfo_TradeSha(
HashKey,
HashIV,
TradeInfo,
TradeSha=None,
use_json=True,
):
from Crypto.Cipher import AES
from Crypto.Hash import SHA256
from urllib.parse import parse_qs
import codecs
import json
def removepadding(message, blocksize=32):
if message[-1] < blocksize:
return message[:(-1 * ord(message[-1:])):]
return message
aes_obj = AES.new(HashKey, AES.MODE_CBC, HashIV)
if TradeSha:
hash = SHA256.new()
hash.update('HashKey={}&{}&HashIV={}'.format(HashKey, TradeInfo, HashIV).encode())
TradeSha_verify = hash.hexdigest().upper()
if TradeSha != TradeSha_verify:
raise Exception('TradeSha not match')
TradeInfo_decrypted = removepadding(aes_obj.decrypt(codecs.decode(TradeInfo, 'hex_codec'))).decode()
if use_json:
TradeInfo_dict = json.loads(TradeInfo_decrypted)
else:
TradeInfo_dict = parse_qs(TradeInfo_decrypted)
return TradeInfo_dict
def generate_TradeInfo_TradeSha(
HashKey, # HashKey
HashIV, # HashIV
MerchantID, # 商店代號 智付通商店代號
##################################################################################################
MerchantOrderNo, # 商店訂單編號 商店自訂訂單編號,限英、數字、”_ ” 格式。
Amt, # 訂單金額 純數字不含符號 幣別:新台幣
ItemDesc, # 商品資訊 限制長度為 50 字
Email=None, # 付款人電子信箱
LoginType=False, # 智付通會員 1=須要登入智付通會員 0=不須登入智付通會員
##################################################################################################
RespondType='JSON', # 回傳格式 JSON 或是 String
TimeStamp=None, # 時間戳記 自從 Unix 纪元
Version='1.4', # 串接程式版本
LangType='zh-tw', # 語系 英文版參數為 en 繁體中文版參數為 zh-tw
# 當未提供此參數或此參數數值錯誤時,將預設為繁體中文版
##################################################################################################
EmailModify=None, # 不須登入智付通會員 1=可修改(預設) 0=不可修改
OrderComment=None, # 商店備註 限制長度為 300 字 若有提供此參數,將會於 MPG 頁面呈現 商店備註內容
TradeLimit=None, # 交易限制秒數 秒數下限為 60 秒 上限為 900 秒 若未帶此參數,或是為 0 時,會視作為不啟用交易限制秒數。
ExpireDate=None, # 繳費有效期限 (適用於非即時交易) 格式為 date('Ymd') ,例:20140620
# 此參數若為空值,系統預設為 7 天。自取 號時間起算至第 7 天 23:59:59。
##################################################################################################
ReturnURL=None, # 支付完成返回商店網址 若為空值,交易完成後,消費者將停留在智付通付款或取號完成頁面
NotifyURL=None, # 支付通知網址
CustomerURL=None, # 商店取號網址 此參數若為空值,則會顯示取號結果在智付通頁面。
ClientBackURL=None, # 支付取消返回商店網址 此參數若為空值時,則無返回鈕。
##################################################################################################
CREDIT=None, # 信用卡一次付清啟用 設定是否啟用信用卡一次付清支付方式 1=啟用 0或者未有此參數=不啟用
InstFlag=None, # 信用卡分期付款啟用 此欄位值=1 時,即代表開啟所有分期期別
# 同時開啟多期別時,將此參數用","分隔,例如:3,6,12,代表開啟分 3、6、12期的功能
# 此欄位值=0或無值時,即代表不開啟分期
CreditRed=None, # 信用卡紅利啟用 1=啟用 0或者未有此參數=不啟用
UNIONPAY=None, # 信用卡銀聯卡啟用 1=啟用 0或者未有此參數=不啟用
WEBATM=None, # WEBATM啟用 1=啟用 0或者未有此參數=不啟用
VACC=None, # ATM轉帳啟用 1=啟用 0或者未有此參數=不啟用
CVS=None, # 超商代碼繳費啟用 1=啟用 0或者未有此參數=不啟用 訂單金額小於30元或超過2萬元仍不會顯示此支付方式
BARCODE=None, # 超商條碼繳費啟用 1=啟用 0或者未有此參數=不啟用 訂單金額小於20元或超過4萬元仍不會顯示此支付方式
##################################################################################################
order_list=None,
request=None,
*args,
**kwargs
):
from Crypto.Cipher import AES
from Crypto.Hash import SHA256
from email.utils import parseaddr
from urllib.parse import urlencode
import datetime
import re
import time
warnings = Warnings()
def addpadding(message, blocksize=32):
pad = blocksize - (len(message) % blocksize)
return message + chr(pad) * pad
if not isinstance(MerchantID, str):
warnings.warning('MerchantID is not string')
MerchantID = str(MerchantID)
if not isinstance(MerchantOrderNo, str):
MerchantOrderNo = str(MerchantOrderNo)
if not re.match('^[A-Za-z0-9_]*$', MerchantOrderNo):
warnings.warning('MerchantOrderNo only accept string contains only letters, numbers, and underscores. Bypass then')
if not isinstance(Amt, int):
warnings.warning('Amt is not integer')
Amt = int(Amt)
if not isinstance(ItemDesc, str):
ItemDesc = str(ItemDesc)
if len(ItemDesc) > 50:
warnings.warning('Length of ItemDesc should be less than 50. Bypass then')
if Email is not None:
parsed_email = parseaddr(Email)[-1]
if '@' in parsed_email and parsed_email != Email:
warnings.warning('Email is not legal. Bypass then')
else:
warnings.warning('Email is not optional field. Bypass then')
if LoginType is not None:
LoginType = 1 if LoginType else 0
else:
warnings.warning('LoginType is not optional field. Bypass then')
if RespondType not in ('JSON', 'String'):
warnings.warning('RespondType should be \'JSON\' or \'String\'')
if TimeStamp is None:
TimeStamp = int(time.time())
elif isinstance(TimeStamp, datetime.datetime):
TimeStamp = int(TimeStamp.timestamp())
elif isinstance(TimeStamp, int) or isinstance(TimeStamp, float):
TimeStamp = int(datetime.datetime.fromtimestamp(TimeStamp).timestamp())
elif isinstance(TimeStamp, str):
warnings.warning('Unable to parse TimeStamp by . Bypass then')
else:
warnings.warning('TimeStamp not str, time, or datetime. Bypass then.')
if Version != '1.4':
warnings.warning('Version is not current version, which is 1.4')
if LangType:
if LangType not in ('en', 'zh-tw'):
warnings.warning('LangType is not set to en or zh-tw. Service provider may use zh-tw instead. By pass then.')
if TradeLimit is not None:
if isinstance(TradeLimit, int):
warnings.warning('TradeLimit is not integer. By pass then.')
elif TradeLimit < 60:
warnings.warning('TradeLimit is less than 60. Service provider may use 60 instead. By pass then.')
elif TradeLimit > 900:
warnings.warning('TradeLimit is greater than 900. Service provider may use 900 instead. By pass then.')
if ExpireDate is not None:
if isinstance(ExpireDate, datetime.datetime) or isinstance(ExpireDate, time):
ExpireDate = ExpireDate.strftime('%Y%m%d')
if EmailModify is not None:
EmailModify = 1 if EmailModify else 0
if OrderComment is not None:
OrderComment = str(OrderComment) if OrderComment else None
if CREDIT is not None:
CREDIT = 1 if CREDIT else 0
if InstFlag is not None:
if InstFlag is True:
InstFlag = '1'
elif InstFlag is False:
InstFlag = '0'
elif isinstance(InstFlag, list):
InstFlag = ','.join(InstFlag)
if CreditRed is not None:
CreditRed = 1 if CreditRed else 0
if UNIONPAY is not None:
UNIONPAY = 1 if UNIONPAY else 0
if WEBATM is not None:
WEBATM = 1 if WEBATM else 0
if VACC is not None:
VACC = 1 if VACC else 0
if CVS is not None:
CVS = 1 if CVS else 0
if CVS:
if Amt < 30 or Amt > 20000:
warnings.warning('Service provider may not display CVS due to Amt is not between 30 and 20000')
if BARCODE is not None:
BARCODE = 1 if BARCODE else 0
if BARCODE:
if Amt < 20 or Amt > 40000:
warnings.warning('Service provider may not display BARCODE due to Amt is not between 20 and 40000')
if request:
if ReturnURL and not ReturnURL.startswith('http://') and not ReturnURL.startswith('https://'):
ReturnURL = request.build_absolute_uri(str(ReturnURL))
if NotifyURL and not NotifyURL.startswith('http://') and not NotifyURL.startswith('https://'):
NotifyURL = request.build_absolute_uri(str(NotifyURL))
if CustomerURL and not CustomerURL.startswith('http://') and not CustomerURL.startswith('https://'):
CustomerURL = request.build_absolute_uri(str(CustomerURL))
if ClientBackURL and not ClientBackURL.startswith('http://') and not ClientBackURL.startswith('https://'):
ClientBackURL = request.build_absolute_uri(str(ClientBackURL))
if order_list is None:
order_list = list()
original_order_list = (
'MerchantID',
'RespondType',
'TimeStamp',
'Version',
'LangType',
'MerchantOrderNo',
'Amt',
'ItemDesc',
'TradeLimit',
'ExpireDate',
'ReturnURL',
'NotifyURL',
'CustomerURL',
'ClientBackURL',
'Email',
'EmailModify',
'LoginType',
'OrderComment',
'CREDIT',
'InstFlag',
'CreditRed',
'UNIONPAY',
'WEBATM',
'VACC',
'CVS',
'BARCODE',
)
aes_obj = AES.new(HashKey, AES.MODE_CBC, HashIV)
message = dict(
MerchantID=MerchantID,
MerchantOrderNo=MerchantOrderNo,
Amt=Amt,
ItemDesc=ItemDesc,
Email=Email,
RespondType=RespondType,
TimeStamp=TimeStamp,
Version=Version,
LangType=LangType,
TradeLimit=TradeLimit,
ExpireDate=ExpireDate,
ReturnURL=ReturnURL,
NotifyURL=NotifyURL,
CustomerURL=CustomerURL,
ClientBackURL=ClientBackURL,
EmailModify=EmailModify,
LoginType=LoginType,
OrderComment=OrderComment,
CREDIT=CREDIT,
InstFlag=InstFlag,
CreditRed=CreditRed,
UNIONPAY=UNIONPAY,
WEBATM=WEBATM,
VACC=VACC,
CVS=CVS,
BARCODE=BARCODE,
)
message_list = list()
for each_key in order_list:
value = message.get(each_key)
if value is not None:
message_list.append((each_key, value))
for each_key in original_order_list:
value = message.get(each_key)
if each_key not in order_list and value is not None:
message_list.append((each_key, value))
from collections import OrderedDict
ordered_message = OrderedDict(message_list)
message_str = addpadding(urlencode(ordered_message))
TradeInfo = aes_obj.encrypt(message_str).hex()
hash = SHA256.new()
hash.update('HashKey={}&{}&HashIV={}'.format(HashKey, TradeInfo, HashIV).encode())
TradeSha = hash.hexdigest().upper()
return TradeInfo, TradeSha, warnings
| 37.322476 | 126 | 0.566242 | 353 | 0.027087 | 0 | 0 | 0 | 0 | 0 | 0 | 4,793 | 0.367787 |
0f42949be84c6e30aca5f319502468abb01b7512 | 849 | py | Python | crescent/resources/s3/bucket/transition.py | mpolatcan/zepyhrus | 2fd0b1b9b21613b5876a51fe8b5f9e3afbec1b67 | [
"Apache-2.0"
] | 1 | 2020-03-26T19:20:03.000Z | 2020-03-26T19:20:03.000Z | crescent/resources/s3/bucket/transition.py | mpolatcan/zepyhrus | 2fd0b1b9b21613b5876a51fe8b5f9e3afbec1b67 | [
"Apache-2.0"
] | null | null | null | crescent/resources/s3/bucket/transition.py | mpolatcan/zepyhrus | 2fd0b1b9b21613b5876a51fe8b5f9e3afbec1b67 | [
"Apache-2.0"
] | null | null | null | from crescent.core import Model
from crescent.functions import AnyFn
from .constants import AllowedValues, ModelRequiredProperties
from typing import Union
class Transition(Model):
def __init__(self):
super(Transition, self).__init__(
allowed_values={self.StorageClass.__name__: AllowedValues.TRANSITION_SC},
required_properties=ModelRequiredProperties.TRANSITION
)
def StorageClass(self, storage_class: Union[str, AnyFn]):
return self._set_field(self.StorageClass.__name__, storage_class)
def TransitionDate(self, transition_date: Union[str, AnyFn]):
return self._set_field(self.TransitionDate.__name__, transition_date)
def TransitionInDays(self, transition_in_days: Union[int, AnyFn]):
return self._set_field(self.TransitionInDays.__name__, transition_in_days)
| 38.590909 | 85 | 0.759717 | 690 | 0.812721 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0f42eaeb31887e9ae1caee055573b220ada36e35 | 1,347 | py | Python | shop/migrations/0002_add_example_data.py | Chaiok/-django_ne_copipast_shop_master2 | 67abde1191f15bbf366b8666f0a1c17f7c4e0c9f | [
"MIT"
] | 1 | 2022-02-05T17:28:54.000Z | 2022-02-05T17:28:54.000Z | shop/migrations/0002_add_example_data.py | Chaiok/-django_ne_copipast_shop_master2 | 67abde1191f15bbf366b8666f0a1c17f7c4e0c9f | [
"MIT"
] | null | null | null | shop/migrations/0002_add_example_data.py | Chaiok/-django_ne_copipast_shop_master2 | 67abde1191f15bbf366b8666f0a1c17f7c4e0c9f | [
"MIT"
] | null | null | null | # Generated by Django 4.0 on 2021-12-13 17:54
from django.db import migrations
_CAR_GOODS = 'Автотовары'
_APPLIANCES = 'Бытовая техника'
def _create_categories(apps, schema_editor) -> None:
"""Создает две категории"""
# noinspection PyPep8Naming
Category = apps.get_model('shop', 'Category')
Category.objects.get_or_create(name=_CAR_GOODS)
Category.objects.get_or_create(name=_APPLIANCES)
def _create_products(apps, schema_editor) -> None:
"""Создает два товара"""
# noinspection PyPep8Naming
Product = apps.get_model('shop', 'Product')
# noinspection PyPep8Naming
Category = apps.get_model('shop', 'Category')
Product.objects.get_or_create(
name='Зимняя резина',
category=Category.objects.get(name=_CAR_GOODS),
price=4990.00,
)
Product.objects.get_or_create(
name='Холодильник',
category=Category.objects.get(name=_APPLIANCES),
price=49990.00,
)
class Migration(migrations.Migration):
dependencies = [
('shop', '0001_initial'),
]
operations = [
migrations.RunPython(
code=_create_categories,
reverse_code=migrations.RunPython.noop,
),
migrations.RunPython(
code=_create_products,
reverse_code=migrations.RunPython.noop,
),
]
| 24.944444 | 56 | 0.6585 | 383 | 0.26802 | 0 | 0 | 0 | 0 | 0 | 0 | 383 | 0.26802 |
0f431252562898282c2c89c51ee2ef686e116a5d | 17,992 | py | Python | modules/account.py | keyvantaj/Quantitative | 77c7c414c47ed3fe22873b87ed15e92dc62226da | [
"MIT"
] | 9 | 2020-10-11T21:09:41.000Z | 2022-02-17T01:52:04.000Z | modules/account.py | ajmal017/Quantitative | 7af681677031987c64f402d8cb06b358cedd184a | [
"MIT"
] | null | null | null | modules/account.py | ajmal017/Quantitative | 7af681677031987c64f402d8cb06b358cedd184a | [
"MIT"
] | 3 | 2020-07-18T02:19:08.000Z | 2022-01-30T15:37:02.000Z | from ibapi.client import EClient
from ibapi.wrapper import EWrapper
from ibapi.contract import Contract
from ibapi.order import Order
from ibapi.scanner import ScannerSubscription
from ibapi.ticktype import TickTypeEnum
from ibapi.common import *
from ibapi.tag_value import TagValue
from ibapi.execution import ExecutionFilter
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
from bs4 import BeautifulSoup
from datetime import datetime
from time import sleep, strftime, localtime, time
sleeptime = 5
class AccountManagement:
def read_nextvalidid(self):
class TestApp(EWrapper, EClient):
def __init__(self):
EClient.__init__(self, self)
self.nextValidOrderId = []
def error(self, reqId: TickerId, errorCode: int, errorString: str):
if reqId > -1:
print("Error. Id: ", reqId, " Code: ", errorCode, " Msg: ", errorString)
def nextValidId(self, orderId):
super().nextValidId(orderId)
self.nextValidOrderId.append(orderId)
print("NextValidId:", orderId)
self.disconnect()
app = TestApp()
app.connect('127.0.0.1', 7497, 0)
sleep(sleeptime)
app.reqIds(-1)
nid = app.nextValidOrderId
app.run()
return nid[0]
def placing_orders(self, symbol, sec_type, exch, prim_exch, curr, order_type, quantity, action):
contract = Contract()
contract.symbol = symbol
contract.secType = sec_type
contract.exchange = exch
contract.primaryExchange = prim_exch
contract.currency = curr
order = Order()
order.orderType = order_type
order.totalQuantity = quantity
order.action = action
class TestApp(EWrapper, EClient):
def __init__(self):
EClient.__init__(self, self)
def error(self, reqId: TickerId, errorCode: int, errorString: str):
if reqId > -1:
print("Error. Id: ", reqId, " Code: ", errorCode, " Msg: ", errorString)
app = TestApp()
app.connect('127.0.0.1', 7497, 0)
app.placeOrder(orderId=orderId, contract=contract, order=order)
print('order quantity placed for {} is: {} '.format(contract.symbol, order.totalQuantity))
sleep(sleeptime)
return order, contract
app.disconnect()
app.run()
def read_positions(self, subscribe, acctCode):
class TestApp(EWrapper, EClient):
def __init__(self):
EClient.__init__(self, self)
self.up = pd.DataFrame([], columns=['Position', 'marketPrice', 'marketValue', 'averageCost',
'unrealizedPNL', 'realizedPNL'])
def error(self, reqId: TickerId, errorCode: int, errorString: str):
if reqId > -1:
print("Error. Id: ", reqId, " Code: ", errorCode, " Msg: ", errorString)
def updatePortfolio(self, contract, position, marketPrice, marketValue, averageCost, unrealizedPNL,
realizedPNL, accountName):
self.up.index.name = 'Symbol'
self.up.loc[
contract.symbol] = position, marketPrice, marketValue, averageCost, unrealizedPNL, realizedPNL
def positionEnd(self):
super().positionEnd()
print("PositionEnd")
self.cancelPositions()
self.disconnect()
app = TestApp()
app.connect('127.0.0.1', 7497, 0)
sleep(sleeptime)
app.reqAccountUpdates(subscribe=subscribe, acctCode=acctCode)
app.reqPositions()
update = app.up
app.run()
print('Reading Portfolio')
rows = update[update['Position'] == 0].index
update.drop(rows, axis=0, inplace=True)
return update
def read_account(self, subscribe, acctCode):
class TestApp(EWrapper, EClient):
def __init__(self):
EClient.__init__(self, self)
self.up = pd.DataFrame([], columns=['Values'])
def error(self, reqId: TickerId, errorCode: int, errorString: str):
if reqId > -1:
print("Error. Id: ", reqId, " Code: ", errorCode, " Msg: ", errorString)
def updateAccountValue(self, key, value, currency, accountName):
self.up.index.name = 'Keys'
self.up.loc[key] = value
def accountDownloadEnd(self, account):
print("AccountDownloadEnd. Account:", account)
self.disconnect()
app = TestApp()
app.connect('127.0.0.1', 7497, 0)
sleep(sleeptime)
app.reqAccountUpdates(subscribe=subscribe, acctCode=acctCode)
update = app.up
app.reqAccountUpdates(False, acctCode)
app.run()
print('Reading Account')
return update
    def cancel_openorders(self):
        """Cancel ALL working orders on the account (global cancel).

        Snapshots the currently open orders into a DataFrame via the
        ``openOrder`` callbacks, then issues ``reqGlobalCancel`` and returns
        the pre-cancel snapshot.  Destructive: every open order is cancelled.

        :return: DataFrame indexed by symbol with the pre-cancel order state.
        """
        class TestApp(EWrapper, EClient):
            def __init__(self):
                EClient.__init__(self, self)
                # One row per symbol; 'pre_status' records the order status
                # as it was before the global cancel was issued.
                self.open_orders = pd.DataFrame(columns=['action', 'quantity',
                                                         'type', 'algoStrategy',
                                                         'algoParams', 'pre_status'])

            def error(self, reqId: TickerId, errorCode: int, errorString: str):
                # reqId == -1 messages are connection-status notices; skip them.
                if reqId > -1:
                    print("Error. Id: ", reqId, " Code: ", errorCode, " Msg: ", errorString)

            def cancelOrder(self, orderId):
                # NOTE(review): overriding the client-side cancelOrder only to
                # log; the super() call performs the actual request.
                super().cancelOrder(orderId)
                print('cancel order ended')

            def openOrder(self, orderId, Contract, Order, OrderState):
                super().openOrder(orderId, Contract, Order, OrderState)
                # Record one row per symbol (later callbacks overwrite earlier
                # ones for the same symbol).
                self.open_orders.loc[Contract.symbol, :] = [Order.action,
                                                            Order.totalQuantity,
                                                            Order.orderType,
                                                            Order.algoStrategy,
                                                            Order.algoParams[0],
                                                            OrderState.status]

            def openOrderEnd(self):
                super().openOrderEnd()
                print('open order ended')
                # Disconnecting here lets run() return once the open-order
                # snapshot is complete.
                self.disconnect()

        app = TestApp()
        app.connect('127.0.0.1', 7497, 0)
        sleep(sleeptime)
        app.reqIds(-1)
        app.reqAllOpenOrders()
        open_orders = app.open_orders  # alias: filled while run() processes messages
        app.reqGlobalCancel()  # cancels EVERY open order on this account
        app.run()
        return open_orders
def get_openorders(self):
class TestApp(EWrapper, EClient):
def __init__(self):
EClient.__init__(self, self)
self.open_orders = pd.DataFrame(columns=['action', 'open orders',
'type', 'algoStrategy',
'algoParams', 'status'])
def error(self, reqId: TickerId, errorCode: int, errorString: str):
if reqId > -1:
print("Error. Id: ", reqId, " Code: ", errorCode, " Msg: ", errorString)
def openOrder(self, orderId, Contract, Order, OrderState):
super().openOrder(orderId, Contract, Order, OrderState)
self.open_orders.loc[Contract.symbol, :] = [Order.action,
Order.totalQuantity,
Order.orderType,
Order.algoStrategy,
Order.algoParams[0],
OrderState.status]
def openOrderEnd(self):
super().openOrderEnd()
print('open order ended')
self.disconnect()
app = TestApp()
app.connect('127.0.0.1', 7497, 0)
app.reqIds(-1)
app.reqAllOpenOrders()
sleep(sleeptime)
open_orders = app.open_orders
app.run()
return open_orders
    def closing_positions(self, portfolio, order_id, ordersPriority, transmit):
        """Place market orders that flatten the symbols in ``stock_to_close``.

        NOTE(review): this method iterates a name ``stock_to_close`` that is
        not defined in this module's visible scope -- presumably a
        module-level list of symbols built elsewhere; verify before use.

        :param portfolio: DataFrame indexed by symbol with a 'Position'
            column (as returned by ``read_positions``); long positions are
            SOLD, short positions are BOUGHT back.
        :param order_id: first order id to use; incremented once per order.
        :param ordersPriority: value for the IB 'adaptivePriority' algo tag.
        :param transmit: if False, orders are staged in TWS without being
            transmitted.
        :return: the next order id.  NOTE(review): the trailing ``+ 1`` skips
            one id beyond the last increment done inside the loop -- harmless
            for IB (ids only need to be increasing) but possibly unintended.
        """
        class TestApp(EWrapper, EClient):
            def __init__(self):
                EClient.__init__(self, self)

            def error(self, reqId: TickerId, errorCode: int, errorString: str):
                # reqId == -1 messages are connection-status notices; skip them.
                if reqId > -1:
                    print("Error. Id: ", reqId, " Code: ", errorCode, " Msg: ", errorString)

        app = TestApp()
        app.connect('127.0.0.1', 7497, 0)
        if app.isConnected():
            print('app is running ...')
            print('closing {} positions which are not present in action'.format(len(stock_to_close)))
            # Closing Position
            for i in stock_to_close:
                contract = Contract()
                contract.symbol = i
                contract.secType = 'STK'
                contract.exchange = 'SMART'
                # contract.primaryExchange = 'ISLAND'
                contract.currency = 'USD'
                order = Order()
                order.orderType = 'MKT'
                # Quantity is the absolute size of the existing position.
                order.totalQuantity = int(np.abs(portfolio.loc[i, 'Position']))
                order.transmit = transmit
                if portfolio.loc[i, 'Position'] > 0:
                    # Long position -> sell to flatten.
                    order.action = 'SELL'
                    # order.cashQty = weigth * 1.5 * net_liq
                    order.algoStrategy = 'Adaptive'
                    order.algoParams = []
                    order.algoParams.append(TagValue("adaptivePriority", ordersPriority))
                    app.placeOrder(orderId=order_id, contract=contract, order=order)
                    sleep(sleeptime)
                    order_id = order_id + 1
                    print('closing position for {} is: {} '.format(contract.symbol, order.totalQuantity))
                elif portfolio.loc[i, 'Position'] < 0:
                    # Short position -> buy back to flatten.
                    order.action = 'BUY'
                    # order.cashQty = weigth * 1.5 * net_liq
                    order.algoStrategy = 'Adaptive'
                    order.algoParams = []
                    order.algoParams.append(TagValue("adaptivePriority", ordersPriority))
                    app.placeOrder(orderId=order_id, contract=contract, order=order)
                    sleep(sleeptime)
                    order_id = order_id + 1
                    print('closing position for {} is: {} '.format(contract.symbol, order.totalQuantity))
        else:
            print('app not connected')
        app.disconnect()
        return order_id + 1
    def rebalancing_to_leverage(self, order_id, ordersPriority, transmit):
        """Place adaptive market orders to rebalance toward target weights.

        NOTE(review): this method iterates a name ``action_balance`` that is
        not defined in this module's visible scope -- presumably a
        module-level DataFrame (indexed by symbol, with a 'shares' column of
        signed share deltas) built elsewhere; verify before use.

        Positive 'shares' values are BOUGHT, negative values are SOLD; a
        value of exactly zero places no order.

        :param order_id: first order id to use; incremented once per order.
            NOTE(review): unlike ``closing_positions``, the final order id is
            NOT returned to the caller -- confirm whether that is intended.
        :param ordersPriority: value for the IB 'adaptivePriority' algo tag.
        :param transmit: if False, orders are staged in TWS without being
            transmitted.
        """
        class TestApp(EWrapper, EClient):
            def __init__(self):
                EClient.__init__(self, self)

            def error(self, reqId: TickerId, errorCode: int, errorString: str):
                # reqId == -1 messages are connection-status notices; skip them.
                if reqId > -1:
                    print("Error. Id: ", reqId, " Code: ", errorCode, " Msg: ", errorString)

        app = TestApp()
        app.connect('127.0.0.1', 7497, 0)
        if app.isConnected():
            print('app is running ...')
            print('balancing {} positions'.format(len(action_balance.index)))
            # Closing Position
            for i in action_balance.index:
                contract = Contract()
                contract.symbol = i
                contract.secType = 'STK'
                contract.exchange = 'SMART'
                contract.currency = 'USD'
                order = Order()
                order.orderType = 'MKT'
                order.totalQuantity = np.abs(action_balance.loc[i, 'shares'])
                order.transmit = transmit
                if action_balance.loc[i, 'shares'] > 0:
                    # Under-weight -> buy the share delta.
                    order.action = 'BUY'
                    order.algoStrategy = 'Adaptive'
                    order.algoParams = []
                    order.algoParams.append(TagValue("adaptivePriority", ordersPriority))
                    app.placeOrder(orderId=order_id, contract=contract, order=order)
                    sleep(sleeptime)
                    order_id = order_id + 1
                    print(' buy order quantity placed for {} is: {} '.format(contract.symbol, order.totalQuantity))
                elif action_balance.loc[i, 'shares'] < 0:
                    # Over-weight -> sell the share delta.
                    order.action = 'SELL'
                    order.algoStrategy = 'Adaptive'
                    order.algoParams = []
                    order.algoParams.append(TagValue("adaptivePriority", ordersPriority))
                    app.placeOrder(orderId=order_id, contract=contract, order=order)
                    sleep(sleeptime)
                    order_id = order_id + 1
                    print(' sell order quantity placed for {} is: {} '.format(contract.symbol, order.totalQuantity))
        else:
            print('app not connected')
        app.disconnect()
    def placing_final_orders(self, order_id, ordersPriority, transmit):
        """Place adaptive market orders for every ticker in the module-level
        ``action_final`` frame: BUY for a positive first-column value, SELL for
        a negative one; zero entries place no order.

        ``order_id`` is the first IB order id used and is incremented after
        each placed order.  NOTE(review): unlike the sibling methods there is
        no ``isConnected()`` guard and the final order id is not returned —
        confirm whether that is intentional.
        """
        # Minimal IB API client: EWrapper callbacks + EClient socket in one class.
        class TestApp(EWrapper, EClient):
            def __init__(self):
                EClient.__init__(self, self)
            def error(self, reqId: TickerId, errorCode: int, errorString: str):
                if reqId > -1:
                    print("Error. Id: ", reqId, " Code: ", errorCode, " Msg: ", errorString)
        app = TestApp()
        # 7497 is the TWS paper-trading port; client id 0.
        app.connect('127.0.0.1', 7497, 0)
        for ticker in action_final.index:
            contract = Contract()
            contract.symbol = ticker
            contract.secType = 'STK'
            contract.exchange = 'SMART'
            # contract.primaryExchange = 'ISLAND'
            contract.currency = 'USD'
            order = Order()
            order.orderType = 'MKT'
            order.transmit = transmit
            # Quantity is the absolute share count; the sign selects the side below.
            order.totalQuantity = np.abs(action_final.loc[ticker])[0]
            if action_final.loc[ticker][0] > 0:
                order.action = 'BUY'
                order.algoStrategy = 'Adaptive'
                order.algoParams = []
                order.algoParams.append(TagValue("adaptivePriority", ordersPriority))
                app.placeOrder(orderId=order_id, contract=contract, order=order)
                sleep(sleeptime)  # throttle so TWS can process sequential orders
                order_id = order_id + 1
                print('buy order quantity placed for {} is: {} '.format(contract.symbol, order.totalQuantity))
            elif action_final.loc[ticker][0] < 0:
                order.action = 'SELL'
                order.algoStrategy = 'Adaptive'
                order.algoParams = []
                order.algoParams.append(TagValue("adaptivePriority", ordersPriority))
                app.placeOrder(orderId=order_id, contract=contract, order=order)
                sleep(sleeptime)  # throttle so TWS can process sequential orders
                order_id = order_id + 1
                print('sell order quantity placed for {} is: {} '.format(contract.symbol, order.totalQuantity))
        app.disconnect()
def commission_report(self, time):
class TestApp(EWrapper, EClient):
def __init__(self):
EClient.__init__(self, self)
self.executed_orders = pd.DataFrame(columns=['ticker',
'time', 'shares', 'action',
'price', 'marketValue',
'RealizedPNL', 'commission'])
self.val = 0
self.val2 = 0
def error(self, reqId: TickerId, errorCode: int, errorString: str):
if reqId > -1:
print("Error. Id: ", reqId, " Code: ", errorCode, " Msg: ", errorString)
def execDetails(self, reqId, contract, execution):
super().execDetails(reqId, contract, execution)
self.executed_orders.loc[self.val, ['ticker',
'time',
'shares',
'action',
'price',
'marketValue']] = [contract.symbol,
pd.to_datetime(execution.time),
execution.shares, execution.side,
execution.price,
execution.shares * execution.price]
self.val = self.val + 1
def commissionReport(self, commissionReport):
super().commissionReport(commissionReport)
self.executed_orders.loc[self.val2, ['RealizedPNL', 'commission']] = [
float(commissionReport.realizedPNL),
float(commissionReport.commission)]
self.val2 = self.val2 + 1
def execDetailsEnd(self, reqId):
super().execDetailsEnd(reqId)
self.disconnect()
app = TestApp()
app.connect('127.0.0.1', 7497, 0)
execution_filter = ExecutionFilter()
execution_filter.acctCode = acctCode
execution_filter.time = time
app.reqExecutions(0, execution_filter)
sleep(sleeptime)
df = app.executed_orders
app.run()
sleep(sleeptime)
df.set_index('time', inplace=True)
df.sort_index(inplace=True)
df['RealizedPNL'][df['RealizedPNL'] > 1000000] = 'OPEN'
return df
| 35.769384 | 116 | 0.508337 | 17,442 | 0.969431 | 0 | 0 | 0 | 0 | 0 | 0 | 1,922 | 0.106825 |
0f4337c77c2dd91a0006d123d0ff0ba1d1d0ff4b | 3,767 | py | Python | senior/StavleLLVE/train.py | LeiGitHub1024/lowlight | 86649ae4275b8908d39561792a8068d391318f8d | [
"MIT"
] | 79 | 2021-03-27T04:22:12.000Z | 2022-03-30T03:15:04.000Z | senior/StavleLLVE/train.py | LeiGitHub1024/lowlight | 86649ae4275b8908d39561792a8068d391318f8d | [
"MIT"
] | 3 | 2021-03-30T09:31:32.000Z | 2021-09-02T02:02:33.000Z | senior/StavleLLVE/train.py | LeiGitHub1024/lowlight | 86649ae4275b8908d39561792a8068d391318f8d | [
"MIT"
] | 16 | 2021-04-09T05:38:32.000Z | 2022-03-23T13:33:50.000Z | import argparse
import os, socket
from datetime import datetime
import shutil
import numpy as np
import torch
import torch.nn as nn
from torch import optim
from model import UNet
from warp import WarpingLayerBWFlow
from torch.utils.tensorboard import SummaryWriter
from dataloader import llenDataset
from torch.utils.data import DataLoader
import cv2
import kornia
import random
def save_checkpoint(state, epoch, output_directory):
    """Persist *state* (e.g. a model state dict) as
    ``checkpoint-<epoch>.pth`` inside *output_directory*."""
    target_path = os.path.join(output_directory, 'checkpoint-{}.pth'.format(epoch))
    torch.save(state, target_path)
# Parse arguments
parser = argparse.ArgumentParser(description='Low light enhancement')
parser.add_argument('--data-path', default='./data', type=str, help='path to the dataset')
parser.add_argument('--epochs', default=50, type=int, help='n of epochs (default: 50)')
parser.add_argument('--bs', default=1, type=int, help='[train] batch size(default: 1)')
parser.add_argument('--bs-test', default=1, type=int, help='[test] batch size (default: 1)')
parser.add_argument('--lr', default=1e-4, type=float, help='learning rate (default: 1e-4)')
parser.add_argument('--gpu', default='0', type=str, help='GPU id to use (default: 0)')
parser.add_argument('--checkpoint', default=None, type=str, help='path to checkpoint')
parser.add_argument('--log', default=None, type=str, help='folder to log')
parser.add_argument('--weight', default=20, type=float, help='weight of consistency loss')
args = parser.parse_args()
print(args)
# Restrict CUDA to the requested device id(s) before any CUDA context is made.
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
# Training data pipeline.
train_set = llenDataset(args.data_path, type='train')
train_loader = DataLoader(train_set, batch_size=args.bs, shuffle=True, num_workers=8, pin_memory=True)
# Fixed seeds (ord('c')+137 == 236) for reproducible runs.
torch.manual_seed(ord('c')+137)
random.seed(ord('c')+137)
np.random.seed(ord('c')+137)
start_epoch = 0
# Model, optimizer, L1 reconstruction loss and backward-flow warping layer.
model = UNet(n_channels=3, bilinear=True).cuda()
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.999))
criterion = nn.L1Loss()
warp = WarpingLayerBWFlow().cuda()
# Create logger
# Default log dir is timestamped with the hostname; --log overrides the folder
# name.  Use "is None" (identity) rather than "==" to test for the missing arg.
if args.log is None:
    log_dir = os.path.join(os.path.abspath(os.getcwd()), 'logs', datetime.now().strftime('%b%d_%H-%M-%S_') + socket.gethostname())
else:
    log_dir = os.path.join(os.path.abspath(os.getcwd()), 'logs', args.log)
os.makedirs(log_dir)
logger = SummaryWriter(log_dir)
# Log arguments
with open(os.path.join(log_dir, "config.txt"), "a") as f:
    print(args, file=f)
iters = 0  # global step counter across all epochs (TensorBoard x-axis)
for epoch in range(start_epoch, args.epochs):
    # log learning rate
    for i, param_group in enumerate(optimizer.param_groups):
        logger.add_scalar('Lr/lr_' + str(i), float(param_group['lr']), epoch)
    # Training stage
    print('Epoch', epoch, 'train in progress...')
    model.train()
    for i, (input, target, flow) in enumerate(train_loader):
        input, target, flow= input.cuda(), target.cuda(), flow.cuda()
        # the 1st pass
        # Reconstruction: enhanced output vs ground-truth target (L1).
        pred = model(input)
        loss = criterion(pred, target)
        # the 2nd pass
        # Temporal consistency: enhancing a flow-warped input should match
        # warping the enhanced prediction with the same flow.
        input_t = warp(input, flow)
        input_t_pred = model(input_t)
        pred_t = warp(pred, flow)
        loss_t = criterion(input_t_pred, pred_t)
        # Consistency loss is weighted by --weight (default 20).
        total_loss = loss + loss_t * args.weight
        optimizer.zero_grad()
        total_loss.backward()
        optimizer.step()
        logger.add_scalar('Train/Loss', loss.item(), iters)
        logger.add_scalar('Train/Loss_t', loss_t.item(), iters)
        iters += 1
        if (i + 1) % 10 == 0:
            print('Train Epoch: {0} [{1}/{2}]\t'
                  'l1Loss={Loss1:.8f} '
                  'conLoss={Loss2:.8f} '.format(
                epoch, i + 1, len(train_loader), Loss1=loss.item(), Loss2=loss_t.item()))
    # One checkpoint per epoch in the run's log directory.
    save_checkpoint(model.state_dict(), epoch, log_dir)
print()
logger.close()
0f4590671b6b3912fdd3111d3e454e140576dfa5 | 190 | py | Python | dask_image/__init__.py | akhalighi/dask-image | 8ff0f16dba8a874c7d8d3adf4e5e8bac5f4ee1bf | [
"BSD-3-Clause"
] | 2 | 2020-06-05T07:17:57.000Z | 2020-06-05T09:10:16.000Z | dask_image/__init__.py | akhalighi/dask-image | 8ff0f16dba8a874c7d8d3adf4e5e8bac5f4ee1bf | [
"BSD-3-Clause"
] | 15 | 2016-11-01T12:54:03.000Z | 2019-02-28T18:16:48.000Z | mplview/__init__.py | jakirkham/mplview | 0847e4ccf3c4247cb72f35600b7f5f553b429c2d | [
"BSD-3-Clause"
] | 1 | 2016-11-17T06:09:16.000Z | 2016-11-17T06:09:16.000Z | # -*- coding: utf-8 -*-
# Package metadata.
__author__ = """John Kirkham"""
__email__ = "kirkhamj@janelia.hhmi.org"
# Version string is generated by versioneer at build/install time.
from ._version import get_versions
__version__ = get_versions()['version']
# Keep the helper out of the public module namespace.
del get_versions
0f46ab87cf6a22e3811c59d7f17625bcf1eea00d | 10,999 | py | Python | OPTICS2.py | k-kapp/Clustering-Algos | 0e8925b854f70eabc926de331cbd3478de6cf90c | [
"Apache-2.0"
] | null | null | null | OPTICS2.py | k-kapp/Clustering-Algos | 0e8925b854f70eabc926de331cbd3478de6cf90c | [
"Apache-2.0"
] | null | null | null | OPTICS2.py | k-kapp/Clustering-Algos | 0e8925b854f70eabc926de331cbd3478de6cf90c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 30 21:25:24 2015
@author: Konrad
"""
import copy
import numpy as np
import matplotlib.pyplot as plt
import scipy.special as sc_p
def gen_clusters(means, num_each):
    """Draw ``num_each`` Gaussian samples around each mean and shuffle them.

    Generalized from the original, which hard-coded a 2x2 identity covariance
    (``np.diag(np.ones(2))``): the covariance is now an identity matrix sized
    to each mean, so means of any dimensionality work.  For 2-D means the
    output is identical in distribution to the original.

    :param means: iterable of cluster centers (each a length-d sequence).
    :param num_each: number of samples drawn per center.
    :return: shuffled ndarray of shape (len(means) * num_each, d).
    """
    samples = tuple(
        np.random.multivariate_normal(mean, np.eye(len(mean)), num_each)
        for mean in means
    )
    data = np.concatenate(samples)
    np.random.shuffle(data)  # in-place shuffle so clusters are interleaved
    return data
def make_pts(data):
    """Wrap every coordinate row of *data* in a Point bookkeeping object."""
    return [Point(pos) for pos in data]
def euclid(obj1, obj2):
    """Euclidean distance between two Points or two ndarrays; None otherwise."""
    if isinstance(obj1, Point) and isinstance(obj2, Point):
        delta = obj1.pos - obj2.pos
    elif isinstance(obj1, np.ndarray) and isinstance(obj2, np.ndarray):
        delta = obj1 - obj2
    else:
        return None
    # builtin sum (not np.sum) kept deliberately to match original semantics
    return np.sqrt(sum(delta ** 2))
class Point:
    """A data sample plus the bookkeeping fields the OPTICS sweep mutates."""

    def __init__(self, pos):
        # Private copy so the caller's array/list cannot be mutated later.
        self.pos = copy.deepcopy(pos)
        # OPTICS state: not yet visited, no distances known, not queued.
        self.processed = self.in_seed = False
        self.core_dist = self.reach_dist = None
class OPTICS:
    """OPTICS (Ordering Points To Identify the Clustering Structure).

    ``data`` is a list of Point-like objects exposing ``pos``, ``processed``,
    ``core_dist``, ``reach_dist``, ``in_seed`` and (transiently) ``neighbours``.
    The reachability ordering is built eagerly in ``__init__`` and stored in
    ``self.main_list`` as indices into ``self.data``.
    """

    def __init__(self, min_pts, data, max_eps=None):
        self.max_eps = max_eps
        self.min_pts = min_pts
        self.data = copy.deepcopy(data)
        self.dim = self.data[0].pos.size
        self.main_list = []
        if self.max_eps is None:
            self.get_max_eps()  # derive a heuristic radius from the data extent
        self.main_loop()

    def __call__(self, main_idx):
        """Reachability distance of the main_idx-th point in the ordering."""
        return self.data[self.main_list[main_idx]].reach_dist

    def main_loop(self):
        """Expand every unprocessed point, then flush any leftovers."""
        for idx, obj in enumerate(self.data):
            if not obj.processed:
                self.expand_point(idx)
        # Points never reached by any expansion still get an ordering slot.
        for idx, obj in enumerate(self.data):
            if not obj.processed:
                self.append_main(idx)

    def get_max_eps(self):
        """Heuristic epsilon from the bounding-box area and min_pts density."""
        extr_x = self.get_extr_x()
        extr_y = self.get_extr_y()
        area = (extr_x[1] - extr_x[0]) * (extr_y[1] - extr_y[0])
        # NOTE(review): 2-D-only formula (uses the x/y bounding box) — confirm
        # before using with self.dim != 2.
        self.max_eps = ((area * self.min_pts * sc_p.gamma(2)) / (len(self.data) * np.sqrt(np.pi ** 2))) ** 0.5

    def get_extr_x(self):
        """(min, max) of the first coordinate over all points."""
        min_x = float("inf")
        max_x = -float("inf")
        for obj in self.data:
            if obj.pos[0] < min_x:
                min_x = obj.pos[0]
            if obj.pos[0] > max_x:
                max_x = obj.pos[0]
        return (min_x, max_x)

    def get_extr_y(self):
        """(min, max) of the second coordinate over all points."""
        min_y = float("inf")
        max_y = -float("inf")
        for obj in self.data:
            if obj.pos[1] < min_y:
                min_y = obj.pos[1]
            if obj.pos[1] > max_y:
                max_y = obj.pos[1]
        return (min_y, max_y)

    def append_main(self, idx):
        """Mark point idx processed and append it to the output ordering."""
        self.data[idx].processed = True
        if self.data[idx].reach_dist is None:
            # Undefined reachability is plotted as the maximum radius.
            self.data[idx].reach_dist = self.max_eps
        self.main_list.append(idx)

    def expand_point(self, idx):
        """Standard OPTICS expansion starting from point idx."""
        self.get_neighbours(idx)
        self.get_core_dist(idx)
        if self.data[idx].core_dist == -1:
            return  # not a core point: cannot seed an expansion
        self.data[idx].processed = True
        self.append_main(idx)
        seed_list = []
        self.append_seed(seed_list, self.data[idx].neighbours, idx)
        # seed_list is kept sorted by reachability; always take the head.
        while len(seed_list) > 0:
            curr_idx = seed_list[0]
            self.get_neighbours(curr_idx)
            self.get_core_dist(curr_idx)
            self.data[curr_idx].processed = True
            self.append_main(curr_idx)
            self.remove_seed(seed_list)
            if self.data[curr_idx].core_dist != -1:
                self.append_seed(seed_list, self.data[curr_idx].neighbours, curr_idx)

    def get_core_dist(self, idx):
        """Distance to the min_pts-th neighbour, or -1 if not a core point."""
        if len(self.data[idx].neighbours) >= self.min_pts:
            self.data[idx].core_dist = self.data[idx].neighbours[self.min_pts - 1][1]
        else:
            self.data[idx].core_dist = -1

    def get_reach_dist(self, center_idx, idx, dist):
        """Lower point idx's reachability w.r.t. center; True if it changed."""
        r_dist = max(dist, self.data[center_idx].core_dist)
        if self.data[idx].reach_dist is None or self.data[idx].reach_dist > r_dist:
            self.data[idx].reach_dist = r_dist
            return True
        return False

    def get_neighbours(self, idx):
        """All points within max_eps of idx (self included), sorted by distance."""
        self.data[idx].neighbours = []
        for n_idx, obj in enumerate(self.data):
            dist = euclid(obj, self.data[idx])
            if dist <= self.max_eps:
                self.data[idx].neighbours.append([n_idx, dist])
        self.data[idx].neighbours.sort(key=lambda x: x[1])

    def append_seed(self, seed_list, neighbours, center_idx):
        """Insert/update center's neighbours into the sorted seed list."""
        for n_tup in neighbours:
            changed = self.get_reach_dist(center_idx, n_tup[0], n_tup[1])
            if self.data[n_tup[0]].in_seed and changed:
                # Reachability improved: pull it out so it is re-inserted
                # below at its new (earlier) sorted position.
                del seed_list[seed_list.index(n_tup[0])]
                self.data[n_tup[0]].in_seed = False
            elif self.data[n_tup[0]].processed or self.data[n_tup[0]].in_seed:
                continue
            for idx, obj in enumerate(seed_list):
                if self.data[n_tup[0]].reach_dist < self.data[obj].reach_dist:
                    seed_list.insert(idx, n_tup[0])
                    self.data[n_tup[0]].in_seed = True
                    break
            if not self.data[n_tup[0]].in_seed:
                seed_list.append(n_tup[0])
                self.data[n_tup[0]].in_seed = True

    def remove_seed(self, seed_list):
        """Pop the head of the seed list and clear its queued flag."""
        self.data[seed_list[0]].in_seed = False
        del seed_list[0]

    def reach_plot(self):
        """Bar plot of reachability distances in ordering position."""
        x = list(range(len(self.main_list)))
        y = []
        for idx in self.main_list:
            y.append(self.data[idx].reach_dist)
        f, ax = plt.subplots()
        ax.bar(x, y)

    def print_reach_dist(self):
        """Debug dump: ordering index followed by its reachability."""
        for idx in self.main_list:
            print(idx)
            print(self.data[idx].reach_dist)

    def plot_data(self):
        """Scatter plot of the raw 2-D input points."""
        x = []
        y = []
        for obj in self.data:
            x.append(obj.pos[0])
            y.append(obj.pos[1])
        f, ax = plt.subplots()
        ax.scatter(x, y)

    def get_num_clusters(self):
        """Scan the reachability plot for peak-to-valley drops.

        BUG FIX: the original compared ``reach_dist`` against the *Point
        object* ``self.data[self.main_list[i + 1]]`` (missing ``.reach_dist``),
        which raised TypeError, and discarded the collected list.  The drops
        are now returned.  NOTE(review): the scan logic itself still looks
        unfinished — confirm intended semantics before relying on it.
        """
        clusters = []
        up = True
        top, bottom = -1, -1
        for i, idx in enumerate(self.main_list[:-1]):
            cur = self.data[idx].reach_dist
            nxt = self.data[self.main_list[i + 1]].reach_dist
            if up and cur > nxt:
                up = not up
                if not bottom == -1:
                    clusters.append(top - bottom)
                top = cur
                continue
            if (not up) and cur < nxt:
                up = not up
                bottom = cur
        return clusters
class Clusters:
    """Extract cluster regions from an OPTICS ordering (xi-style up/down areas).

    NOTE(review): this class appears unfinished.  ``find`` subtracts raw
    ``main_list`` index entries (``self.main_list[idx] - self.main_list[idx + 1]``)
    and ``proc_down``/``proc_up`` read ``self.main_list[idx].reach_dist`` even
    though ``main_list`` holds ints — confirm intended data flow.  Only the
    constructor defect is fixed here; the detection logic is preserved as
    written pending clarification.
    """

    def __init__(self, optics_obj, eps):
        # BUG FIX: the original signature omitted ``self`` (it was
        # ``__init__(optics_obj, eps)``), so every attribute assignment below
        # raised NameError the moment the class was instantiated.
        self.optics_obj = optics_obj          # callable: index -> reach_dist
        self.main_list = optics_obj.main_list
        self.eps = eps                        # xi steepness threshold
        self.min_pts = optics_obj.min_pts

    def find(self):
        """Walk the reachability plot pairing steep-down with steep-up areas."""
        idx = 0
        #down, up = False, False;
        downs = []
        clusters = []
        while idx < len(self.main_list):
            # NOTE(review): subtracts main_list entries (indices), not
            # reachability values — looks like it should use self.optics_obj.
            diff = self.main_list[idx] - self.main_list[idx + 1]
            if diff >= self.optics_obj(idx) * self.eps:
                new_down, idx = self.proc_down(idx)
                downs.append([new_down, -float("inf")])
                #glob_mib = self.optics_obj(downs[-1][0][0]]);
                #self.filter_downs(glob_mib, downs);
            elif -diff >= self.optics_obj(idx) * self.eps:
                glob_mib = self.get_glob_mib(downs[-1], idx)
                self.filter_downs(glob_mib, downs)
                up, idx = self.proc_up(idx)
                for down in downs:
                    if self.optics_obj(up[1]).reach_dist * (1 - self.eps) >= down[1]:
                        clusters.append((down[0][0], up[1]))
            else:
                idx += 1

    def get_glob_mib(self, last_down, curr_idx):
        """Maximum reachability between the last down-area and curr_idx."""
        begin_idx, end_idx = last_down[0][1], curr_idx
        glob_mib = -float("inf")
        for i in range(begin_idx, end_idx + 1):
            if self.optics_obj(i) > glob_mib:
                glob_mib = self.optics_obj(i)
        return glob_mib

    def proc_down(self, idx):
        """Extend a steep-down area, tolerating up to min_pts flat steps."""
        bad_inrow = 0
        begin_idx = idx
        while idx < len(self.main_list):
            idx += 1
            diff = self.main_list[idx].reach_dist - self.main_list[idx + 1].reach_dist
            if diff < 0:
                return (begin_idx, idx - 1)
            if diff > 0:
                if diff >= self.eps * self.main_list[idx]:
                    bad_inrow = 0
                else:
                    if bad_inrow == 0:
                        last_good = idx - 1
                    bad_inrow += 1
                    if bad_inrow > self.min_pts:
                        # include a check that ensures region does not have
                        # length zero?
                        return (begin_idx, last_good), idx

    def proc_up(self, idx):
        """Extend a steep-up area, tolerating up to min_pts flat steps."""
        bad_inrow = 0
        begin_idx = idx
        while idx < len(self.main_list):
            idx += 1
            diff = self.main_list[idx].reach_dist[idx + 1] - self.main_list[idx].reach_dist
            if diff < 0:
                return (begin_idx, idx - 1)
            if diff > 0:
                if diff >= self.eps * self.main_list[idx + 1]:
                    bad_inrow = 0
                else:
                    if bad_inrow == 0:
                        last_good = idx - 1
                    bad_inrow += 1
                    if bad_inrow > self.min_pts:
                        return (begin_idx, last_good), idx

    def filter_downs(self, glob_mib, downs):
        """Drop down-areas invalidated by glob_mib; raise others' mib floor."""
        del_idx = []
        for idx, obj in enumerate(downs[:-1]):
            if self.main_list[obj[0][0]].reach_dist * (1 - self.eps) < glob_mib:
                del_idx.append(idx)
            elif obj[1] < glob_mib:
                downs[idx][1] = glob_mib
        del_idx.reverse()  # delete from the back so indices stay valid
        for i in del_idx:
            del downs[i]
# Demo: four Gaussian clusters of 200 points each, run OPTICS and plot.
dat = gen_clusters([[1, 1], [6, 7], [10, 15], [15, 15]], 200);
data = make_pts(dat);
# min_pts=15; max_eps is derived automatically inside OPTICS.
optics = OPTICS(15, data);
optics.reach_plot();
optics.plot_data();
# Blocks until the matplotlib windows are closed.
plt.show();
#optics.print_reach_dist();
print ("Done")
0f4747bfbaa5296131febb1ca35e289b15c1fa5d | 921 | py | Python | pandayoda/test/test_interaction.py | PalNilsson/panda-yoda | cf3c3786ca8937c9a61730b6201edd5bfaeda022 | [
"Apache-2.0"
] | null | null | null | pandayoda/test/test_interaction.py | PalNilsson/panda-yoda | cf3c3786ca8937c9a61730b6201edd5bfaeda022 | [
"Apache-2.0"
] | 22 | 2018-07-26T15:54:36.000Z | 2019-05-27T09:28:48.000Z | pandayoda/test/test_interaction.py | PalNilsson/panda-yoda | cf3c3786ca8937c9a61730b6201edd5bfaeda022 | [
"Apache-2.0"
] | 3 | 2018-05-21T13:12:10.000Z | 2019-04-02T08:41:57.000Z | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Taylor Childers (john.taylor.childers@cern.ch)
# - Paul Nilsson (paul.nilsson@cern.ch)
from mpi4py import MPI
from pandayoda.yodacore import Interaction
comm = MPI.COMM_WORLD
mpirank = comm.Get_rank()
# Rank 0 acts as the yoda Receiver; every other rank sends one request.
if mpirank == 0:
    rsv = Interaction.Receiver()
    # Serve requests until all sender ranks have been accounted for.
    while rsv.activeRanks():
        tmpStat, method, params = rsv.receiveRequest()
        print(mpirank, 'got', tmpStat, method, params)
        print(rsv.returnResponse({'msg': 'Done'}))
        rsv.decrementNumRank()
    print(mpirank, "done")
else:
    # Worker ranks: fire a single dummy request tagged with their rank.
    snd = Interaction.Requester()
    print(mpirank, "sending req")
    res = snd.sendRequest('dummy', {1: 2, 3: 4, 'rank': mpirank})
    print(res)
    print(mpirank, "done")
0f47798905b7c3b31ed013305b5da89834823c20 | 3,452 | py | Python | tools/instrumentation_helpers/instrumentor.py | mikezucc/xchammer | 15659a678c24208e80e11713559b0380cc01f8fd | [
"Apache-2.0"
] | null | null | null | tools/instrumentation_helpers/instrumentor.py | mikezucc/xchammer | 15659a678c24208e80e11713559b0380cc01f8fd | [
"Apache-2.0"
] | null | null | null | tools/instrumentation_helpers/instrumentor.py | mikezucc/xchammer | 15659a678c24208e80e11713559b0380cc01f8fd | [
"Apache-2.0"
] | null | null | null | import os
import time
import re
import socket
import json
import platform
import multiprocessing
import getpass
# Set this to the value of the statsd backend
# Consider:
# - allowing the user to specify this as a config
# - adding the ability to load hooks as an external repo.
TSD_HOST = ""
TSD_PORT = 80
# Seconds to wait when opening the TSD socket before giving up.
TSD_TIMEOUT_SEC = 5
# Characters outside [._a-zA-Z0-9] are replaced with '_' in metric tag values.
INVALID_LABEL_REG = re.compile(r"[^\._a-zA-Z0-9]")
def extract_profile_line(line, item):
    """Return the text after ``"<item>: "`` in a system_profiler output line,
    or None when the item (or its ``": "`` separator) is absent."""
    if item not in line:
        return None
    pieces = line.split(item + ": ")
    return pieces[1] if len(pieces) > 1 else None
# Memoize calls to system_profiler
PROFILE_INFO = None
def get_system_profile():
    """Collect host hardware/OS facts used as metric tags.

    Shells out to macOS ``system_profiler`` once and caches the parsed result
    in the module-level PROFILE_INFO for subsequent calls.
    """
    global PROFILE_INFO
    if PROFILE_INFO:
        return PROFILE_INFO
    # macOS-only: parse the human-readable hardware overview.
    process = os.popen("system_profiler SPHardwareDataType")
    result = process.read()
    lines = result.split("\n")
    output = {}
    for line in lines:
        memory = extract_profile_line(line, "Memory")
        if memory:
            output["memory"] = memory
        model_identifier = extract_profile_line(line, "Model Identifier")
        if model_identifier:
            output["model_identifier"] = model_identifier
        processor_speed = extract_profile_line(line, "Processor Speed")
        if processor_speed:
            output["processor_speed"] = processor_speed
        processor_name = extract_profile_line(line, "Processor Name")
        if processor_name:
            output["processor_name"] = processor_name
    # Get some stats about the host
    output["os_version"] = platform.mac_ver()[0]
    # This will optionally print the number of virtual cores - see docs for more info
    output["cpu_count"] = multiprocessing.cpu_count()
    output["host"] = socket.gethostname()
    output["username"] = getpass.getuser()
    PROFILE_INFO = output
    return output
def get_tsd(tags_dict):
    """Render *tags_dict* as OpenTSDB tags: space-separated ``key=value``
    pairs with disallowed value characters replaced by underscores."""
    pairs = [
        "{}={}".format(tag, INVALID_LABEL_REG.sub("_", str(raw)))
        for tag, raw in tags_dict.items()
    ]
    return " ".join(pairs)
def write_tsd(metric, delta):
    """Send one OpenTSDB ``put`` line for *metric* with value *delta*,
    tagged with the host's system profile.  Best-effort: failures are
    printed, never raised."""
    timestamp = int(round(time.time()))
    tags_dict = get_system_profile()
    tags = get_tsd(tags_dict)
    tsd = "put {metric} {timestamp} {delta} {tags}\n".format(
        metric=metric,
        timestamp=timestamp,
        delta=delta,
        tags=tags)
    try:
        sock = socket.create_connection(
            (TSD_HOST, TSD_PORT),
            timeout=TSD_TIMEOUT_SEC)
        # BUG FIX: socket.sendall requires a bytes-like object in Python 3;
        # the original passed the str and always fell into the except branch.
        sock.sendall(tsd.encode("utf-8"))
        sock.close()
    except Exception:
        print("failed to write '{}' to {}:{}".format(tsd, TSD_HOST, TSD_PORT))
def write_build_metric():
    """Report elapsed build time (ms) since the xchammer.build_start marker.

    Reads Xcode build-phase environment: TARGET_BUILD_DIR locates the marker
    file whose mtime is the build start; TARGET_NAME (if set) is appended to
    the metric name.  NOTE(review): a missing TARGET_BUILD_DIR makes
    os.path.join receive None and raise — assumed to run only inside an
    Xcode build phase; confirm.
    """
    start_time_f = os.path.join(
        os.environ.get("TARGET_BUILD_DIR"), "xchammer.build_start")
    # Marker file's modification time marks the start of the build.
    start_time = os.path.getmtime(start_time_f)
    delta = ((time.time()-start_time)*1000)
    metric = "xchammer.build"
    build_target = os.environ.get("TARGET_NAME")
    if build_target:
        metric += "." + build_target
    write_tsd(metric, delta)
def write_last_generation_metric():
    """Replay xchammer's trace log (one JSON object per line) as TSD metrics.

    The log lives under $OBJROOT when set, otherwise /private/var/tmp.
    """
    objroot = os.environ.get("OBJROOT")
    base_path = objroot if objroot else "/private/var/tmp"
    last_generation_log = os.path.join(base_path, "xchammer.log")
    with open(last_generation_log, "r") as log_file:
        for raw_line in log_file:
            json_str = raw_line.split("\n")[0]
            trace_entry = json.loads(json_str)
            write_tsd(trace_entry["name"], trace_entry["ts"])
0f47f785479aecc6801edc61672d6949600791f3 | 5,271 | py | Python | peer/lifecycle/db_pb2.py | jeffgarratt/fabric-prototype | 46cfc67a1d74d1f38f498d3409327692fb733fd0 | [
"CC-BY-4.0"
] | 6 | 2017-10-16T13:46:46.000Z | 2020-02-28T07:48:51.000Z | peer/lifecycle/db_pb2.py | jeffgarratt/fabric-prototype | 46cfc67a1d74d1f38f498d3409327692fb733fd0 | [
"CC-BY-4.0"
] | 18 | 2017-10-02T16:31:51.000Z | 2020-02-24T21:39:20.000Z | peer/lifecycle/db_pb2.py | jeffgarratt/fabric-prototype | 46cfc67a1d74d1f38f498d3409327692fb733fd0 | [
"CC-BY-4.0"
] | 4 | 2019-02-01T14:46:21.000Z | 2021-06-01T05:49:11.000Z | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: peer/lifecycle/db.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='peer/lifecycle/db.proto',
package='lifecycle',
syntax='proto3',
serialized_pb=_b('\n\x17peer/lifecycle/db.proto\x12\tlifecycle\"1\n\rStateMetadata\x12\x10\n\x08\x64\x61tatype\x18\x01 \x01(\t\x12\x0e\n\x06\x66ields\x18\x02 \x03(\t\"G\n\tStateData\x12\x0f\n\x05Int64\x18\x01 \x01(\x03H\x00\x12\x0f\n\x05\x42ytes\x18\x02 \x01(\x0cH\x00\x12\x10\n\x06String\x18\x03 \x01(\tH\x00\x42\x06\n\x04TypeBf\n,org.hyperledger.fabric.protos.peer.lifecycleZ6github.com/hyperledger/fabric-protos-go/peer/lifecycleb\x06proto3')
)
_STATEMETADATA = _descriptor.Descriptor(
name='StateMetadata',
full_name='lifecycle.StateMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='datatype', full_name='lifecycle.StateMetadata.datatype', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='fields', full_name='lifecycle.StateMetadata.fields', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=38,
serialized_end=87,
)
_STATEDATA = _descriptor.Descriptor(
name='StateData',
full_name='lifecycle.StateData',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='Int64', full_name='lifecycle.StateData.Int64', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='Bytes', full_name='lifecycle.StateData.Bytes', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='String', full_name='lifecycle.StateData.String', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='Type', full_name='lifecycle.StateData.Type',
index=0, containing_type=None, fields=[]),
],
serialized_start=89,
serialized_end=160,
)
_STATEDATA.oneofs_by_name['Type'].fields.append(
_STATEDATA.fields_by_name['Int64'])
_STATEDATA.fields_by_name['Int64'].containing_oneof = _STATEDATA.oneofs_by_name['Type']
_STATEDATA.oneofs_by_name['Type'].fields.append(
_STATEDATA.fields_by_name['Bytes'])
_STATEDATA.fields_by_name['Bytes'].containing_oneof = _STATEDATA.oneofs_by_name['Type']
_STATEDATA.oneofs_by_name['Type'].fields.append(
_STATEDATA.fields_by_name['String'])
_STATEDATA.fields_by_name['String'].containing_oneof = _STATEDATA.oneofs_by_name['Type']
DESCRIPTOR.message_types_by_name['StateMetadata'] = _STATEMETADATA
DESCRIPTOR.message_types_by_name['StateData'] = _STATEDATA
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
StateMetadata = _reflection.GeneratedProtocolMessageType('StateMetadata', (_message.Message,), dict(
DESCRIPTOR = _STATEMETADATA,
__module__ = 'peer.lifecycle.db_pb2'
# @@protoc_insertion_point(class_scope:lifecycle.StateMetadata)
))
_sym_db.RegisterMessage(StateMetadata)
StateData = _reflection.GeneratedProtocolMessageType('StateData', (_message.Message,), dict(
DESCRIPTOR = _STATEDATA,
__module__ = 'peer.lifecycle.db_pb2'
# @@protoc_insertion_point(class_scope:lifecycle.StateData)
))
_sym_db.RegisterMessage(StateData)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n,org.hyperledger.fabric.protos.peer.lifecycleZ6github.com/hyperledger/fabric-protos-go/peer/lifecycle'))
# @@protoc_insertion_point(module_scope)
| 36.604167 | 447 | 0.751281 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,378 | 0.26143 |
0f4928cee3b81be294472e379d117e0b188de64b | 2,025 | py | Python | minepdf/cidsystem.py | jonix6/minepdf | 6c57427fb16622a1b9960f6e7d514487d9bcd877 | [
"MIT"
] | 2 | 2021-01-29T07:59:17.000Z | 2021-04-01T06:08:30.000Z | minepdf/cidsystem.py | jonix6/minepdf | 6c57427fb16622a1b9960f6e7d514487d9bcd877 | [
"MIT"
] | null | null | null | minepdf/cidsystem.py | jonix6/minepdf | 6c57427fb16622a1b9960f6e7d514487d9bcd877 | [
"MIT"
] | 1 | 2021-03-05T11:14:14.000Z | 2021-03-05T11:14:14.000Z |
import re
from collections import OrderedDict
import struct
import os
import decoder748
# One mapping per line: "<start-cid-hex> <end-cid-hex> <unicode-code-decimal>".
REG_EXP = re.compile(r'^\s*<([0-9a-f]+)>\s+<([0-9a-f]+)>\s+(\d+)$', re.M)


class CMap:
    """CID-to-Unicode map built from range lines in ``MAP_STRING``.

    Subclasses set MAP_STRING; each range maps CIDs starting at ``start`` to
    Unicode code points starting at ``code``.
    """
    MAP_STRING = ''

    def __init__(self):
        self.codePoints = set()   # start CIDs of the mapped ranges
        self.cid2unicode = {}     # range start CID -> first Unicode code point
        self._feed()

    def _feed(self):
        """Parse MAP_STRING and populate the lookup tables."""
        for (s, e, code) in re.findall(REG_EXP, self.MAP_STRING):
            s = int(s, 16)
            # NOTE(review): the range end ``e`` is parsed but never stored —
            # out-of-range CIDs are not rejected; confirm intent.
            e = int(e, 16)
            self.codePoints.add(s)
            self.cid2unicode[s] = int(code)

    def to_unicode(self, cid):
        """Map *cid* through the range whose start is the floor of cid.

        BUG FIX: the original iterated the *unordered* ``codePoints`` set and
        broke at the first element >= cid, making the chosen range depend on
        set iteration order.  Scan the starts in sorted order and keep the
        largest start <= cid instead (same floor rule as the module-level
        ``to_unicode`` helper).
        """
        point = 0
        for start in sorted(self.codePoints):
            if cid < start:
                break
            point = start
        d = cid - point
        code = self.cid2unicode[point]
        return chr(code + d)
def to_unicode(klass, cid):
    """Convert *cid* to text using klass's ``diff`` overrides, falling back
    to its ``cid2unicode`` range table; multi-byte results are produced by
    decoding the big-endian code value as GB18030."""
    if cid in klass.diff:
        return klass.diff[cid]
    # Floor scan: largest range start <= cid (0 when cid precedes them all).
    floor = 0
    for start in sorted(klass.cid2unicode.keys()):
        if cid < start:
            break
        floor = start
    code = klass.cid2unicode[floor] + (cid - floor)
    if code < 0x100:
        return chr(code)
    if code < 0x10000:
        return struct.pack('>H', code).decode('gb18030')
    return struct.pack('>L', code).decode('gb18030')
def to_unicode_wrapper(klass):
    """Bind *klass* into a one-argument ``cid -> str`` converter."""
    def converter(cid):
        return to_unicode(klass, cid)
    return converter
class UnicodeMap:
    """CID-to-Unicode table with per-CID overrides in ``diff``; CIDs without
    an override are treated as code points directly."""

    @property
    def DESC(self):
        # Location of the on-disk cidToUnicode tables (see FILE_NAME on
        # subclasses).
        return './cidtounicode'

    def __init__(self, cmap=None):
        """BUG FIX: the original used a mutable default (``cmap={}``) and
        aliased it into ``self.diff``, so every default-constructed instance
        shared — and could corrupt — a single dict.  Use a None sentinel and
        allocate a fresh dict per instance instead."""
        self.cid2unicode = {}
        self.diff = {} if cmap is None else cmap

    def get(self, cid):
        """Return the override for *cid* if present, else chr(cid)."""
        if cid in self.diff:
            return self.diff[cid]
        return chr(cid)
class ADOBE_GB1(UnicodeMap):
    # Adobe-GB1 character collection; FILE_NAME names its cid-to-unicode
    # table under the base class's DESC directory.
    FILE_NAME = 'Adobe-GB1.cidToUnicode'
def getCMap(cmapType, cmap=None):
    """Resolve *cmapType* into a CID map.

    Founder-*748 types re-decode every value in *cmap* through the 748
    decoder (mutating the passed-in dict); 'Adobe-GB1' wraps it in an
    ADOBE_GB1 UnicodeMap; anything else returns *cmap* unchanged.

    BUG FIX: the original default was a mutable ``cmap={}`` that the Founder
    branch mutated in place, leaking state across calls; a None sentinel with
    a fresh dict per call is used instead (callers passing nothing see the
    same empty-dict behavior, minus the sharing).
    """
    if cmap is None:
        cmap = {}
    if cmapType.startswith('Founder-') and cmapType.endswith('748'):
        decoder = decoder748.encoding(cmapType)
        for cid in cmap:
            cmap[cid] = decoder.decode(cmap[cid].encode('gb18030'))
    elif cmapType == 'Adobe-GB1':
        cmap = ADOBE_GB1(cmap=cmap)
    return cmap
| 23.275862 | 73 | 0.571358 | 926 | 0.457284 | 0 | 0 | 61 | 0.030123 | 0 | 0 | 148 | 0.073086 |
0f498fca97c908ff5fcb81796dcc212092f249b8 | 6,570 | py | Python | src/libtakiyasha/qmc/ciphers/modern.py | nukemiko/takiyasha | d0e2ebab2a476646313a1fd412f55b9d1300ea87 | [
"MIT"
] | 15 | 2022-01-16T16:13:20.000Z | 2022-03-27T15:30:51.000Z | src/libtakiyasha/qmc/ciphers/modern.py | nukemiko/takiyasha | d0e2ebab2a476646313a1fd412f55b9d1300ea87 | [
"MIT"
] | null | null | null | src/libtakiyasha/qmc/ciphers/modern.py | nukemiko/takiyasha | d0e2ebab2a476646313a1fd412f55b9d1300ea87 | [
"MIT"
] | 2 | 2022-01-20T18:59:07.000Z | 2022-01-26T08:09:15.000Z | from __future__ import annotations
import os
from typing import Generator
from ...common import Cipher, KeylessCipher
from ...utils import bytesxor
# QMCv1 static keystream, lazily loaded from the bundled binary blob:
# a 32768-byte first segment followed by a 32767-byte repeating segment.
QMCv1_KEYSTREAM_1ST_SEGMENT = b''
QMCv1_KEYSTREAM_REMAINING_SEGMENT = b''
__all__ = ['DynamicMap', 'ModifiedRC4', 'StaticMap']
def load_segment_file() -> None:
    """Populate the two module-level keystream segments (no-op once loaded)."""
    global QMCv1_KEYSTREAM_1ST_SEGMENT, QMCv1_KEYSTREAM_REMAINING_SEGMENT
    if not (QMCv1_KEYSTREAM_1ST_SEGMENT and QMCv1_KEYSTREAM_REMAINING_SEGMENT):
        # The blob ships next to this module under binaries/.
        with open(os.path.join(os.path.dirname(__file__), 'binaries/QMCv1-keystream-segment'), 'rb') as seg_file:
            QMCv1_KEYSTREAM_1ST_SEGMENT = seg_file.read(32768)
            QMCv1_KEYSTREAM_REMAINING_SEGMENT = seg_file.read(32767)
class StaticMap(KeylessCipher):
    """QMCv1 static-mapping cipher: XORs data against the fixed keystream
    loaded by load_segment_file() (first segment once, then the remaining
    segment repeated)."""
    @staticmethod
    def cipher_name() -> str:
        return 'Static Mapping'
    def __init__(self):
        # Ensure the module-level keystream segments are available.
        load_segment_file()
    def decrypt(self, cipherdata: bytes, start_offset: int = 0) -> bytes:
        """XOR *cipherdata* (located at *start_offset* in the stream) with
        the matching slice of the static keystream.

        :raises ValueError: if start_offset is negative.
        """
        first_seg = QMCv1_KEYSTREAM_1ST_SEGMENT
        remain_seg = QMCv1_KEYSTREAM_REMAINING_SEGMENT
        first_seg_len = len(first_seg)
        remain_seg_len = len(remain_seg)
        end_offset = start_offset + len(cipherdata)
        if start_offset < 0:
            raise ValueError("'start_offset' must be a positive integer")
        else:
            # Left-pad with NULs so absolute stream offsets line up with the
            # keystream; only the [start_offset:end_offset] slice is used.
            data = cipherdata.rjust(end_offset, b'\x00')
            data_len = len(data)
            if data_len <= first_seg_len:
                return bytesxor(data[start_offset:], first_seg[start_offset:end_offset])
            else:
                # Repeat the remaining segment enough times (ceiling division)
                # to cover everything past the first segment.
                remain_data_len = data_len - first_seg_len
                required_remain_seg_count = remain_data_len // remain_seg_len
                if remain_data_len % remain_seg_len != 0:
                    required_remain_seg_count += 1
                keystream = first_seg + remain_seg * required_remain_seg_count
                return bytesxor(data[start_offset:], keystream[start_offset:end_offset])
class DynamicMap(Cipher):
    """QMCv2 dynamic-mapping cipher: each mask byte is derived from the
    absolute stream offset and the key via the index/rotate formula below."""
    @staticmethod
    def cipher_name() -> str:
        return 'Dynamic Mapping'
    def yield_mask(self, data_offset: int, data_len: int):
        """Yield one mask byte per offset in [data_offset, data_offset+data_len)."""
        key: bytes = self._key
        key_len = len(key)
        for i in range(data_offset, data_offset + data_len):
            # Offsets past 0x7fff wrap modulo 0x7fff — part of the format's
            # defined mask schedule; do not "simplify".
            if i > 0x7fff:
                i %= 0x7fff
            idx = (i ** 2 + 71214) % key_len
            value = key[idx]
            # Combine the byte left- and right-rotated (mod 256) by the same
            # index-derived amount.
            rotate = ((idx & 7) + 4) % 8
            yield ((value << rotate) % 256) | ((value >> rotate) % 256)
    def decrypt(self, cipherdata: bytes, start_offset: int = 0) -> bytes:
        """XOR *cipherdata* with the mask stream starting at *start_offset*."""
        keystream = bytes(self.yield_mask(start_offset, len(cipherdata)))
        return bytesxor(cipherdata, keystream)
class ModifiedRC4(Cipher):
    @staticmethod
    def cipher_name() -> str:
        return 'Modified RC4'
    # Size (bytes) of the leading segment keyed per-byte via get_seg_skip.
    @staticmethod
    def first_segsize() -> int:
        return 128
    # Size (bytes) of each subsequent RC4-PRGA-keyed segment.
    @staticmethod
    def remain_segsize() -> int:
        return 5120
    @staticmethod
    def get_hash_base(key: bytes) -> int:
        """Fold the key bytes into a 32-bit product hash.

        Multiplies successive nonzero key bytes, truncated to 32 bits,
        stopping early if the product would wrap to zero or stop growing.
        """
        hash_base = 1
        key_len = len(key)
        for i in range(key_len):
            v: int = key[i]
            if v == 0:
                continue  # zero bytes would zero the product; skip them
            # Emulate unsigned 32-bit overflow of the reference implementation.
            next_hash: int = (hash_base * v) & 0xffffffff
            if next_hash == 0 or next_hash <= hash_base:
                break  # wrapped or stopped increasing: keep the last value
            hash_base = next_hash
        return hash_base
def __init__(self, key: bytes):
super().__init__(key)
key_len = len(key)
self._key_len = key_len
box: bytearray = bytearray(i % 256 for i in range(key_len))
j: int = 0
for i in range(key_len):
j = (j + box[i] + key[i % key_len]) % key_len
box[i], box[j] = box[j], box[i]
self._box: bytearray = box
self._hash_base = self.get_hash_base(key)
def get_seg_skip(self, v: int) -> int:
key: bytes = self._key
key_len: int = self._key_len
hash_: int = self._hash_base
seed: int = key[v % key_len]
idx: int = int(hash_ / ((v + 1) * seed) * 100)
return idx % key_len
def gen_first_seg(self,
data_offset: int,
data_len: int
) -> Generator[int, None, None]:
key = self._key
for i in range(data_offset, data_offset + data_len):
yield key[self.get_seg_skip(i)]
def gen_remain_seg(self,
data_offset: int,
data_len: int
) -> Generator[int, None, None]:
key_len = self._key_len
box = self._box.copy()
j, k = 0, 0
skip_len = (data_offset % self.remain_segsize()) + self.get_seg_skip(data_offset // self.remain_segsize())
for i in range(-skip_len, data_len):
j = (j + 1) % key_len
k = (box[j] + k) % key_len
box[j], box[k] = box[k], box[j]
if i >= 0:
yield box[(box[j] + box[k]) % key_len]
def decrypt(self, cipherdata: bytes, start_offset: int = 0) -> bytes:
first_segsize = self.first_segsize()
remain_segsize = self.remain_segsize()
gen_remain_seg = self.gen_remain_seg
pending = len(cipherdata)
done = 0
offset = int(start_offset)
keystream_buffer = bytearray(pending)
def mark(p: int) -> None:
nonlocal pending, done, offset
pending -= p
done += p
offset += p
if 0 <= offset < first_segsize:
blksize = pending
if blksize > first_segsize - offset:
blksize = first_segsize - offset
keystream_buffer[:blksize] = self.gen_first_seg(offset, blksize)
mark(blksize)
if pending <= 0:
return bytesxor(cipherdata, keystream_buffer)
if offset % remain_segsize != 0:
blksize = pending
if blksize > remain_segsize - (offset % remain_segsize):
blksize = remain_segsize - (offset % remain_segsize)
keystream_buffer[done:done + blksize] = gen_remain_seg(offset, blksize)
mark(blksize)
if pending <= 0:
return bytesxor(cipherdata, keystream_buffer)
while pending > remain_segsize:
keystream_buffer[done:done + remain_segsize] = gen_remain_seg(offset, remain_segsize)
mark(remain_segsize)
if pending > 0:
keystream_buffer[done:] = gen_remain_seg(offset, len(keystream_buffer[done:]))
return bytesxor(cipherdata, keystream_buffer)
| 32.85 | 114 | 0.586149 | 5,847 | 0.889954 | 1,299 | 0.197717 | 764 | 0.116286 | 0 | 0 | 177 | 0.026941 |
0f49b746e381f08fdc32de7957fed297c294cb90 | 1,797 | py | Python | src/process_user_input.py | AndreasVikke/ComputerScience-Final | 52d09a5876bfde661a00736712db6e3d19be877d | [
"MIT"
] | 1 | 2021-01-15T11:23:20.000Z | 2021-01-15T11:23:20.000Z | src/process_user_input.py | AndreasVikke/ComputerScience-Final | 52d09a5876bfde661a00736712db6e3d19be877d | [
"MIT"
] | null | null | null | src/process_user_input.py | AndreasVikke/ComputerScience-Final | 52d09a5876bfde661a00736712db6e3d19be877d | [
"MIT"
] | null | null | null | """
Processes user input from slack
:license: MIT
"""
import json
from typing import Dict
from src.modules.user_input_global import UserInputGlobal
from src.modules.user_input_handle_block_action import UserInputHandleBlockAction
from src.modules.user_input_handle_view_submission import UserInputHandleViewSubmission
def process(event, context):
    '''
    AWS Serverless entry point.
    -
    :param event: AWS event payload
    :param context: AWS Lambda runtime context
    :return: status dict produced by :func:`process_template`
    '''
    print("Context: ", context)
    result = process_template(event)
    return result
def process_template(event: Dict, test: bool = False) -> Dict:
    '''
    Template processor: dispatch a Slack interaction payload to its handler.
    -
    :param event: AWS event; the Slack payload is the JSON string under
        ``event['body']['payload']``
    :param test: forwarded to the handlers to enable test mode
    :return: status dict describing the outcome
    '''
    print("Event: ", event)
    response = event['body']['payload']
    payload = json.loads(response)

    user_input_global = UserInputGlobal(payload, test)
    payload_type = user_input_global.get_payload_type()

    if payload_type == 'block_actions':
        UserInputHandleBlockAction(payload, test).handle_block_action()
        update = {'statuscode': 200}
    elif payload['type'] == 'view_submission':
        update = UserInputHandleViewSubmission(payload, test).handle_view_submission()
    else:
        # Previously `update` was left unbound for any other payload type,
        # which raised UnboundLocalError below; report it explicitly instead.
        update = {'statuscode': 400}
    print('UPDATE: ', update)
    return update
| 30.982759 | 88 | 0.694491 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 866 | 0.481914 |
0f4a2c92b201d6a14dd11e24d76deccb2e16f0c1 | 15,082 | py | Python | cflearn/api/ml/interface.py | carefree0910/carefree-learn | 2043812afbe9c56f01ec1639961736313ee062ba | [
"MIT"
] | 400 | 2020-07-05T18:55:49.000Z | 2022-02-21T02:33:08.000Z | cflearn/api/ml/interface.py | carefree0910/carefree-learn | 2043812afbe9c56f01ec1639961736313ee062ba | [
"MIT"
] | 82 | 2020-08-01T13:29:38.000Z | 2021-10-09T07:13:44.000Z | cflearn/api/ml/interface.py | carefree0910/carefree-learn | 2043812afbe9c56f01ec1639961736313ee062ba | [
"MIT"
] | 34 | 2020-07-05T21:15:34.000Z | 2021-12-20T08:45:17.000Z | import os
import json
import shutil
import numpy as np
from typing import Any
from typing import Dict
from typing import List
from typing import Type
from typing import Tuple
from typing import Union
from typing import Callable
from typing import Optional
from typing import NamedTuple
from tqdm.autonotebook import tqdm
from cfdata.tabular import TabularData
from cftool.ml import ModelPattern
from cftool.ml import EnsemblePattern
from cftool.dist import Parallel
from cftool.misc import update_dict
from cftool.misc import shallow_copy_dict
from cftool.ml.utils import patterns_type
from cftool.ml.utils import Comparer
from cftool.ml.utils import Estimator
from .pipeline import SimplePipeline
from .pipeline import CarefreePipeline
from ...data import MLData
from ...data import MLInferenceData
from ...trainer import get_sorted_checkpoints
from ...constants import SCORES_FILE
from ...constants import WARNING_PREFIX
from ...constants import CHECKPOINTS_FOLDER
from ...constants import ML_PIPELINE_SAVE_NAME
from ...dist.ml import Experiment
from ...dist.ml import ExperimentResults
from ...misc.toolkit import to_2d
from ...misc.toolkit import get_latest_workplace
from ...models.ml.protocol import MLCoreProtocol
def register_core(name: str) -> Callable[[Type], Type]:
    """Return a class decorator registering its target as an ML core under ``name``."""
    decorator = MLCoreProtocol.register(name)
    return decorator
# Canonical pipeline collection: model identifier -> fitted pipelines.
pipelines_type = Dict[str, List[SimplePipeline]]
# Any shape `_to_pipelines` accepts and normalises into `pipelines_type`.
various_pipelines_type = Union[
    SimplePipeline,
    List[SimplePipeline],
    Dict[str, SimplePipeline],
    pipelines_type,
]
def _to_pipelines(pipelines: various_pipelines_type) -> pipelines_type:
    """Normalise any accepted pipeline collection into a dict of lists.

    Dict values that are single pipelines get wrapped in one-element lists;
    bare pipelines (or lists of them) are grouped by their model identifier.
    """
    if isinstance(pipelines, dict):
        return {
            key: value if isinstance(value, list) else [value]
            for key, value in pipelines.items()
        }
    candidates = pipelines if isinstance(pipelines, list) else [pipelines]
    grouped: pipelines_type = {}
    for candidate in candidates:
        assert candidate.model is not None
        grouped.setdefault(candidate.model.__identifier__, []).append(candidate)
    return grouped
def evaluate(
    data: Union[MLData, MLInferenceData],
    *,
    metrics: Union[str, List[str]],
    metric_configs: Optional[Union[Dict[str, Any], List[Dict[str, Any]]]] = None,
    contains_labels: bool = True,
    pipelines: Optional[various_pipelines_type] = None,
    predict_config: Optional[Dict[str, Any]] = None,
    other_patterns: Optional[Dict[str, patterns_type]] = None,
    comparer_verbose_level: Optional[int] = 1,
) -> Comparer:
    """Evaluate pipelines and/or external patterns on ``data`` with ``metrics``.

    :param data: dataset to evaluate on; ground truth comes from ``y_train``
        or, if that is None, is read from the ``x_train`` file path.
    :param metrics: metric name or list of metric names.
    :param metric_configs: per-metric config(s), matched index-wise to ``metrics``.
    :param contains_labels: must be True — evaluation needs ground truth.
    :param pipelines: pipelines to evaluate (any shape accepted by
        ``_to_pipelines``); optional if ``other_patterns`` is given.
    :param predict_config: kwargs forwarded to ``pipeline.to_pattern``.
    :param other_patterns: extra, pre-built patterns to include.
    :param comparer_verbose_level: verbosity for the final comparison.
    :return: the :class:`Comparer` holding all evaluation results.
    :raises ValueError: on missing labels/pipelines or unsupported inputs.
    """
    if not contains_labels:
        err_msg = "`cflearn.evaluate` must be called with `contains_labels = True`"
        raise ValueError(err_msg)
    if metric_configs is None:
        metric_configs = [{} for _ in range(len(metrics))]
    patterns = {}
    x, y = data.x_train, data.y_train
    if pipelines is None:
        # Without pipelines we need both labels and external patterns.
        msg = None
        if y is None:
            msg = "either `pipelines` or `y` should be provided"
        if other_patterns is None:
            msg = "either `pipelines` or `other_patterns` should be provided"
        if msg is not None:
            raise ValueError(msg)
    else:
        pipelines = _to_pipelines(pipelines)
        # get data
        # TODO : different pipelines may have different labels
        if y is not None:
            y = to_2d(y)
        else:
            # Labels not provided directly: read & transform them from file
            # through the first pipeline's carefree data object.
            if not isinstance(x, str):
                raise ValueError("`x` should be str when `y` is not provided")
            data_pipeline = list(pipelines.values())[0][0]
            if not isinstance(data_pipeline, CarefreePipeline):
                raise ValueError("only `CarefreePipeline` can handle file inputs")
            cf_data = data_pipeline.cf_data
            assert cf_data is not None
            x, y = cf_data.read_file(x, contains_labels=contains_labels)
            y = cf_data.transform(x, y).y
        # get metrics
        if predict_config is None:
            predict_config = {}
        predict_config.setdefault("contains_labels", contains_labels)
        for name, pipeline_list in pipelines.items():
            patterns[name] = [
                pipeline.to_pattern(**predict_config) for pipeline in pipeline_list
            ]
    if other_patterns is not None:
        # Merge user-supplied patterns; pipeline-derived entries win.
        for other_name in other_patterns.keys():
            if other_name in patterns:
                print(
                    f"{WARNING_PREFIX}'{other_name}' is found in "
                    "`other_patterns`, it will be overwritten"
                )
        update_dict(other_patterns, patterns)
    # Normalise metrics / metric configs into parallel lists.
    if isinstance(metrics, list):
        metrics_list = metrics
    else:
        assert isinstance(metrics, str)
        metrics_list = [metrics]
    if isinstance(metric_configs, list):
        metric_configs_list = metric_configs
    else:
        assert isinstance(metric_configs, dict)
        metric_configs_list = [metric_configs]
    estimators = [
        Estimator(metric, metric_config=metric_config)
        for metric, metric_config in zip(metrics_list, metric_configs_list)
    ]
    comparer = Comparer(patterns, estimators)
    comparer.compare(data, y, verbose_level=comparer_verbose_level)
    return comparer
def task_loader(
    workplace: str,
    pipeline_base: Type[SimplePipeline] = CarefreePipeline,
    compress: bool = True,
) -> SimplePipeline:
    """Load the pipeline exported inside ``workplace`` via ``pipeline_base``."""
    loaded = pipeline_base.load(
        export_folder=os.path.join(workplace, ML_PIPELINE_SAVE_NAME),
        compress=compress,
    )
    assert isinstance(loaded, SimplePipeline)
    return loaded
def load_experiment_results(
    results: ExperimentResults,
    pipeline_base: Type[SimplePipeline],
) -> pipelines_type:
    """Load every task pipeline in ``results``, grouped per model and ordered
    by repetition index."""
    by_model: Dict[str, Dict[int, SimplePipeline]] = {}
    pairs = list(zip(results.workplaces, results.workplace_keys))
    for workplace, (model, str_i) in tqdm(pairs, desc="load"):
        by_model.setdefault(model, {})[int(str_i)] = task_loader(workplace, pipeline_base)
    return {
        model: [loaded[i] for i in sorted(loaded)]
        for model, loaded in by_model.items()
    }
class RepeatResult(NamedTuple):
    """Bundle returned by `repeat_with`."""
    # Tabular data object of the (carefree) pipelines, if available.
    data: Optional[TabularData]
    # The distributed experiment; None for sequential runs.
    experiment: Optional[Experiment]
    # model name -> fitted pipelines (one per repetition).
    pipelines: Optional[Dict[str, List[SimplePipeline]]]
    # model name -> inference patterns (one per repetition).
    patterns: Optional[Dict[str, List[ModelPattern]]]
def repeat_with(
    data: MLData,
    *,
    pipeline_base: Type[SimplePipeline] = CarefreePipeline,
    workplace: str = "_repeat",
    models: Union[str, List[str]] = "fcnn",
    model_configs: Optional[Dict[str, Dict[str, Any]]] = None,
    predict_config: Optional[Dict[str, Any]] = None,
    sequential: Optional[bool] = None,
    num_jobs: int = 1,
    num_repeat: int = 5,
    return_patterns: bool = True,
    compress: bool = True,
    use_tqdm: bool = True,
    available_cuda_list: Optional[List[int]] = None,
    resource_config: Optional[Dict[str, Any]] = None,
    task_meta_kwargs: Optional[Dict[str, Any]] = None,
    is_fix: bool = False,
    **kwargs: Any,
) -> RepeatResult:
    """Train every model in ``models`` ``num_repeat`` times on ``data``.

    Runs either sequentially (in-process) or distributed through
    :class:`Experiment`.  With ``is_fix`` set, only repetitions whose
    previous run looks broken (missing or incomplete checkpoints) are
    re-trained and the existing workplace is kept.

    :return: a :class:`RepeatResult` with the data object, the experiment
        (distributed runs only), fitted pipelines and inference patterns.
    """
    if os.path.isdir(workplace) and not is_fix:
        print(f"{WARNING_PREFIX}'{workplace}' already exists, it will be erased")
        shutil.rmtree(workplace)
    kwargs = shallow_copy_dict(kwargs)
    if isinstance(models, str):
        models = [models]
    if sequential is None:
        sequential = num_jobs <= 1
    if model_configs is None:
        model_configs = {}
    def is_buggy(i_: int, model_: str) -> bool:
        # A repetition is "buggy" when its workplace lacks a finished
        # checkpoint; used by `is_fix` to decide what needs re-running.
        i_workplace = os.path.join(workplace, model_, str(i_))
        i_latest_workplace = get_latest_workplace(i_workplace)
        if i_latest_workplace is None:
            return True
        checkpoint_folder = os.path.join(i_latest_workplace, CHECKPOINTS_FOLDER)
        if not os.path.isfile(os.path.join(checkpoint_folder, SCORES_FILE)):
            return True
        if not get_sorted_checkpoints(checkpoint_folder):
            return True
        return False
    def fetch_config(core_name: str) -> Dict[str, Any]:
        # Build an isolated kwargs dict for one task of `core_name`.
        local_kwargs = shallow_copy_dict(kwargs)
        assert model_configs is not None
        local_core_config = model_configs.setdefault(core_name, {})
        local_kwargs["core_name"] = core_name
        local_kwargs["core_config"] = shallow_copy_dict(local_core_config)
        return shallow_copy_dict(local_kwargs)
    pipelines_dict: Optional[Dict[str, List[SimplePipeline]]] = None
    if sequential:
        # --- sequential (in-process) branch ---
        cuda = kwargs.pop("cuda", None)
        experiment = None
        tqdm_settings = kwargs.setdefault("tqdm_settings", {})
        tqdm_settings["tqdm_position"] = 2
        if not return_patterns:
            print(
                f"{WARNING_PREFIX}`return_patterns` should be "
                "True when `sequential` is True, because patterns "
                "will always be generated"
            )
            return_patterns = True
        pipelines_dict = {}
        if not use_tqdm:
            iterator = models
        else:
            iterator = tqdm(models, total=len(models), position=0)
        for model in iterator:
            local_pipelines = []
            sub_iterator = range(num_repeat)
            if use_tqdm:
                sub_iterator = tqdm(
                    sub_iterator,
                    total=num_repeat,
                    position=1,
                    leave=False,
                )
            for i in sub_iterator:
                if is_fix and not is_buggy(i, model):
                    continue  # this repetition already finished successfully
                local_config = fetch_config(model)
                local_workplace = os.path.join(workplace, model, str(i))
                local_config.setdefault("workplace", local_workplace)
                m = pipeline_base(**local_config)
                m.fit(data, cuda=cuda)
                local_pipelines.append(m)
            pipelines_dict[model] = local_pipelines
    else:
        # --- distributed (Experiment-based) branch ---
        if num_jobs <= 1:
            print(
                f"{WARNING_PREFIX}we suggest setting `sequential` "
                f"to True when `num_jobs` is {num_jobs}"
            )
        # data
        data_folder = Experiment.dump_data_bundle(
            data.x_train,
            data.y_train,
            data.x_valid,
            data.y_valid,
            workplace=workplace,
        )
        # experiment
        experiment = Experiment(
            num_jobs=num_jobs,
            available_cuda_list=available_cuda_list,
            resource_config=resource_config,
        )
        for model in models:
            for i in range(num_repeat):
                if is_fix and not is_buggy(i, model):
                    continue  # this repetition already finished successfully
                local_config = fetch_config(model)
                experiment.add_task(
                    model=model,
                    compress=compress,
                    root_workplace=workplace,
                    workplace_key=(model, str(i)),
                    config=local_config,
                    data_folder=data_folder,
                    **(task_meta_kwargs or {}),
                )
        # finalize
        results = experiment.run_tasks(use_tqdm=use_tqdm)
        if return_patterns:
            pipelines_dict = load_experiment_results(results, pipeline_base)
    patterns = None
    if return_patterns:
        assert pipelines_dict is not None
        if predict_config is None:
            predict_config = {}
        patterns = {
            model: [m.to_pattern(**predict_config) for m in pipelines]
            for model, pipelines in pipelines_dict.items()
        }
    # Recover the carefree data object (if the pipelines carry one).
    cf_data = None
    if patterns is not None:
        m = patterns[models[0]][0].model
        if isinstance(m, CarefreePipeline):
            cf_data = m.cf_data
    return RepeatResult(cf_data, experiment, pipelines_dict, patterns)
def pack_repeat(
    workplace: str,
    pipeline_base: Type[SimplePipeline],
    *,
    num_jobs: int = 1,
) -> List[str]:
    """Pack the latest checkpoint of every model folder under ``workplace``."""
    latest_workplaces = [
        get_latest_workplace(os.path.join(workplace, name))
        for name in sorted(os.listdir(workplace))
        if os.path.isdir(os.path.join(workplace, name))
    ]
    grouped = Parallel(num_jobs).grouped(pipeline_base.pack, latest_workplaces)
    return sum(grouped.ordered_results, [])
def pick_from_repeat_and_pack(
    workplace: str,
    pipeline_base: Type[SimplePipeline],
    *,
    num_pick: int,
    num_jobs: int = 1,
) -> List[str]:
    """Pack only the ``num_pick`` best-scoring repetitions under ``workplace``."""
    scored = []
    for name in sorted(os.listdir(workplace)):
        candidate = os.path.join(workplace, name)
        if not os.path.isdir(candidate):
            continue
        latest = get_latest_workplace(candidate)
        assert latest is not None, "internal error occurred"
        scores_file = os.path.join(latest, CHECKPOINTS_FOLDER, SCORES_FILE)
        with open(scores_file, "r") as f:
            best_score = float(max(json.load(f).values()))
        scored.append((best_score, latest))
    # Highest score first; keep the top `num_pick` workplaces.
    picked = [pair[1] for pair in sorted(scored, reverse=True)[:num_pick]]
    grouped = Parallel(num_jobs).grouped(pipeline_base.pack, picked)
    return sum(grouped.ordered_results, [])
def make_toy_model(
    model: str = "fcnn",
    config: Optional[Dict[str, Any]] = None,
    *,
    pipeline_type: str = "ml.carefree",
    is_classification: bool = False,
    cf_data_config: Optional[Dict[str, Any]] = None,
    data_tuple: Optional[Tuple[np.ndarray, np.ndarray]] = None,
    cuda: Optional[str] = None,
) -> SimplePipeline:
    """Construct and fit a minimal pipeline, mainly for quick tests & demos.

    :param model: name of the core model to use.
    :param config: extra pipeline config, merged over the toy defaults.
    :param pipeline_type: registered pipeline type to instantiate.
    :param is_classification: build a two-class toy task instead of regression.
    :param cf_data_config: extra carefree-data config, merged over defaults.
    :param data_tuple: optional (x, y) arrays replacing the built-in toy data.
    :param cuda: device spec forwarded to ``fit``.
    :return: the fitted pipeline.
    """
    if config is None:
        config = {}
    # Resolve the (toy) dataset.
    if data_tuple is not None:
        x_np, y_np = data_tuple
    elif is_classification:
        x_np, y_np = np.array([[0], [1]]), np.array([[1], [0]])
    else:
        x_np, y_np = np.array([[0]]), np.array([[1.0]])
    # Tiny architecture for the network-based cores.
    core_config: Dict[str, Any] = {}
    if model in ("fcnn", "tree_dnn"):
        core_config = {
            "hidden_units": [100],
            "batch_norm": False,
            "dropout": 0.0,
        }
    toy_defaults = {
        "core_name": model,
        "core_config": core_config,
        "output_dim": 1 + int(is_classification),
        "num_epoch": 2,
        "max_epoch": 4,
    }
    merged_config = update_dict(config, toy_defaults)
    pipeline = SimplePipeline.make(pipeline_type, merged_config)
    assert isinstance(pipeline, SimplePipeline)
    if cf_data_config is None:
        cf_data_config = {}
    cf_data_config = update_dict(
        cf_data_config,
        dict(
            valid_columns=list(range(x_np.shape[1])),
            label_process_method="identical",
        ),
    )
    toy_data = MLData.with_cf_data(
        x_np,
        y_np,
        is_classification=is_classification,
        cf_data_config=cf_data_config,
        valid_split=0.0,
    )
    pipeline.fit(toy_data, cuda=cuda)
    return pipeline
# Explicit public API of this module.
__all__ = [
    "register_core",
    "evaluate",
    "task_loader",
    "load_experiment_results",
    "repeat_with",
    "pack_repeat",
    "pick_from_repeat_and_pack",
    "make_toy_model",
    "ModelPattern",
    "EnsemblePattern",
]
| 34.045147 | 87 | 0.630553 | 211 | 0.01399 | 0 | 0 | 0 | 0 | 0 | 0 | 1,175 | 0.077907 |
0f4a41bba9d795071136091d1dc496c27479a9f9 | 1,163 | py | Python | netta/a.py | zhangdafu12/web | 64ce7db4697167215bf9ee25cd5bdc0bd15b5831 | [
"MIT"
] | null | null | null | netta/a.py | zhangdafu12/web | 64ce7db4697167215bf9ee25cd5bdc0bd15b5831 | [
"MIT"
] | 1 | 2020-03-30T09:26:59.000Z | 2020-03-30T09:26:59.000Z | netta/a.py | zhangdafu12/web | 64ce7db4697167215bf9ee25cd5bdc0bd15b5831 | [
"MIT"
] | null | null | null | # -*- encoding:utf8 -*-
# author: Shulei
# e-mail: 1191543592@qq.com
# time: 2019/4/2 10:00
import time
# 一个描述器就是一个实现了三个核心的属性访问操作(get、set、delete)的类,分别为__get__(), __set__(),__delete__()
# 这些方法接受一个实例作为输入,之后相应的操作实例底层的字典, 为了使用一个描述器,需要将这个描述器的实例作为类属性放到一个类的定义中Dadej
# Descriptors are class attributes (like properties or methods) with any of the following special methods:
# __get__ (non-data descriptor method, for example on a method/function)
# __set__ (data descriptor method, for example on a property instance)
# __delete__ (data descriptor method)
class Celsius:
    """Data descriptor that stores a float and logs every access.

    The value lives on the descriptor instance itself, so it is shared by
    all instances of the owning class, and ``__delete__`` does not actually
    remove it — it only prints diagnostic information.
    """

    def __init__(self, value=0.0):
        self.value = float(value)

    def __get__(self, obj, objtype):
        print(obj)
        print(objtype)
        print("执行的是get")
        return self.value

    def __set__(self, obj, new_value):
        self.value = float(new_value)

    def __delete__(self, obj):
        print("删除。。。。")
        print(self)
        print(obj)
        print(obj.__dict__)
class Temperature:
    # Class-level data descriptor: every access to `celsius` goes through
    # Celsius.__get__/__set__/__delete__, and the value is shared by all
    # Temperature instances (it is stored on the descriptor object).
    celsius = Celsius()
# Demo: each attribute access below triggers the descriptor's hooks.
t = Temperature()
print(t.celsius)
# Celsius.__delete__ only prints — it does not remove the stored value,
# so the following access still succeeds.
del t.celsius
print(t.celsius)
print(__name__)
if __name__ == '__main__':
    print("这里执行了")
0f4a88ad12911b2f8374918ec047ffaa3dd9469e | 21,511 | py | Python | ensembler/visualisation/plotConveyorBelt.py | philthiel/Ensembler | 943efac3c673eb40165927e81336386788e3a19f | [
"MIT"
] | 39 | 2020-05-19T08:45:27.000Z | 2022-03-17T16:58:34.000Z | ensembler/visualisation/plotConveyorBelt.py | SchroederB/Ensembler | 943efac3c673eb40165927e81336386788e3a19f | [
"MIT"
] | 38 | 2020-06-18T13:02:18.000Z | 2022-02-25T14:29:17.000Z | ensembler/visualisation/plotConveyorBelt.py | SchroederB/Ensembler | 943efac3c673eb40165927e81336386788e3a19f | [
"MIT"
] | 13 | 2020-05-19T08:45:57.000Z | 2022-03-10T16:18:20.000Z | import matplotlib.patches as patches
import matplotlib.patheffects as path_effects
import matplotlib.pyplot as plt
import numpy as np
def calc_lam(CapLam, i=0, numsys=8, w=0.1):
    """Map the conveyor-belt position onto replica ``i``'s lambda in [0, 1].

    The angle of replica ``i`` (out of ``numsys`` equally spaced replicas)
    is wrapped to [0, 2*pi) and folded onto [0, pi] before normalising by
    pi.  ``w`` is accepted for backwards compatibility but unused here.
    """
    angle = (CapLam + i * np.pi * 2.0 / numsys) % (2. * np.pi)
    if angle <= np.pi:
        return angle / np.pi
    return (2.0 * np.pi - angle) / np.pi
def drawCirc(ax, radius, centX, centY, angle_, theta2_, lineWidth=3, color_='black'):
    """Draw a circular arc with a triangular arrow head at its start point.

    :param ax: matplotlib axes to draw on
    :param radius: width/height of the arc's bounding box (matplotlib Arc units)
    :param centX: x coordinate of the arc center
    :param centY: y coordinate of the arc center
    :param angle_: rotation of the arc in degrees; the arrow head sits here
    :param theta2_: angular extent of the arc in degrees (from ``angle_``)
    :param lineWidth: line width of the arc
    :param color_: color of both the arc and the arrow head
    """
    # ========Line
    arc = patches.Arc([centX, centY], radius, radius, angle=angle_,
                      theta1=0, theta2=theta2_, capstyle='round', linestyle='-', lw=lineWidth, color=color_)
    ax.add_patch(arc)
    # ========Create the arrow head at the start of the arc
    begX = centX + (radius / 2) * np.cos((angle_) / 180 * np.pi)  # Do trig to determine head position
    begY = centY + (radius / 2) * np.sin((angle_) / 180 * np.pi)
    ax.add_patch(  # Create triangle as arrow head
        patches.RegularPolygon(
            (begX, begY),  # (x,y)
            3,  # number of vertices
            radius / 20,  # radius
            (180 + angle_) / 180 * np.pi,  # orientation
            color=color_
        )
    )
    # Make sure the whole circle fits into view.  Previously the upper x
    # limit was `centY + radius` (a typo mixing the y center into the x
    # limits), and the two calls were chained with `and`; both fixed.
    ax.set_xlim(centX - radius, centX + radius)
    ax.set_ylim(centY - radius, centY + radius)
def drawFunicular(x, y, CapLam=0.1, M=2, drawArrows=False):
    """Plot a compact conveyor-belt ("funicular") schematic of M replicas on
    top of the free energy profile given by (x, y).

    :param x: lambda values of the free energy profile
    :param y: corresponding free energy values (same length as x)
    :param CapLam: capital Lambda (belt position), expected in [0, 2*pi)
    :param M: number of replicas on the belt (the loops below assume M even)
    :param drawArrows: also draw arrows indicating the replicas' motion
    :return: the created matplotlib figure
    """
    # Figure geometry (inches); goldRat controls the aspect ratio.
    pSize = 2.009
    goldRat = 1.618
    lineWidth = 1
    # NOTE(review): this list is constructed but never used — presumably a
    # leftover from a shadow-effect experiment; confirm before removing.
    [path_effects.SimpleLineShadow(), path_effects.Normal()]
    fig = plt.figure(figsize=(pSize * goldRat, pSize))
    ax = fig.gca()
    fig.subplots_adjust(left=0.1, right=1.0-0.1, bottom=0.24, top=0.99)
    # Belt geometry: end-cap radius (rx, ry), vertical offset of the belt,
    # and the two horizontal rails (bottom/top) as 90-point polylines.
    rx=0.05
    ry=rx
    shifty=0.75/goldRat
    cvb_bot=np.zeros((90,2))
    cvb_bot[:,0]=np.linspace(calc_lam(CapLam, 1, numsys=2), 1.0-rx, 90)
    cvb_bot[:,1]=np.ones(90)*shifty
    cvb_top=np.zeros((90,2))
    cvb_top[:,0]=np.linspace(calc_lam(CapLam, 0, numsys=2), 1.0-rx, 90)
    cvb_top[:,1]=np.ones(90)*(shifty+2.0*ry)
    # Normalise the free energy profile to fit underneath the belt drawing.
    lamVals=x-x.min()
    lamVals/=lamVals.max()
    gVals=y-y.min()
    if gVals.max() != 0.0:
        gVals/=(2.0*gVals.max()*goldRat)
    else:
        gVals+=1/(2.0*goldRat)
    ax.plot(lamVals[2:], gVals[2:], 'k', lw=lineWidth)
    # NOTE(review): `l` and `numsys` appear unused below.
    l = CapLam
    numsys = M
    # Per replica: marker rotation (degrees) and height factor used on the
    # curved end-cap sections of the belt.
    rotation = []
    y = []
    for i in range(M):
        if calc_lam(CapLam, i, numsys=M) > rx and calc_lam(CapLam, i, numsys=M) < (1.0 - rx):
            rotation.append(45)
            y.append(1.0)
        elif calc_lam(CapLam, i, numsys=M) < rx:
            alpha = np.arcsin((rx - calc_lam(CapLam, i, numsys=M)) / rx)
            rotation.append(45 - alpha / np.pi * 180.0)
            y.append(np.cos(alpha))
        else:
            alpha = np.arcsin((rx - (1 - calc_lam(CapLam, i, numsys=M))) / rx)
            rotation.append(45 - alpha / np.pi * 180.0)
            y.append(np.cos(alpha))
    shiftMarker = 0.02 * np.sqrt(2)
    # Draw the belt rails and the right end cap (two concentric arcs).
    ax.plot(cvb_bot[:, 0], cvb_bot[:, 1], 'k', lw=lineWidth, zorder=1)
    ax.plot(cvb_top[:, 0], cvb_top[:, 1], 'k', lw=lineWidth, zorder=1)
    # ax.add_artist(patches.Arc((rx,shifty+ry), 2*rx, 2*ry, theta1=90, theta2=270, lw=lineWidth))
    ax.add_artist(patches.Arc((1.0 - rx, shifty + ry), 2 * rx, 2 * ry, theta1=270, theta2=90, lw=lineWidth))
    # ax.add_artist(patches.Arc((rx,shifty+ry), 1.4*rx, 1.4*ry, lw=lineWidth))
    ax.add_artist(patches.Arc((1.0 - rx, shifty + ry), 1.4 * rx, 1.4 * ry, lw=lineWidth))
    # ax.annotate(r'$\Lambda=0$', xy=(-0.01, shifty+ry), xytext=(-0.05, shifty+ry), va='center', ha='right', arrowprops=dict(arrowstyle='-'))
    # ax.annotate(r'$\Lambda=\frac{\pi}{2}$', xy=(0.5, shifty+2*ry+0.01), xytext=(0.5, shifty+2*ry+0.05), va='bottom', ha='center', arrowprops=dict(arrowstyle='-'))
    # ax.annotate(r'$\Lambda=\frac{3\pi}{2}$', xy=(0.5, shifty-0.01), xytext=(0.5, shifty-0.05), va='top', ha='center', arrowprops=dict(arrowstyle='-'))
    # ax.annotate(r'$\Lambda=\pi$', xy=(1.01, shifty+ry), xytext=(1.05, shifty+ry), va='center', ha='left', arrowprops=dict(arrowstyle='-'))
    # if np.fabs(rotation[0]-45)>0.0001:
    #     print(alpha)
    #     ax.annotate('Current state:\n$\Lambda={:.1f}$'.format(CapLam), xy=(calc_lam(CapLam, 0, numsys=M), shifty+ry+np.cos(alpha)*ry),
    #                 xytext=(calc_lam(CapLam, 0, numsys=M)-np.sin(alpha)*1.5*rx, shifty+(1+np.cos(alpha)*2.5)*ry),
    #                 arrowprops=dict(arrowstyle='<-', linewidth=3), va='center', ha='center', zorder=0)
    # else:
    #     ax.annotate('Current state:\n$\Lambda={:.1f}$'.format(CapLam), xy=(calc_lam(CapLam, 0, numsys=M), shifty+2.0*ry+shiftMarker),
    #                 xytext=(calc_lam(CapLam, 0, numsys=M), shifty+3.5*ry),
    #                 arrowprops=dict(arrowstyle='<-', linewidth=3), va='center', ha='center', zorder=0)
    # arrows in the conveyor belt
    # drawCirc(ax,rx*0.8,rx,shifty+ry,45,270, color_='red')
    drawCirc(ax, rx * 0.8, 1.0 - rx, shifty + ry, 225, 270, lineWidth=lineWidth, color_='red')
    # First half of the replicas rides on the top rail.
    for i in range(int(M / 2)):
        x = calc_lam(CapLam, i, numsys=M) - np.sqrt(1 - y[i] ** 2) * shiftMarker
        ax.add_patch(  # Create triangle as arrow head
            patches.RegularPolygon(
                (x, shifty + ry + y[i] * ry),  # (x,y)
                4,  # number of vertices
                0.02,  # radius
                rotation[i] / 180.0 * np.pi,  # orientation
                color='red',
                zorder=10
            )
        )
        ax.scatter(x, gVals[np.abs(lamVals - x).argmin()] + shiftMarker, s=30, marker='o', edgecolors='face', color='r',
                   zorder=10)
        if drawArrows:
            ax.annotate('', xy=(x, gVals[np.abs(lamVals - x).argmin()] + shiftMarker),
                        xytext=(x + 0.1, gVals[np.abs(lamVals - x - 0.1).argmin()] + shiftMarker),
                        arrowprops=dict(arrowstyle='<-', linewidth=lineWidth))
        ax.plot([x, x], [gVals[np.abs(lamVals - x).argmin()], shifty + ry + y[i] * ry], color='0.8', lw=lineWidth,
                zorder=0)
    # Second half of the replicas rides on the bottom rail.
    for i in range(int(M / 2)):
        x = calc_lam(CapLam, i + int(M / 2), numsys=M) - np.sqrt(1 - y[i] ** 2) * shiftMarker
        ax.add_patch(  # Create triangle as arrow head
            patches.RegularPolygon(
                (x, shifty),  # (x,y)
                4,  # number of vertices
                0.02,  # radius
                rotation[i] / 180.0 * np.pi,  # orientation
                color='red',
                zorder=10
            )
        )
        ax.plot([x, x], [gVals[np.abs(lamVals - x).argmin()], shifty + (1.0 - y[i]) * ry], color='0.8', lw=lineWidth,
                zorder=0)
        ax.scatter(x, gVals[np.abs(lamVals - x).argmin()] + shiftMarker, s=30, marker='o', edgecolors='face', color='r',
                   zorder=10)
        if drawArrows:
            ax.annotate('', xy=(x, gVals[np.abs(lamVals - x).argmin()] + shiftMarker),
                        xytext=(x - 0.1, gVals[np.abs(lamVals - x + 0.1).argmin()] + shiftMarker),
                        arrowprops=dict(arrowstyle='<-', linewidth=lineWidth))
    # Axis formatting: only the lambda axis is labelled.
    ax.set_xlim(-0.1, 1.1)
    ax.set_ylim(0, 1.2 / goldRat)
    ax.set_xticks([0.0, 0.5, 1.0])
    ax.set_xticklabels(['0\n(A)', r'$\sfrac{1}{2}$', '1\n(B)'])
    # ax.text(lamVals[-1], gVals[-1]-0.05, 'Free energy profile', ha='right', va='top')
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
    ax.set_yticks([])
    ax.spines['left'].set_color('None')
    ax.spines['right'].set_color('None')
    ax.spines['top'].set_color('None')
    ax.annotate('', xy=(0, 0),
                xytext=(0, 0.5 / goldRat), ha='center', va='bottom',
                arrowprops=dict(arrowstyle='<|-', facecolor='k', linewidth=1.5))
    ax.text(-0.025, 0.25 / goldRat, '$G(\lambda)$', ha='right', va='center', fontsize=14)
    ax.text(1.025, 0.0, '$\lambda$', ha='left', va='center', fontsize=14)
    return fig
def plotEnsembler(x, y, CapLam=0.1, M=8, drawArrows=False):
    """Plot the full conveyor-belt schematic of M replicas above the
    simulated free energy landscape (x, y).

    :param x: lambda values of the free energy profile
    :param y: corresponding free energy values (same length as x)
    :param CapLam: capital Lambda (belt position), expected in [0, 2*pi)
    :param M: number of replicas on the belt
    :param drawArrows: also annotate the current state and motion arrows
    :return: (figure, axes) of the created plot
    """
    # Figure geometry (inches); goldRat controls the aspect ratio.
    pSize = 6.027
    goldRat = 1.70
    lineWidth = 1
    # NOTE(review): this list is constructed but never used — presumably a
    # leftover from a shadow-effect experiment; confirm before removing.
    [path_effects.SimpleLineShadow(), path_effects.Normal()]
    fig = plt.figure(figsize=(pSize * goldRat, pSize))
    ax = fig.gca()
    fig.subplots_adjust(left=0.1, right=1.0-0.1, bottom=0.25, top=0.964)
    # Belt geometry: end-cap radius (rx, ry), vertical offset of the belt,
    # and the two horizontal rails (bottom/top) as 90-point polylines.
    rx=0.05
    ry=rx
    shifty=0.75/goldRat
    cvb_bot=np.zeros((90,2))
    cvb_bot[:,0]=np.linspace(rx, 1.0-rx, 90)
    cvb_bot[:,1]=np.ones(90)*shifty
    cvb_top=np.zeros((90,2))
    cvb_top[:,0]=np.linspace(rx, 1.0-rx, 90)
    cvb_top[:,1]=np.ones(90)*(shifty+2.0*ry)
    # Normalise the free energy profile to fit underneath the belt drawing.
    lamVals=x-x.min()
    lamVals/=lamVals.max()
    gVals=y-y.min()
    if gVals.max() != 0.0:
        gVals/=(2.0*gVals.max()*goldRat)
    else:
        gVals+=1/(2.0*goldRat)
    ax.plot(lamVals[2:], gVals[2:], 'k', lw=lineWidth)
    # NOTE(review): `l` and `numsys` appear unused below.
    l = CapLam
    numsys = M
    rotation = []
    y = []
    # replicas boxes: per replica, the marker rotation (degrees) and the
    # height factor used on the curved end-cap sections of the belt.  The
    # direction of the rotation offset depends on whether the replica is on
    # the top (angle < pi) or the bottom half of the belt.
    for i in range(M):
        if calc_lam(CapLam, i, numsys=M) > rx and calc_lam(CapLam, i, numsys=M) < (1.0 - rx):
            rotation.append(45)
            y.append(1.0)
        elif calc_lam(CapLam, i, numsys=M) < rx:
            alpha = np.arcsin((rx - calc_lam(CapLam, i, numsys=M)) / rx)
            if (CapLam + i * 2 * np.pi / float(M)) % (2. * np.pi) < np.pi:
                rotation.append(45 + alpha / np.pi * 180.0)
            else:
                rotation.append(45 - alpha / np.pi * 180.0)
            y.append(np.cos(alpha))
        else:
            alpha = np.arcsin((rx - (1 - calc_lam(CapLam, i, numsys=M))) / rx)
            if (CapLam + i * 2 * np.pi / float(M)) % (2. * np.pi) < np.pi:
                rotation.append(45 - alpha / np.pi * 180.0)
            else:
                rotation.append(45 + alpha / np.pi * 180.0)
            y.append(np.cos(alpha))
    shiftMarker = 0.02 * np.sqrt(2)
    # funicular: rails, both end caps (two concentric arcs each) and the
    # capital-Lambda orientation labels around the belt.
    ax.plot(cvb_bot[:, 0], cvb_bot[:, 1], 'k', lw=lineWidth)
    ax.plot(cvb_top[:, 0], cvb_top[:, 1], 'k', lw=lineWidth)
    ax.add_artist(patches.Arc((rx, shifty + ry), 2 * rx, 2 * ry, theta1=90, theta2=270, lw=lineWidth))
    ax.add_artist(patches.Arc((1.0 - rx, shifty + ry), 2 * rx, 2 * ry, theta1=270, theta2=90, lw=lineWidth))
    ax.add_artist(patches.Arc((rx, shifty + ry), 1.4 * rx, 1.4 * ry, lw=lineWidth))
    ax.add_artist(patches.Arc((1.0 - rx, shifty + ry), 1.4 * rx, 1.4 * ry, lw=lineWidth))
    ax.annotate(r'$\Lambda=0$', xy=(0.01, shifty + ry), xytext=(-0.05, shifty + ry), va='center', ha='right',
                fontsize='small', arrowprops=dict(arrowstyle='-', linewidth=lineWidth))
    ax.annotate(r'$\Lambda=\frac{\pi}{2}$', xy=(0.5, shifty + 2 * ry + 0.01), xytext=(0.5, shifty + 2 * ry + 0.05),
                va='bottom', ha='center', fontsize='small', arrowprops=dict(arrowstyle='-', linewidth=lineWidth))
    ax.annotate(r'$\Lambda=\frac{3\pi}{2}$', xy=(0.5, shifty - 0.01), xytext=(0.5, shifty - 0.05), va='top',
                ha='center', fontsize='small', arrowprops=dict(arrowstyle='-', linewidth=lineWidth))
    ax.annotate(r'$\Lambda=\pi$', xy=(.99, shifty + ry), xytext=(1.05, shifty + ry), va='center', ha='left',
                fontsize='small', arrowprops=dict(arrowstyle='-', linewidth=lineWidth))
    if drawArrows:
        # Label the current state; placement depends on whether replica 0
        # sits on a curved end cap (alpha from the loop above) or a rail.
        if np.fabs(rotation[0] - 45) > 0.0001:
            ax.annotate('Current state:\n$\Lambda={:.1f}$'.format(CapLam),
                        xy=(calc_lam(CapLam, 0, numsys=M), shifty + ry + np.cos(alpha) * (ry + shiftMarker)),
                        xytext=(
                            calc_lam(CapLam, 0, numsys=M) - np.sin(alpha) * 2 * rx, shifty + (1 + np.cos(alpha) * 5) * ry),
                        fontsize='small',
                        arrowprops=dict(arrowstyle='<-', linewidth=1.0, shrinkA=0.0), va='top', ha='center', zorder=0,
                        bbox=dict(pad=-.1, lw=0.0, color='None'))
        else:
            ax.annotate('Current state:\n$\Lambda={:.1f}$'.format(CapLam),
                        xy=(calc_lam(CapLam, 0, numsys=M), shifty + 2.0 * ry + shiftMarker),
                        xytext=(calc_lam(CapLam, 0, numsys=M), shifty + 6 * ry),
                        arrowprops=dict(arrowstyle='<-', linewidth=1.0, shrinkA=0.0), fontsize='small', va='top',
                        ha='center', zorder=0, bbox=dict(pad=-.1, lw=0.0, color='None'))
    # arrows in the conveyor belt
    drawCirc(ax, rx * 0.8, rx, shifty + ry, 45, 270, lineWidth=1.0, color_='red')
    drawCirc(ax, rx * 0.8, 1.0 - rx, shifty + ry, 225, 270, lineWidth=1.0, color_='red')
    # lines and markers for Epot
    for i in range(M):
        x = calc_lam(CapLam, i, numsys=M)
        # NOTE(review): the two branches below modify `rx` (the belt cap
        # radius) rather than `x`; by analogy with drawFunicular this looks
        # like it was meant to shift `x` — confirm before changing.
        if x < rx:
            rx -= np.sqrt(1 - y[i] ** 2) * shiftMarker
        elif x > 1 - rx:
            rx += np.sqrt(1 - y[i] ** 2) * shiftMarker
        if (CapLam + i * 2 * np.pi / float(M)) % (2. * np.pi) < np.pi:
            # Replica on the top half of the belt.
            ax.add_patch(  # Create triangle as arrow head
                patches.RegularPolygon(
                    (x, shifty + ry + y[i] * ry + y[i] * shiftMarker),  # (x,y)
                    4,  # number of vertices
                    0.02,  # radius
                    rotation[i] / 180.0 * np.pi,  # orientation
                    color='red',
                    zorder=10
                )
            )
            ax.scatter(x, gVals[np.abs(lamVals - x).argmin()] + shiftMarker, s=30, marker='o', edgecolors='face',
                       color='r', zorder=10)
            if drawArrows:
                ax.annotate('', xy=(x, gVals[np.abs(lamVals - x).argmin()] + shiftMarker),
                            xytext=(x + 0.1, gVals[np.abs(lamVals - x - 0.1).argmin()] + shiftMarker),
                            arrowprops=dict(arrowstyle='<-', linewidth=lineWidth))
            ax.plot([x, x], [gVals[np.abs(lamVals - x).argmin()], shifty + ry + y[i] * ry + y[i] * shiftMarker],
                    color='0.8', lw=lineWidth, zorder=0)
        else:
            # Replica on the bottom half of the belt.
            ax.add_patch(  # Create triangle as arrow head
                patches.RegularPolygon(
                    (x, shifty - y[i] * shiftMarker),  # (x,y)
                    4,  # number of vertices
                    0.02,  # radius
                    rotation[i] / 180.0 * np.pi,  # orientation
                    color='red',
                    zorder=10
                )
            )
            ax.plot([x, x], [gVals[np.abs(lamVals - x).argmin()], shifty + (1.0 - y[i]) * ry - y[i] * shiftMarker],
                    color='0.8', lw=lineWidth, zorder=0)
            ax.scatter(x, gVals[np.abs(lamVals - x).argmin()] + shiftMarker, s=30, marker='o', edgecolors='face',
                       color='r', zorder=10)
            if drawArrows:
                ax.annotate('', xy=(x, gVals[np.abs(lamVals - x).argmin()] + shiftMarker),
                            xytext=(x - 0.1, gVals[np.abs(lamVals - x + 0.1).argmin()] + shiftMarker),
                            arrowprops=dict(arrowstyle='<-', linewidth=lineWidth))
    # formatting
    ax.set_xlim(-0.1, 1.1)
    ax.set_ylim(0, 1.2 / goldRat)
    ax.set_xticks([0.0, 0.5, 1.0])
    ax.set_xticklabels(['0\n(A)', r'$\sfrac{1}{2}$', '1\n(B)'])
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
    ax.set_yticks([])
    ax.spines['left'].set_color('None')
    ax.spines['right'].set_color('None')
    ax.spines['top'].set_color('None')
    ax.set_title("Conveyor Belt over simulated Free Energy Landscape")
    ax.annotate('', xy=(0, 0),
                xytext=(0, 0.5 / goldRat), ha='center', va='bottom',
                arrowprops=dict(arrowstyle='<|-', facecolor='k', linewidth=1.5))
    ax.text(-0.025, 0.25 / goldRat, '$G(\lambda)$', ha='right', va='center', fontsize=14)
    ax.text(1.025, 0.0, '$\lambda$', ha='left', va='center', fontsize=14)
    return fig, ax
def updateEnsembler(x, y, ax, CapLam=0.1, M=8, drawArrows=False):
    """Redraw the conveyor-belt illustration onto an existing matplotlib axis.

    Re-plots the free-energy curve and places the M replica markers
    (red diamond "arrow heads" plus circular markers on the curve) for the
    current belt position ``CapLam``.

    :param x: lambda values of the free-energy profile (array-like with .min())
    :param y: free-energy values corresponding to ``x``
    :param ax: matplotlib axis to draw on
    :param CapLam: current conveyor-belt position (capital Lambda)
    :param M: number of replicas distributed along the belt
    :param drawArrows: if True, annotate the current state and draw a
        direction arrow at each replica marker
    """
    # layout constants; pSize appears unused in this function
    pSize = 6.027
    goldRat = 1.70
    lineWidth = 1
    # NOTE(review): this expression statement creates path effects but
    # discards them -- it has no effect on the plot.
    [path_effects.SimpleLineShadow(), path_effects.Normal()]
    # belt-end radius and vertical placement of the belt
    rx=0.05
    ry=rx
    shifty=0.75/goldRat
    # belt outline polylines; NOTE(review): cvb_bot/cvb_top are computed but
    # never plotted here -- presumably drawn by the initial plot call; confirm.
    cvb_bot=np.zeros((90,2))
    cvb_bot[:,0]=np.linspace(rx, 1.0-rx, 90)
    cvb_bot[:,1]=np.ones(90)*shifty
    cvb_top=np.zeros((90,2))
    cvb_top[:,0]=np.linspace(rx, 1.0-rx, 90)
    cvb_top[:,1]=np.ones(90)*(shifty+2.0*ry)
    # normalize lambda to [0, 1] and free energy to fit under the belt
    lamVals=x-x.min()
    lamVals/=lamVals.max()
    gVals=y-y.min()
    if gVals.max() != 0.0:
        gVals/=(2.0*gVals.max()*goldRat)
    else:
        # flat landscape: shift to mid-height instead of dividing by zero
        gVals+=1/(2.0*goldRat)
    ax.plot(lamVals[2:], gVals[2:], 'k', lw=lineWidth)
    # per-replica marker orientation (degrees) and vertical scale factor;
    # note `y` is rebound here from the input array to this list
    rotation = []
    y = []
    # buildBox: classify each replica as on the straight belt section or on
    # one of the rounded ends, and compute its marker rotation accordingly
    for i in range(M):
        if calc_lam(CapLam, i, numsys=M) > rx and calc_lam(CapLam, i, numsys=M) < (1.0 - rx):
            rotation.append(45)
            y.append(1.0)
        elif calc_lam(CapLam, i, numsys=M) < rx:
            # replica on the left rounded end of the belt
            alpha = np.arcsin((rx - calc_lam(CapLam, i, numsys=M)) / rx)
            if (CapLam + i * 2 * np.pi / float(M)) % (2. * np.pi) < np.pi:
                rotation.append(45 + alpha / np.pi * 180.0)
            else:
                rotation.append(45 - alpha / np.pi * 180.0)
            y.append(np.cos(alpha))
        else:
            # replica on the right rounded end of the belt
            alpha = np.arcsin((rx - (1 - calc_lam(CapLam, i, numsys=M))) / rx)
            if (CapLam + i * 2 * np.pi / float(M)) % (2. * np.pi) < np.pi:
                rotation.append(45 - alpha / np.pi * 180.0)
            else:
                rotation.append(45 + alpha / np.pi * 180.0)
            y.append(np.cos(alpha))
    # half-diagonal of the square marker, used as a vertical offset
    shiftMarker = 0.02 * np.sqrt(2)
    # arrow annotating the current state Lambda at replica 0
    if drawArrows:
        if np.fabs(rotation[0] - 45) > 0.0001:
            # replica 0 sits on a rounded end; `alpha` here is whatever the
            # loop above last assigned -- NOTE(review): verify this is the
            # intended angle for replica 0 rather than the last replica.
            ax.annotate('Current state:\n$\Lambda={:.1f}$'.format(CapLam),
                        xy=(calc_lam(CapLam, 0, numsys=M), shifty + ry + np.cos(alpha) * (ry + shiftMarker)),
                        xytext=(
                        calc_lam(CapLam, 0, numsys=M) - np.sin(alpha) * 2 * rx, shifty + (1 + np.cos(alpha) * 5) * ry),
                        fontsize='small',
                        arrowprops=dict(arrowstyle='<-', linewidth=1.0, shrinkA=0.0), va='top', ha='center', zorder=0,
                        bbox=dict(pad=-.1, lw=0.0, color='None'))
        else:
            ax.annotate('Current state:\n$\Lambda={:.1f}$'.format(CapLam),
                        xy=(calc_lam(CapLam, 0, numsys=M), shifty + 2.0 * ry + shiftMarker),
                        xytext=(calc_lam(CapLam, 0, numsys=M), shifty + 6 * ry),
                        arrowprops=dict(arrowstyle='<-', linewidth=1.0, shrinkA=0.0), fontsize='small', va='top',
                        ha='center', zorder=0, bbox=dict(pad=-.1, lw=0.0, color='None'))
    # curved red arrows at the two rounded ends of the conveyor belt
    drawCirc(ax, rx * 0.8, rx, shifty + ry, 45, 270, lineWidth=1.0, color_='red')
    drawCirc(ax, rx * 0.8, 1.0 - rx, shifty + ry, 225, 270, lineWidth=1.0, color_='red')
    # per-replica markers: diamond on the belt, circle on the energy curve,
    # optional direction arrow, and a faint connector line
    for i in range(M):
        x = calc_lam(CapLam, i, numsys=M)
        if x < rx:
            # NOTE(review): this mutates `rx` (the belt-end radius used by
            # later iterations' boundary tests) rather than shifting the
            # marker position `x` -- looks like a possible bug; confirm.
            rx -= np.sqrt(1 - y[i] ** 2) * shiftMarker
        elif x > 1 - rx:
            rx += np.sqrt(1 - y[i] ** 2) * shiftMarker
        if (CapLam + i * 2 * np.pi / float(M)) % (2. * np.pi) < np.pi:
            # replica on the upper half of the belt (moving one way)
            ax.add_patch(  # Create triangle as arrow head
                patches.RegularPolygon(
                    (x, shifty + ry + y[i] * ry + y[i] * shiftMarker),  # (x,y)
                    4,  # number of vertices
                    0.02,  # radius
                    rotation[i] / 180.0 * np.pi,  # orientation
                    color='red',
                    zorder=10
                )
            )
            ax.scatter(x, gVals[np.abs(lamVals - x).argmin()] + shiftMarker, s=30, marker='o', edgecolors='face',
                       color='r', zorder=10)
            if drawArrows:
                ax.annotate('', xy=(x, gVals[np.abs(lamVals - x).argmin()] + shiftMarker),
                            xytext=(x + 0.1, gVals[np.abs(lamVals - x - 0.1).argmin()] + shiftMarker),
                            arrowprops=dict(arrowstyle='<-', linewidth=lineWidth))
            ax.plot([x, x], [gVals[np.abs(lamVals - x).argmin()], shifty + ry + y[i] * ry + y[i] * shiftMarker],
                    color='0.8', lw=lineWidth, zorder=0)
        else:
            # replica on the lower half of the belt (moving the other way)
            ax.add_patch(  # Create triangle as arrow head
                patches.RegularPolygon(
                    (x, shifty - y[i] * shiftMarker),  # (x,y)
                    4,  # number of vertices
                    0.02,  # radius
                    rotation[i] / 180.0 * np.pi,  # orientation
                    color='red',
                    zorder=10
                )
            )
            ax.plot([x, x], [gVals[np.abs(lamVals - x).argmin()], shifty + (1.0 - y[i]) * ry - y[i] * shiftMarker],
                    color='0.8', lw=lineWidth, zorder=0)
            ax.scatter(x, gVals[np.abs(lamVals - x).argmin()] + shiftMarker, s=30, marker='o', edgecolors='face',
                       color='r', zorder=10)
            if drawArrows:
                ax.annotate('', xy=(x, gVals[np.abs(lamVals - x).argmin()] + shiftMarker),
                            xytext=(x - 0.1, gVals[np.abs(lamVals - x + 0.1).argmin()] + shiftMarker),
                            arrowprops=dict(arrowstyle='<-', linewidth=lineWidth))
| 48.448198 | 165 | 0.5182 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,942 | 0.183255 |
0f4aea28cd9c70e23bcd7e3a57f7b80215d70f0c | 476 | py | Python | data_structures/binary_tree/__init__.py | Mhassanbughio/Python-1 | 704c0f93425c53fcadcdbb3dbdf337b0598079fd | [
"MIT"
] | 2 | 2022-01-13T04:56:29.000Z | 2022-01-26T05:09:34.000Z | data_structures/binary_tree/__init__.py | Mhassanbughio/Python-1 | 704c0f93425c53fcadcdbb3dbdf337b0598079fd | [
"MIT"
] | null | null | null | data_structures/binary_tree/__init__.py | Mhassanbughio/Python-1 | 704c0f93425c53fcadcdbb3dbdf337b0598079fd | [
"MIT"
] | null | null | null | class Rectangle:
    def __init__(self, length, breadth, unit_cost=0):
        """Store the rectangle's side lengths and the price of one square unit.

        ``length`` and ``breadth`` are the side lengths (same distance unit);
        ``unit_cost`` is the cost per square unit of area (defaults to 0).
        """
        self.length = length
        self.breadth = breadth
        self.unit_cost = unit_cost
def get_area(self):
return self.length * self.breadth
def calculate_cost(self):
area = self.get_area()
return area * self.unit_cost
# breadth = 120 units, length = 160 units, 1 sq unit cost = Rs 2000
# Small demo: build one rectangle and report its area on stdout.
rect = Rectangle(160, 120, 2000)
area = rect.get_area()
print("Area of Rectangle: %s sq units" % area)
| 34 | 67 | 0.657563 | 320 | 0.672269 | 0 | 0 | 0 | 0 | 0 | 0 | 99 | 0.207983 |
0f4afaff8b78fa0f03f69df31da420a7dd7792c9 | 20,637 | py | Python | mll/discrete_agent_play.py | asappresearch/compositional-inductive-bias | 2c67713306ec6591f397ca252f915c3edc5a794f | [
"MIT"
] | 2 | 2021-07-09T16:32:00.000Z | 2022-03-21T17:32:39.000Z | mll/discrete_agent_play.py | asappresearch/compositional-inductive-bias | 2c67713306ec6591f397ca252f915c3edc5a794f | [
"MIT"
] | null | null | null | mll/discrete_agent_play.py | asappresearch/compositional-inductive-bias | 2c67713306ec6591f397ca252f915c3edc5a794f | [
"MIT"
] | 1 | 2021-07-09T16:32:02.000Z | 2021-07-09T16:32:02.000Z | """
one agent chooses an action, says it. other agent does it. both get a point if right
this file was forked from mll/discrete_bottleneck_discrete_input.py
"""
import torch
import torch.nn.functional as F
from torch import nn, optim
# from envs.world3c import World
from ulfs import alive_sieve, rl_common
from ulfs.stats import Stats
from ulfs.stochastic_trajectory import StochasticTrajectory
from ulfs.lexicon_recorder import LexiconRecorder
from ulfs.runner_base_v1 import RunnerBase
# this is just for display to human, cos reading 'badccd' gets annoying after a while :P
# this is somewhat based on how kirby 2001 does this
# we can randomize these potentially
# 30 consonant+vowel syllables ({b,t,r,l,g,m} x {a,e,i,o,u}) used to render raw
# symbol ids as pronounceable strings.
# NOTE(review): `phonemes` is not referenced anywhere in this file's visible
# code; presumably consumed by display helpers elsewhere -- confirm.
phonemes = [
    'ba',
    'bo',
    'bu',
    'bi',
    'be',
    'to',
    'ti',
    'ta',
    'te',
    'tu',
    'ra',
    're',
    'ri',
    'ru',
    'ro',
    'la',
    'le',
    'li',
    'lo',
    'lu',
    'ga',
    'ge',
    'gi',
    'go',
    'gu',
    'ma',
    'me',
    'mu',
    'mi',
    'mo'
]
class AgentOneActionSelector(nn.Module):
    """Chooses which action agent one will try to communicate, given no input.

    The module holds a single learnable logit vector over the action space;
    ``forward`` softmaxes it into a probability distribution, expands it over
    the batch, and draws one stochastic action per batch row. REINFORCE (with
    entropy regularization, applied by the caller) pushes this distribution
    around.

    Design note kept from the original author: richer, memory-based action
    selection was considered -- e.g. an RNN over past (action, reward) pairs,
    possibly made symbol-permutation-invariant via data augmentation, so the
    selector could learn curiosity-like exploration patterns. For this work a
    simple learned categorical distribution is used instead, relying on reward
    shaping plus entropy regularization to avoid collapse onto one action
    (meta-learning it is left for later).
    """
    def __init__(self, num_actions):
        """
        :param num_actions: size of the discrete action space
        """
        self.num_actions = num_actions
        super().__init__()
        # Learnable (unnormalized) distribution over actions, shared across
        # the batch; initialized uniform so the softmax starts flat.
        self.action_distribution = nn.Parameter(torch.zeros(1, num_actions))
        self.action_distribution.data.fill_(1 / num_actions)

    def forward(self, batch_size):
        """Draw one stochastic action per batch row.

        :param batch_size: number of independent samples to draw
        :return: stochastic sample object from rl_common (carries .actions,
            log-probs for REINFORCE, and .entropy)
        """
        # Bugfix: F.softmax without an explicit `dim` is deprecated (and an
        # error on recent PyTorch). The implicit dim was the last dimension
        # here anyway, so dim=-1 preserves the computed values exactly.
        probs = F.softmax(self.action_distribution, dim=-1).expand(batch_size, self.num_actions)
        s = rl_common.draw_categorical_sample(
            action_probs=probs, batch_idxes=None)
        return s
class AgentOneLM(nn.Module):
    """
    takes in a discrete action (1-in-k), converts to utterance

    The utterance is generated token-by-token with an RNN cell; token 0 is
    the terminator. In training mode tokens are drawn stochastically (so the
    whole utterance forms a REINFORCE-able trajectory); in eval mode the
    argmax token is taken instead. Finished (terminated) examples are sieved
    out of the batch as generation proceeds.
    """
    def __init__(self, p, embedding_size, utterance_max, vocab_size, num_actions):
        """
        Note that vocab_size excludes terminator character 0

        :param p: hyperparameters; only ``p.rnn_type`` (e.g. 'GRU') is read here
        :param embedding_size: size of token/action embeddings and RNN state
        :param utterance_max: maximum number of tokens to generate
        :param vocab_size: number of non-terminator tokens
        :param num_actions: size of the discrete action space
        """
        self.embedding_size = embedding_size
        self.utterance_max = utterance_max
        self.num_actions = num_actions
        super().__init__()
        # action id -> initial RNN state
        self.h1 = nn.Embedding(num_actions, embedding_size)
        # d2e "discrete to embed": token id (incl. terminator 0) -> embedding
        self.d2e = nn.Embedding(vocab_size + 1, embedding_size)
        RNNCell = getattr(nn, f'{p.rnn_type}Cell')
        self.rnn = RNNCell(embedding_size, embedding_size)
        # e2d "embed to discrete": RNN state -> token logits
        self.e2d = nn.Linear(embedding_size, vocab_size + 1)
    def forward(self, actions, global_idxes):
        """Generate one utterance per action.

        x might have been sieved. global_idxes too. but global_idxes contents
        are the global indexes (i.e. indexes into the full episode batch).

        :param actions: LongTensor of action ids, one per (possibly sieved) row
        :param global_idxes: global batch index of each row
        :return: dict with 'stochastic_trajectory' (per-step samples, training
            mode only), 'utterance' (tokens, zero-padded) and 'utterance_lens'
        """
        batch_size = actions.size()[0]
        # initial RNN state comes from the action embedding
        x = self.h1(actions)
        state = x
        global_idxes = global_idxes.clone()
        # note that this sieve might start off smaller than the global batch_size
        sieve = alive_sieve.AliveSieve(batch_size=batch_size, enable_cuda=x.is_cuda)
        type_constr = torch.cuda if x.is_cuda else torch
        # generation starts from the terminator token 0
        last_token = type_constr.LongTensor(batch_size).fill_(0)
        utterance = type_constr.LongTensor(batch_size, self.utterance_max).fill_(0)
        # N_outer might not be the full episode batch size, but a subset
        N_outer = type_constr.LongTensor(batch_size).fill_(self.utterance_max)
        stochastic_trajectory = StochasticTrajectory()
        for t in range(self.utterance_max):
            emb = self.d2e(last_token)
            state = self.rnn(emb, state)
            token_logits = self.e2d(state)
            token_probs = F.softmax(token_logits, dim=-1)
            if self.training:
                # stochastic draw, recorded for REINFORCE
                s = rl_common.draw_categorical_sample(
                    action_probs=token_probs, batch_idxes=global_idxes[sieve.global_idxes])
                stochastic_trajectory.append_stochastic_sample(s=s)
                token = s.actions.view(-1)
            else:
                # greedy decoding in eval mode
                _, token = token_probs.max(-1)
            # write tokens back at the rows' global positions
            utterance[:, t][sieve.global_idxes] = token
            last_token = token
            # token 0 terminates an utterance
            sieve.mark_dead(last_token == 0)
            sieve.set_global_dead(N_outer, t)
            if sieve.all_dead():
                break
            # keep only the still-alive rows for the next step
            state = state[sieve.alive_idxes]
            last_token = last_token[sieve.alive_idxes]
            sieve.self_sieve_()
        res = {
            'stochastic_trajectory': stochastic_trajectory,
            'utterance': utterance,
            'utterance_lens': N_outer
        }
        return res
class AgentTwo(nn.Module):
    """Listener: consumes an utterance and stochastically picks an action."""
    def __init__(self, p, embedding_size, vocab_size, num_actions):
        """
        - input: utterance
        - output: action

        :param p: hyperparameters; only ``p.rnn_type`` (e.g. 'GRU') is read here
        :param embedding_size: size of token embeddings and RNN state
        :param vocab_size: number of non-terminator tokens (0 is terminator)
        :param num_actions: size of the discrete action space
        """
        super().__init__()
        self.num_actions = num_actions
        self.embedding_size = embedding_size
        # token id (incl. terminator 0) -> embedding
        self.d2e = nn.Embedding(vocab_size + 1, embedding_size)
        RNNCell = getattr(nn, f'{p.rnn_type}Cell')
        self.rnn = RNNCell(embedding_size, embedding_size)
        # final RNN state -> action logits
        self.h1 = nn.Linear(embedding_size, num_actions)
    def forward(self, utterance, global_idxes):
        """Read each utterance with the RNN and sample an action per row.

        utterance etc might be sieved, which is why we receive global_idxes
        alive_masks will then create subsets of this already-sieved set

        :param utterance: LongTensor [batch, max_len] of tokens, 0-terminated
        :param global_idxes: global batch index of each row
        :return: stochastic sample object (carries .actions, .action_probs,
            log-probs for REINFORCE, and .entropy)
        """
        batch_size = utterance.size()[0]
        utterance_max = utterance.size()[1]
        type_constr = torch.cuda if utterance.is_cuda else torch
        sieve = alive_sieve.AliveSieve(batch_size=batch_size, enable_cuda=utterance.is_cuda)
        state = type_constr.FloatTensor(batch_size, self.embedding_size).fill_(0)
        # output_state keeps, at each row's global position, the RNN state at
        # the moment that row's utterance terminated
        output_state = state.clone()
        for t in range(utterance_max):
            emb = self.d2e(utterance[:, t])
            state = self.rnn(emb, state)
            output_state[sieve.global_idxes] = state
            # terminator token 0 ends an utterance
            sieve.mark_dead(utterance[:, t] == 0)
            if sieve.all_dead():
                break
            # keep only still-alive rows for the next timestep
            utterance = utterance[sieve.alive_idxes]
            state = state[sieve.alive_idxes]
            sieve.self_sieve_()
        state = output_state
        action_logits = self.h1(state)
        action_probs = F.softmax(action_logits, dim=-1)
        s = rl_common.draw_categorical_sample(
            action_probs=action_probs, batch_idxes=global_idxes)
        return s
def run_episode(actions, one, two, utterance_len_reg, enable_cuda, render=False):
    """Run one speaker/listener episode and collect the pieces for training.

    Agent ``one`` turns each action into an utterance; agent ``two`` decodes
    each utterance back into an action.  ``utterance_len_reg``, ``enable_cuda``
    and ``render`` are accepted for interface compatibility but not used here.

    :param actions: LongTensor of action ids, one per batch row
    :param one: speaker module (action -> utterance)
    :param two: listener module (utterance -> action sample)
    :return: dict with the speaker trajectory, listener sample, utterances,
        utterance lengths, and an (empty) Stats accumulator
    """
    batch_size = actions.size()[0]
    # 0..batch_size-1 as a LongTensor (equivalent of arange)
    global_idxes = torch.LongTensor(batch_size).fill_(1).cumsum(-1) - 1
    speaker_out = one(
        actions=actions, global_idxes=global_idxes)
    listener_sample = two(
        utterance=speaker_out['utterance'], global_idxes=global_idxes)
    return {
        'one_stochastic_trajectory': speaker_out['stochastic_trajectory'],
        'two_s': listener_sample,
        'utterances': speaker_out['utterance'],
        'utterances_lens': speaker_out['utterance_lens'],
        'stats': Stats([])
    }
class Runner(RunnerBase):
    """Training driver for the discrete-action signalling game.

    Each step runs one stochastic training episode (REINFORCE updates for the
    action selector, the speaker ``one`` and the listener ``two``), then one
    greedy evaluation episode on the same actions, and — on render steps —
    prints and logs lexicon / accuracy statistics.
    """
    def __init__(self):
        super().__init__(
            save_as_statedict_keys=['action_selector', 'one', 'two', 'opt_one', 'opt_two'],
            additional_save_keys=['baseline'],
            step_key='episode'
        )
    def setup(self, p):
        """Instantiate agents, optimizers and statistics accumulators.

        :param p: parsed hyperparameters (see the ``__main__`` block below)
        """
        num_actions = p.num_actions
        self.lexicon_recorder = LexiconRecorder(num_actions=num_actions)
        self.test_lexicon_recorder = LexiconRecorder(num_actions=num_actions)
        self.action_selector = AgentOneActionSelector(num_actions=num_actions)
        # Bugfix: AgentOneLM and AgentTwo both require `p` as their first
        # constructor argument (they read p.rnn_type); the original calls
        # omitted it, which raises TypeError at setup time.
        self.one = AgentOneLM(
            p=p, embedding_size=p.embedding_size, vocab_size=p.vocab_size, utterance_max=p.utterance_max,
            num_actions=num_actions)
        self.two = AgentTwo(
            p=p, embedding_size=p.embedding_size, vocab_size=p.vocab_size, num_actions=num_actions)
        if p.enable_cuda:
            self.one = self.one.cuda()
            self.two = self.two.cuda()
            self.action_selector = self.action_selector.cuda()
        Opt = getattr(optim, p.opt)
        self.opt_action_selector = Opt(lr=0.001, params=self.action_selector.parameters())
        self.opt_one = Opt(lr=0.001, params=self.one.parameters())
        self.opt_two = Opt(lr=0.001, params=self.two.parameters())
        # running sums/counts, reset after each render/log cycle
        self.stats = Stats([
            'episodes_count',
            'baseline_sum',
            'train_len_sum',
            'train_len_count',
            'train_acc_sum',
            'train_rewards_sum',
            'test_acc_sum',
            'test_len_sum',
            'test_len_count',
            'test_rewards_sum'
        ])
        # exponential moving average of mean reward, used as REINFORCE baseline
        self.baseline = 0
    def step(self, p):
        """Run one training episode plus one eval episode; update all agents.

        (The original also built an unused `dopamine_per_action` tensor — a
        never-wired-in curiosity/boredom shaping sketch; that dead code has
        been removed.)
        """
        render = self.should_render()
        stats = self.stats
        # choose which actions agent one will try to communicate this episode
        s_actions_in = self.action_selector(batch_size=p.batch_size)
        actions_in = s_actions_in.actions
        if p.enable_cuda:
            actions_in = actions_in.cuda()
        # ---- training episode (stochastic sampling) ----
        self.one.train()
        self.two.train()
        episode_result = run_episode(
            actions=actions_in,
            one=self.one, two=self.two,
            render=render, utterance_len_reg=p.utterance_len_reg, enable_cuda=p.enable_cuda)
        utterances, utterances_lens, _stats = map(episode_result.__getitem__, [
            'utterances', 'utterances_lens', 'stats'
        ])
        one_stochastic_trajectory, two_s = map(episode_result.__getitem__, [
            'one_stochastic_trajectory', 'two_s'
        ])
        self.lexicon_recorder.record(
            action_probs_l=[two_s.action_probs], utterances_by_t=[utterances], utterance_lens_by_t=[utterances_lens])
        self.stats.train_len_sum += utterances_lens.sum().item()
        self.stats.train_len_count += len(utterances_lens)
        self.stats.episodes_count += 1
        # reward 1 for a correctly-decoded action, 0 otherwise; empty
        # utterances get no reward, longer utterances are penalized,
        # and the result is clamped so rewards stay non-negative
        correct_mask = two_s.actions == actions_in
        rewards = correct_mask.float()
        zero_length_idxes = (utterances_lens == 0).nonzero().view(-1).long()
        rewards[zero_length_idxes] = 0
        rewards -= utterances_lens.float() * p.utterance_len_reg
        rewards = rewards.clamp(min=0)
        # moving-average baseline, then variance-normalize the advantage
        self.baseline = 0.7 * self.baseline + 0.3 * rewards.mean().item()
        rewards_std = rewards.detach().std().item()
        baselined_rewards = (rewards - self.baseline)
        if rewards_std > 0:
            baselined_rewards = baselined_rewards / rewards_std
        acc = (two_s.actions == actions_in).float().mean().item()
        stats.train_acc_sum += acc
        stats.train_rewards_sum += rewards.mean().item()
        # REINFORCE losses plus entropy regularization for each of the
        # three stochastic components, each with its own optimizer
        reinforce_loss_action_selector = s_actions_in.calc_loss(baselined_rewards)
        reinforce_loss_one = one_stochastic_trajectory.calc_loss(baselined_rewards)
        reinforce_loss_two = two_s.calc_loss(baselined_rewards)
        ent_loss_action_selector = - p.actions_ent_reg * s_actions_in.entropy
        ent_loss_one = - p.ent_reg * one_stochastic_trajectory.entropy
        ent_loss_two = - p.ent_reg * two_s.entropy
        loss_action_selector = reinforce_loss_action_selector + ent_loss_action_selector
        loss_one = reinforce_loss_one + ent_loss_one
        loss_two = reinforce_loss_two + ent_loss_two
        self.opt_action_selector.zero_grad()
        loss_action_selector.backward()
        self.opt_action_selector.step()
        self.opt_one.zero_grad()
        loss_one.backward()
        self.opt_one.step()
        self.opt_two.zero_grad()
        loss_two.backward()
        self.opt_two.step()
        stats.baseline_sum += self.baseline
        # ---- evaluation episode (greedy/argmax decoding) ----
        self.one.eval()
        self.two.eval()
        episode_result = run_episode(
            actions=actions_in,
            one=self.one, two=self.two,
            render=render, utterance_len_reg=p.utterance_len_reg, enable_cuda=p.enable_cuda)
        utterances, utterances_lens, _stats = map(episode_result.__getitem__, [
            'utterances', 'utterances_lens', 'stats'
        ])
        one_stochastic_trajectory, two_s = map(episode_result.__getitem__, [
            'one_stochastic_trajectory', 'two_s'
        ])
        self.test_lexicon_recorder.record(
            action_probs_l=[two_s.action_probs], utterances_by_t=[utterances], utterance_lens_by_t=[utterances_lens])
        # eval reward is +1/-1 (unlike training), minus the length penalty
        test_rewards = (two_s.actions == actions_in).float() * 2 - 1
        test_rewards -= utterances_lens.float() * p.utterance_len_reg
        test_acc = (two_s.actions == actions_in).float().mean().item()
        self.stats.test_acc_sum += test_acc
        self.stats.test_len_sum += utterances_lens.sum().item()
        self.stats.test_len_count += len(utterances_lens)
        stats.test_rewards_sum += test_rewards.mean().item()
        if render:
            # periodic reporting: lexicon stats, sample rewards, then reset
            lex_stats = self.lexicon_recorder.calc_stats()
            test_lex_stats = self.test_lexicon_recorder.calc_stats()
            print('')
            print('rewards[:16]', rewards[:16])
            self.test_lexicon_recorder.print_lexicon()
            self.lexicon_recorder.reset()
            self.test_lexicon_recorder.reset()
            stats = self.stats
            log_dict = {
                'baseline': stats.baseline_sum / stats.episodes_count,
                'train_acc': stats.train_acc_sum / stats.episodes_count,
                'train_reward': stats.train_rewards_sum / stats.episodes_count,
                'train_utt_len': stats.train_len_sum / stats.train_len_count,
                'train_lex_size': lex_stats['total_unique'],
                'test_reward': stats.test_rewards_sum / stats.episodes_count,
                'test_lex_size': test_lex_stats['total_unique'],
                'test_utt_len': stats.test_len_sum / stats.test_len_count,
                'test_acc': stats.test_acc_sum / stats.episodes_count,
                'actions_ent': s_actions_in.entropy.mean().item()
            }
            for k, v in lex_stats.items():
                log_dict[k] = v
            self.print_and_log(
                log_dict,
                formatstr='e={episode} '
                          'b={baseline:.3f} '
                          '| train '
                          'len {train_utt_len:.2f} '
                          'acc {train_acc:.3f} '
                          'r {train_reward:.3f} '
                          'lex_size {train_lex_size} '
                          '| test '
                          'len {test_utt_len:.2f} '
                          'acc {test_acc:.3f} '
                          'r {test_reward:.3f} '
                          'lex_size {test_lex_size} '
                          'ent {actions_ent:.3f} '
            )
            stats.reset()
if __name__ == '__main__':
    # Command-line entry point: declare hyperparameters, parse them, then hand
    # control to the RunnerBase lifecycle (setup() followed by repeated step()).
    runner = Runner()
    runner.add_param('--embedding-size', type=int, default=50)
    runner.add_param('--num-actions', type=int, default=32)
    runner.add_param('--utterance-max', type=int, default=10)
    runner.add_param('--utterance-len-reg', type=float, default=0.01, help='how much to penalize longer utterances')
    runner.add_param('--ent-reg', type=float, default=0.2)
    runner.add_param('--actions-ent-reg', type=float, default=0.2)
    runner.add_param('--boredom-reg', type=float, default=0.1,
                     help='less dopamine for repeated identical successes (I guess this is similar to count-based :/ )')
    runner.add_param('--vocab-size', type=int, default=2, help='excludes terminator')
    runner.add_param('--batch-size', type=int, default=128)
    runner.add_param('--opt', type=str, default='Adam')
    runner.add_param('--rnn-type', type=str, default='GRU')
    runner.parse_args()
    runner.setup_base()
    runner.run_base()
| 36.91771 | 120 | 0.621941 | 17,882 | 0.866502 | 0 | 0 | 0 | 0 | 0 | 0 | 7,204 | 0.349082 |
0f4bf459dfea724ef4363f595d32c0bc5922dd5e | 17,993 | py | Python | maggy/searchspace.py | amacati/maggy | 4d4fef5e3dbb60e2aa935d6236cc0cd53477277a | [
"Apache-2.0"
] | 81 | 2019-02-18T15:16:45.000Z | 2022-02-17T06:13:17.000Z | maggy/searchspace.py | ssheikholeslami/maggy | 0d9a4cbbf6120d7453b75515b082feae3c3530c2 | [
"Apache-2.0"
] | 40 | 2019-04-29T15:03:28.000Z | 2022-03-09T09:29:28.000Z | maggy/searchspace.py | ssheikholeslami/maggy | 0d9a4cbbf6120d7453b75515b082feae3c3530c2 | [
"Apache-2.0"
] | 16 | 2019-02-19T14:25:35.000Z | 2021-02-25T11:41:04.000Z | #
# Copyright 2020 Logical Clocks AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import random
import numpy as np
class Searchspace(object):
"""Create an instance of `Searchspace` from keyword arguments.
A searchspace is essentially a set of key value pairs, defining the
hyperparameters with a name, type and a feasible interval. The keyword
arguments specify name-values pairs for the hyperparameters,
where values are tuples of the form (type, list). Type is a string with
one of the following values:
- DOUBLE
- INTEGER
- DISCRETE
- CATEGORICAL
And the list in the tuple specifies either two values only, the start
and end point of of the feasible interval for DOUBLE and INTEGER,
or the discrete possible values for the types DISCRETE and CATEGORICAL.
Sample usage:
>>> # Define Searchspace
>>> from maggy import Searchspace
>>> # The searchspace can be instantiated with parameters
>>> sp = Searchspace(kernel=('INTEGER', [2, 8]), pool=('INTEGER', [2, 8]))
>>> # Or additional parameters can be added one by one
>>> sp.add('dropout', ('DOUBLE', [0.01, 0.99]))
The `Searchspace` object can also be initialized from a python dictionary:
>>> sp_dict = sp.to_dict()
>>> sp_new = Searchspace(**sp_dict)
The parameter names are added as attributes of `Searchspace` object,
so they can be accessed directly with the dot notation
`searchspace._name_`.
"""
DOUBLE = "DOUBLE"
INTEGER = "INTEGER"
DISCRETE = "DISCRETE"
CATEGORICAL = "CATEGORICAL"
def __init__(self, **kwargs):
self._hparam_types = {}
self._names = []
for name, value in kwargs.items():
self.add(name, value)
def add(self, name, value):
"""Adds {name, value} pair to hyperparameters.
:param name: Name of the hyperparameter
:type name: str
:param value: A tuple of the parameter type and its feasible region
:type value: tuple
:raises ValueError: Hyperparameter name is reserved
:raises ValueError: Hyperparameter feasible region in wrong format
"""
if getattr(self, name, None) is not None:
raise ValueError("Hyperparameter name is reserved: {}".format(name))
if isinstance(value, tuple) or isinstance(value, list):
if len(value) != 2:
raise ValueError(
"Hyperparameter tuple has to be of length "
"two and format (type, list): {0}, {1}".format(name, value)
)
param_type = value[0].upper()
param_values = value[1]
if param_type in [
Searchspace.DOUBLE,
Searchspace.INTEGER,
Searchspace.DISCRETE,
Searchspace.CATEGORICAL,
]:
if len(param_values) == 0:
raise ValueError(
"Hyperparameter feasible region list "
"cannot be empty: {0}, {1}".format(name, param_values)
)
if param_type in [Searchspace.DOUBLE, Searchspace.INTEGER]:
assert len(param_values) == 2, (
"For DOUBLE or INTEGER type parameters, list "
"can only contain upper and lower bounds: {0}, {1}".format(
name, param_values
)
)
if param_type == Searchspace.DOUBLE:
if type(param_values[0]) not in [int, float] or type(
param_values[1]
) not in [int, float]:
raise ValueError(
"Hyperparameter boundaries for type DOUBLE need to be integer "
"or float: {}".format(name)
)
elif param_type == Searchspace.INTEGER:
if type(param_values[0]) != int or type(param_values[1]) != int:
raise ValueError(
"Hyperparameter boundaries for type INTEGER need to be integer: "
"{}".format(name)
)
assert param_values[0] < param_values[1], (
"Lower bound {0} must be "
"less than upper bound {1}: {2}".format(
param_values[0], param_values[1], name
)
)
self._hparam_types[name] = param_type
setattr(self, name, value[1])
self._names.append(name)
else:
raise ValueError(
"Hyperparameter type is not of type DOUBLE, "
"INTEGER, DISCRETE or CATEGORICAL: {}".format(name)
)
else:
raise ValueError("Value is not an appropriate tuple: {}".format(name))
print("Hyperparameter added: {}".format(name))
def to_dict(self):
"""Return the hyperparameters as a Python dictionary.
:return: A dictionary with hyperparameter names as keys. The values are
the hyperparameter values.
:rtype: dict
"""
return {
n: (self._hparam_types[n], getattr(self, n))
for n in self._hparam_types.keys()
}
def names(self):
"""Returns the dictionary with the names and types of all
hyperparameters.
:return: Dictionary of hyperparameter names, with types as value
:rtype: dict
"""
return self._hparam_types
def get(self, name, default=None):
"""Returns the value of `name` if it exists, else `default`."""
if name in self._hparam_types:
return getattr(self, name)
return default
def get_random_parameter_values(self, num):
"""Generate random parameter dictionaries, e.g. to be used for initializing an optimizer.
:param num: number of random parameter dictionaries to be generated.
:type num: int
:raises ValueError: `num` is not an int.
:return: a list containing parameter dictionaries
:rtype: list
"""
return_list = []
for _ in range(num):
params = {}
for name, value in self.names().items():
feasible_region = self.get(name)
if value == Searchspace.DOUBLE:
params[name] = random.uniform(
feasible_region[0], feasible_region[1]
)
elif value == Searchspace.INTEGER:
params[name] = random.randint(
feasible_region[0], feasible_region[1]
)
elif value == Searchspace.DISCRETE:
params[name] = random.choice(feasible_region)
elif value == Searchspace.CATEGORICAL:
params[name] = random.choice(feasible_region)
return_list.append(params)
return return_list
def __iter__(self):
self._returned = self._names.copy()
return self
def __next__(self):
# if list not empty
if self._returned:
# pop from left and get parameter tuple
name = self._returned.pop(0)
return {
"name": name,
"type": self._hparam_types[name],
"values": self.get(name),
}
else:
raise StopIteration
def items(self):
"""Returns a sorted iterable over all hyperparameters in the searchspace.
Allows to iterate over the hyperparameters in a searchspace. The parameters
are sorted in the order of which they were added to the searchspace by the user.
:return: an iterable of the searchspace
:type: Searchspace
"""
# for consistency and serves mainly as syntactic sugar
return self
def keys(self):
"""Returns a sorted iterable list over the names of hyperparameters in
the searchspace.
:return: names of hyperparameters as a list of strings
:type: list
"""
return self._names
def values(self):
"""Returns a sorted iterable list over the types and feasible intervals of
hyperparameters in the searchspace.
:return: types and feasible interval of hyperparameters as tuple
:type: tuple
"""
return [(self._hparam_types[name], self.get(name)) for name in self._names]
def __contains__(self, name):
return name in self._hparam_types
def __str__(self):
return json.dumps(self.to_dict(), sort_keys=True)
def json(self):
return json.dumps(self.to_dict(), sort_keys=True)
def transform(self, hparams, normalize_categorical=False):
"""Transforms array of hypeparameters for one trial.
+--------------+-----------------------------------------------------+
| Hparam Type | Transformation |
+==============+=====================================================+
| DOUBLE | Max-Min Normalization |
+--------------+-----------------------------------------------------+
| INTEGER | Max-Min Normalization |
+--------------+-----------------------------------------------------+
| CATEGORICAL | Encoding: index in list + opt. Max-Min Normalization|
+--------------+-----------------------------------------------------+
:param hparams: hparams in original representation for one trial
:type hparams: 1D np.ndarray
:param normalize_categorical: If True, the encoded categorical hparam is also max-min normalized between 0 and 1
`inverse_transform()` must use the same value for this parameter
:type normalize_categorical: bool
:return: transformed hparams
:rtype: np.ndarray[np.float]
"""
transformed_hparams = []
# loop through hparams
for hparam, hparam_spec in zip(hparams, self.items()):
if hparam_spec["type"] == "DOUBLE":
normalized_hparam = Searchspace._normalize_scalar(
hparam_spec["values"], hparam
)
transformed_hparams.append(normalized_hparam)
elif hparam_spec["type"] == "INTEGER":
normalized_hparam = Searchspace._normalize_integer(
hparam_spec["values"], hparam
)
transformed_hparams.append(normalized_hparam)
elif hparam_spec["type"] == "CATEGORICAL":
encoded_hparam = Searchspace._encode_categorical(
hparam_spec["values"], hparam
)
if normalize_categorical:
encoded_hparam = Searchspace._normalize_integer(
[0, len(hparam_spec["values"]) - 1], encoded_hparam
)
transformed_hparams.append(encoded_hparam)
else:
raise NotImplementedError("Not Implemented other types yet")
return transformed_hparams
def inverse_transform(self, transformed_hparams, normalize_categorical=False):
"""Returns array of hparams in same representation as specified when instantiated
:param transformed_hparams: hparams in transformed representation for one trial
:type transformed_hparams: 1D np.ndarray
:param normalize_categorical: If True, the encoded categorical hparam was also max-min normalized between 0 and 1
`transform()` must use the same value for this parameter
:type normalize_categorical: bool
:return: transformed hparams
:rtype: np.ndarray
"""
hparams = []
for hparam, hparam_spec in zip(transformed_hparams, self.items()):
if hparam_spec["type"] == "DOUBLE":
value = Searchspace._inverse_normalize_scalar(
hparam_spec["values"], hparam
)
hparams.append(value)
elif hparam_spec["type"] == "INTEGER":
value = Searchspace._inverse_normalize_integer(
hparam_spec["values"], hparam
)
hparams.append(value)
elif hparam_spec["type"] == "CATEGORICAL":
if normalize_categorical:
value = Searchspace._inverse_normalize_integer(
[0, len(hparam_spec["values"]) - 1], hparam
)
value = Searchspace._decode_categorical(
hparam_spec["values"], value
)
else:
value = Searchspace._decode_categorical(
hparam_spec["values"], hparam
)
hparams.append(value)
else:
raise NotImplementedError("Not Implemented other types yet")
return hparams
@staticmethod
def _encode_categorical(choices, value):
"""Encodes category to integer. The encoding is the list index of the category
:param choices: possible values of the categorical hparam
:type choices: list
:param value: category to encode
:type value: str
:return: encoded category
:rtype: int
"""
return choices.index(value)
@staticmethod
def _decode_categorical(choices, encoded_value):
"""Decodes integer to corresponding category value
:param choices: possible values of the categorical hparam
:type choices: list
:param encoded_value: encoding of category
:type encoded_value: int
:return: category value
:rtype: str
"""
encoded_value = int(
encoded_value
) # it is possible that value gets casted to np.float by numpy
return choices[encoded_value]
@staticmethod
def _normalize_scalar(bounds, scalar):
"""Returns max-min normalized scalar
:param bounds: list containing lower and upper bound, e.g.: [-3,3]
:type bounds: list
:param scalar: scalar value to be normalized
:type scalar: float
:return: normalized scalar
:rtype: float
"""
scalar = float(scalar)
scalar = (scalar - bounds[0]) / (bounds[1] - bounds[0])
scalar = np.minimum(1.0, scalar)
scalar = np.maximum(0.0, scalar)
return scalar
@staticmethod
def _inverse_normalize_scalar(bounds, normalized_scalar):
"""Returns inverse normalized scalar
:param bounds: list containing lower and upper bound, e.g.: [-3,3]
:type bounds: list
:param normalized_scalar: normalized scalar value
:type normalized_scalar: float
:return: original scalar
:rtype: float
"""
normalized_scalar = float(normalized_scalar)
normalized_scalar = normalized_scalar * (bounds[1] - bounds[0]) + bounds[0]
return normalized_scalar
@staticmethod
def _normalize_integer(bounds, integer):
"""
:param bounds: list containing lower and upper bound, e.g.: [-3,3]
:type bounds: list
:param integer: value to be normalized
:type normalized_scalar: int
:return: normalized value between 0 and 1
:rtype: float
"""
integer = int(integer)
return Searchspace._normalize_scalar(bounds, integer)
@staticmethod
def _inverse_normalize_integer(bounds, scalar):
"""Returns inverse normalized scalar
:param bounds: list containing lower and upper bound, e.g.: [-3,3]
:type bounds: list
:param normalized_scalar: normalized scalar value
:type normalized_scalar: float
:return: original integer
:rtype: int
"""
x = Searchspace._inverse_normalize_scalar(bounds, scalar)
return int(np.round(x))
@staticmethod
def dict_to_list(hparams):
"""Transforms dict of hparams to list representation ( for one hparam config )
example:
{'x': -3.0, 'y': 3.0, 'z': 'green'} to [-3.0, 3.0, 'green']
:param hparams: hparams in dict representation
:type hparams: dict
:return: hparams in list representation
:rtype: list
"""
return list(hparams.values())
def list_to_dict(self, hparams):
"""Transforms list of hparams to dict representation ( for one hparam config )
example:
[-3.0, 3.0, 'green'] to {'x': -3.0, 'y': 3.0, 'z': 'green'}
:param hparams: hparams in list representation
:type hparams: list
:return: hparams in dict representation
:rtype: dict
"""
hparam_names = self.keys()
if len(hparam_names) != len(hparams):
raise ValueError(
"hparam_names and hparams have to have same length (and order!)"
)
hparam_dict = {
hparam_name: hparam for hparam_name, hparam in zip(hparam_names, hparams)
}
return hparam_dict
| 37.485417 | 121 | 0.562997 | 17,338 | 0.963597 | 0 | 0 | 3,384 | 0.188073 | 0 | 0 | 9,175 | 0.509921 |
0f4d1234aeefbd86b891c4d016b9bc7565fad846 | 440 | py | Python | examples/fiducials.py | alisterburt/yet-another-imod-wrapper | c99eb751ccca59d83400f734dd8af621954ecd5c | [
"BSD-3-Clause"
] | null | null | null | examples/fiducials.py | alisterburt/yet-another-imod-wrapper | c99eb751ccca59d83400f734dd8af621954ecd5c | [
"BSD-3-Clause"
] | null | null | null | examples/fiducials.py | alisterburt/yet-another-imod-wrapper | c99eb751ccca59d83400f734dd8af621954ecd5c | [
"BSD-3-Clause"
] | null | null | null | from pathlib import Path
import numpy as np
from yet_another_imod_wrapper.fiducials import run_fiducial_based_alignment
TEST_DATA_DIR = Path(__file__).parent.parent / 'tilt_series'
run_fiducial_based_alignment(
tilt_series_file=TEST_DATA_DIR / 'my_prefix_TS_01.mrc',
tilt_angles=np.arange(-60, 63, 3),
pixel_size=1.35,
fiducial_size=10,
nominal_rotation_angle=85,
output_directory=Path('test_TS_01_fiducials')
)
| 25.882353 | 75 | 0.786364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 56 | 0.127273 |
0f4db0e6bc89de8ac27da4ed1b34ec1251d19ce7 | 6,107 | py | Python | Testing/daemon_Fake_Dev.py | nandor1992/FogOfThings | c412c26bfbd31162683e57b3dc2b5a0a5f21d9b0 | [
"Apache-2.0"
] | 1 | 2020-06-23T10:41:33.000Z | 2020-06-23T10:41:33.000Z | Testing/daemon_Fake_Dev.py | nandor1992/FogOfThings | c412c26bfbd31162683e57b3dc2b5a0a5f21d9b0 | [
"Apache-2.0"
] | null | null | null | Testing/daemon_Fake_Dev.py | nandor1992/FogOfThings | c412c26bfbd31162683e57b3dc2b5a0a5f21d9b0 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import couchdb
import pika
import ast
import time
import threading
import ctypes
import datetime
import sys
from daemon import Daemon
import ConfigParser
import logging
t=time
t.clock()
PIDFILE="/home/pi/FogOfThings/Device/pid/fake_dev.pid"
Config=ConfigParser.ConfigParser()
Config.read("/home/pi/FogOfThings/Device/config.ini")
LOGFILE = Config.get("Log","location")+'/fake_dev.log'
logging.basicConfig(filename=LOGFILE,level=logging.DEBUG)
logging.getLogger("pika").setLevel(logging.ERROR)
class Listener():
def __init__(self,c_user,c_pass,user,passw,port,virt,que,dev,rte,rate):
logging.debug("Initialized!")
self.c_user=c_user
self.c_pass=c_pass
self.couch=couchdb.Server('http://'+self.c_user+':'+self.c_pass+'@127.0.0.1:5984/')
self.credentials = pika.PlainCredentials(user,passw)
self.parameters = pika.ConnectionParameters('localhost',port,virt,self.credentials)
self.connection = pika.BlockingConnection(self.parameters);
self.channel = self.connection.channel()
self.channel.basic_qos(prefetch_count=1)
self.device="Python_Dev"
self.channel.basic_consume(self.on_request, queue=que,no_ack=True)
self.sum=0.0
self.rec_time=t.time()
self.t_start=datetime.datetime.now()
self.count=0
self.proc=0.0
self.first=0;
self.rate=rate
self.t_start=datetime.datetime.now()
self.C_Dev=dev
self.C_Rate=rte
def saveToCouch(self,data):
db=self.couch['monitoring']
data['type']="Driver"
data['Gateway']=Config.get("General","gateway_name")
data['Device']=self.C_Dev
data['Rate']=self.C_Rate
logging.debug(data)
db.save(data)
def on_request(self,ch, method, properties, body):
global t
global rate
data=ast.literal_eval(body)
#logging.debug("---------------Message Received-----------")
#logging.debug("Cnt:"+str(self.count))
end=t.time()
#logging.debug(data['start_time'])
#logging.debug(end)
start=float(data['start_time'])
#logging.debug("Elapsed: "+str((end-start)*1000))
#logging.debug("Processing: "+str(int(data['proc_time'])/1000.0))
if end-self.rec_time >= self.rate:
logging.debug("Started at:"+str(self.t_start))
data=self.summary()
data['date']=self.t_start.strftime("%Y-%m-%d %H:%M:%S")
logging.debug(data)
self.t_start=datetime.datetime.now()
self.sum=0.0
self.count=0
self.proc=0.0
self.rec_time=end
self.saveToCouch(data)
else:
self.sum=self.sum+(end-start)*1000
self.count=self.count+1
self.proc=self.proc+int(data['proc_time'])/1000.0
def putData(data):
db=self.couch['monitoring']
db.save(data);
def stop(self):
self.channel.stop_consuming()
self.channel.close()
self.connection.close()
logging.debug("Stopped Listening!")
def summary(self):
return {'response_time':self.sum/self.count,'proc_time':self.proc/self.count}
def run(self):
logging.debug("Started Listening!")
self.channel.start_consuming()
class Poster(threading.Thread):
global running
running = True
def __init__(self,c_user,c_pass,user,passw,port,virt,rate,dev):
logging.debug("Initialized!")
threading.Thread.__init__(self)
self.c_user=c_user
self.c_pass=c_pass
self.couch=couchdb.Server('http://'+self.c_user+':'+self.c_pass+'@127.0.0.1:5984/')
self.credentials = pika.PlainCredentials(user,passw)
self.parameters = pika.ConnectionParameters('localhost',port,virt,self.credentials)
self.connection = pika.BlockingConnection(self.parameters);
self.channel = self.connection.channel()
self.channel.basic_qos(prefetch_count=1)
self.device=dev
self.rate=10.0/rate
self.count=0
def send_request(self):
global t
message_amqp=t.time()
properties_m=pika.BasicProperties(headers={'device':self.device})
self.channel.basic_publish(exchange='device', routing_key='', body="{:10.8f}".format(message_amqp), properties=properties_m)
def stop(self):
global running
running = False
def run(self):
global running
logging.debug("Started Posting!")
# while not self.stopMe:
# self.send_request()
# time.sleep(self.rate)
while running:
self.send_request()
time.sleep(self.rate)
self.channel.close()
self.connection.close()
logging.debug("Posting Done!")
class Fake_Dev():
def init(self,dev,rate,que):
self.l=Listener("admin","hunter","admin","hunter",5672,"test",que,dev,rate,10)
self.p=Poster("admin","hunter","admin","hunter",5672,"test",rate,dev)
def run(self):
self.p.start()
self.l.run()
def stop(self):
try:
self.p.stop()
time.sleep(2)
self.l.stop()
except AttributeError:
pass
logging.debug("Interrupt Keyboard - Stop!")
if __name__ == "__main__":
up=Fake_Dev()
try:
for arg in sys.argv:
part=arg.split("=")
if part[0][2:]=="device":
dev=part[1]
elif part[0][2:]=="rate":
rate=float(part[1])
elif part[0][2:]=="que":
que=part[1]
if dev!=None and rate!=None and que!=None:
msg_rate=rate*10
else:
exit()
up.init(dev,rate,que)
up.run()
except Exception , e:
logging.debug(e)
up.stop()
logging.debug("Exiting Main Thread - Keyboard")
except KeyboardInterrupt:
up.stop()
logging.debug("Exiting Main Thread - Keyboard")
| 33.190217 | 132 | 0.594727 | 4,876 | 0.798428 | 0 | 0 | 0 | 0 | 0 | 0 | 1,069 | 0.175045 |
0f4dea2186bb00ab02de2842112fe6cfb2d318d8 | 3,663 | py | Python | src/mavelp/kernel_methods.py | nanoMFG/VELP | 08225e863a9e9811fcd9e09cd32abc97719fbb7b | [
"MIT"
] | null | null | null | src/mavelp/kernel_methods.py | nanoMFG/VELP | 08225e863a9e9811fcd9e09cd32abc97719fbb7b | [
"MIT"
] | 1 | 2020-12-11T19:18:17.000Z | 2020-12-15T02:59:14.000Z | src/mavelp/kernel_methods.py | nanoMFG/VELP | 08225e863a9e9811fcd9e09cd32abc97719fbb7b | [
"MIT"
] | 1 | 2021-03-16T02:04:12.000Z | 2021-03-16T02:04:12.000Z | import numpy as np
from sklearn.kernel_ridge import KernelRidge
from sklearn.model_selection import GridSearchCV
from sklearn.gaussian_process import GaussianProcessRegressor
import sklearn.gaussian_process.kernels as Kernels
from scipy.optimize import minimize
from numpy.linalg import norm
import tensorflow as tf
from tensorflow.contrib.layers import fully_connected as tf_layer
class Kernel_Optimization():
def __init__(self, dict_mat=None, kernel_type='RBF', CV=5, X=np.array([[1,2],[2,3],[3,4]]) , y=np.array([[1],[2],[3]]),
All_material = ['K+','P-']):
self._kernel_type = kernel_type
self.All_material = All_material
kernel = getattr(Kernels,kernel_type)
self.dict_mat = dict_mat
if kernel_type =='ExpSineSquared':
param_grid = {"alpha": [1e0, 1e-1, 1e-2, 1e-3],
"kernel": [kernel(length_scale=l,periodicity=p)
for l in np.logspace(-2, 2, 500)
for p in np.logspace(-2, 2, 500)]}
elif kernel_type =='RBF':
param_grid = {"alpha": [1e0, 1e-1, 1e-2, 1e-3],
"kernel": [kernel(length_scale=l)
for l in np.logspace(-2, 2, 100)]}
self._CV = CV
self.kr= GridSearchCV(KernelRidge(), cv=self._CV, param_grid=param_grid)
self.X , self.y = X, y
self.kr.fit(self.X, self.y)
def kr_func(self, x):
return self.kr.predict(x)
def constraint(self, x):
''' Create Constraints for physically-consistent solvent decomposition
sum_cat x_i = 1.0 & sum_an x_i = 1.0 , x_i > 0 for both cation and anaion
'''
n_cations = 0
n_anions = 0
for k in self.All_material:
if k[-1] =='+':
n_cations += 1
else:
n_anions += 1
n_constraints = len(self.All_material)+ 2
for cnt, m in enumerate(self.All_material):
if m[:-1] in self.dict_mat.keys():
n_constraints -= 1
if x[cnt] <0 or x[cnt] > 1:
n_constraints += 1
val_constraints = np.zeros((n_constraints))
cat_list = []
an_list = []
# active (user selected) materials constraints
for k, v in self.dict_mat.items():
if v =='+':
cat_list.append(k)
if v =='-':
an_list.append(k)
cnt = 2
for i in range(len(self.All_material)):
if self.All_material[i][:-1] in cat_list:
val_constraints[0] += x[i]
elif self.All_material[i][:-1] in an_list:
val_constraints[1] += x[i]
else:
val_constraints[cnt] += x[i]
cnt += 1
if x[i] < 0 or x[i] > 1:
val_constraints[cnt] += x[i]
cnt += 1
val_constraints[0] -= 1.0
val_constraints[1] -= 1.0
return val_constraints
def minimize_func(self, optimal, sig,i=0):
if i==0:
optimal = self.X[np.random.randint(self.X.shape[0])]
def funct(x):
const = self.constraint(x)
f = 0
for i in range(len(const)):
f += sig*max(0.0, const[i]**2)
return self.kr_func(x) + f
res = minimize(funct, optimal, method='nelder-mead', options={'xtol': 1e-16, 'disp': False, 'maxiter': 1000})
optimal = res.x
return optimal
| 34.885714 | 123 | 0.514606 | 3,277 | 0.894622 | 0 | 0 | 0 | 0 | 0 | 0 | 322 | 0.087906 |
0f4e902dad9f3ec645661170ae5c6747b0ba3683 | 307 | py | Python | menpo/io/input/landmark_image.py | yuxiang-zhou/menpo | 01deaf3808cbe7a3d9db5542ac9d9f53cd81743a | [
"BSD-3-Clause"
] | null | null | null | menpo/io/input/landmark_image.py | yuxiang-zhou/menpo | 01deaf3808cbe7a3d9db5542ac9d9f53cd81743a | [
"BSD-3-Clause"
] | 1 | 2019-03-09T16:01:46.000Z | 2019-03-09T16:01:46.000Z | menpo/io/input/landmark_image.py | yuxiang-zhou/menpo | 01deaf3808cbe7a3d9db5542ac9d9f53cd81743a | [
"BSD-3-Clause"
] | 1 | 2020-05-01T09:55:57.000Z | 2020-05-01T09:55:57.000Z | from functools import partial
from .landmark import asf_importer, pts_importer
asf_image_importer = partial(asf_importer, image_origin=True)
asf_image_importer.__doc__ = asf_importer.__doc__
pts_image_importer = partial(pts_importer, image_origin=True)
pts_image_importer.__doc__ = pts_importer.__doc__
| 27.909091 | 61 | 0.85342 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0f502bd0a6d75cd12fc714caaff3cdee0cd284dd | 2,603 | py | Python | corpus2alpino/targets/filesystem.py | UUDigitalHumanitieslab/folia2alpino | 4181955ccecadd02e311a548d67b17c09bfd81d4 | [
"MIT"
] | 2 | 2018-09-13T15:47:11.000Z | 2021-06-02T16:06:11.000Z | corpus2alpino/targets/filesystem.py | UUDigitalHumanitieslab/folia2alpino | 4181955ccecadd02e311a548d67b17c09bfd81d4 | [
"MIT"
] | 8 | 2018-07-24T15:34:35.000Z | 2021-05-03T10:29:28.000Z | corpus2alpino/targets/filesystem.py | UUDigitalHumanitieslab/folia2alpino | 4181955ccecadd02e311a548d67b17c09bfd81d4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from corpus2alpino.abstracts import Target
from corpus2alpino.models import Document
from os import path, makedirs
from pathlib import Path
from typing import cast, Any
class FilesystemTarget(Target):
"""
Output chunks to a file using newline separators.
"""
__current_output_path = None
def __open_file(self, document: Document, filename: str = None, suffix: str = None):
if not self.merge_files:
output_path = path.join(self.output_path,
document.collected_file.relpath,
document.collected_file.filename)
if document.subpath:
output_path = path.join(output_path, document.subpath)
if filename != None:
output_path = path.join(output_path, cast(str, filename))
if suffix != None:
output_path = str(
Path(output_path).with_suffix(cast(str, suffix)))
if self.__current_output_path != output_path:
if self.file: # type: ignore
self.file.close() # type: ignore
self.__current_output_path = output_path # type: ignore
directory, filename = path.split(output_path)
makedirs(directory, exist_ok=True)
self.file = self.__open_unique(directory, filename)
def __open_unique(self, directory: str, filename: str):
attempts = 0
prefix = ""
while True:
if attempts > 0:
prefix = f"{attempts}-"
target = Path(path.join(directory, prefix + filename))
if not target.is_file():
# new file!
return target.open('w', encoding='utf-8')
attempts += 1
def __init__(self, output_path: str, merge_files=False) -> None:
self.output_path = output_path
self.index = 1
self.merge_files = merge_files
if self.merge_files:
# using a single file
self.file = open(output_path, 'w', encoding='utf-8')
else:
self.file = None # type: ignore
def write(self,
document: Document,
content: str,
filename: str = None,
suffix: str = None):
self.__open_file(document, filename, suffix)
if self.file:
self.file.write(content)
def flush(self):
return
def close(self):
"""
Release resources.
"""
if self.file:
self.file.close()
| 32.135802 | 88 | 0.558202 | 2,407 | 0.924702 | 0 | 0 | 0 | 0 | 0 | 0 | 253 | 0.097196 |
0f508fc8a3488c8b14e0ec286b215e156a8f0aea | 1,633 | py | Python | Substring with Concatenation of All Words.py | sugia/leetcode | 6facec2a54d1d9f133f420c9bce1d1043f57ebc6 | [
"Apache-2.0"
] | null | null | null | Substring with Concatenation of All Words.py | sugia/leetcode | 6facec2a54d1d9f133f420c9bce1d1043f57ebc6 | [
"Apache-2.0"
] | null | null | null | Substring with Concatenation of All Words.py | sugia/leetcode | 6facec2a54d1d9f133f420c9bce1d1043f57ebc6 | [
"Apache-2.0"
] | null | null | null | '''
You are given a string, s, and a list of words, words, that are all of the same length. Find all starting indices of substring(s) in s that is a concatenation of each word in words exactly once and without any intervening characters.
Example 1:
Input:
s = "barfoothefoobarman",
words = ["foo","bar"]
Output: [0,9]
Explanation: Substrings starting at index 0 and 9 are "barfoor" and "foobar" respectively.
The output order does not matter, returning [9,0] is fine too.
Example 2:
Input:
s = "wordgoodstudentgoodword",
words = ["word","student"]
Output: []
'''
class Solution(object):
def findSubstring(self, s, words):
"""
:type s: str
:type words: List[str]
:rtype: List[int]
"""
res = []
if not s or not words or not words[0]:
return res
wdic = {}
for word in words:
if word in wdic:
wdic[word] += 1
else:
wdic[word] = 1
for i in xrange(len(s) - len(words) * len(words[0]) + 1):
tdic = {}
for j in xrange(len(words)):
tmp = s[i + j * len(words[0]): i + (j+1) * len(words[0])]
if tmp in wdic:
if tmp in tdic:
tdic[tmp] += 1
else:
tdic[tmp] = 1
if tdic[tmp] > wdic[tmp]:
break
if tdic == wdic:
res.append(i)
else:
break
return res
| 27.677966 | 233 | 0.471525 | 1,033 | 0.632578 | 0 | 0 | 0 | 0 | 0 | 0 | 669 | 0.409675 |
0f50e2935cc8d40993b0200cc3b73be06a60af61 | 372 | py | Python | renomearArquivo.py | MarianaFRocha/Manipulacao-de-Arquivos | ee1c23e98579cb987d5df159536c6f366657efb0 | [
"MIT"
] | null | null | null | renomearArquivo.py | MarianaFRocha/Manipulacao-de-Arquivos | ee1c23e98579cb987d5df159536c6f366657efb0 | [
"MIT"
] | null | null | null | renomearArquivo.py | MarianaFRocha/Manipulacao-de-Arquivos | ee1c23e98579cb987d5df159536c6f366657efb0 | [
"MIT"
] | null | null | null | import os
# exemplo alterado de EX_10.5.py para 10_5.py
for nome in os.listdir('./Minicurso/Minicurso API'):
# alterar conforme sua necessidade de geração de nomes e layout de arquivos
os.rename("./Minicurso/Minicurso API/"+nome, "./Minicurso/Minicurso API/"+nome+"_Minicurso_API.png")
print("arquivo " + nome + " alterado para " +nome+"_Minicurso_API") | 46.5 | 104 | 0.712366 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 273 | 0.729947 |
0f510ccb2bf6d059b49802e8ca3cb256a8c92416 | 1,939 | py | Python | setup.py | JoffreyN/HTMLReport | 5216eeed4b30219dcef7d162f8ad6575d0e8b553 | [
"MIT"
] | 4 | 2018-08-17T03:58:41.000Z | 2022-01-19T06:52:21.000Z | setup.py | JoffreyN/HTMLReport | 5216eeed4b30219dcef7d162f8ad6575d0e8b553 | [
"MIT"
] | null | null | null | setup.py | JoffreyN/HTMLReport | 5216eeed4b30219dcef7d162f8ad6575d0e8b553 | [
"MIT"
] | 3 | 2018-11-09T03:47:31.000Z | 2022-01-19T07:23:47.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
if sys.version_info < (3, 5):
raise RuntimeError("The minimum support Python 3.5")
from setuptools import find_packages
from setuptools import setup
from HTMLReport import __version__, __author__
try:
from pypandoc import convert
read_md = convert('README.md', 'rst')
except ImportError:
print("warning: pypandoc module not found, could not convert Markdown to RST")
read_md = open('README.md', 'r', encoding="utf-8").read()
setup(
name='HTMLReport',
version=__version__,
description="Python3 Unittest HTML报告生成器",
long_description=read_md,
author=__author__,
author_email='liushilive@outlook.com',
url='https://github.com/liushilive/HTMLReport',
project_urls={
'The report template': 'https://liushilive.github.io/report/report/#en',
'报告样板': 'https://liushilive.github.io/report/report/#cn'
},
packages=find_packages(),
package_dir={'HTMLReport': 'HTMLReport'},
include_package_data=True,
license="MIT license",
zip_safe=False,
keywords='HtmlTestRunner test runner html reports unittest',
classifiers=[
'Development Status :: 6 - Mature',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Testing :: Unit',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: User Interfaces',
'License :: OSI Approved :: MIT License',
'Natural Language :: Chinese (Simplified)',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
test_suite='tests'
)
| 34.625 | 83 | 0.635379 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,083 | 0.553398 |
0f51ec37186fd8a1db6bbc54e44a5583d0b59c2f | 426 | py | Python | batch.py | SergioLaRosa/downloaderdude | 7f3fea4d5019d199b1ae2f5a62c66a4550321e67 | [
"MIT"
] | null | null | null | batch.py | SergioLaRosa/downloaderdude | 7f3fea4d5019d199b1ae2f5a62c66a4550321e67 | [
"MIT"
] | null | null | null | batch.py | SergioLaRosa/downloaderdude | 7f3fea4d5019d199b1ae2f5a62c66a4550321e67 | [
"MIT"
] | null | null | null | # URLs processed simultaneously
class Batch():
def __init__(self):
self._batch = 0
def set_batch(self, n_batch):
try:
self._batch = n_batch
except BaseException:
print("[ERROR] Can't set task batch number.")
def get_batch(self):
try:
return self._batch
except BaseException:
print("[ERROR] Can't get task batch number.")
| 21.3 | 57 | 0.570423 | 391 | 0.91784 | 0 | 0 | 0 | 0 | 0 | 0 | 107 | 0.251174 |
0f524b0eb768223ca18a95403ff45b448471be06 | 2,712 | py | Python | workflow/scripts/download_flyxcdb_data.py | tomasMasson/wiring-molecules | 71a77de122a558a56fd7b9a735c1fc00e4a32c90 | [
"MIT"
] | null | null | null | workflow/scripts/download_flyxcdb_data.py | tomasMasson/wiring-molecules | 71a77de122a558a56fd7b9a735c1fc00e4a32c90 | [
"MIT"
] | null | null | null | workflow/scripts/download_flyxcdb_data.py | tomasMasson/wiring-molecules | 71a77de122a558a56fd7b9a735c1fc00e4a32c90 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"Download Drosophila melanogaster extracellular domain batabase (FlyXCDB) table, published in the Journal of Molecular Biology"
import click
import requests
import pandas as pd
from bs4 import BeautifulSoup
def scrape_url(url):
"""
Scrap the content from the input URL using Beautiful Soup.
"""
# Retrieve the data using a request
r = requests.get(url)
# Get the content in HTML format
html = r.text
# Parse the HTML file with BS4
soup = BeautifulSoup(html, 'html.parser')
# Print a brief report
print('FlyXCDB was succesfully scraped')
return soup
def extract_flyxcdb_data(html, output):
"""
Parse the HTML data from FlyXCDB and write the table into a the output file.
"""
# Search for any kind of tabular data (ignores descriptions and supporting data)
table = html.find_all('table')
# Define the table that we are interested (in this case, we have only one table)
table = table[0]
# Extract the columns headers for the CSV table
headers = [th.text.strip()
for th in table.find('tr').find_all('th')]
# Now, extract the data fields for each row
rows = []
# First, iterate over all the record rows
for tr in table.find_all('tr')[1:]:
# Then, create a list to put all the individual data fields
cells = []
# Find these data fields
tds = tr.find_all('td')
# Iterate over the data fields and append them to cells (remove trailing characters)
for td in tds:
cells.append(td.text.strip())
# Finally, add all the extracted data to the rows list
rows.append(cells)
# Create a Pandas DataFrame using the columns and rows list
df = pd.DataFrame(rows, columns=headers)
# Save the DataFrame to a CSV file
df.to_csv(output, index=False)
print(f"FlyXCDB data has been saved to '{output}'")
# FlyXCDB data URL
URL = 'http://prodata.swmed.edu/FlyXCDB/info.list.new21_26.html'
# CLI options
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.command(context_settings=CONTEXT_SETTINGS)
@click.option('--html',
default=URL,
help="Url used to scrap the data from FlyXCDB. Default is FlyXCDB URL")
@click.option('--output',
default='flyxcdb_data.csv',
help="Output name for the scraped table. Defaults is 'flyxcdb_data.csv'")
# CLI main function
def command_line_interface(html, output):
"""
Provides the HTML address and the output file name through the CLI
"""
content = scrape_url(html)
extract_flyxcdb_data(content, output)
if __name__ == '__main__':
command_line_interface()
| 29.802198 | 127 | 0.669985 | 0 | 0 | 0 | 0 | 567 | 0.209071 | 0 | 0 | 1,532 | 0.564897 |
0f559b826d34c24f7992bcda817728b961f99017 | 17,662 | py | Python | common/updatefiles.py | cheersalam/webrtc | 29e9cad470e6f8a95e2fc32bc111c8550c2ef0ca | [
"BSD-3-Clause"
] | null | null | null | common/updatefiles.py | cheersalam/webrtc | 29e9cad470e6f8a95e2fc32bc111c8550c2ef0ca | [
"BSD-3-Clause"
] | null | null | null | common/updatefiles.py | cheersalam/webrtc | 29e9cad470e6f8a95e2fc32bc111c8550c2ef0ca | [
"BSD-3-Clause"
] | null | null | null |
playerFilesWin = {
"lib/avcodec-56.dll" : { "flag_deps" : True, "should_be_removed" : True },
"lib/avformat-56.dll" : { "flag_deps" : True, "should_be_removed" : True },
"lib/avutil-54.dll" : { "flag_deps" : True, "should_be_removed" : True },
"lib/swscale-3.dll" : { "flag_deps" : True, "should_be_removed" : True },
"lib/gen_files_list.py" : { "flag_deps" : True, "should_be_removed" : True },
"lib/crashrpt_lang.ini" : { "flag_deps" : True, "should_be_removed" : True },
"lib/CrashSender.exe" : { "flag_deps" : True, "should_be_removed" : True },
"lib/avcodec-57.dll" : { "flag_deps" : True, "should_be_removed" : True },
"lib/avformat-57.dll" : { "flag_deps" : True, "should_be_removed" : True },
"lib/avutil-55.dll" : { "flag_deps" : True, "should_be_removed" : True },
"lib/swscale-4.dll" : { "flag_deps" : True, "should_be_removed" : True },
"lib/avcodec-58.dll" : { "flag_deps" : True},
"lib/avformat-58.dll" : { "flag_deps" : True},
"lib/avutil-56.dll" : { "flag_deps" : True},
"lib/swscale-5.dll" : { "flag_deps" : True},
"lib/cef.pak" : { "flag_deps" : True },
"lib/cef_100_percent.pak" : { "flag_deps" : True },
"lib/cef_200_percent.pak" : { "flag_deps" : True },
"lib/cef_extensions.pak" : { "flag_deps" : True },
"lib/chrome_elf.dll" : { "flag_deps" : True },
"lib/d3dcompiler_43.dll" : { "flag_deps" : True },
"lib/d3dcompiler_47.dll" : { "flag_deps" : True },
"lib/devtools_resources.pak" : { "flag_deps" : True },
"lib/icudtl.dat" : { "flag_deps" : True },
"lib/libcef.dll" : { "flag_deps" : True },
"lib/libEGL.dll" : { "flag_deps" : True },
"lib/libGLESv2.dll" : { "flag_deps" : True },
"lib/libGLESv1_CM.dll" : { "flag_deps" : True },
"lib/angle_util.dll" : { "flag_deps" : True },
"lib/natives_blob.bin" : { "flag_deps" : True },
"lib/snapshot_blob.bin" : { "flag_deps" : True },
"lib/v8_context_snapshot.bin" : { "flag_deps" : True },
"lib/widevinecdmadapter.dll" : { "flag_deps" : True, "should_be_removed" : True},
"lib/images/sample_app.png" : { "flag_deps" : True, "should_be_removed" : True},
"lib/locales/am.pak" : { "flag_deps" : True },
"lib/locales/ar.pak" : { "flag_deps" : True },
"lib/locales/bg.pak" : { "flag_deps" : True },
"lib/locales/bn.pak" : { "flag_deps" : True },
"lib/locales/ca.pak" : { "flag_deps" : True },
"lib/locales/cs.pak" : { "flag_deps" : True },
"lib/locales/da.pak" : { "flag_deps" : True },
"lib/locales/de.pak" : { "flag_deps" : True },
"lib/locales/el.pak" : { "flag_deps" : True },
"lib/locales/en-GB.pak" : { "flag_deps" : True },
"lib/locales/en-US.pak" : { "flag_deps" : True },
"lib/locales/es-419.pak" : { "flag_deps" : True },
"lib/locales/es.pak" : { "flag_deps" : True },
"lib/locales/et.pak" : { "flag_deps" : True },
"lib/locales/fa.pak" : { "flag_deps" : True },
"lib/locales/fi.pak" : { "flag_deps" : True },
"lib/locales/fil.pak" : { "flag_deps" : True },
"lib/locales/fr.pak" : { "flag_deps" : True },
"lib/locales/gu.pak" : { "flag_deps" : True },
"lib/locales/he.pak" : { "flag_deps" : True },
"lib/locales/hi.pak" : { "flag_deps" : True },
"lib/locales/hr.pak" : { "flag_deps" : True },
"lib/locales/hu.pak" : { "flag_deps" : True },
"lib/locales/id.pak" : { "flag_deps" : True },
"lib/locales/it.pak" : { "flag_deps" : True },
"lib/locales/ja.pak" : { "flag_deps" : True },
"lib/locales/kn.pak" : { "flag_deps" : True },
"lib/locales/ko.pak" : { "flag_deps" : True },
"lib/locales/lt.pak" : { "flag_deps" : True },
"lib/locales/lv.pak" : { "flag_deps" : True },
"lib/locales/ml.pak" : { "flag_deps" : True },
"lib/locales/mr.pak" : { "flag_deps" : True },
"lib/locales/ms.pak" : { "flag_deps" : True },
"lib/locales/nb.pak" : { "flag_deps" : True },
"lib/locales/nl.pak" : { "flag_deps" : True },
"lib/locales/pl.pak" : { "flag_deps" : True },
"lib/locales/pt-BR.pak" : { "flag_deps" : True },
"lib/locales/pt-PT.pak" : { "flag_deps" : True },
"lib/locales/ro.pak" : { "flag_deps" : True },
"lib/locales/ru.pak" : { "flag_deps" : True },
"lib/locales/sk.pak" : { "flag_deps" : True },
"lib/locales/sl.pak" : { "flag_deps" : True },
"lib/locales/sr.pak" : { "flag_deps" : True },
"lib/locales/sv.pak" : { "flag_deps" : True },
"lib/locales/sw.pak" : { "flag_deps" : True },
"lib/locales/ta.pak" : { "flag_deps" : True },
"lib/locales/te.pak" : { "flag_deps" : True },
"lib/locales/th.pak" : { "flag_deps" : True },
"lib/locales/tr.pak" : { "flag_deps" : True },
"lib/locales/uk.pak" : { "flag_deps" : True },
"lib/locales/vi.pak" : { "flag_deps" : True },
"lib/locales/zh-CN.pak" : { "flag_deps" : True },
"lib/locales/zh-TW.pak" : { "flag_deps" : True },
# "lib/localhost.pack" : {},
"lib/u2ec.dll" : { "should_be_removed" : True },
"lib/collector.dll" : { "should_be_removed" : True },
"lib/logging.dll" : {},
"lib/rtspclient.dll" : { "should_be_removed" : True },
"lib/crash_reporter.cfg" : {},
"lib/benchmark.data" : { "should_be_removed" : True },
"lib/background.ts" : { "should_be_removed" : True },
"lib/benchmark_fullhd_hi.data" : {},
"lib/benchmark_fullhd_low.data" : { "should_be_removed" : True },
"lib/benchmark_hdready_hi.data" : {},
"lib/benchmark_hdready_low.data" : { "should_be_removed" : True },
"lib/config.ini" : { "may_be_modified" : True, "may_be_removed" : True },
"quickreset.exe" : { "should_be_removed" : True },
"lib/LiquidSkyHelper.exe" : { "should_be_removed" : True },
"lib/lang_en.ini" : { "should_be_removed" : True },
"lib/render-cef.dll" : { "should_be_removed" : True },
"lib/player-cef.dll" : { "should_be_removed" : True },
"lib/render-cuda.dll" : { "should_be_removed" : True },
"lib/render-ffmpeg.dll" : {},
"lib/render-ffmpeg-hw.dll" : { "should_be_removed" : True},
"lib/usb_driver.exe" : { "should_be_removed" : True },
#default liquidsky version
"LiquidSkyClient.exe" : {},
"lib/LiquidSky.exe" : {},
"lib/usb_driver.msi" : { "should_be_removed" : True },
"lib/UsbHelper.exe" : { "should_be_removed" : True },
"lib/Vivien.exe" : { "should_be_removed" : True },
"VivienClient.exe" : { "should_be_removed" : True },
}
#if you add any new file to verizon, please make sure that it is included to main list too
playerFilesWinCast = {
"lib/usb_driver.msi" : {},
"lib/UsbHelper.exe" : {},
"lib/Vivien.exe" : {},
"VivienClient.exe" : {},
"lib/LiquidSky.exe" : { "should_be_removed" : True },
"LiquidSkyClient.exe" : { "should_be_removed" : True },
"lib/localhost1.pack" : { "should_be_removed" : True },
"lib/localhost2.pack" : { "should_be_removed" : True },
"lib/localhost3.pack" : { "should_be_removed" : True },
"lib/localhost4.pack" : { "should_be_removed" : True },
}
playerFilesMac = {
"Frameworks/Chromium Embedded Framework.framework/Chromium Embedded Framework" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/am.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/ar.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/bg.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/bn.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/ca.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/cef.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/cef_100_percent.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/cef_200_percent.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/cef_extensions.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/cs.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/da.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/de.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/devtools_resources.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/el.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/en.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/en_GB.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/es.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/es_419.lproj/locale.pak": { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/et.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/fa.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/fi.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/fil.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/fr.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/gu.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/he.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/hi.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/hr.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/hu.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/icudtl.dat" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/id.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/Info.plist" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/it.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/ja.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/kn.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/ko.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/lt.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/lv.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/ml.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/mr.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/ms.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/natives_blob.bin" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/nb.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/nl.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/pl.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/pt_BR.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/pt_PT.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/ro.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/ru.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/sk.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/sl.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/snapshot_blob.bin" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/sr.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/sv.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/sw.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/ta.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/te.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/th.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/tr.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/uk.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/vi.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/zh_CN.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/Chromium Embedded Framework.framework/Resources/zh_TW.lproj/locale.pak" : { "access_mode" : "644" },
"Frameworks/LiquidSky Helper.app/Contents/Info.plist" : { "access_mode" : "644" },
"Frameworks/LiquidSky Helper.app/Contents/MacOS/LiquidSky Helper" : { "access_mode" : "755" },
"Info.plist" : { "access_mode" : "644" },
"MacOS/libavcodec.dylib" : { "access_mode" : "644" },
"MacOS/libavformat.dylib" : { "access_mode" : "644" },
"MacOS/libavutil.dylib" : { "access_mode" : "644" },
"MacOS/libcollector.dylib" : { "access_mode" : "644", "should_be_removed" : True },
"MacOS/liblogging.dylib" : { "access_mode" : "644" },
"MacOS/libplayer-cef.dylib" : { "access_mode" : "644" },
"MacOS/librender-ffmpeg-hw.dylib" : { "access_mode" : "644" },
"MacOS/librender-ffmpeg.dylib" : { "access_mode" : "644" },
"MacOS/librtspclient.dylib" : { "access_mode" : "644" },
"MacOS/libswscale.dylib" : { "access_mode" : "644" },
"MacOS/LiquidSky" : { "access_mode" : "755", "should_be_removed" : True },
"MacOS/liquidsky-client" : { "access_mode" : "755" },
"Resources/background.ts" : { "access_mode" : "644", "should_be_removed" : True },
"Resources/benchmark_fullhd_hi.data" : { "access_mode" : "644", "should_be_removed" : True },
"Resources/benchmark_hdready_hi.data" : { "access_mode" : "644", "should_be_removed" : True },
"Resources/lang_en.ini" : { "access_mode" : "644" },
"Resources/liquidsky.icns" : { "access_mode" : "644" },
"Resources/localhost.pack" : { "access_mode" : "644" },
}
streamerFilesWin = {
"avcodec-57.dll" : { "should_be_removed" : True },
"avformat-57.dll" : { "should_be_removed" : True },
"avutil-55.dll" : { "should_be_removed" : True },
"swscale-4.dll" : { "should_be_removed" : True },
"avcodec-58.dll" : {},
"avformat-58.dll" : {},
"avutil-56.dll" : {},
"swscale-5.dll" : {},
"crashrpt_lang.ini" : {},
"CrashSender.exe" : {},
"osk-monitor.exe" : {},
"osk.exe" : {},
"sky.cfg" : { "may_be_modified" : True, "may_be_removed" : True },
"streamer-service.exe" : {},
"streamer-updater.exe" : { "should_be_removed" : True },
"update-executor.exe" : {},
"streamer.exe" : { "should_be_removed" : True },
"SkyBeam.exe" : {},
"storagetool.exe" : {},
"setuptool.exe" : {},
"vmhelper.dll" : {},
"logging.dll" : {},
"changelang.exe" : {},
"app-sender.exe" : {},
"process-monitor.exe" : {},
"webrtc-lib.dll" : {},
"lsfb.exe" : {},
}
streamerFilesWinCast = {
"usb-service.exe" : {},
}
branchIniDisabled = {
"lib/branch.ini" : { "should_be_removed" : True },
}
branchIniEnabled = {
"lib/branch.ini" : {},
}
targets = {
"player-win" : { "files" : playerFilesWin, "path" : ["buildproject", "player"], "brands" : { "cast" : playerFilesWinCast }, },
"player-mac" : { "files" : playerFilesMac, "path" : ["buildproject", "LiquidSky.app", "Contents"], "brands" : {}, },
"streamer-win" : { "files" : streamerFilesWin, "path" : ["buildproject", "streamer"], "brands" : { "cast" : streamerFilesWinCast }, },
}
| 60.486301 | 154 | 0.630563 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12,522 | 0.70898 |
0f58ae9c56e868959385146e7aab8ad6eb80f5da | 2,344 | py | Python | models/base_model.py | siyuhuang/PoseStylizer | d1d832781ddfd3efde24bf32b36a4074fafebcc1 | [
"BSD-3-Clause"
] | 75 | 2020-07-20T01:33:15.000Z | 2022-03-26T11:55:20.000Z | models/base_model.py | siyuhuang/PoseStylizer | d1d832781ddfd3efde24bf32b36a4074fafebcc1 | [
"BSD-3-Clause"
] | 12 | 2020-07-20T12:16:11.000Z | 2022-01-13T19:31:04.000Z | models/base_model.py | siyuhuang/PoseStylizer | d1d832781ddfd3efde24bf32b36a4074fafebcc1 | [
"BSD-3-Clause"
] | 16 | 2020-07-20T01:19:16.000Z | 2022-03-26T09:34:35.000Z | import os
import torch
import torch.nn as nn
import numpy as np
import pickle
class BaseModel(nn.Module):
def __init__(self):
super(BaseModel, self).__init__()
def name(self):
return 'BaseModel'
def initialize(self, opt):
self.opt = opt
self.gpu_ids = opt.gpu_ids
self.isTrain = opt.isTrain
self.Tensor = torch.cuda.FloatTensor if self.gpu_ids else torch.Tensor
self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)
def set_input(self, input):
self.input = input
def forward(self):
pass
# used in test time, no backprop
def test(self):
pass
def get_image_paths(self):
pass
def optimize_parameters(self):
pass
def get_current_visuals(self):
return self.input
def get_current_errors(self):
return {}
def save(self, label):
pass
# helper saving function that can be used by subclasses
def save_network(self, network, network_label, epoch_label, gpu_ids, epoch, total_steps):
save_filename = '%s_%s.pth' % (epoch_label, network_label)
save_infoname = '%s.pkl' % (epoch_label)
save_path = os.path.join(self.save_dir, save_filename)
save_infoname = os.path.join(self.save_dir, save_infoname)
torch.save(network.cpu().state_dict(), save_path)
network.cuda()
info = {'epoch':epoch, 'total_steps':total_steps}
filehandler = open(save_infoname, "wb")
pickle.dump(info, filehandler)
filehandler.close()
# helper loading function that can be used by subclasses
def load_network(self, network, network_label, epoch_label):
save_filename = '%s_%s.pth' % (epoch_label, network_label)
save_path = os.path.join(self.save_dir, save_filename)
if os.path.exists(save_path):
network.load_state_dict(torch.load(save_path))
print("Found checkpoints. Network loaded.")
else:
print("Not found checkpoints. Network from scratch.")
# update learning rate (called once every epoch)
def update_learning_rate(self):
for scheduler in self.schedulers:
scheduler.step()
lr = self.optimizers[0].param_groups[0]['lr']
print('learning rate = %.7f' % lr)
| 30.441558 | 93 | 0.636092 | 2,264 | 0.96587 | 0 | 0 | 0 | 0 | 0 | 0 | 364 | 0.15529 |
0f5971ebdf54a0b419e55ff08d2e2bce549802f0 | 983 | py | Python | backend/tuber/migrations/versions/4ae40638e863_adding_hotel_block_to_requests.py | bitbyt3r/2ber | db1689010072c877cc83bd5b5922922456616094 | [
"MIT"
] | 6 | 2019-12-06T07:32:22.000Z | 2021-12-05T19:14:03.000Z | backend/tuber/migrations/versions/4ae40638e863_adding_hotel_block_to_requests.py | bitbyt3r/2ber | db1689010072c877cc83bd5b5922922456616094 | [
"MIT"
] | 99 | 2019-10-03T05:21:16.000Z | 2022-01-21T02:35:25.000Z | backend/tuber/migrations/versions/4ae40638e863_adding_hotel_block_to_requests.py | bitbyt3r/2ber | db1689010072c877cc83bd5b5922922456616094 | [
"MIT"
] | 6 | 2019-09-30T22:01:55.000Z | 2021-11-03T20:53:00.000Z | """Adding hotel block to requests
Revision ID: 4ae40638e863
Revises: 1708acb6e515
Create Date: 2021-11-15 20:42:51.723559
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '4ae40638e863'
down_revision = '1708acb6e515'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('hotel_room_request', schema=None) as batch_op:
batch_op.add_column(sa.Column('hotel_block', sa.Integer(), nullable=True))
batch_op.create_foreign_key(None, 'hotel_room_block', ['hotel_block'], ['id'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('hotel_room_request', schema=None) as batch_op:
batch_op.drop_constraint(None, type_='foreignkey')
batch_op.drop_column('hotel_block')
# ### end Alembic commands ###
| 28.085714 | 86 | 0.709054 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 490 | 0.498474 |
0f5990fc41a4d76565a7455e7e0dbf6f8cf899b9 | 8,660 | py | Python | xoinvader/collision.py | pankshok/xoinvader | f25e30431d4898f20626056c12853595857b4f02 | [
"MIT"
] | 13 | 2016-06-26T10:30:14.000Z | 2021-09-04T19:39:14.000Z | xoinvader/collision.py | pkulev/xoinvader | f25e30431d4898f20626056c12853595857b4f02 | [
"MIT"
] | 70 | 2016-10-10T18:27:17.000Z | 2020-05-18T11:27:59.000Z | xoinvader/collision.py | pankshok/xoinvader | f25e30431d4898f20626056c12853595857b4f02 | [
"MIT"
] | null | null | null | """Collision detection system and component module."""
import functools
import logging
import re
import weakref
from xoinvader import app
from xoinvader.utils import Point
LOG = logging.getLogger(__name__)
COLLISIONS = {}
"""Global mapping TypePair <=> [callable]."""
class CollisionManagerNotFound(Exception):
"""Raises on try to register collider without instantiated manager."""
def __init__(self):
super(CollisionManagerNotFound, self).__init__(
"You can't use Collider objects without "
"CollisionManager. Please create it first."
)
class TypePair(object):
"""Class for hashable ordered string pairs.
Used as collision dictionary keys, containing pair of collider types.
Not commutative, TypePair(a, b) != TypePair(b, a)
It's needed to store and get exact handler as it was registered.
:param str first: first collider type
:param str second: second collider type
"""
def __init__(self, first, second):
self._first = first
self._second = second
self._pair = first + "_" + second
@property
def first(self):
"""First collider type.
:getter: yes
:setter: no
:type: type
"""
return self._first
@property
def second(self):
"""Second collider type.
:getter: yes
:setter: no
:type: type
"""
return self._second
def __eq__(self, other):
return self._pair == other._pair # pylint: disable=protected-access
def __hash__(self):
return hash(self._pair)
def __str__(self):
return "TypePair({0}, {1})".format(self._first, self._second)
def register(left, right):
"""Collision handler registration decorator.
.. Note:: Argument order matters! Handler must belong to first object.
:param str left: first collidable object
:param str right: right collidable object
"""
def decorator(handler):
COLLISIONS.setdefault(TypePair(left, right), []).append(handler)
@functools.wraps(handler)
def handle(*args, **kwargs):
return handler(*args, **kwargs) # pragma: no cover
return handle
return decorator
class CollisionManager(object):
"""Class for collision detection between known components.
To process collisions, first update the positions of all objects of
interest, then call `update` method. It will traverse all registered
collisions (between pairs of types) and call appropriate handlers in order
they were registered for the two types of colliding objects.
If you just want to check, if two Colliders collide, call `check_collision`
on them.
"""
# Marker of solid matter inside collider physics map
SOLID_MATTER = "#"
def __init__(self):
self._colliders = weakref.WeakSet()
self._collisions = COLLISIONS
def add(self, collider):
"""Add collider.
:param :class:`xoinvader.collision.Collider` collider:
"""
LOG.debug("Adding collider %s\n pos: %s", collider, collider.pos)
self._colliders.add(collider)
def remove(self, collider):
"""Remove collider.
:param :class:`xoinvader.collision.Collider` collider:
"""
LOG.debug("Removing collider %s\n pos %s", collider, collider.pos)
self._colliders.remove(collider)
# pylint: disable=too-many-nested-blocks
def update(self):
"""Detect and process all collisions."""
for pair in self._collisions:
colliders_type_1 = [
item for item in self._colliders if item.col_type == pair.first
]
colliders_type_2 = [
item for item in self._colliders if item.col_type == pair.second
]
for collider_1 in colliders_type_1:
for collider_2 in colliders_type_2:
collision_rect = self.check_collision(
collider_1, collider_2
)
if collision_rect:
for callback in self._collisions[pair]:
callback(
collider_1.obj, collider_2.obj, collision_rect
)
# pylint: disable=too-many-locals
@staticmethod
def check_collision(col_1, col_2):
"""Check collisions between two colliders.
Returns `None` if no collision occured, or returns rectangle of
overlapping region between collider maps.
:param col1: first collider
:type col1: :class:`Collider`
:param col2: second collider
:type col2: :class:`Collider`
:rtype: tuple of two :class:`Point`
"""
width_1 = max(map(len, col_1.phys_map))
height_1 = len(col_1.phys_map)
topleft_1 = col_1.pos
botright_1 = topleft_1 + Point(width_1, height_1)
width_2 = max(map(len, col_2.phys_map))
height_2 = len(col_2.phys_map)
topleft_2 = col_2.pos
botright_2 = topleft_2 + Point(width_2, height_2)
if (
topleft_1.x >= botright_2.x
or topleft_1.y >= botright_2.y
or botright_1.x <= topleft_2.x
or botright_1.y <= topleft_2.y
):
# Definelty not overlapping
return
# Now find where exactly overlapping occured
topleft_overlap = Point(
max(topleft_1.x, topleft_2.x), max(topleft_1.y, topleft_2.y)
)
botright_overlap = Point(
min(botright_1.x, botright_2.x), min(botright_1.y, botright_2.y)
)
# Now find if they actually collided
# first, calculate offsets
overlap_1 = Point()
overlap_2 = Point()
overlap_1.x = abs(topleft_1.x - topleft_overlap.x)
overlap_1.y = abs(topleft_1.y - topleft_overlap.y)
overlap_2.x = abs(topleft_2.x - topleft_overlap.x)
overlap_2.y = abs(topleft_2.y - topleft_overlap.y)
# iterate over overlapping region
# and search for collision
for i in range(botright_overlap.x - topleft_overlap.x):
for j in range(botright_overlap.y - topleft_overlap.y):
# TODO: check length of current y-level string
# it might be not enough to contain i + ox1/2 element
if (
col_1.phys_map[j + overlap_1.y][i + overlap_1.x]
== col_2.phys_map[j + overlap_2.y][i + overlap_2.x]
== CollisionManager.SOLID_MATTER
):
return (topleft_overlap, botright_overlap)
class Collider(object):
"""Collider component class.
When added to object, enables it to participate in coliision processing
system: i.e. to be able to detect and process collisions between the object
and other ones.
.. Attention:: CollisionManager must be created first and must be
accessible via State.
:param object obj: GameObject to which the collider is linked
:param list phys_map: list of strings representing collider physical
geometry. All strings must be of equal length. Class member
SOLID_MATTER of CollisionManager represents solid geometry, all other
chars are treated as void space and may be any.
"""
def __init__(self, obj, phys_map):
self._obj = obj
self._col_type = self._obj.type
self._phys_map = phys_map
# TODO: move collision to State.systems
try:
app.current().state.collision.add(self)
except:
raise CollisionManagerNotFound()
@classmethod
def simple(cls, obj):
"""Make simple collider based on object's image.
All characters except space considered as solid matter.
"""
return cls(
obj,
[
re.sub(r"[^\ ]", CollisionManager.SOLID_MATTER, row)
for row in obj.image.raw.image
],
)
@property
def phys_map(self):
"""Collider physical geometry.
:getter: yes
:setter: no
:type: list
"""
return self._phys_map
@property
def col_type(self):
"""Collider type name.
:getter: yes
:setter: no
:type: str
"""
return self._col_type
@property
def pos(self):
"""Collider's left top position.
:getter: yes
:setter: no
:type: :class:`Point`
"""
return self._obj.pos[int]
@property
def obj(self):
return self._obj
| 29.657534 | 80 | 0.60254 | 7,837 | 0.904965 | 0 | 0 | 3,740 | 0.431871 | 0 | 0 | 3,749 | 0.43291 |
0f5b07565ad9c9887805e72a860f709924e9bf7f | 1,816 | py | Python | test/testing/test_pandas_assert.py | S-aiueo32/gokart | 00475f36513da3fba65b44a10ae97246e0690b54 | [
"MIT"
] | 255 | 2019-01-02T01:31:37.000Z | 2022-03-29T13:35:18.000Z | test/testing/test_pandas_assert.py | S-aiueo32/gokart | 00475f36513da3fba65b44a10ae97246e0690b54 | [
"MIT"
] | 208 | 2018-12-25T04:32:24.000Z | 2022-03-28T07:28:59.000Z | test/testing/test_pandas_assert.py | S-aiueo32/gokart | 00475f36513da3fba65b44a10ae97246e0690b54 | [
"MIT"
] | 48 | 2019-01-11T06:22:05.000Z | 2022-02-11T07:38:42.000Z | import unittest
import pandas as pd
import gokart
class TestPandasAssert(unittest.TestCase):
def test_assert_frame_contents_equal(self):
expected = pd.DataFrame(data=dict(f1=[1, 2, 3], f3=[111, 222, 333], f2=[4, 5, 6]), index=[0, 1, 2])
resulted = pd.DataFrame(data=dict(f2=[5, 4, 6], f1=[2, 1, 3], f3=[222, 111, 333]), index=[1, 0, 2])
gokart.testing.assert_frame_contents_equal(resulted, expected)
def test_assert_frame_contents_equal_with_small_error(self):
expected = pd.DataFrame(data=dict(f1=[1.0001, 2.0001, 3.0001], f3=[111, 222, 333], f2=[4, 5, 6]), index=[0, 1, 2])
resulted = pd.DataFrame(data=dict(f2=[5, 4, 6], f1=[2.0002, 1.0002, 3.0002], f3=[222, 111, 333]), index=[1, 0, 2])
gokart.testing.assert_frame_contents_equal(resulted, expected, atol=1e-1)
def test_assert_frame_contents_equal_with_duplicated_columns(self):
expected = pd.DataFrame(data=dict(f1=[1, 2, 3], f3=[111, 222, 333], f2=[4, 5, 6]), index=[0, 1, 2])
expected.columns = ['f1', 'f1', 'f2']
resulted = pd.DataFrame(data=dict(f2=[5, 4, 6], f1=[2, 1, 3], f3=[222, 111, 333]), index=[1, 0, 2])
resulted.columns = ['f2', 'f1', 'f1']
with self.assertRaises(AssertionError):
gokart.testing.assert_frame_contents_equal(resulted, expected)
def test_assert_frame_contents_equal_with_duplicated_indexes(self):
expected = pd.DataFrame(data=dict(f1=[1, 2, 3], f3=[111, 222, 333], f2=[4, 5, 6]), index=[0, 1, 2])
expected.index = [0, 1, 1]
resulted = pd.DataFrame(data=dict(f2=[5, 4, 6], f1=[2, 1, 3], f3=[222, 111, 333]), index=[1, 0, 2])
expected.index = [1, 0, 1]
with self.assertRaises(AssertionError):
gokart.testing.assert_frame_contents_equal(resulted, expected)
| 47.789474 | 122 | 0.630507 | 1,761 | 0.969714 | 0 | 0 | 0 | 0 | 0 | 0 | 24 | 0.013216 |
0f5bc614d9041794b1473f521f648a2ae8fe38a7 | 1,523 | py | Python | binsdpy/similarity/group_b.py | mikulatomas/binsdpy | d54d452e5e7cf310747e94c5c94c4875add869ca | [
"MIT"
] | null | null | null | binsdpy/similarity/group_b.py | mikulatomas/binsdpy | d54d452e5e7cf310747e94c5c94c4875add869ca | [
"MIT"
] | 2 | 2022-02-08T19:45:51.000Z | 2022-02-23T15:21:01.000Z | binsdpy/similarity/group_b.py | mikulatomas/binsdpy | d54d452e5e7cf310747e94c5c94c4875add869ca | [
"MIT"
] | null | null | null | import math
from binsdpy.utils import operational_taxonomic_units, BinaryFeatureVector
def russell_rao(
x: BinaryFeatureVector, y: BinaryFeatureVector, mask: BinaryFeatureVector = None
) -> float:
"""Russel-Rao similarity
Russell, P. F., & Rao, T. R. (1940).
On habitat and association of species of anopheline larvae in south-eastern Madras.
Journal of the Malaria Institute of India, 3(1).
Rao, C. R. (1948).
The utilization of multiple measurements in problems of biological classification.
Journal of the Royal Statistical Society. Series B (Methodological), 10(2), 159-203.
Args:
x (BinaryFeatureVector): binary feature vector
y (BinaryFeatureVector): binary feature vector
Returns:
float: similarity of given vectors
"""
a, b, c, d = operational_taxonomic_units(x, y, mask)
return a / (a + b + c + d)
def consonni_todeschini3(
x: BinaryFeatureVector, y: BinaryFeatureVector, mask: BinaryFeatureVector = None
) -> float:
"""Consonni and Todeschini (v3)
Consonni, V., & Todeschini, R. (2012).
New similarity coefficients for binary data.
Match-Communications in Mathematical and Computer Chemistry, 68(2), 581.
Args:
x (BinaryFeatureVector): binary feature vector
y (BinaryFeatureVector): binary feature vector
Returns:
float: similarity of given vectors
"""
a, b, c, d = operational_taxonomic_units(x, y, mask)
return math.log(1 + a) / math.log(1 + a + b + c + d)
| 30.46 | 88 | 0.683519 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 980 | 0.643467 |
0f5d2a27cb70dddd601735fe0a2a3f6e251cb949 | 16,810 | py | Python | design/gpgpu/configs/gpu_protocol/VI_hammer_fusion.py | chisuhua/gem5 | 74694255d90e0892d5f5c4d13f6077cdb9b9cb95 | [
"BSD-3-Clause"
] | null | null | null | design/gpgpu/configs/gpu_protocol/VI_hammer_fusion.py | chisuhua/gem5 | 74694255d90e0892d5f5c4d13f6077cdb9b9cb95 | [
"BSD-3-Clause"
] | null | null | null | design/gpgpu/configs/gpu_protocol/VI_hammer_fusion.py | chisuhua/gem5 | 74694255d90e0892d5f5c4d13f6077cdb9b9cb95 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2006-2007 The Regents of The University of Michigan
# Copyright (c) 2009 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Brad Beckmann
import math
import m5
import VI_hammer
from m5.objects import *
from m5.defines import buildEnv
from topologies.Cluster import Cluster
class L1Cache(RubyCache): pass
class L2Cache(RubyCache): pass
def create_system(options, full_system, system, dma_devices, bootmem, ruby_system):
    """Build the fused CPU+GPU Ruby memory system for the VI_hammer protocol.

    Delegates CPU cluster, directory and DMA construction to
    VI_hammer.create_system(), then adds:
      * one GPUL1Cache controller + RubySequencer per shader core (num_sc),
      * num_l2caches banked GPUL2Cache controllers on per-bank clusters,
      * a pagewalk cache built from a coherent CPU L1Cache controller,
      * a copy-engine (GPUCopyDMA) controller on a bandwidth-limited cluster,
    and wires everything into a single top-level cluster.

    Args:
        options: parsed command-line options (core counts, cache sizes, ...).
        full_system: True for full-system simulation, False for SE mode.
        system: the gem5 System being configured.
        dma_devices: devices needing Ruby DMA controllers.
        bootmem: boot memory region (full-system only).
        ruby_system: the RubySystem all controllers attach to.

    Returns:
        Tuple (all_sequencers, dir_cntrls, complete_cluster).

    Note: the Python-2-only ``xrange`` calls were replaced with ``range`` and
    the ``exec()`` attribute assignments with ``setattr`` — both changes are
    behavior-preserving and make the script runnable under Python 3.
    """
    #if not buildEnv['GPGPU_SIM']:
    #    m5.util.panic("This script requires GPGPU-Sim integration to be built.")

    # Run the protocol script to setup CPU cluster, directory and DMA
    (all_sequencers, dir_cntrls, dma_cntrls, cpu_cluster) = \
        VI_hammer.create_system(options,
                                full_system,
                                system,
                                dma_devices,
                                bootmem,
                                ruby_system)
    cpu_cntrl_count = len(cpu_cluster) + len(dir_cntrls)

    #
    # Build GPU cluster
    #
    # Empirically, Fermi per-core bandwidth peaks at roughly 23GB/s
    # (32B/cycle @ 772MHz). Use ~16B per Ruby cycle to match this. Maxwell
    # per-core bandwidth peaks at 40GB/s (42B/cycle @ 1029MHz). Use ~24B per
    # Ruby cycle to match this.
    if options.gpu_core_config == 'Fermi':
        l1_cluster_bw = 16
    elif options.gpu_core_config == 'Maxwell':
        l1_cluster_bw = 24
    elif options.gpu_core_config == 'Volta':
        # FIXME on Volta bw
        l1_cluster_bw = 24
    elif options.gpu_core_config == 'Ppu':
        # FIXME on Ppu bw
        l1_cluster_bw = 24
    else:
        m5.util.fatal("Unknown GPU core config: %s" % options.gpu_core_config)
    gpu_cluster = Cluster(intBW = l1_cluster_bw, extBW = l1_cluster_bw)
    gpu_cluster.disableConnectToParent()

    l2_bits = int(math.log(options.num_l2caches, 2))
    block_size_bits = int(math.log(options.cacheline_size, 2))

    # This represents the L1 to L2 interconnect latency
    # NOTES! 1) This latency is in Ruby (cache) cycles, not SM cycles
    #        2) Since the cluster interconnect doesn't model multihop
    #           latencies, model these latencies with the controller latency
    #           variables. If the interconnect model is changed, latencies
    #           will need to be adjusted for reasonable total memory access
    #           delay.
    per_hop_interconnect_latency = 45  # ~15 GPU cycles
    num_dance_hall_hops = int(math.log(options.num_sc, 2))
    if num_dance_hall_hops == 0:
        num_dance_hall_hops = 1
    l1_to_l2_noc_latency = per_hop_interconnect_latency * num_dance_hall_hops

    #
    # Caches for GPU cores: one private L1 + sequencer per shader core.
    #
    for i in range(options.num_sc):
        cache = L1Cache(size = options.sc_l1_size,
                        assoc = options.sc_l1_assoc,
                        replacement_policy = TreePLRURP(),
                        start_index_bit = block_size_bits,
                        dataArrayBanks = 4,
                        tagArrayBanks = 4,
                        dataAccessLatency = 4,
                        tagAccessLatency = 4,
                        resourceStalls = False)
        l1_cntrl = GPUL1Cache_Controller(version = i,
                                         cache = cache,
                                         l2_select_num_bits = l2_bits,
                                         num_l2 = options.num_l2caches,
                                         transitions_per_cycle = options.ports,
                                         issue_latency = l1_to_l2_noc_latency,
                                         number_of_TBEs = options.gpu_l1_buf_depth,
                                         ruby_system = ruby_system)
        # Sequencer versions continue after the CPU sequencers.
        gpu_seq = RubySequencer(version = options.num_cpus + i,
                                icache = cache,
                                dcache = cache,
                                max_outstanding_requests = options.gpu_l1_buf_depth,
                                ruby_system = ruby_system,
                                deadlock_threshold = 2000000)
        l1_cntrl.sequencer = gpu_seq
        # Attach under a unique, zero-padded name (replaces the old exec()).
        setattr(ruby_system, "l1_cntrl_sp%02d" % i, l1_cntrl)
        # Add controllers and sequencers to the appropriate lists
        all_sequencers.append(gpu_seq)
        gpu_cluster.add(l1_cntrl)
        # Connect the controller to the network
        l1_cntrl.requestFromL1Cache = MessageBuffer(ordered = True)
        l1_cntrl.requestFromL1Cache.master = ruby_system.network.slave
        l1_cntrl.responseToL1Cache = MessageBuffer(ordered = True)
        l1_cntrl.responseToL1Cache.slave = ruby_system.network.master
        l1_cntrl.mandatoryQueue = MessageBuffer()

    l2_index_start = block_size_bits + l2_bits
    # Use L2 cache and interconnect latencies to calculate protocol latencies
    # (in Ruby cycles; see the multihop-modelling NOTES above).
    l2_cache_access_latency = 30  # ~10 GPU cycles
    l2_to_l1_noc_latency = per_hop_interconnect_latency * num_dance_hall_hops
    l2_to_mem_noc_latency = 125  # ~40 GPU cycles

    # Empirically, Fermi per-L2 bank bandwidth peaks at roughly 66GB/s
    # (92B/cycle @ 772MHz). Use ~34B per Ruby cycle to match this. Maxwell
    # per-L2 bank bandwidth peaks at 123GB/s (128B/cycle @ 1029MHz). Use ~64B
    # per Ruby cycle to match this.
    if options.gpu_core_config == 'Fermi':
        l2_cluster_bw = 34
    elif options.gpu_core_config == 'Maxwell':
        l2_cluster_bw = 68
    elif options.gpu_core_config == 'Volta':
        # FIXME
        l2_cluster_bw = 68
    elif options.gpu_core_config == 'Ppu':
        # FIXME
        l2_cluster_bw = 68
    else:
        m5.util.fatal("Unknown GPU core config: %s" % options.gpu_core_config)

    # Shared, banked GPU L2: one controller per bank, each on its own
    # bandwidth-limited cluster.
    l2_clusters = []
    for i in range(options.num_l2caches):
        l2_cache = L2Cache(size = options.sc_l2_size,
                           assoc = options.sc_l2_assoc,
                           start_index_bit = l2_index_start,
                           replacement_policy = TreePLRURP(),
                           dataArrayBanks = 4,
                           tagArrayBanks = 4,
                           dataAccessLatency = 4,
                           tagAccessLatency = 4,
                           resourceStalls = options.gpu_l2_resource_stalls)
        l2_cntrl = GPUL2Cache_Controller(version = i,
                                         L2cache = l2_cache,
                                         transitions_per_cycle = options.ports,
                                         l2_response_latency = l2_cache_access_latency +
                                                               l2_to_l1_noc_latency,
                                         l2_request_latency = l2_to_mem_noc_latency,
                                         cache_response_latency = l2_cache_access_latency,
                                         ruby_system = ruby_system)
        setattr(ruby_system, "l2_cntrl%d" % i, l2_cntrl)
        l2_cluster = Cluster(intBW = l2_cluster_bw, extBW = l2_cluster_bw)
        l2_cluster.add(l2_cntrl)
        gpu_cluster.add(l2_cluster)
        l2_clusters.append(l2_cluster)
        # Connect the controller to the network
        l2_cntrl.responseToL1Cache = MessageBuffer(ordered = True)
        l2_cntrl.responseToL1Cache.master = ruby_system.network.slave
        l2_cntrl.requestFromCache = MessageBuffer()
        l2_cntrl.requestFromCache.master = ruby_system.network.slave
        l2_cntrl.responseFromCache = MessageBuffer()
        l2_cntrl.responseFromCache.master = ruby_system.network.slave
        l2_cntrl.unblockFromCache = MessageBuffer()
        l2_cntrl.unblockFromCache.master = ruby_system.network.slave
        l2_cntrl.requestFromL1Cache = MessageBuffer(ordered = True)
        l2_cntrl.requestFromL1Cache.slave = ruby_system.network.master
        l2_cntrl.forwardToCache = MessageBuffer()
        l2_cntrl.forwardToCache.slave = ruby_system.network.master
        l2_cntrl.responseToCache = MessageBuffer()
        l2_cntrl.responseToCache.slave = ruby_system.network.master
        l2_cntrl.triggerQueue = MessageBuffer()

    ############################################################################
    # Pagewalk cache
    # NOTE: We use a CPU L1 cache controller here. This is to facilitate MMU
    #       cache coherence (as the GPU L1 caches are incoherent without
    #       flushes). The L2 cache is small, and should have minimal affect on
    #       the performance (see Section 6.2 of Power et al. HPCA 2014).
    pwd_cache = L1Cache(size = options.pwc_size,
                        assoc = 16,  # 64 is fully associative @ 8kB
                        replacement_policy = TreePLRURP(),
                        start_index_bit = block_size_bits,
                        resourceStalls = False)
    # Small cache since CPU L1 requires I and D
    pwi_cache = L1Cache(size = "512B",
                        assoc = 2,
                        replacement_policy = TreePLRURP(),
                        start_index_bit = block_size_bits,
                        resourceStalls = False)
    # Small cache since CPU L1 controller requires L2
    l2_cache = L2Cache(size = "512B",
                       assoc = 2,
                       start_index_bit = block_size_bits,
                       resourceStalls = False)
    l1_cntrl = L1Cache_Controller(version = options.num_cpus,
                                  L1Icache = pwi_cache,
                                  L1Dcache = pwd_cache,
                                  L2cache = l2_cache,
                                  send_evictions = False,
                                  transitions_per_cycle = options.ports,
                                  issue_latency = l1_to_l2_noc_latency,
                                  cache_response_latency = 1,
                                  l2_cache_hit_latency = 1,
                                  number_of_TBEs = options.gpu_l1_buf_depth,
                                  ruby_system = ruby_system)
    cpu_seq = RubySequencer(version = options.num_cpus + options.num_sc,
                            icache = pwd_cache,  # Never get data from pwi_cache
                            dcache = pwd_cache,
                            max_outstanding_requests = options.gpu_l1_buf_depth,
                            ruby_system = ruby_system,
                            deadlock_threshold = 2000000)
    l1_cntrl.sequencer = cpu_seq
    ruby_system.l1_pw_cntrl = l1_cntrl
    all_sequencers.append(cpu_seq)
    gpu_cluster.add(l1_cntrl)
    # Connect the buffers from the controller to the network
    l1_cntrl.requestFromCache = MessageBuffer()
    l1_cntrl.requestFromCache.master = ruby_system.network.slave
    l1_cntrl.responseFromCache = MessageBuffer()
    l1_cntrl.responseFromCache.master = ruby_system.network.slave
    l1_cntrl.unblockFromCache = MessageBuffer()
    l1_cntrl.unblockFromCache.master = ruby_system.network.slave
    # Connect the buffers from the network to the controller
    l1_cntrl.forwardToCache = MessageBuffer()
    l1_cntrl.forwardToCache.slave = ruby_system.network.master
    l1_cntrl.responseToCache = MessageBuffer()
    l1_cntrl.responseToCache.slave = ruby_system.network.master
    l1_cntrl.mandatoryQueue = MessageBuffer()
    l1_cntrl.triggerQueue = MessageBuffer()

    #
    # Create controller for the copy engine to connect to in GPU cluster
    # Cache is unused by controller
    #
    cache = L1Cache(size = "4096B", assoc = 2)
    # Setting options.ce_buffering = 0 indicates that the CE can use infinite
    # buffering, but we need to specify a finite number of outstanding
    # accesses that the CE is allowed to issue. Just set it to some large
    # number greater than normal memory access latencies to ensure that the
    # sequencer could service one access per cycle.
    max_out_reqs = options.ce_buffering
    if max_out_reqs == 0:
        max_out_reqs = 1024
    gpu_ce_seq = RubySequencer(version = options.num_cpus + options.num_sc+1,
                               icache = cache,
                               dcache = cache,
                               max_outstanding_requests = max_out_reqs,
                               support_inst_reqs = False,
                               ruby_system = ruby_system)
    gpu_ce_cntrl = GPUCopyDMA_Controller(version = 0,
                                         sequencer = gpu_ce_seq,
                                         transitions_per_cycle = options.ports,
                                         number_of_TBEs = max_out_reqs,
                                         ruby_system = ruby_system)
    gpu_ce_cntrl.responseFromDir = MessageBuffer(ordered = True)
    gpu_ce_cntrl.responseFromDir.slave = ruby_system.network.master
    gpu_ce_cntrl.reqToDirectory = MessageBuffer(ordered = True)
    gpu_ce_cntrl.reqToDirectory.master = ruby_system.network.slave
    gpu_ce_cntrl.mandatoryQueue = MessageBuffer()
    ruby_system.ce_cntrl = gpu_ce_cntrl
    all_sequencers.append(gpu_ce_seq)

    # To limit the copy engine's bandwidth, we add it to a limited bandwidth
    # cluster. Approximate settings are as follows (assuming 2GHz Ruby clock):
    #   PCIe v1.x x16 effective bandwidth ~= 4GB/s:  intBW = 3,  extBW = 3
    #   PCIe v2.x x16 effective bandwidth ~= 8GB/s:  intBW = 5,  extBW = 5
    #   PCIe v3.x x16 effective bandwidth ~= 16GB/s: intBW = 10, extBW = 10
    #   PCIe v4.x x16 effective bandwidth ~= 32GB/s: intBW = 21, extBW = 21
    # NOTE: Bandwidth may bottleneck at other parts of the memory hierarchy,
    # so bandwidth considerations should be made in other parts of the memory
    # hierarchy also.
    gpu_ce_cluster = Cluster(intBW = 10, extBW = 10)
    gpu_ce_cluster.add(gpu_ce_cntrl)

    # Assemble the top-level cluster from all sub-clusters and controllers.
    complete_cluster = Cluster(intBW = 32, extBW = 32)
    complete_cluster.add(gpu_ce_cluster)
    complete_cluster.add(cpu_cluster)
    complete_cluster.add(gpu_cluster)
    for cntrl in dir_cntrls:
        complete_cluster.add(cntrl)
    for cntrl in dma_cntrls:
        complete_cluster.add(cntrl)
    for cluster in l2_clusters:
        complete_cluster.add(cluster)
    return (all_sequencers, dir_cntrls, complete_cluster)
| 46.565097 | 88 | 0.612731 | 60 | 0.003569 | 0 | 0 | 0 | 0 | 0 | 0 | 5,642 | 0.335634 |
0f602012992f3815e49879e39ae8eeb897b0b859 | 11,102 | py | Python | testing/test_awswrangler/test_athena.py | stijndehaes/aws-data-wrangler | 4c95e87d2850ce028c00525c73567c77f83001c5 | [
"Apache-2.0"
] | null | null | null | testing/test_awswrangler/test_athena.py | stijndehaes/aws-data-wrangler | 4c95e87d2850ce028c00525c73567c77f83001c5 | [
"Apache-2.0"
] | null | null | null | testing/test_awswrangler/test_athena.py | stijndehaes/aws-data-wrangler | 4c95e87d2850ce028c00525c73567c77f83001c5 | [
"Apache-2.0"
] | 1 | 2019-09-19T13:09:42.000Z | 2019-09-19T13:09:42.000Z | import logging
import pytest
import boto3
from awswrangler import Session
from awswrangler.exceptions import QueryCancelled, QueryFailed
logging.basicConfig(
level=logging.INFO,
format="[%(asctime)s][%(levelname)s][%(name)s][%(funcName)s] %(message)s")
logging.getLogger("awswrangler").setLevel(logging.DEBUG)
@pytest.fixture(scope="module")
def cloudformation_outputs():
    """Yield the outputs of the test CloudFormation stack as a key/value dict."""
    description = boto3.client("cloudformation").describe_stacks(
        StackName="aws-data-wrangler-test-arena")
    yield {
        entry.get("OutputKey"): entry.get("OutputValue")
        for entry in description.get("Stacks")[0].get("Outputs")
    }
@pytest.fixture(scope="module")
def session():
    """Provide a single awswrangler Session shared by the whole module."""
    wrangler_session = Session()
    yield wrangler_session
@pytest.fixture(scope="module")
def database(cloudformation_outputs):
    """Yield the Glue database name exported by the test stack."""
    if "GlueDatabaseName" not in cloudformation_outputs:
        raise Exception(
            "You must deploy the test infrastructure using Cloudformation!")
    yield cloudformation_outputs["GlueDatabaseName"]
def test_query_cancelled(session, database):
    """Stopping a running query must make wait_query raise QueryCancelled.

    The query needs to run long enough for the cancellation to land before it
    finishes, so we generate a deliberately expensive SELECT of 1164 rand()
    expressions. Building the projection list programmatically replaces the
    previous ~100-line hand-written literal with the same shape (97 rows of
    12 comma-separated rand() calls).
    """
    client_athena = boto3.client("athena")
    rand_row = ", ".join(["rand()"] * 12)
    long_query = "SELECT\n" + ",\n".join([rand_row] * 97)
    query_execution_id = session.athena.run_query(query=long_query,
                                                  database=database)
    # Cancel immediately; the query is still executing at this point.
    client_athena.stop_query_execution(QueryExecutionId=query_execution_id)
    with pytest.raises(QueryCancelled):
        assert session.athena.wait_query(query_execution_id=query_execution_id)
def test_query_failed(session, database):
    """Waiting on a query that fails server-side must raise QueryFailed."""
    execution_id = session.athena.run_query(
        query="SELECT random(-1)", database=database)
    with pytest.raises(QueryFailed):
        assert session.athena.wait_query(query_execution_id=execution_id)
| 72.562092 | 95 | 0.532246 | 0 | 0 | 623 | 0.056116 | 719 | 0.064763 | 0 | 0 | 9,649 | 0.869123 |
0f634577290308b8a34cd2796dad52f018337f67 | 112 | py | Python | hello_world.py | hmallen/mrhawaii | 4b973c185e9acf030342435204480467d189f013 | [
"MIT"
] | null | null | null | hello_world.py | hmallen/mrhawaii | 4b973c185e9acf030342435204480467d189f013 | [
"MIT"
] | 1 | 2020-07-30T01:46:09.000Z | 2020-07-30T01:46:09.000Z | hello_world.py | hmallen/mrhawaii | 4b973c185e9acf030342435204480467d189f013 | [
"MIT"
] | null | null | null | # Hello world program to demonstrate running PYthon files
# Print the demo greetings one per line (output identical to the original).
greetings = ('Hello, world!', 'I live on a volcano!')
for greeting in greetings:
    print(greeting)
| 22.4 | 57 | 0.741071 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 94 | 0.839286 |
0f63dcf544f98cf5aa08276887974023d5184846 | 161 | py | Python | lib/JumpScale/lib/ms1/__init__.py | rudecs/jumpscale_core7 | 30c03f26f1cdad3edbb9d79d50fbada8acc974f5 | [
"Apache-2.0"
] | null | null | null | lib/JumpScale/lib/ms1/__init__.py | rudecs/jumpscale_core7 | 30c03f26f1cdad3edbb9d79d50fbada8acc974f5 | [
"Apache-2.0"
] | 4 | 2016-08-25T12:08:39.000Z | 2018-04-12T12:36:01.000Z | lib/JumpScale/lib/ms1/__init__.py | rudecs/jumpscale_core7 | 30c03f26f1cdad3edbb9d79d50fbada8acc974f5 | [
"Apache-2.0"
] | 3 | 2016-03-08T07:49:34.000Z | 2018-10-19T13:56:43.000Z | from JumpScale import j
def cb():
    """Factory callback: build an MS1Factory (the import is deferred until
    the callback actually runs)."""
    from .ms1 import MS1Factory
    factory = MS1Factory()
    return factory
# Make the 'tools' namespace available on j, then register the ms1 factory
# under it using the lazy cb() callback defined above.
j.base.loader.makeAvailable(j, 'tools')
j.tools._register('ms1', cb)
| 17.888889 | 39 | 0.708075 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.074534 |
0f641b2b38f0d89181b82feb7d73b51398cf8e53 | 1,052 | py | Python | erpnext/utilities/__init__.py | nagendrarawat/erpnext_custom | 1b94ecc3e66eae402347c302cd1663b690fb1ade | [
"MIT"
] | 2 | 2019-10-01T13:07:39.000Z | 2019-10-03T03:52:19.000Z | erpnext/utilities/__init__.py | nagendrarawat/erpnext_custom | 1b94ecc3e66eae402347c302cd1663b690fb1ade | [
"MIT"
] | null | null | null | erpnext/utilities/__init__.py | nagendrarawat/erpnext_custom | 1b94ecc3e66eae402347c302cd1663b690fb1ade | [
"MIT"
] | 3 | 2019-09-30T19:17:44.000Z | 2019-10-23T18:59:12.000Z | ## temp utility
from __future__ import print_function
import frappe
from erpnext.utilities.activation import get_level
from frappe.utils import cstr
def update_doctypes():
	"""One-off migration: upgrade description-like fields on child DocTypes
	from Text/Small Text to Text Editor.

	Scans every DocField whose fieldname contains "description" and whose
	parent DocType is a child table (istable = 1); for each match, rewrites
	the field's fieldtype and saves the DocType.
	"""
	for d in frappe.db.sql("""select df.parent, df.fieldname
		from tabDocField df, tabDocType dt where df.fieldname
		like "%description%" and df.parent = dt.name and dt.istable = 1""", as_dict=1):
		dt = frappe.get_doc("DocType", d.parent)
		for f in dt.fields:
			if f.fieldname == d.fieldname and f.fieldtype in ("Text", "Small Text"):
				print(f.parent, f.fieldname)
				f.fieldtype = "Text Editor"
				dt.save()
				# only the first matching field of this DocType is updated
				break
def get_site_info(site_info):
	"""Hook callback: report this site's company, domain and activation level."""
	company = frappe.db.get_single_value('Global Defaults', 'default_company')
	if not company:
		# No default configured: fall back to the oldest company, if any.
		rows = frappe.db.sql('select name from `tabCompany` order by creation asc')
		company = rows[0][0] if rows else None
	domain = None
	if company:
		domain = frappe.db.get_value('Company', cstr(company), 'domain')
	return {
		'company': company,
		'domain': domain,
		'activation': get_level()
	}
| 28.432432 | 81 | 0.713878 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 363 | 0.345057 |
0f6569a4372117c916cf30a866126442a8d5e66b | 6,742 | py | Python | test/workers/test_websocket_worker.py | theoptips/PySyft | 4b68c3c6fbe0c18cdf87dfe6ddc3c2071a71f1cc | [
"Apache-2.0"
] | 1 | 2019-07-23T20:36:18.000Z | 2019-07-23T20:36:18.000Z | test/workers/test_websocket_worker.py | theoptips/PySyft | 4b68c3c6fbe0c18cdf87dfe6ddc3c2071a71f1cc | [
"Apache-2.0"
] | null | null | null | test/workers/test_websocket_worker.py | theoptips/PySyft | 4b68c3c6fbe0c18cdf87dfe6ddc3c2071a71f1cc | [
"Apache-2.0"
] | 1 | 2021-02-12T12:11:44.000Z | 2021-02-12T12:11:44.000Z | import io
from os.path import exists, join
import time
from socket import gethostname
from OpenSSL import crypto, SSL
import pytest
import torch
from syft.workers import WebsocketClientWorker
from syft.workers import WebsocketServerWorker
@pytest.mark.parametrize("secure", [True, False])
def test_websocket_worker_basic(hook, start_proc, secure, tmpdir):
    """Evaluates that you can do basic tensor operations using
    WebsocketServerWorker in insecure and secure mode."""
    def create_self_signed_cert(cert_path, key_path):
        """Write a throwaway self-signed certificate/key pair for the TLS run."""
        # create a key pair
        k = crypto.PKey()
        k.generate_key(crypto.TYPE_RSA, 1024)
        # create a self-signed cert
        cert = crypto.X509()
        cert.gmtime_adj_notBefore(0)
        cert.gmtime_adj_notAfter(1000)  # short validity window (seconds)
        cert.set_pubkey(k)
        cert.sign(k, "sha1")
        # store keys and cert
        open(cert_path, "wb").write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
        open(key_path, "wb").write(crypto.dump_privatekey(crypto.FILETYPE_PEM, k))
    kwargs = {
        "id": "secure_fed" if secure else "fed",
        "host": "localhost",
        "port": 8766 if secure else 8765,  # distinct port per mode
        "hook": hook,
    }
    if secure:
        # Create cert and keys
        cert_path = tmpdir.join("test.crt")
        key_path = tmpdir.join("test.key")
        create_self_signed_cert(cert_path, key_path)
        kwargs["cert_path"] = cert_path
        kwargs["key_path"] = key_path
    process_remote_worker = start_proc(WebsocketServerWorker, **kwargs)
    time.sleep(0.1)  # give the server process time to start listening
    x = torch.ones(5)
    if secure:
        # unused args
        del kwargs["cert_path"]
        del kwargs["key_path"]
    kwargs["secure"] = secure
    local_worker = WebsocketClientWorker(**kwargs)
    x = x.send(local_worker)
    y = x + x
    y = y.get()
    assert (y == torch.ones(5) * 2).all()
    # delete the remote tensor before shutting down the connection
    del x
    local_worker.ws.shutdown()
    time.sleep(0.1)
    local_worker.remove_worker_from_local_worker_registry()
    process_remote_worker.terminate()
def test_websocket_workers_search(hook, start_proc):
    """Evaluates that a client can search and find tensors that belong
    to another party"""
    # Sample tensor to store on the server
    sample_data = torch.tensor([1, 2, 3, 4]).tag("#sample_data", "#another_tag")
    # Args for initializing the websocket server and client
    base_kwargs = {"id": "fed2", "host": "localhost", "port": 8767, "hook": hook}
    # NOTE: server_kwargs aliases base_kwargs (same dict object), so the
    # "data" entry below is also present in the client kwargs.
    server_kwargs = base_kwargs
    server_kwargs["data"] = [sample_data]
    process_remote_worker = start_proc(WebsocketServerWorker, **server_kwargs)
    time.sleep(0.1)  # give the server process time to start listening
    local_worker = WebsocketClientWorker(**base_kwargs)
    # Search for the tensor located on the server by using its tag
    results = local_worker.search("#sample_data", "#another_tag")
    assert results
    assert results[0].owner.id == "me"
    assert results[0].location.id == "fed2"
    # Search multiple times should still work
    results = local_worker.search("#sample_data", "#another_tag")
    assert results
    assert results[0].owner.id == "me"
    assert results[0].location.id == "fed2"
    local_worker.ws.shutdown()
    local_worker.ws.close()
    time.sleep(0.1)
    local_worker.remove_worker_from_local_worker_registry()
    process_remote_worker.terminate()
def test_list_objects_remote(hook, start_proc):
    """The client can list the objects currently held by the remote worker."""
    kwargs = {"id": "fed", "host": "localhost", "port": 8765, "hook": hook}
    process_remote_fed1 = start_proc(WebsocketServerWorker, **kwargs)
    time.sleep(0.1)  # give the server process time to start listening
    kwargs = {"id": "fed", "host": "localhost", "port": 8765, "hook": hook}
    local_worker = WebsocketClientWorker(**kwargs)
    x = torch.tensor([1, 2, 3]).send(local_worker)
    res = local_worker.list_objects_remote()
    # NOTE(review): eval() of the textual listing is tolerable only because
    # the server is spawned by this very test; never eval untrusted responses.
    res_dict = eval(res.replace("tensor", "torch.tensor"))
    assert len(res_dict) == 1
    y = torch.tensor([4, 5, 6]).send(local_worker)
    res = local_worker.list_objects_remote()
    res_dict = eval(res.replace("tensor", "torch.tensor"))
    assert len(res_dict) == 2
    # delete x before terminating the websocket connection
    del x
    del y
    time.sleep(0.1)
    local_worker.ws.shutdown()
    time.sleep(0.1)
    local_worker.remove_worker_from_local_worker_registry()
    process_remote_fed1.terminate()
def test_objects_count_remote(hook, start_proc):
    """objects_count_remote tracks sends and retrievals of remote tensors."""
    kwargs = {"id": "fed", "host": "localhost", "port": 8764, "hook": hook}
    process_remote_worker = start_proc(WebsocketServerWorker, **kwargs)
    time.sleep(0.1)  # give the server process time to start listening
    kwargs = {"id": "fed", "host": "localhost", "port": 8764, "hook": hook}
    local_worker = WebsocketClientWorker(**kwargs)
    x = torch.tensor([1, 2, 3]).send(local_worker)
    nr_objects = local_worker.objects_count_remote()
    assert nr_objects == 1
    y = torch.tensor([4, 5, 6]).send(local_worker)
    nr_objects = local_worker.objects_count_remote()
    assert nr_objects == 2
    # retrieving x brings it back locally, so the remote count drops by one
    x.get()
    nr_objects = local_worker.objects_count_remote()
    assert nr_objects == 1
    # delete remote object before terminating the websocket connection
    del y
    time.sleep(0.1)
    local_worker.ws.shutdown()
    time.sleep(0.1)
    local_worker.remove_worker_from_local_worker_registry()
    process_remote_worker.terminate()
def test_connect_close(hook, start_proc):
    """Remote objects survive a client close() followed by connect()."""
    kwargs = {"id": "fed", "host": "localhost", "port": 8763, "hook": hook}
    process_remote_worker = start_proc(WebsocketServerWorker, **kwargs)
    time.sleep(0.1)  # give the server process time to start listening
    kwargs = {"id": "fed", "host": "localhost", "port": 8763, "hook": hook}
    local_worker = WebsocketClientWorker(**kwargs)
    x = torch.tensor([1, 2, 3])
    x_ptr = x.send(local_worker)
    assert local_worker.objects_count_remote() == 1
    local_worker.close()
    time.sleep(0.1)
    local_worker.connect()
    # the server kept the tensor while the client connection was down
    assert local_worker.objects_count_remote() == 1
    x_val = x_ptr.get()
    assert (x_val == x).all()
    local_worker.ws.shutdown()
    time.sleep(0.1)
    process_remote_worker.terminate()
def test_websocket_worker_multiple_output_response(hook, start_proc):
    """Evaluates that you can do basic tensor operations using
    WebsocketServerWorker, including an operation (torch.sort) that
    produces multiple remote output tensors."""
    kwargs = {"id": "socket_multiple_output", "host": "localhost", "port": 8768, "hook": hook}
    process_remote_worker = start_proc(WebsocketServerWorker, **kwargs)
    time.sleep(0.1)  # give the server process time to start listening
    x = torch.tensor([1.0, 3, 2])
    local_worker = WebsocketClientWorker(**kwargs)
    x = x.send(local_worker)
    # torch.sort returns two results (values, indices); each arrives as a
    # separate remote pointer that must be fetched individually.
    p1, p2 = torch.sort(x)
    x1, x2 = p1.get(), p2.get()
    assert (x1 == torch.tensor([1.0, 2, 3])).all()
    assert (x2 == torch.tensor([0, 2, 1])).all()
    x.get()  # retrieve remote object before closing the websocket connection
    local_worker.ws.shutdown()
    process_remote_worker.terminate()
| 29.313043 | 94 | 0.673391 | 0 | 0 | 0 | 0 | 1,784 | 0.26461 | 0 | 0 | 1,395 | 0.206912 |
0f657e59d927029cc829b093d74f43fd3c3ab0fd | 13,195 | py | Python | PiCN/Simulations/BalancedForwardingStrategySimulation.py | DimaMansour/PiCN | 90ced1cde2a8fd457e873e8bbad1fd7c21bbe56b | [
"BSD-3-Clause"
] | null | null | null | PiCN/Simulations/BalancedForwardingStrategySimulation.py | DimaMansour/PiCN | 90ced1cde2a8fd457e873e8bbad1fd7c21bbe56b | [
"BSD-3-Clause"
] | null | null | null | PiCN/Simulations/BalancedForwardingStrategySimulation.py | DimaMansour/PiCN | 90ced1cde2a8fd457e873e8bbad1fd7c21bbe56b | [
"BSD-3-Clause"
] | null | null | null | """Simulate a Map Reduce Scenario where timeout prevention is required.
In this simulation we are using an Optimizer created for map reduce scenarios.
This improves the distribution of the computation no matter how the interest is formatted.
Scenario consists of a client, a distributor node (NFN0), four NFN nodes and four
repositories. Goal of the simulation is to issue several concurrent requests and
observe how they are balanced across the NFN nodes:
Client <--------> NFN0 <-*-----------> NFN1 <-----------> Repo1
\-----------> NFN2 <-----------> Repo2
\-----------> NFN3 <-----------> Repo3
\-----------> NFN4 <-----------> Repo4
"""
import abc
import queue
import unittest
import os
import shutil
import time
import _thread
import threading
from PiCN.Layers.LinkLayer.Interfaces import SimulationBus
from PiCN.Layers.LinkLayer.Interfaces import AddressInfo
from PiCN.Layers.NFNLayer.NFNOptimizer import MapReduceOptimizer
from PiCN.ProgramLibs.Fetch import Fetch
from PiCN.ProgramLibs.NFNForwarder import NFNForwarder
from PiCN.ProgramLibs.ICNForwarder import ICNForwarder
from PiCN.ProgramLibs.ICNDataRepository import ICNDataRepository
from PiCN.Layers.PacketEncodingLayer.Encoder import BasicEncoder, SimpleStringEncoder, NdnTlvEncoder
from PiCN.Packets import Content, Interest, Name
from PiCN.Mgmt import MgmtClient
class Fs_thread(threading.Thread):
    """Worker thread that issues a single fetch request for a given NFN name."""

    def __init__(self, name: Name, fetch_tool):
        super().__init__()
        # NOTE: this assignment goes through threading.Thread's `name` setter,
        # so it also relabels the thread itself with the NFN name.
        self.name = name
        self.fetch_tool = fetch_tool

    def run(self):
        # Delegate to a named method so the request can also be made synchronously.
        self.request_function()

    def request_function(self):
        # Fixed 10-second timeout for every fetch issued by this thread.
        self.fetch_tool.fetch_data(self.name, timeout=10)
class Initiation(unittest.TestCase):
    """Simulate a Map Reduce Scenario where timeout prevention is required."""
    @abc.abstractmethod
    def get_encoder(self) -> BasicEncoder:
        # Packet encoder used by every simulated node; subclasses may override
        # (e.g. to return NdnTlvEncoder instead).
        return SimpleStringEncoder
    def setUp(self):
        # Build the topology from the module docstring: a fetch tool, a
        # distributor forwarder, four NFN forwarders and four repositories,
        # all wired together over an in-process SimulationBus (no real sockets).
        self.encoder_type = self.get_encoder()
        self.simulation_bus = SimulationBus(packetencoder=self.encoder_type())
        self.fetch_tool1 = Fetch("distributor", None, 255, self.encoder_type(), interfaces=[self.simulation_bus.add_interface("fetchtool1")])
        self.distributor = NFNForwarder(port=0, encoder=self.encoder_type(),
                                        interfaces=[self.simulation_bus.add_interface("distributor")], log_level=255,
                                        ageing_interval=3)
        self.nfn1 = NFNForwarder(port=0, encoder=self.encoder_type(),
                                 interfaces=[self.simulation_bus.add_interface("nfn1")], log_level=255,
                                 ageing_interval=3)
        self.nfn2 = NFNForwarder(port=0, encoder=self.encoder_type(),
                                 interfaces=[self.simulation_bus.add_interface("nfn2")], log_level=255,
                                 ageing_interval=3)
        self.nfn3 = NFNForwarder(port=0, encoder=self.encoder_type(),
                                 interfaces=[self.simulation_bus.add_interface("nfn3")], log_level=255,
                                 ageing_interval=3)
        self.nfn4 = NFNForwarder(port=0, encoder=self.encoder_type(),
                                 interfaces=[self.simulation_bus.add_interface("nfn4")], log_level=255,
                                 ageing_interval=3)
        self.repo1 = ICNDataRepository("/tmp/repo1", Name("/repo/r1"), 0, 255, self.encoder_type(), False, False,
                                       [self.simulation_bus.add_interface("repo1")])
        self.repo2 = ICNDataRepository("/tmp/repo2", Name("/repo/r2"), 0, 255, self.encoder_type(), False, False,
                                       [self.simulation_bus.add_interface("repo2")])
        self.repo3 = ICNDataRepository("/tmp/repo3", Name("/repo/r3"), 0, 255, self.encoder_type(), False, False,
                                       [self.simulation_bus.add_interface("repo3")])
        self.repo4 = ICNDataRepository("/tmp/repo4", Name("/repo/r4"), 0, 255, self.encoder_type(), False, False,
                                       [self.simulation_bus.add_interface("repo4")])
        # Long PIT timeout and disabled content-store ageing on every NFN node.
        self.nfn1.icnlayer.pit.set_pit_timeout(50)
        self.nfn1.icnlayer.cs.set_cs_timeout(0)
        self.nfn2.icnlayer.pit.set_pit_timeout(50)
        self.nfn2.icnlayer.cs.set_cs_timeout(0)
        self.nfn3.icnlayer.pit.set_pit_timeout(50)
        self.nfn3.icnlayer.cs.set_cs_timeout(0)
        self.nfn4.icnlayer.pit.set_pit_timeout(50)
        self.nfn4.icnlayer.cs.set_cs_timeout(0)
        # Management clients talk to each forwarder's mgmt socket; the port is
        # read from the bound socket because the forwarders used port=0.
        self.mgmt_client0 = MgmtClient(self.distributor.mgmt.mgmt_sock.getsockname()[1])
        self.mgmt_client1 = MgmtClient(self.nfn1.mgmt.mgmt_sock.getsockname()[1])
        self.mgmt_client2 = MgmtClient(self.nfn2.mgmt.mgmt_sock.getsockname()[1])
        self.mgmt_client3 = MgmtClient(self.nfn3.mgmt.mgmt_sock.getsockname()[1])
        self.mgmt_client4 = MgmtClient(self.nfn4.mgmt.mgmt_sock.getsockname()[1])
    def tearDown(self):
        # Stop every component started in setUp/setup_faces_and_connections
        # and remove the temporary repository directories.
        self.distributor.stop_forwarder()
        self.nfn1.stop_forwarder()
        self.nfn2.stop_forwarder()
        self.nfn3.stop_forwarder()
        self.nfn4.stop_forwarder()
        self.repo1.stop_repo()
        self.repo2.stop_repo()
        self.repo3.stop_repo()
        self.repo4.stop_repo()
        self.fetch_tool1.stop_fetch()
        self.simulation_bus.stop_process()
        self.tearDown_repo()
    def setup_faces_and_connections(self):
        # Start all processes, then install the FIB entries that create the
        # fan-out shown in the module docstring.  The distributor registers
        # several faces under the same /lib prefix (one per NFN node), which
        # is what allows requests to be spread across the NFN nodes.
        self.distributor.start_forwarder()
        self.nfn1.start_forwarder()
        self.nfn2.start_forwarder()
        self.nfn3.start_forwarder()
        self.nfn4.start_forwarder()
        self.repo1.start_repo()
        self.repo2.start_repo()
        self.repo3.start_repo()
        self.repo4.start_repo()
        self.simulation_bus.start_process()
        time.sleep(3)
        # setup forwarding rules
        self.mgmt_client0.add_face("nfn1", None, 0)
        self.mgmt_client0.add_forwarding_rule(Name("/lib"), [0])
        self.mgmt_client0.add_face("nfn2", None, 0)
        self.mgmt_client0.add_forwarding_rule(Name("/lib"), [1])
        self.mgmt_client0.add_face("nfn3", None, 0)
        self.mgmt_client0.add_forwarding_rule(Name("/lib"), [2])
        self.mgmt_client0.add_face("nfn4", None, 0)
        self.mgmt_client0.add_forwarding_rule(Name("/lib4/func4"), [3])
        # Each NFN node routes its repo prefix to its own repository...
        self.mgmt_client1.add_face("repo1", None, 0)
        self.mgmt_client1.add_forwarding_rule(Name("/repo/r1"), [0])
        self.mgmt_client2.add_face("repo2", None, 0)
        self.mgmt_client2.add_forwarding_rule(Name("/repo/r2"), [0])
        self.mgmt_client3.add_face("repo3", None, 0)
        self.mgmt_client3.add_forwarding_rule(Name("/repo/r3"), [0])
        self.mgmt_client4.add_face("repo4", None, 0)
        self.mgmt_client4.add_forwarding_rule(Name("/repo/r4"), [0])
        # ...and routes /lib back towards the distributor via the "nfn0" face.
        self.mgmt_client1.add_face("nfn0", None, 0)
        self.mgmt_client1.add_forwarding_rule(Name("/lib"), [1])
        self.mgmt_client2.add_face("nfn0", None, 0)
        self.mgmt_client2.add_forwarding_rule(Name("/lib"), [1])
        self.mgmt_client3.add_face("nfn0", None, 0)
        self.mgmt_client3.add_forwarding_rule(Name("/lib"), [1])
        self.mgmt_client4.add_face("nfn0", None, 0)
        self.mgmt_client4.add_forwarding_rule(Name("/lib4/func4"), [1])
        #setup function code
        #self.mgmt_client1.add_new_content(Name("/lib/func1"),"PYTHON\nf\ndef f(n):\n return n")
        # NOTE(review): the function body published on nfn1 defines f() without
        # parameters but references n inside the loop -- looks like a fixture
        # bug (nfn2/nfn3 publish f(n)); confirm against the intended experiment.
        self.mgmt_client1.add_new_content(Name("/lib/func1"), "PYTHON\nf\ndef f():\n result =[]\n x,y =0,1\n while x<n:\n result.append(x)\n x,y = y, y+x\n return result")
        self.mgmt_client2.add_new_content(Name("/lib/func1"), "PYTHON\nf\ndef f(n):\n result =[]\n x,y =0,1\n while x<n:\n result.append(x)\n x,y = y, y+x\n return result")
        # self.mgmt_client2.add_new_content(Name("/lib/func2"),"func2")
        self.mgmt_client3.add_new_content(Name("/lib/func1"), "PYTHON\nf\ndef f(n):\n result =[]\n x,y =0,1\n while x<n:\n result.append(x)\n x,y = y, y+x\n return result")
        self.mgmt_client4.add_new_content(Name("/lib4/func4"),"func4")
        # self.mgmt_client1.add_new_content(Name("/lib/func1"),
        #                                  "PYTHON\nf\ndef f():\n for i in range(0,100000000):\n a.upper()\n return a.upper()")
        # self.mgmt_client2.add_new_content(Name("/lib/func2"),
        #                                  "PYTHON\nf\ndef f(a):\n for i in range(0,100000000):\n a.upper()\n return a.upper()")
        # self.mgmt_client3.add_new_content(Name("/lib/func3"),
        #                                  "PYTHON\nf\ndef f(a):\n for i in range(0,100000000):\n a.upper()\n return a.upper()")
        # self.mgmt_client4.add_new_content(Name("/lib/func4"),
        #                                  "PYTHON\nf\ndef f(a):\n for i in range(0,100000000):\n a.upper()\n return a.upper()")
        #
    def setup_repo(self):
        # Create /tmp/repo1 .. /tmp/repo4, each holding a single file dataN
        # whose content is the string "dataN".
        for i in range(1,5):
            self.path = "/tmp/repo" + str(i)
            try:
                os.stat(self.path)
            except:
                os.mkdir(self.path)
            with open(self.path + "/data" + str(i), 'w+') as content_file:
                content_file.write("data" + str(i))
    def tearDown_repo(self):
        # Best-effort cleanup.  NOTE(review): self.path only points at the
        # last directory created in setup_repo, so earlier repo directories
        # may be left behind -- confirm whether that is intentional.
        try:
            shutil.rmtree(self.path)
            os.remove("/tmp/repo")
        except:
            pass
    def test_simple_Fs(self):
        # Fire five concurrent NFN requests (the fibonacci-style func1 with
        # arguments of very different sizes) through the distributor and wait
        # for all worker threads to finish.
        self.setup_repo()
        self.setup_faces_and_connections()
        name1 = Name("/lib/func1")
        name1 += '_()'
        name1 += "NFN"
        name2 = Name("/lib/func1")
        name2 += '_(500000)'
        name2 += "NFN"
        name3 = Name("/lib/func1")
        name3 += '_(5000)'
        name3 += "NFN"
        name4 = Name("/lib/func1")
        name4 += '_(900000000000000000)'
        name4 += "NFN"
        name5 = Name("/lib/func1")
        name5 += '_(68899455874)'
        name5 += "NFN"
        t1= Fs_thread(name1, fetch_tool= self.fetch_tool1)
        t2= Fs_thread(name2, fetch_tool= self.fetch_tool1)
        t3= Fs_thread(name3, fetch_tool= self.fetch_tool1)
        t4= Fs_thread(name4, fetch_tool= self.fetch_tool1)
        t5= Fs_thread(name5, fetch_tool= self.fetch_tool1)
        t1.start()
        t2.start()
        t3.start()
        t4.start()
        t5.start()
        t1.join()
        t2.join()
        t3.join()
        t4.join()
        t5.join()
    # def first_request(self):
    #     name1 = Name("/lib/func1")
    #     name1 += '_(100000000000000000000000000)'
    #     name1 += "NFN"
    #     self.fetch_tool1.fetch_data(name1, timeout=10)
    #
    # def second_request(self):
    #     name1 = Name("/lib/func1")
    #     name1 += '_(5)'
    #     name1 += "NFN"
    #     self.fetch_tool1.fetch_data(name1, timeout=10)
    #
    # def third_request(self):
    #     name1 = Name("/lib/func1")
    #     name1 += '_(1000000)'
    #     name1 += "NFN"
    #     self.fetch_tool1.fetch_data(name1, timeout=10)
    #
    # def test_simple_Fs(self):
    #     """Simple FS test"""
    #     self.setup_repo()
    #     self.setup_faces_and_connections()
    #     t1= threading.Thread(self.first_request())
    #     t2= threading.Thread(self.second_request())
    #     t3= threading.Thread(self.third_request())
    #     t1.start()
    #     t2.start()
    #     t3.start()
    #     t1 = threading.Thread(self.first_request())
    #     t2 = threading.Thread(self.second_request())
    #     t3 = threading.Thread(self.third_request())
    #     t1.start()
    #     t2.start()
    #     t3.start()
    #
    #     name2= Name("/lib/func1")
    #     name2 += '_(5)'
    #     name2 += "NFN"
    #
    #     res1 = self.
    #     res2 = self.fetch_tool1.fetch_data(name2, timeout=0)
    #     print(res1)
    #     print(res2)
    #     self.assertEqual("func1", res1)
    #     res2 = self.fetch_tool1.fetch_data(name2, timeout=0)
    #     time.sleep(3)
    #     print(res2)
    #     self.assertEqual("func2", res2)
    # def test_simple_map_reduce_data_from_repo(self):
    #     """Simple map reduce test with input data from repo"""
    #     self.setup_repo()
    #     self.setup_faces_and_connections()
    #
    #     name = Name("/lib/reduce4")
    #     name += '_(/lib/func1(/repo/r1/data1),/lib/func2(/repo/r2/data2),/lib/func3(/repo/r3/data3),/lib/func4(/repo/r4/data4))'
    #     name += "NFN"
    #
    #     res = self.fetch_tool1.fetch_data(name, timeout=0)
    #     time.sleep(3)
    #     print(res)
    #     self.assertEqual("DATA1DATA2DATA3DATA4", res)
    #
    #
    # def test_simple_map_reduce_data_from_repo_to_data(self):
    #     """Simple map reduce test with input data from repo forwarding to data"""
    #     self.setup_repo()
    #     self.setup_faces_and_connections()
    #
    #     name = Name("/repo/r1/data1")
    #     name += '/lib/reduce4(/lib/func1(_),/lib/func2(/repo/r2/data2),/lib/func3(/repo/r3/data3),/lib/func4(/repo/r4/data4))'
    #     name += "NFN"
    #
    #     res = self.fetch_tool1.fetch_data(name, timeout=0)
    #     time.sleep(3)
    #     print(res)
    #     self.assertEqual("DATA1DATA2DATA3DATA4", res)
0f6811584c35d57112dde4d376d40b9b051263d0 | 48 | py | Python | Tools/MagicPanels/panelMoveXp.py | dprojects/Woodworking | 24420b248e3343a387ae1328fc6dcbf97e433242 | [
"MIT"
] | 6 | 2022-02-25T19:11:40.000Z | 2022-03-24T22:03:47.000Z | Tools/MagicPanels/panelMoveXp.py | dprojects/Woodworking | 24420b248e3343a387ae1328fc6dcbf97e433242 | [
"MIT"
] | 1 | 2022-03-13T09:35:22.000Z | 2022-03-13T13:30:36.000Z | Tools/MagicPanels/panelMoveXp.py | dprojects/Woodworking | 24420b248e3343a387ae1328fc6dcbf97e433242 | [
"MIT"
] | 3 | 2022-02-26T15:01:08.000Z | 2022-03-20T21:30:04.000Z | import MagicPanels
# Move the current selection along the "Xp" axis direction (presumably the
# positive X axis -- confirm against the MagicPanels.panelMove API).
MagicPanels.panelMove("Xp")
| 12 | 27 | 0.8125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.083333 |
0f686508655f6cb5a008cef527be098e9ada85a3 | 1,399 | py | Python | app/api/v2/models/user_model.py | MbuguaCaleb/Questioner-V2-API | 9e3a5593250a12b74ad5dcbe220827040fa3d676 | [
"MIT"
] | null | null | null | app/api/v2/models/user_model.py | MbuguaCaleb/Questioner-V2-API | 9e3a5593250a12b74ad5dcbe220827040fa3d676 | [
"MIT"
] | 1 | 2019-01-19T13:10:02.000Z | 2019-01-19T13:10:02.000Z | app/api/v2/models/user_model.py | MbuguaCaleb/Questioner-V2-API | 9e3a5593250a12b74ad5dcbe220827040fa3d676 | [
"MIT"
] | null | null | null | from ....db_conn import initialize_db
from psycopg2.extras import RealDictCursor
from werkzeug.security import generate_password_hash,check_password_hash
con = initialize_db()
# Module-level shared cursor; RealDictCursor makes fetch*() return dict rows.
# NOTE(review): a single connection/cursor shared by the whole module is not
# safe under concurrent requests -- consider per-request cursors.
cur = con.cursor(cursor_factory=RealDictCursor)
class User(object):
    """Data-access model for rows of the `users` table."""

    # Table name used by exists()/find(). (The original assigned this twice.)
    table = 'users'

    def save(self, data=None):
        """Insert a new user row.

        Args:
            data: dict with keys firstname, lastname, email, phone_number,
                username and password (the password is expected to be hashed
                already by the caller).

        Returns:
            The same `data` dict that was inserted.
        """
        query = """
        INSERT INTO users (firstname, lastname, email, phone_number, username, password) VALUES (%(firstname)s, %(lastname)s, %(email)s, %(phone_number)s, %(username)s, %(password)s);"""
        cur.execute(query, data)
        con.commit()
        return data

    def exists(self, key, value):
        """Return True if a row exists where column `key` equals `value`."""
        # `key` is a column name and cannot be bound as a query parameter, but
        # `value` is user-supplied and is now passed as a bound parameter to
        # prevent SQL injection (it was previously interpolated into the SQL).
        query = "SELECT * FROM {} WHERE {} = %s".format(self.table, key)
        cur.execute(query, (value,))
        result = cur.fetchall()
        return len(result) > 0

    def find(self, key, value):
        """Return the first row where column `key` equals `value`, or None."""
        # Same parameterization as exists(): only the identifier parts are
        # formatted into the SQL string; the value is a bound parameter.
        query = "SELECT * FROM {} WHERE {} = %s".format(self.table, key)
        cur.execute(query, (value,))
        return cur.fetchone()

    @staticmethod
    def checkpassword(hashed_password, password):
        """Return True if `password` matches the werkzeug `hashed_password`."""
        return check_password_hash(hashed_password, password)
| 25.436364 | 186 | 0.609721 | 1,170 | 0.836312 | 0 | 0 | 179 | 0.127949 | 0 | 0 | 446 | 0.318799 |
0f68bd486acd6f559b6e55931f138347c5551170 | 2,654 | py | Python | Python/17.letter-combinations-of-a-phone-number.py | Dxyk/LeetCode | e64b405f40b4e3c0f14c617897b775699dd46872 | [
"MIT"
] | null | null | null | Python/17.letter-combinations-of-a-phone-number.py | Dxyk/LeetCode | e64b405f40b4e3c0f14c617897b775699dd46872 | [
"MIT"
] | null | null | null | Python/17.letter-combinations-of-a-phone-number.py | Dxyk/LeetCode | e64b405f40b4e3c0f14c617897b775699dd46872 | [
"MIT"
] | null | null | null | from typing import Dict, List
class Solution:
    """LeetCode 17: letter combinations of a phone-number digit string."""

    # Standard phone keypad mapping; digits '0' and '1' carry no letters.
    DIGIT_TO_LETTER: Dict[str, List[str]] = {
        "1": [],
        "2": ["a", "b", "c"],
        "3": ["d", "e", "f"],
        "4": ["g", "h", "i"],
        "5": ["j", "k", "l"],
        "6": ["m", "n", "o"],
        "7": ["p", "q", "r", "s"],
        "8": ["t", "u", "v"],
        "9": ["w", "x", "y", "z"],
        "0": []
    }

    def letterCombinations(self, digits: str) -> List[str]:
        """
        Given a string containing digits from 2-9 inclusive, return all possible letter combinations that the number could represent.

        A mapping of digit to letters (just like on the telephone buttons) is used. Note that 1 does not map to any letters.

        Note: Although the example below is sorted, the answer may be in any order.

        >>> s = Solution().letterCombinations("23")
        >>> s.sort()
        >>> s
        ['ad', 'ae', 'af', 'bd', 'be', 'bf', 'cd', 'ce', 'cf']

        :param digits: the input digits
        :return: all possible letter combinations
        """
        # str.isdigit() is False for the empty string, so this single check
        # also covers the previously separate len(digits) == 0 test.
        if not digits.isdigit():
            return []
        return self.standard_soln(digits)

    def standard_soln(self, digits: str) -> List[str]:
        """
        Recursive solution. T: O(k^n) combinations, k letters per digit.

        :param digits: the input digits
        :return: all possible letter combinations
        """
        # base cases
        if len(digits) == 0:
            return []
        if len(digits) == 1:
            # Return a copy: the original returned the class-level list object
            # itself, so a caller mutating the result would have corrupted
            # DIGIT_TO_LETTER for every later call.
            return list(Solution.DIGIT_TO_LETTER[digits[0]])
        # recursive step: combine every prefix combination with each letter
        # of the last digit.
        prev = self.standard_soln(digits[:-1])
        new = Solution.DIGIT_TO_LETTER[digits[-1]]
        return [p + n for n in new for p in prev]

    def my_soln(self, digits: str) -> List[str]:
        """
        Iterative solution. T: O(k^n) combinations, k letters per digit.

        :param digits: the input digits
        :return: all possible letter combinations
        """
        result: List[str] = []
        for i, digit in enumerate(digits):
            if i == 0:
                # Copy so the class-level mapping is never aliased.
                result = Solution.DIGIT_TO_LETTER[digit][:]
            else:
                # Extend every partial combination by each letter of `digit`.
                new_result = []
                while len(result) != 0:
                    prev_combination = result.pop(0)
                    new_result += [prev_combination + letter for letter in Solution.DIGIT_TO_LETTER[digit]]
                result = new_result[:]
        return result
def main():
    """Print the letter combinations for a sample input ("23")."""
    combinations = Solution().letterCombinations("23")
    print(combinations)
if __name__ == "__main__":
    # Run the demo only when this file is executed directly.
    main()
| 29.488889 | 134 | 0.520723 | 2,520 | 0.94951 | 0 | 0 | 0 | 0 | 0 | 0 | 1,193 | 0.44951 |
0f69cabd8fe171f0dca1ced002ed424271bae43e | 696 | py | Python | django-backend/mission/settings_dev_dummy.py | isystematics/SoarCast | 6ba8d7db76e2ba1ad5fd1ab3636dd202b11450b0 | [
"Apache-2.0"
] | 2 | 2021-06-08T16:35:30.000Z | 2022-03-18T16:04:48.000Z | django-backend/mission/settings_dev_dummy.py | isystematics/SoarCast | 6ba8d7db76e2ba1ad5fd1ab3636dd202b11450b0 | [
"Apache-2.0"
] | 4 | 2022-02-22T18:43:07.000Z | 2022-02-22T18:45:25.000Z | django-backend/mission/settings_dev_dummy.py | isystematics/SoarCast | 6ba8d7db76e2ba1ad5fd1ab3636dd202b11450b0 | [
"Apache-2.0"
] | null | null | null | from .settings import *
# Local-development override settings with dummy values (not for production).
DATABASES = {
    # Local PostgreSQL instance; the empty password presumably relies on
    # trust/peer auth on the developer machine -- confirm.
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'mission',
        'USER': 'postgres',
        'PASSWORD': '',
        'HOST': '127.0.0.1',
        'PORT': '5432',
    }
}
DEBUG = True
# Local Vault dev-server settings; the token is intentionally blank here.
VAULT_HOST = 'http://127.0.0.1:8200'
VAULT_ROOT_PATH = 'mission'
# VAULT_TOKEN = 's.xxxxxxxxxxxxx'
VAULT_TOKEN = ''
MODULES_PATH = '/Volumes/second/work/mission-control/modules/'
VAULT_VERIFY_CERTIFICATE = False
TOKEN_LIFE_TIME = 365
# NOTE(review): `timedelta` is assumed to be provided by the wildcard import
# of .settings at the top of this file -- confirm it is exported there.
SIMPLE_JWT = {
    'AUTH_TOKEN_CLASSES': ('rest_framework_simplejwt.tokens.SlidingToken',),
    'SLIDING_TOKEN_LIFETIME': timedelta(days=365),
    'SLIDING_TOKEN_REFRESH_LIFETIME': timedelta(days=1),
}
0f69d3eab2dbb5cfe8989db4cb4da5f04dcd2474 | 1,653 | py | Python | src/moodlews/service.py | pystardust/Welearn-bot | 87a8c388201629c7ce94e6f9730a4ac7442b36f6 | [
"MIT"
] | null | null | null | src/moodlews/service.py | pystardust/Welearn-bot | 87a8c388201629c7ce94e6f9730a4ac7442b36f6 | [
"MIT"
] | null | null | null | src/moodlews/service.py | pystardust/Welearn-bot | 87a8c388201629c7ce94e6f9730a4ac7442b36f6 | [
"MIT"
] | null | null | null | from requests import Session
import urllib.parse
import json
class ServerFunctions:
    """Symbolic names for the Moodle web-service REST functions used here."""
    SITE_INFO = "core_webservice_get_site_info"
    ALL_COURSES = "core_course_get_courses_by_field"
    USER_COURSES = "core_enrol_get_users_courses"
    COURSE_CONTENTS = "core_course_get_contents"
    ASSIGNMENTS = "mod_assign_get_assignments"
    ASSIGNMENT_STATUS = "mod_assign_get_submission_status"
    URLS = "mod_url_get_urls_by_courses"
    RESOURCES = "mod_resource_get_resources_by_courses"
class MoodleClient:
    """Minimal client for a Moodle site's mobile web-service REST endpoints."""

    def __init__(self, baseurl):
        self.baseurl = baseurl
        # Derived endpoints: token login and the generic REST dispatcher.
        self.login_url = urllib.parse.urljoin(baseurl, "login/token.php")
        self.server_url = urllib.parse.urljoin(baseurl, "webservice/rest/server.php")
        self.session = Session()
        self.token = ""

    def response(self, url, **data):
        """POST `data` as form fields and return the raw response object."""
        return self.session.post(url, data)

    def response_json(self, url, **data):
        """POST `data` and decode the response body as JSON."""
        raw = self.response(url, **data)
        return json.loads(raw.content)

    def authenticate(self, username, password):
        """Log in via token.php; store and return the token, or False on failure."""
        reply = self.response_json(
            self.login_url,
            username=username,
            password=password,
            service="moodle_mobile_app",
        )
        try:
            token = reply["token"]
        except KeyError:
            # A reply without a "token" key means the login was rejected.
            return False
        self.token = token
        return token

    def server(self, function, **data):
        """Invoke the named web-service function using the stored token."""
        return self.response_json(
            self.server_url,
            wstoken=self.token,
            moodlewsrestformat="json",
            wsfunction=function,
            **data
        )

    def close(self):
        """Release the underlying HTTP session."""
        self.session.close()
| 30.611111 | 85 | 0.643678 | 1,588 | 0.960678 | 0 | 0 | 0 | 0 | 0 | 0 | 330 | 0.199637 |
0f69e78024609f6fbaf1f9eeb13bf6a51e75d7da | 6,319 | py | Python | agents/wann_agent.py | Miffyli/policy-supervectors | 358284805e5bc96b95cae15e9741571e46d84bc9 | [
"MIT"
] | 17 | 2020-12-02T15:27:03.000Z | 2022-01-14T06:42:14.000Z | agents/wann_agent.py | Miffyli/policy-supervectors | 358284805e5bc96b95cae15e9741571e46d84bc9 | [
"MIT"
] | null | null | null | agents/wann_agent.py | Miffyli/policy-supervectors | 358284805e5bc96b95cae15e9741571e46d84bc9 | [
"MIT"
] | null | null | null | import numpy as np
from gym import spaces
from agents import SimpleAgentClass
# Create agents for the CMA-ES, NEAT and WANN agents
# defined in the weight-agnostic paper repo:
# https://github.com/google/brain-tokyo-workshop/tree/master/WANNRelease/
# -------------------------------------------------------------------
# Here begins copy/paste from WANNRelease code linked above
def weightedRandom(weights):
  """Returns random index, with each choice's chance weighted

  Args:
    weights   - (np_array) - weighting of each choice
                [N X 1]

  Returns:
    i         - (int)      - chosen index
  """
  weights = np.asarray(weights, dtype=float)
  shifted = weights - np.min(weights)  # handle negative vals
  cumVal = np.cumsum(shifted)
  pick = np.random.uniform(0, cumVal[-1])
  # Vectorized replacement for the original O(N) Python scan: side='left'
  # yields the first index whose cumulative weight reaches `pick`, exactly
  # like the original loop.  The clamp guards the float-rounding edge where
  # `pick` could exceed cumVal[-1]; the original loop silently returned
  # None in that case.
  idx = int(np.searchsorted(cumVal, pick, side='left'))
  return min(idx, len(cumVal) - 1)
def selectAct(action, actSelect):
  """Selects action based on vector of actions

    Single Action:
    - Hard: a single action is chosen based on the highest index
    - Prob: a single action is chosen probablistically with higher values
            more likely to be chosen

    We aren't selecting a single action:
    - Softmax: a softmax normalized distribution of values is returned
    - Default: all actions are returned

  Args:
    action   - (np_array) - vector weighting each possible action
                [N X 1]

  Returns:
    i - (int) or (np_array) - chosen index
         [N X 1]
  """
  def _softmax(x):
    # Numerically stable softmax, ported from the WANN reference code.
    # Bug fix: this module previously referenced a global `softmax` that is
    # neither defined nor imported here, so the 'softmax' branch always
    # raised NameError.
    x = np.asarray(x, dtype=float)
    if x.ndim == 1:
      e_x = np.exp(x - np.max(x))
      return e_x / np.sum(e_x)
    e_x = np.exp(x.T - np.max(x, axis=1))
    return (e_x / np.sum(e_x, axis=0)).T

  if actSelect == 'softmax':
    action = _softmax(action)
  elif actSelect == 'prob':
    # Weighted random draw over the summed action values (single index).
    action = weightedRandom(np.sum(action, axis=0))
  else:
    action = action.flatten()
  return action
def act(weights, aVec, nInput, nOutput, inPattern):
  """Returns FFANN output given a single input pattern
  If the variable weights is a vector it is turned into a square weight matrix.

  Allows the network to return the result of several samples at once if given a matrix instead of a vector of inputs:
      Dim 0 : individual samples
      Dim 1 : dimensionality of pattern (# of inputs)

  Args:
    weights   - (np_array) - ordered weight matrix or vector
                [N X N] or [N**2]
    aVec      - (np_array) - activation function of each node
                [N X 1]    - stored as ints (see applyAct in ann.py)
    nInput    - (int)      - number of input nodes
    nOutput   - (int)      - number of output nodes
    inPattern - (np_array) - input activation
                [1 X nInput] or [nSamples X nInput]

  Returns:
    output    - (np_array) - output activation
                [1 X nOutput] or [nSamples X nOutput]
  """
  # Turn weight vector into weight matrix
  if np.ndim(weights) < 2:
      nNodes = int(np.sqrt(np.shape(weights)[0]))
      wMat = np.reshape(weights, (nNodes, nNodes))
  else:
      nNodes = np.shape(weights)[0]
      wMat = weights
  # NaN connections are treated as absent (weight 0).  NOTE(review): when a
  # 2-D matrix is passed in, wMat aliases it, so this mutates the caller's
  # array in place -- confirm that is acceptable to all call sites.
  wMat[np.isnan(wMat)]=0

  # Vectorize input
  if np.ndim(inPattern) > 1:
      nSamples = np.shape(inPattern)[0]
  else:
      nSamples = 1

  # Run input pattern through ANN
  nodeAct = np.zeros((nSamples,nNodes))
  nodeAct[:,0] = 1 # Bias activation
  nodeAct[:,1:nInput+1] = inPattern

  # Propagate signal through hidden to output nodes.  This single forward
  # sweep assumes nodes are ordered so each node only receives input from
  # lower-indexed nodes (topological order) -- presumed from the WANN
  # export format; confirm.
  iNode = nInput+1  # redundant: the for-loop below rebinds iNode immediately
  for iNode in range(nInput+1,nNodes):
      rawAct = np.dot(nodeAct, wMat[:,iNode]).squeeze()
      nodeAct[:,iNode] = applyAct(aVec[iNode], rawAct)
      #print(nodeAct)

  # The last nOutput columns hold the output-node activations.
  output = nodeAct[:,-nOutput:]
  return output
def applyAct(actId, x):
  """Returns value after an activation function is applied
  Lookup table to allow activations to be stored in numpy arrays

  case 1  -- Linear
  case 2  -- Unsigned Step Function
  case 3  -- Sin
  case 4  -- Gausian with mean 0 and sigma 1
  case 5  -- Hyperbolic Tangent [tanh] (signed)
  case 6  -- Sigmoid unsigned [1 / (1 + exp(-x))]
  case 7  -- Inverse
  case 8  -- Absolute Value
  case 9  -- Relu
  case 10 -- Cosine
  case 11 -- Squared

  Args:
    actId   - (int)   - key to look up table
    x       - (???)   - value to be input into activation
              [? X ?] - any type or dimensionality

  Returns:
    output  - (float) - value after activation is applied
              [? X ?] - same dimensionality as input
  """
  # Normalized to a single elif chain.  The original had a disconnected
  # `if actId == 1` whose result only survived because the chain's trailing
  # `else` re-assigned x; behavior is identical for every actId.
  if actId == 1:    # Linear
    value = x
  elif actId == 2:  # Unsigned Step Function
    value = 1.0*(x>0.0)
    #value = (np.tanh(50*x/2.0) + 1.0)/2.0
  elif actId == 3:  # Sin
    value = np.sin(np.pi*x)
  elif actId == 4:  # Gaussian with mean 0 and sigma 1
    value = np.exp(-np.multiply(x, x) / 2.0)
  elif actId == 5:  # Hyperbolic Tangent (signed)
    value = np.tanh(x)
  elif actId == 6:  # Sigmoid (unsigned)
    value = (np.tanh(x/2.0) + 1.0)/2.0
  elif actId == 7:  # Inverse
    value = -x
  elif actId == 8:  # Absolute Value
    value = abs(x)
  elif actId == 9:  # Relu
    value = np.maximum(0, x)
  elif actId == 10: # Cosine
    value = np.cos(np.pi*x)
  elif actId == 11: # Squared
    value = x**2
  else:             # Unknown id: pass through unchanged
    value = x
  return value
# End of copypaste
# -------------------------------------------------------------------
# This action is original to this repository
def create_wann_agent(agent_path, agent_type, env):
    """
    Load and return a WANN agent.

    The agent has a function `get_action` that takes in
    an observation and returns an appropiate action.
    """
    data = np.load(agent_path)
    weight_matrix = data["wMat"]
    activation_vector = data["aVec"]

    # TODO support for other input spaces?
    num_inputs = env.observation_space.shape[0]

    # Action-selection mode depends on the action space: probabilistic pick
    # for discrete spaces, raw outputs for continuous (Box) spaces.
    if isinstance(env.action_space, spaces.Discrete):
        num_outputs = env.action_space.n
        action_type = "prob"
    elif isinstance(env.action_space, spaces.Box):
        num_outputs = env.action_space.shape[0]
        action_type = "all"
    else:
        raise ValueError("Unsupported action space")

    def get_action(obs):
        # Forward pass (includes batch-size), then action selection.
        raw_output = act(weight_matrix, activation_vector, num_inputs, num_outputs, obs)
        return selectAct(raw_output, action_type)

    return SimpleAgentClass(get_action)
| 29.666667 | 119 | 0.592182 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,592 | 0.568444 |
0f6ab9f6902561656b7e6574e6d06dd8dae26076 | 483 | py | Python | imagr_users/admin.py | sazlin/cfpydev-imagr | e34ac025e357694f40034ab1c02ed3be5294c2d8 | [
"MIT"
] | null | null | null | imagr_users/admin.py | sazlin/cfpydev-imagr | e34ac025e357694f40034ab1c02ed3be5294c2d8 | [
"MIT"
] | null | null | null | imagr_users/admin.py | sazlin/cfpydev-imagr | e34ac025e357694f40034ab1c02ed3be5294c2d8 | [
"MIT"
] | null | null | null | from django.contrib import admin
from models import ImagrUser, Relationship
# Register your models here.
class ImagrUserAdmin(admin.ModelAdmin):
fields = ('username',
'first_name',
'last_name',
'email',
)
search_fields = ['username',
'email',
'first_name',
'last_name']
admin.site.register(ImagrUser, ImagrUserAdmin)
admin.site.register(Relationship)
| 23 | 46 | 0.565217 | 293 | 0.606625 | 0 | 0 | 0 | 0 | 0 | 0 | 108 | 0.223602 |
0f6b058045d24130832a31ba7ff907e1cd47ac61 | 4,115 | py | Python | src/scripts/ia.py | BureauTech/BTAlert-AI | 945f6dfc10cdfa1ca99ff929136b40cadb40bc21 | [
"MIT"
] | 1 | 2022-03-17T20:27:27.000Z | 2022-03-17T20:27:27.000Z | src/scripts/ia.py | BureauTech/BTAlert-IA | e2d250bbaf363eb7f32dfee7e1a112326053bb57 | [
"MIT"
] | null | null | null | src/scripts/ia.py | BureauTech/BTAlert-IA | e2d250bbaf363eb7f32dfee7e1a112326053bb57 | [
"MIT"
] | null | null | null | import os
import warnings
from datetime import datetime, timedelta
from typing import Tuple
import matplotlib.pyplot as plt
import pandas as pd
from dotenv import load_dotenv
from prometheus_api_client import MetricSnapshotDataFrame, PrometheusConnect
from prometheus_api_client.utils import parse_datetime
from skforecast.ForecasterAutoreg import ForecasterAutoreg
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
plt.style.use('fivethirtyeight')
plt.rcParams['lines.linewidth'] = 1.5
warnings.filterwarnings('ignore')
load_dotenv()
class BtalertIA:
def __init__(self, last_minutes_importance: int, regressor=None) -> None:
"""
Args:
last_minutes_importance (int): The last minutes that matter to foreacasting (context)
"""
self.prom = PrometheusConnect(
url=os.getenv('PROMETHEUS_URL'), disable_ssl=True)
self.regressor = regressor
if regressor is None:
self.regressor = RandomForestRegressor(
max_depth=40,
n_estimators=3,
random_state=123,
)
self.forecaster = ForecasterAutoreg(
regressor=self.regressor,
lags=self.minutes_to_step(last_minutes_importance)
)
self.original_dataframe = pd.DataFrame()
self.data_train = pd.DataFrame()
self.data_test = pd.DataFrame()
self.predictions = pd.Series()
self.value_column = 'value'
self.timestamp_column = 'timestamp'
def load_metric_as_dataframe(self, start: str, end: str, metric_name: str) -> pd.DataFrame:
start_time = parse_datetime(start)
end_time = parse_datetime(end)
original_dataframe = MetricSnapshotDataFrame(
self.prom.get_metric_range_data(
metric_name, start_time=start_time, end_time=end_time, chunk_size=timedelta(seconds=15))
)
new_dataframe = original_dataframe[[
self.timestamp_column, self.value_column]].copy()
new_dataframe[self.timestamp_column] = [datetime.fromtimestamp(
timestamp) for timestamp in new_dataframe[self.timestamp_column]]
new_dataframe[self.value_column] = [
float(value) for value in new_dataframe[self.value_column]]
new_dataframe[self.timestamp_column] = new_dataframe[self.timestamp_column].astype(
'datetime64[s]')
new_dataframe = new_dataframe.set_index(
new_dataframe[self.timestamp_column])
new_dataframe = new_dataframe.asfreq(freq='15S', method='bfill')
self.original_dataframe = new_dataframe
return new_dataframe
def split_test_train_dataframe(self, minutes_split: int) -> Tuple[pd.DataFrame, pd.DataFrame]:
steps = self.minutes_to_step(minutes_split)
self.data_train = self.original_dataframe[:-steps]
self.data_test = self.original_dataframe[-steps:]
return self.data_train, self.data_test
def minutes_to_step(self, min: int) -> int:
return int((min * 60) / 15)
def train_model(self) -> None:
self.forecaster.fit(y=self.data_train[self.value_label])
def predict(self, minutes_prediction: int) -> pd.Series:
return self.forecaster.predict(steps=self.minutes_to_step(minutes_prediction))
def plot_graphic(self):
fig, ax = plt.subplots(figsize=(18, 12))
self.data_train[self.value_column].plot(ax=ax, label='train')
self.data_test[self.value_column].plot(ax=ax, label='test')
self.predictions.plot(ax=ax, label='predictions')
ax.legend()
def get_mean_squared_error(self) -> float:
error_mse: float = mean_squared_error(
y_true=self.data_test[self.value_column],
y_pred=self.predictions
)
return error_mse
def execute(self, start: str, end: str, metric_name: str, minutes_split: int):
self.load_metric_as_dataframe(start, end, metric_name)
self.split_test_train_dataframe(minutes_split)
self.train_model()
self.predict()
| 37.072072 | 104 | 0.684569 | 3,523 | 0.856136 | 0 | 0 | 0 | 0 | 0 | 0 | 256 | 0.062211 |
0f6b864680738e0f6318834ca0ebb3c3d6afb4ba | 3,306 | py | Python | data_loaders/anime_loader.py | dchenam/AnimeGAN | 15707a99dde000a6d7f283f4f82d5176b8313e0a | [
"MIT"
] | 1 | 2020-01-19T15:04:42.000Z | 2020-01-19T15:04:42.000Z | data_loaders/anime_loader.py | dchenam/AnimeGAN | 15707a99dde000a6d7f283f4f82d5176b8313e0a | [
"MIT"
] | null | null | null | data_loaders/anime_loader.py | dchenam/AnimeGAN | 15707a99dde000a6d7f283f4f82d5176b8313e0a | [
"MIT"
] | null | null | null | import os
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from PIL import Image
class Anime_Dataset(Dataset):
    """Dataset of anime face images tagged with one-hot hair/eye colours.

    Each line of the label file is ``<image_id> <tag> <tag> ...``; images are
    loaded lazily from ``config.image_dir`` as ``<image_id>.jpg``.
    ``__getitem__`` returns a (correct image, correct one-hot text,
    mismatched image) triple for GAN-style training.
    """

    def __init__(self, config, transform):
        self.config = config
        self.transform = transform
        # Use a context manager so the label file handle is closed
        # (the original left the file object open).
        with open(config.label_path, 'r') as label_file:
            self.lines = label_file.readlines()
        self.num_data = len(self.lines)
        self.image_ids = []
        self.labels = []
        # Fixed vocabulary: 12 hair colours + 12 eye colours -> indices 0..23.
        self.tag_dict = {'orange_hair': 0, 'white_hair': 1, 'aqua_hair': 2, 'gray_hair': 3, 'green_hair': 4,
                         'red_hair': 5, 'purple_hair': 6, 'pink_hair': 7, 'blue_hair': 8, 'black_hair': 9,
                         'brown_hair': 10, 'blonde_hair': 11, 'gray_eyes': 12, 'black_eyes': 13, 'orange_eyes': 14,
                         'pink_eyes': 15, 'yellow_eyes': 16, 'aqua_eyes': 17, 'purple_eyes': 18, 'green_eyes': 19,
                         'brown_eyes': 20, 'red_eyes': 21, 'blue_eyes': 22, 'bicolored_eyes': 23}
        print('preprocessing...')
        print('number of images: ', self.num_data)
        self.preprocess()

    def __len__(self):
        return self.num_data

    def __getitem__(self, index):
        """Return (correct image, correct one-hot label, random wrong image)."""
        correct_image = Image.open(os.path.join(self.config.image_dir, self.image_ids[index] + '.jpg'))
        correct_text = self.labels[index]
        # The "wrong" sample is drawn uniformly; it may occasionally coincide
        # with the correct one (same behaviour as before).
        random_index = np.random.randint(low=0, high=self.num_data)
        wrong_image = Image.open(os.path.join(self.config.image_dir, self.image_ids[random_index] + '.jpg'))
        return self.transform(correct_image), torch.Tensor(correct_text), self.transform(wrong_image)

    def preprocess(self):
        """Parse every label line into an image id and a one-hot tag vector."""
        for line in self.lines:
            splits = line.split()
            image_id = splits[0]
            one_hot = np.zeros(len(self.tag_dict))
            for value in splits[1:]:
                one_hot[self.tag_dict[value]] = 1
            self.labels += [one_hot]
            self.image_ids += [image_id]

    def generate_embedding(self):
        """Build one-hot embeddings for a fixed set of test descriptions.

        Returns a dict mapping each description string (e.g.
        ``'blue_hair, red_eyes'``) to its one-hot numpy vector.
        """
        test_descriptions = ['blue_hair, red_eyes', 'brown_hair, brown_eyes',
                             'black_hair, blue_eyes', 'red_hair, green_eyes']
        embeddings = {}
        # Loop variable renamed from ``str``, which shadowed the builtin.
        for description in test_descriptions:
            one_hot = np.zeros(len(self.tag_dict))
            for tag in description.split(', '):
                one_hot[self.tag_dict[tag]] = 1
            embeddings[description] = one_hot
        return embeddings
def get_loader(config):
    """Build the training DataLoader plus the fixed test-tag embeddings.

    Returns ``(data_loader, embeddings)`` where ``embeddings`` maps
    description strings (e.g. ``'blue_hair, red_eyes'``) to one-hot vectors
    produced by ``Anime_Dataset.generate_embedding``.
    """
    transform = transforms.Compose([
        # transforms.CenterCrop(config.crop_size),
        # NOTE(review): transforms.Scale is deprecated in newer torchvision
        # (replaced by transforms.Resize) -- confirm the pinned version.
        transforms.Scale(config.image_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5, 0.5, 0.5),  # 3 for RGB channels
                             std=(0.5, 0.5, 0.5))
    ])
    dataset = Anime_Dataset(config, transform)
    print('generating test embeddings...')
    embeddings = dataset.generate_embedding()
    # drop_last keeps every batch at exactly config.batch_size.
    data_loader = DataLoader(dataset,
                             config.batch_size,
                             shuffle=True,
                             num_workers=4,
                             drop_last=True)
    return data_loader, embeddings
| 40.317073 | 117 | 0.588929 | 2,400 | 0.725953 | 0 | 0 | 0 | 0 | 0 | 0 | 598 | 0.180883 |
0f6d1c33773b442b8a76170619e0410a30ccb153 | 4,650 | py | Python | bitwarden_pyro/controller/cache.py | apetresc/bitwarden-pyro | 2312452b7c56b1c534c4f3874f162b9dc0df92c3 | [
"MIT"
] | 7 | 2019-12-10T12:51:48.000Z | 2021-11-26T23:27:37.000Z | bitwarden_pyro/controller/cache.py | apetresc/bitwarden-pyro | 2312452b7c56b1c534c4f3874f162b9dc0df92c3 | [
"MIT"
] | 13 | 2019-12-06T11:53:31.000Z | 2022-01-01T16:45:33.000Z | bitwarden_pyro/controller/cache.py | apetresc/bitwarden-pyro | 2312452b7c56b1c534c4f3874f162b9dc0df92c3 | [
"MIT"
] | 3 | 2020-09-25T05:59:30.000Z | 2022-01-08T03:09:29.000Z | import os
import json
import stat
import time
from bitwarden_pyro.util.logger import ProjectLogger
from bitwarden_pyro.settings import NAME
class CacheMetadata:
    """Model class holding cache metadata: creation time and item count."""

    def __init__(self, time_created=None, count=None):
        self.time_created = time_created  # epoch seconds when the cache was written
        self.count = count                # number of items that were cached

    def to_dict(self):
        """Serialise this instance to a plain dict (inverse of ``create``)."""
        return {'time': self.time_created, 'count': self.count}

    @staticmethod
    def create(dictionary):
        """Build a CacheMetadata from a dict previously made by ``to_dict``."""
        return CacheMetadata(dictionary['time'], dictionary['count'])
class Cache:
    """Read and write item data to cache files.

    Layout under ``~/.cache/<NAME>/``: a JSON items file plus a small JSON
    metadata file (creation time + item count).  A non-positive ``expiry``
    disables caching entirely.
    """

    _cache_dir = f'~/.cache/{NAME}/'
    _items_file = 'items.json'
    _meta_file = 'items.metadata'

    def __init__(self, expiry):
        self._path = None      # resolved cache directory, set in __init_meta
        self._meta = None      # CacheMetadata of an existing cache, if found
        self._logger = ProjectLogger().get_logger()
        self._expiry = expiry  # Negative values disable cache
        # Lambdas so the paths always reflect the current self._path.
        self.__items_path = lambda: os.path.join(self._path, self._items_file)
        self.__meta_path = lambda: os.path.join(self._path, self._meta_file)

        self.__init_meta()

    def __init_meta(self):
        # Create the cache directory if needed and load existing metadata.
        if not self.should_cache():
            self._logger.info("Disabling caching of items")
            return

        try:
            self._path = os.path.expanduser(self._cache_dir)
            if not os.path.isdir(self._path):
                os.makedirs(self._path)
            else:
                mpath = self.__meta_path()
                ipath = self.__items_path()

                # Both items and metadata file need to be present,
                # however, their validity is not checked
                if os.path.isfile(ipath) and os.path.isfile(mpath):
                    with open(mpath, 'r') as file:
                        meta_json = json.load(file)
                        self._meta = CacheMetadata.create(meta_json)
                        self._logger.debug("Initialised meta data from %s", mpath)
        except IOError:
            raise CacheException("Failed to initialise cache metadata")

    def should_cache(self):
        """ Returns true if expiry is a positive number """
        return self._expiry > 0

    def __cache_age(self):
        """Returns the age in days of the saved cache"""
        if self._meta is None:
            raise CacheException("Cache metadata has not been initialised")

        seconds = (time.time() - self._meta.time_created)
        days = seconds / 86_400  # seconds per day
        return days

    def get(self):
        """Return a collection of cached items"""
        try:
            ipath = self.__items_path()
            self._logger.debug("Reading cached items from %s", ipath)
            with open(ipath, 'r') as file:
                items = json.load(file)
                return items
        except IOError:
            # Fixed message: this is a *read* failure (the original message
            # said "write", copy-pasted from save()).
            raise CacheException(f"Failed to read cache data from {self._path}")

    def save(self, items):
        """Sanitise and save a collection of items to a cache files.

        NOTE: sanitisation mutates the *items* passed in -- login passwords
        and TOTP secrets are set to None before writing to disk.
        """
        try:
            self._logger.debug("Writing cache to %s", self._path)
            self._meta = CacheMetadata(time.time(), len(items))
            meta_path = os.path.join(self._path, self._meta_file)
            with open(meta_path, 'w') as file:
                json.dump(self._meta.to_dict(), file)

            # Chmod to 600
            os.chmod(meta_path, stat.S_IWRITE | stat.S_IREAD)

            # Sanitize cache by removing sensitive data
            for item in items:
                if item.get('login'):
                    if item.get('login').get('password') is not None:
                        item['login']['password'] = None
                    if item.get('login').get('totp') is not None:
                        item['login']['totp'] = None

            item_path = os.path.join(self._path, self._items_file)
            with open(item_path, 'w') as file:
                json.dump(items, file)

            # Chmod to 600
            os.chmod(item_path, stat.S_IWRITE | stat.S_IREAD)
        except IOError:
            raise CacheException(f"Failed to write cache data to {self._path}")

    def has_items(self):
        """Returns true if cache is enabled, not expired and contains items"""
        return self._expiry > 0 \
            and self._meta is not None \
            and self._meta.count > 0 \
            and self.__cache_age() < self._expiry
class CacheException(Exception):
    """Base exception raised by Cache objects when reading or writing fails."""
| 31.208054 | 79 | 0.576774 | 4,500 | 0.967742 | 0 | 0 | 208 | 0.044731 | 0 | 0 | 1,138 | 0.244731 |
0f6dc02e799dc65a7634ed67eacfd39cd76ca1b9 | 210 | py | Python | library/render_partial.py | pythononwheels/diary | 87ffd08d0dffe979799d9f441014bd5d71f00fc6 | [
"MIT"
] | null | null | null | library/render_partial.py | pythononwheels/diary | 87ffd08d0dffe979799d9f441014bd5d71f00fc6 | [
"MIT"
] | null | null | null | library/render_partial.py | pythononwheels/diary | 87ffd08d0dffe979799d9f441014bd5d71f00fc6 | [
"MIT"
] | null | null | null | #
# One Tornado UIModule to render them all ;)
#
import tornado.web
class RenderPatialModule(tornado.web.UIModule):
    """Tornado UI module rendering *partial* inside a fixed HTML snippet.

    NOTE(review): the class name keeps the original 'Patial' spelling so any
    existing template/module registrations do not break.
    """

    def render(self, partial=None):
        rendered = "<h1>Hello, world!</h1><p>" + str(partial) + "</p>"
        return rendered
0f70e53221634c25ef5b9365ad4e3d8d00516b0d | 1,605 | py | Python | doc/source/isphx/objpull.py | flying-sheep/sphobjinv | 0aa56e3982f99bf811cef4126e452ddd65cae088 | [
"MIT"
] | 55 | 2016-10-30T05:03:16.000Z | 2022-03-13T18:00:44.000Z | doc/source/isphx/objpull.py | flying-sheep/sphobjinv | 0aa56e3982f99bf811cef4126e452ddd65cae088 | [
"MIT"
] | 202 | 2016-05-16T13:25:50.000Z | 2022-03-22T20:05:45.000Z | doc/source/isphx/objpull.py | bskinn/sphinx-objectsinv | 505c7afc656f20b9e105f4ead9c6eb570eef971a | [
"MIT"
] | 4 | 2020-03-29T01:47:50.000Z | 2021-04-07T13:37:05.000Z | # Quickie script for refreshing the local objects.inv cache
# OVERWRITES EXISTING FILES, WITH PRE-DELETION
def pullobjs():
    """Refresh the local objects.inv cache for every intersphinx mapping.

    Executes ../conf.py so that ``intersphinx_mapping`` and ``isphx_objstr``
    become available (they are injected into this module's globals by the
    ``exec`` below), deletes any previously cached copy, then downloads each
    mapping's objects.inv.  OVERWRITES EXISTING FILES.
    """
    import os
    import urllib.request as urlrq
    import certifi

    # Open conf.py, retrieve content and compile
    with open(os.path.join(os.pardir, 'conf.py'), 'r') as f:
        confcode = compile(f.read(), 'conf.py', 'exec')

    # Execute conf.py into the global namespace (I know, sloppy) --
    # this is where intersphinx_mapping and isphx_objstr come from.
    exec(confcode, globals())

    # Iterate intersphinx_mapping from conf.py to retrieve the objects.inv files
    # Make use of the conf.py 'isphx_objstr' substitution string, too
    for n, t in intersphinx_mapping.items():
        print('{0}:\n'.format(n) + '-' * 16)

        # Pre-delete the cached file so a failed download leaves no stale copy.
        try:
            os.remove(isphx_objstr.format(n))
        except FileNotFoundError:
            pass  # No big deal

        # Fetch; certifi supplies the CA bundle for HTTPS verification.
        try:
            resp = urlrq.urlopen(t[0] + '/objects.inv', cafile=certifi.where())
        except Exception as e:
            print('HTTP request failed:\n' + str(e) + '\n')
            continue
        else:
            print('... located ...')

        try:
            b_s = resp.read()
        except Exception as e:
            print('Download failed:\n' + str(e) + '\n')
            continue
        else:
            print('... downloaded ...')

        try:
            with open(isphx_objstr.format(n), 'wb') as f:
                f.write(b_s)
        except Exception as e:
            print('Write failed:\n' + str(e) + '\n')
            continue
        else:
            print('... done.')

        print('')


if __name__ == '__main__':
    pullobjs()
| 25.887097 | 80 | 0.54704 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 552 | 0.343925 |
0f716674c3dc2ac73901984ceb01683e57a08e32 | 4,487 | py | Python | python-client/cloudera/director/v8/models/__init__.py | daanknoope/director-sdk | a099fedd5afe365aedbb50daa75de048ef6f7ab4 | [
"Apache-2.0"
] | 24 | 2015-03-04T01:39:36.000Z | 2020-06-30T13:34:27.000Z | python-client/cloudera/director/v8/models/__init__.py | daanknoope/director-sdk | a099fedd5afe365aedbb50daa75de048ef6f7ab4 | [
"Apache-2.0"
] | 5 | 2015-11-04T08:18:47.000Z | 2019-01-05T11:12:19.000Z | python-client/cloudera/director/v8/models/__init__.py | daanknoope/director-sdk | a099fedd5afe365aedbb50daa75de048ef6f7ab4 | [
"Apache-2.0"
] | 26 | 2015-02-24T21:13:53.000Z | 2020-12-15T06:01:46.000Z | # coding: utf-8
# flake8: noqa
"""
Licensed to Cloudera, Inc. under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. Cloudera, Inc. licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
# import models into model package
from cloudera.director.v8.models.capabilities import Capabilities
from cloudera.director.v8.models.cloud_provider_metadata import CloudProviderMetadata
from cloudera.director.v8.models.cluster import Cluster
from cloudera.director.v8.models.cluster_template import ClusterTemplate
from cloudera.director.v8.models.configuration_property import ConfigurationProperty
from cloudera.director.v8.models.configuration_property_value import ConfigurationPropertyValue
from cloudera.director.v8.models.deployment import Deployment
from cloudera.director.v8.models.deployment_template import DeploymentTemplate
from cloudera.director.v8.models.diagnostic_data_summary import DiagnosticDataSummary
from cloudera.director.v8.models.display_property import DisplayProperty
from cloudera.director.v8.models.environment import Environment
from cloudera.director.v8.models.error_info import ErrorInfo
from cloudera.director.v8.models.eula import Eula
from cloudera.director.v8.models.external_database import ExternalDatabase
from cloudera.director.v8.models.external_database_server import ExternalDatabaseServer
from cloudera.director.v8.models.external_database_server_template import ExternalDatabaseServerTemplate
from cloudera.director.v8.models.external_database_server_usage import ExternalDatabaseServerUsage
from cloudera.director.v8.models.external_database_template import ExternalDatabaseTemplate
from cloudera.director.v8.models.health import Health
from cloudera.director.v8.models.health_check import HealthCheck
from cloudera.director.v8.models.import_result import ImportResult
from cloudera.director.v8.models.import_status import ImportStatus
from cloudera.director.v8.models.instance import Instance
from cloudera.director.v8.models.instance_provider_config import InstanceProviderConfig
from cloudera.director.v8.models.instance_state import InstanceState
from cloudera.director.v8.models.instance_template import InstanceTemplate
from cloudera.director.v8.models.login import Login
from cloudera.director.v8.models.metering_setting import MeteringSetting
from cloudera.director.v8.models.metrics import Metrics
from cloudera.director.v8.models.migrating_group import MigratingGroup
from cloudera.director.v8.models.migration import Migration
from cloudera.director.v8.models.notification import Notification
from cloudera.director.v8.models.password_change import PasswordChange
from cloudera.director.v8.models.resource_provider_metadata import ResourceProviderMetadata
from cloudera.director.v8.models.service import Service
from cloudera.director.v8.models.ssh_credentials import SshCredentials
from cloudera.director.v8.models.status import Status
from cloudera.director.v8.models.time_series import TimeSeries
from cloudera.director.v8.models.time_series_aggregate_statistics import TimeSeriesAggregateStatistics
from cloudera.director.v8.models.time_series_cross_entity_metadata import TimeSeriesCrossEntityMetadata
from cloudera.director.v8.models.time_series_data import TimeSeriesData
from cloudera.director.v8.models.time_series_metadata import TimeSeriesMetadata
from cloudera.director.v8.models.time_series_response import TimeSeriesResponse
from cloudera.director.v8.models.time_series_response_list import TimeSeriesResponseList
from cloudera.director.v8.models.time_series_row import TimeSeriesRow
from cloudera.director.v8.models.user import User
from cloudera.director.v8.models.validation_exception_condition import ValidationExceptionCondition
from cloudera.director.v8.models.virtual_instance import VirtualInstance
from cloudera.director.v8.models.virtual_instance_group import VirtualInstanceGroup
| 59.826667 | 104 | 0.872075 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 811 | 0.180744 |
0f7258c30ea450c0d6df7038fbaab0f28f88f98e | 1,844 | py | Python | wagtailshowsimilar/views.py | ongchi/wagtail-showsimilaritems | d358d8bb48b759e96bf7519bfe9925a6364cc7fc | [
"Apache-2.0"
] | null | null | null | wagtailshowsimilar/views.py | ongchi/wagtail-showsimilaritems | d358d8bb48b759e96bf7519bfe9925a6364cc7fc | [
"Apache-2.0"
] | null | null | null | wagtailshowsimilar/views.py | ongchi/wagtail-showsimilaritems | d358d8bb48b759e96bf7519bfe9925a6364cc7fc | [
"Apache-2.0"
] | null | null | null | from django.apps import apps
from django.views.decorators.http import require_GET
from django.http import JsonResponse
from django.urls import reverse
from wagtail.search.backends import get_search_backend
from wagtail.core.models import Page
# Shared search backend instance used by the search view below.
backend = get_search_backend()
@require_GET
def search(request):
    """Find items similar to the query text and return them as JSON.

    GET parameters: ``query`` (search text), ``model`` (``app.Model`` path),
    ``field`` (field to search and to return), ``instance_id`` (item to
    exclude from results), ``threshold`` (minimum score, used only when the
    backend supports scoring) and ``max_items``.

    Returns ``{"items": [{"value": ..., "url": ...}, ...],
    "is_trimmed": bool}``.
    """
    search_query = request.GET.get("query")
    model = apps.get_model(request.GET.get("model"))
    field = request.GET.get("field")

    try:
        instance_id = int(request.GET.get("instance_id"))
    except (TypeError, ValueError):
        # Fix: a missing parameter makes request.GET.get return None and
        # int(None) raises TypeError, which the original did not catch.
        instance_id = None

    threshold = float(request.GET.get("threshold"))
    max_items = int(request.GET.get("max_items"))

    query = backend.search(search_query, model, fields=[field])
    try:
        q = query.annotate_score("_score")
        # Since django queryset is lazy, we have to trigger queryset to be evaluated
        # to make sure whether annotate_score is supported by current search backend.
        _ = q[0]
        query = q
        with_score = True
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit are
        # no longer swallowed; any backend failure falls back to unscored mode.
        with_score = False

    response = {"items": [], "is_trimmed": False}
    for q in query:
        # Scored results come back ordered; stop at the first low score.
        if with_score and not q._score > threshold:
            break
        if len(response["items"]) >= max_items:
            response["is_trimmed"] = True
            break
        if q.id == instance_id:
            continue
        if hasattr(q, "get_showsimilar_url"):
            url = getattr(q, "get_showsimilar_url")()
        elif isinstance(q, Page):
            url = reverse("wagtailadmin_pages:edit", args=(q.page_ptr.id,))
        else:
            url = None
        response["items"].append({"value": getattr(q, field), "url": url})

    return JsonResponse(response)
| 34.792453 | 87 | 0.60141 | 0 | 0 | 0 | 0 | 1,566 | 0.849241 | 0 | 0 | 342 | 0.185466 |
0f73367ba6109ee049aa54a44a28f5bd872d3d4e | 5,149 | py | Python | views.py | zhoubogao/hhlyDevops | 832684bb7fa4e67a23b2e6171ed0433fe0748de5 | [
"Unlicense"
] | null | null | null | views.py | zhoubogao/hhlyDevops | 832684bb7fa4e67a23b2e6171ed0433fe0748de5 | [
"Unlicense"
] | null | null | null | views.py | zhoubogao/hhlyDevops | 832684bb7fa4e67a23b2e6171ed0433fe0748de5 | [
"Unlicense"
] | null | null | null | #-*-coding:utf-8-*-
from flask import url_for, redirect, request, current_app
from flask_admin.contrib.sqla import ModelView
from flask_admin import AdminIndexView, helpers, expose
from werkzeug.security import generate_password_hash
from flask_login import current_user, login_user, logout_user
from models import User, Role, Device, Platforms_info, Ip, Project, Domain, Port, App
from forms import LoginForm
from flask_principal import (
ActionNeed,
AnonymousIdentity,
Identity,
identity_changed,
identity_loaded,
Permission,
Principal,
RoleNeed,
Denial
)
# Initialize flask-principal
prcp = Principal()

# Permission objects referenced by the admin views below.
anon_permission = Permission()                                        # no role required
admin_permission = Permission(RoleNeed('Admin'))                      # 'Admin' role only
admin_or_editor = Permission(RoleNeed('Admin'), RoleNeed('Devlop'))   # either role
devlop_permission = Permission(RoleNeed('Devlop'))                    # 'Devlop' role only
admin_denied = Denial(RoleNeed('Admin'))                              # everyone except 'Admin'
# Create customized model view class
class UserModelView(ModelView):
    """Flask-Admin view for User records.

    Password hashes are hidden from the list view; the plaintext password
    entered in the form is hashed before persisting.
    """
    can_export = True
    can_view_details = True
    column_exclude_list = ['password', ]
    column_searchable_list = ('real_name', 'login', Role.name)
    column_display_all_relations = True

    def is_accessible(self):
        """Only authenticated users may open this view."""
        return current_user.is_authenticated

    def on_model_change(self, form, model, is_created):
        """Hash the submitted plaintext password before it is saved.

        Parameter renamed from ``User`` to ``model``: flask-admin passes it
        positionally, and the old name shadowed the imported User model.
        """
        model.password = generate_password_hash(form.password.data)
# Create customized model view class
class RoleModelView(ModelView):
    """Flask-Admin view for Role records; searchable by role name or user login."""
    can_export = True
    can_view_details = True
    column_editable_list = ('description',)
    column_searchable_list = ( 'name', User.login)
    column_display_all_relations = True
# Create customized model view class
class Platforms_infoModelView(ModelView):
    """Flask-Admin view for platform account records (URL, credentials, notes)."""
    can_export = True
    can_view_details = True
    column_editable_list = ('platform', 'description', 'url',
                            'username', 'password', 'ps',
                            )
    column_searchable_list = ('platform', 'description', 'url',
                              'username', 'password', 'ps',
                              )
    column_display_all_relations = True
# Create customized model view class
class DeviceModelView(ModelView):
    """Flask-Admin view for physical device (server/hardware) records.

    Fix: the original listed 'brand' twice in both column_editable_list and
    column_searchable_list; the duplicates are removed.
    """
    can_export = True
    can_view_details = True
    column_editable_list = ('device_num', 'device_name', 'idc', 'location',
                            'used_to', 'hardware_type', 'brand', 'buy_date',
                            'fast_repair_code', 'cpu', 'memory', 'disk',
                            )
    # Ip.ip lets the admin search devices by an associated IP address.
    column_searchable_list = ('device_num', 'device_name', 'idc', 'location',
                              'used_to', 'hardware_type', 'brand', 'buy_date',
                              'fast_repair_code', 'cpu', 'memory', 'disk',
                              Ip.ip,
                              )
    column_display_all_relations = True
# Create customized model view class
class IpModelView(ModelView):
    """Flask-Admin view for IP address records (ISP, mask, switch port, creds)."""
    can_export = True
    can_view_details = True
    column_editable_list = ('isp', 'ip', 'use', 'mask', 'mac',
                            'route', 'switch_port',
                            'username', 'password',
                            )
    column_searchable_list = ('isp', 'ip', 'use', 'mask', 'mac',
                              'route', 'switch_port',
                              'username', 'password',
                              )
    column_display_all_relations = True
# Create customized model view class
class ProjectModelView(ModelView):
    """Flask-Admin view for Project records; searchable by project or app name."""
    can_export = True
    can_view_details = True
    column_editable_list = ('project', )
    column_searchable_list = ( 'project', App.app)
    column_display_all_relations = True
# Create customized model view class
class DomainModelView(ModelView):
    """Flask-Admin view for Domain records; searchable by domain or app name."""
    can_export = True
    can_view_details = True
    column_editable_list = ('domain',)
    column_searchable_list = ( 'domain', App.app)
    column_display_all_relations = True
# Create customized model view class
class PortModelView(ModelView):
    """Flask-Admin view for Port records; searchable by port or app name."""
    can_export = True
    can_view_details = True
    column_editable_list = ('port',)
    column_searchable_list = ( 'port', App.app)
    column_display_all_relations = True
# Create customized model view class
class AppModelView(ModelView):
    """Flask-Admin view for App records; searchable by app, domain or port."""
    can_export = True
    can_view_details = True
    column_editable_list = ('description', 'ps',)
    column_searchable_list = ( 'app', Domain.domain, Port.port)
    column_display_all_relations = True
# Create customized index view class that handles login & registration
class OpsAdminIndexView(AdminIndexView):
    """Admin index view that redirects anonymous users to the login page.

    Role-based permission checks are present but commented out below --
    presumably disabled on purpose; confirm before re-enabling.
    """

    @expose('/')
    # @anon_permission.require(http_exception=403)
    # @admin_denied.require(http_exception=403)
    def index_view(self):
        # Unauthenticated visitors are bounced to the auth blueprint's login view.
        if not current_user.is_authenticated:
            return redirect(url_for('auth.login'))
        # with admin_permission.require(http_exception=403):
        return super(OpsAdminIndexView, self).index_view()
| 33.435065 | 85 | 0.619344 | 3,573 | 0.693921 | 0 | 0 | 0 | 0 | 0 | 0 | 1,246 | 0.241989 |
0f7394ec885b966aaf92685817a31c344da144b3 | 577 | py | Python | unittests/validators.py | lspestrip/stdb2 | 80703385f6e681962140d82a4991878a995c90fd | [
"MIT"
] | 1 | 2018-03-07T10:13:12.000Z | 2018-03-07T10:13:12.000Z | unittests/validators.py | lspestrip/stdb2 | 80703385f6e681962140d82a4991878a995c90fd | [
"MIT"
] | 31 | 2017-10-28T07:17:38.000Z | 2018-05-11T11:28:02.000Z | unittests/validators.py | lspestrip/stdb2 | 80703385f6e681962140d82a4991878a995c90fd | [
"MIT"
] | 1 | 2017-11-28T21:50:27.000Z | 2017-11-28T21:50:27.000Z | # -*- encoding: utf-8 -*-
# File extensions accepted for uploaded report files; compared
# case-insensitively by validate_report_file_ext.
# NOTE(review): '.xsl' / '.xslx' look like typos for '.xls' / '.xlsx'
# (Excel formats) -- confirm before changing.
VALID_REPORT_EXTENSIONS = [
    '.pdf',
    '.doc',
    '.docx',
    '.html',
    '.htm',
    '.xsl',
    '.xslx',
    '.md',
    '.rst',
    '.zip',
]
def validate_report_file_ext(value):
    """Django validator: reject files whose extension is not an allowed report type.

    *value* is an uploaded file object; its ``name`` attribute is checked
    case-insensitively against VALID_REPORT_EXTENSIONS.
    """
    import os
    from django.core.exceptions import ValidationError

    ext = os.path.splitext(value.name)[1]
    if ext.lower() not in VALID_REPORT_EXTENSIONS:
        allowed = ', '.join('"' + x + '"' for x in VALID_REPORT_EXTENSIONS)
        raise ValidationError(
            'unsupported file extension "{0}", valid extensions are {1}'.format(ext, allowed))
| 23.08 | 104 | 0.561525 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 157 | 0.272097 |
0f7407e7b2ba5174648101b672fee04fc30da7a2 | 2,531 | py | Python | python/demo_confidence_map.py | doublechenching/UltrasondConfienceMap | e345b3fbf658817e3b4af57e32d5ecb4b7073595 | [
"Apache-2.0"
] | 13 | 2019-03-12T08:47:29.000Z | 2022-03-01T18:07:30.000Z | python/demo_confidence_map.py | doublechenching/UltrasondConfienceMap | e345b3fbf658817e3b4af57e32d5ecb4b7073595 | [
"Apache-2.0"
] | 1 | 2020-06-16T14:29:30.000Z | 2020-06-16T14:29:30.000Z | python/demo_confidence_map.py | doublechenching/UltrasondConfienceMap | e345b3fbf658817e3b4af57e32d5ecb4b7073595 | [
"Apache-2.0"
] | 2 | 2020-04-25T03:06:47.000Z | 2020-06-02T02:59:55.000Z | #encoding: utf-8
from __future__ import print_function
from skimage import io
from confidence_map import confidence_map3d, confidence_map2d
import numpy as np
import pydicom
from skimage.external.tifffile import imshow
import time
from matplotlib import pyplot as plt
from skimage.exposure import rescale_intensity
from skimage.transform import resize
def conf3d_demo(path):
    """Load a 3-D ultrasound DICOM, compute its confidence map and display both.

    The volume is transposed to depth-last for confidence_map3d, truncated to
    the first 20 slices, and intensity-rescaled before processing.
    """
    volume_dcm = pydicom.read_file(path)
    volume = volume_dcm.pixel_array
    # Spacings read from the DICOM header (literal comments note values seen
    # in the test file); they are not passed to confidence_map3d below.
    slice_spacing = volume_dcm.SpacingBetweenSlices  # 0.520
    height_spacing = volume_dcm.PixelSpacing[0]  # 0.082
    width_spacing = volume_dcm.PixelSpacing[1]  # 0.2
    volume = np.transpose(volume, [1, 2, 0])  # depth last
    volume = volume[:, :, :20]  # only the first 20 slices are processed
    v_min, v_max = (0.007, 0.81)
    volume = rescale_intensity(volume, in_range=(v_min * 255, v_max * 255))
    print(volume.shape)
    now = time.time()
    conf_map = confidence_map3d(volume, alpha=1.5, beta=90, gamma=0.03, solver_mode='gpu')
    print(conf_map.shape)
    print("Runtime ", time.time() - now)
    # Transpose back to depth-first for tifffile's imshow.
    conf_map = np.transpose(conf_map, [2, 0, 1])
    volume = np.transpose(volume, [2, 0, 1])
    fig = plt.figure()
    imshow(conf_map, figure=fig, subplot=(211), cmap='gray')
    imshow(volume, figure=fig, subplot=(212), cmap='gray')
    plt.show()
def conf2d_demo(image_path, v_min=0.007, v_max=0.81, threshold=0.5):
    """confidence map 2d demo

    # Args
        image_path: str, image path
        v_min, v_max: scale image intensity to range of [v_min, v_max]
        threshold: confidence cut-off used for the binary-mask panels
    # Return
        confidence map; effectively a probability map with range [0, 1.0]
    """
    img = io.imread(image_path)
    img = rescale_intensity(img, in_range=(v_min * 255, v_max * 255))
    height_spacing = 0.082
    width_spacing = 0.2
    print('image shape is ', img.shape)
    now = time.time()
    # NOTE(review): spacing is computed here but confidence_map2d is called
    # with spacing=None, so the value is effectively unused -- confirm intent.
    spacing = [1.0, width_spacing / height_spacing]
    conf_map = confidence_map2d(img, alpha=1.5, beta=90, gamma=0.03, spacing=None, solver_mode='bf')
    conf_map = np.clip(conf_map, 0, 1)  # clamp to a valid probability range
    print("Runtime ", time.time() - now)
    # 2x2 panel: original (221), confidence map (223),
    # masked image (222), binary mask (224); saved to confidence2d.png.
    plt.subplot(221)
    plt.imshow(img)
    plt.axis('off')
    plt.subplot(223)
    plt.imshow(conf_map)
    plt.axis('off')
    plt.subplot(222)
    plt.imshow((conf_map > threshold).astype('uint8') * img)
    plt.axis('off')
    plt.subplot(224)
    plt.imshow((conf_map > threshold).astype('uint8'))
    plt.axis('off')
    plt.savefig('confidence2d.png')
    return conf_map
if __name__ == "__main__":
    # Runs the 2-D demo by default; uncomment the line below for the 3-D DICOM demo.
    # conf3d_demo('./test.dcm')
    conf2d_demo('./images/test.bmp')
0f75773a133923f483bf0f32ed5fc3578f9eaa52 | 1,566 | py | Python | src/ui/pgUI.py | yuj09161/MoneyManager | 32d57ca05d0c182ace68ea960a7bf7dfaad0fc79 | [
"MIT"
] | null | null | null | src/ui/pgUI.py | yuj09161/MoneyManager | 32d57ca05d0c182ace68ea960a7bf7dfaad0fc79 | [
"MIT"
] | null | null | null | src/ui/pgUI.py | yuj09161/MoneyManager | 32d57ca05d0c182ace68ea960a7bf7dfaad0fc79 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'pg.ui'
##
## Created by: Qt User Interface Compiler version 6.0.0
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide6.QtCore import *
from PySide6.QtGui import *
from PySide6.QtWidgets import *
class Ui_Pg(object):
    """Auto-generated UI class for a small progress window (from pg.ui).

    NOTE(review): produced by the Qt User Interface Compiler; edit pg.ui and
    regenerate rather than changing this class by hand.
    """

    def setupUi(self, Pg):
        # Fixed 200x70 window containing a status label above a progress bar.
        if not Pg.objectName():
            Pg.setObjectName(u"Pg")
        Pg.setFixedSize(200, 70)
        self.centralwidget = QWidget(Pg)
        self.centralwidget.setObjectName(u"centralwidget")
        self.vlCent = QVBoxLayout(self.centralwidget)
        self.vlCent.setObjectName(u"vlCent")
        self.lbStatus = QLabel(self.centralwidget)
        self.lbStatus.setObjectName(u"lbStatus")
        self.lbStatus.setAlignment(Qt.AlignCenter)

        self.vlCent.addWidget(self.lbStatus)

        self.pgPg = QProgressBar(self.centralwidget)
        self.pgPg.setObjectName(u"pgPg")
        self.pgPg.setValue(24)

        self.vlCent.addWidget(self.pgPg)

        Pg.setCentralWidget(self.centralwidget)

        self.retranslateUi(Pg)

        QMetaObject.connectSlotsByName(Pg)
    # setupUi

    def retranslateUi(self, Pg):
        # Window title is Korean ("진행 중" = "in progress").
        Pg.setWindowTitle(QCoreApplication.translate("Pg", u"\uc9c4\ud589 \uc911", None))
        self.lbStatus.setText(QCoreApplication.translate("Pg", u"TextLabel", None))
    # retranslateUi
| 31.959184 | 89 | 0.598978 | 1,095 | 0.699234 | 0 | 0 | 0 | 0 | 0 | 0 | 482 | 0.307791 |