hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9fb93c91fe5323fc4654268fd1ccfdbdc90fbcc1 | 8,130 | py | Python | 11.Introduction to Databases in Python/Chapter 4 - Creating and Manipulating your own Databases.py | prakashcc/datacamp-python-data-science-track | 8d35b2d78e5f923c7320e33bfc7b038556efe30a | [
"MIT"
] | 1 | 2020-04-03T21:39:36.000Z | 2020-04-03T21:39:36.000Z | 11.Introduction to Databases in Python/Chapter 4 - Creating and Manipulating your own Databases.py | NileshRathore/datacamp-python-data-science-track | 8d35b2d78e5f923c7320e33bfc7b038556efe30a | [
"MIT"
] | null | null | null | 11.Introduction to Databases in Python/Chapter 4 - Creating and Manipulating your own Databases.py | NileshRathore/datacamp-python-data-science-track | 8d35b2d78e5f923c7320e33bfc7b038556efe30a | [
"MIT"
] | 1 | 2020-02-07T07:34:07.000Z | 2020-02-07T07:34:07.000Z | #Chapter 4 - Creating and Manipulating your own Databases
#*******************************************************************************************#
# DataCamp exercise transcript: "Introduction to Databases in Python", Chapter 4.
# Each section below is an independent exercise snippet. Names such as
# `metadata`, `engine`, `connection`, `census`, `state_fact`, `flat_census`,
# `csv_reader`, `update`, `func` and `and_` are provided by the exercise
# environment and are never defined in this file.
#Creating Tables with SQLAlchemy
# Import Table, Column, String, Integer, Float, Boolean from sqlalchemy
from sqlalchemy import Table, Column, String, Integer, Float, Boolean
# Define a new table with a name, count, amount, and valid column: data
data = Table('data', metadata,
             Column('name', String(255)),
             Column('count', Integer()),
             Column('amount', Float()),
             Column('valid', Boolean())
)
# Use the metadata to create the table in the database bound to `engine`
metadata.create_all(engine)
# Print table details
print(repr(data))
#*******************************************************************************************#
#Constraints and Data Defaults
# Import Table, Column, String, Integer, Float, Boolean from sqlalchemy
from sqlalchemy import Table, Column, String, Integer, Float, Boolean
# Re-define the data table, now with a UNIQUE constraint on `name` and
# insert-time defaults for `count` and `valid`
data = Table('data', metadata,
             Column('name', String(255), unique=True),
             Column('count', Integer(), default=1),
             Column('amount', Float()),
             Column('valid', Boolean(), default=False)
)
# Use the metadata to create the table
metadata.create_all(engine)
# Print the table details
print(repr(metadata.tables['data']))
#*******************************************************************************************#
#Inserting a single row with an insert() statement
# Import insert and select from sqlalchemy
from sqlalchemy import insert, select
# Build an insert statement to insert a record into the data table: stmt
stmt = insert(data).values(name='Anna', count=1, amount=1000.00, valid=True)
# Execute the statement via the connection: results
results = connection.execute(stmt)
# Print result rowcount (expected: 1)
print(results.rowcount)
# Build a select statement to validate the insert
stmt = select([data]).where(data.columns.name == 'Anna')
# Print the result of executing the query.
print(connection.execute(stmt).first())
#*******************************************************************************************#
#Inserting Multiple Records at Once
# Build a list of dictionaries: values_list
values_list = [
    {'name': 'Anna', 'count': 1, 'amount': 1000.00, 'valid': True},
    {'name': 'Taylor', 'count': 1, 'amount': 750.00, 'valid': False}
]
# Build an insert statement for the data table: stmt
stmt = insert(data)
# Execute stmt once with the whole values_list (executemany style): results
results = connection.execute(stmt, values_list)
# Print rowcount
print(results.rowcount)
#*******************************************************************************************#
#Loading a CSV into a Table
# Create a insert statement for census: stmt
stmt = insert(census)
# Create an empty list and zeroed row count: values_list, total_rowcount
values_list = []
total_rowcount = 0
# Enumerate the rows of csv_reader
# NOTE(review): `data` is rebound here from the Table defined above to a
# plain dict - later sections no longer use the Table, so this is harmless
# in the exercise, but confusing.
for idx, row in enumerate(csv_reader):
    #create data and append to values_list
    data = {'state': row[0], 'sex': row[1], 'age': row[2], 'pop2000': row[3],
            'pop2008': row[4]}
    values_list.append(data)
    # Flush a batch whenever idx is divisible by 51.
    # NOTE(review): idx 0 triggers a 1-row batch, and any rows after the last
    # multiple of 51 are never inserted - verify total_rowcount against the
    # expected number of CSV rows.
    if idx % 51 == 0:
        results = connection.execute(stmt, values_list)
        total_rowcount += results.rowcount
        values_list = []
# Print total rowcount
print(total_rowcount)
#*******************************************************************************************#
#Updating individual records
# Build a select statement: select_stmt
select_stmt = select([state_fact]).where(state_fact.columns.name == 'New York')
# Print the results of executing the select_stmt
print(connection.execute(select_stmt).fetchall())
# Build a statement to update the fips_state to 36: stmt
stmt = update(state_fact).values(fips_state=36)
# Append a where clause to limit it to records for New York state
stmt = stmt.where(state_fact.columns.name == 'New York')
# Execute the statement: results
results = connection.execute(stmt)
# Print rowcount
print(results.rowcount)
# Execute the select_stmt again to view the changes
print(connection.execute(select_stmt).fetchall())
#*******************************************************************************************#
#Updating Multiple Records
#
# Build a statement to update the notes to 'The Wild West': stmt
stmt = update(state_fact).values(notes='The Wild West')
# Append a where clause to match the West census region records
stmt = stmt.where(state_fact.columns.census_region_name == 'West')
# Execute the statement: results
results = connection.execute(stmt)
# Print rowcount
print(results.rowcount)
#*******************************************************************************************#
## Correlated Updates
# Build a statement to select name from state_fact: stmt
fips_stmt = select([state_fact.columns.name])
# Append a where clause to Match the fips_state to flat_census fips_code
fips_stmt = fips_stmt.where(
    state_fact.columns.fips_state == flat_census.columns.fips_code)
# Build an update statement whose value is the correlated subquery
# fips_stmt (evaluated per updated row): update_stmt
update_stmt = update(flat_census).values(state_name=fips_stmt)
# Execute update_stmt: results
results = connection.execute(update_stmt)
# Print rowcount
print(results.rowcount)
#*******************************************************************************************#
#Deleting all the records from a table
# Import delete, select
from sqlalchemy import delete, select
# Build a statement to empty the census table: stmt
stmt = delete(census)
# Execute the statement: results
results = connection.execute(stmt)
# Print affected rowcount
print(results.rowcount)
# Build a statement to select all records from the census table
stmt = select([census])
# Print the results of executing the statement to verify there are no rows
print(connection.execute(stmt).fetchall())
#*******************************************************************************************#
## Deleting specific records
# Build a statement to count records using the sex column for Men ('M') age 36: stmt
stmt = select([func.count(census.columns.sex)]).where(
    and_(census.columns.sex == 'M',
         census.columns.age == 36)
)
# Execute the select statement and use the scalar() fetch method to save the record count
to_delete = connection.execute(stmt).scalar()
# Build a statement to delete records from the census table: stmt_del
stmt_del = delete(census)
# Append a where clause to target Men ('M') age 36
stmt_del = stmt_del.where(
    and_(census.columns.sex == 'M',
         census.columns.age == 36)
)
# Execute the statement: results
results = connection.execute(stmt_del)
# Print affected rowcount and to_delete record count, make sure they match
print(results.rowcount, to_delete)
#*******************************************************************************************#
#Deleting a Table Completely
#
# Drop only the state_fact table
state_fact.drop(engine)
# Check to see if state_fact exists
# NOTE(review): Table.exists() was removed in SQLAlchemy 1.4 - on modern
# versions use sqlalchemy.inspect(engine).has_table('state_fact') instead.
print(state_fact.exists(engine))
# Drop all tables registered on the metadata
metadata.drop_all(engine)
# Check to see if census exists
print(census.exists(engine))
#*******************************************************************************************#
| 33.875 | 145 | 0.558303 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,412 | 0.542681 |
9fb9dea256439a1de1f2a404478d0b9b9c24fdb8 | 2,735 | py | Python | archived-stock-trading-bot-v1/yf_extender.py | Allcallofduty10/stock-trading-bot | 54e608b3c0b95b87e7753b065307fc23a045e230 | [
"MIT"
] | 101 | 2020-05-20T02:17:45.000Z | 2022-03-31T12:22:09.000Z | archived-stock-trading-bot-v1/yf_extender.py | Allcallofduty10/stock-trading-bot | 54e608b3c0b95b87e7753b065307fc23a045e230 | [
"MIT"
] | 10 | 2020-09-02T14:55:12.000Z | 2022-02-21T08:50:48.000Z | archived-stock-trading-bot-v1/yf_extender.py | Allcallofduty10/stock-trading-bot | 54e608b3c0b95b87e7753b065307fc23a045e230 | [
"MIT"
] | 33 | 2021-02-13T15:38:51.000Z | 2022-03-21T10:39:15.000Z | import sys
from datetime import datetime
import yfinance as yf
def get_ticker_symbol(ticker: yf.Ticker) -> str:
    """Return the symbol string for *ticker*, or "" if it cannot be read.

    The symbol is looked up in the info dictionary returned by
    ``ticker.get_info()``.
    """
    try:
        return ticker.get_info()['symbol']
    # Bug fix: the original caught only ImportError, which this body can
    # never raise - a missing 'symbol' key raises KeyError, so the ""
    # fallback never triggered. Keep ImportError for backward compatibility.
    except (KeyError, ImportError):
        return ""
def get_stock_state(ticker: yf.Ticker) -> {}:
    """Return today's price snapshot for *ticker* as a plain dict.

    Takes the single row of the 1-day history, adds a 'Time' key with the
    current wall-clock time (HH:MM:SS) and removes the 'Dividends' and
    'Stock Splits' columns.
    """
    snapshot = ticker.history("1d").iloc[0].to_dict()
    snapshot['Time'] = datetime.now().strftime("%H:%M:%S")
    for unwanted in ('Dividends', 'Stock Splits'):
        del snapshot[unwanted]
    return snapshot
# Valid periods: 1d,5d,1mo,3mo,6mo,1y,2y,5y,10y,ytd,max
def previous_high(ticker: yf.Ticker, time_period: str) -> float:
    """Return the largest 'High' over *time_period* (0 if no rows qualify).

    NOTE(review): the scan deliberately skips the LAST TWO rows of the
    history - presumably to ignore the current (possibly partial) session;
    confirm this is intended rather than an off-by-one.
    """
    history = ticker.history(time_period)
    best = 0
    for pos in range(len(history) - 2):
        candidate = history.iloc[pos].to_dict()['High']
        if candidate > best:
            best = candidate
    return best
# Valid periods: 1d,5d,1mo,3mo,6mo,1y,2y,5y,10y,ytd,max
def calculate_sma(ticker: yf.Ticker, time_period="1mo", interval="1d") -> float:
    """Simple moving average of closing prices over *time_period*.

    Averages every row except the last one of the history. When the history
    yields no usable rows, returns sys.maxsize as a sentinel so comparisons
    treat the SMA as unreachably high.
    """
    history = ticker.history(period=time_period, interval=interval)
    closes = [history.iloc[pos].to_dict()['Close']
              for pos in range(len(history) - 1)]
    if closes:
        return sum(closes) / len(closes)
    return sys.maxsize
# Valid periods: 1d,5d,1mo,3mo,6mo,1y,2y,5y,10y,ytd,max
def calculate_ema(ticker: yf.Ticker, time_period="1mo") -> float:
    """Exponential moving average of the closing price over *time_period*.

    Blends the most recent close with the period SMA using a smoothing
    weight of 2.5 / (n + 1), where n is the number of history rows.
    """
    history = ticker.history(period=time_period)
    weight = 2.5 / (1 + len(history))
    latest_close = history.iloc[len(history) - 1].to_dict()['Close']
    return latest_close * weight + calculate_sma(ticker, time_period) * (1 - weight)
# Valid periods: 1d,5d,1mo,3mo,6mo,1y,2y,5y,10y,ytd,max
def calculate_previous_ema(ticker: yf.Ticker, time_period="1mo", days_previous=1) -> float:
    """EMA as of *days_previous* rows before the end of *time_period*.

    Blends the close from ``days_previous`` rows back with the period SMA
    using the same 2.5 / (n + 1) smoothing weight as calculate_ema().
    """
    # Fetch the history once; the original called ticker.history() twice
    # (an extra network round-trip) merely to take its length.
    stock_history = ticker.history(period=time_period)
    time_period_days = len(stock_history)
    weight = 2.5 / (1 + time_period_days)
    previous_close = stock_history.iloc[time_period_days - days_previous - 1].to_dict()['Close']
    return previous_close * weight + calculate_sma(ticker, time_period) * (1 - weight)
def get_high2current_price_change_percent(ticker: yf.Ticker) -> float:
    """Fractional change of today's close relative to today's high (<= 0)."""
    today = ticker.history("1d").iloc[0].to_dict()
    session_high = today['High']
    return (today['Close'] - session_high) / session_high
def get_direction(ticker: yf.Ticker) -> float:
    """Fractional change between the last two 1-minute closes of today."""
    minutes = ticker.history(period="1d", interval="1m")
    last_close = minutes.iloc[len(minutes) - 1].to_dict()['Close']
    prior_close = minutes.iloc[len(minutes) - 2].to_dict()['Close']
    return (last_close - prior_close) / prior_close
| 38.521127 | 200 | 0.672761 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 373 | 0.13638 |
9fbb4a14759bdd30ee6ecafbe29a12fea7b4e1d5 | 4,214 | py | Python | bill_backend/remedi_backend_processor.py | sarahjliu/remedi | 222daeb1719726bfcb704c7fd1e772444815e488 | [
"MIT"
] | null | null | null | bill_backend/remedi_backend_processor.py | sarahjliu/remedi | 222daeb1719726bfcb704c7fd1e772444815e488 | [
"MIT"
] | null | null | null | bill_backend/remedi_backend_processor.py | sarahjliu/remedi | 222daeb1719726bfcb704c7fd1e772444815e488 | [
"MIT"
] | 2 | 2018-02-25T18:11:54.000Z | 2018-02-25T22:24:47.000Z | # _____ _ _
# | __ \ | (_)
# | |__) |___ _ __ ___ ___ __| |_
# | _ // _ \ '_ ` _ \ / _ \/ _` | |
# | | \ \ __/ | | | | | __/ (_| | |
# |_| \_\___|_| |_| |_|\___|\__,_|_|
# Azure Vision API Key 1: 8ce845a5fcb44327aeed5dbd0debc2c0
# Azure Vision API Key 2: 3e8a6f7e78694f9787c7cae8c760f0ec
# Using 'https://docs.microsoft.com/en-us/azure/cognitive-services/computer-vision/quickstarts/python'
# Image URL is located at: "https://i.imgur.com/MhOJquU.jpg"
import requests
import json
import math
import collections
import medical_api
def myround(x, base=10):
    """Round *x* to the nearest multiple of *base*.

    Uses Python's built-in round(), so exact ties follow banker's rounding.
    Returns an int.
    """
    nearest_multiple = round(float(x) / base)
    return int(base * nearest_multiple)
def machine_vision_stuff(image_url):
    """Run Azure Computer Vision OCR on a medical-bill image and return the
    billed line items found in its CODE/AMOUNT columns.

    :param image_url: publicly reachable URL of the bill image.
    :return: defaultdict keyed by the OCR y-coordinate of each table row;
        each value is [code(0), orig_cost(1), uninsured_cost_est(2),
        insured_cost_est(3), short_desc(4), long_desc(5), CPT_code(6)].
    :raises requests.HTTPError: if the OCR request fails (raise_for_status).
    """
    # NOTE(review): hard-coded API key and regional endpoint - move to config.
    subscription_key = "8ce845a5fcb44327aeed5dbd0debc2c0"
    vision_base_url = "https://southcentralus.api.cognitive.microsoft.com/vision/v1.0/"
    ocr_url = vision_base_url + "ocr"
    headers = {'Ocp-Apim-Subscription-Key': subscription_key}
    # NOTE(review): trailing space in the 'detectOrientation ' key looks like
    # a typo - confirm the parameter is actually honoured by the API.
    params = {'language': 'unk', 'detectOrientation ': 'true'}
    data = {'url': image_url}
    response = requests.post(ocr_url, headers=headers, params=params, json=data)
    response.raise_for_status()
    analysis = response.json()
    # Create a dictionary to hold the operation code, cost, and combined
    cost_dict = {}
    code_dict = {}
    # operations_dict format is y_axis:[code(0),orig_cost(1),uninsured_cost_est(2),insured_cost_est(3),short_desc(4),long_desc(5),CPT_code(6)]
    operations_dict = collections.defaultdict(lambda: [0,0,0,0,0,0,0])
    # First pass: locate the CODE and AMOUNT column headers.
    # NOTE(review): if OCR finds no 'CODE'/'AMOUNT' word, CODE_x_axis /
    # COST_x_axis are never assigned and the second pass raises NameError.
    for region in analysis['regions']:
        for line in region['lines']:
            for word in line['words']:
                if word['text'] == 'CODE':
                    # boundingBox is "left,top,width,height" in pixels
                    boundingBox = word['boundingBox']
                    # -10 widens the column match window slightly leftwards
                    CODE_x_axis = int(boundingBox.split(',')[0])-10
                    CODE_y_axis = int(boundingBox.split(',')[1])
                elif word['text'] == 'AMOUNT':
                    boundingBox = word['boundingBox']
                    COST_x_axis = int(boundingBox.split(',')[0])
                    COST_y_axis = int(boundingBox.split(',')[1])
    # Second pass: collect the words below each header whose x-position is
    # within 20px of the header's column, keyed by their y-position.
    for region in analysis['regions']:
        for line in region['lines']:
            for word in line['words']:
                boundingBox = word['boundingBox']
                x_axis = int(boundingBox.split(',')[0])
                y_axis = int(boundingBox.split(',')[1])
                # Check if element in the code column
                if math.isclose(x_axis, CODE_x_axis, abs_tol=20) and y_axis > CODE_y_axis:
                    code_dict[y_axis] = word['text']
                # Check if element in the cost column
                elif math.isclose(x_axis, COST_x_axis, abs_tol=20) and y_axis > COST_y_axis:
                    cost_dict[y_axis] = word['text']
    # Combine the code dict and the cost dict
    for key, code in code_dict.items():
        operations_dict[key][0] = code
    # A cost may sit a few pixels above/below its code's row: search +/-4px
    for key,cost in cost_dict.items():
        for fuzziness in range(5):
            if (key + fuzziness) in operations_dict:
                operations_dict[key + fuzziness][1] = cost
                break
            elif (key - fuzziness) in operations_dict:
                operations_dict[key - fuzziness][1] = cost
                break
    # Using the provided hardcoded dicts, populate the rest of the data
    for key,value in operations_dict.items():
        operations_dict[key][2] = medical_api.operation_price[value[0]][0] # Uninsured cost estimate
        operations_dict[key][3] = medical_api.operation_price[value[0]][1] # Insured cost estimate
        operations_dict[key][4] = medical_api.operation_short_description[value[0]] # Short description
        operations_dict[key][5] = medical_api.operation_long_description[value[0]] # Long description
        operations_dict[key][6] = medical_api.operation_CPT_code[value[0]] # Relevant CPT code
    return operations_dict
| 46.307692 | 143 | 0.600617 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,514 | 0.359279 |
9fbc462d704378fb13b5f3d14d7cb984d4a7c69e | 660 | py | Python | Level1/Lessons64061/minari.py | StudyForCoding/ProgrammersLevel | dc957b1c02cc4383a93b8cbf3d739e6c4d88aa25 | [
"MIT"
] | null | null | null | Level1/Lessons64061/minari.py | StudyForCoding/ProgrammersLevel | dc957b1c02cc4383a93b8cbf3d739e6c4d88aa25 | [
"MIT"
] | null | null | null | Level1/Lessons64061/minari.py | StudyForCoding/ProgrammersLevel | dc957b1c02cc4383a93b8cbf3d739e6c4d88aa25 | [
"MIT"
] | 1 | 2021-04-05T07:35:59.000Z | 2021-04-05T07:35:59.000Z | ```python
def solution(board,moves):
basket=[]
answer=[]
for move in moves:
for i in range(len(board)): # range(len(board)) 에 있는 인형갯수만큼 반복
if board[i][move-1]>0: # board[][] 안에 인형이 존재할 때에만 실행하도록
basket.append(board[i][move-1])
board[i][move-1]=0 # board[][] 초기화
break
else:
pass
if len(basket)>=2 and basket[len(basket)-1]==basket[len(basket)-2]:
#다음 move로 이동하기 전, basket에 들어있는 인형의 종류 두개가 같은지 확인
basket.pop(-1)
basket.pop(-1)
answer.append(i)
return len(answer)*2 #사라지는 인형의 갯수는 answer*2
```
| 31.428571 | 75 | 0.509091 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 287 | 0.362374 |
9fbd0e54893daa5f9059045624cb8777d5342c64 | 578 | py | Python | Physics250-ME29/peakOutputVoltageGenerator.py | illusion173/Physics250 | 69f2ffdb8af013e8b0739779861c1455b579ddaf | [
"MIT"
] | null | null | null | Physics250-ME29/peakOutputVoltageGenerator.py | illusion173/Physics250 | 69f2ffdb8af013e8b0739779861c1455b579ddaf | [
"MIT"
] | null | null | null | Physics250-ME29/peakOutputVoltageGenerator.py | illusion173/Physics250 | 69f2ffdb8af013e8b0739779861c1455b579ddaf | [
"MIT"
] | null | null | null | import numpy as np
import math
extraNumber = 4 * math.pi * pow(10,-7)
def introducedEMF():
    """Prompt for generator parameters and print the peak induced EMF.

    Peak EMF = N * B * A * omega with omega = 2*pi*f; the entered area is
    scaled by 10^-2 as instructed by the prompt.
    """
    # Read all four values first, then convert (matches the prompt order
    # even when a later conversion fails).
    raw_freq = input("Input the frequency (Hz): ")
    raw_turns = input("Input how many turns of the squrare frame: ")
    raw_area = input("Input the area (m) (ignore the 10^-2): ")
    raw_field = input("Input magnetic Field Magnitude (T): ")
    frequency = float(raw_freq)
    turns = float(raw_turns)
    area = float(raw_area)
    field = float(raw_field)
    scaled_area = area * pow(10,-2)
    peak_voltage = frequency * turns * scaled_area * field * 2 * math.pi
    print(peak_voltage)
introducedEMF()
| 26.272727 | 65 | 0.602076 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 153 | 0.264706 |
9fbe9037f44203dcea331b5e2e317610d2f79dbe | 1,917 | py | Python | functions/helpers/pagination.py | haynieresearch/unusual_options_activity | f87619244bf72e603032bf5f66963b5a692bace2 | [
"Apache-2.0"
] | null | null | null | functions/helpers/pagination.py | haynieresearch/unusual_options_activity | f87619244bf72e603032bf5f66963b5a692bace2 | [
"Apache-2.0"
] | null | null | null | functions/helpers/pagination.py | haynieresearch/unusual_options_activity | f87619244bf72e603032bf5f66963b5a692bace2 | [
"Apache-2.0"
] | null | null | null | #**********************************************************
#* CATEGORY SOFTWARE
#* GROUP MARKET DATA
#* AUTHOR LANCE HAYNIE <LANCE@HAYNIEMAIL.COM>
#* DATE 2020-10-20
#* PURPOSE UNUSUAL OPTIONS ACTIVITY
#* FILE PAGINATION.PY
#**********************************************************
#* MODIFICATIONS
#* 2020-10-20 - LHAYNIE - INITIAL VERSION
#**********************************************************
#UNUSUAL OPTIONS ACTIVITY
#Copyright 2020 Haynie IPHC, LLC
#Developed by Haynie Research & Development, LLC
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.#
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import re
import math
class Pagination:
    """Extracts pagination metadata (total records, page size) from a scraped
    HTML response and derives how many further pages must be requested."""

    def __init__(self, body_response):
        """
        :param body_response: parsed response object exposing ``.html.find``
            (e.g. a requests-html response).
        """
        self._body_response = body_response
        self.total_records = None
        self.per_page = None
        self.pages_needed_to_paginate = None

    def get_pagination(self):
        """Retrieve the total record count and per-page size from the
        '.pagination-info' element; occasionally that text is absent, in
        which case both attributes stay None."""
        try:
            paginated_text = self._body_response.html.find('.pagination-info')[0].text
            self.total_records = int(re.search('of(.*)', paginated_text).group(1).strip())
            self.per_page = int(re.search('-(.*)of', paginated_text).group(1).strip())
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; any parse failure still yields None.
        except Exception:
            return None

    def calculate_pages_to_paginate(self):
        """Number of additional pages needed for async requests (0 when the
        pagination info could not be read)."""
        if self.total_records and self.per_page:
            self.pages_needed_to_paginate = math.ceil(self.total_records/self.per_page)-1
        else:
            self.pages_needed_to_paginate = 0
9fc4c964acc5d110da462d160816bb6e225c453a | 4,297 | py | Python | glhooks/mailer/messages.py | miso-belica/gitlab-webhooks | 12e161244655a37cb795ba826149a9685ae74f70 | [
"Apache-2.0"
] | 13 | 2015-01-08T22:37:55.000Z | 2019-06-27T08:19:15.000Z | glhooks/mailer/messages.py | miso-belica/gitlab-webhooks | 12e161244655a37cb795ba826149a9685ae74f70 | [
"Apache-2.0"
] | 1 | 2017-01-27T20:29:39.000Z | 2017-01-27T20:29:39.000Z | glhooks/mailer/messages.py | miso-belica/gitlab-webhooks | 12e161244655a37cb795ba826149a9685ae74f70 | [
"Apache-2.0"
] | 9 | 2015-01-25T05:46:12.000Z | 2021-01-12T08:22:20.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
from time import strftime, gmtime
from email.header import make_header
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from .utils import strip_tags, format_email_address
from .attachment import Attachment
from .compat import unicode_compatible, to_unicode, to_string, PY3
@unicode_compatible
class PlainMessage(object):
    """Simple wrapper for data of e-mail message with plain text.

    Collects sender, subject, body, recipients (To/Cc/Bcc) and attachments,
    and renders everything into a MIME payload on demand via ``payload``.
    Runs on both Python 2 and 3 (see the PY3 switches below).
    """
    # Shown by mail clients that do not understand multipart messages
    _PREAMBLE_TEXT = "This is a multi-part message in MIME format."

    def __init__(self, sender, subject, content, charset="utf-8"):
        """
        :param sender: sender address (normalised by format_email_address)
        :param subject: message subject (coerced to unicode)
        :param content: plain-text body (coerced to unicode)
        :param charset: charset used for headers, body and attachments
        """
        self._sender = format_email_address(sender)
        self._charset = to_string(charset)
        self._content = to_unicode(content)
        self._subject = to_unicode(subject)
        self._attachments = []
        self._recipients = {"To": [], "Cc": [], "Bcc": []}

    @property
    def sender(self):
        return self._sender

    @property
    def subject(self):
        return self._subject

    @property
    def recipients(self):
        """All recipients (To + Cc + Bcc combined) as a frozenset."""
        to = self._recipients["To"]
        cc = self._recipients["Cc"]
        bcc = self._recipients["Bcc"]
        return frozenset(to + cc + bcc)

    def add_recipients(self, *recipients):
        """Add "To" recipients; addresses already present anywhere are skipped."""
        recipients = self._unique_recipients(recipients)
        self._recipients["To"].extend(recipients)

    def add_recipients_cc(self, *recipients):
        """Add "Cc" recipients; addresses already present anywhere are skipped."""
        recipients = self._unique_recipients(recipients)
        self._recipients["Cc"].extend(recipients)

    def add_recipients_bcc(self, *recipients):
        """Add "Bcc" recipients; addresses already present anywhere are skipped."""
        recipients = self._unique_recipients(recipients)
        self._recipients["Bcc"].extend(recipients)

    def _unique_recipients(self, recipients):
        # Normalise, then drop any address already registered in To/Cc/Bcc
        recipients = map(format_email_address, recipients)
        return frozenset(recipients) - self.recipients

    @property
    def content(self):
        return self._content

    @property
    def payload(self):
        """Build the complete MIME payload (rebuilt on every access)."""
        payload = self._build_content_payload(self._content)
        if self._attachments:
            # Wrap the body in multipart/mixed so attachments can follow it
            content_payload = payload
            payload = MIMEMultipart("mixed")
            payload.attach(content_payload)
            payload.preamble = self._PREAMBLE_TEXT
        payload = self._set_payload_headers(payload)
        for attachment in self._attachments:
            payload.attach(attachment.payload)
        return payload

    def _build_content_payload(self, content):
        # Overridden by HtmlMessage to produce multipart/alternative
        return MIMEText(content.encode(self._charset), "plain", self._charset)

    def _set_payload_headers(self, payload):
        """Stamp To/Cc/Bcc, From, Subject and Date headers onto *payload*."""
        for copy_type, recipients in self._recipients.items():
            for recipient in recipients:
                # Repeated assignment appends one header line per recipient
                payload[copy_type] = self._make_header(recipient)
        payload["From"] = self._make_header(self._sender)
        payload["Subject"] = self._make_header(self._subject)
        # NOTE(review): gmtime() yields UTC but %z formats the local offset;
        # confirm the Date header value is what is intended.
        payload["Date"] = strftime("%a, %d %b %Y %H:%M:%S %z", gmtime())
        return payload

    def _make_header(self, value):
        # MIME-encode (possibly non-ASCII) header values with the charset
        return make_header([(self._to_string(value), self._charset)])

    def _to_string(self, value):
        # Python 2's make_header expects encoded bytes; Python 3 takes text
        if PY3:
            return value
        else:
            return value.encode(self._charset)

    def attach(self, file, charset=None, mimetype=None):
        """Attach *file* and return the created Attachment.

        ``charset`` defaults to the message charset when not given.
        """
        if charset is None:
            charset = self._charset
        attachment = Attachment(file, charset, mimetype)
        self._attachments.append(attachment)
        return attachment

    # @unicode_compatible presumably derives the remaining string dunders
    # from the one defined per interpreter - TODO confirm against .compat
    if PY3:
        def __str__(self):
            return self.payload.as_string()
    else:
        def __bytes__(self):
            return self.payload.as_string()

    def __repr__(self):
        return to_string("<PlainMessage: %s>" % self.subject)
class HtmlMessage(PlainMessage):
    """Simple wrapper for data of e-mail message with HTML content."""

    def _build_content_payload(self, content):
        """Build a multipart/alternative payload: a tag-stripped plain-text
        part first, then the HTML part (clients prefer the last part)."""
        encoded = content.encode(self._charset)
        alternatives = MIMEMultipart("alternative", charset=self._charset)
        alternatives.attach(MIMEText(strip_tags(encoded), "plain", self._charset))
        alternatives.attach(MIMEText(encoded, "html", self._charset))
        return alternatives
| 31.364964 | 80 | 0.666279 | 3,831 | 0.891552 | 0 | 0 | 3,326 | 0.774028 | 0 | 0 | 352 | 0.081918 |
9fc5a772eac2a63c47ac1bf4d12da1436b080955 | 2,709 | py | Python | network/LeNet.py | cersar/BasicNetwork | 119ebb745e67a9b74b72cc4635fea360db0ed43f | [
"MIT"
] | 4 | 2019-01-02T07:54:51.000Z | 2019-01-04T06:11:15.000Z | network/LeNet.py | cersar/BasicNetwork | 119ebb745e67a9b74b72cc4635fea360db0ed43f | [
"MIT"
] | null | null | null | network/LeNet.py | cersar/BasicNetwork | 119ebb745e67a9b74b72cc4635fea360db0ed43f | [
"MIT"
] | null | null | null | import tensorflow as tf
import numpy as np
from model.train import fit
from keras.datasets import mnist
def LeNet(input_shape):
    """Build a LeNet-style TF1.x graph for 10-class classification.

    :param input_shape: (width, height, channels) of the input images.
        NOTE: conv1's filter shape hard-codes 1 input channel below, so the
        channels value must be 1.
    :return: tf.Graph with collections 'input' ({'x','y'}), 'loss'
        ({'loss'}) and 'output' ({'y_hat'}) exposing the relevant tensors.
    """
    iw,ih,c = input_shape
    net = tf.Graph()
    with net.as_default():
        # Placeholders: NHWC float images and integer class labels
        x = tf.placeholder(tf.float32,shape=(None,iw,ih,c),name='x')
        y = tf.placeholder(tf.int32,name='y')
        # Conv block 1: 5x5 conv (1 -> 6 channels), tanh, 2x2 average pool
        conv1_W=tf.get_variable("conv1_W",shape=[5,5,1,6],initializer=tf.contrib.layers.xavier_initializer())
        conv1=tf.nn.conv2d(x,conv1_W,[1,1,1,1],padding='SAME')
        conv1_act = tf.nn.tanh(conv1)
        pool1 = tf.nn.avg_pool(conv1_act,[1,2,2,1],[1,2,2,1],padding='VALID')
        # Conv block 2: 5x5 conv (6 -> 16 channels), tanh, 2x2 average pool
        conv2_W = tf.get_variable("conv2_W", shape=[5, 5, 6, 16], initializer=tf.contrib.layers.xavier_initializer())
        conv2 = tf.nn.conv2d(pool1, conv2_W, [1, 1, 1, 1], padding='VALID')
        conv2_act = tf.nn.tanh(conv2)
        pool2 = tf.nn.avg_pool(conv2_act, [1, 2, 2, 1], [1, 2, 2, 1], padding='VALID')
        # Flatten the feature maps for the dense layers
        flatten = tf.reshape(pool2,(-1,pool2.shape[1]*pool2.shape[2]*pool2.shape[3]))
        # Dense layer 1: -> 120 units, tanh
        dense1_W = tf.get_variable("dense1_W",shape=[flatten.shape[1],120],initializer=tf.contrib.layers.xavier_initializer())
        dense1_b = tf.get_variable("dense1_b", shape=[1,120],initializer=tf.initializers.zeros())
        dense1 = tf.matmul(flatten,dense1_W)+dense1_b
        dense1_act = tf.nn.tanh(dense1)
        # Dense layer 2: 120 -> 84 units, tanh
        dense2_W = tf.get_variable("dense2_W", shape=[120,84],
                                   initializer=tf.contrib.layers.xavier_initializer())
        dense2_b = tf.get_variable("dense2_b", shape=[1, 84], initializer=tf.initializers.zeros())
        dense2 = tf.matmul(dense1_act,dense2_W ) + dense2_b
        dense2_act = tf.nn.tanh(dense2)
        # Output layer: 84 -> 10 logits, softmax probabilities
        dense3_W = tf.get_variable("dense3_W", shape=[84, 10],
                                   initializer=tf.contrib.layers.xavier_initializer())
        dense3_b = tf.get_variable("dense3_b", shape=[1, 10], initializer=tf.initializers.zeros())
        logit = tf.matmul(dense2_act,dense3_W) + dense3_b
        y_hat = tf.nn.softmax(logit,name='y_hat')
        # Cross-entropy against one-hot labels, averaged over the batch
        loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=tf.one_hot(y,depth=10),logits=logit)
        loss = tf.reduce_mean(loss)
        # Expose tensors via collections so the training code can fetch them
        net.add_to_collection('input', {'x':x,'y':y})
        net.add_to_collection('loss', {'loss':loss})
        net.add_to_collection('output', {'y_hat':y_hat})
    return net
if __name__ == '__main__':
    # Load MNIST, add a trailing channel axis and scale pixels to [0, 1]
    (x_train, y_train), (x_test, y_test) = mnist.load_data('../dataset/mnist.npz')
    x_train = x_train[:, :, :, np.newaxis] / 255.
    x_test = x_test[:, :, :, np.newaxis] / 255.
    net = LeNet(input_shape=(28, 28, 1))
    # Train the graph; the positional 64 and 10 are presumably batch size
    # and epochs - TODO confirm against model.train.fit's signature
    fit(net, x_train, y_train, 64, 10,x_test,y_test,save_model_dir='../model_saved/LeNet')
| 46.706897 | 126 | 0.638612 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 212 | 0.078258 |
9fc6625edca5f3680489dcc397b225e54927655e | 29 | py | Python | _filament/__init__.py | comstud/filament | be6dbd6bf76dbcb0655c7fae239333d64ee8bb5f | [
"MIT"
] | 2 | 2017-03-08T20:29:52.000Z | 2019-05-15T20:15:42.000Z | _filament/__init__.py | comstud/filament | be6dbd6bf76dbcb0655c7fae239333d64ee8bb5f | [
"MIT"
] | null | null | null | _filament/__init__.py | comstud/filament | be6dbd6bf76dbcb0655c7fae239333d64ee8bb5f | [
"MIT"
] | null | null | null | from _filament.core import *
| 14.5 | 28 | 0.793103 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
9fcb149ac5dfe464c79d244e6065b0b4f62a43f1 | 20,865 | py | Python | modules/simulation/simulation.py | LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure | bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11 | [
"BSD-3-Clause"
] | null | null | null | modules/simulation/simulation.py | LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure | bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11 | [
"BSD-3-Clause"
] | null | null | null | modules/simulation/simulation.py | LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure | bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11 | [
"BSD-3-Clause"
] | null | null | null | """
Module to execute the simulation for a given instance.
"""
""" import packages """
import logging
from importlib import import_module
import numpy.random as rdm
import copy
import numpy as np
""" import project configurations """
import configurations.settings_simulation as config
""" import project libraries """
import modules.data.datamgm as dtm
from modules.simulation.entities import Tram, Stop, Passengers, CargoRequest, write_entities_log, init_entities_log
# Global logger
logger = dtm.initialise_logger(__name__)
"""
GLOBAL VARIABLES
----------------
- These variables must be resetted after every simulation run
"""
#: now - simulation clock: timestamp of the event being processed (-1 before start)
now = -1
#: last_now - timestamp of the previously processed event
last_now = 0
#: event_queue - pending events; each event is a dict with (at least) a 'time' key
event_queue = []
#: trams - all running tram entities
trams = []
#: stops - all stop entities
stops = []
#: cargo - all cargo entities
cargo = []
#: updates - entities whose state changed since the last log write
updates = set()
#: numEvents - total number of events processed so far
numEvents = 0
def reset_variables():
    """
    Reset all module-level simulation state so the next run starts clean.

    Called at the end of run(); without it, trams, stops, cargo and pending
    events would leak into the following simulation run.
    """
    global now, last_now, numEvents, trams, stops, event_queue, cargo, updates
    now = -1
    last_now = 0
    numEvents = 0
    # reset() is invoked on the FIRST tram only - presumably it clears
    # class-level state shared by all Tram instances; TODO confirm against
    # modules.simulation.entities.Tram
    if trams:
        trams[0].reset()
    trams.clear()
    # Stops are reset individually, unlike Tram/CargoRequest above
    for stop in stops:
        stop.reset()
    stops.clear()
    event_queue.clear()
    # Class-level reset of the global passenger bookkeeping
    Passengers.reset()
    # Same single-instance reset pattern as trams - TODO confirm
    if cargo:
        cargo[0].reset()
    cargo.clear()
    updates.clear()
"""
SIMULATION LOGGING
------------------
- Simluation log (Text File): Includes all information about the events in the simulation
- Entities Log (csv file): Includes the relevant data information of single entities
"""
# "Simulation log": descriptive record of what happens during a single run
sim_log = logging.getLogger("simulation")
# "Entities log": how the entities' variables change during a single run
ent_log = logging.getLogger("entities")
"""
SIMULATION METHODS
------------------
"""
def run(instance, passengerData, seed=False, index_child_seed=False):
    """
    Execute one full simulation run for the given instance.

    :param instance: Path to the instance file
    :param passengerData: Path to the passenger data file
    :param seed: numpy SeedSequence to replicate the simulation; a fresh one
        is created when falsy
    :param index_child_seed: Index of the child of the global seed sequence
        (recorded in the log for replicability)
    """
    # Used global variables
    global inst, now, last_now, event_queue, numEvents

    """ Initialise random generator """
    # Reuse the caller-supplied seed sequence, or create a fresh one.
    if seed:
        entropy = seed.entropy
    else:
        seed = rdm.SeedSequence()
        entropy = seed.entropy

    # Import instance (from .py-file)
    inst = dtm.import_instance(instance)
    # Initialize the simulation
    passenger = initialize(seed, passengerData)

    # Main loop: process the event queue in chronological order.
    running = True
    while running:
        # sort the upcoming events according to the time they occur
        event_queue = sorted(event_queue, key=lambda i: i['time'])
        if event_queue:
            if event_queue[0]['time'] != now:
                # A new point in simulated time is reached: flush status and
                # entity logs for the period that just ended.
                if now >= 0:
                    status(now)
                    for entity in updates:
                        # "passenger" is a sentinel for the global passenger object
                        if entity == "passenger":
                            entity = passenger
                        entity.last_event = now
                        write_entities_log(entity, now)
                    updates.clear()
                last_now = now
                now = event_queue[0]['time']
                sim_log.info("\n-----------------------------------------------------------------------------------")
                sim_log.info(f"Events at {now}:")
                sim_log.info("***")
            next_event()
            numEvents += 1
            event_queue.pop(0)
        # No more events
        else:
            last_time_period(inst.numPeriods-1, passenger)
            running = False

    # Save values for replicability
    sim_log.info(f"\nentropy:\n{entropy}\n")
    # BUG FIX: this line previously logged the entropy a second time under
    # the index_child_seed label; log the actual index instead.
    sim_log.info(f"index_child_seed:\n{index_child_seed}\n")
    # Reset after simulation run
    reset_variables()
# Initialisation
def initialize(seed, passengerData):
    """
    This function initialises the simulation run, i.e., creates the needed variables and adds the first events to the event log.

    :param seed: Seed for replicability
    :type seed: numpy.random.SeedSequence
    :param passengerData: Path to passenger data file
    :type passengerData: string or path
    :return: Global passenger object to track number of passengers
    :rtype: Passengers object
    """
    global event_queue
    sim_log.info("Initialisation...\n--------------------------------------")
    # Create child seedsequence per entity
    seeds = seed.spawn(10)
    # Entities Log
    init_entities_log()
    # initialize stops
    for s in range(inst.numStops):
        #sim_log.info("Creating Stop {}.".format(s))
        distance_to = {"Stop": inst.stops_distance[s],"Customer": [0]}
        distance_from = {"Stop": [inst.stops_distance[j][s] for j in range(inst.numStops)], "Customer": [0]}
        if s == 0:
            # the first stop doubles as the depot (third argument True)
            stops.append(Stop(distance_to,distance_from,True))
        else:
            stops.append(Stop(distance_to,distance_from))
    pas = dtm.import_instance(passengerData)

    """ Initialize passengers """
    passenger_seeds = seeds[0].spawn(6)
    if config.random_passenger_arrival:
        arriving = pas.arriving_intensity
        config.random_passenger_arrival = passenger_seeds[0]
    else:
        arriving = pas.passenger_arriving
    # instantiate passenger arrivals: one update event (id 6) per non-zero
    # (period, stop) entry of the arrival matrix
    nonzero = np.nonzero(arriving)
    for i in range(len(nonzero[0])):
        p = nonzero[0][i]
        s = nonzero[1][i]
        create_event(p, 6, [s])
    # Replace each enabled randomisation flag with its dedicated child seed.
    if config.random_passenger_boarding:
        config.random_passenger_boarding = passenger_seeds[1]
    if config.random_passenger_alighting:
        # BUG FIX: this previously overwrote random_passenger_boarding a
        # second time, leaving the alighting flag unseeded.
        config.random_passenger_alighting = passenger_seeds[2]
    if config.random_passenger_changing:
        config.random_passenger_changing = passenger_seeds[3]
    if config.random_boarding_time:
        config.random_boarding_time = passenger_seeds[4]
    if config.random_alighting_time:
        config.random_alighting_time = passenger_seeds[5]

    """ Global passenger variables """
    passenger = Passengers(
        # passenger arrival
        random_arrival = config.random_passenger_arrival,
        arriving_passengers = arriving,
        arriving_passengers_cum = pas.passenger_arriving_acc,
        # passenger boarding
        random_boarding = config.random_passenger_boarding,
        boarding_rate = [1 for tram in range(inst.numTrams)],
        # passenger alighting
        random_alighting = config.random_passenger_alighting,
        alighting_rate = pas.passenger_allighting_rate,
        # passenger changing
        random_changing = config.random_passenger_changing,
        changing_rate = [0 for tram in range(inst.numStops)],
        # time
        random_boarding_time = config.random_boarding_time,
        random_alighting_time = config.random_alighting_time,
        service_time = inst.passenger_service_time_board,
        service_time_alight = inst.passenger_service_time_alight,
    )

    # Initialize the starting times of each tram (event id 1)
    tram_seeds = seeds[1].spawn(inst.numTrams)
    for t in range(inst.numTrams):
        sim_log.info(f"Tram {t} will start at {inst.tram_time_arrival[t][0]}.")
        Tram.numTotal += 1
        create_event(inst.tram_time_arrival[t][0],1,[t,tram_seeds[t]])
    # Initialize the cargo release (event id 5)
    cargo_seeds = seeds[2].spawn(inst.numCargo)
    for c in range(inst.numCargo):
        sim_log.info(f"Cargo request {c} will start at {inst.cargo_release[c]}.")
        create_event(inst.cargo_release[c],5,[c,cargo_seeds[c]])
    # sort the event queue according to the time
    event_queue = sorted(event_queue,key = lambda i: i['time'])
    sim_log.info("\n-----------------------------------------------------------------------------------\n")
    return passenger
def last_time_period(time, passenger):
    """
    Flush the entity log for the final period of a simulation run.

    :param time: last period
    :type time: float
    :param passenger: passenger object
    :type passenger: Passengers object
    """
    status(time)
    # Record the closing state of every tram and every stop.
    for entity in trams + stops:
        write_entities_log(entity, time)
    write_entities_log(passenger, time)
    # Cargo requests additionally estimate their final delay before logging.
    for request in cargo:
        request.estimate_delay(time)
        write_entities_log(request, time)
def status(time):
    """
    Append the current state of every entity to the simulation log.

    :param time: Time of update
    :type time: float
    """
    global updates
    sim_log.info("\n*~* Status *~*")
    for tram in trams:
        tram.info()
        # Append a new snapshot only while fewer snapshots than `stopped`
        # exist (presumably `stopped` counts completed halts — TODO confirm).
        if len(tram.sequences) < tram.stopped:
            tram.sequences.append({
                "time": time,
                "cargo": tram.cargosize,
                "passengers": tram.passengers,
                "delay": tram.delay,
            })
    for stop in stops:
        stop.info()
        if len(stop.sequences) < stop.stopped:
            stop.sequences.append({
                "time": time,
                "cargo": stop.cargosize,
                "passengers": stop.passengers,
            })
    CargoRequest.info()
    Passengers.info()
"""
METHODS FOR HANDLING EVENTS
---------------------------
"""
def create_event(t, event_id, par):
    """
    Queue a new event, provided it falls inside the planning horizon.

    :param t: time
    :type t: float
    :param event_id: event id
    :type event_id: int
    :param par: event parameters
    :type par: list
    """
    # Events scheduled at or beyond the last period are silently dropped.
    within_horizon = np.ceil(t) < inst.numPeriods
    if within_horizon:
        event_queue.append({"time": t, "id": event_id, "par": par})
def next_event():
    """
    Dispatch the event at the head of the (already sorted) event queue to
    the handler matching its id. The caller removes the event afterwards.
    """
    head = event_queue[0]
    event_id = head["id"]
    par = head["par"]
    # id 1: a new tram is started (second parameter is its seed)
    if event_id == 1:
        starting_tram(par[0], seed=par[1])
    # id 2: a tram reaches a stop (but does not enter yet)
    elif event_id == 2:
        tram_reaches_stop(par[0])
    # id 3: a tram enters a stop
    elif event_id == 3:
        tram_entering_stop(par[0])
    # id 4: a tram leaves a stop (the next queued tram may enter)
    elif event_id == 4:
        tram_leaves_stop(par[0])
    # id 5: a cargo request is released (second parameter is its seed)
    elif event_id == 5:
        starting_cargo(par[0], seed=par[1])
    # id 6: passenger arrivals are updated
    elif event_id == 6:
        passenger_update(par[0])
"""
EVENT METHODS
-----------------------------------
"""
def starting_tram(index, seed):
    """
    Event no. 1: Starting a tram

    :param index: Index of the tram
    :type index: int
    :param seed: Seed for replicability
    :type seed: int
    """
    global now, updates
    tram_id = len(trams)
    if config.random_travel_time:
        config.random_travel_time = seed
    # debugging
    #logger.debug(f"tram_travel_deviation: {config.tram_travel_deviation}")

    # Determine the capacity split; all other Tram arguments are identical
    # across the schemes, so the three former duplicate constructor calls
    # are collapsed into one.
    if inst.scheme == "SV":
        # shared vehicles: passengers and cargo share capacity, each side
        # keeping a reserved minimum for the other
        capacity_passenger = inst.tram_capacity - inst.tram_capacity_min_cargo
        capacity_cargo = inst.tram_capacity - inst.tram_capacity_min_passenger
    elif inst.scheme == "SI":
        # dedicated vehicles
        if index in inst.cargo_tram_assignment:
            # cargo tram
            capacity_passenger = 0
            capacity_cargo = inst.tram_capacity_cargo
        else:
            # passenger tram
            capacity_passenger = inst.tram_capacity
            capacity_cargo = 0
    # NOTE: any other scheme value is unsupported and fails here (NameError),
    # mirroring the previously undefined behaviour of the elif chain.

    trams.append(Tram(
        tour = inst.tram_tour[index],
        capacity_passenger = capacity_passenger,
        capacity_cargo = capacity_cargo,
        capacity_total = inst.tram_capacity,
        schedule_arrival = inst.tram_time_arrival[index],
        schedule_departure = inst.tram_time_departure[index],
        speed = inst.tram_speed,
        # Simulation deterministic by default
        random_travel_time = config.random_travel_time,
        travel_deviation = config.tram_travel_deviation,
        max_service = inst.tram_max_service
        )
    )
    tram = trams[-1]
    # An operating tram immediately reaches its first location; otherwise
    # only its state change is recorded.
    if tram.is_operating:
        tram_reaches_stop(tram_id)
    else:
        updates.add(tram)
def tram_reaches_stop(tram_id):
    """
    Event no. 2: Tram reaches stop. It either queues up or enters the stop.

    :param tram_id: tram id
    :type tram_id: int
    """
    global now
    tram = trams[tram_id]
    tram.reach_next_location(now)
    current_stop = stops[tram.tour[tram.position]]
    # Enter immediately when the platform is free; otherwise the tram waits
    # in the stop's queue and only its state change is recorded.
    if current_stop.check_queue(tram):
        tram_entering_stop(tram_id)
    else:
        updates.add(tram)
def tram_entering_stop(tram_id):
    """
    Event no. 3: Tram enters the platform of the stop.

    Handles passenger boarding/alighting, cargo unloading and loading, and
    schedules the departure event (id 4) at the resulting leaving time.

    :param tram_id: tram id
    :type tram_id: int
    """
    global now, updates
    tram = trams[tram_id]
    stop=stops[tram.tour[tram.position]]
    tram.enter_next_stop(stop,now)
    boarding_time = 0
    alighting_time = 0
    # Update passengers
    if tram.passenger_transport:
        boarding_time, alighting_time = passenger_update(stop.index,True,True)
    # Compute leaving time with passengers only
    leaving_time = tram.compute_leaving_time(now,boarding_time,alighting_time)
    new_leaving_time = False
    # NOTE(review): new_leaving_time is assigned below but never read;
    # the final departure time is taken from tram.leaving_time instead.
    if tram.cargo_transport:
        # unloading
        # Iterate over shallow copies because load()/unload() presumably
        # mutate the underlying cargoload lists — TODO confirm.
        tram_cargoload = copy.copy(tram.cargoload)
        for c in tram_cargoload:
            request = cargo[c]
            if request.end_stop == stop.index:
                unloading_time = request.unload(tram,stop,now)
                new_leaving_time = tram.compute_leaving_time(now,unloading_time=unloading_time)
                updates.add(request)
        # clearing the copy does not affect tram.cargoload itself
        tram_cargoload.clear()
        # loading
        stop_cargoload = copy.copy(stop.cargoload)
        for c in stop_cargoload:
            request = cargo[c]
            if request.assigned_vehicle == tram.index:
                loading_time = request.load(tram,stop)
                new_leaving_time = tram.compute_leaving_time(now,loading_time=loading_time)
                updates.add(request)
        stop_cargoload.clear()
    updates.add(tram)
    # Schedule the departure at the (possibly postponed) leaving time.
    create_event(tram.leaving_time, 4, [tram_id])
    return updates
def tram_leaves_stop(tram_id):
    """
    Event no. 4: Tram leaves the stop.

    :param tram_id: tram id
    :type tram_id: int
    """
    global now
    tram = trams[tram_id]
    current_stop = stops[tram.tour[tram.position]]
    # A stale departure event (the leaving time was postponed after this
    # event was queued) is simply ignored.
    if tram.leaving_time != now:
        return
    travel_time = tram.leave_location(current_stop, now)
    updates.add(tram)
    updates.add(current_stop)
    if tram.is_operating:
        # Schedule the arrival at the next location on the tour.
        create_event(now + travel_time, 2, [tram_id])
    # Let the next queued tram (if any) enter the freed platform after the
    # mandatory headway.
    waiting_tram = current_stop.next_tram_in_queue(tram)
    if waiting_tram >= 0:
        create_event(now + inst.min_time_next_tram , 3, [waiting_tram])
def starting_cargo(index, seed):
    """
    Event no. 5: New cargo request arrives

    :param index: cargo index
    :type index: int
    :param seed: seed for randomisation
    :type seed: int
    """
    global now, updates, trams
    # Instantiate the new request; every request originates at stop 0.
    new_request = CargoRequest(
        release = inst.cargo_release[index],
        deadline = inst.cargo_station_deadline[index],
        end_stop = inst.cargo_station_destination[index],
        assigned_vehicle = inst.cargo_tram_assignment[index],
        stop = stops[0],
        service_time = inst.cargo_service_time_load,
        service_time_unload = inst.cargo_service_time_unload,
        size = inst.cargo_size,
        random_service_time = seed,
    )
    cargo.append(new_request)
    origin = stops[new_request.start_stop]
    # Record the state change of both the stop and the request.
    updates.add(origin)
    updates.add(new_request)
    # If the assigned vehicle is currently waiting at the origin, load the
    # request right away and push the tram's departure back if necessary.
    if origin.current_tram == new_request.assigned_vehicle:
        assigned_tram = trams[new_request.assigned_vehicle]
        loading_time = new_request.load(assigned_tram, origin)
        leaving_time = assigned_tram.compute_leaving_time(now, loading_time = loading_time)
        updates.add(assigned_tram)
        # A changed leaving time requires a fresh departure event (id 4).
        if leaving_time:
            create_event(leaving_time, 4, [assigned_tram.index])
def passenger_update(stop_id,recent_tram_arrival = False, consider_tram=False):
    """
    Event no. 6: New passengers arrive and/or alight and board a vehicle

    :param stop_id: Index of the stop
    :type stop_id: int
    :param recent_tram_arrival: New arrival of tram (True) or update while tram is waiting (False)?, defaults to False
    :type recent_tram_arrival: bool, optional
    :param consider_tram: Consider boarding and alighting process (True) or only arrival (False), defaults to False
    :type consider_tram: bool, optional
    :return: boarding and alighting time
    :rtype: tuple
    """
    global now, updates
    stop = stops[stop_id]
    # Only inspect the tram at the platform when boarding/alighting should
    # be simulated; tram_id == -1 means "arrival update only".
    if consider_tram:
        tram_id = stop.current_tram
    else:
        tram_id = -1
    # Update arriving passengers
    Passengers.arrival(now,stop)
    boarding_time = 0
    alighting_time = 0
    # if currently a tram waits at the platform
    if tram_id >= 0:
        tram = trams[tram_id]
        # Skip boarding entirely when the tram is leaving in this very
        # instant (unless it just arrived).
        if recent_tram_arrival or tram.leaving_time != now:
            # Alighting and changing only happen once, on the tram's arrival.
            if recent_tram_arrival:
                # compute number and time for alighting passengers
                alighting_passengers, alighting_time = Passengers.alighting(stop,tram,now)
            # compute number and time for boarding passengers
            boarding_passengers, boarding_time = Passengers.boarding(stop,tram,now)
            if recent_tram_arrival:
                # compute number and time for changing passengers
                changing_passengers = Passengers.changing(stop,alighting_passengers,now)
            # Update leaving time
            # On arrival the caller (tram_entering_stop) recomputes the
            # leaving time itself; only later updates reschedule it here.
            if not recent_tram_arrival:
                leaving_time = tram.compute_leaving_time(now,boarding_time,alighting_time, 0, 0)
                updates.add(tram)
                #write_entities_log(tram,now)
                # Did the leaving time change?
                if leaving_time:
                    create_event(leaving_time, 4, [tram_id])
    #next_arrival = Passengers.compute_next_arrival_time(now,stop,tram)
    #if next_arrival:
        # create new event (for passengers that may arrive before the current tram leaves)
        #create_event(next_arrival, 6, [stop_id])
    updates.add(stop)
    # sentinel resolved to the global passenger object by run()
    updates.add("passenger")
    return boarding_time, alighting_time
| 29.100418 | 134 | 0.600096 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,682 | 0.320249 |
9fcb7c9ad45cf8f1c22dd43dd8aff7340d0d4f39 | 1,957 | py | Python | looking_for_group/discord/views.py | andrlik/looking-for-group | 0b1cecb37ef0f6d75692fd188130e2c60d09b7d2 | [
"BSD-3-Clause"
] | null | null | null | looking_for_group/discord/views.py | andrlik/looking-for-group | 0b1cecb37ef0f6d75692fd188130e2c60d09b7d2 | [
"BSD-3-Clause"
] | null | null | null | looking_for_group/discord/views.py | andrlik/looking-for-group | 0b1cecb37ef0f6d75692fd188130e2c60d09b7d2 | [
"BSD-3-Clause"
] | null | null | null | import requests
from allauth.socialaccount.providers.discord.views import DiscordOAuth2Adapter
from allauth.socialaccount.providers.oauth2.views import OAuth2CallbackView, OAuth2LoginView
from .permissions import Permissions
from .provider import DiscordProviderWithGuilds
# Create your views here.
class DiscordGuildOAuth2Adapater(DiscordOAuth2Adapter):
    """Discord OAuth2 adapter for the local provider that also exposes the
    user's guild memberships."""

    provider_id = DiscordProviderWithGuilds.id
    guilds_url = 'https://discordapp.com/api/users/@me/guilds'
    get_guild_url = 'https://discordapp.com/api/guilds'

    def get_guilds_with_permissions(self, app, token, test_response=None, **kwargs):
        """
        Fetch the current user's guild listing and annotate every guild with
        the community role derived from its permission bits.

        :returns: A python representation of the JSON list of discord guilds.
        """
        headers = {
            'Authorization': 'Bearer {0}'.format(token.token),
            'Content-Type': 'application/json',
        }
        # A canned response can be injected for testing instead of calling
        # the Discord API.
        if test_response:
            guild_data = test_response.json()
        else:
            guild_data = requests.get(self.guilds_url, headers=headers).json()
        for guild in guild_data:
            guild['comm_role'] = self.parse_permissions(guild)
        return guild_data

    def parse_permissions(self, guild_dict):
        """
        Map one guild's permission listing onto a community role:
        'admin', 'moderator' or 'member'.
        """
        # Guild owners are admins regardless of their permission bits.
        if guild_dict['owner']:
            return 'admin'
        inspector = Permissions(guild_dict['permissions'])
        if inspector.administrator:
            return 'admin'
        if inspector.manage_messages or inspector.manage_server:
            return 'moderator'
        return 'member'
# Expose the allauth login/callback views wired to the guild-aware adapter.
oauth2_login = OAuth2LoginView.adapter_view(DiscordGuildOAuth2Adapater)
oauth2_callback = OAuth2CallbackView.adapter_view(DiscordGuildOAuth2Adapater)
| 36.240741 | 92 | 0.695452 | 1,501 | 0.76699 | 0 | 0 | 0 | 0 | 0 | 0 | 580 | 0.296372 |
9fccc2d887be2764a1520f735d223ce37ff4b9e3 | 10,174 | py | Python | src/page/rebasetrackingreview.py | darobin/critic | 9d09f3ae45d0b37fb899c5323409c06e8622a2a1 | [
"Apache-2.0",
"MIT"
] | 1 | 2020-12-04T18:43:10.000Z | 2020-12-04T18:43:10.000Z | src/page/rebasetrackingreview.py | darobin/critic | 9d09f3ae45d0b37fb899c5323409c06e8622a2a1 | [
"Apache-2.0",
"MIT"
] | null | null | null | src/page/rebasetrackingreview.py | darobin/critic | 9d09f3ae45d0b37fb899c5323409c06e8622a2a1 | [
"Apache-2.0",
"MIT"
] | null | null | null | # -*- mode: python; encoding: utf-8 -*-
#
# Copyright 2012 Jens Lindström, Opera Software ASA
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import page
import htmlutils
import gitutils
import request
from page.parameters import Optional, ReviewId
class RebaseTrackingReview(page.Page):
    """Page for rebasing a review that tracks a remote branch.

    Registered under the URL name "rebasetrackingreview"; all handler
    parameters except the review id are optional.
    """
    def __init__(self):
        super(RebaseTrackingReview, self).__init__("rebasetrackingreview",
                                                   { "review": ReviewId,
                                                     "newbranch": Optional(str),
                                                     "upstream": Optional(str),
                                                     "newhead": Optional(str),
                                                     "newupstream": Optional(str) },
                                                   RebaseTrackingReview.Handler)

    class Handler(page.Page.Handler):
        def __init__(self, review, newbranch=None, upstream=None, newhead=None, newupstream=None):
            super(RebaseTrackingReview.Handler, self).__init__(review)
            # Parameters describing the fetched rebased branch; all None on
            # the initial (branch-selection) rendering of the page.
            self.newbranch = newbranch
            self.upstream = upstream
            self.newhead = newhead
            self.newupstream = newupstream

        def generateHeader(self):
            # Page-specific stylesheet and scripts (autocomplete for branch
            # names, the rebase workflow logic).
            self.document.addExternalStylesheet("resource/rebasetrackingreview.css")
            self.document.addExternalScript("resource/autocomplete.js")
            self.document.addExternalScript("resource/rebasetrackingreview.js")

        def generateContent(self):
            # Only reviews that actually track a remote branch can be
            # rebased through this page.
            trackedbranch = self.review.getTrackedBranch(self.db)

            if not trackedbranch:
                raise request.DisplayMessage("Not supported!", "The review r/%d is not tracking a remote branch." % self.review.id)

            # Client-side state consumed by rebasetrackingreview.js.
            self.document.addInternalScript(self.review.repository.getJS())
            self.document.addInternalScript(self.review.getJS())
            self.document.addInternalScript("var trackedbranch = { remote: %s, name: %s };"
                                            % (htmlutils.jsify(trackedbranch.remote),
                                               htmlutils.jsify(trackedbranch.name)))

            table = page.utils.PaleYellowTable(self.body, "Rebase tracking review")

            def renderRemote(target):
                target.span("value", id="remote").text(trackedbranch.remote)
            def renderCurrentBranch(target):
                target.span("value", id="currentbranch").text("refs/heads/" + trackedbranch.name)

            table.addItem("Remote", renderRemote)
            table.addItem("Current branch", renderCurrentBranch)
            table.addSeparator()

            # Second stage: all four parameters are present, i.e. the new
            # branch has already been fetched — analyse it and offer the
            # actual rebase.
            if self.newbranch is not None and self.upstream is not None and self.newhead is not None and self.newupstream is not None:
                import log.html
                import log.commitset

                # Commits reachable from the new head but not from the new
                # upstream: the rebased version of the review's commits.
                sha1s = self.review.repository.revlist(included=[self.newhead], excluded=[self.newupstream])
                new_commits = log.commitset.CommitSet(gitutils.Commit.fromSHA1(self.db, self.review.repository, sha1) for sha1 in sha1s)

                new_heads = new_commits.getHeads()
                if len(new_heads) != 1:
                    raise page.utils.DisplayMessage("Invalid commit-set!", "Multiple heads. (This ought to be impossible...)")

                new_upstreams = new_commits.getFilteredTails(self.review.repository)
                if len(new_upstreams) != 1:
                    raise page.utils.DisplayMessage("Invalid commit-set!", "Multiple upstreams. (This ought to be impossible...)")

                new_head = new_heads.pop()
                new_upstream_sha1 = new_upstreams.pop()

                old_commits = log.commitset.CommitSet(self.review.branch.commits)
                old_upstreams = old_commits.getFilteredTails(self.review.repository)

                if len(old_upstreams) != 1:
                    raise page.utils.DisplayMessage("Rebase not supported!", "The review has mulitple upstreams and can't be rebased.")

                # Classify the rebase: same upstream means a pure history
                # rewrite; otherwise it is a "move", fast-forward when the
                # new upstream descends from the old one.
                if len(old_upstreams) == 1 and new_upstream_sha1 in old_upstreams:
                    # This appears to be a history rewrite.
                    new_upstream = None
                    new_upstream_sha1 = None
                    rebase_type = "history"
                else:
                    old_upstream = gitutils.Commit.fromSHA1(self.db, self.review.repository, old_upstreams.pop())
                    new_upstream = gitutils.Commit.fromSHA1(self.db, self.review.repository, new_upstream_sha1)

                    if old_upstream.isAncestorOf(new_upstream):
                        rebase_type = "move:ff"
                    else:
                        rebase_type = "move"

                # State for the client-side pre-rebase checks.
                self.document.addInternalScript("var check = { rebase_type: %s, old_head_sha1: %s, new_head_sha1: %s, new_upstream_sha1: %s, new_trackedbranch: %s };"
                                                % (htmlutils.jsify(rebase_type),
                                                   htmlutils.jsify(self.review.branch.head.sha1),
                                                   htmlutils.jsify(new_head.sha1),
                                                   htmlutils.jsify(new_upstream_sha1),
                                                   htmlutils.jsify(self.newbranch[len("refs/heads/"):])))

                def renderNewBranch(target):
                    target.span("value", id="newbranch").text(self.newbranch)
                    target.text(" @ ")
                    target.span("value").text(new_head.sha1[:8] + " " + new_head.niceSummary())
                def renderUpstream(target):
                    target.span("value", id="upstream").text(self.upstream)
                    target.text(" @ ")
                    target.span("value").text(new_upstream.sha1[:8] + " " + new_upstream.niceSummary())

                table.addItem("New branch", renderNewBranch)
                if new_upstream:
                    table.addItem("New upstream", renderUpstream)
                table.addSeparator()

                # Status placeholders updated by rebasetrackingreview.js.
                def renderMergeStatus(target):
                    target.a("status", id="status_merge").text("N/A")
                def renderConflictsStatus(target):
                    target.a("status", id="status_conflicts").text("N/A")
                def renderHistoryRewriteStatus(target):
                    target.a("status", id="status_historyrewrite").text("N/A")

                table.addSection("Status")

                if rebase_type == "history":
                    table.addItem("History rewrite", renderHistoryRewriteStatus)
                else:
                    if rebase_type == "move:ff":
                        table.addItem("Merge", renderMergeStatus)
                    table.addItem("Conflicts", renderConflictsStatus)

                def renderRebaseReview(target):
                    target.button(id="rebasereview", onclick="rebaseReview();", disabled="disabled").text("Rebase Review")

                table.addSeparator()
                table.addCentered(renderRebaseReview)

                log.html.render(self.db, self.body, "Rebased commits", commits=list(new_commits))
            # First stage: let the user pick the rebased remote branch and
            # the upstream, then fetch the branch.
            else:
                # Site-specific hooks for suggesting candidate branch names;
                # both are optional.
                try:
                    from customization.branches import getRebasedBranchPattern
                except ImportError:
                    def getRebasedBranchPattern(branch_name): return None

                pattern = getRebasedBranchPattern(trackedbranch.name)

                try:
                    from customization.branches import isRebasedBranchCandidate
                except ImportError:
                    isRebasedBranchCandidate = None

                if pattern or isRebasedBranchCandidate:
                    # Ask the remote for matching branch names.
                    candidates = [name[len("refs/heads/"):]
                                  for sha1, name in gitutils.Repository.lsremote(trackedbranch.remote, pattern=pattern, include_heads=True)
                                  if name.startswith("refs/heads/")]

                    if isRebasedBranchCandidate is not None:
                        def isCandidate(name):
                            return isRebasedBranchCandidate(trackedbranch.name, name)
                        candidates = filter(isCandidate, candidates)
                else:
                    candidates = []

                if len(candidates) > 1:
                    # Multiple candidates: offer a drop-down (editable via
                    # the "Edit" button).
                    def renderCandidates(target):
                        target.text("refs/heads/")
                        dropdown = target.select(id="newbranch")
                        for name in candidates:
                            dropdown.option(value=name).text(name)

                    table.addItem("New branch", renderCandidates,
                                  buttons=[("Edit", "editNewBranch(this);")])
                else:
                    # Zero or one candidate: plain text input, pre-filled
                    # with the candidate or the currently tracked name.
                    if len(candidates) == 1:
                        default_value = candidates[0]
                    else:
                        default_value = trackedbranch.name

                    def renderEdit(target):
                        target.text("refs/heads/")
                        target.input(id="newbranch", value=default_value)

                    table.addItem("New branch", renderEdit)

                def renderUpstreamInput(target):
                    target.input(id="upstream", value="refs/heads/master")

                table.addItem("Upstream", renderUpstreamInput)

                def renderFetchBranch(target):
                    target.button(onclick="fetchBranch();").text("Fetch Branch")

                table.addSeparator()
                table.addCentered(renderFetchBranch)
| 48.218009 | 166 | 0.553666 | 9,422 | 0.925995 | 0 | 0 | 0 | 0 | 0 | 0 | 1,941 | 0.190762 |
9fccdafbf659c38a1a762c6f3bc28239cbcc246f | 3,175 | py | Python | parser.py | bitounu/startupy | 490a48a5e83900d91c5a2a67bb7fd286112f49f4 | [
"Unlicense"
] | null | null | null | parser.py | bitounu/startupy | 490a48a5e83900d91c5a2a67bb7fd286112f49f4 | [
"Unlicense"
] | null | null | null | parser.py | bitounu/startupy | 490a48a5e83900d91c5a2a67bb7fd286112f49f4 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# zależności Pythona: BeatifulSoup
# instalacja z pakietu
# Debian /Ubuntu: apt-get install python-bs4
# albo
# easy_install beautifulsoup4
# lub
# pip install beautifulsoup4
# skrypt robi spis firm ze stron mambiznes.pl
# i wypluwa CSV:
# Kolumny:
# fid
# nazwa - Nazwa firmy
# url - url do strony w mambiznes.pl
# opis - skrócony opis
# full - link do lokalnego pliku z pełnym opisem
# ourl - url do oryginalnej strony firmy
import sys
import urllib2
import random
from time import sleep
from bs4 import BeautifulSoup
from bs4 import SoupStrainer
# running company identifier (incremented once per scraped entry)
fid = 0
# number of index pages on mambiznes.pl (has to be checked manually)
# as of 18.09.2017 there are 53
ILE_STRON = 53
# output file holding the company index
CSV_FILE = "startupy.csv"
# multiplier for sleep() used to throttle requests (to evade firewalls)
MNOZNIK = 10
# HTML header prepended to every file with a full company description
html_header = """
<!DOCTYPE html>
<html lang="pl-PL">
<head>
<meta charset="UTF-8">
<link rel="stylesheet" href="mambiznes.css" type="text/css">
"""
html_footer = """
</body>
</html>
"""
# narrow index-page parsing to the "main" div
only_main = SoupStrainer("main")
# narrow company-page parsing to the div with this class
only_opis = SoupStrainer("div", class_="post-desc np")
# local-file test harness (kept for debugging)
#plik = open('test.html', 'r').read()
#artin = (BeautifulSoup(plik, "html.parser", parse_only=only_main))
# skanuj() below emits the CSV rows for one parsed index page
def skanuj(artin):
    """Scan one parsed index page and return its CSV rows (utf-8 bytes).

    For every company entry found, also fetches the company's full
    description page and writes it to a local "<fid>.html" file.

    NOTE(review): field values are not CSV-escaped, so embedded double
    quotes in names/descriptions would break the output — confirm whether
    the source data can contain them.
    """
    global fid
    linia = ""
    for x in artin.find_all("div", class_="article-bottom"):
        fid += 1
        # progress indicator: one dot per company
        sys.stdout.write('.')
        sys.stdout.flush()
        opis_file = str(fid) + ".html"
        url = x.find('a', class_='dib title').get('href')
        nazwa = x.find('a', class_='dib title').contents[0]
        # append one CSV row: fid, name, url, excerpt, local file, (empty)
        linia += \
        '"' + \
        str(fid) + \
        '","' + \
        nazwa + \
        '","' + \
        url + \
        '","' + \
        x.find('p', class_="excerpt").contents[0] + \
        '","' + \
        opis_file + \
        '",""' + \
        "\n"
        # fetch the full company description
        # random delay to confuse a possible proxy/firewall
        sleep(random.random() * MNOZNIK/1.3)
        opis_url = urllib2.urlopen(url)
        opis = (BeautifulSoup(opis_url, "html.parser", parse_only=only_opis))
        plout = open(opis_file, 'w')
        txtout = html_header + "<title>" + nazwa.encode('utf-8') + "</title>\n</head>\n\n<body>" + str(opis) + html_footer
        plout.write(str(txtout))
        plout.close()
    return linia.encode('utf-8')
# main script: download every index page from the portal and collect rows
print "Pobieram strone:"
out = "fid,nazwa,url,opis,full,ourl\n"
for i in range(1, ILE_STRON+1):
    # progress indicator: current page number
    sys.stdout.write(str(i))
    sys.stdout.flush()
    weburl = "https://mambiznes.pl/startupy/page/" + str(i)
    data = urllib2.urlopen(weburl)
    artin = (BeautifulSoup(data, "html.parser", parse_only=only_main))
    out += skanuj(artin)
sys.stdout.write('done\n')
sys.stdout.flush()
#print out
# write the collected index to a file so it is kept around
fout = open(CSV_FILE, 'w')
fout.write(out)
fout.close()
| 28.097345 | 122 | 0.610079 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,602 | 0.500156 |
9fce30754fe976da1842b3aa8008d94f1ad68697 | 63 | py | Python | utils/checks.py | WJxReloaded/pkbt2 | 3c6512ef3e5b5f7fe077c8a1adbe9d75c692b485 | [
"MIT"
] | 4 | 2017-09-19T12:51:40.000Z | 2018-02-16T01:02:16.000Z | utils/checks.py | tacopill/Pokebot | 1abf35c35897bdddb17d5f079a6d1432c4ba1431 | [
"MIT"
] | null | null | null | utils/checks.py | tacopill/Pokebot | 1abf35c35897bdddb17d5f079a6d1432c4ba1431 | [
"MIT"
] | 3 | 2017-10-17T22:29:09.000Z | 2018-09-03T03:47:27.000Z | def no_delete(cmd):
cmd._delete_ctx = False
return cmd
| 15.75 | 27 | 0.68254 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
4c7de526802297e77a682fdac5f19a9acc13c428 | 275 | py | Python | contratospr/contracts/manager.py | jycordero/contratospr-api | 6778b02b42305aa7ce65c956a0d89029ddd857a4 | [
"Apache-2.0"
] | 15 | 2019-02-26T12:40:18.000Z | 2020-01-24T00:58:00.000Z | contratospr/contracts/manager.py | jycordero/contratospr-api | 6778b02b42305aa7ce65c956a0d89029ddd857a4 | [
"Apache-2.0"
] | 52 | 2019-02-13T03:54:34.000Z | 2020-01-20T16:39:56.000Z | contratospr/contracts/manager.py | jycordero/contratospr-api | 6778b02b42305aa7ce65c956a0d89029ddd857a4 | [
"Apache-2.0"
] | 6 | 2019-02-18T13:59:55.000Z | 2019-11-30T23:36:43.000Z | from django.db import models
from .queryset import ContractQuerySet
class BaseContractManager(models.Manager):
    """Manager whose default queryset defers the ``search_vector`` column,
    so it is only loaded when explicitly requested."""

    def get_queryset(self):
        base_qs = super().get_queryset()
        return base_qs.defer("search_vector")
ContractManager = BaseContractManager.from_queryset(ContractQuerySet)
| 22.916667 | 69 | 0.792727 | 131 | 0.476364 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 0.054545 |
4c80ff310733bd5d3259086a59e004736e492ea2 | 1,520 | py | Python | tests/integration/test_integration_article.py | pwitab/visma | ffa6698738fcc1be9de727e7fe77cce30310f830 | [
"BSD-3-Clause"
] | 5 | 2018-08-10T19:12:48.000Z | 2021-07-08T12:43:24.000Z | tests/integration/test_integration_article.py | pwitab/visma | ffa6698738fcc1be9de727e7fe77cce30310f830 | [
"BSD-3-Clause"
] | 16 | 2018-06-17T18:51:05.000Z | 2021-01-10T10:44:36.000Z | tests/integration/test_integration_article.py | pwitab/visma | ffa6698738fcc1be9de727e7fe77cce30310f830 | [
"BSD-3-Clause"
] | 3 | 2019-03-05T15:01:13.000Z | 2021-06-15T14:35:37.000Z | import pytest
from visma.api import VismaClientException
from visma.models import Article, ArticleAccountCoding, Unit
class TestCRUDArticle:
    """Integration tests covering Article CRUD against the Visma API."""

    @pytest.fixture()
    def article(self):
        # Any existing article serves as the test subject.
        article = Article.objects.all()[0]
        yield article

    @pytest.fixture()
    def coding(self):
        coding = ArticleAccountCoding.objects.all()[0]
        yield coding

    @pytest.fixture()
    def unit(self):
        unit = Unit.objects.all()[0]
        yield unit

    def test_list_articles(self):
        articles = Article.objects.all()
        # BUG FIX: `len(articles) is not 0` compared object identity with the
        # int literal 0, which is implementation-defined for ints; compare by
        # value instead.
        assert len(articles) != 0

    def test_create_article(self, coding, unit):
        # article = Article(number=100, name='test article', coding_id=coding.id, unit_id=unit.id)
        # article.save()
        # assert article.id is not None

        # Since we cannot delete articles we don't want to keep on creating new ones.
        pass

    def test_read_article(self, article):
        read_article = Article.objects.get(article.id)
        assert read_article.id == article.id

    def test_update_article(self, article):
        article.net_price = 50
        article.save()
        updated_article = Article.objects.get(article.id)
        assert updated_article.net_price == 50
        # Restore a different price so repeated runs still observe a change.
        updated_article.net_price = 10
        updated_article.save()

    def test_delete_article(self, article):
        # Deleting articles is not allowed by the API.
        # TODO: raise more explaining exception
        with pytest.raises(VismaClientException):
            article.delete()
| 25.762712 | 98 | 0.651316 | 1,398 | 0.919737 | 247 | 0.1625 | 313 | 0.205921 | 0 | 0 | 266 | 0.175 |
4c825229094a03967edb19104397280b43928ad1 | 6,549 | py | Python | zdata.py | streemline/zmap-tools | 064e66636e0d1bc7f47f57a0ab53904e6173497a | [
"BSD-3-Clause"
] | 2 | 2016-12-30T13:54:54.000Z | 2022-01-25T00:38:06.000Z | zdata.py | tejado/zmap-tools | 064e66636e0d1bc7f47f57a0ab53904e6173497a | [
"BSD-3-Clause"
] | 1 | 2022-01-19T16:16:09.000Z | 2022-01-19T16:16:09.000Z | zdata.py | streemline/zmap-tools | 064e66636e0d1bc7f47f57a0ab53904e6173497a | [
"BSD-3-Clause"
] | 1 | 2022-01-19T16:15:51.000Z | 2022-01-19T16:15:51.000Z | #!/usr/bin/env python3
import sys
import ujson as json
import json as json_orig
import traceback
import re
import argparse
import os.path
import operator
import requests
from threading import Thread
from queue import Queue
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
# Command-line interface: one required input file plus independent
# analysis/output switches.
parser = argparse.ArgumentParser()
parser.add_argument('-f', "--file", help='Input JSON file', required=True)
parser.add_argument('-i', "--index-of", help='Show all with Index Of /', action="store_true")
parser.add_argument('-e', "--index-of-extended", help='Extract and show directory listing', action="store_true")
parser.add_argument('-r', "--recursive", help='Recursive directory listing', action="store_true")
parser.add_argument("-c", "--cn", help='Output TLS Cert Common Names', action="store_true")
parser.add_argument('-s', "--summary", help='Output summary', action="store_true")
parser.add_argument("--no-header", help='Suppress header', action="store_true")
args = parser.parse_args()

if not args.no_header:
    print("==============================================")
    print("| zdata v0.33c3 - A zmap JSON Output Utility |")
    print("==============================================")

file = args.file
if not os.path.isfile(file):
    exit('Error: Input file not found')

# Pre-compiled patterns for hyperlinks on an "Index of" listing page;
# hrefs starting with '?' (column-sort links) are excluded by [^\?].
regex_indexof_links_all = re.compile(r'<a href="[^\?]', re.MULTILINE)
regex_indexof_links_path = re.compile(r'<a href="([^\?]+?)"', re.MULTILINE)

# Module-level accumulators mutated by process_entry() and worker threads.
line_count = 0            # total input lines seen
line_count_with_data = 0  # lines that carried a 'data' record
status_codes = {}         # HTTP status code -> occurrence count
tls_count = 0             # responses with a TLS handshake
listing_indexof = {}      # host -> number of links on its index page
listing_cn = {}           # host -> TLS certificate common name
listing_directory = {}    # host -> recursively crawled folder structure

# Work queue feeding the directory-crawling worker threads.
concurrent = 200
q = Queue(concurrent * 2)
def doWork():
    """Daemon worker loop: pull (host, url, body) jobs off the global
    queue and store the recursively crawled listing in
    ``listing_directory[host]``.

    Runs forever; the threads are daemonized so they die with the process.
    """
    while True:
        data = q.get()
        host = data[0]
        url = data[1]
        content = data[2]
        # Fresh session per job: isolates cookies/keep-alive between hosts.
        requests_session = requests.Session()
        requests_session.headers['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'
        listing_directory[host] = indexof_extended(url, content, requests_session)
        q.task_done()

# Spin up the worker pool before any jobs are queued.
for i in range(concurrent):
    t = Thread(target=doWork)
    t.daemon = True
    t.start()
def indexof_extended(url, content, sess, level=0):
    """Recursively parse an "Index of" page into a nested dict.

    Leaf values are the marker strings 'FILE', 'DIRECTORY',
    'DIRECTORY_WITHOUT_INDEX', 'DIRECTORY_SUBFOLDER_LOAD_ERROR' or
    'RECURSION_LIMIT'; crawled sub-listings become nested dicts.

    url     -- base URL of the listing page (trailing '/')
    content -- HTML body of the listing page
    sess    -- requests.Session used to fetch sub-folders
    level   -- current recursion depth (capped at 5)
    """
    # Bugfix: removed dead ``global requests_session`` -- the name was
    # never used in this function (the session comes in via ``sess``).
    level += 1
    if level > 5:
        return 'RECURSION_LIMIT'
    folder = {}
    folder_links = regex_indexof_links_path.findall(content)
    # Drop navigation links; ValueError just means they were not present
    # (the original used bare ``except:`` here, which also swallowed
    # KeyboardInterrupt).
    for nav_link in ('../', '/'):
        try:
            folder_links.remove(nav_link)
        except ValueError:
            pass
    for link in folder_links:
        if link.endswith('/'):
            if args.recursive:
                subfolder_url = "{}{}".format(url, link)
                try:
                    r = sess.get(subfolder_url, verify=False, timeout=2)
                except Exception:
                    folder[link] = 'DIRECTORY_SUBFOLDER_LOAD_ERROR'
                    continue
                subfolder_content = r.text
                if 'Index of' in subfolder_content:
                    folder[link] = indexof_extended(subfolder_url, subfolder_content, sess, level)
                else:
                    folder[link] = 'DIRECTORY_WITHOUT_INDEX'
            else:
                folder[link] = 'DIRECTORY'
        else:
            folder[link] = 'FILE'
    return folder
def process_entry(line):
    """Parse one zmap/zgrab JSON output line and update the module-level
    counters and listings (status codes, TLS common names, "Index of"
    hosts).

    When --index-of-extended is set, hosts whose body looks like a
    directory listing are queued for the crawler worker pool.
    """
    global q, line_count, line_count_with_data, status_codes, tls_count, listing_indexof, listing_cn
    line_count += 1
    try:
        result = json.loads(line)
    except ValueError:
        # Bugfix: the original fell through with ``result`` unbound after
        # a parse failure, raising an uncaught NameError on the next
        # statement and killing the whole read loop.
        traceback.print_exc()
        return
    if 'data' in result:
        line_count_with_data += 1
        status_code = 0
        try:
            status_code = result['data']['http']['response']['status_code']
        except KeyError:
            pass
        if status_code not in status_codes:
            status_codes[status_code] = 1
        else:
            status_codes[status_code] += 1
        cn = "n/a"
        tls = False
        url = "/"
        try:
            #host = result['data']['http']['response']['request']['host']
            host = result['data']['http']['response']['request']['url']['host']
            schema = result['data']['http']['response']['request']['url']['scheme']
            url = "{}://{}/".format(schema, host)
            if 'tls_handshake' in result['data']['http']['response']['request']:
                tls_count += 1
                tls = True
                try:
                    cn = result['data']['http']['response']['request']['tls_handshake']['server_certificates']['certificate']['parsed']['subject']['common_name'][0].encode('latin-1')
                except Exception:
                    # Fall back to the raw (unencoded) common name.
                    cn = result['data']['http']['response']['request']['tls_handshake']['server_certificates']['certificate']['parsed']['subject']['common_name'][0]
        except KeyError:
            pass
        # ``tls`` is only True when ``host`` was successfully extracted.
        if tls and args.cn:
            listing_cn[host] = cn
        try:
            content = result['data']['http']['response']['body']
            if 'Index of /' in content:
                match = regex_indexof_links_all.findall(content)
                #print( "{} has index ({})".format(host, len(match)) )
                listing_indexof[host] = len(match)
                if args.index_of_extended:
                    q.put([host, url, content])
        except KeyError:
            pass
        except Exception:
            # Narrowed from a bare ``except:``; still logs unexpected errors.
            traceback.print_exc()
# Stream the input file line by line; each line is one JSON record.
with open(file) as f:
    for line in f:
        process_entry(line)

# --cn: dump host -> TLS certificate common name.
if args.cn:
    for host in listing_cn:
        print("{} -> {}".format(host, listing_cn[host]))
def print_folder_structure(structure, level=0):
    """Pretty-print a nested folder dict, four spaces of indent per level.

    Dict values are recursed into after their key is printed; any other
    value (the 'FILE'/'DIRECTORY' markers) just prints its key.
    """
    depth = level + 1
    prefix = ' ' * 4 * depth
    for name, entry in structure.items():
        print(prefix + name)
        if isinstance(entry, dict):
            print_folder_structure(entry, depth)
# --index-of: print hosts that expose a directory listing, most links first.
if args.index_of:
    if args.index_of_extended:
        # Bugfix: wait for the crawler threads to finish before reading
        # ``listing_directory`` -- the original printed it while workers
        # could still be filling it in.
        q.join()
    sort = sorted(listing_indexof.items(), key=operator.itemgetter(1), reverse=True)
    for entry in sort:
        print("{} has index ({})".format(entry[0], entry[1]))
        # With --index-of-extended, also dump the crawled folder tree.
        if args.index_of_extended and entry[0] in listing_directory:
            #print( json_orig.dumps(listing_directory[entry[0]], indent=4, sort_keys=True) )
            print_folder_structure(listing_directory[entry[0]])

# --summary: aggregate counters collected by process_entry().
if args.summary:
    print("=====================================")
    print("Line Count: %s" % line_count)
    print("Line Count with data: %s" % line_count_with_data)
    print("TLS count: %s" % tls_count)
    print("=====================================")
    for status_code in status_codes:
        print("Status Code {: >3}: {: >8} responses".format(status_code, status_codes[status_code]))
| 31.185714 | 174 | 0.616735 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,884 | 0.287678 |
4c831cc2e73069abed966a40785b1807f8d8eb10 | 818 | py | Python | tests/test_debianpkg.py | trathborne/nvchecker | d8c26fa66640d46a0bc099cd9f070b7b8c408479 | [
"MIT"
] | 320 | 2015-01-11T06:58:09.000Z | 2022-03-31T10:26:27.000Z | tests/test_debianpkg.py | trathborne/nvchecker | d8c26fa66640d46a0bc099cd9f070b7b8c408479 | [
"MIT"
] | 142 | 2015-06-28T03:09:56.000Z | 2022-02-28T06:09:26.000Z | tests/test_debianpkg.py | trathborne/nvchecker | d8c26fa66640d46a0bc099cd9f070b7b8c408479 | [
"MIT"
] | 68 | 2015-04-15T05:09:45.000Z | 2022-02-23T05:52:47.000Z | # MIT licensed
# Copyright (c) 2020 lilydjwg <lilydjwg@gmail.com>, et al.
# Copyright (c) 2017 Felix Yan <felixonmars@archlinux.org>, et al.
from flaky import flaky
import pytest
pytestmark = [pytest.mark.asyncio, pytest.mark.needs_net]
@flaky(max_runs=10)
async def test_debianpkg(get_version):
assert await get_version("sigrok-firmware-fx2lafw", {
"source": "debianpkg",
}) == "0.1.7-1"
@flaky(max_runs=10)
async def test_debianpkg_strip_release(get_version):
assert await get_version("sigrok-firmware-fx2lafw", {
"source": "debianpkg",
"strip_release": 1,
}) == "0.1.7"
@flaky(max_runs=10)
async def test_debianpkg_suite(get_version):
assert await get_version("sigrok-firmware-fx2lafw", {
"source": "debianpkg",
"suite": "buster",
}) == "0.1.6-1"
| 29.214286 | 66 | 0.677262 | 0 | 0 | 0 | 0 | 574 | 0.701711 | 514 | 0.628362 | 325 | 0.397311 |
4c8343bd0395981669f44890366d05a0d442060e | 158 | py | Python | Post-Exploitation/LaZagne/Linux/lazagne/config/color.py | FOGSEC/TID3xploits | b57d8bae454081a3883a5684679e2a329e72d6e5 | [
"MIT"
] | 5 | 2018-01-15T13:58:40.000Z | 2022-02-17T02:38:58.000Z | Post-Exploitation/LaZagne/Linux/lazagne/config/color.py | bhattsameer/TID3xploits | b57d8bae454081a3883a5684679e2a329e72d6e5 | [
"MIT"
] | null | null | null | Post-Exploitation/LaZagne/Linux/lazagne/config/color.py | bhattsameer/TID3xploits | b57d8bae454081a3883a5684679e2a329e72d6e5 | [
"MIT"
] | 4 | 2019-06-21T07:51:11.000Z | 2020-11-04T05:20:09.000Z |
class bcolors():
    """ANSI terminal escape sequences for colored console output.

    Usage: print(bcolors.OK + "done" + bcolors.ENDC)
    """
    HEADER = '\033[95m'   # bright magenta
    OKBLUE = '\033[94m'   # bright blue
    OK = '\033[92m'       # bright green
    WARNING = '\033[96m'  # bright cyan
    FAIL = '\033[91m'     # bright red
    TITLE = '\033[93m'    # bright yellow
    ENDC = '\033[0m'      # reset all attributes
4c852eb9b907c34839876a0167f2d8033a969748 | 6,093 | py | Python | src/commands/trajectories.py | SpookyWoogin/robot2018 | a8ddf6a64b883904b15031e0ae13b2056faed4f5 | [
"MIT"
] | 1 | 2018-10-24T21:43:00.000Z | 2018-10-24T21:43:00.000Z | src/commands/trajectories.py | SpookyWoogin/robot2018 | a8ddf6a64b883904b15031e0ae13b2056faed4f5 | [
"MIT"
] | 1 | 2018-03-10T01:25:47.000Z | 2018-03-10T03:33:36.000Z | src/commands/trajectories.py | SpookyWoogin/robot2018 | a8ddf6a64b883904b15031e0ae13b2056faed4f5 | [
"MIT"
] | 6 | 2018-01-13T17:54:31.000Z | 2018-02-13T23:46:50.000Z | import csv
import math
from wpilib import Timer
from wpilib.command import Command
from commands.statespace import StateSpaceDriveController
from data_logger import DataLogger
from pidcontroller import PIDController
from drivecontroller import DriveController
def read_trajectories(fnom):
    """Load a trajectory CSV from the sibling ``trajectories`` directory.

    Returns a list of (dt, xl, xr, vl, vr, al, ar, heading) float tuples,
    one per profile point.  Asserts that the header row has the expected
    column layout before trusting the data.
    """
    from os.path import dirname, join
    csv_path = join(dirname(__file__), "..", "trajectories", fnom)
    points = []
    with open(csv_path) as csv_file:
        for row_idx, row in enumerate(csv.reader(csv_file)):
            if row_idx == 0:
                # header sanity check
                assert row == ['dt', 'xl', 'xr', 'vl', 'vr', 'al', 'ar', 'heading']
            else:
                points.append(tuple(float(cell) for cell in row))
    return points
class _CsvTrajectoryCommand(Command):
    """Base command that loads a CSV motion profile and exposes its points
    in metric or drivetrain-encoder units.

    Subclasses implement initialize/execute/end; this class provides the
    profile data, the point index ``self.i`` and ``isFinished``.
    """

    def __init__(self, fnom, name=None):
        super().__init__(name)
        self.drivetrain = self.getRobot().drivetrain
        self.requires(self.drivetrain)
        self.timer = Timer()
        self.period = self.getRobot().getPeriod()
        self.fnom = fnom
        self.trajectory_points = read_trajectories(self.fnom)
        #assert self.trajectory_points[0][0] == self.period
        # Index of the next profile point; also drives isFinished().
        self.i = 0
        self.target_v_l = 0
        self.target_v_r = 0
        self.target_a_l = 0
        self.target_a_r = 0
        self.target_heading = 0

    def get_trajectory_point_m(self, i):
        """Return profile point ``i`` in metric units.

        NOTE(review): all right-side values (position/velocity/accel) are
        negated here -- presumably to mirror the right gearbox direction;
        confirm against the drivetrain sign convention.
        """
        (_, xl_m, xr_m, vl_mps, vr_mps, al_mps2, ar_mps2, heading_rad) = self.trajectory_points[i]
        return (_, xl_m, -xr_m, vl_mps, -vr_mps, al_mps2, -ar_mps2, heading_rad)

    def get_trajectory_point_enc(self, i):
        """Return profile point ``i`` converted from meters to encoder units
        (positions in ticks, velocities in ticks/100ms)."""
        (dt_s, xl_m, xr_m, vl_mps, vr_mps, al_mps2, ar_mps2, heading_rad) = self.trajectory_points[i]

        # 0.3048 m per foot; the drivetrain helpers work in feet.
        def m_to_enc(x):
            return self.drivetrain.ratio * x / 0.3048

        def mps_to_encp100ms(v):
            return self.drivetrain.fps_to_encp100ms(v / 0.3048)

        def mps2_to_encp100msps(a):
            return self.drivetrain.fps2_to_encpsp100ms(a / 0.3048)

        return (dt_s, m_to_enc(xl_m), m_to_enc(xr_m),
                mps_to_encp100ms(vl_mps), mps_to_encp100ms(vr_mps),
                mps2_to_encp100msps(al_mps2), mps2_to_encp100msps(ar_mps2),
                heading_rad)

    def isFinished(self):
        # Done once every profile point has been consumed.
        return self.i >= len(self.trajectory_points)
class CsvTrajectoryCommand(_CsvTrajectoryCommand):
    """Follow a pre-computed CSV trajectory with per-side drive
    controllers (feedforward constants Ks/Kv/Ka) plus a heading PID.

    NOTE(review): the heading PID's output callback ``correct_heading``
    is a no-op, so heading feedback is computed but never applied.
    """

    def __init__(self, fnom):
        super().__init__(fnom)
        # Heading PID; all gains are currently zero.
        self.ctrl_heading = PIDController(
            Kp=0, Ki=0, Kd=0, Kf=0,
            source=self.drivetrain.getAngle,
            output=self.correct_heading,
            period=self.period,
        )
        self.ctrl_heading.setInputRange(-180, 180)
        self.ctrl_heading.setOutputRange(-0.5, 0.5)
        self.ctrl_heading.setContinuous(True)

        # Velocity saturation limit, converted to encoder ticks / 100 ms.
        self.max_velocity_fps = 11
        self.max_velocity_encps = self.drivetrain.fps_to_encp100ms(self.max_velocity_fps)

        # Left/right velocity controllers; Ks/Kv/Ka look like measured
        # characterization constants for each gearbox.
        self.ctrl_l = DriveController(
            Kp=0, Kd=0,
            Ks=1.293985, Kv=0.014172, Ka=0.005938,
            get_voltage=self.drivetrain.getVoltage,
            source=self.drivetrain.getLeftEncoderVelocity,
            output=self.drivetrain.setLeftMotor,
            period=self.period,
        )
        self.ctrl_l.setInputRange(-self.max_velocity_encps, self.max_velocity_encps)
        self.ctrl_r = DriveController(
            Kp=0, Kd=0,
            Ks=1.320812, Kv=0.013736, Ka=0.005938,
            get_voltage=self.drivetrain.getVoltage,
            source=self.drivetrain.getRightEncoderVelocity,
            output=self.drivetrain.setRightMotor,
            period=self.period,
        )
        self.ctrl_r.setInputRange(-self.max_velocity_encps, self.max_velocity_encps)

    def initialize(self):
        """Zero the sensors, enable all controllers and start logging."""
        self.drivetrain.zeroEncoders()
        self.drivetrain.zeroNavx()
        self.ctrl_l.enable()
        self.ctrl_r.enable()
        self.ctrl_heading.enable()
        self.logger = DataLogger("csv_trajectory1.csv")
        self.drivetrain.init_logger(self.logger)
        self.logger.add("profile_vel_r", lambda: self.target_v_r)
        self.logger.add("profile_vel_l", lambda: self.target_v_l)
        self.logger.add("pos_ft_l", lambda: self.pos_ft_l)
        self.logger.add("i", lambda: self.i)
        self.timer.start()
        self.i = 0
        #print ('pdf init')

    def execute(self):
        """Feed the next profile point (encoder units) to the controllers."""
        self.pos_ft_l = self.drivetrain.getLeftEncoder() / self.drivetrain.ratio
        self.pos_ft_r = self.drivetrain.getRightEncoder() / self.drivetrain.ratio
        (_, _, _, self.target_v_l, self.target_v_r, self.target_a_l,
         self.target_a_r, self.target_heading) = self.get_trajectory_point_enc(self.i)
        self.ctrl_l.setSetpoint(self.target_v_l)
        self.ctrl_l.setAccelerationSetpoint(self.target_a_l)
        self.ctrl_r.setSetpoint(self.target_v_r)
        self.ctrl_r.setAccelerationSetpoint(self.target_a_r)
        self.ctrl_heading.setSetpoint(self.target_heading)
        self.drivetrain.feed()
        self.logger.log()
        self.i += 1

    def end(self):
        """Disable controllers, stop the drivetrain and flush the log."""
        self.ctrl_l.disable()
        self.ctrl_r.disable()
        self.ctrl_heading.disable()
        self.drivetrain.off()
        self.logger.flush()
        #print ('pdf end')

    def correct_heading(self, correction):
        # Heading PID output callback; intentionally unused for now.
        pass
class StateSpaceDriveCommand(_CsvTrajectoryCommand, StateSpaceDriveController):
    """Track a CSV trajectory with the state-space drive controller,
    feeding it metric profile points and logging to ss_trajectory.csv."""

    def __init__(self, fnom):
        # Both bases are initialized explicitly (no cooperative super()),
        # so their order here matters.
        _CsvTrajectoryCommand.__init__(self, fnom)
        StateSpaceDriveController.__init__(self, Command.getRobot().drivetrain)
        # Control-effort saturation limits (u in volts, presumably).
        self.u_min = -8
        self.u_max = 8

    def initialize(self):
        """Zero the sensors, reset the point index and start logging."""
        self.drivetrain.zeroEncoders()
        self.drivetrain.zeroNavx()
        self.i = 0
        self.logger = DataLogger("ss_trajectory.csv")
        self.drivetrain.init_logger(self.logger)

    def execute(self):
        """Feed the current metric profile point to the controller update."""
        (dt_s, xl_m, xr_m, vl_mps, vr_mps, al_mps2, ar_mps2, heading_rad) = self.get_trajectory_point_m(self.i)
        self.update(xl_m, xr_m, vl_mps, vr_mps)
        self.logger.log()
        self.i += 1

    def end(self):
        """Stop the drivetrain and flush the log."""
        self.drivetrain.off()
        self.logger.flush()
| 35.631579 | 111 | 0.643689 | 5,331 | 0.874938 | 0 | 0 | 0 | 0 | 0 | 0 | 234 | 0.038405 |
4c85c3a37cdee1127ce905401fbfe9eb13640820 | 2,608 | py | Python | yateto/codegen/test_framework.py | PhuNH/yateto | bfc7f1faa9b47a1a6a1655cf633c80174b10d0b8 | [
"BSD-3-Clause"
] | null | null | null | yateto/codegen/test_framework.py | PhuNH/yateto | bfc7f1faa9b47a1a6a1655cf633c80174b10d0b8 | [
"BSD-3-Clause"
] | null | null | null | yateto/codegen/test_framework.py | PhuNH/yateto | bfc7f1faa9b47a1a6a1655cf633c80174b10d0b8 | [
"BSD-3-Clause"
] | null | null | null | from abc import ABC, abstractmethod
class TestFramework(ABC):
@abstractmethod
def functionArgs(self, testName):
"""functionArgs.
:param testName: Name of test
"""
pass
@abstractmethod
def assertLessThan(self, x, y):
"""Should return code which checks x < y."""
pass
@abstractmethod
def generate(self, cpp, namespace, kernelsInclude, initInclude, body):
"""generate unit test file for cxxtest.
:param cpp: code.Cpp object
:param namespace: Namespace string
:param kernelsInclude: Kernels header file
:param initInclude: Init header File
:param body: Function which accepts cpp and self
"""
cpp.include(kernelsInclude)
cpp.include(initInclude)
cpp.include('yateto.h')
with cpp.PPIfndef('NDEBUG'):
with cpp.PPIfndef('YATETO_TESTING_NO_FLOP_COUNTER'):
cpp('long long libxsmm_num_total_flops = 0;')
cpp('long long pspamm_num_total_flops = 0;')
class CxxTest(TestFramework):
TEST_CLASS = 'KernelTestSuite'
TEST_NAMESPACE = 'unit_test'
TEST_PREFIX = 'test'
def functionArgs(self, testName):
return {'name': self.TEST_PREFIX + testName}
def assertLessThan(self, x, y):
return 'TS_ASSERT_LESS_THAN({}, {});'.format(x, y);
def generate(self, cpp, namespace, kernelsInclude, initInclude, body):
super().generate(cpp, namespace, kernelsInclude, initInclude, body)
cpp.includeSys('cxxtest/TestSuite.h')
with cpp.Namespace(namespace):
with cpp.Namespace(self.TEST_NAMESPACE):
cpp.classDeclaration(self.TEST_CLASS)
with cpp.Class('{}::{}::{} : public CxxTest::TestSuite'.format(namespace, self.TEST_NAMESPACE, self.TEST_CLASS)):
cpp.label('public')
body(cpp, self)
class Doctest(TestFramework):
TEST_CASE = 'yateto kernels'
def functionArgs(self, testName):
"""functionArgs.
:param testName: Name of test
"""
return {'name': 'SUBCASE', 'arguments': '"{}"'.format(testName), 'returnType': ''}
def assertLessThan(self, x, y):
return 'CHECK({} < {});'.format(x, y);
def generate(self, cpp, namespace, kernelsInclude, initInclude, body):
super().generate(cpp, namespace, kernelsInclude, initInclude, body)
cpp.include('doctest.h')
cpp('using namespace {};'.format(namespace))
with cpp.Function(name='TEST_CASE', arguments='"{}"'.format(self.TEST_CASE), returnType=''):
body(cpp, self)
| 34.773333 | 121 | 0.62615 | 2,566 | 0.983896 | 0 | 0 | 968 | 0.371166 | 0 | 0 | 860 | 0.329755 |
4c86d36d1ca7f5676ec707c02279a0b7c737bbd9 | 337 | py | Python | shop_thienhi/utils/format_time.py | Lesson-ThienHi/thienhi_shop | 1c595d70299e1fcce12c3610e27b66c89bbadda6 | [
"MIT"
] | null | null | null | shop_thienhi/utils/format_time.py | Lesson-ThienHi/thienhi_shop | 1c595d70299e1fcce12c3610e27b66c89bbadda6 | [
"MIT"
] | 2 | 2022-03-30T06:34:29.000Z | 2022-03-31T06:34:49.000Z | shop_thienhi/utils/format_time.py | Lesson-ThienHi/thienhi_shop | 1c595d70299e1fcce12c3610e27b66c89bbadda6 | [
"MIT"
] | null | null | null | from datetime import datetime
def format_time_filter():
start_time = datetime.now().utcnow().replace(hour=0, minute=0, second=0, microsecond=0).timestamp()
end_time = datetime.utcnow().replace(second=0, microsecond=0).timestamp()
data = {
"start_time": start_time,
"end_time": end_time
}
return data
| 30.636364 | 103 | 0.676558 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 0.065282 |
4c8719fed243367528ac749c01c04b3271e74999 | 923 | py | Python | Algorithms/PCA/solutions.py | lcbendall/numerical_computing | 565cde92525ea44c55abe933c6419c1543f9800b | [
"CC-BY-3.0"
] | null | null | null | Algorithms/PCA/solutions.py | lcbendall/numerical_computing | 565cde92525ea44c55abe933c6419c1543f9800b | [
"CC-BY-3.0"
] | null | null | null | Algorithms/PCA/solutions.py | lcbendall/numerical_computing | 565cde92525ea44c55abe933c6419c1543f9800b | [
"CC-BY-3.0"
] | 1 | 2020-12-08T01:19:23.000Z | 2020-12-08T01:19:23.000Z | import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg as la
def PCA(dat, center=False, percentage=0.8):
M, N = dat.shape
if center:
mu = np.mean(dat,0)
dat -= mu
U, L, Vh = la.svd(dat, full_matrices=False)
V = Vh.T.conjugate()
SIGMA = np.diag(L)
X = U.dot(SIGMA)
Lam = L**2
normalized_eigenvalues = Lam/Lam.sum(dtype=float)
csum = [normalized_eigenvalues[:i+1].sum() for i in xrange(N)]
n_components = [x < percentage for x in csum].index(False) + 1
return (normalized_eigenvalues,
V[:,0:n_components],
SIGMA[0:n_components,0:n_components],
X[:,0:n_components])
def scree(normalized_eigenvalues):
fig = plt.figure()
plt.plot(normalized_eigenvalues,'b-', normalized_eigenvalues, 'bo')
plt.xlabel("Principal Components")
plt.ylabel("Percentage of Variance")
return fig
| 27.147059 | 71 | 0.630553 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 54 | 0.058505 |
4c87539de9adc1fe44ae28fbd4feebd9d222ca61 | 25,502 | py | Python | core/agent.py | liruiw/HCG | a928ce7fb0df022cb2ceaeff32925f13de369519 | [
"MIT"
] | 3 | 2021-09-29T07:08:21.000Z | 2022-01-13T06:04:32.000Z | core/agent.py | liruiw/HCG | a928ce7fb0df022cb2ceaeff32925f13de369519 | [
"MIT"
] | 1 | 2021-07-11T04:27:55.000Z | 2021-07-11T05:37:01.000Z | core/agent.py | liruiw/HCG | a928ce7fb0df022cb2ceaeff32925f13de369519 | [
"MIT"
] | 1 | 2021-07-18T09:35:28.000Z | 2021-07-18T09:35:28.000Z | # --------------------------------------------------------
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import os
import torch
import torch.nn.functional as F
import numpy as np
from core import networks
from core.utils import *
from core.loss import *
import IPython
import time
class Agent(object):
"""
A general agent class
"""
    def __init__(self, num_inputs, action_space, args, name):
        """Build the agent and its Gaussian policy.

        num_inputs   -- base state feature dimension (extras added below)
        action_space -- gym-style action space; only .shape is used
        args         -- dict of config values copied onto ``self`` verbatim
        name         -- algorithm name; anything except "BC" owns a critic
        """
        # Expose every config entry as an attribute of the agent.
        for key, val in args.items():
            setattr(self, key, val)

        self.name = name
        self.device = "cuda"
        self.update_step = 1
        self.init_step = 1
        self.action_dim = action_space.shape[0]
        self.has_critic = self.name != "BC"
        self.action_space = action_space
        self.num_inputs = num_inputs + self.num_input_extra
        self.traj_feat = None
        self.latent_sample = None
        self.test_mode = False
        self.use_debug_latent = False
        self.gaddpg_pred = 0.

        # Policies conditioned on a trajectory latent take it as extra input.
        if has_check(self, 'traj_goal_mutual_conditioned'):
            self.num_inputs += self.policy_traj_latent_size
        self.policy, self.policy_optim, self.policy_scheduler, self.policy_target = get_policy_class('GaussianPolicy', self)
    def unpack_batch(
        self,
        state,
        point_state=None,
        vis=False,
        gt_goal=None,
        val=False,
        grasp_set=None,
        vis_image=False,
        repeat=False,
        traj_latent=None,
        separate=True
    ):
        """
        Extract features from point cloud input.

        Converts ``state``/``point_state`` to CUDA tensors when given as
        lists or numpy arrays, then runs the shared state feature
        extractor.  ``val`` selects the value-branch head (feature_2).
        Returns a 2-element list [feature, extra]; ``extra`` is None when
        the extractor returns a single tensor.

        NOTE(review): vis, gt_goal, grasp_set, vis_image, repeat and
        separate are unused here -- presumably kept for interface
        compatibility with subclasses; confirm before removing.
        """
        if type(point_state) is list or type(point_state) is np.ndarray:
            point_state = torch.cuda.FloatTensor(point_state)
        if type(state) is list or type(state) is np.ndarray:
            state = torch.cuda.FloatTensor(state)

        state_feature, network_input = self.state_feature_extractor(
            point_state,
            feature_2=val,
            traj_latent=traj_latent,
            train=not self.test_mode)
        # Normalize the return shape to [feature, extra].
        if len(state_feature) != 2 or type(state_feature) is torch.Tensor: state_feature = [state_feature, None]
        return state_feature
    def gaddpg_step(self, state, remain_timestep, curr_joint):
        """Delegate one action selection to the pretrained GA-DDPG agent.

        The observation is filtered to target points first, and the
        remaining horizon is clamped to [1, 25] before being handed over.
        """
        state = select_target_point(state)
        gaddpg_remain_step = max(min(remain_timestep + 1, 25), 1)
        return self.gaddpg.select_action(state, remain_timestep=gaddpg_remain_step, curr_joint=curr_joint)
    @torch.no_grad()
    def batch_select_action(
        self,
        state,
        actions=None,
        goal_state=None,
        vis=False,
        remain_timestep=0,
        repeat=False,
        curr_joint=None,
        gt_traj=None,
        sample_num=None
    ):
        """
        Run a policy forward pass for a whole batch of simulated envs.

        state[0] is expected to hold (point_state, img_state); curr_joint
        carries the joint configuration per env (first 7 columns used).
        Returns (action, traj, extra_pred, aux_pred) as numpy arrays with
        a leading batch dimension (``traj`` is always None here).
        """
        self.set_mode(True)
        traj = None
        curr_joint_th = torch.cuda.FloatTensor(curr_joint)[:, :7]
        img_state = torch.cuda.FloatTensor(state[0][1])
        point_state = torch.cuda.FloatTensor(state[0][0])
        timestep = remain_timestep
        self.timestep = timestep

        agent = self
        feature, extra = agent.extract_feature( img_state,
                                                point_state,
                                                time_batch=timestep,
                                                goal_batch=goal_state,
                                                vis=vis,
                                                value=False,
                                                train=False,
                                                repeat=repeat,
                                                curr_joint=curr_joint_th )
        # policy.sample returns (action, extra, raw sample, aux prediction).
        actions = agent.policy.sample(feature)
        action = actions[0].detach().cpu().numpy()
        extra_pred = actions[1].detach().cpu().numpy()
        action_sample = actions[2].detach().cpu().numpy()
        aux_pred = actions[3].detach().cpu().numpy()
        return action, traj, extra_pred, aux_pred
    @torch.no_grad()
    def select_action(
        self,
        state,
        actions=None,
        goal_state=None,
        vis=False,
        remain_timestep=0,
        repeat=False,
        curr_joint=None,
        gt_traj=None,
        sample_num=None
    ):
        """
        Policy output at test time for a single environment.

        Depending on configuration this may (a) hand off entirely to the
        critic-based selection (multi-sample MPC or DQN_HRL+vis), or
        (b) first sample a trajectory latent via select_traj and then run
        the policy conditioned on it.  Returns
        (action, traj, extra_pred, aux_pred); ``traj`` is None when no
        trajectory sampler is used.
        """
        self.set_mode(True)
        # Multiple trajectory samples only apply when no ground-truth
        # trajectory is provided.
        multi_sample = has_check(self, 'multi_traj_sample') and gt_traj is None
        if multi_sample and hasattr(self, 'critic') and self.train_traj_sampler and self.critic_mpc:
            return self.critic_select_action(state, remain_timestep, curr_joint, vis=vis)
        if self.name == 'DQN_HRL' and gt_traj is None and vis:
            return self.critic_select_action(state, remain_timestep, curr_joint, vis=vis)

        curr_joint_th = torch.Tensor([curr_joint.flatten()]).float().cuda()[:, :7]
        img_state = torch.cuda.FloatTensor(state[0][1])[None]
        point_state = torch.cuda.FloatTensor(state[0][0])[None]
        timestep = torch.cuda.FloatTensor([remain_timestep])
        self.timestep = timestep

        if has_check(self, 'train_traj_sampler') and gt_traj is None and has_check(self, 'train_traj_feature'):
            if multi_sample: # multiple traj samples
                # Sample test_traj_num candidate trajectories, then keep
                # the first one's latent for the policy pass.
                traj = self.select_traj(img_state,
                                        point_state.repeat((self.test_traj_num, 1, 1)),
                                        goal_state,
                                        vis=vis,
                                        remain_timestep=remain_timestep,
                                        curr_joint=curr_joint_th.repeat((self.test_traj_num, 1)))
                timestep = torch.Tensor([remain_timestep]).float().cuda()
                opt_idx = 0
                self.traj_feat = self.traj_feat[[opt_idx]]
            else:
                traj = self.select_traj(img_state, point_state, goal_state,
                                        vis=vis, remain_timestep=remain_timestep,
                                        curr_joint=curr_joint_th )
        else:
            traj = None

        # policy
        feature, extra = self.extract_feature( img_state,
                                               point_state,
                                               time_batch=timestep,
                                               goal_batch=goal_state,
                                               value=False,
                                               train=False,
                                               repeat=repeat,
                                               curr_joint=curr_joint_th[:,:7] )
        # Optionally report critic values for visualization/debugging.
        if self.name == 'DQN_HRL' and vis and hasattr(self, 'sampler_traj_feat'):
            self.compute_critic_value( img_state, point_state, timestep, curr_joint_th, goal_state)

        actions = self.policy.sample(feature)
        action = actions[0].detach().cpu().numpy()[0]
        extra_pred = actions[1].detach().cpu().numpy()[0]
        action_sample = actions[2].detach().cpu().numpy()[0]
        aux_pred = actions[3].detach().cpu().numpy()[0]
        return action, traj, extra_pred, aux_pred
    def update_parameters(self, batch_data, updates, k):
        """
        Single optimization step; no-op in the base class, to be
        overridden by subclasses.  Returns a dict of loss statistics
        (empty here).
        """
        return {}
    def compute_loss(self):
        """
        Compute policy losses (grasp auxiliary prediction + behavior
        cloning) and return the sum of all non-critic '*loss' attributes
        listed in ``self.loss_info``.
        """
        # Auxiliary grasp-pose prediction loss on goal-reaching samples.
        self.policy_grasp_aux_loss = goal_pred_loss(self.aux_pred[self.target_goal_reward_mask, :7], self.target_grasp_batch[self.target_goal_reward_mask, :7] )
        # Behavior cloning loss against expert actions.
        self.bc_loss = traj_action_loss(self, self.pi, self.traj_expert_action_batch, self.target_expert_mask)
        return sum([getattr(self, name) for name in self.loss_info if name.endswith('loss') and not name.startswith('critic')])
    def prepare_data(self, batch_data):
        """
        Load a batch data dictionary onto ``self`` as CUDA tensors and
        derive the masks/targets used by the loss computation.

        Zero-initializes every entry of the loss-info dict so losses that
        are not computed this step still log as 0.
        """
        update_step = self.update_step - self.init_step
        self.loss_info = list(get_loss_info_dict().keys())
        for name in self.loss_info:
            setattr(self, name, torch.zeros(1, device=torch.device('cuda')))
        # Every batch entry becomes a float CUDA tensor attribute.
        for k, v in batch_data.items():
            setattr(self, k, torch.cuda.FloatTensor(v))

        # traj_idx_batch columns: [trajectory index in batch, time index].
        self.traj_time_batch = self.traj_idx_batch[:, 1, None]
        self.cont_traj_inbatch_index = self.traj_idx_batch[:, 0].cuda().long()
        self.traj_feat = None

        # Per-trajectory masks: positive return / expert demonstration.
        self.reward_mask = (self.return_batch > 0).view(-1)
        self.expert_mask = (self.expert_flag_batch >= 1).view(-1)
        self.expert_reward_mask = self.reward_mask * (self.expert_flag_batch >= 1).squeeze()
        self.perturb_flag_batch = self.perturb_flag_batch.bool()
        self.traj_expert_reward_mask = self.expert_reward_mask[self.cont_traj_inbatch_index]
        self.train_traj_idx_batch = self.cont_traj_inbatch_index

        # Sparsified simulated-trajectory bookkeeping.
        self.sparsify_sim_traj_time_batch = self.sparsify_sim_traj_idx_batch[:, 1, None]
        self.sparsify_sim_cont_traj_inbatch_index = self.sparsify_sim_traj_idx_batch[:, 0].cuda().long()
        self.sparsify_sim_traj_expert_reward_mask = self.expert_reward_mask[self.sparsify_sim_cont_traj_inbatch_index]

        self.goal_reward_mask = torch.ones_like(self.time_batch).bool()
        self.traj_goal_reward_mask = torch.ones_like(self.traj_integer_time_batch).bool()

        # target_* switch between per-step and per-trajectory quantities
        # depending on whether full trajectory embeddings are used.
        self.target_grasp_batch = self.traj_goal_batch[:, :7] if self.full_traj_embedding else self.goal_batch[:, :7]
        self.target_goal_reward_mask = self.goal_reward_mask[self.cont_traj_inbatch_index] if self.full_traj_embedding else self.goal_reward_mask
        self.target_reward_mask = self.reward_mask[self.cont_traj_inbatch_index] if self.full_traj_embedding else self.reward_mask
        self.target_return = self.return_batch[self.cont_traj_inbatch_index] if self.full_traj_embedding else self.return_batch
        self.target_expert_mask = self.expert_mask[self.cont_traj_inbatch_index] if self.full_traj_embedding else self.expert_mask
        self.target_gaddpg_batch = (self.gaddpg_batch * self.reward_mask)
        self.target_expert_reward_mask = self.traj_expert_reward_mask if self.full_traj_embedding else self.expert_reward_mask
        self.next_time_batch = self.time_batch - 1
        self.next_traj_time_batch = self.traj_integer_time_batch - 1
        self.target_reward_batch = self.traj_reward_batch if self.full_traj_embedding else self.reward_batch
        self.target_mask_batch = self.traj_mask_batch if self.full_traj_embedding else self.mask_batch
    def log_stat(self):
        """
        Collect gradient/parameter magnitudes and batch statistics for
        tensorboard logging.  Pure bookkeeping; stores everything as
        attributes that the logger reads later.
        """
        # Policy and feature-extractor gradient/parameter norms.
        self.policy_grad = module_max_gradient(self.policy)
        self.feat_grad = module_max_gradient(self.state_feature_extractor.module.encoder)
        self.feat_param = module_max_param(self.state_feature_extractor.module.encoder)
        self.val_feat_grad = module_max_gradient(self.state_feature_extractor.module.value_encoder)
        self.val_feat_param = module_max_param(self.state_feature_extractor.module.value_encoder)
        self.policy_param = module_max_param(self.policy)
        self.reward_mask_num = self.reward_mask.float().sum()
        self.max_traj_sample_len = torch.unique(self.cont_traj_inbatch_index, return_counts=True)[1].max()
        self.traj_num = len(self.reward_mask)
        self.train_batch_size = len(self.target_expert_reward_mask)

        # Optional trajectory feature / sampler statistics.
        if hasattr(self, 'traj_feature_extractor'):
            self.traj_grad = module_max_gradient(self.traj_feature_extractor)
            self.traj_param = module_max_param(self.traj_feature_extractor)
        if hasattr(self, 'sampler_gaussian'):
            self.sampler_mean = self.sampler_gaussian[0].mean().item()
            self.sampler_logsigma = self.sampler_gaussian[1].mean().item()
        if self.train_traj_sampler and hasattr(self, 'sampler_traj_feat'):
            self.traj_sampler_grad = module_max_gradient(self.traj_feature_sampler)
            self.traj_sampler_param = module_max_param(self.traj_feature_sampler)

        # Critic statistics (Q values, returns, masks).
        if self.has_critic:
            self.value_mean, self.value_mean_2 = self.qf1.mean(), self.qf2.mean()
            self.target_mean = self.next_q_value.mean()
            self.return_mean = self.traj_return_batch.mean()
            self.value_min, self.value_max = self.qf1.min(), self.qf1.max()
            self.expert_reward_mask_num = self.expert_reward_mask.sum()
            self.goal_reward_mask_num = self.goal_reward_mask.sum()
            self.reward_mask_num = self.reward_mask.sum()
            self.return_min, self.return_max = self.return_batch.min(), self.return_batch.max()
            self.critic_grad = module_max_gradient(self.critic)
            self.critic_param = module_max_param(self.critic)
    def set_mode(self, test):
        """
        Switch all networks between training and evaluation mode.

        test=False: put networks in train() mode and clear critic-side
        gradients.  test=True: put everything in eval() mode.
        """
        self.test_mode = test
        if not test:
            self.state_feature_extractor.train()
            self.policy.train()
            if hasattr(self, "critic"):
                self.critic.train()
                self.critic_optim.zero_grad()
                self.state_feat_val_encoder_optim.zero_grad()
            if hasattr(self, 'traj_feature_extractor'):
                # Trajectory feature net only trains when enabled and not frozen.
                if self.train_traj_feature and not self.fix_traj_feature:
                    self.traj_feature_extractor.train()
                else:
                    self.traj_feature_extractor.eval()
                if self.train_traj_sampler:
                    self.traj_feature_sampler.train()
        else:
            # NOTE(review): bare ``torch.no_grad()`` is a no-op -- it creates
            # and discards a context manager without entering it, so gradients
            # are NOT actually disabled here.  Callers rely on the
            # @torch.no_grad() decorators on select_action instead.
            torch.no_grad()
            self.policy.eval()
            self.state_feature_extractor.eval()
            if hasattr(self, "critic"): self.critic.eval()
            if hasattr(self, "traj_feature_extractor"): self.traj_feature_extractor.eval()
            if hasattr(self, "traj_feature_sampler"): self.traj_feature_sampler.eval()
    def setup_feature_extractor(self, net_dict, test_time=False):
        """
        Wire up the feature-extractor (and optional trajectory) networks,
        optimizers and schedulers from a pre-built network dictionary.

        When no dedicated trajectory feature extractor is provided, the
        state feature extractor is reused in its place.
        """
        if "traj_feature_extractor" in net_dict:
            self.traj_feature_extractor = net_dict["traj_feature_extractor"]["net"]
            self.traj_feature_extractor_opt = net_dict["traj_feature_extractor"]["opt"]
            self.traj_feature_extractor_sch = net_dict["traj_feature_extractor"]["scheduler"]
        else:
            self.traj_feature_extractor = net_dict["state_feature_extractor"]["net"]
        if 'traj_feature_sampler' in net_dict:
            self.traj_feature_sampler = net_dict["traj_feature_sampler"]["net"]
            self.traj_feature_sampler_opt = net_dict["traj_feature_sampler"]["opt"]
            self.traj_feature_sampler_sch = net_dict["traj_feature_sampler"]["scheduler"]

        # Shared state feature extractor plus separate optimizers for the
        # policy-side and value-side encoder heads.
        self.state_feature_extractor = net_dict["state_feature_extractor"]["net"]
        self.state_feature_extractor_optim = net_dict["state_feature_extractor"]["opt"]
        self.state_feature_extractor_scheduler = net_dict["state_feature_extractor"]["scheduler"]
        self.state_feat_encoder_optim = net_dict["state_feature_extractor"][ "encoder_opt" ]
        self.state_feat_encoder_scheduler = net_dict["state_feature_extractor"][ "encoder_scheduler" ]
        self.state_feat_val_encoder_optim = net_dict["state_feature_extractor"][ "val_encoder_opt" ]
        self.state_feat_val_encoder_scheduler = net_dict["state_feature_extractor"][ "val_encoder_scheduler" ]
        self.test_time = test_time
    def get_mix_ratio(self, update_step):
        """
        Get a mixed schedule ratio for supervised learning vs. RL,
        stepped at ``self.mix_milestones`` and clamped by the DDPG
        coefficient caps.  Returns (mix_value_ratio, mix_policy_ratio).

        NOTE(review): the ``update_step`` argument is ignored --
        ``self.update_step`` is used instead; confirm that is intended.
        """
        idx = int((self.update_step > np.array(self.mix_milestones)).sum())
        mix_policy_ratio = get_valid_index(self.mix_policy_ratio_list, idx)
        mix_policy_ratio = min(mix_policy_ratio, self.ddpg_coefficients[4])
        mix_value_ratio = get_valid_index(self.mix_value_ratio_list, idx)
        mix_value_ratio = min(mix_value_ratio, self.ddpg_coefficients[3])
        return mix_value_ratio, mix_policy_ratio
def get_lr(self):
"""
Get network learning rates
"""
lrs = {
"policy_lr": self.policy_optim.param_groups[0]["lr"],
"feature_lr": self.state_feature_extractor_optim.param_groups[0]["lr"],
}
if self.train_traj_feature:
lrs["traj_feature_lr"] = self.traj_feature_extractor_opt.param_groups[0]["lr"]
if self.train_traj_sampler:
lrs["traj_sampler_lr"] = self.traj_feature_sampler_opt.param_groups[0]["lr"]
if hasattr(self, 'critic_optim'):
lrs["value_lr"] = self.critic_optim.param_groups[0]["lr"]
lrs["val_feat_lr"] = self.state_feat_val_encoder_optim.param_groups[0]["lr"]
headers = ["network", "learning rate"]
data = [(name, lr) for name, lr in lrs.items()]
return lrs
def optimize(self, loss, update_step):
"""
Backward loss and update optimizer
"""
self.state_feat_encoder_optim.zero_grad()
self.policy_optim.zero_grad()
if self.train_traj_feature:
self.traj_feature_extractor_opt.zero_grad()
if self.train_traj_sampler:
self.traj_feature_sampler_opt.zero_grad()
loss.backward(retain_graph=self.re_sampler_step)
self.policy_optim.step()
if self.train_feature:
self.state_feat_encoder_optim.step()
if self.train_traj_feature:
self.traj_feature_extractor_opt.step()
if self.train_traj_sampler:
self.traj_feature_sampler_opt.step()
def step_scheduler(self, step=None):
"""
Update network scheduler
"""
if self.train_traj_sampler:
self.traj_feature_sampler_sch.step()
if self.train_traj_feature:
self.traj_feature_extractor_sch.step()
if hasattr(self, "critic"):
self.critic_scheduler.step()
if hasattr(self, "policy"):
self.policy_scheduler.step()
if self.train_feature or self.train_value_feature:
self.state_feature_extractor_scheduler.step()
self.state_feat_encoder_scheduler.step()
if self.train_value_feature and hasattr(self, 'state_feat_val_encoder_scheduler'):
self.state_feat_val_encoder_scheduler.step()
def save_model(
self,
step,
output_dir="",
surfix="latest",
actor_path=None,
critic_path=None,
traj_feat_path=None,
state_feat_path=None,
):
"""
save model
"""
if not os.path.exists(output_dir):
os.makedirs(output_dir)
actor_path, critic_path, traj_feat_path, traj_sampler_path, state_feat_path = get_model_path(output_dir,
self.name, self.env_name, surfix)
print("Saving models to {} and {}".format(actor_path, critic_path))
if hasattr(self, "policy"):
torch.save(
{
"net": self.policy.state_dict(),
"opt": self.policy_optim.state_dict(),
"sch": self.policy_scheduler.state_dict(),
},
actor_path,
)
if hasattr(self, "critic"):
torch.save(
{
"net": self.critic.state_dict(),
"opt": self.critic_optim.state_dict(),
"sch": self.critic_scheduler.state_dict(),
},
critic_path,
)
if hasattr(self, 'traj_feature_extractor_opt'):
torch.save(
{
"net": self.traj_feature_extractor.state_dict(),
"opt": self.traj_feature_extractor_opt.state_dict(),
"sch": self.traj_feature_extractor_sch.state_dict(),
},
traj_feat_path,
)
if hasattr(self, 'traj_feature_sampler_opt'):
torch.save(
{
"net": self.traj_feature_sampler.state_dict(),
"opt": self.traj_feature_sampler_opt.state_dict(),
"sch": self.traj_feature_sampler_sch.state_dict(),
},
traj_sampler_path,
)
torch.save(
{
"net": self.state_feature_extractor.state_dict(),
"opt": self.state_feature_extractor_optim.state_dict(),
"encoder_opt": self.state_feat_encoder_optim.state_dict(),
"sch": self.state_feature_extractor_scheduler.state_dict(),
"encoder_sch": self.state_feat_encoder_scheduler.state_dict(),
"val_encoder_opt": self.state_feat_val_encoder_optim.state_dict(),
"val_encoder_sch": self.state_feat_val_encoder_scheduler.state_dict(),
"step": step,
},
state_feat_path,
)
def load_model(
self, output_dir, surfix="latest", set_init_step=False, reinit_value_feat=False
):
"""
Load saved model
"""
actor_path, critic_path, traj_feat_path, traj_sampler_path, state_feat_path = get_model_path(output_dir,
self.name, self.env_name, surfix)
if hasattr(self, "policy") and os.path.exists(actor_path):
net_dict = torch.load(actor_path)
self.policy.load_state_dict(net_dict["net"])
self.policy_optim.load_state_dict(net_dict["opt"])
self.policy_scheduler.load_state_dict(net_dict["sch"])
if self.reinit_optim and set_init_step:
for g in self.policy_optim.param_groups:
g["lr"] = self.reinit_lr
self.policy_scheduler = torch.optim.lr_scheduler.MultiStepLR(
self.policy_optim, milestones=self.policy_milestones, gamma=0.5 )
self.policy_scheduler.initial_lr = self.reinit_lr
self.policy_scheduler.base_lrs[0] = self.reinit_lr
print("reinit policy optim")
print("load policy weight: {:.3f} from {} !!!!".format(module_max_param(self.policy), actor_path))
hard_update(self.policy_target, self.policy, self.tau)
if hasattr(self, "critic") and os.path.exists(critic_path):
net_dict = torch.load(critic_path)
self.critic.load_state_dict(net_dict["net"])
self.critic_optim.load_state_dict(net_dict["opt"])
self.critic_scheduler.load_state_dict(net_dict["sch"])
print("load critic weight: {:.3f} !!!!".format(module_max_param(self.critic)))
hard_update(self.critic_target, self.critic, self.tau)
if hasattr(self, 'traj_feature_extractor') and os.path.exists(traj_feat_path):
net_dict = torch.load(traj_feat_path)
self.traj_feature_extractor.load_state_dict(net_dict["net"], strict=False)
print('load traj feature weight: {:.3f} from {} !!!!'.format(module_max_param(self.traj_feature_extractor), traj_feat_path))
try:
self.traj_feature_extractor_opt.load_state_dict(net_dict["opt"])
self.traj_feature_extractor_sch.load_state_dict(net_dict["sch"])
except:
pass
if hasattr(self, 'train_traj_sampler') and os.path.exists(traj_sampler_path):
net_dict = torch.load(traj_sampler_path)
self.traj_feature_sampler.load_state_dict(net_dict["net"], strict=False)
print('load traj sampler weight: {:.3f} from {} !!!!'.format(module_max_param(self.traj_feature_sampler), traj_sampler_path))
try:
self.traj_feature_sampler_opt.load_state_dict(net_dict["opt"])
self.traj_feature_sampler_sch.load_state_dict(net_dict["sch"])
except:
pass
if os.path.exists(state_feat_path):
net_dict = torch.load(state_feat_path)
if has_check(self, 'reinit_feat_opt'):
self.state_feature_extractor.load_state_dict(dict([(n, p) for n, p in net_dict["net"].items() if 'value' not in n ]),strict=False)
else:
self.state_feature_extractor.load_state_dict(net_dict["net"] )
self.state_feature_extractor_optim.load_state_dict(net_dict["opt"])
self.state_feature_extractor_scheduler.load_state_dict( net_dict["sch"] )
self.state_feat_encoder_optim.load_state_dict( net_dict["encoder_opt"] )
self.state_feat_encoder_scheduler.load_state_dict( net_dict["encoder_sch"] )
if not has_check(self, 'reinit_feat_opt'):
self.state_feat_val_encoder_optim.load_state_dict(
net_dict["val_encoder_opt"] )
self.state_feat_val_encoder_scheduler.load_state_dict(
net_dict["val_encoder_sch"] )
print(
"load feature weight: {} !!!! from: {} step :{}".format(
module_max_param(self.state_feature_extractor), state_feat_path, net_dict["step"]))
self.update_step = net_dict["step"]
self.init_step = self.update_step
return self.update_step
return 0
| 44.661996 | 161 | 0.610736 | 25,144 | 0.985962 | 0 | 0 | 4,687 | 0.18379 | 0 | 0 | 2,906 | 0.113952 |
4c8896a63d170ec55dc8e93c9856c824836a264a | 1,800 | py | Python | environment.py | CorodescuMihnea/NnProject | 2767b71145f5f3bb2e84aa37edbb6d58134d679a | [
"MIT"
] | null | null | null | environment.py | CorodescuMihnea/NnProject | 2767b71145f5f3bb2e84aa37edbb6d58134d679a | [
"MIT"
] | null | null | null | environment.py | CorodescuMihnea/NnProject | 2767b71145f5f3bb2e84aa37edbb6d58134d679a | [
"MIT"
] | null | null | null | import gym
import datetime
import os
import numpy as np
from agent import DeepQAgent
def main():
env = gym.make("LunarLander-v2")
timestamp = '{:%Y-%m-%d-%H:%M}'.format(datetime.datetime.now())
o_dir = "LunarLander-v2/{}/models".format(timestamp)
if not os.path.exists(o_dir):
os.makedirs(o_dir)
nof_episodes = 500
# 8 values in [0, 1]
state_size = env.observation_space.shape[0]
# 0, 1, 2, 3
action_size = env.action_space.n
agent = DeepQAgent(state_size, action_size, model=2)
batch_size = 32
for episode in range(nof_episodes):
state = env.reset()
state = np.reshape(state, [1, state_size])
done = False
t = 0
episode_reward = 0
# Iterate over the timesteps
while not done:
env.render()
# Instruct the agent to choose an action based on the current state of the environment
# This may be a random action depending on the value of the exploration_rate(epsilon)
action = agent.act(state)
# Execute said action
next_state, reward, done, _ = env.step(action)
episode_reward += reward
next_state = np.reshape(next_state, [1, state_size])
agent.memorize(state, action, reward, next_state, done)
state = next_state
if done:
print("episode: {}/{}, time: {}, total_reward: {}"
.format(episode, nof_episodes - 1, t, episode_reward))
t += 1
if len(agent.memory) / batch_size > 1:
agent.train(batch_size)
# Save model after training
if episode % batch_size == 1:
agent.save(o_dir + "/model_" + str(episode) + ".hdf5")
if __name__ == "__main__":
main()
| 30.508475 | 98 | 0.586111 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 410 | 0.227778 |
4c8a0d1bb9255782fe923e33bd79defeacecfa0f | 1,298 | py | Python | tests/serialization/test_deserialization/flows/flow_template.py | dazzag24/prefect | 9d36c989c95cbbed091b071932553286edf25bb6 | [
"Apache-2.0"
] | null | null | null | tests/serialization/test_deserialization/flows/flow_template.py | dazzag24/prefect | 9d36c989c95cbbed091b071932553286edf25bb6 | [
"Apache-2.0"
] | null | null | null | tests/serialization/test_deserialization/flows/flow_template.py | dazzag24/prefect | 9d36c989c95cbbed091b071932553286edf25bb6 | [
"Apache-2.0"
] | null | null | null | import datetime
from prefect import task, Flow, Parameter
from prefect.engine.cache_validators import partial_parameters_only
from prefect.environments.execution import RemoteEnvironment
from prefect.environments.storage import Docker
from prefect.engine.result_handlers import JSONResultHandler, S3ResultHandler
from prefect.tasks.shell import ShellTask
@task(max_retries=5, retry_delay=datetime.timedelta(minutes=10))
def root_task():
pass
@task(
cache_for=datetime.timedelta(days=10),
cache_validator=partial_parameters_only(["x"]),
result_handler=JSONResultHandler(),
)
def cached_task(x, y):
pass
x = Parameter("x")
y = Parameter("y", default=42)
@task(name="Big Name", checkpoint=True, result_handler=S3ResultHandler(bucket="blob"))
def terminal_task():
pass
env = RemoteEnvironment(
executor="prefect.engine.executors.DaskExecutor",
executor_kwargs={"scheduler_address": "tcp://"},
)
storage = Docker(
registry_url="prefecthq",
image_name="flows",
image_tag="welcome-flow",
python_dependencies=["boto3"],
)
with Flow("test-serialization", storage=storage, environment=env) as f:
result = cached_task.map(x, y, upstream_tasks=[root_task, root_task])
terminal_task(upstream_tasks=[result, root_task])
f.storage.add_flow(f)
| 25.45098 | 86 | 0.75963 | 0 | 0 | 0 | 0 | 381 | 0.293529 | 0 | 0 | 150 | 0.115562 |
4c8a63609fc662bd88f868ef8238e6f25e44baa6 | 9,616 | py | Python | blog/models.py | wjhgg/DBlog | 59274ac4353068a3795731c3f786748ba9095701 | [
"MulanPSL-1.0"
] | null | null | null | blog/models.py | wjhgg/DBlog | 59274ac4353068a3795731c3f786748ba9095701 | [
"MulanPSL-1.0"
] | null | null | null | blog/models.py | wjhgg/DBlog | 59274ac4353068a3795731c3f786748ba9095701 | [
"MulanPSL-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
import os
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.conf import settings
# Create your models here.
# 用户
# class User(AbstractUser):
# u_name = models.CharField(max_length=20, verbose_name='昵称', default='')
# birthday = models.DateField(verbose_name='生日', null=True, blank=True)
# genter = models.CharField(max_length=2, choices=(("male", '男'), ('female', '女')), default='male')
# image = models.ImageField(default='images/login/', max_length=200, null=True)
# describe = models.CharField(max_length=500, default='', verbose_name='个性签名')
#
# class Meta:
# verbose_name = '用户信息'
# verbose_name_plural = verbose_name
#
# def __unicode__(self):
# return self.username
#
# # 邮箱验证码
# class EmailVerificationCode(models.Model):
# code = models.CharField(max_length=20, verbose_name=u'验证码')
# email = models.EmailField(max_length=200, verbose_name=u'邮箱')
# send_type = models.CharField(max_length=10, choices=(("register", u'注册'), ("forget", u'密码找回')))
# send_time = models.DateTimeField(auto_now_add=True, )
#
# class Meta:
# verbose_name = u'邮箱验证码'
# verbose_name_plural = verbose_name
from django.db.models.signals import post_delete, post_init, post_save, pre_delete
from django.dispatch import receiver
from django.utils.html import format_html
from mdeditor.fields import MDTextField
class Friend(models.Model):
"""
友链
"""
url = models.CharField(max_length=200, verbose_name='友链链接', default='https://my.oschina.net/chulan')
title = models.CharField(max_length=100, verbose_name='超链接title', default='OSCHINA')
name = models.CharField(max_length=20, verbose_name='友链名称', default='chulan')
class Meta:
verbose_name = '友链'
verbose_name_plural = verbose_name
def __str__(self):
return self.url
class Carousel(models.Model):
"""
首页轮播图配置
"""
carousel = models.ImageField(upload_to='carousel', verbose_name='轮播图')
carousel_title = models.TextField(blank=True, null=True, max_length=100, verbose_name='轮播图左下标题')
img_link_title = models.TextField(blank=True, null=True, max_length=100, verbose_name='图片标题')
img_alt = models.TextField(blank=True, null=True, max_length=100, verbose_name='轮播图alt')
class Meta:
verbose_name = '首页轮播图配置'
verbose_name_plural = verbose_name
def __str__(self):
return self.carousel_title
@receiver(pre_delete, sender=Carousel)
def delete_upload_files(sender, instance, **kwargs):
instance.carousel.delete(False)
@receiver(post_init, sender=Carousel)
def file_path(sender, instance, **kwargs):
instance._current_file = instance.carousel
@receiver(post_save, sender= Carousel)
def delete_old_image(sender, instance, **kwargs):
if hasattr(instance, '_current_file'):
if instance._current_file != instance.carousel.path:
instance._current_file.delete(save=False)
class Announcement(models.Model):
"""
公告
"""
head_announcement = models.CharField(max_length=30, verbose_name='头部轮播公告', default='热烈欢迎浏览本站')
main_announcement = models.TextField(blank=True, null=True, max_length=300, verbose_name='右侧公告', default='暂无公告......')
class Meta:
verbose_name = '公告'
verbose_name_plural = verbose_name
def __str__(self):
return self.head_announcement
class Conf(models.Model):
"""
网站配置信息
"""
main_website = models.CharField(max_length=64, verbose_name='主网站', default="xwboy.top")
name = models.CharField(max_length=8, verbose_name='关注我_名称', default="CL' WU")
chinese_description = models.CharField(max_length=30, verbose_name='关注我_中文描述', default='永不放弃坚持就是这么酷!要相信光')
english_description = models.TextField(max_length=100, verbose_name='关注我_英文描述', default='Never give up persistence is so cool!Believe in the light!!!')
avatar_link = models.CharField(max_length=150, verbose_name='关注我_头像超链接', default='https://avatars.githubusercontent.com/u/52145145?v=4')
website_author = models.CharField(max_length=20, verbose_name='网站作者', default='xiaowu')
website_author_link = models.CharField(max_length=200, verbose_name='网站作者链接', default='http://www.xwboy.top')
email = models.CharField(max_length=50, verbose_name='收件邮箱', default='2186656812@qq.com')
website_number = models.CharField(max_length=100, verbose_name='备案号', default='豫ICP备 2021019092号-1')
git = models.CharField(max_length=100, verbose_name='git链接', default='https://gitee.com/wu_cl')
website_logo = models.ImageField(upload_to='logo', blank=True, null=True, verbose_name='网站logo', default='')
class Meta:
verbose_name = '网站配置'
verbose_name_plural = verbose_name
def __str__(self):
return self.main_website
@receiver(pre_delete, sender=Conf)
def delete_upload_files(sender, instance, **kwargs):
instance.website_logo.delete(False)
@receiver(post_init, sender=Conf)
def file_path(sender, instance, **kwargs):
instance._current_file = instance.website_logo
@receiver(post_save, sender= Conf)
def delete_old_image(sender, instance, **kwargs):
if hasattr(instance, '_current_file'):
if instance._current_file != instance.website_logo.path:
instance._current_file.delete(save=False)
class Pay(models.Model):
"""
收款图
"""
payimg = models.ImageField(upload_to='pay', blank=True, null=True, verbose_name='捐助收款图')
class Meta:
verbose_name = '捐助收款图'
verbose_name_plural = verbose_name
@receiver(pre_delete, sender=Pay)
def delete_upload_files(sender, instance, **kwargs):
instance.payimg.delete(False)
@receiver(post_init, sender=Pay)
def file_path(sender, instance, **kwargs):
instance._current_file = instance.payimg
@receiver(post_save, sender= Pay)
def delete_old_image(sender, instance, **kwargs):
if hasattr(instance, '_current_file'):
if instance._current_file != instance.payimg.path:
instance._current_file.delete(save=False)
class Tag(models.Model):
"""
标签
"""
tag_name = models.CharField('标签名称', max_length=30, )
class Meta:
verbose_name = '标签'
verbose_name_plural = verbose_name
def __str__(self):
return self.tag_name
class Article(models.Model):
"""
文章
"""
title = models.CharField(max_length=200, verbose_name='文章标题') # 博客标题
category = models.ForeignKey('Category', verbose_name='文章类型', on_delete=models.CASCADE)
date_time = models.DateField(auto_now_add=True, verbose_name='创建时间')
content = MDTextField(blank=True, null=True, verbose_name='文章正文')
digest = models.TextField(blank=True, null=True, verbose_name='文章摘要')
author = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name='作者', on_delete=models.CASCADE)
view = models.BigIntegerField(default=0, verbose_name='阅读数')
comment = models.BigIntegerField(default=0, verbose_name='评论数')
picture = models.ImageField(upload_to='article_picture', blank=True, null=True, verbose_name='url(标题图)') # 标题图片地址
tag = models.ManyToManyField(Tag) # 标签
class Meta:
ordering = ['-date_time'] # 按时间降序
verbose_name = '博客文章'
verbose_name_plural = verbose_name
def sourceUrl(self):
source_url = settings.HOST + '/blog/detail/{id}'.format(id=self.pk)
return source_url
def content_validity(self):
"""
正文字数显示控制
"""
if len(str(self.content)) > 40: # 字数自己设置
return '{}……'.format(str(self.content)[0:40]) # 超出部分以省略号代替。
else:
return str(self.content)
def viewed(self):
"""
增加阅读数
:return:
"""
self.view += 1
self.save(update_fields=['view'])
def commenced(self):
"""
增加评论数
:return:
"""
self.comment += 1
self.save(update_fields=['comment'])
def __str__(self):
return self.title
# 需要放在最后
# 同步删除上传文件
@receiver(pre_delete, sender=Article)
def delete_upload_files(sender, instance, **kwargs):
"""
sender: 模型类名
instance.字段名
"""
instance.picture.delete(False)
# 同步修改文件
@receiver(post_init, sender=Article)
def file_path(sender, instance, **kwargs):
"""
instance.字段名
"""
instance._current_file = instance.picture
@receiver(post_save, sender= Article)
def delete_old_image(sender, instance, **kwargs):
"""
instance.字段名.path
"""
if hasattr(instance, '_current_file'):
if instance._current_file != instance.picture.path:
instance._current_file.delete(save=False)
class Category(models.Model):
"""
文章类型
"""
name = models.CharField('文章类型', max_length=30)
created_time = models.DateTimeField('创建时间', auto_now_add=True)
last_mod_time = models.DateTimeField('修改时间', auto_now=True)
class Meta:
ordering = ['name']
verbose_name = "文章类型"
verbose_name_plural = verbose_name
def __str__(self):
return self.name
class Comment(models.Model):
"""
评论
"""
title = models.CharField("标题", max_length=100)
source_id = models.CharField('文章id或source名称', max_length=25)
create_time = models.DateTimeField('评论时间', auto_now=True)
user_name = models.CharField('评论用户', max_length=25)
url = models.CharField('链接', max_length=100)
comment = models.TextField('评论内容', max_length=500)
class Meta:
ordering = ['create_time']
verbose_name = '评论'
verbose_name_plural = verbose_name
def __str__(self):
return self.title
| 32.819113 | 155 | 0.683444 | 6,621 | 0.637738 | 0 | 0 | 2,120 | 0.2042 | 0 | 0 | 3,158 | 0.30418 |
4c8baa93c3b0d90c9a3d8b2aa1089d4f3e775bf1 | 6,385 | py | Python | update_supply_chain_information/supply_chains/test/test_extract_csv.py | uktrade/update-supply-chain-information | 5cdcc795257b8351cf11b57487b194012ee8886d | [
"MIT"
] | null | null | null | update_supply_chain_information/supply_chains/test/test_extract_csv.py | uktrade/update-supply-chain-information | 5cdcc795257b8351cf11b57487b194012ee8886d | [
"MIT"
] | 204 | 2021-05-26T16:15:04.000Z | 2022-02-14T05:10:44.000Z | update_supply_chain_information/supply_chains/test/test_extract_csv.py | uktrade/defend-data-capture | 5cdcc795257b8351cf11b57487b194012ee8886d | [
"MIT"
] | 1 | 2021-06-26T10:28:30.000Z | 2021-06-26T10:28:30.000Z | from io import StringIO
from typing import List
import os
import csv
import re
import pytest
from django.core.management import call_command
from django.core.management.base import CommandError
from django.core.files.temp import NamedTemporaryFile
import accounts.models
from supply_chains.management.commands.ingest_csv import (
MODEL_GOV_DEPT,
MODEL_SUPPLY_CHAIN,
MODEL_STRAT_ACTION,
MODEL_STRAT_ACTION_UPDATE,
)
from supply_chains.test.factories import (
SupplyChainFactory,
StrategicActionFactory,
StrategicActionUpdateFactory,
GovDepartmentFactory,
)
pytestmark = pytest.mark.django_db
class TestExtractCSV:
DUMP_CMD = "extract_csv"
def setup_method(self):
self.data_file = NamedTemporaryFile(suffix=".csv", delete=False)
def teardown_method(self):
os.remove(self.data_file.name)
def load_csv(self) -> List:
with open(self.data_file.name) as f:
reader = csv.DictReader(f)
rows = list(reader)
return rows
def invoke_dump(self, *args):
with StringIO() as status:
call_command(self.DUMP_CMD, *args, stdout=status)
return status.getvalue()
def test_dump_accounts_data(self):
# Arrange
trade_domian = "dosac.gov.uk"
trade_name = "DOSAC"
hmrc_domain = "hmrc.gov.uk"
hmrc_name = "HMRC"
GovDepartmentFactory(email_domains=[trade_domian], name=trade_name)
GovDepartmentFactory(email_domains=[hmrc_domain], name=hmrc_name)
# Act
self.invoke_dump(MODEL_GOV_DEPT, self.data_file.name)
rows = self.load_csv()
# Assert
assert len(rows) == 3
lookup = {x["name"]: x for x in rows}
assert (
lookup[trade_name]["name"] == trade_name
and lookup[trade_name]["email_domain_0"] == trade_domian
)
assert (
lookup[hmrc_name]["name"] == hmrc_name
and lookup[hmrc_name]["email_domain_0"] == hmrc_domain
)
def test_dump_accounts_data_multi_domain(self):
# Arrange
trade_domians = "dosac.gov.uk", "analogue.dosac.gov.uk"
trade_name = "DOSAC"
GovDepartmentFactory(email_domains=trade_domians, name=trade_name)
# Act
self.invoke_dump(MODEL_GOV_DEPT, self.data_file.name)
rows = self.load_csv()
# Assert
assert len(rows) == 2
assert all(k in rows[0] for k in ("email_domain_0", "email_domain_1"))
def test_dump_accounts_no_data(self):
# Arrange
accounts.models.GovDepartment.objects.all().delete()
# Act
self.invoke_dump(MODEL_GOV_DEPT, self.data_file.name)
# Assert
assert os.path.exists(self.data_file.name)
assert os.stat(self.data_file.name).st_size == 0
def test_dump_sc_data(self):
# Arrange
SupplyChainFactory()
# Act
self.invoke_dump(MODEL_SUPPLY_CHAIN, self.data_file.name)
rows = self.load_csv()
# Assert
assert len(rows) == 1
assert re.match(f"Product ", rows[0]["name"])
def test_dump_sc_data_multiple(self):
# Arrange
SupplyChainFactory.create_batch(5)
# Act
self.invoke_dump(MODEL_SUPPLY_CHAIN, self.data_file.name)
rows = self.load_csv()
# Assert
assert len(rows) == 5
names = [x["name"] for x in rows]
assert all([x.startswith("Product ") for x in names])
ids = [x["id"] for x in rows]
assert len(ids) == len(set(ids))
def test_dump_sa_data(self):
# Arrange
sc = SupplyChainFactory()
StrategicActionFactory(supply_chain=sc)
# Act
self.invoke_dump(MODEL_STRAT_ACTION, self.data_file.name)
rows = self.load_csv()
# Assert
assert len(rows) == 1
assert re.match(f"Strategic action ", rows[0]["name"])
assert rows[0]["supply_chain"] == str(sc.id)
def test_dump_sa_data_multiple(self):
# Arrange
exp_sc_ids = list()
for _ in range(4):
sc = SupplyChainFactory()
StrategicActionFactory(supply_chain=sc)
exp_sc_ids.append(str(sc.id))
# Act
self.invoke_dump(MODEL_STRAT_ACTION, self.data_file.name)
rows = self.load_csv()
# Assert
assert len(rows) == 4
ids = [x["id"] for x in rows]
assert len(ids) == len(set(ids))
sc_ids = [x["supply_chain"] for x in rows]
assert all([a == b for a, b in zip(sorted(sc_ids), sorted(exp_sc_ids))])
names = [x["name"] for x in rows]
assert all([x.startswith("Strategic action ") for x in names])
def test_dump_sau_data(self):
# Arrange
sc = SupplyChainFactory()
sa = StrategicActionFactory(supply_chain=sc)
StrategicActionUpdateFactory(supply_chain=sc, strategic_action=sa)
# Act
self.invoke_dump(MODEL_STRAT_ACTION_UPDATE, self.data_file.name)
rows = self.load_csv()
# Assert
assert len(rows) == 1
assert rows[0]["supply_chain"] == str(sc.id)
assert rows[0]["strategic_action"] == str(sa.id)
def test_dump_sau_data_multiple(self):
# Arrange
exp_sc_ids = list()
exp_sa_ids = list()
for _ in range(4):
sc = SupplyChainFactory()
sa = StrategicActionFactory(supply_chain=sc)
StrategicActionUpdateFactory(supply_chain=sc, strategic_action=sa)
exp_sc_ids.append(str(sc.id))
exp_sa_ids.append(str(sa.id))
# Act
self.invoke_dump(MODEL_STRAT_ACTION_UPDATE, self.data_file.name)
rows = self.load_csv()
# Assert
assert len(rows) == 4
ids = [x["id"] for x in rows]
assert len(ids) == len(set(ids))
sc_ids = [x["supply_chain"] for x in rows]
assert all([a == b for a, b in zip(sorted(sc_ids), sorted(exp_sc_ids))])
sa_ids = [x["strategic_action"] for x in rows]
assert all([a == b for a, b in zip(sorted(sa_ids), sorted(exp_sa_ids))])
def test_dump_inv_model(self):
# Arrange
inv_model = "hello world"
# Act
# Assert
with pytest.raises(CommandError, match=f"Unknown model {inv_model}"):
self.invoke_dump(inv_model, self.data_file.name)
| 29.288991 | 80 | 0.618011 | 5,755 | 0.901331 | 0 | 0 | 0 | 0 | 0 | 0 | 634 | 0.099295 |
4c8ce01c011cb806e29d1c5d44758d2a1fc1e41f | 2,909 | py | Python | 8_1_error.py | stnguyenn/learnpy | 4fc201bf461b0f7aa1a111a6a31b27dd492ad969 | [
"MIT"
] | null | null | null | 8_1_error.py | stnguyenn/learnpy | 4fc201bf461b0f7aa1a111a6a31b27dd492ad969 | [
"MIT"
] | null | null | null | 8_1_error.py | stnguyenn/learnpy | 4fc201bf461b0f7aa1a111a6a31b27dd492ad969 | [
"MIT"
] | null | null | null |
while True:
try:
x = int(input("Please enter a number: "))
break
except ValueError:
print("Oops! That was no valid number. Try again...")
class B(Exception):
pass
class C(B):
pass
class D(C):
pass
for cls in [B, C, D]:
try:
raise cls()
except D:
print("D")
except C:
print("C")
except B:
print("B")
import sys
try:
f = open('myfile.txt')
s = f.readline()
i = int(s.strip())
except OSError as err:
print("OS error: {0}".format(err))
except ValueError:
print("Could not convert data to an integer.")
except:
print("Unexpected error:", sys.exc_info()[0])
raise
for arg in sys.argv[1:]:
try:
f = open(arg, 'r')
except OSError:
print('cannot open', arg)
else:
print(arg, 'has', len(f.readlines()), 'lines')
f.close()
try:
raise Exception('spam', 'eggs')
except Exception as inst:
print(type(inst)) # the exception instance
print(inst.args) # arguments stored in .args
print(inst) # __str__ allows args to be printed directly,
# but may be overridden in exception subclasses
x, y = inst.args # unpack args
print('x =', x)
print('y =', y)
def this_fails():
x = 1/0
try:
this_fails()
except ZeroDivisionError as err:
print('Handling run-time error:', err)
try:
raise NameError('HiThere')
except NameError:
None
try:
raise ValueError # shorthand for 'raise ValueError()'
except ValueError:
None
try:
raise NameError('HiThere')
except NameError:
print('An exception flew by!')
class Error(Exception):
"""Base class for exceptions in this module."""
pass
class InputError(Error):
"""Exception raised for errors in the input.
Attributes:
expression -- input expression in which the error occurred
message -- explanation of the error
"""
def __init__(self, expression, message):
self.expression = expression
self.message = message
class TransitionError(Error):
"""Raised when an operation attempts a state transition that's not
allowed.
Attributes:
previous -- state at beginning of transition
next -- attempted new state
message -- explanation of why the specific transition is not allowed
"""
def __init__(self, previous, next, message):
self.previous = previous
self.next = next
self.message = message
try:
raise KeyboardInterrupt
except KeyboardInterrupt:
None
finally:
print('Goodbye, world!')
KeyboardInterrupt
def divide(x, y):
try:
result = x / y
except ZeroDivisionError:
print("division by zero!")
else:
print("result is", result)
finally:
print("executing finally clause")
divide(2, 1)
divide(2, 0)
# divide("2", "1")
| 19.924658 | 76 | 0.60605 | 918 | 0.315572 | 0 | 0 | 0 | 0 | 0 | 0 | 1,064 | 0.365761 |
4c8d3953fb08de0e0e4a8b653c8af1e5bab0d0e4 | 250 | py | Python | TORS/visualizer/__init__.py | AlgTUDelft/cTORS | 1d34c26d912b37a09289d6fe52cb0d9aded6d77d | [
"Apache-2.0"
] | 5 | 2021-04-25T10:40:55.000Z | 2022-02-24T14:07:28.000Z | TORS/visualizer/__init__.py | UtrechtUniversity/cTORS | 1d34c26d912b37a09289d6fe52cb0d9aded6d77d | [
"Apache-2.0"
] | null | null | null | TORS/visualizer/__init__.py | UtrechtUniversity/cTORS | 1d34c26d912b37a09289d6fe52cb0d9aded6d77d | [
"Apache-2.0"
] | 1 | 2022-03-04T05:08:05.000Z | 2022-03-04T05:08:05.000Z | # This program has been developed by students from the bachelor Computer Science
# at Utrecht University within the Software and Game project course in 2019
# (c) Copyright Utrecht University (Department of Information and Computing Sciences)
# NOQA
| 50 | 85 | 0.812 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 246 | 0.984 |
4c8d702182139486c5571a1903ade0f4deb79eeb | 9,511 | py | Python | donkeycar/parts/lidar.py | bo-rc/donkeycar | 7770cc28948ad88b49cbf896d35694a6fa59c545 | [
"MIT"
] | null | null | null | donkeycar/parts/lidar.py | bo-rc/donkeycar | 7770cc28948ad88b49cbf896d35694a6fa59c545 | [
"MIT"
] | 1 | 2019-12-29T23:11:43.000Z | 2019-12-29T23:11:43.000Z | donkeycar/parts/lidar.py | bo-rc/donkeycar | 7770cc28948ad88b49cbf896d35694a6fa59c545 | [
"MIT"
] | null | null | null | """
Lidar
"""
import time
import math
import pickle
import serial
import logging
import numpy as np
from donkeycar.utils import norm_deg, dist, deg2rad, arr_to_img
from PIL import Image, ImageDraw
class YdLidar(object):
    '''
    Threaded Donkey part that continuously reads scans from a YDLIDAR
    device through the PyLidar3 driver and publishes them as parallel
    angle/distance lists.
    https://pypi.org/project/PyLidar3/
    '''
    def __init__(self, port='/dev/ttyUSB0', model='G4', chunk_size='6000', freq=15):
        '''
        port: serial device the lidar is attached to.
        model: lidar model; only 'G4' is currently supported.
        chunk_size: serial read chunk size to tune for the speed of your
            compute board (default G4 uses '6000').
            NOTE(review): currently unused -- the driver is constructed
            with its default chunk size.
        freq: desired scan frequency in Hz.
            NOTE(review): currently unused -- the frequency-adjustment
            code has been disabled.
        '''
        if model == 'G4':
            from PyLidar3 import YdLidarG4
        else:
            raise Exception("YdLidar module currently only supports 'G4'.")
        self.port = port
        self.distances = []  # latest list of distance measurements
        self.angles = []     # angles (degrees) matching self.distances
        self.lidar = YdLidarG4(port=self.port)
        if self.lidar.Connect():
            print(self.lidar.GetDeviceInfo())
            # generator yielding one {angle: distance} dict per scan
            self.gen = self.lidar.StartScanning()
        else:
            raise Exception("Lidar not connected, port: {}, model:{}".format(self.port, model))
        self.on = True
        self.scan = {}

    def update(self):
        '''
        Threaded loop: pull scans from the driver generator and publish
        them as parallel angle/distance lists for run_threaded().
        '''
        while self.on:
            try:
                self.scan = next(self.gen)
                self.angles = list(self.scan.keys())
                self.distances = list(self.scan.values())
            except StopIteration:
                # driver stopped producing scans; nothing more to read
                break
            except serial.serialutil.SerialException:
                print('serial.serialutil.SerialException from Lidar. common when shutting down.')

    def run_threaded(self):
        '''
        Return the most recent (distances, angles) pair.
        '''
        return self.distances, self.angles

    def shutdown(self):
        self.on = False
        time.sleep(2)  # give the update thread time to leave its current read
        self.lidar.StopScanning()
        self.lidar.Disconnect()
class RPLidar(object):
    '''
    Threaded Donkey part that continuously reads scans from a Slamtec
    RPLidar and publishes them as parallel angle/distance lists.
    https://github.com/SkoltechRobotics/rplidar
    '''
    def __init__(self, port='/dev/ttyUSB0'):
        '''
        port: serial device the lidar is attached to.
        '''
        from rplidar import RPLidar
        self.port = port
        self.distances = []  # latest list of distance measurements
        self.angles = []     # angles (degrees) matching self.distances
        self.lidar = RPLidar(self.port)
        self.lidar.clear_input()
        time.sleep(1)
        self.on = True
        #print(self.lidar.get_info())
        #print(self.lidar.get_health())

    def update(self):
        '''
        Threaded loop: consume scans and publish distance/angle lists.
        '''
        while self.on:
            try:
                # Recreate the scan iterator on every pass: once it has
                # raised, a generator cannot be resumed, and re-iterating
                # the dead generator (as the original code did) made this
                # loop spin forever without reading anything.
                for scan in self.lidar.iter_scans(550):
                    # each measurement is (quality, angle, distance)
                    dists = [item[2] for item in scan]
                    angs = [item[1] for item in scan]
                    self.distances, self.angles = dists, angs
                    if not self.on:
                        break
            except serial.serialutil.SerialException:
                print('serial.serialutil.SerialException from Lidar. common when shutting down.')

    def run_threaded(self):
        '''
        Return the most recent (distances, angles) pair.
        '''
        return self.distances, self.angles

    def shutdown(self):
        self.on = False
        time.sleep(2)  # give the update thread time to leave its current read
        self.lidar.stop()
        self.lidar.stop_motor()
        self.lidar.disconnect()
class YdLidarPlot(object):
'''
takes the raw lidar measurements and plots it to an image
'''
PLOT_TYPE_LINE = 0
PLOT_TYPE_CIRC = 1
def __init__(self, scale=1.0, offset=(0., 0.), color=(255, 0, 0)):
self.scale = scale
self.offset = offset
self.origin = offset
self.color = color
self.max_dist= 8. # mm
self.min_dist = 0.2 # m
def plot(self, img, x, y, yaw, ranges, draw):
'''
scale dist so that max_dist is edge of img (mm)
and img is PIL Image, draw the circle using the draw ImageDraw object
'''
for angle in range(0, ranges.size):
if self.min_dist < ranges[angle] < self.max_dist:
plot_angle = round(min(359, angle + yaw))
radian = math.radians(plot_angle)
sx = round(x * self.scale + self.offset[0] + math.cos(radian) * ranges[plot_angle] * self.scale)
sy = round(y * self.scale + self.offset[1] + math.sin(radian) * ranges[plot_angle] * self.scale)
draw.point((sx, sy), fill=(128,128,128))
def run(self, img, x, y, yaw, ranges):
'''
takes two lists of equal length, one of distance values, the other of angles corresponding to the dist meas
'''
self.frame = img
draw = ImageDraw.Draw(self.frame)
self.plot(self.frame, x, -y, yaw, ranges, draw)
return self.frame
def update(self):
pass
def run_threaded(self, img, x, y, yaw, ranges):
return self.run(img, x, y, yaw, ranges)
def shutdown(self):
pass
class LidarPlot(object):
    '''
    Renders raw lidar measurements onto a PIL image, either as short radial
    line segments or as small filled circles, scaled so that max_dist lands
    on the image border.
    '''
    PLOT_TYPE_LINE = 0
    PLOT_TYPE_CIRC = 1

    def __init__(self, resolution=(500,500),
                 max_dist=1000, #mm
                 radius_plot=3,
                 plot_type=PLOT_TYPE_CIRC):
        '''
        :param resolution: (width, height) of the rendered image in pixels
        :param max_dist: distance (mm) mapped onto the image edge
        :param radius_plot: marker size in pixels
        :param plot_type: PLOT_TYPE_CIRC or PLOT_TYPE_LINE
        '''
        self.frame = Image.new('RGB', resolution)
        self.max_dist = max_dist
        self.rad = radius_plot
        self.resolution = resolution
        # choose the marker renderer once rather than branching per point
        self.plot_fn = self.plot_circ if plot_type == self.PLOT_TYPE_CIRC else self.plot_line

    def _scale_to_pixels(self, dist, max_dist, max_pixel):
        # map [0, max_dist] onto [0, max_pixel], clamping out-of-range values
        scaled = dist / max_dist * max_pixel
        return min(max(scaled, 0), max_pixel)

    def plot_line(self, img, dist, theta, max_dist, draw):
        '''
        Draw one measurement as a radial grey segment of length self.rad,
        scaled so max_dist reaches the edge of img.
        '''
        cx = img.width / 2
        cy = img.height / 2
        px = self._scale_to_pixels(dist, max_dist, min(cx, cy))
        angle = np.radians(theta)
        dx = math.cos(angle)
        dy = math.sin(angle)
        grey = (128, 128, 128)
        draw.line((dx * px + cx, dy * px + cy,
                   dx * (px + self.rad) + cx, dy * (px + self.rad) + cy),
                  fill=grey, width=1)

    def plot_circ(self, img, dist, theta, max_dist, draw):
        '''
        Draw one measurement as a filled grey circle of diameter 2*self.rad,
        scaled so max_dist reaches the edge of img.
        '''
        cx = img.width / 2
        cy = img.height / 2
        px = self._scale_to_pixels(dist, max_dist, min(cx, cy))
        angle = np.radians(theta)
        sx = round(math.cos(angle) * px + cx)
        sy = round(math.sin(angle) * px + cy)
        ex = round(math.cos(angle) * (px + 2 * self.rad) + cx)
        ey = round(math.sin(angle) * (px + 2 * self.rad) + cy)
        grey = (128, 128, 128)
        bbox = (min(sx, ex), min(sy, ey), max(sx, ex), max(sy, ey))
        draw.ellipse(bbox, fill=grey)

    def plot_scan(self, img, distances, angles, max_dist, draw):
        '''Render every (distance, angle) pair of one sweep.'''
        for d, theta in zip(distances, angles):
            self.plot_fn(img, d, theta, max_dist, draw)

    def run(self, distances, angles):
        '''
        takes two lists of equal length, one of distance values, the other of
        angles corresponding to the dist meas; returns a freshly rendered
        white PIL image with the sweep drawn in
        '''
        self.frame = Image.new('RGB', self.resolution, (255, 255, 255))
        draw = ImageDraw.Draw(self.frame)
        self.plot_scan(self.frame, distances, angles, self.max_dist, draw)
        return self.frame

    def shutdown(self):
        pass
class BreezySLAM(object):
    '''
    SLAM part backed by BreezySLAM's RMHC algorithm.
    https://github.com/simondlevy/BreezySLAM

    Each run() consumes one lidar sweep and returns the updated pose
    estimate; a caller-supplied byte buffer can optionally receive the
    rendered occupancy map.
    '''
    def __init__(self, MAP_SIZE_PIXELS=500, MAP_SIZE_METERS=10):
        '''
        :param MAP_SIZE_PIXELS: side length of the square occupancy map
        :param MAP_SIZE_METERS: physical side length the map covers
        '''
        from breezyslam.algorithms import RMHC_SLAM
        from breezyslam.sensors import Laser
        # 360 one-degree beams at 10 Hz; a 12000 mm reading means "no detection"
        laser_model = Laser(scan_size=360, scan_rate_hz=10., detection_angle_degrees=360, distance_no_detection_mm=12000)
        MAP_QUALITY=5
        self.slam = RMHC_SLAM(laser_model, MAP_SIZE_PIXELS, MAP_SIZE_METERS, MAP_QUALITY)
    def run(self, distances, angles, map_bytes):
        '''
        :param distances: range readings for this sweep -- presumably mm,
            matching the Laser model above; TODO confirm
        :param angles: angles (degrees) matching `distances`
        :param map_bytes: optional bytearray to render the map into, or None
        :return: (x, y, heading in radians, normalized)
        '''
        self.slam.update(distances, scan_angles_degrees=angles)
        x, y, theta = self.slam.getpos()
        if map_bytes is not None:
            self.slam.getmap(map_bytes)
        #print('x', x, 'y', y, 'theta', norm_deg(theta))
        return x, y, deg2rad(norm_deg(theta))
    def shutdown(self):
        pass
class BreezyMap(object):
    '''
    Holds the flat byte buffer that BreezySLAM can render its square
    occupancy map into (one byte per pixel).
    '''
    def __init__(self, MAP_SIZE_PIXELS=500):
        '''
        :param MAP_SIZE_PIXELS: side length of the square map in pixels
        '''
        self.mapbytes = bytearray(MAP_SIZE_PIXELS ** 2)

    def run(self):
        '''Return the (shared, mutable) map buffer.'''
        return self.mapbytes

    def shutdown(self):
        pass
class MapToImage(object):
    '''
    Reshapes a flat SLAM map byte buffer into a 2D grid and converts it to
    an image via donkeycar's arr_to_img helper.
    '''
    def __init__(self, resolution=(500, 500)):
        '''
        :param resolution: (rows, cols) the flat buffer is reshaped into
        '''
        self.resolution = resolution

    def run(self, map_bytes):
        '''Convert the flat byte buffer into an image.'''
        grid = np.array(map_bytes).reshape(self.resolution)
        return arr_to_img(grid)

    def shutdown(self):
        pass
| 32.35034 | 121 | 0.586899 | 9,293 | 0.977079 | 0 | 0 | 0 | 0 | 0 | 0 | 2,259 | 0.237514 |
4c8dcae1615bebff8006d7fba1a12425b310ad35 | 477 | py | Python | engines/factory.py | valeoai/BEEF | f1c5f3708ba91f6402dd05814b76dca1d9012942 | [
"Apache-2.0"
] | 4 | 2021-05-31T16:53:35.000Z | 2021-11-30T03:03:34.000Z | engines/factory.py | valeoai/BEEF | f1c5f3708ba91f6402dd05814b76dca1d9012942 | [
"Apache-2.0"
] | 3 | 2022-02-02T20:41:56.000Z | 2022-02-24T11:47:44.000Z | engines/factory.py | valeoai/BEEF | f1c5f3708ba91f6402dd05814b76dca1d9012942 | [
"Apache-2.0"
] | null | null | null | from bootstrap.lib.options import Options
from bootstrap.lib.logger import Logger
from .extract_engine import ExtractEngine
from .predict_engine import PredictEngine
def factory():
    """Instantiate the engine selected by the 'engine.name' option.

    :return: an ExtractEngine or PredictEngine instance
    :raises ValueError: if the configured engine name is unknown
    """
    engine_name = Options()['engine']['name']
    if engine_name == 'extract':
        engine = ExtractEngine()
    elif engine_name == 'predict':
        opt = Options()['engine']
        engine = PredictEngine(vid_id=opt.get('vid_id', None))
    else:
        # name the offending value instead of raising a bare ValueError
        raise ValueError("Unknown engine name: {}".format(engine_name))
    return engine
4c90072340fcbafd34ed47f1674ba9b82fd3e4b6 | 121 | py | Python | src/daipecore/decorator/tests/notebook_function_fixture.py | daipe-ai/daipe-core | aa205495fa6b464fa6078d17e439c60345ac99ea | [
"MIT"
] | 1 | 2021-09-17T09:07:09.000Z | 2021-09-17T09:07:09.000Z | src/daipecore/decorator/tests/notebook_function_fixture.py | daipe-ai/daipe-core | aa205495fa6b464fa6078d17e439c60345ac99ea | [
"MIT"
] | 2 | 2021-12-20T07:46:33.000Z | 2022-02-24T07:02:05.000Z | src/daipecore/decorator/tests/notebook_function_fixture.py | daipe-ai/daipe-core | aa205495fa6b464fa6078d17e439c60345ac99ea | [
"MIT"
] | null | null | null | from daipecore.decorator.notebook_function import notebook_function
@notebook_function
def load_data():
    """Test fixture: constant-returning function wrapped by @notebook_function."""
    return 155
| 17.285714 | 67 | 0.826446 | 0 | 0 | 0 | 0 | 50 | 0.413223 | 0 | 0 | 0 | 0 |
4c904c7ac1c81ad0f92f7369dfe650d29ed9f316 | 2,982 | py | Python | SNLI/encap_snli_bert.py | jind11/SememePSO-Attack | b29a5663258fd277eff892040106ca63a35bc0e1 | [
"MIT"
] | 74 | 2020-05-05T02:36:56.000Z | 2022-03-22T20:30:15.000Z | SNLI/encap_snli_bert.py | jind11/SememePSO-Attack | b29a5663258fd277eff892040106ca63a35bc0e1 | [
"MIT"
] | 6 | 2020-06-22T23:32:32.000Z | 2021-11-30T11:47:36.000Z | SNLI/encap_snli_bert.py | jind11/SememePSO-Attack | b29a5663258fd277eff892040106ca63a35bc0e1 | [
"MIT"
] | 14 | 2020-05-13T05:30:54.000Z | 2021-06-18T02:00:58.000Z | from SNLI_BERT import ModelTrainer
from SNLI_BERT import adjustBatchInputLen
from pytorch_transformers import BertTokenizer, BertModel, AdamW, WarmupLinearSchedule
from torch import nn
import torch
import config
class Model(nn.Module):
    """
    Wraps a fine-tuned BERT SNLI classifier behind a numpy-in / numpy-out
    interface: id matrices for (premise, hypothesis) pairs go in, softmax
    class probabilities come out.
    """
    def __init__(self, inv_dict):
        """
        :param inv_dict: mapping from vocabulary index (int) to token string,
            used to translate dataset ids back into BERT tokens
        """
        super(Model, self).__init__()
        self.config = config.SNLIConfig()
        model = BertModel.from_pretrained(self.config.BERT_MODEL)
        self.model = ModelTrainer(model, 3)
        self.model.load_state_dict(torch.load(self.config.model_name))
        self.model = self.model.eval().cuda()
        self.inv_dict = inv_dict
        self.tokenizer = BertTokenizer.from_pretrained(self.config.BERT_MODEL)
        self.m = nn.Softmax(1)
    def forward(self,input_x):
        """
        Score a batch of (premise, hypothesis) id matrices.

        :param input_x: pair [premise batch, hypothesis batch]; entries equal
            to 0 are treated as padding and skipped
        :return: numpy array of softmax probabilities (columns reordered, see
            below)
        """
        assert len(input_x[0]) == len(input_x[1]), "premise and hypothesis should share the same batch lens!"
        num_instance = len(input_x[0])
        batch = dict()
        batch["inputs"] = []
        # labels are required by the trainer's interface but unused here
        batch["labels"] = torch.zeros((num_instance,)).long()
        # build "[CLS] premise [SEP] hypothesis" token sequences per instance
        for i in range(len(input_x[0])):
            tokens = list()
            tokens.append(self.tokenizer.cls_token)
            for k in [0, 1]:
                add_sep = False
                if k == 0:
                    add_sep = True
                for j in range(len(input_x[k][i])):
                    #print(input_x[i], tokens)
                    #print(type(input_x[i][j]))
                    #print(self.dataset.inv_dict[0])
                    # inv_dict has no padding, maybe because of keras setting
                    if input_x[k][i][j] != 0:
                        tokens.append(self.inv_dict[int(input_x[k][i][j])])
                if add_sep:
                    tokens.append("[SEP]")
            tokens = self.tokenizer.convert_tokens_to_ids(tokens)
            batch["inputs"].append(tokens)
        # NOTE(review): this calls the module-level adjustBatchInputLen
        # imported from SNLI_BERT, not the method of the same name below.
        adjustBatchInputLen(batch)
        end_id = self.tokenizer.convert_tokens_to_ids("[SEP]")
        # append a trailing [SEP] after padding/truncation
        for i in range(len(input_x[0])):
            tokens = batch["inputs"][i]
            tokens.append(end_id)
        batch["inputs"] = torch.stack([torch.LongTensor(x) for x in batch['inputs']])
        with torch.no_grad():
            loss, logits = self.model(batch)
        # reorder class columns before the softmax; presumably maps the
        # model's label order onto the dataset's -- TODO confirm [1, 0, 2]
        logits = self.m(logits[:,[1,0,2]])
        return logits.cpu().numpy()
    def predict(self, input_x):
        """Alias for calling the model; kept to satisfy the attack interface."""
        # sess is of no use, just to tailor the ugly interface
        return self(input_x)
    def pred(self, x, y):
        """Score premises x against hypotheses y (another interface alias)."""
        return self([x, y])
    def adjustBatchInputLen(self, batch):
        """
        Pad/truncate every sequence in batch["inputs"] in place to the batch
        max length, capped at config.max_sent_lens.

        NOTE(review): forward() uses the module-level helper of the same
        name, not this method.
        """
        inputs = batch["inputs"]
        length = 0
        for item in inputs:
            length = max(length, len(item))
        length = min(length, self.config.max_sent_lens)
        num = len(inputs)
        for i in range(num):
            if length > len(inputs[i]):
                for j in range(length - len(inputs[i])):
                    inputs[i].append(self.tokenizer.pad_token_id)
            else:
                inputs[i] = inputs[i][:length]
4c9085d1d96d921992baf8be9f0b9a1cf28931a4 | 2,443 | py | Python | learn/ML/tensor_flow/cifar_animals.py | nvkhedkar/python-code | a66f383368388953f9d01a46b45fcac69c06543d | [
"Apache-2.0"
] | null | null | null | learn/ML/tensor_flow/cifar_animals.py | nvkhedkar/python-code | a66f383368388953f9d01a46b45fcac69c06543d | [
"Apache-2.0"
] | null | null | null | learn/ML/tensor_flow/cifar_animals.py | nvkhedkar/python-code | a66f383368388953f9d01a46b45fcac69c06543d | [
"Apache-2.0"
] | null | null | null | import tensorflow as tf
from tensorflow.keras import datasets, layers, models
import matplotlib.pyplot as plt
import sys
num_classes = 10  # ten digit classes

# Report what accelerators TensorFlow can see before pinning a device below.
print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU')))
print("Num CPUs Available: ", len(tf.config.list_physical_devices('CPU')))
print(tf.config.list_physical_devices())
# tf.debugging.set_log_device_placement(True)
# with tf.device('/CPU:0'):
with tf.device('/GPU:0'):
    # NOTE(review): despite the filename, this script loads MNIST digits,
    # not CIFAR animals.
    (x_train, y_train), (x_test, y_test) = datasets.mnist.load_data()
    print(x_train.shape, x_train.shape)
    # scale pixel values from [0, 255] to [0, 1]
    x_train = x_train.astype("float32") / 255
    x_test = x_test.astype("float32") / 255
    print("x_train shape:", x_train.shape)
    print(x_train.shape[0], "train samples")
    print(x_test.shape[0], "test samples")
    # one-hot encode the integer labels
    y_train = tf.keras.utils.to_categorical(y_train, num_classes)
    y_test = tf.keras.utils.to_categorical(y_test, num_classes)
    # add the trailing single-channel axis expected by Conv2D (NHWC)
    x_train = x_train.reshape((x_train.shape[0], 28, 28, 1))
    x_test = x_test.reshape((x_test.shape[0], 28, 28, 1))
    print(x_train.shape, y_train.shape)
    # sys.exit()
    # NOTE(review): input_shape is defined but unused below; the first Conv2D
    # layer passes its own input_shape literal.
    input_shape = (28, 28, 1)
    # LeNet-style conv net; SELU activations are paired with lecun_normal
    # initialization throughout
    model = tf.keras.Sequential()
    model.add(layers.Conv2D(4, kernel_size=(5, 5), activation="selu",
                            kernel_initializer="lecun_normal",
                            padding="same", input_shape=(28, 28, 1)))
    model.add(layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(layers.Conv2D(8, kernel_size=(5, 5), activation="selu",
                            kernel_initializer="lecun_normal"))
    model.add(layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(layers.Flatten())
    # model.add(layers.Dropout(0.5))
    model.add(layers.Dense(120, activation="selu", kernel_initializer="lecun_normal"))
    model.add(layers.Dense(84, activation="selu", kernel_initializer="lecun_normal"))
    model.add(layers.Dense(num_classes, activation="softmax"))
    model.summary()
    batch_size = 128
    epochs = 10
    model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
    # hold out 10% of the training data for validation
    history = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.1)
    # plot the training/validation accuracy curves
    plt.plot(history.history['accuracy'], label='accuracy')
    plt.plot(history.history['val_accuracy'], label='val_accuracy')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.ylim([0.5, 1])
    plt.legend(loc='lower right')
    # final held-out evaluation; the figure window opens afterwards
    test_loss, test_acc = model.evaluate(x_test, y_test, verbose=2)
    print(test_acc)
    plt.show()
4c90ae39bff1dade9d33ca5eca6ea5fdcec366f1 | 698 | py | Python | day-02/part-2/jules.py | lypnol/adventofcode-2017 | 03ced3df3eb80e5c7965c4120e3932919067cb15 | [
"MIT"
] | 16 | 2017-12-02T11:56:25.000Z | 2018-02-10T15:09:23.000Z | day-02/part-2/jules.py | lypnol/adventofcode-2017 | 03ced3df3eb80e5c7965c4120e3932919067cb15 | [
"MIT"
] | 19 | 2017-12-01T07:54:22.000Z | 2017-12-19T17:41:02.000Z | day-02/part-2/jules.py | lypnol/adventofcode-2017 | 03ced3df3eb80e5c7965c4120e3932919067cb15 | [
"MIT"
] | 4 | 2017-12-04T23:58:12.000Z | 2018-02-01T08:53:16.000Z | from submission import Submission
class JulesSubmission(Submission):
    def run(self, s):
        # :param s: input in string format
        # :return: solution flag
        def row_quotient(values):
            # Scan pairs in the same order as a nested index loop and return
            # the quotient of the first pair where one value evenly divides
            # the other.
            size = len(values)
            for i in range(size):
                for j in range(i + 1, size):
                    a, b = values[i], values[j]
                    if a > b and a % b == 0:
                        return int(a / b)
                    if b % a == 0:
                        return int(b / a)

        rows = [[int(token) for token in line.split()] for line in s.split('\n')]
        return str(sum(row_quotient(row) for row in rows))
4c90c659e07e9935f665a0f2295fac0cd75bf175 | 1,248 | py | Python | nrw/aachen.py | risklayer/corona-landkreis-crawler | 2e82448ff614240365de9493eafa0e6a620ac615 | [
"Unlicense"
] | 12 | 2022-02-23T11:06:06.000Z | 2022-03-04T17:21:44.000Z | nrw/aachen.py | risklayer/corona-landkreis-crawler | 2e82448ff614240365de9493eafa0e6a620ac615 | [
"Unlicense"
] | null | null | null | nrw/aachen.py | risklayer/corona-landkreis-crawler | 2e82448ff614240365de9493eafa0e6a620ac615 | [
"Unlicense"
] | null | null | null | #!/usr/bin/python3
## Tommy
from botbase import *
# Regexes scraping the running totals out of the (German) Aachen corona page:
# cumulative confirmed cases, reported deaths, and currently infected people.
_aachen_c = re.compile(r"eit Ende Februar 2020 (?:wurden beim Robert.Koch.Institut \(RKI\) )?insgesamt ([0-9.]+)")
_aachen_d = re.compile(r"Die Zahl der gemeldeten Todesfälle liegt bei ([0-9.]+)")
_aachen_a = re.compile(r"Aktuell sind ([0-9.]+) Menschen nachgewiesen")
def aachen(sheets):
    """Scrape the city of Aachen corona figures and push them via update().

    :param sheets: spreadsheet handle passed through to update()
    :return: True once the sheet was updated
    :raises NotYetAvailableException: if the page header does not yet carry
        today's date
    """
    import locale
    # German month names are needed so strftime output matches the header
    locale.setlocale(locale.LC_TIME, "de_DE.UTF-8")
    soup = get_soup("https://www.aachen.de/DE/stadt_buerger/notfall_informationen/corona/aktuelles/index.html")
    header = next(p.get_text() for p in soup.find_all(["p","h2"]) if "Zahlen zum Infektionsge" in p.get_text())
    # NOTE(review): '%e' (space-padded day) is a GNU strftime extension and
    # is not portable to all platforms.
    if not today().strftime("%e. %B %Y") in header: raise NotYetAvailableException("Aachen noch alt: " + header[24:])
    content = soup.get_text()
    c = force_int(_aachen_c.search(content).group(1))  # cumulative cases
    d = force_int(_aachen_d.search(content).group(1))  # deaths
    # recovered = cases - deaths - currently infected, when that figure exists
    g = (c - d - force_int(_aachen_a.search(content).group(1))) if _aachen_a.search(content) else None
    com = "Bot ohne G" if g is None else "Bot"
    update(sheets, 5334, c=c, d=d, g=g, sig="Bot", comment=com, ignore_delta=True)
    return True
# poll between 10:15 and 17:50 every 600 seconds for district id 5334
schedule.append(Task(10, 15, 17, 50, 600, aachen, 5334))
if __name__ == '__main__': aachen(googlesheets())
| 49.92 | 117 | 0.691506 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 418 | 0.334668 |
4c92640d168362510593976eed0dde878a84ff03 | 12,967 | py | Python | src/evaluate_massive.py | jamescporter/MACH-Pytorch | 32caddcc29541a1eb96dc3781d973b89bacfc2bb | [
"MIT"
] | 1 | 2020-06-06T19:54:47.000Z | 2020-06-06T19:54:47.000Z | src/evaluate_massive.py | jamescporter/MACH-Pytorch | 32caddcc29541a1eb96dc3781d973b89bacfc2bb | [
"MIT"
] | null | null | null | src/evaluate_massive.py | jamescporter/MACH-Pytorch | 32caddcc29541a1eb96dc3781d973b89bacfc2bb | [
"MIT"
] | 4 | 2020-06-06T19:55:03.000Z | 2020-08-23T19:56:53.000Z | from mach_utils import *
import logging
from argparse import ArgumentParser
from fc_network import FCNetwork
import tqdm
from dataset import XCDataset,XCDataset_massive
import json
from typing import Dict, List
from trim_labels import get_discard_set
from xclib.evaluation import xc_metrics
from xclib.data import data_utils
from torchnet import meter
import time
def get_args(argv=None):
    """Parse command-line arguments for the evaluation script.

    :param argv: optional list of argument strings to parse instead of
        ``sys.argv[1:]``; the default of ``None`` keeps the original
        command-line behavior while making the parser unit-testable.
    :return: the parsed ``argparse.Namespace``.
    """
    p = ArgumentParser()
    p.add_argument("--model", '-m', dest = "model", type = str, required = True,
                   help = "Path to the model config yaml file.")
    p.add_argument("--dataset", '-d', dest = "dataset", type = str, required = True,
                   help = "Path to the data config yaml file.")
    p.add_argument("--gpus", '-g', dest = "gpus", type = str, required = False, default = "0",
                   help = "A string that specifies which GPU you want to use, split by comma. Eg 0,1")
    p.add_argument("--cost", '-c', dest = "cost", type = str, required = False, default = '',
                   help = "Use cost-sensitive model or not. Should be in [hashed, original]. "
                          "Default empty string, which indicates that no cost-sensitive is used.")
    p.add_argument("--type", '-t', dest = "type", type = str, required = False, default = "all",
                   help = """Evaluation type. Should be 'all'(default) and/or 'trim_eval', split by comma. Eg. 'all,trim_eval'. If it is 'trim_eval', the rate parameter should be specified.
                        'all': Evaluate normally. If the 'trimmed' field in data config file is true, the code will automatically map the rest of the labels back to the orginal ones.
                        'trim_eval': Trim labels when evaluating. The scores with tail labels will be set to 0 in order not to predict these ones. This checks how much tail labels affect final evaluation metrics. Plus it will evaluate average precision on tail and head labels only.
                   """)
    p.add_argument("--rate", '-r', dest = "rate", type = str, required = False, default = "0.1",
                   help = """If evaluation needs trimming, this parameter specifies how many labels will be trimmed, decided by cumsum.
                   Should be a string containing trimming rates split by comma. Eg '0.1,0.2'. Default '0.1'.""")
    p.add_argument("--batch_size", '-bs', dest = "bs", type = int, required = False, default = "32",
                   help = """Evaluation batch size.""")
    return p.parse_args(argv)
def get_inv_hash(counts, inv_mapping, j):
    """
    Recover the original labels hashed into bucket ``j``.

    :param counts: prefix-sum offsets into ``inv_mapping``, one per bucket
        boundary
    :param inv_mapping: flat array of original label ids grouped by bucket
    :param j: bucket index in [0, b); can be a tensor
    :return: the slice of original labels belonging to bucket j
    """
    start, end = counts[j], counts[j + 1]
    return inv_mapping[start:end]
def single_rep(data_cfg, model_cfg, r):
    """
    Run one of the R hashed models (one "repetition") over the test set.

    Relies on module-level state created in ``__main__`` (``a``, ``model``,
    ``test_loader``, ``label_path``) and on helpers star-imported from
    mach_utils (``get_model_dir``, ``get_label_hash``, ``compute_scores``).

    :param data_cfg: dataset config dict
    :param model_cfg: model config dict
    :param r: repetition index in [0, R)
    :return: (ground-truth labels in the original label space,
              predicted scores mapped from hashed buckets back to labels)
    """
    # load ground truth
    a.__dict__['rep'] = r
    model_dir = get_model_dir(data_cfg, model_cfg, a)
    # load mapping
    counts, label_mapping, inv_mapping = get_label_hash(label_path, r)
    label_mapping = torch.from_numpy(label_mapping)
    # load models
    best_param = os.path.join(model_dir, model_cfg["best_file"])
    # an explicit "pretrained" path in the config overrides the best checkpoint
    preload_path = model_cfg["pretrained"] if model_cfg["pretrained"] else best_param
    if os.path.exists(preload_path):
        meta_info = torch.load(preload_path)
        model.load_state_dict(meta_info['model'])
    else:
        raise FileNotFoundError(
            "Model {} does not exist.".format(preload_path))
    # predict. gt: original label. p: hashed.
    gt, p, _, _ = compute_scores(model, test_loader)
    # index hashed scores by each label's bucket to get per-label scores
    return gt, p[:, label_mapping]
def map_trimmed_back(scores, data_dir, prefix, ori_labels):
    """
    Expand a score matrix over a trimmed label set back to the original
    label space, filling columns of trimmed-away labels with zeros.

    :param scores: (num_instances, num_trimmed_labels) score matrix
    :param data_dir: directory containing the ``<prefix>_meta.json`` mapping
    :param prefix: dataset prefix used to locate the mapping file
    :param ori_labels: number of labels in the original label space
    :return: (num_instances, ori_labels) numpy array of scores
    """
    meta_path = os.path.join(data_dir, prefix + "_meta.json")
    with open(meta_path, 'r') as fh:
        mapping = json.load(fh)
    # original-label id keyed by its trimmed-label id
    trimmed_to_orig = {trimmed[0]: int(orig) for orig, trimmed in mapping.items()}
    # target columns ordered by trimmed label id, so column t of `scores`
    # lands in its original column
    target_cols = np.array([trimmed_to_orig[t] for t in sorted(trimmed_to_orig)])
    expanded = np.zeros([scores.shape[0], ori_labels])
    expanded[:, target_cols] = scores
    return expanded
def sanity_check(a):
    """Validate parsed CLI arguments before evaluation starts.

    ``--type`` is documented (and consumed) as a comma-separated list, so
    each entry is validated individually; the original asserted on the whole
    string and rejected legal values such as ``"all,trim_eval"``.

    Note: validation still uses ``assert`` to preserve the exception type,
    so it is skipped when Python runs with ``-O``.

    :param a: ``argparse.Namespace`` produced by ``get_args``
    :raises AssertionError: if any evaluation type is unknown
    """
    valid = ('all', 'trim_eval', 'only_tail')
    for t in a.type.split(','):
        assert t in valid, "Unknown evaluation type: %s" % t
if __name__ == "__main__":
a = get_args()
gpus = [int(i) for i in a.gpus.split(",")]
data_cfg = get_config(a.dataset)
model_cfg = get_config(a.model)
log_file = data_cfg['prefix'] + "_eval.log"
model_dir = os.path.join(model_cfg["model_dir"], data_cfg["prefix"])
logging.basicConfig(level = logging.INFO,
format = '%(asctime)s %(levelname)-8s %(message)s', datefmt = '%Y-%m-%d %H:%M:%S',
handlers = [
logging.FileHandler(
os.path.join(model_dir, log_file)),
logging.StreamHandler()
])
cuda = torch.cuda.is_available()
R = model_cfg['r']
b = model_cfg['b']
num_labels = data_cfg["num_labels"]
ori_dim = data_cfg['ori_dim']
dest_dim = model_cfg['dest_dim']
name = data_cfg['name']
prefix = data_cfg['prefix']
record_dir = data_cfg["record_dir"]
data_dir = os.path.join("data", name)
K = model_cfg['at_k']
feat_path = os.path.join(record_dir, "_".join([prefix, str(ori_dim), str(dest_dim)]))
# load dataset
test_file = os.path.join(data_dir, prefix + "_test.txt")
# this will take a lot of space!!!!!!
test_set = XCDataset_massive(test_file, 0, data_cfg, model_cfg, 'te')
# test_sets = [XCDataset(test_file, r, data_cfg, model_cfg, 'te') for r in range(R)]
test_loader = torch.utils.data.DataLoader(
test_set, batch_size = a.bs)
# construct model
layers = [dest_dim] + model_cfg['hidden'] + [b]
model = FCNetwork(layers)
model = torch.nn.DataParallel(model, device_ids=gpus)
if cuda:
model = model.cuda()
label_path = os.path.join(record_dir, "_".join(
[prefix, str(num_labels), str(b), str(R)])) # Bibtex_159_100_32
pred_avg_meter = AverageMeter()
gt = None
logging.info("Evaluating config %s" % (a.model))
logging.info("Dataset config %s" % (a.dataset))
if a.cost:
logging.info("Evaluating cost-sensitive method: %s" % (a.cost))
# get inverse propensity
_, labels, _, _, _ = data_utils.read_data(test_file)
inv_propen = xc_metrics.compute_inv_propesity(labels, model_cfg["ps_A"], model_cfg["ps_B"])
gts = []
scaled_eval_flags = []
eval_flags = []
ps_eval_flags = []
map_meter = meter.mAPMeter()
for i, data in enumerate(tqdm.tqdm(test_loader)):
print(i, 'th data')
pred_avg_meter = AverageMeter()
X, gt = data
bs = X.shape[0]
for r in range(R):
print("REP", r, end = '\t')
x = X
feat_mapping = get_feat_hash(feat_path, r)
if model_cfg['is_feat_hash']:
x = x.coalesce()
ind = x.indices()
v = x.values()
ind[1] = torch.from_numpy(feat_mapping[ind[1]])
x = torch.sparse_coo_tensor(ind, values = v, size = (bs, dest_dim))
else:
pass
x = x.to_dense()
if cuda:
x = x.cuda()
# load model
a.__dict__['rep'] = r
model_dir = get_model_dir(data_cfg, model_cfg, a)
# load mapping
counts, label_mapping, inv_mapping = get_label_hash(label_path, r)
label_mapping = torch.from_numpy(label_mapping)
# load models
best_param = os.path.join(model_dir, model_cfg["best_file"])
preload_path = model_cfg["pretrained"] if model_cfg["pretrained"] else best_param
if os.path.exists(preload_path):
start= time.perf_counter()
if cuda:
meta_info = torch.load(preload_path)
else:
meta_info = torch.load(
preload_path, map_location=lambda storage, loc: storage)
model.load_state_dict(meta_info['model'])
end = time.perf_counter()
# logging.info("Load model time: %.3f s." % (end - start))
else:
raise FileNotFoundError(
"Model {} does not exist.".format(preload_path))
# the r_th output
start = time.perf_counter()
model.eval()
with torch.no_grad():
out = model(x)
out = torch.sigmoid(out)
out = out.detach().cpu().numpy()[:, label_mapping]
pred_avg_meter.update(out, 1)
end = time.perf_counter()
# logging.info("Single model running time: %.3f s." % (end - start))
start=time.perf_counter()
if gt.is_sparse:
gt = gt.coalesce()
gt = scipy.sparse.coo_matrix((gt.values().cpu().numpy(),
gt.indices().cpu().numpy()),
shape = (bs, num_labels))
else:
gt = scipy.sparse.coo_matrix(gt.cpu().numpy())
# only a batch of eval flags
scores = pred_avg_meter.avg
# map_meter.add(scores, gt.todense())
indices, true_labels, ps_indices, inv_psp = xc_metrics. \
_setup_metric(scores, gt, inv_propen)
eval_flag = xc_metrics._eval_flags(indices, true_labels, None)
ps_eval_flag = xc_metrics._eval_flags(ps_indices, true_labels, inv_psp)
# gts.append(gt)
scaled_eval_flag = np.multiply(inv_psp[indices], eval_flag)
eval_flags.append(eval_flag)
ps_eval_flags.append(ps_eval_flag)
scaled_eval_flags.append(scaled_eval_flag)
end = time.perf_counter()
logging.info("Eval collection time: %.3f s." % (end - start))
# eval all
# gts = np.concatenate(gts)
scaled_eval_flags = np.concatenate(scaled_eval_flags)
eval_flags = np.concatenate(eval_flags)
ps_eval_flags = np.concatenate(ps_eval_flags)
ndcg_denominator = np.cumsum(
1 / np.log2(np.arange(1, num_labels + 1) + 1))
_total_pos = np.asarray(
labels.sum(axis = 1),
dtype = np.int32)
n = ndcg_denominator[_total_pos - 1]
prec = xc_metrics._precision(eval_flags, K)
ndcg = xc_metrics._ndcg(eval_flags, n, K)
PSprec = xc_metrics._precision(scaled_eval_flags, K) / xc_metrics._precision(ps_eval_flags, K)
PSnDCG = xc_metrics._ndcg(scaled_eval_flags, n, K) / xc_metrics._ndcg(ps_eval_flags, n, K)
d = {
"prec": prec,
"ndcg": ndcg,
"psp": PSprec,
"psn": PSnDCG,
"mAP": [map_meter.value()]
}
log_eval_results(d)
# map trimmed labels back to original ones
# scores = pred_avg_meter.avg
# types = a.type.split(',')
# if 'all' in types:
# if data_cfg['trimmed']:
# # if use trim_eval or only_tail, data_cfg['trimmed'] should be false
# scores = map_trimmed_back(
# scores, data_dir, prefix, data_cfg['ori_labels'])
#
# if gt is None:
# raise Exception("You must have at least one model.")
# else:
# # Sum of avg is larger than 1 -> that is the feature, no problem
# d = evaluate_scores(gt, scores, model_cfg)
# log_eval_results(d)
#
# if 'trim_eval' in types or 'only_tail' in types:
# # find tail labels using training set.
# filepath = 'data/{n1}/{n1}_train.txt'.format(n1 = name)
# print(filepath)
# rate = [float(f) for f in a.rate.split(',')]
# discard_sets, count_np = get_discard_set(filepath, 'cumsum', rate)
# all_label_set = set(range(num_labels))
# rest_labels = [all_label_set - d for d in discard_sets]
# if 'trim_eval' in types:
# for r, dis_set, rest in zip(rate, discard_sets, rest_labels):
# logging.info(
# "Evaluate when trimming off {num_dis} labels (cumsum rate: {rate:.2f}%%, actual rate: {r2:.2f}%%)".format(
# num_dis = len(dis_set), rate = r * 100, r2 = len(dis_set) / num_labels * 100))
# dis_list = sorted(list(dis_set))
# rest_list = sorted(list(rest))
# new_score = np.copy(scores)
# new_score[:, dis_list] = 0
# log_eval_results(evaluate_scores(gt, new_score, model_cfg))
#
# # eval on head and tail labels, using original scores
# ap = APMeter()
# ap.add(scores, gt.todense())
# logging.info("AP of tail labels and head labels: %.2f, %.2f.\n" % (
# ap.value()[dis_list].mean() * 100, ap.value()[rest_list].mean() * 100))
| 42.937086 | 278 | 0.586797 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,529 | 0.349271 |
4c927ebdbaca58badff81390823615cc6fb3db53 | 5,824 | py | Python | src/predict.py | cdc08x/automated-flight-diversion-detection | 4c6f2f208e4f54492905f6f550c0c37b4635d360 | [
"MIT"
] | null | null | null | src/predict.py | cdc08x/automated-flight-diversion-detection | 4c6f2f208e4f54492905f6f550c0c37b4635d360 | [
"MIT"
] | null | null | null | src/predict.py | cdc08x/automated-flight-diversion-detection | 4c6f2f208e4f54492905f6f550c0c37b4635d360 | [
"MIT"
] | null | null | null | import logging
predictLogger = logging.getLogger(__name__)
def predictDiversion(trajectory, classification, decfunout, threshold):
severities = computeSeverities(trajectory, decfunout, threshold)
(diversionDetections, firstDetectionIndex) = catchDiversionAlerts(trajectory, classification, threshold)
printResultsAsCSV(diversionDetections, classification, firstDetectionIndex, decfunout, severities, trajectory)
if firstDetectionIndex is not None:
predictLogger.debug("Diversion predicted for flight %s" %trajectory.flightId)
printAlert(trajectory, classification, decfunout, severities, firstDetectionIndex)
return True, severities, trajectory.positions[firstDetectionIndex]
return False, severities, None
def computeSeverities(trajectory, distances, threshold):
severity = 0.0
severities = []
i = 0
j = 0
k = 0
while k < len(trajectory.positions) - len(distances):
severities.append(0.0)
k += 1
while i < len(distances):
severity = 0.0
j = i
while j >= 0:
severity += distances[j]
j -= 1
severities.append(severity)
i += 1
return severities
def catchDiversionAlerts(trajectory, classification, threshold):
numOfConsecutiveAnomalies = 0
diversionDetections = []
firstDetectionIndex = None
i = 0
j = 0
while i < len(trajectory.positions) - len(classification):
diversionDetections.append(False)
i += 1
while j < len(classification):
if classification[j] == -1:
numOfConsecutiveAnomalies += 1
else:
numOfConsecutiveAnomalies = 0
diversionDetections.append(numOfConsecutiveAnomalies >= threshold);
if diversionDetections[i+j] and firstDetectionIndex is None:
firstDetectionIndex = i+j
j += 1
return diversionDetections, firstDetectionIndex
def printAlert(trajectory, classification, decfunout, severities, firstDetectionIndex):
alertString = "\n"
alertString += "div-alert-flightid:%s\n" %trajectory.flightId
alertString += "div-alert-aircraftid:%s\n" %trajectory.aircraftId
alertString += "div-alert-flightcode:%s\n" %trajectory.flightCode
alertString += "div-alert-origin:%s\n" %(trajectory.origin.code)
alertString += "div-alert-departurelatitude:%s\n" %trajectory.origin.position.lat
alertString += "div-alert-departurelongitude:%s\n" %trajectory.origin.position.lon
alertString += "div-alert-destination:%s\n" %(trajectory.destination.code)
alertString += "div-alert-arrivallatitude:%s\n" %trajectory.destination.position.lat
alertString += "div-alert-arrivallongitude:%s\n" %trajectory.destination.position.lon
alertString += "div-alert-certainty:%s\n" %severities[firstDetectionIndex]
alertString += "div-alert-latitude:%s\n" %trajectory.positions[firstDetectionIndex].lat
alertString += "div-alert-longitude:%s\n" %trajectory.positions[firstDetectionIndex].lon
alertString += "div-alert-timestamp:%s\n" %trajectory.positions[firstDetectionIndex].date
predictLogger.debug("Diversion detection alert%s" %alertString)
def printResultsAsCSV(diversionDetections, classification, firstDetectionIndex, decfunout, severities, trajectory):
    """
    Dump the whole diversion analysis as a semicolon-separated CSV block to the debug log.

    One row per trajectory position: the leading rows (positions with no
    classification) carry empty feature/anomaly columns; classified rows carry the
    feature vector, anomaly flag, decision-function output and detection state.
    Any failure is caught and logged rather than propagated (best-effort logging).
    """
    try:
        data = trajectory.getVectors()
        positions = trajectory.getPositions()
        # header
        csv = "action-code;filename;flightId;departureCode;arrivalCode;firstEventDateTime;lastEventDateTime;predictionDateTime;latitude;longitude;speed;altitude;distLeft;distGain;dspeed;dalt;anomaly;distance;severity;diversionDetected;firstAlert\n"
        i = 0
        j = 0
        # Rows for the unclassified prefix: feature/anomaly columns left blank
        while (i < len(positions) - len(classification)):
            csv = csv + "%s;%s;%s;%s;%s;%s;%s;%s;%f;%f;%d;%d;%s;%s;%s;%s;%s;%f;%f;%s;%s\n" %("div-check",trajectory.filename, trajectory.flightId, trajectory.origin.code, trajectory.destination.code, trajectory.positions[0].date, trajectory.positions[-1].date, positions[i].date, positions[i].lat, positions[i].lon, positions[i].speed, positions[i].alt, "", "", "", "", "", 0.0, severities[i], "", "")
            i += 1
        # Rows for classified positions; i is now the prefix length, so i+j indexes
        # positions/severities while j indexes the classification-aligned arrays
        while j < len(classification):
            csv = csv + "%s;%s;%s;%s;%s;%s;%s;%s;%f;%f;%d;%d;%s;%s;%s;%s;%s;%f;%f;%s;%s\n" %("div-check",trajectory.filename, trajectory.flightId, trajectory.origin.code, trajectory.destination.code, trajectory.positions[0].date, trajectory.positions[-1].date, positions[i+j].date, positions[i+j].lat, positions[i+j].lon, positions[i+j].speed, positions[i+j].alt, data[j][0], data[j][1], data[j][2], data[j][3], (classification[j] == -1), decfunout[j], severities[i+j], diversionDetections[i+j], "Alert!" if firstDetectionIndex is not None and firstDetectionIndex == i+j else "")
            # at what (date)time was the diversion predicted?
            # if not diversionAlreadyPredicted:
            #     if numOfConsecutiveAnomalies == threshold:
            #         diversionDetectedDate = positions[i].date
            #         landingDate = landingPosition[-1].date
            #         timeDiff = landingDate - diversionDetectedDate
            #         #print "Diversion predicted " + str(timeDiff) + " before landing (minutes: " + str(int(timeDiff.total_seconds() / 60)) + ")"
            #         total_time_saved += int(timeDiff.total_seconds() / 60)
            #         diversionAlreadyPredicted = True
            j += 1
        # print " = "
        # print fsum(scores)
        predictLogger.debug("Diversion detection CSV traceback\n%s" %csv)
    except Exception as e:
        predictLogger.error("Error in diversion detection CSV dump for flight %s: %s" %(trajectory.flightId, format(e)))
4c92ce244030df317c3a30e338dd9e45f85fd368 | 192 | py | Python | data_collection/gazette/spiders/sc_chapeco.py | kaiocp/querido-diario | 86004049c6eee305e13066cf3607d30849bb099a | [
"MIT"
] | 454 | 2018-04-07T03:32:57.000Z | 2020-08-17T19:56:22.000Z | data_collection/gazette/spiders/sc_chapeco.py | kaiocp/querido-diario | 86004049c6eee305e13066cf3607d30849bb099a | [
"MIT"
] | 254 | 2020-08-18T14:09:43.000Z | 2022-03-28T11:30:51.000Z | data_collection/gazette/spiders/sc_chapeco.py | kaiocp/querido-diario | 86004049c6eee305e13066cf3607d30849bb099a | [
"MIT"
] | 183 | 2018-04-11T15:09:37.000Z | 2020-08-15T18:55:11.000Z | from gazette.spiders.base.fecam import FecamGazetteSpider
class ScChapecoSpider(FecamGazetteSpider):
    """Gazette spider for Chapecó (SC), scraped through the FECAM portal."""
    name = "sc_chapeco"
    # FECAM portal entity code for this municipality
    FECAM_QUERY = "cod_entidade:71"
    # presumably the IBGE territory code for Chapecó/SC — verify against IBGE data
    TERRITORY_ID = "4204202"
| 24 | 57 | 0.765625 | 131 | 0.682292 | 0 | 0 | 0 | 0 | 0 | 0 | 38 | 0.197917 |
4c932b77520032b5bb728a4df1354693ea393c21 | 3,101 | py | Python | ros/src/tl_detector/light_classification/tl_classifier.py | bogdan-kovalchuk/CarND-Capstone | 1ac1228dac0733f80a13cb378523bb8369289dfd | [
"MIT"
] | null | null | null | ros/src/tl_detector/light_classification/tl_classifier.py | bogdan-kovalchuk/CarND-Capstone | 1ac1228dac0733f80a13cb378523bb8369289dfd | [
"MIT"
] | 6 | 2020-11-13T18:39:31.000Z | 2022-03-12T00:17:41.000Z | ros/src/tl_detector/light_classification/tl_classifier.py | ChitraChaudhari/SDND-Capstone-Project | 4cfb7e94d9b1e337612733330989ec1fbf8c1854 | [
"MIT"
] | 3 | 2020-02-17T15:32:55.000Z | 2020-02-21T22:36:02.000Z | from styx_msgs.msg import TrafficLight
import cv2
import numpy as np
import tensorflow as tf
from keras.models import load_model
import os
class TLClassifier(object):
    """
    Two-stage traffic-light classifier: a frozen TF object-detection graph
    localizes a traffic light in the camera image, then a small Keras model
    classifies its color. Results are mapped to styx_msgs TrafficLight values.
    """
    def __init__(self):
        # Base directory used to resolve the bundled model files
        self.true_path = os.path.dirname(os.path.realpath('models/'))
        self.init_classifier()
        self.init_graph()
        # Keras class index -> TrafficLight message constant
        self.match_dict = {0: TrafficLight.GREEN,
                            1: TrafficLight.RED,
                            2: TrafficLight.YELLOW,
                            3: TrafficLight.UNKNOWN}
    def get_classification(self, image):
        """Return the TrafficLight state for `image`, or UNKNOWN if no light is found."""
        self.localize_obj(image)
        if self.img_out is None:
            #print('Didnt find traffic lights')
            return self.match_dict[3]
        self.classify_img()
        return self.match_dict[self.state]
    def localize_obj(self,img):
        """
        Run the detection graph on `img` and, if a traffic light is detected with
        score >= 0.5, store a 32x14 crop of it in self.img_out (else None).
        """
        # net was trained in bgr colorspace
        self.img_out = None
        self.img = img
        # shape of (1,?,?,3)
        input_img = np.expand_dims(cv2.cvtColor(img, cv2.COLOR_RGB2BGR), axis=0)
        with self.dg.as_default():
            (detection_boxes, detection_scores, detection_classes,num_detections) = self.sess.run(
                [self.box_t, self.score_t, self.class_t, self.num_t],
                feed_dict={self.img_t: input_img})
        for obs in zip(detection_boxes[0], detection_classes[0], detection_scores[0]):
            # did we observe traffic lights with high certainty?
            # NOTE(review): class id 10 presumably is "traffic light" in the model's
            # label map — confirm against the frozen graph's training labels
            if obs[1] == 10 and obs[2] >= .5:
                # get box and img for classification
                # box coordinates appear to be normalized [0,1]; x_* scale by
                # shape[0] (rows) and y_* by shape[1] (cols) — naming looks swapped
                # relative to image axes, but crop indexing is consistent with it
                box = obs[0]
                x_min = int(box[0] * self.img.shape[0])
                x_max = int(box[2] * self.img.shape[0])
                y_min = int(box[1] * self.img.shape[1])
                y_max = int(box[3] * self.img.shape[1])
                self.img_out = cv2.resize(cv2.cvtColor(self.img, cv2.COLOR_BGR2RGB)[x_min:x_max,y_min:y_max,:],(14,32))
                break
    def classify_img(self):
        """Classify the localized crop with the Keras model; stores argmax in self.state."""
        with self.class_graph.as_default():
            self.state = np.argmax(self.classifier.predict(self.img_out.reshape(1,32,14,3)))
    def init_graph(self):
        """Load the frozen detection graph, open a session and cache its I/O tensors."""
        self.path = self.true_path + "/light_classification/models/frozen_inference_graph.pb"
        self.dg = tf.Graph()
        with self.dg.as_default():
            gdef = tf.GraphDef()
            with open(self.path, 'rb') as f:
                gdef.ParseFromString(f.read())
                tf.import_graph_def(gdef, name="")
        self.sess = tf.Session(graph=self.dg)
        # Standard TF object-detection API tensor names
        self.img_t = self.dg.get_tensor_by_name('image_tensor:0')
        self.box_t = self.dg.get_tensor_by_name('detection_boxes:0')
        self.score_t = self.dg.get_tensor_by_name('detection_scores:0')
        self.class_t = self.dg.get_tensor_by_name('detection_classes:0')
        self.num_t = self.dg.get_tensor_by_name('num_detections:0')
    def init_classifier(self):
        """Load the Keras color classifier and remember the graph it was loaded into."""
        self.classifier = load_model(self.true_path + '/light_classification/models/model.h5')
        self.class_graph = tf.get_default_graph()
| 39.75641 | 119 | 0.594002 | 2,959 | 0.954208 | 0 | 0 | 0 | 0 | 0 | 0 | 382 | 0.123186 |
4c93974dec5f6d2a04ca20b46bed365d7a5932aa | 25,394 | py | Python | engine/src/hopeit/server/api.py | leosmerling/hopeit.engine | d95a130b03db4c5c6265d13256e77bf3fa2f6a42 | [
"Apache-2.0"
] | null | null | null | engine/src/hopeit/server/api.py | leosmerling/hopeit.engine | d95a130b03db4c5c6265d13256e77bf3fa2f6a42 | [
"Apache-2.0"
] | null | null | null | engine/src/hopeit/server/api.py | leosmerling/hopeit.engine | d95a130b03db4c5c6265d13256e77bf3fa2f6a42 | [
"Apache-2.0"
] | null | null | null | """
Open API spec creation and server helpers
"""
import json
import re
from copy import deepcopy
from functools import partial
from pathlib import Path
from typing import Dict, List, Tuple, Type, Optional, Callable, Awaitable, Union
from datetime import date, datetime
from aiohttp import web
from aiohttp_swagger3 import RapiDocUiSettings # type: ignore
from aiohttp_swagger3.swagger import Swagger # type: ignore
from aiohttp_swagger3.exceptions import ValidatorError # type: ignore
from aiohttp_swagger3 import validators # type: ignore
from aiohttp_swagger3.validators import MISSING, _MissingType # type: ignore
from aiohttp_swagger3.swagger_route import SwaggerRoute # type: ignore
from stringcase import titlecase # type: ignore
import typing_inspect as typing # type: ignore
from dataclasses_jsonschema import SchemaType
from hopeit.dataobjects import BinaryAttachment, BinaryDownload # type: ignore
from hopeit.app.config import AppConfig, AppDescriptor, EventDescriptor, EventPlugMode, EventType
from hopeit.server.config import ServerConfig, AuthType
from hopeit.server.errors import ErrorInfo
from hopeit.server.imports import find_event_handler
from hopeit.server.logger import engine_logger
from hopeit.server.names import route_name
from hopeit.server.steps import extract_module_steps, extract_postprocess_handler, extract_preprocess_handler, \
StepInfo
__all__ = ['init_empty_spec',
'load_api_file',
'save_api_file',
'setup',
'clear',
'app_route_name',
'register_server_config',
'register_apps',
'enable_swagger',
'diff_specs']
logger = engine_logger()
swagger: Optional[Swagger] = None
spec: Optional[dict] = None
static_spec: Optional[dict] = None
runtime_schemas = {}
_options = {
'generate_mode': False
}
OPEN_API_VERSION = '3.0.3'
METHOD_MAPPING = {
EventType.GET: 'get',
EventType.POST: 'post',
EventType.MULTIPART: 'post'
}
class APIError(Exception):
    """
    Error thrown when API incompatibilities are detected, e.g. when the loaded
    api-file differs from the spec computed from the running apps.
    """
def setup(**kwargs):
    """
    Override module options for the api module.

    Supported option:
    :param generate_mode: bool, default False: when True, empty path placeholders
        are created for modules that do not define an `__api__` specification
    """
    for option_name, option_value in kwargs.items():
        _options[option_name] = option_value
def clear():
    """
    Reset all module-level API state, effectively disabling the api module
    until a spec is loaded or initialized again.
    """
    global spec, static_spec, swagger, runtime_schemas, _options
    spec, static_spec, swagger = None, None, None
    runtime_schemas = {}
    _options = {'generate_mode': False}
def init_empty_spec(api_version: str, title: str, description: str):
    """
    Initialize the runtime and static spec dictionaries with a minimal Open API
    skeleton (openapi version, info section and empty paths). Useful to start a
    brand-new API spec from scratch.

    :param api_version: value for info.version
    :param title: value for info.title
    :param description: value for info.description
    """
    global spec, static_spec
    logger.info(__name__, "Creating Open API spec...")
    info_section = {
        "version": api_version,
        "title": title,
        "description": description,
    }
    spec = {"openapi": OPEN_API_VERSION, "info": info_section, "paths": {}}
    logger.info(__name__, f"API: openapi={spec['openapi']}, API version={spec['info']['version']}")
    # static_spec keeps a snapshot to later detect runtime drift (diff_specs)
    static_spec = deepcopy(spec)
def load_api_file(path: Union[str, Path]):
    """
    Loads OpenAPI spec from a json file. Spec is loaded into the module, and a
    snapshot is kept in `static_spec` so runtime drift can be detected later.

    :param path: path to json file
    """
    global spec, static_spec
    logger.info(__name__, f"Loading api spec from api_file={path}...")
    # Fix: JSON is UTF-8 by specification (RFC 8259); don't rely on the
    # platform-default encoding. Also use json.load instead of loads(read()).
    with open(path, 'r', encoding='utf-8') as f:
        spec = json.load(f)
    assert spec is not None
    logger.info(__name__, f"API: openapi={spec['openapi']}, API version={spec['info']['version']}")
    static_spec = deepcopy(spec)
def save_api_file(path: Union[str, Path], api_version: str):
    """
    Saves module Open API spec to a json file.

    :param path: path to json file
    :param api_version: new api_version; if the runtime spec differs from the
        previously loaded file, a bumped version number is required before saving
    :raises APIError: when differences exist but api_version was not incremented
    """
    assert spec is not None
    assert static_spec is not None
    if diff_specs() and static_spec['info']['version'] == api_version:
        err = APIError("Cannot save api file. Need to increment version number. Differences found.")
        logger.error(__name__, err)
        raise err
    logger.info(__name__, f"Set API version={api_version}...")
    spec['info']['version'] = api_version
    logger.info(__name__, f"Saving api spec to api_file={path}...")
    # Fix: write JSON as UTF-8 explicitly; json.dump streams directly to the file.
    # The explicit flush() was redundant — closing the file flushes it.
    with open(path, 'w', encoding='utf-8') as f:
        json.dump(spec, f, indent=2)
def register_server_config(server_config: ServerConfig):
    """
    Register API definitions derived from the server configuration:
    the allowed authentication schemes and the server-wide default auth methods.
    No-op when no spec is loaded.
    """
    if spec is None:
        return
    spec.setdefault('components', {'schemas': {}})
    _update_auth_methods()
    _update_server_default_auth_methods(server_config)
def register_apps(apps_config: List[AppConfig]):
    """
    Register api definition for a list of apps that conform to a single API specification.
    No-op when no spec is loaded.

    @param apps_config: list of AppConfig objects to be introspected
    """
    if spec is not None:
        logger.info(__name__, "Registering apps...")
        # Index by app_key so each app's plugin references can be resolved below
        apps_config_by_key = {config.app.app_key(): config for config in apps_config}
        for config in apps_config:
            logger.info(__name__, f"Updating API spec for app={config.app_key()}...")
            _register_api_spec(config)
            # Plugins registered in ON_APP mode contribute paths under the host app
            for plugin in config.plugins:
                logger.info(__name__, f"Updating API spec for app={config.app_key()}, plugin={plugin.app_key()}...")
                plugin_config = apps_config_by_key[plugin.app_key()]
                _register_api_spec(config, plugin_config)
        # Drop schemas that ended up unreferenced by any path
        _cleanup_api_schemas()
def _register_api_spec(app_config: AppConfig, plugin: Optional[AppConfig] = None):
    """
    Merge one app's (or one plugin's) schemas and paths into the running spec.
    No-op when no spec is loaded.
    """
    if spec is None:
        return
    spec.setdefault('components', {'schemas': {}})
    _update_predefined_schemas()
    _update_api_schemas(app_config)
    _update_api_paths(app_config, plugin)
def diff_specs() -> bool:
    """
    Compare the spec computed at runtime against the snapshot loaded from file.

    :return: True when they differ, False when the loaded spec matches runtime.
    """
    return spec != static_spec
async def _passthru_handler(request: web.Request) -> Tuple[web.Request, bool]:
    """Media-type handler that accepts the request unchanged (used for multipart/form-data)."""
    return request, True
class CustomizedObjectValidator(validators.Object):  # pragma: no cover
    """
    Replacements of Object Validator provided by aiohttp3_swagger
    to handle multipart form requests.

    The only change vs upstream is the widened `is_missing` check below: a raw
    value that is present but not a dict (e.g. a multipart body that was passed
    through unparsed) is treated as missing and returned as-is instead of
    failing validation. This validate() is monkey-patched onto validators.Object
    at module import time.
    """
    def validate(self, raw_value: Union[None, Dict, _MissingType],
                 raw: bool) -> Union[None, Dict, _MissingType]:
        # FIXED: is_missing = isinstance(raw_value, _MissingType)
        is_missing = (
            isinstance(raw_value, _MissingType)
            or ((raw_value is not None) and (not isinstance(raw_value, dict)))
        )
        # ORIGINAL CODE: https://github.com/hh-h/aiohttp-swagger3/blob/master/aiohttp_swagger3/validators.py
        # FIXED END
        if not is_missing and self.readOnly:
            raise ValidatorError("property is read-only")
        if raw_value is None:
            if self.nullable:
                return None
            raise ValidatorError("value should be type of dict")
        if not isinstance(raw_value, dict):
            # Non-dict payloads pass through untouched (the multipart case)
            if is_missing:
                return raw_value
            raise ValidatorError("value should be type of dict")
        value = {}
        errors: Dict = {}
        # Required properties must all be present before any per-property validation
        for name in self.required:
            if name not in raw_value:
                errors[name] = "required property"
        if errors:
            raise ValidatorError(errors)
        # Validate each declared property; collect all errors before raising
        for name, validator in self.properties.items():
            prop = raw_value.get(name, MISSING)
            try:
                val = validator.validate(prop, raw)
                if val != MISSING:
                    value[name] = val
            except ValidatorError as e:
                errors[name] = e.error
        if errors:
            raise ValidatorError(errors)
        # additionalProperties may be a bool (allow/deny) or a validator to apply
        if isinstance(self.additionalProperties, bool):
            if not self.additionalProperties:
                additional_properties = raw_value.keys() - value.keys()
                if additional_properties:
                    raise ValidatorError({k: "additional property not allowed" for k in additional_properties})
            else:
                for key in raw_value.keys() - value.keys():
                    value[key] = raw_value[key]
        else:
            for name in raw_value.keys() - value.keys():
                validator = self.additionalProperties
                value[name] = validator.validate(raw_value[name], raw)
        if self.minProperties is not None and len(value) < self.minProperties:
            raise ValidatorError(f"number or properties must be more than {self.minProperties}")
        if self.maxProperties is not None and len(value) > self.maxProperties:
            raise ValidatorError(f"number or properties must be less than {self.maxProperties}")
        return value
setattr(validators.Object, "validate", CustomizedObjectValidator.validate)
def enable_swagger(server_config: ServerConfig, app: web.Application):
    """
    Enables Open API (a.k.a Swagger) on this server. This consists of:
    * All endpoints within API specification are to be handled by a Open API handler that will validate requests
    * If specified in server_config.api_docs_path, API docs site will be available at the given route.
        i.e. http://server-address:8020/api/docs
    :param server_config: server configuration
    :param app: aiohttp web Application to host routes and docs
    :raises APIError: when the loaded api-file differs from the spec computed from running apps
    """
    global swagger, static_spec
    if spec is None:
        logger.warning(__name__, "No api-file loaded. OpenAPI docs and validation disabled.")
        return
    # Refuse to serve a spec that no longer matches the running apps
    if diff_specs():
        err = APIError("Cannot enable OpenAPI. Differences found between api-file and running apps. "
                       "Run `hopeit openapi diff` to check and `hopeit openapi update` to generate spec file")
        logger.error(__name__, err)
        raise err
    # Snapshot no longer needed once the spec is validated and frozen for serving
    static_spec = None
    logger.info(__name__, "Enabling OpenAPI endpoints...")
    app["AIOHTTP_SWAGGER3_SWAGGER_SPECIFICATION"] = spec
    api_docs_ui = None
    if server_config.api.docs_path:
        # RapiDoc UI settings for the docs site; spec/server selection is locked down
        api_docs_ui = RapiDocUiSettings(
            path=server_config.api.docs_path,
            heading_text=spec['info']['title'],
            theme='dark',
            render_style='read',
            layout='column',
            schema_style='tree',
            allow_spec_url_load=False,
            allow_spec_file_load=False,
            allow_server_selection=False,
            show_header=False
        )
        logger.info(__name__, f"OpenAPI documentation available in {server_config.api.docs_path}")
    else:
        logger.warning(
            __name__, "OpenAPI documentation path not specified in server config. API docs endpoint disabled.")
    swagger = Swagger(
        app,
        validate=True,
        spec=spec,
        request_key="data",
        rapidoc_ui_settings=api_docs_ui,
        redoc_ui_settings=None,
        swagger_ui_settings=None
    )
    # Multipart bodies are passed through raw; see CustomizedObjectValidator
    swagger.register_media_type_handler("multipart/form-data", _passthru_handler)
    logger.info(__name__, "OpenAPI validations enabled.")
def add_route(method: str,
              path: str,
              handler: Callable[..., Awaitable[web.StreamResponse]]) -> Callable[..., Awaitable[web.StreamResponse]]:
    """
    Register a route handler. When the (method, path) pair exists in the running
    Open API spec, the handler is wrapped by a validating Open API handler;
    otherwise the original handler is returned unchanged and a WARNING is logged.

    :param method: str, valid Open API method (i.e. GET, POST)
    :param path: str, route
    :param handler: function to be used as handler
    """
    if spec is None:
        return handler
    assert swagger is not None, "API module not initialized. Call `api.enable_swagger(...)`"
    method_lower = method.lower()
    if method_lower not in spec["paths"].get(path, {}):
        logger.warning(__name__, f"No API Spec defined for path={path}")
        return handler
    swagger_route = SwaggerRoute(method_lower, path, handler, swagger=swagger)
    return partial(swagger._handle_swagger_call, swagger_route)  # pylint: disable=protected-access
def app_route_name(app: AppDescriptor, *, event_name: str,
                   plugin: Optional[AppDescriptor] = None,
                   prefix: str = 'api', override_route_name: Optional[str] = None) -> str:
    """
    Returns the full route name for a given app event.

    :param app: AppDescriptor, as defined in AppConfig
    :param event_name: event name as defined in AppConfig
    :param plugin: optional plugin if the event comes from a plugin and EventPlugMode=='OnApp'
    :param prefix: route prefix, defaults to 'api'
    :param override_route_name: Optional[str], provided route to be used instead app and event name,
        if starts with '/', prefix will be ignored, otherwise appended to prefix
    :return: str, full route name. i.e.:
        /api/app-name/1x0/event-name or /api/app-name/1x0/plugin-name/1x0/event-name
    """
    if override_route_name is None:
        parts = [prefix, app.name, app.version]
        if plugin:
            parts.extend([plugin.name, plugin.version])
        parts.extend(event_name.split('.'))
    elif override_route_name[0] == '/':
        # Absolute override: prefix is dropped entirely
        parts = [override_route_name[1:]]
    else:
        parts = [prefix, override_route_name]
    return route_name(*parts)
def _schema_name(datatype: type) -> str:
return f"#/components/schemas/{datatype.__name__}"
def datatype_schema(event_name: str, datatype: Type) -> dict:
    """
    Build the schema fragment for *datatype*: builtin/container types resolve
    through TYPE_MAPPERS, anything else becomes a $ref to its component schema.
    """
    origin = typing.get_origin(datatype)
    if origin is None:
        origin = datatype
    mapper = TYPE_MAPPERS.get(origin)
    if mapper is None:
        return {"$ref": _schema_name(datatype)}
    return mapper(event_name, datatype)  # type: ignore
def _update_auth_methods():
    """Ensure the default HTTP auth schemes (basic, bearer) exist in securitySchemes."""
    schemes = spec['components'].setdefault('securitySchemes', {})
    schemes['auth.basic'] = {'type': 'http', 'scheme': 'basic'}
    schemes['auth.bearer'] = {'type': 'http', 'scheme': 'bearer'}
def _update_auth_refresh_method(app_key: str):
    """
    Register the per-app refresh-token scheme: an apiKey carried in a cookie
    named `{app_key}.refresh`.
    """
    assert spec is not None
    scheme_name = f"{app_key}.refresh"
    schemes = spec['components'].setdefault('securitySchemes', {})
    schemes[scheme_name] = {
        'type': 'apiKey',
        'in': 'cookie',
        'name': scheme_name
    }
def _update_server_default_auth_methods(server_config: ServerConfig):
    """
    Generate the top-level security section based on server default_auth_methods,
    appending only methods not already present (UNSECURED is never listed).
    """
    assert spec is not None
    security = spec.get('security', [])
    # Names of methods already declared, to avoid duplicates
    methods = {method for entry in security for method in entry.keys()}
    for auth_method in server_config.auth.default_auth_methods:
        auth_str = f"auth.{auth_method.value.lower()}"
        if auth_str != 'auth.unsecured' and auth_str not in methods:
            security.append({auth_str: []})
    spec['security'] = security
def _update_api_schemas(app_config: AppConfig):
    """
    Generate schemas for @dataobject annotated dataclasses discovered in event
    implementation modules. A schema name already registered at runtime is only
    reused when its definition is identical; conflicting definitions are ignored
    with a warning (first registration wins).
    """
    assert spec is not None
    schemas = spec['components'].get('schemas', {})
    for event_name in app_config.events.keys():
        event_schemas = _generate_schemas(app_config, event_name)
        for name, event_schema in event_schemas.items():
            if name in runtime_schemas:
                if not event_schema == schemas.get(name):
                    logger.warning(__name__,
                                   f"Schema ignored: same schema name has non-compatible implementations: "
                                   f"event={event_name} schema={name}")
            else:
                schemas[name] = event_schema
                runtime_schemas[name] = event_schema
    spec['components']['schemas'] = schemas
def _update_predefined_schemas():
    """
    Register schemas for engine-predefined classes (currently ErrorInfo),
    so error responses can reference them.
    """
    assert spec is not None
    spec['components']['schemas'].update(
        ErrorInfo.json_schema(schema_type=SchemaType.V3, embeddable=True)
    )
def _cleanup_api_schemas():
    """
    Remove schemas from spec, if they are not used in paths.

    Runs repeated passes until a fixpoint: removing an unused schema may orphan
    schemas that were only referenced by it.
    """
    assert spec is not None
    modified = True
    while modified:
        clean = {}
        # Serialize the whole spec and keep only schemas whose $ref appears in it
        spec_str = json.dumps(spec)
        schemas = spec['components'].get('schemas', {})
        for name, schema in schemas.items():
            if spec_str.find(f"#/components/schemas/{name}") >= 0:
                clean[name] = schema
        modified = len(schemas) > len(clean)
        spec['components']['schemas'] = clean
def _update_api_paths(app_config: AppConfig, plugin: Optional[AppConfig] = None):
    """
    Populates paths section of spec based on __api__ specified in implemented events.

    When `plugin` is given, only the plugin's ON_APP events are registered under the
    host app's routes; otherwise only the app's STANDALONE events are registered.
    """
    assert spec is not None
    events = {
        k: v for k, v in app_config.events.items() if v.plug_mode == EventPlugMode.STANDALONE
    } if plugin is None else {
        k: v for k, v in plugin.events.items() if v.plug_mode == EventPlugMode.ON_APP
    }
    plugin_app = None if plugin is None else plugin.app
    paths = spec.get('paths', {})
    for event_name, event_info in events.items():
        route = app_route_name(app_config.app, event_name=event_name, plugin=plugin_app,
                               override_route_name=event_info.route)
        # Events without an HTTP method mapping (e.g. stream events) are skipped
        method = METHOD_MAPPING.get(event_info.type)
        if method is None:
            continue
        event_api_spec = _extract_event_api_spec(app_config if plugin is None else plugin, event_name)
        if event_api_spec is None:
            # Fall back to a previously loaded spec entry for this route/method
            event_api_spec = paths.get(route, {}).get(method)
        if event_api_spec is None and _options.get('generate_mode'):
            # Placeholder entry emitted only in generate mode (see setup())
            event_api_spec = {"description": f"<<<{event_name}>>>", "parameters": [], "responses": {}}
        if event_api_spec is not None:
            event_api_spec['tags'] = [app_config.app_key()]
            _set_optional_fixed_headers(event_api_spec)
            _set_track_headers(event_api_spec, app_config)
            _set_path_security(event_api_spec, app_config, event_info)
            route_path = paths.get(route, {})
            route_path[method] = event_api_spec
            paths[route] = route_path
    spec['paths'] = paths
def _set_optional_fixed_headers(event_api_spec: dict):
"""
Set arguments for request-id and request-ts track headers on every path entry
"""
if not any(param['name'] == 'X-Track-Request-Id' for param in event_api_spec['parameters']):
event_api_spec['parameters'].append({
"name": "X-Track-Request-Id",
"in": "header",
"required": False,
"description": "Track information: Request-Id",
"schema": {
"type": "string"
}
})
if not any(param['name'] == 'X-Track-Request-Ts' for param in event_api_spec['parameters']):
event_api_spec['parameters'].append({
"name": "X-Track-Request-Ts",
"in": "header",
"required": False,
"description": "Track information: Request-Ts",
"schema": {
"type": "string",
"format": "date-time"
}
})
def _set_track_headers(event_api_spec: dict, app_config: AppConfig):
    """
    Declare the app-configured track headers as required header parameters on
    the path entry, skipping any already declared.
    """
    current_params = {entry['name'] for entry in event_api_spec['parameters']}
    for track_header in app_config.engine.track_headers:
        # e.g. "track.session_id" -> "X-Track-Session-Id" (via stringcase.titlecase)
        header_name = f"X-{re.sub(' ', '-', titlecase(track_header))}"
        if header_name not in current_params:
            event_api_spec['parameters'].append({
                "name": header_name,
                "in": "header",
                "required": True,
                "description": f"Track information: {track_header}",
                "schema": {
                    "type": "string",
                    # presumably a docs-friendly sample value ("track.x" -> "test.x") — confirm intent
                    "default": track_header.replace('track', 'test')
                }
            })
def _set_path_security(event_api_spec: dict, app_config: AppConfig, event_info: EventDescriptor):
    """
    Setup security schemes allowed for each path.

    REFRESH auth registers (and references) the per-app refresh cookie scheme;
    other methods map to the shared auth.* schemes. When the event declares no
    scheme and is not explicitly UNSECURED, the server-wide default applies.
    """
    assert spec is not None
    security: list = []
    for auth in event_info.auth:
        if auth == AuthType.REFRESH:
            _update_auth_refresh_method(app_config.app_key())
            auth_str = f"{app_config.app_key()}.refresh"
            security.append({auth_str: []})
        elif auth != AuthType.UNSECURED:
            auth_str = f"auth.{auth.value.lower()}"
            security.append({auth_str: []})
    # Fall back to the server defaults unless the event is explicitly unsecured
    if len(security) == 0 and AuthType.UNSECURED not in event_info.auth:
        security = spec['security']
    if len(security) > 0:
        event_api_spec['security'] = security
def _extract_event_api_spec(app_config: AppConfig, event_name: str) -> Optional[dict]:
    """
    Extract the `__api__` definition from an event implementation module.

    `__api__` may be a ready-made dict, or a callable that builds the spec from
    the module, app config and event name. Returns None when undefined.
    """
    module = find_event_handler(app_config=app_config, event_name=event_name)
    if hasattr(module, '__api__'):
        method_spec = getattr(module, '__api__')
        if isinstance(method_spec, dict):
            return method_spec
        return method_spec(module, app_config, event_name, None)
    return None
def _generate_schemas(app_config: AppConfig, event_name: str) -> dict:
    """
    Generate all schemas for a given event, based on steps signatures:
    the event's main steps plus its __preprocess__/__postprocess__ handlers.
    """
    module = find_event_handler(app_config=app_config, event_name=event_name)
    steps = extract_module_steps(module)
    schemas: dict = {}
    for _, step_info in steps:
        _update_step_schemas(schemas, step_info)
    step_info = extract_postprocess_handler(module)
    _update_step_schemas(schemas, step_info)
    step_info = extract_preprocess_handler(module)
    _update_step_schemas(schemas, step_info)
    return schemas
def _update_step_schemas(schemas: dict, step_info: Optional[StepInfo]):
    """
    Merge into `schemas` the json-schemas of all @dataobject types found in a
    step's input and return annotations (generic args are flattened first).
    """
    if step_info is None:
        return
    _, input_type, ret_type = step_info
    for datatype in _explode_datatypes([input_type, ret_type]):
        if datatype is None or not hasattr(datatype, '__data_object__'):
            continue
        if datatype.__data_object__['schema']:
            schemas.update(datatype.json_schema(schema_type=SchemaType.V3, embeddable=True))
def _explode_datatypes(datatypes: List[Type]) -> List[Type]:
result = []
for datatype in datatypes:
if datatype is not None:
if hasattr(datatype, '__args__'):
for arg in getattr(datatype, '__args__'):
result.extend(_explode_datatypes([arg]))
else:
result.append(datatype)
return result
def _array_schema(event_name: str, datatype: type):
    """Array schema whose items reference the component schema of the element type."""
    element_type = typing.get_args(datatype)[0]
    return {
        "type": "array",
        "items": {"$ref": _schema_name(element_type)}
    }
def _binary_download_schema(event_name: str, datatype: type):
return {
"type": "string",
"format": "binary"
}
def _builtin_schema(type_name: str, type_format: Optional[str],
event_name: str, datatype: type) -> dict:
"""
Build type schema for predefined datatypes
"""
schema = {
"type": "object",
"required": [
event_name
],
"properties": {
event_name: {
"type": type_name,
}
},
"description": f"{event_name} {type_name} payload"
}
if type_format is not None:
schema['properties'][event_name]['format'] = type_format # type: ignore
return schema
# Maps builtin/container type origins to schema-factory callables used by
# datatype_schema(); types not listed here fall through to a $ref schema.
TYPE_MAPPERS = {
    str: partial(_builtin_schema, 'string', None),
    int: partial(_builtin_schema, 'integer', None),
    float: partial(_builtin_schema, 'number', None),
    bool: partial(_builtin_schema, 'boolean', None),
    list: _array_schema,
    BinaryAttachment: partial(_builtin_schema, 'string', 'binary'),
    BinaryDownload: _binary_download_schema
}

# Python builtin -> (Open API type, optional format) pairs.
# NOTE(review): not referenced within this module chunk — presumably consumed
# by other modules importing this one; confirm before removing.
BUILTIN_TYPES = {
    str: ('string', None),
    int: ('integer', None),
    float: ('number', None),
    bool: ('boolean', None),
    date: ('string', 'date'),
    datetime: ('string', 'date-time')
}
| 36.590778 | 117 | 0.642356 | 2,840 | 0.111837 | 0 | 0 | 0 | 0 | 103 | 0.004056 | 8,087 | 0.318461 |
4c939c9d91ef2f1339d601df09aa932fed49d35e | 384 | py | Python | testing/nxtpython_x_motion.py | ArVID220u/lego3dcopier | 1144352cbee45bae4dea5869c36a513949dc668f | [
"MIT"
] | null | null | null | testing/nxtpython_x_motion.py | ArVID220u/lego3dcopier | 1144352cbee45bae4dea5869c36a513949dc668f | [
"MIT"
] | null | null | null | testing/nxtpython_x_motion.py | ArVID220u/lego3dcopier | 1144352cbee45bae4dea5869c36a513949dc668f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from xmovement import XMovement
import nxt
# Connect to the first NXT brick found; debug=True prints discovery details
brick = nxt.locator.find_one_brick(debug=True)
# Motor driving the X axis is plugged into port A
realport = nxt.motor.PORT_A
print("START")
#motor.debug_info()
xmovement = XMovement(realport, brick)
try:
    # Read integer target positions from stdin forever; ValueError on bad input
    # or EOF/KeyboardInterrupt ends the loop and triggers the reset below
    while True:
        position = int(input())
        xmovement.set_position(position)
finally:
    # Always return the axis to its reset state, even on error or interrupt
    xmovement.reset()
#motor.debug_info()
print("END")
| 13.714286 | 46 | 0.703125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 72 | 0.1875 |
4c9417844003b03d92633f2f16b78fb62fd56a2d | 1,996 | py | Python | appreview/migrations/0001_initial.py | IsaiahKe/awward-mimic | 8a5ff40d9acfbdc5323c7e9b6b8e7438f9a85d21 | [
"MIT"
] | null | null | null | appreview/migrations/0001_initial.py | IsaiahKe/awward-mimic | 8a5ff40d9acfbdc5323c7e9b6b8e7438f9a85d21 | [
"MIT"
] | null | null | null | appreview/migrations/0001_initial.py | IsaiahKe/awward-mimic | 8a5ff40d9acfbdc5323c7e9b6b8e7438f9a85d21 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.7 on 2021-09-22 09:28
import cloudinary.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import phonenumber_field.modelfields
class Migration(migrations.Migration):
    """
    Auto-generated initial migration: creates the AppVote and Profile tables.
    Do not hand-edit generated field definitions; create a new migration instead.
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='AppVote',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('appname', models.CharField(max_length=30)),
                ('appimage', cloudinary.models.CloudinaryField(max_length=255, verbose_name='image')),
                ('author', models.CharField(max_length=30)),
                ('livelink', models.URLField(null=True)),
                ('design', models.DecimalField(decimal_places=2, default=0.0, max_digits=3)),
                ('usability', models.DecimalField(decimal_places=2, default=0.0, max_digits=3)),
                ('content', models.DecimalField(decimal_places=2, default=0.0, max_digits=3)),
                ('total', models.DecimalField(decimal_places=2, default=0.0, max_digits=4)),
            ],
        ),
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('userPhoto', cloudinary.models.CloudinaryField(max_length=255, verbose_name='image')),
                ('bio', models.TextField()),
                ('contact', phonenumber_field.modelfields.PhoneNumberField(max_length=128, null=True, region=None)),
                ('location', models.CharField(blank=True, max_length=30, null=True)),
                ('username', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 44.355556 | 136 | 0.627255 | 1,775 | 0.889279 | 0 | 0 | 0 | 0 | 0 | 0 | 212 | 0.106212 |
4c9526462b2989d51a6edf4e3c00fe7a125db0ec | 2,477 | py | Python | test_accuracy.py | ejmejm/GoHeuristics | 9336d661abd48aa31ff5c9ed50cc2fbbd4472ebe | [
"Apache-2.0"
] | 1 | 2017-07-18T22:24:30.000Z | 2017-07-18T22:24:30.000Z | test_accuracy.py | ejmejm/GoHeuristics | 9336d661abd48aa31ff5c9ed50cc2fbbd4472ebe | [
"Apache-2.0"
] | null | null | null | test_accuracy.py | ejmejm/GoHeuristics | 9336d661abd48aa31ff5c9ed50cc2fbbd4472ebe | [
"Apache-2.0"
] | null | null | null | import glob
# Evaluates a trained Go move-prediction model on held-out SGF game records:
# games are loaded in batches, replayed move by move to build
# (encoded board, expected next move) pairs, and the model's top-1 move
# accuracy is accumulated over every position.
import os
import sys
from sgfmill.sgfmill import sgf
import global_vars_go as gvg
import loader
import utils
import board3d as go_board
import numpy as np
# Directory holding the SGF (kifu) game records.
kifuPath = "./kifu"
num_games = gvg.num_games
# Index of the first game reserved for testing.
from_game = gvg.from_test_games
# Games are loaded in batches of this size to bound memory use.
lb_size = 250.
correct = 0
total = 0
num_lb = int((num_games-1)/lb_size) + 1 # Number of loading batches
model = loader.load_model_from_file(gvg.nn_type)
for lb in range(num_lb):
    games = []
    print("Loading game data...")
    i = 0
    # NOTE(review): relies on `glob` being imported earlier in this file.
    for filename in glob.glob(os.path.join(kifuPath, "*.sgf")):
        # Only pick up the slice of games that belongs to the current batch.
        load_limit = min((lb+1) * lb_size, num_games)
        if from_game + (lb) * lb_size <= i < from_game + load_limit:
            with open(filename, "rb") as f:
                games.append(sgf.Sgf_game.from_bytes(f.read()))
        i += 1
    print("Done loading {} games".format(len(games)))
    print("Being data processing...")
    train_boards = []
    train_next_moves = []
    # Replay every game, collecting one (board, next-move) pair per move.
    for game_index in range(len(games)):
        board = go_board.setup_board(games[game_index])
        for node in games[game_index].get_main_sequence():
            board = go_board.switch_player_perspec(board) # Changes player perspective, black becomes white and vice versa
            node_move = node.get_move()[1]
            if node_move is not None:
                train_boards.append(go_board.get_encoded_board(board))
                # One-hot, board-sized target marking the move actually played.
                next_move = np.zeros(gvg.board_size * gvg.board_size).reshape(gvg.board_size, gvg.board_size)
                next_move[node_move[0], node_move[1]] = gvg.filled # y = an array in the form [board_x_position, board_y_position]
                train_next_moves.append(next_move.reshape(gvg.board_size * gvg.board_size))
                board = go_board.make_move(board, node_move, gvg.bot_channel, gvg.player_channel) # Update board with new move
                if board is None:
                    print("ERROR! Illegal move, {}, while training".format(node_move))
    print("Finished data processing...")
    print("Begin testing...")
    # A prediction counts as correct when the argmax of the model output
    # coincides with the move that was actually played.
    for i in range(len(train_boards)):
        pred = np.asarray(model.predict(train_boards[i].reshape(1, gvg.board_size, gvg.board_size, gvg.enc_board_channels))) \
            .reshape(gvg.board_size * gvg.board_size)
        if pred.argmax() == train_next_moves[i].argmax():
            correct += 1
        total += 1
print("Accuracy: {}".format(correct/total))
print("Finished testing")
| 38.107692 | 131 | 0.63908 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 396 | 0.159871 |
4c9540d917ffca6d79c34b67e169110a93828770 | 714 | py | Python | gpx_split/writer.py | mario-s/gpx_split | d043b1a887a4d42205c319b089a4e51594603dbe | [
"Apache-2.0"
] | null | null | null | gpx_split/writer.py | mario-s/gpx_split | d043b1a887a4d42205c319b089a4e51594603dbe | [
"Apache-2.0"
] | null | null | null | gpx_split/writer.py | mario-s/gpx_split | d043b1a887a4d42205c319b089a4e51594603dbe | [
"Apache-2.0"
] | null | null | null | import os
import gpxpy
import gpxpy.gpx
from gpx_split.log_factory import LogFactory
class Writer:
    """Persists a single GPX track segment as a standalone ``.gpx`` file."""

    def __init__(self, dest_dir):
        # Directory that receives the generated files.
        self.dest_dir = dest_dir
        self.logger = LogFactory.create(__name__)

    def write(self, name, segment):
        """Wrap ``segment`` in a one-track GPX document and save it as ``<name>.gpx``."""
        filename = f"{name}.gpx"
        self.logger.debug(f"writing {len(segment.points)} points to {filename}")
        document = gpxpy.gpx.GPX()
        document.name = name
        track = gpxpy.gpx.GPXTrack()
        track.segments.append(segment)
        document.tracks.append(track)
        target = os.path.join(self.dest_dir, filename)
        with open(target, "w") as out:
            out.write(document.to_xml())
| 23.8 | 76 | 0.62605 | 624 | 0.87395 | 0 | 0 | 0 | 0 | 0 | 0 | 135 | 0.189076 |
4c9630f53ceb58a9bfb694a986d27f1deb6933d1 | 1,303 | py | Python | utils/all_utils.py | RaghuprakashH/classification_Asssignment | ebf507736f0d67b1c0fd8451ca284a2fbee9465e | [
"MIT"
] | null | null | null | utils/all_utils.py | RaghuprakashH/classification_Asssignment | ebf507736f0d67b1c0fd8451ca284a2fbee9465e | [
"MIT"
] | null | null | null | utils/all_utils.py | RaghuprakashH/classification_Asssignment | ebf507736f0d67b1c0fd8451ca284a2fbee9465e | [
"MIT"
] | null | null | null | from typing import cast
import pandas as pd
import os
import numpy as np
import glob
from sklearn.model_selection import train_test_split
def prepare_data(random_state, path):
    """Load every sensor CSV found under the given directories and split the
    rows into train/test sets.

    Each directory in ``path`` is scanned; every file except ``README.txt``
    is parsed as a comma-separated file with the fixed sensor schema below.
    All rows are concatenated into a single frame, ``Id_sensor`` is cast to
    float64, and the features (all columns except ``Label``) / target
    (``Label``) are split with ``train_test_split``.

    :param random_state: seed forwarded to ``train_test_split`` so the
        split is reproducible
    :param path: iterable of directory paths containing the CSV files
    :return: tuple ``(x_train, x_test, y_train, y_test)``
    """
    cols = ['Time', 'Acceler_Front', 'Acceler_Vert', 'Acceler_later',
            'Id_sensor', 'RSSI', 'Phase', 'Frequency', 'Label']
    frames = []
    for directory in path:
        for filename in os.listdir(directory):
            # README.txt is documentation shipped with the dataset, not data.
            if filename != 'README.txt':
                frames.append(pd.read_csv(os.path.join(directory, filename),
                                          sep=',', names=cols))
    df = pd.DataFrame(pd.concat(frames, ignore_index=False))
    df.reset_index(inplace=True)
    df.drop(columns=['index'], inplace=True)
    # Some files store the sensor id as int/str; normalise the dtype.
    df.Id_sensor = df.Id_sensor.astype('float64')
    data = df.copy()
    X = data.iloc[:, :-1]  # every column except the trailing Label
    y = data.Label
    x_train, x_test, y_train, y_test = train_test_split(X, y, random_state=random_state)
    return x_train, x_test, y_train, y_test
| 27.723404 | 133 | 0.595549 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 342 | 0.262471 |
4c96717b178bd17f84a623b5cf3eca004be63ebd | 3,882 | py | Python | src/isanlp_rst/allennlp_segmenter.py | IINemo/isanlp_rst | 2d71b4fa874e6777aa437989024294bf9f6983c0 | [
"MIT"
] | 1 | 2020-07-30T08:29:56.000Z | 2020-07-30T08:29:56.000Z | src/isanlp_rst/allennlp_segmenter.py | IINemo/isanlp_rst | 2d71b4fa874e6777aa437989024294bf9f6983c0 | [
"MIT"
] | null | null | null | src/isanlp_rst/allennlp_segmenter.py | IINemo/isanlp_rst | 2d71b4fa874e6777aa437989024294bf9f6983c0 | [
"MIT"
] | null | null | null | import os
import numpy as np
from allennlp.predictors import Predictor
from isanlp.annotation_rst import DiscourseUnit
from symbol_map import SYMBOL_MAP
class AllenNLPSegmenter:
    """Neural EDU (elementary discourse unit) segmenter backed by an
    AllenNLP sequence-tagging model.

    Sentences are tagged token by token; tokens labelled 'U-S' are treated
    as left boundaries of new EDUs, which are then materialised as
    DiscourseUnit objects.
    """
    def __init__(self, model_dir_path, cuda_device=-1):
        """
        :param model_dir_path: directory containing segmenter_neural/model.tar.gz
        :param cuda_device: CUDA device id; -1 runs on CPU
        """
        self._model_path = os.path.join(model_dir_path, 'segmenter_neural', 'model.tar.gz')
        self._cuda_device = cuda_device
        self.predictor = Predictor.from_path(self._model_path, cuda_device=self._cuda_device)
        # Tag the model emits for a token that starts a new segment.
        self._separator = 'U-S'
        self._symbol_map = SYMBOL_MAP
    def __call__(self, annot_text, annot_tokens, annot_sentences, annot_lemma, annot_postag, annot_synt_dep_tree,
                 start_id=0):
        # Only text, tokens and sentences are used here; the remaining
        # annotations are accepted to satisfy the pipeline interface.
        return self._build_discourse_units(annot_text, annot_tokens,
                                           self._predict(annot_tokens, annot_sentences), start_id)
    def _predict(self, tokens, sentences):
        """
        :return: numbers of tokens predicted as EDU left boundaries
        """
        _sentences = []
        for sentence in sentences:
            text = ' '.join([self._prepare_token(token.text) for token in tokens[sentence.begin:sentence.end]]).strip()
            if text:
                _sentences.append(text)
        predictions = self.predictor.predict_batch_json([{'sentence': sentence} for sentence in _sentences])
        result = []
        for i, prediction in enumerate(predictions):
            # Boolean mask over the sentence's tokens: True = EDU boundary.
            pred = np.array(prediction['tags'][:sentences[i].end - sentences[i].begin]) == self._separator
            # The first token in a sentence is a separator
            # if it is not a point in a list
            if len(pred) > 0:
                if i > 0:
                    if predictions[i - 1]['words'][1] == '.' and predictions[i - 1]['words'][0] in "0123456789":
                        pred[0] = False
                else:
                    pred[0] = True
            # No single-token EDUs
            for j, token in enumerate(pred[:-1]):
                if token and pred[j + 1]:
                    if j == 0:
                        pred[j + 1] = False
                    else:
                        pred[j] = False
            result += list(pred)
        return np.argwhere(np.array(result) == True)[:, 0]
    def _build_discourse_units(self, text, tokens, numbers, start_id):
        """
        :param text: original text
        :param list tokens: isanlp.annotation.Token
        :param numbers: positions of tokens predicted as EDU left boundaries (beginners)
        :return: list of DiscourseUnit
        """
        edus = []
        if numbers.shape[0]:
            # Each intermediate EDU spans one predicted boundary up to the next.
            for i in range(0, len(numbers) - 1):
                new_edu = DiscourseUnit(start_id + i,
                                        start=tokens[numbers[i]].begin,
                                        end=tokens[numbers[i + 1]].begin - 1,
                                        text=text[tokens[numbers[i]].begin:tokens[numbers[i + 1]].begin],
                                        relation='elementary',
                                        nuclearity='_')
                edus.append(new_edu)
        if numbers.shape[0] == 1:
            i = -1
        # Closing EDU: from the last boundary to the end of the text.
        # NOTE(review): assumes `numbers` is non-empty (otherwise `i` is
        # unbound and numbers[-1] raises) — confirm callers guarantee this.
        new_edu = DiscourseUnit(start_id + i + 1,
                                start=tokens[numbers[-1]].begin,
                                end=tokens[-1].end,
                                text=text[tokens[numbers[-1]].begin:tokens[-1].end],
                                relation='elementary',
                                nuclearity='_')
        edus.append(new_edu)
        return edus
    def _prepare_token(self, token):
        """Normalise a token before it is fed to the model: map special
        symbols via SYMBOL_MAP and collapse URLs to the '_html_' placeholder."""
        for key, value in self._symbol_map.items():
            token = token.replace(key, value)
        for keyword in ['www', 'http']:
            if keyword in token:
                return '_html_'
        return token
| 38.435644 | 119 | 0.523442 | 3,725 | 0.959557 | 0 | 0 | 0 | 0 | 0 | 0 | 547 | 0.140907 |
4c9986463d2e2e151140a1d7be057768191f4a9b | 963 | py | Python | parser/team26/G26/Instrucciones/DDL/use.py | webdev188/tytus | 847071edb17b218f51bb969d335a8ec093d13f94 | [
"MIT"
] | 35 | 2020-12-07T03:11:43.000Z | 2021-04-15T17:38:16.000Z | parser/team26/G26/Instrucciones/DDL/use.py | webdev188/tytus | 847071edb17b218f51bb969d335a8ec093d13f94 | [
"MIT"
] | 47 | 2020-12-09T01:29:09.000Z | 2021-01-13T05:37:50.000Z | parser/team26/G26/Instrucciones/DDL/use.py | webdev188/tytus | 847071edb17b218f51bb969d335a8ec093d13f94 | [
"MIT"
] | 556 | 2020-12-07T03:13:31.000Z | 2021-06-17T17:41:10.000Z | import sys
sys.path.append('../G26/Instrucciones')
sys.path.append('../G26/Utils')
sys.path.append('../G26/Librerias/storageManager')
from instruccion import *
from Lista import *
from TablaSimbolos import *
from jsonMode import *
class Use(Instruccion):
    """AST node for the ``USE <database>`` statement: selects a database and
    makes sure it is registered in the symbol table."""

    def __init__(self, dbid):
        # Identifier node whose ``column`` attribute holds the database name.
        self.dbid = dbid

    def execute(self, data):
        """Select ``self.dbid`` as the current database.

        :param data: execution context exposing ``databaseSeleccionada`` and
            the ``tablaSimbolos`` symbol-table dict
        :return: success message, or an error string if the database does
            not exist
        """
        databaseList = showDatabases()
        for database in databaseList:
            if self.dbid.column.upper() == database:
                data.databaseSeleccionada = database
                # Register the database in the symbol table on first use.
                # (Previously an empty-string no-op filled the taken branch.)
                if database not in data.tablaSimbolos:
                    data.tablaSimbolos[database] = {'tablas' : {}, 'enum' : {}, 'owner' : 'CURRENT_USER', 'mode' : '1'}
                return 'La database ' + database + ' ha sido seleccionada.'
        return 'Error(???): La database ' + self.dbid.column.upper() + ' no existe.'

    def __repr__(self):
        return str(self.__dict__)
| 32.1 | 119 | 0.5919 | 729 | 0.757009 | 0 | 0 | 0 | 0 | 0 | 0 | 192 | 0.199377 |
4c9b0063f6db3d05efa749cbd3914b0058567581 | 2,337 | py | Python | Mundo 3/ex105.py | erickeloi/ExerciciosTreino | f5ac02f45e2eb27d5a8af87fca1227b5c88f523f | [
"MIT"
] | null | null | null | Mundo 3/ex105.py | erickeloi/ExerciciosTreino | f5ac02f45e2eb27d5a8af87fca1227b5c88f523f | [
"MIT"
] | null | null | null | Mundo 3/ex105.py | erickeloi/ExerciciosTreino | f5ac02f45e2eb27d5a8af87fca1227b5c88f523f | [
"MIT"
] | null | null | null | # Exercício Python 105: Analisando e gerando Dicionários
# Faça um programa que tenha uma função notas() que pode receber várias notas de alunos
# e vai retornar um dicionário com as seguintes informações:
#
# - Quantidade de notas
# - A maior nota
# - A menor nota
# - A média da turma
# - A situação (opcional)
#
# Adicione também as docstrings dessa função para consulta pelo desenvolvedor.
def notas(notas, situacao=True):
    """Analyse a collection of grades and return a summary dictionary.

    :param notas: (required) iterable with the students' grades; must not
        be empty
    :param situacao: (optional, default True) when truthy the returned
        dictionary also contains a subjective rating of the class
    :return: dict with the keys:
        - "Quantidade de Notas": number of grades
        - "Maior Nota": highest grade
        - "Menor Nota": lowest grade
        - "Média da Turma": average grade
        - "Situação" (only when ``situacao`` is truthy):
          "Ruim" (avg < 5), "Regular" (5 <= avg < 7) or "Boa" (avg >= 7)
    :raises ValueError: if ``notas`` is empty (previously this crashed with
        an accidental ZeroDivisionError)
    """
    notas = list(notas)
    if not notas:
        raise ValueError("at least one grade is required")
    media = sum(notas) / len(notas)
    resumo = {
        "Quantidade de Notas": len(notas),
        "Maior Nota": max(notas),
        "Menor Nota": min(notas),
        "Média da Turma": media
    }
    # Truthiness is used instead of `== True` so any truthy flag works
    # (the original silently returned None for truthy non-bool values).
    if situacao:
        if media >= 7:
            resumo["Situação"] = "Boa"
        elif media >= 5:
            resumo["Situação"] = "Regular"
        else:
            resumo["Situação"] = "Ruim"
    return resumo
# Interactive driver: collect grades until the sentinel value 999 is typed.
notas_alunos = list()
while True:
    numero = float(input("Digite as notas dos alunos (999 para parar): "))
    if numero == 999:
        break
    notas_alunos.append(numero)
# Ask (S/N) whether the class rating should be included, re-prompting
# until a valid answer is given; convert the answer to a boolean.
situacao = str(input("Quer Mostrar a Situação das notas ? [S/N]")).strip().upper()[0]
while situacao not in 'SN':
    situacao = str(input("Quer Mostrar a Situação das notas ? [S/N]")).strip().upper()[0]
if situacao == 'S':
    situacao = True
elif situacao == 'N':
    situacao = False
print(notas(notas_alunos, situacao))
| 28.156627 | 113 | 0.621737 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,226 | 0.515776 |
4c9bc3f10c76957588e5e8306a910512d15f845e | 1,410 | py | Python | scraper/storage_spiders/halobuyvn.py | chongiadung/choinho | d2a216fe7a5064d73cdee3e928a7beef7f511fd1 | [
"MIT"
] | null | null | null | scraper/storage_spiders/halobuyvn.py | chongiadung/choinho | d2a216fe7a5064d73cdee3e928a7beef7f511fd1 | [
"MIT"
] | 10 | 2020-02-11T23:34:28.000Z | 2022-03-11T23:16:12.000Z | scraper/storage_spiders/halobuyvn.py | chongiadung/choinho | d2a216fe7a5064d73cdee3e928a7beef7f511fd1 | [
"MIT"
] | 3 | 2018-08-05T14:54:25.000Z | 2021-06-07T01:49:59.000Z | # Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
# XPath selectors used to extract product fields from halobuy.vn pages.
XPATH = {
    'name' : "//div[@class='product-name']/h1",
    'price' : "//div[@class='span8']/div[@class='price-box']/p[@class='special-price']/span[2] | //div[@class='span8']/div[@class='price-box']/span/span[@class='price']",
    'category' : "//div[@class='col-main']/div/ul/li/a | //ul[@class='breadcrumb hidden-phone']/li/a",
    'description' : "//div[@class='box-collateral box-description']",
    'images' : "//div[@class='product-img-box span4']/p//a/@href | //div[@class='more-views']/ul/li/a/@href",
    'canonical' : "//link[@rel='canonical']/@href",
    'base_url' : "",
    'brand' : ""
}
# Spider identity and crawl entry points.
name = 'halobuy.vn'
allowed_domains = ['halobuy.vn']
start_urls = ['http://halobuy.vn/']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = []
# Crawl rules: product-detail URLs go to parse_item; category/listing
# URLs (with pagination) are followed for further links via parse.
rules = [
    Rule(LinkExtractor(allow=['/[a-zA-Z0-9-]+.html'], deny=['/thiet-bi-so','/gia-dung','/me-va-be','/the-thao','/du-lich','/qua-tang','/thiet-bi-cham-soc-suc-khoe','\?p=\d+']), 'parse_item'),
    Rule(LinkExtractor(allow=['/thiet-bi-so','/gia-dung','/me-va-be','/the-thao','/du-lich','/qua-tang','/thiet-bi-cham-soc-suc-khoe','\?p=\d+'], deny=['www\.halobuy\.vn','limit=','dir=','oder=']), 'parse'),
    #Rule(LinkExtractor(), 'parse_item_and_links'),
]
| 52.222222 | 207 | 0.619149 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 997 | 0.707092 |
4c9d0b8cbc12d186cf020ee76baabe0196201161 | 2,741 | py | Python | sdks/python/apache_beam/io/external/generate_sequence.py | ByteFlinger/beam | 21f1b0dab7ccb35f04bf0a0dc908f45c19a5d8c7 | [
"Apache-2.0"
] | null | null | null | sdks/python/apache_beam/io/external/generate_sequence.py | ByteFlinger/beam | 21f1b0dab7ccb35f04bf0a0dc908f45c19a5d8c7 | [
"Apache-2.0"
] | 1 | 2019-06-17T13:16:42.000Z | 2019-06-17T13:16:42.000Z | sdks/python/apache_beam/io/external/generate_sequence.py | ByteFlinger/beam | 21f1b0dab7ccb35f04bf0a0dc908f45c19a5d8c7 | [
"Apache-2.0"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from apache_beam import ExternalTransform
from apache_beam import pvalue
from apache_beam.coders import VarIntCoder
from apache_beam.portability.api.external_transforms_pb2 import ConfigValue
from apache_beam.portability.api.external_transforms_pb2 import ExternalConfigurationPayload
from apache_beam.transforms import ptransform
class GenerateSequence(ptransform.PTransform):
  """
  A PTransform that provides a bounded or unbounded stream of integers.

  The transform is expanded by an external (Java) expansion service: the
  constructor arguments are varint-encoded into an
  ExternalConfigurationPayload and sent to ``expansion_service``, which
  supplies the actual implementation.
  """
  def __init__(self, start, stop=None,
               elements_per_period=None, max_read_time=None,
               expansion_service='localhost:8097'):
    """
    :param start: first integer of the sequence
    :param stop: optional end of the sequence; unbounded stream if None
    :param elements_per_period: optional rate limit for unbounded streams
    :param max_read_time: optional cap on the total read time
    :param expansion_service: address of the external expansion service
    """
    super(GenerateSequence, self).__init__()
    # URN of the registered Java transform this class expands to.
    self._urn = 'beam:external:java:generate_sequence:v1'
    self.start = start
    self.stop = stop
    self.elements_per_period = elements_per_period
    self.max_read_time = max_read_time
    self.expansion_service = expansion_service
  def expand(self, pbegin):
    if not isinstance(pbegin, pvalue.PBegin):
      raise Exception("GenerateSequence must be a root transform")
    # Every argument is transmitted varint-encoded.
    coder = VarIntCoder()
    coder_urn = ['beam:coder:varint:v1']
    args = {
        'start':
            ConfigValue(
                coder_urn=coder_urn,
                payload=coder.encode(self.start))
    }
    # Optional arguments are only included when set.
    # NOTE(review): falsy values (e.g. stop=0) are silently dropped here —
    # confirm that is the intended contract.
    if self.stop:
      args['stop'] = ConfigValue(
          coder_urn=coder_urn,
          payload=coder.encode(self.stop))
    if self.elements_per_period:
      args['elements_per_period'] = ConfigValue(
          coder_urn=coder_urn,
          payload=coder.encode(self.elements_per_period))
    if self.max_read_time:
      args['max_read_time'] = ConfigValue(
          coder_urn=coder_urn,
          payload=coder.encode(self.max_read_time))
    payload = ExternalConfigurationPayload(configuration=args)
    return pbegin.apply(
        ExternalTransform(
            self._urn,
            payload.SerializeToString(),
            self.expansion_service))
| 36.546667 | 92 | 0.725648 | 1,582 | 0.577162 | 0 | 0 | 0 | 0 | 0 | 0 | 1,022 | 0.372857 |
4c9d37382d716b5d2f7ff5351463737b4b1e2fce | 385 | py | Python | config.py | wangyida/voxel-dcgan | def3fc0e5788ef663feb1a37214117b378101da3 | [
"MIT"
] | 1 | 2018-02-01T16:13:39.000Z | 2018-02-01T16:13:39.000Z | config.py | wangyida/voxel-dcgan | def3fc0e5788ef663feb1a37214117b378101da3 | [
"MIT"
] | null | null | null | config.py | wangyida/voxel-dcgan | def3fc0e5788ef663feb1a37214117b378101da3 | [
"MIT"
] | 1 | 2020-09-16T08:29:12.000Z | 2020-09-16T08:29:12.000Z | nz = 512 # noize vector size
# Network architecture parameters.
nsf = 4 # encoded voxel size, scale factor
nvx = 32 # output voxel size
# Training hyper-parameters.
batch_size = 64
learning_rate = 2e-4
# ShapeNetCore v2 binvox datasets: thinned inputs and full target models.
dataset_path_i = "/media/wangyida/D0-P1/database/ShapeNetCore.v2/*/*/*/model_normalized.binvox.thinned"
dataset_path_o = "/media/wangyida/D0-P1/database/ShapeNetCore.v2/*/*/*/model_normalized.binvox"
# Checkpoint file used to save/restore model parameters.
params_path = "params/voxel_dcgan_model.ckpt"
| 42.777778 | 103 | 0.763636 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 267 | 0.693506 |
4ca0d80847d16efc73837554e39a67770a6416fc | 1,971 | py | Python | nailgun/nailgun/test/performance/unit/test_node_group_operations.py | prmtl/fuel-web | 3577169e209596a8e4a95d1c41d2dde099a3945f | [
"Apache-2.0"
] | null | null | null | nailgun/nailgun/test/performance/unit/test_node_group_operations.py | prmtl/fuel-web | 3577169e209596a8e4a95d1c41d2dde099a3945f | [
"Apache-2.0"
] | null | null | null | nailgun/nailgun/test/performance/unit/test_node_group_operations.py | prmtl/fuel-web | 3577169e209596a8e4a95d1c41d2dde099a3945f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from nailgun import consts
from nailgun.test.base import EnvironmentManager
from nailgun.test.performance import base
class NodeGroupOperationsLoadTest(base.BaseUnitLoadTestCase):
    """Unit-level load tests for the node-group collection REST handlers."""
    @classmethod
    def setUpClass(cls):
        """Create a neutron/GRE cluster with one node group and NODES_NUM nodes."""
        super(NodeGroupOperationsLoadTest, cls).setUpClass()
        cls.env = EnvironmentManager(app=cls.app, session=cls.db)
        cls.env.upload_fixtures(cls.fixtures)
        cls.cluster = cls.env.create_cluster(
            api=False,
            net_provider=consts.CLUSTER_NET_PROVIDERS.neutron,
            net_segment_type=consts.NEUTRON_SEGMENT_TYPES.gre,
        )
        cls.group = cls.env.create_node_group()
        cls.env.create_nodes(cls.NODES_NUM, cluster_id=cls.cluster['id'])
    @base.evaluate_unit_performance
    def test_node_group_collection_retrieve(self):
        # Measures GET timing for the node-group collection handler.
        func = functools.partial(
            self.get_handler,
            'NodeGroupCollectionHandler',
        )
        self.check_time_exec(func)
    @base.evaluate_unit_performance
    def test_node_group_collection_create(self):
        # Measures POST timing when creating a new node group.
        # NOTE(review): setUpClass accesses cls.cluster['id'] while this uses
        # self.cluster.id — presumably the cluster object supports both
        # styles; confirm and unify.
        func = functools.partial(
            self.post_handler,
            'NodeGroupCollectionHandler',
            {
                'cluster_id': self.cluster.id,
                'name': 'test_group',
            }
        )
        self.check_time_exec(func)
| 32.311475 | 78 | 0.675292 | 1,198 | 0.607813 | 0 | 0 | 1,119 | 0.567732 | 0 | 0 | 709 | 0.359716 |
4ca2201274eaddfe1362c3f7ce25b8cbc37de3da | 27 | py | Python | db_quick_setup/django/db/backends/sqlite3.py | amezin/django-db-quick-setup | e0c90c8b112b2230b19885e39a92b67b5a7d3819 | [
"BSD-2-Clause"
] | 1 | 2016-05-27T14:25:37.000Z | 2016-05-27T14:25:37.000Z | db_quick_setup/django/db/backends/sqlite3.py | amezin/django-db-quick-setup | e0c90c8b112b2230b19885e39a92b67b5a7d3819 | [
"BSD-2-Clause"
] | null | null | null | db_quick_setup/django/db/backends/sqlite3.py | amezin/django-db-quick-setup | e0c90c8b112b2230b19885e39a92b67b5a7d3819 | [
"BSD-2-Clause"
] | null | null | null | from .dummy import Backend
| 13.5 | 26 | 0.814815 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
4ca24d42104bcea1dfe7cf404fc35427c76c83f0 | 1,256 | py | Python | Solutions/0394.decodeString.py | lyhshang/LeetCode-Solutions | ecd4f193567bf87c9805f5ee871db9a7e1f3e9df | [
"Apache-2.0"
] | null | null | null | Solutions/0394.decodeString.py | lyhshang/LeetCode-Solutions | ecd4f193567bf87c9805f5ee871db9a7e1f3e9df | [
"Apache-2.0"
] | null | null | null | Solutions/0394.decodeString.py | lyhshang/LeetCode-Solutions | ecd4f193567bf87c9805f5ee871db9a7e1f3e9df | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# author:lyh
# datetime:2020/5/28 22:28
"""
394. 字符串解码
给定一个经过编码的字符串,返回它解码后的字符串。
编码规则为: k[encoded_string],表示其中方括号内部的 encoded_string 正好重复 k 次。注意 k 保证为正整数。
你可以认为输入字符串总是有效的;输入字符串中没有额外的空格,且输入的方括号总是符合格式要求的。
此外,你可以认为原始数据不包含数字,所有的数字只表示重复的次数 k ,例如不会出现像 3a 或 2[4] 的输入。
示例:
s = "3[a]2[bc]", 返回 "aaabcbc".
s = "3[a2[c]]", 返回 "accaccacc".
s = "2[abc]3[cd]ef", 返回 "abcabccdcdcdef".
"""
class Solution:
    def decodeString(self, s: str) -> str:
        """Expand an encoded string where ``k[sub]`` means ``sub`` repeated
        ``k`` times; encodings may be nested (e.g. ``3[a2[c]]`` -> ``accaccacc``).
        """
        saved = []        # (multiplier, text-so-far) pushed at each '['
        repeat = 0        # multiplier currently being parsed
        current = ""      # decoded text at the current nesting level
        for ch in s:
            if ch == '[':
                saved.append((repeat, current))
                repeat, current = 0, ""
            elif ch == ']':
                times, prefix = saved.pop()
                current = prefix + current * times
            elif ch.isdigit():
                repeat = repeat * 10 + int(ch)
            else:
                current += ch
        return current
if __name__ == '__main__':
    # Manual smoke test: expected output is "aaabcbc accaccacc".
    print(
        Solution().decodeString("3[a]2[bc]"),
        Solution().decodeString("3[a2[c]]"),
    )
| 23.259259 | 72 | 0.449841 | 714 | 0.453621 | 0 | 0 | 0 | 0 | 0 | 0 | 761 | 0.483482 |
4ca5a9c56553252365bb928f5df0c8cc21a911fd | 5,872 | py | Python | odin/utils/iterator.py | rnt-pmi/odin | 8cfddf04f964393ef30217aa5f4aa61229d7e811 | [
"Apache-2.0"
] | 4 | 2021-01-09T10:46:31.000Z | 2021-12-16T14:38:06.000Z | emd_with_classes/utils/iterator.py | VidyaKamath1089/odin | da03f9a86cb2c66092815e3a57795be2db9150bd | [
"Apache-2.0"
] | null | null | null | emd_with_classes/utils/iterator.py | VidyaKamath1089/odin | da03f9a86cb2c66092815e3a57795be2db9150bd | [
"Apache-2.0"
] | 3 | 2021-01-09T10:46:15.000Z | 2021-05-11T01:33:30.000Z | import os
import glob
import random
from PIL import Image
from matplotlib import pyplot as plt
from ipywidgets import Button, Output, HBox, VBox, Label, BoundedIntText
from IPython.display import Javascript, display
class ImagesLoader:
    """Collects image file paths with a given extension from one directory."""

    def __init__(self, images_path, images_extension):
        # Directory to scan and file extension to match (e.g. ".png").
        self.images_path = images_path
        self.images_extension = images_extension

    def get_images_array(self):
        """Return the paths of all files in ``images_path`` that end with
        ``images_extension``."""
        pattern = "*" + self.images_extension
        return glob.glob(os.path.join(self.images_path, pattern))
class Iterator:
    """Jupyter-notebook widget for stepping through a list of images.

    Renders Previous/Next (and optionally Random) buttons, an index text box
    and an output area, plus a Save button that screenshots the output area
    in the browser via the html2canvas JS library.
    """
    def __init__(self,
                 images,
                 name="iterator",
                 show_name=True,
                 show_axis=False,
                 show_random=True,
                 fig_size=(10, 10),
                 buttons_vertical=False,
                 image_display_function=None
                 ):
        """
        :param images: non-empty list of image paths to iterate over
        :param name: CSS class added to the output area (targeted by Save)
        :param show_name: print the file name above each displayed image
        :param show_axis: keep the matplotlib axes visible
        :param show_random: also show a "Random" navigation button
        :param fig_size: matplotlib figure size for the default renderer
        :param buttons_vertical: stack the controls vertically
        :param image_display_function: optional callable(path, index) that
            replaces the default matplotlib renderer
        """
        if len(images) == 0:
            raise Exception("No images provided")
        self.show_axis = show_axis
        self.name = name
        self.show_name = show_name
        self.show_random = show_random
        self.images = images
        self.max_pos = len(self.images) - 1
        self.pos = 0
        self.fig_size = fig_size
        self.buttons_vertical = buttons_vertical
        if image_display_function is None:
            self.image_display_function = self.__show_image
        else:
            self.image_display_function = image_display_function
        # Previous/Next start disabled at the respective list boundary.
        self.previous_button = self.__create_button("Previous", (self.pos == 0), self.__on_previous_clicked)
        self.next_button = self.__create_button("Next", (self.pos == self.max_pos), self.__on_next_clicked)
        self.save_button = self.__create_button("Save", False, self.__on_save_clicked)
        self.save_function = self.__save_function # save_function
        buttons = [self.previous_button, self.next_button]
        if self.show_random:
            self.random_button = self.__create_button("Random", False, self.__on_random_clicked)
            buttons.append(self.random_button)
        buttons.append(self.save_button)
        # 1-based index box synchronised with the current position.
        label_total = Label(value='/ {}'.format(len(self.images)))
        self.text_index = BoundedIntText(value=1, min=1, max=len(self.images))
        self.text_index.layout.width = '80px'
        self.text_index.layout.height = '35px'
        self.text_index.observe(self.__selected_index)
        self.out = Output()
        self.out.add_class(name)
        if self.buttons_vertical:
            self.all_widgets = HBox(
                children=[VBox(children=[HBox([self.text_index, label_total])] + buttons), self.out])
        else:
            self.all_widgets = VBox(children=[HBox([self.text_index, label_total]), HBox(children=buttons), self.out])
        ## loading js library to perform html screenshots
        j_code = """
                require.config({
                    paths: {
                        html2canvas: "https://html2canvas.hertzen.com/dist/html2canvas.min"
                    }
                });
                """
        display(Javascript(j_code))
    def __create_button(self, description, disabled, function):
        """Build an ipywidgets Button wired to the given click handler."""
        button = Button(description=description)
        button.disabled = disabled
        button.on_click(function)
        return button
    def __show_image(self, image_path, index):
        """Default renderer: show the image with matplotlib."""
        img = Image.open(image_path)
        if self.show_name:
            print(os.path.basename(image_path))
        plt.figure(figsize=self.fig_size)
        if not self.show_axis:
            plt.axis('off')
        plt.imshow(img)
        plt.show()
    def __save_function(self, image_path, index):
        """Trigger a browser-side screenshot of the output area and download
        it as ``<image name>.png`` (uses html2canvas via injected JS)."""
        img_name = os.path.basename(image_path).split('.')[0]
        j_code = """
            require(["html2canvas"], function(html2canvas) {
                var element = $(".p-Widget.jupyter-widgets-output-area.output_wrapper.$it_name$")[0];
                console.log(element);
                html2canvas(element).then(function (canvas) {
                    var myImage = canvas.toDataURL();
                    var a = document.createElement("a");
                    a.href = myImage;
                    a.download = "$img_name$.png";
                    a.click();
                    a.remove();
                });
            });
            """
        j_code = j_code.replace('$it_name$', self.name)
        j_code = j_code.replace('$img_name$', img_name)
        tmp_out = Output()
        with tmp_out:
            display(Javascript(j_code))
        tmp_out.clear_output()
    def __on_next_clicked(self, b):
        self.pos += 1
        self.__perform_action(self.pos, self.max_pos)
    def __on_save_clicked(self, b):
        self.save_function(self.images[self.pos], self.pos)
    def __perform_action(self, index, max_pos):
        """Render the image at ``index`` and sync button/text-box state."""
        self.next_button.disabled = (index == max_pos)
        self.previous_button.disabled = (index == 0)
        with self.out:
            self.out.clear_output()
        with self.out:
            self.image_display_function(self.images[index], index)
        # Temporarily detach the observer so updating the box does not
        # re-trigger __selected_index.
        self.text_index.unobserve(self.__selected_index)
        self.text_index.value = index + 1
        self.text_index.observe(self.__selected_index)
    def __on_previous_clicked(self, b):
        self.pos -= 1
        self.__perform_action(self.pos, self.max_pos)
    def __on_random_clicked(self, b):
        self.pos = random.randint(0, self.max_pos)
        self.__perform_action(self.pos, self.max_pos)
    def __selected_index(self, t):
        """Observer for the index box: jump to the typed (1-based) position."""
        if t['owner'].value is None or t['name'] != 'value':
            return
        self.pos = t['new'] - 1
        self.__perform_action(self.pos, self.max_pos)
    def start_iteration(self):
        """Display the widget and render the current image."""
        if self.max_pos < self.pos:
            print("No available images")
            return
        display(self.all_widgets)
        self.__perform_action(self.pos, self.max_pos)
| 35.804878 | 118 | 0.600136 | 5,650 | 0.962193 | 0 | 0 | 0 | 0 | 0 | 0 | 1,026 | 0.174728 |
4ca69d037973302f62772df73b1764080320eb80 | 1,066 | py | Python | ahye/lib.py | kopf/ahye | 75ab5f3f901feb85a7779365f42e86f76d68083f | [
"Apache-2.0"
] | 2 | 2015-03-29T10:21:36.000Z | 2015-11-14T15:36:42.000Z | ahye/lib.py | kopf/ahye | 75ab5f3f901feb85a7779365f42e86f76d68083f | [
"Apache-2.0"
] | null | null | null | ahye/lib.py | kopf/ahye | 75ab5f3f901feb85a7779365f42e86f76d68083f | [
"Apache-2.0"
] | null | null | null | import magic
import os
import random
import string
from ahye.settings import LOCAL_UPLOADS_DIR
def generate_filename(image_data, detect_extension=True):
    """Generate a unique random 8-character filename for an uploaded image.

    Random candidates are drawn until one does not collide with an existing
    entry in LOCAL_UPLOADS_DIR.

    :param image_data: raw image bytes, used to sniff the extension
    :param detect_extension: when True, derive the extension from the image
        contents (falling back to '.png' if it cannot be detected); when
        False always use '.png'
    :return: filename including extension, e.g. 'aB3dE9xZ.jpg'
    """
    alphanum = string.ascii_letters + string.digits
    retval = ''
    # NOTE(review): the collision check uses the extensionless name; if files
    # are stored with their extension this may miss collisions — verify.
    while not retval or os.path.exists(os.path.join(LOCAL_UPLOADS_DIR, retval)):
        retval = ''.join(random.sample(alphanum, 8))
    if detect_extension:
        # get_file_extension() returns None for unrecognised content, which
        # previously crashed the string concatenation; default to '.png'.
        retval += get_file_extension(image_data) or '.png'
    else:
        retval += '.png'
    return retval
def get_file_extension(image_data):
    """Sniff an image extension from the buffer's magic bytes.

    Uses libmagic's textual description of the data. Returns '.jpg', '.gif'
    or '.png' for recognised formats and (implicitly) None for anything
    else — callers must handle the None case.
    """
    s = magic.from_buffer(image_data)
    if s.startswith('JPEG'):
        return '.jpg'
    elif s.startswith('GIF'):
        return '.gif'
    elif s.startswith('PNG'):
        return '.png'
def guess_file_extension(url):
    """Guess an image extension from a URL (case-insensitive substring
    match). Used by the image mirroring service; defaults to '.jpg'."""
    lowered = url.lower()
    # Order matters: it mirrors the original precedence of the checks.
    for marker, extension in (('.jpg', '.jpg'),
                              ('.jpeg', '.jpg'),
                              ('.gif', '.gif'),
                              ('.png', '.png'),
                              ('.svg', '.svg')):
        if marker in lowered:
            return extension
    return '.jpg'
| 25.380952 | 81 | 0.616323 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 148 | 0.138837 |
4ca6d1e9adcbfc1659eea6898aaed8b50b3a6d86 | 3,650 | py | Python | editUser/lambda_function.py | LUDecomposition/YouTutor-lambda | 8d3e63ff968cef8deae6e8bd725b65614ddfa173 | [
"Apache-2.0"
] | null | null | null | editUser/lambda_function.py | LUDecomposition/YouTutor-lambda | 8d3e63ff968cef8deae6e8bd725b65614ddfa173 | [
"Apache-2.0"
] | null | null | null | editUser/lambda_function.py | LUDecomposition/YouTutor-lambda | 8d3e63ff968cef8deae6e8bd725b65614ddfa173 | [
"Apache-2.0"
] | null | null | null | import json
import boto3
from elasticsearch import Elasticsearch, RequestsHttpConnection
from requests_aws4auth import AWS4Auth
from boto3.dynamodb.conditions import Key
# DynamoDB table holding the user profiles.
user_table = 'user-profile'
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table(user_table)
# Cognito is used only to validate the caller's access token.
cognito = boto3.client('cognito-idp')
region = 'us-east-1'
service = 'es'
# Amazon Elasticsearch Service endpoint hosting the 'tutors' index.
host = 'search-ccfinalsearcht-jdyfz3ale3zufejmvivdts3lea.us-east-1.es.amazonaws.com'
# Sign ES requests with the Lambda execution role's credentials (SigV4).
credentials = boto3.Session().get_credentials()
awsauth = AWS4Auth(credentials.access_key, credentials.secret_key, region, service, session_token=credentials.token)
def lambda_handler(event, context):
    """Register or update a user profile and sync the tutor search index.

    Flow:
      1. Validate the caller's Cognito access token; respond 500 on failure.
      2. If body['isRegister'] is truthy, insert the whole body (minus the
         flag) into the DynamoDB profile table; otherwise build an
         UpdateItem expression from the remaining body fields.
      3. Mirror tutor profiles into the 'tutors' Elasticsearch index:
         upsert when body['tutor'] is truthy, delete the entry otherwise.

    :param event: API Gateway proxy event; requires headers.access_token
        and a JSON string body
    :param context: Lambda context (unused)
    :return: API Gateway proxy response dict with CORS headers
    """
    access_token = event['headers']['access_token']
    try:
        resp = cognito.get_user(
            AccessToken=access_token,
        )
    except:
        # NOTE(review): bare except swallows the real failure reason —
        # consider catching botocore's ClientError specifically.
        return {
            'statusCode': 500,
            'body': json.dumps('Error in your login'),
            "headers": {
                "Content-Type": "application/json",
                "Access-Control-Allow-Origin": "*",
                "Access-Control-Allow-Headers": "*"
            }
        }
    # Flatten the Cognito attribute list into a name -> value dict; the
    # verified email serves as the profile's primary key.
    user = {i['Name']:i['Value'] for i in resp['UserAttributes']}
    user_id = user['email']
    update_expression = 'set '
    expression_dict = {}
    event['body'] = json.loads(event['body'])
    if event['body']['isRegister']:
        # Registration: store every body field except the isRegister flag.
        # NOTE(review): presumably the body contains 'user_id' — confirm,
        # otherwise the put_item lacks the table's key.
        info = {}
        for k in event['body']:
            if k != 'isRegister':
                info[k] = event['body'][k]
        table.put_item(Item = info)
    else:
        # Update: build "set k=:valN, ..." from the remaining body fields.
        for i in enumerate(event['body'].items()):
            idx = i[0]
            k = i[1][0]
            v = i[1][1]
            if k == 'user_id' or k=='isRegister':
                continue
            update = k+'=:val'+str(idx)+", "
            update_expression += update
            expression_dict[":val"+str(idx)] = v
        update_expression = update_expression[:-2] # delete the last ", " in the expression
        response = table.update_item(
            Key={
                'user_id': user_id
            },
            UpdateExpression=update_expression,
            ExpressionAttributeValues=expression_dict,
            ReturnValues="UPDATED_NEW"
        )
    es = Elasticsearch(
        hosts = [{'host': host, 'port': 443}],
        http_auth = awsauth,
        use_ssl = True,
        verify_certs = True,
        connection_class = RequestsHttpConnection
    )
    if event['body']["tutor"]:
        # Tutor profiles are searchable: update the document if it already
        # exists, otherwise index a fresh one.
        if es.exists(index="tutors",id=user_id):
            es.update(index='tutors',doc_type='_doc',id=user_id,
            body={"doc": {"degree":event['body']["degree"],
            "first_name": event['body']['first_name'], "last_name": event['body']['last_name'],
            "tags": event['body']['tags'],"school":event['body']["school"],"major":event['body']["major"]}})
        else:
            es.index(index="tutors",doc_type="_doc",id=user_id,body={
                "degree":event['body']["degree"],
                "tags": event['body']['tags'],
                "school":event['body']["school"],
                "major":event['body']["major"],
                "last_name": event['body']['last_name'],
                "first_name": event['body']['first_name']
            })
    else:
        # The user is not (or no longer) a tutor: drop any stale entry.
        if es.exists(index="tutors",id=user_id):
            es.delete(index="tutors", id=user_id)
    return {
        'statusCode': 200,
        'body': json.dumps("successfully update/register your account"),
        "headers": {
            "Content-Type": "application/json",
            "Access-Control-Allow-Origin": "*",
            "Access-Control-Allow-Headers": "*"
        }
    }
| 35.096154 | 116 | 0.552329 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,028 | 0.281644 |
4ca8d798d7a2b204a20d82a1615b959aa4293d08 | 1,168 | py | Python | pelicanconf.py | fluxoid-org/cyclismo_pelican | 388229ac122576171d925171e2556e839f764f64 | [
"MIT"
] | null | null | null | pelicanconf.py | fluxoid-org/cyclismo_pelican | 388229ac122576171d925171e2556e839f764f64 | [
"MIT"
] | null | null | null | pelicanconf.py | fluxoid-org/cyclismo_pelican | 388229ac122576171d925171e2556e839f764f64 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
PLUGIN_PATHS = ['plugins']
PLUGINS = ['jinja_filters']
AUTHOR = u'Fluxoid Ltd.'
DESCRIPTION = u'Site description'
FOOTER_TEXT = u'Copyright © Fluxoid Ltd. 2017'
SITENAME = u'Cyclismo by Fluxoid Ltd.'
SITEURL = 'http://127.0.0.1:8000'
NAVBAR = {
'title' : u'Cyclismo by Fluxoid Ltd.',
'link': u'/index.html'
}
MENUITEMS = (
('GitHub', 'https://github.com/fluxoid-org/CyclismoProject'),
('About Fluxoid', 'https://www.fluxoid.org'),
('About Cyclismo', '/pages/the-story.html'),
)
PAGE_HEADING = {
'title' : u'Cyclismo',
'subtitle': u'Free and open-source cycling simulator for Android',
'button_text': u'Available on GitHub',
'button_link': u'https://github.com/fluxoid-org/CyclismoProject'
}
PATH = 'content'
TIMEZONE = 'Europe/Paris'
DEFAULT_LANG = u'en'
# Anything tagged with these will be added to the respective area
CAROUSEL_TAG = 'carousel'
FEATURETTE_TAG = 'featurette'
DEFAULT_PAGINATION = False
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
THEME = '../FluxoidOnePageWonder/'
| 24.333333 | 77 | 0.704623 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 797 | 0.682363 |
4ca9e915a4cd09f2cd968664373f53f1b6e4c084 | 838 | py | Python | bloscpack/constants.py | sachk/bloscpack | c37b02eee0c66f7cfa11a2d0f3e1beb6d43064df | [
"MIT"
] | 87 | 2015-01-30T21:16:25.000Z | 2022-03-02T18:52:32.000Z | bloscpack/constants.py | sachk/bloscpack | c37b02eee0c66f7cfa11a2d0f3e1beb6d43064df | [
"MIT"
] | 91 | 2015-02-22T17:54:17.000Z | 2022-01-27T14:23:15.000Z | bloscpack/constants.py | sachk/bloscpack | c37b02eee0c66f7cfa11a2d0f3e1beb6d43064df | [
"MIT"
] | 20 | 2015-02-21T15:07:39.000Z | 2022-03-02T18:52:34.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim :set ft=py:
import blosc
from .compat_util import (OrderedDict,
)
# miscellaneous
FORMAT_VERSION = 3
MAGIC = b'blpk'
EXTENSION = '.blp'
# header lengths
BLOSC_HEADER_LENGTH = 16
BLOSCPACK_HEADER_LENGTH = 32
METADATA_HEADER_LENGTH = 32
# maximum/minimum values
MAX_FORMAT_VERSION = 255
MAX_CHUNKS = (2**63)-1
MAX_META_SIZE = (2**32-1) # uint32 max val
MIN_CLEVEL = 0
MAX_CLEVEL = 9
# lookup table for human readable sizes
SUFFIXES = OrderedDict((
("B", 2**0 ),
("K", 2**10),
("M", 2**20),
("G", 2**30),
("T", 2**40)))
# Codecs available from Blosc
CNAME_AVAIL = blosc.compressor_list()
CNAME_MAPPING = {
0: 'blosclz',
1: 'lz4',
2: 'snappy',
3: 'zlib',
4: 'zstd',
}
| 17.829787 | 43 | 0.577566 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 262 | 0.312649 |
4ca9ec6965d0d2705091310ae77f83d79c68ebb5 | 2,595 | py | Python | nn_interpretability/interpretation/deconv/deconv_partial_reconstruction.py | miquelmn/nn_interpretability | 2b5d2b4102016189743e09f1f3a56f2ecddfde98 | [
"MIT"
] | 41 | 2020-10-13T18:46:32.000Z | 2022-02-21T15:52:50.000Z | nn_interpretability/interpretation/deconv/deconv_partial_reconstruction.py | miquelmn/nn_interpretability | 2b5d2b4102016189743e09f1f3a56f2ecddfde98 | [
"MIT"
] | 4 | 2021-07-11T12:38:03.000Z | 2022-03-08T14:47:38.000Z | nn_interpretability/interpretation/deconv/deconv_partial_reconstruction.py | miquelmn/nn_interpretability | 2b5d2b4102016189743e09f1f3a56f2ecddfde98 | [
"MIT"
] | 7 | 2020-10-21T13:03:16.000Z | 2022-03-07T11:45:00.000Z | import torch
import torch.nn as nn
from torch.nn import Module
from torchvision import transforms
from nn_interpretability.interpretation.deconv.deconv_base import DeconvolutionBase
class DeconvolutionPartialReconstruction(DeconvolutionBase):
"""
Partial Input Reconstruction Deconvolution is a decision-based interpretability method
which aims to partially recreate the input from the output of the model by using only
a single filter in a layer of choice. The procedure is executed for every filter
in the chosen layer.
"""
def __init__(self, model: Module, classes: [str], preprocess: transforms.Compose, layer_number):
"""
:param model: The model the decisions of which needs to be interpreted.
:param classes: A collection of all classes that the given model can classify
:param preprocess: The preprocessing functions that need to be invoked for the model input.
:param layer_number: The number of the convolutional layer for which the procedure should be executed.
For example, 1 for the first CONV layer. 2 for the second CONV layer and so on.
"""
DeconvolutionBase.__init__(self, model, classes, preprocess)
self.layer_number = layer_number
if self.layer_number <= 0:
raise ValueError("Layer number can not be negative!")
def interpret(self, x):
x = self._execute_preprocess(x)
results = []
layer_index = -1
counter = self.layer_number
for i, layer in enumerate(self.layers):
if isinstance(layer, nn.Conv2d):
counter -= 1
if counter == 0:
layer_index = i
break
if layer_index < 0:
raise ValueError("Layer number is not valid!")
filters_count = self.layers[layer_index].weight.size()[0]
for i in range(filters_count):
new_weights = torch.zeros(self.layers[layer_index].weight.size()).to(self.device)
new_weights[i] = self.layers[layer_index].weight[i].clone().to(self.device)
self.transposed_layers[len(self.transposed_layers) - layer_index - 1].weight = torch.nn.Parameter(new_weights).to(self.device)
y, max_pool_indices, prev_size, view_resize = self._execute_model_forward_pass(x)
y = self._execute_transposed_model_forward_pass(y, max_pool_indices, prev_size, view_resize)
y = y.detach().cpu()
y = (y - y.min()) / (y.max() - y.min())
results.append(y)
return results
| 43.25 | 138 | 0.660886 | 2,408 | 0.927938 | 0 | 0 | 0 | 0 | 0 | 0 | 861 | 0.331792 |
4caafefdae30664c014954671a3e827965070da3 | 68 | py | Python | test/files/first_spider.py | mawentao007/reading_grab | a8b64d235d60e5c895e70f59739888f6748d4407 | [
"MIT"
] | null | null | null | test/files/first_spider.py | mawentao007/reading_grab | a8b64d235d60e5c895e70f59739888f6748d4407 | [
"MIT"
] | null | null | null | test/files/first_spider.py | mawentao007/reading_grab | a8b64d235d60e5c895e70f59739888f6748d4407 | [
"MIT"
] | null | null | null | from grab.spider import Spider
class FirstSpider(Spider):
pass
| 13.6 | 30 | 0.764706 | 35 | 0.514706 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
4cab4a8359dd4ce2c56dafb5af2f65badffe704e | 45 | py | Python | vnpy_oracle/__init__.py | noranhe/vnpy_oracle | 73c2ce070f36703e78af752ce8483f8cd87cf9fa | [
"MIT"
] | 2 | 2021-04-06T14:25:35.000Z | 2021-07-10T02:04:59.000Z | vnpy_oracle/__init__.py | noranhe/vnpy_oracle | 73c2ce070f36703e78af752ce8483f8cd87cf9fa | [
"MIT"
] | null | null | null | vnpy_oracle/__init__.py | noranhe/vnpy_oracle | 73c2ce070f36703e78af752ce8483f8cd87cf9fa | [
"MIT"
] | 1 | 2021-04-06T09:47:48.000Z | 2021-04-06T09:47:48.000Z | from .oracle_database import database_manager | 45 | 45 | 0.911111 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
4cad15e70b748bdb4e072d4e8e11d1a0d8e91b07 | 269 | py | Python | irrd/storage/__init__.py | mirceaulinic/irrd | 24cf8812cabe46ea7eaff1c43c9b6a029c30f11c | [
"BSD-2-Clause"
] | null | null | null | irrd/storage/__init__.py | mirceaulinic/irrd | 24cf8812cabe46ea7eaff1c43c9b6a029c30f11c | [
"BSD-2-Clause"
] | null | null | null | irrd/storage/__init__.py | mirceaulinic/irrd | 24cf8812cabe46ea7eaff1c43c9b6a029c30f11c | [
"BSD-2-Clause"
] | null | null | null | import sqlalchemy as sa
import ujson
from sqlalchemy.pool import NullPool
from irrd.conf import get_setting
def get_engine():
return sa.create_engine(
get_setting('database_url'),
poolclass=NullPool,
json_deserializer=ujson.loads,
)
| 17.933333 | 38 | 0.717472 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 14 | 0.052045 |
4cadcda3bac884ab250a8ab928b7b959bbc5ce4c | 333 | py | Python | monitor/nagios_check.py | caoghui/python | ca36be7d47bb8abe0561eef1e364a1edcae05088 | [
"MIT"
] | null | null | null | monitor/nagios_check.py | caoghui/python | ca36be7d47bb8abe0561eef1e364a1edcae05088 | [
"MIT"
] | null | null | null | monitor/nagios_check.py | caoghui/python | ca36be7d47bb8abe0561eef1e364a1edcae05088 | [
"MIT"
] | null | null | null | import sys
import json
import base64
status = sys.argv[1]
if status.lower() == "warnig":
print('Status is WARN')
exit(1)
elif status.lower() == 'critical':
print('Status is CRITICAL')
exit(2)
elif status.lower() == 'unknown':
print('Status is UNKNOWN')
exit(3)
else:
print('Status is OK')
exit(0)
| 15.857143 | 34 | 0.618619 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 96 | 0.288288 |
4caf7b9a3203087c9923cbdada0e045dc5dd66e5 | 15,804 | py | Python | operations_api/v1/modelform/utils.py | Mirantis/python-operations-api | 65cc9bfe04037f2b70d272a33d9729219ecdc116 | [
"Apache-2.0"
] | null | null | null | operations_api/v1/modelform/utils.py | Mirantis/python-operations-api | 65cc9bfe04037f2b70d272a33d9729219ecdc116 | [
"Apache-2.0"
] | null | null | null | operations_api/v1/modelform/utils.py | Mirantis/python-operations-api | 65cc9bfe04037f2b70d272a33d9729219ecdc116 | [
"Apache-2.0"
] | 1 | 2018-10-04T16:46:25.000Z | 2018-10-04T16:46:25.000Z | import crypt
import io
import json
import logging
import re
import requests
import uuid
import yaml
from flask import current_app as app
from base64 import b64encode
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend
from docutils.core import publish_parts
from ipaddress import IPv4Network
from jinja2 import Environment, meta
from pygerrit2 import GerritRestAPI, HTTPBasicAuth
from requests import HTTPError
from os import urandom
from operations_api import exceptions
from operations_api.app import cache
log = logging.getLogger('operations_api')
####################################
# GET CONTEXT FROM REMOTE LOCATION #
####################################
# Custom Jinja2 filters
def subnet(subnet, host_ip):
"""
Create network object and get host by index
Example:
Context
-------
{'my_subnet': '192.168.1.0/24'}
Template
--------
{{ my_subnet|subnet(1) }}
Output
------
192.168.1.1
"""
if not subnet:
return ""
if '/' not in subnet:
subnet = str(subnet) + '/24'
try:
network = IPv4Network(str(subnet))
idx = int(host_ip) - 1
ipaddr = str(list(network.hosts())[idx])
except IndexError:
ipaddr = "Host index is out of range of available addresses"
except Exception:
ipaddr = subnet.split('/')[0]
return ipaddr
def netmask(subnet):
"""
Create network object and get netmask
Example:
Context
-------
{'my_subnet': '192.168.1.0/24'}
Template
--------
{{ my_subnet|netmask }}
Output
------
255.255.255.0
"""
if not subnet:
return ""
if '/' not in subnet:
subnet = str(subnet) + '/24'
try:
network = IPv4Network(str(subnet))
netmask = str(network.netmask)
except Exception:
netmask = "Cannot determine network mask"
return netmask
def generate_password(length):
"""
Generate password of defined length
Example:
Template
--------
{{ 32|generate_password }}
Output
------
Jda0HK9rM4UETFzZllDPbu8i2szzKbMM
"""
chars = "aAbBcCdDeEfFgGhHiIjJkKlLmMnNpPqQrRsStTuUvVwWxXyYzZ1234567890"
return "".join(chars[ord(c) % len(chars)] for c in b64encode(urandom(length)).decode('utf-8'))
def hash_password(password):
"""
Hash password
Example:
Context
-------
{'some_password': 'Jda0HK9rM4UETFzZllDPbu8i2szzKbMM'}
Template
--------
{{ some_password|hash_password }}
Output
------
$2b$12$HXXew12E9mN3NIXv/egSDurU.dshYQRepBoeY.6bfbOOS5IyFVIBa
"""
chars = "aAbBcCdDeEfFgGhHiIjJkKlLmMnNpPqQrRsStTuUvVwWxXyYzZ"
salt_str = "".join(chars[ord(c) % len(chars)] for c in b64encode(urandom(8)).decode('utf-8'))
salt = "$6$%s$" % salt_str
pw_hash = ''
if password:
pw_hash = crypt.crypt(password, salt)
return pw_hash
CUSTOM_FILTERS = [
('subnet', subnet),
('generate_password', generate_password),
('hash_password', hash_password),
('netmask', netmask)
]
def generate_ssh_keypair(seed=None):
if not seed:
private_key_str = ""
public_key_str = ""
else:
private_key_cache = 'private_key_' + str(seed)
public_key_cache = 'public_key_' + str(seed)
cached_private_key = cache.get(private_key_cache)
cached_public_key = cache.get(public_key_cache)
if cached_private_key and cached_public_key:
private_key_str = cached_private_key
public_key_str = cached_public_key
else:
private_key_obj = rsa.generate_private_key(
backend=default_backend(),
public_exponent=65537,
key_size=2048
)
public_key_obj = private_key_obj.public_key()
public_key = public_key_obj.public_bytes(
serialization.Encoding.OpenSSH, serialization.PublicFormat.OpenSSH)
private_key = private_key_obj.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption())
private_key_str = private_key.decode('utf-8')
public_key_str = public_key.decode('utf-8')
cache.set(private_key_cache, private_key_str, 3600)
cache.set(public_key_cache, public_key_str, 3600)
return (private_key_str, public_key_str)
def generate_uuid():
return uuid.uuid4()
CUSTOM_FUNCTIONS = [
('generate_ssh_keypair', generate_ssh_keypair),
('generate_uuid', generate_uuid)
]
DOCUTILS_RENDERER_SETTINGS = {
'initial_header_level': 2,
# important, to have even lone titles stay in the html fragment:
'doctitle_xform': False,
# we also disable the promotion of lone subsection title to a subtitle:
'sectsubtitle_xform': False,
'file_insertion_enabled': False, # SECURITY MEASURE (file hacking)
'raw_enabled': False, # SECURITY MEASURE (script tag)
'report_level': 2, # report warnings and above, by default
}
# Decorators
def requires(attributes):
# check if required attributes are present on object
# instance and have assigned values
# attributes: [string, ...]
def wrap(f):
def wrapped_f(self, *args):
for attr in attributes:
if not getattr(self, attr):
msg = ('Configuration key MODELFORM_{} is '
'required with remote {}').format(attr.upper(), self.remote)
raise exceptions.ImproperlyConfigured(msg)
return f(self, *args)
return wrapped_f
return wrap
# Template Collector
class FormTemplateCollector(object):
'''
TODO: document this class
'''
def __init__(self, *args, **kwargs):
self.url = kwargs.get('url', app.config.get('MODELFORM_URL', None))
self.path = kwargs.get('path', app.config.get('MODELFORM_PATH', None))
self.remote = kwargs.get('remote', app.config.get('MODELFORM_REMOTE', None))
self.username = kwargs.get('username', app.config.get('MODELFORM_USERNAME', None))
self.password = kwargs.get('password', app.config.get('MODELFORM_PASSWORD', None))
self.token = kwargs.get('token', app.config.get('MODELFORM_TOKEN', None))
self.versions = kwargs.get('versions', app.config.get('MODELFORM_VERSIONS', []))
self.project_name = kwargs.get('project_name', app.config.get('MODELFORM_PROJECT_NAME', None))
self.file_name = kwargs.get('file_name', app.config.get('MODELFORM_FILE_NAME', None))
self.version_filter = kwargs.get('version_filter', app.config.get('MODELFORM_VERSION_FILTER', None))
self.version_map = kwargs.get('version_map', app.config.get('MODELFORM_VERSION_MAP', {}))
self.collectors = {
'github': {
'template_collector': self._github_collector,
'version_collector': self._static_version_collector
},
'http': {
'template_collector': self._http_collector,
'version_collector': self._static_version_collector
},
'gerrit': {
'template_collector': self._gerrit_collector,
'version_collector': self._gerrit_version_collector
},
'localfs': {
'template_collector': self._localfs_collector,
'version_collector': self._static_version_collector
}
}
if not self.remote or (self.remote and self.remote not in self.collectors):
collectors = list(self.collectors.keys())
msg = ('Configuration key MODELFORM_REMOTE is '
'required, possible values are: {}').format(', '.join(collectors))
raise exceptions.ImproperlyConfigured(msg)
# GERRIT
def _gerrit_get(self, endpoint_url):
auth = HTTPBasicAuth(self.username, self.password)
rest = GerritRestAPI(url=self.url, auth=auth)
try:
response_body = rest.get(endpoint_url)
except HTTPError as e:
msg = "Failed to get response from Gerrit URL %s: %s" % (endpoint_url, str(e))
log.error(msg)
raise exceptions.HTTPError
return response_body
@requires(['username', 'password', 'url', 'project_name', 'file_name'])
def _gerrit_collector(self, version=None):
cache_key = 'workflow_context'
endpoint_url = '/projects/%s/branches/master/files/%s/content' % (self.project_name, self.file_name)
if version:
versions = self._gerrit_get_versions()
if version in self.version_map.values():
version = [v[0] for v in self.version_map.items() if v[1] == version][0]
revision = versions.get(version)
cache_key = 'workflow_context_%s' % revision
endpoint_url = '/projects/%s/commits/%s/files/%s/content' % (
self.project_name, revision, self.file_name)
cached_ctx = cache.get(cache_key)
if cached_ctx:
return cached_ctx
ctx = self._gerrit_get(endpoint_url)
cache.set(cache_key, ctx, 3600)
return ctx
def _gerrit_get_versions(self):
cache_key = 'workflow_versions_%s_%s' % (self.url, self.project_name)
cached_versions = cache.get(cache_key)
if cached_versions:
return cached_versions
tags_endpoint_url = '/projects/%s/tags/' % self.project_name
master_endpoint_url = '/projects/%s/branches/master/' % self.project_name
tags = self._gerrit_get(tags_endpoint_url)
master = self._gerrit_get(master_endpoint_url)
self.versions = {}
for tag in tags:
key = tag['ref'].replace('refs/tags/', '')
self.versions[key] = tag['revision']
self.versions['master'] = master['revision']
cache.set(cache_key, self.versions, 3600)
return self.versions
def _gerrit_version_collector(self):
versions = self._gerrit_get_versions()
return list(versions.keys())
# GITHUB
@requires(['url', 'token'])
def _github_collector(self, version=None):
session = requests.Session()
cached_ctx = cache.get('workflow_context')
if cached_ctx:
return cached_ctx
session.headers.update({'Accept': 'application/vnd.github.v3.raw'})
session.headers.update({'Authorization': 'token ' + str(self.token)})
response = session.get(self.url)
if response.status_code >= 300:
try:
response_json = json.loads(str(response.text))
response_text = response_json['message']
except Exception:
response_text = response.text
msg = "Could not get remote file from Github:\nSTATUS CODE: %s\nRESPONSE:\n%s" % (
str(response.status_code), response_text)
log.error(msg)
ctx = ""
else:
ctx = response.text
cache.set('workflow_context', ctx, 3600)
return ctx
# HTTP
@requires(['url'])
def _http_collector(self, version=None):
session = requests.Session()
cached_ctx = cache.get('workflow_context')
if cached_ctx:
return cached_ctx
if self.username and self.password:
response = session.get(self.url, auth=(self.username, self.password))
else:
response = session.get(self.url)
if response.status_code >= 300:
msg = "Could not get remote file from HTTP URL %s:\nSTATUS CODE: %s\nRESPONSE:\n%s" % (
self.url, str(response.status_code), response.text)
log.error(msg)
ctx = ""
else:
ctx = response.text
cache.set('workflow_context', ctx, 3600)
return ctx
# LOCALFS
@requires(['path'])
def _localfs_collector(self, version=None):
try:
with io.open(self.path, 'r') as file_handle:
ctx = file_handle.read()
except Exception as e:
msg = "Could not read file %s: %s" % (self.path, repr(e))
log.error(msg)
ctx = ""
return ctx
def _static_version_collector(self):
return self.versions
# PRIVATE
def _collect_template(self, version=None):
if version:
versions = self.list_versions()
if version not in versions:
log.warning('Selected version %s not available, using default. Available versions: %s' % (
version, versions))
version = None
collector = self.collectors.get(self.remote, {}).get('template_collector')
return collector(version)
def _render_doc(self, value, header_level=None, report_level=None):
settings_overrides = DOCUTILS_RENDERER_SETTINGS.copy()
if header_level is not None: # starts from 1
settings_overrides["initial_header_level"] = header_level
if report_level is not None: # starts from 1 too
settings_overrides["report_level"] = report_level
try:
parts = publish_parts(source=value.encode('utf-8'),
writer_name="html4css1",
settings_overrides=settings_overrides)
trimmed_parts = parts['html_body'][23:-8]
except Exception as e:
# return original .rst if HTML rendering failed
trimmed_parts = value
log.exception(e)
return trimmed_parts
def _update_template(self, obj):
""" Traverse rendered template and render all rst documentation into HTML.
"""
if isinstance(obj, dict):
if 'doc' in obj:
obj['doc'] = self._render_doc(obj['doc'])
return {k: self._update_template(v) for k, v in obj.items()}
elif isinstance(obj, list):
return [self._update_template(elem) for elem in obj]
else:
return obj
# PUBLIC
def list_versions(self):
collector = self.collectors.get(self.remote, {}).get('version_collector')
versions = collector()
# filter versions by configured regular expression
if self.version_filter:
regex = re.compile(self.version_filter)
versions = list(filter(regex.search, versions))
# replace version names by names configured in version map
for idx, version in enumerate(versions):
if version in self.version_map:
versions[idx] = self.version_map[version]
return sorted(versions)
def render(self, version=None):
context = {}
env = Environment()
for fltr in CUSTOM_FILTERS:
env.filters[fltr[0]] = fltr[1]
for fnc in CUSTOM_FUNCTIONS:
env.globals[fnc[0]] = fnc[1]
source_context = self._collect_template(version)
tmpl = env.from_string(source_context)
parsed_source = env.parse(source_context)
for key in meta.find_undeclared_variables(parsed_source):
if key not in env.globals:
context[key] = ''
try:
rendered = yaml.load(tmpl.render(context))
self._update_template(rendered)
except Exception as e:
rendered = {}
log.exception(e)
return rendered
| 31.991903 | 108 | 0.606239 | 9,792 | 0.61959 | 0 | 0 | 3,010 | 0.190458 | 0 | 0 | 3,984 | 0.252088 |
4cb09e1fb81ed468b94d37828ac3a20046aaccd1 | 416 | py | Python | Contest/ABC086/d/main.py | mpses/AtCoder | 9c101fcc0a1394754fcf2385af54b05c30a5ae2a | [
"CC0-1.0"
] | null | null | null | Contest/ABC086/d/main.py | mpses/AtCoder | 9c101fcc0a1394754fcf2385af54b05c30a5ae2a | [
"CC0-1.0"
] | null | null | null | Contest/ABC086/d/main.py | mpses/AtCoder | 9c101fcc0a1394754fcf2385af54b05c30a5ae2a | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python3
# 2次元累積和 S の [x1, x2) × [y1, y2) 総和
def ac2(s, x1, x2, y1, y2):
return s[x2][y2] - s[x1][y2] - s[x2][y1] + s[x1][y1]
import numpy as np
_, *d = open(0)
n, k = map(int, _.split())
B = np.zeros((2*k, 2*k))
for e in d:
*z, c = e.split()
x, y = map(int, z)
B[x % (2*k)][(y + k * (z == "W")) % (2*k)] += 1
B.cumsum(axis = 0)
B.cumsum(axis = 1)
B = np.tile(B, (2,2))
print(B)
# 書きかけ | 23.111111 | 56 | 0.485577 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 91 | 0.206349 |
4cb0e7a0a07eb711d71379b5e3128a9be7f0538d | 7,374 | py | Python | prepare_data_file_cluster.py | prise-3d/LSTM-noise-detection | a468e6a142a2baa64bbbaba8469cb452c2f18fe3 | [
"MIT"
] | 2 | 2021-03-15T12:24:28.000Z | 2022-03-01T20:48:19.000Z | prepare_data_file_cluster.py | prise-3d/LSTM-noise-detection | a468e6a142a2baa64bbbaba8469cb452c2f18fe3 | [
"MIT"
] | null | null | null | prepare_data_file_cluster.py | prise-3d/LSTM-noise-detection | a468e6a142a2baa64bbbaba8469cb452c2f18fe3 | [
"MIT"
] | null | null | null | # main imports
import numpy as np
import pandas as pd
import sys, os, argparse
import joblib
# image processing
from PIL import Image
from ipfml import utils
from ipfml.processing import transform, segmentation, compression
# modules and config imports
sys.path.insert(0, '') # trick to enable import of main folder module
import custom_config as cfg
from modules.utils import data as dt
from processing.features_extractions import extract_data
from complexity.run.estimators import estimate, estimators_list
zones_indices = cfg.zones_indices
block_size = (200, 200)
'''
Display progress information as progress bar
'''
def write_progress(progress):
barWidth = 180
output_str = "["
pos = barWidth * progress
for i in range(barWidth):
if i < pos:
output_str = output_str + "="
elif i == pos:
output_str = output_str + ">"
else:
output_str = output_str + " "
output_str = output_str + "] " + str(int(progress * 100.0)) + " %\r"
print(output_str)
sys.stdout.write("\033[F")
def main():
parser = argparse.ArgumentParser(description="Extract data from image dataset")
parser.add_argument('--dataset', type=str, help='folder dataset with all scenes', required=True)
parser.add_argument('--cluster', type=str, help='clustering model to use', required=True)
parser.add_argument('--nclusters', type=int, help='number of clusters', required=True)
parser.add_argument('--estimators', type=str, help='list of estimators', default='l_mean,l_variance')
parser.add_argument('--thresholds', type=str, help='file which contains all thresholds', required=True)
parser.add_argument('--method', type=str, help='method name to used', choices=cfg.features_choices_labels, default=cfg.features_choices_labels[0])
parser.add_argument('--params', type=str, help='param of the method used', default="", required=True)
parser.add_argument('--imnorm', type=int, help="specify if image is normalized before computing something", default=0, choices=[0, 1])
parser.add_argument('--output', type=str, help='output folder name with all clusters files', required=True)
args = parser.parse_args()
p_folder = args.dataset
p_thresholds = args.thresholds
p_cluster = args.cluster
p_nclusters = args.nclusters
p_estimators = [ i.strip() for i in args.estimators.split(',') ]
p_output = args.output
p_method = args.method
p_params = args.params
p_imnorm = args.imnorm
# load cluster model
cluster_model = joblib.load(p_cluster)
# prepare output_file path
p_output_path = os.path.join(cfg.output_data_generated, p_output)
# create output path if not exists
if not os.path.exists(p_output_path):
os.makedirs(os.path.join(p_output_path))
output_files_list = []
for i in range(p_nclusters):
outfile = os.path.join(p_output_path, 'cluster_data_{}.csv'.format(i))
output_files_list.append(outfile)
with open(outfile, 'w') as f:
print('Creation of empty {0} data file'.format(outfile))
# extract all thresholds from threshold file
thresholds = {}
scenes_list = []
zones_list = np.arange(16)
with open(p_thresholds) as f:
thresholds_line = f.readlines()
for line in thresholds_line:
data = line.split(';')
del data[-1] # remove unused last element `\n`
scene = data[0]
thresholds_scene = data[1:]
scenes_list.append(scene)
thresholds[scene] = thresholds_scene
images_path = {}
number_of_images = 0
# get all images path
for scene in scenes_list:
scene_path = os.path.join(p_folder, scene)
images_path[scene] = sorted([os.path.join(scene_path, img) for img in os.listdir(scene_path) if cfg.scene_image_extension in img])
number_of_images = number_of_images + len(images_path[scene])
# construct here dictionnary of associated cluster for each block
clusters_block = {}
for scene in scenes_list:
first_image = images_path[scene][0]
blocks = segmentation.divide_in_blocks(Image.open(first_image), block_size)
clusters_block[scene] = {}
for id_b, block in enumerate(blocks):
# extract data and write into file
x = []
for estimator in p_estimators:
estimated = estimate(estimator, block)
if not isinstance(estimated, np.float64):
for v in estimated:
x.append(v)
else:
x.append(estimated)
# call cluster model
predicted_label = cluster_model.predict([x])[0]
# add label for this specific zone
clusters_block[scene][id_b] = predicted_label
image_counter = 0
# compute entropy for each zones of each scene images
for scene in scenes_list:
image_indices = [ dt.get_scene_image_quality(img_path) for img_path in images_path[scene] ]
blocks_entropy = []
# append empty list
for zone in zones_list:
blocks_entropy.append([])
for img_path in images_path[scene]:
blocks = segmentation.divide_in_blocks(Image.open(img_path), block_size)
for index, block in enumerate(blocks):
# normalize if necessary
if p_imnorm:
block = np.array(block) / 255.
blocks_entropy[index].append(extract_data(block, p_method, p_params))
# write progress bar
write_progress((image_counter + 1) / number_of_images)
image_counter = image_counter + 1
# write data into files
for index, zone in enumerate(zones_list):
# get associated cluster for this zone
cluster_label = clusters_block[scene][index]
with open(output_files_list[cluster_label], 'a') as f:
zone_str = "zone" + str(zone)
if len(zone_str) < 2:
zone_str = '0' + zone_str
f.write(scene + ';')
f.write(str(index) + ';')
f.write(zone_str + ';')
f.write(str(thresholds[scene][index]) + ';')
for index_img, img_quality in enumerate(image_indices):
f.write(str(img_quality))
if index_img + 1 < len(image_indices):
f.write(',')
f.write(';')
for index_b, values in enumerate(blocks_entropy[index]):
# check if single values or multiple
if type(values) is list or (np.ndarray and not np.float64):
for index_v, v in enumerate(values):
f.write(str(v))
if index_v + 1 < len(values):
f.write(' ')
else:
f.write(str(values))
if index_b + 1 < len(blocks_entropy[index]):
f.write(',')
f.write(';\n')
if __name__== "__main__":
main()
| 32.628319 | 150 | 0.597505 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,288 | 0.174668 |
4cb10ffb4a0caa2e9be7fffbd0ef91a1cb12509a | 2,699 | py | Python | applications/CoSimulationApplication/custom_data_structure/pyKratos/TriangleElement.py | lcirrott/Kratos | 8406e73e0ad214c4f89df4e75e9b29d0eb4a47ea | [
"BSD-4-Clause"
] | 2 | 2019-10-25T09:28:10.000Z | 2019-11-21T12:51:46.000Z | applications/CoSimulationApplication/custom_data_structure/pyKratos/TriangleElement.py | lcirrott/Kratos | 8406e73e0ad214c4f89df4e75e9b29d0eb4a47ea | [
"BSD-4-Clause"
] | 13 | 2019-10-07T12:06:51.000Z | 2020-02-18T08:48:33.000Z | applications/CoSimulationApplication/custom_data_structure/pyKratos/TriangleElement.py | lcirrott/Kratos | 8406e73e0ad214c4f89df4e75e9b29d0eb4a47ea | [
"BSD-4-Clause"
] | null | null | null | from __future__ import print_function, absolute_import, division # makes these scripts backward compatible with python 2.6 and 2.7
# pyKratos imports
from .Element import Element
# Other imports
import numpy as np
class TriangleElement(Element):
def __init__(self, elem_id, nodes):
super(TriangleElement, self).__init__(elem_id, nodes)
if(len(self.GetNodes()) != 3):
raise Exception("wrong number of nodes! should be 3!")
for node in self.GetNodes():
if(node.Id < 0):
raise Exception("node with Id smaller than 0 found")
def ShapeFunctions(self, order=1):
'''this function provides the shape function values, derivatives and integration_weight
at the location of the gauss points. Order of integration is controlled
by the optional parameter "order".
N[gauss][i] contains the shape function of node i computed at the position of "gauss"
derivatives[gauss][i,k] contains the derivative of node i, component k at the position of gauss
weights[gauss] includes the integration weights, including the det of the jacobian, to be used
at the gauss point'''
derivatives = []
weights = []
Ncontainer = []
x10 = self.nodes[1].coordinates[0] - self.nodes[0].coordinates[0]
y10 = self.nodes[1].coordinates[1] - self.nodes[0].coordinates[1]
x20 = self.nodes[2].coordinates[0] - self.nodes[0].coordinates[0]
y20 = self.nodes[2].coordinates[1] - self.nodes[0].coordinates[1]
detJ = x10 * y20 - y10 * x20
DN_DX = np.zeros((3, 2), dtype=float)
DN_DX[0, 0] = -y20 + y10
DN_DX[0, 1] = x20 - x10
DN_DX[1, 0] = y20
DN_DX[1, 1] = -x20
DN_DX[2, 0] = -y10
DN_DX[2, 1] = x10
DN_DX /= detJ
if(order == 1): # give back 1 single integration point
one_third = 1.0 / 3.0
Ncontainer = [np.array([one_third, one_third, one_third])]
Area = 0.5 * detJ
weights = [Area]
derivatives = [DN_DX]
elif(order == 2): # gives back 3 integration points
one_sixt = 1.0 / 6.0
two_third = 2.0 / 3.0
Ncontainer.append(np.array([one_sixt, one_sixt, two_third]))
Ncontainer.append(np.array([one_sixt, two_third, one_sixt]))
Ncontainer.append(np.array([two_third, one_sixt, one_sixt]))
weights = [one_sixt * detJ, one_sixt * detJ, one_sixt * detJ]
derivatives = [DN_DX, DN_DX, DN_DX]
else:
raise Exception("integration order not implemented")
return [Ncontainer, derivatives, weights]
| 36.972603 | 131 | 0.609485 | 2,480 | 0.918859 | 0 | 0 | 0 | 0 | 0 | 0 | 817 | 0.302705 |
4cb24a662344c757d394dd28aa505276b9b46ee7 | 971 | py | Python | saleor/graphql/account/dataloaders.py | fairhopeweb/saleor | 9ac6c22652d46ba65a5b894da5f1ba5bec48c019 | [
"CC-BY-4.0"
] | 15,337 | 2015-01-12T02:11:52.000Z | 2021-10-05T19:19:29.000Z | saleor/graphql/account/dataloaders.py | fairhopeweb/saleor | 9ac6c22652d46ba65a5b894da5f1ba5bec48c019 | [
"CC-BY-4.0"
] | 7,486 | 2015-02-11T10:52:13.000Z | 2021-10-06T09:37:15.000Z | saleor/graphql/account/dataloaders.py | aminziadna/saleor | 2e78fb5bcf8b83a6278af02551a104cfa555a1fb | [
"CC-BY-4.0"
] | 5,864 | 2015-01-16T14:52:54.000Z | 2021-10-05T23:01:15.000Z | from collections import defaultdict
from ...account.models import Address, CustomerEvent, User
from ..core.dataloaders import DataLoader
class AddressByIdLoader(DataLoader):
    """Batch-load ``Address`` rows by primary key, preserving request order."""

    context_key = "address_by_id"

    def batch_load(self, keys):
        # in_bulk returns {pk: instance}; map back onto the requested key
        # order, producing None for ids with no matching row.
        by_id = Address.objects.in_bulk(keys)
        return list(map(by_id.get, keys))
class UserByUserIdLoader(DataLoader):
    """Batch-load ``User`` rows by primary key, preserving request order."""

    context_key = "user_by_id"

    def batch_load(self, keys):
        # in_bulk returns {pk: instance}; keep the caller's key order and
        # yield None for ids that do not exist.
        by_id = User.objects.in_bulk(keys)
        return list(map(by_id.get, keys))
class CustomerEventsByUserLoader(DataLoader):
    """Batch-load the list of ``CustomerEvent`` objects for each user id."""

    context_key = "customer_events_by_user"

    def batch_load(self, keys):
        # One query for all requested users, then group the events per
        # user id; users without events get an empty list.
        grouped = {}
        for event in CustomerEvent.objects.filter(user_id__in=keys):
            grouped.setdefault(event.user_id, []).append(event)
        return [grouped.get(user_id, []) for user_id in keys]
| 30.34375 | 72 | 0.725026 | 824 | 0.84861 | 0 | 0 | 0 | 0 | 0 | 0 | 52 | 0.053553 |
4cb2cdedd09079e23a93411498c4e4df1b5bb2ca | 11,770 | py | Python | neurox/data/representations.py | qcri/NeuroX | a56528231f6514412f3703af48effce1404cb069 | [
"BSD-3-Clause"
] | 87 | 2018-12-12T11:58:21.000Z | 2022-03-26T19:19:46.000Z | neurox/data/representations.py | qcri/NeuroX | a56528231f6514412f3703af48effce1404cb069 | [
"BSD-3-Clause"
] | 16 | 2019-07-08T23:45:18.000Z | 2022-03-30T14:46:40.000Z | neurox/data/representations.py | qcri/NeuroX | a56528231f6514412f3703af48effce1404cb069 | [
"BSD-3-Clause"
] | 15 | 2019-02-12T08:52:35.000Z | 2022-03-15T13:13:32.000Z | """Utility functions to manage representations.
This module contains functions that will help in managing extracted
representations, specifically on sub-word based data.
"""
import numpy as np
from tqdm import tqdm
def bpe_get_avg_activations(tokens, activations):
    """Average subword activations into one activation row per word.

    Assumes BPE tokenization where every non-terminal subword ends with
    "@@". Each word's activation is the mean of its subwords' activations.

    .. warning::
        This function is deprecated and will be removed in future versions.

    Parameters
    ----------
    tokens : dict
        Dictionary containing ``source`` (word tokens), ``source_aux``
        (subword tokens) and ``target`` lists, usually the output of
        ``data.loader.load_aux_data``.
    activations : list of numpy.ndarray
        Activations returned from ``loader.load_activations``.

    Returns
    -------
    activations : list of numpy.ndarray
        One aggregated activation row per word of the untokenized text.
    """
    aggregated = []
    neuron_count = activations[0].size(1)
    for sent_idx in range(len(tokens["source_aux"])):
        words = tokens["source"][sent_idx]
        subwords = tokens["source_aux"][sent_idx]
        word_count = len(words)
        merged = np.zeros((word_count, neuron_count))

        # Find the subword index at which every full word ends.
        boundaries = []
        pending = ""
        word_pos = 0
        for sub_idx, piece in enumerate(subwords):
            pending += piece
            if pending == words[word_pos]:
                boundaries.append(sub_idx)
                word_pos += 1
                pending = ""
            else:
                # Not a full word yet: drop the trailing "@@" marker.
                pending = pending[:-2]
        assert len(boundaries) == word_count

        # Average the activation rows belonging to each word.
        start = 0
        for word_idx, end in enumerate(boundaries):
            merged[word_idx, :] = np.average(
                activations[sent_idx][start:end + 1, :], axis=0
            )
            start = end + 1
        aggregated.append(merged)
    return aggregated
def bpe_get_last_activations(tokens, activations, is_brnn=True):
    """Aggregate subword activations by picking boundary subwords per word.

    Assumes BPE tokenization where every non-terminal subword ends with
    "@@". For each word, the forward half of the neurons is taken from the
    word's last subword; if ``is_brnn`` is True, the backward half is taken
    from the word's first subword.

    .. warning::
        This function is deprecated and will be removed in future versions.

    Parameters
    ----------
    tokens : dict
        Dictionary containing ``source`` (word tokens), ``source_aux``
        (subword tokens) and ``target`` lists, usually the output of
        ``data.loader.load_aux_data``.
    activations : list of numpy.ndarray
        Activations returned from ``loader.load_activations``.
    is_brnn : bool, optional
        Whether the model from which activations were extracted was
        bidirectional. Only applies for RNN models.

    Returns
    -------
    activations : list of numpy.ndarray
        One aggregated activation row per word of the untokenized text.
    """
    aggregated = []
    neuron_count = activations[0].size(1)
    # First half of the neurons = forward direction, second half = backward;
    # a unidirectional model uses all neurons as "forward".
    split = int(neuron_count / 2) if is_brnn else neuron_count
    for sent_idx in range(len(tokens["source_aux"])):
        words = tokens["source"][sent_idx]
        subwords = tokens["source_aux"][sent_idx]
        word_count = len(words)
        merged = np.zeros((word_count, neuron_count))

        # Find the subword index at which every full word ends.
        boundaries = []
        pending = ""
        word_pos = 0
        for sub_idx, piece in enumerate(subwords):
            pending += piece
            if pending == words[word_pos]:
                boundaries.append(sub_idx)
                word_pos += 1
                pending = ""
            else:
                # Not a full word yet: drop the trailing "@@" marker.
                pending = pending[:-2]
        assert len(boundaries) == word_count

        start = 0
        for word_idx, end in enumerate(boundaries):
            # Forward neurons: last subword of the word.
            merged[word_idx, :split] = activations[sent_idx][end, :split]
            if is_brnn:
                # Backward neurons: first subword of the word.
                merged[word_idx, split:] = activations[sent_idx][start, split:]
            start = end + 1
        aggregated.append(merged)
    return aggregated
def char_get_avg_activations(tokens, activations):
    """Average character activations into one activation row per word.

    Assumes character-level tokenization where words in the character
    stream are separated by a single "_" character. Each word's activation
    is the mean of its characters' activations.

    .. warning::
        This function is deprecated and will be removed in future versions.

    Parameters
    ----------
    tokens : dict
        Dictionary containing ``source`` (word tokens), ``source_aux``
        (character tokens) and ``target`` lists, usually the output of
        ``data.loader.load_aux_data``.
    activations : list of numpy.ndarray
        Activations returned from ``loader.load_activations``.

    Returns
    -------
    activations : list of numpy.ndarray
        One aggregated activation row per word of the untokenized text.
    """
    aggregated = []
    neuron_count = activations[0].size(1)
    for sent_idx in tqdm(range(len(tokens["source_aux"]))):
        words = tokens["source"][sent_idx]
        chars = tokens["source_aux"][sent_idx]
        word_count = len(words)
        merged = np.zeros((word_count, neuron_count))

        # Index of the last character of each word: word k ends one "_"
        # separator plus len(word) after the previous word's end.
        boundaries = []
        for word in words:
            if boundaries:
                boundaries.append(boundaries[-1] + 1 + len(word))
            else:
                boundaries.append(len(word) - 1)
        if len(boundaries) != word_count:
            print(sent_idx, len(boundaries), word_count)
        assert len(boundaries) == word_count
        assert (
            chars.count("_") + 1 - words.count("_") == word_count
        ), (
            "Number of words dont match! (line: %d, source: %d, aux: %d)\n%s\n%s"
            % (
                sent_idx + 1,
                word_count,
                chars.count("_") + 1,
                " ".join(words),
                " ".join(chars),
            )
        )

        start = 0
        for word_idx, end in enumerate(boundaries):
            merged[word_idx, :] = np.average(
                activations[sent_idx][start:end + 1, :], axis=0
            )
            start = end + 2  # +2 skips the "_" separator character
        aggregated.append(merged)
    return aggregated
def char_get_last_activations(tokens, activations, is_brnn=True):
    """Aggregate character activations by picking boundary characters per word.

    Assumes character-level tokenization where words in the character
    stream are separated by a single "_" character. For each word, the
    forward half of the neurons is taken from the word's last character; if
    ``is_brnn`` is True, the backward half is taken from the position where
    the word's span begins.

    .. warning::
        This function is deprecated and will be removed in future versions.

    Parameters
    ----------
    tokens : dict
        Dictionary containing ``source`` (word tokens), ``source_aux``
        (character tokens) and ``target`` lists, usually the output of
        ``data.loader.load_aux_data``.
    activations : list of numpy.ndarray
        Activations returned from ``loader.load_activations``.
    is_brnn : bool, optional
        Whether the model from which activations were extracted was
        bidirectional. Only applies for RNN models.

    Returns
    -------
    activations : list of numpy.ndarray
        One aggregated activation row per word of the untokenized text.
    """
    aggregated = []
    neuron_count = activations[0].size(1)
    # First half of the neurons = forward direction, second half = backward;
    # a unidirectional model uses all neurons as "forward".
    split = int(neuron_count / 2) if is_brnn else neuron_count
    for sent_idx in tqdm(range(len(tokens["source_aux"]))):
        words = tokens["source"][sent_idx]
        chars = tokens["source_aux"][sent_idx]
        word_count = len(words)
        merged = np.zeros((word_count, neuron_count))

        # Index of the last character of each word: word k ends one "_"
        # separator plus len(word) after the previous word's end.
        boundaries = []
        for word in words:
            if boundaries:
                boundaries.append(boundaries[-1] + 1 + len(word))
            else:
                boundaries.append(len(word) - 1)
        if len(boundaries) != word_count:
            print(sent_idx, len(boundaries), word_count)
        assert len(boundaries) == word_count
        assert (
            chars.count("_") + 1 - words.count("_") == word_count
        ), (
            "Number of words dont match! (line: %d, source: %d, aux: %d)\n%s\n%s"
            % (
                sent_idx + 1,
                word_count,
                chars.count("_") + 1,
                " ".join(words),
                " ".join(chars),
            )
        )

        start = 0
        for word_idx, end in enumerate(boundaries):
            # Forward neurons: last character of the word.
            merged[word_idx, :split] = activations[sent_idx][end, :split]
            if is_brnn:
                # Backward neurons: position `start`.
                # NOTE(review): for all but the first word, `start` is the
                # "_" separator position (boundary + 1, not + 2 as in
                # char_get_avg_activations) — confirm this is intended.
                merged[word_idx, split:] = activations[sent_idx][start, split:]
            start = end + 1
        aggregated.append(merged)
    return aggregated
def sent_get_last_activations(tokens, activations):
    """Return one summary activation vector per sentence.

    Picks the final token's activation row of every sentence, giving a
    summary vector per sentence. This is mostly applicable for RNNs.

    .. note::
        Bidirectionality is currently not handled in the case of BiRNNs.

    Parameters
    ----------
    tokens : dict
        Dictionary containing ``source``, ``source_aux`` and ``target``
        lists, usually the output of ``data.loader.load_aux_data``.
    activations : list of numpy.ndarray
        Activations returned from ``loader.load_activations``.

    Returns
    -------
    activations : list of numpy.ndarray
        One 1-row summary activation per sentence of the original text.
    """
    summaries = []
    neuron_count = activations[0].size(1)
    for sent_idx in tqdm(range(len(tokens["source"]))):
        # Copy the last row of this sentence's activations into a 1 x N array.
        summary = np.zeros((1, neuron_count))
        summary[0, :] = activations[sent_idx][-1, :]
        summaries.append(summary)
    return summaries
| 34.017341 | 96 | 0.615293 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,546 | 0.471198 |
4cb312593753b7aa0dbd1194d0ea52750711b7d9 | 2,298 | py | Python | src/application/model/matkaroute.py | arpejupe/matkanaattori | d255a05baa1f856bc3f0a0254fe8af5c7b0fb91d | [
"BSD-3-Clause"
] | null | null | null | src/application/model/matkaroute.py | arpejupe/matkanaattori | d255a05baa1f856bc3f0a0254fe8af5c7b0fb91d | [
"BSD-3-Clause"
] | null | null | null | src/application/model/matkaroute.py | arpejupe/matkanaattori | d255a05baa1f856bc3f0a0254fe8af5c7b0fb91d | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from urllib import urlencode
from xml.etree import ElementTree
from requests import get
from datetime import datetime
from pytz import timezone
# Base URL of the matka.fi journey-planner API; query parameters are appended.
matka_api = "http://api.matka.fi/?"
# The API reports/expects times in Finnish local time.
matka_api_timezone = timezone("Europe/Helsinki")
# Credentials sent with every API request.
api_user = "matkanaattori"
api_pass = "ties532soa"
class MatkaException(Exception):
    """Raised when the matka.fi routing API fails or reports an error."""
class MatkaRoute(object):
    """A single journey planned through the matka.fi routing API.

    On construction the route is queried immediately and the localized
    departure time of the first result is stored in ``departure_time``.

    Parameters:
      a         -- start point coordinates
      b         -- destination point coordinates
      time      -- timezone-aware datetime of departure/arrival
      walkspeed -- walking speed class: 1, 2, 3, 4 or 5
      timemode  -- "1": `time` is the time of departure,
                   "2": `time` is the time of arrival
      show      -- number of valid routing results to request
    """

    def __init__(self, a, b, time, walkspeed, timemode="2", show="1"):
        self.start_point = a
        self.end_point = b
        # The API expects Finnish local time.
        self.time = time.astimezone(matka_api_timezone)
        self.walkspeed = walkspeed
        self.timemode = timemode
        self.show = show
        self.departure_time = self.getRouteDepartureTime()

    def getRoute(self):
        """Query the routing API and return an XML (event, element) iterator.

        Raises MatkaException if the HTTP request does not succeed.
        """
        params = urlencode({
            "a": self.start_point,
            "b": self.end_point,
            "time": self.time.strftime("%H%M"),
            "date": self.time.strftime("%Y%m%d"),
            "timemode": self.timemode,
            "show": self.show,
            "walkspeed": self.walkspeed,
            "user": api_user,
            "pass": api_pass
        })
        request = get(matka_api + params, stream=True)
        # Fixed: was `request.status_code is 200`, an identity comparison on
        # an int that only happens to work via CPython's small-int caching.
        if request.status_code == 200:
            request.raw.decode_content = True
            return ElementTree.iterparse(request.raw)
        else:
            raise MatkaException("Routing not available")

    def getRouteDepartureTime(self):
        """Return the localized departure time from the routing response.

        Raises MatkaException when the API reports an ERROR element;
        returns None if no DEPARTURE element is present.
        """
        for elem, routeData in self.getRoute():
            if routeData.tag == "ERROR":
                raise MatkaException(routeData.text)
            elif routeData.tag == "DEPARTURE":
                departure_date = routeData.attrib["date"]
                departure_time = routeData.attrib["time"]
                datetimeObject = datetime.strptime(
                    departure_date + departure_time, "%Y%m%d%H%M")
                return matka_api_timezone.localize(datetimeObject)
        return None  # explicit: response contained no DEPARTURE element
if __name__ == '__main__':
    # Smoke test: plan a route between two fixed coordinate points, leaving now.
    route = MatkaRoute("3597369,6784330", "3392009,6686355",
                       datetime.now(matka_api_timezone), "2")
    # Fixed: `print route.departure_time` is Python-2-only syntax (a
    # SyntaxError on Python 3); print(x) behaves the same on both versions
    # for a single argument.
    print(route.departure_time)
| 35.353846 | 99 | 0.627067 | 1,829 | 0.795909 | 0 | 0 | 0 | 0 | 0 | 0 | 503 | 0.218886 |
4cb35be46e8b753fc4c3da524508ad7692d3c234 | 319 | py | Python | numba/__init__.py | teoliphant/numba | a2a05737b306853c86c61ef6620c2cc43cb28c18 | [
"BSD-2-Clause"
] | 3 | 2015-08-28T21:13:58.000Z | 2022-01-21T17:02:14.000Z | numba/__init__.py | teoliphant/numba | a2a05737b306853c86c61ef6620c2cc43cb28c18 | [
"BSD-2-Clause"
] | null | null | null | numba/__init__.py | teoliphant/numba | a2a05737b306853c86c61ef6620c2cc43cb28c18 | [
"BSD-2-Clause"
] | null | null | null | import sys
try:
from . import minivect
except ImportError:
print >>sys.stderr, "Did you forget to update submodule minivect?"
print >>sys.stderr, "Run 'git submodule init' followed by 'git submodule update'"
raise
from . import _numba_types
from ._numba_types import *
__all__ = _numba_types.__all__
| 22.785714 | 85 | 0.733542 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 107 | 0.335423 |
4cb37a738ea8912f45bc6b8e68783253722ed608 | 580 | py | Python | Algo and DSA/LeetCode-Solutions-master/Python/maximize-the-confusion-of-an-exam.py | Sourav692/FAANG-Interview-Preparation | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | [
"Unlicense"
] | 3,269 | 2018-10-12T01:29:40.000Z | 2022-03-31T17:58:41.000Z | Algo and DSA/LeetCode-Solutions-master/Python/maximize-the-confusion-of-an-exam.py | Sourav692/FAANG-Interview-Preparation | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | [
"Unlicense"
] | 53 | 2018-12-16T22:54:20.000Z | 2022-02-25T08:31:20.000Z | Algo and DSA/LeetCode-Solutions-master/Python/maximize-the-confusion-of-an-exam.py | Sourav692/FAANG-Interview-Preparation | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | [
"Unlicense"
] | 1,236 | 2018-10-12T02:51:40.000Z | 2022-03-30T13:30:37.000Z | # Time: O(n)
# Space: O(1)
import collections
class Solution(object):
def maxConsecutiveAnswers(self, answerKey, k):
"""
:type answerKey: str
:type k: int
:rtype: int
"""
result = max_count = 0
count = collections.Counter()
for i in xrange(len(answerKey)):
count[answerKey[i]] += 1
max_count = max(max_count, count[answerKey[i]])
if result-max_count >= k:
count[answerKey[i-result]] -= 1
else:
result += 1
return result
| 24.166667 | 59 | 0.512069 | 529 | 0.912069 | 0 | 0 | 0 | 0 | 0 | 0 | 111 | 0.191379 |
4cb38436ee43de94cce46d68eb49a8de9473c484 | 32 | py | Python | homeassistant/components/eliqonline/__init__.py | domwillcode/home-assistant | f170c80bea70c939c098b5c88320a1c789858958 | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | homeassistant/components/eliqonline/__init__.py | jagadeeshvenkatesh/core | 1bd982668449815fee2105478569f8e4b5670add | [
"Apache-2.0"
] | 31,101 | 2020-03-02T13:00:16.000Z | 2022-03-31T23:57:36.000Z | homeassistant/components/eliqonline/__init__.py | jagadeeshvenkatesh/core | 1bd982668449815fee2105478569f8e4b5670add | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """The eliqonline component."""
| 16 | 31 | 0.6875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 0.96875 |
4cb84140a51272538c65d31e97df90e33e4ff301 | 354 | py | Python | model/fiz_contact.py | ol6a/training | 1eaaf751ff7bc0cf46ad1e32330d988c1a700da1 | [
"Apache-2.0"
] | null | null | null | model/fiz_contact.py | ol6a/training | 1eaaf751ff7bc0cf46ad1e32330d988c1a700da1 | [
"Apache-2.0"
] | null | null | null | model/fiz_contact.py | ol6a/training | 1eaaf751ff7bc0cf46ad1e32330d988c1a700da1 | [
"Apache-2.0"
] | null | null | null | class Fiz_contact:
def __init__(self, lastname, firstname, middlename, email, telephone, password, confirmpassword):
self.lastname=lastname
self.firstname=firstname
self.middlename=middlename
self.email=email
self.telephone=telephone
self.password=password
self.confirmpassword=confirmpassword
| 35.4 | 101 | 0.70904 | 353 | 0.997175 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
4cba3149350ff3b94810297495c09822eb3b9b0f | 1,587 | py | Python | src/zope/app/authentication/browser/loginform.py | zopefoundation/zope.app.authentication | 1100f938aa0e8d9b4d2378ce2534c4a3c0d11c00 | [
"ZPL-2.1"
] | null | null | null | src/zope/app/authentication/browser/loginform.py | zopefoundation/zope.app.authentication | 1100f938aa0e8d9b4d2378ce2534c4a3c0d11c00 | [
"ZPL-2.1"
] | 4 | 2017-05-01T12:56:58.000Z | 2021-01-13T07:35:20.000Z | src/zope/app/authentication/browser/loginform.py | zopefoundation/zope.app.authentication | 1100f938aa0e8d9b4d2378ce2534c4a3c0d11c00 | [
"ZPL-2.1"
] | 1 | 2015-04-03T07:28:05.000Z | 2015-04-03T07:28:05.000Z | ##############################################################################
#
# Copyright (c) 2009 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Login Form
"""
from zope.authentication.interfaces import IUnauthenticatedPrincipal
class LoginForm(object):
    """Mix-in class to implement login form logic.

    Expects ``self.request`` and an ``index()`` template method to be
    supplied by the concrete view class it is mixed into.
    """

    context = None
    request = None
    unauthenticated = None
    camefrom = None

    def __call__(self):
        req = self.request
        self.unauthenticated = IUnauthenticatedPrincipal.providedBy(req.principal)

        target = req.get('camefrom')
        if isinstance(target, list):
            # Since Python 2.6 a request parameter supplied more than once
            # arrives as a list; keep only the first occurrence.
            target = target[0]
        self.camefrom = target

        if not self.unauthenticated and 'SUBMIT' in req:
            # Authenticated by submitting: bounce back to the origin page.
            req.response.redirect(target or '.')
            return ''
        return self.index()  # call template
| 33.0625 | 78 | 0.617517 | 860 | 0.541903 | 0 | 0 | 0 | 0 | 0 | 0 | 838 | 0.52804 |
4cba3608e64aeaa57fb925f5a77219c212e22170 | 23 | py | Python | ncoreparser/constant.py | gszabi15/ncoreparser | cd7856a962ac82e31ae840ada77f6c25b2963919 | [
"Apache-2.0"
] | 10 | 2020-09-03T23:17:33.000Z | 2022-03-05T11:37:19.000Z | ncoreparser/constant.py | gszabi15/ncoreparser | cd7856a962ac82e31ae840ada77f6c25b2963919 | [
"Apache-2.0"
] | 12 | 2020-10-29T14:57:13.000Z | 2022-03-17T00:44:09.000Z | ncoreparser/constant.py | gszabi15/ncoreparser | cd7856a962ac82e31ae840ada77f6c25b2963919 | [
"Apache-2.0"
] | 1 | 2021-11-17T16:32:21.000Z | 2021-11-17T16:32:21.000Z | TORRENTS_PER_PAGE = 25
| 11.5 | 22 | 0.826087 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
4cbb043ced4f02b5698668dfec4c28d61acb742f | 3,320 | py | Python | src/py/icosahedronlib/LinearSubdivisionFilter.py | lbumbolo/ShapeVariationAnalyzer | 976e22cbacc87fb593d92e24cbdbba6c99a64060 | [
"Apache-2.0"
] | 5 | 2018-09-05T19:49:35.000Z | 2022-03-17T16:48:37.000Z | src/py/icosahedronlib/LinearSubdivisionFilter.py | lbumbolo/ShapeVariationAnalyzer | 976e22cbacc87fb593d92e24cbdbba6c99a64060 | [
"Apache-2.0"
] | 19 | 2018-02-15T21:15:53.000Z | 2022-03-29T21:15:53.000Z | src/py/icosahedronlib/LinearSubdivisionFilter.py | lbumbolo/ShapeVariationAnalyzer | 976e22cbacc87fb593d92e24cbdbba6c99a64060 | [
"Apache-2.0"
] | 9 | 2018-02-23T21:17:25.000Z | 2022-03-25T15:23:57.000Z | import vtk
import numpy as np
class LinearSubdivisionFilter:
    """VTK-style filter that linearly subdivides every triangle of a
    vtkPolyData into ``NumberOfSubdivisions`` ** 2 smaller triangles.

    Usage mirrors the VTK pipeline API: SetInputData(), optionally
    SetNumberOfSubdivisions(), Update(), then GetOutput().
    """

    InputData = None
    Output = None
    NumberOfSubdivisions = 1

    def SetInputData(self, polydata):
        self.InputData = polydata

    def GetOutput(self):
        return self.Output

    def SetNumberOfSubdivisions(self, subdivisions):
        self.NumberOfSubdivisions = subdivisions

    def Update(self):
        self.GenerateData()

    def GenerateData(self):
        if not self.InputData:
            return
        mesh = self.InputData
        level = self.NumberOfSubdivisions
        mesh_points = mesh.GetPoints()
        append_filter = vtk.vtkAppendPolyData()

        # Subdivide each cell independently; duplicates are merged later.
        for cell_id in range(mesh.GetNumberOfCells()):
            point_ids = vtk.vtkIdList()
            mesh.GetCellPoints(cell_id, point_ids)
            if point_ids.GetNumberOfIds() != 3:
                raise Exception("Only triangle meshes are supported. Convert your mesh to triangles!", point_ids.GetNumberOfIds())

            # Per-cell polydata holding the interpolated sub-triangles.
            piece = vtk.vtkPolyData()
            piece_points = vtk.vtkPoints()
            piece_cells = vtk.vtkCellArray()

            # Corner points of the original triangle.
            p1 = np.array(mesh_points.GetPoint(point_ids.GetId(0)))
            p2 = np.array(mesh_points.GetPoint(point_ids.GetId(1)))
            p3 = np.array(mesh_points.GetPoint(point_ids.GetId(2)))

            # Step vectors along the two edges leaving p1.
            step12 = (p2 - p1) / level
            step13 = (p3 - p1) / level

            # Lay down the interpolated lattice of points over the triangle.
            for s13 in range(level + 1):
                for s12 in range(level + 1 - s13):
                    pt = p1 + s12 * step12 + s13 * step13
                    piece_points.InsertNextPoint(pt[0], pt[1], pt[2])

            # Connect the lattice points into small triangles.
            id1 = -1
            for s13 in range(level):
                id1 += 1
                for s12 in range(level - s13):
                    id2 = id1 + 1
                    id3 = id1 + level + 1 - s13
                    id4 = id3 + 1
                    tri = vtk.vtkTriangle()
                    tri.GetPointIds().SetId(0, id1)
                    tri.GetPointIds().SetId(1, id2)
                    tri.GetPointIds().SetId(2, id3)
                    piece_cells.InsertNextCell(tri)
                    if s12 < level - s13 - 1:
                        # Second triangle completing the lattice quad.
                        tri = vtk.vtkTriangle()
                        tri.GetPointIds().SetId(0, id2)
                        tri.GetPointIds().SetId(1, id4)
                        tri.GetPointIds().SetId(2, id3)
                        piece_cells.InsertNextCell(tri)
                    id1 += 1

            piece.SetPoints(piece_points)
            piece.SetPolys(piece_cells)
            append_filter.AddInputData(piece)

        # Combine all sub-triangles into one polydata.
        append_filter.Update()
        # Merge the duplicate points shared along sub-triangle edges.
        cleaner = vtk.vtkCleanPolyData()
        cleaner.SetInputData(append_filter.GetOutput())
        cleaner.Update()
        self.Output = cleaner.GetOutput()
4cbb33c2f4e123b773b6ed31e96a7f22c0768349 | 1,310 | py | Python | test/test_tpo_data_dt_os_erx_patient_prescriptions_patient_prescription_dto.py | my-workforce/TMB-SDK | bea9e8dd82240c30f7809b052a4a612202d4e607 | [
"CECILL-B"
] | null | null | null | test/test_tpo_data_dt_os_erx_patient_prescriptions_patient_prescription_dto.py | my-workforce/TMB-SDK | bea9e8dd82240c30f7809b052a4a612202d4e607 | [
"CECILL-B"
] | null | null | null | test/test_tpo_data_dt_os_erx_patient_prescriptions_patient_prescription_dto.py | my-workforce/TMB-SDK | bea9e8dd82240c30f7809b052a4a612202d4e607 | [
"CECILL-B"
] | null | null | null | # coding: utf-8
"""
Transaction Management Bus (TMB) API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: V3.2.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.tpo_data_dt_os_erx_patient_prescriptions_patient_prescription_dto import TpoDataDTOsERXPatientPrescriptionsPatientPrescriptionDTO # noqa: E501
from swagger_client.rest import ApiException
class TestTpoDataDTOsERXPatientPrescriptionsPatientPrescriptionDTO(unittest.TestCase):
"""TpoDataDTOsERXPatientPrescriptionsPatientPrescriptionDTO unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testTpoDataDTOsERXPatientPrescriptionsPatientPrescriptionDTO(self):
"""Test TpoDataDTOsERXPatientPrescriptionsPatientPrescriptionDTO"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.tpo_data_dt_os_erx_patient_prescriptions_patient_prescription_dto.TpoDataDTOsERXPatientPrescriptionsPatientPrescriptionDTO() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 32.75 | 178 | 0.789313 | 667 | 0.50916 | 0 | 0 | 0 | 0 | 0 | 0 | 700 | 0.534351 |
4cbc5d49a4a57adb7a4b7fa33c57c90c98c8a93f | 11,214 | py | Python | pyhsslms/hsslms.py | russhousley/pyhsslms | a9c6a9f5ba61beba16bf95d34d238c90ce05ba0a | [
"Python-2.0",
"OLDAP-2.7"
] | null | null | null | pyhsslms/hsslms.py | russhousley/pyhsslms | a9c6a9f5ba61beba16bf95d34d238c90ce05ba0a | [
"Python-2.0",
"OLDAP-2.7"
] | 1 | 2020-07-14T14:29:09.000Z | 2020-07-14T14:31:06.000Z | pyhsslms/hsslms.py | russhousley/pyhsslms | a9c6a9f5ba61beba16bf95d34d238c90ce05ba0a | [
"Python-2.0",
"OLDAP-2.7"
] | 2 | 2022-01-20T04:14:40.000Z | 2022-03-03T04:08:16.000Z | #!/usr/bin/env python
# hsslms.py
#
# This provides a command line interface for the pyhsslms.py
# implementation of HSS/LMS Hash-based Signatures as defined
# in RFC 8554.
#
#
# Copyright (c) 2020-2021, Vigil Security, LLC
# All rights reserved.
#
# Redistribution and use, with or without modification, are permitted
# provided that the following conditions are met:
#
# (1) Redistributions must retain the above copyright notice, this
# list of conditions, and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# (3) Neither the name of the Vigil Security, LLC nor the names of the
# contributors to this code may be used to endorse or promote any
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) REGARDLESS OF THE
# CAUSE AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
# WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
import os.path
import argparse
import pyhsslms
from .__init__ import __version__ as VERSION
def usage(name):
    """Print the command-line usage summary and exit with status 1.

    Parameters
    ----------
    name : str
        Program path (argv[0]); only its basename is shown in the examples.
    """
    cmd_name = os.path.basename(name)
    help_lines = (
        "commands:",
        cmd_name + " genkey <keyname> [<genparms>]",
        " creates <keyname>.prv and <keyname>.pub",
        " ",
        cmd_name + " sign <keyname> <filename>",
        " updates <keyname>.prv and makes the signature in <filename>.sig",
        " ",
        cmd_name + " verify <keyname> <filename>",
        " verifies the signature in <filename>.sig with <keyname>.pub",
        " ",
        cmd_name + " showprv <keyname>",
        " display <keyname>.prv",
        " ",
        cmd_name + " showpub <keyname>",
        " display <keyname>.pub",
        " ",
        cmd_name + " showsig <filename>",
        " display <filename>.sig",
        " ",
        "optional <genparms> for the genkey command:",
        " -l LEVELS, --levels LEVELS",
        " Number of levels in HSS hierarchy",  # typo fix: was "heirarchy"
        " -s LMS_TYPE, --lms LMS_TYPE",
        " Height of the LMS trees",
        " -w LMOTS_TYPE, --lmots LMOTS_TYPE",
        " Winternitz number",
        " -a HASH_ALG, --alg HASH_ALG",
        " Hash algorithm (sha256 or shake)",
        " ",
        "optional command arguments:",
        " -h, --help",
        " Provides this information",
        " -v, --version",
        " Provides the program version number",  # typo fix: was "Provids"
    )
    for line in help_lines:
        print(line)
    sys.exit(1)
def main():
    """
    Command line interface for pyhsslms.

    The first positional argument selects the operation (key generation,
    signing, verification, or pretty-printing of keys and signatures); the
    remaining arguments are parsed per-command.
    """
    cmds = ['genkey', 'keygen', 'sign', 'verify',
            'showprv', 'showpub', 'showsig',
            '--version', '-v', 'version', '--help', '-h', 'help']
    if len(sys.argv) < 2 or sys.argv[1] not in cmds:
        print("error: first argument must be a command")
        usage(sys.argv[0])
        sys.exit(1)
    if sys.argv[1] == 'help' or '--help' in sys.argv or '-h' in sys.argv:
        usage(sys.argv[0])
        sys.exit(1)
    if sys.argv[1] == 'version' or '--version' in sys.argv or '-v' in sys.argv:
        print(os.path.basename(sys.argv[0]) + " " + VERSION)
        sys.exit(1)
    if sys.argv[1] in ['genkey', 'keygen']:
        if len(sys.argv) < 3:
            print("error: second argument must be a keyname")
            usage(sys.argv[0])
            sys.exit(1)
        keyname = sys.argv[2]
        # Defaults when no <genparms> are given: 2-level HSS hierarchy,
        # SHA-256 with 32-byte digests, tree height 5, Winternitz w=8.
        levels = 2
        lms_type = pyhsslms.lms_sha256_m32_h5
        lmots_type = pyhsslms.lmots_sha256_n32_w8
        if len(sys.argv) > 3:
            parser = argparse.ArgumentParser()
            parser.add_argument('-l', '--levels', dest='levels', default=2,
                type=int, choices=[1, 2, 3, 4, 5, 6, 7, 8],
                metavar='LEVELS', help='Number of levels in HSS hierarchy')
            parser.add_argument('-s', '--lms', dest='lms', default=5,
                type=int, choices=[5, 10, 15, 20, 25],
                metavar='LMS_TYPE', help='Height of the LMS trees')
            parser.add_argument('-w', '--lmots', dest='lmots', default=8,
                type=int, choices=[1, 2, 4, 8],
                metavar='LMOTS_TYPE', help='Winternitz number')
            parser.add_argument('-a', '--alg', dest='alg', default='sha256',
                type=str, choices=['sha256', 'shake'],
                metavar='HASH_ALG', help='Hash algorithm (sha256 or shake)')
            # BUG FIX: --trunc previously used type=str with default='32'
            # while choices were the ints [32, 24].  Any explicit "-t 32" or
            # "-t 24" failed the choices check, and the string default never
            # compared equal to 32 below, silently selecting the 24-byte
            # parameter sets.  Parse it as an int instead.
            parser.add_argument('-t', '--trunc', dest='trunc', default=32,
                type=int, choices=[32, 24],
                metavar='TRUNC', help='Hash algorithm truncation size')
            args = parser.parse_args(sys.argv[3:])
            levels = args.levels
            # Every (alg, trunc, height) / (alg, trunc, winternitz)
            # combination accepted above is exported by pyhsslms under a
            # systematic name, so look the constants up directly instead of
            # enumerating all 36 combinations by hand.
            lms_type = getattr(
                pyhsslms,
                'lms_{0}_m{1}_h{2}'.format(args.alg, args.trunc, args.lms))
            lmots_type = getattr(
                pyhsslms,
                'lmots_{0}_n{1}_w{2}'.format(args.alg, args.trunc, args.lmots))
        pyhsslms.HssLmsPrivateKey.genkey(keyname, levels=levels,
            lms_type=lms_type, lmots_type=lmots_type)
    if sys.argv[1] == 'sign':
        if len(sys.argv) < 3:
            print("error: second argument must be a keyname")
            usage(sys.argv[0])
            sys.exit(1)
        if len(sys.argv) < 4:
            print("error: third argument must be a file name")
            usage(sys.argv[0])
            sys.exit(1)
        keyname = sys.argv[2]
        filename = sys.argv[3]
        print("Signing " + filename + " ...")
        prv = pyhsslms.HssLmsPrivateKey(keyname)
        if prv.signFile(filename):
            print(" ... Success. Signature saved in " + filename + ".sig")
        else:
            print(" ... Failed!")
    if sys.argv[1] == 'verify':
        if len(sys.argv) < 3:
            print("error: second argument must be a keyname")
            usage(sys.argv[0])
            sys.exit(1)
        if len(sys.argv) < 4:
            print("error: third argument must be a file name")
            usage(sys.argv[0])
            sys.exit(1)
        keyname = sys.argv[2]
        filename = sys.argv[3]
        pub = pyhsslms.HssLmsPublicKey(keyname)
        if pub.verifyFile(filename):
            print("Signature in " + filename + ".sig is valid.")
        else:
            print("Signature verification failed!")
    if sys.argv[1] == 'showprv':
        if len(sys.argv) < 3:
            print("error: second argument must be a keyname")
            usage(sys.argv[0])
            sys.exit(1)
        keyname = sys.argv[2]
        prv = pyhsslms.HssLmsPrivateKey(keyname)
        print("Private Key: " + keyname + ".prv")
        print(prv.hss_prv.prettyPrint())
    if sys.argv[1] == 'showpub':
        if len(sys.argv) < 3:
            print("error: second argument must be a keyname")
            usage(sys.argv[0])
            sys.exit(1)
        keyname = sys.argv[2]
        pub = pyhsslms.HssLmsPublicKey(keyname)
        print("Public Key: " + keyname + ".pub")
        print(pub.hss_pub.prettyPrint())
    if sys.argv[1] == 'showsig':
        if len(sys.argv) < 3:
            print("error: second argument must be a file name")
            usage(sys.argv[0])
            sys.exit(1)
        filename = sys.argv[2]
        sig = pyhsslms.HssLmsSignature(filename)
        print("Signature: " + filename + ".sig")
        print(sig.hss_sig.prettyPrint())
| 43.130769 | 82 | 0.57883 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,996 | 0.35634 |
4cbe3e42d77987567a49f38ef020ff5814e45f81 | 5,273 | py | Python | emoji/spec_parser.py | capnfabs/emoji-haiku | cacf011424a9d15b8cf6f17b2b815a85cf2b97f2 | [
"Apache-2.0"
] | 4 | 2017-04-16T01:07:31.000Z | 2020-05-02T18:29:45.000Z | emoji/spec_parser.py | capnfabs/emoji-haiku | cacf011424a9d15b8cf6f17b2b815a85cf2b97f2 | [
"Apache-2.0"
] | null | null | null | emoji/spec_parser.py | capnfabs/emoji-haiku | cacf011424a9d15b8cf6f17b2b815a85cf2b97f2 | [
"Apache-2.0"
] | null | null | null | """Methods for parsing the unicode spec, and retrieving a list of Emoji and Modifiers.
Note that the model of 'Emoji' here isn't sufficiently general to represent everything in the spec -
a visual / user-facing emoji could be, for example, a super complicated Zero-Width-Join sequence. I
wanted to go in favor of ease-of-use instead of comprehensiveness here, though, so there are some
emoji that aren't represented.
An important part of this module is emoji_unicode_11_manual_supplement.py. This is a manual
interpretation of a lot of the data in the emoji-zwj-sequences.txt file, based on my reading of The
Spec.
"""
import os
from collections import defaultdict
from typing import Dict, Iterable, List, NamedTuple, Optional, Set, Tuple
import emoji.emoji_unicode_11_manual_supplement as supplement
from emoji.core import Emoji, GenderMode, Modifier
class EmojiData(NamedTuple):
    """Result bundle returned by ``load_emoji_and_modifiers``."""
    # Base emoji parsed from the data file (one extra entry per gendered
    # variant where object-format gendering applies).
    emojis: List[Emoji]
    # Skin-tone modifier characters; each is a single-character string
    # (see the ``chr(k)`` append in ``load_emoji_and_modifiers``).
    modifiers: List[Modifier]
# A Unicode code point, as defined by the Unicode spec.  This alias is just
# ``int``; it exists purely to make signatures such as
# ``Dict[_CodePoint, _CodePointInfo]`` self-documenting.
_CodePoint = int
class _CodePointInfo(NamedTuple):
classes: Set[str]
comments: Set[str]
def _make_cpi() -> _CodePointInfo:
    """Factory for ``defaultdict``: a fresh, empty ``_CodePointInfo``."""
    return _CodePointInfo(classes=set(), comments=set())
def _load_codepoints(data_directory: str) -> Dict[_CodePoint, _CodePointInfo]:
    """Accumulate, per code point, the classes and comments found in the
    unicode data file under ``data_directory``."""
    result: Dict[_CodePoint, _CodePointInfo] = defaultdict(_make_cpi)
    for codepoint_or_range, codepoint_class, comment in _scan_codepoints_file(data_directory):
        if '..' in codepoint_or_range:
            first, last = codepoint_or_range.split('..')
        else:
            first = last = codepoint_or_range
        low = int(first, base=16)
        high = int(last, base=16)
        # Ranges in the data file are inclusive at both ends.
        for codepoint in range(low, high + 1):
            info = result[codepoint]
            info.classes.add(codepoint_class)
            if comment:
                info.comments.add(comment)
    return result
def _scan_codepoints_file(data_directory: str) -> Iterable[Tuple[str, str, Optional[str]]]:
    """Yield one tuple per data line of ``emoji-data.txt``:

    - the codepoint or codepoint range (e.g. "2139" or "2194..2199"),
    - the unicode class,
    - the trailing comment on the line, if any (handy for debugging).
    """
    path = os.path.join(data_directory, 'emoji-data.txt')
    # The file mixes ';'-separated fields with '#' comments, so a hand-rolled
    # scan is simpler than pressing the csv module into service.
    with open(path, 'r') as file:
        for raw_line in file:
            data, comment = _remove_comment(raw_line)
            if not data:
                # Comment-only line; nothing to report.
                continue
            codepoint_or_range, unicode_class = (field.strip() for field in data.split(';'))
            yield codepoint_or_range, unicode_class, comment
def _remove_comment(line: str) -> Tuple[str, Optional[str]]:
"""Returns: [data-part of line] [comment]"""
vals = line.split('#', maxsplit=1)
if len(vals) == 1:
# There is no comment if there is one element
return vals[0].strip(), None
else:
return vals[0].strip(), vals[1].strip()
def _get_gender_mode(codepoint: _CodePoint) -> GenderMode:
    """Look up how (if at all) this code point supports gendered variants,
    per the manual supplement tables."""
    if codepoint in supplement.SUPPORTS_OBJECT_FORMAT_GENDERING:
        return GenderMode.OBJECT_FORMAT
    if codepoint in supplement.SUPPORTS_SIGN_FORMAT_GENDERING:
        return GenderMode.SIGN_FORMAT
    return GenderMode.NONE
def load_emoji_and_modifiers() -> EmojiData:
    """Parse the data source and return every Emoji plus every Modifier."""
    emojis: List[Emoji] = []
    modifiers: List[Modifier] = []
    for codepoint, info in _load_codepoints('datasources/emoji-unicode-11/').items():
        # An emoji proper: carries the 'Emoji' class but is not a mere
        # component (equivalent to the original set-intersection test).
        is_emoji = 'Emoji' in info.classes and 'Emoji_Component' not in info.classes
        if is_emoji:
            modifiable = 'Emoji_Modifier_Base' in info.classes
            defaults_to_text = 'Emoji_Presentation' not in info.classes
            gender_mode = _get_gender_mode(codepoint)
            if gender_mode == GenderMode.OBJECT_FORMAT:
                # The non-gendered case has a different meaning from the
                # gendered cases, so emit both a GenderMode.NONE entry and an
                # OBJECT_FORMAT entry.  Per manual inspection of the spec,
                # the gendered cases are always modifiable.
                emojis.append(Emoji(codepoint, defaults_to_text, modifiable, GenderMode.NONE))
                emojis.append(Emoji(codepoint, defaults_to_text, True, GenderMode.OBJECT_FORMAT))
            else:
                emojis.append(Emoji(codepoint, defaults_to_text, modifiable, gender_mode))
        elif 'Emoji' in info.classes and 'Emoji_Modifier' in info.classes:
            # A skin-tone modifier: stored as its single character.
            modifiers.append(chr(codepoint))
        # Anything else is deliberately ignored.
    return EmojiData(emojis, modifiers)
4cc29d6c2c7572e3d56f021b79147757d89d9e20 | 4,212 | py | Python | setup.py | aricci10/superpose3d | 36844b156d27d0f3c4a50757fe48f7a4d903f85e | [
"MIT"
] | null | null | null | setup.py | aricci10/superpose3d | 36844b156d27d0f3c4a50757fe48f7a4d903f85e | [
"MIT"
] | null | null | null | setup.py | aricci10/superpose3d | 36844b156d27d0f3c4a50757fe48f7a4d903f85e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from setuptools import setup
setup(
name='superpose3d',
packages=['superpose3d'],
description='Diamond\'s 1988 rotational superposition algorithm (+scale tranforms)',
long_description='''Register 3-D point clouds using rotation, translation, and scale transformations.
## Usage
```
def Superpose3D(X, # <-- Nx3 array of coords for the "frozen" point cloud
x, # <-- Nx3 array of coords for the "mobile" point cloud
# ---- optional arguments: ----
w = None, # optional weights for the calculation of RMSD
allow_rescale=False, # attempt to rescale mobile point cloud?
report_quaternion=False) # report rotation angle and axis?
```
Superpose3D() takes two ordered lists (or numpy arrays) of xyz coordinates
(*of the same length*, **N**) representing points in a point cloud
(**X** and **x**). Treating them as rigid objects,
"Superpose3D()" attempts to superimpose them using **rotations**,
**translations**, and (optionally) **scale** transformations in order
to minimize the root-mean-squared-distance (RMSD) between corresponding
points from either point cloud, where RMSD is defined as:
```
RMSD = sqrt( (Σ_n w[n] * Σ_i |X[n][i] - (Σ_j c*R[i][j]*x[n][j] + T[i])|^2) / (Σ_n w[n]) )
```
If *w=None*, equal weights are used. In that case:
```
RMSD = sqrt( (Σ_n Σ_i |X[n][i] - (Σ_j c*R[i][j]*x[n][j] + T[i])|^2) / N )
```
...where:
```
R = a rotation matrix (a 3x3 numpy array representing the rotation. |R|=1)
T = a translation vector (a 1-D numpy array containing x,y,z displacements)
c = a scalar (a number, 1 by default)
```
This function returns a 4-tuple containing the optimal values of:
```
(RMSD, R, T, c)
```
If the rotation angle and axis are needed, then set the *report_quaternion*
argument to *True*. In that case, the function will return this 4-tuple instead:
```
(RMSD, q, T, c)
```
...where *q* is the
[quaternion corresponding to rotation *R*](https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation),
from which the rotation angle and rotation axis can be easily determined.
This function implements a more general variant of the method from this paper:
R. Diamond, (1988)
"A Note on the Rotational Superposition Problem",
Acta Cryst. A44, pp. 211-216.
This version has been augmented slightly to support scale transformations. (I.E. multiplication by scalars. This can be useful for the registration of two different annotated volumetric 3-D images of the same object taken at different magnifications.)
Note that if you enable scale transformations (i.e. if *allow_rescale=True*), you should be wary if the function returns a negative **c** value. Negative **c** values correspond to inversions (reflections). For this reason, if you are using this function to compare the conformations of molecules, you should probably set *allow_rescale=False*. This will prevent matching a molecule with its stereoisomer.
Note: A C++ version of this repository is available at
https://github.com/jewettaij/superpose3d_cpp
''',
long_description_content_type='text/markdown',
author='Andrew Jewett',
author_email='jewett.aij@gmail.com',
url='https://github.com/jewettaij/superpose3d',
download_url='https://github.com/jewettaij/superpose3d/archive/v1.0.1.zip',
version='1.0.1',
install_requires=[
'numpy',
],
keywords=['registration', '3d', 'structure-comparison', 'molecular-structure',
'clem'],
license='MIT',
classifiers=['Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
'Environment :: Console',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.8',
'Topic :: Scientific/Engineering'],
zip_safe=True,
include_package_data=True
)
| 39.364486 | 408 | 0.66548 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,684 | 0.873193 |
4cc2cc43040196bd3c73760172314b2b65f1c12f | 602 | py | Python | project/server/main/views.py | jkassel/cerebro | 387cdde4e5b95ca30b14d05526bc6357e5cfd418 | [
"MIT"
] | null | null | null | project/server/main/views.py | jkassel/cerebro | 387cdde4e5b95ca30b14d05526bc6357e5cfd418 | [
"MIT"
] | null | null | null | project/server/main/views.py | jkassel/cerebro | 387cdde4e5b95ca30b14d05526bc6357e5cfd418 | [
"MIT"
] | null | null | null | # project/server/main/views.py
import os
#################
#### imports ####
#################
from flask import render_template, Blueprint
from project.server import app
################
#### config ####
################
main_blueprint = Blueprint('main', __name__,)
################
#### routes ####
################
@main_blueprint.route('/')
def home():
    """Render the home page, exposing the APP_SETTINGS config value as
    the template's ``environment`` variable."""
    # Read from app config rather than os.environ so test overrides apply;
    # the old commented-out os.environ lookup has been removed.
    env = app.config.get('APP_SETTINGS')
    return render_template('main/home.html', environment=env)
@main_blueprint.route("/about/")
def about():
return render_template("main/about.html")
| 17.705882 | 61 | 0.566445 | 0 | 0 | 0 | 0 | 270 | 0.448505 | 0 | 0 | 275 | 0.456811 |
4cc36da902f9ddb43d573a13ddbf89c3ff2bf7a5 | 5,598 | py | Python | src/dircifrar/main.py | ctchou/dircifrar | 6bfd0916613c8a4e4ff5058969824f59102fa939 | [
"MIT"
] | 1 | 2021-08-28T20:09:15.000Z | 2021-08-28T20:09:15.000Z | src/dircifrar/main.py | ctchou/dircifrar | 6bfd0916613c8a4e4ff5058969824f59102fa939 | [
"MIT"
] | null | null | null | src/dircifrar/main.py | ctchou/dircifrar | 6bfd0916613c8a4e4ff5058969824f59102fa939 | [
"MIT"
] | null | null | null |
from .__init__ import (
__pkg_name__,
__pkg_version__,
__pkg_description__,
)
import sys
# Fail fast on unsupported interpreters.
# NOTE(review): the message below is built with an f-string (itself 3.6+
# syntax), so on Python < 3.6 the module raises SyntaxError before this guard
# can run; use %-formatting here if the friendly message is meant to be
# reachable — TODO confirm intended behavior.
if sys.version_info < (3, 6):
    sys.stdout.write(f"Sorry, {__pkg_name__} requires Python 3.6 or above\n")
    sys.exit(1)
from .dirconfig import (
init_config,
crypt_change_password,
crypt_rebuild_meta,
)
from .dirsync import DirSync
from .watchsync import WatchSync
import argparse
import logging
def make_logger(fmt):
    """Return the package logger, attaching a stdout handler exactly once."""
    logger = logging.getLogger(__pkg_name__)
    logger.setLevel(logging.WARNING)
    if logger.handlers:
        # Handler already installed on a previous call: reuse as-is.
        return logger
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setFormatter(logging.Formatter(fmt))
    logger.addHandler(stream_handler)
    return logger
def dirsync(command, prog, argv):
    """Parse arguments for 'push'/'pull' and run a one-shot synchronization."""
    arg_parser = argparse.ArgumentParser(
        prog=prog,
        description="""
        Synchronize two directories via push or pull
        push: copy local_dir to remote_dir
        pull: copy remote_dir to local_dir
        """,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    arg_parser.add_argument('local_dir',
                            help='local directory (unencrypted)')
    arg_parser.add_argument('remote_dir',
                            help='remote directory (encrypted or unencrypted)')
    arg_parser.add_argument('-v', '--verbose', action='store_true', default=False,
                            help='verbose output')
    arg_parser.add_argument('-d', '--diffonly', action='store_true', default=False,
                            help='only compute diffs between local_dir and remote_dir')
    options = arg_parser.parse_args(argv)
    logger = make_logger('%(message)s')
    wants_info = options.verbose or options.diffonly
    if wants_info:
        logger.setLevel(logging.INFO)
    syncer = DirSync(logger, **vars(options))
    syncer.sync(command)
def dirwatch(command, prog, argv):
    """Parse arguments for 'watch-push'/'watch-pull' and keep two
    directories continuously synchronized."""
    arg_parser = argparse.ArgumentParser(
        prog=prog,
        description="""
        Keep two directories synchronized via push or pull
        watch-push: watch local_dir and copy it to remote_dir whenever local_dir changes
        watch-pull: watch remote_dir and copy it to local_dir whenever remote_dir changes
        """,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    arg_parser.add_argument('local_dir',
                            help='local directory (unencrypted)')
    arg_parser.add_argument('remote_dir',
                            help='remote directory (encrypted or unencrypted)')
    arg_parser.add_argument('-v', '--verbose', action='store_true', default=False,
                            help='verbose output')
    arg_parser.add_argument('-d', '--diffonly', action='store_true', default=False,
                            help='only compute diffs between local_dir and remote_dir')
    arg_parser.add_argument('-s', '--settle', type=float, default=0.2,
                            help='Seconds to wait for changes to settle before synchronizing')
    options = arg_parser.parse_args(argv)
    # Timestamped log format, since events arrive over time while watching.
    logger = make_logger('%(asctime)s %(message)s')
    wants_info = options.verbose or options.diffonly
    if wants_info:
        logger.setLevel(logging.INFO)
    WatchSync(logger, command, **vars(options))
def dirinit(command, prog, argv):
    """Initialize a directory as an encrypted or unencrypted sync target."""
    arg_parser = argparse.ArgumentParser(
        prog=prog,
        description="""
        init-plain: Initialize an unencrypted directory
        init-crypt: Initialize an encrypted directory
        """,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    arg_parser.add_argument('dir_path',
                            help='directory path')
    arg_parser.add_argument('-o', '--overwrite', action='store_true', default=False,
                            help='Overwrite config file if it already exists')
    arg_parser.add_argument('-x', '--exclude', action='append', default=[],
                            help='filename pattern to exclude (there may be multiple such patterns)')
    options = arg_parser.parse_args(argv)
    if command == 'init-crypt':
        dir_type = 'crypt'
    else:
        dir_type = 'plain'
    init_config(dir_type, **vars(options))
def dirmod(command, prog, argv):
    """Modify an existing encrypted directory: change its password or
    rebuild its meta info."""
    arg_parser = argparse.ArgumentParser(
        prog=prog,
        description="""
        change-password: Change the password of an encrypted directory
        rebuild-meta: Rebuild the meta info of an encrypted directory
        """,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    arg_parser.add_argument('dir_path',
                            help='directory path')
    options = arg_parser.parse_args(argv)
    handlers = {
        'change-password': crypt_change_password,
        'rebuild-meta': crypt_rebuild_meta,
    }
    handler = handlers.get(command)
    if handler is not None:
        handler(**vars(options))
def main():
    """Top-level CLI entry point: parse the command word and dispatch to the
    matching sub-command handler."""
    parser = argparse.ArgumentParser(
        usage=f"{__pkg_name__} command [<args>]",
        description=f"""
        {__pkg_description__}
        Version: {__pkg_version__}
        """,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('command',
                        choices=[
                            'push', 'pull',
                            'watch-push', 'watch-pull',
                            'init-plain', 'init-crypt',
                            'change-password', 'rebuild-meta',
                        ],
                        help='command')
    # Only argv[1] is parsed here; the rest goes to the sub-command parser.
    command = parser.parse_args(sys.argv[1:2]).command
    prog = f"{__pkg_name__} {command}"
    argv = sys.argv[2:]
    dispatch = {
        'push': dirsync, 'pull': dirsync,
        'watch-push': dirwatch, 'watch-pull': dirwatch,
        'init-plain': dirinit, 'init-crypt': dirinit,
        'change-password': dirmod, 'rebuild-meta': dirmod,
    }
    handler = dispatch.get(command)
    if handler is None:
        # Unreachable in practice: argparse 'choices' already rejects
        # anything not in the dispatch table.
        sys.stdout.write(f"Invalid command: {command}\n")
        sys.exit(1)
    handler(command, prog, argv)
| 38.606897 | 97 | 0.634155 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,863 | 0.332797 |
4cc491a284af54f47be381a4be2778e24923e8cc | 129 | py | Python | jumpscale/packages/threebot_deployer/models/backup_tokens.py | zaibon/js-sdk | cd1d26f2c3343884c1927ceef7c1e12e3f7da905 | [
"Apache-2.0"
] | 13 | 2020-09-02T09:05:08.000Z | 2022-03-12T02:43:24.000Z | jumpscale/packages/threebot_deployer/models/backup_tokens.py | zaibon/js-sdk | cd1d26f2c3343884c1927ceef7c1e12e3f7da905 | [
"Apache-2.0"
] | 1,998 | 2020-06-15T11:46:10.000Z | 2022-03-24T22:12:41.000Z | jumpscale/packages/threebot_deployer/models/backup_tokens.py | zaibon/js-sdk | cd1d26f2c3343884c1927ceef7c1e12e3f7da905 | [
"Apache-2.0"
] | 8 | 2020-09-29T06:50:35.000Z | 2021-06-14T03:30:52.000Z | from jumpscale.core.base import Base, fields
class BackupTokens(Base):
    """Stored credential pair for the threebot backup feature."""
    # Threebot name the token belongs to — presumably the 3Bot owner's name;
    # TODO confirm against the deployer's backup flow.
    tname = fields.String()
    # Opaque backup token string.
    token = fields.String()
| 18.428571 | 44 | 0.713178 | 81 | 0.627907 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
4cc65d6319b85c1557e7a1f68dfdd66a75070baf | 3,511 | py | Python | mlexpt/data/dataload.py | stephenhky/ml-experiment | 2e0bd7945c3f9caed6dcecc1bdc49dbeec24d6ad | [
"MIT"
] | 4 | 2020-04-28T09:26:59.000Z | 2021-10-05T08:29:18.000Z | mlexpt/data/dataload.py | stephenhky/ml-experiment | 2e0bd7945c3f9caed6dcecc1bdc49dbeec24d6ad | [
"MIT"
] | null | null | null | mlexpt/data/dataload.py | stephenhky/ml-experiment | 2e0bd7945c3f9caed6dcecc1bdc49dbeec24d6ad | [
"MIT"
] | null | null | null |
import os
import tempfile
from glob import glob
import json
from collections import OrderedDict
import numpy as np
from .adding_features import adding_no_features
def iterate_json_data(filepath,
                      columns_to_keep=None,
                      feature_adder=adding_no_features,
                      data_filter=lambda datum: True,
                      missing_val_default=None):
    """Yield records from a JSON-lines file, optionally augmented and filtered.

    Each line is parsed, passed through ``feature_adder``, and skipped when
    ``data_filter`` rejects it.  If ``columns_to_keep`` is given, only those
    columns are yielded (in order); a ``None`` value in a column listed in
    ``missing_val_default`` is replaced by that default.

    BUG FIX: the file handle was previously opened without ``with`` and never
    explicitly closed; the mutable ``{}`` default for ``missing_val_default``
    is also replaced by the ``None`` sentinel idiom (behavior unchanged).
    """
    if missing_val_default is None:
        missing_val_default = {}
    with open(filepath, 'r') as inputfile:
        for line in inputfile:
            datum = json.loads(line)
            datum = feature_adder(datum)
            if not data_filter(datum):
                continue
            if columns_to_keep is None:
                yield OrderedDict(datum)
                continue
            filtered_datum = OrderedDict()
            for column in columns_to_keep:
                filtered_datum[column] = datum[column]
                if column in missing_val_default and datum[column] is None:
                    filtered_datum[column] = missing_val_default[column]
            yield filtered_datum
def iterate_json_files_directory(dir,
                                 columns_to_keep=None,
                                 feature_adder=adding_no_features,
                                 data_filter=lambda datum: True,
                                 missing_val_default={}
                                 ):
    """Yield records from every ``*.json`` file directly under ``dir``,
    delegating per-file parsing to ``iterate_json_data``."""
    print('\tReading {}'.format(dir))
    if columns_to_keep is not None:
        columns_description = ', '.join(columns_to_keep)
    else:
        columns_description = 'ALL'
    print('\tColumns: {}'.format(columns_description))
    for filepath in glob(os.path.join(dir, '*.json')):
        yield from iterate_json_data(filepath,
                                     columns_to_keep=columns_to_keep,
                                     feature_adder=feature_adder,
                                     data_filter=data_filter,
                                     missing_val_default=missing_val_default)
def process_data(traindatafilepath, qual_features, binary_features, quant_features,
                 target_label,
                 feature_adder=adding_no_features,
                 nb_lines_per_tempfile=10000,
                 data_filter=lambda datum: True,
                 missing_val_default={},
                 filename_fmt='data_{0:09d}.json'):
    """Shard the training data into JSON files inside a temporary directory.

    Reads ``traindatafilepath`` (JSON lines), keeps only the requested
    feature columns plus ``target_label``, and writes the surviving records
    into numbered files of at most ``nb_lines_per_tempfile`` lines each.

    Returns:
        (tempdir, nbdata): the ``TemporaryDirectory`` holding the shards
        (the caller must keep a reference to it, or the files are deleted)
        and the number of records written.
    """
    tempdir = tempfile.TemporaryDirectory()
    fileid = 0
    tmpfile = None
    nbdata = 0
    for i, datum in enumerate(iterate_json_data(traindatafilepath,
                                                columns_to_keep=qual_features+binary_features+quant_features+[target_label],
                                                feature_adder=feature_adder,
                                                data_filter=data_filter,
                                                missing_val_default=missing_val_default)):
        if i % nb_lines_per_tempfile == 0:
            # Roll over to a new shard file every nb_lines_per_tempfile lines.
            if tmpfile is not None:
                tmpfile.close()
            tmpfile = open(os.path.join(tempdir.name, filename_fmt.format(fileid)), 'w')
            fileid += 1
            print('\tRead {} lines...'.format(i))
        nbdata += 1
        tmpfile.write(json.dumps(datum)+'\n')
    # BUG FIX: tmpfile.close() was previously unconditional, which raised
    # AttributeError ('NoneType' has no attribute 'close') on empty input.
    if tmpfile is not None:
        tmpfile.close()
    return tempdir, nbdata
def assign_partitions(nbdata, cv_nfold, heldout_fraction, seed=None):
    """Randomly assign each of ``nbdata`` records to the hold-out set (-1)
    or to one of ``cv_nfold`` cross-validation folds (0..cv_nfold-1)."""
    if seed is not None:
        np.random.seed(seed)
    fold_fraction = (1 - heldout_fraction) / cv_nfold
    partitions = [-1] + list(range(cv_nfold))  # -1 marks the hold-out set
    probabilities = [heldout_fraction] + [fold_fraction] * cv_nfold
    return np.random.choice(partitions, p=probabilities, size=nbdata)
| 39.449438 | 124 | 0.55084 | 0 | 0 | 1,713 | 0.487895 | 0 | 0 | 0 | 0 | 123 | 0.035033 |
4cc72ebbf1d2a395ab61b0a358ef14e07350a1c2 | 333 | py | Python | Server/model/account.py | devArtoria/-Awesome-GraphQL- | db13f235b2d1e6aeee6e858a2c682b7f86bd7062 | [
"MIT"
] | 27 | 2019-03-20T14:13:09.000Z | 2022-03-18T20:36:39.000Z | Server/model/account.py | devArtoria/-Awesome-GraphQL- | db13f235b2d1e6aeee6e858a2c682b7f86bd7062 | [
"MIT"
] | 5 | 2018-04-17T10:54:13.000Z | 2018-09-25T10:30:29.000Z | Server/model/account.py | devArtoria/-Awesome-GraphQL- | db13f235b2d1e6aeee6e858a2c682b7f86bd7062 | [
"MIT"
] | 14 | 2019-02-26T05:43:39.000Z | 2022-03-01T15:39:26.000Z | from mongoengine import *
from datetime import datetime
class AccountModel(Document):
    """MongoDB document holding one user account's credentials."""
    meta = {'collection': 'account'}
    # The account identifier doubles as the document's primary key.
    id = StringField(required=True, primary_key=True)
    username = StringField(required=True)
    password = StringField(required=True)
    # BUG FIX: pass the callable (datetime.now), not its result.  The old
    # ``default=datetime.now()`` evaluated once at import time, stamping
    # every account with the module-load time instead of its creation time.
    register_on = DateTimeField(required=True, default=datetime.now)
4cc9907c3e6982c53be1c37022a333762d1c73f3 | 473 | py | Python | users/migrations/0010_auto_20200321_1902.py | jakubzadrozny/hackcrisis | 4fe27423cda013bf01d5e9d3fc734c707f06b708 | [
"MIT"
] | null | null | null | users/migrations/0010_auto_20200321_1902.py | jakubzadrozny/hackcrisis | 4fe27423cda013bf01d5e9d3fc734c707f06b708 | [
"MIT"
] | 4 | 2021-03-19T01:03:55.000Z | 2021-06-10T18:44:03.000Z | users/migrations/0010_auto_20200321_1902.py | jakubzadrozny/hackcrisis | 4fe27423cda013bf01d5e9d3fc734c707f06b708 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.4 on 2020-03-21 19:02
from django.db import migrations
import phonenumber_field.modelfields
class Migration(migrations.Migration):
    """Auto-generated by Django 3.0.4: alter ``customuser.phone`` to a
    unique PhoneNumberField.  Do not hand-edit generated migrations beyond
    comments; add a new migration for further schema changes."""
    dependencies = [
        ('users', '0009_auto_20200321_1438'),
    ]
    operations = [
        migrations.AlterField(
            model_name='customuser',
            name='phone',
            field=phonenumber_field.modelfields.PhoneNumberField(max_length=128, region=None, unique=True),
        ),
    ]
| 23.65 | 107 | 0.655391 | 351 | 0.742072 | 0 | 0 | 0 | 0 | 0 | 0 | 98 | 0.207188 |
4cc9c8edfe702c912f8657df41c4c8c831edd77a | 5,471 | py | Python | tests/conftest.py | TommasoBelluzzo/PyDTMC | ba6aac67940156cb14b05a906d9ced9b387d1e65 | [
"MIT"
] | 43 | 2019-03-18T11:19:52.000Z | 2022-02-21T15:25:11.000Z | tests/conftest.py | TommasoBelluzzo/PyDTMC | ba6aac67940156cb14b05a906d9ced9b387d1e65 | [
"MIT"
] | 7 | 2019-07-08T19:44:03.000Z | 2021-07-06T11:08:28.000Z | tests/conftest.py | TommasoBelluzzo/PyDTMC | ba6aac67940156cb14b05a906d9ced9b387d1e65 | [
"MIT"
] | 18 | 2019-07-05T16:27:49.000Z | 2022-02-02T21:24:19.000Z | # -*- coding: utf-8 -*-
###########
# IMPORTS #
###########
# Standard
from os.path import (
abspath as _os_abspath,
dirname as _os_dirname,
isfile as _os_isfile,
join as _os_join
)
from json import (
load as _json_load
)
#############
# CONSTANTS #
#############
# JSON has no literals for NaN or ±Infinity, so the fixture files encode them
# as strings; each pair maps the string placeholder to the float it stands for.
_replacements = [
    ('NaN', float('nan')),
    ('-Infinity', float('-inf')),
    ('Infinity', float('inf'))
]
###########
# CACHING #
###########
# Per-test-module cache of parsed fixture files; a value of None records that
# no fixture file exists for that module.
_fixtures = {}
#############
# FUNCTIONS #
#############
def _sanitize_fixture_recursive(element, replacements):
if isinstance(element, dict):
return {key: _sanitize_fixture_recursive(value, replacements) for key, value in element.items()}
if isinstance(element, list):
return [_sanitize_fixture_recursive(item, replacements) for item in element]
for replacement in replacements:
if element == replacement[0]:
return replacement[1]
return element
def _parse_fixture_dictionary(fixture, fixture_names, subtest_name):
values = []
ids = []
expected_args = len(fixture_names)
subtest_reference = f'{subtest_name.replace("test_", "")}_data'
if subtest_reference in fixture:
fixture_data = fixture[subtest_reference]
if isinstance(fixture_data, dict):
values_current = tuple(fixture_data[fixture_name] for fixture_name in fixture_names if fixture_name in fixture_data)
if len(values_current) == expected_args:
values.append(values_current)
ids.append(f'{subtest_name}')
elif isinstance(fixture_data, list):
for index, case in enumerate(fixture_data):
case_id = f'_{case["id"]}' if 'id' in case else f' #{str(index + 1)}'
values_current = tuple(case[fixture_name] for fixture_name in fixture_names if fixture_name in case)
if len(values_current) == expected_args:
values.append(values_current)
ids.append(f'{subtest_name}{case_id}')
if len(values) != len(fixture_data):
values = []
ids = []
return values, ids
def _parse_fixture_list(fixture, fixture_names, subtest_name):
values = []
ids = []
expected_args = len(fixture_names)
subtest_reference = f'{subtest_name.replace("test_", "")}_data'
if any(subtest_reference in case for case in fixture):
flags = [False] * len(fixture)
for index_case, case in enumerate(fixture):
if subtest_reference in case:
case_id = case['id'] if 'id' in case else f' #{str(index_case + 1)}'
case_values = tuple(case[fixture_name] for fixture_name in fixture_names if fixture_name in case)
for index_subcase, subcase in enumerate(case[subtest_reference]):
values_current = case_values + tuple(subcase[fixture_name] for fixture_name in fixture_names if fixture_name in subcase)
if len(values_current) == expected_args:
values.append(values_current)
ids.append(f'{subtest_name} {case_id}-{str(index_subcase + 1)}')
flags[index_case] = True
if not all(flags):
values = []
ids = []
else:
for index, case in enumerate(fixture):
case_id = case['id'] if 'id' in case else f' #{str(index + 1)}'
values_current = tuple(case[fixture_name] for fixture_name in fixture_names if fixture_name in case)
if len(values_current) == expected_args:
values.append(values_current)
ids.append(f'{subtest_name} {case_id}')
if len(values) != len(fixture):
values = []
ids = []
return values, ids
#########
# SETUP #
#########
def pytest_configure(config):
    """Register the warning filters and the custom ``slow`` marker."""
    ini_lines = (
        ('filterwarnings', 'ignore::DeprecationWarning'),
        ('filterwarnings', 'ignore::PendingDeprecationWarning'),
        ('filterwarnings', 'ignore::matplotlib.cbook.mplDeprecation'),
        ('markers', 'slow: mark tests as slow (exclude them with \'-m "not slow"\').'),
    )
    for name, line in ini_lines:
        config.addinivalue_line(name, line)
def pytest_generate_tests(metafunc):
    """
    Parametrize each test from its module's JSON fixture file.

    The file ``fixtures/fixtures_<name>.json`` (relative to this directory)
    is loaded once per test module, sanitized of the string placeholders for
    NaN/Infinity, cached in ``_fixtures`` (None when absent), and matched
    against the test's fixture names by the ``_parse_fixture_*`` helpers.
    """
    module = metafunc.module.__name__
    func = metafunc.definition.name
    mark = metafunc.definition.get_closest_marker('parametrize')
    names = metafunc.fixturenames
    # Strip the leading 'test_' prefix from the module name to locate the
    # fixture file (find() returning -1 degrades to the whole name).
    test_index = module.find('_') + 1
    test_name = module[test_index:]
    if test_name not in _fixtures:
        base_directory = _os_abspath(_os_dirname(__file__))
        fixtures_file = _os_join(base_directory, f'fixtures/fixtures_{test_name}.json')
        if not _os_isfile(fixtures_file):
            _fixtures[test_name] = None
        else:
            # BUG FIX: the fixture files are UTF-8; be explicit so the
            # platform default encoding (e.g. cp1252 on Windows) cannot
            # corrupt non-ASCII fixture content.
            with open(fixtures_file, 'r', encoding='utf-8') as file:
                fixture = _json_load(file)
            fixture = _sanitize_fixture_recursive(fixture, _replacements)
            _fixtures[test_name] = fixture
    fixture = _fixtures[test_name]
    values = []
    ids = []
    # Only auto-parametrize tests that take fixtures, are not already
    # explicitly parametrized, and have fixture data available.
    if len(names) > 0 and mark is None and fixture is not None and len(fixture) > 0:
        if isinstance(fixture, dict):
            values, ids = _parse_fixture_dictionary(fixture, names, func)
        elif isinstance(fixture, list):
            values, ids = _parse_fixture_list(fixture, names, func)
    metafunc.parametrize(names, values, False, ids)
4cc9e781c10a825149e73361b05a57d1750ea78f | 1,428 | py | Python | game-ai-ui/video/video.py | yugendra/game-ai-ui | 3209ca39475ca3781662e43c86ffe509784a52f3 | [
"Apache-2.0"
] | null | null | null | game-ai-ui/video/video.py | yugendra/game-ai-ui | 3209ca39475ca3781662e43c86ffe509784a52f3 | [
"Apache-2.0"
] | null | null | null | game-ai-ui/video/video.py | yugendra/game-ai-ui | 3209ca39475ca3781662e43c86ffe509784a52f3 | [
"Apache-2.0"
] | 1 | 2018-05-22T12:13:02.000Z | 2018-05-22T12:13:02.000Z | import logging
import re
import os
import mimetypes
from flask import Response
LOG = logging.getLogger(__name__)
MB = 1 << 20
BUFF_SIZE = 10 * MB
def partial_response(path, start, end=None):
    """Serve a byte range of the file at *path* as an HTTP 206 response.

    The inclusive range is clamped to the file size and to at most
    BUFF_SIZE bytes; pass end=None to serve a full buffer-sized window
    starting at *start*.
    """
    LOG.info('Requested: %s, %s', start, end)
    file_size = os.path.getsize(path)

    # Determine the inclusive end offset: never past EOF and never more
    # than BUFF_SIZE bytes in a single response. (The original computed
    # the BUFF_SIZE clamp twice; a single min() is equivalent.)
    if end is None:
        end = start + BUFF_SIZE - 1
    end = min(end, file_size - 1, start + BUFF_SIZE - 1)
    length = end - start + 1

    # Read exactly the requested slice. 'data' (not 'bytes') avoids
    # shadowing the builtin.
    with open(path, 'rb') as fd:
        fd.seek(start)
        data = fd.read(length)
    assert len(data) == length

    response = Response(
        data,
        206,  # Partial Content
        mimetype=mimetypes.guess_type(path)[0],
        direct_passthrough=True,
    )
    response.headers.add(
        'Content-Range', 'bytes {0}-{1}/{2}'.format(
            start, end, file_size,
        ),
    )
    response.headers.add(
        'Accept-Ranges', 'bytes'
    )
    LOG.info('Response: %s', response)
    LOG.info('Response: %s', response.headers)
    return response
def get_range(request):
    """Parse the HTTP Range header into a (start, end) tuple.

    Returns (start, None) for open-ended ranges like 'bytes=100-' and
    (0, None) when the header value does not match the expected form.
    Byte offsets are returned as ints.
    """
    # Renamed from 'range' to avoid shadowing the builtin.
    range_header = request.headers.get('Range')
    LOG.info('Requested: %s', range_header)
    # Raw string: '\d' in a plain string literal is a DeprecationWarning.
    m = re.match(r'bytes=(?P<start>\d+)-(?P<end>\d+)?', range_header)
    if not m:
        return 0, None
    start = int(m.group('start'))
    end = m.group('end')
    if end is not None:
        end = int(end)
    return start, end
| 23.8 | 61 | 0.570728 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 213 | 0.14916 |
4ccb69dec50ffbff4817a4e9cbaedb4a7551f21e | 270 | py | Python | src/py/PrjRestfulApiService/RestfulApiService.py | PrQiang/aods | b743754740f5b5bb4217f06fd790dffa303f871f | [
"MIT"
] | 2 | 2020-12-14T14:24:56.000Z | 2021-06-16T09:22:13.000Z | example/PrjRestfulApiService/RestfulApiService.py | PrQiang/aods | b743754740f5b5bb4217f06fd790dffa303f871f | [
"MIT"
] | 1 | 2020-12-30T10:25:27.000Z | 2020-12-30T10:25:44.000Z | example/PrjRestfulApiService/RestfulApiService.py | PrQiang/aods | b743754740f5b5bb4217f06fd790dffa303f871f | [
"MIT"
] | 1 | 2021-06-16T09:22:17.000Z | 2021-06-16T09:22:17.000Z | import socketserver, threading
from RestfulApiHandler import RestfulApiHandler
if __name__ == "__main__":
    # Listen on all interfaces, port 32002; ThreadingTCPServer handles each
    # incoming request in its own thread via RestfulApiHandler.
    server = socketserver.ThreadingTCPServer(('0.0.0.0', 32002), RestfulApiHandler)
    # Run the accept loop in a background thread so this call returns
    # immediately instead of blocking the main thread.
    t = threading.Thread(target= server.serve_forever, args = ())
    t.start()
4cccd7d1f2855b57b5553b54acf96bb9e987eb7f | 292 | py | Python | studio/urls.py | mrashidov/ColdWaterWebSite | 0d52860e8bb21f77aec744e3891364957ac75399 | [
"MIT"
] | null | null | null | studio/urls.py | mrashidov/ColdWaterWebSite | 0d52860e8bb21f77aec744e3891364957ac75399 | [
"MIT"
] | null | null | null | studio/urls.py | mrashidov/ColdWaterWebSite | 0d52860e8bb21f77aec744e3891364957ac75399 | [
"MIT"
] | null | null | null | from django.conf.urls import include, url
from . import views
# URL routes for this app: task listing, task creation, and a catch-all
# slug lookup (also served by ShowTask).
urlpatterns = [
    url(r'^task$', views.ShowTask.as_view(), name='show_task'),
    url(r'^task/add/$', views.AddTask.as_view(),name='add_task'),
    url(r'^(?P<slug>[\w\-]+)$', views.ShowTask.as_view(), name='show'),
]
| 29.2 | 71 | 0.619863 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 72 | 0.246575 |
4ccfcbd5467abde88fe98348a63720b73520fcc5 | 4,344 | py | Python | pycryptomkt/client.py | smaass/pycryptomkt | 02cb5ff114947090fc10c2120c4c89f479d82f08 | [
"MIT"
] | null | null | null | pycryptomkt/client.py | smaass/pycryptomkt | 02cb5ff114947090fc10c2120c4c89f479d82f08 | [
"MIT"
] | null | null | null | pycryptomkt/client.py | smaass/pycryptomkt | 02cb5ff114947090fc10c2120c4c89f479d82f08 | [
"MIT"
] | null | null | null | import hashlib
import hmac
import requests
import time
from functools import reduce
class CryptoMKT(object):
    """Minimal client for the CryptoMKT exchange REST API (v1).

    Public market-data endpoints need no credentials; account endpoints
    (balance, orders) require an API key/secret pair and sign each request
    with HMAC-SHA384.
    """

    BASE_URL = 'https://api.cryptomkt.com'
    API_VERSION = 'v1'

    ENDPOINT_BALANCE = 'balance'
    ENDPOINT_BOOK = 'book'
    ENDPOINT_MARKETS = 'market'
    ENDPOINT_TICKER = 'ticker'
    ENDPOINT_TRADES = 'trades'

    def __init__(self, api_key=None, api_secret=None):
        # Credentials may be omitted when only public endpoints are used.
        self.api_key = api_key
        self.api_secret = api_secret

    def check_has_tokens(self):
        """Raise InvalidTokensException unless both credentials are set."""
        if self.api_key is None:
            raise InvalidTokensException('API Key is required')
        if self.api_secret is None:
            raise InvalidTokensException('API Secret is required')

    def get_headers(self, endpoint, body):
        """Build the authentication headers for a signed request.

        The signed payload is '<timestamp>/<version>/<endpoint><body>',
        HMAC-SHA384 hashed with the API secret.
        """
        timestamp = str(time.time())
        payload = '{timestamp}/{version}/{endpoint}{body}'.format(
            timestamp=timestamp,
            version=self.API_VERSION,
            endpoint=endpoint,
            body=body
        )
        signature = hmac.new(
            self.api_secret.encode(),
            payload.encode(),
            hashlib.sha384
        ).hexdigest()
        return {
            'X-MKT-APIKEY': self.api_key,
            'X-MKT-SIGNATURE': signature,
            'X-MKT-TIMESTAMP': timestamp
        }

    def get(self, endpoint, params=None, headers=None):
        """Issue a GET to <base>/<version>/<endpoint>; return decoded JSON."""
        return requests.get(
            '{}/{}/{}'.format(self.BASE_URL, self.API_VERSION, endpoint),
            params=params,
            headers=headers
        ).json()

    def private_get(self, endpoint, params=None):
        """Issue a signed GET (the signature covers an empty body)."""
        self.check_has_tokens()
        headers = self.get_headers(endpoint, '')
        return self.get(endpoint, params=params, headers=headers)

    def post(self, endpoint, payload):
        """Issue a signed POST.

        The signature covers the payload values concatenated in key order
        (values only, no keys) — presumably the scheme the exchange expects.
        """
        self.check_has_tokens()
        body = [
            str(p[1]) for p in sorted(payload.items(), key=lambda p: p[0])
        ]
        headers = self.get_headers(endpoint, reduce(str.__add__, body))
        return requests.post(
            '{}/{}/{}'.format(self.BASE_URL, self.API_VERSION, endpoint),
            data=payload,
            headers=headers
        ).json()

    def markets(self):
        """List available markets (public)."""
        return self.get(self.ENDPOINT_MARKETS)

    def ticker(self):
        """Fetch ticker data (public)."""
        return self.get(self.ENDPOINT_TICKER)

    def book(self, market, order_type, page=0, limit=20):
        """Fetch one page of the order book for the given order type."""
        params = {
            'market': market,
            'type': order_type,
            'page': page,
            'limit': limit
        }
        return self.get(self.ENDPOINT_BOOK, params=params)

    def trades(self, market, start=None, end=None, page=0, limit=20):
        """Fetch trades for *market*, optionally bounded by start/end."""
        params = {
            'market': market,
            'page': page,
            'limit': limit
        }
        if start is not None:
            params['start'] = start
        if end is not None:
            params['end'] = end
        return self.get(self.ENDPOINT_TRADES, params=params)

    def balance(self):
        """Fetch account balances (authenticated)."""
        return self.private_get(self.ENDPOINT_BALANCE)

    @property
    def orders(self):
        # A fresh orders sub-API bound to this client on each access.
        return CryptoMKTOrdersAPI(self)
class CryptoMKTOrdersAPI(object):
    """Order-management endpoints of the CryptoMKT REST API.

    All transport and request signing is delegated to the wrapping
    client passed to the constructor.
    """

    ENDPOINT_ACTIVE = 'orders/active'
    ENDPOINT_CANCEL = 'orders/cancel'
    ENDPOINT_CREATE = 'orders/create'
    ENDPOINT_EXECUTED = 'orders/executed'
    ENDPOINT_STATUS = 'orders/status'

    def __init__(self, api_wrapper):
        self.api = api_wrapper

    def _paged(self, endpoint, market, page, limit):
        # Shared helper for the paginated listing endpoints.
        return self.api.private_get(
            endpoint, {'market': market, 'page': page, 'limit': limit}
        )

    def active(self, market, page=0, limit=20):
        """List open orders for *market*."""
        return self._paged(self.ENDPOINT_ACTIVE, market, page, limit)

    def executed(self, market, page=0, limit=20):
        """List executed orders for *market*."""
        return self._paged(self.ENDPOINT_EXECUTED, market, page, limit)

    def create(self, market, type, amount, price):
        """Place a new order on *market*."""
        order = {
            'market': market,
            'type': type,
            'amount': amount,
            'price': price,
        }
        return self.api.post(self.ENDPOINT_CREATE, order)

    def cancel(self, order_id):
        """Cancel the order identified by *order_id*."""
        return self.api.post(self.ENDPOINT_CANCEL, {'id': order_id})

    def status(self, order_id):
        """Fetch the current status of *order_id*."""
        return self.api.private_get(self.ENDPOINT_STATUS, {'id': order_id})
class InvalidTokensException(Exception):
    """Raised when an authenticated call is attempted without credentials."""
    pass
| 25.255814 | 75 | 0.580801 | 4,249 | 0.978131 | 0 | 0 | 72 | 0.016575 | 0 | 0 | 441 | 0.101519 |
4cd052c6a05d57bb861358a08e06ee51ac5c5637 | 69 | py | Python | venv/Lib/site-packages/openpyxl/formula/__init__.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 26 | 2021-01-22T08:40:45.000Z | 2022-03-19T12:09:39.000Z | venv/Lib/site-packages/openpyxl/formula/__init__.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 5 | 2021-08-06T09:41:32.000Z | 2021-08-17T08:37:47.000Z | venv/Lib/site-packages/openpyxl/formula/__init__.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 12 | 2021-04-06T02:32:20.000Z | 2022-03-21T16:30:29.000Z | # Copyright (c) 2010-2021 openpyxl
from .tokenizer import Tokenizer
| 17.25 | 34 | 0.782609 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 0.492754 |
4cd0870f8e1c2e5c492adaf82b4a9329b5b17f1d | 5,925 | py | Python | zplane.py | m1ch/pysim | 58b806d55585d785156813afa572741bfca6e3f1 | [
"MIT"
] | null | null | null | zplane.py | m1ch/pysim | 58b806d55585d785156813afa572741bfca6e3f1 | [
"MIT"
] | null | null | null | zplane.py | m1ch/pysim | 58b806d55585d785156813afa572741bfca6e3f1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Combination of
http://scipy-central.org/item/52/1/zplane-function
and
http://www.dsprelated.com/showcode/244.php
with my own modifications
"""
# Copyright (c) 2011 Christopher Felton
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# The following is derived from the slides presented by
# Alexander Kain for CS506/606 "Special Topics: Speech Signal Processing"
# CSLU / OHSU, Spring Term 2011.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import patches
from matplotlib.pyplot import axvline, axhline
from collections import defaultdict
def zplane(z, p, filename=None):
    """Plot the complex z-plane given zeros and poles.

    Parameters
    ----------
    z, p : array-like of complex
        Zeros and poles of the system.
    filename : str, optional
        If given, save the figure to this path instead of showing it.
    """
    # get a figure/plot
    ax = plt.subplot(2, 2, 1)
    # TODO: should just inherit whatever subplot it's called in?

    # Add unit circle and zero axes
    unit_circle = patches.Circle((0, 0), radius=1, fill=False,
                                 color='black', ls='solid', alpha=0.1)
    ax.add_patch(unit_circle)
    axvline(0, color='0.7')
    axhline(0, color='0.7')

    # Plot the poles and set marker properties
    poles = plt.plot(p.real, p.imag, 'x', markersize=9, alpha=0.5)

    # Plot the zeros and set marker properties
    zeros = plt.plot(z.real, z.imag, 'o', markersize=9,
                     color='none', alpha=0.5,
                     markeredgecolor=poles[0].get_color(),  # same color as poles
                     )

    # Scale axes to fit all points plus the unit circle, with margin
    r = 1.5 * np.amax(np.concatenate((abs(z), abs(p), [1])))
    plt.axis('scaled')
    plt.axis([-r, r, -r, r])

    # If there are multiple poles or zeros at the same point, put a
    # superscript next to them. (Previously duplicated inline for poles
    # and zeros, with a leftover debug print; now factored into a helper.)
    # TODO: can this be made to self-update when zoomed?
    _annotate_multiplicities(ax, poles[0])
    _annotate_multiplicities(ax, zeros[0])

    if filename is None:
        plt.show()
    else:
        plt.savefig(filename)
        print( 'Pole-zero plot saved to ' + str(filename))


def _annotate_multiplicities(ax, line):
    """Label coincident markers of *line* with a superscript multiplicity."""
    # Finding duplicates by same pixel coordinates (hacky for now):
    xy_pixels = ax.transData.transform(np.vstack(line.get_data()).T)
    # dict keys should be ints for matching, but coords should be floats for
    # keeping location of text accurate while zooming
    counts = defaultdict(int)
    coords = defaultdict(tuple)
    for xy in xy_pixels:
        key = tuple(np.rint(xy).astype('int'))
        counts[key] += 1
        coords[key] = xy
    for key, value in counts.items():
        if value > 1:
            x, y = ax.transData.inverted().transform(coords[key])
            plt.text(x, y,
                     r' ${}^{' + str(value) + '}$',
                     fontsize=13,
                     )
if __name__ == "__main__":
    # Demo: design a 2nd-order Butterworth bandpass filter and show its
    # pole-zero plot, impulse response, magnitude response, and phase.
    from scipy.signal import (freqz, butter, bessel, cheby1, cheby2, ellip,
        tf2zpk, zpk2tf, lfilter, buttap, bilinear, cheb2ord, cheb2ap
        )
    from numpy import asarray, tan, array, pi, arange, cos, log10, unwrap, angle
    from matplotlib.pyplot import (stem, title, grid, show, plot, xlabel,
                                   ylabel, subplot, xscale, figure, xlim,
                                   margins)

    # # Cosine function
    # omega = pi/4
    # b = array([1.0, -cos(omega)])
    # a = array([1, -2*cos(omega), 1.0])

    b, a = butter(2, [0.06, 0.7], 'bandpass')

    # Get the poles and zeros
    z, p, k = tf2zpk(b, a)

    # Create zero-pole plot (zplane draws into subplot (2, 2, 1))
    figure(figsize=(16, 9))
    subplot(2, 2, 1)
    zplane(z, p)
    grid(True, color='0.9', linestyle='-', which='both', axis='both')
    title('Poles and zeros')

    # Display zeros, poles and gain
    print( str(len(z)) + " zeros: " + str(z))
    print( str(len(p)) + " poles: " + str(p))
    print( "gain: " + str(k))

    # Impulse response: filter a unit impulse (1 at n=0, else 0)
    index = arange(0,20)
    u = 1.0*(index==0)
    y = lfilter(b, a, u)
    subplot(2, 2, 3)
    stem(index,y)
    title('Impulse response')
    margins(0, 0.1)
    grid(True, color='0.9', linestyle='-', which='both', axis='both')
    show()

    # Frequency response (magnitude in dB on a log frequency axis)
    w, h = freqz(b, a)
    subplot(2, 2, 2)
    plot(w/pi, 20*log10(abs(h)))
    xscale('log')
    title('Frequency response')
    xlabel('Normalized frequency')
    ylabel('Amplitude [dB]')
    margins(0, 0.1)
    grid(True, color = '0.7', linestyle='-', which='major', axis='both')
    grid(True, color = '0.9', linestyle='-', which='minor', axis='both')
    show()

    # Phase (unwrapped, in degrees)
    subplot(2, 2, 4)
    plot(w/pi, 180/pi * unwrap(angle(h)))
    xscale('log')
    xlabel('Normalized frequency')
    ylabel('Phase [degrees]')
    grid(True, color = '0.7', linestyle='-', which='major')
    grid(True, color = '0.9', linestyle='-', which='minor')
    show()
4cd33a36e5a202d2c15a3e020cfd9f9644dce14d | 81 | py | Python | lino_book/projects/igen/__init__.py | lino-framework/lino_book | 4eab916832cd8f48ff1b9fc8c2789f0b437da0f8 | [
"BSD-2-Clause"
] | 3 | 2016-08-25T05:58:09.000Z | 2019-12-05T11:13:45.000Z | lino_book/projects/igen/__init__.py | lino-framework/lino_book | 4eab916832cd8f48ff1b9fc8c2789f0b437da0f8 | [
"BSD-2-Clause"
] | 18 | 2016-11-12T21:38:58.000Z | 2019-12-03T17:54:38.000Z | lino_book/projects/igen/__init__.py | lino-framework/lino_book | 4eab916832cd8f48ff1b9fc8c2789f0b437da0f8 | [
"BSD-2-Clause"
] | 9 | 2016-10-15T11:12:33.000Z | 2021-09-22T04:37:37.000Z | """
igen stands for "invoice generator".
The project is currently inactive.
"""
| 13.5 | 36 | 0.716049 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 80 | 0.987654 |
4cd50bc65a9dffb20e6d862af0cb7471f35a384d | 70 | py | Python | code/arc028_1_01.py | KoyanagiHitoshi/AtCoder | 731892543769b5df15254e1f32b756190378d292 | [
"MIT"
] | 3 | 2019-08-16T16:55:48.000Z | 2021-04-11T10:21:40.000Z | code/arc028_1_01.py | KoyanagiHitoshi/AtCoder | 731892543769b5df15254e1f32b756190378d292 | [
"MIT"
] | null | null | null | code/arc028_1_01.py | KoyanagiHitoshi/AtCoder | 731892543769b5df15254e1f32b756190378d292 | [
"MIT"
] | null | null | null | N,A,B=map(int,input().split())
print("Ant" if 0<N%(A+B)<=A else "Bug") | 35 | 39 | 0.585714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.142857 |
4cd53feef0e51a343960a8e8ab7d831aca62ac7a | 30,518 | py | Python | assembly_examiner.py | jfitz/code-stat | dd2a13177f3ef03ab42123ef3cfcbbd062a2ae26 | [
"MIT"
] | null | null | null | assembly_examiner.py | jfitz/code-stat | dd2a13177f3ef03ab42123ef3cfcbbd062a2ae26 | [
"MIT"
] | null | null | null | assembly_examiner.py | jfitz/code-stat | dd2a13177f3ef03ab42123ef3cfcbbd062a2ae26 | [
"MIT"
] | null | null | null | import string
import math
from codestat_token import Token
from codestat_tokenizer import Tokenizer
from token_builders import (
InvalidTokenBuilder,
NullTokenBuilder,
WhitespaceTokenBuilder,
NewlineTokenBuilder,
EscapedStringTokenBuilder,
PrefixedStringTokenBuilder,
IntegerTokenBuilder,
IntegerExponentTokenBuilder,
PrefixedIntegerTokenBuilder,
SuffixedIntegerTokenBuilder,
RealTokenBuilder,
IdentifierTokenBuilder,
CaseInsensitiveListTokenBuilder,
CaseSensitiveListTokenBuilder,
LeadToEndOfLineTokenBuilder,
SingleCharacterTokenBuilder
)
from assembly_token_builders import (
LabelTokenBuilder,
AssemblyCommentTokenBuilder,
MultilineCommentTokenBuilder,
HashQuoteCharTokenBuilder
)
from examiner import Examiner
class AssemblyExaminer(Examiner):
    @staticmethod
    def __escape_z__():
        """Invoke the __escape_z__ hook on every token-builder class this
        examiner depends on, then return a sentinel string.

        NOTE(review): this looks like a marker used across examiners to keep
        these classes referenced (e.g. for coverage or anti-dead-code
        analysis) — confirm the intent against the sibling examiner modules.
        """
        InvalidTokenBuilder.__escape_z__()
        WhitespaceTokenBuilder.__escape_z__()
        NewlineTokenBuilder.__escape_z__()
        EscapedStringTokenBuilder.__escape_z__()
        PrefixedStringTokenBuilder.__escape_z__()
        IntegerTokenBuilder.__escape_z__()
        IntegerExponentTokenBuilder.__escape_z__()
        PrefixedIntegerTokenBuilder.__escape_z__()
        SuffixedIntegerTokenBuilder.__escape_z__()
        RealTokenBuilder.__escape_z__()
        IdentifierTokenBuilder.__escape_z__()
        CaseInsensitiveListTokenBuilder.__escape_z__()
        CaseSensitiveListTokenBuilder.__escape_z__()
        LeadToEndOfLineTokenBuilder.__escape_z__()
        SingleCharacterTokenBuilder.__escape_z__()
        LabelTokenBuilder.__escape_z__()
        AssemblyCommentTokenBuilder.__escape_z__()
        MultilineCommentTokenBuilder.__escape_z__()
        HashQuoteCharTokenBuilder.__escape_z__()
        return 'Escape ?Z'
def __init__(self, code, tab_size, processor):
super().__init__()
self.newlines_important = 'always'
operand_types = []
whitespace_tb = WhitespaceTokenBuilder()
newline_tb = NewlineTokenBuilder()
comment_tb = LeadToEndOfLineTokenBuilder(';', True, 'comment')
if processor in ['pdp-8']:
comment_tb = LeadToEndOfLineTokenBuilder('/', True, 'comment')
comment_2_tb = NullTokenBuilder()
if processor in ['1802']:
comment_2_tb = LeadToEndOfLineTokenBuilder('..', True, 'comment')
line_comment_star_tb = AssemblyCommentTokenBuilder('*')
line_comment_hash_tb = NullTokenBuilder()
if processor in ['68000']:
line_comment_hash_tb = AssemblyCommentTokenBuilder('#')
stmt_separator_tb = NullTokenBuilder()
if processor in ['pdp-8']:
stmt_separator_tb = SingleCharacterTokenBuilder(';', 'statement separator', False)
integer_tb = IntegerTokenBuilder("'")
integer_exponent_tb = IntegerExponentTokenBuilder("'")
integer_1_tb = NullTokenBuilder()
integer_2_tb = NullTokenBuilder()
prefixed_integer_tb = PrefixedIntegerTokenBuilder('#', True, '0123456789')
if processor in ['pdp-11']:
integer_1_tb = SuffixedIntegerTokenBuilder('$', True, '0123456789')
if processor in ['z80']:
integer_1_tb = SuffixedIntegerTokenBuilder('O', True, '0123456789')
integer_2_tb = SuffixedIntegerTokenBuilder('D', True, '0123456789')
hex_integer_1_tb = PrefixedIntegerTokenBuilder('&', True, '0123456789abcdefABCDEF')
hex_integer_2_tb = SuffixedIntegerTokenBuilder('h', False, '0123456789abcdefABCDEF')
hex_integer_3_tb = PrefixedIntegerTokenBuilder('$', True, '0123456789abcdefABCDEF')
hex_integer_4_tb = PrefixedIntegerTokenBuilder('#$', True, '0123456789abcdefABCDEF')
hash_quote_value_tb = NullTokenBuilder()
if processor in ['pdp-11']:
hash_quote_value_tb = HashQuoteCharTokenBuilder()
operand_types.append('number')
leads = '_.$@#'
extras = '_.$@#'
identifier_tb = IdentifierTokenBuilder(leads, extras)
operand_types.append('identifier')
label_tb = LabelTokenBuilder(leads, extras, ':')
quotes = ['"', "'", "’"]
string_tb = EscapedStringTokenBuilder(quotes, 0)
operand_types.append('string')
known_operators = [
'+', '-', '*', '/', '&', '|', '=', '??', '#', '@', "'", '!'
]
self.unary_operators = [
'+', '-', '??', '#', '@', "'"
]
self.postfix_operators = ['+']
groupers = ['(', ')', ',', '[', ']', '<', '>', ':']
group_starts = ['(', '[', ',', '<']
group_ends = [')', ']', '>']
group_mids = [',', ':']
groupers_tb = CaseInsensitiveListTokenBuilder(groupers, 'group', False)
known_operator_tb = CaseSensitiveListTokenBuilder(known_operators, 'operator', False)
preprocessors = [
'if', 'ifne', 'ifeq',
'else', 'endif', 'endc',
'error'
]
preprocessors_68000 = [
'MACRO', 'ENDM'
]
preprocessors_8080 = [
'MACRO', 'ENDM'
]
preprocessors_8086 = [
'ELSE', 'ELSEIF', 'ELSEIF2', 'ENDM', 'EXITM',
'FOR', 'FORC',
'GOTO',
'IF', 'IF2', 'IFB', 'IFNB', 'IFDEF', 'IFNDEF',
'IFDIF', 'IFDIF[[I]]', 'IFE', 'IFIDN', 'IFIDN[[I]]',
'LOCAL',
'MACRO',
'PURGE',
'.BREAK', '.CONTINUE',
'.ELSE', '.ELSEIF', '.ENDIF',
'.ERR', '.ERR2', '.ERRB', '.ERRDEF',
'.ERRDIF', '.ERRDIF[[I]]]', '.ERRE', '.ERRIDN', '.ERRIDN[[I]]',
'.ERRNB', '.ERRNDEF', '.ERRNZ', '.EXIT',
'.IF',
'.REPEAT', '.UNTIL', '.UNTILCXZ',
'.WHILE'
]
if processor in ['68000']:
preprocessors += preprocessors_68000
if processor in ['8080']:
preprocessors += preprocessors_8080
if processor in ['8086']:
preprocessors += preprocessors_8086
preprocessor_tb = CaseInsensitiveListTokenBuilder(preprocessors, 'preprocessor', False)
directives = [
'DB', 'DW', 'DS',
'EJECT', 'END', 'EQU', 'EXTRN',
'INCLUDE',
'NAME',
'ORG',
'PAGE',
'SECTION', 'SEGMENT', 'START', 'SUBTITLE',
'TEXT'
]
directives_6502 = [
'DFB', 'DFW'
]
directives_6800 = [
'CPU',
'NAM'
]
directives_68000 = [
'=',
'EVEN',
'ODD'
]
directives_8080 = [
'ASEG',
'CPU',
'LOCAL',
'TITLE',
'.8080', '.8086', '.6800', '.6502', ".386",
]
directives_z80 = [
'DEFB', 'DEFS', 'DEFW'
]
directives_8086 = [
'=',
'ABSOLUTE', 'ALIAS', 'ALIGN', 'AS', 'ASSUME', 'AT',
'BITS', 'BYTE',
'COMM', 'COMMON', 'CPU', 'CSEG',
'DEFAULT', 'DSEG', 'DWORD',
'ECHO', 'ENDP', 'ENDS', 'EVEN', 'EXTERNDEF',
'FWORD', 'FORMAT',
'GLOBAL', 'GROUP',
'INCLUDELIB', 'INS86', 'INVOKE',
'LABEL',
'MMWORD',
'OPTION',
'POPCONTEXT', 'PROC', 'PROTO', 'PUBLIC', 'PUSHCONTEXT',
'SEGMENT'
'QWORD',
'REAL4', 'REAL8', 'REAL10', 'RECORD',
'STRUCT',
'TEXTEQU', 'TBYTE', 'TYPEDEF',
'WORD',
'SBYTE', 'SDWORD', 'SWORD',
'SECT', 'SECTION', 'SEGMENT', 'STATIC'
'UNION', 'USE16', 'USE32', 'USE64',
'VIRTUAL',
'XMMWORD', 'YMMWORD',
'.386', '.386P', '.387', '.486', '.486P', '.586', '.586P',
'.686', '.686P', '.K3D',
'.ALLOCSTACK', '.ALPHA',
'.CODE', '.CONST', '.CREF',
'.DATA', '.DATA?', '.DOSSEG',
'.ENDW', '.ENDPROLOG',
'.FARDATA', '.FARDATA?', '.FPO',
'.LIST', '.LISTALL', '.LISTIF', '.LISTMACRO', '.LISTMACROALL',
'.MODEL', '.MMX',
'.NOCREF', '.NOLIST', '.NOLISTIF', '.NOLISTMACRO',
'.PUSHFRAME', '.PUSHREG',
'.RADIX',
'.SAFESEH', '.SALL', '.SAVEREG', '.SAVEXMM128', '.STACK', '.STARTUP',
'.SEQ', '.SETFRAME',
'.TFCOND',
'.XLIST', '.XMM',
]
directives_80386 = [
'ALIGN',
'BITS',
'GLOBAL',
'PROC',
'SECTION',
'RESB', 'RESD',
'.386',
'.CODE',
'.DATA',
'.MODEL',
'.TEXT',
'%INCLUDE',
]
directives_pdp8 = [
'='
]
directives_pdp11 = [
'=',
'BYTE',
'WORD',
'.odd', '.even', '.blkb', '.blkw', '.byte', '.word',
'.ascii', '.asciz', '.end', '.hex', '.radix',
'.ident', '.if', '.ift', '.endc', '.psect', '.mcall',
'.macro', '.endm', '.restore', '.print', '.error',
'.list', '.nlist'
]
if processor in ['6502']:
directives += directives_6502
if processor in ['6800']:
directives += directives_6800
if processor in ['68000']:
directives += directives_68000
if processor in ['8080']:
directives += directives_8080
if processor in ['z80']:
directives += directives_z80
if processor in ['8086']:
directives += directives_8086
if processor in ['80386']:
directives += directives_80386
if processor in ['pdp-8']:
directives += directives_pdp8
if processor in ['pdp-11']:
directives += directives_pdp11
directive_tb = CaseInsensitiveListTokenBuilder(directives, 'directive', False)
title_directive_tb = LeadToEndOfLineTokenBuilder('TITLE', False, 'directive')
title_directive_2_tb = LeadToEndOfLineTokenBuilder('.TITLE', False, 'directive')
subtitle_directive_tb = LeadToEndOfLineTokenBuilder('SUBTTL', False, 'directive')
subtitle_directive_2_tb = LeadToEndOfLineTokenBuilder('.SUBTTL', False, 'directive')
subtitle_directive_3_tb = LeadToEndOfLineTokenBuilder('.SBTTL', False, 'directive')
include_directive_tb = LeadToEndOfLineTokenBuilder('INCLUDE', False, 'directive')
include_directive_2_tb = LeadToEndOfLineTokenBuilder('.INCLUDE', False, 'directive')
multiline_comment_tb = MultilineCommentTokenBuilder()
opcodes_1802 = [
'IDL', 'LDN', 'INC', 'DEC', 'BR', 'BO', 'BZ', 'BDF', 'BPZ', 'BGE',
'B1', 'B2', 'B3', 'B4', 'SKP', 'NBR', 'BNO', 'BNZ', 'BNF', 'BM', 'BL',
'BN1', 'BN2', 'BN3', 'BN4', 'LDA', 'STR', 'IRX', 'OUT', 'INP',
'RET', 'DIS', 'LDXA', 'STXD', 'ADC', 'SDB', 'SHRC', 'RSHR', 'SMB',
'SAV', 'MARK', 'REQ', 'SEQ', 'ADCI', 'SDBI', 'SHLC', 'RSHL', 'SMBI',
'GLO', 'GHI', 'PLO', 'PHI', 'LBO', 'LBZ', 'LBDF', 'NOP', 'LSNO',
'LSNZ', 'LSNF', 'LSKP', 'NLBR', 'LBNQ', 'LBNZ', 'LBNF', 'LSIE', 'LSQ',
'LSZ', 'LSDF', 'SEP', 'SEX', 'LDX', 'OR', 'AND', 'XOR', 'ADD', 'SD',
'SHR', 'SM', 'LDI', 'ORI', 'ANI', 'XRI', 'ADI', 'SDI', 'SHL', 'SMI'
]
registers_1802 = []
opcodes_6502 = [
'ADC', 'AND', 'ASL', 'AST',
'BCC', 'BCS', 'BEQ', 'BIT', 'BMI', 'BNE', 'BPL', 'BRK', 'BVC', 'BVS',
'CLC', 'CLD', 'CLI', 'CLV', 'CMP', 'CPR', 'CPX', 'CPY',
'DEC', 'DEX', 'DEY',
'EOR',
'INC', 'INX', 'INY',
'JMP', 'JSR',
'LDA', 'LDX', 'LDY', 'LSR',
'NOP',
'ORA',
'PHA', 'PHP', 'PLA', 'PLP',
'ROL', 'ROR', 'RTI', 'RTS',
'SBC', 'SEC', 'SED', 'SEI', 'STA', 'STX', 'STY',
'TAX', 'TAY', 'TSX', 'TXA', 'TXS', 'TYA'
]
registers_6502 = ['A', 'X', 'Y', 'P', 'S']
opcodes_6800 = [
'ABA', 'ADC', 'ADCA', 'ADCB', 'ADD', 'AND', 'ASL', 'ASR',
'BCC', 'BCS', 'BEQ', 'BGE', 'BGT', 'BHI', 'BIT', 'BLE', 'BLS', 'BLT', 'BMI', 'BNE', 'BPL', 'BRA', 'BSR', 'BVC', 'BVS',
'CBA', 'CLC', 'CLI', 'CLR', 'CLRA', 'CLRB', 'CLV', 'CMP', 'COM', 'CPX',
'DAA', 'DEC', 'DES', 'DEX',
'EOR', 'EORA', 'EROB',
'INC', 'INS', 'INX',
'JMP', 'JSR',
'LDA', 'LDAA', 'LDAB', 'LDS', 'LDX', 'LSR',
'NEG', 'NOP',
'ORA',
'PSH', 'PUL',
'ROL', 'ROR', 'RTI', 'RTS',
'SBA', 'SBC', 'SEC', 'SEI', 'SEV', 'STA', 'STAA', 'STAB', 'STS', 'STX', 'SUB', 'SWI',
'TAB', 'TAP', 'TBA', 'TPA', 'TST', 'TSX', 'TXS',
'WAI'
]
registers_6800 = ['A', 'B', 'IX', 'PC', 'SP']
opcodes_68000 = [
'AND', 'ANDI', 'EOR', 'EORI', 'NOT', 'OR', 'ORI', 'CLR',
'BCHG', 'BCLR', 'BSET', 'BTST', 'EXT', 'EXTB',
'MOVE', 'MOVEA', 'MOVEM', 'MOVEP', 'MOVEQ',
'CMP', 'CMPA', 'CMPI', 'CMPM', 'CMP2',
'LEA', 'PEA', 'TAS', 'CHK',
'ADD', 'ADDA', 'ADDI', 'ADDQ', 'ADDX',
'SUB', 'SUBA', 'SUBI', 'SUBQ', 'SUBX',
'MULS', 'MULU', 'DIVS', 'DIVU', 'NEG', 'NEGX',
'ASL', 'ASR', 'LSL', 'LSR', 'ROL', 'ROR', 'ROXL', 'ROXR',
'DBCC', 'SWAP', 'TST',
'ANDB', 'ANDIB', 'EORB', 'EORIB', 'NOTB', 'ORB', 'ORIB', 'CLRB',
'BCHGB', 'BCLRB', 'BSETB', 'BTSTB', 'EXTB', 'EXTBB',
'MOVEB', 'MOVEAB', 'MOVEMB', 'MOVEPB', 'MOVEQB',
'CMPB', 'CMPAB', 'CMPIB', 'CMPMB', 'CMP2B',
'LEAB', 'PEAB', 'TASB', 'CHKB',
'ADDB', 'ADDAB', 'ADDIB', 'ADDQB', 'ADDXB',
'SUBB', 'SUBAB', 'SUBIB', 'SUBQB', 'SUBXB',
'MULSB', 'MULUB', 'DIVSB', 'DIVUB', 'NEGB', 'NEGXB',
'ASLB', 'ASRB', 'LSLB', 'LSRB', 'ROLB', 'RORB', 'ROXLB', 'ROXRB',
'DBCCB', 'SWAPB', 'TSTB',
'ANDW', 'ANDIW', 'EORW', 'EORIW', 'NOTW', 'ORW', 'ORIW', 'CLRW',
'BCHGW', 'BCLRW', 'BSETW', 'BTSTW', 'EXTW', 'EXTBW',
'MOVEW', 'MOVEAW', 'MOVEMW', 'MOVEPW', 'MOVEQW',
'CMPW', 'CMPAW', 'CMPIW', 'CMPMW', 'CMP2W',
'LEAW', 'PEAW', 'TASW', 'CHKW',
'ADDW', 'ADDAW', 'ADDIW', 'ADDQW', 'ADDXW',
'SUBW', 'SUBAW', 'SUBIW', 'SUBQW', 'SUBXW',
'MULSW', 'MULUW', 'DIVSW', 'DIVUW', 'NEGW', 'NEGXW',
'ASLW', 'ASRW', 'LSLW', 'LSRW', 'ROLW', 'RORW', 'ROXLW', 'ROXRW',
'DBCCW', 'SWAPW', 'TSTW',
'ANDL', 'ANDIL', 'EORL', 'EORIL', 'NOTL', 'ORL', 'ORIL', 'CLRL',
'BCHGL', 'BCLRL', 'BSETL', 'BTSTL', 'EXTL', 'EXTBL',
'MOVEL', 'MOVEAL', 'MOVEML', 'MOVEPL', 'MOVEQL',
'CMPL', 'CMPAL', 'CMPIL', 'CMPML', 'CMP2L',
'LEAL', 'PEAL', 'TASL', 'CHKL',
'ADDL', 'ADDAL', 'ADDIL', 'ADDQL', 'ADDXL',
'SUBL', 'SUBAL' 'SUBIL', 'SUBQL', 'SUBXL',
'MULSL', 'MULUL', 'DIVSL', 'DIVUL', 'NEGL', 'NEGXL',
'ASLL', 'ASRL', 'LSLL', 'LSRL', 'ROLL', 'RORL', 'ROXLL', 'ROXRL',
'DBCCL', 'SWAPL', 'TSTL',
'ABCD', 'NBCD', 'PACK', 'SBCD', 'UNPK',
'BSR', 'BRA', 'BT', 'BF',
'BEQ', 'BNE', 'BLS', 'BLT', 'BLE', 'BGT', 'BGE',
'BCC', 'BCS', 'BPL', 'BMI', 'BHI', 'BVC', 'BVS',
'BSRS', 'BRAS', 'BEQS', 'BNES', 'BLSS', 'BLTS', 'BLES', 'BGTS', 'BGES',
'BCCS', 'BCSS', 'BPLS', 'BMIS', 'BHIS', 'BVCS', 'BVSS',
'DBSR', 'DBRA', 'DBT', 'DBF',
'DBEQ', 'DBNE', 'DBLS', 'DBLT', 'DBLE', 'DBGT', 'DBGE',
'DBCC', 'DBCS', 'DBPL', 'DBMI', 'DBHI', 'DBVC', 'DBVS',
'JSR', 'JMP',
'TRAP', 'HALT', 'STOP',
'RTD', 'RTE', 'RTR', 'RTS',
'TRAP', 'HALT', 'STOP', 'NOP', 'MOVE16', 'EXG',
'BFCHG', 'BFCLR', 'BFEXTS', 'BFEXTU', 'BFFFO', 'BFINS', 'BFSET', 'BFTST',
'FNOP', 'FABS', 'FACOS', 'FASIN', 'FATAN', 'FCOS', 'FCOSH', 'FETOX',
'FETOXM1', 'FGETMAN', 'FINT', 'FINTRZ', 'FLOGN', 'FLOGNP1', 'FLOG10',
'FLOG2', 'FNEG', 'FSIN', 'FSINH', 'FSQRT', 'FTAN', 'FTANH',
'FTENTOX', 'FTWOTOX', 'FTST',
'DSB', 'DSW', 'DSL', 'DCB', 'DCW', 'DCL',
'AND.B', 'ANDI.B', 'EOR.B', 'EORI.B', 'NOT.B', 'OR.B', 'ORI.B', 'CLR.B',
'BCHG.B', 'BCLR.B', 'BSET.B', 'BTST.B', 'EXT.B', 'EXTB.B',
'MOVE.B', 'MOVEA.B', 'MOVEM.B', 'MOVEP.B', 'MOVEQ.B',
'CMP.B', 'CMPA.B', 'CMPI.B', 'CMPM.B', 'CMP2.B',
'LEA.B', 'PEA.B', 'TAS.B', 'CHK.B',
'ADD.B', 'ADDA.B', 'ADDI.B', 'ADDQ.B', 'ADDX.B',
'SUB.B', 'SUBA.B', 'SUBI.B', 'SUBQ.B', 'SUBX.B',
'MULS.B', 'MULU.B', 'DIVS.B', 'DIVU.B', 'NEG.B', 'NEGX.B',
'ASL.B', 'ASR.B', 'LSL.B', 'LSR.B', 'ROL.B', 'ROR.B', 'ROXL.B', 'ROXR.B',
'DBCC.B', 'SWAP.B', 'TST.B',
'AND.W', 'ANDI.W', 'EOR.W', 'EORI.W', 'NOT.W', 'OR.W', 'ORI.W', 'CLR.W',
'BCHG.W', 'BCLR.W', 'BSET.W', 'BTST.W', 'EXT.W', 'EXTB.W',
'MOVE.W', 'MOVEA.W', 'MOVEM.W', 'MOVEP.W', 'MOVEQ.W',
'CMP.W', 'CMPA.W', 'CMPI.W', 'CMPM.W', 'CMP2.W',
'LEA.W', 'PEA.W', 'TAS.W', 'CHK.W',
'ADD.W', 'ADDA.W', 'ADDI.W', 'ADDQ.W', 'ADDX.W',
'SUB.W', 'SUBA.W', 'SUBI.W', 'SUBQ.W', 'SUBX.W',
'MULS.W', 'MULU.W', 'DIVS.W', 'DIVU.W', 'NEG.W', 'NEGX.W',
'ASL.W', 'ASR.W', 'LSL.W', 'LSR.W', 'ROL.W', 'ROR.W', 'ROXL.W', 'ROXR.W',
'DBCC.W', 'SWAP.W', 'TST.W',
'AND.L', 'ANDI.L', 'EOR.L', 'EORI.L', 'NOT.L', 'OR.L', 'ORI.L', 'CLR.L',
'BCHG.L', 'BCLR.L', 'BSET.L', 'BTST.L', 'EXT.L', 'EXTB.L',
'MOVE.L', 'MOVEA.L', 'MOVEM.L', 'MOVEP.L', 'MOVEQ.L',
'CMP.L', 'CMPA.L', 'CMPI.L', 'CMPM.L', 'CMP2.L',
'LEA.L', 'PEA.L', 'TAS.L', 'CHK.L',
'ADD.L', 'ADDA.L', 'ADDI.L', 'ADDQ.L', 'ADDX.L',
'SUB.L', 'SUBA.L', 'SUBI.L', 'SUBQ.L', 'SUBX.L',
'MULS.L', 'MULU.L', 'DIVS.L', 'DIVU.L', 'NEG.L', 'NEGX.L',
'ASL.L', 'ASR.L', 'LSL.L', 'LSR.L', 'ROL.L', 'ROR.L', 'ROXL.L', 'ROXR.L',
'DBCC.L', 'SWAP.L', 'TST.L',
'BSR.S', 'BRA.S', 'BT.S', 'BF.S',
'BEQ.S', 'BNE.S', 'BLS.S', 'BLT.S', 'BLE.S', 'BGT.S', 'BGE.S',
'BCC.S', 'BCS.S', 'BPL.S', 'BMI.S', 'BHI.S', 'BVC.S', 'BVS.S',
'DS.B', 'DS.W', 'DS.L', 'DC.B', 'DC.W', 'DC.L'
]
registers_68000 = [
'D0', 'D1', 'D2', 'D3', 'D4', 'D5', 'D6', 'D7',
'A0', 'A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7',
'FP0', 'FP1', 'FP2', 'FP3', 'FP4', 'FP5', 'FP6', 'FP7',
'PC', 'SR'
]
opcodes_8080 = [
'ACI', 'ADC', 'ADD', 'ADI', 'ANA', 'ANI',
'CALL', 'CC', 'CM', 'CMA', 'CMC', 'CMP', 'CNC', 'CNZ', 'CP', 'CPE', 'CPI',
'CPO', 'CZ',
'DAA', 'DAD', 'DCR', 'DCX', 'DI',
'EI',
'HLT',
'IN', 'INR', 'INX',
'JC', 'JM', 'JMP', 'JNC', 'JNZ', 'JP', 'JPE', 'JPO', 'JZ',
'LDAX', 'LHLD', 'LXI',
'MOV', 'MVI',
'NOP',
'ORA', 'ORI', 'OUT',
'PCHL', 'POP', 'PUSH',
'RAL', 'RAR', 'RC', 'RIM', 'RLC', 'RET', 'RM', 'RNC', 'RNZ', 'RP', 'RPE',
'RPO', 'RRC', 'RST', 'RZ ',
'SBB', 'SBI', 'SHLD', 'SIM', 'SPHL', 'STA', 'STC', 'STAX', 'SUB', 'SUI',
'XCHG', 'XRA', 'XRI', 'XTHL',
]
registers_8080 = [
'A', 'B', 'C', 'D', 'E', 'H', 'L', 'M', 'PSW', 'F'
]
opcodes_z80 = [
'ADC', 'ADD', 'AND',
'BIT',
'CALL', 'CCF', 'CP', 'CPD', 'CPDR', 'CPI', 'CPIR', 'CPL',
'DAA', 'DEC', 'DI', 'DJNZ',
'EI', 'EX', 'EXX',
'HALT',
'IM', 'IN', 'INC', 'IND', 'INDR', 'INI', 'INIR',
'JP', 'JR',
'LD', 'LDD', 'LDDR', 'LDI', 'LDIR',
'NEG', 'NOP',
'OR', 'OTDR', 'OTIR', 'OUT', 'OUTD', 'OUTI',
'POP', 'PUSH',
'RES', 'RET', 'RETI', 'RETN', 'RL', 'RLA', 'RLC', 'RLCA', 'RLD',
'RR', 'RRA', 'RRC', 'RRCA', 'RRD', 'RST',
'SBC', 'SCF', 'SET', 'SLA', 'SRA', 'SRL', 'SUB',
'XOR'
]
registers_z80 = [
'A', 'B', 'C', 'D', 'E', 'H', 'L', 'F', 'AF', 'BC', 'DE', 'HL',
"A'", "B'", "C'", "D'", "E'", "H'", "L'", "AF'", "F'", "BC'", "DE'", "HL'",
'IX', 'IY', 'PSW', 'M'
]
opcodes_8086 = [
'AAA', 'AAD', 'AAM', 'AAS', 'ADC', 'ADD', 'AND',
'CALL', 'CBW', 'CLC', 'CLD', 'CLI', 'CMC', 'CMP', 'CMPS', 'CMPSB', 'CMPW', 'CMPXCHG', 'CWD',
'DAA', 'DAS', 'DEC', 'DIV',
'ESC',
'FWAIT',
'F2XM1', 'FABS', 'FADD', 'FADDP', 'FBLD', 'FBSTP', 'FCHS', 'FCLEX', 'FCOM', 'FCOMP',
'FCOMPP', 'FCOS', 'FDECSTP', 'FDISI', 'FDIV', 'FDIVP', 'FDIVR', 'FDIVRP',
'FENI', 'FFREE', 'FIADD', 'FICOM', 'FICOMP', 'FIDIV', 'FIDIVR', 'FILD',
'FIMUL', 'FINCSTP', 'FINIT', 'FIST', 'FISTP', 'FISUB', 'FISUBR', 'FLD', 'FLD1',
'FLDCW', 'FLDENV', 'FLDL2E', 'FLDL2T', 'FLDLG2', 'FLDLN2', 'FLDPI',
'FLDZ', 'FMUL', 'FMULP', 'FNCLEX', 'FNDISI', 'FNENI', 'FNINIT', 'FNOP', 'FNSAVE',
'FNSTCW', 'FNSTENV', 'FNSTSW', 'FPATAN', 'FPREM', 'FPREM1', 'FPTAN', 'FRNDINT',
'FRSTOR', 'FSAVE', 'FSCALE', 'FSETPM', 'FSIN', 'FSINCOS', 'FSQRT', 'FST', 'FSTCW',
'FSTENV', 'FSTP', 'FSTSW', 'FSUB', 'FSUBP', 'FSUBRP', 'FTST', 'FUCOM', 'FUCOMP',
'FUCOMPP', 'FXAM', 'FXCH', 'FXTRACT', 'FYL2X', 'FYL2XP1',
'HLT',
'IDIV', 'IMUL', 'IN', 'INC', 'INT', 'INTO', 'INVD', 'IRET', 'IRETD',
'JA', 'JAE', 'JB', 'JBE', 'JC', 'JCXZ', 'JE', 'JECXZ', 'JG', 'JGE', 'JL', 'JLE', 'JMP', 'JNA', 'JNAE', 'JNB', 'JNBE', 'JNC', 'JNE', 'JNG', 'JNGE', 'JNL', 'JNLE', 'JNO', 'JNP', 'JNS', 'JO', 'JP', 'JPE', 'JPO', 'JNZ', 'JS', 'JZ',
'LAHF', 'LAR', 'LDS', 'LEA', 'LES', 'LOCK', 'LODS', 'LODSB', 'LODSW', 'LOOP', 'LOOPE', 'LOOPNE', 'LOOPNZ', 'LOOPZ',
'MOV', 'MOVS', 'MOVSB', 'MOVSW', 'MUL',
'NEG', 'NOP', 'NOT',
'OR', 'OUT',
'POP', 'POPF', 'POPFD', 'PUSH', 'PUSHF', 'PUSHFD',
'RCL', 'RCR', 'REP', 'REPE', 'REPNE', 'REPNZ', 'REPZ', 'RET', 'RETF', 'ROL', 'ROR',
'SAHF', 'SAL', 'SAR', 'SBB', 'SCAS', 'SCASB', 'SCASW', 'SHL', 'SHR', 'STC', 'STD', 'STI', 'STOS', 'STOSB', 'STOSW', 'SUB',
'TEST',
'WAIT', 'WBINVD',
'XCHG', 'XLAT', 'XLATB', 'XOR',
]
registers_8086 = [
'AL', 'AH', 'BL', 'BH', 'CL', 'CH', 'DL', 'DH',
'AX', 'BX', 'CX', 'DX', 'CS', 'DS', 'SS', 'ES',
'IP', 'SI', 'DI', 'BP', 'SP', 'FLAGS'
]
opcodes_80186 = [
'BOUND',
'ENTER',
'INS',
'LEAVE',
'OUTS',
'POPA', 'POPAD', 'PUSHA', 'PUSHAD'
]
opcodes_80286 = [
'ARPL',
'CLTS',
'LGDT', 'LIDT', 'LLDT', 'LMSW', 'LSL', 'LSS',
'SGDT', 'SIDT', 'SLDT', 'SMSW', 'STR',
'VERR', 'VERW'
]
registers_80286 = [
'TR'
]
opcodes_80386 = [
'BSF', 'BSR', 'BT', 'BTC', 'BTR', 'BTS',
'CDQ', 'CWDE',
'LFS', 'LGS', 'LSS',
'MOVSX', 'MOVZX',
'SETAE', 'SETB', 'SETC', 'SETNAE', 'SETNB', 'SETNE', 'SETNZ', 'SETG', 'SETGE', 'SETL', 'SETLE', 'SETNC', 'SETNG', 'SETNGE', 'SETNL', 'SETNLE', 'SETNO', 'SETNP', 'SETNS', 'SETE', 'SETO', 'SETP', 'SETPE', 'SETPO', 'SETS', 'SETZ',
'SHLD', 'SHRD'
]
registers_80386 = [
'EAX', 'EBX', 'ECX', 'EDX', 'ESI', 'EDI', 'EBP', 'ESP',
'FS', 'GS', 'EFLAGS'
]
opcodes_80486 = [
'BSWAP',
'INVPLG'
]
opcodes_pdp8 = [
'AND', 'TAD', 'ISZ', 'DCA', 'JMS', 'JMP',
'CDF', 'CIF', 'RDF', 'RIF', 'RIB', 'RMF',
'CLA', 'CLL', 'CMA', 'CML', 'IAC', 'RAR', 'RAL', 'RTR', 'RTL', 'BSW',
'SMA', 'SZA', 'SNL', 'SPA', 'SNA', 'SZL', 'OSR', 'HLT', 'MQA', 'MQL',
'SEL', 'LCD', 'XDR', 'STR', 'SER', 'SDN', 'INTR', 'INIT',
'DILC', 'DICD', 'DISD', 'DILX', 'DILY', 'DIXY', 'DILE', 'DIRE',
'RCSF', 'RCRA', 'RCRB', 'RCNO', 'RCRC', 'RCNI', 'RCSD', 'RCSE',
'RCRD', 'RCSI', 'RCTF',
'RPE', 'RSF', 'RRB', 'RFC', 'PCE', 'PSF', 'PCF', 'PPC', 'PLS',
'KCF', 'KSF', 'KCC', 'KRS', 'KIE', 'KRB', 'TFL', 'TSF', 'TCF',
'TPC', 'TSK', 'TLS'
]
opcodes_pdp11 = [
'CLR', 'CLRB', 'COM', 'COMB', 'INC', 'INCB', 'DEC', 'DECB', 'NEG', 'NEGB',
'NOP', 'TST', 'TSTB', 'TSTSET', 'WRTLCK', 'ASR', 'ASRB', 'ASL', 'ASLB',
'ROR', 'RORB', 'ROL', 'ROLB', 'SWAB', 'ADC', 'ADCB', 'SBC', 'SBCB', 'SXT',
'MOV', 'MOVB', 'ADD', 'SUB', 'CMP', 'CMPB', 'ASH', 'ASHC',
'MUL', 'DIV', 'BIT', 'BITB', 'BIC', 'BICB', 'BIS', 'BISB',
'XOR', 'CLR', 'CLRB', 'BR', 'BNE', 'BPL', 'BEQ', 'BMI', 'BVC',
'BVS', 'BCC', 'BCS', 'BGE', 'BLT', 'BGT', 'BLE', 'SOB', 'BHI',
'BLOS', 'BHIS', 'BLO',
'JMP', 'JSR', 'RTS', 'MARK', 'EMT', 'TRAP', 'BPT', 'IOT', 'CSM',
'RTI', 'RTT', 'HALT', 'WAIT', 'RESET',
'MTPD', 'MTPI', 'MFPD', 'MTPS', 'MFPS', 'MFPT',
'CLC', 'CLV', 'CLZ', 'CLN', 'CCC', 'SEC', 'SEV', 'SEZ', 'SEN', 'SCC',
'FADD', 'FSUB', 'FMUL', 'FDIV',
'DIV', 'MUL'
]
registers_pdp11 = [
'r0', 'r1', 'r2', 'r3', 'r4', 'r5', 'r6', 'r7'
]
opcodes = []
registers = []
if processor in ['1802']:
opcodes += opcodes_1802
registers += registers_1802
if processor in ['6502']:
opcodes += opcodes_6502
registers += registers_6502
if processor in ['6800']:
opcodes += opcodes_6800
registers += registers_6800
if processor in ['68000']:
opcodes += opcodes_68000
registers += registers_68000
if processor in ['8080']:
opcodes += opcodes_8080
registers += registers_8080
if processor in ['z80']:
opcodes += opcodes_z80
registers += registers_z80
if processor in ['8086', '80186', '80286', '80386', '80486']:
opcodes += opcodes_8086
registers += registers_8086
if processor in ['80286', '80386', '80486']:
opcodes += opcodes_80186
opcodes += opcodes_80286
registers += registers_80286
if processor in ['80386', '80486']:
opcodes += opcodes_80386
registers += registers_80386
if processor in ['80486']:
opcodes += opcodes_80486
if processor in ['pdp-8']:
opcodes += opcodes_pdp8
# registers += registers_pdp8
if processor in ['pdp-11']:
opcodes += opcodes_pdp11
registers += registers_pdp11
opcode_tb = CaseInsensitiveListTokenBuilder(opcodes, 'keyword', False)
register_tb = CaseInsensitiveListTokenBuilder(registers, 'register', True)
values = ['*', '$', '.']
values_tb = CaseSensitiveListTokenBuilder(values, 'value', True)
operand_types.append('value')
invalid_token_builder = InvalidTokenBuilder()
tokenbuilders = [
newline_tb,
whitespace_tb,
stmt_separator_tb,
integer_tb,
integer_exponent_tb,
integer_1_tb,
integer_2_tb,
prefixed_integer_tb,
hex_integer_1_tb,
hex_integer_2_tb,
hex_integer_3_tb,
hex_integer_4_tb,
hash_quote_value_tb,
values_tb,
groupers_tb,
register_tb,
opcode_tb,
directive_tb,
title_directive_tb,
title_directive_2_tb,
subtitle_directive_tb,
subtitle_directive_2_tb,
subtitle_directive_3_tb,
include_directive_tb,
include_directive_2_tb,
multiline_comment_tb,
preprocessor_tb,
identifier_tb,
label_tb,
string_tb,
comment_tb,
comment_2_tb,
line_comment_star_tb,
line_comment_hash_tb,
known_operator_tb,
self.unknown_operator_tb,
invalid_token_builder
]
opcode_tokenbuilders = [
opcode_tb,
directive_tb,
title_directive_tb,
subtitle_directive_tb,
include_directive_tb,
preprocessor_tb,
invalid_token_builder
]
args_tokenbuilders = [
integer_tb,
integer_exponent_tb,
hex_integer_1_tb,
hex_integer_2_tb,
hex_integer_3_tb,
hex_integer_4_tb,
values_tb,
groupers_tb,
known_operator_tb,
register_tb,
identifier_tb,
label_tb,
string_tb,
comment_tb,
line_comment_star_tb,
line_comment_hash_tb,
self.unknown_operator_tb,
invalid_token_builder
]
tokenizer = Tokenizer(tokenbuilders)
opcode_tokenizer = Tokenizer(opcode_tokenbuilders)
args_tokenizer = Tokenizer(args_tokenbuilders)
# tokenize as free-format
tokens_free = tokenizer.tokenize(code)
tokens_free = Examiner.combine_adjacent_identical_tokens(tokens_free, 'invalid operator')
tokens_free = Examiner.combine_adjacent_identical_tokens(tokens_free, 'invalid')
tokens_free = Examiner.combine_identifier_colon(tokens_free, ['newline'], [], [])
tokens_free = Tokenizer.combine_number_and_adjacent_identifier(tokens_free)
tokens_free = Examiner.convert_values_to_operators(tokens_free, known_operators)
self.tokens = tokens_free
self.convert_asm_identifiers_to_labels()
self.convert_asm_keywords_to_operators()
self.convert_asm_keywords_to_identifiers()
self.calc_statistics()
statistics_free = self.statistics
self.statistics = {}
self.calc_confidences(operand_types, group_starts, group_mids, group_ends, None)
self.calc_line_length_confidence(code, self.max_expected_line)
confidences_free = self.confidences
self.confidences = {}
errors_free = self.errors
self.errors = []
if processor in ['pdp-8', 'pdp-11']:
# do not try space-format, it never exists for these processors
tokens_space = []
statistics_space = {}
confidences_space = {}
errors_space = []
else:
# tokenize as space-format
opcode_extras = '.&=,()+-*/'
label_leads = '.&$@#'
label_mids = '.&$#@_'
label_ends = ':'
comment_leads = '*;'
line_comment_leads = ''
use_line_id = False
tokens_space, indents = Tokenizer.tokenize_asm_code(code, tab_size, opcode_tokenizer, opcode_extras, args_tokenizer, label_leads, label_mids, label_ends, comment_leads, line_comment_leads, use_line_id)
tokens_space = Examiner.combine_adjacent_identical_tokens(tokens_space, 'invalid operator')
tokens_space = Examiner.combine_adjacent_identical_tokens(tokens_space, 'invalid')
tokens_space = Examiner.combine_identifier_colon(tokens_space, ['newline'], [], [])
tokens_space = Tokenizer.combine_number_and_adjacent_identifier(tokens_space)
tokens_space = Examiner.convert_values_to_operators(tokens_space, known_operators)
self.tokens = tokens_space
self.convert_asm_identifiers_to_labels()
self.calc_statistics()
statistics_space = self.statistics
self.statistics = {}
self.calc_confidences(operand_types, group_starts, group_mids, group_ends, indents)
self.calc_line_length_confidence(code, self.max_expected_line)
confidences_space = self.confidences
self.confidences = {}
errors_space = self.errors
self.errors = []
# compute confidence for free-format and spaced-format
confidence_free = 1.0
if len(confidences_free) == 0:
confidence_free = 0.0
else:
for key in confidences_free:
factor = confidences_free[key]
confidence_free *= factor
confidence_space = 1.0
if len(confidences_space) == 0:
confidence_space = 0.0
else:
for key in confidences_space:
factor = confidences_space[key]
confidence_space *= factor
# select the better of free-format and spaced-format
if confidence_space > confidence_free:
self.tokens = tokens_space
self.statistics = statistics_space
self.confidences = confidences_space
self.errors = errors_space
else:
self.tokens = tokens_free
self.statistics = statistics_free
self.confidences = confidences_free
self.errors = errors_free
# combine numbers followed by identfiers to identifiers
@staticmethod
def combine_number_and_adjacent_identifier(tokens):
new_list = []
new_token = None
for token in tokens:
if token.group == 'identifier' and \
new_token is not None and new_token.group == 'number':
new_token = Token(new_token.text + token.text, 'identifier', True)
else:
if new_token is not None:
new_list.append(new_token)
new_token = token
if new_token is not None:
new_list.append(new_token)
return new_list
| 34.060268 | 233 | 0.543614 | 29,762 | 0.975164 | 0 | 0 | 1,408 | 0.046134 | 0 | 0 | 11,724 | 0.384142 |