| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
cogs/emojis.py
|
sah-py/twitch-bot-py
| 0
|
12780351
|
import discord
import requests
import json
import asyncio
from os import environ
from discord.ext import commands
from io import StringIO
from urllib.request import urlopen
from twitch import TwitchClient
class Emojis:
def __init__(self, bot):
self.bot = bot
self.messages = []
self.client = TwitchClient(
client_id= environ['twitch_key']
)
self.bot.loop.create_task(self.turn_off_buttons())
@commands.command(pass_context=True, name='emojis', aliases=['e', 'emoji'])
async def _emojis(self, ctx, name, mode='chat'):
try:
users = self.client.users.translate_usernames_to_ids([name])
user = users[0]
except Exception:
await self.bot.send_message(ctx.message.channel, 'Can\'t find user. Use ``t?search (query)`` to find streams, users and more!')
return
id = user['id']
link = f'https://api.twitchemotes.com/api/v4/channels/{id}'
response = requests.get(link)
info = json.loads(response.text)
if info == {"error": "Channel not found"}:
await self.bot.send_message(ctx.message.channel, 'Channel not found')
return
#Generate dict {'EMOJI NAME': EMOJI_IMAGE_LINK, ...}
mode = mode.lower()
if mode == 'chat':
emojis = {}
for item in info['emotes']:
emojis[item['code']] = f'https://static-cdn.jtvnw.net/emoticons/v1/{item["id"]}/4.0'
elif mode == 'sub':
emojis = await self.get_emojis_links(info['channel_name'], info['subscriber_badges'])
if emojis is None:
await self.bot.send_message(ctx.message.channel, 'Subscriber emotes not found')
return
elif mode == 'bits':
emojis = await self.get_emojis_links(info['channel_name'], info['bits_badges'])
if emojis is None:
await self.bot.send_message(ctx.message.channel, 'Cheer emotes not found')
return
else:
await self.bot.send_message(ctx.message.channel, 'Unknown mode')
return
#if there are more than 50 emotes
while len(emojis) > 50:
title = 'Error'
description = f'''
Too many emojis. Reply with the ones you want to add, e.g. 6-11, 19, 20, 22-50
*6-11 means emojis 6, 7, 8, 9, 10 and 11*
'''
embed = discord.Embed(title=title, description=description, color=0x6441A4)
await self.bot.send_message(ctx.message.channel, embed=embed)
answer = await self.bot.wait_for_message(author=ctx.message.author)
text = answer.content.replace(' ', '')
emojis_splited = []
for item in text.split(','):
if '-' in item:
first_num = int(item[:item.find('-')])
end_num = int(item[item.find('-')+1:])
for num in range(first_num, end_num+1):
#subtract 1 to convert the user's 1-based numbers (1-50) to 0-based indices (0-49)
emojis_splited.append(num-1)
else:
#subtract 1 to convert the user's 1-based number to a 0-based index
emojis_splited.append(int(item)-1)
n=0
for key in emojis.copy().keys():
if n not in emojis_splited:
del emojis[key]
n+=1
title = 'Warning'
description = f'Do you really want to add ``{len(emojis)}`` **{user["display_name"]}\'s** emojis to this server?'
embed = discord.Embed(title=title, description=description, color=0x6441A4)
message = await self.bot.send_message(ctx.message.channel, embed=embed)
for emoji in ['❌', '✅']:
await self.bot.add_reaction(message, emoji)
answer = await self.bot.wait_for_reaction(['❌', '✅'], message=message, user=ctx.message.author)
#Get emoji answers
if answer.reaction.emoji == '❌':
embed = discord.Embed(title=title, description='Canceled.', color=0x6441A4)
await self.bot.edit_message(message, embed=embed)
for emoji in ['❌', '✅']:
await self.bot.remove_reaction(message, emoji, self.bot.user)
elif answer.reaction.emoji == '✅':
#Send loading message & remove reaction
description='''
Loading...
*(if loading seems infinite, wait ~30 min - the Discord API may fail to load an emoji shortly after it was removed)*'''
embed = discord.Embed(title=title, description=description, color=0x6441A4)
await self.bot.edit_message(message, embed=embed)
for emoji in ['❌', '✅']:
await self.bot.remove_reaction(message, emoji, self.bot.user)
#Add emojis to server
n=0
for key in emojis.copy().keys():
try:
image = urlopen(emojis[key]).read()
await self.bot.create_custom_emoji(server=ctx.message.server, name=key, image=image)
except Exception as e:
args = e.args
if args[0] == 'BAD REQUEST (status code: 400): Maximum number of emojis reached (50)':
description = 'Maximum number of emojis reached'
embed = discord.Embed(title=title, description=description, color=0x6441A4)
await self.bot.edit_message(message, embed=embed)
return
else:
del emojis[key]
await self.bot.send_message(ctx.message.channel, 'Can\'t load emoji')
#show percent
n+=1
percent = int(n / len(emojis) * 100)
description = f'Loading... | ``{percent}% / 100%``'
embed = discord.Embed(title=title, description=description, color=0x6441A4)
await self.bot.edit_message(message, embed=embed)
#Swap links to emojis
for key in emojis.keys():
for emoji in ctx.message.server.emojis:
if key == emoji.name:
emojis[key] = str(emoji)
#Send done message
embed = discord.Embed(title=title, description='Added!', color=0x6441A4)
await self.bot.edit_message(message, embed=embed)
#Create & send emojis list
max_page = len(emojis) // 5
if len(emojis) % 5 != 0:
max_page = len(emojis) // 5 + 1
embed, buttons = await self.generate_emoji_list(emojis, 1, max_page)
message = await self.bot.send_message(ctx.message.channel, embed=embed)
if buttons:
for emoji in ['⬅','➡']:
await self.bot.add_reaction(message, emoji)
self.messages.append({
'message': message,
'info': emojis,
'page': 1,
'max_page': max_page,
'emojis': ['⬅','➡']
})
async def generate_emoji_list(self, emojis, page, max_page):
description = ''
if len(emojis) <= 5:
buttons = False
title = f'Emojis | ``{len(emojis)}``'
for key in emojis.keys():
description+= f'{key} | {emojis[key]}\n'
else:
buttons = True
title = f'Emojis | {page}/{max_page}'
emojis_keys = list(emojis.keys())
for key in emojis_keys[(page-1)*5:page*5]:
description+= f'{key} | {emojis[key]}\n'
embed = discord.Embed(title=title, description=description, color=0x6441A4)
return embed, buttons
async def get_emojis_links(self, name, badges):
emojis = {}
if badges is not None:
for key in badges.keys():
title = badges[key]['title'].lower().replace(' ', '_').replace('-','_').replace('.','_')
emoji_name = f'{name}_{title}'
emojis[emoji_name] = badges[key]['image_url_4x']
else:
return None
return emojis
async def on_reaction_add(self, reaction, user):
for message_info in self.messages:
if reaction.message.timestamp == message_info['message'].timestamp:
if user != self.bot.user:
await self.bot.remove_reaction(reaction.message, reaction.emoji, user)
if reaction.emoji in message_info['emojis']:
if reaction.emoji == '➡':
message_info['page'] += 1
if message_info['page'] > message_info['max_page']:
message_info['page'] = 1
embed, buttons = await self.generate_emoji_list(message_info['info'], message_info['page'], message_info['max_page'])
await self.bot.edit_message(message_info['message'], embed=embed)
if reaction.emoji == '⬅':
message_info['page'] -= 1
if message_info['page'] < 1:
message_info['page'] = message_info['max_page']
embed, buttons = await self.generate_emoji_list(message_info['info'], message_info['page'], message_info['max_page'])
await self.bot.edit_message(message_info['message'], embed=embed)
async def turn_off_buttons(self):
await self.bot.wait_until_ready()
while not self.bot.is_closed:
if len(self.messages) > 10:
self.messages = [self.messages.pop()]
await self.bot.send_message(
self.messages[0]['message'].channel,
'Old emoji-buttons no longer work'
)
await asyncio.sleep(60)
def setup(bot):
bot.add_cog(Emojis(bot))
| 2.640625
| 3
|
alpha_vantage.py
|
bubelov/market-plots
| 3
|
12780352
|
from dotenv import load_dotenv
from os.path import join, dirname
from dateutil import parser
from enum import Enum
from typing import List
import os
import urllib.request as url_request
import json
from dataclasses import dataclass
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
API_KEY = os.getenv('ALPHA_VANTAGE_KEY')
REQUEST_TIMEOUT_SECONDS = 20
class Interval(Enum):
DAILY = 'DAILY'
WEEKLY = 'WEEKLY'
MONTHLY = 'MONTHLY'
@dataclass
class AssetPrice:
date: str
price: float
def get_stock_returns_history(symbol: str,
interval: Interval) -> List[float]:
price_history = get_stock_price_history(symbol, interval, adjusted=True)
returns: List[float] = []
prev_price = None
for item in price_history:
if prev_price is not None:
returns.append((item.price - prev_price) / prev_price)
prev_price = item.price
return returns
def get_stock_price_history(symbol: str,
interval: Interval,
adjusted=False) -> List[AssetPrice]:
url = url_for_function('TIME_SERIES_%s' % interval.value)
if adjusted:
url += '_ADJUSTED'
url += '&apikey=%s' % API_KEY
url += '&symbol=%s' % symbol
url += '&outputsize=full'
response = url_request.urlopen(url, timeout=REQUEST_TIMEOUT_SECONDS)
data = json.load(response)
prices_json = data[list(data.keys())[1]]
field_name = '4. close' if not adjusted else '5. adjusted close'
prices: List[AssetPrice] = []
for k, v in sorted(prices_json.items()):
prices.append(AssetPrice(date=parser.parse(k),
price=float(v[field_name])))
return prices
def get_crypto_returns_history(currency: str, interval: Interval):
_, prices = get_crypto_price_history(currency, interval)
returns = []
prev_price = None
for price in prices:
if prev_price is not None:
returns.append(((price / prev_price) - 1.0) * 100.0)
prev_price = price
return returns
def get_crypto_price_history(currency: str, interval: Interval):
url = url_for_function('DIGITAL_CURRENCY_%s' % interval.value)
url += '&apikey=%s' % API_KEY
url += '&symbol=%s' % currency
url += '&market=%s' % 'USD'
response = url_request.urlopen(url, timeout=REQUEST_TIMEOUT_SECONDS)
data = json.load(response)
_, dates_key = data.keys()
dates_data = data[dates_key]
dates = []
prices = []
for k, v in sorted(dates_data.items()):
dates.append(parser.parse(k))
prices.append(float(v['4a. close (USD)']))
return (dates, prices)
def url_for_function(function: str):
return f'https://www.alphavantage.co/query?function={function}'
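# --- usage sketch (illustrative addition, not part of the original module) ---
# Assumes a valid ALPHA_VANTAGE_KEY in the .env file and network access;
# 'MSFT' is an arbitrary example symbol.
if __name__ == '__main__':
    prices = get_stock_price_history('MSFT', Interval.MONTHLY, adjusted=True)
    returns = get_stock_returns_history('MSFT', Interval.MONTHLY)
    print(f'{len(prices)} monthly prices, latest adjusted close: {prices[-1].price}')
    print(f'mean monthly return: {sum(returns) / len(returns):.4f}')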
| 2.546875
| 3
|
AlgoMethod/full_search/double_array_full_serach/01.py
|
Nishi05/Competitive-programming
| 0
|
12780353
|
import math
def isprime(x):
if x == 2:
return 1
if x < 2 or x % 2 == 0:
return 0
i = 3
while i <= math.sqrt(x):
if x % i == 0:
return 0
i += 2
return 1
n = int(input())
lst = list(map(int, input().split()))
print(sum([isprime(i) for i in lst]))
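# Example (illustrative): for the input below the script prints 3,
# since 2, 3 and 5 are prime while 4 and 6 are not.
#   5
#   2 3 4 5 6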
| 3.859375
| 4
|
ibsng/handler/user/change_status.py
|
ParspooyeshFanavar/pyibsng
| 6
|
12780354
|
"""Change user status API method."""
from ibsng.handler.handler import Handler
class changeStatus(Handler):
"""Change user status method class."""
def control(self):
"""Validate inputs after setup method.
:return: None
:rtype: None
"""
self.is_valid(self.user_id, int)
self.is_valid(self.status, str)
def setup(self, user_id, status):
"""Setup required parameters.
:param int user_id: ibsng user id
:param str status: new user status (Package, Recharged,
Temporary Extended)
:return: None
:rtype: None
"""
self.user_id = user_id
self.status = status
| 2.8125
| 3
|
record.py
|
kpgx/sensor_Tag
| 0
|
12780355
|
<filename>record.py
import time
import os
from threading import Thread
from bluepy.btle import BTLEException
from bluepy.sensortag import SensorTag
IR_TEMP = "ir_temp"
ACCELEROMETER = "accelerometer"
HUMIDITY = "humidity"
MAGNETOMETER = "magnetometer"
BAROMETER = "barometer"
GYROSCOPE = "gyroscope"
BATTERY = "battery"
LIGHT = "light"
DEFINED_SENSORS = [IR_TEMP, ACCELEROMETER, HUMIDITY, MAGNETOMETER, BAROMETER, GYROSCOPE, BATTERY, LIGHT]
INTERESTED_SENSORS = [LIGHT, BATTERY]
OUT_FILE = "lux.csv"
TIME_BETWEEN_READS = 5
TIME_BETWEEN_WRITES = 1
TIME_BETWEEN_RETRY = 5
SENSOR_TAG_LIST = [
{
"ble_mac": "54:6C:0E:53:45:B7",
"label": "a"
},
{
"ble_mac": "54:6C:0E:53:3B:0A",
"label": "b"
},
{
"ble_mac": "54:6C:0E:53:46:44",
"label": "c"
},
{
"ble_mac": "54:6C:0E:53:3F:77",
"label": "d"
},
{
"ble_mac": "54:6C:0E:78:BE:82",
"label": "e"
},
{
"ble_mac": "F0:F8:F2:86:31:86",
"label": "f"
},
]
LUX_READINGS = []
def enable_sensors(tag, sensor_list):
if IR_TEMP in sensor_list:
tag.IRtemperature.enable()
if ACCELEROMETER in sensor_list:
tag.accelerometer.enable()
if HUMIDITY in sensor_list:
tag.humidity.enable()
if MAGNETOMETER in sensor_list:
tag.magnetometer.enable()
if BAROMETER in sensor_list:
tag.barometer.enable()
if GYROSCOPE in sensor_list:
tag.gyroscope.enable()
if LIGHT in sensor_list:
tag.lightmeter.enable()
if BATTERY in sensor_list:
tag.battery.enable()
# Some sensors (e.g., temperature, accelerometer) need some time to initialize.
# Without a short wait after enabling a sensor, the first value read might be empty or incorrect.
time.sleep(1.0)
def disable_sensors(tag, sensor_list):
"""Disable sensors to improve battery life."""
if IR_TEMP in sensor_list:
tag.IRtemperature.disable()
if ACCELEROMETER in sensor_list:
tag.accelerometer.disable()
if HUMIDITY in sensor_list:
tag.humidity.disable()
if MAGNETOMETER in sensor_list:
tag.magnetometer.disable()
if BAROMETER in sensor_list:
tag.barometer.disable()
if GYROSCOPE in sensor_list:
tag.gyroscope.disable()
if LIGHT in sensor_list:
tag.lightmeter.disable()
if BATTERY in sensor_list:
tag.battery.disable()
def get_readings(tag, sensor_list):
"""Get sensor readings and collate them in a dictionary."""
try:
enable_sensors(tag, sensor_list)
readings = {}
timestamp = int(time.time())
if IR_TEMP in sensor_list:
readings["ir_temp"], readings["ir"] = tag.IRtemperature.read()
if ACCELEROMETER in sensor_list:
readings["x_accel"], readings["y_accel"], readings["z_accel"] = tag.accelerometer.read()
if HUMIDITY in sensor_list:
readings["humidity_temp"], readings["humidity"] = tag.humidity.read()
if MAGNETOMETER in sensor_list:
readings["x_magnet"], readings["y_magnet"], readings["z_magnet"] = tag.magnetometer.read()
if BAROMETER in sensor_list:
readings["baro_temp"], readings["pressure"] = tag.barometer.read()
if GYROSCOPE in sensor_list:
readings["x_gyro"], readings["y_gyro"], readings["z_gyro"] = tag.gyroscope.read()
if LIGHT in sensor_list:
readings["light"] = tag.lightmeter.read()
if BATTERY in sensor_list:
readings["battery"] = tag.battery.read()
disable_sensors(tag, sensor_list)
# round to 2 decimal places for all readings
readings = {key: round(value, 2) for key, value in readings.items()}
readings["timestamp"] = timestamp
return readings
except BTLEException as e:
print("Unable to take sensor readings.")
print(e)
return {}
def get_new_tag_reference(ble_mac, label):
print(ble_mac, label, "re-connecting...")
tag = None
while not tag:
try:
tag = SensorTag(ble_mac)
except Exception as e:
print(ble_mac, label, str(e))
print("will retry in %d seconds"%TIME_BETWEEN_RETRY)
time.sleep(TIME_BETWEEN_RETRY)
print(ble_mac, label, "re-connected")
return tag
def collect_lux_readings(label, ble_mac):
print(ble_mac, label, "starting collection thread")
print(ble_mac, label, "connecting...")
tag = None
while not tag:
try:
tag = SensorTag(ble_mac)
except Exception as e:
print(ble_mac, label, str(e))
print("will retry in %d seconds" % TIME_BETWEEN_RETRY)
time.sleep(TIME_BETWEEN_RETRY)
print(ble_mac, label, "connected")
while 1:
timestamp = TIME_BETWEEN_READS+1
while timestamp % TIME_BETWEEN_READS != 0: # to keep readings in sync with the other recordings
time.sleep(0.5)
timestamp = int(time.time())
timestamp = timestamp + 1 # to compensate for the time the sensors need to stabilize after being turned on
readings = get_readings(tag, INTERESTED_SENSORS)
if not readings:
tag = get_new_tag_reference(ble_mac, label)
continue
readings["label"] = label
LUX_READINGS.append(readings)
def process_readings():
print("starting processing thread")
while 1:
current_records_number = len(LUX_READINGS)
if current_records_number > 0:
if not os.path.isfile(OUT_FILE):
create_csv_file_with_header(OUT_FILE, sorted(LUX_READINGS[0]))
i = 0
with open(OUT_FILE, 'a') as f:
while i < current_records_number:
values = []
readings = LUX_READINGS.pop()
with open(OUT_FILE, "a") as f:
for k in sorted(readings):
values.append(readings[k])
f.write(",".join([str(x) for x in values]) + "\n")
i += 1
time.sleep(TIME_BETWEEN_WRITES)
def create_csv_file_with_header(file_name, header):
header_line = ','.join(header)
print("creating file with header,", header)
with open(file_name, 'w') as f:
f.write(header_line + '\n')
def main():
start_time = int(time.time())
print('init time', start_time)
for sensor_tag in SENSOR_TAG_LIST:
Thread(target=collect_lux_readings, args=(sensor_tag["label"], sensor_tag["ble_mac"])).start()
time.sleep(1)
process_readings()
if __name__ == "__main__":
main()
| 2.5625
| 3
|
vk_bot/app.py
|
alexeyqu/2bots
| 0
|
12780356
|
<filename>vk_bot/app.py
#! /usr/bin/python3
from flask import Flask, request, json
from settings import token, confirmation_token
import vk
from time import sleep
app = Flask(__name__)
@app.route('/')
def hello_world():
return 'Hello from Flask!'
@app.route('/', methods=['POST'])
def processing():
#Unpack the JSON from the incoming POST request
data = json.loads(request.data)
#VK always includes a 'type' field in its requests
if 'type' not in data.keys():
return 'not vk'
if data['type'] == 'confirmation':
return confirmation_token
elif data['type'] == 'message_new':
session = vk.Session()
api = vk.API(session, v=5.80)
chat_id = data['object']['peer_id'] - 2000000000
message_text = data['object']['text']
api.messages.send(access_token=token, chat_id=str(chat_id), message=message_text)
# Report that processing completed successfully
return 'ok'
session = vk.Session()
api = vk.API(session, v=5.80)
messages = api.messages.get(access_token=token, count=1)
last = messages['items'][0]['id']
while True:
try:
messages = api.messages.get(access_token=token, last_message_id=last)
except Exception as e:
print(e)
sleep(4)
continue
if not messages['items']: # If there are no new messages
sleep(4)
continue
last = messages['items'][0]['id']
for message in messages['items']:
chat_id = message['peer_id'] - 2000000000
message_text = message['text']
api.messages.send(access_token=token, chat_id=str(chat_id), message=message_text)
| 2.328125
| 2
|
python/tests/test_product_service.py
|
gregorriegler/ValidateAndAddProduct-Refactoring-Kata
| 12
|
12780357
|
from approvaltests import verify
from database import DatabaseAccess
from product_service import validate_and_add
from response import ProductFormData
class FakeDatabase(DatabaseAccess):
def __init__(self):
self.product = None
def store_product(self, product):
self.product = product
return 1
def test_validate_and_add():
# Arrange
product_data = ProductFormData("Sample product", "Lipstick", 5, 10, False)
db = FakeDatabase()
# Act
response = validate_and_add(product_data, db)
# Assert
response_and_product = f"{response} {db.product}"
verify(response_and_product)
| 2.609375
| 3
|
src/dirbs/cli/listgen.py
|
a-wakeel/DIRBS-Core
| 19
|
12780358
|
<reponame>a-wakeel/DIRBS-Core<filename>src/dirbs/cli/listgen.py
"""
DIRBS CLI for list generation (Blacklist, Exception, Notification). Installed as a dirbs-listgen console script.
Copyright (c) 2018-2021 Qualcomm Technologies, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the
limitations in the disclaimer below) provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
- Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the distribution.
- Neither the name of Qualcomm Technologies, Inc. nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
- The origin of this software must not be misrepresented; you must not claim that you wrote the original software.
If you use this software in a product, an acknowledgment is required by displaying the trademark/logo as per the
details provided here: https://www.qualcomm.com/documents/dirbs-logo-and-brand-guidelines
- Altered source versions must be plainly marked as such, and must not be misrepresented as being the original
software.
- This notice may not be removed or altered from any source distribution.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY
THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import click
import dirbs.cli.common as common
from dirbs.listgen import ListsGenerator
@click.command()
@common.setup_initial_logging
@click.version_option()
@common.parse_verbosity_option
@common.parse_db_options
@common.parse_statsd_options
@common.parse_multiprocessing_options
@click.option('--curr-date',
help='DANGEROUS: Sets current date in YYYYMMDD format for testing. By default, uses '
'system current date.',
callback=common.validate_date)
@click.option('--no-full-lists',
is_flag=True,
help='If set, disable outputting full lists as CSV for a performance improvement.')
@click.option('--no-cleanup',
is_flag=True,
help='If set, intermediate tables used to calculate lists will not be deleted so that they can be '
'inspected.')
@click.option('--base', type=int, default=-1, help='If set, will use this run ID as the base for the delta CSV lists.')
@click.option('--disable-sanity-checks', is_flag=True,
help='If set, sanity checks on list generation will be disabled (might cause large delta generation).')
@click.option('--conditions',
help='By default, dirbs-listgen generates lists for all blocking conditions. Specify a comma-separated '
'list of blocking condition names if you wish to generate lists only for those conditions. The '
'condition name corresponds to the label parameter of the condition in the DIRBS configuration.',
callback=common.validate_blocking_conditions,
default=None)
@click.argument('output_dir',
type=click.Path(exists=True, file_okay=False, writable=True))
@click.pass_context
@common.unhandled_exception_handler
@common.configure_logging
@common.cli_wrapper(command='dirbs-listgen', required_role='dirbs_core_listgen')
def cli(ctx, config, statsd, logger, run_id, conn, metadata_conn, command, metrics_root, metrics_run_root,
curr_date, no_full_lists, no_cleanup, base, disable_sanity_checks, output_dir, conditions):
"""DIRBS script to output CSV lists (blacklist, exception, notification) for the current classification state."""
if curr_date is not None:
logger.warning('*************************************************************************')
logger.warning('WARNING: --curr-date option passed to dirbs-listgen')
logger.warning('*************************************************************************')
logger.warning('')
logger.warning('This should not be done in a production DIRBS deployment for the following reasons:')
logger.warning('')
logger.warning('1. Current date determines which of the blacklist or the notifications list a classified')
logger.warning(' IMEI ends up on. If --curr-date is set to a date in the future, it is possible that ')
logger.warning(' classified IMEIs might erroneously end up on the blacklist before their grace period has')
logger.warning(' expired. If set to the past, blacklisted IMEIs will potentially be considered to be')
logger.warning(' in their grace period again and be re-notified.')
logger.warning('2. Because changing the current date can affect whether IMEIs are on the blacklist vs.')
logger.warning(' the notifications lists, this can produce large, invalid delta files in the lists.')
logger.warning('')
list_generator = ListsGenerator(config=config, logger=logger, run_id=run_id, conn=conn,
metadata_conn=metadata_conn, curr_date=curr_date, no_full_lists=no_full_lists,
no_cleanup=no_cleanup, base_run_id=base, conditions=conditions,
disable_sanity_checks=disable_sanity_checks, output_dir=output_dir)
list_generator.generate_lists()
| 1.476563
| 1
|
project/Lagou/Analyzer.py
|
zhengbomo/python_practice
| 2
|
12780359
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
from LagouDb import LagouDb
class Analyzer(object):
def __init__(self):
self.db = LagouDb()
# Count the most popular jobs
@staticmethod
def get_popular_jobs(since=None):
if since:
pass
else:
pass
# Salary statistics for a position across different cities
def get_salary_in_city(self, key, count, mincount=10):
result = self.db.salary_in_city_by_key(key, count, mincount)
kv = {}
for i in result:
if i['count'] >= 5:
# skip cities with fewer than 5 postings
k = '{0} ({1})'.format(i['city'], i['count'])
kv[k] = i['salary']
return kv
# Jobs with the highest salaries
def get_high_salary_jobs(self, city, count, mincount=10):
result = self.db.high_salary(city, count, mincount=mincount)
kv = {}
for i in result:
k = '{0} ({1})'.format(i['key'], i['count'])
kv[k] = i['salary']
return kv
# Share of search results per keyword
def key_persent(self, city, count):
if city:
result = self.db.key_persent_for_city(city, count)
else:
result = self.db.key_persent(count)
kv = {}
for i in result:
k = '{0} ({1})'.format(i['key'], i['count'])
kv[k] = i['count']
return kv
| 3.109375
| 3
|
contrib/frontends/django/nntpchan/nntpchan/frontend/templatetags/chanup.py
|
majestrate/nntpchan
| 233
|
12780360
|
<gh_stars>100-1000
from django import template
from django.template.defaultfilters import stringfilter
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
from nntpchan.frontend.models import Newsgroup, Post
import re
from urllib.parse import urlparse
from html import unescape
register = template.Library()
re_postcite = re.compile(r'>> ?([0-9a-fA-F]+)')
re_boardlink = re.compile(r'>>> ?/([a-zA-Z0-9\.]+[a-zA-Z0-9])/')
re_redtext = re.compile(r'== ?(.+) ?==')
re_psytext = re.compile(r'@@ ?(.+) ?@@')
def greentext(text, esc):
return_text = ''
f = False
for line in text.split('\n'):
line = line.strip()
if len(line) < 2:
continue
if line[0] == '>' and line[1] != '>':
return_text += '<span class="greentext">%s </span>' % esc ( line ) + '\n'
f = True
else:
return_text += esc(line) + '\n'
return return_text, f
def blocktext(text, esc, delim='', css='', tag='span'):
parts = text.split(delim)
f = False
if len(parts) > 1:
parts.reverse()
return_text = ''
while len(parts) > 0:
return_text += esc(parts.pop())
if len(parts) > 0:
f = True
return_text += '<{} class="{}">%s</{}>'.format(tag,css,tag) % esc(parts.pop())
return return_text, f
else:
return text, f
redtext = lambda t, e : blocktext(t, e, '==', 'redtext')
psytext = lambda t, e : blocktext(t, e, '@@', 'psy')
codeblock = lambda t, e : blocktext(t, e, '[code]', 'code', 'pre')
def postcite(text, esc):
return_text = ''
filtered = False
for line in text.split('\n'):
for word in line.split(' '):
match = re_postcite.match(unescape(word))
if match:
posthash = match.groups()[0]
posts = Post.objects.filter(posthash__startswith=posthash)
if len(posts) > 0:
filtered = True
return_text += '<a href="%s" class="postcite">>>%s</a> ' % ( posts[0].get_absolute_url(), posthash)
else:
return_text += '<span class="greentext">>>%s</span> ' % match.string
elif filtered:
return_text += word + ' '
else:
return_text += esc(word) + ' '
return_text += '\n'
return return_text, filtered
def boardlink(text, esc):
return_text = ''
filtered = False
for line in text.split('\n'):
for word in line.split(' '):
match = re_boardlink.match(unescape(word))
if match:
name = match.groups()[0]
group = Newsgroup.objects.filter(name=name)
if len(group) > 0:
filtered = True
return_text += '<a href="%s" class="boardlink">%s</a> ' % ( group[0].get_absolute_url(), esc(match.string ) )
else:
return_text += '<span class="greentext">%s</span> ' % esc (match.string)
else:
return_text += esc(word) + ' '
return_text += '\n'
return return_text, filtered
def urlify(text, esc):
return_text = ''
filtered = False
for line in text.split('\n'):
for word in line.split(' '):
u = urlparse(word)
if u.scheme != '' and u.netloc != '':
return_text += '<a href="%s">%s</a> ' % ( u.geturl(), esc(word) )
filtered = True
else:
return_text += esc(word) + ' '
return_text += '\n'
return return_text, filtered
line_funcs = [
greentext,
redtext,
urlify,
psytext,
codeblock,
postcite,
boardlink,
]
@register.filter(needs_autoescape=True, name='memepost')
def memepost(text, autoescape=True):
text, _ = line_funcs[0](text, conditional_escape)
for f in line_funcs[1:]:
text, _ = f(text, lambda x : x)
return mark_safe(text)
@register.filter(name='truncate')
@stringfilter
def truncate(text, truncate=500):
if len(text) > truncate:
return text[:truncate] + '...'
return text
| 2.21875
| 2
|
data/__init__.py
|
LeileiCao/SFD_Pytorch
| 1
|
12780361
|
<filename>data/__init__.py
from .wider_face import detection_collate, FACE_CLASSES, FACEDetection, FACEAnnotationTransform
from .data_augment import *
from .config import *
| 1.226563
| 1
|
lib/python2.7/site-packages/tdl/queue/abstractions/response/fatal_error_response.py
|
DPNT-Sourcecode/CHK-uimw01
| 0
|
12780362
|
from tdl.queue.actions.stop_action import StopAction
class FatalErrorResponse:
def __init__(self, message):
self._message = message
self.client_action = StopAction
def get_audit_text(self):
return 'error = "{0}"'.format(self._message)
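# --- usage sketch (illustrative addition, not part of the original module) ---
# Constructing a response and rendering its audit text; the message is an
# arbitrary example value.
if __name__ == '__main__':
    response = FatalErrorResponse('division by zero')
    print(response.get_audit_text())  # error = "division by zero"
    print(response.client_action)     # the StopAction class from tdl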
| 2.328125
| 2
|
explore/tests/test_ContCont.py
|
idc9/explore
| 0
|
12780363
|
<gh_stars>0
import numpy as np
import pandas as pd
from itertools import product
from explore.ContCont import ContCont
def data_iter():
n = 20
a = np.random.normal(size=n)
b = np.random.normal(size=n)
yield a, b
a = pd.Series(a, index=np.arange(n).astype(str))
b = pd.Series(b, index=np.arange(n).astype(str))
yield a, b
a.name = 'a'
b.name = 'b'
yield a, b
a.iloc[0] = np.nan
yield a, b
def settings_iter():
settings = {'alpha': 0.05}
for measure in ['pearson', 'spearman']:
settings['measure'] = measure
yield settings
def test_settings():
"""
Makes sure different settings of ContCont run.
"""
for (a, b), settings in product(data_iter(),
settings_iter()):
test = ContCont(**settings)
test = test.fit(a, b)
test.plot()
assert True
test.plow_kws = {'standardize': True}
test.plot()
assert True
| 2.46875
| 2
|
data_as_code/__main__.py
|
Mikuana/data_as_code
| 2
|
12780364
|
<reponame>Mikuana/data_as_code
from data_as_code._commands import menu
if __name__ == '__main__':
menu()
| 1.164063
| 1
|
test/test_extract.py
|
ta-assistant/Admin-CLI
| 1
|
12780365
|
<filename>test/test_extract.py
import unittest
import os,sys,inspect
import zipfile
import json
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
from lib.file_management.file_management_lib import FileEditor, DirManagement
from lib.file_management import extract
class TestExtract(unittest.TestCase):
def setUp(self) -> None:
self.path_ta = os.path.join(currentdir,"ta")
DirManagement.create_dir(self.path_ta)
FileEditor.create_file(self.path_ta,"draft.json")
path_draft = os.path.join(self.path_ta,"draft.json")
self.draft = {
"fileDraft": "{student_id}_{name}_{ex}.zip",
"outputDraft": [
"student_id",
"name",
"ex",
"score1",
"score2",
"comment"
]
}
with open(path_draft,"r+") as file:
json.dump(self.draft,file)
file.close()
create_file = FileEditor().create_file
create_file(currentdir,"test_1.txt")
create_file(currentdir,"test_2.txt")
self.path_target = os.path.join(currentdir,"631055555_hi_ex1.zip")
with zipfile.ZipFile(self.path_target,"w") as my_zip:
text_1 = os.path.join(currentdir,"test_1.txt")
text_2 = os.path.join(currentdir,"test_2.txt")
my_zip.write(text_1,compress_type=zipfile.ZIP_DEFLATED)
my_zip.write(text_2,compress_type=zipfile.ZIP_DEFLATED)
my_zip.close()
FileEditor.delete_file(currentdir,"test_1.txt")
FileEditor.delete_file(currentdir,"test_2.txt")
return super().setUp()
def test_extract(self):
extract.unzipfile(currentdir,self.draft["fileDraft"])
listfile = os.listdir(currentdir)
self.assertIn("631055555_hi_ex1.zip",listfile)
def tearDown(self) -> None:
DirManagement.remove_dir(self.path_ta)
FileEditor.delete_file(currentdir,"631055555_hi_ex1.zip")
path_folder = os.path.join(currentdir,"631055555_hi_ex1")
DirManagement.remove_dir(path_folder)
return super().tearDown()
if __name__ == '__main__':
unittest.main()
| 2.921875
| 3
|
random_search/synthetic_environment.py
|
shercklo/LMRS
| 38
|
12780366
|
<filename>random_search/synthetic_environment.py<gh_stars>10-100
import torch
import torch.nn as nn
import torch.nn.functional as F
class SyntheticFunction(nn.Module):
def __init__(self, input_dim, hidden_dim, output_dim, noise_level):
super(SyntheticFunction, self).__init__()
self.fc1 = nn.Linear(input_dim, input_dim)
self.fc2 = nn.Linear(input_dim, hidden_dim)
self.fc3 = nn.Linear(hidden_dim, output_dim)
self.noise_level = noise_level
print('Created Function with {} {} {} {}'.format(input_dim, hidden_dim, output_dim, noise_level))
def forward(self, x, with_noise=True):
x = self.fc1(x)
x = torch.relu(x)
x = self.fc2(x)
x = torch.relu(x)
x = self.fc3(x)
if with_noise:
x = x + torch.randn(x.size()).cuda()*self.noise_level
return (x - 1.0)*(x - 1.0)
class SyntheticFunctionLearner(nn.Module):
def __init__(self, input_dim, hidden_dim, output_dim):
super(SyntheticFunctionLearner, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, hidden_dim)
self.fc3 = nn.Linear(hidden_dim, output_dim)
def forward(self, x):
x = self.fc1(x)
x = torch.relu(x)
h = self.fc2(x)
y = torch.relu(h)
y = self.fc3(y)
return y, h
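# --- usage sketch (illustrative addition, not part of the original module) ---
# Builds a small target function and a learner and runs one forward pass on
# random inputs; the dimensions are arbitrary. with_noise=False avoids the
# .cuda() call, so this also runs on CPU-only machines.
if __name__ == '__main__':
    target = SyntheticFunction(input_dim=4, hidden_dim=8, output_dim=1, noise_level=0.1)
    learner = SyntheticFunctionLearner(input_dim=4, hidden_dim=8, output_dim=1)
    x = torch.randn(2, 4)
    y_true = target(x, with_noise=False)
    y_pred, hidden = learner(x)
    print(y_true.shape, y_pred.shape, hidden.shape)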
| 2.515625
| 3
|
sdk/yapily/models/overdraft_overdraft_tier_band.py
|
bs-yapily/yapily-sdk-python
| 0
|
12780367
|
<filename>sdk/yapily/models/overdraft_overdraft_tier_band.py
# coding: utf-8
"""
Yapily API
To access endpoints that require authentication, use your application key and secret created in the Dashboard (https://dashboard.yapily.com) # noqa: E501
OpenAPI spec version: 0.0.155
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from yapily.models.overdraft_overdraft_fees_charges import OverdraftOverdraftFeesCharges # noqa: F401,E501
class OverdraftOverdraftTierBand(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'bank_guaranteed_indicator': 'bool',
'ear': 'str',
'identification': 'str',
'notes': 'list[str]',
'overdraft_fees_charges': 'list[OverdraftOverdraftFeesCharges]',
'overdraft_interest_charging_coverage': 'str',
'tier_value_max': 'str',
'tier_value_min': 'str'
}
attribute_map = {
'bank_guaranteed_indicator': 'BankGuaranteedIndicator',
'ear': 'EAR',
'identification': 'Identification',
'notes': 'Notes',
'overdraft_fees_charges': 'OverdraftFeesCharges',
'overdraft_interest_charging_coverage': 'OverdraftInterestChargingCoverage',
'tier_value_max': 'TierValueMax',
'tier_value_min': 'TierValueMin'
}
def __init__(self, bank_guaranteed_indicator=None, ear=None, identification=None, notes=None, overdraft_fees_charges=None, overdraft_interest_charging_coverage=None, tier_value_max=None, tier_value_min=None): # noqa: E501
"""OverdraftOverdraftTierBand - a model defined in Swagger""" # noqa: E501
self._bank_guaranteed_indicator = None
self._ear = None
self._identification = None
self._notes = None
self._overdraft_fees_charges = None
self._overdraft_interest_charging_coverage = None
self._tier_value_max = None
self._tier_value_min = None
self.discriminator = None
if bank_guaranteed_indicator is not None:
self.bank_guaranteed_indicator = bank_guaranteed_indicator
if ear is not None:
self.ear = ear
if identification is not None:
self.identification = identification
if notes is not None:
self.notes = notes
if overdraft_fees_charges is not None:
self.overdraft_fees_charges = overdraft_fees_charges
if overdraft_interest_charging_coverage is not None:
self.overdraft_interest_charging_coverage = overdraft_interest_charging_coverage
if tier_value_max is not None:
self.tier_value_max = tier_value_max
if tier_value_min is not None:
self.tier_value_min = tier_value_min
@property
def bank_guaranteed_indicator(self):
"""Gets the bank_guaranteed_indicator of this OverdraftOverdraftTierBand. # noqa: E501
:return: The bank_guaranteed_indicator of this OverdraftOverdraftTierBand. # noqa: E501
:rtype: bool
"""
return self._bank_guaranteed_indicator
@bank_guaranteed_indicator.setter
def bank_guaranteed_indicator(self, bank_guaranteed_indicator):
"""Sets the bank_guaranteed_indicator of this OverdraftOverdraftTierBand.
:param bank_guaranteed_indicator: The bank_guaranteed_indicator of this OverdraftOverdraftTierBand. # noqa: E501
:type: bool
"""
self._bank_guaranteed_indicator = bank_guaranteed_indicator
@property
def ear(self):
"""Gets the ear of this OverdraftOverdraftTierBand. # noqa: E501
:return: The ear of this OverdraftOverdraftTierBand. # noqa: E501
:rtype: str
"""
return self._ear
@ear.setter
def ear(self, ear):
"""Sets the ear of this OverdraftOverdraftTierBand.
:param ear: The ear of this OverdraftOverdraftTierBand. # noqa: E501
:type: str
"""
self._ear = ear
@property
def identification(self):
"""Gets the identification of this OverdraftOverdraftTierBand. # noqa: E501
:return: The identification of this OverdraftOverdraftTierBand. # noqa: E501
:rtype: str
"""
return self._identification
@identification.setter
def identification(self, identification):
"""Sets the identification of this OverdraftOverdraftTierBand.
:param identification: The identification of this OverdraftOverdraftTierBand. # noqa: E501
:type: str
"""
self._identification = identification
@property
def notes(self):
"""Gets the notes of this OverdraftOverdraftTierBand. # noqa: E501
:return: The notes of this OverdraftOverdraftTierBand. # noqa: E501
:rtype: list[str]
"""
return self._notes
@notes.setter
def notes(self, notes):
"""Sets the notes of this OverdraftOverdraftTierBand.
:param notes: The notes of this OverdraftOverdraftTierBand. # noqa: E501
:type: list[str]
"""
self._notes = notes
@property
def overdraft_fees_charges(self):
"""Gets the overdraft_fees_charges of this OverdraftOverdraftTierBand. # noqa: E501
:return: The overdraft_fees_charges of this OverdraftOverdraftTierBand. # noqa: E501
:rtype: list[OverdraftOverdraftFeesCharges]
"""
return self._overdraft_fees_charges
@overdraft_fees_charges.setter
def overdraft_fees_charges(self, overdraft_fees_charges):
"""Sets the overdraft_fees_charges of this OverdraftOverdraftTierBand.
:param overdraft_fees_charges: The overdraft_fees_charges of this OverdraftOverdraftTierBand. # noqa: E501
:type: list[OverdraftOverdraftFeesCharges]
"""
self._overdraft_fees_charges = overdraft_fees_charges
@property
def overdraft_interest_charging_coverage(self):
"""Gets the overdraft_interest_charging_coverage of this OverdraftOverdraftTierBand. # noqa: E501
:return: The overdraft_interest_charging_coverage of this OverdraftOverdraftTierBand. # noqa: E501
:rtype: str
"""
return self._overdraft_interest_charging_coverage
@overdraft_interest_charging_coverage.setter
def overdraft_interest_charging_coverage(self, overdraft_interest_charging_coverage):
"""Sets the overdraft_interest_charging_coverage of this OverdraftOverdraftTierBand.
:param overdraft_interest_charging_coverage: The overdraft_interest_charging_coverage of this OverdraftOverdraftTierBand. # noqa: E501
:type: str
"""
allowed_values = ["Tiered", "Whole"] # noqa: E501
if overdraft_interest_charging_coverage not in allowed_values:
raise ValueError(
"Invalid value for `overdraft_interest_charging_coverage` ({0}), must be one of {1}" # noqa: E501
.format(overdraft_interest_charging_coverage, allowed_values)
)
self._overdraft_interest_charging_coverage = overdraft_interest_charging_coverage
@property
def tier_value_max(self):
"""Gets the tier_value_max of this OverdraftOverdraftTierBand. # noqa: E501
:return: The tier_value_max of this OverdraftOverdraftTierBand. # noqa: E501
:rtype: str
"""
return self._tier_value_max
@tier_value_max.setter
def tier_value_max(self, tier_value_max):
"""Sets the tier_value_max of this OverdraftOverdraftTierBand.
:param tier_value_max: The tier_value_max of this OverdraftOverdraftTierBand. # noqa: E501
:type: str
"""
self._tier_value_max = tier_value_max
@property
def tier_value_min(self):
"""Gets the tier_value_min of this OverdraftOverdraftTierBand. # noqa: E501
:return: The tier_value_min of this OverdraftOverdraftTierBand. # noqa: E501
:rtype: str
"""
return self._tier_value_min
@tier_value_min.setter
def tier_value_min(self, tier_value_min):
"""Sets the tier_value_min of this OverdraftOverdraftTierBand.
:param tier_value_min: The tier_value_min of this OverdraftOverdraftTierBand. # noqa: E501
:type: str
"""
self._tier_value_min = tier_value_min
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, OverdraftOverdraftTierBand):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 1.859375
| 2
|
mqtt2kasa/const.py
|
johnjones4/mqtt2kasa
| 12
|
12780368
|
<reponame>johnjones4/mqtt2kasa
#!/usr/bin/env python
MQTT_DEFAULT_CLIENT_ID = "mqtt2kasa"
MQTT_DEFAULT_CLIENT_TOPIC_FORMAT = "/kasa/device/{}"
MQTT_DEFAULT_BROKER_IP = "192.168.10.238"
MQTT_DEFAULT_RECONNECT_INTERVAL = 13 # [seconds]
KASA_DEFAULT_POLL_INTERVAL = 10 # [seconds]
KEEP_ALIVE_DEFAULT_TASK_INTERVAL = 1.5 # [seconds]
| 1.28125
| 1
|
formulario/migrations/0005_auto_20200910_2123.py
|
giuliocc/censo-querido-diario
| 40
|
12780369
|
# Generated by Django 3.1.1 on 2020-09-10 21:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('formulario', '0004_auto_20200909_2313'),
]
operations = [
migrations.RemoveField(
model_name='mapeamento',
name='links_fontes',
),
migrations.AddField(
model_name='mapeamento',
name='fonte_1',
field=models.URLField(blank=True, null=True),
),
migrations.AddField(
model_name='mapeamento',
name='fonte_2',
field=models.URLField(blank=True, null=True),
),
migrations.AddField(
model_name='mapeamento',
name='fonte_3',
field=models.URLField(blank=True, null=True),
),
migrations.AddField(
model_name='mapeamento',
name='fonte_4',
field=models.URLField(blank=True, null=True),
),
migrations.DeleteModel(
name='Fonte',
),
]
| 1.414063
| 1
|
dragon/python/_api/io/__init__.py
|
seetaresearch/Dragon
| 81
|
12780370
|
<filename>dragon/python/_api/io/__init__.py
# ------------------------------------------------------------
# Copyright (c) 2017-present, SeetaTech, Co.,Ltd.
#
# Licensed under the BSD 2-Clause License.
# You should have received a copy of the BSD 2-Clause License
# along with the software. If not, See,
#
# <https://opensource.org/licenses/BSD-2-Clause>
#
# ------------------------------------------------------------
from __future__ import absolute_import as _absolute_import
from __future__ import division as _division
from __future__ import print_function as _print_function
from dragon.core.io.kpl_record import KPLRecordDataset
from dragon.core.io.kpl_record import KPLRecordWriter
from dragon.core.io.reader import DataReader
from dragon.core.io.tf_record import TFRecordExample
from dragon.core.io.tf_record import TFRecordWriter
__all__ = [_s for _s in dir() if not _s.startswith('_')]
| 1.960938
| 2
|
python/38041857/dict_count.py
|
jeyoor/stackoverflow-notes
| 0
|
12780371
|
<filename>python/38041857/dict_count.py
from collections import defaultdict
#https://stackoverflow.com/questions/38041857/checking-if-keys-already-in-dictionary-with-try-except
class SimpleDictCounter:
"""Test counting elements using a dict
Here are the next lines of the doc comment"""
def __init__(self):
self.number_found = {}
def _count_occurences(self, item):
try:
#this checks to see if the item's already in the dict
self.number_found[item] = self.number_found[item] + 1
x = self.number_found[item]
#here's a line that fixes the issue presented by op
return x
except KeyError:
x = 1
#this adds an item if not in the dict
self.number_found[item] = x
return x
class BetterDictCounter:
"""Test counting elements using a defaultdict
Here are the next lines of the doc comment"""
def __init__(self):
self.number_found = defaultdict(int)
def _count_occurences(self, item):
try:
#this checks to see if the item's already in the dict
self.number_found[item] = self.number_found[item] + 1
x = self.number_found[item]
#here's a line that fixes the issue presented by op
return x
except KeyError:
x = 1
#this adds an item if not in the dict
self.number_found[item] = x
return x
if __name__ == "__main__":
#TODO: import a little command line printout demo here (use unit testing too?)
pass
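    # Minimal illustrative demo (an addition, not part of the original file);
    # both counters should report the same counts for the same input.
    items = ['a', 'b', 'a', 'c', 'a', 'b']
    simple, better = SimpleDictCounter(), BetterDictCounter()
    for item in items:
        simple._count_occurences(item)
        better._count_occurences(item)
    print(simple.number_found)        # {'a': 3, 'b': 2, 'c': 1}
    print(dict(better.number_found))  # {'a': 3, 'b': 2, 'c': 1}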
| 4
| 4
|
pompy/demos.py
|
alexliberzonlab/pompy
| 12
|
12780372
|
# -*- coding: utf-8 -*-
"""Demonstrations of setting up models and visualising outputs."""
from __future__ import division
__authors__ = '<NAME>'
__license__ = 'MIT'
import sys
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.animation import FuncAnimation
import numpy as np
from pompy import models, processors
DEFAULT_SEED = 20181108
def set_up_figure(fig_size=(10, 5)):
"""Set up Matplotlib figure with simulation time title text.
Parameters
----------
title_text : string
Text to set figure title to.
fig_size : tuple
Figure dimensions in inches in order `(width, height)`.
"""
fig, ax = plt.subplots(1, 1, figsize=fig_size)
title = ax.set_title('Simulation time = ---- seconds')
return fig, ax, title
def update_decorator(dt, title, steps_per_frame, models):
"""Decorator for animation update methods."""
def inner_decorator(update_function):
def wrapped_update(i):
for j in range(steps_per_frame):
for model in models:
model.update(dt)
t = i * steps_per_frame * dt
title.set_text('Simulation time = {0:.3f} seconds'.format(t))
return [title] + update_function(i)
return wrapped_update
return inner_decorator
def wind_model_demo(dt=0.01, t_max=100, steps_per_frame=20, seed=DEFAULT_SEED):
"""Set up wind model and animate velocity field with quiver plot.
Parameters
----------
dt : float
Simulation timestep.
t_max : float
End time to simulate to.
steps_per_frame: integer
Number of simulation time steps to perform between animation frames.
seed : integer
Seed for random number generator.
Returns
-------
fig : Figure
Matplotlib figure object.
ax : AxesSubplot
Matplotlib axis object.
anim : FuncAnimation
Matplotlib animation object.
"""
rng = np.random.RandomState(seed)
# define simulation region
wind_region = models.Rectangle(x_min=0., x_max=100., y_min=-25., y_max=25.)
# set up wind model
wind_model = models.WindModel(wind_region, 21, 11, rng=rng)
# let simulation run for 10s to equilibrate wind model
for t in np.arange(0, 10, dt):
wind_model.update(dt)
# generate figure and attach close event
fig, ax, title = set_up_figure()
# create quiver plot of initial velocity field
vf_plot = ax.quiver(wind_model.x_points, wind_model.y_points,
wind_model.velocity_field.T[0],
wind_model.velocity_field.T[1], width=0.003)
# expand axis limits to make vectors at boundary of field visible
ax.axis(ax.axis() + np.array([-0.25, 0.25, -0.25, 0.25]))
ax.set_xlabel('x-coordinate / m')
ax.set_ylabel('y-coordinate / m')
ax.set_aspect(1)
fig.tight_layout()
# define update function
@update_decorator(dt, title, steps_per_frame, [wind_model])
def update(i):
vf_plot.set_UVC(
wind_model.velocity_field.T[0], wind_model.velocity_field.T[1])
return [vf_plot]
# create animation object
n_frame = int(t_max / (dt * steps_per_frame) + 0.5)
anim = FuncAnimation(fig, update, n_frame, blit=True)
return fig, ax, anim
def plume_model_demo(dt=0.01, t_max=100, steps_per_frame=200,
seed=DEFAULT_SEED):
"""Set up plume model and animate puffs overlayed over velocity field.
Puff positions displayed using Matplotlib `scatter` plot function and
velocity field displayed using `quiver` plot function.
plot and quiver functions.
Parameters
----------
dt : float
Simulation timestep.
t_max : float
End time to simulate to.
steps_per_frame: integer
Number of simulation time steps to perform between animation frames.
seed : integer
Seed for random number generator.
Returns
-------
fig : Figure
Matplotlib figure object.
ax : AxesSubplot
Matplotlib axis object.
anim : FuncAnimation
Matplotlib animation object.
"""
rng = np.random.RandomState(seed)
# define simulation region
sim_region = models.Rectangle(x_min=0., x_max=100, y_min=-25., y_max=25.)
# set up wind model
wind_model = models.WindModel(sim_region, 21, 11, rng=rng)
# let simulation run for 10s to equilibrate wind model
for t in np.arange(0, 10, dt):
wind_model.update(dt)
# set up plume model
plume_model = models.PlumeModel(
sim_region, (5., 0., 0.), wind_model, rng=rng)
# set up figure window
fig, ax, title = set_up_figure()
# create quiver plot of initial velocity field
# quiver expects first array dimension (rows) to correspond to y-axis
# therefore need to transpose
vf_plot = plt.quiver(
wind_model.x_points, wind_model.y_points,
wind_model.velocity_field.T[0], wind_model.velocity_field.T[1],
width=0.003)
# expand axis limits to make vectors at boundary of field visible
ax.axis(ax.axis() + np.array([-0.25, 0.25, -0.25, 0.25]))
# draw initial puff positions with scatter plot
radius_mult = 200
pp_plot = plt.scatter(
plume_model.puff_array[:, 0], plume_model.puff_array[:, 1],
radius_mult * plume_model.puff_array[:, 3]**0.5, c='r',
edgecolors='none')
ax.set_xlabel('x-coordinate / m')
ax.set_ylabel('y-coordinate / m')
ax.set_aspect(1)
fig.tight_layout()
# define update function
@update_decorator(dt, title, steps_per_frame, [wind_model, plume_model])
def update(i):
# update velocity field quiver plot data
vf_plot.set_UVC(wind_model.velocity_field[:, :, 0].T,
wind_model.velocity_field[:, :, 1].T)
# update puff position scatter plot positions and sizes
pp_plot.set_offsets(plume_model.puff_array[:, :2])
pp_plot._sizes = radius_mult * plume_model.puff_array[:, 3]**0.5
return [vf_plot, pp_plot]
# create animation object
n_frame = int(t_max / (dt * steps_per_frame) + 0.5)
anim = FuncAnimation(fig, update, frames=n_frame, blit=True)
return fig, ax, anim
def conc_point_val_demo(dt=0.01, t_max=5, steps_per_frame=1, x=10., y=0.0,
seed=DEFAULT_SEED):
"""Set up plume model and animate concentration at a point as time series.
Demonstration of setting up plume model and processing the outputted
puff arrays with the ConcentrationPointValueCalculator class, the
resulting concentration time course at a point in the odour plume being
displayed with the Matplotlib `plot` function.
Parameters
----------
dt : float
Simulation timestep.
t_max : float
End time to simulate to.
steps_per_frame: integer
Number of simulation time steps to perform between animation frames.
x : float
x-coordinate of point to measure concentration at.
y : float
y-coordinate of point to measure concentration at.
seed : integer
Seed for random number generator.
Returns
-------
fig : Figure
Matplotlib figure object.
ax : AxesSubplot
Matplotlib axis object.
anim : FuncAnimation
Matplotlib animation object.
"""
rng = np.random.RandomState(seed)
# define simulation region
sim_region = models.Rectangle(x_min=0., x_max=100, y_min=-25., y_max=25.)
# set up wind model
wind_model = models.WindModel(sim_region, 21, 11, rng=rng)
# set up plume model
plume_model = models.PlumeModel(
sim_region, (5., 0., 0.), wind_model, rng=rng)
# let simulation run for 10s to initialise models
for t in np.arange(0, 10, dt):
wind_model.update(dt)
plume_model.update(dt)
# set up concentration point value calculator
val_calc = processors.ConcentrationValueCalculator(1.)
conc_vals = []
conc_vals.append(val_calc.calc_conc_point(plume_model.puff_array, x, y))
ts = [0.]
# set up figure
fig, ax, title = set_up_figure()
# display initial concentration field as image
conc_line, = plt.plot(ts, conc_vals)
ax.set_xlim(0., t_max)
ax.set_ylim(0., 150.)
ax.set_xlabel('Time / s')
ax.set_ylabel('Normalised concentration')
ax.grid(True)
fig.tight_layout()
# define update function
@update_decorator(dt, title, steps_per_frame, [wind_model, plume_model])
def update(i):
ts.append(dt * i * steps_per_frame)
conc_vals.append(
val_calc.calc_conc_point(plume_model.puff_array, x, y))
conc_line.set_data(ts, conc_vals)
return [conc_line]
# create animation object
n_frame = int(t_max / (dt * steps_per_frame) + 0.5)
anim = FuncAnimation(fig, update, frames=n_frame, blit=True)
return fig, ax, anim
def concentration_array_demo(dt=0.01, t_max=100, steps_per_frame=50,
seed=DEFAULT_SEED):
"""Set up plume model and animate concentration fields.
Demonstration of setting up plume model and processing the outputted
puff arrays with the `ConcentrationArrayGenerator` class, the resulting
arrays being displayed with the Matplotlib `imshow` function.
Parameters
----------
dt : float
Simulation timestep.
t_max : float
End time to simulate to.
steps_per_frame: integer
Number of simulation time steps to perform between animation frames.
seed : integer
Seed for random number generator.
Returns
-------
fig : Figure
Matplotlib figure object.
ax : AxesSubplot
Matplotlib axis object.
anim : FuncAnimation
Matplotlib animation object.
"""
rng = np.random.RandomState(seed)
# define simulation region
sim_region = models.Rectangle(x_min=0., x_max=100, y_min=-25., y_max=25.)
# set up wind model
wind_model = models.WindModel(sim_region, 21, 11, rng=rng)
# set up plume model
plume_model = models.PlumeModel(
sim_region, (5., 0., 0.), wind_model, rng=rng)
# let simulation run for 10s to initialise models
for t in np.arange(0, 10, dt):
wind_model.update(dt)
plume_model.update(dt)
# set up concentration array generator
array_gen = processors.ConcentrationArrayGenerator(
sim_region, 0.01, 500, 250, 1.)
# set up figure
fig, ax, title = set_up_figure()
# display initial concentration field as image
conc_array = array_gen.generate_single_array(plume_model.puff_array)
conc_im = plt.imshow(conc_array.T, extent=sim_region, cmap='Reds',
vmin=0., vmax=1.)
ax.set_xlabel('x-coordinate / m')
ax.set_ylabel('y-coordinate / m')
ax.set_aspect(1)
fig.tight_layout()
# define update function
@update_decorator(dt, title, steps_per_frame, [wind_model, plume_model])
def update(i):
conc_im.set_data(
array_gen.generate_single_array(plume_model.puff_array).T)
return [conc_im]
# create animation object
n_frame = int(t_max / (dt * steps_per_frame) + 0.5)
anim = FuncAnimation(fig, update, frames=n_frame, blit=True)
return fig, ax, anim
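# --- usage sketch (illustrative addition, not part of the original module) ---
# Runs one of the demos interactively; assumes a Matplotlib backend with a
# display is available. plt.show() starts the animation loop.
if __name__ == '__main__':
    fig, ax, anim = wind_model_demo(t_max=10)
    plt.show()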
| 2.796875
| 3
|
genomvar/test/test_variant.py
|
mikpom/genomvar
| 0
|
12780373
|
<reponame>mikpom/genomvar
import copy
from pkg_resources import resource_filename as pkg_file
from genomvar import Reference
from genomvar.varset import VariantBase
from genomvar import variant
from genomvar.vcf import VCFReader, VCFWriter
from genomvar.vcf_utils import VCF_FIELDS, VCFRow
from genomvar.variant import GenomVariant
from genomvar.test import MyTestCase
# Factory normalizing indels
class TestVariantsCase(MyTestCase):
writer = VCFWriter()
def test_indel_equality(self):
# 1
# R TCACAG
# del1 T--CAG
# del2 TCA--G
adel1 = variant.AmbigDel('chrom',(1,1),(5,3),'CA','')
adel2 = variant.AmbigDel('chrom',(1,2),(5,4),'AC','')
self.assertTrue(adel1.ambig_equal(adel2))
self.assertFalse(adel1.edit_equal(adel2))
del1 = variant.Del('chrom',1,3)
self.assertTrue(del1.edit_equal(adel1))
self.assertTrue(del1.ambig_equal(adel1))
self.assertTrue(adel1.edit_equal(del1))
self.assertTrue(adel1.ambig_equal(del1))
def test_instantiation_from_edit(self):
# **Simple insertion**
# 15
# TTCACTTAGCATAATG|TCTT
# C
vb = self.nvf.from_edit('chr24',15,'G','GC')
self.assertEqual([vb.start,vb.end],[16,17])
# Now deletion
vb = self.nvf.from_edit(chrom='chr24',start=15,ref='GT',alt='G')
self.assertEqual([vb.start,vb.end],[16,17])
vb = self.nvf.from_edit('chr24',15,'GT','G')
self.assertEqual([vb.start,vb.end],[16,17])
# 575
# ATTTAATA
# T-AT v1
vb = self.nvf.from_edit('chr24',575,'TA','T')
self.assertTrue(isinstance(vb,variant.AmbigIndel))
# 65
# TAAGG CTG AATACTAT
# CTC
vb = self.svf.from_edit('chr24',start=65,ref='CTG',alt='CTC')
self.assertEqual([vb.start,vb.end,vb.ref,vb.alt,type(vb).__name__],
[67,68,'G','C','SNP'])
# 65
# TAAGG CTG AATACTAT
# CCGTCGTG
vb = self.svf.from_edit('chr24',start=65,ref='CTG',alt='CCGTCGTG')
self.assertEqual([vb.start,vb.end,bool(vb.ref),vb.alt,type(vb).__name__],
[66,67,False,'CGTCG','Ins'])
# 2343
# CTG TTTCCA ACATACATCATGAGACTTCTG
# TTCCATTCCA
vb = self.nvf.from_edit('chr24',2343,'TTTCCA','TTCCATTCCA')
self.assertEqual([vb.start,vb.end,vb.alt,type(vb).__name__],
[2344,2346,'CCAT','AmbigIns'])
# 3300
# TATC TTTTTGAC TGG
# --------
vb = self.nvf.from_edit('chr24',3300,'CTTTTTGAC','C')
self.assertEqual([vb.start,vb.end,vb.alt,type(vb).__name__],
[3300,3310,'','AmbigDel'])
# 0
# TTCACTTAGCA
vb = self.nvf.from_edit('chr24',0,'T','TT')
self.assertEqual([vb.start,vb.end,vb.alt,vb.vtp],
[0,3,'T',variant.AmbigIns])
def test_instantiation_from_hgvs(self):
# test SNP
vb = self.svf.from_hgvs('chr1:g.15C>A')
self.assertEqual([vb.chrom,vb.start,vb.end,vb.ref,vb.alt],
['chr1',14,15,'C','A'])
vb = self.svf.from_hgvs('NG_012232.1:g.19_21del')
self.assertEqual([vb.chrom,vb.start,vb.end,vb.ref,vb.alt],
['NG_012232.1',18,21,None,None])
vb = self.svf.from_hgvs('NG_012232.1:g.19del')
self.assertTrue(vb.is_variant_instance(variant.Del))
self.assertEqual([vb.chrom,vb.start,vb.end,vb.ref,vb.alt],
['NG_012232.1',18,19,None,None])
vb = self.svf.from_hgvs('NC_000023.10:g.10_11insCCT')
self.assertEqual([vb.chrom,vb.start,vb.end,vb.ref,vb.alt],
['NC_000023.10',10,11,None,'CCT'])
vb = self.svf.from_hgvs('NC_000023.11:g.10delinsGA')
self.assertTrue(vb.is_variant_instance(variant.Mixed))
self.assertEqual([vb.chrom,vb.start,vb.end,vb.ref,vb.alt],
['NC_000023.11',9,10,None,'GA'])
vb = self.svf.from_hgvs('LRG_199:g.145_147delinsTGG')
self.assertTrue(vb.is_variant_instance(variant.MNP))
self.assertEqual([vb.chrom,vb.start,vb.end,vb.ref,vb.alt],
['LRG_199',144,147,None,'TGG'])
# Test Ambig
# 3300
# TATC TTTTTGAC TGG
# --------
ve = self.nvf.from_edit('chr24',3299,'TCTTTTTGA','T')
vb = self.nvf.from_hgvs('chr24:g.3302_3309del')
self.assertEqual([vb.start,vb.end,vb.alt,vb.vtp],
[3300,3310,'',variant.AmbigDel])
self.assertFalse(vb.edit_equal(ve))
self.assertTrue(vb.ambig_equal(ve))
def test_haplotype_edit_equality(self):
factory = variant.VariantFactory()
v1 = factory.from_edit('chr24',2093,'TGG','CCC')
v2 = factory.from_edit('chr24',2098,'TT','GG')
v3 = factory.from_edit('chr24',2098,'TT','CC')
h1 = variant.Haplotype.from_variants([v1,v2])
h1_ = variant.Haplotype.from_variants([v1,v2])
h2 = variant.Haplotype.from_variants([v1,v3])
self.assertTrue(h1.edit_equal(h1_))
self.assertFalse(h1.edit_equal(h2))
def test_get_vcf_row_instantiated_variant(self):
factory = variant.VariantFactory()
v1 = factory.from_edit('chr24',2093,'TGG','CCC')
row = self.writer.get_row(v1)
self.assertEqual(row.REF, 'TGG')
self.assertEqual(row.POS, 2094)
self.assertEqual(str(row), 'chr24\t2094\t.\tTGG\tCCC\t.\t.\t.')
gv = GenomVariant(v1, attrib={'id':'vrtid', 'filter':'LOWQUAL',
'qual':100})
row = self.writer.get_row(gv)
self.assertEqual(
str(row),'chr24\t2094\tvrtid\tTGG\tCCC\t100\tLOWQUAL\t.')
vrt = factory.from_edit('chr20', 1253922,'TGT','G')
row = self.writer.get_row(vrt)
self.assertEqual(str(row), 'chr20\t1253923\t.\tTGT\tG\t.\t.\t.')
# vf = VariantFactory(reference=ref,
# normindel=True)
vrt = factory.from_edit('chr1',13957,'TCCCCCA','TCCCCA')
with self.assertRaises(ValueError) as cm:
row = self.writer.get_row(vrt)
self.assertIn('Reference is required',cm.exception.args[0])
def test_change_of_attributes(self):
reader = VCFReader(
pkg_file('genomvar.test','data/example1.vcf'))
vrt = list(reader.iter_vrt())[0]
self.assertEqual(str(self.writer.get_row(vrt)),
'chr24\t23\t1\tAG\tA\t100\tPASS\t.')
vrt2 = copy.deepcopy(vrt)
vrt2.attrib['id'] = '.'
vrt2.attrib['qual'] = '.'
vrt2.attrib['filter'] = '.'
self.assertEqual(str(self.writer.get_row(vrt2)),
'chr24\t23\t.\tAG\tA\t.\t.\t.')
self.assertEqual(str(self.writer.get_row(vrt2, id='.', qual='.', filter='.')),
'chr24\t23\t.\tAG\tA\t.\t.\t.')
vrt3 = copy.deepcopy(vrt)
vrt3.attrib['id'] = None
vrt3.attrib['qual'] = None
vrt3.attrib['filter'] = None
self.assertEqual(str(self.writer.get_row(vrt3)),
'chr24\t23\t.\tAG\tA\t.\t.\t.')
reader.close()
reader.close()
def test_to_vcf_row_from_file(self):
def _split_multiallelic(rows):
for row in rows:
for alt in row.ALT.split(','):
kwds = {f:getattr(row,f) for f in VCF_FIELDS}
kwds['ALT'] = alt
kwds['INFO'] = '.'
kwds['FORMAT'] = None
kwds['SAMPLES'] = None
yield str(VCFRow(**kwds))
reader = VCFReader(pkg_file('genomvar.test','data/example1.vcf'))
variants = list(reader.iter_vrt(
parse_info=False,parse_samples=False))
rows = [str(self.writer.get_row(v)) for v in variants]
for r1, r2 in zip(
_split_multiallelic(reader.iter_rows()), rows):
if 'AG\tAGG' in r1: # stripping
continue
self.assertEqual(r1,r2)
reader.close()
def test_to_vcf_row_instantiated_variant_numeric_chrom(self):
factory = variant.VariantFactory()
v1 = factory.from_edit(1,2093,'TGG','CCC')
row = str(self.writer.get_row(v1))
row2 = str(v1)
self.assertIn('TGG', row)
self.assertIn('TGG', row2)
| 2.0625
| 2
|
tests/filters/test_types.py
|
SocialFinanceDigitalLabs/sfdata-stream-parser
| 1
|
12780374
|
<reponame>SocialFinanceDigitalLabs/sfdata-stream-parser<filename>tests/filters/test_types.py
import datetime
from unittest.mock import MagicMock
from sfdata_stream_parser import events
from sfdata_stream_parser.filters.types import integer_converter, float_converter, cell_value_converter, _from_excel, \
date_converter
def cell(value):
return events.Cell(value=value)
def test_decorator():
mock_func = MagicMock()
converter = cell_value_converter(mock_func)
converter(cell(1))
mock_func.assert_called_once_with(1)
def test_integers():
assert integer_converter(cell('1')).value == 1
assert integer_converter(cell('1.0')).value == 1
assert isinstance(integer_converter(cell('1.0')).value, int)
assert integer_converter(cell('0.9998')).value == 1
assert integer_converter(cell('2.51')).value == 3
assert integer_converter(cell('100,000.00')).value == 100000
assert integer_converter(cell('£5')).value == 5
assert integer_converter(cell('1')).source.value == '1'
error_cell = integer_converter(cell('a'))
assert error_cell.value is None
assert error_cell.error_type == ValueError
assert error_cell.error_message == "ValueError: could not convert value to integer: 'a'"
def test_floats():
assert float_converter(cell('1')).value == 1.0
assert float_converter(cell('1.0')).value == 1.0
assert isinstance(float_converter(cell('1.0')).value, float)
assert float_converter(cell('0.9998')).value == 0.9998
assert float_converter(cell('2.51')).value == 2.51
assert float_converter(cell('100,000.00')).value == 100000.0
assert float_converter(cell('£5')).value == 5.0
assert float_converter(cell('100,000.00')).source.value == '100,000.00'
error_cell = float_converter(cell('a'))
assert error_cell.value is None
assert error_cell.error_type == ValueError
assert error_cell.error_message == "ValueError: could not convert value to float: 'a'"
def test_from_excel():
assert _from_excel(42154) == datetime.date(2015, 5, 30)
assert _from_excel(42154.5) == datetime.datetime(2015, 5, 30, 12, 0, 0)
assert _from_excel(42154.437675) == datetime.datetime(2015, 5, 30, 10, 30, 15)
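    # Editor's note: these expected values are consistent with the common Excel
    # serial-date convention with origin 1899-12-30, where the integer part counts
    # days and the fractional part encodes the time of day (e.g. .5 -> 12:00).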
def test_date_converter():
assert date_converter(cell(datetime.date(2015, 7, 13))).value == datetime.date(2015, 7, 13)
assert date_converter(cell('14/07/2015')).value == datetime.date(2015, 7, 14)
assert date_converter(cell('2015 - 07 - 14')).value == datetime.date(2015, 7, 14)
assert date_converter(cell(42200)).value == datetime.date(2015, 7, 15)
assert date_converter(cell('14/07/15')).error_message == "ValueError: unable to determine format for: '14/07/15'"
| 2.5625
| 3
|
refill/pptx.py
|
trackuity/refill
| 0
|
12780375
|
from __future__ import annotations
import re
import string
from abc import ABC, abstractmethod
from dataclasses import dataclass
from fnmatch import fnmatchcase
from io import BytesIO
from typing import IO, Dict, List, Optional, Union
from pptx import Presentation
from pptx.chart.data import ChartData
from pptx.enum.shapes import PP_PLACEHOLDER_TYPE
from typing_extensions import TypedDict
from .core import Filler, Params, Template
from .spec import Selector, Spec
class PPTXTableSpecDict(TypedDict):
keys: Selector
stubs: Selector
columns: Dict[str, Selector]
class PPTXChartSpecDict(TypedDict):
keys: Selector
categories: Selector
series: Dict[str, Selector]
@dataclass
class PPTXSpec(Spec):
variables: Dict[str, Selector]
pictures: Dict[str, Selector]
tables: Dict[str, PPTXTableSpecDict]
charts: Dict[str, PPTXChartSpecDict]
class PPTXTableParamsDict(TypedDict):
keys: List[str]
stubs: Dict[str, str]
columns: Dict[str, Dict[str, str]]
class PPTXChartParamsDict(TypedDict):
keys: List[str]
categories: Dict[str, str]
series: Dict[str, Dict[str, Union[int, float]]]
@dataclass
class PPTXParams(Params[PPTXSpec]):
variables: Dict[str, str]
pictures: Dict[str, bytes]
tables: Dict[str, PPTXTableParamsDict]
charts: Dict[str, PPTXChartParamsDict]
class PPTXTemplate(Template[PPTXParams]):
def render_to_file(self, params: PPTXParams, file_object: IO[bytes]) -> None:
self._params = params
shape_substituters = self._create_shape_substituters()
prs = Presentation(self._path_or_file)
for slide in prs.slides:
for shape in slide.shapes:
for shape_substituter in shape_substituters:
shape_substituter.substitute_shape(shape)
prs.save(file_object)
def _create_shape_substituters(self) -> List[ShapeSubstituter]:
return (
[
TextShapeSubstituter(self._params.variables),
PicturePlaceholderSubstituter(self._params.pictures),
]
+ [
TableShapeSubstituter(table_name, table_params)
for (table_name, table_params) in self._params.tables.items()
]
+ [
ChartShapeSubstituter(chart_name, chart_params)
for (chart_name, chart_params) in self._params.charts.items()
]
)
class PPTXFiller(Filler[PPTXSpec, PPTXParams, PPTXTemplate]):
params_cls = PPTXParams
class ShapeSubstituter(ABC):
@abstractmethod
def substitute_shape(self, shape):
...
class TextShapeSubstituter(ShapeSubstituter):
def __init__(self, variables: Dict[str, str]) -> None:
super().__init__()
self._variables = {
name: (value if value is not None else "")
for name, value in variables.items()
}
def substitute_shape(self, shape):
if shape.has_text_frame:
self.substitute_text_frame(shape.text_frame)
def substitute_text_frame(self, text_frame):
for paragraph in text_frame.paragraphs:
if paragraph.runs:
# Since powerpoint often splits text into multiple runs for some reason,
# we combine the text from all runs, substitute that, and put the result
# in the first run. The remaining runs are made empty. This implies that
# the formatting from the first run will apply to everything in the end,
# but templates can always use separate text frames if needed.
first_run = paragraph.runs[0]
first_run.text = self.substitute_text(
"".join(run.text for run in paragraph.runs)
)
for run in paragraph.runs[1:]:
run.text = ""
def substitute_text(self, text: str) -> str:
template = string.Template(text)
return template.substitute(self._variables)
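    # Editor's note (illustrative): string.Template replaces "$name"/"${name}"
    # placeholders, so a run whose combined text is "Hello $client" becomes
    # "Hello ACME" when variables == {"client": "ACME"}. Template.substitute
    # raises KeyError for placeholders missing from the mapping, and "$$"
    # escapes a literal dollar sign.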
class PlaceholderSubstituter(ShapeSubstituter):
def substitute_shape(self, shape):
if shape.is_placeholder:
self.substitute_placeholder(shape)
@abstractmethod
def substitute_placeholder(self, placeholder):
...
class PicturePlaceholderSubstituter(PlaceholderSubstituter):
def __init__(self, pictures: Dict[str, bytes]) -> None:
super().__init__()
self._pictures = pictures
def substitute_placeholder(self, placeholder):
type_: PP_PLACEHOLDER_TYPE = placeholder.placeholder_format.type
if type_ == PP_PLACEHOLDER_TYPE.PICTURE: # type: ignore
self.substitute_picture_placeholder(placeholder)
def substitute_picture_placeholder(self, picture_placeholder):
image = self._pictures.get(picture_placeholder.name)
if image is not None:
image_file = BytesIO(image)
picture_placeholder.insert_picture(image_file)
class TableShapeSubstituter(ShapeSubstituter):
def __init__(
self, table_name_pattern: str, table_params: PPTXTableParamsDict
) -> None:
super().__init__()
self._table_name_pattern = table_name_pattern
self._keys = table_params["keys"]
self._stubs = table_params["stubs"]
self._columns = table_params["columns"]
def substitute_shape(self, shape):
if shape.has_table and fnmatchcase(shape.name, self._table_name_pattern):
self.substitute_table(shape.table)
def substitute_table(self, table):
column_index_values = []
for i, row in enumerate(table.rows):
if not column_index_values: # first row is header
for j, cell in enumerate(row.cells):
column_index_values.append(
self.derive_table_column_index_value(cell.text, j)
)
else:
row_index_value = None
for j, cell in enumerate(row.cells):
if j == 0:
row_index_value = self.derive_table_row_index_value(
cell.text, i
)
for run in cell.text_frame.paragraphs[0].runs:
new_text = self.substitute_table_cell(
row_index_value, column_index_values[j], run.text
)
if new_text is not None:
run.text = new_text
break # there should only be one run at most
else:
assert row_index_value is not None
for run in cell.text_frame.paragraphs[0].runs:
new_text = self.substitute_table_cell(
row_index_value, column_index_values[j], run.text
)
if new_text is not None:
run.text = new_text
break # there should only be one run at most
def derive_table_row_index_value(self, text: str, row_number: int) -> str:
if text.startswith("$"):
key_index = int(re.sub(r"\$[a-zA-Z_]*", "", text)) - 1
else:
key_index = row_number
return self._keys[key_index]
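    # Editor's note (worked example): a stub cell whose text is "$key2" is reduced
    # by the regex to "2", giving key_index 1, i.e. the second entry of
    # table_params["keys"]; cells without a "$" prefix fall back to their row number.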
def derive_table_column_index_value(
self, text: str, column_number: int
) -> Optional[str]:
if column_number > 0:
return text.lower()
def substitute_table_cell(
self,
row_index_value: str,
column_index_value: Optional[str],
text: str,
) -> Optional[str]:
if column_index_value is None:
return self._stubs[row_index_value]
else:
return self._columns[column_index_value].get(row_index_value)
class ChartShapeSubstituter(ShapeSubstituter):
def __init__(
self, chart_name_pattern: str, chart_params: PPTXChartParamsDict
) -> None:
super().__init__()
self._chart_name_pattern = chart_name_pattern
self._keys = chart_params["keys"]
self._categories = chart_params["categories"]
self._series = chart_params["series"]
def substitute_shape(self, shape):
if shape.has_chart and fnmatchcase(shape.name, self._chart_name_pattern):
self.substitute_chart(shape.chart)
def substitute_chart(self, chart):
index_values = self.generate_chart_index_values(chart.plots[0].categories)
metric_names = [self.derive_chart_metric_name(s.name) for s in chart.series]
chart_data = ChartData()
chart_data.categories = [
self.get_chart_category(index_value) for index_value in index_values
]
for metric_name in metric_names:
values = [
self.get_chart_value(index_value, metric_name)
for index_value in index_values
]
chart_data.add_series(metric_name, values)
chart.replace_data(chart_data)
def generate_chart_index_values(self, current_values) -> List[str]:
return self._keys
def derive_chart_metric_name(self, text: str) -> str:
return text.lower()
def get_chart_category(self, index_value: str) -> str:
return self._categories[index_value]
def get_chart_value(
self, index_value: str, metric_name: str
) -> Optional[Union[int, float]]:
return self._series[metric_name].get(index_value)
| 2.25
| 2
|
setup.py
|
CrossNox/symspellpy
| 0
|
12780376
|
from setuptools import setup, find_packages
setup(
name='symspellpy',
packages=find_packages(exclude=['test']),
package_data={
'symspellpy': ['README.md', 'LICENSE']
},
version='0.9.0',
description='Keyboard layout aware version of SymSpell',
long_description=open('README.md').read().split("========")[-1],
author='crossnox',
url='https://github.com/crossnox/symspellpy',
keywords=['symspellpy'],
install_requires=[
'scipy >= 0.19'
],
python_requires='>=3.4',
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
],
test_suite="test",
entry_points={
}
)
| 1.054688
| 1
|
tests/runtests.py
|
pombredanne/reviewboard
| 0
|
12780377
|
#!/usr/bin/env python3
import sys
import pytest
if __name__ == '__main__':
sys.exit(pytest.main(sys.argv[1:]))
| 1.515625
| 2
|
1071-soma-de-impares-consecutivos-i.py
|
ErickSimoes/URI-Online-Judge
| 0
|
12780378
|
<gh_stars>0
#!/usr/bin/env python
# coding: utf-8
# In[19]:
n = int(input())
m = int(input())
total = 0
if n > m:
n, m = m, n
for i in range(n+1, m):
if i%2 != 0:
total += i
print(total)
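# Editor's note (worked example): for n=4 and m=10 the loop visits 5..9 and adds
# the odd values 5 + 7 + 9 = 21; the endpoints themselves are never included.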
| 3.484375
| 3
|
pydsdl/_serializable/_serializable.py
|
bbworld1/pydsdl
| 0
|
12780379
|
# Copyright (c) 2018 UAVCAN Consortium
# This software is distributed under the terms of the MIT License.
# Author: <NAME> <<EMAIL>>
import abc
from .. import _expression
from .. import _error
from .._bit_length_set import BitLengthSet
class TypeParameterError(_error.InvalidDefinitionError):
pass
class SerializableType(_expression.Any):
"""
Instances are immutable.
Invoking :meth:`__str__` on a data type returns its uniform normalized definition, e.g.,
``uavcan.node.Heartbeat.1.0[<=36]``, ``truncated float16[<=36]``.
"""
TYPE_NAME = "metaserializable"
BITS_PER_BYTE = 8
"""
This is dictated by the UAVCAN Specification.
"""
def __init__(self) -> None:
super().__init__()
@property
@abc.abstractmethod
def bit_length_set(self) -> BitLengthSet:
"""
A set of all possible bit length values of the serialized representations of this type.
Refer to the specification for the background. The returned set is guaranteed to be non-empty.
See :class:`pydsdl.BitLengthSet`.
"""
raise NotImplementedError
@property
@abc.abstractmethod
def alignment_requirement(self) -> int:
"""
Serialized representations of this type are required/guaranteed to be aligned such that their offset
from the beginning of the containing serialized representation, in bits, is a multiple of this value, in bits.
Alignment of a type whose alignment requirement is X bits is facilitated by injecting ``[0, X)`` zero
padding bits before the serialized representation of the type.
For any element ``L`` of the bit length set of a type whose alignment requirement is ``A``, ``L % A = 0``.
I.e., the length of a serialized representation of the type is always a multiple of its alignment requirement.
This value is always a non-negative integer power of two. The alignment of one is a degenerate case denoting
no alignment.
"""
raise NotImplementedError
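    # Editor's note (illustrative): per the property above, a type with
    # alignment_requirement == 8 can only serialize to bit lengths that are
    # multiples of 8 (0, 8, 16, ...), while alignment_requirement == 1 imposes
    # no constraint.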
def _attribute(self, name: _expression.String) -> _expression.Any:
if name.native_value == "_bit_length_": # Experimental non-standard extension
try:
return _expression.Set(map(_expression.Rational, self.bit_length_set))
except TypeError:
pass
return super()._attribute(name) # Hand over up the inheritance chain, important
@abc.abstractmethod
def __str__(self) -> str: # pragma: no cover
# Implementations must return a DSDL spec-compatible textual representation of the type.
# The string representation is used for determining equivalency by the comparison operator __eq__().
raise NotImplementedError
def __hash__(self) -> int:
try:
bls = self.bit_length_set
except TypeError: # If the type is non-serializable.
bls = BitLengthSet()
return hash(str(self) + str(bls))
def __eq__(self, other: object) -> bool:
if isinstance(other, SerializableType):
same_type = isinstance(other, type(self)) and isinstance(self, type(other))
try: # Ensure equality of the bit length sets, otherwise, different types like voids may compare equal.
same_bls = self.bit_length_set == other.bit_length_set
except TypeError: # If the type is non-serializable, assume equality.
same_bls = same_type
return same_type and same_bls and str(self) == str(other)
return NotImplemented
| 2.75
| 3
|
generate_titles.py
|
michaelcapps/arxivTitleGenerator_RNN
| 0
|
12780380
|
<reponame>michaelcapps/arxivTitleGenerator_RNN<filename>generate_titles.py
# Credit to this structure goes to <NAME>
# https://github.com/martin-gorner/tensorflow-rnn-shakespeare
#
# I used his shakespeare generator as a starting point and tutorial
import tensorflow as tf
import numpy as np
import text_handler as tx
# these must match what was saved !
NUM_CHARS = tx.NUM_CHARS
num_layers = 3
state_size = 512
arxiv_checkpoint = "checkpoints/rnn_train_final.meta"
topn = 2
ncnt = 0
with tf.Session() as sess:
new_saver = tf.train.import_meta_graph(arxiv_checkpoint)
new_saver.restore(sess, tf.train.latest_checkpoint('checkpoints/'))
graph = sess.graph
#new_saver.restore(sess, 'checkpoints/rnn_train_final')
x = tx.char_to_code("L")
x = np.array([[x]]) # shape [BATCHSIZE, SEQLEN] with BATCHSIZE=1 and SEQLEN=1
# initial values
y = x
h = np.zeros([1, state_size*num_layers], dtype=np.float32) # [ BATCHSIZE, INTERNALSIZE * NLAYERS]
for i in range(10000):
yo, h = sess.run(['Yo:0', 'new_states:0'], feed_dict={'X:0': y, 'pkeep:0': 1.0, 'in_state:0': h, 'batchsize:0': 1})
c = tx.sample_probs(yo, topn)
y = np.array([[c]]) # shape [BATCHSIZE, SEQLEN] with BATCHSIZE=1 and SEQLEN=1
c = tx.code_to_char(c)
print(c, end="")
if c == '\n':
ncnt = 0
else:
ncnt += 1
if ncnt == 100:
print("")
ncnt = 0
| 2.625
| 3
|
mistos-backend/src/app/tests/databasetest.py
|
Maddonix/mistos_2
| 1
|
12780381
|
<reponame>Maddonix/mistos_2
from app.database import Base
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
SQLALCHEMY_DATABASE_URL = "sqlite:///./test.db"
engine = create_engine(
SQLALCHEMY_DATABASE_URL, connect_args={"check_same_thread": False}
)
TestingSessionLocal = sessionmaker(
autocommit=False, autoflush=False, bind=engine)
Base.metadata.create_all(bind=engine)
| 1.921875
| 2
|
colaboradores/forms.py
|
lauraziebarth/dispositivos_mercos
| 0
|
12780382
|
# coding: utf-8
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from django import forms
from colaboradores.enums import AREAS
class FormColaborador(forms.Form):
nome = forms.CharField(max_length=100, required=True)
email = forms.CharField(max_length=100, required=True)
area = forms.ChoiceField(choices=AREAS, required=True)
senha = forms.CharField(widget=forms.PasswordInput, required=True)
class FormLogin(forms.Form):
email = forms.CharField(max_length=100)
senha = forms.CharField(widget=forms.PasswordInput)
| 2.125
| 2
|
DMOJ/CCC/ART.py
|
eddiegz/Personal-C
| 3
|
12780383
|
<reponame>eddiegz/Personal-C
N=float(input())
number=0
list1=[]
while True:
co=float(input())
number+=1
list1.append(co)
if number==N:
break
print(list1)
| 3.515625
| 4
|
EngLearner/mainsys/models.py
|
jiangyifan123/EngLearner
| 0
|
12780384
|
from django.db import models
import time
# Create your models here.
class words(models.Model):
word = models.CharField(max_length = 40, default = '', verbose_name = "单词")
symthm = models.CharField(max_length = 40, default = '', verbose_name = "音标")
chinese = models.CharField(max_length = 100, default = '', verbose_name = "中文")
analyzation = models.CharField(max_length = 200, default = '', verbose_name = "联想法")
product = models.ManyToManyField('myadmin.product')
class writes(models.Model):
title = models.CharField(max_length=200, default = "无标题", verbose_name="标题")
time = models.IntegerField(default=0, verbose_name="限制时间")
problem = models.CharField(max_length=2000, default="", verbose_name="问题")
top = models.CharField(max_length=300, default="", verbose_name="规范")
product = models.ForeignKey('myadmin.product', null = True, on_delete=models.CASCADE)
class write_saving(models.Model):
content = models.CharField(max_length=1000, default = "", verbose_name = "内容")
writes = models.ForeignKey('writes', on_delete = models.CASCADE, null = True)
user = models.ForeignKey('users.UserProfile', on_delete = models.CASCADE, null = True)
class read_data(models.Model):
Date = models.DateField(auto_now_add = True, verbose_name = "创建时间")
mp3_url = models.CharField(max_length=300, default = "", verbose_name = "mp3内容", null = True)
eng = models.CharField(max_length=3000, default="", verbose_name = "英文内容")
chinese = models.CharField(max_length=5000, default="", verbose_name = "中文内容")
title = models.CharField(max_length=300, default="", verbose_name = "标题")
url = models.CharField(max_length=300, default = "", verbose_name = "地址")
class listen_data(models.Model):
Date = models.DateField(auto_now_add = True, verbose_name = "创建时间")
mp3_url = models.CharField(max_length=300, default = "", verbose_name = "mp3内容", null = True)
eng = models.CharField(max_length=5500, default="", verbose_name = "英文内容")
chinese = models.CharField(max_length=3000, default="", verbose_name = "中文内容")
title = models.CharField(max_length=300, default="", verbose_name = "标题")
url = models.CharField(max_length=300, default = "", verbose_name = "地址")
data_time = models.CharField(max_length=1000, default = "", verbose_name = "地址")
| 2.28125
| 2
|
heartful/apps/core/serializers.py
|
DeWittmm/Heartful
| 2
|
12780385
|
from rest_framework import serializers
from .models import *
#MARK: User
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('googleid', 'name', 'heartrate', 'spO2', 'age',)
class UserDataSetSerializer(serializers.ModelSerializer):
# user = UserSerializer(many=False, read_only=True)
class Meta:
model = UserDataSet
fields = ('id', 'user', 'type',)
class DataEntrySerializer(serializers.ModelSerializer):
class Meta:
model = DataEntry
fields = ('userdataset', 'value', 'unit', 'date_time',)
class DataTypeSerializer(serializers.ModelSerializer):
class Meta:
model = DataType
fields = ('type',)
#MARK: Goals
class GoalSerializer(serializers.ModelSerializer):
class Meta:
model = Goal
fields = ('id', 'user', 'title', 'detail', 'status', 'importance',)
| 2.328125
| 2
|
academia_ai/preprocessing.py
|
Knuppknou/academia_AI
| 4
|
12780386
|
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import os
from .leafs import leafs
print("Reloaded preprocessing!")
def normalize(dataset):
    '''Normalize the data so that, over all images, the pixel at each position (x/y) has mean 0 and is scaled by its (capped) per-pixel variance.'''
# calculate the mean
mean=np.zeros(dataset[0].image.shape)
for lea in dataset:
mean=mean+lea.image
mean/=len(dataset)
#calculating the variance
var=np.zeros(dataset[0].image.shape)
for lea in dataset:
var=var+(lea.image-mean)**2
var/=len(dataset)
f=0.1
    var=(var-f>=0)*(var-f)+f # clamp the variance to a minimum of f to avoid dividing by tiny values
for lea in dataset:
lea.image=(lea.image-mean)/var
def createTrainingAndTestingList(directory, shuffle = True):
'''
Takes as Input the matrices from collectData and creates a training and a testing list'''
l_train = []
l_test = []
for n in range (7):
matrices = np.load(os.path.join(directory, str(n)+'.npy'))
        for i in range(759): # 759 images per tree for training
l_train += [leafs.Leaf(i+n*1000, n, matrices[i]/255)]
        for i in range(760,839): # 79 images per tree for testing
l_test += [leafs.Leaf(i+n*1000, n, matrices[i]/255)]
if shuffle:
np.random.shuffle(l_train)
np.random.shuffle(l_test)
return([l_train,l_test])
def collectData(root_path, save_path, cfactor, overwrite = False):
    '''Processes images from root_path one-by-one and saves the compressed arrays alongside them,
    then collects the arrays tree by tree into one .npy file per tree under save_path'''
sizeOfMatrixes = int(2000//cfactor)
#processing images to arrays one-by-one and save inplace
iid = 0
for (root, dirnames, filenames) in os.walk(root_path, topdown = True):
for f in filenames:
if f.endswith('.JPG'):
savepath = os.path.join(root, os.path.splitext(f)[0])
savepath += ('_' + str(sizeOfMatrixes) + 'x' + str(sizeOfMatrixes)) # for example + _50x50
if(not(os.path.isfile(savepath+'.npy')) or overwrite):
matriX = centr_cut_compress(os.path.join(root, f), cfactor)
np.save(savepath, matriX, allow_pickle=False)
iid += 1
    # collecting all arrays from tree i into one big file called i.npy
for i in range (0,8):
tree_path = os.path.join(root_path, str(i))
tree_save_path = os.path.join(save_path, str(sizeOfMatrixes) + 'x' + str(sizeOfMatrixes) ,str(i))
leaf_list = []
for (root, dirnames, filenames) in os.walk(tree_path , topdown=True):
for f in filenames:
if f.endswith('_' + str(sizeOfMatrixes) + 'x' + str(sizeOfMatrixes) + '.npy'):
leaf_list.append(np.load(os.path.join(root, f)))
leaf_array = np.array(leaf_list)
np.save(tree_save_path, leaf_array, allow_pickle=False)
def desired_output(label):
res = -1 * np.ones((7,1,1))
res[label, 0, 0] = +1
return res
def centr_cut_compress(path, cfactor = 50, square_side = 2000, debug=False):
    '''Centers, cuts and compresses a picture.
    Input: path, compression factor = 50, square side of the cropped image = 2000, debug=False
    Output: matrix that can be used as a CNN input
    '''
im = center_leaf(path, square_side)
new_shape = im.size[0] // cfactor
new_im = im.resize((new_shape, new_shape)) # makes the resolution smaller
matriz = np.array(new_im) # convert image to numpy matrix
matriz ^= 0xFF # invert matrix
oneD_matriz = matriz[:, :, 1] # only looking at one dimension, 1 = green
if debug:
print('Image “',path,'“ opened with size:',im.size,'and mode:',im.mode)
print('compressed the square-image with lenght :',
oneD_matriz.shape[0], ' with factor:', cfactor)
print('output matrix has shape:', oneD_matriz.shape)
plt.imshow(oneD_matriz)
plt.tight_layout()
plt.show()
return oneD_matriz
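# Example (editor's sketch; 'leaf.JPG' is a placeholder path): with the defaults
# square_side=2000 and cfactor=50, the centred crop is resized to 2000 // 50 = 40
# pixels per side, so centr_cut_compress('leaf.JPG') returns a 40x40 array of the
# inverted green channel.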
def center_leaf(path, square_side=2000):
'''
    Centers a square of side square_side on the leaf, restricted to the region of
    interest determined with find_overlap (hence the fixed borders below).
    input: path of the image, square_side of the crop that is kept
    output: the cropped image
    ATTENTION: the cutting borders are hard-coded
'''
up = 500
down = 2900
left = 400
right = 4000
s = square_side // 2
im = Image.open(path).convert('RGB')
matriz = np.array(im) # convert image to numpy matrix
matriz ^= 0xFF # invert matrix
oneD_matriz = matriz[up:down,left:right,1] #only look at the green canal 1
    indices = np.argwhere(oneD_matriz >= 180) # all pixel coordinates where the value is higher than 179
meanx = np.average(indices[:,0]) + up
meany = np.average(indices[:,1]) + left
# select new area of the matrix, that is the input for CNN
box = (meany - s, meanx - s, meany + s , meanx + s)
new_image = im.crop(box) # crop is Pill function
im.close()
return new_image
def find_overlap(root_path):
    '''Overlay all pictures.
    Creates an image that is the pixel-wise maximum of all overlaid pictures so the interesting area can be classified manually.
    The hard-coded image size has to be adjusted to the input resolution.
'''
maximum = np.zeros((3456, 4608))
for root, dirs, files in os.walk(root_path, topdown=False):
for name in files:
im_path = (os.path.join(root, name))
            if name[0] == 'I': # making sure it's an image, because there are some other files in the directory
image = Image.open(im_path)
image.convert('RGB')
matriz = np.array(image)
maximum = np.maximum(maximum, matriz[:, :, 0])
maximum = np.maximum(maximum, matriz[:, :, 1])
maximum = np.maximum(maximum, matriz[:, :, 2])
image.close()
return maximum
| 3.125
| 3
|
data_processor/imgs_to_arr.py
|
BoyuanChen/visual_behavior_modeling
| 9
|
12780387
|
<gh_stars>1-10
import os
import shutil
from PIL import Image
import numpy as np
from tqdm import tqdm
def mkdir(folder):
if os.path.exists(folder):
shutil.rmtree(folder)
os.makedirs(folder)
data_filepath = '/home/cml/bo/ToM_Base/sim_tom/rgb/tom_simple_rgb/data_processor/augmented_train_data_imgs/rgb_data_imgs'
target_filepath = '/home/cml/bo/ToM_Base/sim_tom/rgb/tom_simple_rgb/data_processor/augmented_train_data_imgs/rgb_target_imgs'
data_arr_filepath = '/home/cml/bo/ToM_Base/sim_tom/rgb/tom_simple_rgb/data_processor/augmented_train_data_imgs/rgb_data_arr'
target_arr_filepath = '/home/cml/bo/ToM_Base/sim_tom/rgb/tom_simple_rgb/data_processor/augmented_train_data_imgs/rgb_target_arr'
mkdir(data_arr_filepath)
mkdir(target_arr_filepath)
filelist = os.listdir(data_filepath)
for p_file in tqdm(filelist):
im = Image.open(os.path.join(data_filepath, p_file))
np_im = np.array(im)
np.save(os.path.join(data_arr_filepath, p_file.split('.')[0] + '.npy'), np_im)
filelist = os.listdir(target_filepath)
for p_file in tqdm(filelist):
im = Image.open(os.path.join(target_filepath, p_file))
np_im = np.array(im)
np.save(os.path.join(target_arr_filepath, p_file.split('.')[0] + '.npy'), np_im)
data_filepath = '/home/cml/bo/ToM_Base/sim_tom/rgb/tom_simple_rgb/data_processor/augmented_test_data_imgs/rgb_data_imgs'
target_filepath = '/home/cml/bo/ToM_Base/sim_tom/rgb/tom_simple_rgb/data_processor/augmented_test_data_imgs/rgb_target_imgs'
data_arr_filepath = '/home/cml/bo/ToM_Base/sim_tom/rgb/tom_simple_rgb/data_processor/augmented_test_data_imgs/rgb_data_arr'
target_arr_filepath = '/home/cml/bo/ToM_Base/sim_tom/rgb/tom_simple_rgb/data_processor/augmented_test_data_imgs/rgb_target_arr'
mkdir(data_arr_filepath)
mkdir(target_arr_filepath)
filelist = os.listdir(data_filepath)
for p_file in tqdm(filelist):
im = Image.open(os.path.join(data_filepath, p_file))
np_im = np.array(im)
np.save(os.path.join(data_arr_filepath, p_file.split('.')[0] + '.npy'), np_im)
filelist = os.listdir(target_filepath)
for p_file in tqdm(filelist):
im = Image.open(os.path.join(target_filepath, p_file))
np_im = np.array(im)
np.save(os.path.join(target_arr_filepath, p_file.split('.')[0] + '.npy'), np_im)
| 2.40625
| 2
|
examples/affect/affect_mfm.py
|
kapikantzari/MultiBench
| 148
|
12780388
|
import torch
import sys
import os
sys.path.append(os.getcwd())
sys.path.append(os.path.dirname(os.path.dirname(os.getcwd())))
from unimodals.MVAE import TSEncoder, TSDecoder # noqa
from utils.helper_modules import Sequential2 # noqa
from objective_functions.objectives_for_supervised_learning import MFM_objective # noqa
from torch import nn # noqa
from unimodals.common_models import MLP # noqa
from training_structures.Supervised_Learning import train, test # noqa
from datasets.affect.get_data import get_dataloader # noqa
from fusions.common_fusions import Concat # noqa
classes = 2
n_latent = 256
dim_0 = 35
dim_1 = 74
dim_2 = 300
timestep = 50
# mosi_data.pkl, mosei_senti_data.pkl
# mosi_raw.pkl, mosei_raw.pkl, sarcasm.pkl, humor.pkl
# raw_path: mosi.hdf5, mosei.hdf5, sarcasm_raw_text.pkl, humor_raw_text.pkl
traindata, validdata, test_robust = get_dataloader(
'/home/paul/MultiBench/mosi_raw.pkl', task='classification', robust_test=False, max_pad=True, max_seq_len=timestep)
encoders = [TSEncoder(dim_0, 30, n_latent, timestep, returnvar=False).cuda(), TSEncoder(
dim_1, 30, n_latent, timestep, returnvar=False).cuda(), TSEncoder(dim_2, 30, n_latent, timestep, returnvar=False).cuda()]
decoders = [TSDecoder(dim_0, 30, n_latent, timestep).cuda(), TSDecoder(
dim_1, 30, n_latent, timestep).cuda(), TSDecoder(dim_2, 30, n_latent, timestep).cuda()]
fuse = Sequential2(Concat(), MLP(3*n_latent, n_latent, n_latent//2)).cuda()
intermediates = [MLP(n_latent, n_latent//2, n_latent//2).cuda(), MLP(n_latent,
n_latent//2, n_latent//2).cuda(), MLP(n_latent, n_latent//2, n_latent//2).cuda()]
head = MLP(n_latent//2, 20, classes).cuda()
argsdict = {'decoders': decoders, 'intermediates': intermediates}
additional_modules = decoders+intermediates
objective = MFM_objective(2.0, [torch.nn.MSELoss(
), torch.nn.MSELoss(), torch.nn.MSELoss()], [1.0, 1.0, 1.0])
train(encoders, fuse, head, traindata, validdata, 200, additional_modules,
objective=objective, objective_args_dict=argsdict, save='mosi_mfm_best.pt')
print("Testing:")
model = torch.load('mosi_mfm_best.pt').cuda()
test(model=model, test_dataloaders_all=test_robust,
dataset='mosi', is_packed=False, no_robust=True)
| 1.921875
| 2
|
utils/generation.py
|
mazzzystar/WaveRNN
| 0
|
12780389
|
<reponame>mazzzystar/WaveRNN
import hparams as hp
from utils.dsp import *
def gen_testset(model, test_set, samples, batched, target, overlap, save_path) :
k = model.get_step() // 1000
for i, (m, x) in enumerate(test_set, 1):
if i > samples : break
print('\n| Generating: %i/%i' % (i, samples))
x = x[0].numpy()
if hp.mu_law :
x = decode_mu_law(x, 2**hp.bits, from_labels=True)
else :
x = label_2_float(x, hp.bits)
save_wav(x, f'{save_path}{k}k_steps_{i}_target.wav')
batch_str = f'gen_batched_target{target}_overlap{overlap}' if batched else 'gen_NOT_BATCHED'
save_str = f'{save_path}{k}k_steps_{i}_{batch_str}.wav'
_ = model.generate(m, save_str, batched, target, overlap, hp.mu_law)
| 2.3125
| 2
|
.archived/snakecode/0017.py
|
gearbird/calgo
| 4
|
12780390
|
<gh_stars>1-10
class Solution:
pad = ['', '', 'abc', 'def', 'ghi', 'jkl', 'mno', 'pqrs', 'tuv', 'wxyz']
def letterCombinations(self, digits: str) -> list[str]:
res: list[str] = []
if not digits:
return res
res.append('')
for d in digits:
newRes: list[str] = []
for comb in res:
for alpha in self.pad[int(d)]:
newRes.append(comb + alpha)
res = newRes
return res
| 2.96875
| 3
|
clinicadl/tests/test_classify.py
|
yogeshmj/AD-DL
| 112
|
12780391
|
<gh_stars>100-1000
# coding: utf8
import pytest
import os
from os.path import join, exists
@pytest.fixture(params=[
'classify_image',
'classify_slice',
'classify_patch'
])
def classify_commands(request):
out_filename = 'fold-0/cnn_classification/best_balanced_accuracy/DB-TEST_image_level_prediction.tsv'
if request.param == 'classify_image':
data_folder = 'data/models/image_model_baseline_AD_CN_single_fold/'
test_input = [
'classify',
'data/classify/OASIS_test',
'data/classify/OASIS_test/data.tsv',
data_folder,
'--prefix_output', 'DB-TEST',
'-cpu'
]
output_files = join(data_folder, out_filename)
elif request.param == 'classify_slice':
data_folder = 'data/models/slice_model_baseline_AD_CN_single_fold/'
test_input = [
'classify',
'data/classify/OASIS_test',
'data/classify/OASIS_test/data.tsv',
data_folder,
'--prefix_output', 'DB-TEST',
'-cpu'
]
output_files = join(data_folder, out_filename)
elif request.param == 'classify_patch':
data_folder = 'data/models/patch_model_baseline_AD_CN_multicnn_single_fold/'
test_input = [
'classify',
'data/classify/OASIS_test',
'data/classify/OASIS_test/data.tsv',
data_folder,
'--prefix_output', 'DB-TEST',
'-cpu'
]
output_files = join(data_folder, out_filename)
else:
raise NotImplementedError(
"Test %s is not implemented." %
request.param)
return test_input, output_files
def test_classify(classify_commands):
test_input = classify_commands[0]
output_files = classify_commands[1]
flag_error = not os.system("clinicadl " + " ".join(test_input))
assert flag_error
assert exists(output_files)
| 2.046875
| 2
|
heaps/max-heap.py
|
neerajp99/algorithms
| 1
|
12780392
|
<gh_stars>1-10
"""
1. Create a max heap
2. Insert into the max heap
3. Heapify the max heap upwards
4. Heapify the max heap downwards
5. Delete from the max-heap
"""
class MaxHeap:
def __init__(self, capacity):
self.storage = [0] * capacity
self.capacity = capacity
self.size = 0
def get_parent_index(self, index: int):
return (index - 1) // 2
def get_left_child_index(self, index: int):
return (2 * index) + 1
def get_right_child_index(self, index: int):
return (2 * index) + 2
def check_for_parent_presence(self, index: int):
return self.get_parent_index(index) >= 0
def check_for_left_child_presence(self, index: int):
return self.get_left_child_index(index) < self.size
def check_for_right_child_presence(self, index: int):
return self.get_right_child_index(index) < self.size
def is_full(self):
return self.size == self.capacity
def get_parent_value(self, index: int):
return self.storage[self.get_parent_index(index)]
def get_left_child_value(self, index: int):
return self.storage[self.get_left_child_index(index)]
def get_right_child_value(self, index: int):
return self.storage[self.get_right_child_index(index)]
def swap_indexes(self, index_1, index_2):
temp = self.storage[index_1]
self.storage[index_1] = self.storage[index_2]
self.storage[index_2] = temp
def heapifyUp_iterative(self):
index = self.size - 1
        while (self.check_for_parent_presence(index) and self.get_parent_value(index) < self.storage[index]):
self.swap_indexes(self.get_parent_index(index), index)
index = self.get_parent_index(index)
def heapifyUp_recursive(self, index):
        if self.check_for_parent_presence(index) and self.get_parent_value(index) < self.storage[index]:
self.swap_indexes(self.get_parent_index(index), index)
self.heapifyUp_recursive(self.get_parent_index(index))
def heapifyDown_iterative(self):
index = 0
while self.check_for_left_child_presence(index):
greater_child_index = self.get_left_child_index(index)
if self.check_for_right_child_presence(index) and self.get_right_child_value(index) > self.get_left_child_value(index):
greater_child_index = self.get_right_child_index(index)
if self.storage[index] > self.storage[greater_child_index]:
break
else:
self.swap_indexes(index, greater_child_index)
index = greater_child_index
def heapifyDown_recursive(self, index):
greater_child_index = index
if self.check_for_left_child_presence(index) and self.get_left_child_value(index) > self.storage[greater_child_index]:
greater_child_index = self.get_left_child_index(index)
if self.check_for_right_child_presence(index) and self.get_right_child_value(index) > self.storage[greater_child_index]:
greater_child_index = self.get_right_child_index(index)
if greater_child_index != index:
self.swap_indexes(index, greater_child_index)
self.heapifyDown_recursive(greater_child_index)
def insert_iterative(self, data: int):
if self.is_full():
            raise Exception('Heap is already full')
else:
self.storage[self.size] = data
self.size += 1
self.heapifyUp_iterative()
def insert_recursive(self, data: int):
if self.is_full():
            raise Exception('Heap is already full')
else:
self.storage[self.size] = data
self.size += 1
self.heapifyUp_recursive(self.size - 1)
def remove_max_iterative(self):
if self.size == 0:
            raise Exception('Heap is empty!')
data = self.storage[0]
self.storage[0] = self.storage[self.size - 1]
self.size -= 1
self.heapifyDown_iterative()
return data
def remove_max_recursive(self):
if self.size == 0:
            raise Exception('Heap is empty!')
else:
data = self.storage[0]
self.storage[0] = self.storage[self.size - 1]
self.size -= 1
self.heapifyDown_recursive(0)
return data
def delete_element(self, index):
if self.size == 0:
            raise Exception('Heap is empty!')
data = self.storage[index]
self.storage[index] = self.storage[self.size - 1]
self.size -= 1
self.heapifyUp_recursive(index)
self.heapifyDown_recursive(0)
return data
def print_heap(self):
print(self.storage)
x = MaxHeap(10)
x.insert_recursive(1)
x.insert_recursive(10)
x.insert_recursive(3)
x.insert_recursive(5)
x.insert_recursive(2)
x.insert_recursive(4)
x.insert_recursive(0)
x.print_heap()
print(x.size)
x.remove_max_recursive()
x.print_heap()
print(x.size)
x.delete_element(4)
x.print_heap()
| 3.84375
| 4
|
qcloudsdkticket/GetCategoryListRequest.py
|
f3n9/qcloudcli
| 0
|
12780393
|
# -*- coding: utf-8 -*-
from qcloudsdkcore.request import Request
class GetCategoryListRequest(Request):
def __init__(self):
super(GetCategoryListRequest, self).__init__(
'ticket', 'qcloudcliV1', 'GetCategoryList', 'ticket.api.qcloud.com')
| 1.59375
| 2
|
fixture/user.py
|
planofmind/python_training
| 0
|
12780394
|
<reponame>planofmind/python_training<gh_stars>0
from selenium.webdriver.support.select import Select
class UserHelper:
def __init__(self, app):
self.app = app
def create(self, user):
wd = self.app.wd
wd.find_element_by_link_text("add new").click()
wd.find_element_by_name("firstname").send_keys(user.firstname)
wd.find_element_by_name("middlename").send_keys(user.middlename)
wd.find_element_by_name("lastname").send_keys(user.lastname)
wd.find_element_by_name("nickname").send_keys(user.nickname)
wd.find_element_by_name("title").send_keys(user.title)
wd.find_element_by_name("company").send_keys(user.company)
wd.find_element_by_name("address").send_keys(user.address)
wd.find_element_by_name("home").send_keys(user.home_phone)
wd.find_element_by_name("mobile").send_keys(user.mobile_phone)
wd.find_element_by_name("work").send_keys(user.work_phone)
wd.find_element_by_name("fax").send_keys(user.fax)
wd.find_element_by_name("email").send_keys(user.email)
wd.find_element_by_name("email2").send_keys(user.email2)
wd.find_element_by_name("email3").send_keys(user.email3)
wd.find_element_by_name("homepage").send_keys(user.homepage)
Select(wd.find_element_by_name("bday")).select_by_visible_text(user.bday)
Select(wd.find_element_by_name("bmonth")).select_by_visible_text(user.bmonth)
wd.find_element_by_name("byear").send_keys(user.byear)
Select(wd.find_element_by_name("aday")).select_by_visible_text(user.aday)
Select(wd.find_element_by_name("amonth")).select_by_visible_text(user.amonth)
wd.find_element_by_name("ayear").send_keys(user.ayear)
wd.find_element_by_name("address2").send_keys(user.address2)
wd.find_element_by_name("phone2").send_keys(user.phone2)
wd.find_element_by_name("notes").send_keys(user.notes)
wd.find_element_by_xpath("//div[@id='content']/form/input[21]").click()
self.return_to_home_page()
def return_to_home_page(self):
wd = self.app.wd
wd.find_element_by_link_text("home page").click()
| 2.5625
| 3
|
nerblackbox/cli.py
|
af-ai-center/nerblackbox
| 11
|
12780395
|
"""Command Line Interface of the nerblackbox package."""
import os
import subprocess
from os.path import join
import click
from typing import Dict, Any
from nerblackbox.modules.main import NerBlackBoxMain
########################################################################################################################
# CLI
########################################################################################################################
@click.group()
@click.option(
"--data_dir", default="data", type=str, help="[str] relative path of data directory"
)
@click.option(
"--modify/--no-modify", default=False, help="[bool] if flag=set_up_dataset"
)
@click.option(
"--val_fraction", default=None, type=float, help="[float] if flag=set_up_dataset"
)
@click.option(
"--verbose/--no-verbose", default=False, help="[bool] if flag=set_up_dataset"
)
@click.option("--run_name", default=None, type=str, help="[str] if flag=run_experiment")
@click.option("--device", default=None, type=str, help="[str] if flag=run_experiment")
@click.option("--fp16/--no-fp16", default=False, help="[bool] if flag=run_experiment")
@click.option("--results/--no-results", default=False, help="[bool] if flag=clear_data")
@click.pass_context
def nerbb(ctx, **kwargs_optional):
ctx.ensure_object(dict)
# kwargs
kwargs = {k: v for k, v in kwargs_optional.items() if v is not None}
# environ
base_dir = os.getcwd()
data_dir = kwargs.pop("data_dir")
os.environ["BASE_DIR"] = base_dir
os.environ["DATA_DIR"] = join(base_dir, data_dir)
# print('BASE_DIR = ', os.environ.get('BASE_DIR'))
# print('DATA_DIR = ', os.environ.get('DATA_DIR'))
# context
ctx.obj = kwargs
########################################################################################################################
# COMMANDS HELPER FUNCTION
########################################################################################################################
def _run_nerblackbox_main(_ctx_obj: Dict[str, Any], _kwargs: Dict[str, str]) -> None:
"""
given context (_ctx_obj) and all relevant arguments (_kwargs), invoke NerBlackBoxMain
is used by every nerbb command
"""
kwargs = dict(**_ctx_obj, **_kwargs)
nerblackbox_main = NerBlackBoxMain(**kwargs)
nerblackbox_main.main()
########################################################################################################################
# COMMANDS
########################################################################################################################
@nerbb.command(name="analyze_data")
@click.pass_context
@click.argument("dataset_name")
def analyze_data(ctx, dataset_name: str):
"""analyze a dataset."""
kwargs = {
"flag": "analyze_data",
"dataset_name": dataset_name,
}
_run_nerblackbox_main(ctx.obj, kwargs)
@nerbb.command(name="clear_data")
@click.pass_context
def clear_data(ctx):
"""clear data (checkpoints and optionally results)."""
kwargs = {
"flag": "clear_data",
}
_run_nerblackbox_main(ctx.obj, kwargs)
@nerbb.command(name="download")
@click.pass_context
def download(ctx):
"""
download & prepare built-in datasets, prepare experiment configuration.
needs to be called exactly once before any other CLI/API commands of the package are executed
in case built-in datasets shall be used.
"""
kwargs = {
"flag": "download",
}
_run_nerblackbox_main(ctx.obj, kwargs)
@nerbb.command(name="get_experiments")
@click.pass_context
def get_experiments(ctx):
"""get overview on experiments."""
kwargs = {
"flag": "get_experiments",
}
_run_nerblackbox_main(ctx.obj, kwargs)
@nerbb.command(name="get_experiment_results")
@click.pass_context
@click.argument("experiment_name")
def get_experiment_results(ctx, experiment_name: str):
"""get results for a single experiment."""
kwargs = {
"flag": "get_experiment_results",
"experiment_name": experiment_name,
}
_run_nerblackbox_main(ctx.obj, kwargs)
@nerbb.command(name="get_experiments_results")
@click.pass_context
def get_experiments_results(ctx):
"""get results for multiple experiments."""
kwargs = {
"flag": "get_experiments_results",
}
_run_nerblackbox_main(ctx.obj, kwargs)
@nerbb.command(name="init")
@click.pass_context
def init(ctx):
"""
initialize the data_dir directory.
needs to be called exactly once before any other CLI/API commands of the package are executed.
"""
kwargs = {
"flag": "init",
}
_run_nerblackbox_main(ctx.obj, kwargs)
@nerbb.command(name="mlflow")
def mlflow():
"""show detailed experiment results in mlflow (port = 5000)."""
cd_dir = f'{join(os.environ.get("DATA_DIR"), "results")}'
subprocess.run(f"cd {cd_dir}; mlflow ui", shell=True)
@nerbb.command(name="predict")
@click.pass_context
@click.argument("experiment_name")
@click.argument("text_input")
def predict(ctx, experiment_name: str, text_input: str):
"""predict labels for text_input using the best model of a single experiment."""
kwargs = {
"flag": "predict",
"experiment_name": experiment_name,
"text_input": text_input,
}
_run_nerblackbox_main(ctx.obj, kwargs)
@nerbb.command(name="predict_proba")
@click.pass_context
@click.argument("experiment_name")
@click.argument("text_input")
def predict_proba(ctx, experiment_name: str, text_input: str):
"""predict label probabilities for text_input using the best model of a single experiment."""
kwargs = {
"flag": "predict_proba",
"experiment_name": experiment_name,
"text_input": text_input,
}
_run_nerblackbox_main(ctx.obj, kwargs)
@nerbb.command(name="run_experiment")
@click.pass_context
@click.argument("experiment_name")
def run_experiment(ctx, experiment_name: str):
"""run a single experiment."""
kwargs = {
"flag": "run_experiment",
"experiment_name": experiment_name,
}
_run_nerblackbox_main(ctx.obj, kwargs)
@nerbb.command(name="set_up_dataset")
@click.pass_context
@click.argument("dataset_name")
def set_up_dataset(ctx, dataset_name: str):
"""set up a dataset using the associated Formatter class."""
kwargs = {
"flag": "set_up_dataset",
"dataset_name": dataset_name,
}
_run_nerblackbox_main(ctx.obj, kwargs)
@nerbb.command(name="show_experiment_config")
@click.pass_context
@click.argument("experiment_name")
def show_experiment_config(ctx, experiment_name: str):
"""show a single experiment configuration in detail."""
kwargs = {
"flag": "show_experiment_config",
"experiment_name": experiment_name,
}
_run_nerblackbox_main(ctx.obj, kwargs)
@nerbb.command(name="show_experiment_configs")
@click.pass_context
def show_experiment_configs(ctx):
"""show overview on all available experiment configurations."""
kwargs = {
"flag": "show_experiment_configs",
}
_run_nerblackbox_main(ctx.obj, kwargs)
@nerbb.command(name="tensorboard")
def tensorboard():
"""show detailed experiment results in tensorboard. (port = 6006)."""
cd_dir = f'{join(os.environ.get("DATA_DIR"), "results")}'
subprocess.run(
f"cd {cd_dir}; tensorboard --logdir tensorboard --reload_multifile=true",
shell=True,
)
| 2.5625
| 3
|
system_upper.py
|
chenmy1903/student3
| 1
|
12780396
|
<reponame>chenmy1903/student3<filename>system_upper.py
"""提权组件
解决伽卡他卡使用SYSTEM权限运行的问题, 将破解器提到SYSTEM权限, 就能杀死伽卡他卡
"""
# Copyright 2022 chenmy1903
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import ctypes
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
def is_admin():
try:
return ctypes.windll.shell32.IsUserAnAdmin()
except:
return False
def system_upper(program: str):
"""将某个程序提权"""
return os.system(f"{psexec} -i -d -s {program}")
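# Editor's note: PsExec's -s flag runs the target as the SYSTEM account, -i runs it
# interactively on the console session, and -d returns without waiting for the
# process to exit (see the Sysinternals PsExec documentation).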
def is_in_exe_file():
"""获取是否在exe中运行"""
if sys.exec_prefix != os.path.dirname(sys.executable):
return True
return False
psexec = os.path.join(sys.exec_prefix, "cmd_lib", "PsExec.exe") if is_in_exe_file() else os.path.join(BASE_DIR, "cmd_lib", "PsExec.exe")
def get_pj_file():
"""获取破解器主文件"""
if is_in_exe_file(): # 直接从exe中提取主程序
main_file = os.path.join(sys.exec_prefix, "pj.exe")
else: # 在py文件中运行
main_file = os.path.join(BASE_DIR, 'pj.exe')
return main_file # 返回地址
def main():
    system_upper(get_pj_file()) # launch the cracker
if __name__ == "__main__":
if is_in_exe_file() or is_admin():
main()
else:
ctypes.windll.shell32.ShellExecuteW(None, "runas", sys.executable, __file__, None, 1)
| 2.609375
| 3
|
configs/universenet/ablation/universenet50_2008_nosepc_fp16_4x4_mstrain_480_960_1x_coco.py
|
Jack-Hu-2001/UniverseNet
| 314
|
12780397
|
<gh_stars>100-1000
_base_ = [
'../../universenet/models/universenet50_2008.py',
'../../_base_/datasets/coco_detection_mstrain_480_960.py',
'../../_base_/schedules/schedule_1x.py', '../../_base_/default_runtime.py'
]
model = dict(
neck=dict(
_delete_=True,
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(type='GFLHead', stacked_convs=4))
data = dict(samples_per_gpu=4)
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
lr_config = dict(warmup_iters=1000)
fp16 = dict(loss_scale=512.)
| 1.3125
| 1
|
src/seq2seq.py
|
zhihanyang2022/min-char-seq2seq
| 0
|
12780398
|
<reponame>zhihanyang2022/min-char-seq2seq
import os
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import gin
from networks import Encoder, Decoder, DecoderWithAttention
SEQ_LEN_UPPER_LIM = 100
@gin.configurable(module=__name__)
class Seq2Seq:
"""Implement the sequence-2-sequence algorithm."""
def __init__(self, num_tokens, sos_token_index, eos_token_index, pad_token_index, device, use_attention=False, lr=1e-4, max_grad_norm=1):
self.num_tokens = num_tokens
self.loss_fn = nn.CrossEntropyLoss(ignore_index=pad_token_index, reduction='sum')
self.sos_token_index = sos_token_index
self.eos_token_index = eos_token_index
self.pad_token_index = pad_token_index
self.device = device
self.use_attention = use_attention
self.lr = lr
self.max_grad_norm = max_grad_norm
self.encoder = Encoder(num_tokens).to(device)
self.decoder = DecoderWithAttention(num_tokens).to(device) if self.use_attention else Decoder(num_tokens).to(device)
self.encoder_optim = optim.Adam(self.encoder.parameters(), lr=self.lr)
self.decoder_optim = optim.Adam(self.decoder.parameters(), lr=self.lr)
def update_networks(self, src_seqs, tgt_seqs, just_do_forward=False):
if just_do_forward:
self.encoder.eval()
self.decoder.eval()
else:
self.encoder.train()
self.decoder.train()
if isinstance(self.decoder, DecoderWithAttention):
encoder_outputs, hiddens = self.encoder(src_seqs, return_outputs=True)
else:
hiddens = self.encoder(src_seqs, return_outputs=False)
current_indices = tgt_seqs[:, 0] # represents SOS
loss_sum = 0
num_correct = 0
max_seq_len = tgt_seqs.shape[1]
for t in range(0, max_seq_len - 1):
# prediction
if isinstance(self.decoder, DecoderWithAttention):
next_logits, hiddens, _ = self.decoder.predict_next_logits(current_indices, hiddens, encoder_outputs) # next_logits has shape (bs, num_tokens)
else:
next_logits, hiddens = self.decoder.predict_next_logits(current_indices, hiddens)
# computing loss
next_indices_true = tgt_seqs[:, t+1] # (bs, )
loss_sum_t = self.loss_fn(next_logits, next_indices_true)
loss_sum += loss_sum_t
# computing acc
next_indices_generated = next_logits.argmax(dim=1)
num_correct_t = torch.sum(
next_indices_generated.eq(next_indices_true) * ~(next_indices_true.eq(self.pad_token_index))
)
num_correct += num_correct_t
# preparing for next timestep
next_indices = next_indices_true if np.random.uniform() <= 0.5 else next_indices_generated
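            # Editor's note: this is teacher forcing with probability 0.5; half the
            # time the ground-truth token is fed to the next step, otherwise the
            # model's own greedy prediction is used.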
current_indices = next_indices # for next iteration
# computing the number of entries over which we computed loss_sum and num_correct
num_non_pad_entries = int(torch.sum(~tgt_seqs[:, 1:].eq(self.pad_token_index)))
loss_ = loss_sum / num_non_pad_entries
acc_ = num_correct / num_non_pad_entries
if not just_do_forward:
self.encoder_optim.zero_grad()
self.decoder_optim.zero_grad()
loss_.backward()
torch.nn.utils.clip_grad_norm_(self.encoder.parameters(), max_norm=self.max_grad_norm)
torch.nn.utils.clip_grad_norm_(self.decoder.parameters(), max_norm=self.max_grad_norm)
self.encoder_optim.step()
self.decoder_optim.step()
return float(loss_), float(acc_)
def transduce(self, input_seq):
"""
Take a single src sequence, and transduce it into a single tgt sequence.
:param input_seq:
:param start_token:
:param end_token:
:return:
"""
self.encoder.eval()
self.decoder.eval()
with torch.no_grad():
# input_seq has shape (seq_len, )
# start_token has shape (1, )
input_seq = input_seq.unsqueeze(0) # (1, seq_len)
encoder_outputs, hidden = self.encoder(input_seq, return_outputs=True) # (1, seq_len, hidden_size)
current_index = torch.tensor([self.sos_token_index]).long().to(self.device)
eos_token_index = torch.tensor([self.eos_token_index]).long().to(self.device)
target_seq_generated = [] # int(self.sos_token_index)]
list_of_attention_vectors = []
while True:
if isinstance(self.decoder, DecoderWithAttention):
next_logits, hidden, attention_vectors = self.decoder.predict_next_logits(current_index, hidden,
encoder_outputs)
# next_logits has shape (bs, num_tokens)
list_of_attention_vectors.append(attention_vectors)
else:
next_logits, hidden = self.decoder.predict_next_logits(current_index, hidden)
next_index_generated = next_logits.argmax(dim=1)
# print(int(next_index_generated), (attention_vectors.view(-1) > 0.5).float().cpu().numpy())
if int(next_index_generated) != eos_token_index:
target_seq_generated.append(int(next_index_generated))
current_index = next_index_generated # for next iteration
if int(current_index) == eos_token_index or len(target_seq_generated) >= SEQ_LEN_UPPER_LIM:
break
if isinstance(self.decoder, DecoderWithAttention):
# print(list_of_attention_vectors[0].shape)
attention_matrix = torch.cat(list_of_attention_vectors, dim=0).cpu().numpy()
# attention_matrix =
# attention_matrix[np.abs(attention_matrix) < 0.01] = 0
# attention_matrix[np.abs(attention_matrix) > 0.01] = 1
# print(attention_matrix)
return target_seq_generated, attention_matrix
else:
return target_seq_generated
def save(self, save_dir):
torch.save(self.encoder.state_dict(), os.path.join(save_dir, "encoder.pth"))
torch.save(self.decoder.state_dict(), os.path.join(save_dir, "decoder.pth"))
def load(self, save_dir):
self.encoder.load_state_dict(torch.load(os.path.join(save_dir, "encoder.pth"), map_location=self.device))
self.decoder.load_state_dict(torch.load(os.path.join(save_dir, "decoder.pth"), map_location=self.device))
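# --- Hedged usage sketch (added for illustration; the vocabulary size, special token
# indices and tensor shapes below are assumptions, not values taken from this repo) ---
#
#     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#     model = Seq2Seq(num_tokens=30, sos_token_index=0, eos_token_index=1,
#                     pad_token_index=2, device=device, use_attention=True)
#     src = torch.randint(3, 30, (16, 20)).to(device)  # (batch_size, src_seq_len)
#     tgt = torch.randint(3, 30, (16, 25)).to(device)  # (batch_size, tgt_seq_len), column 0 is SOS
#     loss, acc = model.update_networks(src, tgt)
#     seq, attention = model.transduce(src[0])         # with attention; returns just seq otherwise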
| 2.375
| 2
|
api/urls.py
|
CSC301-TTS-Project/TrafficFinderServer
| 0
|
12780399
|
<gh_stars>0
# pages/urls.py
from django.urls import path
from .views import *
from rest_framework.authtoken import views
urlpatterns = [
path('getRoute', get_route),
path('insertNode', insert_node),
path('modifyNode', modify_node),
path('deleteNode', delete_node),
path('getKeys', get_api_keys),
path('getTrafficData', get_traffic_data),
path('getGeoJson', get_route_as_geojson),
path('login_user', views.obtain_auth_token),
path('signup_user', signup_user),
]
| 1.757813
| 2
|
bot.py
|
forever404/CTFd-Bot
| 4
|
12780400
|
from config import logininfo
import re,json,time,configparser,logging,sys,os,requests,asyncio
def login(login_url, username, password):
    # request headers
my_headers = {
'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36',
'Accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Encoding' : 'gzip',
'Accept-Language' : 'zh-CN,zh;q=0.8,en;q=0.6,zh-TW;q=0.4'
}
    # get the token
sss = requests.Session()
try:
r = sss.get(login_url, headers = my_headers)
except:
#logging.error('[error]fail to login,check your config and network')
return
reg = r'<input type="hidden" name="nonce" value="(.*)">'
pattern = re.compile(reg)
result = pattern.findall(r.content.decode('utf-8'))
token = result[0]
#postdata
my_data = {
'name' : username,
'password' : password,
'nonce' : token,
}
    # after login
try:
r = sss.post(login_url, headers = my_headers, data = my_data)
except:
#logging.error('[error]fail to login,check your config and network')
return
if r.ok == True:
logging.info('[success]login ok,start the robot...')
return sss
else:
pass
#logging.error('[error]fail to login,check your config and network')
# read the config file
def readConf(configFile,subject,key):
cf = configparser.ConfigParser()
filename = cf.read(configFile)
return cf.get(subject,key)
# get the user list
def get_user_list():
theSession = login(logininfo.login_url,logininfo.username,logininfo.password)
    apiUrl = 'http://ip:port/api/v1/users'  # CTFd address
try:
responseJson = theSession.get(apiUrl)
except:
logging.error('[error]fail to get api info,continue.')
return []
jsonInfo = json.loads(responseJson.text)
if jsonInfo['success'] != True:
logging.error("error to get userlist")
return []
userList = eval(str(jsonInfo['data']))
return userList
# get flag submission info
def get_attempt_info():
theSession = login(logininfo.login_url,logininfo.username,logininfo.password)
    apiUrl = 'http://ip:port/api/v1/submissions'  # CTFd address
try:
responseJson = theSession.get(apiUrl)
except:
logging.error('[error0]fail to get api info,continue.')
return []
jsonInfo = json.loads(responseJson.text)
if jsonInfo['success'] != True:
logging.error("error to get attemptlist")
return []
allList = eval(str(jsonInfo['data']))
return allList
# send requests asynchronously in a loop
async def deal_user_list():
global userLen,userList
while True:
try:
tmpList = get_user_list()
tmpLen = len(tmpList)
print(userLen,tmpLen)
if tmpLen == 0:
await asyncio.sleep(3)
continue
if userLen < tmpLen:
for i in range(userLen,tmpLen):
                    message = tmpList[i]['name'] + " registered successfully~"
requests.get(logininfo.group_api+message)
userLen = tmpLen
userList = tmpList
else:
userLen = tmpLen
                userList = tmpList
await asyncio.sleep(3)
except TypeError:
logging.error('[error1]fail to get api info,continue.')
continue
await asyncio.sleep(3)
async def deal_attemp_list():
global userLen,userList,allLen,allList
while True:
try:
tmpallList = get_attempt_info()
tmpallLen = len(tmpallList)
if tmpallLen == 0:
await asyncio.sleep(3)
continue
if allLen < tmpallLen:
for i in range(allLen,tmpallLen):
if tmpallList[i]['type'] == "correct":
chaname = ""
for s in userList:
if str(s['id']) == str(tmpallList[i]['user_id']):
chaname = s['name']
if chaname == "":
continue
await asyncio.sleep(3)
message = "恭喜" + chaname + "做出" + str(tmpallList[i]['challenge']['category'])+"题目-" + str(tmpallList[i]['challenge']['name'])
#requests.get(logininfo.url_api+message)
requests.get(logininfo.group_api+message)
allLen = tmpallLen
allList = tmpallList
else:
allLen = tmpallLen
allList = tmpallList
await asyncio.sleep(3)
except TypeError:
logging.error('[error2]fail to get api info,continue.')
continue
if __name__ == ("__main__"):
logging.basicConfig(filename='err.log',level=logging.ERROR,format='%(asctime)s %(filename)s[line:%(lineno)d] %(message)s',datefmt='%Y-%m-%d')
    # declare global variables
userList = get_user_list()
#userLen = 0
userLen = len(userList)
allList = get_attempt_info()
allLen = len(allList)
#allLen = 0
loop = asyncio.get_event_loop()
tasks = [deal_user_list(),deal_attemp_list()]
loop.run_until_complete(asyncio.wait(tasks))
loop.close()
| 2.59375
| 3
|
.idea/VirtualEnvironment/Lib/site-packages/tests/outcomes/imports/test_import_package_5/random_module/main.py
|
Vladpetr/NewsPortal
| 0
|
12780401
|
import in1.in2.main2 as m
import in1.file as f
print(m.x + f.y)
| 1.757813
| 2
|
tools/train_w2v.py
|
sketscripter/emotional-chatbot-cakechat
| 1,608
|
12780402
|
<reponame>sketscripter/emotional-chatbot-cakechat
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from cakechat.utils.text_processing import get_processed_corpus_path, load_processed_dialogs_from_json, \
FileTextLinesIterator, get_dialog_lines_and_conditions, ProcessedLinesIterator, get_flatten_dialogs
from cakechat.utils.w2v.model import _get_w2v_model as get_w2v_model
from cakechat.config import TRAIN_CORPUS_NAME, VOCABULARY_MAX_SIZE, WORD_EMBEDDING_DIMENSION, W2V_WINDOW_SIZE, \
USE_SKIP_GRAM
if __name__ == '__main__':
processed_corpus_path = get_processed_corpus_path(TRAIN_CORPUS_NAME)
dialogs = load_processed_dialogs_from_json(
FileTextLinesIterator(processed_corpus_path), text_field_name='text', condition_field_name='condition')
training_dialogs_lines_for_w2v, _ = get_dialog_lines_and_conditions(
get_flatten_dialogs(dialogs), text_field_name='text', condition_field_name='condition')
tokenized_training_lines = ProcessedLinesIterator(training_dialogs_lines_for_w2v, processing_callbacks=[str.split])
get_w2v_model(
tokenized_lines=tokenized_training_lines,
corpus_name=TRAIN_CORPUS_NAME,
voc_size=VOCABULARY_MAX_SIZE,
vec_size=WORD_EMBEDDING_DIMENSION,
window_size=W2V_WINDOW_SIZE,
skip_gram=USE_SKIP_GRAM)
| 2.265625
| 2
|
biobert_ner/utils_ner.py
|
rufinob/ehr-relation-extraction
| 43
|
12780403
|
<filename>biobert_ner/utils_ner.py
import sys
sys.path.append("../")
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union, Dict
from ehr import HealthRecord
from filelock import FileLock
from transformers import PreTrainedTokenizer
import torch
from torch import nn
from torch.utils.data.dataset import Dataset
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
"""
A single training/test example for token classification.
Args:
guid: Unique id for the example.
words: list. The words of the sequence.
labels: (Optional) list. The labels for each word of the sequence. This should be
specified for train and dev examples, but not for test examples.
"""
guid: str
words: List[str]
labels: Optional[List[str]]
@dataclass
class InputFeatures:
"""
A single set of features of data.
Property names are the same names as the corresponding inputs to a model.
"""
input_ids: List[int]
attention_mask: List[int]
token_type_ids: Optional[List[int]] = None
label_ids: Optional[List[int]] = None
class Split(Enum):
train = "train_dev"
dev = "devel"
test = "test"
class NerTestDataset(Dataset):
"""
Dataset for test examples
"""
features: List[InputFeatures]
pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
def __init__(self, input_features):
self.features = input_features
def __len__(self):
return len(self.features)
def __getitem__(self, i) -> InputFeatures:
return self.features[i]
class NerDataset(Dataset):
features: List[InputFeatures]
pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
# Use cross entropy ignore_index as padding label id so that only
# real label ids contribute to the loss later.
def __init__(
self,
data_dir: str,
tokenizer: PreTrainedTokenizer,
labels: List[str],
model_type: str,
max_seq_length: Optional[int] = None,
overwrite_cache=False,
mode: Split = Split.train,
):
# Load data features from cache or dataset file
cached_features_file = os.path.join(
data_dir, "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lock_path = cached_features_file + ".lock"
with FileLock(lock_path):
if os.path.exists(cached_features_file) and not overwrite_cache:
logger.info(f"Loading features from cached file {cached_features_file}")
self.features = torch.load(cached_features_file)
else:
logger.info(f"Creating features from dataset file at {data_dir}")
examples = read_examples_from_file(data_dir, mode)
self.features = convert_examples_to_features(
examples,
labels,
max_seq_length,
tokenizer,
cls_token_at_end=bool(model_type in ["xlnet"]),
# xlnet has a cls token at the end
cls_token=tokenizer.cls_token,
cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
sep_token=tokenizer.sep_token,
sep_token_extra=False,
# roberta uses an extra separator b/w pairs of sentences, cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805
pad_on_left=bool(tokenizer.padding_side == "left"),
pad_token=tokenizer.pad_token_id,
pad_token_segment_id=tokenizer.pad_token_type_id,
pad_token_label_id=self.pad_token_label_id,
)
logger.info(f"Saving features into cached file {cached_features_file}")
torch.save(self.features, cached_features_file)
def __len__(self):
return len(self.features)
def __getitem__(self, i) -> InputFeatures:
return self.features[i]
def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
if isinstance(mode, Split):
mode = mode.value
file_path = os.path.join(data_dir, f"{mode}.txt")
guid_index = 1
examples = []
with open(file_path, encoding="utf-8") as f:
words = []
labels = []
for line in f:
line = line.rstrip()
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
guid_index += 1
words = []
labels = []
else:
splits = line.split(" ")
words.append(splits[0])
if len(splits) > 1:
labels.append(splits[-1].replace("\n", ""))
else:
# Examples could have no label for mode = "test"
labels.append("O")
if words:
examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
return examples
def convert_examples_to_features(
examples: List[InputExample],
label_list: List[str],
max_seq_length: int,
tokenizer: PreTrainedTokenizer,
cls_token_at_end=False,
cls_token="[CLS]",
cls_token_segment_id=1,
sep_token="[SEP]",
sep_token_extra=False,
pad_on_left=False,
pad_token=0,
pad_token_segment_id=0,
pad_token_label_id=-100,
sequence_a_segment_id=0,
mask_padding_with_zero=True,
verbose=1,
) -> List[InputFeatures]:
"""
Loads a data file into a list of `InputFeatures`
`cls_token_at_end` define the location of the CLS token:
- False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
- True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
`cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)
"""
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10_000 == 0:
logger.info("Writing example %d of %d", ex_index, len(examples))
tokens = []
label_ids = []
for word, label in zip(example.words, example.labels):
tokens.append(word)
if word.startswith("##"):
label_ids.append(pad_token_label_id)
else:
label_ids.append(label_map[label])
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
special_tokens_count = tokenizer.num_special_tokens_to_add()
if len(tokens) > max_seq_length - special_tokens_count:
logger.info("Length %d exceeds max seq len, truncating." % len(tokens))
tokens = tokens[: (max_seq_length - special_tokens_count)]
label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
        # used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
segment_ids = [sequence_a_segment_id] * len(tokens)
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
tokens = [cls_token] + tokens
label_ids = [pad_token_label_id] + label_ids
segment_ids = [cls_token_segment_id] + segment_ids
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
label_ids = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(label_ids) == max_seq_length
if ex_index < 2 and verbose == 1:
logger.info("*** Example ***")
logger.info("guid: %s", example.guid)
logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))
if "token_type_ids" not in tokenizer.model_input_names:
segment_ids = None
features.append(
InputFeatures(
input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
)
)
return features
def get_labels(path: str) -> List[str]:
if path:
with open(path, "r") as f:
labels = f.read().splitlines()
if "O" not in labels:
labels = ["O"] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
def generate_input_files(ehr_records: List[HealthRecord], filename: str,
ade_records: List[Dict] = None, max_len: int = 510,
sep: str = ' '):
"""
Write EHR and ADE records to a file.
Parameters
----------
ehr_records : List[HealthRecord]
List of EHR records.
ade_records : List[Dict]
List of ADE records.
filename : str
File name to write to.
max_len : int, optional
Max length of an example. The default is 510.
sep : str, optional
Token-label separator. The default is a space.
"""
with open(filename, 'w') as f:
for record in ehr_records:
split_idx = record.get_split_points(max_len=max_len)
labels = record.get_labels()
tokens = record.get_tokens()
start = split_idx[0]
end = split_idx[1]
for i in range(1, len(split_idx)):
for (token, label) in zip(tokens[start:end + 1], labels[start:end + 1]):
f.write('{}{}{}\n'.format(token, sep, label))
start = end + 1
if i != len(split_idx) - 1:
end = split_idx[i + 1]
f.write('\n')
f.write('\n')
if ade_records is not None:
for ade in ade_records:
ade_tokens = ade['tokens']
ade_entities = ade['entities']
ent_label_map = {'Drug': 'DRUG', 'Adverse-Effect': 'ADE', 'ADE': 'ADE'}
ade_labels = ['O'] * len(ade_tokens)
for ent in ade_entities.values():
ent_type = ent.name
start_idx = ent.range[0]
end_idx = ent.range[1]
for idx in range(start_idx, end_idx + 1):
if idx == start_idx:
ade_labels[idx] = 'B-' + ent_label_map[ent_type]
else:
ade_labels[idx] = 'I-' + ent_label_map[ent_type]
for (token, label) in zip(ade_tokens, ade_labels):
f.write('{}{}{}\n'.format(token, sep, label))
f.write('\n')
print("Data successfully saved in " + filename)
| 2.53125
| 3
|
BasicModels/TFLinearRegression.py
|
amingolnari/Deep-Learning-Course
| 17
|
12780404
|
<filename>BasicModels/TFLinearRegression.py<gh_stars>10-100
"""
github : https://github.com/amingolnari/Deep-Learning-Course
Author : <NAME>
TF Version : 1.12.0
Date : 4/12/2018
TensorFlow Linear Regression
Code 200
"""
import numpy as np
import tensorflow as tf
tf.reset_default_graph() # Reset Graph
import matplotlib.pyplot as plt
x = np.linspace(0, 2*np.pi, 100, dtype = 'float32') # x = 0 --> 2*pi with length 100
y = -x + 2*np.random.rand(x.shape[0]) # y = -x + noise
# Expand Dim (n,) --> (n, 1)
x = np.expand_dims(x, axis = 1)
y = np.expand_dims(y, axis = 1)
# Initialization
InputShape = 1
NumClass = 1
lr = .01
Epochs = 50
# TF Input Graph and Target
Input = tf.placeholder(tf.float32, [None, InputShape])
Target = tf.placeholder(tf.float32, [None, NumClass])
# Weight and Bias
Weight = tf.Variable(tf.zeros([InputShape, NumClass]))
Bias = tf.Variable(tf.ones([NumClass]))
# TF Output Graph
Out = tf.add(tf.matmul(Input, Weight), Bias)
# Loss Function and Minimization MSE (Target - Output)
loss = tf.losses.mean_squared_error(Target, Out)
Train = tf.train.GradientDescentOptimizer(lr).minimize(loss)
# Init Graph
Sess = tf.Session()
Sess.run(tf.global_variables_initializer())
# Train The Model
for i in range(Epochs):
Sess.run(Train, feed_dict = {Input: x, Target: y})
# Plot Regression
Predict = Sess.run(Out, {Input: x, Target: y})
plt.plot(x, Predict, 'b')
plt.plot(x, y, 'r.')
plt.show()
| 3.1875
| 3
|
Program_Python_code/IOT_02/request_2.py
|
skyhigh8591/Learning_Test_Program
| 0
|
12780405
|
<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 15 13:52:25 2020
@author: skyhigh
"""
import requests
from bs4 import BeautifulSoup
url_1 = 'http://192.168.1.27:6060/login'
html = requests.get(url_1)
html.encoding='utf-8'
if html.status_code == requests.codes.ok:
print("ok")
print("//////////////////////////////")
sp = BeautifulSoup(html.text,'html.parser')
data = sp.title
print(data)
print("//////////////////////////////")
data = sp.h1
print(data)
print("//////////////////////////////")
data=sp.button
print(data)
print("//////////////////////////////")
data = sp.find('h1')
print(data)
print("//////////////////////////////")
data = sp.find_all('input',{'id':'Lname'})
print(data)
print("//////////////////////////////")
| 2.78125
| 3
|
wrappers/vcfanno/wrapper.py
|
delvinso/crg2
| 7
|
12780406
|
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, <NAME>"
__email__ = "<EMAIL>"
__license__ = "BSD"
from snakemake.shell import shell
from os import path
import shutil
import tempfile
shell.executable("bash")
luascript = snakemake.params.get("lua_script")
if luascript:
luascriptprefix = "-lua {}".format(luascript)
else:
luascriptprefix = ""
basepath = snakemake.params.get("base_path")
basepathprefix = "-base-path {}".format(basepath) if basepath else ""
conf = snakemake.params.get("conf")
conf = conf if conf else ""
threads = snakemake.threads
threadsprefix = "-p {}".format(str(threads)) if threads else ""
outcalls = snakemake.output[0]
if outcalls.endswith(".vcf.gz"):
outprefix = "| bcftools view -Oz"
elif outcalls.endswith(".bcf"):
outprefix = "| bcftools view -Ob"
else:
outprefix = ""
incalls = snakemake.input[0]
if incalls.endswith(".bcf"):
incalls = "<(bcftools view {})".format(incalls)
log = snakemake.log_fmt_shell(stdout=False, stderr=True)
shell(
"(vcfanno {threadsprefix} {luascriptprefix} "
"{basepathprefix} "
"{conf} "
"{incalls} | sed -e 's/Number=A/Number=1/g' {outprefix} > {outcalls}) {log}"
)
| 1.84375
| 2
|
meiduo_mall/meiduo_mall/apps/carts/utils.py
|
devenshxw/meiduo_project
| 0
|
12780407
|
<filename>meiduo_mall/meiduo_mall/apps/carts/utils.py<gh_stars>0
import pickle, base64
from django_redis import get_redis_connection
def merge_cart_cookie_to_redis(request, user, response):
"""
    Merge the cookie cart data into Redis after login
    :param request: the current request object, used to read data from the cookie
    :param response: the current response object, used to clear the cookie data
    :param user: the logged-in user, used to get user_id
    :return: response
"""
    # get the cart data from the cookie
cookie_cart_str = request.COOKIES.get('carts')
    # if there is no data in the cookie, just return the response
if not cookie_cart_str:
return response
cookie_cart_dict = pickle.loads(base64.b64decode(cookie_cart_str.encode()))
new_cart_dict = {}
new_cart_selected_add = []
new_cart_selected_remove = []
    # sync the cart data from the cookie
for sku_id, cookie_dict in cookie_cart_dict.items():
new_cart_dict[sku_id] = cookie_dict['count']
if cookie_dict['selected']:
new_cart_selected_add.append(sku_id)
else:
new_cart_selected_remove.append(sku_id)
    # write new_cart_dict to the Redis database
redis_conn = get_redis_connection('carts')
pl = redis_conn.pipeline()
pl.hmset('carts_%s' % user.id, new_cart_dict)
    # sync the selected state to the Redis database
if new_cart_selected_add:
pl.sadd('selected_%s' % user.id, *new_cart_selected_add)
if new_cart_selected_remove:
pl.srem('selected_%s' % user.id, *new_cart_selected_remove)
pl.execute()
    # clear the cookie
response.delete_cookie('carts')
return response
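# --- Hedged usage sketch (illustrative only; the login view below is an assumption,
# not part of this project) ---
#
#     def login(request):
#         # ... authenticate and log the user in, then build the response ...
#         response = JsonResponse({'code': 0, 'errmsg': 'OK'})
#         # merge the cookie cart into Redis and clear the cookie before returning
#         response = merge_cart_cookie_to_redis(request=request, user=request.user, response=response)
#         return response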
| 2.359375
| 2
|
novelutils/data/scrapy_settings.py
|
vtkhang/novelutils-public
| 0
|
12780408
|
<reponame>vtkhang/novelutils-public<filename>novelutils/data/scrapy_settings.py
"""Store the settings of spider for utils crawler. """
def get_settings():
"""Return the settings of spider.
Returns
-------
dict
Spider settings.
"""
return {
"AUTOTHROTTLE_ENABLED": True,
"DEFAULT_REQUEST_HEADERS": {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
"(KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36",
},
"LOG_FORMAT": "%(asctime)s [%(name)s] %(levelname)s: %(message)s",
"LOG_SHORT_NAMES": True,
}
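# --- Hedged usage sketch (illustrative; MySpider is a hypothetical spider class,
# not something defined in this package) ---
#
#     from scrapy.crawler import CrawlerProcess
#
#     process = CrawlerProcess(settings=get_settings())
#     process.crawl(MySpider)
#     process.start()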
| 1.9375
| 2
|
tests/test_euclidean_distance_from_label_centroid_map.py
|
elsandal/pyclesperanto_prototype
| 64
|
12780409
|
<filename>tests/test_euclidean_distance_from_label_centroid_map.py
import pyclesperanto_prototype as cle
import numpy as np
def test_euclidean_distance_from_label_centroid_map():
labels = cle.push(np.asarray([
[1, 1, 1, 2],
[1, 1, 1, 2],
[1, 1, 1, 2],
[2, 2, 2, 2]
]))
reference = cle.push(np.asarray([
[1.4142135, 1, 1.4142135, 2.3079278],
[1, 0, 1, 1.4285713],
[1.4142135, 1, 1.4142135, 0.86896616],
[2.3079278, 1.4285713, 0.86896616, 1.2121831]
]
))
print(cle.centroids_of_background_and_labels(labels))
result = cle.euclidean_distance_from_label_centroid_map(labels)
a = cle.pull(result)
b = cle.pull(reference)
print(a)
print(b)
assert (np.allclose(a, b, 0.001))
| 2.53125
| 3
|
Thresholding.py
|
KR-16/Open-CV
| 0
|
12780410
|
import cv2 as cv
import numpy as np
image = cv.imread("boy.jpg",cv.IMREAD_COLOR) # we can even read it in grayscale image also
#image is the cv::mat object of the image
# CONVERTING THE IMAGE TO GRAY SCALE USING THE cvtColor METHOD
gray_scale = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
cv.imshow("Original Image",image)
cv.imshow("Gray Scale Image",gray_scale)
# RESIZING THE IMAGE
gray = cv.resize(gray_scale,(200,200))
cv.imshow("RESIZED GRAY IMAGE",gray)
ret,thresh_1 = cv.threshold(src = gray,thresh=100, maxval= 255, type = cv.THRESH_BINARY)
ret,thresh_2 = cv.threshold(src = gray,thresh=90,maxval=255,type=cv.THRESH_BINARY_INV)
ret,thresh_3 = cv.threshold(src = gray,thresh=90,maxval=255,type=cv.THRESH_TRUNC)
ret,thresh_4 = cv.threshold(src = gray,thresh=90,maxval=255,type=cv.THRESH_TOZERO)
ret,thresh_5 = cv.threshold(src = gray,thresh=90,maxval=255,type=cv.THRESH_TOZERO_INV)
print(ret)
cv.imshow("Thresh Binary Image",thresh_1)
cv.imshow("Thresh Binary Inverted Image",thresh_2)
cv.imshow("Thresh Truncated Image",thresh_3)
cv.imshow("Thresh TOZERO Image",thresh_4)
cv.imshow("Thresh TOZERO INVERSE Image",thresh_5)
cv.waitKey(0)
| 3.578125
| 4
|
star.py
|
Millmer/Starspot-Model
| 0
|
12780411
|
import random
import numpy as np
from utils import splitPoly
import matplotlib.patches as patches
import matplotlib.path as path
from matplotlib.transforms import Bbox
import cartopy.crs as ccrs
from spot import Spot
class Star:
    # Stellar radius in RSun, inclination in degrees
# Limb darkening grid resolution (pixel*pixel grid)
# Rotation period in days
def __init__(self, params):
self.radius = params.rad_star
self.inc = params.sinc
self.res = params.res
self.period = params.prot
self.u = params.u
self.spots = None
self.initial_band = params.high_band
self.low_band = params.low_band
self.cycle = params.stellar_cycle
self.active_region = list(self.initial_band)
self.active_region_vel = [-(params.high_band[0]-params.low_band[0])/self.cycle, -(params.high_band[1] - params.low_band[1])/self.cycle]
self.params = params # Needed for new spot generation
# Create globe structure and set up initial projections
self.globe = ccrs.Globe(semimajor_axis=self.radius, semiminor_axis=self.radius, ellipse='sphere', flattening=1e-9)
self.rotated_proj = ccrs.RotatedPole(pole_longitude=180, pole_latitude=90-self.inc, central_rotated_longitude=0, globe=self.globe)
self.geodetic_proj = ccrs.Geodetic(globe=self.globe)
self.orth_proj = ccrs.Orthographic(globe=self.globe, central_latitude = self.inc, central_longitude=0)
# Visible surface
edge = 90
self.lon1, self.lat1, self.lon2, self.lat2 = -edge, -edge, edge, edge
# Circular grid for limb darkening formula scaled to unity
x = np.linspace(-1,1,self.res)
x, y = np.meshgrid(x,x)
self.grid = np.sqrt(x**2 + y**2)
self.greater_mask = np.ma.masked_greater(self.grid,1).mask
self.grid[self.greater_mask] = np.nan
self.totalGridSquares = self.res**2 - self.greater_mask.sum()
self.grid_x, self.grid_y = (x*self.radius, y*self.radius) # Re-scale grid back to given star radius
# Unspotted Flux
self.unspottedFlux = self.limbDarken()
self.totalUnspottedFlux = self.totalFlux(self.unspottedFlux)
# Spotted Flux
self.spottedFlux = None
self.totalSpottedFlux = None
# Apply quadratic limb darkening to model
def limbDarken(self):
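        # quadratic limb darkening law: I(mu)/I(1) = 1 - u1*(1 - mu) - u2*(1 - mu)**2,
        # where mu = cos(theta) follows from the normalized radial grid below (comment added for clarity)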
mu = np.sqrt(1-self.grid**2)
mu_1 = 1-mu
u1 = self.u[0]
u2 = self.u[1]
unspottedFlux = 1-u1*mu_1-u2*(mu_1**2)
return unspottedFlux
# Add spots
def addSpots(self, spots):
self.spots = spots
self.spottedFlux = self.mapSpots()
self.totalSpottedFlux = self.totalFlux(self.spottedFlux)
# Life Cycle management
def update(self, cur_phase, t):
# Update projections
cur_long = 360*((cur_phase)%1)
self.updateProjections(cur_long)
# If spots, update them
if not self.spots == None:
self.updateSpots(t)
self.spottedFlux = self.mapSpots()
self.totalSpottedFlux = self.totalFlux(self.spottedFlux)
def updateProjections(self, cur_long):
        # Calculate projections based on the current rotation
self.rotated_proj = ccrs.RotatedPole(pole_longitude=cur_long-180, pole_latitude=90-self.inc, central_rotated_longitude=0, globe=self.globe)
self.orth_proj = ccrs.Orthographic(globe=self.globe, central_latitude = self.inc, central_longitude=cur_long)
def updateSpots(self, t, dt=0):
# If no spots then ignore
if not self.spots == None:
# Update active latitudes first
if dt > 0: self.updateActiveRegion(dt)
# Update spots and remove if dead
doCull = []
for spot in self.spots:
if dt > 0: spot.update(self, t, dt)
if spot.dead: doCull.append(spot)
# Remove dead spots and replace
if len(doCull) > 0:
spotsToAddBack = len(doCull)
for obj in doCull:
self.spots.remove(obj)
for i in range(spotsToAddBack):
self.spots.append(Spot.gen_spot(self.params, self, t))
def updateActiveRegion(self, dt):
self.active_region[0] += dt*self.active_region_vel[0]
self.active_region[1] += dt*self.active_region_vel[1]
# Reset when lower than lower band limit
if self.active_region[0] < self.low_band[0] or self.active_region[1] < self.low_band[1]:
self.active_region = list(self.initial_band)
# Spot masking and mapping
def maskPixels(self, path):
XY = np.dstack((self.grid_x, self.grid_y))
XY_flat = XY.reshape((-1, 2))
mask_flat = path.contains_points(XY_flat)
mask = mask_flat.reshape(self.grid_x.shape)
return mask
def mapSpots(self):
# Create new flux array
spottedFlux = self.unspottedFlux*np.ones(self.unspottedFlux.shape)
# Map Spots
for i, spot in enumerate(self.spots):
# Get polygon
spotPoly = spot.poly
# Transform spot coords from Geodetic coord system to rotated projection
spot_vs = self.rotated_proj.transform_points(self.geodetic_proj, spotPoly.vertices[:,0], spotPoly.vertices[:,1])[:,0:2]
# Split poly to avoid issues at boundary
polys = splitPoly(spot_vs, 180)
for poly in polys:
# Get vertices of spot/tissot polygon
spot_vs = poly.get_xy()
# Mask in rotated projection (use mpl.Path.clip_to_bbox function)
spot_path = patches.Path(spot_vs).clip_to_bbox(Bbox([[self.lon1,self.lat1],[self.lon2,self.lat2]]))
# If spot in visible area calculate flux change
if len(spot_path.vertices):
# Transform masked path to orth projection as this is coordinate space LD grid is in
spot_vs = self.orth_proj.transform_points(self.rotated_proj, spot_path.vertices[:,0], spot_path.vertices[:,1])[:,0:2]
spot_path = patches.Path(spot_vs)
                    # Find pixels contained in the mask and multiply by spot brightness
mask = self.maskPixels(spot_path)
spottedFlux[mask] = spottedFlux[mask]*spot.brightness
return spottedFlux
# Manage transit
def transit(self, planet, time, dt):
I = []
D = []
Time = []
planetPoly = patches.CirclePolygon((0,0),1,100)
while (planet.isTransiting(time)):
# Carry on now integrating planet across surface but don't rotate star
planetFlux = self.unspottedFlux*np.ones(self.unspottedFlux.shape) if self.spottedFlux is None else self.spottedFlux*np.ones(self.spottedFlux.shape)
# Find position of planet and scale to star's radius
X, Y = planet.skyPosAtTime(time)
planet_vx = self.radius*(planetPoly.get_path().vertices[:,0]*planet.rad + X)
planet_vy = self.radius*(planetPoly.get_path().vertices[:,1]*planet.rad + Y)
planet_path = path.Path(np.column_stack((planet_vx,planet_vy)))
            # Find pixels contained within the planet's disk and set them to 0
mask = self.maskPixels(planet_path)
planetFlux[mask] = 0
totalTransitFlux = self.totalFlux(planetFlux)
I.append(totalTransitFlux)
if self.spots is None:
D.append(self.totalUnspottedFlux - totalTransitFlux)
else:
D.append(self.totalSpottedFlux - totalTransitFlux)
Time.append(time)
time += dt
return I, D, Time, time
# Helper func to sum over grid of flux values
def totalFlux(self, flx):
totalFlux = flx[~self.greater_mask].sum()/self.totalGridSquares
return totalFlux
| 2.4375
| 2
|
gubernator/pb_glance.py
|
Noahhoetger2001/test-infra
| 3,390
|
12780412
|
<gh_stars>1000+
#!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A tiny, minimal protobuf2 parser that's able to extract enough information
to be useful.
"""
import cStringIO as StringIO
def parse_protobuf(data, schema=None):
"""
Do a simple parse of a protobuf2 given minimal type information.
Args:
data: a string containing the encoded protocol buffer.
schema: a dict containing information about each field number.
The keys are field numbers, and the values represent:
- str: the name of the field
- dict: schema to recursively decode an embedded message.
May contain a 'name' key to name the field.
Returns:
dict: mapping from fields to values. The fields may be strings instead of
numbers if schema named them, and the value will *always* be
a list of values observed for that key.
"""
if schema is None:
schema = {}
buf = StringIO.StringIO(data)
def read_varint():
out = 0
shift = 0
c = 0x80
while c & 0x80:
c = ord(buf.read(1))
out = out | ((c & 0x7f) << shift)
shift += 7
return out
values = {}
while buf.tell() < len(data):
key = read_varint()
wire_type = key & 0b111
field_number = key >> 3
field_name = field_number
if wire_type == 0:
value = read_varint()
elif wire_type == 1: # 64-bit
value = buf.read(8)
elif wire_type == 2: # length-delim
length = read_varint()
value = buf.read(length)
if isinstance(schema.get(field_number), basestring):
field_name = schema[field_number]
elif field_number in schema:
# yes, I'm using dynamic features of a dynamic language.
# pylint: disable=redefined-variable-type
value = parse_protobuf(value, schema[field_number])
field_name = schema[field_number].get('name', field_name)
elif wire_type == 5: # 32-bit
value = buf.read(4)
else:
raise ValueError('unhandled wire type %d' % wire_type)
values.setdefault(field_name, []).append(value)
return values
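# --- Hedged usage sketch (added for illustration; the sample payload and the field
# names below are made up, not part of the original module) ---
if __name__ == '__main__':
    # field 1 holds the string "hi"; field 2 is an embedded message whose field 1 holds "ok"
    sample = '\x0a\x02hi\x12\x04\x0a\x02ok'
    sample_schema = {1: 'greeting', 2: {1: 'word', 'name': 'inner'}}
    # expected: {'greeting': ['hi'], 'inner': [{'word': ['ok']}]}
    print(parse_protobuf(sample, sample_schema))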
| 2.40625
| 2
|
alembic/versions/36c44957687f_add_pledge_table.py
|
minsukkahng/pokr.kr
| 76
|
12780413
|
<reponame>minsukkahng/pokr.kr
"""Add pledge table
Revision ID: <KEY>
Revises: 3cea1b2cfa
Create Date: 2013-05-07 17:12:20.111941
"""
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '3cea1b2cfa'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table('pledge',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('candidacy_id', sa.Integer(), nullable=False),
sa.Column('pledge', sa.Unicode(length=128), nullable=True),
sa.Column('description', sa.UnicodeText(), nullable=True),
sa.ForeignKeyConstraint(['candidacy_id'], ['candidacy.id'], ),
sa.PrimaryKeyConstraint('id')
)
def downgrade():
op.drop_table('pledge')
| 1.265625
| 1
|
DEEP LEARNING/SNAKE GAME/Code/Manual_SnakeGame.py
|
SushP07/Machine-Learning
| 2
|
12780414
|
<reponame>SushP07/Machine-Learning
import pygame, random, sys
from pygame.locals import *
def collide(x1, x2, y1, y2, w1, w2, h1, h2):
if x1+w1>x2 and x1<x2+w2 and y1+h1>y2 and y1<y2+h2:
return True
else:
return False
def die(screen, score):
    f = pygame.font.SysFont('Arial', 30)
    t = f.render('Your score was: ' + str(score), True, (0, 0, 0))
    screen.blit(t, (10, 270))
    pygame.display.update()
    pygame.time.wait(2000)
    sys.exit(0)
xs = [290, 290, 290, 290, 290]
ys = [290, 270, 250, 230, 210]
dirs = 0
score = 0
applepos = (random.randint(0, 590), random.randint(0, 590))
pygame.init()
s = pygame.display.set_mode((600, 600))
pygame.display.set_caption('Snake')
appleimage = pygame.Surface((10, 10))
appleimage.fill((0, 255, 0))
img = pygame.Surface((20, 20))
img.fill((255, 0, 0))
f = pygame.font.SysFont('Arial', 20)
clock = pygame.time.Clock()
while True:
clock.tick(10)
for e in pygame.event.get():
if e.type == QUIT:
sys.exit(0)
elif e.type == KEYDOWN:
if e.key == K_UP and dirs != 0:dirs = 2
elif e.key == K_DOWN and dirs != 2:dirs = 0
elif e.key == K_LEFT and dirs != 1:dirs = 3
elif e.key == K_RIGHT and dirs != 3:dirs = 1
i = len(xs)-1
while i >= 2:
if collide(xs[0], xs[i], ys[0], ys[i], 20, 20, 20, 20):
die(s, score)
i-= 1
if collide(xs[0], applepos[0], ys[0], applepos[1], 20, 10, 20, 10):
        score += 1
        xs.append(700)
        ys.append(700)
        applepos = (random.randint(0, 590), random.randint(0, 590))
print(xs,ys)
if xs[0] < 0 or xs[0] > 580 or ys[0] < 0 or ys[0] > 580:
die(s, score)
i = len(xs)-1
while i >= 1:
        xs[i] = xs[i-1]
        ys[i] = ys[i-1]
        i -= 1
if dirs==0:ys[0] += 20
elif dirs==1:xs[0] += 20
elif dirs==2:ys[0] -= 20
elif dirs==3:xs[0] -= 20
s.fill((255, 255, 255))
for i in range(0, len(xs)):
s.blit(img, (xs[i], ys[i]))
    s.blit(appleimage, applepos)
    t = f.render(str(score), True, (0, 0, 0))
    s.blit(t, (10, 10))
    pygame.display.update()
| 3.4375
| 3
|
lnbits/helpers.py
|
frennkie/lnbits
| 0
|
12780415
|
import json
import os
import shortuuid
from typing import List, NamedTuple, Optional
from .settings import LNBITS_PATH
class Extension(NamedTuple):
code: str
is_valid: bool
name: Optional[str] = None
short_description: Optional[str] = None
icon: Optional[str] = None
contributors: Optional[List[str]] = None
class ExtensionManager:
def __init__(self, *, disabled: list = []):
self._disabled = disabled
self._extension_folders: List[str] = [x[1] for x in os.walk(os.path.join(LNBITS_PATH, "extensions"))][0]
@property
def extensions(self) -> List[Extension]:
output = []
for extension in [ext for ext in self._extension_folders if ext not in self._disabled]:
try:
with open(os.path.join(LNBITS_PATH, "extensions", extension, "config.json")) as json_file:
config = json.load(json_file)
is_valid = True
except Exception:
config = {}
is_valid = False
output.append(Extension(**{**{"code": extension, "is_valid": is_valid}, **config}))
return output
class Status:
OK = 200
CREATED = 201
NO_CONTENT = 204
BAD_REQUEST = 400
UNAUTHORIZED = 401
PAYMENT_REQUIRED = 402
FORBIDDEN = 403
NOT_FOUND = 404
METHOD_NOT_ALLOWED = 405
UPGRADE_REQUIRED = 426
TOO_MANY_REQUESTS = 429
INTERNAL_SERVER_ERROR = 500
def urlsafe_short_hash() -> str:
return shortuuid.uuid()
| 2.640625
| 3
|
pgdocs/command/show.py
|
VEINHORN/pgdocs
| 0
|
12780416
|
<gh_stars>0
"""
Shows database objects description
"""
import subprocess
import validator
import psqlcmd as psql
def execute(host, port, database, schema, table):
host, port, database = validator.connection_props(host, port, database)
# print("host={}, port={}, schema={}, db={}, table={}".format(
# host, port, schema, database, table))
desc = table_desc(host, port, database, schema, table)
print("[{}.{}] description: {}".format(schema, table, desc))
def table_desc(host, port, database, schema, table):
byte_str = subprocess.check_output(
psql.get_table_desc(host, port, database, schema, table))
return byte_str.decode("utf-8").split("\n")[2].strip()
| 2.5
| 2
|
open_files.py
|
husmen/DoCA_GUI
| 3
|
12780417
|
<reponame>husmen/DoCA_GUI<gh_stars>1-10
""" module for opening various types of files """
import os
import json
import time
import shutil
import subprocess
import textract
import pandas as pd
from docx import Document
#from changeOffice import Change
from pptx import Presentation
from odf.opendocument import load
from odf import text
from pyexcel_xlsx import get_data
class OpenFile():
def __init__(self, location):
self.location = location
print("# opening {}".format(location))
if (location.endswith("docx") or location.endswith("doc") ):
self.text, self.paragraphs, self.tables, self.tables2 = self.text_al(location)
if (location.endswith("pptx") or location.endswith("ppt") or location.endswith("odt") or location.endswith("odp") ):
self.text, self.paragraphs = self.text_al(location)
self.tables = None
self.tables2 = None
if (location.endswith("xlsx") or location.endswith("xls") or location.endswith("ods")):
self.tables, self.tables2 = self.text_al(location)
self.text = self.tables
return
def text_al(self, dosya_yolu):
p = []
t1 = []
t2 = []
flag = 0
if (dosya_yolu.endswith("doc")):
cwd = os.getcwd()
#libreoffice --convert-to docx 0020.doc
dir_path = os.path.dirname(os.path.realpath(dosya_yolu))
#output_file = dosya_yolu + "x"
rc = subprocess.call(['libreoffice', '--convert-to', 'docx', '--outdir', dir_path, dosya_yolu])
output_file = os.path.join(cwd,"tmp/"+os.path.basename(dosya_yolu+"x"))
for _ in range(5):
try:
shutil.move(dosya_yolu+"x", output_file)
except:
time.sleep(1)
else:
print("file moved")
time.sleep(1)
break
return self.text_al(output_file)
if (dosya_yolu.endswith("xls")):
cwd = os.getcwd()
#libreoffice --convert-to docx 0020.doc
dir_path = os.path.dirname(os.path.realpath(dosya_yolu))
#output_file = dosya_yolu + "x"
rc = subprocess.call(['libreoffice', '--convert-to', 'xlsx', '--outdir', dir_path, dosya_yolu])
output_file = os.path.join(cwd,"tmp/"+os.path.basename(dosya_yolu+"x"))
for _ in range(5):
try:
shutil.move(dosya_yolu+"x", output_file)
except:
time.sleep(1)
else:
print("file moved")
time.sleep(1)
break
return self.text_al(output_file)
if (dosya_yolu.endswith("ppt")):
cwd = os.getcwd()
#libreoffice --convert-to docx 0020.doc
dir_path = os.path.dirname(os.path.realpath(dosya_yolu))
#output_file = dosya_yolu + "x"
rc = subprocess.call(['libreoffice', '--convert-to', 'pptx', '--outdir', dir_path, dosya_yolu])
output_file = os.path.join(cwd,"tmp/"+os.path.basename(dosya_yolu+"x"))
for _ in range(5):
try:
shutil.move(dosya_yolu+"x", output_file)
except:
time.sleep(1)
else:
print("file moved")
time.sleep(1)
break
return self.text_al(output_file)
        # docx extension
if (dosya_yolu.endswith("docx")) or (flag == 1):
doc = Document(dosya_yolu)
paragraphs = doc.paragraphs
tables = doc.tables
fullText = ""
for paragraph in paragraphs:
if paragraph != "\n":
p.append(paragraph.text)
fullText = fullText + paragraph.text + "\n"
for table in tables:
t1.append(table)
#print(table)
tmp_t = []
for row in table.rows:
tmp_r = []
for cell in row.cells:
tmp_r.append(cell.text)
#print(cell.text)
tmp_t.append(tmp_r)
t2.append(tmp_t)
#print(tmp_t)
return fullText, p, t1, t2
        # odt extension
elif (dosya_yolu.endswith("odt")):
#text1 = textract.process(dosya_yolu)
doc = load(dosya_yolu)
paragraphs = []
txt1 = ""
for paragraph in doc.getElementsByType(text.P):
if paragraph != "\n":
paragraphs.append(paragraph)
txt1 += str(paragraph)
return txt1, paragraphs
#print text1
        # pptx extension
elif (dosya_yolu.endswith("pptx")) or (flag == 2):
paragraphs = []
prs = Presentation(dosya_yolu)
text_runs = ""
for slide in prs.slides:
for shape in slide.shapes:
if not shape.has_text_frame:
continue
for paragraph in shape.text_frame.paragraphs:
tmp = ""
for run in paragraph.runs:
tmp += run.text
text_runs += run.text
paragraphs.append(tmp)
for p in paragraphs:
if p == "":
del p
return text_runs, paragraphs
        # odp extension
elif (dosya_yolu.endswith("odp")):
doc = load(dosya_yolu)
paragraphs = []
txt1 = ""
for paragraph in doc.getElementsByType(text.P):
if paragraph != "\n":
paragraphs.append(paragraph)
txt1 += str(paragraph)
#print (unicode(paragraph))
return txt1, paragraphs
        # xlsx, xls, ods extensions
elif (dosya_yolu.endswith("xlsx")) or (dosya_yolu.endswith("ods")):
data = get_data(dosya_yolu)
df_ = []
#print(data)
#print(json.dumps(data))
for sheet in data:
#print(json.dumps(data[sheet]))
try:
df = pd.DataFrame(data=data[sheet][1:],columns=data[sheet][0])
except:
df = None
#df = pd.DataFrame(data[sheet])
#df = pd.DataFrame(data=data[sheet][1:][1:],index=data[sheet][1:][0],columns=data[sheet][0][1:])
df_.append(df)
#print(df)
#print(data)
#print(df_)
return data, df_
| 2.546875
| 3
|
python/dataStructuresAndAlgorithms/arraytest.py
|
serdarkuyuk/Notes
| 1
|
12780418
|
<reponame>serdarkuyuk/Notes<gh_stars>1-10
from array import *
my_array = array("i", [1, 2, 3, 4, 5])
for i in my_array:
print(i)
print(my_array[0])
my_array.append(6)
my_array.insert(2, 100)
my_array.extend([3, 2, 1])
templist = [20, 21, 22]
my_array.fromlist(templist)
my_array.remove(1)
my_array.pop()
# gives the index of given number
my_array.index(100)
# reverse the array
my_array.reverse()
# shows the memory information and number of elements
print(my_array.buffer_info())
# how many of this element occur
print(my_array.count(3))
# tempstring = my_array.tostring()
# print(tempstring)
# convert array to list
my_array.tolist()
# slice
print(my_array)
import numpy as np
twoArray = np.array([[1, 2, 3, 4], [6, 3, 5, 4], [8, 5, 3, 1], [9, 6, 4, 2]])
newtwoArray = np.insert(twoArray, 2, [[2, 2, 2, 2]], axis=0)
newtwoArray = np.append(twoArray, [[2, 2, 2, 2]], axis=0)
print(twoArray)
print(newtwoArray)
def accessElement(array, rowIndex, colIndex):
if rowIndex >= len(array) and colIndex >= len(array[0]):
print("incorrect index")
else:
return array[rowIndex][colIndex]
# print(accessElement(twoArray, 1, 2))
def traverseArray(array):
for i in range(len(array)):
for j in range(len(array[0])):
print(array[i][j])
# print(traverseArray(twoArray))
def linearSearchArray(array, element):
for i in range(len(array)):
for j in range(len(array[0])):
if (array[i][j]) == element:
return (i, j)
return "There is no number"
# print(linearSearchArray(twoArray, 40))
# delete
newtdArray = np.delete(twoArray, 0, axis=1)
print(newtdArray)
| 3.640625
| 4
|
evcouplings/utils/summarize.py
|
thomashopf/EVcouplings-1
| 1
|
12780419
|
"""
Create summary statistics / plots for runs from
evcouplings app
Authors:
<NAME>
"""
# chose backend for command-line usage
import matplotlib
matplotlib.use("Agg")
from collections import defaultdict
import filelock
import pandas as pd
import click
import matplotlib.pyplot as plt
from evcouplings.utils.system import valid_file
from evcouplings.utils.config import read_config_file, InvalidParameterError
from evcouplings.utils.pipeline import FINAL_CONFIG_SUFFIX
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
def protein_monomer(prefix, configs):
"""
Create results summary for run using
protein_monomer pipeline
# TODO
"""
MIN_PROBABILITY = 0.9
ali_table = pd.DataFrame()
prefix_to_cfgs = {}
data = defaultdict(lambda: defaultdict())
# go through all config files
for cfg_file in configs:
# check if the file exists and has contents
# since run might not yet have finished or crashed
if valid_file(cfg_file):
# job input configuration
C = read_config_file(cfg_file)
sub_prefix = C["global"]["prefix"]
domain_threshold = C["align"]["domain_threshold"]
sub_index = (domain_threshold, sub_prefix)
final_state_cfg = sub_prefix + FINAL_CONFIG_SUFFIX
if not valid_file(final_state_cfg):
continue
# read final output state of job
R = read_config_file(final_state_cfg)
data[sub_index]["identities"] = R["identities_file"]
data[sub_index]["frequencies"] = R["frequencies_file"]
data[sub_index]["minimum_column_coverage"] = C["align"]["minimum_column_coverage"]
stat_file = R["statistics_file"]
ec_file = R.get("ec_file", "")
ec_comp_file = R.get("ec_compared_longrange_file", "")
prefix_to_cfgs[(sub_prefix)] = (C, R)
# read and modify alignment statistics
if valid_file(stat_file):
# get alignment stats for current job
stat_df = pd.read_csv(stat_file)
n_eff = R["effective_sequences"]
if n_eff is not None:
stat_df.loc[0, "N_eff"] = n_eff
stat_df.loc[0, "domain_threshold"] = domain_threshold
L = stat_df.loc[0, "num_cov"]
# try to get number of significant ECs in addition
if valid_file(ec_file):
ecs = pd.read_csv(ec_file)
min_seq_dist = C["compare"]["min_sequence_distance"]
num_sig = len(ecs.query(
"abs(i-j) >= @min_seq_dist and probability >= @MIN_PROBABILITY"
))
stat_df.loc[0, "num_significant"] = num_sig
# try to get EC precision in addition
if valid_file(ec_comp_file):
ec_comp = pd.read_csv(ec_comp_file)
stat_df.loc[0, "precision"] = ec_comp.iloc[L]["precision"]
# finally, append to global table
ali_table = ali_table.append(stat_df)
# sort table by sequence search threshold
ali_table = ali_table.sort_values(by="domain_threshold")
    # when saving files, have to acquire lock to make sure
# jobs don't start overwriting results
# make plots and save
fig = _protein_monomer_plot(ali_table, data)
plot_file = prefix + "_job_statistics_summary.pdf"
lock_plot = filelock.FileLock(plot_file)
with lock_plot:
fig.savefig(plot_file, bbox_inches="tight")
# save ali statistics table
table_file = prefix + "_job_statistics_summary.csv"
lock_table = filelock.FileLock(table_file)
with lock_table:
ali_table.to_csv(
table_file, index=False, float_format="%.3f"
)
return ali_table
def _protein_monomer_plot(ali_table, data):
"""
# TODO
"""
import seaborn as sns
sns.set_palette("Paired", len(ali_table), None)
FONTSIZE = 16
# set up plot and grid
fig = plt.figure(figsize=(15, 15))
gridsize = ((3, 2))
ax_cov = plt.subplot2grid(gridsize, (0, 0), colspan=1)
ax_distr = plt.subplot2grid(gridsize, (0, 1), colspan=1)
ax_gaps = plt.subplot2grid(gridsize, (1, 0), colspan=2)
ax_sig = plt.subplot2grid(gridsize, (2, 0), colspan=1)
ax_comp = plt.subplot2grid(gridsize, (2, 1), colspan=1)
# 1) Number of sequences, coverage
l_seqs = ax_cov.plot(
ali_table.domain_threshold, ali_table.N_eff / ali_table.num_cov,
"ok-", label="# Sequences"
)
ax_cov.set_xlabel("Domain inclusion threshold")
ax_cov.set_ylabel("# effective sequences / L")
ax_cov.set_title("Sequences and coverage", fontsize=FONTSIZE)
ax_cov.legend(loc="lower left")
ax_cov2 = ax_cov.twinx()
l_cov = ax_cov2.plot(
ali_table.domain_threshold, ali_table.num_cov / ali_table.seqlen,
"o-", label="Coverage", color="#2079b4"
)
ax_cov2.set_ylabel("Coverage (% of region)")
ax_cov2.legend(loc="lower right")
ax_cov2.set_ylim(0, 1)
# 2) sequence identity & coverage distributions
for (domain_threshold, subjob), subdata in sorted(data.items()):
# sequence identities to query
if valid_file(subdata["identities"]):
ids = pd.read_csv(subdata["identities"]).identity_to_query.dropna()
ax_distr.hist(
ids, histtype="step", range=(0, 1.0),
bins=100, normed=True, cumulative=True, linewidth=3,
label=str(domain_threshold)
)
ali_table.loc[ali_table.prefix == subjob, "average_identity"] = ids.mean()
# coverage distribution
if valid_file(subdata["frequencies"]):
freqs = pd.read_csv(subdata["frequencies"])
# print(freqs.head())
ax_gaps.plot(
freqs.i, 1 - freqs.loc[:, "-"], "o", linewidth=3,
label=str(domain_threshold)
)
mincov = subdata["minimum_column_coverage"]
if mincov > 1:
mincov /= 100
ax_gaps.axhline(mincov, ls="--", color="k")
ax_distr.set_xlabel("% sequence identity to query")
ax_distr.set_title("Sequence identity distribution", fontsize=FONTSIZE)
ax_distr.set_xlim(0, 1)
ax_distr.set_ylim(0, 1)
ax_distr.legend()
ax_gaps.set_title("Gap statistics", fontsize=FONTSIZE)
ax_gaps.set_xlabel("Sequence index")
ax_gaps.set_ylabel("Column coverage (1 - % gaps)")
ax_gaps.autoscale(enable=True, axis='x', tight=True)
ax_gaps.set_ylim(0, 1)
ax_gaps.legend(loc="best")
# number of significant ECs, EC precision
if "num_significant" in ali_table.columns:
ax_sig.plot(
ali_table.domain_threshold,
ali_table.num_significant / ali_table.num_cov,
"ok-"
)
ax_sig.set_title("Significant ECs", fontsize=FONTSIZE)
ax_sig.set_xlabel("Domain inclusion threshold")
ax_sig.set_ylabel("Fraction of significant ECs (% of L)")
if "precision" in ali_table.columns:
ax_comp.plot(ali_table.domain_threshold, ali_table.precision, "ok-")
ax_comp.set_title("Comparison to 3D (top L ECs)", fontsize=FONTSIZE)
ax_comp.set_xlabel("Domain inclusion threshold")
ax_comp.set_ylabel("EC precision")
ax_comp.set_ylim(0, 1)
return fig
def protein_complex(prefix, configs):
"""
Create results summary for run using
protein_complex pipeline
"""
# TODO: this is only designed to work with skewnormal threshold
MIN_PROBABILITY = 0.9
# number of inter ECs to check for precision
NUM_INTER = 5
# TODO: create segments global variable and import
FIRST_SEGMENT = "A_1"
SECOND_SEGMENT = "B_1"
ali_table = pd.DataFrame()
prefix_to_cfgs = {}
data = defaultdict(lambda: defaultdict())
# go through all config files
for cfg_file in configs:
# check if the file exists and has contents
# since run might not yet have finished or crashed
if valid_file(cfg_file):
# job input configuration
C = read_config_file(cfg_file)
sub_prefix = C["global"]["prefix"]
sub_index = (sub_prefix)
final_state_cfg = sub_prefix + FINAL_CONFIG_SUFFIX
if not valid_file(final_state_cfg):
continue
# read final output state of job
R = read_config_file(final_state_cfg)
data[sub_index]["identities"] = R["identities_file"]
data[sub_index]["frequencies"] = R["frequencies_file"]
data[sub_index]["minimum_column_coverage"] = C["concatenate"]["minimum_column_coverage"]
stat_file = R["statistics_file"]
ec_file = R.get("ec_file", "")
ec_comp_file = R.get("ec_compared_longrange_file", "")
concat_stat_file = R.get("concatentation_statistics_file", "")
first_stat_file = R.get("first_statistics_file","")
second_stat_file = R.get("second_statistics_file","")
prefix_to_cfgs[(sub_prefix)] = (C, R)
# read and modify alignment statistics
if valid_file(stat_file):
# get alignment stats for current job
stat_df = pd.read_csv(stat_file)
n_eff = R["effective_sequences"]
if n_eff is not None:
stat_df.loc[0, "N_eff"] = n_eff
L = stat_df.loc[0, "num_cov"]
# try to get concatenation statistics in addition
if valid_file(concat_stat_file):
concat_stat_df = pd.read_csv(concat_stat_file)
# get and save n sequences per monomer aln
n_seqs_1 = concat_stat_df.loc[0, "num_seqs_1"]
n_seqs_2 = concat_stat_df.loc[0, "num_seqs_2"]
stat_df.loc[0, "first_n_seqs"] = int(n_seqs_1)
stat_df.loc[0, "second_n_seqs"] = int(n_seqs_2)
# get and save median n paralogs per monomer aln
n_paralogs_1 = concat_stat_df.loc[0, "median_num_per_species_1"]
n_paralogs_2 = concat_stat_df.loc[0, "median_num_per_species_2"]
stat_df.loc[0, "median_num_per_species_1"] = n_paralogs_1
stat_df.loc[0, "median_num_per_species_2"] = n_paralogs_2
# try to get number of significant ECs in addition
if valid_file(ec_file):
ecs = pd.read_csv(ec_file)
                    # number of significant monomer ECs
min_seq_dist = C["compare"]["min_sequence_distance"]
num_sig = len(ecs.query(
"abs(i-j) >= @min_seq_dist and probability >= @MIN_PROBABILITY"
))
# number of inter-protein ECs significant
num_sig_inter = len(ecs.query(
"segment_i != segment_j and probability >= @MIN_PROBABILITY"
))
stat_df.loc[0, "num_significant"] = int(num_sig)
#rank of top inter contact
top_inter_rank = ecs.query("segment_i != segment_j").index[0]
stat_df.loc[0, "top_inter_rank"] = int(top_inter_rank)
# try to get EC precision in addition
if valid_file(ec_comp_file):
ec_comp = pd.read_csv(ec_comp_file)
ec_comp_1 = ec_comp.query("segment_i == segment_j == @FIRST_SEGMENT")
ec_comp_2 = ec_comp.query("segment_i == segment_j == @SECOND_SEGMENT")
ec_comp_inter = ec_comp.query("segment_i != segment_j")
# use the monomer statistics files to figure out how many sites in each monomer
if valid_file(first_stat_file) and valid_file(second_stat_file):
stats_1 = pd.read_csv(first_stat_file)
L_1 = L = stats_1.loc[0, "num_cov"]
stats_2 = pd.read_csv(second_stat_file)
L_2 = L = stats_2.loc[0, "num_cov"]
# precision of monomer 1
stat_df.loc[0, "first_monomer_precision"] = ec_comp_1.iloc[L_1]["segmentwise_precision"]
# precision of monomer 2
stat_df.loc[0, "second_monomer_precision"]= ec_comp_2.iloc[L_2]["segmentwise_precision"]
# precision of the top NUM_INTER (5) inter-protein ECs
stat_df.loc[0, "inter_precision"] = ec_comp_inter.iloc[NUM_INTER]["segmentwise_precision"]
# finally, append to global table
ali_table = ali_table.append(stat_df)
# save ali statistics table
table_file = prefix + "_job_statistics_summary.csv"
lock_table = filelock.FileLock(table_file)
with lock_table:
ali_table.to_csv(
table_file, index=False, float_format="%.3f"
)
return ali_table
PIPELINE_TO_SUMMARIZER = {
"protein_monomer": protein_monomer,
"protein_complex": protein_complex,
}
@click.command(context_settings=CONTEXT_SETTINGS)
# run settings
@click.argument('pipeline', nargs=1, required=True)
@click.argument('prefix', nargs=1, required=True)
@click.argument('configs', nargs=-1)
def app(**kwargs):
"""
Create summary statistics for evcouplings pipeline runs
"""
try:
summarizer = PIPELINE_TO_SUMMARIZER[kwargs["pipeline"]]
except KeyError:
raise InvalidParameterError(
"Not a valid pipeline, valid selections are: {}".format(
",".join(PIPELINE_TO_SUMMARIZER.keys())
)
)
summarizer(kwargs["prefix"], kwargs["configs"])
if __name__ == '__main__':
app()
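# Editor's hedged usage note: this summarizer appears to be driven from the command line,
# roughly as
#     python summarize.py protein_complex <output_prefix> <job_config_1> <job_config_2> ...
# where each positional config is a job input configuration file (its "global.prefix" is
# used to locate the corresponding final output state). The script name shown here is an
# illustrative assumption; PIPELINE_TO_SUMMARIZER dispatches the pipeline argument to
# protein_monomer() or protein_complex().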
| 2.375
| 2
|
exercises/en/exc_02_02_02.py
|
Jette16/spacy-course
| 2,085
|
12780420
|
<filename>exercises/en/exc_02_02_02.py
from spacy.lang.en import English
nlp = English()
doc = nlp("<NAME> is a PERSON")
# Look up the hash for the string label "PERSON"
person_hash = ____.____.____[____]
print(person_hash)
# Look up the person_hash to get the string
person_string = ____.____.____[____]
print(person_string)
| 3.25
| 3
|
aiida_siesta/calculations/tkdict.py
|
pfebrer96/aiida_siesta_plugin
| 0
|
12780421
|
"""
Module with implementation of TKDict (translated-keys-dictionary) class.
It is actually a dictionary with 'translation insensitive' keys. For
example, in the FDFDict subclass:
MD.TypeOfRun, md-type-of-run, mdTypeOfRun, mdtypeofrun
all represent the same key in the dictionary. The actual form of the
key returned by methods such as 'keys()' is the latest to be used in
a setting operation.
<NAME> and <NAME>, 2017
"""
from collections.abc import MutableMapping
class TKDict(MutableMapping):
"""
Dictionary-like class that also contains character translation and deletion data.
Stores (value, initial-key) tuples accessible by a translated key.
"""
@classmethod
def translate_key(cls, key):
""" Definition of a rule for key translation. """
raise NotImplementedError
def __init__(self, *args, **kw):
"""
Create translated-keys-dictionary from initial data.
If several input keys translate to the same string, only the first occurrence is saved.
"""
# _storage is internal dictionary stored as: {<translated_key>: (<value>, <initial_key>), }
self._storage = {}
inp_dict = dict(*args, **kw)
for inp_key in inp_dict:
self[inp_key] = inp_dict[inp_key]
def keys(self):
""" Return list of last key occurences. """
# _storage keys are translated
return [self.get_last_key(k) for k in self._storage]
# return(self._storage.keys())
def iterkeys(self):
"""D.iterkeys() -> an iterator over the keys of D *** UPDATE """
for key in self:
value, last_key = self._storage[key] # pylint: disable=unused-variable
yield last_key
def __setitem__(self, key, value):
""" Store a (value, initial_key) tuple under translated key. """
trans_key = self.translate_key(key)
# check if we already have a translated key in _storage
# if so, overwrite the value in tuple, but not the initial key
self._storage.__setitem__(trans_key, (value, key))
def __getitem__(self, key):
""" Translate the key, unpack value-tuple and return the value if exists or None. """
trans_key = self.translate_key(key)
try:
value, last_key = self._storage[trans_key] # pylint: disable=unused-variable
#self._storage.__setitem__(trans_key, (value, key))
return value
except KeyError:
return None
def iteritems(self):
"""D.iteritems() -> an iterator over the (key, value) items of D """
for key in self:
value, last_key = self._storage[key]
yield (last_key, value)
def items(self):
"""D.items() -> list of D's (key, value) pairs, as 2-tuples"""
return [(self._storage[key][1], self._storage[key][0]) for key in self]
def get_last_key(self, key):
"""
Translate the key, unpack value-tuple and return
the corresponding initial key if exists or None.
"""
trans_key = self.translate_key(key)
try:
value, last_key = self._storage[trans_key] # pylint: disable=unused-variable
return last_key
except KeyError:
return None
def get_filtered_items(self):
for k, v in self._storage.items():
yield k, v[0]
def __delitem__(self, key):
""" Translate the key, purge value-tuple """
self._storage.__delitem__(self.translate_key(key))
def __iter__(self):
return iter(self._storage)
def __len__(self):
return len(self._storage)
def __repr__(self):
return self._storage.__repr__()
def __str__(self):
return self._storage.__str__()
class FDFDict(TKDict): # pylint: disable=too-many-ancestors
"""
FDFDict represents data from an .fdf file.
Translation rule (Python 3 compatible): drop dashes, dots and colons from the key,
then lowercase it; other characters, including underscores, are left untouched.
"""
@classmethod
def translate_key(cls, key):
to_remove = "-.:"
if not isinstance(key, str):
raise Exception("Key name error in FDFDict")
# Unicode uses a single dictionary for translation
table = {ord(char): None for char in to_remove}
return key.translate(table).lower()
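# Editor's hedged sketch: a minimal, self-contained demonstration of the
# translation-insensitive behaviour described in the module docstring. The key names
# and values below are illustrative only.
if __name__ == "__main__":
    fdf = FDFDict()
    fdf["MD.TypeOfRun"] = "cg"
    # All of these spellings translate to the same internal key "mdtypeofrun"
    assert fdf["md-type-of-run"] == "cg"
    assert fdf["mdTypeOfRun"] == "cg"
    # keys() reports the spelling last used in a setting operation
    fdf["md-type-of-run"] = "verlet"
    assert fdf.keys() == ["md-type-of-run"]
    assert fdf["MD.TypeOfRun"] == "verlet"
    print(dict(fdf.get_filtered_items()))  # {"mdtypeofrun": "verlet"}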
| 3.0625
| 3
|
tests/test_tokenizer.py
|
zoonru/diaparser
| 38
|
12780422
|
import shutil
import os
import argparse
import unittest
import io
from tokenizer.tokenizer import Tokenizer
class TestTokenizer(unittest.TestCase):
MODEL_DIR = os.path.expanduser('~/.cache/diaparser')
def setUp(self):
self.args = {
'lang': 'it',
'verbose': True
}
def test_download_resources(self):
tokenizer = Tokenizer(self.args['lang'])
self.assertTrue(os.path.isdir(self.MODEL_DIR))
self.assertTrue(os.path.exists(os.path.join(self.MODEL_DIR, 'tokenizer', self.args['lang'])))
self.assertTrue(os.path.exists(os.path.join(self.MODEL_DIR, 'tokenizer', self.args['lang'], 'tokenize')))
def test_tokenize(self):
tokenizer = Tokenizer(self.args['lang'])
sentences = tokenizer.predict('Ha chiamato il dr. Rossi.Vuole salutarti.')
self.assertEqual(len(sentences), 2)
def test_corpus_load(self):
tokenizer = Tokenizer(self.args['lang'])
sin = io.StringIO("Un corazziere contro Scalfaro. L'attore le disse baciami o torno a riprendermelo.")
for line in tokenizer.format(tokenizer.predict(sin.read())):
if line and not line.startswith('#'):
# CoNLL-U lines have 10 tab-separated fields:
assert len(line.split('\t')) == 10, line
| 2.53125
| 3
|
general-practice/Exercises solved/mixed/Exercise28.py
|
lugabrielbueno/Projeto
| 0
|
12780423
|
<gh_stars>0
# Tax Calculator - asks the user for a price and a US state, then returns the total cost including that state's sales tax.
# List of all states and their sales tax rates (%)
all_states = [( 'Alaska', 0),
('Alabama',4),
('Arkansas',6.5),
('Arizona',5.6),
('California',7.5),
('Colorado',2.9),
('Connecticut',6.35),
('Delaware',0),
('Florida',6),
('Georgia',4),
('Hawaii',4),
('Iowa',6),
('Idaho',6.5),
('Illinois',6.25),
('Indiana',7),
('Kansas',6.5),
('Kentucky',6),
('Louisiana',4.45),
('Massachusetts',6.25),
('Maryland',6),
('Maine',5.5),
('Michigan',6),
('Minnesota',6.875),
('Missouri',4.225),
('Mississippi',7),
('Montana',0),
('North Carolina',4.75),
('North Dakota',5),
('Nebraska',5.5),
('New Hampshire',0),
('New Jersey',6.625),
('New Mexico',5.125),
('Nevada',6.85),
('New York',4),
('Ohio',5.75),
('Oklahoma',4.5),
('Oregon',0),
('Pennsylvania',6),
('Rhode Island',7),
('South Carolina',6),
('South Dakota',4.75),
('Tennessee',7),
('Texas',6.25),
('Utah',6.1),
('Virginia',5.3),
('Vermont',6),
('Washington',6.5),
('Wisconsin',5),
('West Virginia',6),
('Wyoming',4)]
def tax_by_state():
#Receiving the inputs
while True:
state = input('State : ').title()
cost = float(input('Your price : '))
#Looking for the state
for stat in all_states:
if stat[0] == state.title():
# Found it, now calculating the tax
tax = cost*(stat[1]/100)
# Case with no sales tax
if tax == 0:
print("There's no taxes for sale in {}".format(stat[0].title()))
return round(cost,2)
# Otherwise, add the tax to the price
else:
print("That's your price with taxes")
return round(cost + cost*(stat[1]/100),2)
print(tax_by_state())
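# Worked example (editor's addition, for illustration): entering state "Texas" (rate 6.25)
# and price 100.0 gives tax = 100.0 * (6.25 / 100) = 6.25, so the function returns
# round(100.0 + 6.25, 2) == 106.25.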
| 3.421875
| 3
|
tests/collectors/app_store_test.py
|
ecleya/autodot
| 0
|
12780424
|
<reponame>ecleya/autodot
import os
import shutil
import tempfile
from unittest import TestCase, mock
from collectors import app_store
APPS = b'''
497799835 Xcode (9.2)
425424353 The Unarchiver (3.11.3)
409183694 Keynote (7.3.1)
408981434 iMovie (10.1.8)
'''
class TestProject(TestCase):
@mock.patch('subprocess.check_output')
def test_installed_packages(self, mock_check_output):
mock_check_output.return_value = APPS
apps = [(app_id, app_name) for app_id, app_name in app_store.apps()]
self.assertEquals(
apps,
[
('497799835', 'Xcode'),
('425424353', 'The Unarchiver'),
('409183694', 'Keynote'),
('408981434', 'iMovie'),
]
)
| 2.15625
| 2
|
bridges/east_open_cv_bridge.py
|
ChristianKitte/Textextraktion-und-Einordnung-mit-Hilfe-neuronaler-Netze
| 0
|
12780425
|
"""
This file contains source code from another GitHub project. The comments made there apply. The source code
was licensed under the MIT License. The license text and a detailed reference can be found in the license
subfolder at models/east_open_cv/license. Many thanks to the author of the code.
For reasons of clarity unneeded parts of the original code were not taken over. The original project can
be found on the https://github.com/ZER-0-NE/EAST-Detector-for-text-detection-using-OpenCV page.
For a better understanding the documentation has been supplemented in parts. Code completely or predominantly
taken from the source was marked with "External code".
"""
import time
import cv2
import numpy as np
from imutils.object_detection import non_max_suppression
import bridges_config as config
class EastOpenCvBridge:
"""A bridge class for connecting to a text detector
"""
def __init__(self):
"""The constructor
"""
self.load_model()
def load_model(self):
"""Loads the underlying model together with its pre-trained weights.
"""
try:
self.model = cv2.dnn.readNet(config.EAST_OPENCV_MODEL_PATH)
except:
print('Error in method {0} in module {1}'.format('load_model', 'east_open_cv_bridge.py'))
def scann(self, image):
"""External code (add try...except and an extension)
Examines the passed image for text regions and returns them as a collection of boxes in the
form of a NumPy array. The passed image must be a raster image.
:param image: The image to be examined.
:return: A NumPy array of predicted text areas.
"""
try:
# load the input image and grab the image dimensions
self.orig = image.copy()
(H, W) = image.shape[:2]
# set the new width and height and then determine the ratio in change
# for both the width and height, should be multiple of 32
(newW, newH) = (320, 320)
rW = W / float(newW)
rH = H / float(newH)
# resize the image and grab the new image dimensions
image = cv2.resize(image, (newW, newH))
(H, W) = image.shape[:2]
# define the two output layer names for the EAST detector model that
# we are interested -- the first is the output probabilities and the
# second can be used to derive the bounding box coordinates of text
self.layerNames = [
"feature_fusion/Conv_7/Sigmoid",
"feature_fusion/concat_3"]
# construct a blob from the image and then perform a forward pass of
# the model to obtain the two output layer sets
blob = cv2.dnn.blobFromImage(image, 1.0, (W, H),
(123.68, 116.78, 103.94), swapRB=True, crop=False)
start = time.time()
self.model.setInput(blob)
(scores, geometry) = self.model.forward(self.layerNames)
end = time.time()
# grab the number of rows and columns from the scores volume, then
# initialize our set of bounding box rectangles and corresponding
# confidence scores
(numRows, numCols) = scores.shape[2:4]
rects = [] # stores the bounding box coordinates for text regions
confidences = [] # stores the probability associated with each bounding box region in rects
# loop over the number of rows
for y in range(0, numRows):
# extract the scores (probabilities), followed by the geometrical
# data used to derive potential bounding box coordinates that
# surround text
scoresData = scores[0, 0, y]
xData0 = geometry[0, 0, y]
xData1 = geometry[0, 1, y]
xData2 = geometry[0, 2, y]
xData3 = geometry[0, 3, y]
anglesData = geometry[0, 4, y]
# loop over the number of columns
for x in range(0, numCols):
# if our score does not have sufficient probability, ignore it
if scoresData[x] < 0.5:
continue
# compute the offset factor as our resulting feature maps will
# be 4x smaller than the input image
(offsetX, offsetY) = (x * 4.0, y * 4.0)
# extract the rotation angle for the prediction and then
# compute the sin and cosine
angle = anglesData[x]
cos = np.cos(angle)
sin = np.sin(angle)
# use the geometry volume to derive the width and height of
# the bounding box
h = xData0[x] + xData2[x]
w = xData1[x] + xData3[x]
# compute both the starting and ending (x, y)-coordinates for
# the text prediction bounding box
endX = int(offsetX + (cos * xData1[x]) + (sin * xData2[x]))
endY = int(offsetY - (sin * xData1[x]) + (cos * xData2[x]))
startX = int(endX - w)
startY = int(endY - h)
# add the bounding box coordinates and probability score to
# our respective lists
rects.append((startX, startY, endX, endY))
confidences.append(scoresData[x])
# apply non-maxima suppression to suppress weak, overlapping bounding boxes
boxes = non_max_suppression(np.array(rects), probs=confidences)
"""
Extension to the original code to return a usable format.
"""
newboxes = []
# loop over the bounding boxes
for (startX, startY, endX, endY) in boxes:
# scale the bounding box coordinates based on the respective ratios
startX = int(startX * rW)
startY = int(startY * rH)
endX = int(endX * rW)
endY = int(endY * rH)
box = []
box.append([startX, startY])
box.append([endX, startY])
box.append([endX, endY])
box.append([startX, endY])
newboxes.append(box)
return np.asarray(newboxes)
except:
print('Error in method {0} in module {1}'.format('scann', 'east_open_cv_bridge.py'))
return None
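# Editor's hedged usage sketch: how this bridge might be driven from client code. The
# image path below is a hypothetical example, and EAST_OPENCV_MODEL_PATH in
# bridges_config must point to a valid EAST model file for load_model() to succeed.
if __name__ == "__main__":
    import os
    sample_path = "sample.jpg"  # hypothetical test image, not part of the project
    if os.path.exists(sample_path):
        bridge = EastOpenCvBridge()
        boxes = bridge.scann(cv2.imread(sample_path))
        # scann() returns an array of shape (N, 4, 2): four corner points per text region
        print(None if boxes is None else boxes.shape)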
| 2.78125
| 3
|
automated_analysis.py
|
AfricasVoices/Project-Constitution-Amendment-Ke
| 0
|
12780426
|
<reponame>AfricasVoices/Project-Constitution-Amendment-Ke
import argparse
import csv
from collections import OrderedDict
import sys
from core_data_modules.analysis.mapping import participation_maps, kenya_mapper
from core_data_modules.cleaners import Codes
from core_data_modules.logging import Logger
from core_data_modules.traced_data.io import TracedDataJsonIO
from core_data_modules.util import IOUtils
from core_data_modules.analysis import AnalysisConfiguration, engagement_counts, theme_distributions, \
repeat_participations, sample_messages, traffic_analysis, analysis_utils
from configurations.code_schemes import CodeSchemes
from src.lib import PipelineConfiguration
log = Logger(__name__)
IMG_SCALE_FACTOR = 10 # Increase this to increase the resolution of the outputted PNGs
CONSENT_WITHDRAWN_KEY = "consent_withdrawn"
SENT_ON_KEY = "sent_on"
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Runs automated analysis over the outputs produced by "
"`generate_outputs.py`, and optionally uploads the outputs to Drive.")
parser.add_argument("user", help="User launching this program")
parser.add_argument("pipeline_configuration_file_path", metavar="pipeline-configuration-file",
help="Path to the pipeline configuration json file")
parser.add_argument("messages_json_input_path", metavar="messages-json-input-path",
help="Path to a JSONL file to read the TracedData of the messages data from")
parser.add_argument("individuals_json_input_path", metavar="individuals-json-input-path",
help="Path to a JSONL file to read the TracedData of the messages data from")
parser.add_argument("automated_analysis_output_dir", metavar="automated-analysis-output-dir",
help="Directory to write the automated analysis outputs to")
args = parser.parse_args()
user = args.user
pipeline_configuration_file_path = args.pipeline_configuration_file_path
messages_json_input_path = args.messages_json_input_path
individuals_json_input_path = args.individuals_json_input_path
automated_analysis_output_dir = args.automated_analysis_output_dir
IOUtils.ensure_dirs_exist(automated_analysis_output_dir)
IOUtils.ensure_dirs_exist(f"{automated_analysis_output_dir}/graphs")
log.info("Loading Pipeline Configuration File...")
with open(pipeline_configuration_file_path) as f:
pipeline_configuration = PipelineConfiguration.from_configuration_file(f)
Logger.set_project_name(pipeline_configuration.pipeline_name)
log.debug(f"Pipeline name is {pipeline_configuration.pipeline_name}")
sys.setrecursionlimit(30000)
# Read the messages dataset
log.info(f"Loading the messages dataset from {messages_json_input_path}...")
with open(messages_json_input_path) as f:
messages = TracedDataJsonIO.import_jsonl_to_traced_data_iterable(f)
for i in range (len(messages)):
messages[i] = dict(messages[i].items())
log.info(f"Loaded {len(messages)} messages")
# Read the individuals dataset
log.info(f"Loading the individuals dataset from {individuals_json_input_path}...")
with open(individuals_json_input_path) as f:
individuals = TracedDataJsonIO.import_jsonl_to_traced_data_iterable(f)
for i in range (len(individuals)):
individuals[i] = dict(individuals[i].items())
log.info(f"Loaded {len(individuals)} individuals")
def coding_plans_to_analysis_configurations(coding_plans):
analysis_configurations = []
for plan in coding_plans:
ccs = plan.coding_configurations
for cc in ccs:
if not cc.include_in_theme_distribution:
continue
analysis_configurations.append(
AnalysisConfiguration(cc.analysis_file_key, plan.raw_field, cc.coded_field, cc.code_scheme)
)
return analysis_configurations
log.info("Computing engagement counts...")
with open(f"{automated_analysis_output_dir}/engagement_counts.csv", "w") as f:
engagement_counts.export_engagement_counts_csv(
messages, individuals, CONSENT_WITHDRAWN_KEY,
coding_plans_to_analysis_configurations(PipelineConfiguration.RQA_CODING_PLANS),
f
)
log.info("Computing demographic distributions...")
with open(f"{automated_analysis_output_dir}/demographic_distributions.csv", "w") as f:
theme_distributions.export_theme_distributions_csv(
individuals, CONSENT_WITHDRAWN_KEY,
coding_plans_to_analysis_configurations(PipelineConfiguration.DEMOG_CODING_PLANS),
[],
f
)
log.info("Computing theme distributions...")
with open(f"{automated_analysis_output_dir}/theme_distributions.csv", "w") as f:
theme_distributions.export_theme_distributions_csv(
individuals, CONSENT_WITHDRAWN_KEY,
coding_plans_to_analysis_configurations(PipelineConfiguration.RQA_CODING_PLANS),
coding_plans_to_analysis_configurations(PipelineConfiguration.SURVEY_CODING_PLANS),
f
)
# Export raw messages labelled with the Meta codes 'impact', 'gratitude' and 'about_conversation', collectively referred to as impact/success stories
log.info("Exporting success story raw messages for each episode...")
success_story_string_values = ["gratitude", "about_conversation", "impact"]
with open(f"{automated_analysis_output_dir}/impact_messages.csv", "w") as f:
sample_messages.export_sample_messages_csv(
messages, CONSENT_WITHDRAWN_KEY,
coding_plans_to_analysis_configurations(PipelineConfiguration.RQA_CODING_PLANS),
f, filter_code_ids=success_story_string_values, limit_per_code=sys.maxsize
)
if pipeline_configuration.automated_analysis.traffic_labels is not None:
log.info("Exporting traffic analysis...")
with open(f"{automated_analysis_output_dir}/traffic_analysis.csv", "w") as f:
traffic_analysis.export_traffic_analysis_csv(
messages, CONSENT_WITHDRAWN_KEY,
coding_plans_to_analysis_configurations(PipelineConfiguration.RQA_CODING_PLANS),
SENT_ON_KEY,
pipeline_configuration.automated_analysis.traffic_labels,
f
)
log.info(f"Exporting participation maps for each Kenya county...")
participation_maps.export_participation_maps(
individuals, CONSENT_WITHDRAWN_KEY,
coding_plans_to_analysis_configurations(PipelineConfiguration.RQA_CODING_PLANS),
AnalysisConfiguration("county", "location_raw", "county_coded", CodeSchemes.KENYA_COUNTY),
kenya_mapper.export_kenya_counties_map,
f"{automated_analysis_output_dir}/maps/counties/county_",
export_by_theme=True
)
log.info(f"Exporting participation maps for each Kenya constituency...")
participation_maps.export_participation_maps(
individuals, CONSENT_WITHDRAWN_KEY,
coding_plans_to_analysis_configurations(PipelineConfiguration.RQA_CODING_PLANS),
AnalysisConfiguration("constituency", "location_raw", "constituency_coded", CodeSchemes.KENYA_CONSTITUENCY),
kenya_mapper.export_kenya_constituencies_map,
f"{automated_analysis_output_dir}/maps/constituencies/constituency_",
export_by_theme=True
)
log.info("Automated analysis python script complete")
| 2.171875
| 2
|
Class/cert.py
|
Ne00n/woodKubernetes
| 2
|
12780427
|
from Class.rqlite import rqlite
import simple_acme_dns, requests, json, time, sys, os
class Cert(rqlite):
def updateCert(self,data):
print("updating",data[0])
response = self.execute(['UPDATE certs SET fullchain = ?,privkey = ?,updated = ? WHERE domain = ?',data[1],data[2],data[3],data[0]])
print(json.dumps(response, indent=4, sort_keys=True))
def buildbuildUrls(self,urls,domain,token):
response = []
for url in urls:
subdomain = ""
parts = domain.split(".")
if len(parts) > 2:
parts = parts[:len(parts) -2]
subdomain = '.'.join(parts)
# Example template: api.dns.com/mahkey/domain/sub/TXT/add/token (the literal substrings "domain", "sub" and "token" are replaced below)
url = url.replace("domain",domain.replace(subdomain+".",""))
subdomain = "_acme-challenge." + subdomain
url = url.replace("sub",subdomain)
url = url.replace("token",token)
response.append(url)
return response
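# Worked example (editor's hedged sketch): with domain "www.example.com", token "abc123"
# and an illustrative template "https://dns.example/key/domain/sub/TXT/add/token",
# buildbuildUrls() yields
# "https://dns.example/key/example.com/_acme-challenge.www/TXT/add/abc123";
# the literal substrings "domain", "sub" and "token" are what get replaced.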
def buildUrls(self,domain,token,api):
apis = self.query(["SELECT * FROM apis WHERE name = ?",api])
if apis is False: return False
if 'values' not in apis['results'][0]: return False
apis = apis['results'][0]['values'][0]
response = {"up":[],"down":[]}
urls = apis[2].split(",")
response['up'] = self.buildbuildUrls(urls,domain,token)
urls = apis[3].split(",")
response['down'] = self.buildbuildUrls(urls,domain,token)
return response
def getCert(self,domain,email,api):
directory = "https://acme-v02.api.letsencrypt.org/directory"
#directory = "https://acme-staging-v02.api.letsencrypt.org/directory"
try:
client = simple_acme_dns.ACMEClient(domains=[domain],email=email,directory=directory,nameservers=["8.8.8.8", "1.1.1.1"],new_account=True,generate_csr=True)
except Exception as e:
print(e)
return False
for acmeDomain, token in client.request_verification_tokens():
print("adding {domain} --> {token}".format(domain=acmeDomain, token=token))
urls = self.buildUrls(domain,token,api)
if urls is False: return False
for url in urls['up']:
r = requests.get(url,allow_redirects=False)
if (r.status_code != 200): return False
print("Waiting for dns propagation")
try:
if client.check_dns_propagation(timeout=1200):
print("Requesting certificate")
client.request_certificate()
fullchain = client.certificate.decode()
privkey = client.private_key.decode()
self.updateCert([domain,fullchain,privkey,int(time.time())])
else:
print("Failed to issue certificate for " + str(client.domains))
client.deactivate_account()
return False
except Exception as e:
print(e)
return False
finally:
for url in urls['down']:
r = requests.get(url,allow_redirects=False)
if (r.status_code != 200): return False
return True
def renew(self):
status = self.status()
if status is False:
print("rqlite gone")
return False
state = status['store']['raft']['state']
if state != "Leader":
print("Not leader, aborting.")
return False
print("Getting certs")
domains = self.query(['SELECT * FROM certs'])
if domains is False:
print("rqlite gone")
return False
if 'values' not in domains['results'][0]:
print("no certs added")
return False
for row in domains['results'][0]['values']:
if row[4] == None:
print("Missing cert for",row[0])
response = self.getCert(row[0],row[1],row[3])
if response is False:
print("Failed to get cert for",row[0])
return False
else:
print("Checking cert for",row[0])
if time.time() > (row[6] + (86400 * 30)):
print("Certificate is older than 30 days")
response = self.getCert(row[0],row[1],row[3])
if response is False:
print("Failed to get cert for",row[0])
return False
| 2.859375
| 3
|
setup.py
|
actingthegroat/pyxtf
| 27
|
12780428
|
from os import path
from setuptools import setup
from tools.generate_pyi import generate_pyi
def main():
# Generate .pyi files
import pyxtf.xtf_ctypes
generate_pyi(pyxtf.xtf_ctypes)
import pyxtf.vendors.kongsberg
generate_pyi(pyxtf.vendors.kongsberg)
# read the contents of README file
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# Run setup script
setup(name='pyxtf',
version='1.2',
description='eXtended Triton Format (XTF) file interface',
long_description=long_description,
long_description_content_type='text/markdown',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/oysstu/pyxtf',
license='MIT',
setup_requires=['numpy>=1.11'],
install_requires=['numpy>=1.11', 'matplotlib>=1.5.1'],
packages=['pyxtf', 'pyxtf.vendors'],
package_data={'': ['*.pyi']},
use_2to3=False,
classifiers=[
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Intended Audience :: Other Audience',
'Intended Audience :: Science/Research',
'Natural Language :: English',
'Topic :: Scientific/Engineering',
'Programming Language :: Python :: 3 :: Only'
])
if __name__ == '__main__':
main()
| 1.671875
| 2
|
examples/gdl/font.py
|
simoncozens/pysilfont
| 41
|
12780429
|
<gh_stars>10-100
#!/usr/bin/env python
'The main font object for GDL creation. Depends on fonttools'
__url__ = 'http://github.com/silnrsi/pysilfont'
__copyright__ = 'Copyright (c) 2012 SIL International (http://www.sil.org)'
__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
import os, re, traceback
from silfont.gdl.glyph import Glyph
from silfont.gdl.psnames import Name
from xml.etree.cElementTree import ElementTree, parse, Element
from fontTools.ttLib import TTFont
# A collection of glyphs that have a given attachment point defined
class PointClass(object) :
def __init__(self, name) :
self.name = name
self.glyphs = []
self.dias = []
def addBaseGlyph(self, g) :
self.glyphs.append(g)
def addDiaGlyph(self, g) :
self.dias.append(g)
g.isDia = True
def hasDias(self) :
if len(self.dias) and len(self.glyphs) :
return True
else :
return False
def classGlyphs(self, isDia = False) :
if isDia :
return self.dias
else :
return self.glyphs
def isNotInClass(self, g, isDia = False) :
if not g : return False
if not g.isDia : return False
if isDia :
return g not in self.dias
else :
return g not in self.dias and g not in self.glyphs
class FontClass(object) :
def __init__(self, elements = None, fname = None, lineno = None, generated = False, editable = False) :
self.elements = elements or []
self.fname = fname
self.lineno = lineno
self.generated = generated
self.editable = editable
def append(self, element) :
self.elements.append(element)
class Font(object) :
def __init__(self, fontfile) :
self.glyphs = []
self.psnames = {}
self.canons = {}
self.gdls = {}
self.anchors = {}
self.ligs = {}
self.subclasses = {}
self.points = {}
self.classes = {}
self.aliases = {}
self.rules = {}
self.posRules = {}
if fontfile :
self.font = TTFont(fontfile)
for i, n in enumerate(self.font.getGlyphOrder()) :
self.addGlyph(i, n)
else :
self.font = None
def __len__(self) :
return len(self.glyphs)
# [] syntax returns the indicated element of the glyphs array.
def __getitem__(self, y) :
try :
return self.glyphs[y]
except IndexError :
return None
def glyph(self, name) :
return self.psnames.get(name, None)
def alias(self, s) :
return self.aliases.get(s, s)
def emunits(self) :
return 0
def initGlyphs(self, nGlyphs) :
#print "Font::initGlyphs",nGlyphs
self.glyphs = [None] * nGlyphs
self.numRealGlyphs = nGlyphs # does not include pseudo-glyphs
self.psnames = {}
self.canons = {}
self.gdls = {}
self.classes = {}
def addGlyph(self, index = None, psName = None, gdlName = None, factory = Glyph) :
#print "Font::addGlyph",index,psName,gdlName
if psName in self.psnames :
return self.psnames[psName]
if index is not None and index < len(self.glyphs) and self.glyphs[index] :
g = self.glyphs[index]
return g
g = factory(psName, index) # create a new glyph of the given class
self.renameGlyph(g, psName, gdlName)
if index is None : # give it the next available index
index = len(self.glyphs)
self.glyphs.append(g)
elif index >= len(self.glyphs) :
self.glyphs.extend([None] * (len(self.glyphs) - index + 1))
self.glyphs[index] = g
return g
def renameGlyph(self, g, name, gdlName = None) :
if g.psname != name :
for n in g.parseNames() :
del self.psnames[n.psname]
del self.canons[n.canonical()]
if gdlName :
self.setGDL(g, gdlName)
else :
self.setGDL(g, g.GDLName())
for n in g.parseNames() :
if n is None : break
self.psnames[n.psname] = g
self.canons[n.canonical()] = (n, g)
def setGDL(self, glyph, name) :
if not glyph : return
n = glyph.GDLName()
if n != name and n in self.gdls : del self.gdls[n]
if name and name in self.gdls and self.gdls[name] is not glyph :
count = 1
index = -2
name = name + "_1"
while name in self.gdls :
if self.gdls[name] is glyph : break
count = count + 1
name = name[0:index] + "_" + str(count)
if count == 10 : index = -3
if count == 100 : index = -4
self.gdls[name] = glyph
glyph.setGDL(name)
def addClass(self, name, elements, fname = None, lineno = 0, generated = False, editable = False) :
if name :
self.classes[name] = FontClass(elements, fname, lineno, generated, editable)
def addGlyphClass(self, name, gid, editable = False) :
if name not in self.classes :
self.classes[name] = FontClass()
if gid not in self.classes[name].elements :
self.classes[name].append(gid)
def addRules(self, rules, index) :
self.rules[index] = rules
def addPosRules(self, rules, index) :
self.posRules[index] = rules
def classUpdated(self, name, value) :
c = []
if name in self.classes :
for gid in self.classes[name].elements :
g = self[gid]
if g : g.removeClass(name)
if value is None and name in self.classes :
del self.classes[name]
return
for n in value.split() :
g = self.gdls.get(n, None)
if g :
c.append(g.gid)
g.addClass(name)
if name in self.classes :
self.classes[name].elements = c
else :
self.classes[name] = FontClass(c)
# Return the list of classes that should be updated in the AP XML file.
# This does not include classes that are auto-generated or defined in the hand-crafted GDL code.
def filterAutoClasses(self, names, autoGdlFile) :
res = []
for n in names :
c = self.classes[n]
if not c.generated and (not c.fname or c.fname == autoGdlFile) : res.append(n)
return res
def loadAlias(self, fname) :
with open(fname) as f :
for l in f.readlines() :
l = l.strip()
l = re.sub(ur'#.*$', '', l).strip()
if not len(l) : continue
try :
k, v = re.split(ur'\s*[,;\s]\s*', l, 1)
except ValueError :
k = l
v = ''
self.aliases[k] = v
# TODO: move this method to GraideFont, or refactor
def loadAP(self, apFileName) :
if not os.path.exists(apFileName) : return False
etree = parse(apFileName)
self.initGlyphs(len(etree.getroot())) # guess each child is a glyph
i = 0
for e in etree.getroot().iterfind("glyph") :
g = self.addGlyph(i, e.get('PSName'))
g.readAP(e, self)
i += 1
return True
def saveAP(self, apFileName, autoGdlFile) :
root = Element('font')
root.set('upem', str(self.emunits()))
root.set('producer', 'graide 1.0')
root.text = "\n\n"
for g in self.glyphs :
if g : g.createAP(root, self, autoGdlFile)
ElementTree(root).write(apFileName, encoding="utf-8", xml_declaration=True)
def createClasses(self) :
self.subclasses = {}
for k, v in self.canons.items() :
if v[0].ext :
h = v[0].head()
o = self.canons.get(h.canonical(), None)
if o :
if v[0].ext not in self.subclasses : self.subclasses[v[0].ext] = {}
self.subclasses[v[0].ext][o[1].GDLName()] = v[1].GDLName()
# for g in self.glyphs :
# if not g : continue
# for c in g.classes :
# if c not in self.classes :
# self.classes[c] = []
# self.classes[c].append(g.gid)
def calculatePointClasses(self) :
self.points = {}
for g in self.glyphs :
if not g : continue
for apName in g.anchors.keys() :
genericName = apName[:-1] # without the M or S
if genericName not in self.points :
self.points[genericName] = PointClass(genericName)
if apName.endswith('S') :
self.points[genericName].addBaseGlyph(g)
else :
self.points[genericName].addDiaGlyph(g)
def calculateOTLookups(self) :
if self.font :
for t in ('GSUB', 'GPOS') :
if t in self.font :
self.font[t].table.LookupList.process(self)
def getPointClasses(self) :
if len(self.points) == 0 :
self.calculatePointClasses()
return self.points
def ligClasses(self) :
self.ligs = {}
for g in self.glyphs :
if not g or not g.name : continue
(h, t) = g.name.split_last()
if t :
o = self.canons.get(h.canonical(), None)
if o and o[0].ext == t.ext :
t.ext = None
t.cname = None
tn = t.canonical(noprefix = True)
if tn in self.ligs :
self.ligs[tn].append((g.GDLName(), o[0].GDL()))
else :
self.ligs[tn] = [(g.GDLName(), o[0].GDL())]
def outGDL(self, fh, args) :
munits = self.emunits()
fh.write('table(glyph) {MUnits = ' + str(munits) + '};\n')
nglyphs = 0
for g in self.glyphs :
if not g or not g.psname : continue
if g.psname == '.notdef' :
fh.write(g.GDLName() + ' = glyphid(0)')
else :
fh.write(g.GDLName() + ' = postscript("' + g.psname + '")')
outs = []
if len(g.anchors) :
for a in g.anchors.keys() :
v = g.anchors[a]
outs.append(a + "=point(" + str(int(v[0])) + "m, " + str(int(v[1])) + "m)")
for (p, v) in g.gdl_properties.items() :
outs.append("%s=%s" % (p, v))
if len(outs) : fh.write(" {" + "; ".join(outs) + "}")
fh.write(";\n")
nglyphs += 1
fh.write("\n")
fh.write("\n/* Point Classes */\n")
for p in sorted(self.points.values(), key=lambda x: x.name) :
if not p.hasDias() : continue
n = p.name + "Dia"
self.outclass(fh, "c" + n, p.classGlyphs(True))
self.outclass(fh, "cTakes" + n, p.classGlyphs(False))
self.outclass(fh, 'cn' + n, filter(lambda x : p.isNotInClass(x, True), self.glyphs))
self.outclass(fh, 'cnTakes' + n, filter(lambda x : p.isNotInClass(x, False), self.glyphs))
fh.write("\n/* Classes */\n")
for c in sorted(self.classes.keys()) : # c = class name, l = class object
if c not in self.subclasses and not self.classes[c].generated : # don't output the class to the AP file if it was autogenerated
self.outclass(fh, c, self.classes[c].elements)
for p in self.subclasses.keys() :
ins = []
outs = []
for k, v in self.subclasses[p].items() :
ins.append(k)
outs.append(v)
n = p.replace('.', '_')
self.outclass(fh, 'cno_' + n, ins)
self.outclass(fh, 'c' + n, outs)
fh.write("/* Ligature Classes */\n")
for k in sorted(self.ligs.keys()) :
self.outclass(fh, "clig" + k, map(lambda x: self.gdls[x[0]], self.ligs[k]))
self.outclass(fh, "cligno_" + k, map(lambda x: self.gdls[x[1]], self.ligs[k]))
fh.write("\nendtable;\n")
fh.write("/* Substitution Rules */\n")
for k, v in sorted(self.rules.items(), key=lambda x:map(int,x[0].split('_'))) :
fh.write('\n// lookup ' + k + '\n')
fh.write('// ' + "\n// ".join(v) + "\n")
fh.write("\n/* Positioning Rules */\n")
for k, v in sorted(self.posRules.items(), key=lambda x:map(int,x[0].split('_'))) :
fh.write('\n// lookup ' + k + '\n')
fh.write('// ' + "\n// ".join(v) + "\n")
fh.write("\n\n#define MAXGLYPH %d\n\n" % (nglyphs - 1))
if args.include :
fh.write("#include \"%s\"\n" % args.include)
def outPosRules(self, fh, num) :
fh.write("""
#ifndef opt2
#define opt(x) [x]?
#define opt2(x) [opt(x) x]?
#define opt3(x) [opt2(x) x]?
#define opt4(x) [opt3(x) x]?
#endif
#define posrule(x) c##x##Dia {attach{to=@1; at=x##S; with=x##M}} / cTakes##x##Dia opt4(cnTakes##x##Dia) _;
table(positioning);
pass(%d);
""" % num)
for p in self.points.values() :
if p.hasDias() :
fh.write("posrule(%s);\n" % p.name)
fh.write("endpass;\nendtable;\n")
def outclass(self, fh, name, glyphs) :
fh.write(name + " = (")
count = 1
sep = ""
for g in glyphs :
if not g : continue
if isinstance(g, basestring) :
fh.write(sep + g)
else :
if g.GDLName() is None :
print "Can't output " + str(g.gid) + " to class " + name
else :
fh.write(sep + g.GDLName())
if count % 8 == 0 :
sep = ',\n '
else :
sep = ', '
count += 1
fh.write(');\n\n')
| 2.484375
| 2
|
internship-qualification/mercari-summer-internship-2017/tests.py
|
mikoim/funstuff
| 0
|
12780430
|
<reponame>mikoim/funstuff<filename>internship-qualification/mercari-summer-internship-2017/tests.py<gh_stars>0
import os
import uuid
from unittest import TestCase
import grpc
import grpc._channel
from google.protobuf import json_format as _json_format
import api_pb2
import api_pb2_grpc
def random_id() -> str:
return str(uuid.uuid4())
def sample(item_id='', name='', title='', description='', price=0, pv=0, status=False) -> dict:
return {
'id': item_id,
'name': name,
'title': title,
'description': description,
'price': price,
'pv': pv,
'status': status,
}
class APITest(TestCase):
@classmethod
def setUpClass(cls):
host = 'localhost:3000'
if os.environ.get('API_URL'):
host = os.environ.get('API_URL')
channel = grpc.insecure_channel(host)
cls._api = api_pb2_grpc.APIStub(channel)
@classmethod
def tearDownClass(cls):
pass # TODO: How to close the connection?
def test_AddItem(self):
item_id = random_id()
input_data = sample(item_id)
output_data = _json_format.MessageToDict(self._api.AddItem(api_pb2.Item(**input_data)), True)
self.assertDictEqual({'item': input_data}, output_data)
def test_AddItem_without_ID(self):
input_data = sample()
output_data = _json_format.MessageToDict(self._api.AddItem(api_pb2.Item(**input_data)), True)
self.assertNotEqual(output_data['item']['id'], '')
def test_GetItem(self):
item_id = random_id()
input_data = sample(item_id)
self._api.AddItem(api_pb2.Item(**input_data))
for n in range(2):
output_data = _json_format.MessageToDict(self._api.GetItem(api_pb2.GetItemRequest(id=item_id)), True)
input_data['pv'] += 1
self.assertDictEqual(input_data, output_data)
def test_UpdateItem(self):
item_id = random_id()
input_data = sample(item_id)
update_data = sample(item_id, 'apple', 'banana', 'cherry', 1, 2, True)
self._api.AddItem(api_pb2.Item(**input_data))
output_data = _json_format.MessageToDict(
self._api.UpdateItem(api_pb2.UpdateItemRequest(item=api_pb2.Item(**update_data))), True)
self.assertDictEqual({'item': update_data}, output_data)
output_data = _json_format.MessageToDict(self._api.GetItem(api_pb2.GetItemRequest(id=item_id)), True)
update_data['pv'] += 1
self.assertDictEqual(update_data, output_data)
def test_DeleteItem(self):
item_id = random_id()
input_data = sample(item_id)
self._api.AddItem(api_pb2.Item(**input_data))
self._api.DeleteItem(api_pb2.DeleteItemRequest(id=item_id))
with self.assertRaises(grpc._channel._Rendezvous) as e:
self._api.GetItem(api_pb2.GetItemRequest(id=item_id))
self.assertEqual(e.exception._state.code, grpc.StatusCode.NOT_FOUND)
def test_ListItem(self): # TODO: implement successful case
with self.assertRaises(grpc._channel._Rendezvous) as e:
self._api.ListItem(api_pb2.ListItemRequest(page=-1, limit=1))
self.assertEqual(e.exception._state.code, grpc.StatusCode.INVALID_ARGUMENT)
| 2.34375
| 2
|
src/pkg/caendr/caendr/models/datastore/user_token.py
|
AndersenLab/CAENDR
| 3
|
12780431
|
from caendr.models.datastore import Entity
class UserToken(Entity):
kind = 'user_token'
def __init__(self, *args, **kwargs):
super(UserToken, self).__init__(*args, **kwargs)
self.set_properties(**kwargs)
def set_properties(self, **kwargs):
''' Sets allowed properties for the UserToken instance '''
if 'username' in kwargs:
self.username = kwargs.get('username')
if 'revoked' in kwargs:
self.revoked = kwargs.get('revoked')
def save(self, *args, **kwargs):
''' Saves the UserToken entity to the datastore'''
super(UserToken, self).save(*args, **kwargs)
def revoke(self, *args, **kwargs):
''' Sets the UserToken revoked to True and saves'''
self.set_properties(revoked=True)
self.save()
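# Editor's hedged usage sketch (illustrative only; the Entity base-class constructor and
# the datastore configuration are not shown here, so the exact call signature is an
# assumption):
#   token = UserToken(name_or_id, username="jdoe")
#   token.save()    # persists the entity under kind "user_token"
#   token.revoke()  # sets revoked=True and saves again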
| 2.515625
| 3
|
tests/lib/bes/git/test_git.py
|
reconstruir/bes
| 0
|
12780432
|
<reponame>reconstruir/bes
#!/usr/bin/env python
#-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
#
import os.path as path, os, unittest
from bes.testing.unit_test import unit_test
from bes.fs.file_util import file_util
from bes.fs.temp_file import temp_file
from bes.archive.archiver import archiver
from bes.git.git_unit_test import git_temp_home_func
from bes.system.env_override import env_override_temp_home_func
from bes.system.host import host
from bes.git.git import git
from bes.git.git_status import git_status
class test_git(unit_test):
def _create_tmp_repo(self, *args):
# make the temp dir predictable on macos
if host.is_macos():
d = '/private/tmp'
else:
d = None
tmp_dir = self.make_temp_dir(dir = d)
git.init(tmp_dir, *args)
return tmp_dir
def _create_tmp_files(self, tmp_repo):
foo = path.join(tmp_repo, 'foo.txt')
bar = path.join(tmp_repo, 'bar.txt')
file_util.save(foo, content = 'foo.txt\n')
file_util.save(bar, content = 'bar.txt\n')
return [ 'bar.txt', 'foo.txt' ]
@git_temp_home_func()
def test_add(self):
tmp_repo = self._create_tmp_repo()
new_files = self._create_tmp_files(tmp_repo)
git.add(tmp_repo, new_files)
expected_status = [ git_status(git_status.ADDED, f) for f in new_files ]
actual_status = git.status(tmp_repo, '.')
self.assertEqual( expected_status, actual_status )
@git_temp_home_func()
def test_commit(self):
tmp_repo = self._create_tmp_repo()
new_files = self._create_tmp_files(tmp_repo)
git.add(tmp_repo, new_files)
git.commit(tmp_repo, 'nomsg\n', '.')
@git_temp_home_func()
def test_clone(self):
tmp_repo = self._create_tmp_repo()
new_files = self._create_tmp_files(tmp_repo)
git.add(tmp_repo, new_files)
git.commit(tmp_repo, 'nomsg\n', '.')
cloned_tmp_repo = self.make_temp_dir()
git.clone(tmp_repo, cloned_tmp_repo)
expected_cloned_files = [ path.join(cloned_tmp_repo, path.basename(f)) for f in new_files ]
for f in expected_cloned_files:
self.assertTrue( path.exists(f) )
@git_temp_home_func()
def test_clone_or_pull(self):
tmp_repo = self._create_tmp_repo()
new_files = self._create_tmp_files(tmp_repo)
git.add(tmp_repo, new_files)
git.commit(tmp_repo, 'nomsg\n', '.')
cloned_tmp_repo = self.make_temp_dir()
git.clone(tmp_repo, cloned_tmp_repo)
expected_cloned_files = [ path.join(cloned_tmp_repo, path.basename(f)) for f in new_files ]
for f in expected_cloned_files:
self.assertTrue( path.exists(f) )
@git_temp_home_func()
def test_tag(self):
tmp_repo = self._create_tmp_repo()
new_files = self._create_tmp_files(tmp_repo)
git.add(tmp_repo, new_files)
git.commit(tmp_repo, 'nomsg\n', '.')
git.tag(tmp_repo, '1.0.0')
self.assertEqual( [ '1.0.0' ], git.list_local_tags(tmp_repo) )
git.tag(tmp_repo, '1.0.1')
self.assertEqual( [ '1.0.0', '1.0.1' ], git.list_local_tags(tmp_repo) )
git.tag(tmp_repo, '1.0.9')
git.tag(tmp_repo, '1.0.10')
self.assertEqual( [ '1.0.0', '1.0.1', '1.0.9', '1.0.10' ], git.list_local_tags(tmp_repo) )
self.assertEqual( '1.0.10', git.greatest_local_tag(tmp_repo) )
self.assertEqual( ['1.0.0', '1.0.1', '1.0.10', '1.0.9'], git.list_local_tags(tmp_repo, lexical = True) )
self.assertEqual( [ '1.0.10', '1.0.9', '1.0.1', '1.0.0' ], git.list_local_tags(tmp_repo, reverse = True) )
@git_temp_home_func()
def test_delete_local_tag(self):
tmp_repo = self._create_tmp_repo()
new_files = self._create_tmp_files(tmp_repo)
git.add(tmp_repo, new_files)
git.commit(tmp_repo, 'nomsg\n', '.')
git.tag(tmp_repo, '1.0.0')
git.tag(tmp_repo, '1.0.1')
git.tag(tmp_repo, '1.0.9')
git.tag(tmp_repo, '1.0.10')
self.assertEqual( [ '1.0.0', '1.0.1', '1.0.9', '1.0.10' ], git.list_local_tags(tmp_repo) )
git.delete_local_tag(tmp_repo, '1.0.9')
self.assertEqual( [ '1.0.0', '1.0.1', '1.0.10' ], git.list_local_tags(tmp_repo) )
git.delete_local_tag(tmp_repo, '1.0.0')
self.assertEqual( [ '1.0.1', '1.0.10' ], git.list_local_tags(tmp_repo) )
git.delete_local_tag(tmp_repo, '1.0.10')
self.assertEqual( [ '1.0.1' ], git.list_local_tags(tmp_repo) )
git.delete_local_tag(tmp_repo, '1.0.1')
self.assertEqual( [], git.list_local_tags(tmp_repo) )
@git_temp_home_func()
def test_tag_allow_downgrade_error(self):
tmp_repo = self._create_tmp_repo()
new_files = self._create_tmp_files(tmp_repo)
git.add(tmp_repo, new_files)
git.commit(tmp_repo, 'nomsg\n', '.')
git.tag(tmp_repo, '1.0.100')
self.assertEqual( '1.0.100', git.greatest_local_tag(tmp_repo) )
with self.assertRaises(ValueError) as ctx:
git.tag(tmp_repo, '1.0.99')
@git_temp_home_func()
def test_tag_allow_downgrade(self):
tmp_repo = self._create_tmp_repo()
new_files = self._create_tmp_files(tmp_repo)
git.add(tmp_repo, new_files)
git.commit(tmp_repo, 'nomsg\n', '.')
git.tag(tmp_repo, '1.0.100')
self.assertEqual( '1.0.100', git.greatest_local_tag(tmp_repo) )
git.tag(tmp_repo, '1.0.99', allow_downgrade = True)
self.assertEqual( '1.0.100', git.greatest_local_tag(tmp_repo) )
self.assertEqual( [ '1.0.99', '1.0.100' ], git.list_local_tags(tmp_repo) )
@git_temp_home_func()
def test_read_gitignore(self):
tmp_repo = self._create_tmp_repo()
new_files = self._create_tmp_files(tmp_repo)
git.add(tmp_repo, new_files)
git.commit(tmp_repo, 'nomsg\n', '.')
self.assertEqual( None, git.read_gitignore(tmp_repo) )
file_util.save(path.join(tmp_repo, '.gitignore'), content = 'foo.txt\nbar.txt\nBUILD\n*~\n')
git.add(tmp_repo, '.gitignore')
git.commit(tmp_repo, 'add .gitignore\n', '.')
self.assertEqual( [
'foo.txt',
'bar.txt',
'BUILD',
'*~',
], git.read_gitignore(tmp_repo) )
@git_temp_home_func()
def test_archive_local_repo(self):
tmp_repo = self._create_tmp_repo()
new_files = self._create_tmp_files(tmp_repo)
git.add(tmp_repo, new_files)
git.commit(tmp_repo, 'nomsg\n', '.')
tmp_archive = self.make_temp_file()
git.archive(tmp_repo, 'master', 'foo', tmp_archive)
self.assertEqual( [
'foo-master/',
'foo-master/bar.txt',
'foo-master/foo.txt',
], archiver.members(tmp_archive) )
@git_temp_home_func()
def test_archive_local_repo_untracked(self):
tmp_repo = self._create_tmp_repo()
new_files = self._create_tmp_files(tmp_repo)
git.add(tmp_repo, new_files)
git.commit(tmp_repo, 'nomsg\n', '.')
file_util.save(path.join(tmp_repo, 'kiwi.txt'), content = 'this is kiwi.txt\n')
tmp_archive = self.make_temp_file()
git.archive(tmp_repo, 'master', 'foo', tmp_archive, untracked = True)
self.assertEqual( [
'foo-master/',
'foo-master/bar.txt',
'foo-master/foo.txt',
'foo-master/kiwi.txt',
], archiver.members(tmp_archive) )
@git_temp_home_func()
def test_archive_local_repo_untracked_gitignore(self):
tmp_repo = self._create_tmp_repo()
new_files = self._create_tmp_files(tmp_repo)
git.add(tmp_repo, new_files)
git.commit(tmp_repo, 'nomsg\n', '.')
file_util.save(path.join(tmp_repo, 'kiwi.txt'), content = 'this is kiwi.txt\n')
file_util.save(path.join(tmp_repo, 'ignored.txt'), content = 'this is ignored.txt\n')
file_util.save(path.join(tmp_repo, '.gitignore'), content = 'ignored.txt\n')
tmp_archive = self.make_temp_file()
git.archive(tmp_repo, 'master', 'foo', tmp_archive, untracked = True)
self.assertEqual( [
'foo-master/',
'foo-master/.gitignore',
'foo-master/bar.txt',
'foo-master/foo.txt',
'foo-master/kiwi.txt',
], archiver.members(tmp_archive) )
@env_override_temp_home_func()
def test_config(self):
self.assertEqual( None, git.config_get_value('user.name') )
self.assertEqual( None, git.config_get_value('user.email') )
git.config_set_value('user.name', 'foo bar')
self.assertEqual( 'foo bar', git.config_get_value('user.name') )
git.config_set_value('user.email', '<EMAIL>')
self.assertEqual( '<EMAIL>', git.config_get_value('user.email') )
self.assertEqual( ( 'foo bar', '<EMAIL>' ), git.config_get_identity() )
git.config_set_identity('green kiwi', '<EMAIL>')
self.assertEqual( ( '<NAME>', '<EMAIL>' ), git.config_get_identity() )
git.config_unset_value('user.email')
self.assertEqual( ( '<NAME>', None ), git.config_get_identity() )
git.config_unset_value('user.name')
self.assertEqual( ( None, None ), git.config_get_identity() )
@git_temp_home_func()
def test_has_changes(self):
tmp_repo = self._create_tmp_repo()
self.assertFalse( git.has_changes(tmp_repo) )
new_files = self._create_tmp_files(tmp_repo)
self.assertFalse( git.has_changes(tmp_repo) )
git.add(tmp_repo, new_files)
self.assertTrue( git.has_changes(tmp_repo) )
git.commit(tmp_repo, 'nomsg\n', '.')
self.assertFalse( git.has_changes(tmp_repo) )
@git_temp_home_func()
def test_has_determine_where(self):
self.assertEqual( 'both', git.determine_where(True, True) )
self.assertEqual( 'local', git.determine_where(True, False) )
self.assertEqual( 'remote', git.determine_where(False, True) )
self.assertEqual( 'both', git.determine_where(None, None) )
@git_temp_home_func()
def test_is_long_hash(self):
self.assertTrue( git.is_long_hash('cd138635e1a94a6f2da6acbce3e2f2d584121d28') )
self.assertFalse( git.is_long_hash('zd138635e1a94a6f2da6acbce3e2f2d584121d28') )
self.assertFalse( git.is_long_hash('cd13863') )
@git_temp_home_func()
def test_is_short_hash(self):
self.assertTrue( git.is_short_hash('cd13863') )
self.assertFalse( git.is_short_hash('cd138635e1a94a6f2da6acbce3e2f2d584121d28') )
self.assertFalse( git.is_short_hash('zd13863') )
@git_temp_home_func()
def test_is_repo_true(self):
tmp_repo = self._create_tmp_repo()
tmp_bare_repo = self._create_tmp_repo('--bare')
self.assertTrue( git.is_repo(tmp_repo) )
self.assertFalse( git.is_bare_repo(tmp_repo) )
@git_temp_home_func()
def test_is_repo_false(self):
tmp_repo = self.make_temp_dir()
self.assertFalse( git.is_repo(tmp_repo) )
@git_temp_home_func()
def test_is_bare_repo_true(self):
tmp_repo = self._create_tmp_repo()
tmp_bare_repo = self._create_tmp_repo('--bare')
self.assertFalse( git.is_bare_repo(tmp_repo) )
self.assertTrue( git.is_bare_repo(tmp_bare_repo) )
@git_temp_home_func()
def test_is_bare_repo_false(self):
tmp_bare_repo = self.make_temp_dir()
self.assertFalse( git.is_bare_repo(tmp_bare_repo) )
@git_temp_home_func()
def test_find_root_dir(self):
tmp_repo = self._create_tmp_repo()
self.assertEqual( tmp_repo, git.find_root_dir(start_dir = tmp_repo) )
d = path.join(tmp_repo, 'foo', 'bar', 'baz')
file_util.mkdir(d)
self.assertEqual( tmp_repo, git.find_root_dir(start_dir = d) )
self.assertEqual( None, git.find_root_dir(self.make_temp_dir()) )
if __name__ == '__main__':
unit_test.main()
| 2.078125
| 2
|
returns/ejemplo_1_return_un_valor.py
|
Invarato/Jarroba
| 2
|
12780433
|
<filename>returns/ejemplo_1_return_un_valor.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def dame_un_texto():
return "Texto"
def dame_un_numero():
return 123
def dame_un_booleano():
return True
def dame_un_listado():
return ["A", "B", "C"]
def dame_un_diccionario():
return {
"casa": "house",
"tarta": "cake"
}
def dame_un_none():
return None
if __name__ == "__main__":
valor = dame_un_texto()
print(valor)
print(dame_un_numero())
print(dame_un_booleano())
print(dame_un_listado())
print(dame_un_diccionario())
print(dame_un_none())
| 3.125
| 3
|
tests/shinytest/commands/test_commands.py
|
shinymud/ShinyMUD
| 35
|
12780434
|
from shinytest import ShinyTestCase
# Test all of the general commands!
class TestGeneralCommands(ShinyTestCase):
def test_command_register(self):
from shinymud.models.area import Area
from shinymud.data import config
from shinymud.models.player import Player
from shinymud.commands import CommandRegister
cmds = CommandRegister()
self.assertEqual(cmds['supercalifragilisticexpieladocious'], None,
"Command Register is returning things it doesn't have!")
cmds.register((lambda : 3), ['bob', 'sam'])
self.assertEqual(cmds['bob'](), 3,
"Registered command 'bob' returned wrong value.")
self.assertEqual(cmds['sam'](), 3,
"Registered command 'sam' returned wrong value.")
self.assertEqual(cmds['bob'], cmds['sam'],
"Registered aliases 'bob' and 'sam' did not return same function.")
def test_chat_command(self):
from shinymud.models.area import Area
from shinymud.data import config
from shinymud.models.player import Player
from shinymud.commands.commands import Chat
bob = Player(('bob', 'bar'))
alice = Player(('alice', 'bar'))
sam = Player(('sam', 'bar'))
bob.mode = None
bob.playerize({'name':'bob', 'password':'<PASSWORD>'})
bob.outq = []
sam.mode = None
sam.playerize({'name':'sam', 'password':'<PASSWORD>'})
sam.outq = []
self.world.player_add(bob)
self.world.player_add(sam)
self.world.player_add(alice)
Chat(bob, 'lol, hey guys!', 'chat').run()
chat = config.chat_color + 'Bob chats, "lol, hey guys!"' + config.clear_fcolor
self.assertTrue(chat in sam.outq)
self.assertTrue(chat in bob.outq)
self.assertFalse(chat in alice.outq)
sam.channels['chat'] = False
print sam.channels
print bob.channels
sam.outq = []
bob.outq = []
alice.outq = []
Chat(bob, 'lol, hey guys!', 'chat').run()
print sam.channels
print sam.outq
print bob.channels
print bob.outq
self.assertFalse(chat in sam.outq)
self.assertTrue(chat in bob.outq)
self.assertFalse(chat in alice.outq)
def test_give_command(self):
from shinymud.models.area import Area
from shinymud.data import config
from shinymud.models.player import Player
from shinymud.commands.commands import Give
area = Area.create({'name':'blarg'})
room = area.new_room()
bob = Player(('bob', 'bar'))
bob.mode = None
bob.playerize({'name':'bob', 'password':'<PASSWORD>'})
alice = Player(('alice', 'bar'))
alice.mode = None
alice.playerize({'name':'alice', 'password':'<PASSWORD>'})
self.world.player_add(bob)
self.world.player_add(alice)
room.add_char(bob)
room.add_char(alice)
alice.location = room
bob.location = room
proto_npc = area.new_npc()
npc = proto_npc.load()
room.add_char(npc)
item = area.new_item()
item.build_set_keywords('bauble', bob)
item.build_set_name('a bauble', bob)
bob.item_add(item.load())
self.assertEqual(len(bob.inventory), 1)
Give(bob, 'bauble to alice', 'give').run()
self.assertEqual(len(bob.inventory), 0)
self.assertEqual(len(alice.inventory), 1)
to_alice = 'Bob gives you a bauble.'
self.assertTrue(to_alice in alice.outq)
to_bob = 'You give a bauble to Alice.'
self.assertTrue(to_bob in bob.outq)
Give(alice, 'bauble to shiny', 'give').run()
self.assertEqual(len(alice.inventory), 0)
self.assertEqual(len(npc.inventory), 1)
to_alice = 'You give a bauble to %s.' % npc.name
alice.world.log.debug(alice.outq)
self.assertTrue(to_alice in alice.outq)
to_shiny = 'Alice gives you a bauble.'
self.assertTrue(to_shiny in npc.actionq)
#Test Money
bob.currency = 100
com = config.CURRENCY + ' to alice'
#Test give one currency unit
self.assertEqual(alice.currency, 0)
Give(bob, com, 'give').run()
self.assertEqual(bob.currency, 99)
self.assertEqual(alice.currency, 1)
#test give multiple currencies
com = '99' + config.CURRENCY + ' to alice'
Give(bob, com, 'give').run()
self.assertEqual(bob.currency, 0)
self.assertEqual(alice.currency, 100)
#test give more than bob has
com = '1000' + config.CURRENCY + ' to alice'
Give(bob, com, 'give').run()
self.assertEqual(bob.currency, 0)
self.assertEqual(alice.currency, 100)
def test_set_command(self):
from shinymud.models.area import Area
from shinymud.data import config
from shinymud.models.player import Player
from shinymud.commands.commands import Set
bob = Player(('bob', 'bar'))
bob.mode = None
bob.playerize({'name':'bob', 'password':'<PASSWORD>'})
# Test setting e-mail
Set(bob, 'email <EMAIL>', 'set').run()
self.assertEqual('<EMAIL>', bob.email)
# Test setting title
Set(bob, 'title is the best EVAR', 'set').run()
self.assertEqual('is the best EVAR', bob.title)
# Try to set goto_appear and goto_disappear (both should fail
# since this player shouldn't have permissions)
Set(bob, 'goto_appear Bob pops in from nowhere.', 'set').run()
eresult = 'You don\'t have the permissions to set that.'
self.assertTrue(eresult in bob.outq)
bob.outq = []
Set(bob, 'goto_disappear foo', 'set').run()
self.assertTrue(eresult in bob.outq)
bob.permissions = bob.permissions | config.BUILDER
# Try to set goto_appear and goto_disappear (both should now
# succeed now that the player has adequate permissions)
Set(bob, 'goto_appear Bob pops in from nowhere.', 'set').run()
self.assertEqual('Bob pops in from nowhere.', bob.goto_appear)
bob.outq = []
Set(bob, 'goto_disappear foo', 'set').run()
self.assertEqual('foo', bob.goto_disappear)
def test_goto_command(self):
from shinymud.models.area import Area
from shinymud.data import config
from shinymud.models.player import Player
from shinymud.commands.commands import Goto
blarg_area = Area.create({'name':'blarg'})
foo_area = Area.create({'name':'foo'})
blarg_room = blarg_area.new_room()
foo_room = foo_area.new_room()
bob = Player(('bob', 'bar'))
bob.mode = None
bob.playerize({'name':'bob', 'password':'<PASSWORD>'})
self.world.player_add(bob)
bob.permissions = bob.permissions | config.BUILDER
generic_fail = 'Type "help goto" for help with this command.'
# We should fail if we only specify a room number when we aren't in
# an area
Goto(bob, '%s' % foo_room.id, 'goto').run()
self.assertEqual(bob.location, None)
bob.world.log.debug(bob.outq)
self.assertTrue(generic_fail in bob.outq)
# We should fail if we try to go to a room in an area that doesn't
# exist
message = 'Area "food" doesn\'t exist.'
Goto(bob, '1 food', 'goto').run()
self.assertEqual(bob.location, None)
bob.world.log.debug(bob.outq)
self.assertTrue(message in bob.outq)
# We should fail if we try to go to a room that doesn't exist (in an
# area that does)
message = 'Room "4005" doesn\'t exist in area blarg.'
Goto(bob, '4005 blarg', 'goto').run()
self.assertEqual(bob.location, None)
bob.world.log.debug(bob.outq)
self.assertTrue(message in bob.outq)
# We should succeed in going to a room and area that exists
Goto(bob, '%s %s' % (foo_room.id, foo_room.area.name), 'goto').run()
self.assertEqual(bob.location, foo_room)
Goto(bob, '%s %s' % (blarg_room.id, blarg_room.area.name), 'goto').run()
self.assertEqual(bob.location, blarg_room)
blarg_r2 = blarg_area.new_room()
Goto(bob, '%s' % (blarg_r2.id), 'goto').run()
self.assertEqual(bob.location, blarg_r2)
# We should get a help message if there is only white space given
bob.outq = []
Goto(bob, ' ', 'goto').run()
fail = 'Type "help goto" for help with this command.'
self.assertTrue(fail in bob.outq)
| 2.25
| 2
|
vang/misc/wc.py
|
bjuvensjo/scripts
| 6
|
12780435
|
#!/usr/bin/env python3
import argparse
from os import walk
from pprint import pprint
from re import fullmatch
from sys import argv
def is_excluded(file, excluded):
    return any(fullmatch(pattern, file) for pattern in excluded)
def is_included(file, included):
    return any(fullmatch(pattern, file) for pattern in included)
def get_files(root_dir, excluded=(), included=('.*',)):
for root, dirs, files in walk(root_dir):
for f in files:
if is_included(f, included) and not is_excluded(f, excluded):
yield root, f
def count_words(line):
n = 0
for s in line.split(' '):
if s.strip():
n += 1
return n
def count_letters(line):
return len(line.strip())
def count(root, file):
line_count = 0
word_count = 0
letter_count = 0
with open(f'{root}/{file}', 'rt', encoding='utf-8') as f:
for line in f:
if line.strip():
line_count += 1
word_count += count_words(line)
letter_count += count_letters(line)
return line_count, word_count, letter_count
def count_all(dirs=('.',), excluded=(), included=('.*',)):
total_files = 0
total_lines = 0
total_words = 0
total_letters = 0
for d in dirs:
for root, file in get_files(d, excluded, included):
total_files += 1
line_count, word_count, letter_count = count(root, file)
total_lines += line_count
total_words += word_count
total_letters += letter_count
return {'files': total_files, 'lines': total_lines, 'words': total_words, 'letters': total_letters}
def parse_args(args):
parser = argparse.ArgumentParser(description='Count files, lines, words and letters.')
parser.add_argument('-d', '--dirs', nargs='*', default=['.'], help='Directories to count in')
parser.add_argument('-e', '--excluded', nargs='*', default=[],
help='File name exclusion patterns, e.g .*Test\\..* .*IT\\..*')
parser.add_argument('-i', '--included', nargs='*', default=['.*'],
help='File name inclusion patterns, e.g .*\\.groovy .*\\.java .*\\.py')
return parser.parse_args(args)
def main(dirs=('.',), excluded=(), included=('.*',)):
result = count_all(dirs, excluded=excluded, included=included)
pprint(result)
if __name__ == '__main__': # pragma: no cover
main(**parse_args(argv[1:]).__dict__)
# code_dirs = ['/Users/magnus/git/rsimulator/rsimulator-camel-direct',
# '/Users/magnus/git/rsimulator/rsimulator-core',
# '/Users/magnus/git/rsimulator/rsimulator-cxf-rt-transport']
# code_dirs = ['/Users/magnus/git/rsimulator']
#
# result = count_all(
# code_dirs,
# # excluded=('.*Test\..*', '.*IT\..*', 'test.*'),
# included=('.*\.groovy', '.*\.java', '.*\.kt', '.*\.py', 'Jenkinsfile'))
# pprint(result)
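# Hedged usage sketch (directory names and patterns below are illustrative, not from this repo):
#   ./wc.py -d src test -i '.*\.py' -e '.*_test\.py'
# which prints a dict along the lines of:
#   {'files': 12, 'letters': 8401, 'lines': 310, 'words': 1204}
# The same counts are available programmatically via count_all(['src'], included=('.*\\.py',)).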
| 3.203125
| 3
|
test/test_grocy.py
|
cerebrate/pygrocy
| 1
|
12780436
|
from unittest import TestCase
from unittest.mock import patch, mock_open
from datetime import datetime
import responses
from pygrocy import Grocy
from pygrocy.grocy import Product
from pygrocy.grocy import Group
from pygrocy.grocy import ShoppingListProduct
from pygrocy.grocy_api_client import CurrentStockResponse, GrocyApiClient
class TestGrocy(TestCase):
def setUp(self):
self.grocy = Grocy("https://example.com", "api_key")
def test_init(self):
assert isinstance(self.grocy, Grocy)
@responses.activate
def test_get_chores_valid_no_details(self):
resp = [
{
"chore_id": "1",
"last_tracked_time": "2019-11-18 00:00:00",
"next_estimated_execution_time": "2019-11-25 00:00:00",
"track_date_only": "1"
},
{
"chore_id": "2",
"last_tracked_time": "2019-11-16 00:00:00",
"next_estimated_execution_time": "2019-11-23 00:00:00",
"track_date_only": "1"
},
{
"chore_id": "3",
"last_tracked_time": "2019-11-10 00:00:00",
"next_estimated_execution_time": "2019-12-10 00:00:00",
"track_date_only": "1"
},
{
"chore_id": "4",
"last_tracked_time": "2019-11-18 00:00:00",
"next_estimated_execution_time": "2019-11-25 00:00:00",
"track_date_only": "1",
}
]
responses.add(responses.GET, "https://example.com:9192/api/chores", json=resp, status=200)
chores = self.grocy.chores(get_details=False)
assert isinstance(chores, list)
assert len(chores) == 4
assert chores[0].chore_id == 1
assert chores[1].chore_id == 2
assert chores[2].chore_id == 3
assert chores[3].chore_id == 4
@responses.activate
def test_product_get_details_valid(self):
current_stock_response = CurrentStockResponse({
"product_id": 0,
"amount": "0.33",
"best_before_date": "2019-05-02"
})
product = Product(current_stock_response)
api_client = GrocyApiClient("https://example.com", "api_key")
resp = {
"product": {
"id": 0,
"name": "string",
"description": "string",
"location_id": 0,
"qu_id_purchase": 0,
"qu_id_stock": 0,
"qu_factor_purchase_to_stock": 0,
"barcode": "string",
"product_group_id": 0,
"min_stock_amount": 0,
"default_best_before_days": 0,
"picture_file_name": "string",
"allow_partial_units_in_stock": True,
"row_created_timestamp": "2019-05-02T18:30:48.041Z"
},
"quantity_unit_purchase": {
"id": 0,
"name": "string",
"name_plural": "string",
"description": "string",
"row_created_timestamp": "2019-05-02T18:30:48.041Z"
},
"quantity_unit_stock": {
"id": 0,
"name": "string",
"name_plural": "string",
"description": "string",
"row_created_timestamp": "2019-05-02T18:30:48.041Z"
},
"last_purchased": "2019-05-02",
"last_used": "2019-05-02T18:30:48.041Z",
"stock_amount": 0,
"stock_amount_opened": 0,
"next_best_before_date": "2019-05-02T18:30:48.041Z",
"last_price": 0,
"location": {
"id": 0,
"name": "string",
"description": "string",
"row_created_timestamp": "2019-05-02T18:30:48.041Z"
}
}
responses.add(responses.GET, "https://example.com:9192/api/stock/products/0", json=resp, status=200)
product.get_details(api_client)
assert product.name == "string"
assert product.product_group_id == 0
@responses.activate
def test_product_get_details_invalid_no_data(self):
current_stock_response = CurrentStockResponse({
"product_id": 0,
"amount": "0.33",
"best_before_date": "2019-05-02"
})
product = Product(current_stock_response)
api_client = GrocyApiClient("https://example.com", "api_key")
responses.add(responses.GET, "https://example.com:9192/api/stock/products/0", status=200)
product.get_details(api_client)
assert product.name is None
@responses.activate
def test_get_stock_valid(self):
resp = [
{
"product_id": 0,
"amount": "0.33",
"best_before_date": "2019-05-02"
}
]
responses.add(responses.GET, "https://example.com:9192/api/stock", json=resp, status=200)
stock = self.grocy.stock()
assert isinstance(stock, list)
assert len(stock) == 1
for prod in stock:
assert isinstance(prod, Product)
@responses.activate
def test_get_stock_invalid_no_data(self):
responses.add(responses.GET, "https://example.com:9192/api/stock", status=200)
assert self.grocy.stock() is None
@responses.activate
def test_get_stock_invalid_missing_data(self):
resp = [
{
}
]
responses.add(responses.GET, "https://example.com:9192/api/stock", json=resp, status=200)
@responses.activate
def test_get_shopping_list_valid(self):
resp = [
{
"id": 1,
"product_id": 6,
"note": "string",
"amount": 2,
"row_created_timestamp": "2019-04-17 10:30:00",
"shopping_list_id": 1,
"done": 0
}
]
responses.add(responses.GET, "https://example.com:9192/api/objects/shopping_list", json=resp, status=200)
shopping_list = self.grocy.shopping_list()
assert isinstance(shopping_list, list)
assert len(shopping_list) == 1
for item in shopping_list:
assert isinstance(item, ShoppingListProduct)
@responses.activate
def test_get_shopping_list_invalid_no_data(self):
responses.add(responses.GET, "https://example.com:9192/api/objects/shopping_list", status=400)
assert self.grocy.shopping_list() is None
@responses.activate
def test_get_shopping_list_invalid_missing_data(self):
resp = [
{
}
]
responses.add(responses.GET, "https://example.com:9192/api/objects/shopping_list", json=resp, status=200)
@responses.activate
def test_add_missing_product_to_shopping_list_valid(self):
responses.add(responses.POST, "https://example.com:9192/api/stock/shoppinglist/add-missing-products", status=204)
assert self.grocy.add_missing_product_to_shopping_list().status_code == 204
@responses.activate
def test_add_missing_product_to_shopping_list_error(self):
responses.add(responses.POST, "https://example.com:9192/api/stock/shoppinglist/add-missing-products", status=400)
assert self.grocy.add_missing_product_to_shopping_list().status_code != 204
@responses.activate
def test_add_product_to_shopping_list_valid(self):
responses.add(responses.POST, "https://example.com:9192/api/stock/shoppinglist/add-product", status=204)
assert self.grocy.add_product_to_shopping_list(1).status_code == 204
@responses.activate
def test_add_product_to_shopping_list_error(self):
responses.add(responses.POST, "https://example.com:9192/api/stock/shoppinglist/add-product", status=400)
assert self.grocy.add_product_to_shopping_list(1).status_code != 204
@responses.activate
def test_clear_shopping_list_valid(self):
responses.add(responses.POST, "https://example.com:9192/api/stock/shoppinglist/clear", status=204)
assert self.grocy.clear_shopping_list().status_code == 204
@responses.activate
def test_clear_shopping_list_error(self):
responses.add(responses.POST, "https://example.com:9192/api/stock/shoppinglist/clear", status=400)
assert self.grocy.clear_shopping_list().status_code != 204
@responses.activate
def test_remove_product_in_shopping_list_valid(self):
responses.add(responses.POST, "https://example.com:9192/api/stock/shoppinglist/remove-product", status=204)
assert self.grocy.remove_product_in_shopping_list(1).status_code == 204
@responses.activate
def test_remove_product_in_shopping_list_error(self):
responses.add(responses.POST, "https://example.com:9192/api/stock/shoppinglist/remove-product", status=400)
assert self.grocy.remove_product_in_shopping_list(1).status_code != 204
@responses.activate
def test_get_product_groups_valid(self):
resp = [
{
"id": 1,
"name": "string",
"description": "string",
"row_created_timestamp": "2019-04-17 10:30:00",
}
]
responses.add(responses.GET, "https://example.com:9192/api/objects/product_groups", json=resp, status=200)
product_groups_list = self.grocy.product_groups()
assert isinstance(product_groups_list, list)
assert len(product_groups_list) == 1
for item in product_groups_list:
assert isinstance(item, Group)
@responses.activate
def test_get_product_groups_invalid_no_data(self):
responses.add(responses.GET, "https://example.com:9192/api/objects/product_groups", status=400)
assert self.grocy.product_groups() is None
@responses.activate
def test_get_product_groups_invalid_missing_data(self):
resp = [
{
}
]
responses.add(responses.GET, "https://example.com:9192/api/objects/product_groups", json=resp, status=200)
@responses.activate
def test_upload_product_picture_valid(self):
with patch("os.path.exists" ) as m_exist:
with patch("builtins.open", mock_open()) as m_open:
m_exist.return_value = True
api_client = GrocyApiClient("https://example.com", "api_key")
responses.add(responses.PUT, "https://example.com:9192/api/files/productpictures/MS5qcGc=", status=204)
assert api_client.upload_product_picture(1,"/somepath/pic.jpg").status_code == 204
@responses.activate
def test_upload_product_picture_invalid_missing_data(self):
with patch("os.path.exists" ) as m_exist:
m_exist.return_value = False
api_client = GrocyApiClient("https://example.com", "api_key")
responses.add(responses.PUT, "https://example.com:9192/api/files/productpictures/MS5qcGc=", status=204)
assert api_client.upload_product_picture(1,"/somepath/pic.jpg") is None
@responses.activate
def test_upload_product_picture_error(self):
with patch("os.path.exists" ) as m_exist:
with patch("builtins.open", mock_open()) as m_open:
m_exist.return_value = True
api_client = GrocyApiClient("https://example.com", "api_key")
responses.add(responses.PUT, "https://example.com:9192/api/files/productpictures/MS5qcGc=", status=400)
assert api_client.upload_product_picture(1,"/somepath/pic.jpg").status_code != 204
@responses.activate
def test_update_product_pic_valid(self):
api_client = GrocyApiClient("https://example.com", "api_key")
responses.add(responses.PUT, "https://example.com:9192/api/objects/products/1", status=204)
assert api_client.update_product_pic(1).status_code == 204
@responses.activate
def test_update_product_pic_error(self):
api_client = GrocyApiClient("https://example.com", "api_key")
responses.add(responses.PUT, "https://example.com:9192/api/objects/products/1", status=400)
assert api_client.update_product_pic(1).status_code != 204
@responses.activate
def test_get_expiring_products_valid(self):
resp = {
"expiring_products" : [
{
"product_id": 0,
"amount": "0.33",
"best_before_date": "2019-05-02",
"amount_opened": "0"
}
],
"expired_products": [],
"missing_products": []
}
responses.add(responses.GET, "https://example.com:9192/api/stock/volatile", json=resp, status=200)
expiring_product = self.grocy.expiring_products()
assert isinstance(expiring_product, list)
assert len(expiring_product) == 1
for prod in expiring_product:
assert isinstance(prod, Product)
@responses.activate
def test_get_expiring_invalid_no_data(self):
resp = {
"expiring_products": [],
"expired_products": [],
"missing_products": []
}
responses.add(responses.GET, "https://example.com:9192/api/stock/volatile", json=resp, status=200)
assert not self.grocy.expiring_products()
@responses.activate
def test_get_expiring_invalid_missing_data(self):
resp = {}
responses.add(responses.GET, "https://example.com:9192/api/stock/volatile", json=resp, status=200)
@responses.activate
def test_get_expired_products_valid(self):
resp = {
"expired_products" : [
{
"product_id": 0,
"amount": "0.33",
"best_before_date": "2019-05-02",
"amount_opened": "0"
}
],
"expiring_products": [],
"missing_products": []
}
responses.add(responses.GET, "https://example.com:9192/api/stock/volatile", json=resp, status=200)
expired_product = self.grocy.expired_products()
assert isinstance(expired_product, list)
assert len(expired_product) == 1
for prod in expired_product:
assert isinstance(prod, Product)
@responses.activate
def test_get_expired_invalid_no_data(self):
resp = {
"expiring_products": [],
"expired_products": [],
"missing_products": []
}
responses.add(responses.GET, "https://example.com:9192/api/stock/volatile", json=resp, status=200)
assert not self.grocy.expired_products()
@responses.activate
def test_get_expired_invalid_missing_data(self):
resp = {}
responses.add(responses.GET, "https://example.com:9192/api/stock/volatile", json=resp, status=200)
@responses.activate
def test_get_missing_products_valid(self):
resp = {
"missing_products" : [
{
"product_id": 0,
"amount": "0.33",
"best_before_date": "2019-05-02",
"amount_opened": "0"
}
],
"expired_products": [],
"expiring_products": []
}
responses.add(responses.GET, "https://example.com:9192/api/stock/volatile", json=resp, status=200)
missing_product = self.grocy.missing_products()
assert isinstance(missing_product, list)
assert len(missing_product) == 1
for prod in missing_product:
assert isinstance(prod, Product)
@responses.activate
def test_get_missing_invalid_no_data(self):
resp = {
"expiring_products": [],
"expired_products": [],
"missing_products": []
}
responses.add(responses.GET, "https://example.com:9192/api/stock/volatile", json=resp, status=200)
assert not self.grocy.missing_products()
@responses.activate
    def test_get_missing_invalid_missing_data(self):
resp = {}
responses.add(responses.GET, "https://example.com:9192/api/stock/volatile", json=resp, status=200)
@responses.activate
def test_get_userfields_valid(self):
resp = {
"uf1": 0,
"uf2": "string"
}
responses.add(responses.GET, "https://example.com:9192/api/userfields/chores/1", json=resp, status=200)
a_chore_uf = self.grocy.get_userfields("chores",1)
assert a_chore_uf['uf1'] == 0
@responses.activate
def test_get_userfields_invalid_no_data(self):
resp = []
responses.add(responses.GET, "https://example.com:9192/api/userfields/chores/1", json=resp ,status=200)
assert not self.grocy.get_userfields("chores",1)
@responses.activate
def test_set_userfields_valid(self):
responses.add(responses.PUT, "https://example.com:9192/api/userfields/chores/1", status=204)
assert self.grocy.set_userfields("chores",1,"auserfield","value").status_code == 204
@responses.activate
def test_set_userfields_error(self):
responses.add(responses.PUT, "https://example.com:9192/api/userfields/chores/1", status=400)
assert self.grocy.set_userfields("chores",1,"auserfield","value").status_code != 204
@responses.activate
def test_get_last_db_changed_valid(self):
resp = { "changed_time": "2019-09-18T05:30:58.598Z" }
responses.add(responses.GET, "https://example.com:9192/api/system/db-changed-time", json=resp, status=200)
timestamp = self.grocy.get_last_db_changed()
assert isinstance(timestamp, datetime)
@responses.activate
def test_get_last_db_changed_invalid_no_data(self):
resp = {}
responses.add(responses.GET, "https://example.com:9192/api/system/db-changed-time", json=resp ,status=200)
assert self.grocy.get_last_db_changed() is None
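# Hedged usage sketch for the client exercised above (URL and key are placeholders; only
# calls that appear in these tests are shown):
#   grocy = Grocy("https://example.com", "api_key")
#   stock = grocy.stock()                    # list of Product
#   shopping = grocy.shopping_list()         # list of ShoppingListProduct
#   chores = grocy.chores(get_details=False) # list of chore entries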
| 2.53125
| 3
|
langtojson/__init__.py
|
Ars2014/langtojson
| 0
|
12780437
|
<reponame>Ars2014/langtojson
"""Top-level package for LangToJson."""
__author__ = """<NAME>"""
__email__ = '<EMAIL>'
__version__ = '0.1.0'
| 0.714844
| 1
|
python3/ltk/actions/reference_action.py
|
Lingotek/filesystem-connector
| 11
|
12780438
|
from ltk.actions.action import *
class ReferenceAction(Action):
def __init__(self, path):
Action.__init__(self, path)
def reference_add_action(self, filename, doc_id):
if self._check_filename(filename, doc_id):
material = []
while True:
while True:
prompt_message = "Reference Material file: "
# Python 2
# file_input = raw_input(prompt_message)
# End Python 2
# Python 3
file_input = input(prompt_message)
if not file_input:
logger.warning("You must enter a path to reference material")
continue
ref_file = os.path.abspath(os.path.expanduser(file_input))
if os.path.isfile(ref_file):
break
else:
logger.error(ref_file+" could not be found")
prompt_message = "Reference Material Name: "
# Python 2
# name_input = raw_input(prompt_message)
# End Python 2
# Python 3
name_input = input(prompt_message)
prompt_message = "Reference Material Description: "
# Python 2
# desc_input = raw_input(prompt_message)
# End Python 2
# Python 3
desc_input = input(prompt_message)
reference = {'file': ref_file}
if name_input:
reference['name'] = name_input
else:
reference['name'] = os.path.basename(ref_file)
if desc_input:
reference['description'] = desc_input
material.append(reference)
if not yes_no_prompt("Would you like to add another reference material?", default_yes=False):
break
if doc_id:
document_id = filename
else:
doc_entity = self.doc_manager.get_doc_by_prop('file_name', self.norm_path(filename))
if not doc_entity:
logger.error("{0} could not be found in local database".format(self.norm_path(filename)))
return
document_id = doc_entity['id']
for reference in material:
response = self.api.document_add_reference(document_id, reference)
if response.status_code == 404:
logger.warning("The reference material could not be added because the document could not be found in Lingotek. The document may still be in the process of uploading.")
elif response.status_code != 202:
logger.info("The reference material could not be added")
logger.error(response.json()['messages'])
else:
logger.info("{0} ({1}) has been added to the document".format(reference['name'], response.json()['properties']['id']))
def reference_list_action(self, filename, doc_id):
if self._check_filename(filename, doc_id):
if doc_id:
document_id = filename
else:
doc_entity = self.doc_manager.get_doc_by_prop('file_name', self.norm_path(filename))
if not doc_entity:
logger.error("{0} could not be found in local database".format(self.norm_path(filename)))
return
document_id = doc_entity['id']
self._list_reference_material(document_id)
def reference_download_action(self, filename, doc_id, get_all, path):
if not path:
path = self.path
if self._check_filename(filename, doc_id):
if doc_id:
document_id = filename
else:
doc_entity = self.doc_manager.get_doc_by_prop('file_name', self.norm_path(filename))
if not doc_entity:
logger.error("{0} could not be found in local database".format(self.norm_path(filename)))
return
document_id = doc_entity['id']
table = self._list_reference_material(document_id)
tablemap = {}
for row in table:
tablemap.update({row[0]: {'name': row[1], 'id': row[2]}})
if len(tablemap) > 0:
chosen_list = []
if get_all:
chosen_list = tablemap.values()
while not len(chosen_list) > 0:
prompt_message = 'Reference materials to download: (Separate indices by comma) '
# Python 2
# choice = raw_input(prompt_message)
# End Python 2
# Python 3
choice = input(prompt_message)
# End Python 3
try:
choices = (choice.replace(", ",",")).split(",")
for index in choices:
chosen_list.append(tablemap[int(index)])
except ValueError:
logger.error('Some unexpected, non-integer value was included')
chosen_list = []
except KeyError:
logger.error('An index not in the list was included')
chosen_list = []
for reference in chosen_list:
response = self.api.document_download_reference(document_id, reference['id'])
if response.status_code == 404:
logger.error("{0} ({1}) not found".format(reference['name'], reference['id']))
elif response.status_code == 200:
self._download_reference(response, path, reference['name'])
else:
logger.info("{0} ({1}) could not be downloaded".format(reference['name'], reference['id']))
logger.error(response.json()['messages'])
def reference_remove_action(self, filename, doc_id, remove_all):
if self._check_filename(filename, doc_id):
if doc_id:
document_id = filename
else:
doc_entity = self.doc_manager.get_doc_by_prop('file_name', self.norm_path(filename))
if not doc_entity:
logger.error("{0} could not be found in local database".format(self.norm_path(filename)))
return
document_id = doc_entity['id']
table = self._list_reference_material(document_id)
tablemap = {}
for row in table:
tablemap.update({row[0]: {'name': row[1], 'id': row[2]}})
if len(tablemap) > 0:
chosen_list = []
if remove_all:
chosen_list = tablemap.values()
while not len(chosen_list) > 0:
prompt_message = 'Reference materials to remove: (Separate indices by comma) '
# Python 2
# choice = raw_input(prompt_message)
# End Python 2
# Python 3
choice = input(prompt_message)
# End Python 3
try:
choices = (choice.replace(", ",",")).split(",")
for index in choices:
chosen_list.append(tablemap[int(index)])
except ValueError:
logger.error('Some unexpected, non-integer value was included')
chosen_list = []
except KeyError:
logger.error('An index not in the list was included')
chosen_list = []
for reference in chosen_list:
response = self.api.document_remove_reference(document_id, reference['id'])
if response.status_code == 404:
logger.error("{0} ({1}) not found".format(reference['name'], reference['id']))
elif response.status_code == 204:
logger.info("{0} ({1}) deleted".format(reference['name'], reference['id']))
else:
logger.info("{0} ({1}) could not be deleted".format(reference['name'], reference['id']))
logger.error(response.json()['messages'])
def _check_filename(self, filename, doc_id):
if doc_id:
#if document ID is specified, no need to validate the filename. Just send the ID and let the API handle the error if the ID doesn't exist
return True
if os.path.isfile(filename):
foundfile = self.get_doc_filenames_in_path(filename)
if len(foundfile) == 0:
logger.warning(filename+" has not been added yet.")
return False
elif len(foundfile) == 1:
return True
else:
logger.warning("Only manage reference material on one file at a time")
return False
elif os.path.isdir(filename):
logger.error(filename+" is not a file")
return False
else:
logger.error(filename+" could not be found")
return False
def _list_reference_material(self, document_id):
response = self.api.document_list_reference(document_id)
if response.status_code == 404:
logger.warning("The document could not be found in Lingotek.")
return []
elif response.status_code != 200:
logger.info("The reference material list could not be retrieved")
logger.error(response.json()['messages'])
return []
else:
if response.json()['properties']['size'] > 0:
materials = response.json()['entities']
index = 0
table = []
for material in materials:
row = [index, material['properties']['name'], material['properties']['id']]
if 'description' in material['properties'] and material['properties']['description']:
row.append(material['properties']['description'])
table.append(row)
index += 1
print(tabulate(table, headers=['','Name','ID','Description']))
return table
else:
print("There is no reference material attached to this document")
return []
def _download_reference(self, response, path, name):
filepath = os.path.join(path, name)
if os.path.isfile(filepath):
if not yes_no_prompt("There is already a file {0}. Would you like to overwrite it?".format(filepath), default_yes=False):
return
try:
with open(filepath, 'wb') as file:
for chunk in response.iter_content(1024):
file.write(chunk)
except IOError as e:
print(e.errno)
print(e)
return
logger.info("Downloaded {0}".format(filepath))
| 2.375
| 2
|
alloy/robot/baxter.py
|
CMU-TBD/alloy
| 0
|
12780439
|
# Copyright - Transporation, Bots, and Disability Lab - Carnegie Mellon University
# Released under MIT License
"""
Common Operations/Codes that are re-written on Baxter
"""
import numpy as np
from pyquaternion import Quaternion
from alloy.math import *
__all__ = [
'convert_joint_angles_to_numpy','transform_pose_into_rotation_matrix',
'calculate_pose_difference'
]
def convert_joint_angles_to_numpy(joint_angles, joint_names):
"""Convert the dictionary based joint angles given by baxter interface to
a numpy array according to the given joint names
"""
arr = np.zeros(7)
for i, key in enumerate(joint_names):
arr[i] = joint_angles[key]
return arr
def transform_pose_into_rotation_matrix(pose_np):
#pose_np = pose_to_numpy(pose)
translation_comp = pose_np[0:3]
trans_mat = Quaternion(pose_np[3:]).transformation_matrix
trans_mat[0:3,3] = translation_comp
return trans_mat
def calculate_pose_difference(p1, p2):
"""Calculate the pose error from p1 to p2. Note the resulting
    error is calculated in the frame of p1 and not the base frame,
    so do p[0:3] = p[0:3] - np.cross(x[0:3], p[3:])
"""
error = np.zeros(6,)
#the position error is just the difference in position
error[0:3] = p2[0:3] - p1[0:3]
#orientation error is more tricky
desire_q = Quaternion(p2[3:])
error_q = desire_q * Quaternion(p1[3:]).inverse
error[3:] = error_q.axis * error_q.angle
return error
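# A quick sanity check of calculate_pose_difference, assuming the 7-element pose layout
# [x, y, z, qw, qx, qy, qz] implied by the slicing above (values are illustrative):
#   p1 = np.array([0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])   # identity orientation at the origin
#   p2 = np.array([0.1, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])   # 10 cm along x, same orientation
#   calculate_pose_difference(p1, p2)  # -> array([0.1, 0., 0., 0., 0., 0.])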
#transform_quaternion = Quaternion(pose_np[3:]). Quaternion(pose_np[3:])
# def calculate_pose_difference(p1, p2):
# """Calculate the error from p1 to p2. Note the resulting
# error is calculated in the frame of p1 and not the base frame
# do p[0:3] = p[0:3] - np.cross(x[0:3],p[3:])
# """
# mat1 = transform_pose_into_rotation_matrix(p1)
# mat2 = transform_pose_into_rotation_matrix(p2)
# error = calculate_error_between_two_transformation_matrix(mat1, mat2)
# return calculate_error_between_two_transformation_matrix(mat1, mat2)
| 2.53125
| 3
|
scripts/kitty_b64dump.py
|
SkyLeach/poweruser_tools
| 1
|
12780440
|
<filename>scripts/kitty_b64dump.py
"""
File: kitty_b64dump.py
Author: SkyLeach
Email: <EMAIL>
Github: https://github.com/skyleach/poweruser_tools
Description: Simple script to dump base64 kitty-encoded data to terminals that can handle the kitty graphics output format.
"""
import sys
from base64 import standard_b64encode
def serialize_gr_command(cmd, payload=None):
cmd = ','.join('{}={}'.format(k, v) for k, v in cmd.items())
ans = []
w = ans.append
w(b'\033_G'), w(cmd.encode('utf8'))
if payload:
w(b';')
w(payload)
w(b'\033\\')
return b''.join(ans)
def write_chunked(cmd, data):
data = standard_b64encode(data)
while data:
chunk, data = data[:4096], data[4096:]
m = 1 if data else 0
cmd['m'] = m
        # serialize_gr_command returns bytes; on Python 3 they must go through
        # sys.stdout.buffer (writing bytes to the text-mode sys.stdout raises TypeError)
        sys.stdout.buffer.write(serialize_gr_command(cmd, chunk))
sys.stdout.flush()
cmd.clear()
with open(sys.argv[-1], 'rb') as f:
write_chunked({'a': 'T', 'f': 100}, f.read())
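# Hedged usage note: the last CLI argument is read as a file path and transmitted with the
# kitty graphics escape codes ('a': 'T' = transmit and display, 'f': 100 = PNG data), so a
# typical invocation (path is a placeholder) looks like:
#   python3 kitty_b64dump.py some_image.png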
| 2.75
| 3
|
3.2.tree_populating.py
|
luvalenz/time-series-variability-tree
| 1
|
12780441
|
<filename>3.2.tree_populating.py
import argparse
import sys
import os
import time_series_utils
from subsequence_tree import SubsequenceTree
from subsequence_tree_2 import BottomUpSubsequenceTree
from subsequence_tree_3 import BottomUpSubsequenceTree as Tree3
from subsequence_tree_4 import KMedioidsSubsequenceTree
import pickle
import dill
parser = argparse.ArgumentParser(
description='Build subsequence tree')
parser.add_argument('--dataset_root', default='', type=str)
parser.add_argument('--input_paths_file', default='', type=str)
parser.add_argument('--class_table_path', default='', type=str)
parser.add_argument('--tree_path', required=True, type=str)
parser.add_argument('--part', required=True, type=int)
parser.add_argument('--n_parts', required=True, type=int)
args = parser.parse_args(sys.argv[1:])
dataset_root = args.dataset_root
input_paths_file = args.input_paths_file
class_table_path = args.class_table_path
tree_path = args.tree_path
part = args.part
n_parts = args.n_parts
print('part{} of {}'.format(part, n_parts))
if input_paths_file != '':
print('Reading file paths')
with open(input_paths_file, 'r') as f:
lightcurves_paths = f.readlines()
print('DONE')
elif class_table_path != '':
class_table = time_series_utils.read_class_table(class_table_path)
lightcurves_paths = class_table['path'].values
lightcurves_paths = [os.path.join(dataset_root, p) for p in lightcurves_paths]
print('Reading dataset...')
dataset = time_series_utils.read_files(lightcurves_paths, part, n_parts)
print('DONE')
with open(tree_path, 'rb') as f:
tree = dill.load(f)
dataset = (lc for lc in dataset if lc.total_time >= tree.time_window)
output_path = tree_path + '.part{}of{}'.format(part, n_parts)
print(output_path)
tree.populate(dataset)
print('DONE')
print('Saving tree...')
with open(output_path, 'wb') as f:
dill.dump(tree, f)
print('DONE')
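# Hedged example invocation (paths are placeholders), populating part 3 of 10 of a
# previously built tree pickled with dill:
#   python 3.2.tree_populating.py --input_paths_file lc_paths.txt --tree_path tree.dill --part 3 --n_parts 10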
| 2.359375
| 2
|
impersonate/urls.py
|
saifrim/django-impersonate
| 8
|
12780442
|
from django.conf.urls import url
from .views import impersonate, list_users, search_users, stop_impersonate
try:
# Django <=1.9
from django.conf.urls import patterns
except ImportError:
patterns = None
urlpatterns = [
url(r'^stop/$',
stop_impersonate,
name='impersonate-stop'),
url(r'^list/$',
list_users,
{'template': 'impersonate/list_users.html'},
name='impersonate-list'),
url(r'^search/$',
search_users,
{'template': 'impersonate/search_users.html'},
name='impersonate-search'),
url(r'^(?P<uid>.+)/$',
impersonate,
name='impersonate-start'),
]
if patterns is not None:
urlpatterns = patterns('', *urlpatterns)
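# Hedged example: with these patterns included under an assumed "impersonate/" prefix in the
# project urlconf, reverse('impersonate-start', kwargs={'uid': 42}) resolves to '/impersonate/42/'.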
| 1.875
| 2
|
bidwire/scrapers/massgov/results_page_scraper.py
|
RagtagOpen/bidwire
| 5
|
12780443
|
<filename>bidwire/scrapers/massgov/results_page_scraper.py
from lxml import etree, html
from utils import ensure_absolute_url
SITE_ROOT = 'https://www.mass.gov'
def scrape_results_page(page_str, xpath_list):
"""Scrapes HTML page and returns dictionary of URL => document title
Args:
page_str -- the entire HTML page as a string
xpath_list -- list of xpath expressions (section and link element)
Returns:
document_ids -- a dictionary of relative URL path => description
"""
assert xpath_list and len(xpath_list) > 1
document_ids = {}
tree = html.fromstring(page_str)
document_list = tree.xpath(xpath_list[0])
doc_xpath = xpath_list[1]
for doc in document_list:
elems = doc.xpath(doc_xpath)
if elems:
url = ensure_absolute_url(SITE_ROOT, elems[0].get('href'))
document_ids[url] = elems[0].text.strip()
return document_ids
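# Hedged example (the HTML and xpath expressions are illustrative, not the real mass.gov selectors):
#   page = '<div class="results"><a href="/doc/1">Bid One</a></div>'
#   scrape_results_page(page, ['//div[@class="results"]', './/a'])
# Assuming utils.ensure_absolute_url prefixes relative paths with SITE_ROOT, this yields
#   {'https://www.mass.gov/doc/1': 'Bid One'}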
| 3.078125
| 3
|
rump/router/__init__.py
|
bninja/rump
| 6
|
12780444
|
import contextlib
import logging
import re
import pilo
from .. import exc, Request, parser, Rule, Rules, Upstream
logger = logging.getLogger(__name__)
class Dynamic(pilo.Form):
"""
Represents the dynamic components:
- settings and
- selection rules
    of a ``rump.Router``. Useful if you want to centrally control (e.g. in
    zookeeper, redis, postgresql, etc.) those aspects of upstream selection.
"""
#: A string identifying the type of dynamic (e.g. "redis", "zookeeper", etc).
_type_ = pilo.fields.Type.abstract()
def can_connect(self, router):
"""
        :param router: The ``rump.Router`` associated with this dynamic.
:return: True if it can connect, otherwise False.
"""
raise NotImplementedError
def connect(self, router):
"""
Connect to dynamic.
        :param router: The ``rump.Router`` associated with this dynamic.
"""
raise NotImplementedError
def is_connected(self, router):
"""
Check if connected to dynamic.
        :param router: The ``rump.Router`` associated with this dynamic.
        :return: True if connected, otherwise False.
"""
raise NotImplementedError
def disconnect(self, router):
"""
Disconnect from dynamic.
        :param router: The ``rump.Router`` associated with this dynamic.
"""
raise NotImplementedError
def load(self, router, cxn):
"""
Load remote dynamic settings.
        :param router: The ``rump.Router`` associated with this dynamic.
"""
raise NotImplementedError
def save(self, router, cxn):
"""
Save local changes to remote dynamic.
        :param router: The ``rump.Router`` associated with this dynamic.
"""
raise NotImplementedError
def watch(self, router, callback):
"""
        Watch for changes and invoke `callback` for `router` when they occur.
        :param router: The ``rump.Router`` associated with this dynamic.
:param callback: Callback taking `router` as its single argument.
"""
raise NotImplementedError
class Router(pilo.Form):
"""
Encapsulates:
    - settings (e.g. `Router.hosts`)
- request schema (i.e. `Router.request_type`)
- upstream selection rules (e.g. `Router.rules`, `Router.overrides`)
and a dynamic (i.e. `Router.dynamic`) for remote control.
"""
#: Name of this router
name = pilo.fields.String()
#: Whether this router is enabled.
enabled = pilo.fields.Boolean(default=True).tag('dynamic')
#: Host patterns whose requests should be handle by this router.
hosts = pilo.fields.List(pilo.fields.String(), default=list).tag('dynamic')
@hosts.field.parse
def hosts(self, path):
value = path.primitive()
if not isinstance(value, basestring):
if not hasattr(value, 'match'):
self.ctx.errors.invalid('not a string or regex')
return pilo.ERROR
return value
try:
return re.compile(value)
except re.error, ex:
self.ctx.errors.invalid('{0} - {1}'.format(str(ex), value))
return pilo.ERROR
#: Whether routing rules should be compiled.
compile_rules = pilo.fields.Boolean(default=True).tag('dynamic')
#: Whether to automatically disable failing rules.
auto_disable_rules = pilo.fields.Boolean(default=True).tag('dynamic')
#: Upstream to use when a request matches *no* routing rules.
default_upstream = pilo.fields.String(default=None).tag('dynamic')
@default_upstream.parse
def default_upstream(self, path):
value = path.primitive()
if value is None:
return value
if isinstance(value, Upstream):
return value
try:
return self.upstream_parser(value)
except exc.ParseException, ex:
self.ctx.errors.invalid(str(ex))
return pilo.ERROR
#: Type to use for representing requests.
request_type = pilo.fields.Code(default=lambda: Request)
#: Upstream selection rules.
rules = pilo.fields.List(pilo.fields.String(), ignore=None)
@rules.field.parse
def rules(self, path):
value = path.primitive()
if isinstance(value, (Rule, Rule.compiled_type)):
value = str(value)
try:
return self.rule_parser(value)
except exc.ParseException, ex:
self.ctx.errors.invalid(str(ex))
return pilo.ERROR
@rules.default
def rules(self):
return Rules(
auto_disable=self.auto_disable_rules,
compile=self.compile_rules,
)
@rules.munge
def rules(self, value):
return Rules(
value,
auto_disable=self.auto_disable_rules,
compile=self.compile_rules,
)
#: Upstream selection rules.
overrides = pilo.fields.List(pilo.fields.String(), ignore=None).tag('dynamic')
@overrides.field.parse
def overrides(self, path):
value = path.primitive()
if isinstance(value, Rule):
return path.value
try:
return self.rule_parser(value)
except exc.ParseException, ex:
self.ctx.errors.invalid(str(ex))
return pilo.ERROR
@overrides.default
def overrides(self):
return Rules(
auto_disable=self.auto_disable_rules,
compile=self.compile_rules,
)
@overrides.munge
def overrides(self, value):
return Rules(
value,
auto_disable=self.auto_disable_rules,
compile=self.compile_rules,
)
#: Dynamic configuration source.
dynamic = pilo.fields.PolymorphicSubForm(Dynamic._type_, default=None)
@property
def upstream_parser(self):
return parser.for_upstream()
@property
def rule_parser(self):
return parser.for_rule(self.request_type)
# match
def match_me(self, request):
"""
Should this router do upstream selection for `request`?
:param request: The request (e.g. ``rump.wsgi.Request``) to evaluate.
:return: True if it should, otherwise False.
"""
for host in self.hosts:
m = host.match(request.host)
if m:
return m
def match_upstream(self, request):
"""
        Determines the ``rump.Upstream`` for a `request`.
        :param request: An instance of `Router.request_type` to evaluate.
        :return: ``rump.Upstream`` selected or None if there is none.
"""
return (
self.overrides.match(request) or
self.rules.match(request) or
self.default_upstream
)
# dynamic
@property
def is_dynamic(self):
return self.dynamic is not None and self.dynamic.can_connect(self)
def connect(self):
@contextlib.contextmanager
def _disconnect():
try:
yield
finally:
self.disconnect()
if not self.is_dynamic:
raise exc.RouterNotDynamic(self)
self.dynamic.connect(self)
return _disconnect()
@property
def is_connected(self):
return self.is_dynamic and self.dynamic.is_connected(self)
def disconnect(self):
if self.is_connected:
self.dynamic.disconnect(self)
def load(self):
if not self.is_connected:
raise exc.RouterNotConnected(self)
self.dynamic.load(self)
def save(self):
if not self.is_connected:
raise exc.RouterNotConnected(self)
self.dynamic.save(self)
def watch(self, callback):
if not self.is_connected:
raise exc.RouterNotConnected(self)
return self.dynamic.watch(self, callback)
try:
from .etcd import EtcD
except ImportError, ex:
logger.info('etcd dynamic unavailable - %s', ex)
try:
from .redis import Redis
except ImportError, ex:
logger.info('redis dynamic unavailable - %s', ex)
try:
from .zookeeper import Zookeeper
except ImportError, ex:
logger.info('zookeeper dynamic unavailable - %s', ex)
| 2.34375
| 2
|
docassemble_base/tests/test3.py
|
abramsmatthew/adpllc-test
| 1
|
12780445
|
<reponame>abramsmatthew/adpllc-test<filename>docassemble_base/tests/test3.py<gh_stars>1-10
#! /usr/bin/python
import ast
import sys
mycode = """\
if b < 6:
a = 77
c = 72
else:
a = 66
sys.exit()
"""
mycode = """\
user.foobar.name.last = 77
if b < 6:
a = 77
c = 72
else:
a = 66
"""
class myextract(ast.NodeVisitor):
def __init__(self):
self.stack = []
def visit_Name(self, node):
self.stack.append(node.id)
ast.NodeVisitor.generic_visit(self, node)
def visit_Attribute(self, node):
self.stack.append(node.attr)
ast.NodeVisitor.generic_visit(self, node)
class myvisitnode(ast.NodeVisitor):
def __init__(self):
self.names = {}
self.targets = {}
        self.depth = 0
def generic_visit(self, node):
#print ' ' * self.depth + type(node).__name__
self.depth += 1
ast.NodeVisitor.generic_visit(self, node)
self.depth -= 1
def visit_Assign(self, node):
for key, val in ast.iter_fields(node):
if key == 'targets':
for subnode in val:
crawler = myextract()
crawler.visit(subnode)
self.targets[".".join(reversed(crawler.stack))] = 1
self.depth += 1
ast.NodeVisitor.generic_visit(self, node)
self.depth -= 1
def visit_Name(self, node):
self.names[node.id] = 1
ast.NodeVisitor.generic_visit(self, node)
# def visit_Assign(self, node):
# for key, val in ast.iter_fields(node):
# if key == 'targets':
# for subnode in val:
# if type(subnode).__name__ == 'Name':
# self.targets[subnode.id] = 1
# elif type(subnode).__name__ == 'Attribute':
# print "Attribute:"
# for key, val in ast.iter_fields(subnode):
# print str(key) + " " + str(val)
# ast.NodeVisitor.generic_visit(self, node)
myvisitor = myvisitnode()
t = ast.parse(mycode)
# print ast.dump(t)
# sys.exit()
myvisitor.visit(t)
predefines = set(globals().keys()) | set(locals().keys())
print "Targets:"
print [item for item in myvisitor.targets.keys() if item not in predefines]
definables = set(predefines) | set(myvisitor.targets.keys())
print "Names:"
print [item for item in myvisitor.names.keys() if item not in definables]
# print "Globals:"
# print globals().keys()
# print "Locals:"
# print locals().keys()
# Module(body=[Assign(targets=[Attribute(value=Attribute(value=Attribute(value=Name(id='user', ctx=Load()), attr='foobar', ctx=Load()), attr='name', ctx=Load()), attr='last', ctx=Store())], value=Num(n=77))])
| 2.71875
| 3
|
test/index2.py
|
ChenWei-python13/python13_001
| 0
|
12780446
|
def index2():
return "index3"
| 1.34375
| 1
|
main.py
|
Alice-OSENSE/feature_err_analysis
| 0
|
12780447
|
import cv2
import matplotlib.pyplot as plt
import numpy as np
from pathlib import Path
import scipy.io
from scipy import optimize
from feature_func import *
from preprocess import *
from utils import *
def fit_data(gt_count, feature_data, function):
return optimize.curve_fit(function, feature_data, gt_count)
def plot_data(gt_count, feature_data, test_func=None):
plt.scatter(feature_data, gt_count, label='raw data')
    if test_func is not None:
params, params_var = fit_data(gt_count, feature_data, test_func)
x_linspace = np.linspace(min(feature_data), max(feature_data), num=len(feature_data))
plt.plot(x_linspace, test_func(x_linspace, *params), label='Fitted quadratic polynomial')
def test_func(x, a2, a1, a0):
return a2 * np.power(x, 2) + a1 * np.power(x, 1) + a0
def retrieve_data(image_root_path, mod=10):
# processing ucsd pedestrian dataset
sub_folder_index = 0
image_count = 0
images = []
gt_count_in_images = []
for sub_folder in image_root_path.glob('**/'):
print(sub_folder.name.split('.')[0].split('_')[-1])
if sub_folder_index == 0 or sub_folder.name.split('_')[0] != 'vidf1' or int(sub_folder.name.split('.')[0].split('_')[-1]) > 9:
sub_folder_index += 1
continue
print(sub_folder.name)
mat_path = annotation_root_path / (sub_folder.name.split('.')[0] + '_frame_full.mat')
mat = read_mat(mat_path)
for f in sub_folder.iterdir():
if not f.is_file():
continue
frame_index = int(f.name[-7:-4]) - 1
if image_count % mod == 0:
img = cv2.imread(str(f), 0)
images.append(img)
gt_count_in_images.append(mat['frame'][0][frame_index][0][0][0].shape[0])
image_count += 1
sub_folder_index += 1
return images, gt_count_in_images
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
background_image_path = '/home/osense-office/Documents/dataset/surveillance/ucsdpeds/background.png'
background_image = cv2.imread(background_image_path, 0)
image_root_dir = '/home/osense-office/Documents/dataset/surveillance/ucsdpeds/vidf'
image_root_path = Path(image_root_dir)
annotation_root_dir = '/home/osense-office/Documents/dataset/surveillance/ucsdpeds/vidf-cvpr'
annotation_root_path = Path(annotation_root_dir)
pmap = get_pmapxy('/home/osense-office/Documents/dataset/surveillance/ucsdpeds/vidf-cvpr/vidf1_33_dmap3.mat')
images, gt_count_in_images = retrieve_data(image_root_path, mod=30)
print(len(images))
edited = get_abs_diff(images, background_image)
blurred = get_foreground_mask(edited, threshold=25)
seg_peri = get_seg_perimeter(blurred)
# perspective_seg_size = get_seg_size(edited, pmapxy=pmap)
plot_data(gt_count_in_images, seg_peri, test_func)
plt.legend(loc='best')
plt.title(label='segmentation perimeter against people count')
plt.show()
| 2.59375
| 3
|
fp_demo/functional2.py
|
AegirAexx/python-sandbox
| 0
|
12780448
|
<gh_stars>0
""" Playing around with filter higher order function and lambda expresions. """
from pprint import pprint # Nicer formatting when printing tuples/lists
from scientist import scientists
# Using one lambda in filter.
WINNERS = tuple(filter(lambda x: x.nobel is True, scientists))
print('---- Nobel Winners: -----')
pprint(WINNERS)
print()
print()
# Using two lambdas in one filter.
PHYSICS_WINNERS = tuple(
filter(lambda p: p.field == 'physics' and p.nobel, scientists))
print('---- Physics Nobel Winners: ------')
pprint(PHYSICS_WINNERS)
def physics_filter(sci):
""" Defining a filter function. """
return sci.field == 'physics'
def nobel_filter(sci):
""" Defining a filter function. """
return sci.nobel
print()
print()
print('------ with stacked filter functions ------')
# Stacking filters together to make "blocks" that can be used many times.
PHY_WIN = tuple(filter(physics_filter, filter(nobel_filter, scientists)))
pprint(PHY_WIN)
print()
print()
# List comprehension - Filter'ish
LIST_COMP = tuple(
x for x in scientists if x.nobel is True and x.field == 'chemistry')
print('------ with list comprehension ------')
pprint(LIST_COMP)
| 3.40625
| 3
|
batcher.py
|
charelF/ABM
| 0
|
12780449
|
# Code for parallelization of the sensitivity analysis.
# This is accompanied by para.sh
import sys
import pandas as pd
from model import RegionModel
def run(i):
'''
performs a single simulation of a system
'''
m = RegionModel(int_trade, *df.iloc[i, 1:7])
for k in range(max_steps):
m.step()
# Get data
m.compute_statistics()
m.datacollector.collect(m)
outcomes = m.datacollector.get_model_vars_dataframe()
with open(r"data_int_on.csv","a") as f:
f.write("{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}\n".format(i, int_trade,*df.iloc[i, 1:7], *outcomes.iloc[0]))
max_steps = 1000
df = pd.read_csv('out.csv')
batches = int(sys.argv[1])
batch = int(sys.argv[2])
int_trade = True if int(sys.argv[3]) == 1 else False
runs_per_batch = int(len(df.index)/batches) + 1
for i in range(batch * runs_per_batch, (batch + 1) * runs_per_batch):
run(i)
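# Hedged usage note, based on the argv parsing above (typically driven by para.sh):
#   python batcher.py 10 3 1
# splits out.csv into 10 batches, runs batch index 3 with international trade enabled,
# and appends one result row per run to data_int_on.csv.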
| 2.6875
| 3
|
OOP/Exams/Exam_16_august_2020/01. Structure_Skeleton (4)/project/system.py
|
petel3/Softuni_education
| 2
|
12780450
|
<reponame>petel3/Softuni_education<gh_stars>1-10
from project.hardware.hardware import Hardware
from project.hardware.heavy_hardware import HeavyHardware
from project.hardware.power_hardware import PowerHardware
from project.software.express_software import ExpressSoftware
from project.software.light_software import LightSoftware
class System:
_hardware = []
_software = []
@staticmethod
def register_power_hardware(name: str, capacity: int, memory: int):
System._hardware.append(PowerHardware(name, capacity, memory))
@staticmethod
def register_heavy_hardware(name: str, capacity: int, memory: int):
System._hardware.append(HeavyHardware(name, capacity, memory))
@staticmethod
def register_express_software(hardware_name: str, name: str, capacity_consumption: int, memory_consumption: int):
try:
hardware = [h for h in System._hardware if h.name == hardware_name][0]
software = ExpressSoftware(name, capacity_consumption, memory_consumption)
hardware.install(software)
System._software.append(software)
except IndexError:
return "Hardware does not exist"
except:
raise Exception("Software cannot be installed")
@staticmethod
def register_light_software(hardware_name: str, name: str, capacity_consumption: int, memory_consumption: int):
try:
hardware = [h for h in System._hardware if h.name == hardware_name][0]
software = LightSoftware(name, capacity_consumption, memory_consumption)
hardware.install(software)
System._software.append(software)
except IndexError:
return "Hardware does not exist"
except:
raise Exception("Software cannot be installed")
@staticmethod
def release_software_component(hardware_name: str, software_name: str):
try:
hardware = [h for h in System._hardware if h.name == hardware_name][0]
software = [s for s in System._software if s.name == software_name][0]
hardware.uninstall(software)
except IndexError:
return "Some of the components do not exist"
@staticmethod
def analyze():
memory_used = 0
total_memory_used = 0
capacity_used = 0
total_capacity_used = 0
for hardware in System._hardware:
total_memory_used += hardware.memory
total_capacity_used += hardware.capacity
memory_used += hardware.total_memory
capacity_used += hardware.total_capacity
return f"System Analysis\n"\
f"Hardware Components: {len(System._hardware)}\n"\
f"Software Components: {len(System._software)}\n"\
f"Total Operational Memory: {memory_used} / {total_memory_used}\n"\
f"Total Capacity Taken: {capacity_used} / {total_capacity_used}"
@staticmethod
def system_split():
result = []
for hardware in System._hardware:
soft_components = [s.name for s in hardware.software_components]
info = f"Hardware Component - {hardware.name}\n" \
f"Express Software Components: {len([s for s in hardware.software_components if s.software_type == 'Express'])}\n" \
f"Light Software Components: {len([s for s in hardware.software_components if s.software_type == 'Light'])}\n" \
f"Memory Usage: {hardware.total_memory} / {hardware.memory}\n" \
f"Capacity Usage: {hardware.total_capacity} / {hardware.capacity}\n" \
f"Type: {hardware.hardware_type}\n" \
f"Software Components: {', '.join(soft_components) if soft_components else None}"
result.append(info)
return '\n'.join(result).strip()
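# Hypothetical driver sketch based on the static method signatures above (hardware/software
# names and numbers are made up; output format follows analyze()):
#   System.register_power_hardware("HDD", 200, 200)
#   System.register_express_software("HDD", "Windows", 20, 50)
#   print(System.analyze())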
| 2.734375
| 3
|